From 67ea635fe037707924417893674e94275e849d7e Mon Sep 17 00:00:00 2001 From: aishanshan Date: Thu, 30 Mar 2023 03:26:43 +0000 Subject: [PATCH] push dsv0.8.2 version --- CODEOWNERS | 57 +- MANIFEST.in | 3 + MANIFEST_win.in | 1 + README.md | 247 +- accelerator/__init__.py | 4 + accelerator/abstract_accelerator.py | 236 ++ accelerator/cuda_accelerator.py | 254 ++ accelerator/real_accelerator.py | 105 + azure/README.md | 2 +- benchmarks/__init__.py | 1 + benchmarks/communication/README.md | 75 + benchmarks/communication/__init__.py | 1 + benchmarks/communication/all_gather.py | 159 ++ benchmarks/communication/all_reduce.py | 113 + benchmarks/communication/all_to_all.py | 134 ++ benchmarks/communication/broadcast.py | 114 + benchmarks/communication/constants.py | 10 + benchmarks/communication/pt2pt.py | 132 + benchmarks/communication/run_all.py | 49 + benchmarks/communication/utils.py | 220 ++ benchmarks/inference/bert-bench.py | 92 + benchmarks/inference/collect_results.py | 147 ++ benchmarks/inference/gpt-bench.py | 124 + benchmarks/inference/requirements.txt | 1 + benchmarks/inference/run_model.sh | 36 + benchmarks/inference/sweep.sh | 41 + bin/deepspeed | 7 +- bin/deepspeed.pt | 7 +- bin/ds | 0 bin/ds_bench | 17 + bin/ds_elastic | 0 bin/ds_report | 0 bin/ds_ssh | 0 bin/dsr | 1 + build_win.bat | 19 + csrc/adagrad/cpu_adagrad.cpp | 42 +- csrc/adam/cpu_adam.cpp | 40 +- csrc/adam/multi_tensor_adam.cu | 4 + csrc/aio/common/deepspeed_aio_utils.cpp | 5 +- csrc/aio/py_lib/deepspeed_pin_tensor.cpp | 43 + csrc/aio/py_lib/deepspeed_pin_tensor.h | 24 + csrc/aio/py_lib/deepspeed_py_aio_handle.cpp | 14 +- csrc/aio/py_lib/deepspeed_py_aio_handle.h | 7 + csrc/aio/py_lib/py_ds_aio.cpp | 3 + csrc/aio/py_test/aio_bench_perf_sweep.py | 3 +- csrc/aio/py_test/ds_aio_basic.py | 8 +- csrc/aio/py_test/ds_aio_handle.py | 37 +- csrc/aio/py_test/parse_aio_stats.py | 1 - csrc/aio/py_test/perf_sweep_utils.py | 2 + csrc/aio/py_test/run_read_sweep.sh | 0 csrc/aio/py_test/run_write_sweep.sh | 0 
csrc/aio/py_test/test_ds_aio.py | 8 +- csrc/aio/py_test/test_ds_aio_utils.py | 2 - csrc/aio/py_test/validate_async_io.py | 3 +- csrc/common/custom_cuda_kernel.cu | 8 +- csrc/includes/StopWatch.h | 4 + csrc/includes/Timer.h | 3 + csrc/includes/context.h | 4 + csrc/includes/conversion_utils.h | 626 +++++ csrc/includes/cpu_adagrad.h | 53 +- csrc/includes/cpu_adam.h | 64 +- csrc/includes/cublas_wrappers.h | 4 + csrc/includes/custom_cuda_layers.h | 93 +- csrc/includes/dequantization_utils.h | 176 ++ csrc/includes/dropout.h | 4 + csrc/includes/ds_kernel_utils.h | 49 + csrc/includes/ds_transformer_cuda.h | 4 + csrc/includes/feed_forward.h | 4 + csrc/includes/gelu.h | 4 + csrc/includes/gemm_test.h | 3 + csrc/includes/general_kernels.h | 4 + csrc/includes/memory_access_utils.h | 1114 +++++++++ csrc/includes/normalize_layer.h | 4 + csrc/includes/quantization.h | 65 + csrc/includes/quantization_utils.h | 467 ++++ csrc/includes/quantizer.h | 4 + csrc/includes/reduction_utils.h | 591 +++++ csrc/includes/simd.h | 16 +- csrc/includes/softmax.h | 4 + csrc/includes/strided_batch_gemm.h | 4 + csrc/includes/type_shim.h | 4 + csrc/lamb/fused_lamb_cuda_kernel.cu | 6 +- csrc/quantization/dequantize.cu | 71 + .../quantization/fake_quantizer.cu | 352 ++- csrc/quantization/pt_binding.cpp | 93 +- csrc/quantization/quantize.cu | 150 ++ csrc/random_ltd/gather_scatter.cu | 185 ++ csrc/random_ltd/pt_binding.cpp | 215 ++ csrc/random_ltd/slice_attn_masks.cu | 127 + csrc/random_ltd/token_sort.cu | 193 ++ csrc/spatial/csrc/opt_bias_add.cu | 148 ++ csrc/spatial/csrc/pt_binding.cpp | 111 + csrc/spatial/includes/spatial_cuda_layers.h | 36 + csrc/transformer/cublas_wrappers.cu | 4 + csrc/transformer/dropout_kernels.cu | 4 + csrc/transformer/ds_transformer_cuda.cpp | 1 - csrc/transformer/gelu_kernels.cu | 4 + csrc/transformer/general_kernels.cu | 4 + .../inference/csrc/apply_rotary_pos_emb.cu | 81 +- .../inference/csrc/apply_rotary_pos_emb.hip | 81 +- csrc/transformer/inference/csrc/dequantize.cu | 92 
+- .../transformer/inference/csrc/dequantize.hip | 92 +- csrc/transformer/inference/csrc/gelu.cu | 910 ++++--- csrc/transformer/inference/csrc/gelu.hip | 910 ++++--- csrc/transformer/inference/csrc/layer_norm.cu | 529 ++++ .../transformer/inference/csrc/layer_norm.hip | 531 +++++ .../transformer/inference/csrc/pt_binding.cpp | 1403 ++++++++--- .../inference/csrc/pt_binding_hip.cpp | 1403 ++++++++--- csrc/transformer/inference/csrc/relu.cu | 63 + csrc/transformer/inference/csrc/relu.hip | 65 + csrc/transformer/inference/csrc/softmax.cu | 156 +- csrc/transformer/inference/csrc/softmax.hip | 156 +- .../transformer/inference/csrc/transform.cu | 491 ++-- .../transformer/inference/csrc/transform.hip | 491 ++-- .../inference/includes/inference_context.h | 115 +- .../includes/inference_context_hip.h | 115 +- .../includes/inference_cublas_wrappers.h | 4 + .../includes/inference_cublas_wrappers_hip.h | 4 + .../includes/inference_cuda_layers.h | 222 ++ .../inference/includes/inference_hip_layers.h | 223 ++ csrc/transformer/normalize_kernels.cu | 12 + csrc/transformer/softmax_kernels.cu | 111 +- csrc/transformer/transform_kernels.cu | 4 + deepspeed/__init__.py | 142 +- deepspeed/accelerator | 1 + deepspeed/autotuning/README.md | 0 deepspeed/autotuning/__init__.py | 2 + deepspeed/autotuning/autotuner.py | 192 +- deepspeed/autotuning/config.py | 5 +- deepspeed/autotuning/constants.py | 8 +- deepspeed/autotuning/scheduler.py | 43 +- deepspeed/autotuning/tuner/__init__.py | 2 + deepspeed/autotuning/tuner/base_tuner.py | 5 +- deepspeed/autotuning/tuner/cost_model.py | 2 +- .../autotuning/tuner/index_based_tuner.py | 4 +- .../autotuning/tuner/model_based_tuner.py | 6 +- deepspeed/autotuning/tuner/utils.py | 2 + deepspeed/autotuning/utils.py | 18 +- deepspeed/checkpoint/__init__.py | 19 + deepspeed/checkpoint/constants.py | 44 +- deepspeed/checkpoint/deepspeed_checkpoint.py | 317 +++ deepspeed/checkpoint/reshape_3d_utils.py | 120 + deepspeed/checkpoint/reshape_meg_2d.py | 228 ++ 
deepspeed/checkpoint/reshape_utils.py | 100 + deepspeed/checkpoint/universal_checkpoint.py | 108 + deepspeed/checkpoint/utils.py | 31 + deepspeed/checkpoint/zero_checkpoint.py | 148 ++ deepspeed/comm/__init__.py | 52 + deepspeed/comm/backend.py | 43 + deepspeed/comm/comm.py | 778 ++++++ deepspeed/comm/config.py | 32 + deepspeed/comm/constants.py | 46 + deepspeed/comm/torch.py | 237 ++ deepspeed/comm/utils.py | 158 ++ deepspeed/compression/__init__.py | 5 + deepspeed/compression/basic_layer.py | 925 +++++++ deepspeed/compression/compress.py | 233 ++ deepspeed/compression/config.py | 492 ++++ deepspeed/compression/constants.py | 170 ++ deepspeed/compression/helper.py | 283 +++ deepspeed/compression/scheduler.py | 173 ++ deepspeed/compression/utils.py | 218 ++ deepspeed/constants.py | 1 - deepspeed/elasticity/__init__.py | 6 + deepspeed/elasticity/config.py | 15 + deepspeed/elasticity/constants.py | 8 +- deepspeed/elasticity/elastic_agent.py | 189 ++ deepspeed/elasticity/elasticity.py | 130 +- deepspeed/elasticity/utils.py | 16 + deepspeed/env_report.py | 40 +- deepspeed/git_version_info.py | 6 +- deepspeed/inference/__init__.py | 2 + deepspeed/inference/config.py | 278 +++ deepspeed/inference/engine.py | 596 +++-- deepspeed/launcher/__init__.py | 1 + deepspeed/launcher/constants.py | 5 +- deepspeed/launcher/launch.py | 170 +- deepspeed/launcher/multinode_runner.py | 128 +- deepspeed/launcher/runner.py | 175 +- deepspeed/model_implementations/__init__.py | 4 + .../diffusers/__init__.py | 1 + .../model_implementations/diffusers/unet.py | 63 + .../model_implementations/diffusers/vae.py | 148 ++ .../features/__init__.py | 1 + .../features/cuda_graph.py | 24 + .../transformers/__init__.py | 1 + .../transformers/clip_encoder.py | 79 + .../transformers/ds_base.py | 11 + .../transformers/ds_bert.py | 23 + .../transformers/ds_bloom.py | 23 + .../transformers/ds_gpt.py | 23 + .../transformers/ds_megatron_gpt.py | 23 + .../transformers/ds_opt.py | 23 + 
.../transformers/ds_transformer.py | 188 ++ deepspeed/module_inject/__init__.py | 8 +- deepspeed/module_inject/auto_tp.py | 124 + .../module_inject/containers/__init__.py | 15 + deepspeed/module_inject/containers/base.py | 248 ++ .../module_inject/containers/base_moe.py | 141 ++ deepspeed/module_inject/containers/bert.py | 81 + deepspeed/module_inject/containers/bloom.py | 128 + deepspeed/module_inject/containers/clip.py | 66 + .../module_inject/containers/distil_bert.py | 75 + .../containers/features/__init__.py | 4 + .../containers/features/megatron.py | 37 + .../containers/features/meta_tensor.py | 58 + deepspeed/module_inject/containers/gpt2.py | 54 + deepspeed/module_inject/containers/gptj.py | 110 + deepspeed/module_inject/containers/gptneo.py | 111 + deepspeed/module_inject/containers/gptneox.py | 129 + .../module_inject/containers/megatron_gpt.py | 106 + .../containers/megatron_gpt_moe.py | 82 + deepspeed/module_inject/containers/opt.py | 134 ++ deepspeed/module_inject/containers/unet.py | 51 + deepspeed/module_inject/containers/vae.py | 33 + deepspeed/module_inject/inject.py | 2 + deepspeed/module_inject/layers.py | 101 + deepspeed/module_inject/load_checkpoint.py | 289 +++ deepspeed/module_inject/module_quantize.py | 4 +- deepspeed/module_inject/policy.py | 205 ++ deepspeed/module_inject/replace_module.py | 1036 ++++---- deepspeed/module_inject/replace_policy.py | 395 +-- deepspeed/module_inject/utils.py | 40 + deepspeed/moe/__init__.py | 1 + deepspeed/moe/layer.py | 62 +- deepspeed/moe/mappings.py | 108 + deepspeed/moe/sharded_moe.py | 58 +- deepspeed/moe/utils.py | 39 +- deepspeed/monitor/__init__.py | 1 + deepspeed/monitor/config.py | 87 + deepspeed/monitor/csv_monitor.py | 63 + deepspeed/monitor/monitor.py | 48 + deepspeed/monitor/tensorboard.py | 54 + deepspeed/monitor/utils.py | 21 + deepspeed/monitor/wandb.py | 34 + deepspeed/nebula/__init__.py | 1 + deepspeed/nebula/config.py | 54 + deepspeed/nebula/constants.py | 87 + deepspeed/ops/__init__.py | 
2 + deepspeed/ops/adagrad/__init__.py | 2 + deepspeed/ops/adagrad/cpu_adagrad.py | 5 +- deepspeed/ops/adam/__init__.py | 2 + deepspeed/ops/adam/cpu_adam.py | 21 +- deepspeed/ops/adam/fused_adam.py | 24 +- deepspeed/ops/adam/multi_tensor_apply.py | 1 - deepspeed/ops/aio/__init__.py | 0 deepspeed/ops/csrc | 1 + deepspeed/ops/csrc/adagrad/cpu_adagrad.cpp | 227 -- .../ops/csrc/adagrad/cpu_adagrad_hip.cpp | 228 -- deepspeed/ops/csrc/adam/cpu_adam.cpp | 292 --- deepspeed/ops/csrc/adam/cpu_adam_hip.cpp | 293 --- .../ops/csrc/adam/fused_adam_frontend.cpp | 20 - deepspeed/ops/csrc/adam/multi_tensor_adam.cu | 163 -- deepspeed/ops/csrc/adam/multi_tensor_adam.hip | 164 -- .../ops/csrc/adam/multi_tensor_apply.cuh | 127 - .../ops/csrc/adam/multi_tensor_apply_hip.cuh | 129 - .../csrc/aio/common/deepspeed_aio_common.cpp | 333 --- .../csrc/aio/common/deepspeed_aio_common.h | 36 - .../csrc/aio/common/deepspeed_aio_types.cpp | 74 - .../ops/csrc/aio/common/deepspeed_aio_types.h | 57 - .../csrc/aio/common/deepspeed_aio_utils.cpp | 123 - .../ops/csrc/aio/common/deepspeed_aio_utils.h | 77 - .../csrc/aio/py_lib/deepspeed_aio_thread.cpp | 84 - .../csrc/aio/py_lib/deepspeed_aio_thread.h | 57 - .../ops/csrc/aio/py_lib/deepspeed_py_aio.cpp | 121 - .../ops/csrc/aio/py_lib/deepspeed_py_aio.h | 27 - .../aio/py_lib/deepspeed_py_aio_handle.cpp | 282 --- .../csrc/aio/py_lib/deepspeed_py_aio_handle.h | 68 - .../ops/csrc/aio/py_lib/deepspeed_py_copy.cpp | 133 -- .../ops/csrc/aio/py_lib/deepspeed_py_copy.h | 42 - deepspeed/ops/csrc/aio/py_lib/py_ds_aio.cpp | 41 - .../aio/py_test/aio_bench_generate_param.py | 96 - .../csrc/aio/py_test/aio_bench_perf_sweep.py | 397 --- .../ops/csrc/aio/py_test/ds_aio_basic.py | 144 -- .../ops/csrc/aio/py_test/ds_aio_handle.py | 176 -- .../ops/csrc/aio/py_test/parse_aio_stats.py | 154 -- .../ops/csrc/aio/py_test/perf_sweep_utils.py | 8 - .../ops/csrc/aio/py_test/run_read_sweep.sh | 75 - .../ops/csrc/aio/py_test/run_write_sweep.sh | 82 - 
.../aio/py_test/single_process_config.json | 29 - deepspeed/ops/csrc/aio/py_test/test_ds_aio.py | 101 - .../ops/csrc/aio/py_test/test_ds_aio_utils.py | 59 - .../ops/csrc/aio/py_test/validate_async_io.py | 9 - .../ops/csrc/common/custom_cuda_kernel.cu | 39 - .../ops/csrc/common/custom_hip_kernel.hip | 41 - deepspeed/ops/csrc/includes/StopWatch.h | 98 - deepspeed/ops/csrc/includes/Timer.h | 47 - deepspeed/ops/csrc/includes/Timer_hip.h | 48 - deepspeed/ops/csrc/includes/compat.h | 14 - deepspeed/ops/csrc/includes/context.h | 171 -- deepspeed/ops/csrc/includes/context_hip.h | 172 -- deepspeed/ops/csrc/includes/cpu_adagrad.h | 150 -- deepspeed/ops/csrc/includes/cpu_adagrad_hip.h | 151 -- deepspeed/ops/csrc/includes/cpu_adam.h | 225 -- deepspeed/ops/csrc/includes/cpu_adam_hip.h | 226 -- deepspeed/ops/csrc/includes/cublas_wrappers.h | 87 - .../ops/csrc/includes/cublas_wrappers_hip.h | 88 - .../ops/csrc/includes/custom_cuda_layers.h | 303 --- .../ops/csrc/includes/custom_hip_layers.h | 304 --- deepspeed/ops/csrc/includes/dropout.h | 76 - deepspeed/ops/csrc/includes/dropout_hip.h | 77 - .../ops/csrc/includes/ds_transformer_cuda.h | 184 -- .../ops/csrc/includes/ds_transformer_hip.h | 185 -- deepspeed/ops/csrc/includes/feed_forward.h | 105 - .../ops/csrc/includes/feed_forward_hip.h | 106 - deepspeed/ops/csrc/includes/gelu.h | 36 - deepspeed/ops/csrc/includes/gelu_hip.h | 37 - deepspeed/ops/csrc/includes/gemm_test.h | 327 --- deepspeed/ops/csrc/includes/gemm_test_hip.h | 328 --- deepspeed/ops/csrc/includes/general_kernels.h | 51 - .../ops/csrc/includes/general_kernels_hip.h | 52 - deepspeed/ops/csrc/includes/normalize_layer.h | 202 -- .../ops/csrc/includes/normalize_layer_hip.h | 203 -- deepspeed/ops/csrc/includes/quantizer.h | 9 - deepspeed/ops/csrc/includes/quantizer_hip.h | 10 - deepspeed/ops/csrc/includes/simd.h | 137 -- deepspeed/ops/csrc/includes/softmax.h | 60 - deepspeed/ops/csrc/includes/softmax_hip.h | 61 - .../ops/csrc/includes/strided_batch_gemm.h | 195 -- 
.../csrc/includes/strided_batch_gemm_hip.h | 196 -- deepspeed/ops/csrc/includes/type_shim.h | 119 - deepspeed/ops/csrc/includes/type_shim_hip.h | 121 - deepspeed/ops/csrc/lamb/fused_lamb_cuda.cpp | 109 - .../ops/csrc/lamb/fused_lamb_cuda_kernel.cu | 474 ---- .../ops/csrc/lamb/fused_lamb_hip_kernel.hip | 475 ---- .../ops/csrc/quantization/pt_binding.cpp | 77 - .../ops/csrc/quantization/pt_binding_hip.cpp | 78 - deepspeed/ops/csrc/quantization/quantizer.hip | 1039 -------- deepspeed/ops/csrc/sparse_attention/utils.cpp | 120 - .../ops/csrc/transformer/cublas_wrappers.cu | 403 ---- .../ops/csrc/transformer/cublas_wrappers.hip | 404 ---- .../ops/csrc/transformer/dropout_kernels.cu | 868 ------- .../ops/csrc/transformer/dropout_kernels.hip | 870 ------- .../csrc/transformer/ds_transformer_cuda.cpp | 1051 -------- .../csrc/transformer/ds_transformer_hip.cpp | 1052 -------- .../ops/csrc/transformer/gelu_kernels.cu | 330 --- .../ops/csrc/transformer/gelu_kernels.hip | 332 --- .../ops/csrc/transformer/general_kernels.cu | 411 ---- .../ops/csrc/transformer/general_kernels.hip | 413 ---- .../inference/csrc/apply_rotary_pos_emb.cu | 374 --- .../inference/csrc/apply_rotary_pos_emb.hip | 376 --- .../transformer/inference/csrc/dequantize.cu | 110 - .../transformer/inference/csrc/dequantize.hip | 112 - .../csrc/transformer/inference/csrc/gelu.cu | 525 ---- .../csrc/transformer/inference/csrc/gelu.hip | 527 ---- .../transformer/inference/csrc/normalize.cu | 453 ---- .../transformer/inference/csrc/normalize.hip | 455 ---- .../transformer/inference/csrc/pt_binding.cpp | 951 -------- .../inference/csrc/pt_binding_hip.cpp | 952 -------- .../transformer/inference/csrc/softmax.cu | 434 ---- .../transformer/inference/csrc/softmax.hip | 436 ---- .../inference/includes/custom_cuda_layers.h | 124 - .../inference/includes/custom_hip_layers.h | 125 - .../ops/csrc/transformer/normalize_kernels.cu | 2121 ---------------- .../csrc/transformer/normalize_kernels.hip | 2123 ----------------- 
.../ops/csrc/transformer/softmax_kernels.cu | 595 ----- .../ops/csrc/transformer/softmax_kernels.hip | 597 ----- .../ops/csrc/transformer/transform_kernels.cu | 575 ----- .../csrc/transformer/transform_kernels.hip | 577 ----- .../csrc/transformer_bak/cublas_wrappers.cu | 403 ---- .../csrc/transformer_bak/cublas_wrappers.hip | 404 ---- .../csrc/transformer_bak/dropout_kernels.cu | 868 ------- .../csrc/transformer_bak/dropout_kernels.hip | 870 ------- .../transformer_bak/ds_transformer_cuda.cpp | 1051 -------- .../transformer_bak/ds_transformer_hip.cpp | 1052 -------- .../ops/csrc/transformer_bak/gelu_kernels.cu | 330 --- .../ops/csrc/transformer_bak/gelu_kernels.hip | 332 --- .../csrc/transformer_bak/general_kernels.cu | 411 ---- .../csrc/transformer_bak/general_kernels.hip | 413 ---- .../inference/csrc/apply_rotary_pos_emb.cu | 372 --- .../inference/csrc/apply_rotary_pos_emb.hip | 374 --- .../inference/csrc/dequantize.cu | 110 - .../inference/csrc/dequantize.hip | 112 - .../transformer_bak/inference/csrc/gelu.cu | 525 ---- .../transformer_bak/inference/csrc/gelu.hip | 527 ---- .../inference/csrc/normalize.cu | 451 ---- .../inference/csrc/normalize.hip | 453 ---- .../inference/csrc/pt_binding.cpp | 911 ------- .../inference/csrc/pt_binding_hip.cpp | 912 ------- .../transformer_bak/inference/csrc/softmax.cu | 432 ---- .../inference/csrc/softmax.hip | 434 ---- .../inference/includes/context.h | 177 -- .../inference/includes/context_hip.h | 178 -- .../inference/includes/cublas_wrappers.h | 207 -- .../inference/includes/cublas_wrappers_hip.h | 208 -- .../inference/includes/custom_cuda_layers.h | 124 - .../inference/includes/custom_hip_layers.h | 125 - .../csrc/transformer_bak/normalize_kernels.cu | 2121 ---------------- .../transformer_bak/normalize_kernels.hip | 2123 ----------------- .../csrc/transformer_bak/softmax_kernels.cu | 595 ----- .../csrc/transformer_bak/softmax_kernels.hip | 597 ----- .../ops/csrc/utils/flatten_unflatten.cpp | 25 - 
deepspeed/ops/lamb/__init__.py | 2 + deepspeed/ops/lamb/fused_lamb.py | 2 +- deepspeed/ops/op_builder | 1 + deepspeed/ops/op_builder/__init__.py | 32 - deepspeed/ops/op_builder/async_io.py | 106 - deepspeed/ops/op_builder/builder.py | 699 ------ deepspeed/ops/op_builder/cpu_adagrad.py | 44 - deepspeed/ops/op_builder/cpu_adam.py | 53 - deepspeed/ops/op_builder/fused_adam.py | 32 - deepspeed/ops/op_builder/fused_lamb.py | 38 - deepspeed/ops/op_builder/quantizer.py | 22 - deepspeed/ops/op_builder/sparse_attn.py | 87 - .../ops/op_builder/stochastic_transformer.py | 20 - .../ops/op_builder/transformer_inference.py | 32 - deepspeed/ops/op_builder/utils.py | 18 - deepspeed/ops/quantizer/__init__.py | 2 + deepspeed/ops/quantizer/quantizer.py | 7 +- deepspeed/ops/random_ltd/__init__.py | 3 + deepspeed/ops/random_ltd/dropping_utils.py | 145 ++ deepspeed/ops/sparse_attention/__init__.py | 4 +- .../bert_sparse_self_attention.py | 0 deepspeed/ops/sparse_attention/matmul.py | 11 +- deepspeed/ops/sparse_attention/softmax.py | 6 +- .../sparse_attention_utils.py | 8 +- .../sparse_attention/sparse_self_attention.py | 2 - .../ops/sparse_attention/sparsity_config.py | 86 +- .../ops/sparse_attention/trsrc/__init__.py | 2 + deepspeed/ops/transformer/__init__.py | 5 +- .../ops/transformer/inference/__init__.py | 5 +- .../ops/transformer/inference/bias_add.py | 28 + deepspeed/ops/transformer/inference/config.py | 115 + .../inference/diffusers_2d_transformer.py | 8 + .../inference/diffusers_attention.py | 248 ++ .../inference/diffusers_transformer_block.py | 126 + .../ops/transformer/inference/ds_attention.py | 277 +++ deepspeed/ops/transformer/inference/ds_mlp.py | 94 + .../transformer/inference/moe_inference.py | 37 +- .../inference/op_binding/__init__.py | 10 + .../transformer/inference/op_binding/base.py | 17 + .../inference/op_binding/gelu_gemm.py | 32 + .../inference/op_binding/linear.py | 31 + .../inference/op_binding/mlp_gemm.py | 41 + .../inference/op_binding/qkv_gemm.py | 44 + 
.../inference/op_binding/residual_add.py | 38 + .../inference/op_binding/softmax.py | 41 + .../inference/op_binding/softmax_context.py | 48 + .../inference/op_binding/vector_matmul.py | 20 + .../ops/transformer/inference/triton_ops.py | 152 ++ deepspeed/ops/transformer/transformer.py | 10 +- deepspeed/pipe/__init__.py | 2 + deepspeed/profiling/__init__.py | 1 + deepspeed/profiling/config.py | 1 + deepspeed/profiling/constants.py | 1 + .../profiling/flops_profiler/__init__.py | 2 + .../profiling/flops_profiler/profiler.py | 214 +- deepspeed/runtime/__init__.py | 9 + .../activation_checkpointing/__init__.py | 1 + .../activation_checkpointing/checkpointing.py | 66 +- .../activation_checkpointing/config.py | 1 + deepspeed/runtime/bf16_optimizer.py | 348 +-- deepspeed/runtime/checkpoint_engine/README.md | 37 + .../runtime/checkpoint_engine/__init__.py | 1 + .../checkpoint_engine/checkpoint_engine.py | 22 + .../nebula_checkpoint_engine.py | 115 + .../torch_checkpoint_engine.py | 30 + deepspeed/runtime/comm/__init__.py | 1 + .../runtime/comm/coalesced_collectives.py | 50 +- deepspeed/runtime/comm/nccl.py | 9 +- deepspeed/runtime/compression/__init__.py | 1 + deepspeed/runtime/config.py | 273 +-- deepspeed/runtime/config_utils.py | 130 +- deepspeed/runtime/constants.py | 115 +- deepspeed/runtime/data_pipeline/__init__.py | 1 + deepspeed/runtime/data_pipeline/config.py | 180 ++ deepspeed/runtime/data_pipeline/constants.py | 115 + .../data_pipeline/curriculum_scheduler.py | 156 +- .../data_pipeline/data_routing/__init__.py | 1 + .../data_pipeline/data_routing/basic_layer.py | 117 + .../data_pipeline/data_routing/helper.py | 45 + .../data_pipeline/data_routing/scheduler.py | 112 + .../data_pipeline/data_routing/utils.py | 27 + .../data_pipeline/data_sampling/__init__.py | 1 + .../data_sampling/data_analyzer.py | 537 +++++ .../data_sampling/data_sampler.py | 390 +++ .../data_sampling/indexed_dataset.py | 645 +++++ .../data_pipeline/data_sampling/utils.py | 59 + 
deepspeed/runtime/dataloader.py | 125 +- deepspeed/runtime/eigenvalue.py | 2 + deepspeed/runtime/engine.py | 1224 ++++++---- deepspeed/runtime/fp16/__init__.py | 1 + deepspeed/runtime/fp16/fused_optimizer.py | 63 +- deepspeed/runtime/fp16/loss_scaler.py | 22 +- deepspeed/runtime/fp16/onebit/__init__.py | 5 + deepspeed/runtime/fp16/onebit/adam.py | 23 +- deepspeed/runtime/fp16/onebit/lamb.py | 27 +- deepspeed/runtime/fp16/onebit/zoadam.py | 19 +- deepspeed/runtime/fp16/unfused_optimizer.py | 62 +- deepspeed/runtime/lr_schedules.py | 2 - deepspeed/runtime/pipe/__init__.py | 2 + deepspeed/runtime/pipe/engine.py | 150 +- deepspeed/runtime/pipe/module.py | 114 +- deepspeed/runtime/pipe/p2p.py | 20 +- deepspeed/runtime/pipe/schedule.py | 2 + deepspeed/runtime/pipe/topology.py | 7 +- deepspeed/runtime/progressive_layer_drop.py | 2 + deepspeed/runtime/quantize.py | 220 +- deepspeed/runtime/state_dict_factory.py | 65 +- .../runtime/swap_tensor/async_swapper.py | 5 +- .../runtime/swap_tensor/optimizer_utils.py | 17 +- .../partitioned_optimizer_swapper.py | 12 +- .../swap_tensor/partitioned_param_swapper.py | 34 +- .../pipelined_optimizer_swapper.py | 21 +- deepspeed/runtime/swap_tensor/utils.py | 14 +- deepspeed/runtime/utils.py | 178 +- deepspeed/runtime/weight_quantizer.py | 20 +- deepspeed/runtime/zero/config.py | 460 ++-- .../zero/contiguous_memory_allocator.py | 6 +- deepspeed/runtime/zero/linear.py | 26 +- deepspeed/runtime/zero/offload_config.py | 167 +- deepspeed/runtime/zero/parameter_offload.py | 516 ++++ .../runtime/zero/partition_parameters.py | 341 +-- .../zero/partitioned_param_coordinator.py | 147 +- deepspeed/runtime/zero/stage3.py | 926 +++---- deepspeed/runtime/zero/stage_1_and_2.py | 599 +++-- deepspeed/runtime/zero/test.py | 2 + deepspeed/runtime/zero/tiling.py | 2 + deepspeed/runtime/zero/utils.py | 11 +- deepspeed/utils/__init__.py | 10 +- deepspeed/utils/comms_logging.py | 141 ++ deepspeed/utils/debug.py | 11 +- deepspeed/utils/groups.py | 99 +- 
deepspeed/utils/init_on_device.py | 81 + deepspeed/utils/logging.py | 13 +- deepspeed/utils/mixed_precision_linkage.py | 58 + deepspeed/utils/nvtx.py | 18 +- deepspeed/utils/tensor_fragment.py | 284 +++ deepspeed/utils/timer.py | 121 +- deepspeed/utils/types.py | 9 + deepspeed/utils/zero_to_fp32.py | 3 +- docker/Dockerfile | 0 docker/Dockerfile.rocm | 2 +- docs/README.md | 10 + docs/_config.yml | 7 + docs/_data/navigation.yml | 57 +- docs/_pages/compression.md | 12 + docs/_pages/config-json.md | 691 +++++- docs/_pages/inference.md | 13 + docs/_pages/training.md | 580 +++++ docs/_posts/2020-09-09-ZeRO-Offload.md | 0 ...0-10-28-progressive-layer-dropping-news.md | 0 docs/_posts/2022-03-21-amd-support.md | 2 +- docs/_posts/2022-07-26-deepspeed-azure.md | 135 ++ docs/_posts/2022-09-10-zero-inference.md | 122 + docs/_posts/2022-10-11-mii.md | 216 ++ docs/_posts/2022-12-12-data-efficiency.md | 144 ++ docs/_tutorials/advanced-install.md | 0 .../automatic-tensor-parallelism.md | 154 ++ docs/_tutorials/autotuning.md | 4 + docs/_tutorials/azure.md | 128 +- docs/_tutorials/bert-finetuning.md | 0 docs/_tutorials/bert-pretraining.md | 4 + docs/_tutorials/cifar-10.md | 3 +- docs/_tutorials/comms-logging.md | 116 + docs/_tutorials/curriculum-learning.md | 4 + docs/_tutorials/data-efficiency.md | 100 + docs/_tutorials/flops-profiler.md | 2 +- docs/_tutorials/gan.md | 0 docs/_tutorials/inference-tutorial.md | 6 +- docs/_tutorials/large-models-w-deepspeed.md | 2 +- docs/_tutorials/megatron.md | 2 +- .../mixture-of-experts-inference.md | 7 +- docs/_tutorials/mixture-of-experts-nlg.md | 8 +- docs/_tutorials/model-compression.md | 441 ++++ docs/_tutorials/monitor.md | 105 + docs/_tutorials/progressive_layer_dropping.md | 0 docs/_tutorials/transformer_kernel.md | 0 docs/_tutorials/zero-offload.md | 8 +- docs/assets/images/175b-trend.png | Bin 0 -> 90652 bytes docs/assets/images/1t-trend.png | Bin 0 -> 39821 bytes docs/assets/images/3pillars.png | Bin 0 -> 92467 bytes 
docs/assets/images/530b-trend.png | Bin 0 -> 49726 bytes .../images/DeepSpeed_dark_transparent.svg | 0 .../images/DeepSpeed_light_transparent.svg | 0 docs/assets/images/accelerate-dark.png | Bin 0 -> 9208 bytes docs/assets/images/accelerate-light.png | Bin 0 -> 9020 bytes docs/assets/images/accelerate.png | Bin 0 -> 12653 bytes docs/assets/images/adam-convergence.png | Bin docs/assets/images/bert-ib.png | Bin docs/assets/images/bert-scaling.png | Bin docs/assets/images/bert-tcp.png | Bin docs/assets/images/bingbert-mixedbit.png | Bin docs/assets/images/convergence-table.png | Bin .../data_efficiency/data_efficiecy_fig0.png | Bin 0 -> 382954 bytes .../data_efficiency/data_efficiecy_fig1.png | Bin 0 -> 143313 bytes .../data_efficiency/data_efficiecy_fig2.png | Bin 0 -> 78605 bytes .../data_efficiency/data_efficiecy_fig3.png | Bin 0 -> 146105 bytes docs/assets/images/determined.svg | 22 + docs/assets/images/gpu-numbers.png | Bin docs/assets/images/hf-logo.png | Bin 0 -> 164177 bytes docs/assets/images/hf-transformers.png | Bin 0 -> 7258 bytes .../images/inference-gemm-scheduling.png | Bin .../assets/images/inference-kernel-fusion.png | Bin docs/assets/images/inference-latency.png | Bin docs/assets/images/inference-throughput.png | Bin docs/assets/images/large-model-graph.png | Bin 0 -> 142599 bytes docs/assets/images/lightning-dark.png | Bin 0 -> 11515 bytes docs/assets/images/lightning-dark.svg | 10 + docs/assets/images/lightning-light.svg | 10 + docs/assets/images/lightning.png | Bin 0 -> 28443 bytes docs/assets/images/mii/azure-cost.png | Bin 0 -> 43734 bytes docs/assets/images/mii/bert.png | Bin 0 -> 640259 bytes docs/assets/images/mii/bloom.png | Bin 0 -> 271878 bytes docs/assets/images/mii/gpt.png | Bin 0 -> 438479 bytes docs/assets/images/mii/hero-transparent.png | Bin 0 -> 285831 bytes docs/assets/images/mii/hero.png | Bin 0 -> 270485 bytes .../mii/llm-latency-sd-latency-zoom.png | Bin 0 -> 56292 bytes .../images/mii/llm-latency-sd-latency.png | Bin 0 -> 52128 
bytes docs/assets/images/mii/mii-arch.png | Bin 0 -> 208527 bytes docs/assets/images/mii/multi-gpu-latency.png | Bin 0 -> 41388 bytes docs/assets/images/mii/opt-bloom.png | Bin 0 -> 405057 bytes docs/assets/images/mii/opt.png | Bin 0 -> 237056 bytes docs/assets/images/mii/roberta.png | Bin 0 -> 641018 bytes docs/assets/images/mii/sd-latency.png | Bin 0 -> 37452 bytes docs/assets/images/mii/tput-llms.png | Bin 0 -> 42967 bytes docs/assets/images/moe-nlg.png | Bin docs/assets/images/mosaicml.svg | 38 + docs/assets/images/old-vs-new-azure.png | Bin 0 -> 507296 bytes docs/assets/images/onebit-adam-overview.png | Bin docs/assets/images/onebit-convergence.png | Bin docs/assets/images/perf-overview.png | Bin 0 -> 158509 bytes docs/assets/images/pipe-schedule.png | Bin docs/assets/images/quantization-8bit.png | Bin docs/assets/images/quantization-mixedbit.png | Bin docs/assets/images/squad-ib.png | Bin docs/assets/images/squad-scaling.png | Bin docs/assets/images/squad-tcp.png | Bin docs/assets/images/tensorboard_monitor.PNG | Bin 0 -> 454551 bytes docs/assets/images/transformers-dark.png | Bin 0 -> 9037 bytes docs/assets/images/transformers-light.png | Bin 0 -> 8850 bytes docs/assets/images/vmss-setup.png | Bin 0 -> 18951 bytes docs/assets/images/wandb_monitor.PNG | Bin 0 -> 450591 bytes docs/assets/images/xtc-1.png | Bin 0 -> 869335 bytes docs/assets/images/xtc-2.png | Bin 0 -> 903225 bytes docs/assets/images/xtc-3.png | Bin 0 -> 521280 bytes docs/assets/images/xtc-4.png | Bin 0 -> 533166 bytes .../images/zero_inference_full_offload.png | Bin 0 -> 41898 bytes .../images/zero_inference_model_scale.png | Bin 0 -> 81092 bytes docs/assets/images/zero_inference_models.png | Bin 0 -> 31742 bytes .../images/zero_inference_multi_gpu.png | Bin 0 -> 45206 bytes .../assets/images/zero_inference_prefetch.png | Bin 0 -> 45225 bytes .../zero_inference_token_count_batch_size.png | Bin 0 -> 27655 bytes ...o_inference_token_count_cpu_throughput.png | Bin 0 -> 31308 bytes 
..._inference_token_count_nvme_throughput.png | Bin 0 -> 32629 bytes docs/code-docs/build-api-docs.sh | 0 docs/code-docs/source/conf.py | 25 +- docs/code-docs/source/index.rst | 11 +- docs/code-docs/source/inference-init.rst | 33 +- docs/code-docs/source/memory.rst | 42 +- docs/code-docs/source/monitor.rst | 35 + docs/code-docs/source/optimizers.rst | 0 docs/code-docs/source/schedulers.rst | 0 docs/code-docs/source/zero3.rst | 56 + docs/index.md | 298 +-- env.sh | 3 + examples/README.md | 9 + inference/test_checkpoint_sharding.py | 96 + inference/test_inference.py | 560 +++++ inference/test_inference_config.py | 41 + inference/test_model_profiling.py | 90 + install.sh | 0 op_builder/__init__.py | 72 +- op_builder/all_ops.py | 30 + op_builder/async_io.py | 7 +- op_builder/builder.py | 287 ++- op_builder/cpu_adagrad.py | 24 +- op_builder/cpu_adam.py | 27 +- op_builder/fused_adam.py | 9 +- op_builder/fused_lamb.py | 9 +- op_builder/quantizer.py | 9 +- .../random_ltd.py | 22 +- op_builder/sparse_attn.py | 8 +- op_builder/spatial_inference.py | 45 + op_builder/transformer_inference.py | 47 +- release/bump_patch_version.py | 2 + requirements/requirements-autotuning.txt | 0 requirements/requirements-dev.txt | 10 +- requirements/requirements-inf.txt | 5 + requirements/requirements-readthedocs.txt | 4 + requirements/requirements-sd.txt | 2 + requirements/requirements-sparse_attn.txt | 2 +- requirements/requirements.txt | 5 +- run.sh | 4 + scripts/check-license.py | 38 + scripts/check-torchdist.py | 40 + setup.py | 75 +- tests/accelerator/ds_config.json | 19 + tests/accelerator/test_ds_init.py | 47 + tests/benchmarks/flatten_bench.py | 35 +- tests/benchmarks/unflatten_bench.py | 33 +- tests/conftest.py | 62 + tests/lightning/test_simple.py | 6 +- .../BingBertSquad_run_func_test.py | 4 +- .../BingBertSquad_test_common.py | 1 - tests/model/BingBertSquad/__init__.py | 0 .../deepspeed_bsz24_fp16_config.json | 0 ...bsz24_fp16_eigenvalue_quantize_config.json | 0 
.../deepspeed_bsz24_fp16_zero2_config.json | 0 .../deepspeed_bsz24_fp32_config.json | 0 .../model/BingBertSquad/run_BingBertSquad.sh | 0 .../BingBertSquad/run_BingBertSquad_sanity.sh | 0 tests/model/BingBertSquad/run_tests.sh | 0 tests/model/BingBertSquad/test_e2e_squad.py | 4 +- .../ds_config_func_bs4_zero1.json | 0 .../ds_config_func_bs4_zero2.json | 0 .../ds_config_func_bs4_zero2_offload.json | 0 .../ds_config_func_bs8_no_zero.json | 0 .../ds_config_func_bs8_zero0_gas3.json | 0 .../ds_config_func_bs8_zero1.json | 0 .../ds_config_func_bs8_zero2.json | 0 .../ds_config_func_bs8_zero2_gas3.json | 0 .../ds_config_func_bs8_zero2_offload.json | 0 .../ds_config_func_scheduler.json | 0 .../Megatron_GPT2/ds_config_perf_bs16.json | 0 .../Megatron_GPT2/ds_config_perf_bs32.json | 0 .../Megatron_GPT2/ds_config_perf_bs8.json | 0 tests/model/Megatron_GPT2/ds_gpt2_test.sh | 0 .../Megatron_GPT2/run_checkpoint_test.py | 3 +- tests/model/Megatron_GPT2/run_func_test.py | 4 +- .../model/Megatron_GPT2/run_perf_baseline.py | 5 +- tests/model/Megatron_GPT2/run_perf_test.py | 8 +- tests/model/Megatron_GPT2/test_common.py | 1 - tests/model/run_sanity_check.py | 2 - tests/onebit/test_mpi_backend.py | 20 +- tests/onebit/test_mpi_perf.py | 13 +- tests/onebit/test_nccl_backend.py | 20 +- tests/onebit/test_nccl_perf.py | 14 +- tests/perf/adagrad_test.py | 35 + tests/perf/adam_test.py | 51 +- tests/perf/adam_test1.py | 10 +- tests/pytest.ini | 8 + tests/small_model_debugging/stage3_test.py | 2 + tests/small_model_debugging/test.py | 24 +- tests/small_model_debugging/test_model.py | 9 +- tests/unit/__init__.py | 1 + tests/unit/alexnet_model.py | 164 ++ tests/unit/autotuning/test_autotuning.py | 86 + tests/unit/checkpoint/common.py | 220 ++ .../unit/checkpoint/test_latest_checkpoint.py | 53 + tests/unit/checkpoint/test_lr_scheduler.py | 122 + tests/unit/checkpoint/test_moe_checkpoint.py | 109 + tests/unit/checkpoint/test_other_optimizer.py | 132 + tests/unit/checkpoint/test_pipeline.py | 109 + 
.../checkpoint/test_reshape_checkpoint.py | 57 + tests/unit/checkpoint/test_sparse.py | 96 + tests/unit/checkpoint/test_tag_validation.py | 63 + tests/unit/checkpoint/test_zero_optimizer.py | 460 ++++ tests/unit/comm/test_dist.py | 200 ++ tests/unit/common.py | 397 ++- tests/unit/compression/test_compression.py | 268 +++ tests/unit/elasticity/test_elastic.py | 292 +++ tests/unit/launcher/test_ds_arguments.py | 102 + tests/unit/launcher/test_multinode_runner.py | 52 + tests/unit/launcher/test_run.py | 177 ++ tests/unit/megatron_model.py | 13 +- .../test_configurable_parallel_mp.py | 188 ++ .../test_configurable_parallel_pp.py | 352 +++ tests/unit/modeling.py | 48 +- tests/unit/modelingpreln.py | 45 +- tests/unit/moe/test_moe.py | 84 + tests/unit/moe/test_moe_tp.py | 98 + tests/unit/monitor/test_monitor.py | 96 + tests/unit/multi_output_model.py | 5 +- .../accelerators/test_accelerator_backward.py | 345 +++ .../accelerators/test_accelerator_forward.py | 341 +++ tests/unit/ops/adagrad/test_cpu_adagrad.py | 148 ++ tests/unit/ops/adam/test_adamw.py | 72 + tests/unit/ops/adam/test_cpu_adam.py | 129 + tests/unit/ops/aio/test_aio.py | 380 +++ tests/unit/ops/quantizer/test_dequantize.py | 96 + .../ops/quantizer/test_fake_quantization.py | 64 + tests/unit/ops/quantizer/test_quantize.py | 162 ++ .../sparse_attention/test_sparse_attention.py | 271 +++ tests/unit/ops/spatial/test_nhwc_bias_add.py | 136 ++ .../transformer/inference/test_bias_add.py | 57 + .../transformer/inference/test_bias_geglu.py | 58 + .../transformer/inference/test_bias_gelu.py | 66 + .../transformer/inference/test_bias_relu.py | 61 + .../transformer/inference/test_layer_norm.py | 202 ++ .../inference/test_moe_res_matmult.py | 69 + .../inference/test_residual_add.py | 151 ++ tests/unit/pipe/test_pipe_module.py | 101 + .../flops_profiler/test_flops_profiler.py | 128 + tests/unit/run_test.sh | 18 + .../test_activation_checkpointing.py | 267 +++ .../comm/test_coalesced_collectives.py | 70 + 
.../half_precision/onebit/test_onebit.py | 1308 ++++++++++ .../unit/runtime/half_precision/test_bf16.py | 357 +++ .../half_precision/test_dynamic_loss_scale.py | 279 +++ .../unit/runtime/half_precision/test_fp16.py | 829 +++++++ tests/unit/runtime/pipe/test_pipe.py | 119 + tests/unit/runtime/pipe/test_pipe_schedule.py | 146 ++ tests/unit/runtime/pipe/test_topology.py | 225 ++ .../test_averaging_sparse_gradients.py | 81 + tests/unit/runtime/sparse_tensor/test_csr.py | 52 + .../sparse_tensor/test_sparse_grads.py | 74 + tests/unit/runtime/test_autocast.py | 76 + tests/unit/runtime/test_data.py | 59 + tests/unit/runtime/test_data_efficiency.py | 228 ++ tests/unit/runtime/test_ds_config_dict.py | 298 +++ tests/unit/runtime/test_ds_config_model.py | 88 + tests/unit/runtime/test_ds_initialize.py | 280 +++ tests/unit/runtime/test_lr_schedulers.py | 455 ++++ tests/unit/runtime/test_multi_output_model.py | 136 ++ tests/unit/runtime/test_pld.py | 108 + tests/unit/runtime/test_runtime_utils.py | 78 + tests/unit/runtime/utils/test_partition.py | 197 ++ .../zero/test_ignore_unused_parameters.py | 64 + tests/unit/runtime/zero/test_zero.py | 1386 +++++++++++ tests/unit/runtime/zero/test_zero_config.py | 74 + tests/unit/runtime/zero/test_zero_context.py | 269 +++ .../zero/test_zero_context_ancestry.py | 111 + .../runtime/zero/test_zero_context_return.py | 184 ++ .../runtime/zero/test_zero_tensor_fragment.py | 156 ++ tests/unit/runtime/zero/test_zero_tiled.py | 173 ++ tests/unit/runtime/zero/utils.py | 13 + tests/unit/simple_model.py | 26 +- ...t.tfevents.1679370169.9dad78d721ca.29247.0 | Bin 0 -> 40 bytes ...t.tfevents.1679370366.9dad78d721ca.39331.0 | Bin 0 -> 40 bytes tests/unit/util.py | 49 +- tests/unit/utils/test_get_optim_files.py | 19 + tests/unit/utils/test_groups.py | 55 + tests/unit/utils/test_init_on_device.py | 26 + version.txt | 2 +- 825 files changed, 53863 insertions(+), 57884 deletions(-) mode change 100644 => 100755 README.md create mode 100644 
accelerator/__init__.py create mode 100644 accelerator/abstract_accelerator.py create mode 100644 accelerator/cuda_accelerator.py create mode 100644 accelerator/real_accelerator.py create mode 100644 benchmarks/__init__.py create mode 100644 benchmarks/communication/README.md create mode 100644 benchmarks/communication/__init__.py create mode 100644 benchmarks/communication/all_gather.py create mode 100644 benchmarks/communication/all_reduce.py create mode 100644 benchmarks/communication/all_to_all.py create mode 100644 benchmarks/communication/broadcast.py create mode 100644 benchmarks/communication/constants.py create mode 100644 benchmarks/communication/pt2pt.py create mode 100644 benchmarks/communication/run_all.py create mode 100644 benchmarks/communication/utils.py create mode 100644 benchmarks/inference/bert-bench.py create mode 100644 benchmarks/inference/collect_results.py create mode 100644 benchmarks/inference/gpt-bench.py create mode 100644 benchmarks/inference/requirements.txt create mode 100644 benchmarks/inference/run_model.sh create mode 100644 benchmarks/inference/sweep.sh mode change 100644 => 120000 bin/deepspeed mode change 100644 => 120000 bin/deepspeed.pt mode change 100644 => 100755 bin/ds create mode 100755 bin/ds_bench mode change 100644 => 100755 bin/ds_elastic mode change 100644 => 100755 bin/ds_report mode change 100644 => 100755 bin/ds_ssh create mode 120000 bin/dsr create mode 100644 build_win.bat create mode 100644 csrc/aio/py_lib/deepspeed_pin_tensor.cpp create mode 100644 csrc/aio/py_lib/deepspeed_pin_tensor.h mode change 100644 => 100755 csrc/aio/py_lib/py_ds_aio.cpp mode change 100644 => 100755 csrc/aio/py_test/ds_aio_basic.py mode change 100644 => 100755 csrc/aio/py_test/ds_aio_handle.py mode change 100644 => 100755 csrc/aio/py_test/parse_aio_stats.py mode change 100644 => 100755 csrc/aio/py_test/run_read_sweep.sh mode change 100644 => 100755 csrc/aio/py_test/run_write_sweep.sh mode change 100644 => 100755 
csrc/aio/py_test/test_ds_aio.py mode change 100644 => 100755 csrc/aio/py_test/test_ds_aio_utils.py create mode 100644 csrc/includes/conversion_utils.h create mode 100644 csrc/includes/dequantization_utils.h create mode 100644 csrc/includes/ds_kernel_utils.h mode change 100644 => 100755 csrc/includes/ds_transformer_cuda.h create mode 100644 csrc/includes/memory_access_utils.h create mode 100644 csrc/includes/quantization.h create mode 100644 csrc/includes/quantization_utils.h create mode 100644 csrc/includes/reduction_utils.h mode change 100644 => 100755 csrc/includes/softmax.h create mode 100644 csrc/quantization/dequantize.cu rename deepspeed/ops/csrc/quantization/quantizer.cu => csrc/quantization/fake_quantizer.cu (81%) create mode 100644 csrc/quantization/quantize.cu create mode 100644 csrc/random_ltd/gather_scatter.cu create mode 100644 csrc/random_ltd/pt_binding.cpp create mode 100644 csrc/random_ltd/slice_attn_masks.cu create mode 100644 csrc/random_ltd/token_sort.cu create mode 100644 csrc/spatial/csrc/opt_bias_add.cu create mode 100644 csrc/spatial/csrc/pt_binding.cpp create mode 100644 csrc/spatial/includes/spatial_cuda_layers.h mode change 100644 => 100755 csrc/transformer/dropout_kernels.cu create mode 100644 csrc/transformer/inference/csrc/layer_norm.cu create mode 100644 csrc/transformer/inference/csrc/layer_norm.hip create mode 100644 csrc/transformer/inference/csrc/relu.cu create mode 100644 csrc/transformer/inference/csrc/relu.hip rename deepspeed/ops/csrc/transformer_bak/transform_kernels.cu => csrc/transformer/inference/csrc/transform.cu (55%) rename deepspeed/ops/csrc/transformer_bak/transform_kernels.hip => csrc/transformer/inference/csrc/transform.hip (55%) rename deepspeed/ops/csrc/transformer/inference/includes/context.h => csrc/transformer/inference/includes/inference_context.h (53%) rename deepspeed/ops/csrc/transformer/inference/includes/context_hip.h => csrc/transformer/inference/includes/inference_context_hip.h (54%) rename 
deepspeed/ops/csrc/transformer/inference/includes/cublas_wrappers.h => csrc/transformer/inference/includes/inference_cublas_wrappers.h (99%) rename deepspeed/ops/csrc/transformer/inference/includes/cublas_wrappers_hip.h => csrc/transformer/inference/includes/inference_cublas_wrappers_hip.h (99%) create mode 100644 csrc/transformer/inference/includes/inference_cuda_layers.h create mode 100644 csrc/transformer/inference/includes/inference_hip_layers.h mode change 100644 => 100755 csrc/transformer/transform_kernels.cu mode change 100644 => 100755 deepspeed/__init__.py create mode 120000 deepspeed/accelerator mode change 100644 => 100755 deepspeed/autotuning/README.md mode change 100644 => 100755 deepspeed/autotuning/autotuner.py mode change 100644 => 100755 deepspeed/autotuning/scheduler.py mode change 100644 => 100755 deepspeed/autotuning/tuner/__init__.py mode change 100644 => 100755 deepspeed/autotuning/tuner/base_tuner.py mode change 100644 => 100755 deepspeed/autotuning/tuner/cost_model.py mode change 100644 => 100755 deepspeed/autotuning/tuner/index_based_tuner.py mode change 100644 => 100755 deepspeed/autotuning/tuner/model_based_tuner.py mode change 100644 => 100755 deepspeed/autotuning/tuner/utils.py create mode 100644 deepspeed/checkpoint/deepspeed_checkpoint.py create mode 100644 deepspeed/checkpoint/reshape_3d_utils.py create mode 100644 deepspeed/checkpoint/reshape_meg_2d.py create mode 100644 deepspeed/checkpoint/reshape_utils.py create mode 100644 deepspeed/checkpoint/universal_checkpoint.py create mode 100644 deepspeed/checkpoint/utils.py create mode 100644 deepspeed/checkpoint/zero_checkpoint.py create mode 100644 deepspeed/comm/__init__.py create mode 100644 deepspeed/comm/backend.py create mode 100644 deepspeed/comm/comm.py create mode 100644 deepspeed/comm/config.py create mode 100644 deepspeed/comm/constants.py create mode 100644 deepspeed/comm/torch.py create mode 100644 deepspeed/comm/utils.py create mode 100644 deepspeed/compression/__init__.py 
create mode 100644 deepspeed/compression/basic_layer.py create mode 100644 deepspeed/compression/compress.py create mode 100644 deepspeed/compression/config.py create mode 100644 deepspeed/compression/constants.py create mode 100644 deepspeed/compression/helper.py create mode 100644 deepspeed/compression/scheduler.py create mode 100644 deepspeed/compression/utils.py create mode 100644 deepspeed/elasticity/elastic_agent.py create mode 100644 deepspeed/elasticity/utils.py create mode 100644 deepspeed/inference/config.py mode change 100644 => 100755 deepspeed/inference/engine.py mode change 100644 => 100755 deepspeed/launcher/launch.py mode change 100644 => 100755 deepspeed/launcher/runner.py create mode 100644 deepspeed/model_implementations/__init__.py create mode 100644 deepspeed/model_implementations/diffusers/__init__.py create mode 100644 deepspeed/model_implementations/diffusers/unet.py create mode 100644 deepspeed/model_implementations/diffusers/vae.py create mode 100644 deepspeed/model_implementations/features/__init__.py create mode 100644 deepspeed/model_implementations/features/cuda_graph.py create mode 100644 deepspeed/model_implementations/transformers/__init__.py create mode 100644 deepspeed/model_implementations/transformers/clip_encoder.py create mode 100644 deepspeed/model_implementations/transformers/ds_base.py create mode 100644 deepspeed/model_implementations/transformers/ds_bert.py create mode 100644 deepspeed/model_implementations/transformers/ds_bloom.py create mode 100644 deepspeed/model_implementations/transformers/ds_gpt.py create mode 100644 deepspeed/model_implementations/transformers/ds_megatron_gpt.py create mode 100644 deepspeed/model_implementations/transformers/ds_opt.py create mode 100644 deepspeed/model_implementations/transformers/ds_transformer.py mode change 100644 => 100755 deepspeed/module_inject/__init__.py create mode 100644 deepspeed/module_inject/auto_tp.py create mode 100644 deepspeed/module_inject/containers/__init__.py 
create mode 100644 deepspeed/module_inject/containers/base.py create mode 100644 deepspeed/module_inject/containers/base_moe.py create mode 100644 deepspeed/module_inject/containers/bert.py create mode 100644 deepspeed/module_inject/containers/bloom.py create mode 100644 deepspeed/module_inject/containers/clip.py create mode 100644 deepspeed/module_inject/containers/distil_bert.py create mode 100644 deepspeed/module_inject/containers/features/__init__.py create mode 100644 deepspeed/module_inject/containers/features/megatron.py create mode 100644 deepspeed/module_inject/containers/features/meta_tensor.py create mode 100644 deepspeed/module_inject/containers/gpt2.py create mode 100644 deepspeed/module_inject/containers/gptj.py create mode 100644 deepspeed/module_inject/containers/gptneo.py create mode 100644 deepspeed/module_inject/containers/gptneox.py create mode 100644 deepspeed/module_inject/containers/megatron_gpt.py create mode 100644 deepspeed/module_inject/containers/megatron_gpt_moe.py create mode 100644 deepspeed/module_inject/containers/opt.py create mode 100644 deepspeed/module_inject/containers/unet.py create mode 100644 deepspeed/module_inject/containers/vae.py mode change 100644 => 100755 deepspeed/module_inject/inject.py create mode 100644 deepspeed/module_inject/layers.py create mode 100644 deepspeed/module_inject/load_checkpoint.py mode change 100644 => 100755 deepspeed/module_inject/module_quantize.py create mode 100644 deepspeed/module_inject/policy.py mode change 100644 => 100755 deepspeed/module_inject/replace_policy.py create mode 100644 deepspeed/module_inject/utils.py create mode 100644 deepspeed/moe/mappings.py create mode 100644 deepspeed/monitor/__init__.py create mode 100644 deepspeed/monitor/config.py create mode 100644 deepspeed/monitor/csv_monitor.py create mode 100644 deepspeed/monitor/monitor.py create mode 100644 deepspeed/monitor/tensorboard.py create mode 100644 deepspeed/monitor/utils.py create mode 100644 
deepspeed/monitor/wandb.py create mode 100644 deepspeed/nebula/__init__.py create mode 100644 deepspeed/nebula/config.py create mode 100644 deepspeed/nebula/constants.py mode change 100644 => 100755 deepspeed/ops/__init__.py mode change 100644 => 100755 deepspeed/ops/adagrad/cpu_adagrad.py mode change 100644 => 100755 deepspeed/ops/adam/__init__.py mode change 100644 => 100755 deepspeed/ops/adam/cpu_adam.py mode change 100644 => 100755 deepspeed/ops/aio/__init__.py create mode 120000 deepspeed/ops/csrc delete mode 100644 deepspeed/ops/csrc/adagrad/cpu_adagrad.cpp delete mode 100644 deepspeed/ops/csrc/adagrad/cpu_adagrad_hip.cpp delete mode 100644 deepspeed/ops/csrc/adam/cpu_adam.cpp delete mode 100644 deepspeed/ops/csrc/adam/cpu_adam_hip.cpp delete mode 100644 deepspeed/ops/csrc/adam/fused_adam_frontend.cpp delete mode 100644 deepspeed/ops/csrc/adam/multi_tensor_adam.cu delete mode 100644 deepspeed/ops/csrc/adam/multi_tensor_adam.hip delete mode 100644 deepspeed/ops/csrc/adam/multi_tensor_apply.cuh delete mode 100644 deepspeed/ops/csrc/adam/multi_tensor_apply_hip.cuh delete mode 100644 deepspeed/ops/csrc/aio/common/deepspeed_aio_common.cpp delete mode 100644 deepspeed/ops/csrc/aio/common/deepspeed_aio_common.h delete mode 100644 deepspeed/ops/csrc/aio/common/deepspeed_aio_types.cpp delete mode 100644 deepspeed/ops/csrc/aio/common/deepspeed_aio_types.h delete mode 100644 deepspeed/ops/csrc/aio/common/deepspeed_aio_utils.cpp delete mode 100644 deepspeed/ops/csrc/aio/common/deepspeed_aio_utils.h delete mode 100644 deepspeed/ops/csrc/aio/py_lib/deepspeed_aio_thread.cpp delete mode 100644 deepspeed/ops/csrc/aio/py_lib/deepspeed_aio_thread.h delete mode 100644 deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio.cpp delete mode 100644 deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio.h delete mode 100644 deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio_handle.cpp delete mode 100644 deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio_handle.h delete mode 100644 
deepspeed/ops/csrc/aio/py_lib/deepspeed_py_copy.cpp delete mode 100644 deepspeed/ops/csrc/aio/py_lib/deepspeed_py_copy.h delete mode 100644 deepspeed/ops/csrc/aio/py_lib/py_ds_aio.cpp delete mode 100644 deepspeed/ops/csrc/aio/py_test/aio_bench_generate_param.py delete mode 100644 deepspeed/ops/csrc/aio/py_test/aio_bench_perf_sweep.py delete mode 100644 deepspeed/ops/csrc/aio/py_test/ds_aio_basic.py delete mode 100644 deepspeed/ops/csrc/aio/py_test/ds_aio_handle.py delete mode 100644 deepspeed/ops/csrc/aio/py_test/parse_aio_stats.py delete mode 100644 deepspeed/ops/csrc/aio/py_test/perf_sweep_utils.py delete mode 100644 deepspeed/ops/csrc/aio/py_test/run_read_sweep.sh delete mode 100644 deepspeed/ops/csrc/aio/py_test/run_write_sweep.sh delete mode 100644 deepspeed/ops/csrc/aio/py_test/single_process_config.json delete mode 100644 deepspeed/ops/csrc/aio/py_test/test_ds_aio.py delete mode 100644 deepspeed/ops/csrc/aio/py_test/test_ds_aio_utils.py delete mode 100644 deepspeed/ops/csrc/aio/py_test/validate_async_io.py delete mode 100644 deepspeed/ops/csrc/common/custom_cuda_kernel.cu delete mode 100644 deepspeed/ops/csrc/common/custom_hip_kernel.hip delete mode 100644 deepspeed/ops/csrc/includes/StopWatch.h delete mode 100644 deepspeed/ops/csrc/includes/Timer.h delete mode 100644 deepspeed/ops/csrc/includes/Timer_hip.h delete mode 100644 deepspeed/ops/csrc/includes/compat.h delete mode 100644 deepspeed/ops/csrc/includes/context.h delete mode 100644 deepspeed/ops/csrc/includes/context_hip.h delete mode 100644 deepspeed/ops/csrc/includes/cpu_adagrad.h delete mode 100644 deepspeed/ops/csrc/includes/cpu_adagrad_hip.h delete mode 100644 deepspeed/ops/csrc/includes/cpu_adam.h delete mode 100644 deepspeed/ops/csrc/includes/cpu_adam_hip.h delete mode 100644 deepspeed/ops/csrc/includes/cublas_wrappers.h delete mode 100644 deepspeed/ops/csrc/includes/cublas_wrappers_hip.h delete mode 100644 deepspeed/ops/csrc/includes/custom_cuda_layers.h delete mode 100644 
deepspeed/ops/csrc/includes/custom_hip_layers.h delete mode 100644 deepspeed/ops/csrc/includes/dropout.h delete mode 100644 deepspeed/ops/csrc/includes/dropout_hip.h delete mode 100644 deepspeed/ops/csrc/includes/ds_transformer_cuda.h delete mode 100644 deepspeed/ops/csrc/includes/ds_transformer_hip.h delete mode 100644 deepspeed/ops/csrc/includes/feed_forward.h delete mode 100644 deepspeed/ops/csrc/includes/feed_forward_hip.h delete mode 100644 deepspeed/ops/csrc/includes/gelu.h delete mode 100644 deepspeed/ops/csrc/includes/gelu_hip.h delete mode 100644 deepspeed/ops/csrc/includes/gemm_test.h delete mode 100644 deepspeed/ops/csrc/includes/gemm_test_hip.h delete mode 100644 deepspeed/ops/csrc/includes/general_kernels.h delete mode 100644 deepspeed/ops/csrc/includes/general_kernels_hip.h delete mode 100644 deepspeed/ops/csrc/includes/normalize_layer.h delete mode 100644 deepspeed/ops/csrc/includes/normalize_layer_hip.h delete mode 100644 deepspeed/ops/csrc/includes/quantizer.h delete mode 100644 deepspeed/ops/csrc/includes/quantizer_hip.h delete mode 100644 deepspeed/ops/csrc/includes/simd.h delete mode 100644 deepspeed/ops/csrc/includes/softmax.h delete mode 100644 deepspeed/ops/csrc/includes/softmax_hip.h delete mode 100644 deepspeed/ops/csrc/includes/strided_batch_gemm.h delete mode 100644 deepspeed/ops/csrc/includes/strided_batch_gemm_hip.h delete mode 100644 deepspeed/ops/csrc/includes/type_shim.h delete mode 100644 deepspeed/ops/csrc/includes/type_shim_hip.h delete mode 100644 deepspeed/ops/csrc/lamb/fused_lamb_cuda.cpp delete mode 100644 deepspeed/ops/csrc/lamb/fused_lamb_cuda_kernel.cu delete mode 100644 deepspeed/ops/csrc/lamb/fused_lamb_hip_kernel.hip delete mode 100644 deepspeed/ops/csrc/quantization/pt_binding.cpp delete mode 100644 deepspeed/ops/csrc/quantization/pt_binding_hip.cpp delete mode 100644 deepspeed/ops/csrc/quantization/quantizer.hip delete mode 100644 deepspeed/ops/csrc/sparse_attention/utils.cpp delete mode 100644 
deepspeed/ops/csrc/transformer/cublas_wrappers.cu delete mode 100644 deepspeed/ops/csrc/transformer/cublas_wrappers.hip delete mode 100644 deepspeed/ops/csrc/transformer/dropout_kernels.cu delete mode 100644 deepspeed/ops/csrc/transformer/dropout_kernels.hip delete mode 100644 deepspeed/ops/csrc/transformer/ds_transformer_cuda.cpp delete mode 100644 deepspeed/ops/csrc/transformer/ds_transformer_hip.cpp delete mode 100644 deepspeed/ops/csrc/transformer/gelu_kernels.cu delete mode 100644 deepspeed/ops/csrc/transformer/gelu_kernels.hip delete mode 100644 deepspeed/ops/csrc/transformer/general_kernels.cu delete mode 100644 deepspeed/ops/csrc/transformer/general_kernels.hip delete mode 100644 deepspeed/ops/csrc/transformer/inference/csrc/apply_rotary_pos_emb.cu delete mode 100644 deepspeed/ops/csrc/transformer/inference/csrc/apply_rotary_pos_emb.hip delete mode 100644 deepspeed/ops/csrc/transformer/inference/csrc/dequantize.cu delete mode 100644 deepspeed/ops/csrc/transformer/inference/csrc/dequantize.hip delete mode 100644 deepspeed/ops/csrc/transformer/inference/csrc/gelu.cu delete mode 100644 deepspeed/ops/csrc/transformer/inference/csrc/gelu.hip delete mode 100644 deepspeed/ops/csrc/transformer/inference/csrc/normalize.cu delete mode 100644 deepspeed/ops/csrc/transformer/inference/csrc/normalize.hip delete mode 100644 deepspeed/ops/csrc/transformer/inference/csrc/pt_binding.cpp delete mode 100644 deepspeed/ops/csrc/transformer/inference/csrc/pt_binding_hip.cpp delete mode 100644 deepspeed/ops/csrc/transformer/inference/csrc/softmax.cu delete mode 100644 deepspeed/ops/csrc/transformer/inference/csrc/softmax.hip delete mode 100644 deepspeed/ops/csrc/transformer/inference/includes/custom_cuda_layers.h delete mode 100644 deepspeed/ops/csrc/transformer/inference/includes/custom_hip_layers.h delete mode 100644 deepspeed/ops/csrc/transformer/normalize_kernels.cu delete mode 100644 deepspeed/ops/csrc/transformer/normalize_kernels.hip delete mode 100644 
deepspeed/ops/csrc/transformer/softmax_kernels.cu delete mode 100644 deepspeed/ops/csrc/transformer/softmax_kernels.hip delete mode 100644 deepspeed/ops/csrc/transformer/transform_kernels.cu delete mode 100644 deepspeed/ops/csrc/transformer/transform_kernels.hip delete mode 100644 deepspeed/ops/csrc/transformer_bak/cublas_wrappers.cu delete mode 100644 deepspeed/ops/csrc/transformer_bak/cublas_wrappers.hip delete mode 100644 deepspeed/ops/csrc/transformer_bak/dropout_kernels.cu delete mode 100644 deepspeed/ops/csrc/transformer_bak/dropout_kernels.hip delete mode 100644 deepspeed/ops/csrc/transformer_bak/ds_transformer_cuda.cpp delete mode 100644 deepspeed/ops/csrc/transformer_bak/ds_transformer_hip.cpp delete mode 100644 deepspeed/ops/csrc/transformer_bak/gelu_kernels.cu delete mode 100644 deepspeed/ops/csrc/transformer_bak/gelu_kernels.hip delete mode 100644 deepspeed/ops/csrc/transformer_bak/general_kernels.cu delete mode 100644 deepspeed/ops/csrc/transformer_bak/general_kernels.hip delete mode 100644 deepspeed/ops/csrc/transformer_bak/inference/csrc/apply_rotary_pos_emb.cu delete mode 100644 deepspeed/ops/csrc/transformer_bak/inference/csrc/apply_rotary_pos_emb.hip delete mode 100644 deepspeed/ops/csrc/transformer_bak/inference/csrc/dequantize.cu delete mode 100644 deepspeed/ops/csrc/transformer_bak/inference/csrc/dequantize.hip delete mode 100644 deepspeed/ops/csrc/transformer_bak/inference/csrc/gelu.cu delete mode 100644 deepspeed/ops/csrc/transformer_bak/inference/csrc/gelu.hip delete mode 100644 deepspeed/ops/csrc/transformer_bak/inference/csrc/normalize.cu delete mode 100644 deepspeed/ops/csrc/transformer_bak/inference/csrc/normalize.hip delete mode 100644 deepspeed/ops/csrc/transformer_bak/inference/csrc/pt_binding.cpp delete mode 100644 deepspeed/ops/csrc/transformer_bak/inference/csrc/pt_binding_hip.cpp delete mode 100644 deepspeed/ops/csrc/transformer_bak/inference/csrc/softmax.cu delete mode 100644 
deepspeed/ops/csrc/transformer_bak/inference/csrc/softmax.hip delete mode 100644 deepspeed/ops/csrc/transformer_bak/inference/includes/context.h delete mode 100644 deepspeed/ops/csrc/transformer_bak/inference/includes/context_hip.h delete mode 100644 deepspeed/ops/csrc/transformer_bak/inference/includes/cublas_wrappers.h delete mode 100644 deepspeed/ops/csrc/transformer_bak/inference/includes/cublas_wrappers_hip.h delete mode 100644 deepspeed/ops/csrc/transformer_bak/inference/includes/custom_cuda_layers.h delete mode 100644 deepspeed/ops/csrc/transformer_bak/inference/includes/custom_hip_layers.h delete mode 100644 deepspeed/ops/csrc/transformer_bak/normalize_kernels.cu delete mode 100644 deepspeed/ops/csrc/transformer_bak/normalize_kernels.hip delete mode 100644 deepspeed/ops/csrc/transformer_bak/softmax_kernels.cu delete mode 100644 deepspeed/ops/csrc/transformer_bak/softmax_kernels.hip delete mode 100644 deepspeed/ops/csrc/utils/flatten_unflatten.cpp create mode 120000 deepspeed/ops/op_builder delete mode 100644 deepspeed/ops/op_builder/__init__.py delete mode 100644 deepspeed/ops/op_builder/async_io.py delete mode 100644 deepspeed/ops/op_builder/builder.py delete mode 100644 deepspeed/ops/op_builder/cpu_adagrad.py delete mode 100644 deepspeed/ops/op_builder/cpu_adam.py delete mode 100644 deepspeed/ops/op_builder/fused_adam.py delete mode 100644 deepspeed/ops/op_builder/fused_lamb.py delete mode 100644 deepspeed/ops/op_builder/quantizer.py delete mode 100644 deepspeed/ops/op_builder/sparse_attn.py delete mode 100644 deepspeed/ops/op_builder/stochastic_transformer.py delete mode 100644 deepspeed/ops/op_builder/transformer_inference.py delete mode 100644 deepspeed/ops/op_builder/utils.py mode change 100644 => 100755 deepspeed/ops/quantizer/quantizer.py create mode 100644 deepspeed/ops/random_ltd/__init__.py create mode 100644 deepspeed/ops/random_ltd/dropping_utils.py mode change 100644 => 100755 deepspeed/ops/sparse_attention/bert_sparse_self_attention.py mode 
change 100644 => 100755 deepspeed/ops/sparse_attention/matmul.py mode change 100644 => 100755 deepspeed/ops/sparse_attention/softmax.py mode change 100644 => 100755 deepspeed/ops/transformer/__init__.py create mode 100644 deepspeed/ops/transformer/inference/bias_add.py create mode 100644 deepspeed/ops/transformer/inference/config.py create mode 100644 deepspeed/ops/transformer/inference/diffusers_2d_transformer.py create mode 100644 deepspeed/ops/transformer/inference/diffusers_attention.py create mode 100644 deepspeed/ops/transformer/inference/diffusers_transformer_block.py create mode 100644 deepspeed/ops/transformer/inference/ds_attention.py create mode 100644 deepspeed/ops/transformer/inference/ds_mlp.py create mode 100644 deepspeed/ops/transformer/inference/op_binding/__init__.py create mode 100644 deepspeed/ops/transformer/inference/op_binding/base.py create mode 100644 deepspeed/ops/transformer/inference/op_binding/gelu_gemm.py create mode 100644 deepspeed/ops/transformer/inference/op_binding/linear.py create mode 100644 deepspeed/ops/transformer/inference/op_binding/mlp_gemm.py create mode 100644 deepspeed/ops/transformer/inference/op_binding/qkv_gemm.py create mode 100644 deepspeed/ops/transformer/inference/op_binding/residual_add.py create mode 100644 deepspeed/ops/transformer/inference/op_binding/softmax.py create mode 100644 deepspeed/ops/transformer/inference/op_binding/softmax_context.py create mode 100644 deepspeed/ops/transformer/inference/op_binding/vector_matmul.py create mode 100644 deepspeed/ops/transformer/inference/triton_ops.py mode change 100644 => 100755 deepspeed/ops/transformer/transformer.py mode change 100644 => 100755 deepspeed/runtime/activation_checkpointing/config.py create mode 100644 deepspeed/runtime/checkpoint_engine/README.md create mode 100644 deepspeed/runtime/checkpoint_engine/__init__.py create mode 100644 deepspeed/runtime/checkpoint_engine/checkpoint_engine.py create mode 100644 
deepspeed/runtime/checkpoint_engine/nebula_checkpoint_engine.py create mode 100644 deepspeed/runtime/checkpoint_engine/torch_checkpoint_engine.py mode change 100644 => 100755 deepspeed/runtime/config.py mode change 100644 => 100755 deepspeed/runtime/config_utils.py mode change 100644 => 100755 deepspeed/runtime/constants.py create mode 100644 deepspeed/runtime/data_pipeline/config.py create mode 100644 deepspeed/runtime/data_pipeline/constants.py create mode 100644 deepspeed/runtime/data_pipeline/data_routing/__init__.py create mode 100644 deepspeed/runtime/data_pipeline/data_routing/basic_layer.py create mode 100644 deepspeed/runtime/data_pipeline/data_routing/helper.py create mode 100644 deepspeed/runtime/data_pipeline/data_routing/scheduler.py create mode 100644 deepspeed/runtime/data_pipeline/data_routing/utils.py create mode 100644 deepspeed/runtime/data_pipeline/data_sampling/__init__.py create mode 100644 deepspeed/runtime/data_pipeline/data_sampling/data_analyzer.py create mode 100644 deepspeed/runtime/data_pipeline/data_sampling/data_sampler.py create mode 100644 deepspeed/runtime/data_pipeline/data_sampling/indexed_dataset.py create mode 100644 deepspeed/runtime/data_pipeline/data_sampling/utils.py mode change 100644 => 100755 deepspeed/runtime/eigenvalue.py mode change 100644 => 100755 deepspeed/runtime/fp16/fused_optimizer.py mode change 100644 => 100755 deepspeed/runtime/fp16/loss_scaler.py mode change 100644 => 100755 deepspeed/runtime/fp16/unfused_optimizer.py mode change 100644 => 100755 deepspeed/runtime/lr_schedules.py mode change 100644 => 100755 deepspeed/runtime/progressive_layer_drop.py mode change 100644 => 100755 deepspeed/runtime/quantize.py mode change 100644 => 100755 deepspeed/runtime/state_dict_factory.py mode change 100644 => 100755 deepspeed/runtime/utils.py create mode 100644 deepspeed/runtime/zero/parameter_offload.py mode change 100644 => 100755 deepspeed/runtime/zero/partition_parameters.py mode change 100644 => 100755 
deepspeed/runtime/zero/stage_1_and_2.py mode change 100644 => 100755 deepspeed/runtime/zero/utils.py create mode 100644 deepspeed/utils/comms_logging.py create mode 100644 deepspeed/utils/init_on_device.py create mode 100644 deepspeed/utils/mixed_precision_linkage.py create mode 100644 deepspeed/utils/tensor_fragment.py mode change 100644 => 100755 deepspeed/utils/timer.py create mode 100644 deepspeed/utils/types.py mode change 100644 => 100755 deepspeed/utils/zero_to_fp32.py mode change 100644 => 100755 docker/Dockerfile mode change 100644 => 100755 docs/_data/navigation.yml create mode 100644 docs/_pages/compression.md mode change 100644 => 100755 docs/_pages/config-json.md create mode 100755 docs/_pages/inference.md create mode 100644 docs/_pages/training.md mode change 100644 => 100755 docs/_posts/2020-09-09-ZeRO-Offload.md mode change 100644 => 100755 docs/_posts/2020-10-28-progressive-layer-dropping-news.md create mode 100644 docs/_posts/2022-07-26-deepspeed-azure.md create mode 100644 docs/_posts/2022-09-10-zero-inference.md create mode 100644 docs/_posts/2022-10-11-mii.md create mode 100644 docs/_posts/2022-12-12-data-efficiency.md mode change 100644 => 100755 docs/_tutorials/advanced-install.md create mode 100644 docs/_tutorials/automatic-tensor-parallelism.md mode change 100644 => 100755 docs/_tutorials/bert-finetuning.md mode change 100644 => 100755 docs/_tutorials/bert-pretraining.md create mode 100644 docs/_tutorials/comms-logging.md create mode 100644 docs/_tutorials/data-efficiency.md mode change 100644 => 100755 docs/_tutorials/gan.md mode change 100644 => 100755 docs/_tutorials/mixture-of-experts-nlg.md create mode 100644 docs/_tutorials/model-compression.md create mode 100644 docs/_tutorials/monitor.md mode change 100644 => 100755 docs/_tutorials/progressive_layer_dropping.md mode change 100644 => 100755 docs/_tutorials/transformer_kernel.md create mode 100755 docs/assets/images/175b-trend.png create mode 100755 docs/assets/images/1t-trend.png 
create mode 100755 docs/assets/images/3pillars.png create mode 100755 docs/assets/images/530b-trend.png mode change 100644 => 100755 docs/assets/images/DeepSpeed_dark_transparent.svg mode change 100644 => 100755 docs/assets/images/DeepSpeed_light_transparent.svg create mode 100755 docs/assets/images/accelerate-dark.png create mode 100755 docs/assets/images/accelerate-light.png create mode 100755 docs/assets/images/accelerate.png mode change 100644 => 100755 docs/assets/images/adam-convergence.png mode change 100644 => 100755 docs/assets/images/bert-ib.png mode change 100644 => 100755 docs/assets/images/bert-scaling.png mode change 100644 => 100755 docs/assets/images/bert-tcp.png mode change 100644 => 100755 docs/assets/images/bingbert-mixedbit.png mode change 100644 => 100755 docs/assets/images/convergence-table.png create mode 100644 docs/assets/images/data_efficiency/data_efficiecy_fig0.png create mode 100644 docs/assets/images/data_efficiency/data_efficiecy_fig1.png create mode 100644 docs/assets/images/data_efficiency/data_efficiecy_fig2.png create mode 100644 docs/assets/images/data_efficiency/data_efficiecy_fig3.png create mode 100644 docs/assets/images/determined.svg mode change 100644 => 100755 docs/assets/images/gpu-numbers.png create mode 100755 docs/assets/images/hf-logo.png create mode 100755 docs/assets/images/hf-transformers.png mode change 100644 => 100755 docs/assets/images/inference-gemm-scheduling.png mode change 100644 => 100755 docs/assets/images/inference-kernel-fusion.png mode change 100644 => 100755 docs/assets/images/inference-latency.png mode change 100644 => 100755 docs/assets/images/inference-throughput.png create mode 100755 docs/assets/images/large-model-graph.png create mode 100755 docs/assets/images/lightning-dark.png create mode 100755 docs/assets/images/lightning-dark.svg create mode 100755 docs/assets/images/lightning-light.svg create mode 100755 docs/assets/images/lightning.png create mode 100755 
docs/assets/images/mii/azure-cost.png create mode 100644 docs/assets/images/mii/bert.png create mode 100644 docs/assets/images/mii/bloom.png create mode 100644 docs/assets/images/mii/gpt.png create mode 100755 docs/assets/images/mii/hero-transparent.png create mode 100755 docs/assets/images/mii/hero.png create mode 100755 docs/assets/images/mii/llm-latency-sd-latency-zoom.png create mode 100755 docs/assets/images/mii/llm-latency-sd-latency.png create mode 100755 docs/assets/images/mii/mii-arch.png create mode 100755 docs/assets/images/mii/multi-gpu-latency.png create mode 100755 docs/assets/images/mii/opt-bloom.png create mode 100644 docs/assets/images/mii/opt.png create mode 100644 docs/assets/images/mii/roberta.png create mode 100755 docs/assets/images/mii/sd-latency.png create mode 100755 docs/assets/images/mii/tput-llms.png mode change 100644 => 100755 docs/assets/images/moe-nlg.png create mode 100755 docs/assets/images/mosaicml.svg create mode 100755 docs/assets/images/old-vs-new-azure.png mode change 100644 => 100755 docs/assets/images/onebit-adam-overview.png mode change 100644 => 100755 docs/assets/images/onebit-convergence.png create mode 100755 docs/assets/images/perf-overview.png mode change 100644 => 100755 docs/assets/images/pipe-schedule.png mode change 100644 => 100755 docs/assets/images/quantization-8bit.png mode change 100644 => 100755 docs/assets/images/quantization-mixedbit.png mode change 100644 => 100755 docs/assets/images/squad-ib.png mode change 100644 => 100755 docs/assets/images/squad-scaling.png mode change 100644 => 100755 docs/assets/images/squad-tcp.png create mode 100644 docs/assets/images/tensorboard_monitor.PNG create mode 100755 docs/assets/images/transformers-dark.png create mode 100755 docs/assets/images/transformers-light.png create mode 100755 docs/assets/images/vmss-setup.png create mode 100644 docs/assets/images/wandb_monitor.PNG create mode 100644 docs/assets/images/xtc-1.png create mode 100644 docs/assets/images/xtc-2.png 
create mode 100644 docs/assets/images/xtc-3.png create mode 100644 docs/assets/images/xtc-4.png create mode 100644 docs/assets/images/zero_inference_full_offload.png create mode 100644 docs/assets/images/zero_inference_model_scale.png create mode 100644 docs/assets/images/zero_inference_models.png create mode 100644 docs/assets/images/zero_inference_multi_gpu.png create mode 100644 docs/assets/images/zero_inference_prefetch.png create mode 100644 docs/assets/images/zero_inference_token_count_batch_size.png create mode 100644 docs/assets/images/zero_inference_token_count_cpu_throughput.png create mode 100644 docs/assets/images/zero_inference_token_count_nvme_throughput.png mode change 100644 => 100755 docs/code-docs/build-api-docs.sh create mode 100644 docs/code-docs/source/monitor.rst mode change 100644 => 100755 docs/code-docs/source/optimizers.rst mode change 100644 => 100755 docs/code-docs/source/schedulers.rst mode change 100644 => 100755 docs/index.md create mode 100644 env.sh create mode 100644 examples/README.md create mode 100644 inference/test_checkpoint_sharding.py create mode 100644 inference/test_inference.py create mode 100644 inference/test_inference_config.py create mode 100644 inference/test_model_profiling.py mode change 100644 => 100755 install.sh create mode 100644 op_builder/all_ops.py rename deepspeed/ops/op_builder/transformer.py => op_builder/random_ltd.py (53%) create mode 100644 op_builder/spatial_inference.py mode change 100644 => 100755 op_builder/transformer_inference.py mode change 100644 => 100755 requirements/requirements-autotuning.txt create mode 100644 requirements/requirements-inf.txt create mode 100644 requirements/requirements-sd.txt mode change 100644 => 100755 requirements/requirements-sparse_attn.txt mode change 100644 => 100755 requirements/requirements.txt create mode 100644 run.sh create mode 100755 scripts/check-license.py create mode 100755 scripts/check-torchdist.py mode change 100644 => 100755 setup.py create mode 
100644 tests/accelerator/ds_config.json create mode 100644 tests/accelerator/test_ds_init.py mode change 100644 => 100755 tests/benchmarks/flatten_bench.py mode change 100644 => 100755 tests/benchmarks/unflatten_bench.py mode change 100644 => 100755 tests/model/BingBertSquad/BingBertSquad_run_func_test.py mode change 100644 => 100755 tests/model/BingBertSquad/BingBertSquad_test_common.py mode change 100644 => 100755 tests/model/BingBertSquad/__init__.py mode change 100644 => 100755 tests/model/BingBertSquad/deepspeed_bsz24_fp16_config.json mode change 100644 => 100755 tests/model/BingBertSquad/deepspeed_bsz24_fp16_eigenvalue_quantize_config.json mode change 100644 => 100755 tests/model/BingBertSquad/deepspeed_bsz24_fp16_zero2_config.json mode change 100644 => 100755 tests/model/BingBertSquad/deepspeed_bsz24_fp32_config.json mode change 100644 => 100755 tests/model/BingBertSquad/run_BingBertSquad.sh mode change 100644 => 100755 tests/model/BingBertSquad/run_BingBertSquad_sanity.sh mode change 100644 => 100755 tests/model/BingBertSquad/run_tests.sh mode change 100644 => 100755 tests/model/Megatron_GPT2/ds_config_func_bs4_zero1.json mode change 100644 => 100755 tests/model/Megatron_GPT2/ds_config_func_bs4_zero2.json mode change 100644 => 100755 tests/model/Megatron_GPT2/ds_config_func_bs4_zero2_offload.json mode change 100644 => 100755 tests/model/Megatron_GPT2/ds_config_func_bs8_no_zero.json mode change 100644 => 100755 tests/model/Megatron_GPT2/ds_config_func_bs8_zero0_gas3.json mode change 100644 => 100755 tests/model/Megatron_GPT2/ds_config_func_bs8_zero1.json mode change 100644 => 100755 tests/model/Megatron_GPT2/ds_config_func_bs8_zero2.json mode change 100644 => 100755 tests/model/Megatron_GPT2/ds_config_func_bs8_zero2_gas3.json mode change 100644 => 100755 tests/model/Megatron_GPT2/ds_config_func_bs8_zero2_offload.json mode change 100644 => 100755 tests/model/Megatron_GPT2/ds_config_func_scheduler.json mode change 100644 => 100755 
tests/model/Megatron_GPT2/ds_config_perf_bs16.json mode change 100644 => 100755 tests/model/Megatron_GPT2/ds_config_perf_bs32.json mode change 100644 => 100755 tests/model/Megatron_GPT2/ds_config_perf_bs8.json mode change 100644 => 100755 tests/model/Megatron_GPT2/ds_gpt2_test.sh mode change 100644 => 100755 tests/model/Megatron_GPT2/run_checkpoint_test.py mode change 100644 => 100755 tests/model/Megatron_GPT2/run_func_test.py mode change 100644 => 100755 tests/model/Megatron_GPT2/run_perf_baseline.py mode change 100644 => 100755 tests/model/Megatron_GPT2/run_perf_test.py mode change 100644 => 100755 tests/model/Megatron_GPT2/test_common.py mode change 100644 => 100755 tests/model/run_sanity_check.py create mode 100755 tests/perf/adagrad_test.py mode change 100644 => 100755 tests/perf/adam_test.py mode change 100644 => 100755 tests/perf/adam_test1.py create mode 100644 tests/pytest.ini mode change 100644 => 100755 tests/small_model_debugging/test_model.py create mode 100644 tests/unit/alexnet_model.py create mode 100644 tests/unit/autotuning/test_autotuning.py create mode 100644 tests/unit/checkpoint/common.py create mode 100644 tests/unit/checkpoint/test_latest_checkpoint.py create mode 100644 tests/unit/checkpoint/test_lr_scheduler.py create mode 100644 tests/unit/checkpoint/test_moe_checkpoint.py create mode 100644 tests/unit/checkpoint/test_other_optimizer.py create mode 100644 tests/unit/checkpoint/test_pipeline.py create mode 100644 tests/unit/checkpoint/test_reshape_checkpoint.py create mode 100644 tests/unit/checkpoint/test_sparse.py create mode 100644 tests/unit/checkpoint/test_tag_validation.py create mode 100644 tests/unit/checkpoint/test_zero_optimizer.py create mode 100644 tests/unit/comm/test_dist.py create mode 100644 tests/unit/compression/test_compression.py create mode 100644 tests/unit/elasticity/test_elastic.py create mode 100644 tests/unit/launcher/test_ds_arguments.py create mode 100644 tests/unit/launcher/test_multinode_runner.py create mode 
100644 tests/unit/launcher/test_run.py create mode 100644 tests/unit/model_parallelism/test_configurable_parallel_mp.py create mode 100644 tests/unit/model_parallelism/test_configurable_parallel_pp.py create mode 100644 tests/unit/moe/test_moe.py create mode 100644 tests/unit/moe/test_moe_tp.py create mode 100644 tests/unit/monitor/test_monitor.py create mode 100644 tests/unit/ops/accelerators/test_accelerator_backward.py create mode 100644 tests/unit/ops/accelerators/test_accelerator_forward.py create mode 100644 tests/unit/ops/adagrad/test_cpu_adagrad.py create mode 100644 tests/unit/ops/adam/test_adamw.py create mode 100644 tests/unit/ops/adam/test_cpu_adam.py create mode 100644 tests/unit/ops/aio/test_aio.py create mode 100644 tests/unit/ops/quantizer/test_dequantize.py create mode 100644 tests/unit/ops/quantizer/test_fake_quantization.py create mode 100644 tests/unit/ops/quantizer/test_quantize.py create mode 100644 tests/unit/ops/sparse_attention/test_sparse_attention.py create mode 100644 tests/unit/ops/spatial/test_nhwc_bias_add.py create mode 100644 tests/unit/ops/transformer/inference/test_bias_add.py create mode 100644 tests/unit/ops/transformer/inference/test_bias_geglu.py create mode 100644 tests/unit/ops/transformer/inference/test_bias_gelu.py create mode 100644 tests/unit/ops/transformer/inference/test_bias_relu.py create mode 100644 tests/unit/ops/transformer/inference/test_layer_norm.py create mode 100644 tests/unit/ops/transformer/inference/test_moe_res_matmult.py create mode 100644 tests/unit/ops/transformer/inference/test_residual_add.py create mode 100644 tests/unit/pipe/test_pipe_module.py create mode 100644 tests/unit/profiling/flops_profiler/test_flops_profiler.py create mode 100644 tests/unit/run_test.sh create mode 100644 tests/unit/runtime/activation_checkpointing/test_activation_checkpointing.py create mode 100644 tests/unit/runtime/comm/test_coalesced_collectives.py create mode 100644 
tests/unit/runtime/half_precision/onebit/test_onebit.py create mode 100644 tests/unit/runtime/half_precision/test_bf16.py create mode 100644 tests/unit/runtime/half_precision/test_dynamic_loss_scale.py create mode 100644 tests/unit/runtime/half_precision/test_fp16.py create mode 100644 tests/unit/runtime/pipe/test_pipe.py create mode 100644 tests/unit/runtime/pipe/test_pipe_schedule.py create mode 100644 tests/unit/runtime/pipe/test_topology.py create mode 100644 tests/unit/runtime/sparse_tensor/test_averaging_sparse_gradients.py create mode 100644 tests/unit/runtime/sparse_tensor/test_csr.py create mode 100644 tests/unit/runtime/sparse_tensor/test_sparse_grads.py create mode 100644 tests/unit/runtime/test_autocast.py create mode 100644 tests/unit/runtime/test_data.py create mode 100644 tests/unit/runtime/test_data_efficiency.py create mode 100644 tests/unit/runtime/test_ds_config_dict.py create mode 100644 tests/unit/runtime/test_ds_config_model.py create mode 100644 tests/unit/runtime/test_ds_initialize.py create mode 100644 tests/unit/runtime/test_lr_schedulers.py create mode 100644 tests/unit/runtime/test_multi_output_model.py create mode 100644 tests/unit/runtime/test_pld.py create mode 100644 tests/unit/runtime/test_runtime_utils.py create mode 100644 tests/unit/runtime/utils/test_partition.py create mode 100644 tests/unit/runtime/zero/test_ignore_unused_parameters.py create mode 100644 tests/unit/runtime/zero/test_zero.py create mode 100644 tests/unit/runtime/zero/test_zero_config.py create mode 100644 tests/unit/runtime/zero/test_zero_context.py create mode 100644 tests/unit/runtime/zero/test_zero_context_ancestry.py create mode 100644 tests/unit/runtime/zero/test_zero_context_return.py create mode 100644 tests/unit/runtime/zero/test_zero_tensor_fragment.py create mode 100644 tests/unit/runtime/zero/test_zero_tiled.py create mode 100644 tests/unit/runtime/zero/utils.py create mode 100644 
tests/unit/test_output/ds_logs/test/events.out.tfevents.1679370169.9dad78d721ca.29247.0 create mode 100644 tests/unit/test_output/ds_logs/test/events.out.tfevents.1679370366.9dad78d721ca.39331.0 create mode 100644 tests/unit/utils/test_get_optim_files.py create mode 100644 tests/unit/utils/test_groups.py create mode 100644 tests/unit/utils/test_init_on_device.py diff --git a/CODEOWNERS b/CODEOWNERS index ec7993c..5fc2040 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1 +1,56 @@ -* @jeffra @samyam @tjruwase @ShadenSmith @conglongli @awan-10 @arashashari @cli99 @eltonzheng @minjiaz @RezaYazdaniAminabadi @niumanar +# This file is used to subscribe for notifications for PRs +# related to specific file paths, does not necessarily mean +# approval is required from these people before merging. +# +# Learn more about CODEOWNERS syntax here: +# https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners + + +# top-level repo folders +/.github/ @jeffra @mrwyattii +/azure/ @jeffra @awan-10 +/benchmarks/ @jeffra @awan-10 @mrwyattii @molly-smith +/bin/ @jeffra +/csrc/ @RezaYazdaniAminabadi @awan-10 @jeffra @cmikeh2 @arashb +/deepspeed/ @jeffra +/docker/ @jeffra @awan-10 +/docs/ @jeffra @mrwyattii +/examples/ @jeffra @awan-10 @mrwyattii +/op_builder/ @jeffra @RezaYazdaniAminabadi @cmikeh2 +/release/ @jeffra @mrwyattii +/requirements/ @jeffra @mrwyattii +/scripts/ @jeffra @awan-10 +/tests/ @jeffra @mrwyattii @tjruwase + +# deepspeed +/deepspeed/autotuning/ @cli99 +/deepspeed/checkpoint/ @tjruwase +/deepspeed/comm/ @awan-10 +/deepspeed/compression/ @yaozhewei @minjiaz @xiaoxiawu-microsoft @conglongli +/deepspeed/elasticity/ @jeffra @awan-10 +/deepspeed/launcher/ @jeffra @awan-10 +/deepspeed/module_inject/ @RezaYazdaniAminabadi @jeffra @mrwyattii @awan-10 @cmikeh2 @arashb +/deepspeed/moe/ @awan-10 +/deepspeed/monitor/ @awan-10 @jeffra +/deepspeed/nebula/ @tjruwase @jeffra +/deepspeed/ops/ @RezaYazdaniAminabadi 
@jeffra @mrwyattii @awan-10 @cmikeh2 @arashb +/deepspeed/pipe/ @ShadenSmith @duli2012 +/deepspeed/profiling/ @cli99 +/deepspeed/utils/ @jeffra @tjruwase @awan-10 + +# inference +/deepspeed/inference/ @RezaYazdaniAminabadi @jeffra @mrwyattii @awan-10 @cmikeh2 @arashb +/deepspeed/model_implementations/ @RezaYazdaniAminabadi @jeffra @mrwyattii @awan-10 @cmikeh2 @arashb + +# training +/deepspeed/runtime/ @jeffra @tjruwase +/deepspeed/runtime/activation_checkpointing/ @jeffra @tjruwase +/deepspeed/runtime/checkpoint_engine/ @tjruwase @jeffra +/deepspeed/runtime/comm/ @awan-10 +/deepspeed/runtime/compression/ @awan-10 @conglongli +/deepspeed/runtime/data_pipeline/ @conglongli +/deepspeed/runtime/fp16/ @jeffra @tjruwase +/deepspeed/runtime/fp16/onebit/ @conglongli @awan-10 +/deepspeed/runtime/pipe/ @ShadenSmith @duli2012 +/deepspeed/runtime/swap_tensor/ @tjruwase @mrwyattii +/deepspeed/runtime/zero/ @jeffra @tjruwase @samyam @mrwyattii diff --git a/MANIFEST.in b/MANIFEST.in index a918b92..2fec750 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -2,3 +2,6 @@ include *.txt README.md recursive-include requirements *.txt recursive-include deepspeed *.cpp *.h *.cu *.hip *.tr *.cuh *.cc *.json recursive-include csrc *.cpp *.h *.cu *.tr *.cuh *.cc +recursive-include op_builder *.py +recursive-include benchmarks *.py +recursive-include accelerator *.py diff --git a/MANIFEST_win.in b/MANIFEST_win.in index ddfe73e..f0426fb 100644 --- a/MANIFEST_win.in +++ b/MANIFEST_win.in @@ -6,3 +6,4 @@ recursive-include deepspeed *.tr recursive-exclude deepspeed/ops/csrc *.cpp *.h *.cu *.cuh *.cc prune csrc prune op_builder +prune accelerator diff --git a/README.md b/README.md old mode 100644 new mode 100755 index aafbbe5..bfa03a6 --- a/README.md +++ b/README.md @@ -1,75 +1,114 @@ -[![Build Status](https://github.com/microsoft/deepspeed/workflows/Build/badge.svg)](https://github.com/microsoft/DeepSpeed/actions) +[![License 
MIT](https://badgen.net/badge/license/MIT/blue)](https://github.com/Microsoft/DeepSpeed/blob/master/LICENSE) [![PyPI version](https://badge.fury.io/py/deepspeed.svg)](https://pypi.org/project/deepspeed/) -[![Documentation Status](https://readthedocs.org/projects/deepspeed/badge/?version=latest)](https://deepspeed.readthedocs.io/en/latest/?badge=latest) -[![License MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://github.com/Microsoft/DeepSpeed/blob/master/LICENSE) +[![Downloads](https://pepy.tech/badge/deepspeed)](https://pepy.tech/project/deepspeed) +[![Build](https://badgen.net/badge/build/check-status/blue)](#build-pipeline-status) +
- ## Latest News -* [2022/03/21] [Supporting efficient large model training on AMD Instinct GPUs with DeepSpeed](https://cloudblogs.microsoft.com/opensource/2022/03/21/supporting-efficient-large-model-training-on-amd-instinct-gpus-with-deepspeed/) -* [2022/03/07] [Maximizing Communication Efficiency for Large-scale Training via 0/1 Adam](https://www.deepspeed.ai/tutorials/zero-one-adam/) -* [2022/01/19] [DeepSpeed: Advancing MoE inference and training to power next-generation AI scale](https://www.microsoft.com/en-us/research/blog/deepspeed-advancing-moe-inference-and-training-to-power-next-generation-ai-scale/) - * [Mixture of Experts (MoE) for NLG tutorial](https://www.deepspeed.ai/tutorials/mixture-of-experts-nlg/). - * [Mixture of Experts (MoE) Inference tutorial](https://www.deepspeed.ai/tutorials/moe-inference-tutorial). -* [2021/11/15] [Autotuning: Automatically discover the optimal DeepSpeed configuration that delivers good training speed](https://www.deepspeed.ai/news/2021/11/15/autotuning.html) -* [2021/10/11] [Using DeepSpeed and Megatron to Train Megatron-Turing NLG 530B, the World’s Largest and Most Powerful Generative Language Model](https://www.microsoft.com/en-us/research/blog/using-deepspeed-and-megatron-to-train-megatron-turing-nlg-530b-the-worlds-largest-and-most-powerful-generative-language-model/) - * Read more on how to [train large models with DeepSpeed](https://www.deepspeed.ai/tutorials/large-models-w-deepspeed/) - -### DeepSpeed is hiring, [come join us!](https://careers.microsoft.com/us/en/search-results?keywords=http:%2F%2Fdeepspeed.ai) + DeepSpeed trained the world's most powerful language models ([MT-530B](https://www.microsoft.com/en-us/research/blog/using-deepspeed-and-megatron-to-train-megatron-turing-nlg-530b-the-worlds-largest-and-most-powerful-generative-language-model/), [BLOOM](https://huggingface.co/blog/bloom-megatron-deepspeed)); [learn how](https://www.deepspeed.ai/tutorials/large-models-w-deepspeed/). 
+ +* [2023/02] [Automatic Tensor Parallelism: Enables tensor parallelism by default without providing an injection policy](https://www.deepspeed.ai/tutorials/automatic-tensor-parallelism/) +* [2022/12] [DeepSpeed Data Efficiency: A composable library that makes better use of data, increases training efficiency, and improves model quality](https://www.deepspeed.ai/2022/12/11/data-efficiency.html) +* [2022/11] [Stable Diffusion Image Generation under 1 second w. DeepSpeed MII](https://github.com/microsoft/DeepSpeed-MII/tree/main/examples/benchmark/txt2img) +* [2022/10] [DeepSpeed-MII: instant speedup on 24,000+ open-source DL models with up to 40x cheaper inference](https://www.deepspeed.ai/2022/10/10/mii.html) +* [2022/09] [ZeRO-Inference: Democratizing massive model inference](https://www.deepspeed.ai/2022/09/09/zero-inference.html) +* [2022/07] [Azure and DeepSpeed empower easy-to-use and high-performance model training](https://azure.microsoft.com/en-us/blog/azure-empowers-easytouse-highperformance-and-hyperscale-model-training-using-deepspeed/) + +--- + +# Extreme Speed and Scale for DL Training and Inference + +[DeepSpeed](https://www.deepspeed.ai/) is an easy-to-use deep learning optimization software suite that enables unprecedented scale and speed for Deep Learning Training and Inference. With DeepSpeed you can: + +* Train/Inference dense or sparse models with billions or trillions of parameters +* Achieve excellent system throughput and efficiently scale to thousands of GPUs +* Train/Inference on resource constrained GPU systems +* Achieve unprecedented low latency and high throughput for inference +* Achieve extreme compression for an unparalleled inference latency and model size reduction with low costs + --- -[DeepSpeed](https://www.deepspeed.ai/) is a deep learning optimization -library that makes distributed training easy, efficient, and effective. 
+# DeepSpeed's three innovation pillars + + + + +## DeepSpeed-Training + +DeepSpeed offers a confluence of system innovations, that has made large scale DL training effective, and efficient, greatly improved ease of use, and redefined the DL training landscape in terms of scale that is possible. These innovations such as ZeRO, 3D-Parallelism, DeepSpeed-MoE, ZeRO-Infinity, etc. fall under the training pillar. Learn more: [DeepSpeed-Training](https://www.deepspeed.ai/training/) + +## DeepSpeed-Inference + +DeepSpeed brings together innovations in parallelism technology such as tensor, pipeline, expert and ZeRO-parallelism, and combines them with high performance custom inference kernels, communication optimizations and heterogeneous memory technologies to enable inference at an unprecedented scale, while achieving unparalleled latency, throughput and cost reduction. This systematic composition of system technologies for inference falls under the inference pillar. Learn more: [DeepSpeed-Inference](https://www.deepspeed.ai/inference) + + +## DeepSpeed-Compression + +To further increase the inference efficiency, DeepSpeed offers easy-to-use and flexible-to-compose compression techniques for researchers and practitioners to compress their models while delivering faster speed, smaller model size, and significantly reduced compression cost. Moreover, SoTA innovations on compression like ZeroQuant and XTC are included under the compression pillar. Learn more: [DeepSpeed-Compression](https://www.deepspeed.ai/compression) + +--- -

10x Larger Models

-

10x Faster Training

-

Minimal Code Change

+# DeepSpeed Software Suite -DeepSpeed delivers extreme-scale model training for everyone, from data scientists training on massive supercomputers to those training on low-end clusters or even on a single GPU: -* Extreme scale: Using current generation of GPU clusters with hundreds of devices, 3D parallelism of DeepSpeed can efficiently train deep learning models with trillions of parameters. -* Extremely memory efficient: With just a single GPU, ZeRO-Offload of DeepSpeed can train models with over 10B parameters, 10x bigger than the state of arts, democratizing multi-billion-parameter model training such that many deep learning scientists can explore bigger and better models. -* Extremely long sequence length: Sparse attention of DeepSpeed powers an order-of-magnitude longer input sequence and obtains up to 6x faster execution comparing with dense transformers. -* Extremely communication efficient: 3D parallelism improves communication efficiency allows users to train multi-billion-parameter models 2–7x faster on clusters with limited network bandwidth. 1-bit Adam, 0/1 Adam and 1-bit LAMB reduce communication volume by up to 26x while achieving similar convergence efficiency to Adam/LAMB, allowing for scaling to different types of GPU clusters and networks. +## DeepSpeed Library -Early adopters of DeepSpeed have already produced -a language model (LM) with over 17B parameters called -[Turing-NLG](https://www.microsoft.com/en-us/research/blog/turing-nlg-a-17-billion-parameter-language-model-by-microsoft), -establishing a new SOTA in the LM category. + The [DeepSpeed](https://github.com/microsoft/deepspeed) library (this repository) implements and packages the innovations and technologies in DeepSpeed Training, Inference and Compression Pillars into a single easy-to-use, open-sourced repository. It allows for easy composition of multitude of features within a single training, inference or compression pipeline. 
The DeepSpeed Library is heavily adopted by the DL community, and has been used to enable some of the most powerful models (see [DeepSpeed Adoption](#deepspeed-adoption)). + +## Model Implementations for Inference (MII) + + [Model Implementations for Inference (MII)](https://github.com/microsoft/deepspeed-mii) is an open-sourced repository for making low-latency and high-throughput inference accessible to all data scientists by alleviating the need to apply complex system optimization techniques themselves. Out-of-box, MII offers support for thousands of widely used DL models, optimized using DeepSpeed-Inference, that can be deployed with a few lines of code, while achieving significant latency reduction compared to their vanilla open-sourced versions. + +## DeepSpeed on Azure + + DeepSpeed users are diverse and have access to different environments. We recommend to try DeepSpeed on Azure as it is the simplest and easiest method. The recommended method to try DeepSpeed on Azure is through AzureML [recipes](https://github.com/Azure/azureml-examples/tree/main/v1/python-sdk/workflows/train/deepspeed). The job submission and data preparation scripts have been made available [here](https://github.com/microsoft/Megatron-DeepSpeed/tree/main/examples/azureml). For more details on how to use DeepSpeed on Azure, please follow the [Azure tutorial](https://www.deepspeed.ai/tutorials/azure/). + +--- + +# DeepSpeed Adoption DeepSpeed is an important part of Microsoft’s new [AI at Scale](https://www.microsoft.com/en-us/research/project/ai-at-scale/) initiative to enable next-generation AI capabilities at scale, where you can find more information [here](https://innovation.microsoft.com/en-us/exploring-ai-at-scale). 
-**_For further documentation, tutorials, and technical deep-dives please see [deepspeed.ai](https://www.deepspeed.ai/)!_** - -# Table of Contents -| Section | Description | -| --------------------------------------- | ------------------------------------------- | -| [Why DeepSpeed?](#why-deepspeed) | DeepSpeed overview | -| [Install](#installation) | Installation details | -| [Features](#features) | Feature list and overview | -| [Further Reading](#further-reading) | Documentation, tutorials, etc. | -| [Contributing](#contributing) | Instructions for contributing | -| [Publications](#publications) | Publications related to DeepSpeed | -| [Videos](#videos) | Videos related to DeepSpeed | - -# Why DeepSpeed? -Training advanced deep learning models is challenging. Beyond model design, -model scientists also need to set up the state-of-the-art training techniques -such as distributed training, mixed precision, gradient accumulation, and -checkpointing. Yet still, scientists may not achieve the desired system -performance and convergence rate. Large model sizes are even more challenging: -a large model easily runs out of memory with pure data parallelism and it is -difficult to use model parallelism. DeepSpeed addresses these challenges to -accelerate model development *and* training. 
+DeepSpeed has been used to train many different large-scale models, below is a list of several examples that we are aware of (if you'd like to include your model please submit a PR): + + * [Megatron-Turing NLG (530B)](https://www.microsoft.com/en-us/research/blog/using-deepspeed-and-megatron-to-train-megatron-turing-nlg-530b-the-worlds-largest-and-most-powerful-generative-language-model/) + * [Jurassic-1 (178B)](https://uploads-ssl.webflow.com/60fd4503684b466578c0d307/61138924626a6981ee09caf6_jurassic_tech_paper.pdf) + * [BLOOM (176B)](https://huggingface.co/blog/bloom-megatron-deepspeed) + * [GLM (130B)](https://github.com/THUDM/GLM-130B) + * [YaLM (100B)](https://github.com/yandex/YaLM-100B) + * [GPT-NeoX (20B)](https://github.com/EleutherAI/gpt-neox) + * [AlexaTM (20B)](https://www.amazon.science/blog/20b-parameter-alexa-model-sets-new-marks-in-few-shot-learning) + * [Turing NLG (17B)](https://www.microsoft.com/en-us/research/blog/turing-nlg-a-17-billion-parameter-language-model-by-microsoft/) + * [METRO-LM (5.4B)](https://arxiv.org/pdf/2204.06644.pdf) + +DeepSpeed has been integrated with several different popular open-source DL frameworks such as: + +| | Documentation | +| ---------------------------------------------------------------------------------------------- | -------------------------------------------- | + | [Transformers with DeepSpeed](https://huggingface.co/docs/transformers/main/main_classes/deepspeed) | +| | [Accelerate with DeepSpeed](https://huggingface.co/docs/accelerate/usage_guides/deepspeed) | +| | [Lightning with DeepSpeed](https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.strategies.DeepSpeedStrategy.html) | +| | [MosaicML with DeepSpeed](https://docs.mosaicml.com/en/latest/trainer/using_the_trainer.html?highlight=deepspeed#deepspeed-integration) | +| | [Determined with DeepSpeed](https://docs.determined.ai/latest/training/apis-howto/deepspeed/overview.html) | + +--- + +# Build Pipeline Status + +| Description | 
Status | +| ----------- | ------ | +| NVIDIA | [![nv-torch12-p40](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-torch12-p40.yml/badge.svg)](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-torch12-p40.yml) [![nv-torch18-v100](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-torch18-v100.yml/badge.svg)](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-torch18-v100.yml) [![nv-torch-latest-v100](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-torch-latest-v100.yml/badge.svg)](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-torch-latest-v100.yml) [![nv-inference](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-inference.yml/badge.svg)](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-inference.yml) [![nv-nightly](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-nightly.yml/badge.svg)](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-nightly.yml) | +| AMD | [![amd](https://github.com/microsoft/DeepSpeed/actions/workflows/amd.yml/badge.svg)](https://github.com/microsoft/DeepSpeed/actions/workflows/amd.yml) | +| PyTorch Nightly | [![nv-torch-nightly-v100](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-torch-nightly-v100.yml/badge.svg)](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-torch-nightly-v100.yml) | +| Integrations | [![nv-transformers-v100](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-transformers-v100.yml/badge.svg)](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-transformers-v100.yml) [![nv-lightning-v100](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-lightning-v100.yml/badge.svg)](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-lightning-v100.yml) [![nv-accelerate-v100](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-accelerate-v100.yml/badge.svg)](https://github.com/microsoft/DeepSpeed/actions/workflows/nv-accelerate-v100.yml) | +| Misc | 
[![Formatting](https://github.com/microsoft/DeepSpeed/actions/workflows/formatting.yml/badge.svg)](https://github.com/microsoft/DeepSpeed/actions/workflows/formatting.yml) [![pages-build-deployment](https://github.com/microsoft/DeepSpeed/actions/workflows/pages/pages-build-deployment/badge.svg)](https://github.com/microsoft/DeepSpeed/actions/workflows/pages/pages-build-deployment) [![Documentation Status](https://readthedocs.org/projects/deepspeed/badge/?version=latest)](https://deepspeed.readthedocs.io/en/latest/?badge=latest)| # Installation @@ -81,8 +120,16 @@ just-in-time (JIT) using [torch's JIT C++ extension loader that relies on ninja](https://pytorch.org/docs/stable/cpp_extension.html) to build and dynamically link them at runtime. -**Note:** [PyTorch](https://pytorch.org/) must be installed _before_ installing -DeepSpeed. +## Requirements +* [PyTorch](https://pytorch.org/) must be installed _before_ installing DeepSpeed. +* For full feature support we recommend a version of PyTorch that is >= 1.8 and ideally the latest PyTorch stable release. +* A CUDA or ROCm compiler such as [nvcc](https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/#introduction) or [hipcc](https://github.com/ROCm-Developer-Tools/HIPCC) used to compile C++/CUDA/HIP extensions. +* Specific GPUs we develop and test against are listed below, this doesn't mean your GPU will not work if it doesn't fall into this category it's just DeepSpeed is most well tested on the following: + * NVIDIA: Pascal, Volta, Ampere, and Hopper architectures + * AMD: MI100 and MI200 + +## PyPI +We regularly push releases to [PyPI](https://pypi.org/project/deepspeed/) and encourage users to install from there in most cases. ```bash pip install deepspeed @@ -99,83 +146,29 @@ If you would like to pre-install any of the DeepSpeed extensions/ops (instead of JIT compiling) or install pre-compiled ops via PyPI please see our [advanced installation instructions](https://www.deepspeed.ai/tutorials/advanced-install/). 
-On Windows you can build wheel with following steps, currently only inference mode is supported. +## Windows +DeepSpeed has partial support for Windows. On Windows you can build a wheel with the following steps; currently only inference mode is supported. 1. Install pytorch, such as pytorch 1.8 + cuda 11.1 2. Install visual cpp build tools, such as VS2019 C++ x64/x86 build tools 3. Launch cmd console with Administrator privilege for creating required symlink folders 4. Run `python setup.py bdist_wheel` to build wheel in `dist` folder # Features -Below we provide a brief feature list, see our detailed [feature -overview](https://www.deepspeed.ai/features/) for descriptions and usage. - -* [Distributed Training with Mixed Precision](https://www.deepspeed.ai/features/#distributed-training-with-mixed-precision) - * 16-bit mixed precision - * Single-GPU/Multi-GPU/Multi-Node -* [Model Parallelism](https://www.deepspeed.ai/features/#model-parallelism) - * Support for Custom Model Parallelism - * Integration with Megatron-LM -* [Pipeline Parallelism](https://www.deepspeed.ai/tutorials/pipeline/) - * 3D Parallelism -* [The Zero Redundancy Optimizer (ZeRO)](https://www.deepspeed.ai/tutorials/zero/) - * Optimizer State and Gradient Partitioning - * Activation Partitioning - * Constant Buffer Optimization - * Contiguous Memory Optimization -* [ZeRO-Offload](https://www.deepspeed.ai/tutorials/zero-offload/) - * Leverage both CPU/GPU memory for model training - * Support 10B model training on a single GPU -* [Ultra-fast dense transformer kernels](https://www.deepspeed.ai/2020/05/18/bert-record.html) -* [Sparse attention](https://www.deepspeed.ai/2020/09/08/sparse-attention-news.html) - * Memory- and compute-efficient sparse kernels - * Support 10x longer sequences than dense - * Flexible support to different sparse structures -* [1-bit Adam](https://www.deepspeed.ai/2020/09/08/onebit-adam-blog-post.html), [0/1 Adam](https://www.deepspeed.ai/tutorials/zero-one-adam/) and 
[1-bit LAMB](https://www.deepspeed.ai/tutorials/onebit-lamb/) - * Custom communication collective - * Up to 26x communication volume saving -* [Additional Memory and Bandwidth Optimizations](https://www.deepspeed.ai/features/#additional-memory-and-bandwidth-optimizations) - * Smart Gradient Accumulation - * Communication/Computation Overlap -* [Training Features](https://www.deepspeed.ai/features/#training-features) - * Simplified training API - * Gradient Clipping - * Automatic loss scaling with mixed precision -* [Training Optimizers](https://www.deepspeed.ai/features/#training-optimizers) - * Fused Adam optimizer and arbitrary `torch.optim.Optimizer` - * Memory bandwidth optimized FP16 Optimizer - * Large Batch Training with LAMB Optimizer - * Memory efficient Training with ZeRO Optimizer - * CPU-Adam -* [Training Agnostic Checkpointing](https://www.deepspeed.ai/features/#training-agnostic-checkpointing) -* [Advanced Parameter Search](https://www.deepspeed.ai/features/#advanced-parameter-search) - * Learning Rate Range Test - * 1Cycle Learning Rate Schedule -* [Simplified Data Loader](https://www.deepspeed.ai/features/#simplified-data-loader) -* [Curriculum Learning](https://www.deepspeed.ai/tutorials/curriculum-learning/) - * A curriculum learning-based data pipeline that presents easier or simpler examples earlier during training - * Stable and 3.3x faster GPT-2 pre-training with 8x/4x larger batch size/learning rate while maintaining token-wise convergence speed - * Complementary to many other DeepSpeed features -* [Performance Analysis and Debugging](https://www.deepspeed.ai/features/#performance-analysis-and-debugging) -* [Mixture of Experts (MoE)](https://www.deepspeed.ai/tutorials/mixture-of-experts/) +Please checkout [DeepSpeed-Training](https://www.deepspeed.ai/training), [DeepSpeed-Inference](https://www.deepspeed.ai/inference) and [DeepSpeed-Compression](https://www.deepspeed.ai/compression) pages for full set of features offered along each of these 
three pillars. # Further Reading -All DeepSpeed documentation can be found on our website: [deepspeed.ai](https://www.deepspeed.ai/) +All DeepSpeed documentation, tutorials, and blogs can be found on our website: [deepspeed.ai](https://www.deepspeed.ai/) -| Article | Description | +| | Description | | ---------------------------------------------------------------------------------------------- | -------------------------------------------- | -| [DeepSpeed Features](https://www.deepspeed.ai/features/) | DeepSpeed features | | [Getting Started](https://www.deepspeed.ai/getting-started/) | First steps with DeepSpeed | | [DeepSpeed JSON Configuration](https://www.deepspeed.ai/docs/config-json/) | Configuring DeepSpeed | | [API Documentation](https://deepspeed.readthedocs.io/en/latest/) | Generated DeepSpeed API documentation | -| [CIFAR-10 Tutorial](https://www.deepspeed.ai/tutorials/cifar-10) | Getting started with CIFAR-10 and DeepSpeed | -| [Megatron-LM Tutorial](https://www.deepspeed.ai/tutorials/megatron/) | Train GPT2 with DeepSpeed and Megatron-LM | -| [BERT Pre-training Tutorial](https://www.deepspeed.ai/tutorials/bert-pretraining/) | Pre-train BERT with DeepSpeed | -| [Learning Rate Range Test Tutorial](https://www.deepspeed.ai/tutorials/lrrt/) | Faster training with large learning rates | -| [1Cycle Tutorial](https://www.deepspeed.ai/tutorials/one-cycle/) | SOTA learning schedule in DeepSpeed | - +| [Tutorials](https://www.deepspeed.ai/tutorials/) | Tutorials | +| [Blogs](https://www.deepspeed.ai/posts/) | Blogs | # Contributing @@ -204,13 +197,20 @@ Conduct](https://opensource.microsoft.com/codeofconduct/). For more information 1. Samyam Rajbhandari, Jeff Rasley, Olatunji Ruwase, Yuxiong He. (2019) ZeRO: memory optimizations toward training trillion parameter models. 
[arXiv:1910.02054](https://arxiv.org/abs/1910.02054) and [In Proceedings of the International Conference for High Performance Computing, Networking, Storage and Analysis (SC '20)](https://dl.acm.org/doi/10.5555/3433701.3433727). 2. Jeff Rasley, Samyam Rajbhandari, Olatunji Ruwase, and Yuxiong He. (2020) DeepSpeed: System Optimizations Enable Training Deep Learning Models with Over 100 Billion Parameters. [In Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining (KDD '20, Tutorial)](https://dl.acm.org/doi/10.1145/3394486.3406703). 3. Minjia Zhang, Yuxiong He. (2020) Accelerating Training of Transformer-Based Language Models with Progressive Layer Dropping. [arXiv:2010.13369](https://arxiv.org/abs/2010.13369) and [NeurIPS 2020](https://proceedings.neurips.cc/paper/2020/hash/a1140a3d0df1c81e24ae954d935e8926-Abstract.html). -4. Jie Ren, Samyam Rajbhandari, Reza Yazdani Aminabadi, Olatunji Ruwase, Shuangyan Yang, Minjia Zhang, Dong Li, Yuxiong He. (2021) ZeRO-Offload: Democratizing Billion-Scale Model Training. [arXiv:2101.06840](https://arxiv.org/abs/2101.06840). +4. Jie Ren, Samyam Rajbhandari, Reza Yazdani Aminabadi, Olatunji Ruwase, Shuangyan Yang, Minjia Zhang, Dong Li, Yuxiong He. (2021) ZeRO-Offload: Democratizing Billion-Scale Model Training. [arXiv:2101.06840](https://arxiv.org/abs/2101.06840) and [USENIX ATC 2021](https://www.usenix.org/conference/atc21/presentation/ren-jie). 5. Hanlin Tang, Shaoduo Gan, Ammar Ahmad Awan, Samyam Rajbhandari, Conglong Li, Xiangru Lian, Ji Liu, Ce Zhang, Yuxiong He. (2021) 1-bit Adam: Communication Efficient Large-Scale Training with Adam's Convergence Speed. [arXiv:2102.02888](https://arxiv.org/abs/2102.02888) and [ICML 2021](http://proceedings.mlr.press/v139/tang21a.html). -6. Samyam Rajbhandari, Olatunji Ruwase, Jeff Rasley, Shaden Smith, Yuxiong He. (2021) ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep Learning. 
[arXiv:2104.07857](https://arxiv.org/abs/2104.07857). -7. Conglong Li, Ammar Ahmad Awan, Hanlin Tang, Samyam Rajbhandari, Yuxiong He. (2021) 1-bit LAMB: Communication Efficient Large-Scale Large-Batch Training with LAMB's Convergence Speed. [arXiv:2104.06069](https://arxiv.org/abs/2104.06069). -8. Conglong Li, Minjia Zhang, Yuxiong He. (2021) Curriculum Learning: A Regularization Method for Efficient and Stable Billion-Scale GPT Model Pre-Training. [arXiv:2108.06084](https://arxiv.org/abs/2108.06084). +6. Samyam Rajbhandari, Olatunji Ruwase, Jeff Rasley, Shaden Smith, Yuxiong He. (2021) ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep Learning. [arXiv:2104.07857](https://arxiv.org/abs/2104.07857) and [SC 2021](https://dl.acm.org/doi/abs/10.1145/3458817.3476205). +7. Conglong Li, Ammar Ahmad Awan, Hanlin Tang, Samyam Rajbhandari, Yuxiong He. (2021) 1-bit LAMB: Communication Efficient Large-Scale Large-Batch Training with LAMB's Convergence Speed. [arXiv:2104.06069](https://arxiv.org/abs/2104.06069) and [HiPC 2022](https://hipc.org/advance-program/). +8. Conglong Li, Minjia Zhang, Yuxiong He. (2021) The Stability-Efficiency Dilemma: Investigating Sequence Length Warmup for Training GPT Models. [arXiv:2108.06084](https://arxiv.org/abs/2108.06084) and [NeurIPS 2022](https://openreview.net/forum?id=JpZ5du_Kdh). 9. Yucheng Lu, Conglong Li, Minjia Zhang, Christopher De Sa, Yuxiong He. (2022) Maximizing Communication Efficiency for Large-scale Training via 0/1 Adam. [arXiv:2202.06009](https://arxiv.org/abs/2202.06009). -10. Samyam Rajbhandari, Conglong Li, Zhewei Yao, Minjia Zhang, Reza Yazdani Aminabadi, Ammar Ahmad Awan, Jeff Rasley, Yuxiong He. (2022) DeepSpeed-MoE: Advancing Mixture-of-Experts Inference and Training to Power Next-Generation AI Scale [arXiv:2201.05596](https://arxiv.org/abs/2201.05596). +10. Samyam Rajbhandari, Conglong Li, Zhewei Yao, Minjia Zhang, Reza Yazdani Aminabadi, Ammar Ahmad Awan, Jeff Rasley, Yuxiong He. 
(2022) DeepSpeed-MoE: Advancing Mixture-of-Experts Inference and Training to Power Next-Generation AI Scale [arXiv:2201.05596](https://arxiv.org/abs/2201.05596) and [ICML 2022](https://proceedings.mlr.press/v162/rajbhandari22a.html). +11. Shaden Smith, Mostofa Patwary, Brandon Norick, Patrick LeGresley, Samyam Rajbhandari, Jared Casper, Zhun Liu, Shrimai Prabhumoye, George Zerveas, Vijay Korthikanti, Elton Zhang, Rewon Child, Reza Yazdani Aminabadi, Julie Bernauer, Xia Song, Mohammad Shoeybi, Yuxiong He, Michael Houston, Saurabh Tiwary, Bryan Catanzaro. (2022) Using DeepSpeed and Megatron to Train Megatron-Turing NLG 530B, A Large-Scale Generative Language Model [arXiv:2201.11990](https://arxiv.org/abs/2201.11990). +12. Xiaoxia Wu, Zhewei Yao, Minjia Zhang, Conglong Li, Yuxiong He. (2022) Extreme Compression for Pre-trained Transformers Made Simple and Efficient. [arXiv:2206.01859](https://arxiv.org/abs/2206.01859) and [NeurIPS 2022](https://openreview.net/forum?id=xNeAhc2CNAl). +13. Zhewei Yao, Reza Yazdani Aminabadi, Minjia Zhang, Xiaoxia Wu, Conglong Li, Yuxiong He. (2022) ZeroQuant: Efficient and Affordable Post-Training Quantization for Large-Scale Transformers. [arXiv:2206.01861](https://arxiv.org/abs/2206.01861) and [NeurIPS 2022](https://openreview.net/forum?id=f-fVCElZ-G1). +14. Reza Yazdani Aminabadi, Samyam Rajbhandari, Minjia Zhang, Ammar Ahmad Awan, Cheng Li, Du Li, Elton Zheng, Jeff Rasley, Shaden Smith, Olatunji Ruwase, Yuxiong He. (2022) DeepSpeed Inference: Enabling Efficient Inference of Transformer Models at Unprecedented Scale. [arXiv:2207.00032](https://arxiv.org/abs/2207.00032) and [SC 2022](https://dl.acm.org/doi/abs/10.5555/3571885.3571946). +15. Zhewei Yao, Xiaoxia Wu, Conglong Li, Connor Holmes, Minjia Zhang, Cheng Li, Yuxiong He. (2022) Random-LTD: Random and Layerwise Token Dropping Brings Efficient Training for Large-scale Transformers. [arXiv:2211.11586](https://arxiv.org/abs/2211.11586). +16. 
Conglong Li, Zhewei Yao, Xiaoxia Wu, Minjia Zhang, Yuxiong He. (2022) DeepSpeed Data Efficiency: Improving Deep Learning Model Quality and Training Efficiency via Efficient Data Sampling and Routing. [arXiv:2212.03597](https://arxiv.org/abs/2212.03597). + # Videos 1. DeepSpeed KDD 2020 Tutorial @@ -225,5 +225,6 @@ Conduct](https://opensource.microsoft.com/codeofconduct/). For more information * [ZeRO & Fastest BERT: Increasing the scale and speed of deep learning training in DeepSpeed](https://note.microsoft.com/MSR-Webinar-DeepSpeed-Registration-On-Demand.html). 3. [DeepSpeed on AzureML](https://youtu.be/yBVXR8G8Bg8) 4. Community Tutorials - * [DeepSpeed: All the tricks to scale to gigantic models](https://www.youtube.com/watch?v=pDGI668pNg0) - * [Turing-NLG, DeepSpeed and the ZeRO optimizer](https://www.youtube.com/watch?v=tC01FRB0M7w) + * [DeepSpeed: All the tricks to scale to gigantic models (Mark Saroufim)](https://www.youtube.com/watch?v=pDGI668pNg0) + * [Turing-NLG, DeepSpeed and the ZeRO optimizer (Yannic Kilcher)](https://www.youtube.com/watch?v=tC01FRB0M7w) + * [Ultimate Guide To Scaling ML Models (The AI Epiphany)](https://www.youtube.com/watch?v=hc0u4avAkuM) diff --git a/accelerator/__init__.py b/accelerator/__init__.py new file mode 100644 index 0000000..d29c3b1 --- /dev/null +++ b/accelerator/__init__.py @@ -0,0 +1,4 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +from .abstract_accelerator import DeepSpeedAccelerator +from .real_accelerator import get_accelerator, set_accelerator diff --git a/accelerator/abstract_accelerator.py b/accelerator/abstract_accelerator.py new file mode 100644 index 0000000..03dfbe9 --- /dev/null +++ b/accelerator/abstract_accelerator.py @@ -0,0 +1,236 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import abc +from abc import ABC + + +class DeepSpeedAccelerator(ABC): + def __init__(self): + self._name = None + self._communication_backend_name = None + + # Device APIs + @abc.abstractmethod + def device_name(self, 
device_index): + ... + + @abc.abstractmethod + def device(self, device_index): + ... + + @abc.abstractmethod + def set_device(self, device_index): + ... + + @abc.abstractmethod + def current_device(self): + ... + + @abc.abstractmethod + def current_device_name(self): + ... + + @abc.abstractmethod + def device_count(self): + ... + + @abc.abstractmethod + def synchronize(self, device_index=None): + ... + + # RNG APIs + @abc.abstractmethod + def random(self): + ... + + @abc.abstractmethod + def set_rng_state(self, new_state, device_index=None): + ... + + @abc.abstractmethod + def get_rng_state(self, device_index=None): + ... + + @abc.abstractmethod + def manual_seed(self, seed): + ... + + @abc.abstractmethod + def manual_seed_all(self, seed): + ... + + @abc.abstractmethod + def initial_seed(self, seed): + ... + + @abc.abstractmethod + def default_generator(self, device_index): + ... + + # Streams/Events + @property + @abc.abstractmethod + def Stream(self): + ... + + @abc.abstractmethod + def stream(self, stream): + ... + + @abc.abstractmethod + def current_stream(self, device_index=None): + ... + + @abc.abstractmethod + def default_stream(self, device_index=None): + ... + + @property + @abc.abstractmethod + def Event(self): + ... + + # Memory management + @abc.abstractmethod + def empty_cache(self): + ... + + @abc.abstractmethod + def memory_allocated(self, device_index=None): + ... + + @abc.abstractmethod + def max_memory_allocated(self, device_index=None): + ... + + @abc.abstractmethod + def reset_max_memory_allocated(self, device_index=None): + ... + + @abc.abstractmethod + def memory_cached(self, device_index=None): + ... + + @abc.abstractmethod + def max_memory_cached(self, device_index=None): + ... + + @abc.abstractmethod + def reset_max_memory_cached(self, device_index=None): + ... + + @abc.abstractmethod + def memory_stats(self, device_index=None): + ... + + @abc.abstractmethod + def reset_peak_memory_stats(self, device_index=None): + ... 
+ + @abc.abstractmethod + def memory_reserved(self, device_index=None): + ... + + @abc.abstractmethod + def max_memory_reserved(self, device_index=None): + ... + + @abc.abstractmethod + def total_memory(self, device_index=None): + ... + + # Data types + @abc.abstractmethod + def is_bf16_supported(self): + ... + + @abc.abstractmethod + def is_fp16_supported(self): + ... + + # Misc + @abc.abstractmethod + def amp(self): + ... + + @abc.abstractmethod + def is_available(self): + ... + + @abc.abstractmethod + def range_push(self, msg): + ... + + @abc.abstractmethod + def range_pop(self): + ... + + @abc.abstractmethod + def lazy_call(self, callback): + ... + + @abc.abstractmethod + def communication_backend_name(self): + ... + + # Tensor operations + @property + @abc.abstractmethod + def BFloat16Tensor(self): + ... + + @property + @abc.abstractmethod + def ByteTensor(self): + ... + + @property + @abc.abstractmethod + def DoubleTensor(self): + ... + + @property + @abc.abstractmethod + def FloatTensor(self): + ... + + @property + @abc.abstractmethod + def HalfTensor(self): + ... + + @property + @abc.abstractmethod + def IntTensor(self): + ... + + @property + @abc.abstractmethod + def LongTensor(self): + ... + + @abc.abstractmethod + def pin_memory(self, tensor): + ... + + @abc.abstractmethod + def on_accelerator(self, tensor): + ... + + @abc.abstractmethod + def op_builder_dir(self): + ... + + # create an instance of op builder, specified by class_name + @abc.abstractmethod + def create_op_builder(self, class_name): + ... + + # return an op builder class, specified by class_name + @abc.abstractmethod + def get_op_builder(self, class_name): + ... + + @abc.abstractmethod + def build_extension(self): + ... 
diff --git a/accelerator/cuda_accelerator.py b/accelerator/cuda_accelerator.py new file mode 100644 index 0000000..cdf4cd4 --- /dev/null +++ b/accelerator/cuda_accelerator.py @@ -0,0 +1,254 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import os +import pkgutil +import importlib + +from .abstract_accelerator import DeepSpeedAccelerator +# During setup stage torch may not be installed, pass on no torch will +# allow op builder related API to be executed. +try: + import torch.cuda +except ImportError: + pass + + +class CUDA_Accelerator(DeepSpeedAccelerator): + def __init__(self): + self._name = 'cuda' + self._communication_backend_name = 'nccl' + + # begin initialize for create_op_builder() + # put all valid class name <--> class type mapping into class_dict + op_builder_dir = self.op_builder_dir() + op_builder_module = importlib.import_module(op_builder_dir) + + for _, module_name, _ in pkgutil.iter_modules([os.path.dirname(op_builder_module.__file__)]): + # avoid self references + if module_name != 'all_ops' and module_name != 'builder': + module = importlib.import_module("{}.{}".format( + op_builder_dir, + module_name)) + for member_name in module.__dir__(): + if member_name.endswith( + 'Builder' + ) and member_name != "OpBuilder" and member_name != "CUDAOpBuilder" and member_name != "TorchCPUOpBuilder": # avoid abstract classes + if not member_name in self.class_dict: + self.class_dict[member_name] = getattr(module, member_name) + # end initialize for create_op_builder() + + # Device APIs + def device_name(self, device_index=None): + if device_index == None: + return 'cuda' + return 'cuda:{}'.format(device_index) + + def device(self, device_index=None): + return torch.cuda.device(device_index) + + def set_device(self, device_index): + torch.cuda.set_device(device_index) + + def current_device(self): + return torch.cuda.current_device() + + def current_device_name(self): + return 'cuda:{}'.format(torch.cuda.current_device()) + + def device_count(self): + 
return torch.cuda.device_count() + + def synchronize(self, device_index=None): + return torch.cuda.synchronize(device_index) + + # RNG APIs + def random(self): + return torch.random + + def set_rng_state(self, new_state, device_index=None): + if device_index is None: + return torch.cuda.set_rng_state(new_state) + + return torch.cuda.set_rng_state(new_state, device_index) + + def get_rng_state(self, device_index=None): + if device_index is None: + return torch.cuda.get_rng_state() + + return torch.cuda.get_rng_state(device_index) + + def manual_seed(self, seed): + return torch.cuda.manual_seed(seed) + + def manual_seed_all(self, seed): + return torch.cuda.manual_seed_all(seed) + + def initial_seed(self, seed): + return torch.cuda.initial_seed(seed) + + def default_generator(self, device_index): + return torch.cuda.default_generators[device_index] + + # Streams/Events + @property + def Stream(self): + return torch.cuda.Stream + + def stream(self, stream): + return torch.cuda.stream(stream) + + def current_stream(self, device_index=None): + return torch.cuda.current_stream(device_index) + + def default_stream(self, device_index=None): + return torch.cuda.default_stream(device_index) + + @property + def Event(self): + return torch.cuda.Event + + # Memory management + def empty_cache(self): + return torch.cuda.empty_cache() + + def memory_allocated(self, device_index=None): + return torch.cuda.memory_allocated(device_index) + + def max_memory_allocated(self, device_index=None): + return torch.cuda.max_memory_allocated(device_index) + + def reset_max_memory_allocated(self, device_index=None): + return torch.cuda.reset_max_memory_allocated(device_index) + + def memory_cached(self, device_index=None): + return torch.cuda.memory_cached(device_index) + + def max_memory_cached(self, device_index=None): + return torch.cuda.max_memory_cached(device_index) + + def reset_max_memory_cached(self, device_index=None): + return torch.cuda.reset_max_memory_cached(device_index) + + def 
memory_stats(self, device_index=None): + if hasattr(torch.cuda, 'memory_stats'): + return torch.cuda.memory_stats(device_index) + + def reset_peak_memory_stats(self, device_index=None): + if hasattr(torch.cuda, 'reset_peak_memory_stats'): + return torch.cuda.reset_peak_memory_stats(device_index) + + def memory_reserved(self, device_index=None): + if hasattr(torch.cuda, 'memory_reserved'): + return torch.cuda.memory_reserved(device_index) + + def max_memory_reserved(self, device_index=None): + if hasattr(torch.cuda, 'max_memory_reserved'): + return torch.cuda.max_memory_reserved(device_index) + + def total_memory(self, device_index=None): + return torch.cuda.get_device_properties(device_index).total_memory + + # Data types + def is_bf16_supported(self): + return torch.cuda.is_bf16_supported() + + def is_fp16_supported(self): + major, _ = torch.cuda.get_device_capability() + if major >= 7: + return True + else: + return False + + # Misc + def amp(self): + if hasattr(torch.cuda, 'amp'): + return torch.cuda.amp + return None + + def is_available(self): + return torch.cuda.is_available() + + def range_push(self, msg): + if hasattr(torch.cuda.nvtx, 'range_push'): + return torch.cuda.nvtx.range_push(msg) + + def range_pop(self): + if hasattr(torch.cuda.nvtx, 'range_pop'): + return torch.cuda.nvtx.range_pop() + + def lazy_call(self, callback): + return torch.cuda._lazy_call(callback) + + def communication_backend_name(self): + return self._communication_backend_name + + # Tensor operations + + @property + def BFloat16Tensor(self): + return torch.cuda.BFloat16Tensor + + @property + def ByteTensor(self): + return torch.cuda.ByteTensor + + @property + def DoubleTensor(self): + return torch.cuda.DoubleTensor + + @property + def FloatTensor(self): + return torch.cuda.FloatTensor + + @property + def HalfTensor(self): + return torch.cuda.HalfTensor + + @property + def IntTensor(self): + return torch.cuda.IntTensor + + @property + def LongTensor(self): + return 
torch.cuda.LongTensor + + def pin_memory(self, tensor): + return tensor.pin_memory() + + def on_accelerator(self, tensor): + device_str = str(tensor.device) + if device_str.startswith('cuda:'): + return True + else: + return False + + def op_builder_dir(self): + try: + # during installation time op_builder is visible, otherwise return deepspeed.ops.op_builder + import op_builder # noqa: F401 + return "op_builder" + except ImportError: + return "deepspeed.ops.op_builder" + + # dict that holds class name <--> class type mapping i.e. + # 'AsyncIOBuilder': + # this dict will be filled at init stage + class_dict = {} + + # create an instance of op builder and return, name specified by class_name + def create_op_builder(self, class_name): + if class_name in self.class_dict: + return self.class_dict[class_name]() + else: + return None + + # return an op builder class, name specified by class_name + def get_op_builder(self, class_name): + if class_name in self.class_dict: + return self.class_dict[class_name] + else: + return None + + def build_extension(self): + from torch.utils.cpp_extension import BuildExtension + return BuildExtension diff --git a/accelerator/real_accelerator.py b/accelerator/real_accelerator.py new file mode 100644 index 0000000..06cbb0b --- /dev/null +++ b/accelerator/real_accelerator.py @@ -0,0 +1,105 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +try: + from accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa1 +except ImportError as e: + dsa1 = None +try: + from deepspeed.accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa2 +except ImportError as e: + dsa2 = None + +ds_accelerator = None + + +def _validate_accelerator(accel_obj): + # because abstract_accelerator has different path during + # build time (accelerator.abstract_accelerator) + # and run time (deepspeed.accelerator.abstract_accelerator) + # and extension would import the + # run time abstract_accelerator/DeepSpeedAccelerator as its base + # class, so 
we need to compare accel_obj with both base classes. + # if accel_obj is instance of DeepSpeedAccelerator in one of + # accelerator.abstract_accelerator + # or deepspeed.accelerator.abstract_accelerator, consider accel_obj + # is a conforming object + if not ((dsa1 != None and isinstance(accel_obj, + dsa1)) or + (dsa2 != None and isinstance(accel_obj, + dsa2))): + raise AssertionError( + f'{accel_obj.__class__.__name__} accelerator is not subclass of DeepSpeedAccelerator' + ) + + # TODO: turn off is_available test since this breaks tests + #assert accel_obj.is_available(), \ + # f'{accel_obj.__class__.__name__} accelerator fails is_available() test' + + +def get_accelerator(): + global ds_accelerator + if ds_accelerator is None: + try: + from intel_extension_for_deepspeed import XPU_Accelerator + except ImportError as e: + pass + else: + ds_accelerator = XPU_Accelerator() + _validate_accelerator(ds_accelerator) + return ds_accelerator + + from .cuda_accelerator import CUDA_Accelerator + ds_accelerator = CUDA_Accelerator() + _validate_accelerator(ds_accelerator) + return ds_accelerator + + +def set_accelerator(accel_obj): + global ds_accelerator + _validate_accelerator(accel_obj) + ds_accelerator = accel_obj + + +''' +-----------[code] test_get.py ----------- +from deepspeed.accelerator import get_accelerator +my_accelerator = get_accelerator() +print(f'{my_accelerator._name=}') +print(f'{my_accelerator._communication_backend=}') +print(f'{my_accelerator.HalfTensor().device=}') +print(f'{my_accelerator.total_memory()=}') +-----------[code] test_get.py ----------- + +---[output] python test_get.py--------- +my_accelerator.name()='cuda' +my_accelerator.communication_backend='nccl' +my_accelerator.HalfTensor().device=device(type='cuda', index=0) +my_accelerator.total_memory()=34089730048 +---[output] python test_get.py--------- + +************************************************************************** +-----------[code] test_set.py ----------- +from 
deepspeed.accelerator.cuda_accelerator import CUDA_Accelerator +cu_accel = CUDA_Accelerator() +print(f'{id(cu_accel)=}') +from deepspeed.accelerator import set_accelerator, get_accelerator +set_accelerator(cu_accel) + +my_accelerator = get_accelerator() +print(f'{id(my_accelerator)=}') +print(f'{my_accelerator._name=}') +print(f'{my_accelerator._communication_backend=}') +print(f'{my_accelerator.HalfTensor().device=}') +print(f'{my_accelerator.total_memory()=}') +-----------[code] test_set.py ----------- + + +---[output] python test_set.py--------- +id(cu_accel)=139648165478304 +my_accelerator= +my_accelerator.name='cuda' +my_accelerator.communication_backend='nccl' +my_accelerator.HalfTensor().device=device(type='cuda', index=0) +my_accelerator.total_memory()=34089730048 +---[output] python test_set.py--------- +''' diff --git a/azure/README.md b/azure/README.md index 1cca695..df222b9 100644 --- a/azure/README.md +++ b/azure/README.md @@ -1,3 +1,3 @@ # Getting Started with DeepSpeed on Azure -Please see our [Azure tutorial](https://www.deepspeed.ai/tutorials/azure/) to get started with DeepSpeed on Azure! +The recommended and simplest method to try DeepSpeed on Azure is through [AzureML](https://azure.microsoft.com/en-us/services/machine-learning/). For more details, please see our [Azure tutorial](https://www.deepspeed.ai/tutorials/azure/). diff --git a/benchmarks/__init__.py b/benchmarks/__init__.py new file mode 100644 index 0000000..fcb45ab --- /dev/null +++ b/benchmarks/__init__.py @@ -0,0 +1 @@ +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/benchmarks/communication/README.md b/benchmarks/communication/README.md new file mode 100644 index 0000000..f760465 --- /dev/null +++ b/benchmarks/communication/README.md @@ -0,0 +1,75 @@ +# Running Communication Benchmarks + + +To run benchmarks, there are two options: + +1. Run a single communication operation: + +For example, run with a single large message size: +
+deepspeed all_reduce.py
+
+ +Scan across message sizes: +
+deepspeed all_reduce.py --scan
+
+ +2. Run all available communication benchmarks: + +
+deepspeed run_all.py
+
+ +Like the individual benchmarks, `run_all.py` supports scanning arguments for the max message size, bw-unit, etc. Simply pass the desired arguments to `run_all.py` and they'll be propagated to each comm op. + +
+usage: ds_bench [-h] [--local_rank LOCAL_RANK] [--trials TRIALS] [--warmups WARMUPS] [--maxsize MAXSIZE] [--async-op] [--bw-unit {Gbps,GBps}] [--backend {nccl}] [--dist {deepspeed,torch}] [--scan] [--raw] [--all-reduce] [--all-gather] [--all-to-all]
+                [--pt2pt] [--broadcast] [--dtype DTYPE] [--mem-factor MEM_FACTOR] [--debug]
+
+optional arguments:
+  -h, --help            show this help message and exit
+  --local_rank LOCAL_RANK
+  --trials TRIALS       Number of timed iterations
+  --warmups WARMUPS     Number of warmup (non-timed) iterations
+  --maxsize MAXSIZE     Max message size as a power of 2
+  --async-op            Enables non-blocking communication
+  --bw-unit {Gbps,GBps}
+  --backend {nccl}      Communication library to use
+  --dist {deepspeed,torch}
+                        Distributed DL framework to use
+  --scan                Enables scanning all message sizes
+  --raw                 Print the message size and latency without units
+  --all-reduce          Run all_reduce
+  --all-gather          Run all_gather
+  --all-to-all          Run all_to_all
+  --pt2pt               Run pt2pt
+  --broadcast           Run broadcast
+  --dtype DTYPE         PyTorch tensor dtype
+  --mem-factor MEM_FACTOR
+                        Proportion of max available GPU memory to use for single-size evals
+  --debug               Enables all_to_all debug prints
+
+ +Note that `ds_bench` is a pre-packaged wrapper around `run_all.py`. Users can pass the same arguments as well: + +
+/bin/ds_bench --scan --trials=10
+
+ +Finally, users can choose specific communication operations to run in `run_all.py` or `ds_bench` by passing them as arguments (all operations are run by default). For example: + +
+deepspeed run_all.py --scan --all-reduce --all-to-all --broadcast
+
+ + +# Adding Communication Benchmarks + +To add new communication benchmarks, follow this general procedure: + +1. Copy a similar benchmark file (e.g. to add `reduce_scatter`, copy `all_reduce.py` as a template) +2. Add a new bw formula in `utils.get_bw`, a new maximum tensor element formula in `utils.max_numel`, and a new arg in `utils.benchmark_parser` +3. Replace comm op calls in new file with find-replace +4. Find a good default `mem_factor` for use in `run__single()` function +5. Add new comm op to `run_all.py` diff --git a/benchmarks/communication/__init__.py b/benchmarks/communication/__init__.py new file mode 100644 index 0000000..fcb45ab --- /dev/null +++ b/benchmarks/communication/__init__.py @@ -0,0 +1 @@ +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/benchmarks/communication/all_gather.py b/benchmarks/communication/all_gather.py new file mode 100644 index 0000000..dc97267 --- /dev/null +++ b/benchmarks/communication/all_gather.py @@ -0,0 +1,159 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +from benchmarks.communication.utils import * +from benchmarks.communication.constants import * +from deepspeed.accelerator import get_accelerator + +import time + + +# Run all_gather and print metrics +def timed_all_gather(input, output, args): + if args.dist == 'torch': + import torch.distributed as dist + elif args.dist == 'deepspeed': + import deepspeed.comm as dist + + sync_all() + # Warmups, establish connections, etc. 
+ for i in range(args.warmups): + # use all_gather_base if available + if args.dist == 'torch': + if hasattr(torch.distributed, "_all_gather_base"): + dist._all_gather_base(output, input, group=None, async_op=args.async_op) + else: + output_tensors = list( + torch.chunk(output_tensor, + cdb.get_world_size(group))) + dist.all_gather(output_tensors, input_tensor, group=group, async_op=True) + elif args.dist == 'deepspeed': + dist.allgather_fn(output, input, group=None, async_op=args.async_op) + sync_all() + + # time the actual comm op trials times and average it + pre = time.perf_counter() + for i in range(args.trials): + # use all_gather_base if available + if args.dist == 'torch': + if hasattr(torch.distributed, "_all_gather_base"): + dist._all_gather_base(output, input, group=None, async_op=args.async_op) + else: + output_tensors = list( + torch.chunk(output_tensor, + cdb.get_world_size(group))) + dist.all_gather(output_tensors, input_tensor, group=group, async_op=True) + elif args.dist == 'deepspeed': + dist.allgather_fn(output, input, group=None, async_op=args.async_op) + sync_all() + duration = time.perf_counter() - pre + + # maintain and clean performance data + avg_duration = duration / args.trials + size = input.element_size() * input.nelement() + n = dist.get_world_size() + tput, busbw = get_bw('all_gather', size, avg_duration, args) + tput_str, busbw_str, duration_str = get_metric_strings(args, tput, busbw, avg_duration) + desc = f'{input.nelement()}x{input.element_size()}' + + if not args.raw: + size = convert_size(size) + + print_rank_0( + f"{size:<20} {desc:25s} {duration_str:20s} {tput_str:20s} {busbw_str:20s}") + + +def run_all_gather(local_rank, args): + if args.dist == 'torch': + import torch.distributed as dist + elif args.dist == 'deepspeed': + import deepspeed.comm as dist + + # Prepare benchmark header + print_header(args, 'all_gather') + global_rank = dist.get_rank() + world_size = dist.get_world_size() + + if args.scan: + # Create list of 
message sizes + M_LIST = [] + for x in (2**p for p in range(1, args.maxsize)): + M_LIST.append(x) + + sync_all() + # loop over various tensor sizes + for M in M_LIST: + global_rank = dist.get_rank() + try: + mat = torch.ones(world_size, + M, + dtype=getattr( + torch, + args.dtype)).to( + get_accelerator().device_name(local_rank)) + sync_all() + input = ((mat.mul_(float(global_rank))).view(-1)) + # Delete original mat to avoid OOM + del mat + get_accelerator().empty_cache() + output = torch.zeros(input.nelement() * world_size, + dtype=getattr( + torch, + args.dtype)).to( + get_accelerator().device_name(local_rank)) + except RuntimeError as e: + if 'out of memory' in str(e): + if dist.get_rank() == 0: + print('WARNING: Ran out of GPU memory. Exiting comm op.') + sync_all() + break + sync_all() + timed_all_gather(input, output, args) + else: + # all_gather_base saves memory + if (args.dist == 'torch' + and hasattr(torch.distributed, + "_all_gather_base")) or (args.dist == 'deepspeed' + and dist.has_allgather_base): + mem_factor = args.mem_factor + 0.2 + else: + mem_factor = args.mem_factor + # Send the biggest message size our GPUs can fit. If you're facing OOM errors, reduce the mem_factor + sync_all() + elements_per_gpu = max_numel(comm_op='all_gather', + dtype=getattr(torch, + args.dtype), + mem_factor=mem_factor, + local_rank=local_rank, + args=args) + try: + mat = torch.ones(elements_per_gpu, + dtype=getattr(torch, + args.dtype)).to( + get_accelerator().device_name(local_rank)) + # multiply each GPU's tensor by the rank to ease debugging + input = ((mat.mul_(float(global_rank))).view(-1)) + # Delete original mat to avoid OOM + del mat + get_accelerator().empty_cache() + output = torch.zeros( + elements_per_gpu * world_size, + dtype=getattr(torch, + args.dtype)).to(get_accelerator().device_name(local_rank)) + except RuntimeError as e: + if 'out of memory' in str(e): + if dist.get_rank() == 0: + print( + 'WARNING: Ran out of GPU memory. 
Try to reduce the --mem-factor argument!' + ) + sync_all() + return + + sync_all() + timed_all_gather(input, output, args) + + +if __name__ == "__main__": + args = benchmark_parser().parse_args() + rank = args.local_rank + init_processes(local_rank=rank, args=args) + run_all_gather(local_rank=rank, args=args) diff --git a/benchmarks/communication/all_reduce.py b/benchmarks/communication/all_reduce.py new file mode 100644 index 0000000..edc1b99 --- /dev/null +++ b/benchmarks/communication/all_reduce.py @@ -0,0 +1,113 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +from benchmarks.communication.utils import * +from benchmarks.communication.constants import * +from deepspeed.accelerator import get_accelerator + +import time + + +def timed_all_reduce(input, args): + if args.dist == 'torch': + import torch.distributed as dist + elif args.dist == 'deepspeed': + import deepspeed.comm as dist + + sync_all() + # Warmups, establish connections, etc. + for i in range(args.warmups): + dist.all_reduce(input, async_op=args.async_op) + sync_all() + + # time the actual comm op trials times and average it + pre = time.perf_counter() + for i in range(args.trials): + dist.all_reduce(input, async_op=args.async_op) + sync_all() + duration = time.perf_counter() - pre + + # maintain and clean performance data + avg_duration = duration / args.trials + size = input.element_size() * input.nelement() + n = dist.get_world_size() + tput, busbw = get_bw('all_reduce', size, avg_duration, args) + tput_str, busbw_str, duration_str = get_metric_strings(args, tput, busbw, avg_duration) + desc = f'{input.nelement()}x{input.element_size()}' + + if not args.raw: + size = convert_size(size) + + print_rank_0( + f"{size:<20} {desc:25s} {duration_str:20s} {tput_str:20s} {busbw_str:20s}") + + +def run_all_reduce(local_rank, args): + if args.dist == 'torch': + import torch.distributed as dist + elif args.dist == 'deepspeed': + import deepspeed.comm as dist + + # Prepare benchmark header + 
print_header(args, 'all_reduce') + + world_size = dist.get_world_size() + global_rank = dist.get_rank() + + if args.scan: + M_LIST = [] + for x in (2**p for p in range(1, args.maxsize)): + M_LIST.append(x) + + sync_all() + # loop over various tensor sizes + for M in M_LIST: + global_rank = dist.get_rank() + try: + mat = torch.ones(world_size, + M, + dtype=getattr( + torch, + args.dtype)).to( + get_accelerator().device_name(local_rank)) + sync_all() + input = ((mat.mul_(float(global_rank))).view(-1)) + except RuntimeError as e: + if 'out of memory' in str(e): + if dist.get_rank() == 0: + print('WARNING: Ran out of GPU memory. Exiting comm op.') + sync_all() + break + sync_all() + timed_all_reduce(input, args) + else: + # Send the biggest message size our GPUs can fit. If you're facing OOM errors, reduce the mem_factor + # Don't need output tensor, so we double mem_factor + elements_per_gpu = max_numel(comm_op='all_reduce', + dtype=getattr(torch, + args.dtype), + mem_factor=args.mem_factor * 2, + local_rank=local_rank, + args=args) + try: + mat = torch.ones(elements_per_gpu, + dtype=getattr(torch, + args.dtype)).to( + get_accelerator().device_name(local_rank)) + input = ((mat.mul_(float(global_rank))).view(-1)) + except RuntimeError as e: + if 'out of memory' in str(e): + if dist.get_rank() == 0: + print( + 'WARNING: Ran out of GPU memory. Try to reduce the --mem-factor argument!' 
+ ) + sync_all() + return + sync_all() + timed_all_reduce(input, args) + + +if __name__ == "__main__": + args = benchmark_parser().parse_args() + rank = args.local_rank + init_processes(local_rank=rank, args=args) + run_all_reduce(local_rank=rank, args=args) diff --git a/benchmarks/communication/all_to_all.py b/benchmarks/communication/all_to_all.py new file mode 100644 index 0000000..bd35cf2 --- /dev/null +++ b/benchmarks/communication/all_to_all.py @@ -0,0 +1,134 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +from benchmarks.communication.utils import * +from benchmarks.communication.constants import * +from deepspeed.accelerator import get_accelerator + +import time + + +def timed_all_to_all(input, output, args): + if args.dist == 'torch': + import torch.distributed as dist + elif args.dist == 'deepspeed': + import deepspeed.comm as dist + + sync_all() + # Warmups, establish connections, etc. + for i in range(args.warmups): + dist.all_to_all_single(output, input, async_op=args.async_op) + sync_all() + + # time the actual comm op trials times and average it + pre = time.perf_counter() + for i in range(args.trials): + dist.all_to_all_single(output, input, async_op=args.async_op) + sync_all() + duration = time.perf_counter() - pre + + # maintain and clean performance data + avg_duration = duration / args.trials + size = input.element_size() * input.nelement() + n = dist.get_world_size() + tput, busbw = get_bw('all_to_all', size, avg_duration, args) + tput_str, busbw_str, duration_str = get_metric_strings(args, tput, busbw, avg_duration) + desc = f'{input.nelement()}x{input.element_size()}' + + if not args.raw: + size = convert_size(size) + + print_rank_0( + f"{size:<20} {desc:25s} {duration_str:20s} {tput_str:20s} {busbw_str:20s}") + + +def run_all_to_all(local_rank, args): + if args.dist == 'torch': + import torch.distributed as dist + elif args.dist == 'deepspeed': + import deepspeed.comm as dist + + world_size = dist.get_world_size() + global_rank = 
dist.get_rank() + # Prepare benchmark header + print_header(args, 'all_to_all') + + if args.scan: + M_LIST = [] + for x in (2**p for p in range(1, args.maxsize)): + M_LIST.append(x) + + sync_all() + # loop over various tensor sizes + for M in M_LIST: + global_rank = dist.get_rank() + try: + mat = torch.ones(world_size, + M, + dtype=getattr( + torch, + args.dtype)).to( + get_accelerator().device_name(local_rank)) + assert mat.numel() % world_size == 0, f"tensor cannot be divided in {world_size} chunks" + sync_all() + input = ((mat.mul_(float(global_rank))).view(-1)) + output = (mat.clone().view(-1)) + except RuntimeError as e: + if 'out of memory' in str(e): + if dist.get_rank() == 0: + print('WARNING: Ran out of GPU memory. Exiting comm op.') + sync_all() + break + sync_all() + timed_all_to_all(input, output, args) + else: + # Send the biggest message size our GPUs can fit. If you're facing OOM errors, reduce the mem_factor + elements_per_gpu = max_numel(comm_op='all_to_all', + dtype=getattr(torch, + args.dtype), + mem_factor=args.mem_factor, + local_rank=local_rank, + args=args) + try: + mat = torch.ones(elements_per_gpu, + dtype=getattr(torch, + args.dtype)).to( + get_accelerator().device_name(local_rank)) + assert mat.numel() % world_size == 0, f"tensor with {mat.numel()} elements cannot be divided in {world_size} chunks" + input = ((mat.mul_(float(global_rank))).view(-1)) + # Delete original mat to avoid OOM + del mat + get_accelerator().empty_cache() + output = torch.zeros( + elements_per_gpu, + dtype=getattr(torch, + args.dtype)).to(get_accelerator().device_name(local_rank)) + except RuntimeError as e: + if 'out of memory' in str(e): + if dist.get_rank() == 0: + print( + 'WARNING: Ran out of GPU memory. Try to reduce the --mem-factor argument!' 
+ ) + sync_all() + return + sync_all() + + if args.debug: + for i in range(world_size): + if i == global_rank: + print(f"Before AllToAll Input List at rank {global_rank}: {input}") + dist.barrier() + + timed_all_to_all(input, output, args) + + if args.debug: + for i in range(world_size): + if i == global_rank: + print(f"AllToAll Results at rank {global_rank}: {output}") + dist.barrier() + + +if __name__ == "__main__": + args = benchmark_parser().parse_args() + rank = args.local_rank + init_processes(local_rank=rank, args=args) + run_all_to_all(local_rank=rank, args=args) diff --git a/benchmarks/communication/broadcast.py b/benchmarks/communication/broadcast.py new file mode 100644 index 0000000..633e466 --- /dev/null +++ b/benchmarks/communication/broadcast.py @@ -0,0 +1,114 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import torch +from benchmarks.communication.utils import * +from benchmarks.communication.constants import * +from deepspeed.accelerator import get_accelerator + +import time + + +def timed_broadcast(input, args): + if args.dist == 'torch': + import torch.distributed as dist + elif args.dist == 'deepspeed': + import deepspeed.comm as dist + + sync_all() + # Warmups, establish connections, etc. 
+ for i in range(args.warmups): + dist.broadcast(input, 0, async_op=args.async_op) + sync_all() + + # time the actual comm op trials times and average it + pre = time.perf_counter() + for i in range(args.trials): + dist.broadcast(input, 0, async_op=args.async_op) + sync_all() + duration = time.perf_counter() - pre + + # maintain and clean performance data + avg_duration = duration / args.trials + size = input.element_size() * input.nelement() + n = dist.get_world_size() + tput, busbw = get_bw('broadcast', size, avg_duration, args) + tput_str, busbw_str, duration_str = get_metric_strings(args, tput, busbw, avg_duration) + desc = f'{input.nelement()}x{input.element_size()}' + + if not args.raw: + size = convert_size(size) + + print_rank_0( + f"{size:<20} {desc:25s} {duration_str:20s} {tput_str:20s} {busbw_str:20s}") + + +def run_broadcast(local_rank, args): + if args.dist == 'torch': + import torch.distributed as dist + elif args.dist == 'deepspeed': + import deepspeed.comm as dist + + # Prepare benchmark header + print_header(args, 'broadcast') + + world_size = dist.get_world_size() + global_rank = dist.get_rank() + + if args.scan: + M_LIST = [] + for x in (2**p for p in range(1, args.maxsize)): + M_LIST.append(x) + + sync_all() + # loop over various tensor sizes + for M in M_LIST: + global_rank = dist.get_rank() + try: + mat = torch.ones(world_size, + M, + dtype=getattr( + torch, + args.dtype)).to( + get_accelerator().device_name(local_rank)) + sync_all() + input = ((mat.mul_(float(global_rank))).view(-1)) + except RuntimeError as e: + if 'out of memory' in str(e): + if dist.get_rank() == 0: + print('WARNING: Ran out of GPU memory. Exiting comm op.') + sync_all() + break + sync_all() + timed_broadcast(input, args) + else: + # Send the biggest message size our GPUs can fit. 
If you're facing OOM errors, reduce the mem_factor + # Don't need output tensor, so we double mem_factor + elements_per_gpu = max_numel(comm_op='broadcast', + dtype=getattr(torch, + args.dtype), + mem_factor=args.mem_factor * 2, + local_rank=local_rank, + args=args) + try: + mat = torch.ones(elements_per_gpu, + dtype=getattr(torch, + args.dtype)).to( + get_accelerator().device_name(local_rank)) + input = ((mat.mul_(float(global_rank))).view(-1)) + except RuntimeError as e: + if 'out of memory' in str(e): + if dist.get_rank() == 0: + print( + 'WARNING: Ran out of GPU memory. Try to reduce the --mem-factor argument!' + ) + sync_all() + return + sync_all() + timed_broadcast(input, args) + + +if __name__ == "__main__": + args = benchmark_parser().parse_args() + rank = args.local_rank + init_processes(local_rank=rank, args=args) + run_broadcast(local_rank=rank, args=args) diff --git a/benchmarks/communication/constants.py b/benchmarks/communication/constants.py new file mode 100644 index 0000000..935927a --- /dev/null +++ b/benchmarks/communication/constants.py @@ -0,0 +1,10 @@ +'''Copyright The Microsoft DeepSpeed Team''' +from deepspeed.accelerator import get_accelerator + +DEFAULT_WARMUPS = 5 +DEFAULT_TRIALS = 50 +DEFAULT_TYPE = 'float' +DEFAULT_BACKEND = get_accelerator().communication_backend_name() +DEFAULT_UNIT = 'Gbps' +DEFAULT_DIST = 'deepspeed' +DEFAULT_MAXSIZE = 24 diff --git a/benchmarks/communication/pt2pt.py b/benchmarks/communication/pt2pt.py new file mode 100644 index 0000000..1c890fc --- /dev/null +++ b/benchmarks/communication/pt2pt.py @@ -0,0 +1,132 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +from benchmarks.communication.utils import * +from benchmarks.communication.constants import * +from deepspeed.accelerator import get_accelerator + +import time + + +def timed_pt2pt(input, args): + if args.dist == 'torch': + import torch.distributed as dist + elif args.dist == 'deepspeed': + import deepspeed.comm as dist + + sync_all() + # Warmups, 
establish connections, etc. + for i in range(args.warmups): + if dist.get_rank() == 0: + if args.async_op: + dist.isend(input, 1) + else: + dist.send(input, 1) + if dist.get_rank() == 1: + if args.async_op: + dist.irecv(input, src=0) + else: + dist.recv(input, src=0) + sync_all() + + # time the actual comm op trials times and average it + pre = time.perf_counter() + for i in range(args.trials): + if dist.get_rank() == 0: + if args.async_op: + dist.isend(input, 1) + else: + dist.send(input, 1) + if dist.get_rank() == 1: + if args.async_op: + dist.irecv(input, src=0) + else: + dist.recv(input, src=0) + + sync_all() + duration = time.perf_counter() - pre + + # maintain and clean performance data + avg_duration = duration / args.trials + size = input.element_size() * input.nelement() + n = dist.get_world_size() + tput, busbw = get_bw('pt2pt', size, avg_duration, args) + tput_str, busbw_str, duration_str = get_metric_strings(args, tput, busbw, avg_duration) + desc = f'{input.nelement()}x{input.element_size()}' + + if not args.raw: + size = convert_size(size) + + print_rank_0( + f"{size:<20} {desc:25s} {duration_str:20s} {tput_str:20s} {busbw_str:20s}") + + +def run_pt2pt(local_rank, args): + if args.dist == 'torch': + import torch.distributed as dist + elif args.dist == 'deepspeed': + import deepspeed.comm as dist + + # Prepare benchmark header + print_header(args, 'pt2pt') + global_rank = dist.get_rank() + world_size = dist.get_world_size() + + if args.scan: + # Create list of message sizes + M_LIST = [] + for x in (2**p for p in range(1, args.maxsize)): + M_LIST.append(x) + + sync_all() + # loop over various tensor sizes + for M in M_LIST: + global_rank = dist.get_rank() + try: + mat = torch.ones(world_size, + M, + dtype=getattr( + torch, + args.dtype)).to( + get_accelerator().device_name(local_rank)) + sync_all() + input = ((mat.mul_(float(global_rank))).view(-1)) + except RuntimeError as e: + if 'out of memory' in str(e): + if dist.get_rank() == 0: + print('WARNING: 
Ran out of GPU memory. Exiting comm op.') + sync_all() + break + sync_all() + timed_pt2pt(input, args) + else: + # Send the biggest message size our GPUs can fit. If you're facing OOM errors, reduce the mem_factor + # Don't need output tensor, so double mem_factor + elements_per_gpu = max_numel(comm_op='pt2pt', + dtype=getattr(torch, + args.dtype), + mem_factor=args.mem_factor * 2, + local_rank=local_rank, + args=args) + try: + mat = torch.ones(elements_per_gpu, + dtype=getattr(torch, + args.dtype)).to( + get_accelerator().device_name(local_rank)) + input = ((mat.mul_(float(global_rank))).view(-1)) + except RuntimeError as e: + if 'out of memory' in str(e): + if dist.get_rank() == 0: + print( + 'WARNING: Ran out of GPU memory. Try to reduce the --mem-factor argument!' + ) + sync_all() + return + sync_all() + timed_pt2pt(input, args) + + +if __name__ == "__main__": + args = benchmark_parser().parse_args() + rank = args.local_rank + init_processes(local_rank=rank, args=args) + run_pt2pt(local_rank=rank, args=args) diff --git a/benchmarks/communication/run_all.py b/benchmarks/communication/run_all.py new file mode 100644 index 0000000..7ec562c --- /dev/null +++ b/benchmarks/communication/run_all.py @@ -0,0 +1,49 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +from benchmarks.communication.utils import * +from benchmarks.communication.all_reduce import run_all_reduce +from benchmarks.communication.all_gather import run_all_gather +from benchmarks.communication.all_to_all import run_all_to_all +from benchmarks.communication.pt2pt import run_pt2pt +from benchmarks.communication.broadcast import run_broadcast +from benchmarks.communication.constants import * + + +# For importing +def main(args, rank): + + init_processes(local_rank=rank, args=args) + + ops_to_run = [] + if args.all_reduce: + ops_to_run.append('all_reduce') + if args.all_gather: + ops_to_run.append('all_gather') + if args.broadcast: + ops_to_run.append('broadcast') + if args.pt2pt: + 
ops_to_run.append('pt2pt') + if args.all_to_all: + ops_to_run.append('all_to_all') + + if len(ops_to_run) == 0: + ops_to_run = ['all_reduce', 'all_gather', 'all_to_all', 'broadcast', 'pt2pt'] + + for comm_op in ops_to_run: + if comm_op == 'all_reduce': + run_all_reduce(local_rank=rank, args=args) + if comm_op == 'all_gather': + run_all_gather(local_rank=rank, args=args) + if comm_op == 'all_to_all': + run_all_to_all(local_rank=rank, args=args) + if comm_op == 'pt2pt': + run_pt2pt(local_rank=rank, args=args) + if comm_op == 'broadcast': + run_broadcast(local_rank=rank, args=args) + + +# For directly calling benchmark +if __name__ == "__main__": + args = benchmark_parser().parse_args() + rank = args.local_rank + main(args, rank) diff --git a/benchmarks/communication/utils.py b/benchmarks/communication/utils.py new file mode 100644 index 0000000..b913dda --- /dev/null +++ b/benchmarks/communication/utils.py @@ -0,0 +1,220 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import torch +import os +import math +import argparse +from benchmarks.communication.constants import * +from deepspeed.accelerator import get_accelerator + +global dist + + +def init_torch_distributed(backend): + global dist + import torch.distributed as dist + torch.distributed.init_process_group(backend) + local_rank = int(os.environ['LOCAL_RANK']) + get_accelerator().set_device(local_rank) + + +def init_deepspeed_comm(backend): + global dist + import deepspeed + import deepspeed.comm as dist + deepspeed.init_distributed(dist_backend=backend) + local_rank = int(os.environ['LOCAL_RANK']) + get_accelerator().set_device(local_rank) + + +def init_processes(local_rank, args): + if args.dist == 'deepspeed': + init_deepspeed_comm(args.backend) + elif args.dist == 'torch': + init_torch_distributed(args.backend) + else: + print_rank_0(f"distributed framework {args.dist} not supported") + exit(0) + + +def print_rank_0(message): + if dist.get_rank() == 0: + print(message) + + +def print_header(args, 
comm_op): + if comm_op == 'pt2pt': + world_size = 2 + else: + world_size = dist.get_world_size() + tput = f'Throughput ({args.bw_unit})' + busbw = f'BusBW ({args.bw_unit})' + header = f"\n---- Performance of {comm_op} on {world_size} devices ---------------------------------------------------------\n" + duration_str = 'Duration' + if args.raw: + duration_str += ' (us)' + header += f"{'Size (Bytes)':20s} {'Description':25s} {duration_str:20s} {tput:20s} {busbw:20s}\n" + header += "----------------------------------------------------------------------------------------------------" + print_rank_0(header) + + +def get_bw(comm_op, size, duration, args): + n = dist.get_world_size() + tput = 0 + busbw = 0 + if comm_op == "all_to_all": + tput = (size / duration) + busbw = (size / duration) * ((n - 1) / n) + elif comm_op == "all_gather": + size *= n + tput = (size / duration) + busbw = (size / duration) * ((n - 1) / n) + elif comm_op == "all_reduce": + tput = (size * 2 / duration) + busbw = (size / duration) * (2 * (n - 1) / n) + elif comm_op == "pt2pt" or comm_op == "broadcast": + tput = (size / duration) + busbw = tput + else: + print_rank_0("wrong comm_op specified") + exit(0) + + if args.bw_unit == 'Gbps': + tput *= 8 + busbw *= 8 + + return tput, busbw + + +def get_metric_strings(args, tput, busbw, duration): + duration_ms = duration * 1e3 + duration_us = duration * 1e6 + tput = f'{tput / 1e9:.3f}' + busbw = f'{busbw /1e9:.3f}' + + if duration_us < 1e3 or args.raw: + duration = f'{duration_us:.3f}' + if not args.raw: + duration += ' us' + else: + duration = f'{duration_ms:.3f} ms' + return tput, busbw, duration + + +def sync_all(): + get_accelerator().synchronize() + dist.barrier() + + +def max_numel(comm_op, dtype, mem_factor, local_rank, args): + dtype_size = _element_size(dtype) + max_memory_per_gpu = get_accelerator().total_memory(local_rank) * mem_factor + if comm_op == 'all_reduce' or comm_op == 'pt2pt' or comm_op == 'broadcast': + elements_per_gpu = 
int(max_memory_per_gpu // dtype_size) + elif comm_op == 'all_gather': + # all_gather performance is lower for non-powers of two, and the output buffer size scales with world size + # Therefore, divide by world size and round down to nearest power of 2 + elements_per_gpu = int(max_memory_per_gpu // dtype_size // dist.get_world_size()) + elements_per_gpu = int(pow(2, int(math.log(elements_per_gpu, 2)))) + elif comm_op == 'all_to_all': + # Number of elements must be divisible by world_size + # all_to_all performance is lower for non-powers of two. Round down like all_gather. + elements_per_gpu = int(max_memory_per_gpu // dtype_size) + elements_per_gpu = int(dist.get_world_size() * + round(elements_per_gpu / dist.get_world_size())) + elements_per_gpu = int(pow(2, int(math.log(elements_per_gpu, 2)))) + else: + print(f"This communication operation: {comm_op} is not supported yet") + exit(0) + return elements_per_gpu + + +# Helper function to pretty-print message sizes +def convert_size(size_bytes): + if size_bytes == 0: + return "0B" + size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") + i = int(math.floor(math.log(size_bytes, 1024))) + p = math.pow(1024, i) + s = round(size_bytes / p, 2) + return "%s %s" % (s, size_name[i]) + + +# Copied from torch. Need to add the func here for old torch compatibility. 
+def _element_size(dtype): + """ + Returns the element size for a dtype, in bytes + """ + if not isinstance(dtype, torch.dtype): + raise RuntimeError(f'expected torch.dtype, but got {type(dtype)}') + + if dtype.is_complex: + return torch.finfo(dtype).bits >> 2 + elif dtype.is_floating_point: + return torch.finfo(dtype).bits >> 3 + elif dtype == torch.bool: + # NOTE: torch.bool is not supported in torch.iinfo() + return 1 + else: + return torch.iinfo(dtype).bits >> 3 + + +def benchmark_parser(): + parser = argparse.ArgumentParser() + parser.add_argument("--local_rank", type=int) + parser.add_argument("--trials", + type=int, + default=DEFAULT_TRIALS, + help='Number of timed iterations') + parser.add_argument("--warmups", + type=int, + default=DEFAULT_WARMUPS, + help='Number of warmup (non-timed) iterations') + parser.add_argument("--maxsize", + type=int, + default=24, + help='Max message size as a power of 2') + parser.add_argument("--async-op", + action="store_true", + help='Enables non-blocking communication') + parser.add_argument("--bw-unit", + type=str, + default=DEFAULT_UNIT, + choices=['Gbps', + 'GBps']) + parser.add_argument("--backend", + type=str, + default=DEFAULT_BACKEND, + choices=['nccl', + 'ccl'], + help='Communication library to use') + parser.add_argument("--dist", + type=str, + default=DEFAULT_DIST, + choices=['deepspeed', + 'torch'], + help='Distributed DL framework to use') + parser.add_argument("--scan", + action="store_true", + help='Enables scanning all message sizes') + parser.add_argument("--raw", + action="store_true", + help='Print the message size and latency without units') + parser.add_argument("--all-reduce", action="store_true", help='Run all_reduce') + parser.add_argument("--all-gather", action="store_true", help='Run all_gather') + parser.add_argument("--all-to-all", action="store_true", help='Run all_to_all') + parser.add_argument("--pt2pt", action="store_true", help='Run pt2pt') + parser.add_argument("--broadcast", 
action="store_true", help='Run broadcast') + parser.add_argument("--dtype", + type=str, + default=DEFAULT_TYPE, + help='PyTorch tensor dtype') + parser.add_argument( + "--mem-factor", + type=float, + default=.4, + help='Proportion of max available GPU memory to use for single-size evals') + parser.add_argument("--debug", + action="store_true", + help='Enables all_to_all debug prints') + return parser diff --git a/benchmarks/inference/bert-bench.py b/benchmarks/inference/bert-bench.py new file mode 100644 index 0000000..9d586d0 --- /dev/null +++ b/benchmarks/inference/bert-bench.py @@ -0,0 +1,92 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import torch +import time +import deepspeed +import argparse +from transformers import pipeline +from deepspeed.accelerator import get_accelerator + +parser = argparse.ArgumentParser() +parser.add_argument("--model", "-m", type=str, help="hf model name") +parser.add_argument("--deepspeed", action="store_true", help="use deepspeed inference") +parser.add_argument("--dtype", type=str, default="fp16", help="fp16 or fp32") +parser.add_argument("--max-tokens", type=int, default=50, help="max new tokens") +parser.add_argument("--local_rank", type=int, default=0, help="local rank") +parser.add_argument("--trials", type=int, default=30, help="number of trials") +parser.add_argument("--kernel-inject", action="store_true", help="inject kernels on") +parser.add_argument("--graphs", action="store_true", help="CUDA Graphs on") +args = parser.parse_args() + + +def print_latency(latency_set, title, warmup=3): + # trim warmup queries + latency_set = latency_set[warmup:] + count = len(latency_set) + if count > 0: + latency_set.sort() + n50 = (count - 1) * 0.5 + 1 + n90 = (count - 1) * 0.9 + 1 + n95 = (count - 1) * 0.95 + 1 + n99 = (count - 1) * 0.99 + 1 + n999 = (count - 1) * 0.999 + 1 + + avg = sum(latency_set) / count + p50 = latency_set[int(n50) - 1] + p90 = latency_set[int(n90) - 1] + p95 = latency_set[int(n95) - 1] + p99 = 
latency_set[int(n99) - 1] + p999 = latency_set[int(n999) - 1] + + print(f"====== latency stats {title} ======") + print("\tAvg Latency: {0:8.2f} ms".format(avg * 1000)) + print("\tP50 Latency: {0:8.2f} ms".format(p50 * 1000)) + print("\tP90 Latency: {0:8.2f} ms".format(p90 * 1000)) + print("\tP95 Latency: {0:8.2f} ms".format(p95 * 1000)) + print("\tP99 Latency: {0:8.2f} ms".format(p99 * 1000)) + print("\t999 Latency: {0:8.2f} ms".format(p999 * 1000)) + + +deepspeed.init_distributed() + +print(args.model, args.max_tokens, args.dtype) + +if args.dtype.lower() == "fp16": + dtype = torch.float16 +else: + dtype = torch.float32 + +pipe = pipeline("fill-mask", model=args.model, framework="pt", device=args.local_rank) + +if dtype == torch.half: + pipe.model.half() + +mask = pipe.tokenizer.mask_token + +br = pipe(f"Hello I'm a {mask} model") +if args.deepspeed: + pipe.model = deepspeed.init_inference(pipe.model, + dtype=dtype, + mp_size=1, + replace_with_kernel_inject=args.kernel_inject, + enable_cuda_graph=args.graphs) + pipe.model.profile_model_time() + +responses = [] +times = [] +mtimes = [] +for i in range(args.trials): + get_accelerator().synchronize() + start = time.time() + r = pipe(f"Hello I'm a {mask} model") + get_accelerator().synchronize() + end = time.time() + responses.append(r) + times.append((end - start)) + mtimes += pipe.model.model_times() + #print(f"{pipe.model.model_times()=}") + +print_latency(times, "e2e latency") +print_latency(mtimes, "model latency") + +print(responses[0:3]) diff --git a/benchmarks/inference/collect_results.py b/benchmarks/inference/collect_results.py new file mode 100644 index 0000000..0e51033 --- /dev/null +++ b/benchmarks/inference/collect_results.py @@ -0,0 +1,147 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import os +import re +import argparse +import pandas as pd + +parser = argparse.ArgumentParser() +parser.add_argument( + "--results-dir", + "-r", + type=str, + default="./results", + help="directory containing sweep 
results", +) +parser.add_argument("--version", + "-v", + type=int, + default=0, + help="version to be collected") +parser.add_argument("--gen-text-n", + "-n", + type=int, + default=1, + help="expected number of generated text") +parser.add_argument("--output", + "-o", + type=str, + default="./results.csv", + help="output file") +args = parser.parse_args() + + +def get_branch(file_path): + match = re.match(r".*\/(.*)\.log", file_path) + if match is None: + return False + else: + return match.groups()[0] + + +def get_benchmark_params(root_dir, file_path): + match = re.match( + rf"{root_dir}\/(.+?)_(fp\d+)_(true|false)_(true|false)_(\d+)gpus_v(\d+)\/", + file_path, + ) + if match is None: + return False + else: + model, dtype, graphs, kernel, gpus, version = match.groups() + bool_dict = {"true": True, "false": False} + return { + "model": model, + "dtype": dtype, + "graphs": bool_dict[graphs.lower()], + "kernel": bool_dict[kernel.lower()], + "gpus": int(gpus), + "version": int(version), + } + + +def get_perf_data(file_content): + matches = re.findall(r"\s+(.+?)\sLatency:\s+(\d+\.\d+)\sms", file_content) + if matches is []: + return False + else: + return {f"latency-{key}": float(val) for key, val in matches} + + +def get_generated_text(file_content, gen_text_n): + file_content = file_content.replace("\n", " ") + file_content = file_content.replace("\t", " ") + matches = re.findall(r"RESPONSE\s(\d+):\s+[-]{30}\s+(.+?)\s+[-]{30}", file_content) + if len(matches) != gen_text_n: + return False + else: + return {f"generated-text-{key}": val for key, val in matches} + + +def get_error(file_content): + matches = re.findall(r"Error:\s+(.+?)\n", file_content) + if matches is []: + return False + else: + return {f"error": val for val in matches} + + +if __name__ == "__main__": + # List to collect data from all benchmarks + benchmarks_data = [] + + # Walk through directory of results from sweep.sh + for root, dirs, files in os.walk(args.results_dir): + # Because of how some 
models are named, the dir structure for results can vary, e.g.: + # "EleutherAI/gpt-neo_*/baseline.log" versus "gpt2_*/baseline.log" + if dirs: + continue + + # Get data from baseline and each tested branch + for name in files: + file_path = os.path.join(root, name) + + branch = get_branch(file_path) + if not branch: + print(f"WARNING: Could not detect branch for file {file_path}, skipping") + continue + + params = get_benchmark_params(args.results_dir, file_path) + if not params: + print( + f"WARNING: Could not detect benchmark settings for file {file_path}, skipping" + ) + continue + + # Verify that the version matches that which we want to collect + if params["version"] != args.version: + continue + + with open(file_path, "r") as f: + file_content = f.read() + + perf_data = get_perf_data(file_content) + if not perf_data: + print( + f"WARNING: Could not detect benchmark performance data for file {file_path}" + ) + + generated_text = get_generated_text(file_content, args.gen_text_n) + if not generated_text: + print(f"WARNING: Could not detect generated text for file {file_path}") + + error = get_error(file_content) + if error: + print(f"Error found in {file_path}, collecting error info...") + benchmarks_data.append({"branch": branch, **params, **error}) + continue + + benchmarks_data.append({ + "branch": branch, + **params, + **perf_data, + **generated_text + }) + + # Convert to a DataFrame and save + benchmarks_df = pd.DataFrame(benchmarks_data) + benchmarks_df.to_csv(args.output) diff --git a/benchmarks/inference/gpt-bench.py b/benchmarks/inference/gpt-bench.py new file mode 100644 index 0000000..29578b3 --- /dev/null +++ b/benchmarks/inference/gpt-bench.py @@ -0,0 +1,124 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import os +import torch +import time +import deepspeed +import argparse +from transformers import pipeline +from deepspeed.accelerator import get_accelerator + +parser = argparse.ArgumentParser() +parser.add_argument("--model", "-m", type=str, 
help="hf model name") +parser.add_argument("--deepspeed", action="store_true", help="use deepspeed inference") +parser.add_argument("--dtype", + type=str, + default="fp16", + choices=["fp16", + "fp32", + "int8"], + help="int8, fp16, or fp32") +parser.add_argument("--graphs", action="store_true", help="CUDA Graphs on") +parser.add_argument("--kernel-inject", action="store_true", help="inject kernels on") +parser.add_argument("--max-tokens", type=int, default=50, help="max new tokens") +parser.add_argument("--local_rank", + type=int, + default=int(os.getenv("LOCAL_RANK", + "0")), + help="local rank") +parser.add_argument("--world_size", + type=int, + default=int(os.getenv("WORLD_SIZE", + "1")), + help="world size") +parser.add_argument("--trials", type=int, default=30, help="number of trials") +args = parser.parse_args() + + +def print_latency(latency_set, title, warmup=3): + # trim warmup queries + latency_set = list(latency_set) + latency_set = latency_set[warmup:] + count = len(latency_set) + if count > 0: + latency_set.sort() + n50 = (count - 1) * 0.5 + 1 + n90 = (count - 1) * 0.9 + 1 + n95 = (count - 1) * 0.95 + 1 + n99 = (count - 1) * 0.99 + 1 + n999 = (count - 1) * 0.999 + 1 + + avg = sum(latency_set) / count + p50 = latency_set[int(n50) - 1] + p90 = latency_set[int(n90) - 1] + p95 = latency_set[int(n95) - 1] + p99 = latency_set[int(n99) - 1] + p999 = latency_set[int(n999) - 1] + + print(f"====== latency stats {title} ======") + print("\tAvg Latency: {0:8.2f} ms".format(avg * 1000)) + print("\tP50 Latency: {0:8.2f} ms".format(p50 * 1000)) + print("\tP90 Latency: {0:8.2f} ms".format(p90 * 1000)) + print("\tP95 Latency: {0:8.2f} ms".format(p95 * 1000)) + print("\tP99 Latency: {0:8.2f} ms".format(p99 * 1000)) + print("\t999 Latency: {0:8.2f} ms".format(p999 * 1000)) + + +deepspeed.init_distributed() + +if args.local_rank == 0: + print("BENCHMARK SETTINGS:") + print(f"\tMODEL: {args.model}") + print(f"\tMAX_TOKENS: {args.max_tokens}") + print(f"\tDTYPE: 
{args.dtype}") + print(f"\tCUDA_GRAPHS: {args.graphs}") + print(f"\tKERNEL_INJECT: {args.kernel_inject}") + +if args.dtype == "int8": + dtype = torch.int8 +elif args.dtype == "fp16": + dtype = torch.float16 +else: + dtype = torch.float32 + +pipe = pipeline("text-generation", + model=args.model, + framework="pt", + device=args.local_rank) + +if dtype == torch.float16: + pipe.model.half() + +if args.deepspeed: + pipe.model = deepspeed.init_inference( + pipe.model, + dtype=dtype, + mp_size=args.world_size, + replace_with_kernel_inject=args.kernel_inject, + enable_cuda_graph=args.graphs, + ) + pipe.model.profile_model_time() + +responses = [] +times = [] +mtimes = [] +for i in range(args.trials): + get_accelerator().synchronize() + start = time.time() + r = pipe("DeepSpeed is", do_sample=False, max_new_tokens=args.max_tokens) + get_accelerator().synchronize() + end = time.time() + responses.append(r) + times.append(end - start) # / (args.max_tokens - 3)) + mtimes.append(sum(pipe.model.model_times())) + +if args.local_rank == 0: + print_latency(times, "(e2e) latency") + print_latency(mtimes, "(model-only) latency") + print_latency(map(lambda t: t / (args.max_tokens - 3), + times), + "(e2e) per token latency") + print(f"RESPONSE 0:") + print("-" * 30) + print(responses[0][0]["generated_text"]) + print("-" * 30) diff --git a/benchmarks/inference/requirements.txt b/benchmarks/inference/requirements.txt new file mode 100644 index 0000000..00899dd --- /dev/null +++ b/benchmarks/inference/requirements.txt @@ -0,0 +1 @@ +transformers>=4.21.3 diff --git a/benchmarks/inference/run_model.sh b/benchmarks/inference/run_model.sh new file mode 100644 index 0000000..8e5fe3a --- /dev/null +++ b/benchmarks/inference/run_model.sh @@ -0,0 +1,36 @@ +set -x + +model=$1 +branch1=$2 +branch2=$3 +dtype=$4 +graphs=$5 +kernel=$6 +gpus=$7 + +version=0 +log_path=results/${model}_${dtype}_${graphs}_${kernel}_${gpus}gpus_v${version} +mkdir -p ${log_path} + +params="--dtype $dtype " +if [[ "$graphs" 
== "true" ]]; then + params+="--graphs " +fi +if [[ "$kernel" == "true" ]]; then + params+="--kernel " +fi + +echo "baseline $log_path" +deepspeed --num_gpus 1 gpt-bench.py -m "${model}" $params &> ${log_path}/baseline.log + +cd ../../ +git checkout ${branch1} +cd - +echo "ds ${branch1} $log_path" +deepspeed --num_gpus $gpus gpt-bench.py --deepspeed -m "${model}" $params &> ${log_path}/ds-${branch1}.log + +cd ../../ +git checkout ${branch2} +cd - +echo "ds ${branch2} $log_path" +deepspeed --num_gpus $gpus gpt-bench.py --deepspeed -m "${model}" $params&> ${log_path}/ds-${branch2}.log diff --git a/benchmarks/inference/sweep.sh b/benchmarks/inference/sweep.sh new file mode 100644 index 0000000..aabcb0b --- /dev/null +++ b/benchmarks/inference/sweep.sh @@ -0,0 +1,41 @@ +set -x + +export TRANSFORMERS_CACHE=/tmp/hf-cache + +branch1=$1 +branch2=$2 + +gptneo_models="EleutherAI/gpt-neo-2.7B EleutherAI/gpt-neo-1.3B EleutherAI/gpt-neo-125M" +gpt2_models="gpt2 gpt2-large gpt2-xl" +gptj_models="EleutherAI/gpt-j-6B" +opt_models="facebook/opt-125m facebook/opt-1.3b facebook/opt-2.7b facebook/opt-6.7b facebook/opt-13b" +bloom_models="bigscience/bloom-560m bigscience/bloom-1b7 bigscience/bloom-3b bigscience/bloom-7b1" + +for gpus in `echo "1 2 4 8"`; do + for dtype in `echo "fp16 fp32"`; do + for graphs in `echo "true false"`; do + for kernel in `echo "true false"`; do + params="$dtype $graphs $kernel $gpus" + for m in `echo "$gptneo_models"`; do + bash run_model.sh $m $branch1 $branch2 $params + done + + for m in `echo "$gpt2_models"`; do + bash run_model.sh $m $branch1 $branch2 $params + done + + for m in `echo "$gptj_models"`; do + bash run_model.sh $m $branch1 $branch2 $params + done + + for m in `echo "$opt_models"`; do + bash run_model.sh $m $branch1 $branch2 $params + done + + for m in `echo "$bloom_models"`; do + bash run_model.sh $m $branch1 $branch2 $params + done + done + done + done +done diff --git a/bin/deepspeed b/bin/deepspeed deleted file mode 100644 index 
5ec8820..0000000 --- a/bin/deepspeed +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env python3 - -from deepspeed.launcher.runner import main - -if __name__ == '__main__': - main() diff --git a/bin/deepspeed b/bin/deepspeed new file mode 120000 index 0000000..6b76856 --- /dev/null +++ b/bin/deepspeed @@ -0,0 +1 @@ +ds \ No newline at end of file diff --git a/bin/deepspeed.pt b/bin/deepspeed.pt deleted file mode 100644 index 5ec8820..0000000 --- a/bin/deepspeed.pt +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env python3 - -from deepspeed.launcher.runner import main - -if __name__ == '__main__': - main() diff --git a/bin/deepspeed.pt b/bin/deepspeed.pt new file mode 120000 index 0000000..6b76856 --- /dev/null +++ b/bin/deepspeed.pt @@ -0,0 +1 @@ +ds \ No newline at end of file diff --git a/bin/ds b/bin/ds old mode 100644 new mode 100755 diff --git a/bin/ds_bench b/bin/ds_bench new file mode 100755 index 0000000..bfacbc8 --- /dev/null +++ b/bin/ds_bench @@ -0,0 +1,17 @@ +#!/usr/bin/env python3 + +from benchmarks.communication.run_all import main +from benchmarks.communication.constants import * +from benchmarks.communication.utils import * +import os +import sys + +# Run the same file with deepspeed launcher. 
This is required since setuptools will auto-detect python files and insert a python shebang for both 'scripts' and 'entry_points', and this benchmarks require the DS launcher +required_env = ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"] +if not all(map(lambda v: v in os.environ, required_env)): + import subprocess + subprocess.run("deepspeed $(which ds_bench) " + " ".join(sys.argv[1:]), shell=True) +else: + args = benchmark_parser().parse_args() + rank = args.local_rank + main(args, rank) diff --git a/bin/ds_elastic b/bin/ds_elastic old mode 100644 new mode 100755 diff --git a/bin/ds_report b/bin/ds_report old mode 100644 new mode 100755 diff --git a/bin/ds_ssh b/bin/ds_ssh old mode 100644 new mode 100755 diff --git a/bin/dsr b/bin/dsr new file mode 120000 index 0000000..747bf47 --- /dev/null +++ b/bin/dsr @@ -0,0 +1 @@ +ds_report \ No newline at end of file diff --git a/build_win.bat b/build_win.bat new file mode 100644 index 0000000..ec8c8a3 --- /dev/null +++ b/build_win.bat @@ -0,0 +1,19 @@ +@echo off + +set DS_BUILD_AIO=0 +set DS_BUILD_SPARSE_ATTN=0 + +echo Administrative permissions required. Detecting permissions... + +net session >nul 2>&1 +if %errorLevel% == 0 ( + echo Success: Administrative permissions confirmed. +) else ( + echo Failure: Current permissions inadequate. 
+ goto end +) + + +python setup.py bdist_wheel + +:end diff --git a/csrc/adagrad/cpu_adagrad.cpp b/csrc/adagrad/cpu_adagrad.cpp index 4f2a9b6..9f8f95c 100644 --- a/csrc/adagrad/cpu_adagrad.cpp +++ b/csrc/adagrad/cpu_adagrad.cpp @@ -1,16 +1,21 @@ +#ifdef __HIPCC__ +#include "cpu_adagrad_hip.h" +#else #include "cpu_adagrad.h" -#include -#include -#include +#endif + #include #include #include #include #include +#if defined(__ENABLE_CUDA__) +#include #include "cublas_v2.h" #include "cuda.h" #include "curand.h" #include "custom_cuda_layers.h" +#endif static std::unordered_map> s_optimizers; @@ -20,7 +25,7 @@ void Adagrad_Optimizer::Step_1(float* _params, float* grads, float* _exp_avg_sq, size_t _param_size, - __half* dev_params, + ds_half_precision_t* dev_params, bool half_precision) { size_t rounded_size = 0; @@ -30,17 +35,19 @@ void Adagrad_Optimizer::Step_1(float* _params, #endif if (_param_size > rounded_size) { float step_size = -1 * _alpha; - __half* grads_cast_h; - __half* params_cast_h; + ds_half_precision_t* grads_cast_h; + ds_half_precision_t* params_cast_h; if (half_precision) { - grads_cast_h = reinterpret_cast<__half*>(grads); - params_cast_h = reinterpret_cast<__half*>(_params); + grads_cast_h = reinterpret_cast(grads); + params_cast_h = reinterpret_cast(_params); } for (size_t t = rounded_size; t < _param_size; t += TILE) { size_t copy_size = TILE; if ((t + TILE) > _param_size) copy_size = _param_size - t; size_t offset = copy_size + t; +#if defined(__ENABLE_CUDA__) if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); } +#endif #pragma omp parallel for for (size_t k = t; k < offset; k++) { float grad = half_precision ? 
(float)grads_cast_h[k] : grads[k]; @@ -55,21 +62,24 @@ void Adagrad_Optimizer::Step_1(float* _params, grad += _eps; grad = momentum / grad; param = grad * step_size + param; +#if defined(__ENABLE_CUDA__) if (dev_params) _doubled_buffer[_buf_index][k - t] = param; - +#endif if (half_precision) - params_cast_h[k] = (__half)param; + params_cast_h[k] = (ds_half_precision_t)param; else _params[k] = param; // STORE UPDATE TERM TO GRAD'S MEMORY grads[k] = grad * step_size; _exp_avg_sq[k] = variance; } +#if defined(__ENABLE_CUDA__) if (dev_params) { launch_param_update( _doubled_buffer[_buf_index], dev_params + t, (copy_size), _streams[_buf_index]); _buf_index = !_buf_index; } +#endif } } } @@ -78,7 +88,7 @@ void Adagrad_Optimizer::Step_4(float* _params, float* grads, float* _exp_avg_sq, size_t _param_size, - __half* dev_params, + ds_half_precision_t* dev_params, bool half_precision) { size_t rounded_size = 0; @@ -130,7 +140,7 @@ void Adagrad_Optimizer::Step_8(float* _params, float* grads, float* _exp_avg_sq, size_t _param_size, - __half* dev_params, + ds_half_precision_t* dev_params, bool half_precision) { size_t rounded_size = 0; @@ -170,7 +180,9 @@ int ds_adagrad_step(int optimizer_id, opt->update_state(lr, epsilon, weight_decay); opt->Step_8(params_ptr, grads_ptr, exp_avg_sq_ptr, params_c.size(0)); +#if defined(__ENABLE_CUDA__) opt->SynchronizeStreams(); +#endif return 0; } @@ -184,6 +196,7 @@ int ds_adagrad_step_plus_copy(int optimizer_id, torch::Tensor& exp_avg_sq, torch::Tensor& gpu_params) { +#if defined(__ENABLE_CUDA__) auto params_c = params.contiguous(); auto gpu_params_c = gpu_params.contiguous(); auto exp_avg_sq_c = exp_avg_sq.contiguous(); @@ -191,7 +204,7 @@ int ds_adagrad_step_plus_copy(int optimizer_id, float* params_ptr = (float*)params_c.data_ptr(); float* grads_ptr = (float*)grads_c.data_ptr(); - __half* gpu_params_ptr = (__half*)gpu_params_c.data_ptr(); + ds_half_precision_t* gpu_params_ptr = (ds_half_precision_t*)gpu_params_c.data_ptr(); float* 
exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr(); std::shared_ptr opt = @@ -206,6 +219,9 @@ int ds_adagrad_step_plus_copy(int optimizer_id, (params.options().dtype() == at::kHalf)); opt->SynchronizeStreams(); +#else + assert(false); +#endif return 0; } diff --git a/csrc/adam/cpu_adam.cpp b/csrc/adam/cpu_adam.cpp index 727eec8..f17f225 100644 --- a/csrc/adam/cpu_adam.cpp +++ b/csrc/adam/cpu_adam.cpp @@ -1,16 +1,18 @@ #include "cpu_adam.h" -#include -#include -#include #include +#include #include #include #include #include + +#if defined(__ENABLE_CUDA__) +#include #include "cublas_v2.h" #include "cuda.h" #include "curand.h" #include "custom_cuda_layers.h" +#endif static std::unordered_map> s_optimizers; @@ -21,7 +23,7 @@ void Adam_Optimizer::Step_1(float* _params, float* _exp_avg, float* _exp_avg_sq, size_t _param_size, - __half* dev_params, + ds_half_precision_t* dev_params, bool half_precision) { size_t rounded_size = 0; @@ -41,19 +43,20 @@ void Adam_Optimizer::Step_1(float* _params, float step_size = -1 * _alpha / _bias_correction1; float w_decay = -1 * _alpha * _weight_decay; - __half* grads_cast_h; - __half* params_cast_h; + ds_half_precision_t* grads_cast_h; + ds_half_precision_t* params_cast_h; if (half_precision) { - grads_cast_h = reinterpret_cast<__half*>(grads); - params_cast_h = reinterpret_cast<__half*>(_params); + grads_cast_h = reinterpret_cast(grads); + params_cast_h = reinterpret_cast(_params); } for (size_t t = rounded_size; t < _param_size; t += TILE) { size_t copy_size = TILE; if ((t + TILE) > _param_size) copy_size = _param_size - t; size_t offset = copy_size + t; +#if defined(__ENABLE_CUDA__) if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); } - +#endif #pragma omp parallel for for (size_t k = t; k < offset; k++) { float grad = half_precision ? 
(float)grads_cast_h[k] : grads[k]; @@ -73,21 +76,24 @@ void Adam_Optimizer::Step_1(float* _params, grad = momentum / grad; if (_weight_decay > 0 && _adamw_mode) { param += w_decay * param; } param = grad * step_size + param; +#if defined(__ENABLE_CUDA__) if (dev_params) _doubled_buffer[_buf_index][k - t] = param; - +#endif if (half_precision) - params_cast_h[k] = (__half)param; + params_cast_h[k] = (ds_half_precision_t)param; else _params[k] = param; _exp_avg[k] = momentum; _exp_avg_sq[k] = variance; } +#if defined(__ENABLE_CUDA__) if (dev_params) { launch_param_update( _doubled_buffer[_buf_index], dev_params + t, (copy_size), _streams[_buf_index]); _buf_index = !_buf_index; } +#endif } } } @@ -97,7 +103,7 @@ void Adam_Optimizer::Step_4(float* _params, float* _exp_avg, float* _exp_avg_sq, size_t _param_size, - __half* dev_params, + ds_half_precision_t* dev_params, bool half_precision) { size_t rounded_size = 0; @@ -166,7 +172,7 @@ void Adam_Optimizer::Step_8(float* _params, float* _exp_avg, float* _exp_avg_sq, size_t _param_size, - __half* dev_params, + ds_half_precision_t* dev_params, bool half_precision) { size_t rounded_size = 0; @@ -228,7 +234,9 @@ int ds_adam_step(int optimizer_id, nullptr, (params.options().dtype() == at::kHalf)); +#if defined(__ENABLE_CUDA__) opt->SynchronizeStreams(); +#endif return 0; } @@ -246,6 +254,7 @@ int ds_adam_step_plus_copy(int optimizer_id, torch::Tensor& exp_avg_sq, torch::Tensor& gpu_params) { +#if defined(__ENABLE_CUDA__) auto params_c = params.contiguous(); auto gpu_params_c = gpu_params.contiguous(); auto exp_avg_c = exp_avg.contiguous(); @@ -254,7 +263,7 @@ int ds_adam_step_plus_copy(int optimizer_id, float* params_ptr = (float*)params_c.data_ptr(); float* grads_ptr = (float*)grads_c.data_ptr(); - __half* gpu_params_ptr = (__half*)gpu_params_c.data_ptr(); + ds_half_precision_t* gpu_params_ptr = (ds_half_precision_t*)gpu_params_c.data_ptr(); float* exp_avg_ptr = (float*)exp_avg_c.data_ptr(); float* exp_avg_sq_ptr = 
(float*)exp_avg_sq_c.data_ptr(); @@ -271,6 +280,9 @@ int ds_adam_step_plus_copy(int optimizer_id, (params.options().dtype() == at::kHalf)); opt->SynchronizeStreams(); +#else + assert(false); +#endif return 0; } diff --git a/csrc/adam/multi_tensor_adam.cu b/csrc/adam/multi_tensor_adam.cu index 3cb9763..611d9ff 100644 --- a/csrc/adam/multi_tensor_adam.cu +++ b/csrc/adam/multi_tensor_adam.cu @@ -12,7 +12,11 @@ #include +#ifdef __HIPCC__ +#include "multi_tensor_apply_hip.cuh" +#else #include "multi_tensor_apply.cuh" +#endif #include "type_shim.h" #define BLOCK_SIZE 512 diff --git a/csrc/aio/common/deepspeed_aio_utils.cpp b/csrc/aio/common/deepspeed_aio_utils.cpp index 200c703..e8bf9de 100644 --- a/csrc/aio/common/deepspeed_aio_utils.cpp +++ b/csrc/aio/common/deepspeed_aio_utils.cpp @@ -6,6 +6,7 @@ Functionality for swapping optimizer tensors to/from (NVMe) storage devices. */ #include +#include #include "deepspeed_aio_utils.h" @@ -113,8 +114,8 @@ void* ds_page_aligned_alloc(const size_t size, const bool lock) auto mlock_ret = mlock(ptr, size); if (mlock_ret != 0) { auto mlock_error = errno; - printf("mlock failed with %d %s\n", mlock_error, strerror(mlock_error)); - + std::cerr << "mlock failed to allocate " << size << " bytes with error no " << mlock_error + << " msg " << strerror(mlock_error) << std::endl; free(ptr); return nullptr; } diff --git a/csrc/aio/py_lib/deepspeed_pin_tensor.cpp b/csrc/aio/py_lib/deepspeed_pin_tensor.cpp new file mode 100644 index 0000000..20bdf5b --- /dev/null +++ b/csrc/aio/py_lib/deepspeed_pin_tensor.cpp @@ -0,0 +1,43 @@ +/* +Copyright 2023 The Microsoft DeepSpeed Team +Licensed under the MIT license. + +Functionality for managing CPU tensors occupying page-locked memory. 
+*/ + +#include "deepspeed_pin_tensor.h" + +using namespace std; + +deepspeed_pin_tensor_t::~deepspeed_pin_tensor_t() +{ + for (auto iter = _locked_tensors.begin(); iter != _locked_tensors.end(); ++iter) { + munlock(iter->first, iter->second); + } + _locked_tensors.clear(); +} + +torch::Tensor deepspeed_pin_tensor_t::alloc(const size_t num_elem, const at::ScalarType& elem_type) +{ + const auto num_bytes = num_elem * elementSize(elem_type); + auto pinned_buffer = ds_page_aligned_alloc(num_bytes, true); + assert(nullptr != pinned_buffer); + + _locked_tensors[pinned_buffer] = num_bytes; + + auto options = torch::TensorOptions().dtype(elem_type).device(torch::kCPU); + + return at::from_blob(pinned_buffer, static_cast(num_bytes), options); +} + +bool deepspeed_pin_tensor_t::free(torch::Tensor& locked_tensor) +{ + auto addr = locked_tensor.data_ptr(); + if (_locked_tensors.find(addr) != _locked_tensors.end()) { + munlock(addr, _locked_tensors[addr]); + _locked_tensors.erase(addr); + return true; + } + + return false; +} diff --git a/csrc/aio/py_lib/deepspeed_pin_tensor.h b/csrc/aio/py_lib/deepspeed_pin_tensor.h new file mode 100644 index 0000000..a421bbc --- /dev/null +++ b/csrc/aio/py_lib/deepspeed_pin_tensor.h @@ -0,0 +1,24 @@ +/* +Copyright 2023 The Microsoft DeepSpeed Team +Licensed under the MIT license. + +Functionality for managing CPU tensors occupying page-locked memory. +TODO: Implement a full-featured manager that + 1. Avoid page-locked memory leaks + 2. 
Minimize page-locked memory usage by reducing internal fragmentation +*/ + +#include +#include "deepspeed_py_aio.h" + +struct deepspeed_pin_tensor_t { + std::map _locked_tensors; + + deepspeed_pin_tensor_t() = default; + + ~deepspeed_pin_tensor_t(); + + torch::Tensor alloc(const size_t num_elem, const at::ScalarType& elem_type); + + bool free(torch::Tensor& locked_tensor); +}; diff --git a/csrc/aio/py_lib/deepspeed_py_aio_handle.cpp b/csrc/aio/py_lib/deepspeed_py_aio_handle.cpp index 417319f..cb81924 100644 --- a/csrc/aio/py_lib/deepspeed_py_aio_handle.cpp +++ b/csrc/aio/py_lib/deepspeed_py_aio_handle.cpp @@ -22,7 +22,8 @@ deepspeed_aio_handle_t::deepspeed_aio_handle_t(const int block_size, _overlap_events(overlap_events), _num_threads(num_threads), _aio_config(block_size, queue_depth, single_submit, overlap_events, false), - _num_pending_ops(0) + _num_pending_ops(0), + _pinned_tensor_mgr(new deepspeed_pin_tensor_t()) { for (auto i = 0; i < num_threads; ++i) { _thread_contexts.push_back(std::make_shared(i, _aio_config)); @@ -280,3 +281,14 @@ int deepspeed_aio_handle_t::async_pwrite(const torch::Tensor& buffer, const char { return pwrite(buffer, filename, false, true); } + +at::Tensor deepspeed_aio_handle_t::new_cpu_locked_tensor(const size_t num_elem, + const torch::Tensor& example_tensor) +{ + return _pinned_tensor_mgr->alloc(num_elem, example_tensor.scalar_type()); +} + +bool deepspeed_aio_handle_t::free_cpu_locked_tensor(torch::Tensor& locked_tensor) +{ + return _pinned_tensor_mgr->free(locked_tensor); +} diff --git a/csrc/aio/py_lib/deepspeed_py_aio_handle.h b/csrc/aio/py_lib/deepspeed_py_aio_handle.h index 22de4c3..2163aaf 100644 --- a/csrc/aio/py_lib/deepspeed_py_aio_handle.h +++ b/csrc/aio/py_lib/deepspeed_py_aio_handle.h @@ -8,6 +8,7 @@ Functionality for swapping optimizer tensors to/from (NVMe) storage devices. 
#include #include #include "deepspeed_aio_thread.h" +#include "deepspeed_pin_tensor.h" struct deepspeed_aio_handle_t { std::unique_ptr _aio_ctxt; @@ -19,6 +20,7 @@ struct deepspeed_aio_handle_t { std::vector> _thread_contexts; std::vector _threads; int _num_pending_ops; + std::unique_ptr _pinned_tensor_mgr; deepspeed_aio_handle_t(const int block_size, const int queue_depth, @@ -56,6 +58,11 @@ struct deepspeed_aio_handle_t { int async_pwrite(const torch::Tensor& buffer, const char* filename); + // TODO: Make API's args to be shape and dtype. + torch::Tensor new_cpu_locked_tensor(const size_t num_elem, const torch::Tensor& example_tensor); + + bool free_cpu_locked_tensor(torch::Tensor&); + int wait(); void _stop_threads(); diff --git a/csrc/aio/py_lib/py_ds_aio.cpp b/csrc/aio/py_lib/py_ds_aio.cpp old mode 100644 new mode 100755 index 6859058..3c971c6 --- a/csrc/aio/py_lib/py_ds_aio.cpp +++ b/csrc/aio/py_lib/py_ds_aio.cpp @@ -37,5 +37,8 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) .def("async_pread", &deepspeed_aio_handle_t::async_pread) .def("async_pwrite", &deepspeed_aio_handle_t::async_pwrite) + .def("new_cpu_locked_tensor", &deepspeed_aio_handle_t::new_cpu_locked_tensor) + .def("free_cpu_locked_tensor", &deepspeed_aio_handle_t::free_cpu_locked_tensor) + .def("wait", &deepspeed_aio_handle_t::wait); } diff --git a/csrc/aio/py_test/aio_bench_perf_sweep.py b/csrc/aio/py_test/aio_bench_perf_sweep.py index be6cd74..eebea69 100644 --- a/csrc/aio/py_test/aio_bench_perf_sweep.py +++ b/csrc/aio/py_test/aio_bench_perf_sweep.py @@ -15,6 +15,7 @@ import shutil from test_ds_aio_utils import refine_integer_value from perf_sweep_utils import READ_OP_DESC, WRITE_OP_DESC, BENCH_LOG_DIR, \ READ_IO_DIR, WRITE_IO_DIR, READ_LOG_DIR, WRITE_LOG_DIR +from deepspeed.ops.op_builder import AsyncIOBuilder OTHER_OPTIONS = '--handle' PERF_SCRIPT = 'test_ds_aio.py' @@ -277,8 +278,6 @@ def script_path(): def async_io_setup(): - import deepspeed - from deepspeed.ops.aio import AsyncIOBuilder return 
AsyncIOBuilder().is_compatible() diff --git a/csrc/aio/py_test/ds_aio_basic.py b/csrc/aio/py_test/ds_aio_basic.py old mode 100644 new mode 100755 index cf70b66..d7f034a --- a/csrc/aio/py_test/ds_aio_basic.py +++ b/csrc/aio/py_test/ds_aio_basic.py @@ -8,9 +8,10 @@ Functionality of swapping optimizer tensors to/from (NVMe) storage devices. import torch import os import time -from deepspeed.ops.aio import AsyncIOBuilder from multiprocessing import Pool, Barrier from test_ds_aio_utils import report_results, task_log, task_barrier +from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import AsyncIOBuilder def pre_basic(args, tid, read_op): @@ -19,7 +20,10 @@ def pre_basic(args, tid, read_op): file = args.read_file if read_op else f'{args.write_file}.{tid}' task_log(tid, f'Allocate tensor of size {num_bytes} bytes') - buffer = torch.empty(num_bytes, dtype=torch.uint8, device='cpu').pin_memory() + buffer = get_accelerator().pin_memory( + torch.empty(num_bytes, + dtype=torch.uint8, + device='cpu')) task_log( tid, f'{io_string} file {file} of size {num_bytes} bytes from buffer on device {buffer.device}' diff --git a/csrc/aio/py_test/ds_aio_handle.py b/csrc/aio/py_test/ds_aio_handle.py old mode 100644 new mode 100755 index 947ee2e..7f0e447 --- a/csrc/aio/py_test/ds_aio_handle.py +++ b/csrc/aio/py_test/ds_aio_handle.py @@ -9,8 +9,9 @@ import torch import os import time from multiprocessing import Pool, Barrier -from deepspeed.ops.aio import AsyncIOBuilder from test_ds_aio_utils import report_results, task_log, task_barrier +from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import AsyncIOBuilder def pre_handle(args, tid, read_op): @@ -18,23 +19,30 @@ def pre_handle(args, tid, read_op): num_bytes = os.path.getsize(args.read_file) if read_op else args.write_size file = args.read_file if read_op else f'{args.write_file}.{tid}' - task_log(tid, f'Allocate tensor of size {num_bytes} bytes') - if args.gpu: - buffer = 
torch.empty(num_bytes, dtype=torch.uint8, device='cuda') - else: - buffer = torch.empty(num_bytes, dtype=torch.uint8, device='cpu').pin_memory() - task_log( - tid, - f'{io_string} file {file} of size {num_bytes} bytes from buffer on device {buffer.device}' - ) - io_parallel = args.io_parallel if args.io_parallel else 1 handle = AsyncIOBuilder().load().aio_handle(args.block_size, args.queue_depth, args.single_submit, args.overlap_events, io_parallel) - task_log(tid, f'created deepspeed aio handle') + task_log(tid, f'Created deepspeed aio handle') + + if args.gpu: + buffer = torch.empty(num_bytes, + dtype=torch.uint8, + device=get_accelerator().device_name()) + else: + if args.use_accelerator_pin_memory: + buffer = get_accelerator().pin_memory( + torch.empty(num_bytes, + dtype=torch.uint8, + device='cpu')) + else: + buffer = handle.new_cpu_locked_tensor(num_bytes, + torch.empty(0, + dtype=torch.uint8)) + + task_log(tid, f'Allocate tensor of size {num_bytes} bytes') ctxt = {} ctxt['file'] = file @@ -43,6 +51,11 @@ def pre_handle(args, tid, read_op): ctxt['buffer'] = buffer ctxt['elapsed_sec'] = 0 + task_log( + tid, + f'{io_string} file {file} of size {num_bytes} bytes from buffer on device {buffer.device}' + ) + return ctxt diff --git a/csrc/aio/py_test/parse_aio_stats.py b/csrc/aio/py_test/parse_aio_stats.py old mode 100644 new mode 100755 index 1921973..2a3e649 --- a/csrc/aio/py_test/parse_aio_stats.py +++ b/csrc/aio/py_test/parse_aio_stats.py @@ -7,7 +7,6 @@ Functionality of swapping optimizer tensors to/from (NVMe) storage devices. 
import os import argparse -import re READ_SPEED = 'read_speed' WRITE_SPEED = 'write_speed' diff --git a/csrc/aio/py_test/perf_sweep_utils.py b/csrc/aio/py_test/perf_sweep_utils.py index 2fd1a4c..78dd93b 100644 --- a/csrc/aio/py_test/perf_sweep_utils.py +++ b/csrc/aio/py_test/perf_sweep_utils.py @@ -1,3 +1,5 @@ +'''Copyright The Microsoft DeepSpeed Team''' + SCRIPT_PREFIX = '_aio_bench' WRITE_OP_DESC = 'write' READ_OP_DESC = 'read' diff --git a/csrc/aio/py_test/run_read_sweep.sh b/csrc/aio/py_test/run_read_sweep.sh old mode 100644 new mode 100755 diff --git a/csrc/aio/py_test/run_write_sweep.sh b/csrc/aio/py_test/run_write_sweep.sh old mode 100644 new mode 100755 diff --git a/csrc/aio/py_test/test_ds_aio.py b/csrc/aio/py_test/test_ds_aio.py old mode 100644 new mode 100755 index f97d3e6..7cb737d --- a/csrc/aio/py_test/test_ds_aio.py +++ b/csrc/aio/py_test/test_ds_aio.py @@ -6,11 +6,7 @@ Functionality of swapping optimizer tensors to/from (NVMe) storage devices. """ import os -import torch import argparse -import time -import sys -from multiprocessing import Pool import multiprocessing as mp from ds_aio_basic import aio_basic_multiprocessing from ds_aio_handle import aio_handle_multiprocessing @@ -67,6 +63,10 @@ def parse_arguments(): parser.add_argument('--gpu', action='store_true', help='Use GPU memory') + parser.add_argument('--use_accelerator_pin_memory', + action='store_true', + help='Obtain pinned (CPU page-locked) tensors from accelerator') + args = parser.parse_args() print(f'args = {args}') return args diff --git a/csrc/aio/py_test/test_ds_aio_utils.py b/csrc/aio/py_test/test_ds_aio_utils.py old mode 100644 new mode 100755 index c68dfdd..a330e4c --- a/csrc/aio/py_test/test_ds_aio_utils.py +++ b/csrc/aio/py_test/test_ds_aio_utils.py @@ -5,8 +5,6 @@ Licensed under the MIT license. Functionality of swapping optimizer tensors to/from (NVMe) storage devices. 
""" -import os - BYTES_PER_GB = 1024**3 LOG_TIDS = [0] diff --git a/csrc/aio/py_test/validate_async_io.py b/csrc/aio/py_test/validate_async_io.py index ceae84c..3b96c3c 100644 --- a/csrc/aio/py_test/validate_async_io.py +++ b/csrc/aio/py_test/validate_async_io.py @@ -4,6 +4,5 @@ Licensed under the MIT license. Functionality of swapping optimizer tensors to/from (NVMe) storage devices. """ -import deepspeed -from deepspeed.ops.aio import AsyncIOBuilder +from deepspeed.ops.op_builder import AsyncIOBuilder assert AsyncIOBuilder().is_compatible() diff --git a/csrc/common/custom_cuda_kernel.cu b/csrc/common/custom_cuda_kernel.cu index f7a2b5d..4c56a25 100644 --- a/csrc/common/custom_cuda_kernel.cu +++ b/csrc/common/custom_cuda_kernel.cu @@ -1,5 +1,11 @@ +/* +Copyright The Microsoft DeepSpeed Team +*/ +#ifdef __HIPCC__ +#include "custom_hip_layers.h" +#else #include "custom_cuda_layers.h" - +#endif __global__ void param_update_kernel(const float* input, __half* output, int size) { int id = blockIdx.x * blockDim.x + threadIdx.x; diff --git a/csrc/includes/StopWatch.h b/csrc/includes/StopWatch.h index 9bf0401..a557604 100644 --- a/csrc/includes/StopWatch.h +++ b/csrc/includes/StopWatch.h @@ -1,3 +1,7 @@ +/* +Copyright The Microsoft DeepSpeed Team +*/ + #pragma once #ifdef _WIN32 #include diff --git a/csrc/includes/Timer.h b/csrc/includes/Timer.h index efc7fff..d325cae 100644 --- a/csrc/includes/Timer.h +++ b/csrc/includes/Timer.h @@ -1,3 +1,6 @@ +/* +Copyright The Microsoft DeepSpeed Team +*/ #ifndef __TIMER_H__ #define __TIMER_H__ diff --git a/csrc/includes/context.h b/csrc/includes/context.h index 5f04241..a8968ba 100644 --- a/csrc/includes/context.h +++ b/csrc/includes/context.h @@ -1,3 +1,7 @@ +/* +Copyright The Microsoft DeepSpeed Team +*/ + #pragma once #include diff --git a/csrc/includes/conversion_utils.h b/csrc/includes/conversion_utils.h new file mode 100644 index 0000000..f033e35 --- /dev/null +++ b/csrc/includes/conversion_utils.h @@ -0,0 +1,626 @@ +/* 
+Copyright 2022 The Microsoft DeepSpeed Team +*/ + +#pragma once + +#include "ds_kernel_utils.h" + +#include +#include + +#ifdef BF16_AVAILABLE +#include +#endif + +namespace conversion { + +// Basic primitive for constructing conversions +template +DS_D_INLINE TO to(FROM val) +{ + return to(val); +} + +// Specializations + +/********************* Identity Conversions *********************/ +/* +Identity conversions are useful in templated functions where we might have +a fixed destination type. For example, I might have a kernel that accepts +__half, __nv_bfloat16, and float but always want to do the core computation +at floating point: + +T mem_value = input[idx]; +float compute_value = conversion::to(mem_value); + +In practice, we should be able to elide the second template parameter: +float compute_val = conversion::to(mem_value); + +In this case, we need an implementation to handle the T = float case + +NOTE: The type inferencing system appears to be unable to handle inferring the first +template parameter, even in the trivial case. 
+*/ + +// Floating point types +template <> +DS_D_INLINE double to(double val) +{ + return val; +} +template <> +DS_D_INLINE float to(float val) +{ + return val; +} +template <> +DS_D_INLINE __half to(__half val) +{ + return val; +} +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE __nv_bfloat16 to(__nv_bfloat16 val) +{ + return val; +} +#endif + +// Integer types +template <> +DS_D_INLINE int8_t to(int8_t val) +{ + return val; +} +template <> +DS_D_INLINE uint8_t to(uint8_t val) +{ + return val; +} +template <> +DS_D_INLINE int16_t to(int16_t val) +{ + return val; +} +template <> +DS_D_INLINE uint16_t to(uint16_t val) +{ + return val; +} +template <> +DS_D_INLINE int32_t to(int32_t val) +{ + return val; +} +template <> +DS_D_INLINE uint32_t to(uint32_t val) +{ + return val; +} +template <> +DS_D_INLINE int64_t to(int64_t val) +{ + return val; +} +template <> +DS_D_INLINE uint64_t to(uint64_t val) +{ + return val; +} + +// TODO: evaluate if we want bools + +/********************* To Double Conversions *********************/ + +// * to double variants + +// Would normally like to not use C cast, but this is an important enough conversion +// to keep +template <> +DS_D_INLINE double to(float val) +{ +#ifdef PTX_AVAILABLE + double ret_val; + asm("cvt.rn.f64.f32 %0, %1;\n" : "=d"(ret_val) : "f"(val)); + return ret_val; +#else + return double(val); +#endif +} +// Note: there is a CVT instruction for __half -> double, but there's no inline interface +// for passing a single half value +template <> +DS_D_INLINE double to(__half val) +{ + return to(__half2float(val)); +} +template <> +DS_D_INLINE double to(int64_t val) +{ + return __ll2double_rn(val); +} +template <> +DS_D_INLINE double to(int32_t val) +{ + return __int2double_rn(val); +} +template <> +DS_D_INLINE double to(int16_t val) +{ + return __int2double_rn(val); +} +template <> +DS_D_INLINE double to(int8_t val) +{ + return __int2double_rn(val); +} +template <> +DS_D_INLINE double to(uint64_t val) +{ + return
__ull2double_rn(val); +} +template <> +DS_D_INLINE double to(uint32_t val) +{ + return __uint2double_rn(val); +} +template <> +DS_D_INLINE double to(uint16_t val) +{ + return __uint2double_rn(val); +} +template <> +DS_D_INLINE double to(uint8_t val) +{ + return __uint2double_rn(val); +} + +// Same applies here +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE double to(__nv_bfloat16 val) +{ + return to(__bfloat162float(val)); +} +#endif + +/********************* To Float Conversions *********************/ + +template <> +DS_D_INLINE float to(double val) +{ + return __double2float_rn(val); +} +template <> +DS_D_INLINE float to(__half val) +{ + return __half2float(val); +} +template <> +DS_D_INLINE float to(int64_t val) +{ + return __ll2float_rn(val); +} +template <> +DS_D_INLINE float to(int32_t val) +{ + return __int2float_rn(val); +} +template <> +DS_D_INLINE float to(int16_t val) +{ + return __int2float_rn(val); +} +template <> +DS_D_INLINE float to(int8_t val) +{ + return __int2float_rn(val); +} +template <> +DS_D_INLINE float to(uint64_t val) +{ + return __ull2float_rn(val); +} +template <> +DS_D_INLINE float to(uint32_t val) +{ + return __uint2float_rn(val); +} +template <> +DS_D_INLINE float to(uint16_t val) +{ + return __uint2float_rn(val); +} +template <> +DS_D_INLINE float to(uint8_t val) +{ + return __uint2float_rn(val); +} + +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE float to(__nv_bfloat16 val) +{ + return __bfloat162float(val); +} +#endif + +/********************* To Float2 Conversions *********************/ +template <> +DS_D_INLINE float2 to(__half2 val) +{ + return __half22float2(val); +} + +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE float2 to(__nv_bfloat162 val) +{ + return __bfloat1622float2(val); +} +#endif + +/********************* To Half Conversions *********************/ +//aiss +//template <> +//DS_D_INLINE __half to(double val) +//{ +// return __double2half(val); +//} +template <> +DS_D_INLINE __half to(float val) +{ + return 
__float2half(val); +} +template <> +DS_D_INLINE __half to(int64_t val) +{ + return __ll2half_rn(val); +} +template <> +DS_D_INLINE __half to(int32_t val) +{ + return __int2half_rn(val); +} +template <> +DS_D_INLINE __half to(int16_t val) +{ + return __short2half_rn(val); +} +template <> +DS_D_INLINE __half to(int8_t val) +{ + return __int2half_rn(val); +} +template <> +DS_D_INLINE __half to(uint64_t val) +{ + return __ull2half_rn(val); +} +template <> +DS_D_INLINE __half to(uint32_t val) +{ + return __uint2half_rn(val); +} +template <> +DS_D_INLINE __half to(uint16_t val) +{ + return __ushort2half_rn(val); +} +template <> +DS_D_INLINE __half to(uint8_t val) +{ + return __uint2half_rn(val); +} + +#ifdef BF16_AVAILABLE +// No direct conversion +template <> +DS_D_INLINE __half to(__nv_bfloat16 val) +{ + return to<__half>(to(val)); +} +#endif + +/********************* To Half2 Conversions *********************/ +template <> +DS_D_INLINE __half2 to(float2 val) +{ + return __float22half2_rn(val); +} + +#ifdef BF16_AVAILABLE +// No direct conversion +template <> +DS_D_INLINE __half2 to(__nv_bfloat162 val) +{ + return to<__half2>(to(val)); +} +#endif + +/********************* To BF16 Conversions *********************/ +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE __nv_bfloat16 to(double val) +{ + return __double2bfloat16(val); +} +template <> +DS_D_INLINE __nv_bfloat16 to(float val) +{ + return __float2bfloat16(val); +} +template <> +DS_D_INLINE __nv_bfloat16 to(int64_t val) +{ + return __ll2bfloat16_rn(val); +} +template <> +DS_D_INLINE __nv_bfloat16 to(int32_t val) +{ + return __int2bfloat16_rn(val); +} +template <> +DS_D_INLINE __nv_bfloat16 to(int16_t val) +{ + return __short2bfloat16_rn(val); +} +template <> +DS_D_INLINE __nv_bfloat16 to(int8_t val) +{ + return __int2bfloat16_rn(val); +} +template <> +DS_D_INLINE __nv_bfloat16 to(uint64_t val) +{ + return __ull2bfloat16_rn(val); +} +template <> +DS_D_INLINE __nv_bfloat16 to(uint32_t val) +{ + return 
__uint2bfloat16_rn(val); +} +template <> +DS_D_INLINE __nv_bfloat16 to(uint16_t val) +{ + return __ushort2bfloat16_rn(val); +} +template <> +DS_D_INLINE __nv_bfloat16 to(uint8_t val) +{ + return __uint2bfloat16_rn(val); +} +#endif + +/********************* To BF162 Conversions *********************/ +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE __nv_bfloat162 to(float2 val) +{ + return __float22bfloat162_rn(val); +} +template <> +DS_D_INLINE __nv_bfloat162 to(__half2 val) +{ + return to<__nv_bfloat162>(to(val)); +} +#endif + +/********************* To INT64_T Conversions *********************/ +template <> +DS_D_INLINE int64_t to(double val) +{ + return __double2ll_rn(val); +} +template <> +DS_D_INLINE int64_t to(float val) +{ + return __float2ll_rn(val); +} +template <> +DS_D_INLINE int64_t to(__half val) +{ + return __half2ll_rn(val); +} +// No direct support for integer casts at the C++ level and I don't feel they're so important +// to demand an PTX at this time + +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE int64_t to(__nv_bfloat16 val) +{ + return __bfloat162ll_rn(val); +} +#endif + +/********************* To INT32_T Conversions *********************/ +template <> +DS_D_INLINE int32_t to(double val) +{ + return __double2int_rn(val); +} +template <> +DS_D_INLINE int32_t to(float val) +{ + return __float2int_rn(val); +} +template <> +DS_D_INLINE int32_t to(__half val) +{ + return __half2int_rn(val); +} +// No direct support for integer casts at the C++ level and I don't feel they're so important +// to demand an PTX at this time + +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE int32_t to(__nv_bfloat16 val) +{ + return __bfloat162int_rn(val); +} +#endif + +/********************* To INT16_T Conversions *********************/ +template <> +DS_D_INLINE int16_t to(double val) +{ + return __double2int_rn(val); +} +template <> +DS_D_INLINE int16_t to(float val) +{ + return __float2int_rn(val); +} +template <> +DS_D_INLINE int16_t to(__half val) +{ + return 
__half2int_rn(val); +} +// No direct support for integer casts at the C++ level and I don't feel they're so important +// to demand an PTX at this time + +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE int16_t to(__nv_bfloat16 val) +{ + return __bfloat162int_rn(val); +} +#endif + +/********************* To INT8_T Conversions *********************/ +template <> +DS_D_INLINE int8_t to(double val) +{ + return __double2int_rn(val); +} +template <> +DS_D_INLINE int8_t to(float val) +{ + return __float2int_rn(val); +} +template <> +DS_D_INLINE int8_t to(__half val) +{ + return __half2int_rn(val); +} +// No direct support for integer casts at the C++ level and I don't feel they're so important +// to demand an PTX at this time + +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE int8_t to(__nv_bfloat16 val) +{ + return __bfloat162int_rn(val); +} +#endif + +/********************* To UINT64_T Conversions *********************/ +template <> +DS_D_INLINE uint64_t to(double val) +{ + return __double2ull_rn(val); +} +template <> +DS_D_INLINE uint64_t to(float val) +{ + return __float2ull_rn(val); +} +template <> +DS_D_INLINE uint64_t to(__half val) +{ + return __half2ull_rn(val); +} +// No direct support for integer casts at the C++ level and I don't feel they're so important +// to demand an PTX at this time + +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE uint64_t to(__nv_bfloat16 val) +{ + return __bfloat162ull_rn(val); +} +#endif + +/********************* To UINT32_T Conversions *********************/ +template <> +DS_D_INLINE uint32_t to(double val) +{ + return __double2uint_rn(val); +} +template <> +DS_D_INLINE uint32_t to(float val) +{ + return __float2uint_rn(val); +} +template <> +DS_D_INLINE uint32_t to(__half val) +{ + return __half2uint_rn(val); +} +// No direct support for integer casts at the C++ level and I don't feel they're so important +// to demand an PTX at this time + +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE uint32_t to(__nv_bfloat16 val) +{ + 
return __bfloat162uint_rn(val); +} +#endif + +/********************* To UINT16_T Conversions *********************/ +template <> +DS_D_INLINE uint16_t to(double val) +{ + return __double2uint_rn(val); +} +template <> +DS_D_INLINE uint16_t to(float val) +{ + return __float2uint_rn(val); +} +template <> +DS_D_INLINE uint16_t to(__half val) +{ + return __half2uint_rn(val); +} +// No direct support for integer casts at the C++ level and I don't feel they're so important +// to demand an PTX at this time + +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE uint16_t to(__nv_bfloat16 val) +{ + return __bfloat162uint_rn(val); +} +#endif + +/********************* To UINT8_T Conversions *********************/ +template <> +DS_D_INLINE uint8_t to(double val) +{ + return __double2uint_rn(val); +} +template <> +DS_D_INLINE uint8_t to(float val) +{ + return __float2uint_rn(val); +} +template <> +DS_D_INLINE uint8_t to(__half val) +{ + return __half2uint_rn(val); +} +// No direct support for integer casts at the C++ level and I don't feel they're so important +// to demand an PTX at this time + +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE uint8_t to(__nv_bfloat16 val) +{ + return __bfloat162uint_rn(val); +} +#endif + +} // namespace conversion diff --git a/csrc/includes/cpu_adagrad.h b/csrc/includes/cpu_adagrad.h index 6c21b7c..0dda4f7 100644 --- a/csrc/includes/cpu_adagrad.h +++ b/csrc/includes/cpu_adagrad.h @@ -1,39 +1,54 @@ +/* +Copyright The Microsoft DeepSpeed Team +*/ + #pragma once #define NOMINMAX // Windows idiosyncrasy // https://stackoverflow.com/questions/4913922/possible-problems-with-nominmax-on-visual-c -#include -#include #include #include +#include "simd.h" + +#if defined(__ENABLE_CUDA__) +#include +#include #include "cuda.h" #include "custom_cuda_layers.h" -#include "simd.h" +typedef __half ds_half_precision_t; +#else +typedef unsigned short ds_half_precision_t; +#endif -#define STEP(SPAN) \ - void Step_##SPAN(float* _params, \ - float* grads, \ - float* 
_exp_avg_sq, \ - size_t _param_size, \ - __half* dev_param = nullptr, \ +#define STEP(SPAN) \ + void Step_##SPAN(float* _params, \ + float* grads, \ + float* _exp_avg_sq, \ + size_t _param_size, \ + ds_half_precision_t* dev_param = nullptr, \ bool half_precision = false); class Adagrad_Optimizer { public: Adagrad_Optimizer(float alpha = 1e-2, float eps = 1e-8, float weight_decay = 0) - : _alpha(alpha), _eps(eps), _weight_decay(weight_decay), _buf_index(false) + : _alpha(alpha), _eps(eps), _weight_decay(weight_decay) { +#if defined(__ENABLE_CUDA__) cudaMallocHost((void**)_doubled_buffer, TILE * sizeof(float)); cudaMallocHost((void**)(_doubled_buffer + 1), TILE * sizeof(float)); _streams[0] = Context::Instance().GetCurrentStream(); _streams[1] = Context::Instance().GetNewStream(); + _buf_index = false; +#endif } ~Adagrad_Optimizer() { +#if defined(__ENABLE_CUDA__) cudaFreeHost(_doubled_buffer[0]); cudaFreeHost(_doubled_buffer[1]); +#endif } #if defined(__AVX512__) or defined(__AVX256__) template @@ -42,16 +57,18 @@ public: float* grads, float* _exp_avg_sq, size_t param_size, - __half* dev_param = nullptr, + ds_half_precision_t* dev_param = nullptr, bool half_precision = false); #endif STEP(1) STEP(4) STEP(8) +#if defined(__ENABLE_CUDA__) inline void SynchronizeStreams() { for (int i = 0; i < 2; i++) cudaStreamSynchronize(_streams[i]); } +#endif inline void IncrementStep(size_t step) { _step++; @@ -73,10 +90,11 @@ private: float _betta2_t; size_t _step; - float* _doubled_buffer[2]; +#if defined(__ENABLE_CUDA__) bool _buf_index; - + float* _doubled_buffer[2]; cudaStream_t _streams[2]; +#endif }; #if defined(__AVX512__) or defined(__AVX256__) @@ -86,7 +104,7 @@ void Adagrad_Optimizer::Step_AVX(size_t* rounded_size, float* grads, float* _exp_avg_sq, size_t _param_size, - __half* dev_params, + ds_half_precision_t* dev_params, bool half_precision) { size_t new_rounded_size = 0; @@ -104,7 +122,9 @@ void Adagrad_Optimizer::Step_AVX(size_t* rounded_size, size_t copy_size = 
TILE; if ((t + TILE) > new_rounded_size) copy_size = new_rounded_size - t; size_t offset = copy_size + t; +#if defined(__ENABLE_CUDA__) if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); } +#endif #pragma omp parallel for for (size_t i = t; i < offset; i += SIMD_WIDTH * span) { AVX_Data grad_4[span]; @@ -128,12 +148,14 @@ void Adagrad_Optimizer::Step_AVX(size_t* rounded_size, simd_fma(param_4, grad_4, step_size_4, param_4); simd_store(_params + i, param_4, half_precision); +#if defined(__ENABLE_CUDA__) if (dev_params) { simd_store(_doubled_buffer[_buf_index] + (i - t), param_4, half_precision); } +#endif simd_store(_exp_avg_sq + i, variance_4, false); } - +#if defined(__ENABLE_CUDA__) if (dev_params) { if (half_precision) launch_param_update_half( @@ -144,6 +166,7 @@ void Adagrad_Optimizer::Step_AVX(size_t* rounded_size, _buf_index = !_buf_index; } +#endif } *rounded_size = new_rounded_size; } diff --git a/csrc/includes/cpu_adam.h b/csrc/includes/cpu_adam.h index 09677c6..e9e139a 100644 --- a/csrc/includes/cpu_adam.h +++ b/csrc/includes/cpu_adam.h @@ -1,23 +1,34 @@ +/* +Copyright The Microsoft DeepSpeed Team +*/ + #pragma once #define NOMINMAX // Windows idiosyncrasy // https://stackoverflow.com/questions/4913922/possible-problems-with-nominmax-on-visual-c -#include -#include #include #include +#include "simd.h" + +#if defined(__ENABLE_CUDA__) +#include +#include #include "cuda.h" #include "custom_cuda_layers.h" -#include "simd.h" +typedef __half ds_half_precision_t; +#else +#include +typedef unsigned short ds_half_precision_t; +#endif -#define STEP(SPAN) \ - void Step_##SPAN(float* _params, \ - float* grads, \ - float* _exp_avg, \ - float* _exp_avg_sq, \ - size_t _param_size, \ - __half* dev_param = nullptr, \ +#define STEP(SPAN) \ + void Step_##SPAN(float* _params, \ + float* grads, \ + float* _exp_avg, \ + float* _exp_avg_sq, \ + size_t _param_size, \ + ds_half_precision_t* dev_param = nullptr, \ bool half_precision = false); class 
Adam_Optimizer { @@ -36,20 +47,25 @@ public: _betta1_t(1.0), _betta2_t(1.0), _step(0), - _buf_index(false), _adamw_mode(adamw_mode) { +#if defined(__ENABLE_CUDA__) cudaMallocHost((void**)_doubled_buffer, TILE * sizeof(float)); cudaMallocHost((void**)(_doubled_buffer + 1), TILE * sizeof(float)); _streams[0] = Context::Instance().GetCurrentStream(); _streams[1] = Context::Instance().GetNewStream(); + _buf_index = false; +#endif } ~Adam_Optimizer() { +#if defined(__ENABLE_CUDA__) cudaFreeHost(_doubled_buffer[0]); cudaFreeHost(_doubled_buffer[1]); +#endif } + #if defined(__AVX512__) or defined(__AVX256__) template void Step_AVX(size_t* rounded_size, @@ -58,16 +74,18 @@ public: float* _exp_avg, float* _exp_avg_sq, size_t param_size, - __half* dev_param = nullptr, + ds_half_precision_t* dev_param = nullptr, bool half_precision = false); #endif STEP(1) STEP(4) STEP(8) +#if defined(__ENABLE_CUDA__) inline void SynchronizeStreams() { for (int i = 0; i < 2; i++) cudaStreamSynchronize(_streams[i]); } +#endif inline void IncrementStep(size_t step, float beta1, float beta2) { if (beta1 != _betta1 || beta2 != _betta2) { @@ -116,11 +134,13 @@ private: float _bias_correction1; float _bias_correction2; - float* _doubled_buffer[2]; - bool _buf_index; bool _adamw_mode; +#if defined(__ENABLE_CUDA__) + float* _doubled_buffer[2]; cudaStream_t _streams[2]; + bool _buf_index; +#endif }; #if defined(__AVX512__) or defined(__AVX256__) @@ -131,10 +151,11 @@ void Adam_Optimizer::Step_AVX(size_t* rounded_size, float* _exp_avg, float* _exp_avg_sq, size_t _param_size, - __half* dev_params, + ds_half_precision_t* dev_params, bool half_precision) { size_t new_rounded_size = 0; + int rshft = half_precision ? 
1 : 0; AVX_Data betta1_4; betta1_4.data = SIMD_SET(_betta1); @@ -167,11 +188,13 @@ void Adam_Optimizer::Step_AVX(size_t* rounded_size, size_t copy_size = TILE; if ((t + TILE) > new_rounded_size) copy_size = new_rounded_size - t; size_t offset = copy_size + t; +#if defined(__ENABLE_CUDA__) if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); } +#endif #pragma omp parallel for for (size_t i = t; i < offset; i += SIMD_WIDTH * span) { AVX_Data grad_4[span]; - simd_load(grad_4, grads + i, half_precision); + simd_load(grad_4, grads + (i >> rshft), half_precision); AVX_Data momentum_4[span]; simd_load(momentum_4, _exp_avg + i, false); @@ -180,7 +203,7 @@ void Adam_Optimizer::Step_AVX(size_t* rounded_size, simd_load(variance_4, _exp_avg_sq + i, false); AVX_Data param_4[span]; - simd_load(param_4, _params + i, half_precision); + simd_load(param_4, _params + (i >> rshft), half_precision); if (_weight_decay > 0 && !_adamw_mode) { simd_fma(grad_4, param_4, weight_decay4, grad_4); @@ -201,14 +224,16 @@ void Adam_Optimizer::Step_AVX(size_t* rounded_size, simd_fma(param_4, grad_4, step_size_4, param_4); - simd_store(_params + i, param_4, half_precision); + simd_store(_params + (i >> rshft), param_4, half_precision); +#if defined(__ENABLE_CUDA__) if (dev_params) { simd_store(_doubled_buffer[_buf_index] + (i - t), param_4, half_precision); } +#endif simd_store(_exp_avg + i, momentum_4, false); simd_store(_exp_avg_sq + i, variance_4, false); } - +#if defined(__ENABLE_CUDA__) if (dev_params) { if (half_precision) launch_param_update_half( @@ -219,6 +244,7 @@ void Adam_Optimizer::Step_AVX(size_t* rounded_size, _buf_index = !_buf_index; } +#endif } *rounded_size = new_rounded_size; } diff --git a/csrc/includes/cublas_wrappers.h b/csrc/includes/cublas_wrappers.h index 9bb6cc3..68e7f48 100644 --- a/csrc/includes/cublas_wrappers.h +++ b/csrc/includes/cublas_wrappers.h @@ -1,3 +1,7 @@ +/* +Copyright The Microsoft DeepSpeed Team +*/ + #pragma once #include diff --git 
a/csrc/includes/custom_cuda_layers.h b/csrc/includes/custom_cuda_layers.h index 30c633f..7ac0415 100644 --- a/csrc/includes/custom_cuda_layers.h +++ b/csrc/includes/custom_cuda_layers.h @@ -1,21 +1,17 @@ +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + #pragma once +#include "ds_kernel_utils.h" + #include #include +#include #include #include -#ifdef __HIP_PLATFORM_HCC__ -#define HALF_PRECISION_AVAILABLE = 1 -#include -#else -#if __CUDA_ARCH__ >= 700 -#define HALF_PRECISION_AVAILABLE = 1 -#endif -#include -#endif -#include - #include "context.h" #include "cublas_wrappers.h" @@ -45,30 +41,6 @@ #define WARP_SIZE_BITS 5 -template -void launch_quantize_kernel(T* vals, - int total_count, - int group_num, - int num_bits, - cudaStream_t stream); -template -void launch_sr_quantize_kernel(T* vals, - int total_count, - int group_num, - int num_bits, - cudaStream_t stream); -template -void launch_quantize_kernel_asym(T* vals, - int total_count, - int group_num, - int num_bits, - cudaStream_t stream); -template -void launch_sr_quantize_kernel_asym(T* vals, - int total_count, - int group_num, - int num_bits, - cudaStream_t stream); // Fused bias add with gelu activation template void launch_bias_gelu(const T* input, @@ -301,3 +273,54 @@ void launch_fuse_transpose_bias_kernel(const T* inp, void launch_param_update(const float* input, __half* output, int size, cudaStream_t stream); void launch_param_update_half(const float* input, __half* output, int size, cudaStream_t stream); + +void launch_token_sort(int32_t* indices, + int layers, + int batch_size, + int reserved_size, + int original_tokens, + cudaStream_t stream); + +template +void launch_gather_tokens(T* retained_tokens, + T* activations, + int32_t* gather_indices, + int32_t batch_size, + int32_t sampled_tokens, + int32_t channels, + int32_t read_batch_stride, + int32_t read_seq_stride, + int32_t write_batch_stride, + int32_t write_seq_stride, + cudaStream_t stream); + +template +void launch_scatter_tokens(T* 
all_activations, + T* layer_activations, + int32_t* gather_indices, + int32_t batch_size, + int32_t sampled_tokens, + int32_t channels, + int32_t read_batch_stride, + int32_t read_seq_stride, + int32_t write_batch_stride, + int32_t write_seq_stride, + cudaStream_t stream); + +template +void launch_slice_gpt_mask(T* output_mask, + const T* input_mask, + int batch_size, + int truncated_seq_len, + int orig_seq_len, + cudaStream_t stream); + +template +void launch_slice_bert_mask(T* output_mask, + const T* input_mask, + const int32_t* retained_indices, + int32_t layers, + int32_t batch_size, + int32_t truncated_seq_len, + int32_t orig_seq_len, + cudaStream_t stream); diff --git a/csrc/includes/dequantization_utils.h b/csrc/includes/dequantization_utils.h new file mode 100644 index 0000000..fea7505 --- /dev/null +++ b/csrc/includes/dequantization_utils.h @@ -0,0 +1,176 @@ +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + +#include "conversion_utils.h" +#include "ds_kernel_utils.h" +#include "quantization.h" +#include "quantization_utils.h" + +namespace cg = cooperative_groups; + +#pragma once + +namespace dequantize { +using Type = quantize::Type; + +template +using Params = quantize::Params; + +constexpr int granularity = quantize::granularity; +using PackedInt4 = quantize::PackedInt4; + +constexpr int h_per_chunk = granularity / sizeof(__half); +constexpr int h2_per_chunk = granularity / sizeof(__half2); + +/* +Device function that reads quantized data from global memory, dequantizes +it, and stores it to global memory. +Template Arguments : + numBits - Number of bits in quantized element. int: 4, 8 + qType - Type of quantization to perform. 
Type::Symmetric or Type::Asymmetric + unroll - Number of load steps to internally unroll int + threads - Number of threads to perform dequant int +Function arguments: + global_output - __half pointer in global memory + data - Quantized data in global memory + global_params - Quantization parameters in global memory + elems_per_group - Number of elements in each quantization group + total_elems - Tensor size (note, does not need to be multiple of elems_per_group) +*/ +template +DS_D_INLINE void to_global(__half* global_output, + const int8_t* data, + const float* global_params, + const int elems_per_group, + const int total_elems); + +/* +Device function that quantizes 16 bytes of __half type input data. +Template Arguments : + numBits - Number of bits in quantized element. int : 8 or 4 + qType - Type of quantization to perform. Type::Symmetric or Type::Asymmetric +Function Arguments : + local_output - Local array to store dequantized data __half* or __half2* + data - Pointer to quantized input data. int8_t* + Params - Parameters for quantization. 
Params +*/ +template +DS_D_INLINE void chunk(__half2* local_output, const int8_t* data, Params q_params); + +template +DS_D_INLINE void chunk(T* local_output, const int8_t* data, Params q_params); + +/**************** Implementations ******************/ + +template +DS_D_INLINE void chunk(T* local_output, const int8_t* data, Params q_params) +{ + constexpr int32_t num_elems_packed = 8 / numBits; + constexpr int32_t iters = h_per_chunk / num_elems_packed; + +#pragma unroll + for (int i = 0; i < iters; i++) { + if constexpr (num_elems_packed == 1) { + local_output[i] = q_params.template dequantize(data[i]); + } else { + auto accessible_data = *(PackedInt4*)(&data[i]); + local_output[2 * i] = q_params.template dequantize(accessible_data.low); + local_output[2 * i + 1] = q_params.template dequantize(accessible_data.high); + } + } +} + +template +DS_D_INLINE void chunk(__half2* local_output, const int8_t* data, Params q_params) +{ + __half* local_output_cast = reinterpret_cast<__half*>(local_output); + chunk<__half, numBits>(local_output_cast, data, q_params); +} + +template +DS_D_INLINE void _to_global(T* global_output, + const int8_t* data, + const float* global_params, + const int elems_per_group, + const int total_elems) +{ + cg::thread_block tb = cg::this_thread_block(); + cg::thread_block_tile warp = cg::tiled_partition(tb); + + // Load constants + // TODO(cmikeh2): Refactor into functions? + constexpr int load_granularity = (granularity / (sizeof(T))) / (numBits == 8 ? 1 : 2); + constexpr int load_step_stride = load_granularity * threads; + constexpr int load_block_stride = load_step_stride * unroll; + + // Store constants + constexpr int T_per_chunk = granularity / sizeof(T); + constexpr int store_step_stride = T_per_chunk * threads; + constexpr int store_block_stride = store_step_stride * unroll; + + // Load offsets + const int load_block_offset = tb.group_index().x * load_block_stride; + // Note: we can use `load_granularity` since the dtype is `int8_t`. 
+ const int load_thread_offset = tb.thread_index().x * load_granularity; + const int8_t* load_base = data + load_block_offset + load_thread_offset; + + // Store offsets + const int store_block_offset = tb.group_index().x * store_block_stride; + const int store_thread_offset = tb.thread_index().x * T_per_chunk; + const int elem_id_base = store_block_offset + store_thread_offset; + + int8_t local_load_buffer[load_granularity * unroll]; + T local_dequant_buffer[T_per_chunk * unroll]; + + /* + Note: Splitting this loop in half gave about 3-5% performance increase for reasons that aren't + totally clear to me, so this is a deliberately weird code structure. + */ +#pragma unroll + for (int i = 0; i < unroll; i++) { + const int elem_id_iter = elem_id_base + i * store_step_stride; + + if (elem_id_iter < total_elems) { + mem_access::load_global(local_load_buffer + i * load_granularity, + load_base + i * load_step_stride); + } + } + +#pragma unroll + for (int i = 0; i < unroll; i++) { + const int elem_id_iter = elem_id_base + i * store_step_stride; + if (elem_id_iter < total_elems) { + // TODO(cmikeh2): Can we amortize this division? Perform once on the first iteration and + // use indexing math to do division free interpolation of the successive groups? 
+ const int group_index = elem_id_iter / elems_per_group; + Params q_params(global_params, group_index); + + chunk(local_dequant_buffer + i * T_per_chunk, + local_load_buffer + i * load_granularity, + q_params); + mem_access::store_global(global_output + elem_id_iter, + local_dequant_buffer + i * T_per_chunk); + } + } +} + +template +DS_D_INLINE void to_global(T* global_output, + const int8_t* data, + const float* global_params, + const int elems_per_group, + const int total_elems) +{ + if constexpr (numBits == 4 || numBits == 8) { + _to_global( + global_output, data, global_params, elems_per_group, total_elems); + } else if constexpr (numBits == 3) { + // TODO(cmikeh2): Need this implementation + assert(false); + } else { + assert(false); + } +} + +} // namespace dequantize diff --git a/csrc/includes/dropout.h b/csrc/includes/dropout.h index a72572d..2fdf224 100644 --- a/csrc/includes/dropout.h +++ b/csrc/includes/dropout.h @@ -1,3 +1,7 @@ +/* +Copyright The Microsoft DeepSpeed Team +*/ + #pragma once #include diff --git a/csrc/includes/ds_kernel_utils.h b/csrc/includes/ds_kernel_utils.h new file mode 100644 index 0000000..1021066 --- /dev/null +++ b/csrc/includes/ds_kernel_utils.h @@ -0,0 +1,49 @@ +/* +Copyright 2022 The Microsoft DeepSpeed Team + +Centralized header file for preprocessor macros and constants +used throughout the codebase. 
+*/ + +#pragma once + +#include <cuda.h> + +#define DS_HD_INLINE __host__ __device__ __forceinline__ +#define DS_D_INLINE __device__ __forceinline__ + +#ifdef __HIP_PLATFORM_HCC__ + +// constexpr variant of warpSize for templating +constexpr int hw_warp_size = 64; +#define HALF_PRECISION_AVAILABLE = 1 +#include <hip/hip_cooperative_groups.h> + +#else // !__HIP_PLATFORM_HCC__ + +// constexpr variant of warpSize for templating +constexpr int hw_warp_size = 32; + +#if __CUDA_ARCH__ >= 530 +#define HALF_PRECISION_AVAILABLE = 1 +#define PTX_AVAILABLE +#endif // __CUDA_ARCH__ >= 530 + +#if __CUDA_ARCH__ >= 800 +#define ASYNC_COPY_AVAILABLE +#define BF16_AVAILABLE +#endif // __CUDA_ARCH__ >= 800 + +#include <cooperative_groups.h> + +#endif //__HIP_PLATFORM_HCC__ + +inline int next_pow2(const int val) +{ + int rounded_val = val - 1; + rounded_val |= rounded_val >> 1; + rounded_val |= rounded_val >> 2; + rounded_val |= rounded_val >> 4; + rounded_val |= rounded_val >> 8; + return rounded_val + 1; +} diff --git a/csrc/includes/ds_transformer_cuda.h b/csrc/includes/ds_transformer_cuda.h old mode 100644 new mode 100755 index 09afeb9..d289a24 --- a/csrc/includes/ds_transformer_cuda.h +++ b/csrc/includes/ds_transformer_cuda.h @@ -1,3 +1,7 @@ +/* +Copyright The Microsoft DeepSpeed Team +*/ + #pragma once #include diff --git a/csrc/includes/feed_forward.h b/csrc/includes/feed_forward.h index de7a9cf..0f2ece4 100644 --- a/csrc/includes/feed_forward.h +++ b/csrc/includes/feed_forward.h @@ -1,3 +1,7 @@ +/* +Copyright The Microsoft DeepSpeed Team +*/ + #ifndef __FEEDFORWARD_H__ #define __FEEDFORWARD_H__ diff --git a/csrc/includes/gelu.h b/csrc/includes/gelu.h index 560f414..07d4314 100644 --- a/csrc/includes/gelu.h +++ b/csrc/includes/gelu.h @@ -1,3 +1,7 @@ +/* +Copyright The Microsoft DeepSpeed Team +*/ + #pragma once #include diff --git a/csrc/includes/gemm_test.h b/csrc/includes/gemm_test.h index 22c3512..7ddb8b1 100644 --- a/csrc/includes/gemm_test.h +++ b/csrc/includes/gemm_test.h @@ -1,3 +1,6 @@ +/* +Copyright The Microsoft DeepSpeed Team
+*/ #pragma once diff --git a/csrc/includes/general_kernels.h b/csrc/includes/general_kernels.h index e949309..875df84 100644 --- a/csrc/includes/general_kernels.h +++ b/csrc/includes/general_kernels.h @@ -1,3 +1,7 @@ +/* +Copyright The Microsoft DeepSpeed Team +*/ + #include #include #include diff --git a/csrc/includes/memory_access_utils.h b/csrc/includes/memory_access_utils.h new file mode 100644 index 0000000..e2cdcb6 --- /dev/null +++ b/csrc/includes/memory_access_utils.h @@ -0,0 +1,1114 @@ +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + +#pragma once + +#include +#include "ds_kernel_utils.h" + +/////////////////////////////// Memory Access Utils /////////////////////////////// +namespace mem_access { + +enum class LoadPolicy { + CacheAll, // Cache at all levels + CacheGlobal, // Cache at L2 only + CacheStreaming // Cache with evict first policy +}; + +enum class StorePolicy { + Writeback, // Cache in L1, write-back on eviction + CacheGlobal, // Bypass L1, write-back on eviction + CacheStreaming // Allocate cache line with evict first policy +}; + +template +__device__ __forceinline__ void load_global(void* dst, const void* src); + +template +__device__ __forceinline__ void load_global(void* dst, const void* src, bool do_access); + +// Shared accesses have no cache policy +template +__device__ __forceinline__ void load_shared(void* dst, const void* src); + +template +__device__ __forceinline__ void load_shared(void* dst, const void* src, bool do_access); + +template +__device__ __forceinline__ void store_global(void* dst, const void* src); + +// Shared accesses have no cache policy +template +__device__ __forceinline__ void store_shared(void* dst, const void* src); + +#ifdef ASYNC_COPY_AVAILABLE +template +__device__ __forceinline__ void memcpy_async(void* shr, const void* gbl); + +template +__device__ __forceinline__ void memcpy_async_nop(void* shr, const void* gbl, bool predicate); + +template +__device__ __forceinline__ void memcpy_async_zero(void* 
shr, const void* gbl, bool predicate); + +__device__ __forceinline__ void memcpy_async_fence(); + +template +__device__ __forceinline__ void memcpy_async_wait(); + +template +__device__ __forceinline__ void tail_complete_wait(int remaining_stages); +#endif + +// Util for tracking pipeline buffers +// TODO: Evaluate whether this should also be guarded by ASYNC_COPY_AVAILABLE +template +class BufferTracker { +public: + int current_state; + + __device__ __forceinline__ BufferTracker() : current_state(0) {} + + __device__ __forceinline__ int get() + { + int return_val = current_state++; + current_state = (current_state == max ? 0 : current_state); + return return_val; + } +}; + +__device__ __forceinline__ uint32_t lane_id() +{ +#ifdef PTX_AVAILABLE + unsigned int lane_id; + asm volatile("mov.u32 %0, %%laneid;" : "=r"(lane_id)); + return lane_id; +#else + return threadIdx.x & (warpSize - 1); // Portable +#endif +} + +/////////// Load Global /////////// +template <> +__device__ __forceinline__ void load_global<16>(void* dst, const void* src) +{ + uint4* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile("ld.global.ca.v4.u32 {%0, %1, %2, %3}, [%4];\n" + : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w) + : "l"(src)); +#else + const uint4* src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_global<16>(void* dst, const void* src, bool do_access) +{ + uint4* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %5, 0;\n" + "\tmov.b32 %0, 0;\n" + "\tmov.b32 %1, 0;\n" + "\tmov.b32 %2, 0;\n" + "\tmov.b32 %3, 0;\n" + "\t@p ld.global.v4.u32 {%0, %1, %2, %3}, [%4];\n" + "}\n" + : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w) + : "l"(src), "r"((int)do_access)); +#else + const uint4* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0].x = 0; + 
data[0].y = 0; + data[0].z = 0; + data[0].w = 0; + } +#endif +} + +template <> +__device__ __forceinline__ void load_global<16, LoadPolicy::CacheGlobal>(void* dst, const void* src) +{ + uint4* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile("ld.global.cg.v4.u32 {%0, %1, %2, %3}, [%4];\n" + : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w) + : "l"(src)); +#else + const uint4* src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_global<16, LoadPolicy::CacheGlobal>(void* dst, + const void* src, + bool do_access) +{ + uint4* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %5, 0;\n" + "\tmov.b32 %0, 0;\n" + "\tmov.b32 %1, 0;\n" + "\tmov.b32 %2, 0;\n" + "\tmov.b32 %3, 0;\n" + "\t@p ld.global.cg.v4.u32 {%0, %1, %2, %3}, [%4];\n" + "}\n" + : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w) + : "l"(src), "r"((int)do_access)); +#else + const uint4* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0].x = 0; + data[0].y = 0; + data[0].z = 0; + data[0].w = 0; + } +#endif +} + +template <> +__device__ __forceinline__ void load_global<16, LoadPolicy::CacheStreaming>(void* dst, + const void* src) +{ + uint4* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile("ld.global.cs.v4.u32 {%0, %1, %2, %3}, [%4];\n" + : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w) + : "l"(src)); +#else + const uint4* src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_global<16, LoadPolicy::CacheStreaming>(void* dst, + const void* src, + bool do_access) +{ + uint4* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %5, 0;\n" + "\tmov.b32 %0, 0;\n" + "\tmov.b32 %1, 0;\n" + "\tmov.b32 %2, 0;\n" + 
"\tmov.b32 %3, 0;\n" + "\t@p ld.global.cs.v4.u32 {%0, %1, %2, %3}, [%4];\n" + "}\n" + : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w) + : "l"(src), "r"((int)do_access)); +#else + const uint4* src_cast = reinterpret_cast<const uint4*>(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0].x = 0; + data[0].y = 0; + data[0].z = 0; + data[0].w = 0; + } +#endif +} + +template <> +__device__ __forceinline__ void load_global<8>(void* dst, const void* src) +{ + uint2* data = reinterpret_cast<uint2*>(dst); +#ifdef PTX_AVAILABLE + asm volatile("ld.global.ca.v2.u32 {%0, %1}, [%2];\n" + : "=r"(data[0].x), "=r"(data[0].y) + : "l"(src)); +#else + const uint2* src_cast = reinterpret_cast<const uint2*>(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_global<8>(void* dst, const void* src, bool do_access) +{ + uint2* data = reinterpret_cast<uint2*>(dst); +#ifdef PTX_AVAILABLE + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %3, 0;\n" + "\tmov.b32 %0, 0;\n" + "\tmov.b32 %1, 0;\n" + "\t@p ld.global.v2.u32 {%0, %1}, [%2];\n" + "}\n" + : "=r"(data[0].x), "=r"(data[0].y) + : "l"(src), "r"((int)do_access)); +#else + const uint2* src_cast = reinterpret_cast<const uint2*>(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0].x = 0; + data[0].y = 0; + } +#endif +} + +template <> +__device__ __forceinline__ void load_global<8, LoadPolicy::CacheGlobal>(void* dst, const void* src) +{ + uint2* data = reinterpret_cast<uint2*>(dst); +#ifdef PTX_AVAILABLE + asm volatile("ld.global.cg.v2.u32 {%0, %1}, [%2];\n" + : "=r"(data[0].x), "=r"(data[0].y) + : "l"(src)); +#else + const uint2* src_cast = reinterpret_cast<const uint2*>(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_global<8, LoadPolicy::CacheGlobal>(void* dst, + const void* src, + bool do_access) +{ + uint2* data = reinterpret_cast<uint2*>(dst); +#ifdef PTX_AVAILABLE + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %3, 0;\n" + "\tmov.b32 %0, 0;\n" +
"\tmov.b32 %1, 0;\n" + "\t@p ld.global.cg.v2.u32 {%0, %1}, [%2];\n" + "}\n" + : "=r"(data[0].x), "=r"(data[0].y) + : "l"(src), "r"((int)do_access)); +#else + const uint2* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0].x = 0; + data[0].y = 0; + } +#endif +} + +template <> +__device__ __forceinline__ void load_global<8, LoadPolicy::CacheStreaming>(void* dst, + const void* src) +{ + uint2* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile("ld.global.cs.v2.u32 {%0, %1}, [%2];\n" + : "=r"(data[0].x), "=r"(data[0].y) + : "l"(src)); +#else + const uint2* src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_global<8, LoadPolicy::CacheStreaming>(void* dst, + const void* src, + bool do_access) +{ + uint2* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %3, 0;\n" + "\tmov.b32 %0, 0;\n" + "\tmov.b32 %1, 0;\n" + "\t@p ld.global.cs.v2.u32 {%0, %1}, [%2];\n" + "}\n" + : "=r"(data[0].x), "=r"(data[0].y) + : "l"(src), "r"((int)do_access)); +#else + const uint2* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0].x = 0; + data[0].y = 0; + } +#endif +} + +template <> +__device__ __forceinline__ void load_global<4>(void* dst, const void* src) +{ + int32_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile("ld.global.ca.u32 {%0}, [%1];\n" : "=r"(*data) : "l"(src)); +#else + const int32_t* src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_global<4>(void* dst, const void* src, bool do_access) +{ + int32_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %2, 0;\n" + "\tmov.b32 %0, 0;\n" + "\t@p ld.global.u32 {%0}, [%1];\n" + "}\n" + : "=r"(data[0]) + : "l"(src), 
"r"((int)do_access)); +#else + const int32_t* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0] = 0; + } +#endif +} + +template <> +__device__ __forceinline__ void load_global<4, LoadPolicy::CacheGlobal>(void* dst, const void* src) +{ + int32_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile("ld.global.cg.u32 {%0}, [%1];\n" : "=r"(*data) : "l"(src)); +#else + const int32_t* src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_global<4, LoadPolicy::CacheGlobal>(void* dst, + const void* src, + bool do_access) +{ + int32_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %2, 0;\n" + "\tmov.b32 %0, 0;\n" + "\t@p ld.global.cg.u32 {%0}, [%1];\n" + "}\n" + : "=r"(data[0]) + : "l"(src), "r"((int)do_access)); +#else + const int32_t* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0] = 0; + } +#endif +} + +template <> +__device__ __forceinline__ void load_global<4, LoadPolicy::CacheStreaming>(void* dst, + const void* src) +{ + int32_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile("ld.global.cs.u32 {%0}, [%1];\n" : "=r"(*data) : "l"(src)); +#else + const int32_t* src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_global<4, LoadPolicy::CacheStreaming>(void* dst, + const void* src, + bool do_access) +{ + int32_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %2, 0;\n" + "\tmov.b32 %0, 0;\n" + "\t@p ld.global.cs.u32 {%0}, [%1];\n" + "}\n" + : "=r"(data[0]) + : "l"(src), "r"((int)do_access)); +#else + const int32_t* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0] = 0; + } +#endif +} + +template <> +__device__ 
__forceinline__ void load_global<2>(void* dst, const void* src) +{ + int16_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile("ld.global.ca.u16 {%0}, [%1];\n" : "=h"(*data) : "l"(src)); +#else + const int16_t* src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_global<2>(void* dst, const void* src, bool do_access) +{ + int16_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %2, 0;\n" + "\tmov.u16 %0, 0;\n" + "\t@p ld.global.u16 {%0}, [%1];\n" + "}\n" + : "=h"(*data) + : "l"(src), "r"((int)do_access)); +#else + const int16_t* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0] = 0; + } +#endif +} + +template <> +__device__ __forceinline__ void load_global<2, LoadPolicy::CacheGlobal>(void* dst, const void* src) +{ + int16_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile("ld.global.cg.u16 {%0}, [%1];\n" : "=h"(*data) : "l"(src)); +#else + const int16_t* src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_global<2, LoadPolicy::CacheGlobal>(void* dst, + const void* src, + bool do_access) +{ + int16_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %2, 0;\n" + "\tmov.u16 %0, 0;\n" + "\t@p ld.global.cg.u16 {%0}, [%1];\n" + "}\n" + : "=h"(*data) + : "l"(src), "r"((int)do_access)); +#else + const int16_t* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0] = 0; + } +#endif +} + +template <> +__device__ __forceinline__ void load_global<2, LoadPolicy::CacheStreaming>(void* dst, + const void* src) +{ + int16_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile("ld.global.cs.u16 {%0}, [%1];\n" : "=h"(*data) : "l"(src)); +#else + const int16_t* 
src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_global<2, LoadPolicy::CacheStreaming>(void* dst, + const void* src, + bool do_access) +{ + int16_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %2, 0;\n" + "\tmov.u16 %0, 0;\n" + "\t@p ld.global.cs.u16 {%0}, [%1];\n" + "}\n" + : "=h"(*data) + : "l"(src), "r"((int)do_access)); +#else + const int16_t* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0] = 0; + } +#endif +} + +/////////// Load Shared /////////// +namespace internal { + +#ifdef PTX_AVAILABLE +__device__ __forceinline__ unsigned convert_to_shared(const void* ptr) +{ +#if __CUDACC_VER_MAJOR__ >= 11 + // In CUDA 11 we have a builtin intrinsic + return __cvta_generic_to_shared(ptr); +#else + unsigned ret_val; + asm volatile( + "{\n" + "\t.reg .u64 p1;\n" + "\tcvta.to.shared.u64 p1, %1\n" + "\tcvt.u32.u64 %0, p1;\n" + "}\n" + : "=r"(ret_val) + : "l"(ptr)); + return ret_val; +#endif +} +#endif + +} // namespace internal + +template <> +__device__ __forceinline__ void load_shared<16>(void* dst, const void* src) +{ + uint4* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + unsigned src_shr = internal::convert_to_shared(src); + + asm volatile("ld.shared.v4.u32 {%0, %1, %2, %3}, [%4];\n" + : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w) + : "r"(src_shr)); +#else + const uint4* src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_shared<16>(void* dst, const void* src, bool do_access) +{ + uint4* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + unsigned src_shr = internal::convert_to_shared(src); + + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %5, 0;\n" + "\tmov.b32 %0, 0;\n" + "\tmov.b32 %1, 0;\n" + "\tmov.b32 %2, 0;\n" + "\tmov.b32 %3, 0;\n" + "\t@p 
ld.shared.v4.u32 {%0, %1, %2, %3}, [%4];\n" + "}\n" + : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w) + : "r"(src_shr), "r"((int)do_access)); +#else + const uint4* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0].x = 0; + data[0].y = 0; + data[0].z = 0; + data[0].w = 0; + } +#endif +} + +template <> +__device__ __forceinline__ void load_shared<8>(void* dst, const void* src) +{ + uint2* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + unsigned src_shr = internal::convert_to_shared(src); + + asm volatile("ld.shared.v2.u32 {%0, %1}, [%2];\n" + : "=r"(data[0].x), "=r"(data[0].y) + : "r"(src_shr)); +#else + const uint2* src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_shared<8>(void* dst, const void* src, bool do_access) +{ + uint2* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + unsigned src_shr = internal::convert_to_shared(src); + + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %3, 0;\n" + "\tmov.b32 %0, 0;\n" + "\tmov.b32 %1, 0;\n" + "\t@p ld.shared.v2.u32 {%0, %1}, [%2];\n" + "}\n" + : "=r"(data[0].x), "=r"(data[0].y) + : "r"(src_shr), "r"((int)do_access)); +#else + const uint2* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0].x = 0; + data[0].y = 0; + } +#endif +} + +template <> +__device__ __forceinline__ void load_shared<4>(void* dst, const void* src) +{ + int32_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + unsigned src_shr = internal::convert_to_shared(src); + + asm volatile("ld.shared.u32 {%0}, [%1];\n" : "=r"(*data) : "r"(src_shr)); +#else + const int32_t* src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_shared<4>(void* dst, const void* src, bool do_access) +{ + int32_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + unsigned src_shr = 
internal::convert_to_shared(src); + + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %2, 0;\n" + "\tmov.b32 %0, 0;\n" + "\t@p ld.shared.u32 %0, [%1];\n" + "}\n" + : "=r"(data[0]) + : "r"(src_shr), "r"((int)do_access)); +#else + const int32_t* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0] = 0; + } +#endif +} + +/////////// Store Global /////////// + +template <> +__device__ __forceinline__ void store_global<16>(void* dst, const void* src) +{ + const uint4* data = reinterpret_cast(src); +#ifdef PTX_AVAILABLE + asm volatile("st.global.wb.v4.u32 [%0], {%1, %2, %3, %4};\n" + : + : "l"(dst), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w) + : "memory"); +#else + uint4* dst_cast = reinterpret_cast(dst); + dst_cast[0] = data[0]; +#endif +} + +template <> +__device__ __forceinline__ void store_global<16, StorePolicy::CacheGlobal>(void* dst, + const void* src) +{ + const uint4* data = reinterpret_cast(src); +#ifdef PTX_AVAILABLE + asm volatile("st.global.cg.v4.u32 [%0], {%1, %2, %3, %4};\n" + : + : "l"(dst), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w) + : "memory"); +#else + uint4* dst_cast = reinterpret_cast(dst); + dst_cast[0] = data[0]; +#endif +} + +template <> +__device__ __forceinline__ void store_global<16, StorePolicy::CacheStreaming>(void* dst, + const void* src) +{ + const uint4* data = reinterpret_cast(src); +#ifdef PTX_AVAILABLE + asm volatile("st.global.cs.v4.u32 [%0], {%1, %2, %3, %4};\n" + : + : "l"(dst), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w) + : "memory"); +#else + uint4* dst_cast = reinterpret_cast(dst); + dst_cast[0] = data[0]; +#endif +} + +template <> +__device__ __forceinline__ void store_global<8>(void* dst, const void* src) +{ + const uint2* data = reinterpret_cast(src); +#ifdef PTX_AVAILABLE + asm volatile("st.global.wb.v2.u32 [%0], {%1, %2};\n" + : + : "l"(dst), "r"(data[0].x), "r"(data[0].y)); +#else + uint2* dst_cast = 
reinterpret_cast(dst); + dst_cast[0] = data[0]; +#endif +} + +template <> +__device__ __forceinline__ void store_global<8, StorePolicy::CacheGlobal>(void* dst, + const void* src) +{ + const uint2* data = reinterpret_cast(src); +#ifdef PTX_AVAILABLE + asm volatile("st.global.cg.v2.u32 [%0], {%1, %2};\n" + : + : "l"(dst), "r"(data[0].x), "r"(data[0].y)); +#else + uint2* dst_cast = reinterpret_cast(dst); + dst_cast[0] = data[0]; +#endif +} + +template <> +__device__ __forceinline__ void store_global<8, StorePolicy::CacheStreaming>(void* dst, + const void* src) +{ + const uint2* data = reinterpret_cast(src); +#ifdef PTX_AVAILABLE + asm volatile("st.global.cs.v2.u32 [%0], {%1, %2};\n" + : + : "l"(dst), "r"(data[0].x), "r"(data[0].y)); +#else + uint2* dst_cast = reinterpret_cast(dst); + dst_cast[0] = data[0]; +#endif +} + +template <> +__device__ __forceinline__ void store_global<4>(void* dst, const void* src) +{ + const int32_t* data = reinterpret_cast(src); +#ifdef PTX_AVAILABLE + asm volatile("st.global.wb.u32 [%0], %1;\n" : : "l"(dst), "r"(*data)); +#else + int32_t* dst_cast = reinterpret_cast(dst); + dst_cast[0] = data[0]; +#endif +} + +template <> +__device__ __forceinline__ void store_global<4, StorePolicy::CacheGlobal>(void* dst, + const void* src) +{ + const int32_t* data = reinterpret_cast(src); +#ifdef PTX_AVAILABLE + asm volatile("st.global.cg.u32 [%0], %1;\n" : : "l"(dst), "r"(*data)); +#else + int32_t* dst_cast = reinterpret_cast(dst); + dst_cast[0] = data[0]; +#endif +} + +template <> +__device__ __forceinline__ void store_global<4, StorePolicy::CacheStreaming>(void* dst, + const void* src) +{ + const int32_t* data = reinterpret_cast(src); +#ifdef PTX_AVAILABLE + asm volatile("st.global.cs.u32 [%0], %1;\n" : : "l"(dst), "r"(*data)); +#else + int32_t* dst_cast = reinterpret_cast(dst); + dst_cast[0] = data[0]; +#endif +} + +/////////// Store Shared /////////// + +template <> +__device__ __forceinline__ void store_shared<16>(void* dst, const void* src) +{ + 
const uint4* data = reinterpret_cast(src); +#ifdef PTX_AVAILABLE + unsigned dst_int = internal::convert_to_shared(dst); + + asm volatile("st.shared.v4.u32 [%0], {%1, %2, %3, %4};\n" + : + : "r"(dst_int), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w)); +#else + uint4* dst_cast = reinterpret_cast(dst); + dst_cast[0] = data[0]; +#endif +} + +template <> +__device__ __forceinline__ void store_shared<8>(void* dst, const void* src) +{ + const uint2* data = reinterpret_cast(src); +#ifdef PTX_AVAILABLE + unsigned dst_int = internal::convert_to_shared(dst); + + asm volatile("st.shared.v2.u32 [%0], {%1, %2};\n" + : + : "r"(dst_int), "r"(data[0].x), "r"(data[0].y)); +#else + uint2* dst_cast = reinterpret_cast(dst); + dst_cast[0] = data[0]; +#endif +} + +template <> +__device__ __forceinline__ void store_shared<4>(void* dst, const void* src) +{ + const int32_t* data = reinterpret_cast(src); +#ifdef PTX_AVAILABLE + unsigned dst_int = internal::convert_to_shared(dst); + + asm volatile("st.shared.u32 [%0], %1;\n" : : "r"(dst_int), "r"(*data)); +#else + int32_t* dst_cast = reinterpret_cast(dst); + dst_cast[0] = data[0]; +#endif +} + +/////////// Asynchronous Memory Copy /////////// + +#ifdef ASYNC_COPY_AVAILABLE +template +__device__ __forceinline__ void memcpy_async(void* shr, const void* gbl) +{ + static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16)); + unsigned shr_int = internal::convert_to_shared(shr); + + asm volatile("cp.async.ca.shared.global [%0], [%1], %2;\n" + : + : "r"(shr_int), "l"(gbl), "n"(AccessSize)); +} + +template +__device__ __forceinline__ void memcpy_async_nop(void* shr, const void* gbl, bool predicate) +{ + static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16)); + unsigned shr_int = internal::convert_to_shared(shr); + + asm volatile( + "{\n" + " .reg .pred p;\n" + " setp.ne.b32 p, %0, 0;\n" + " @p cp.async.ca.shared.global [%1], [%2], %3;\n" + "}\n" + : + : "r"((int)predicate), "r"(shr_int), "l"(gbl), 
"n"(AccessSize)); +} + +template +__device__ __forceinline__ void memcpy_async_zero(void* shr, const void* gbl, bool predicate) +{ + static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16)); + unsigned shr_int = internal::convert_to_shared(shr); + int bytes_to_copy = (predicate ? AccessSize : 0); + + asm volatile("cp.async.ca.shared.global [%0], [%1], %2, %3;\n" + : + : "r"(shr_int), "l"(gbl), "n"(AccessSize), "r"(bytes_to_copy)); +} + +template +__device__ __forceinline__ void memcpy_async_zero_nop(void* shr, + const void* gbl, + bool zero_predicate, + bool nop_predicate) +{ + static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16)); + unsigned shr_int = internal::convert_to_shared(shr); + int bytes_to_copy = (zero_predicate ? AccessSize : 0); + + asm volatile( + "{\n" + " .reg .pred p;\n" + " setp.ne.b32 p, %0, 0;\n" + " @p cp.async.ca.shared.global [%1], [%2], %3, %4;\n" + "}\n" + : + : "r"((int)nop_predicate), "r"(shr_int), "l"(gbl), "n"(AccessSize), "r"(bytes_to_copy)); +} + +// Cache global variants. Separate interface to require deliberate use of them. +__device__ __forceinline__ void memcpy_async_cg(void* shr, const void* gbl) +{ + unsigned shr_int = internal::convert_to_shared(shr); + + asm volatile("cp.async.cg.shared.global [%0], [%1], 16;\n" : : "r"(shr_int), "l"(gbl)); +} + +__device__ __forceinline__ void memcpy_async_nop_cg(void* shr, const void* gbl, bool predicate) +{ + unsigned shr_int = internal::convert_to_shared(shr); + + asm volatile( + "{\n" + " .reg .pred p;\n" + " setp.ne.b32 p, %0, 0;\n" + " @p cp.async.cg.shared.global [%1], [%2], 16;\n" + "}\n" + : + : "r"((int)predicate), "r"(shr_int), "l"(gbl)); +} + +__device__ __forceinline__ void memcpy_async_zero_cg(void* shr, const void* gbl, bool predicate) +{ + unsigned shr_int = internal::convert_to_shared(shr); + int bytes_to_copy = (predicate ? 
16 : 0); + + asm volatile("cp.async.cg.shared.global [%0], [%1], 16, %2;\n" + : + : "r"(shr_int), "l"(gbl), "r"(bytes_to_copy)); +} + +__device__ __forceinline__ void memcpy_async_zero_nop_cg(void* shr, + const void* gbl, + bool zero_predicate, + bool nop_predicate) +{ + unsigned shr_int = internal::convert_to_shared(shr); + int bytes_to_copy = (zero_predicate ? 16 : 0); + + asm volatile( + "{\n" + " .reg .pred p;\n" + " setp.ne.b32 p, %0, 0;\n" + " @p cp.async.cg.shared.global [%1], [%2], 16, %3;\n" + "}\n" + : + : "r"((int)nop_predicate), "r"(shr_int), "l"(gbl), "r"(bytes_to_copy)); +} + +__device__ __forceinline__ void memcpy_async_fence() { asm volatile("cp.async.commit_group;\n"); } + +template +__device__ __forceinline__ void memcpy_async_wait() +{ + static_assert(stages <= 8); + + asm volatile("cp.async.wait_group %0;\n" : : "n"(stages)); +} + +// TODO: The tail complete should be a known compile time artifact, should try and induce this +// without all of the branches from the call-site. This is a hacky solution. 
+template <> +__device__ __forceinline__ void tail_complete_wait<1>(int remaining_stages) +{ + if (remaining_stages == 0) memcpy_async_wait<0>(); +} + +template <> +__device__ __forceinline__ void tail_complete_wait<2>(int remaining_stages) +{ + if (remaining_stages == 1) + memcpy_async_wait<1>(); + else if (remaining_stages == 0) + memcpy_async_wait<0>(); +} + +template <> +__device__ __forceinline__ void tail_complete_wait<3>(int remaining_stages) +{ + if (remaining_stages == 2) + memcpy_async_wait<2>(); + else if (remaining_stages == 1) + memcpy_async_wait<1>(); + else if (remaining_stages == 0) + memcpy_async_wait<0>(); +} + +template <> +__device__ __forceinline__ void tail_complete_wait<4>(int remaining_stages) +{ + if (remaining_stages == 3) + memcpy_async_wait<3>(); + else if (remaining_stages == 2) + memcpy_async_wait<2>(); + else if (remaining_stages == 1) + memcpy_async_wait<1>(); + else if (remaining_stages == 0) + memcpy_async_wait<0>(); +} + +template <> +__device__ __forceinline__ void tail_complete_wait<5>(int remaining_stages) +{ + if (remaining_stages == 4) + memcpy_async_wait<4>(); + else if (remaining_stages == 3) + memcpy_async_wait<3>(); + else if (remaining_stages == 2) + memcpy_async_wait<2>(); + else if (remaining_stages == 1) + memcpy_async_wait<1>(); + else if (remaining_stages == 0) + memcpy_async_wait<0>(); +} + +template <> +__device__ __forceinline__ void tail_complete_wait<6>(int remaining_stages) +{ + if (remaining_stages == 5) + memcpy_async_wait<5>(); + else if (remaining_stages == 4) + memcpy_async_wait<4>(); + else if (remaining_stages == 3) + memcpy_async_wait<3>(); + else if (remaining_stages == 2) + memcpy_async_wait<2>(); + else if (remaining_stages == 1) + memcpy_async_wait<1>(); + else if (remaining_stages == 0) + memcpy_async_wait<0>(); +} +#endif + +} // namespace mem_access diff --git a/csrc/includes/normalize_layer.h b/csrc/includes/normalize_layer.h index b4d135e..0921293 100644 --- a/csrc/includes/normalize_layer.h 
+++ b/csrc/includes/normalize_layer.h @@ -1,3 +1,7 @@ +/* +Copyright The Microsoft DeepSpeed Team +*/ + #pragma once #include diff --git a/csrc/includes/quantization.h b/csrc/includes/quantization.h new file mode 100644 index 0000000..8986774 --- /dev/null +++ b/csrc/includes/quantization.h @@ -0,0 +1,65 @@ +/* +Copyright The Microsoft DeepSpeed Team +*/ + +#pragma once + +#include +#include "ds_kernel_utils.h" + +namespace quantize { + +enum class Type { Symmetric, Asymmetric }; + +struct PackedInt4 { + int8_t high : 4; + int8_t low : 4; +}; + +DS_HD_INLINE bool requires_offset(Type qType) { return qType == Type::Asymmetric; } + +} // namespace quantize + +void launch_quant(int8_t* output_data, + float* params, + const __half* input_data, + const int groups, + const int elems_per_group, + const int num_bits, + const quantize::Type quant_type, + cudaStream_t stream); + +template +void launch_dequantize_kernel(T* dequant_data, + const int8_t* q_data, + const float* q_params, + quantize::Type q_type, + int num_bits, + int elems_per_group, + int total_elems, + cudaStream_t stream); + +template +void launch_fake_quantize_kernel(T* vals, + int total_count, + int group_num, + int num_bits, + cudaStream_t stream); +template +void launch_sr_fake_quantize_kernel(T* vals, + int total_count, + int group_num, + int num_bits, + cudaStream_t stream); +template +void launch_fake_quantize_kernel_asym(T* vals, + int total_count, + int group_num, + int num_bits, + cudaStream_t stream); +template +void launch_sr_fake_quantize_kernel_asym(T* vals, + int total_count, + int group_num, + int num_bits, + cudaStream_t stream); diff --git a/csrc/includes/quantization_utils.h b/csrc/includes/quantization_utils.h new file mode 100644 index 0000000..8b14d1d --- /dev/null +++ b/csrc/includes/quantization_utils.h @@ -0,0 +1,467 @@ +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + +#include +#include "conversion_utils.h" +#include "ds_kernel_utils.h" +#include "memory_access_utils.h" 
+#include "quantization.h" +#include "reduction_utils.h" + +#pragma once + +using rop = reduce::ROpType; + +namespace quantize { +constexpr int granularity = 16; +constexpr int h_per_load = granularity / sizeof(__half); +constexpr int h2_per_load = granularity / sizeof(__half2); +constexpr int max_threads = 1024; + +/* +Class to hold the quantization parameters for a given tensor. +Holds the implementation of the quantization operation. +*/ +template +class Params { +public: + /* + Quantization implementation, supports + 1) 4 Bit + 2) 8 Bit + 3) Symmetric + 4) Asymmetric + Function Arguments : + val : The __half value to quantize. + */ + DS_D_INLINE int8_t quantize(__half val); + + template + DS_D_INLINE T dequantize(int8_t val); + + DS_D_INLINE void store(float* params, int group_index); + + // Initialize from memory + DS_D_INLINE Params(const float* params, int group_index); +}; + +template +class Params { +public: + float scale; + + DS_D_INLINE Params(float max) + { + if (max == 0) { + scale = 1.0; + } else { + scale = (1 << numBits) / (2 * max); + } + } + + DS_D_INLINE int8_t quantize(__half val) + { + constexpr int32_t q_min = -(1 << (numBits - 1)); + constexpr int32_t q_max = (1 << (numBits - 1)) - 1; + + float val_f = conversion::to(val) * scale; + int32_t data_i32 = conversion::to(val_f); + data_i32 = min(max(data_i32, q_min), q_max); + return (int8_t)data_i32; + } + + template + DS_D_INLINE T dequantize(int8_t val) + { + const float val_deq_f = conversion::to(val) * scale; + return conversion::to(val_deq_f); + } + + DS_D_INLINE void store(float* params, int group_index) + { + const float store_scale = 1 / scale; + mem_access::store_global(params + group_index, &store_scale); + } + + DS_D_INLINE Params(const float* params, int group_index) + { + mem_access::load_global(&scale, params + group_index); + } +}; + +template +class Params { +public: + float scale; + float offset; + + DS_D_INLINE Params(float max, float min) + { + if (max == min) { + scale = 1.0; 
+ } else { + scale = (1 << numBits) / (max - min); + } + offset = -(1 << (numBits - 1)) - (min * scale); + } + + DS_D_INLINE int8_t quantize(__half val) + { + constexpr int32_t q_min = -(1 << (numBits - 1)); + constexpr int32_t q_max = (1 << (numBits - 1)) - 1; + + float val_f = conversion::to(val) * scale + offset; + int32_t data_i32 = conversion::to(val_f); + data_i32 = min(max(data_i32, q_min), q_max); + return (int8_t)data_i32; + } + + template + DS_D_INLINE T dequantize(int8_t val) + { + const float val_deq_f = conversion::to(val) * scale + offset; + return conversion::to<__half>(val_deq_f); + } + + DS_D_INLINE void store(float* params, int group_index) + { + // Codegen should turn this into stg.64 + const float store_scale = 1 / scale; + mem_access::store_global(params + 2 * group_index, &store_scale); + mem_access::store_global(params + 2 * group_index + 1, &offset); + } + + DS_D_INLINE Params(const float* params, int group_index) + { + // Codegen should turn this into ldg.64 + mem_access::load_global(&scale, params + 2 * group_index); + mem_access::load_global(&offset, params + 2 * group_index + 1); + } +}; + +/* +Group stats tracks the necessary statistics about the quantized group +to abstract the particulars for the main loop. +*/ +template +class GroupStats { +public: + DS_D_INLINE void update(__half2 val); + + DS_D_INLINE void reduce(cg::thread_block& tb, cg::thread_block_tile& warp); +}; + +template <> +class GroupStats { +public: + // Symmetric quantization only tracks the maximum absolute value + __half2 cur_max; + float max; + + /* + Technically, this would give bad results if there + are 0 values to process since the reduction would + give -inf instead of 0. We do not consider this + to be a reasonable edge case. + */ + DS_D_INLINE GroupStats() { cur_max = reduce::init(); } + + /* + Updated the running absmax used to calculate params. + Function Arguments : + val : The __half2 value to update the running min and max with. 
+ */ + DS_D_INLINE void update(__half2 val) + { + cur_max = reduce::element(cur_max, __habs2(val)); + } + + /* + Function to return calculated quantization params. + Template Arguments : + numBits - Number of bits in quantized element. int : 8 or 4 + Function Arguments : + tb - Threadblock object. cg::thread_block + warp - Warp object. cg::thread_block_tile + */ + template + DS_D_INLINE Params get_params( + cg::thread_block& tb, + cg::thread_block_tile& warp) + { + const float2 partial_max = conversion::to(cur_max); + float max = reduce::element(partial_max.x, partial_max.y); + + reduce::partitioned_block(tb, warp, max); + Params params(max); + + return params; + } +}; + +template <> +class GroupStats { +public: + __half2 cur_max; + __half2 cur_min; + + /* + Initialize cur_max to -inf, cur_min to inf since + we are doing a true range analysis. + */ + DS_D_INLINE GroupStats() + { + cur_max = reduce::init(); + cur_min = reduce::init(); + } + + /* + Updated the running min and max used to calculate params. + Function Arguments : + val : The __half2 value to update the running min and max with. + */ + DS_D_INLINE void update(__half2 val) + { + cur_max = reduce::element(cur_max, val); + cur_min = reduce::element(cur_min, val); + } + + /* + Function to return calculated quantization params. + Template Arguments : + numBits - Number of bits in quantized element. int : 8 or 4 + Function Arguments : + tb - Threadblock object. cg::thread_block + warp - Warp object. 
cg::thread_block_tile + */ + template + DS_D_INLINE Params get_params( + cg::thread_block& tb, + cg::thread_block_tile& warp) + { + const float2 partial_max = conversion::to(cur_max); + float max = reduce::element(partial_max.x, partial_max.y); + + const float2 partial_min = conversion::to(cur_min); + float min = reduce::element(partial_min.x, partial_min.y); + + reduce::partitioned_block(tb, warp, max, min); + + Params params(max, min); + + return params; + } +}; + +/* +Device function that quantizes 16 bytes of __half type input data. +Template Arguments : + numBits - Number of bits in quantized element. int : 8 or 4 + qType - Type of quantization to perform. Type::Symmetric or Type::Asymmetric +Function Arguments : + local_output - Pointer to local memory to store quantized data. int8_t* + data - Pointer to input data. __half* + Params - Parameters for quantization. Params +*/ +template +DS_D_INLINE void _chunk(int8_t* local_output, const __half* data, Params q_params); + +/* +Device function that quantizes 16 bytes of __half2 type input data. +Template Arguments : + numBits - Number of bits in quantized element. int : 8 or 4 + qType - Type of quantization to perform. Type::Symmetric or Type::Asymmetric +Function Arguments : + local_output - Pointer to local memory to store quantized data. int8_t* + data - Pointer to input data. __half2* + Params - Parameters for quantization. Params +*/ +template +DS_D_INLINE void _chunk(int8_t* local_output, const __half2* data, Params q_params); + +/* +Helper function to do serial reduction on register-file arrays. +Template Arguments : + qType - Type of quantization to perform. Type::Symmetric or Type::Asymmetric + numChunks - Number of bits in quantized element. int : 8 or 4 +Function Arguments : + local_buffer - Pointer memory with input half2 data to be quantized. 
+*/ +template +DS_D_INLINE GroupStats _local_serial_reduce(__half2* local_buffer); + +/* +The main loop of the kernel that quantizes array in local memory of __half2 type input data, when +Quantization parameters are pre-computed. +Template Arguments : + qType - Type of quantization to perform. Type::Symmetric or Type::Asymmetric + numBits - Number of bits in quantized element. int : 8 or 4 + numChunks - Number of chunks(16 bytes of Input data). int : 8 or 4 +Function Arguments : + local_buffer - Pointer memory with input half2 data to be quantized. + scales - Pointer to output scales. + offsets - Pointer to output offsets. + output_data - Pointer to output data. + elems_per_group - Number of elements to quantize in a group. + q_params - Quantization parameters. +*/ +template +DS_D_INLINE void local_array(cg::thread_block& tb, + cg::thread_block_tile& warp, + __half2* local_buffer, + float* __restrict__ scales, + float* __restrict__ offsets, + int8_t* __restrict__ output_data, + const int& elems_per_group, + const int& groups, + Params q_params); + +/* +The main loop of the kernel that quantizes array in local memory of __half2 type input data. +This function computes quantization parameters for each group. +Template Arguments : + qType - Type of quantization to perform. Type::Symmetric or Type::Asymmetric + numBits - Number of bits in quantized element. int : 8 or 4 + numChunks - Number of chunks(16 bytes of Input data). int : 8 or 4 +Function Arguments : + local_buffer - Pointer memory with input half2 data to be quantized. + scales - Pointer to output scales. + offsets - Pointer to output offsets. + output_data - Pointer to output data. + elems_per_group - Number of elements to quantize in a group. 
+*/ +template +__device__ void local_array(__half2* local_buffer, + float* __restrict__ scales, + float* __restrict__ offsets, + int8_t* __restrict__ output_data, + const int& elems_per_group, + const int& groups); + +template +DS_D_INLINE void _chunk(int8_t* local_output, const __half* data, Params q_params) +{ + constexpr int32_t elems = 16 / sizeof(__half); + constexpr int32_t num_elems_packed = 8 / numBits; + +#pragma unroll + for (int i = 0, oi = 0; i < elems; i += num_elems_packed, oi++) { + if (num_elems_packed == 1) { + // TODO(cmikeh2): refactor to use conversion utils + local_output[i] = q_params.quantize(data[i]); + } else if (num_elems_packed == 2) { + int8_t data_i8_1 = q_params.quantize(data[i]); + int8_t data_i8_2 = q_params.quantize(data[i + 1]); + auto data_i8 = PackedInt4{data_i8_2, data_i8_1}; + local_output[oi] = *((int8_t*)(&data_i8)); + } + } +} + +template +DS_D_INLINE void _chunk(int8_t* local_output, const __half2* data, Params q_params) +{ + const __half* data_cast = reinterpret_cast(data); + _chunk(local_output, data_cast, q_params); +} + +template +DS_D_INLINE GroupStats _local_serial_reduce(__half2* local_buffer) +{ + GroupStats stats; +#pragma unroll + for (int i = 0; i < numChunks * h2_per_load; i++) { stats.update(local_buffer[i]); } + + return stats; +} + +template +DS_D_INLINE void local_array(cg::thread_block& tb, + cg::thread_block_tile& warp, + __half2* local_buffer, + float* __restrict__ global_params, + int8_t* __restrict__ output_data, + const int& elems_per_group, + const int& groups, + Params q_params) +{ + constexpr int num_ele_int8 = 8 / numBits; + constexpr int num_int8_out = quantize::h_per_load / num_ele_int8; + + // Indexing offsets + const int block_num = + (tb.group_index().x * max_threads / threads_per_group) + tb.thread_index().y; + const int block_offset = block_num * elems_per_group; + const int elem_offset = tb.thread_index().x * quantize::h_per_load; + const int base_offset = (block_offset + elem_offset) / 
num_ele_int8; + const int stride = tb.size() * quantize::h_per_load / num_ele_int8; + + int8_t local_output[num_int8_out]; + + if (tb.thread_index().x == 0 && block_num < groups) { + q_params.store( + global_params, + (tb.group_index().x * max_threads / threads_per_group) + tb.thread_index().y); + } +#pragma unroll + for (int i = 0; i < numChunks; i++) { + if (elem_offset + i * stride * num_ele_int8 < elems_per_group && block_num < groups) { + quantize::_chunk( + local_output, local_buffer + i * quantize::h2_per_load, q_params); + mem_access::store_global(output_data + (base_offset + i * stride), + local_output); + } + } +} + +template +DS_D_INLINE void local_array(cg::thread_block& tb, + cg::thread_block_tile& warp, + __half* local_buffer, + float* __restrict__ global_params, + int8_t* __restrict__ output_data, + const int& elems_per_group, + const int& groups, + Params q_params) +{ + __half2* local_buffer_h2 = reinterpret_cast<__half2*>(local_buffer); + + quantize::local_array( + tb, warp, local_buffer, global_params, output_data, elems_per_group, groups, q_params); +} + +template +__device__ void local_array(__half2* local_buffer, + float* __restrict__ global_params, + int8_t* __restrict__ output_data, + const int& elems_per_group, + const int& groups) +{ + cg::thread_block tb = cg::this_thread_block(); + cg::thread_block_tile warp = cg::tiled_partition(tb); + + auto group_stats = _local_serial_reduce(local_buffer); + auto params = group_stats.template get_params(tb, warp); + + quantize::local_array( + tb, warp, local_buffer, global_params, output_data, elems_per_group, groups, params); +} + +template +__device__ void local_array(__half* local_buffer, + float* __restrict__ global_params, + int8_t* __restrict__ output_data, + const int& elems_per_group, + const int& groups) +{ + __half2* local_buffer_h2 = reinterpret_cast<__half2*>(local_buffer); + quantize::local_array( + local_buffer_h2, global_params, output_data, elems_per_group, groups); +} + +} // namespace 
quantize diff --git a/csrc/includes/quantizer.h b/csrc/includes/quantizer.h index 79eeb14..fe363c8 100644 --- a/csrc/includes/quantizer.h +++ b/csrc/includes/quantizer.h @@ -1,3 +1,7 @@ +/* +Copyright The Microsoft DeepSpeed Team +*/ + #pragma once #include diff --git a/csrc/includes/reduction_utils.h b/csrc/includes/reduction_utils.h new file mode 100644 index 0000000..fabf19d --- /dev/null +++ b/csrc/includes/reduction_utils.h @@ -0,0 +1,591 @@ +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + +#pragma once + +#include "conversion_utils.h" +#include "ds_kernel_utils.h" +#include "memory_access_utils.h" + +namespace cg = cooperative_groups; + +namespace reduce { + +enum class ROpType { + // Addition + Add, + + // Maximum reduction + Max, + + // Minimum reduction + Min, +}; + +constexpr int max_threads = 1024; +constexpr int max_warps = max_threads / hw_warp_size; + +/* +High level API. The API takes in a set of operations and variables +and performs that reduction operation on that variable. The reductions +of each of the arguments are completely independent of each other ( +i.e., the val1-op1 combination has no impact on val2-op2). + +Example usage: +``` cpp +float max_val; +float min_val; +reduce::block(tb, warp, max_val, min_val); +``` + +TODO(cmikeh2): In theory, we might be able to do this sequentially with +device functions and rely on the assembler correctly behaving. My initial +instinct is this won't work, but if it does it would reduce implementation +cost significantly. + +TODO(cmikeh2): We need to support sub-block reductions. The warp intrinsic +currently supports this (more incidentally than anything else). It is not +uncommon in something like softmax or a fused attention kernel to map multiple +reductions to a thread block, but each reduction itself is only scoped +to part of the threads (i.e block size = 512, 128 threads per reduction). 
+*/ +template +DS_D_INLINE void block(cg::thread_block& tb, cg::thread_block_tile& warp, float& val); + +template +DS_D_INLINE void block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val1, + float& val2); + +template +DS_D_INLINE void block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val1, + float& val2, + float& val3); + +template +DS_D_INLINE void block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val1, + float& val2, + float& val3, + float& val4); + +/* +The partitioned block is a special case of the above where in the warps of a threadblock are +partitioned into separate independent reductions. For example, I might have an 8 warp thread block +in which each pair of warps is processing an independent piece of data. I would then reduce that +data with the something like the following: +``` cpp +float max_val; +reduce::partitioned_block(tb, warp, max_val); +``` +After which, each pair of warps would have coherent data with each other. Note, this API will not +provide correct results if the number of warps per partition is not a power of 2. +*/ +template +DS_D_INLINE void partitioned_block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val); + +template +DS_D_INLINE void partitioned_block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val1, + float& val2); + +template +DS_D_INLINE void partitioned_block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val1, + float& val2, + float& val3); + +template +DS_D_INLINE void partitioned_block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val1, + float& val2, + float& val3, + float& val4); + +/* +Single element reduction primitives. Used inside serial collection +loops. 
+ +Example usage: +using rop = reduce::OpType; +float min = init(); +for (int i = 0; i < 4; i++) { + min = reduce::element(min, data[i]); +} +*/ + +template +DS_D_INLINE T element(const T lhs, const T rhs); + +template +DS_D_INLINE T init(); + +/********************** Internal reduction APIs **********************/ + +/* +Single element "reductions". TODO(cmikeh2): this sort of "op" concept +should be refactored into its own implementation at some point. This interface +may be easily expanded for new types/operations, but the typical reductions +we need are covered with min/max/add on float. + +NOTE: there is no mean reduction because that relies on knowledge of how +many values were already reduced into each scalar. Implementing this on top +of reduce should be straightforward (can just wrap the sum reduction) and +would be a good extension of the header. +*/ + +/* Float element reduce implementations */ +template <> +DS_D_INLINE float element(const float lhs, const float rhs) +{ + return lhs + rhs; +} + +template <> +DS_D_INLINE float element(const float lhs, const float rhs) +{ + return fmaxf(lhs, rhs); +} + +template <> +DS_D_INLINE float element(const float lhs, const float rhs) +{ + return fminf(lhs, rhs); +} + +/* __half element reduce implementation */ +template <> +DS_D_INLINE __half element(const __half lhs, const __half rhs) +{ + return lhs + rhs; +} + +template <> +DS_D_INLINE __half element(const __half lhs, const __half rhs) +{ +#if __CUDA_ARCH__ >= 800 + // Intrinsic limited to Ampere + newer + return __hmax(lhs, rhs); +#else + return (lhs > rhs) ? lhs : rhs; +#endif +} + +template <> +DS_D_INLINE __half element(const __half lhs, const __half rhs) +{ +#if __CUDA_ARCH__ >= 800 + // Intrinsic limited to Ampere + newer + return __hmin(lhs, rhs); +#else + return (lhs < rhs) ? 
lhs : rhs; +#endif +} + +/* __half2 element reduce implementation */ +template <> +DS_D_INLINE __half2 element(const __half2 lhs, const __half2 rhs) +{ + return lhs + rhs; +} + +template <> +DS_D_INLINE __half2 element(const __half2 lhs, const __half2 rhs) +{ +#if __CUDA_ARCH__ >= 800 + return __hmax2(lhs, rhs); +#else + __half2 ret_val; + ret_val.x = (lhs.x > rhs.x) ? lhs.x : rhs.x; + ret_val.y = (lhs.y > rhs.y) ? lhs.y : rhs.y; + return ret_val; +#endif +} + +template <> +DS_D_INLINE __half2 element(const __half2 lhs, const __half2 rhs) +{ +#if __CUDA_ARCH__ >= 800 + return __hmin2(lhs, rhs); +#else + __half2 ret_val; + ret_val.x = (lhs.x < rhs.x) ? lhs.x : rhs.x; + ret_val.y = (lhs.y < rhs.y) ? lhs.y : rhs.y; + return ret_val; +#endif +} + +/* +Reduction initialization primitives +*/ +template <> +DS_D_INLINE float init() +{ + return 0.0f; +} + +template <> +DS_D_INLINE float init() +{ + // Positive infinity + return INFINITY; +} + +template <> +DS_D_INLINE float init() +{ + // Negative infinity + return -INFINITY; +} + +template <> +DS_D_INLINE __half init() +{ + constexpr __half_raw zero = {0x0000}; + return __half(zero); +} + +template <> +DS_D_INLINE __half init() +{ + constexpr __half_raw inf = {0x7C00}; + return __half(inf); +} + +template <> +DS_D_INLINE __half init() +{ + constexpr __half_raw neg_inf = {0xFC00}; + return __half(neg_inf); +} + +template <> +DS_D_INLINE __half2 init() +{ + constexpr __half2_raw zero = {0x0000, 0x0000}; + return __half2(zero); +} + +template <> +DS_D_INLINE __half2 init() +{ + constexpr __half2_raw inf = {0x7C00, 0x7C00}; + return __half2(inf); +} + +template <> +DS_D_INLINE __half2 init() +{ + constexpr __half2_raw neg_inf = {0xFC00, 0xFC00}; + return __half2(neg_inf); +} + +template +DS_D_INLINE void init(T* data) +{ + data[0] = init(); +} + +template +DS_D_INLINE void init(T* data) +{ + data[0] = init(); + data[1] = init(); +} + +template +DS_D_INLINE void init(T* data) +{ + data[0] = init(); + data[1] = init(); + 
data[2] = init(); +} + +template +DS_D_INLINE void init(T* data) +{ + data[0] = init(); + data[1] = init(); + data[2] = init(); + data[3] = init(); +} + +/* +Warp reduction primitives + +`reduction_width` is an unsafe template parameter, that is that +when using `reduction_width` < hw_warp_size the warp is partitioned +into `hw_warp_size` / `reduction_width` groups of partial sums. + +If someone can figure out how to use variadic templates in a reasonable way +here (fold is C++17 only and I don't think helps and recursion feels like +huge overkill that harms readability) that would be wonderful. +*/ + +template +DS_D_INLINE void _warp(cg::thread_block_tile& warp, float* data) +{ +#pragma unroll + for (int i = 1; i < reduce_width; i *= 2) { + data[0] = element(data[0], warp.shfl_xor(data[0], i)); + } +} + +template +DS_D_INLINE void _warp(cg::thread_block_tile& warp, float* data) +{ +#pragma unroll + for (int i = 1; i < reduce_width; i *= 2) { + data[0] = element(data[0], warp.shfl_xor(data[0], i)); + data[1] = element(data[1], warp.shfl_xor(data[1], i)); + } +} + +template +DS_D_INLINE void _warp(cg::thread_block_tile& warp, float* data) +{ +#pragma unroll + for (int i = 1; i < reduce_width; i *= 2) { + data[0] = element(data[0], warp.shfl_xor(data[0], i)); + data[1] = element(data[1], warp.shfl_xor(data[1], i)); + data[2] = element(data[2], warp.shfl_xor(data[2], i)); + } +} + +template +DS_D_INLINE void _warp(cg::thread_block_tile& warp, float* data) +{ +#pragma unroll + for (int i = 1; i < reduce_width; i *= 2) { + data[0] = element(data[0], warp.shfl_xor(data[0], i)); + data[1] = element(data[1], warp.shfl_xor(data[1], i)); + data[2] = element(data[2], warp.shfl_xor(data[2], i)); + data[3] = element(data[3], warp.shfl_xor(data[3], i)); + } +} + +/* +Implementation for primary block reduction that serves both `block` and +`partitioned_block`. 
+ +`local_warp_rank` refers to the warp's location within the partition, so +for an unpartitioned threadblock this will be equivalent to +`warp_arg.meta_group_rank()`. + +Similarly, the warp offset is the `local_warp_rank` of the warp with the +lowest rank in the partition. In the case of an 8 warp block with a +4 warp reduction, this would map to [0, 0, 0, 0, 4, 4, 4, 4]. + +Partition size is the number of warps per partition (equal to the thread +block in the default case). This enables us to only perform the warp reduction +when able to. +*/ +template +DS_D_INLINE void _block(cg::thread_block& tb, + cg::thread_block_tile& warp_arg, + float* data, + int warp_offset) +{ + constexpr int elems = sizeof...(Ops); + // Separated for now in case this no longer is true + constexpr int bytes = sizeof(float); + // Unused when `partition_size == 1` or total_warps == 1 + __shared__ float reduce_buffer[max_warps * elems]; + + // Always perform warp-scope reduction + _warp(warp_arg, data); + + // If max_warps == 1 let's skip the runtime check + if (warp_arg.meta_group_size() > 1 && total_warps != 1) { + if (warp_arg.thread_rank() == 0) { +#pragma unroll + for (int i = 0; i < elems; i++) { + mem_access::store_shared( + reduce_buffer + elems * warp_arg.meta_group_rank() + i, data + i); + } + } + + // Synchronization inside block-uniform conditional is safe + tb.sync(); + + if (warp_arg.meta_group_rank() == 0) { + if (warp_arg.thread_rank() < warp_arg.meta_group_size()) { +#pragma unroll + for (int i = 0; i < elems; i++) { + mem_access::load_shared( + data + i, reduce_buffer + elems * warp_arg.thread_rank() + i); + } + } else { + init(data); + } + + _warp(warp_arg, data); + +#pragma unroll + for (int i = 0; i < elems; i++) { + mem_access::store_shared(reduce_buffer + elems * warp_arg.thread_rank() + i, + data + i); + } + } + + // Synchronization inside block-uniform conditional is safe + tb.sync(); + +#pragma unroll + for (int i = 0; i < elems; i++) { + 
mem_access::load_shared(data + i, + reduce_buffer + warp_arg.meta_group_rank() * elems + i); + } + } +} + +/* +Main API implementations. For the most part, they just convert the individual +variables into arrays, which makes working with them easier with a single +implementation. In theory, we could use the `_block` implementation as another +option, but the nature of using a pointer is a little less safe and this allows +us to obfuscate the details of the partitioned implementation. +*/ +template +DS_D_INLINE void block(cg::thread_block& tb, cg::thread_block_tile& warp, float& val) +{ + _block(tb, warp, &val, 0); +} + +template +DS_D_INLINE void block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val1, + float& val2) +{ + float data[2] = {val1, val2}; + _block(tb, warp, data, 0); + val1 = data[0]; + val2 = data[1]; +} + +template +DS_D_INLINE void block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val1, + float& val2, + float& val3) +{ + float data[3] = {val1, val2, val3}; + _block(tb, warp, data, 0); + val1 = data[0]; + val2 = data[1]; + val3 = data[2]; +} + +template +DS_D_INLINE void block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val1, + float& val2, + float& val3, + float& val4) +{ + float data[4] = {val1, val2, val3, val4}; + _block(tb, warp, data, 0); + val1 = data[0]; + val2 = data[1]; + val3 = data[2]; + val4 = data[3]; +} + +/* +Note: for the partitioned blocks, the implementation does not support non-power of 2 blocks in order +to shorten block scale reduction length. 
+*/ +template +DS_D_INLINE void partitioned_block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val) +{ + if (num_threads <= hw_warp_size) { + _warp(warp, &val); + } else { + constexpr int num_warps = num_threads / hw_warp_size; + const int warp_offset = warp.meta_group_rank() & ~(num_warps - 1); + _block(tb, warp, &val, warp_offset); + } +} + +template +DS_D_INLINE void partitioned_block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val1, + float& val2) +{ + float data[2] = {val1, val2}; + + if (num_threads <= hw_warp_size) { + _warp(warp, data); + } else { + constexpr int num_warps = num_threads / hw_warp_size; + const int warp_offset = warp.meta_group_rank() & ~(num_warps - 1); + _block(tb, warp, data, warp_offset); + } + + val1 = data[0]; + val2 = data[1]; +} + +template +DS_D_INLINE void partitioned_block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val1, + float& val2, + float& val3) +{ + float data[3] = {val1, val2, val3}; + + if (num_threads <= hw_warp_size) { + _warp(warp, data); + } else { + constexpr int num_warps = num_threads / hw_warp_size; + const int warp_offset = warp.meta_group_rank() & ~(num_warps - 1); + _block(tb, warp, data, warp_offset); + } + + val1 = data[0]; + val2 = data[1]; + val3 = data[2]; +} + +template +DS_D_INLINE void partitioned_block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val1, + float& val2, + float& val3, + float& val4) +{ + float data[4] = {val1, val2, val3, val4}; + + if (num_threads <= hw_warp_size) { + _warp(warp, data); + } else { + constexpr int num_warps = num_threads / hw_warp_size; + const int warp_offset = warp.meta_group_rank() & ~(num_warps - 1); + _block(tb, warp, data, warp_offset); + } + + val1 = data[0]; + val2 = data[1]; + val3 = data[2]; + val4 = data[3]; +} + +} // namespace reduce diff --git a/csrc/includes/simd.h b/csrc/includes/simd.h index 44c4da8..61f6ffd 100644 --- a/csrc/includes/simd.h +++ b/csrc/includes/simd.h @@ -1,3 +1,7 @@ 
+/* +Copyright The Microsoft DeepSpeed Team +*/ + #pragma once #if (__x86_64__ || __i386__) @@ -22,7 +26,7 @@ #define SIMD_WIDTH 16 #define SIMD_LOAD2(x, h) \ - ((h) ? _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i*)x)) : _mm512_loadu_ps(x)) + ((h) ? _mm512_cvtph_ps(_mm256_castps_si256(_mm256_loadu_ps(x))) : _mm512_loadu_ps(x)) #define SIMD_STORE2(x, d, h) \ ((h) ? _mm256_store_ps(x, _mm256_castsi256_ps(_mm512_cvtps_ph(d, _MM_FROUND_TO_NEAREST_INT))) \ : _mm512_storeu_ps(x, d)) @@ -60,18 +64,16 @@ union AVX_Data { template inline void simd_store(float* dst, AVX_Data* src, bool half_precision) { + size_t width = (half_precision ? SIMD_WIDTH / 2 : SIMD_WIDTH); #pragma unroll - for (size_t i = 0; i < span; ++i) { - SIMD_STORE2(dst + SIMD_WIDTH * i, src[i].data, half_precision); - } + for (size_t i = 0; i < span; ++i) { SIMD_STORE2(dst + width * i, src[i].data, half_precision); } } template inline void simd_load(AVX_Data* dst, float* src, bool half_precision) { + size_t width = (half_precision ? 
1 : SIMD_WIDTH); #pragma unroll - for (size_t i = 0; i < span; ++i) { - dst[i].data = SIMD_LOAD2(src + SIMD_WIDTH * i, half_precision); - } + for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_LOAD2(src + width * i, half_precision); } } template inline void simd_fma(AVX_Data* dst, AVX_Data* src_m_l, AVX_Data src_m_r, AVX_Data* src_a) diff --git a/csrc/includes/softmax.h b/csrc/includes/softmax.h old mode 100644 new mode 100755 index 8d541a6..676a35d --- a/csrc/includes/softmax.h +++ b/csrc/includes/softmax.h @@ -1,3 +1,7 @@ +/* +Copyright The Microsoft DeepSpeed Team +*/ + #pragma once #include diff --git a/csrc/includes/strided_batch_gemm.h b/csrc/includes/strided_batch_gemm.h index 037319b..6a1d65c 100644 --- a/csrc/includes/strided_batch_gemm.h +++ b/csrc/includes/strided_batch_gemm.h @@ -1,3 +1,7 @@ +/* +Copyright The Microsoft DeepSpeed Team +*/ + #pragma once #include diff --git a/csrc/includes/type_shim.h b/csrc/includes/type_shim.h index 4f4e7a5..9d28019 100644 --- a/csrc/includes/type_shim.h +++ b/csrc/includes/type_shim.h @@ -1,3 +1,7 @@ +/* +Copyright The Microsoft DeepSpeed Team +*/ + /* Taken from NVIDIA/apex commit 855808f3fc268e9715d613f3c2e56469d8c986d8 */ #include diff --git a/csrc/lamb/fused_lamb_cuda_kernel.cu b/csrc/lamb/fused_lamb_cuda_kernel.cu index c766323..a957eb6 100644 --- a/csrc/lamb/fused_lamb_cuda_kernel.cu +++ b/csrc/lamb/fused_lamb_cuda_kernel.cu @@ -7,12 +7,12 @@ #include "ATen/TensorUtils.h" #include "ATen/cuda/CUDAContext.h" #include "ATen/cuda/detail/IndexUtils.cuh" -//#include "ATen/Type.h" +// #include "ATen/Type.h" #include "ATen/AccumulateType.h" #include -//#include +// #include #if defined(__HIP_PLATFORM_HCC__) && HIP_VERSION > 305 #include #else @@ -105,7 +105,7 @@ __device__ void reduce_block_in_shared_memory(T* s_a, T* s_b, T* g_a, T* g_b) cg::sync(cta); -#if (__CUDA_ARCH__ >= 300) +#if (__CUDA_ARCH__ >= 300) || (defined(__HIP_PLATFORM_HCC__) && HIP_VERSION >= 502) if (tid < 32) { cg::coalesced_group active = 
cg::coalesced_threads(); diff --git a/csrc/quantization/dequantize.cu b/csrc/quantization/dequantize.cu new file mode 100644 index 0000000..3185b5f --- /dev/null +++ b/csrc/quantization/dequantize.cu @@ -0,0 +1,71 @@ +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + +#include "dequantization_utils.h" +#include "memory_access_utils.h" + +namespace cg = cooperative_groups; + +template +__global__ void dequantize_kernel(T* __restrict__ dequant_data, + const int8_t* __restrict__ q_data, + const float* __restrict__ q_params, + int elems_per_group, + int total_elems) +{ + dequantize::to_global( + dequant_data, q_data, q_params, elems_per_group, total_elems); +} + +#define LAUNCH_DEQUANT_KERNEL(num_bits, q_type) \ + dequantize_kernel<<>>( \ + dequant_data, q_data, q_params, elems_per_group, total_elems); + +template +void launch_dequantize_kernel(T* dequant_data, + const int8_t* q_data, + const float* q_params, + quantize::Type q_type, + int num_bits, + int elems_per_group, + int total_elems, + cudaStream_t stream) +{ + constexpr int unroll = 8; + constexpr int threads = 512; + constexpr int elems_per_block = unroll * threads * dequantize::granularity / (sizeof(T)); + + const dim3 block(threads); + const dim3 grid((total_elems + elems_per_block - 1) / elems_per_block); + + // TODO(cmikeh2): It may make sense to tune unroll, there is perf benefit for large + // problem sizes with this large unroll value. 
+ if (num_bits == 8 && q_type == quantize::Type::Symmetric) { + LAUNCH_DEQUANT_KERNEL(8, quantize::Type::Symmetric); + } else if (num_bits == 8 && q_type == quantize::Type::Asymmetric) { + LAUNCH_DEQUANT_KERNEL(8, quantize::Type::Asymmetric); + } else if (num_bits == 4 && q_type == quantize::Type::Symmetric) { + LAUNCH_DEQUANT_KERNEL(4, quantize::Type::Symmetric); + } else if (num_bits == 4 && q_type == quantize::Type::Asymmetric) { + LAUNCH_DEQUANT_KERNEL(4, quantize::Type::Asymmetric); + } +} + +template void launch_dequantize_kernel(__half* dequant_data, + const int8_t* q_data, + const float* q_params, + quantize::Type q_type, + int num_bits, + int elems_per_group, + int total_elems, + cudaStream_t stream); + +template void launch_dequantize_kernel(float* dequant_data, + const int8_t* q_data, + const float* q_params, + quantize::Type q_type, + int num_bits, + int elems_per_group, + int total_elems, + cudaStream_t stream); diff --git a/deepspeed/ops/csrc/quantization/quantizer.cu b/csrc/quantization/fake_quantizer.cu similarity index 81% rename from deepspeed/ops/csrc/quantization/quantizer.cu rename to csrc/quantization/fake_quantizer.cu index 3788341..5d23c6e 100644 --- a/deepspeed/ops/csrc/quantization/quantizer.cu +++ b/csrc/quantization/fake_quantizer.cu @@ -1,99 +1,92 @@ +/* +Copyright The Microsoft DeepSpeed Team +*/ + #include #include "custom_cuda_layers.h" +#include "memory_access_utils.h" namespace cg = cooperative_groups; -__global__ void quantize_kernel(__half* vals, int group_size, int num_bits) +__global__ void fake_quantize_kernel(__half* vals, int group_size, int num_bits) { #if __CUDA_ARCH__ >= 700 || defined(__HIP_PLATFORM_HCC__) - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); + cg::thread_block b = cg::this_thread_block(); // tb + cg::thread_block_tile<32> g = + cg::tiled_partition<32>(b); // warp, 32 not optimal for AMD which should be 64. 
int gid = threadIdx.x >> 5; int lane = threadIdx.x & 0x1f; int warp_num = blockDim.x >> 5; int id = threadIdx.x; - float2* vals_cast = reinterpret_cast(vals); + constexpr int granularity = 16; + constexpr int vals_per_access = granularity / sizeof(__half); - float2 data[MAX_REG]; + __half data[vals_per_access]; int group_id = blockIdx.x; - { - int group_index = id; - int reg_count = 0; - int offset = group_id * group_size; - float max = -10000.0; - - while (group_index < group_size && reg_count < MAX_REG) { - data[reg_count] = vals_cast[offset + group_index]; - __half* data_h = reinterpret_cast<__half*>(&data[reg_count]); - - if (abs((float)data_h[0]) > max) max = abs((float)data_h[0]); - if (abs((float)data_h[1]) > max) max = abs((float)data_h[1]); - if (abs((float)data_h[2]) > max) max = abs((float)data_h[2]); - if (abs((float)data_h[3]) > max) max = abs((float)data_h[3]); - - group_index += blockDim.x; - reg_count++; - } + int thread_index = id * vals_per_access; + int reg_count = 0; + int offset = group_id * group_size; + float max = -10000.0; + for (int thread_index = id * vals_per_access; thread_index < group_size; + thread_index += blockDim.x * vals_per_access) { + mem_access::load_global(data, vals + offset + thread_index); #pragma unroll - for (int i = 1; i < WARP_SIZE; i <<= 1) { - auto temp = g.shfl_xor(max, i); - if (max < temp) max = temp; + for (int i = 0; i < vals_per_access; i++) { + if (abs((float)data[i]) > max) max = abs((float)data[i]); } - __shared__ float partialMax[WARP_SIZE]; - - if (lane == 0) partialMax[gid] = max; - - b.sync(); - - if (lane < warp_num) max = partialMax[lane]; + } #pragma unroll - for (int i = 1; i < WARP_SIZE; i <<= 1) { - auto temp = g.shfl_down(max, i); - if (max < temp) max = temp; - } + for (int i = 1; i < WARP_SIZE; i <<= 1) { + auto temp = g.shfl_xor(max, i); + if (max < temp) max = temp; + } + __shared__ float partialMax[WARP_SIZE]; - max = g.shfl(max, 0); + if (lane == 0) partialMax[gid] = max; - float q_scale = 
(1 << num_bits) / (2 * max + 1e-5); - float q_scale_inv = 1 / q_scale; - for (int i = 0; i < reg_count; i++) { - group_index = i * blockDim.x + id; - if (group_index < group_size) { - __half2* data_h = reinterpret_cast<__half2*>(&data[i]); - float2 q_data[2]; - q_data[0] = __half22float2(data_h[0]); - q_data[1] = __half22float2(data_h[1]); + b.sync(); - float2 q_data_int[2]; + if (lane < warp_num) max = partialMax[lane]; - q_data_int[0].x = roundf(q_data[0].x * q_scale); - q_data_int[0].y = roundf(q_data[0].y * q_scale); - q_data_int[1].x = roundf(q_data[1].x * q_scale); - q_data_int[1].y = roundf(q_data[1].y * q_scale); +#pragma unroll + for (int i = 1; i < WARP_SIZE; i <<= 1) { + auto temp = g.shfl_down(max, i); + if (max < temp) max = temp; + } - q_data_int[0].x *= q_scale_inv; - q_data_int[0].y *= q_scale_inv; - q_data_int[1].x *= q_scale_inv; - q_data_int[1].y *= q_scale_inv; + max = g.shfl(max, 0); - data_h[0] = __float22half2_rn(q_data_int[0]); - data_h[1] = __float22half2_rn(q_data_int[1]); + float q_scale = (float)(1 << num_bits) / (2 * max + 1e-5); + float q_scale_inv = 1 / q_scale; + int q_range_max = (1 << (num_bits - 1)) - 1; + int q_range_min = -(1 << (num_bits - 1)); - vals_cast[offset + group_index] = data[i]; - } + for (int thread_index = id * vals_per_access; thread_index < group_size; + thread_index += blockDim.x * vals_per_access) { + mem_access::load_global(data, vals + offset + thread_index); +#pragma unroll + for (int j = 0; j < vals_per_access; j++) { + float q_data; + q_data = __half2float(data[j]); + q_data = __float2int_rn(q_data * q_scale); + q_data = q_data > (q_range_max) ? (q_range_max) + : (q_data < (q_range_min) ? 
(q_range_min) : q_data); + data[j] = __float2half_rn(q_data * q_scale_inv); } + mem_access::store_global(vals + offset + thread_index, data); } + #endif } -__global__ void quantize_kernel(float* vals, int group_size, int num_bits) +__global__ void fake_quantize_kernel(float* vals, int group_size, int num_bits) { cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); @@ -103,31 +96,31 @@ __global__ void quantize_kernel(float* vals, int group_size, int num_bits) int warp_num = blockDim.x >> 5; int id = threadIdx.x; - float4* vals_cast = reinterpret_cast(vals); + constexpr int granularity = 16; + constexpr int vals_per_access = granularity / sizeof(float); - float4 data[MAX_REG]; + float data[vals_per_access]; int bid = blockIdx.x; - int group_index = bid * group_size + id; + int thread_index = id * vals_per_access; + int reg_count = 0; - float max = -10000.0; + int offset = bid * group_size; - while (id < group_size && reg_count < MAX_REG) { - float4 data_reg = vals_cast[group_index]; - data[reg_count] = data_reg; + float max = -10000.0; - if (abs(data_reg.x) > max) max = abs(data_reg.x); - if (abs(data_reg.y) > max) max = abs(data_reg.y); - if (abs(data_reg.z) > max) max = abs(data_reg.z); - if (abs(data_reg.w) > max) max = abs(data_reg.w); + for (int thread_index = id * vals_per_access; thread_index < group_size; + thread_index += blockDim.x * vals_per_access) { + mem_access::load_global(data, vals + offset + thread_index); - group_index += blockDim.x; - id += blockDim.x; - reg_count++; +#pragma unroll + for (int i = 0; i < vals_per_access; i++) { + if (abs(data[i]) > max) max = abs(data[i]); + } } - id = threadIdx.x; + #pragma unroll for (int i = 1; i < WARP_SIZE; i <<= 1) { auto temp = g.shfl_xor(max, i); @@ -153,58 +146,55 @@ __global__ void quantize_kernel(float* vals, int group_size, int num_bits) float q_scale = (1 << num_bits) / (2 * max + 1e-5); float q_scale_inv = 1 / q_scale; - for (int i = 0; i < 
reg_count; i++) { - group_index = i * blockDim.x + id; - if (group_index < group_size) { - float4 q_data; - q_data = data[i]; - float4 q_data_int; - q_data_int.x = roundf(q_data.x * q_scale); - q_data_int.y = roundf(q_data.y * q_scale); - q_data_int.w = roundf(q_data.w * q_scale); - q_data_int.z = roundf(q_data.z * q_scale); - - q_data.x = q_data_int.x * q_scale_inv; - q_data.y = q_data_int.y * q_scale_inv; - q_data.w = q_data_int.w * q_scale_inv; - q_data.z = q_data_int.z * q_scale_inv; + int q_range_max = (1 << (num_bits - 1)) - 1; + int q_range_min = -(1 << (num_bits - 1)); - vals_cast[group_index + bid * group_size] = q_data; + for (int thread_index = id * vals_per_access; thread_index < group_size; + thread_index += blockDim.x * vals_per_access) { + mem_access::load_global(data, vals + offset + thread_index); +#pragma unroll + for (int j = 0; j < vals_per_access; j++) { + float q_data; + q_data = __float2int_rn(data[j] * q_scale); + q_data = q_data > (q_range_max) ? (q_range_max) + : (q_data < (q_range_min) ? 
(q_range_min) : q_data); + data[j] = roundf(q_data * q_scale_inv); } + mem_access::store_global(vals + offset + thread_index, data); } } template -void launch_quantize_kernel(T* vals, - int total_count, - int group_num, - int num_bits, - cudaStream_t stream) +void launch_fake_quantize_kernel(T* vals, + int total_count, + int group_num, + int num_bits, + cudaStream_t stream) { dim3 grid_dim(group_num); dim3 block_dim(1024); - quantize_kernel<<>>( - vals, (total_count / group_num) / 4, num_bits); + fake_quantize_kernel<<>>( + vals, total_count / group_num, num_bits); } -template void launch_quantize_kernel(float* vals, - int total_count, - int group_num, - int num_bits, - cudaStream_t stream); -template void launch_quantize_kernel(__half* vals, - int total_count, - int group_num, - int num_bits, - cudaStream_t stream); - -__global__ void sr_quantize_kernel(__half* vals, - int token_size, - int token_num, - int num_bits, - std::pair seed) +template void launch_fake_quantize_kernel(float* vals, + int total_count, + int group_num, + int num_bits, + cudaStream_t stream); +template void launch_fake_quantize_kernel(__half* vals, + int total_count, + int group_num, + int num_bits, + cudaStream_t stream); + +__global__ void sr_fake_quantize_kernel(__half* vals, + int token_size, + int token_num, + int num_bits, + std::pair seed) { #if __CUDA_ARCH__ >= 700 || defined(__HIP_PLATFORM_HCC__) @@ -336,11 +326,11 @@ __global__ void sr_quantize_kernel(__half* vals, #endif } -__global__ void sr_quantize_kernel(float* vals, - int token_size, - int token_num, - int num_bits, - std::pair seed) +__global__ void sr_fake_quantize_kernel(float* vals, + int token_size, + int token_num, + int num_bits, + std::pair seed) { cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); @@ -456,11 +446,11 @@ __global__ void sr_quantize_kernel(float* vals, } template -void launch_sr_quantize_kernel(T* vals, - int total_count, - int group_num, - int 
num_bits, - cudaStream_t stream) +void launch_sr_fake_quantize_kernel(T* vals, + int total_count, + int group_num, + int num_bits, + cudaStream_t stream) { dim3 block_dim(1024); dim3 grid_dim(group_num); @@ -468,21 +458,21 @@ void launch_sr_quantize_kernel(T* vals, uint64_t inc = total_count / grid_dim.x / block_dim.x; std::pair seed = Context::Instance().IncrementOffset(inc); - sr_quantize_kernel<<>>( + sr_fake_quantize_kernel<<>>( vals, (total_count / group_num) / 4, group_num, num_bits, seed); } -template void launch_sr_quantize_kernel(float* vals, - int total_count, - int group_num, - int num_bits, - cudaStream_t stream); -template void launch_sr_quantize_kernel(__half* vals, - int total_count, - int group_num, - int num_bits, - cudaStream_t stream); +template void launch_sr_fake_quantize_kernel(float* vals, + int total_count, + int group_num, + int num_bits, + cudaStream_t stream); +template void launch_sr_fake_quantize_kernel(__half* vals, + int total_count, + int group_num, + int num_bits, + cudaStream_t stream); -__global__ void quantize_kernel_asym(__half* vals, int group_size, int num_bits) +__global__ void fake_quantize_kernel_asym(__half* vals, int group_size, int num_bits) { #if __CUDA_ARCH__ >= 700 || defined(__HIP_PLATFORM_HCC__) @@ -595,7 +585,7 @@ __global__ void quantize_kernel_asym(__half* vals, int group_size, int num_bits) #endif } -__global__ void quantize_kernel_asym(float* vals, int group_size, int num_bits) +__global__ void fake_quantize_kernel_asym(float* vals, int group_size, int num_bits) { cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); @@ -699,35 +689,35 @@ __global__ void quantize_kernel_asym(float* vals, int group_size, int num_bits) } template -void launch_quantize_kernel_asym(T* vals, - int total_count, - int group_num, - int num_bits, - cudaStream_t stream) +void launch_fake_quantize_kernel_asym(T* vals, + int total_count, + int group_num, + int num_bits, + cudaStream_t 
stream) { dim3 grid_dim(group_num); dim3 block_dim(1024); - quantize_kernel_asym<<>>( + fake_quantize_kernel_asym<<>>( vals, (total_count / group_num) / 4, num_bits); } -template void launch_quantize_kernel_asym(float* vals, - int total_count, - int group_num, - int num_bits, - cudaStream_t stream); -template void launch_quantize_kernel_asym(__half* vals, - int total_count, - int group_num, - int num_bits, - cudaStream_t stream); - -__global__ void sr_quantize_kernel_asym(__half* vals, - int token_size, - int token_num, - int num_bits, - std::pair seed) +template void launch_fake_quantize_kernel_asym(float* vals, + int total_count, + int group_num, + int num_bits, + cudaStream_t stream); +template void launch_fake_quantize_kernel_asym(__half* vals, + int total_count, + int group_num, + int num_bits, + cudaStream_t stream); + +__global__ void sr_fake_quantize_kernel_asym(__half* vals, + int token_size, + int token_num, + int num_bits, + std::pair seed) { #if __CUDA_ARCH__ >= 700 || defined(__HIP_PLATFORM_HCC__) @@ -879,11 +869,11 @@ __global__ void sr_quantize_kernel_asym(__half* vals, #endif } -__global__ void sr_quantize_kernel_asym(float* vals, - int token_size, - int token_num, - int num_bits, - std::pair seed) +__global__ void sr_fake_quantize_kernel_asym(float* vals, + int token_size, + int token_num, + int num_bits, + std::pair seed) { cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); @@ -1010,11 +1000,11 @@ __global__ void sr_quantize_kernel_asym(float* vals, } } template -void launch_sr_quantize_kernel_asym(T* vals, - int total_count, - int group_num, - int num_bits, - cudaStream_t stream) +void launch_sr_fake_quantize_kernel_asym(T* vals, + int total_count, + int group_num, + int num_bits, + cudaStream_t stream) { dim3 block_dim(1024); dim3 grid_dim(group_num); @@ -1022,16 +1012,16 @@ void launch_sr_quantize_kernel_asym(T* vals, uint64_t inc = total_count / grid_dim.x / block_dim.x; std::pair seed = 
Context::Instance().IncrementOffset(inc); - sr_quantize_kernel<<>>( + sr_fake_quantize_kernel<<>>( vals, (total_count / group_num) / 4, group_num, num_bits, seed); } -template void launch_sr_quantize_kernel_asym(float* vals, - int total_count, - int group_num, - int num_bits, - cudaStream_t stream); -template void launch_sr_quantize_kernel_asym(__half* vals, - int total_count, - int group_num, - int num_bits, - cudaStream_t stream); +template void launch_sr_fake_quantize_kernel_asym(float* vals, + int total_count, + int group_num, + int num_bits, + cudaStream_t stream); +template void launch_sr_fake_quantize_kernel_asym(__half* vals, + int total_count, + int group_num, + int num_bits, + cudaStream_t stream); diff --git a/csrc/quantization/pt_binding.cpp b/csrc/quantization/pt_binding.cpp index f76c436..1465cab 100644 --- a/csrc/quantization/pt_binding.cpp +++ b/csrc/quantization/pt_binding.cpp @@ -1,7 +1,8 @@ #include #include +#include #include -#include "custom_cuda_layers.h" +#include "quantization.h" template at::Tensor ds_quantize(at::Tensor& vals, int groups, int bits) @@ -10,8 +11,8 @@ at::Tensor ds_quantize(at::Tensor& vals, int groups, int bits) int size = 1; for (auto dim : t_size) size *= dim; - if ((((size / groups) - 1) / 4096 + 1) <= MAX_REG) { - launch_quantize_kernel( + if ((((size / groups) - 1) / 4096 + 1) <= 256) { + launch_fake_quantize_kernel( (T*)vals.data_ptr(), size, groups, bits, at::cuda::getCurrentCUDAStream()); } return vals; @@ -25,7 +26,7 @@ at::Tensor ds_sr_quantize(at::Tensor& vals, int groups, int bits) for (auto dim : t_size) size *= dim; if (((size / groups) / 4 / 1024) <= 256) { - launch_sr_quantize_kernel( + launch_sr_fake_quantize_kernel( (T*)vals.data_ptr(), size, groups, bits, at::cuda::getCurrentCUDAStream()); } return vals; @@ -38,8 +39,8 @@ at::Tensor ds_quantize_asym(at::Tensor& vals, int groups, int bits) int size = 1; for (auto dim : t_size) size *= dim; - if ((((size / groups) - 1) / 4096 + 1) <= MAX_REG) { - 
launch_quantize_kernel_asym( + if ((((size / groups) - 1) / 4096 + 1) <= 256) { + launch_fake_quantize_kernel_asym( (T*)vals.data_ptr(), size, groups, bits, at::cuda::getCurrentCUDAStream()); } return vals; @@ -53,12 +54,83 @@ at::Tensor ds_sr_quantize_asym(at::Tensor& vals, int groups, int bits) for (auto dim : t_size) size *= dim; if (((size / groups) / 4 / 1024) <= 256) { - launch_sr_quantize_kernel_asym( + launch_sr_fake_quantize_kernel_asym( (T*)vals.data_ptr(), size, groups, bits, at::cuda::getCurrentCUDAStream()); } return vals; } +std::vector quantize_kernel(at::Tensor& input_vals, + int groups, + int numBits, + quantize::Type quantType) +{ + auto dtype = at::kFloat; + auto params_options = at::TensorOptions() + .dtype(dtype) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + const int param_elems = (quantize::requires_offset(quantType)) ? 2 : 1; + auto params = torch::empty({groups, param_elems}, params_options); + + auto output_options = at::TensorOptions() + .dtype(at::kChar) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + + auto output_sizes = input_vals.sizes().vec(); + output_sizes[output_sizes.size() - 1] /= numBits == 8 ? 1 : 2; + auto output = torch::empty(output_sizes, output_options); + + const int elems_per_group = at::numel(input_vals) / groups; + + launch_quant((int8_t*)output.data_ptr(), + (float*)params.data_ptr(), + (__half*)input_vals.data_ptr(), + groups, + elems_per_group, + numBits, + quantType, + at::cuda::getCurrentCUDAStream()); + + return {output, params}; +} + +template +at::Tensor dequantize(at::Tensor& quantized_data, + at::Tensor& params, + int groups, + int num_bits, + quantize::Type quant_type) +{ + auto dtype = (std::is_same::value) ? 
torch::kFloat32 : torch::kFloat16; + auto output_options = at::TensorOptions() + .dtype(dtype) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + + auto output_sizes = quantized_data.sizes().vec(); + output_sizes[output_sizes.size() - 1] *= num_bits == 8 ? 1 : 2; + auto output = torch::empty(output_sizes, output_options); + + const int total_elems = at::numel(output); + const int elems_per_group = total_elems / groups; + + launch_dequantize_kernel((T*)output.data_ptr(), + (const int8_t*)quantized_data.data_ptr(), + (const float*)params.data_ptr(), + quant_type, + num_bits, + elems_per_group, + total_elems, + at::cuda::getCurrentCUDAStream()); + + return output; +} + PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("ds_quantize_fp32", &ds_quantize, "DeepSpeed Quantize with fp32 (CUDA)"); @@ -74,4 +146,11 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) m.def("ds_sr_quantize_asym_fp16", &ds_sr_quantize_asym<__half>, "DeepSpeed Quantize with fp16 (CUDA)"); + pybind11::enum_(m, "QuantizationType") + .value("Symmetric", quantize::Type::Symmetric) + .value("Asymmetric", quantize::Type::Asymmetric) + .export_values(); + m.def("quantize", &quantize_kernel); + m.def("dequantize", &dequantize<__half>); + m.def("dequantize_fp32", &dequantize); } diff --git a/csrc/quantization/quantize.cu b/csrc/quantization/quantize.cu new file mode 100644 index 0000000..b9c925c --- /dev/null +++ b/csrc/quantization/quantize.cu @@ -0,0 +1,150 @@ +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + +#include "ds_kernel_utils.h" +#include "memory_access_utils.h" +#include "quantization.h" +#include "quantization_utils.h" +#include "reduction_utils.h" + +namespace cg = cooperative_groups; + +/* +Pure quantization kernel with no fusion. 
+*/ +template +__global__ void cached_quantization(int8_t* __restrict__ output_data, + float* __restrict__ params, + const __half* __restrict__ input_data, + int groups, + int elems_per_group) +{ + cg::thread_block tb = cg::this_thread_block(); + cg::thread_block_tile warp = cg::tiled_partition(tb); + + // Indexing offsets + const int block_offset = + (tb.group_index().x * (max_threads / threads_per_group) * elems_per_group) + + (tb.thread_index().y * elems_per_group); + const int elem_offset = tb.thread_index().x * quantize::h_per_load; + const int base_offset = block_offset + elem_offset; + const int stride = tb.size() * quantize::h_per_load; + + const __half* input_base = input_data + base_offset; //.. + + __half2 local_buffer[UNROLL * internal_unroll * quantize::h2_per_load]; + +#pragma unroll + for (int i = 0; i < UNROLL; i++) { + // Convenience helper, should resolve to register indices and not realize. + __half2* iteration_buffer = local_buffer + i * internal_unroll * quantize::h2_per_load; +#pragma unroll + for (int j = 0; j < internal_unroll; j++) { + const int iteration = i * internal_unroll + j; + mem_access::load_global( + iteration_buffer + j * quantize::h2_per_load, + input_base + iteration * stride, + elem_offset + iteration * stride < elems_per_group); + } + } + + quantize:: + local_array( + local_buffer, params, output_data, elems_per_group, groups); +} + +/********* Launcher methods ***********/ +#define LAUNCH_CACHED_QUANT_CALL(q_bits, quant_type) \ + cached_quantization \ + <<>>(output_data, params, input_data, groups, elems_per_group); + +#define LAUNCH_CACHED_QUANT( \ + q_bits, quant_type, unroll_factor_in, internal_unroll_in, threads_per_group_in) \ + const int unroll_factor = unroll_factor_in; \ + const int internal_unroll_l = internal_unroll_in; \ + const int threads_per_group = threads_per_group_in; \ + if (q_bits == 4) { \ + if (quant_type == quantize::Type::Asymmetric) { \ + LAUNCH_CACHED_QUANT_CALL(4, quantize::Type::Asymmetric) \ + } 
else { \ + LAUNCH_CACHED_QUANT_CALL(4, quantize::Type::Symmetric) \ + } \ + } else { \ + if (quant_type == quantize::Type::Asymmetric) { \ + LAUNCH_CACHED_QUANT_CALL(8, quantize::Type::Asymmetric) \ + } else { \ + LAUNCH_CACHED_QUANT_CALL(8, quantize::Type::Symmetric) \ + } \ + } + +void launch_quant(int8_t* output_data, + float* params, + const __half* input_data, + const int groups, + const int elems_per_group, + const int num_bits, + const quantize::Type quant_type, + cudaStream_t stream) +{ + constexpr int max_threads = 256; + + constexpr int internal_unroll = 2; + + const bool is_subblock_schedule = (elems_per_group <= 128) ? true : false; + const int h_per_step = is_subblock_schedule ? quantize::h_per_load + : quantize::h_per_load * internal_unroll; + + // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of + // warp-sized blocks rather than stepping up to 64/96 threads + const int one_step_threads = next_pow2((elems_per_group + h_per_step - 1) / h_per_step); + const int threads_per_group = (one_step_threads < max_threads) ? one_step_threads : max_threads; + + const int groups_per_block = + is_subblock_schedule ? 
(max_threads + threads_per_group - 1) / threads_per_group : 1; + const int groups_launch = (groups_per_block + groups - 1) / groups_per_block; + + dim3 block(threads_per_group, groups_per_block); + dim3 grid(groups_launch); + + const int elems_per_step = threads_per_group * h_per_step; + const int external_unroll = (elems_per_group + elems_per_step - 1) / elems_per_step; + + if (is_subblock_schedule) { + // <=128 + if (threads_per_group == 1) { + LAUNCH_CACHED_QUANT(num_bits, quant_type, 1, 1, 1); + } else if (threads_per_group == 2) { + LAUNCH_CACHED_QUANT(num_bits, quant_type, 1, 1, 2); + } else if (threads_per_group == 4) { + LAUNCH_CACHED_QUANT(num_bits, quant_type, 1, 1, 4); + } else if (threads_per_group == 8) { + LAUNCH_CACHED_QUANT(num_bits, quant_type, 1, 1, 8); + } else if (threads_per_group == 16) { + LAUNCH_CACHED_QUANT(num_bits, quant_type, 1, 1, 16); + } + } else if (external_unroll == 1) { + // 129 - 4096 elems + // (this can launch with 1-7 warps as well) + LAUNCH_CACHED_QUANT(num_bits, quant_type, 1, internal_unroll, max_threads); + } else if (external_unroll == 2) { + // 4097 - 8192 elems + LAUNCH_CACHED_QUANT(num_bits, quant_type, 2, internal_unroll, max_threads); + } else if (external_unroll == 3) { + // 8193 - 12288 elems + LAUNCH_CACHED_QUANT(num_bits, quant_type, 3, internal_unroll, max_threads); + } else if (external_unroll == 4) { + // 12289 - 16384 elems + LAUNCH_CACHED_QUANT(num_bits, quant_type, 4, internal_unroll, max_threads); + } +} diff --git a/csrc/random_ltd/gather_scatter.cu b/csrc/random_ltd/gather_scatter.cu new file mode 100644 index 0000000..8da7479 --- /dev/null +++ b/csrc/random_ltd/gather_scatter.cu @@ -0,0 +1,185 @@ +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + +#include "custom_cuda_layers.h" +#include "memory_access_utils.h" + +namespace cg = cooperative_groups; + +namespace td_data { +constexpr int granularity = 16; +} + +template +__global__ void gather_tokens_impl(T* retained_tokens, + const T* activations, + 
int32_t* gather_indices, + int32_t sampled_tokens, + int32_t channels, + int32_t read_batch_stride, + int32_t read_seq_stride, + int32_t write_batch_stride, + int32_t write_seq_stride) +{ + constexpr int mem_vals_t = td_data::granularity / sizeof(T); + + cg::thread_block tb = cg::this_thread_block(); + + const int gather_idx = gather_indices[tb.group_index().x * sampled_tokens + tb.group_index().y]; + + const int read_offset = read_batch_stride * tb.group_index().x + read_seq_stride * gather_idx; + const int write_offset = + write_batch_stride * tb.group_index().x + write_seq_stride * tb.group_index().y; + + for (int i = tb.thread_index().x * mem_vals_t; i < channels; i += blockDim.x * mem_vals_t) { + T local_data[mem_vals_t]; + mem_access::load_global(local_data, activations + read_offset + i); + mem_access::store_global(retained_tokens + write_offset + i, + local_data); + } +} + +template +void launch_gather_tokens(T* retained_tokens, + T* activations, + int32_t* gather_indices, + int32_t batch_size, + int32_t sampled_tokens, + int32_t channels, + int32_t read_batch_stride, + int32_t read_seq_stride, + int32_t write_batch_stride, + int32_t write_seq_stride, + cudaStream_t stream) +{ + constexpr int mem_vals_t = td_data::granularity / sizeof(T); + + const int load_steps = (channels + mem_vals_t - 1) / mem_vals_t; + const int threads = (load_steps >= 1024) ? 
1024 : load_steps; + + dim3 block(threads); + dim3 grid(batch_size, sampled_tokens); + + gather_tokens_impl<<>>(retained_tokens, + activations, + gather_indices, + sampled_tokens, + channels, + read_batch_stride, + read_seq_stride, + write_batch_stride, + write_seq_stride); +} + +template void launch_gather_tokens(float*, + float*, + int32_t*, + int32_t, + int32_t, + int32_t, + int32_t, + int32_t, + int32_t, + int32_t, + cudaStream_t); + +template void launch_gather_tokens<__half>(__half*, + __half*, + int32_t*, + int32_t, + int32_t, + int32_t, + int32_t, + int32_t, + int32_t, + int32_t, + cudaStream_t); + +template +__global__ void scatter_tokens_impl(T* all_activations, + const T* layer_activations, + int32_t* gather_indices, + int32_t retained_tokens, + int32_t channels, + int32_t read_batch_stride, + int32_t read_seq_stride, + int32_t write_batch_stride, + int32_t write_seq_stride) +{ + constexpr int mem_vals_t = td_data::granularity / sizeof(T); + + cg::thread_block tb = cg::this_thread_block(); + + const int gather_idx = + gather_indices[tb.group_index().x * retained_tokens + tb.group_index().y]; + + const int read_offset = + read_batch_stride * tb.group_index().x + read_seq_stride * tb.group_index().y; + const int write_offset = + write_batch_stride * tb.group_index().x + write_seq_stride * gather_idx; + + for (int i = tb.thread_index().x * mem_vals_t; i < channels; i += mem_vals_t * blockDim.x) { + T local_data[mem_vals_t]; + mem_access::load_global(local_data, + layer_activations + read_offset + i); + mem_access::store_global(all_activations + write_offset + i, + local_data); + } +} + +template +void launch_scatter_tokens(T* all_activations, + T* layer_activations, + int32_t* gather_indices, + int32_t batch_size, + int32_t sampled_tokens, + int32_t channels, + int32_t read_batch_stride, + int32_t read_seq_stride, + int32_t write_batch_stride, + int32_t write_seq_stride, + cudaStream_t stream) +{ + constexpr int mem_vals_t = td_data::granularity / 
sizeof(T); + + const int load_steps = (channels + mem_vals_t - 1) / mem_vals_t; + const int threads = (load_steps >= 1024) ? 1024 : load_steps; + + dim3 block(threads); + dim3 grid(batch_size, sampled_tokens); + + scatter_tokens_impl<<>>(all_activations, + layer_activations, + gather_indices, + sampled_tokens, + channels, + read_batch_stride, + read_seq_stride, + write_batch_stride, + write_seq_stride); +} + +template void launch_scatter_tokens(float*, + float*, + int32_t*, + int32_t, + int32_t, + int32_t, + int32_t, + int32_t, + int32_t, + int32_t, + cudaStream_t); + +template void launch_scatter_tokens<__half>(__half*, + __half*, + int32_t*, + int32_t, + int32_t, + int32_t, + int32_t, + int32_t, + int32_t, + int32_t, + cudaStream_t); diff --git a/csrc/random_ltd/pt_binding.cpp b/csrc/random_ltd/pt_binding.cpp new file mode 100644 index 0000000..54c41ca --- /dev/null +++ b/csrc/random_ltd/pt_binding.cpp @@ -0,0 +1,215 @@ +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + +#include +#include +#include "custom_cuda_layers.h" + +torch::Tensor token_sort_(torch::Tensor& unsorted_token_ids, int64_t original_tokens) +{ + const int layers = unsorted_token_ids.size(0); + const int batch_size = unsorted_token_ids.size(1); + const int reserved_tokens = unsorted_token_ids.size(2); + + launch_token_sort(unsorted_token_ids.data_ptr(), + layers, + batch_size, + reserved_tokens, + original_tokens, + c10::cuda::getCurrentCUDAStream()); + + return unsorted_token_ids; +} + +torch::Tensor token_gather(torch::Tensor& activations, + torch::Tensor& sorted_indices, + bool batch_first) +{ + // Activations may be in either [N, S, C] or [S, N, C] while sorted_indices is + // always in [N, retained] + /* + TORCH_CHECK(sorted_indices.size(0) == activations.size(0) || + sorted_indices.size(0) == activations.size(1), + "Unable to match the batch size of the sorted indices to the activation + shape."); TORCH_CHECK(activations.size(2) % 8 == 0, "Channels must be divisible by 8 to align + 
with vectorized loads."); + */ + // bool batch_first = sorted_indices.size(0) == activations.size(0); + + const int64_t dim_0 = (batch_first) ? sorted_indices.size(0) : sorted_indices.size(1); + const int64_t dim_1 = (batch_first) ? sorted_indices.size(1) : sorted_indices.size(0); + const int64_t dim_2 = activations.size(2); + + auto output = torch::empty({dim_0, dim_1, dim_2}, activations.options()); + + const int batch_size = sorted_indices.size(0); + const int channels = dim_2; + const int retained_tokens = sorted_indices.size(1); + const int read_batch_stride = (batch_first) ? activations.stride(0) : activations.stride(1); + const int read_seq_stride = (batch_first) ? activations.stride(1) : activations.stride(0); + const int write_batch_stride = (batch_first) ? output.stride(0) : output.stride(1); + const int write_seq_stride = (batch_first) ? output.stride(1) : output.stride(0); + + if (activations.options().dtype() == torch::kFloat) { + launch_gather_tokens((float*)output.data_ptr(), + (float*)activations.data_ptr(), + (int32_t*)sorted_indices.data_ptr(), + batch_size, + retained_tokens, + channels, + read_batch_stride, + read_seq_stride, + write_batch_stride, + write_seq_stride, + c10::cuda::getCurrentCUDAStream()); + } else { + launch_gather_tokens((__half*)output.data_ptr(), + (__half*)activations.data_ptr(), + (int32_t*)sorted_indices.data_ptr(), + batch_size, + retained_tokens, + channels, + read_batch_stride, + read_seq_stride, + write_batch_stride, + write_seq_stride, + c10::cuda::getCurrentCUDAStream()); + } + + return output; +} + +torch::Tensor token_scatter_(torch::Tensor& all_activations, + torch::Tensor& layer_activations, + torch::Tensor& sorted_indices, + bool batch_first) +{ + // Activations may be in either [N, S, C] or [S, N, C] while sorted_indices is + // always in [N, retained] + /* + TORCH_CHECK(sorted_indices.size(0) == all_activations.size(0) || + sorted_indices.size(0) == all_activations.size(1), + "Unable to match the batch size of 
the sorted indices to the activation + shape."); TORCH_CHECK(all_activations.size(2) % 8 != 0, "Channels must be divisible by 8 to + align with vectorized loads."); + */ + // bool batch_first = sorted_indices.size(0) == all_activations.size(0); + + const int batch_size = sorted_indices.size(0); + const int channels = all_activations.size(2); + const int retained_tokens = sorted_indices.size(1); + const int read_batch_stride = (batch_first) ? layer_activations.stride(0) + : layer_activations.stride(1); + const int read_seq_stride = (batch_first) ? layer_activations.stride(1) + : layer_activations.stride(0); + const int write_batch_stride = (batch_first) ? all_activations.stride(0) + : all_activations.stride(1); + const int write_seq_stride = (batch_first) ? all_activations.stride(1) + : all_activations.stride(0); + + if (all_activations.options().dtype() == torch::kFloat) { + launch_scatter_tokens((float*)all_activations.data_ptr(), + (float*)layer_activations.data_ptr(), + (int32_t*)sorted_indices.data_ptr(), + batch_size, + retained_tokens, + channels, + read_batch_stride, + read_seq_stride, + write_batch_stride, + write_seq_stride, + c10::cuda::getCurrentCUDAStream()); + } else { + launch_scatter_tokens((__half*)all_activations.data_ptr(), + (__half*)layer_activations.data_ptr(), + (int32_t*)sorted_indices.data_ptr(), + batch_size, + retained_tokens, + channels, + read_batch_stride, + read_seq_stride, + write_batch_stride, + write_seq_stride, + c10::cuda::getCurrentCUDAStream()); + } + + return all_activations; +} + +torch::Tensor mask_gather_bert(torch::Tensor& dense_mask, torch::Tensor& sorted_indices) +{ + // TORCH_CHECK(dense_mask.dim() == 4) + + const int batch_size = dense_mask.size(0); + const int layers = sorted_indices.size(0); + /* + TORCH_CHECK(layers * batch_size == sorted_indices.size(0), + "Mismatch between the indices and the mask"); + */ + const int orig_seq_len = dense_mask.size(3); + const int truncated_seq_len = sorted_indices.size(2); + + auto 
output = torch::empty({layers, batch_size, 1, truncated_seq_len, truncated_seq_len}, + dense_mask.options()); + + if (dense_mask.options().dtype() == torch::kFloat) { + launch_slice_bert_mask((float*)output.data_ptr(), + (const float*)dense_mask.data_ptr(), + (const int32_t*)sorted_indices.data_ptr(), + layers, + batch_size, + truncated_seq_len, + orig_seq_len, + c10::cuda::getCurrentCUDAStream()); + } else { + launch_slice_bert_mask((__half*)output.data_ptr(), + (const __half*)dense_mask.data_ptr(), + (const int32_t*)sorted_indices.data_ptr(), + layers, + batch_size, + truncated_seq_len, + orig_seq_len, + c10::cuda::getCurrentCUDAStream()); + } + + return output; +} + +torch::Tensor mask_gather_gpt(torch::Tensor dense_mask, int truncated_seq_len) +{ + // TORCH_CHECK(dense_mask.dim() == 4) + + const int batch_size = dense_mask.size(0); + const int orig_seq_len = dense_mask.size(3); + + auto output = + torch::empty({batch_size, 1, truncated_seq_len, truncated_seq_len}, dense_mask.options()); + + if (dense_mask.options().dtype() == torch::kFloat) { + launch_slice_gpt_mask((float*)output.data_ptr(), + (const float*)dense_mask.data_ptr(), + batch_size, + truncated_seq_len, + orig_seq_len, + c10::cuda::getCurrentCUDAStream()); + } else { + launch_slice_gpt_mask((__half*)output.data_ptr(), + (const __half*)dense_mask.data_ptr(), + batch_size, + truncated_seq_len, + orig_seq_len, + c10::cuda::getCurrentCUDAStream()); + } + + return output; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("token_sort_", &token_sort_, "Comparison free sorting algorithm (CUDA)"); + m.def("token_gather", &token_gather, "Parallel gather of tokens (CUDA)"); + m.def("token_scatter_", &token_scatter_, "Parallel scatter of tokens (CUDA)"); + m.def("mask_gather_bert", &mask_gather_bert, "Token-based mask gather for BERT masking (CUDA)"); + m.def("mask_gather_gpt", &mask_gather_gpt, "Token-based mask gather for GPT masking (CUDA)"); +} diff --git a/csrc/random_ltd/slice_attn_masks.cu 
b/csrc/random_ltd/slice_attn_masks.cu new file mode 100644 index 0000000..63d005c --- /dev/null +++ b/csrc/random_ltd/slice_attn_masks.cu @@ -0,0 +1,127 @@ +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + +#include "custom_cuda_layers.h" +#include "memory_access_utils.h" + +namespace cg = cooperative_groups; + +template +__global__ void slice_gpt_mask_impl(T* output_mask, + const T* input_mask, + int truncated_seq_len, + int orig_seq_len) +{ + const int in_batch_stride = orig_seq_len * orig_seq_len; + const int out_batch_stride = truncated_seq_len * truncated_seq_len; + + cg::thread_block tb = cg::this_thread_block(); + + const T* input_mask_block = + input_mask + blockIdx.x * in_batch_stride + blockIdx.y * orig_seq_len; + T* output_mask_block = + output_mask + blockIdx.x * out_batch_stride + blockIdx.y * truncated_seq_len; + + for (int i = tb.thread_index().x; i < truncated_seq_len; i += blockDim.x) { + output_mask_block[i] = input_mask_block[i]; + } +} + +template +void launch_slice_gpt_mask(T* output_mask, + const T* input_mask, + int batch_size, + int truncated_seq_len, + int orig_seq_len, + cudaStream_t stream) +{ + const int threads = (truncated_seq_len >= 1024) ? 
1024 : truncated_seq_len; + + dim3 block(threads); + dim3 grid(batch_size, truncated_seq_len); + + slice_gpt_mask_impl + <<>>(output_mask, input_mask, truncated_seq_len, orig_seq_len); +} + +template void launch_slice_gpt_mask(float*, const float*, int, int, int, cudaStream_t); + +template void launch_slice_gpt_mask<__half>(__half*, const __half*, int, int, int, cudaStream_t); + +template +__global__ void slice_bert_mask_impl(T* output_mask, + const T* input_mask, + const int32_t* retained_indices, + int32_t truncated_seq_len, + int32_t orig_seq_len) +{ + const int in_batch_stride = orig_seq_len * orig_seq_len; + const int out_batch_stride = truncated_seq_len * truncated_seq_len; + const int out_layer_stride = out_batch_stride * gridDim.y; + + cg::thread_block tb = cg::this_thread_block(); + + const int out_layer_offset = tb.group_index().x * out_layer_stride; + + const int in_batch_offset = tb.group_index().y * in_batch_stride; + const int out_batch_offset = tb.group_index().y * out_batch_stride; + + const int32_t gather_row = + retained_indices[tb.group_index().y * truncated_seq_len + tb.group_index().z]; + const int in_seq_offset = gather_row * orig_seq_len; + const int out_seq_offset = tb.group_index().z * truncated_seq_len; + + const T* in_sequence = input_mask + in_batch_offset + in_seq_offset; + T* out_sequence = output_mask + out_layer_offset + out_batch_offset + out_seq_offset; + const int32_t* gather_data = retained_indices + tb.group_index().y * truncated_seq_len; + + for (int i = tb.thread_index().x; i < truncated_seq_len; i += blockDim.x) { + out_sequence[i] = in_sequence[gather_data[i]]; + } +} + +/* +Since the Bert mask is not causal like GPT, we can't just generate a set of +masks for the entire model based off a single layer sample. 
+ +We map the kernel as follows: +z-dimension: layer +y-dimension: batch +x-dimension: sequence_offset +*/ +template +void launch_slice_bert_mask(T* output_mask, + const T* input_mask, + const int32_t* retained_indices, + int32_t layers, + int32_t batch_size, + int32_t truncated_seq_len, + int32_t orig_seq_len, + cudaStream_t stream) +{ + const int threads = (truncated_seq_len >= 1024) ? 1024 : truncated_seq_len; + dim3 block(threads); + dim3 grid(layers, batch_size, truncated_seq_len); + + slice_bert_mask_impl<<>>( + output_mask, input_mask, retained_indices, truncated_seq_len, orig_seq_len); +} + +template void launch_slice_bert_mask(float*, + const float*, + const int32_t*, + int32_t, + int32_t, + int32_t, + int32_t, + cudaStream_t); + +template void launch_slice_bert_mask<__half>(__half*, + const __half*, + const int32_t*, + int32_t, + int32_t, + int32_t, + int32_t, + cudaStream_t); diff --git a/csrc/random_ltd/token_sort.cu b/csrc/random_ltd/token_sort.cu new file mode 100644 index 0000000..d260211 --- /dev/null +++ b/csrc/random_ltd/token_sort.cu @@ -0,0 +1,193 @@ +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + +#include +#include "custom_cuda_layers.h" +#include "memory_access_utils.h" + +namespace cg = cooperative_groups; + +namespace td_sort { +constexpr int threads = 512; +constexpr int granularity = 16; +constexpr int mem_vals = granularity / sizeof(int32_t); +constexpr int max_buffer_size = (threads + 1) * mem_vals; + +#ifdef __HIP_PLATFORM_HCC__ +constexpr int warp_size = 64; +#else +constexpr int warp_size = 32; +#endif + +constexpr int max_warps = threads / warp_size; +} // namespace td_sort + +template +__global__ void scan_sort(int32_t* data, int reserved_tokens, int original_tokens) +{ + cg::thread_block tb = cg::this_thread_block(); + cg::thread_block_tile warp = cg::tiled_partition(tb); + + __shared__ int32_t indices_buffer[td_sort::max_buffer_size]; + __shared__ int32_t intermediate_buffer[td_sort::max_warps]; + __shared__ int32_t 
sorted_indices_buffer[td_sort::max_buffer_size]; + + for (int i = tb.thread_index().x * td_sort::mem_vals; i < original_tokens + 1; + i += tb.group_dim().x * td_sort::mem_vals) { + uint32_t zeros[td_sort::mem_vals] = {0, 0, 0, 0}; + mem_access::store_shared(indices_buffer + i, zeros); + } + + int32_t local_vals[VALS_PER_THREAD]; + + // We flatten layers/batch into a single indexing dimension + int32_t* data_block = data + tb.group_index().x * reserved_tokens; + + // The next two loops really could be fused for a more logical code layout, but don't want to + // move the barrier forward +#pragma unroll + for (int i = 0; i < VALS_PER_THREAD; i++) { + const int iter_idx = i * td_sort::threads + tb.thread_index().x; + if (iter_idx < reserved_tokens) { + mem_access::load_global(local_vals + i, data_block + iter_idx); + } else { + local_vals[i] = 0; + } + } + + tb.sync(); + +#pragma unroll + for (int i = 0; i < VALS_PER_THREAD; i++) { + const int iter_idx = i * td_sort::threads + tb.thread_index().x; + if (iter_idx < reserved_tokens) { + const int32_t one = 1; + mem_access::store_shared(indices_buffer + local_vals[i], &one); + } + } + + tb.sync(); + + int32_t local_input[td_sort::mem_vals]; + mem_access::load_shared( + local_input, indices_buffer + tb.thread_index().x * td_sort::mem_vals); + + int32_t reduce_vals[td_sort::mem_vals]; + reduce_vals[0] = local_input[0]; + +#pragma unroll + for (int i = 1; i < td_sort::mem_vals; i++) { + reduce_vals[i] = local_input[i] + reduce_vals[i - 1]; + } + + int32_t step_1_val = reduce_vals[td_sort::mem_vals - 1]; + // Short span exclusive scan algorithm (less work efficient) +#pragma unroll + for (int i = 1; i < td_sort::warp_size; i *= 2) { + int32_t step_val = warp.shfl_up(step_1_val, i); + step_1_val = (warp.thread_rank() < i) ? 
step_1_val : step_1_val + step_val; + } + + if (warp.thread_rank() == td_sort::warp_size - 1) { + mem_access::store_shared(intermediate_buffer + warp.meta_group_rank(), + &step_1_val); + } + + tb.sync(); + + if (warp.meta_group_rank() == 0) { + int32_t step_2_val = 0; + if (warp.thread_rank() < td_sort::max_warps) { + mem_access::load_shared(&step_2_val, + intermediate_buffer + warp.thread_rank()); + } + +#pragma unroll + for (int i = 1; i < td_sort::warp_size; i *= 2) { + int32_t step_val = warp.shfl_up(step_2_val, i); + step_2_val = (warp.thread_rank() < i) ? step_2_val : step_2_val + step_val; + } + + if (warp.thread_rank() < td_sort::max_warps) { + mem_access::store_shared(intermediate_buffer + warp.thread_rank(), + &step_2_val); + } + } + + tb.sync(); + + int step_2_val = 0; + if (warp.meta_group_rank() > 0) { + mem_access::load_shared(&step_2_val, + intermediate_buffer + warp.meta_group_rank() - 1); + } + + const int thread_offset = reduce_vals[td_sort::mem_vals - 1]; + +#pragma unroll + for (int i = 0; i < td_sort::mem_vals; i++) { + reduce_vals[i] += step_1_val + step_2_val - thread_offset; + } + mem_access::store_shared( + indices_buffer + tb.thread_index().x * td_sort::mem_vals, reduce_vals); + + if (tb.thread_index().x == 0) { + indices_buffer[original_tokens] = original_tokens - indices_buffer[original_tokens]; + } + tb.sync(); + + for (int i = 0; i < VALS_PER_THREAD; i++) { + const int iter_idx = i * td_sort::threads + tb.thread_index().x; + if (iter_idx < reserved_tokens) { + if (local_vals[i] == 0) { + int zero = 0; + mem_access::store_shared(sorted_indices_buffer, &zero); + } else { + int sorted_idx; + mem_access::load_shared(&sorted_idx, + indices_buffer + local_vals[i] - 1); + mem_access::store_shared(sorted_indices_buffer + sorted_idx, + local_vals + i); + } + } + } + + tb.sync(); + +#pragma unroll + for (int i = 0; i < VALS_PER_THREAD; i++) { + const int iter_idx = i * td_sort::threads + tb.thread_index().x; + if (iter_idx < reserved_tokens) { + 
int32_t store_val; + mem_access::load_shared(&store_val, sorted_indices_buffer + iter_idx); + mem_access::store_global(data_block + iter_idx, &store_val); + } + } +} + +void launch_token_sort(int32_t* indices, + int layers, + int batch_size, + int reserved_size, + int original_tokens, + cudaStream_t stream) +{ + // Each sort is completely independent, can flatten this dimension + dim3 grid(layers * batch_size); + dim3 block(td_sort::threads); + + const int vals_per_thread = (reserved_size + td_sort::threads - 1) / td_sort::threads; + + if (vals_per_thread == 1) { + scan_sort<1><<>>(indices, reserved_size, original_tokens); + } else if (vals_per_thread == 2) { + scan_sort<2><<>>(indices, reserved_size, original_tokens); + } else if (vals_per_thread == 3) { + scan_sort<3><<>>(indices, reserved_size, original_tokens); + } else if (vals_per_thread == 4) { + scan_sort<4><<>>(indices, reserved_size, original_tokens); + } else { + assert(false); + } +} diff --git a/csrc/spatial/csrc/opt_bias_add.cu b/csrc/spatial/csrc/opt_bias_add.cu new file mode 100644 index 0000000..dfcb92f --- /dev/null +++ b/csrc/spatial/csrc/opt_bias_add.cu @@ -0,0 +1,148 @@ +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + +#include +#include "memory_access_utils.h" +#include "spatial_cuda_layers.h" + +/* +Fused bias add variants +*/ + +namespace badd_opt { +constexpr int threads = 256; +constexpr int steps = 2; +constexpr int granularity = 16; +constexpr int vals_per_h = granularity / sizeof(__half); +constexpr int vals_per_h2 = granularity / sizeof(__half2); +constexpr int vals_per_block = threads * steps * vals_per_h; +constexpr int stride = vals_per_h * threads; +} // namespace badd_opt + +__global__ void opt_bias_add(__half* result, + const __half* activation, + const __half* bias, + int seq_len, + int channels) +{ + const int id = blockIdx.x * badd_opt::vals_per_block + threadIdx.x * badd_opt::vals_per_h; + const int stride = badd_opt::vals_per_h * badd_opt::threads; + + for (int i = 0; 
i < badd_opt::steps; i++) { + if (id + i * badd_opt::stride < seq_len * channels) { + __half2 act_buffer[badd_opt::vals_per_h2]; + __half2 bias_buffer[badd_opt::vals_per_h2]; + + mem_access::load_global(act_buffer, + activation + id + i * stride); + mem_access::load_global(bias_buffer, + bias + ((id + i * stride) % channels)); + + for (int j = 0; j < badd_opt::vals_per_h2; j++) { act_buffer[j] += bias_buffer[j]; } + + mem_access::store_global(result + id + i * stride, act_buffer); + } + } +} + +__global__ void opt_bias_add_add(__half* result, + const __half* activation, + const __half* bias, + const __half* other, + int seq_len, + int channels) +{ + const int id = blockIdx.x * badd_opt::vals_per_block + threadIdx.x * badd_opt::vals_per_h; + const int stride = badd_opt::vals_per_h * badd_opt::threads; + + for (int i = 0; i < badd_opt::steps; i++) { + if (id + i * badd_opt::stride < seq_len * channels) { + __half2 act_buffer[badd_opt::vals_per_h2]; + __half2 bias_buffer[badd_opt::vals_per_h2]; + __half2 other_buffer[badd_opt::vals_per_h2]; + + mem_access::load_global(act_buffer, + activation + id + i * stride); + mem_access::load_global(bias_buffer, + bias + ((id + i * stride) % channels)); + mem_access::load_global(other_buffer, other + id + i * stride); + + for (int j = 0; j < badd_opt::vals_per_h2; j++) { + act_buffer[j] += bias_buffer[j] + other_buffer[j]; + } + + mem_access::store_global(result + id + i * stride, act_buffer); + } + } +} + +__global__ void opt_bias_add_bias_add(__half* result, + const __half* activation, + const __half* bias, + const __half* other, + const __half* other_bias, + int seq_len, + int channels) +{ + const int id = blockIdx.x * badd_opt::vals_per_block + threadIdx.x * badd_opt::vals_per_h; + const int stride = badd_opt::vals_per_h * badd_opt::threads; + + for (int i = 0; i < badd_opt::steps; i++) { + if (id + i * badd_opt::stride < seq_len * channels) { + __half2 act_buffer[badd_opt::vals_per_h2]; + __half2 
bias_buffer[badd_opt::vals_per_h2]; + __half2 other_buffer[badd_opt::vals_per_h2]; + __half2 other_bias_buffer[badd_opt::vals_per_h2]; + + mem_access::load_global(act_buffer, + activation + id + i * stride); + mem_access::load_global(bias_buffer, + bias + ((id + i * stride) % channels)); + mem_access::load_global(other_buffer, other + id + i * stride); + mem_access::load_global( + other_bias_buffer, other_bias + ((id + i * stride) % channels)); + + for (int j = 0; j < badd_opt::vals_per_h2; j++) { + act_buffer[j] = + (act_buffer[j] + bias_buffer[j]) + (other_buffer[j] + other_bias_buffer[j]); + } + + mem_access::store_global(result + id + i * stride, act_buffer); + } + } +} + +void launch_opt_bias_add(__half* result, + const __half* activation, + const __half* bias, + const __half* other, + const __half* other_bias, + int batch_size, + int seq_len, + int channels, + cudaStream_t stream) +{ + // Should evaluate `true` for reasonable hidden sizes + assert(channels % badd_opt::vals_per_h == 0); + + const int effective_seq_len = batch_size * seq_len; + const int vals = effective_seq_len * channels; + + dim3 block(badd_opt::threads); + dim3 grid((vals + badd_opt::vals_per_block - 1) / badd_opt::vals_per_block); + + if (!other) { + // We shouldn't have a bias if there's no activation + assert(!other_bias); + + opt_bias_add<<>>( + result, activation, bias, effective_seq_len, channels); + } else if (!other_bias) { + opt_bias_add_add<<>>( + result, activation, bias, other, effective_seq_len, channels); + } else { + opt_bias_add_bias_add<<>>( + result, activation, bias, other, other_bias, effective_seq_len, channels); + } +} diff --git a/csrc/spatial/csrc/pt_binding.cpp b/csrc/spatial/csrc/pt_binding.cpp new file mode 100644 index 0000000..3234b06 --- /dev/null +++ b/csrc/spatial/csrc/pt_binding.cpp @@ -0,0 +1,111 @@ +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + +#include +#include +#include +#include +#include "spatial_cuda_layers.h" + +ChannelsLastProblem 
dimension_problem(at::Tensor& input) +{ + ChannelsLastProblem dims; + + if (input.dim() == 4) { + // In some sense this is unsafe (and a reflection of the assumptions made inside + // the C10 options checker). Basically, there's no great way to be sure that + // a tensor is in channels last because a 1x1 image will appear to be in channels + // last even when it isn't. + assert(input.is_contiguous(at::MemoryFormat::ChannelsLast)); + dims.batch_size = input.size(0); + dims.seq_len = input.size(2) * input.size(3); + dims.channels = input.size(1); + } else { + assert(input.is_contiguous()); + dims.batch_size = input.size(0); + dims.seq_len = input.size(1); + dims.channels = input.size(2); + } + + return dims; +} + +at::Tensor seq_unroll_bias_add(at::Tensor& input, at::Tensor& bias) +{ + assert(input.dtype() == at::kHalf); + + // TODO(cmikeh2): Should probably refactor this into a more portable + // description, since it does generalize for channels-last + ChannelsLastProblem problem = dimension_problem(input); + + auto output = at::empty_like(input); + + launch_opt_bias_add((__half*)output.data_ptr(), + (const __half*)input.data_ptr(), + (const __half*)bias.data_ptr(), + nullptr, + nullptr, + problem.batch_size, + problem.seq_len, + problem.channels, + at::cuda::getCurrentCUDAStream()); + + return output; +} + +at::Tensor seq_bias_add_add(at::Tensor& input, at::Tensor& bias, at::Tensor& other) +{ + assert(input.dtype() == at::kHalf); + + // TODO(cmikeh2): Should probably refactor this into a more portable + // description, since it does generalize for channels-last + ChannelsLastProblem problem = dimension_problem(input); + + auto output = at::empty_like(input); + + launch_opt_bias_add((__half*)output.data_ptr(), + (const __half*)input.data_ptr(), + (const __half*)bias.data_ptr(), + (const __half*)other.data_ptr(), + nullptr, + problem.batch_size, + problem.seq_len, + problem.channels, + at::cuda::getCurrentCUDAStream()); + + return output; +} + +at::Tensor 
seq_bias_add_bias_add(at::Tensor& input, + at::Tensor& bias, + at::Tensor& other, + at::Tensor& other_bias) +{ + assert(input.dtype() == at::kHalf); + + // TODO(cmikeh2): Should probably refactor this into a more portable + // description, since it does generalize for channels-last + ChannelsLastProblem problem = dimension_problem(input); + + auto output = at::empty_like(input); + + launch_opt_bias_add((__half*)output.data_ptr(), + (const __half*)input.data_ptr(), + (const __half*)bias.data_ptr(), + (const __half*)other.data_ptr(), + (const __half*)other_bias.data_ptr(), + problem.batch_size, + problem.seq_len, + problem.channels, + at::cuda::getCurrentCUDAStream()); + + return output; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("nhwc_bias_add", &seq_unroll_bias_add); + m.def("nhwc_bias_add_add", &seq_bias_add_add); + m.def("nhwc_bias_add_bias_add", &seq_bias_add_bias_add); +} diff --git a/csrc/spatial/includes/spatial_cuda_layers.h b/csrc/spatial/includes/spatial_cuda_layers.h new file mode 100644 index 0000000..9948153 --- /dev/null +++ b/csrc/spatial/includes/spatial_cuda_layers.h @@ -0,0 +1,36 @@ +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + +#pragma once + +#if __CUDA_ARCH__ >= 530 +#define HALF_PRECISION_AVAILABLE = 1 +#endif + +#ifdef __HIPCC__ +#include +#else +#include +#endif + +#include +#include + +/*********** Group Norm Kernels, Structs, and Helpers ************/ + +struct { + int64_t batch_size; + int64_t seq_len; + int64_t channels; +} typedef ChannelsLastProblem; + +void launch_opt_bias_add(__half* result, + const __half* activation, + const __half* bias, + const __half* other, + const __half* other_bias, + int batch_size, + int seq_len, + int channels, + cudaStream_t stream); diff --git a/csrc/transformer/cublas_wrappers.cu b/csrc/transformer/cublas_wrappers.cu index 75ecd3f..97ecdd6 100644 --- a/csrc/transformer/cublas_wrappers.cu +++ b/csrc/transformer/cublas_wrappers.cu @@ -1,3 +1,7 @@ +/* +Copyright The Microsoft 
DeepSpeed Team +*/ + #include "cublas_wrappers.h" #ifdef __HIP_PLATFORM_HCC__ diff --git a/csrc/transformer/dropout_kernels.cu b/csrc/transformer/dropout_kernels.cu old mode 100644 new mode 100755 index d1ba135..b2f2abc --- a/csrc/transformer/dropout_kernels.cu +++ b/csrc/transformer/dropout_kernels.cu @@ -1,3 +1,7 @@ +/* +Copyright The Microsoft DeepSpeed Team +*/ + #include "custom_cuda_layers.h" const int unroll_factor = 4; diff --git a/csrc/transformer/ds_transformer_cuda.cpp b/csrc/transformer/ds_transformer_cuda.cpp index 0e86322..d2a0f6e 100644 --- a/csrc/transformer/ds_transformer_cuda.cpp +++ b/csrc/transformer/ds_transformer_cuda.cpp @@ -113,7 +113,6 @@ BertTransformerLayer::BertTransformerLayer(unsigned layer_id, _seq_length, _hidden_size / _heads, //(T(1.0) / T(sqrt(_hidden_size / _heads))), - //aiss debug 0506 (T(1.0 / (sqrt(_hidden_size / _heads)))), T(0.0), CUBLAS_OP_T, diff --git a/csrc/transformer/gelu_kernels.cu b/csrc/transformer/gelu_kernels.cu index d683cf0..1f113a9 100644 --- a/csrc/transformer/gelu_kernels.cu +++ b/csrc/transformer/gelu_kernels.cu @@ -1,3 +1,7 @@ +/* +Copyright The Microsoft DeepSpeed Team +*/ + #include "custom_cuda_layers.h" inline __device__ float gelu(const float x) diff --git a/csrc/transformer/general_kernels.cu b/csrc/transformer/general_kernels.cu index 1eaa94e..ea54910 100644 --- a/csrc/transformer/general_kernels.cu +++ b/csrc/transformer/general_kernels.cu @@ -1,3 +1,7 @@ +/* +Copyright The Microsoft DeepSpeed Team +*/ + #include "general_kernels.h" namespace cg = cooperative_groups; diff --git a/csrc/transformer/inference/csrc/apply_rotary_pos_emb.cu b/csrc/transformer/inference/csrc/apply_rotary_pos_emb.cu index 175854b..0be4635 100644 --- a/csrc/transformer/inference/csrc/apply_rotary_pos_emb.cu +++ b/csrc/transformer/inference/csrc/apply_rotary_pos_emb.cu @@ -1,9 +1,14 @@ -#include "custom_cuda_layers.h" +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + +#include "inference_cuda_layers.h" #ifndef 
__HIP_PLATFORM_HCC__ #include #endif +namespace cg = cooperative_groups; namespace cg = cooperative_groups; __global__ void apply_rotary_pos_emb(float* mixed_query, @@ -13,7 +18,8 @@ __global__ void apply_rotary_pos_emb(float* mixed_query, unsigned seq_offset, unsigned num_heads, unsigned head_size, - unsigned total_count) + unsigned total_count, + int max_out_tokens) { cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile g = cg::tiled_partition(b); @@ -26,13 +32,15 @@ __global__ void apply_rotary_pos_emb(float* mixed_query, unsigned offset = head_id * head_size; unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; + unsigned seq_index = head_id % seq_len; + unsigned k_offset = (seq_index + (head_id / seq_len) * max_out_tokens) * head_size; if (head_id < total_count) { while (lane < rotary_dim) { float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim; inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; float q = mixed_query[offset + lane]; - float k = key_layer[offset + lane]; + float k = key_layer[k_offset + lane]; float rotary_sign = (lane % 2 == 1 ? 
-1.0 : 1.0); float q_rot = (q * rotary_sign); float k_rot = (k * rotary_sign); @@ -42,7 +50,7 @@ __global__ void apply_rotary_pos_emb(float* mixed_query, k = k * cosf(inv_freq) + k_rot * sinf(inv_freq); mixed_query[offset + lane] = q; - key_layer[offset + lane] = k; + key_layer[k_offset + lane] = k; lane += WARP_SIZE; } @@ -56,9 +64,9 @@ __global__ void apply_rotary_pos_emb(__half* mixed_query, unsigned seq_offset, unsigned num_heads, unsigned head_size, - unsigned total_count) + unsigned total_count, + int max_out_tokens) { -#if __CUDA_ARCH__ >= 700 cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile g = cg::tiled_partition(b); @@ -70,13 +78,15 @@ __global__ void apply_rotary_pos_emb(__half* mixed_query, unsigned offset = head_id * head_size; unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; + unsigned seq_index = head_id % seq_len; + unsigned k_offset = (seq_index + (head_id / seq_len) * max_out_tokens) * head_size; if (head_id < total_count) { while (lane < rotary_dim) { float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim; inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; float q = (float)mixed_query[offset + lane]; - float k = (float)key_layer[offset + lane]; + float k = (float)key_layer[k_offset + lane]; float rotary_sign = (lane % 2 == 1 ? 
-1.0 : 1.0); float q_rot = (q * rotary_sign); float k_rot = (k * rotary_sign); @@ -86,12 +96,11 @@ __global__ void apply_rotary_pos_emb(__half* mixed_query, k = k * cosf(inv_freq) + k_rot * sinf(inv_freq); mixed_query[offset + lane] = (__half)q; - key_layer[offset + lane] = (__half)k; + key_layer[k_offset + lane] = (__half)k; lane += WARP_SIZE; } } -#endif } __global__ void apply_rotary_pos_emb1(float* mixed_query, float* key_layer, @@ -100,7 +109,8 @@ __global__ void apply_rotary_pos_emb1(float* mixed_query, unsigned seq_offset, unsigned num_heads, unsigned head_size, - unsigned total_count) + unsigned total_count, + int max_out_tokens) { cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile g = cg::tiled_partition(b); @@ -113,13 +123,15 @@ __global__ void apply_rotary_pos_emb1(float* mixed_query, unsigned offset = head_id * head_size; unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; + unsigned seq_index = head_id % seq_len; + unsigned k_offset = (seq_index + (head_id / seq_len) * max_out_tokens) * head_size; if (head_id < total_count) { while (lane < rotary_dim) { float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim; inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; float q = mixed_query[offset + lane]; - float k = key_layer[offset + lane]; + float k = key_layer[k_offset + lane]; float rotary_sign = (lane % 2 == 1 ? 
-1.0 : 1.0); float q_rot = (q * rotary_sign); float k_rot = (k * rotary_sign); @@ -129,7 +141,7 @@ __global__ void apply_rotary_pos_emb1(float* mixed_query, k = k * cosf(inv_freq) + k_rot * sinf(inv_freq); mixed_query[offset + lane] = q; - key_layer[offset + lane] = k; + key_layer[k_offset + lane] = k; lane += WARP_SIZE; } @@ -142,9 +154,9 @@ __global__ void apply_rotary_pos_emb1(__half* mixed_query, unsigned seq_offset, unsigned num_heads, unsigned head_size, - unsigned total_count) + unsigned total_count, + int max_out_tokens) { -#if __CUDA_ARCH__ >= 700 cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile g = cg::tiled_partition(b); @@ -153,7 +165,9 @@ __global__ void apply_rotary_pos_emb1(__half* mixed_query, int lane = id & 0x1f; unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid; + unsigned seq_index = head_id % seq_len; unsigned offset = head_id * head_size; + unsigned k_offset = (seq_index + (head_id / seq_len) * max_out_tokens) * head_size; constexpr unsigned mask[32] = { 0x1 | 0x1000, 0x2 | 0x2000, 0x4 | 0x4000, 0x8 | 0x8000, 0x10 | 0x10000, @@ -164,14 +178,14 @@ __global__ void apply_rotary_pos_emb1(__half* mixed_query, 0x2000000, 0x4000000, 0x8000000, 0x10000000, 0x20000000, 0x40000000, 0x80000000}; - unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; + unsigned seq_id = (head_id % seq_len) + seq_offset; unsigned half_dim = rotary_dim >> 1; if (head_id < total_count) { while (lane < rotary_dim) { float inv_freq = (float)((lane % half_dim) * 2) / (float)rotary_dim; inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; float q = (float)mixed_query[offset + lane]; - float k = (float)key_layer[offset + lane]; + float k = (float)key_layer[k_offset + lane]; float rotary_sign = (lane > (half_dim - 1) ? 
-1.0 : 1.0); float q_rot = (q * rotary_sign); float k_rot = (k * rotary_sign); @@ -183,12 +197,11 @@ __global__ void apply_rotary_pos_emb1(__half* mixed_query, k = k * cosf(inv_freq) + k_rot_tmp * sinf(inv_freq); mixed_query[offset + lane] = (__half)q; - key_layer[offset + lane] = (__half)k; + key_layer[k_offset + lane] = (__half)k; lane += WARP_SIZE; } } -#endif } template @@ -202,17 +215,32 @@ void launch_apply_rotary_pos_emb(T* mixed_query, unsigned batch, bool rotate_half, bool rotate_every_two, - cudaStream_t stream) + cudaStream_t stream, + int max_out_tokens) { int total_count = batch * num_heads * seq_len; dim3 block_dims(1024); dim3 grid_dims((total_count - 1) / MAX_WARP_NUM + 1); // (batch_size); if (rotate_every_two) - apply_rotary_pos_emb<<>>( - mixed_query, key_layer, rotary_dim, seq_len, offset, num_heads, head_size, total_count); + apply_rotary_pos_emb<<>>(mixed_query, + key_layer, + rotary_dim, + seq_len, + offset, + num_heads, + head_size, + total_count, + max_out_tokens); else if (rotate_half) - apply_rotary_pos_emb1<<>>( - mixed_query, key_layer, rotary_dim, seq_len, offset, num_heads, head_size, total_count); + apply_rotary_pos_emb1<<>>(mixed_query, + key_layer, + rotary_dim, + seq_len, + offset, + num_heads, + head_size, + total_count, + max_out_tokens); } template void launch_apply_rotary_pos_emb(float*, @@ -225,7 +253,8 @@ template void launch_apply_rotary_pos_emb(float*, unsigned, bool, bool, - cudaStream_t); + cudaStream_t, + int); template void launch_apply_rotary_pos_emb<__half>(__half*, __half*, unsigned, @@ -236,7 +265,9 @@ template void launch_apply_rotary_pos_emb<__half>(__half*, unsigned, bool, bool, - cudaStream_t); + cudaStream_t, + int); + /* __global__ void apply_rotary_pos_emb(float* mixed_query, float* key_layer, diff --git a/csrc/transformer/inference/csrc/apply_rotary_pos_emb.hip b/csrc/transformer/inference/csrc/apply_rotary_pos_emb.hip index 4e04f7a..a160554 100644 --- 
a/csrc/transformer/inference/csrc/apply_rotary_pos_emb.hip +++ b/csrc/transformer/inference/csrc/apply_rotary_pos_emb.hip @@ -1,11 +1,16 @@ // !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" -#include "custom_hip_layers.h" +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + +#include "inference_cuda_layers.h" #ifndef __HIP_PLATFORM_HCC__ #include #endif +namespace cg = cooperative_groups; namespace cg = cooperative_groups; __global__ void apply_rotary_pos_emb(float* mixed_query, @@ -15,7 +20,8 @@ __global__ void apply_rotary_pos_emb(float* mixed_query, unsigned seq_offset, unsigned num_heads, unsigned head_size, - unsigned total_count) + unsigned total_count, + int max_out_tokens) { cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile g = cg::tiled_partition(b); @@ -28,13 +34,15 @@ __global__ void apply_rotary_pos_emb(float* mixed_query, unsigned offset = head_id * head_size; unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; + unsigned seq_index = head_id % seq_len; + unsigned k_offset = (seq_index + (head_id / seq_len) * max_out_tokens) * head_size; if (head_id < total_count) { while (lane < rotary_dim) { float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim; inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; float q = mixed_query[offset + lane]; - float k = key_layer[offset + lane]; + float k = key_layer[k_offset + lane]; float rotary_sign = (lane % 2 == 1 ? 
-1.0 : 1.0); float q_rot = (q * rotary_sign); float k_rot = (k * rotary_sign); @@ -44,7 +52,7 @@ __global__ void apply_rotary_pos_emb(float* mixed_query, k = k * cosf(inv_freq) + k_rot * sinf(inv_freq); mixed_query[offset + lane] = q; - key_layer[offset + lane] = k; + key_layer[k_offset + lane] = k; lane += WARP_SIZE; } @@ -58,9 +66,9 @@ __global__ void apply_rotary_pos_emb(__half* mixed_query, unsigned seq_offset, unsigned num_heads, unsigned head_size, - unsigned total_count) + unsigned total_count, + int max_out_tokens) { -#if __CUDA_ARCH__ >= 700 cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile g = cg::tiled_partition(b); @@ -72,13 +80,15 @@ __global__ void apply_rotary_pos_emb(__half* mixed_query, unsigned offset = head_id * head_size; unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; + unsigned seq_index = head_id % seq_len; + unsigned k_offset = (seq_index + (head_id / seq_len) * max_out_tokens) * head_size; if (head_id < total_count) { while (lane < rotary_dim) { float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim; inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; float q = (float)mixed_query[offset + lane]; - float k = (float)key_layer[offset + lane]; + float k = (float)key_layer[k_offset + lane]; float rotary_sign = (lane % 2 == 1 ? 
-1.0 : 1.0); float q_rot = (q * rotary_sign); float k_rot = (k * rotary_sign); @@ -88,12 +98,11 @@ __global__ void apply_rotary_pos_emb(__half* mixed_query, k = k * cosf(inv_freq) + k_rot * sinf(inv_freq); mixed_query[offset + lane] = (__half)q; - key_layer[offset + lane] = (__half)k; + key_layer[k_offset + lane] = (__half)k; lane += WARP_SIZE; } } -#endif } __global__ void apply_rotary_pos_emb1(float* mixed_query, float* key_layer, @@ -102,7 +111,8 @@ __global__ void apply_rotary_pos_emb1(float* mixed_query, unsigned seq_offset, unsigned num_heads, unsigned head_size, - unsigned total_count) + unsigned total_count, + int max_out_tokens) { cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile g = cg::tiled_partition(b); @@ -115,13 +125,15 @@ __global__ void apply_rotary_pos_emb1(float* mixed_query, unsigned offset = head_id * head_size; unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; + unsigned seq_index = head_id % seq_len; + unsigned k_offset = (seq_index + (head_id / seq_len) * max_out_tokens) * head_size; if (head_id < total_count) { while (lane < rotary_dim) { float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim; inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; float q = mixed_query[offset + lane]; - float k = key_layer[offset + lane]; + float k = key_layer[k_offset + lane]; float rotary_sign = (lane % 2 == 1 ? 
-1.0 : 1.0); float q_rot = (q * rotary_sign); float k_rot = (k * rotary_sign); @@ -131,7 +143,7 @@ __global__ void apply_rotary_pos_emb1(float* mixed_query, k = k * cosf(inv_freq) + k_rot * sinf(inv_freq); mixed_query[offset + lane] = q; - key_layer[offset + lane] = k; + key_layer[k_offset + lane] = k; lane += WARP_SIZE; } @@ -144,9 +156,9 @@ __global__ void apply_rotary_pos_emb1(__half* mixed_query, unsigned seq_offset, unsigned num_heads, unsigned head_size, - unsigned total_count) + unsigned total_count, + int max_out_tokens) { -#if __CUDA_ARCH__ >= 700 cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile g = cg::tiled_partition(b); @@ -155,7 +167,9 @@ __global__ void apply_rotary_pos_emb1(__half* mixed_query, int lane = id & 0x1f; unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid; + unsigned seq_index = head_id % seq_len; unsigned offset = head_id * head_size; + unsigned k_offset = (seq_index + (head_id / seq_len) * max_out_tokens) * head_size; constexpr unsigned mask[32] = { 0x1 | 0x1000, 0x2 | 0x2000, 0x4 | 0x4000, 0x8 | 0x8000, 0x10 | 0x10000, @@ -166,14 +180,14 @@ __global__ void apply_rotary_pos_emb1(__half* mixed_query, 0x2000000, 0x4000000, 0x8000000, 0x10000000, 0x20000000, 0x40000000, 0x80000000}; - unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; + unsigned seq_id = (head_id % seq_len) + seq_offset; unsigned half_dim = rotary_dim >> 1; if (head_id < total_count) { while (lane < rotary_dim) { float inv_freq = (float)((lane % half_dim) * 2) / (float)rotary_dim; inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; float q = (float)mixed_query[offset + lane]; - float k = (float)key_layer[offset + lane]; + float k = (float)key_layer[k_offset + lane]; float rotary_sign = (lane > (half_dim - 1) ? 
-1.0 : 1.0); float q_rot = (q * rotary_sign); float k_rot = (k * rotary_sign); @@ -185,12 +199,11 @@ __global__ void apply_rotary_pos_emb1(__half* mixed_query, k = k * cosf(inv_freq) + k_rot_tmp * sinf(inv_freq); mixed_query[offset + lane] = (__half)q; - key_layer[offset + lane] = (__half)k; + key_layer[k_offset + lane] = (__half)k; lane += WARP_SIZE; } } -#endif } template @@ -204,17 +217,32 @@ void launch_apply_rotary_pos_emb(T* mixed_query, unsigned batch, bool rotate_half, bool rotate_every_two, - hipStream_t stream) + hipStream_t stream, + int max_out_tokens) { int total_count = batch * num_heads * seq_len; dim3 block_dims(1024); dim3 grid_dims((total_count - 1) / MAX_WARP_NUM + 1); // (batch_size); if (rotate_every_two) - hipLaunchKernelGGL(( apply_rotary_pos_emb), dim3(grid_dims), dim3(block_dims), 0, stream, - mixed_query, key_layer, rotary_dim, seq_len, offset, num_heads, head_size, total_count); + hipLaunchKernelGGL(( apply_rotary_pos_emb), dim3(grid_dims), dim3(block_dims), 0, stream, mixed_query, + key_layer, + rotary_dim, + seq_len, + offset, + num_heads, + head_size, + total_count, + max_out_tokens); else if (rotate_half) - hipLaunchKernelGGL(( apply_rotary_pos_emb1), dim3(grid_dims), dim3(block_dims), 0, stream, - mixed_query, key_layer, rotary_dim, seq_len, offset, num_heads, head_size, total_count); + hipLaunchKernelGGL(( apply_rotary_pos_emb1), dim3(grid_dims), dim3(block_dims), 0, stream, mixed_query, + key_layer, + rotary_dim, + seq_len, + offset, + num_heads, + head_size, + total_count, + max_out_tokens); } template void launch_apply_rotary_pos_emb(float*, @@ -227,7 +255,8 @@ template void launch_apply_rotary_pos_emb(float*, unsigned, bool, bool, - hipStream_t); + hipStream_t, + int); template void launch_apply_rotary_pos_emb<__half>(__half*, __half*, unsigned, @@ -238,7 +267,9 @@ template void launch_apply_rotary_pos_emb<__half>(__half*, unsigned, bool, bool, - hipStream_t); + hipStream_t, + int); + /* __global__ void 
apply_rotary_pos_emb(float* mixed_query, float* key_layer, diff --git a/csrc/transformer/inference/csrc/dequantize.cu b/csrc/transformer/inference/csrc/dequantize.cu index 4ddaabd..33605e1 100644 --- a/csrc/transformer/inference/csrc/dequantize.cu +++ b/csrc/transformer/inference/csrc/dequantize.cu @@ -1,4 +1,8 @@ -#include "custom_cuda_layers.h" +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + +#include "inference_cuda_layers.h" #define MAX_QUANTIZE_GROUPING 1024 @@ -46,8 +50,6 @@ __global__ void dequantize_kernel(__half* output, unsigned groups, unsigned merge_count) { -#ifdef HALF_PRECISION_AVAILABLE - unsigned merge_hidden = hidden_dim >> merge_count; unsigned quantization_stride = (merge_hidden * output_size) / groups; @@ -71,7 +73,6 @@ __global__ void dequantize_kernel(__half* output, output[q_index] = __float2half(scale_data * (float)q); tid += blockDim.x; } -#endif } template @@ -108,3 +109,86 @@ template void launch_dequantize<__half>(__half*, unsigned, unsigned, cudaStream_t); + +__global__ void dequantize_kernel(float* output, + const int8_t* input, + const float* qscale, + int hidden_dim, + unsigned merge_hidden, + int cnt) +{ +} + +__global__ void dequantize_kernel(__half* output, + const int8_t* input, + const float* qscale, + unsigned hidden_dim, + unsigned merge_hidden, + int cnt) +{ + unsigned bid = blockIdx.x * gridDim.y + blockIdx.y; + unsigned tid = threadIdx.x; + + float local_scale = qscale[blockIdx.x]; + + const float* input_cast = reinterpret_cast(input); + float2* output_cast = reinterpret_cast(output); + + input_cast += bid * merge_hidden; + output_cast += bid * merge_hidden; + + for (int c = 0; c < cnt; c++) { + if (tid < merge_hidden) { + float q = input_cast[tid]; + int8_t* q_int8 = (int8_t*)&q; + + float2 q_f; + __half* q_h = (__half*)&q_f; + + q_h[0] = __float2half(local_scale * (float)q_int8[0]); + q_h[1] = __float2half(local_scale * (float)q_int8[1]); + q_h[2] = __float2half(local_scale * (float)q_int8[2]); + q_h[3] = 
__float2half(local_scale * (float)q_int8[3]); + output_cast[tid] = q_f; + tid += blockDim.x; + } + } +} + +template +void launch_dequantize(T* output, + const int8_t* input, + const float* qscale, + unsigned output_size, + unsigned hidden_dim, + unsigned groups, + cudaStream_t stream) +{ + unsigned threads = 1024; + hidden_dim /= 4; + unsigned hid_cnt = threads / hidden_dim; + unsigned thd_cnt = (hidden_dim - 1) / threads + 1; + hid_cnt = hid_cnt > 0 ? hid_cnt : 1; + + unsigned blocks = (output_size + hid_cnt * groups - 1) / (hid_cnt * groups); + dim3 block_dims(threads); + dim3 grid_dims(groups, blocks); + + dequantize_kernel<<>>( + output, input, qscale, hidden_dim, hid_cnt * hidden_dim, thd_cnt); +} + +template void launch_dequantize(float*, + const int8_t*, + const float*, + unsigned, + unsigned, + unsigned, + cudaStream_t); +template void launch_dequantize<__half>(__half*, + const int8_t*, + const float*, + unsigned, + unsigned, + unsigned, + cudaStream_t); diff --git a/csrc/transformer/inference/csrc/dequantize.hip b/csrc/transformer/inference/csrc/dequantize.hip index 7c22e30..8b26902 100644 --- a/csrc/transformer/inference/csrc/dequantize.hip +++ b/csrc/transformer/inference/csrc/dequantize.hip @@ -1,6 +1,10 @@ // !!! This is a file automatically generated by hipify!!! 
#include "hip/hip_runtime.h" -#include "custom_hip_layers.h" +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + +#include "inference_cuda_layers.h" #define MAX_QUANTIZE_GROUPING 1024 @@ -48,8 +52,6 @@ __global__ void dequantize_kernel(__half* output, unsigned groups, unsigned merge_count) { -#ifdef HALF_PRECISION_AVAILABLE - unsigned merge_hidden = hidden_dim >> merge_count; unsigned quantization_stride = (merge_hidden * output_size) / groups; @@ -73,7 +75,6 @@ __global__ void dequantize_kernel(__half* output, output[q_index] = __float2half(scale_data * (float)q); tid += blockDim.x; } -#endif } template @@ -110,3 +111,86 @@ template void launch_dequantize<__half>(__half*, unsigned, unsigned, hipStream_t); + +__global__ void dequantize_kernel(float* output, + const int8_t* input, + const float* qscale, + int hidden_dim, + unsigned merge_hidden, + int cnt) +{ +} + +__global__ void dequantize_kernel(__half* output, + const int8_t* input, + const float* qscale, + unsigned hidden_dim, + unsigned merge_hidden, + int cnt) +{ + unsigned bid = blockIdx.x * gridDim.y + blockIdx.y; + unsigned tid = threadIdx.x; + + float local_scale = qscale[blockIdx.x]; + + const float* input_cast = reinterpret_cast(input); + float2* output_cast = reinterpret_cast(output); + + input_cast += bid * merge_hidden; + output_cast += bid * merge_hidden; + + for (int c = 0; c < cnt; c++) { + if (tid < merge_hidden) { + float q = input_cast[tid]; + int8_t* q_int8 = (int8_t*)&q; + + float2 q_f; + __half* q_h = (__half*)&q_f; + + q_h[0] = __float2half(local_scale * (float)q_int8[0]); + q_h[1] = __float2half(local_scale * (float)q_int8[1]); + q_h[2] = __float2half(local_scale * (float)q_int8[2]); + q_h[3] = __float2half(local_scale * (float)q_int8[3]); + output_cast[tid] = q_f; + tid += blockDim.x; + } + } +} + +template +void launch_dequantize(T* output, + const int8_t* input, + const float* qscale, + unsigned output_size, + unsigned hidden_dim, + unsigned groups, + hipStream_t stream) +{ + 
unsigned threads = 1024; + hidden_dim /= 4; + unsigned hid_cnt = threads / hidden_dim; + unsigned thd_cnt = (hidden_dim - 1) / threads + 1; + hid_cnt = hid_cnt > 0 ? hid_cnt : 1; + + unsigned blocks = (output_size + hid_cnt * groups - 1) / (hid_cnt * groups); + dim3 block_dims(threads); + dim3 grid_dims(groups, blocks); + + hipLaunchKernelGGL(( dequantize_kernel), dim3(grid_dims), dim3(block_dims), 0, stream, + output, input, qscale, hidden_dim, hid_cnt * hidden_dim, thd_cnt); +} + +template void launch_dequantize(float*, + const int8_t*, + const float*, + unsigned, + unsigned, + unsigned, + hipStream_t); +template void launch_dequantize<__half>(__half*, + const int8_t*, + const float*, + unsigned, + unsigned, + unsigned, + hipStream_t); diff --git a/csrc/transformer/inference/csrc/gelu.cu b/csrc/transformer/inference/csrc/gelu.cu index 70bbf42..71a37bb 100644 --- a/csrc/transformer/inference/csrc/gelu.cu +++ b/csrc/transformer/inference/csrc/gelu.cu @@ -1,5 +1,12 @@ -#include "custom_cuda_layers.h" +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ +#include "conversion_utils.h" +#include "inference_cuda_layers.h" +#include "memory_access_utils.h" + +namespace cg = cooperative_groups; #define MAX_CAP 4 #define MAX_SEQ 2048 @@ -10,74 +17,32 @@ inline __device__ float gelu(const float x) return x * 0.5f * (1.0f + tanhf(sqrt_param * (x + mul_param * x * x * x))); } -__global__ void fused_bias_gelu(float* input, - const float* bias, - int total_count, - int intermediate_size) -{ - float4* input_cast = reinterpret_cast(input); - const float4* bias_cast = reinterpret_cast(bias); - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float4 data = input_cast[offset]; - float4 bias_data = bias_cast[offset % intermediate_size]; - - data.x += bias_data.x; - data.y += bias_data.y; - data.z += bias_data.z; - data.w += bias_data.w; - - data.x = gelu(data.x); - data.y = gelu(data.y); - data.z = gelu(data.z); - data.w = gelu(data.w); - - 
input_cast[offset] = data; - } -} - -__global__ void fused_bias_gelu(__half* input, - const __half* bias, - int total_count, - int intermediate_size) +/* +In-place gelu(biasAdd(x)) for channels last +*/ +template +__global__ void fused_bias_gelu(T* input, const T* bias, int total_count, int intermediate_size) { -#ifdef HALF_PRECISION_AVAILABLE - - float2* input_cast = reinterpret_cast(input); - const float2* bias_cast = reinterpret_cast(bias); - - int offset = blockIdx.x * blockDim.x + threadIdx.x; + // Input restriction: intermediate_size % vals_per_access == 0 + constexpr int granularity = 16; + constexpr int values_per_access = granularity / sizeof(T); + const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * values_per_access; if (offset < total_count) { - float2 vals_vec = input_cast[offset]; - float2 bias_vec = bias_cast[offset % intermediate_size]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - low_data.x += low_bias.x; - low_data.y += low_bias.y; - high_data.x += high_bias.x; - high_data.y += high_bias.y; - - low_data.x = gelu(low_data.x); - low_data.y = gelu(low_data.y); - high_data.x = gelu(high_data.x); - high_data.y = gelu(high_data.y); - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - input_cast[offset] = vals_vec; + T data[values_per_access]; + T data_bias[values_per_access]; + mem_access::load_global(data, input + offset); + mem_access::load_global(data_bias, bias + (offset % intermediate_size)); + +#pragma unroll + for (int i = 0; i < values_per_access; i++) { + float data_f = conversion::to(data[i]); + float bias_f = conversion::to(data_bias[i]); + data[i] = conversion::to(gelu(data_f + bias_f)); + } + + 
mem_access::store_global(input + offset, data); } -#endif } template @@ -87,316 +52,324 @@ void launch_bias_gelu(T* input, int batch_size, cudaStream_t stream) { - int total_count = batch_size * (intermediate_size / 4); - int threads = 1024; // intermediate_size / iterations / 4; + constexpr int threads = 1024; + constexpr int granularity = 16; + + const int total_count = batch_size * intermediate_size; + const int elems_per_block = threads * (granularity / sizeof(T)); dim3 block_dims(threads); - dim3 grid_dims(((total_count - 1) / 1024 + 1)); // (batch_size); + dim3 grid_dims((total_count + elems_per_block - 1) / elems_per_block); fused_bias_gelu<<>>( - input, bias, total_count, intermediate_size / 4); + input, bias, total_count, intermediate_size); } template void launch_bias_gelu(float*, const float*, int, int, cudaStream_t); template void launch_bias_gelu<__half>(__half*, const __half*, int, int, cudaStream_t); -__global__ void fused_bias_add(float* input, const float* bias, int total_count, int hidden_size) -{ - float4* input_cast = reinterpret_cast(input); - const float4* bias_cast = reinterpret_cast(bias); - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float4 data = input_cast[offset]; - float4 bias_data = bias_cast[offset % hidden_size]; - - data.x += bias_data.x; - data.y += bias_data.y; - data.z += bias_data.z; - data.w += bias_data.w; - - input_cast[offset] = data; - } -} - -__global__ void fused_bias_add(__half* input, const __half* bias, int total_count, int hidden_size) +/* +In-place channels-last bias add +*/ +template +__global__ void fused_bias_add(T* input, const T* bias, int total_count, int intermediate_size) { -#ifdef HALF_PRECISION_AVAILABLE - - float2* input_cast = reinterpret_cast(input); - const float2* bias_cast = reinterpret_cast(bias); - - int offset = blockIdx.x * blockDim.x + threadIdx.x; + // Input restriction: intermediate_size % vals_per_access == 0 + constexpr int granularity = 16; + 
constexpr int values_per_access = granularity / sizeof(T); + const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * values_per_access; if (offset < total_count) { - float2 vals_vec = input_cast[offset]; - float2 bias_vec = bias_cast[offset % hidden_size]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - low_data.x += low_bias.x; - low_data.y += low_bias.y; - high_data.x += high_bias.x; - high_data.y += high_bias.y; - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - input_cast[offset] = vals_vec; + T data[values_per_access]; + T data_bias[values_per_access]; + mem_access::load_global(data, input + offset); + mem_access::load_global(data_bias, bias + (offset % intermediate_size)); + +#pragma unroll + for (int i = 0; i < values_per_access; i++) { + float data_f = conversion::to(data[i]); + float bias_f = conversion::to(data_bias[i]); + data[i] = conversion::to(data_f + bias_f); + } + + mem_access::store_global(input + offset, data); } -#endif } template -void launch_bias_add(T* input, const T* bias, int hidden_size, int batch_size, cudaStream_t stream) +void launch_bias_add(T* input, + const T* bias, + int intermediate_size, + int batch_size, + cudaStream_t stream) { - int total_count = batch_size * (hidden_size / 4); - int threads = 1024; // hidden_size / iterations / 4; + constexpr int threads = 1024; + constexpr int granularity = 16; + + const int total_count = batch_size * intermediate_size; + const int elems_per_block = threads * (granularity / sizeof(T)); dim3 block_dims(threads); - dim3 grid_dims(((total_count - 1) / threads + 1)); // (batch_size); + dim3 grid_dims((total_count + elems_per_block - 1) / elems_per_block); - 
fused_bias_add<<>>(input, bias, total_count, hidden_size / 4); + fused_bias_add<<>>( + input, bias, total_count, intermediate_size); } template void launch_bias_add(float*, const float*, int, int, cudaStream_t); template void launch_bias_add<__half>(__half*, const __half*, int, int, cudaStream_t); -__global__ void fused_bias_residual(float* input, - float* output, - float* attn, - float* bias, - float* attnbias, - int total_count, - int intermediate_size, - int mp_size) +__global__ void fused_bias_residual(float* residual, + const float* hidden_state, + const float* attn, + const float* bias, + const float* attn_bias, + const int total_count, + const int intermediate_size, + const float mp_scale, + const bool preln) { - float4* input_cast = reinterpret_cast(input); - float4* output_cast = reinterpret_cast(output); - float4* attn_cast = reinterpret_cast(attn); - float4* bias_cast = reinterpret_cast(bias); - float4* attnbias_cast = reinterpret_cast(attnbias); - int offset = blockIdx.x * blockDim.x + threadIdx.x; + float4* res_fl4_ptr = reinterpret_cast(residual); + const float4* hs_fl4_ptr = reinterpret_cast(hidden_state); + const float4* attn_fl4_ptr = reinterpret_cast(attn); + const float4* bias_fl4_ptr = reinterpret_cast(bias); + const float4* attn_bias_fl4_ptr = reinterpret_cast(attn_bias); + const int offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset < total_count) { - float4 data = input_cast[offset]; - float4 out = output_cast[offset]; - float4 res_vec = attn_cast[offset]; - float4 bias_data = bias_cast[offset % intermediate_size]; - float4 attn_bias = attnbias_cast[offset % intermediate_size]; - - data.x = (data.x + res_vec.x) * mp_size + (out.x + bias_data.x + attn_bias.x); - data.y = (data.y + res_vec.y) * mp_size + (out.y + bias_data.y + attn_bias.y); - data.z = (data.z + res_vec.z) * mp_size + (out.z + bias_data.z + attn_bias.z); - data.w = (data.w + res_vec.w) * mp_size + (out.w + bias_data.w + attn_bias.w); - - output_cast[offset] = data; + 
float4 res_fl4 = res_fl4_ptr[offset]; + const float4 hs_fl4 = hs_fl4_ptr[offset]; + const float4 attn_fl4 = attn_fl4_ptr[offset]; + const float4 bias_fl4 = bias_fl4_ptr[offset % intermediate_size]; + const float4 attn_bias_fl4 = attn_bias_fl4_ptr[offset % intermediate_size]; + if (preln) { + // residual = (residual + attention + bias + attention_bias) * + // mp_scale + hidden_state + res_fl4.x = + (res_fl4.x + attn_fl4.x + bias_fl4.x + attn_bias_fl4.x) * mp_scale + (hs_fl4.x); + res_fl4.y = + (res_fl4.y + attn_fl4.y + bias_fl4.y + attn_bias_fl4.y) * mp_scale + (hs_fl4.y); + res_fl4.z = + (res_fl4.z + attn_fl4.z + bias_fl4.z + attn_bias_fl4.z) * mp_scale + (hs_fl4.z); + res_fl4.w = + (res_fl4.w + attn_fl4.w + bias_fl4.w + attn_bias_fl4.w) * mp_scale + (hs_fl4.w); + } else { + // residual += hidden_state + bias + res_fl4.x = res_fl4.x + hs_fl4.x + bias_fl4.x; + res_fl4.y = res_fl4.y + hs_fl4.y + bias_fl4.y; + res_fl4.z = res_fl4.z + hs_fl4.z + bias_fl4.z; + res_fl4.w = res_fl4.w + hs_fl4.w + bias_fl4.w; + } + res_fl4_ptr[offset] = res_fl4; } } -__global__ void fused_bias_residual(__half* input, - __half* output, - __half* attn, - __half* bias, - __half* attn_bias, - int total_count, - int intermediate_size, - int mp_size) +__global__ void fused_bias_residual(__half* residual, + const __half* hidden_state, + const __half* attn, + const __half* bias, + const __half* attn_bias, + const int total_count, + const int intermediate_size, + const float mp_scale, + const bool preln) { -#ifdef HALF_PRECISION_AVAILABLE - - float2* input_cast = reinterpret_cast(input); - float2* output_cast = reinterpret_cast(output); - float2* attn_cast = reinterpret_cast(attn); - - float2* bias_cast = reinterpret_cast(bias); - float2* attnbias_cast = reinterpret_cast(attn_bias); - - int offset = blockIdx.x * blockDim.x + threadIdx.x; + float2* res_fl2_ptr = reinterpret_cast(residual); + const float2* hs_fl2_ptr = reinterpret_cast(hidden_state); + const float2* attn_fl2_ptr = 
reinterpret_cast(attn); + const float2* bias_fl2_ptr = reinterpret_cast(bias); + const float2* attn_bias_fl2_ptr = reinterpret_cast(attn_bias); + const int offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset < total_count) { - float2 vals_vec = input_cast[offset]; - float2 out_vec = output_cast[offset]; - float2 res_vec = attn_cast[offset]; - - float2 bias_vec = bias_cast[offset % intermediate_size]; - float2 attn_bias_vec = attnbias_cast[offset % intermediate_size]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* out_half = reinterpret_cast<__half2*>(&out_vec); - __half2* res_half = reinterpret_cast<__half2*>(&res_vec); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - __half2* attnbias_half = reinterpret_cast<__half2*>(&attn_bias_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - float2 low_out = __half22float2(out_half[0]); - float2 high_out = __half22float2(out_half[1]); - - float2 low_res = __half22float2(res_half[0]); - float2 high_res = __half22float2(res_half[1]); - - float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - float2 attn_low_bias = __half22float2(attnbias_half[0]); - float2 attn_high_bias = __half22float2(attnbias_half[1]); - - low_data.x = - (low_data.x + low_res.x) * mp_size + (low_out.x + (low_bias.x + attn_low_bias.x)); - low_data.y = - (low_data.y + low_res.y) * mp_size + (low_out.y + (low_bias.y + attn_low_bias.y)); - high_data.x = - (high_data.x + high_res.x) * mp_size + (high_out.x + (high_bias.x + attn_high_bias.x)); - high_data.y = - (high_data.y + high_res.y) * mp_size + (high_out.y + (high_bias.y + attn_high_bias.y)); - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - output_cast[offset] = vals_vec; + float2 res_fl2 = res_fl2_ptr[offset]; + const float2 hs_fl2 = hs_fl2_ptr[offset]; + const float2 attn_fl2 = attn_fl2_ptr[offset]; + 
const float2 bias_fl2 = bias_fl2_ptr[offset % intermediate_size]; + const float2 attn_bias_fl2 = attn_bias_fl2_ptr[offset % intermediate_size]; + + __half2* res_half2 = reinterpret_cast<__half2*>(&res_fl2); + const __half2* hs_half2 = reinterpret_cast(&hs_fl2); + const __half2* attn_half2 = reinterpret_cast(&attn_fl2); + const __half2* bias_half2 = reinterpret_cast(&bias_fl2); + const __half2* attn_bias_half2 = reinterpret_cast(&attn_bias_fl2); + + float2 res_low = __half22float2(res_half2[0]); + float2 res_high = __half22float2(res_half2[1]); + + const float2 hs_low = __half22float2(hs_half2[0]); + const float2 hs_high = __half22float2(hs_half2[1]); + + const float2 attn_low = __half22float2(attn_half2[0]); + const float2 attn_high = __half22float2(attn_half2[1]); + + const float2 bias_low = __half22float2(bias_half2[0]); + const float2 bias_high = __half22float2(bias_half2[1]); + + const float2 attn_bias_low = __half22float2(attn_bias_half2[0]); + const float2 attn_bias_high = __half22float2(attn_bias_half2[1]); + + if (preln) { + // residual = (residual + attention + bias + attention_bias) * + // mp_scale + hidden_state + res_low.x = + (res_low.x + attn_low.x + bias_low.x + attn_bias_low.x) * mp_scale + hs_low.x; + res_low.y = + (res_low.y + attn_low.y + bias_low.y + attn_bias_low.y) * mp_scale + hs_low.y; + res_high.x = + (res_high.x + attn_high.x + bias_high.x + attn_bias_high.x) * mp_scale + hs_high.x; + res_high.y = + (res_high.y + attn_high.y + bias_high.y + attn_bias_high.y) * mp_scale + hs_high.y; + } else { + // residual += hidden_state + bias + res_low.x = (res_low.x + hs_low.x + bias_low.x); + res_low.y = (res_low.y + hs_low.y + bias_low.y); + res_high.x = (res_high.x + hs_high.x + bias_high.x); + res_high.y = (res_high.y + hs_high.y + bias_high.y); + } + res_half2[0] = __float22half2_rn(res_low); + res_half2[1] = __float22half2_rn(res_high); + + res_fl2_ptr[offset] = res_fl2; } -#endif } template -void launch_bias_residual(T* input, - T* output, +void 
launch_bias_residual(T* residual, + T* hidden_state, T* attn, T* bias, T* attn_bias, int batch, int hidden_dim, int mp_size, + bool preln, cudaStream_t stream) { int total_count = batch * hidden_dim / 4; dim3 block_dims(1024); dim3 grid_dims((total_count - 1) / 1024 + 1); // (batch_size); - fused_bias_residual<<>>( - input, output, attn, bias, attn_bias, total_count, hidden_dim / 4, 1.0 / mp_size); + fused_bias_residual<<>>(residual, + hidden_state, + attn, + bias, + attn_bias, + total_count, + hidden_dim / 4, + 1.0 / mp_size, + preln); } -template void -launch_bias_residual(float*, float*, float*, float*, float*, int, int, int, cudaStream_t); -template void launch_bias_residual<__half>(__half*, - __half*, - __half*, - __half*, - __half*, - int, - int, - int, - cudaStream_t); - -__global__ void gptj_residual_add(float* input, - float* output, - float* attn, - float* bias, - float* attnbias, - int total_count, - int intermediate_size, - float mp_size) +template void launch_bias_residual< + float>(float*, float*, float*, float*, float*, int, int, int, bool, cudaStream_t); +template void launch_bias_residual< + __half>(__half*, __half*, __half*, __half*, __half*, int, int, int, bool, cudaStream_t); + +__global__ void gptj_residual_add(float* residual, + const float* hidden_state, + const float* attn, + const float* bias, + const float* attn_bias, + const int total_count, + const int intermediate_size, + const float mp_scale) { - float4* input_cast = reinterpret_cast(input); - float4* output_cast = reinterpret_cast(output); - float4* attn_cast = reinterpret_cast(attn); - float4* bias_cast = reinterpret_cast(bias); - float4* attnbias_cast = reinterpret_cast(attnbias); - int offset = blockIdx.x * blockDim.x + threadIdx.x; + float4* res_fl4_ptr = reinterpret_cast(residual); + const float4* hs_fl4_ptr = reinterpret_cast(hidden_state); + const float4* attn_fl4_ptr = reinterpret_cast(attn); + const float4* bias_fl4_ptr = reinterpret_cast(bias); + const float4* 
attn_bias_fl4_ptr = reinterpret_cast(attn_bias); + const int offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset < total_count) { - float4 data = input_cast[offset]; - float4 out = output_cast[offset]; - float4 res_vec = attn_cast[offset]; - float4 bias_data = bias_cast[offset % intermediate_size]; - float4 attn_bias = attnbias_cast[offset % intermediate_size]; - - data.x = data.x * mp_size + (out.x + res_vec.x + bias_data.x + attn_bias.x); - data.y = data.y * mp_size + (out.y + res_vec.y + bias_data.y + attn_bias.y); - data.z = data.z * mp_size + (out.z + res_vec.z + bias_data.z + attn_bias.z); - data.w = data.w * mp_size + (out.w + res_vec.w + bias_data.w + attn_bias.w); - - output_cast[offset] = data; + float4 res_fl4 = res_fl4_ptr[offset]; + const float4 hs_fl4 = hs_fl4_ptr[offset]; + const float4 attn_fl4 = attn_fl4_ptr[offset]; + const float4 bias_fl4 = bias_fl4_ptr[offset % intermediate_size]; + + if (attn_bias) { + float4 attn_bias_fl4 = attn_bias_fl4_ptr[offset % intermediate_size]; + // residual += attention_bias + res_fl4.x += attn_bias_fl4.x; + res_fl4.y += attn_bias_fl4.y; + res_fl4.z += attn_bias_fl4.z; + res_fl4.w += attn_bias_fl4.w; + } + // residual = hidden_state + attention + (residual + bias) * mp_scale + res_fl4.x = hs_fl4.x + attn_fl4.x + (res_fl4.x + bias_fl4.x) * mp_scale; + res_fl4.y = hs_fl4.y + attn_fl4.y + (res_fl4.y + bias_fl4.y) * mp_scale; + res_fl4.z = hs_fl4.z + attn_fl4.z + (res_fl4.z + bias_fl4.z) * mp_scale; + res_fl4.w = hs_fl4.w + attn_fl4.w + (res_fl4.w + bias_fl4.w) * mp_scale; + + res_fl4_ptr[offset] = res_fl4; } } -__global__ void gptj_residual_add(__half* input, - __half* output, - __half* attn, - __half* bias, - __half* attn_bias, - int total_count, - int intermediate_size, - float mp_size) +__global__ void gptj_residual_add(__half* residual, + const __half* hidden_state, + const __half* attn, + const __half* bias, + const __half* attn_bias, + const int total_count, + const int intermediate_size, + const float 
mp_scale) { -#if __CUDA_ARCH__ >= 700 || defined(__HIP_PLATFORM_HCC__) - - float2* input_cast = reinterpret_cast(input); - float2* output_cast = reinterpret_cast(output); - float2* attn_cast = reinterpret_cast(attn); - - float2* bias_cast = reinterpret_cast(bias); - float2* attnbias_cast = reinterpret_cast(attn_bias); - - int offset = blockIdx.x * blockDim.x + threadIdx.x; + float2* res_fl2_ptr = reinterpret_cast(residual); + const float2* hs_fl2_ptr = reinterpret_cast(hidden_state); + const float2* attn_fl2_ptr = reinterpret_cast(attn); + const float2* bias_fl2_ptr = reinterpret_cast(bias); + const float2* attn_bias_fl2_ptr = reinterpret_cast(attn_bias); + const int offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset < total_count) { - float2 vals_vec = input_cast[offset]; - float2 out_vec = output_cast[offset]; - float2 res_vec = attn_cast[offset]; - - float2 bias_vec = bias_cast[offset % intermediate_size]; - float2 attn_bias_vec = attnbias_cast[offset % intermediate_size]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* out_half = reinterpret_cast<__half2*>(&out_vec); - __half2* res_half = reinterpret_cast<__half2*>(&res_vec); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - __half2* attnbias_half = reinterpret_cast<__half2*>(&attn_bias_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - float2 low_out = __half22float2(out_half[0]); - float2 high_out = __half22float2(out_half[1]); - - float2 low_res = __half22float2(res_half[0]); - float2 high_res = __half22float2(res_half[1]); - - float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - float2 attn_low_bias = __half22float2(attnbias_half[0]); - float2 attn_high_bias = __half22float2(attnbias_half[1]); - - low_data.x = - low_data.x * mp_size + (low_out.x + low_res.x + (low_bias.x + attn_low_bias.x)); - low_data.y = - low_data.y * mp_size + (low_out.y + 
low_res.y + (low_bias.y + attn_low_bias.y)); - high_data.x = - high_data.x * mp_size + (high_out.x + high_res.x + (high_bias.x + attn_high_bias.x)); - high_data.y = - high_data.y * mp_size + (high_out.y + high_res.y + (high_bias.y + attn_high_bias.y)); - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - output_cast[offset] = vals_vec; + float2 res_fl2 = res_fl2_ptr[offset]; + const float2 hs_fl2 = hs_fl2_ptr[offset]; + const float2 attn_fl2 = attn_fl2_ptr[offset]; + const float2 bias_fl2 = bias_fl2_ptr[offset % intermediate_size]; + + __half2* res_half2 = reinterpret_cast<__half2*>(&res_fl2); + const __half2* hs_half2 = reinterpret_cast(&hs_fl2); + const __half2* attn_half2 = reinterpret_cast(&attn_fl2); + const __half2* bias_half2 = reinterpret_cast(&bias_fl2); + + float2 res_low = __half22float2(res_half2[0]); + float2 res_high = __half22float2(res_half2[1]); + + const float2 hs_low = __half22float2(hs_half2[0]); + const float2 hs_high = __half22float2(hs_half2[1]); + + const float2 attn_low = __half22float2(attn_half2[0]); + const float2 attn_high = __half22float2(attn_half2[1]); + + const float2 bias_low = __half22float2(bias_half2[0]); + const float2 bias_high = __half22float2(bias_half2[1]); + + if (attn_bias) { + const float2 attn_bias_fl2 = attn_bias_fl2_ptr[offset % intermediate_size]; + const __half2* attn_bias_half2 = reinterpret_cast(&attn_bias_fl2); + const float2 attn_bias_low = __half22float2(attn_bias_half2[0]); + const float2 attn_bias_high = __half22float2(attn_bias_half2[1]); + // residual += attention_bias + res_low.x += attn_bias_low.x; + res_low.y += attn_bias_low.y; + res_high.x += attn_bias_high.x; + res_high.y += attn_bias_high.y; + } + // residual = hidden_state + attention + (residual + bias) * mp_scale + res_low.x = attn_low.x + hs_low.x + (res_low.x + bias_low.x) * mp_scale; + res_low.y = attn_low.y + hs_low.y + (res_low.y + bias_low.y) * mp_scale; + res_high.x = attn_high.x + hs_high.x + 
(res_high.x + bias_high.x) * mp_scale; + res_high.y = attn_high.y + hs_high.y + (res_high.y + bias_high.y) * mp_scale; + + res_half2[0] = __float22half2_rn(res_low); + res_half2[1] = __float22half2_rn(res_high); + + res_fl2_ptr[offset] = res_fl2; } -#endif } template -void launch_gptj_residual_add(T* input, - T* output, +void launch_gptj_residual_add(T* residual, + T* hidden_state, T* attn, T* bias, T* attn_bias, @@ -410,7 +383,7 @@ void launch_gptj_residual_add(T* input, dim3 grid_dims((total_count - 1) / 1024 + 1); // (batch_size); gptj_residual_add<<>>( - input, output, attn, bias, attn_bias, total_count, hidden_dim / 4, 1.0 / mp_size); + residual, hidden_state, attn, bias, attn_bias, total_count, hidden_dim / 4, 1.0 / mp_size); } template void launch_gptj_residual_add(float*, @@ -431,69 +404,33 @@ template void launch_gptj_residual_add<__half>(__half*, int, int, cudaStream_t); - -__global__ void moe_res_matmul(float* residual, - float* coef, - float* mlp_out, - int seq_len, - int hidden_dim) -{ - unsigned tid = threadIdx.x; - float4* residual_cast = reinterpret_cast(residual); - float4* coef_cast = reinterpret_cast(coef); - float4* mlp_out_cast = reinterpret_cast(mlp_out); - - residual_cast += blockIdx.x * hidden_dim; - mlp_out_cast += blockIdx.x * hidden_dim; - - float4* coef_cast2 = coef_cast + hidden_dim; - - while (tid < hidden_dim) { - float4 res = residual_cast[tid]; - float4 mlp = mlp_out_cast[tid]; - float4 coef1 = coef_cast[tid]; - float4 coef2 = coef_cast2[tid]; - mlp.x = mlp.x * coef2.x + res.x * coef1.x; - mlp.y = mlp.y * coef2.y + res.y * coef1.y; - mlp.z = mlp.z * coef2.z + res.z * coef1.z; - mlp.w = mlp.w * coef2.w + res.w * coef1.w; - mlp_out_cast[tid] = mlp; - tid += blockDim.x; - } -} - -__global__ void moe_res_matmul(__half* residual, - __half* coef, - __half* mlp_out, - int seq_len, - int hidden_dim) +template +__global__ void moe_res_matmul(T* residual, T* coef, T* mlp_out, int seq_len, int hidden_dim) { - unsigned tid = threadIdx.x; - - 
float2* residual_cast = reinterpret_cast(residual); - float2* mlp_out_cast = reinterpret_cast(mlp_out); - float2* coef_cast = reinterpret_cast(coef); - float2* coef_cast2 = coef_cast + hidden_dim; - - residual_cast += blockIdx.x * hidden_dim; - mlp_out_cast += blockIdx.x * hidden_dim; - - while (tid < hidden_dim) { - float2 res = residual_cast[tid]; - float2 coef1 = coef_cast[tid]; - float2 coef2 = coef_cast[tid]; - float2 data = mlp_out_cast[tid]; - __half* data_h = reinterpret_cast<__half*>(&data); - __half* coef1_h = reinterpret_cast<__half*>(&coef1); - __half* coef2_h = reinterpret_cast<__half*>(&coef2); - __half* res_h = reinterpret_cast<__half*>(&res); - data_h[0] = res_h[0] * coef1_h[0] + data_h[0] * coef2_h[0]; - data_h[1] = res_h[1] * coef1_h[1] + data_h[1] * coef2_h[1]; - data_h[2] = res_h[2] * coef1_h[2] + data_h[2] * coef2_h[2]; - data_h[3] = res_h[3] * coef1_h[3] + data_h[3] * coef2_h[3]; - - mlp_out_cast[tid] = data; - tid += blockDim.x; + constexpr int granularity = 16; + constexpr int vals_per_access = granularity / sizeof(T); + + T* residual_seq = residual + blockIdx.x * hidden_dim; + T* mlp_out_seq = mlp_out + blockIdx.x * hidden_dim; + + for (unsigned tid = threadIdx.x * vals_per_access; tid < hidden_dim; + tid += blockDim.x * vals_per_access) { + T mlp[vals_per_access]; + T res[vals_per_access]; + T coef1[vals_per_access]; + T coef2[vals_per_access]; + + mem_access::load_global(mlp, mlp_out_seq + tid); + mem_access::load_global(res, residual_seq + tid); + mem_access::load_global(coef1, coef + tid); + mem_access::load_global(coef2, coef + tid + hidden_dim); + +#pragma unroll + for (int idx = 0; idx < vals_per_access; idx++) { + mlp[idx] = mlp[idx] * coef2[idx] + res[idx] * coef1[idx]; + } + + mem_access::store_global(mlp_out_seq + tid, mlp); } } @@ -508,7 +445,7 @@ void launch_moe_res_matmul(T* residual, dim3 grid_dim(seq_len); dim3 block_dim(1024); moe_res_matmul<<>>( - residual, coef, mlp_out, seq_len, hidden_dim / 4); + residual, coef, 
mlp_out, seq_len, hidden_dim); } template void launch_moe_res_matmul(float* residual, @@ -523,3 +460,224 @@ template void launch_moe_res_matmul(__half* residual, int seq_len, int hidden_dim, cudaStream_t stream); + +__global__ void pad_data_kernel(__half* padded_output, + __half* output, + int head_size, + int padded_head_size) +{ + float4* padded_output_cast = reinterpret_cast(padded_output); + float4* output_cast = reinterpret_cast(output); + int bid = blockIdx.x * (blockDim.y) + threadIdx.y; + int idx = threadIdx.x; + padded_output_cast += (bid * padded_head_size); + output_cast += (bid * head_size); + float4 ZERO; + const __half2 zero_h = __float2half2_rn(0.f); + __half2* ZERO_h = reinterpret_cast<__half2*>(&ZERO); +#pragma unroll + for (int i = 0; i < 4; i++) ZERO_h[i] = zero_h; + if (idx < head_size) + padded_output_cast[idx] = output_cast[idx]; + else + padded_output_cast[idx] = ZERO; +} +__global__ void pad_data_kernel(float* padded_output, + float* output, + int head_size, + int padded_head_size) +{ +} +template +void pad_data(T* padded_output, + T* output, + int bsz, + int head_size, + int padded_head_size, + cudaStream_t stream) +{ + dim3 grid_dim((bsz - 1) / 16 + 1); + dim3 block_dim(padded_head_size / 8, 16); + pad_data_kernel<<>>( + padded_output, output, head_size / 8, padded_head_size / 8); +} +template void pad_data(__half* padded_output, + __half* output, + int bsz, + int head_size, + int padded_head_size, + cudaStream_t stream); +template void pad_data(float* padded_output, + float* output, + int bsz, + int head_size, + int padded_head_size, + cudaStream_t stream); + +__global__ void pad_head_seq_kernel(__half* padded_output, + __half* output, + int seq_len, + int padded_seq_len, + int head_size, + int padded_head_size) +{ + float4* padded_output_cast = reinterpret_cast(padded_output); + float4* output_cast = reinterpret_cast(output); + int bsz = blockIdx.x; + int bid = blockIdx.y * (blockDim.y) + threadIdx.y; + int idx = threadIdx.x; + 
padded_output_cast += (bsz * padded_seq_len + bid) * padded_head_size; + output_cast += (bsz * seq_len + bid) * head_size; + float4 ZERO; + const __half2 zero_h = __float2half2_rn(0.f); + __half2* ZERO_h = reinterpret_cast<__half2*>(&ZERO); +#pragma unroll + for (int i = 0; i < 4; i++) ZERO_h[i] = zero_h; + + if (idx < head_size && bid < seq_len) + padded_output_cast[idx] = output_cast[idx]; + else + padded_output_cast[idx] = ZERO; +} +__global__ void pad_head_seq_kernel(float* padded_output, + float* output, + int seq_len, + int padded_seq_len, + int head_size, + int padded_head_size) +{ +} +template +void pad_head_seq(T* padded_output, + T* output, + int bsz, + int seq_len, + int padded_seq_len, + int head_size, + int padded_head_size, + cudaStream_t stream) +{ + dim3 grid_dim(bsz, padded_seq_len / 16); + dim3 block_dim(padded_head_size / 8, 16); + pad_head_seq_kernel<<>>( + padded_output, output, seq_len, padded_seq_len, head_size / 8, padded_head_size / 8); +} +template void pad_head_seq(__half* padded_output, + __half* output, + int bsz, + int seq_len, + int padded_seq_len, + int head_size, + int padded_head_size, + cudaStream_t stream); +template void pad_head_seq(float* padded_output, + float* output, + int bsz, + int seq_len, + int padded_seq_len, + int head_size, + int padded_head_size, + cudaStream_t stream); + +// TODO(cmikeh2): evaluate different GeLU performance +__device__ __forceinline__ float old_gelu(float val) +{ + // 1 / sqrt(2) + constexpr float rsqrt_2 = 0.707106769084930419922; + return val * 0.5f * (1.0f + erff(val * rsqrt_2)); +} + +namespace fused_geglu { +constexpr int threads = 256; +constexpr int steps = 2; +constexpr int granularity = 16; +} // namespace fused_geglu + +template +__global__ void fused_bias_geglu(T* output, + const T* activation, + const T* bias, + int base_channels, + int total_elems) +{ + constexpr int T_per_access = fused_geglu::granularity / sizeof(T); + constexpr int T_per_step = T_per_access * fused_geglu::threads; 
+ constexpr int T_per_block = T_per_step * fused_geglu::steps; + + const int id = blockIdx.x * T_per_block + threadIdx.x * T_per_access; + +#pragma unroll + for (int i = 0; i < fused_geglu::steps; i++) { + T activation_buffer_1[T_per_access]; + T activation_buffer_2[T_per_access]; + T bias_buffer_1[T_per_access]; + T bias_buffer_2[T_per_access]; + + const int iter_id = id + T_per_step * i; + if (iter_id < total_elems) { + const int channel_id = iter_id % base_channels; + const int seq_id = iter_id / base_channels; + const int seq_offset = seq_id * base_channels * 2; + + mem_access::load_global(activation_buffer_1, + activation + seq_offset + channel_id); + mem_access::load_global( + activation_buffer_2, activation + seq_offset + channel_id + base_channels); + mem_access::load_global(bias_buffer_1, bias + channel_id); + mem_access::load_global(bias_buffer_2, + bias + channel_id + base_channels); + + // Since the GeLU is going to happen at float, might as well + // convert +#pragma unroll + for (int v = 0; v < T_per_access; v++) { + T hidden_state = activation_buffer_1[v] + bias_buffer_1[v]; + T pre_gate = activation_buffer_2[v] + bias_buffer_2[v]; + float gate_f = old_gelu(conversion::to(pre_gate)); + T gate = conversion::to(gate_f); + activation_buffer_1[v] = hidden_state * gate; + } + + mem_access::store_global(output + iter_id, + activation_buffer_1); + } + } +} + +template +void launch_fused_bias_geglu(T* output, + const T* activation, + const T* bias, + int rows, + int elems_per_row, + cudaStream_t stream) +{ + /* + Fused bias GEGLU is a variant of the gated activation functions. + The input here is a matrix of [batch, seq_len, 2 * intermediate_dim] + where the second half of the channels act as GeLU gates for the first + half. 
+ */ + + // Re-derive the above figures + constexpr int T_per_access = fused_geglu::granularity / sizeof(T); + constexpr int T_per_step = T_per_access * fused_geglu::threads; + constexpr int T_per_block = T_per_step * fused_geglu::steps; + + const int base_channels = elems_per_row / 2; + const int total_elems = base_channels * rows; + + dim3 block(fused_geglu::threads); + dim3 grid((total_elems + T_per_block - 1) / T_per_block); + + fused_bias_geglu<<>>( + output, activation, bias, base_channels, total_elems); +} + +template void launch_fused_bias_geglu(__half*, + const __half*, + const __half*, + int, + int, + cudaStream_t); +template void launch_fused_bias_geglu(float*, const float*, const float*, int, int, cudaStream_t); diff --git a/csrc/transformer/inference/csrc/gelu.hip b/csrc/transformer/inference/csrc/gelu.hip index 00c03ef..6665406 100644 --- a/csrc/transformer/inference/csrc/gelu.hip +++ b/csrc/transformer/inference/csrc/gelu.hip @@ -1,7 +1,14 @@ // !!! This is a file automatically generated by hipify!!! 
#include "hip/hip_runtime.h" -#include "custom_hip_layers.h" +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ +#include "conversion_utils.h" +#include "inference_cuda_layers.h" +#include "memory_access_utils.h" + +namespace cg = cooperative_groups; #define MAX_CAP 4 #define MAX_SEQ 2048 @@ -12,74 +19,32 @@ inline __device__ float gelu(const float x) return x * 0.5f * (1.0f + tanhf(sqrt_param * (x + mul_param * x * x * x))); } -__global__ void fused_bias_gelu(float* input, - const float* bias, - int total_count, - int intermediate_size) -{ - float4* input_cast = reinterpret_cast(input); - const float4* bias_cast = reinterpret_cast(bias); - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float4 data = input_cast[offset]; - float4 bias_data = bias_cast[offset % intermediate_size]; - - data.x += bias_data.x; - data.y += bias_data.y; - data.z += bias_data.z; - data.w += bias_data.w; - - data.x = gelu(data.x); - data.y = gelu(data.y); - data.z = gelu(data.z); - data.w = gelu(data.w); - - input_cast[offset] = data; - } -} - -__global__ void fused_bias_gelu(__half* input, - const __half* bias, - int total_count, - int intermediate_size) +/* +In-place gelu(biasAdd(x)) for channels last +*/ +template +__global__ void fused_bias_gelu(T* input, const T* bias, int total_count, int intermediate_size) { -#ifdef HALF_PRECISION_AVAILABLE - - float2* input_cast = reinterpret_cast(input); - const float2* bias_cast = reinterpret_cast(bias); - - int offset = blockIdx.x * blockDim.x + threadIdx.x; + // Input restriction: intermediate_size % vals_per_access == 0 + constexpr int granularity = 16; + constexpr int values_per_access = granularity / sizeof(T); + const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * values_per_access; if (offset < total_count) { - float2 vals_vec = input_cast[offset]; - float2 bias_vec = bias_cast[offset % intermediate_size]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* bias_half = 
reinterpret_cast<__half2*>(&bias_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - low_data.x += low_bias.x; - low_data.y += low_bias.y; - high_data.x += high_bias.x; - high_data.y += high_bias.y; - - low_data.x = gelu(low_data.x); - low_data.y = gelu(low_data.y); - high_data.x = gelu(high_data.x); - high_data.y = gelu(high_data.y); - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - input_cast[offset] = vals_vec; + T data[values_per_access]; + T data_bias[values_per_access]; + mem_access::load_global(data, input + offset); + mem_access::load_global(data_bias, bias + (offset % intermediate_size)); + +#pragma unroll + for (int i = 0; i < values_per_access; i++) { + float data_f = conversion::to(data[i]); + float bias_f = conversion::to(data_bias[i]); + data[i] = conversion::to(gelu(data_f + bias_f)); + } + + mem_access::store_global(input + offset, data); } -#endif } template @@ -89,316 +54,324 @@ void launch_bias_gelu(T* input, int batch_size, hipStream_t stream) { - int total_count = batch_size * (intermediate_size / 4); - int threads = 1024; // intermediate_size / iterations / 4; + constexpr int threads = 1024; + constexpr int granularity = 16; + + const int total_count = batch_size * intermediate_size; + const int elems_per_block = threads * (granularity / sizeof(T)); dim3 block_dims(threads); - dim3 grid_dims(((total_count - 1) / 1024 + 1)); // (batch_size); + dim3 grid_dims((total_count + elems_per_block - 1) / elems_per_block); hipLaunchKernelGGL(( fused_bias_gelu), dim3(grid_dims), dim3(block_dims), 0, stream, - input, bias, total_count, intermediate_size / 4); + input, bias, total_count, intermediate_size); } template void launch_bias_gelu(float*, const float*, int, int, hipStream_t); template void launch_bias_gelu<__half>(__half*, const __half*, 
int, int, hipStream_t); -__global__ void fused_bias_add(float* input, const float* bias, int total_count, int hidden_size) -{ - float4* input_cast = reinterpret_cast(input); - const float4* bias_cast = reinterpret_cast(bias); - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float4 data = input_cast[offset]; - float4 bias_data = bias_cast[offset % hidden_size]; - - data.x += bias_data.x; - data.y += bias_data.y; - data.z += bias_data.z; - data.w += bias_data.w; - - input_cast[offset] = data; - } -} - -__global__ void fused_bias_add(__half* input, const __half* bias, int total_count, int hidden_size) +/* +In-place channels-last bias add +*/ +template +__global__ void fused_bias_add(T* input, const T* bias, int total_count, int intermediate_size) { -#ifdef HALF_PRECISION_AVAILABLE - - float2* input_cast = reinterpret_cast(input); - const float2* bias_cast = reinterpret_cast(bias); - - int offset = blockIdx.x * blockDim.x + threadIdx.x; + // Input restriction: intermediate_size % vals_per_access == 0 + constexpr int granularity = 16; + constexpr int values_per_access = granularity / sizeof(T); + const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * values_per_access; if (offset < total_count) { - float2 vals_vec = input_cast[offset]; - float2 bias_vec = bias_cast[offset % hidden_size]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - low_data.x += low_bias.x; - low_data.y += low_bias.y; - high_data.x += high_bias.x; - high_data.y += high_bias.y; - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - input_cast[offset] = vals_vec; + T data[values_per_access]; + T data_bias[values_per_access]; + 
mem_access::load_global(data, input + offset); + mem_access::load_global(data_bias, bias + (offset % intermediate_size)); + +#pragma unroll + for (int i = 0; i < values_per_access; i++) { + float data_f = conversion::to(data[i]); + float bias_f = conversion::to(data_bias[i]); + data[i] = conversion::to(data_f + bias_f); + } + + mem_access::store_global(input + offset, data); } -#endif } template -void launch_bias_add(T* input, const T* bias, int hidden_size, int batch_size, hipStream_t stream) +void launch_bias_add(T* input, + const T* bias, + int intermediate_size, + int batch_size, + hipStream_t stream) { - int total_count = batch_size * (hidden_size / 4); - int threads = 1024; // hidden_size / iterations / 4; + constexpr int threads = 1024; + constexpr int granularity = 16; + + const int total_count = batch_size * intermediate_size; + const int elems_per_block = threads * (granularity / sizeof(T)); dim3 block_dims(threads); - dim3 grid_dims(((total_count - 1) / threads + 1)); // (batch_size); + dim3 grid_dims((total_count + elems_per_block - 1) / elems_per_block); - hipLaunchKernelGGL(( fused_bias_add), dim3(grid_dims), dim3(block_dims), 0, stream, input, bias, total_count, hidden_size / 4); + hipLaunchKernelGGL(( fused_bias_add), dim3(grid_dims), dim3(block_dims), 0, stream, + input, bias, total_count, intermediate_size); } template void launch_bias_add(float*, const float*, int, int, hipStream_t); template void launch_bias_add<__half>(__half*, const __half*, int, int, hipStream_t); -__global__ void fused_bias_residual(float* input, - float* output, - float* attn, - float* bias, - float* attnbias, - int total_count, - int intermediate_size, - int mp_size) +__global__ void fused_bias_residual(float* residual, + const float* hidden_state, + const float* attn, + const float* bias, + const float* attn_bias, + const int total_count, + const int intermediate_size, + const float mp_scale, + const bool preln) { - float4* input_cast = reinterpret_cast(input); - float4* 
output_cast = reinterpret_cast(output); - float4* attn_cast = reinterpret_cast(attn); - float4* bias_cast = reinterpret_cast(bias); - float4* attnbias_cast = reinterpret_cast(attnbias); - int offset = blockIdx.x * blockDim.x + threadIdx.x; + float4* res_fl4_ptr = reinterpret_cast(residual); + const float4* hs_fl4_ptr = reinterpret_cast(hidden_state); + const float4* attn_fl4_ptr = reinterpret_cast(attn); + const float4* bias_fl4_ptr = reinterpret_cast(bias); + const float4* attn_bias_fl4_ptr = reinterpret_cast(attn_bias); + const int offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset < total_count) { - float4 data = input_cast[offset]; - float4 out = output_cast[offset]; - float4 res_vec = attn_cast[offset]; - float4 bias_data = bias_cast[offset % intermediate_size]; - float4 attn_bias = attnbias_cast[offset % intermediate_size]; - - data.x = (data.x + res_vec.x) * mp_size + (out.x + bias_data.x + attn_bias.x); - data.y = (data.y + res_vec.y) * mp_size + (out.y + bias_data.y + attn_bias.y); - data.z = (data.z + res_vec.z) * mp_size + (out.z + bias_data.z + attn_bias.z); - data.w = (data.w + res_vec.w) * mp_size + (out.w + bias_data.w + attn_bias.w); - - output_cast[offset] = data; + float4 res_fl4 = res_fl4_ptr[offset]; + const float4 hs_fl4 = hs_fl4_ptr[offset]; + const float4 attn_fl4 = attn_fl4_ptr[offset]; + const float4 bias_fl4 = bias_fl4_ptr[offset % intermediate_size]; + const float4 attn_bias_fl4 = attn_bias_fl4_ptr[offset % intermediate_size]; + if (preln) { + // residual = (residual + attention + bias + attention_bias) * + // mp_scale + hidden_state + res_fl4.x = + (res_fl4.x + attn_fl4.x + bias_fl4.x + attn_bias_fl4.x) * mp_scale + (hs_fl4.x); + res_fl4.y = + (res_fl4.y + attn_fl4.y + bias_fl4.y + attn_bias_fl4.y) * mp_scale + (hs_fl4.y); + res_fl4.z = + (res_fl4.z + attn_fl4.z + bias_fl4.z + attn_bias_fl4.z) * mp_scale + (hs_fl4.z); + res_fl4.w = + (res_fl4.w + attn_fl4.w + bias_fl4.w + attn_bias_fl4.w) * mp_scale + (hs_fl4.w); + } else { + // 
residual += hidden_state + bias + res_fl4.x = res_fl4.x + hs_fl4.x + bias_fl4.x; + res_fl4.y = res_fl4.y + hs_fl4.y + bias_fl4.y; + res_fl4.z = res_fl4.z + hs_fl4.z + bias_fl4.z; + res_fl4.w = res_fl4.w + hs_fl4.w + bias_fl4.w; + } + res_fl4_ptr[offset] = res_fl4; } } -__global__ void fused_bias_residual(__half* input, - __half* output, - __half* attn, - __half* bias, - __half* attn_bias, - int total_count, - int intermediate_size, - int mp_size) +__global__ void fused_bias_residual(__half* residual, + const __half* hidden_state, + const __half* attn, + const __half* bias, + const __half* attn_bias, + const int total_count, + const int intermediate_size, + const float mp_scale, + const bool preln) { -#ifdef HALF_PRECISION_AVAILABLE - - float2* input_cast = reinterpret_cast(input); - float2* output_cast = reinterpret_cast(output); - float2* attn_cast = reinterpret_cast(attn); - - float2* bias_cast = reinterpret_cast(bias); - float2* attnbias_cast = reinterpret_cast(attn_bias); - - int offset = blockIdx.x * blockDim.x + threadIdx.x; + float2* res_fl2_ptr = reinterpret_cast(residual); + const float2* hs_fl2_ptr = reinterpret_cast(hidden_state); + const float2* attn_fl2_ptr = reinterpret_cast(attn); + const float2* bias_fl2_ptr = reinterpret_cast(bias); + const float2* attn_bias_fl2_ptr = reinterpret_cast(attn_bias); + const int offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset < total_count) { - float2 vals_vec = input_cast[offset]; - float2 out_vec = output_cast[offset]; - float2 res_vec = attn_cast[offset]; - - float2 bias_vec = bias_cast[offset % intermediate_size]; - float2 attn_bias_vec = attnbias_cast[offset % intermediate_size]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* out_half = reinterpret_cast<__half2*>(&out_vec); - __half2* res_half = reinterpret_cast<__half2*>(&res_vec); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - __half2* attnbias_half = reinterpret_cast<__half2*>(&attn_bias_vec); - - float2 
low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - float2 low_out = __half22float2(out_half[0]); - float2 high_out = __half22float2(out_half[1]); - - float2 low_res = __half22float2(res_half[0]); - float2 high_res = __half22float2(res_half[1]); - - float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - float2 attn_low_bias = __half22float2(attnbias_half[0]); - float2 attn_high_bias = __half22float2(attnbias_half[1]); - - low_data.x = - (low_data.x + low_res.x) * mp_size + (low_out.x + (low_bias.x + attn_low_bias.x)); - low_data.y = - (low_data.y + low_res.y) * mp_size + (low_out.y + (low_bias.y + attn_low_bias.y)); - high_data.x = - (high_data.x + high_res.x) * mp_size + (high_out.x + (high_bias.x + attn_high_bias.x)); - high_data.y = - (high_data.y + high_res.y) * mp_size + (high_out.y + (high_bias.y + attn_high_bias.y)); - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - output_cast[offset] = vals_vec; + float2 res_fl2 = res_fl2_ptr[offset]; + const float2 hs_fl2 = hs_fl2_ptr[offset]; + const float2 attn_fl2 = attn_fl2_ptr[offset]; + const float2 bias_fl2 = bias_fl2_ptr[offset % intermediate_size]; + const float2 attn_bias_fl2 = attn_bias_fl2_ptr[offset % intermediate_size]; + + __half2* res_half2 = reinterpret_cast<__half2*>(&res_fl2); + const __half2* hs_half2 = reinterpret_cast(&hs_fl2); + const __half2* attn_half2 = reinterpret_cast(&attn_fl2); + const __half2* bias_half2 = reinterpret_cast(&bias_fl2); + const __half2* attn_bias_half2 = reinterpret_cast(&attn_bias_fl2); + + float2 res_low = __half22float2(res_half2[0]); + float2 res_high = __half22float2(res_half2[1]); + + const float2 hs_low = __half22float2(hs_half2[0]); + const float2 hs_high = __half22float2(hs_half2[1]); + + const float2 attn_low = __half22float2(attn_half2[0]); + const float2 attn_high = __half22float2(attn_half2[1]); + + const float2 bias_low = 
__half22float2(bias_half2[0]); + const float2 bias_high = __half22float2(bias_half2[1]); + + const float2 attn_bias_low = __half22float2(attn_bias_half2[0]); + const float2 attn_bias_high = __half22float2(attn_bias_half2[1]); + + if (preln) { + // residual = (residual + attention + bias + attention_bias) * + // mp_scale + hidden_state + res_low.x = + (res_low.x + attn_low.x + bias_low.x + attn_bias_low.x) * mp_scale + hs_low.x; + res_low.y = + (res_low.y + attn_low.y + bias_low.y + attn_bias_low.y) * mp_scale + hs_low.y; + res_high.x = + (res_high.x + attn_high.x + bias_high.x + attn_bias_high.x) * mp_scale + hs_high.x; + res_high.y = + (res_high.y + attn_high.y + bias_high.y + attn_bias_high.y) * mp_scale + hs_high.y; + } else { + // residual += hidden_state + bias + res_low.x = (res_low.x + hs_low.x + bias_low.x); + res_low.y = (res_low.y + hs_low.y + bias_low.y); + res_high.x = (res_high.x + hs_high.x + bias_high.x); + res_high.y = (res_high.y + hs_high.y + bias_high.y); + } + res_half2[0] = __float22half2_rn(res_low); + res_half2[1] = __float22half2_rn(res_high); + + res_fl2_ptr[offset] = res_fl2; } -#endif } template -void launch_bias_residual(T* input, - T* output, +void launch_bias_residual(T* residual, + T* hidden_state, T* attn, T* bias, T* attn_bias, int batch, int hidden_dim, int mp_size, + bool preln, hipStream_t stream) { int total_count = batch * hidden_dim / 4; dim3 block_dims(1024); dim3 grid_dims((total_count - 1) / 1024 + 1); // (batch_size); - hipLaunchKernelGGL(( fused_bias_residual), dim3(grid_dims), dim3(block_dims), 0, stream, - input, output, attn, bias, attn_bias, total_count, hidden_dim / 4, 1.0 / mp_size); + hipLaunchKernelGGL(( fused_bias_residual), dim3(grid_dims), dim3(block_dims), 0, stream, residual, + hidden_state, + attn, + bias, + attn_bias, + total_count, + hidden_dim / 4, + 1.0 / mp_size, + preln); } -template void -launch_bias_residual(float*, float*, float*, float*, float*, int, int, int, hipStream_t); -template void 
launch_bias_residual<__half>(__half*, - __half*, - __half*, - __half*, - __half*, - int, - int, - int, - hipStream_t); - -__global__ void gptj_residual_add(float* input, - float* output, - float* attn, - float* bias, - float* attnbias, - int total_count, - int intermediate_size, - float mp_size) +template void launch_bias_residual< + float>(float*, float*, float*, float*, float*, int, int, int, bool, hipStream_t); +template void launch_bias_residual< + __half>(__half*, __half*, __half*, __half*, __half*, int, int, int, bool, hipStream_t); + +__global__ void gptj_residual_add(float* residual, + const float* hidden_state, + const float* attn, + const float* bias, + const float* attn_bias, + const int total_count, + const int intermediate_size, + const float mp_scale) { - float4* input_cast = reinterpret_cast(input); - float4* output_cast = reinterpret_cast(output); - float4* attn_cast = reinterpret_cast(attn); - float4* bias_cast = reinterpret_cast(bias); - float4* attnbias_cast = reinterpret_cast(attnbias); - int offset = blockIdx.x * blockDim.x + threadIdx.x; + float4* res_fl4_ptr = reinterpret_cast(residual); + const float4* hs_fl4_ptr = reinterpret_cast(hidden_state); + const float4* attn_fl4_ptr = reinterpret_cast(attn); + const float4* bias_fl4_ptr = reinterpret_cast(bias); + const float4* attn_bias_fl4_ptr = reinterpret_cast(attn_bias); + const int offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset < total_count) { - float4 data = input_cast[offset]; - float4 out = output_cast[offset]; - float4 res_vec = attn_cast[offset]; - float4 bias_data = bias_cast[offset % intermediate_size]; - float4 attn_bias = attnbias_cast[offset % intermediate_size]; - - data.x = data.x * mp_size + (out.x + res_vec.x + bias_data.x + attn_bias.x); - data.y = data.y * mp_size + (out.y + res_vec.y + bias_data.y + attn_bias.y); - data.z = data.z * mp_size + (out.z + res_vec.z + bias_data.z + attn_bias.z); - data.w = data.w * mp_size + (out.w + res_vec.w + bias_data.w + 
attn_bias.w); - - output_cast[offset] = data; + float4 res_fl4 = res_fl4_ptr[offset]; + const float4 hs_fl4 = hs_fl4_ptr[offset]; + const float4 attn_fl4 = attn_fl4_ptr[offset]; + const float4 bias_fl4 = bias_fl4_ptr[offset % intermediate_size]; + + if (attn_bias) { + float4 attn_bias_fl4 = attn_bias_fl4_ptr[offset % intermediate_size]; + // residual += attention_bias + res_fl4.x += attn_bias_fl4.x; + res_fl4.y += attn_bias_fl4.y; + res_fl4.z += attn_bias_fl4.z; + res_fl4.w += attn_bias_fl4.w; + } + // residual = hidden_state + attention + (residual + bias) * mp_scale + res_fl4.x = hs_fl4.x + attn_fl4.x + (res_fl4.x + bias_fl4.x) * mp_scale; + res_fl4.y = hs_fl4.y + attn_fl4.y + (res_fl4.y + bias_fl4.y) * mp_scale; + res_fl4.z = hs_fl4.z + attn_fl4.z + (res_fl4.z + bias_fl4.z) * mp_scale; + res_fl4.w = hs_fl4.w + attn_fl4.w + (res_fl4.w + bias_fl4.w) * mp_scale; + + res_fl4_ptr[offset] = res_fl4; } } -__global__ void gptj_residual_add(__half* input, - __half* output, - __half* attn, - __half* bias, - __half* attn_bias, - int total_count, - int intermediate_size, - float mp_size) +__global__ void gptj_residual_add(__half* residual, + const __half* hidden_state, + const __half* attn, + const __half* bias, + const __half* attn_bias, + const int total_count, + const int intermediate_size, + const float mp_scale) { -#if __CUDA_ARCH__ >= 700 || defined(__HIP_PLATFORM_HCC__) - - float2* input_cast = reinterpret_cast(input); - float2* output_cast = reinterpret_cast(output); - float2* attn_cast = reinterpret_cast(attn); - - float2* bias_cast = reinterpret_cast(bias); - float2* attnbias_cast = reinterpret_cast(attn_bias); - - int offset = blockIdx.x * blockDim.x + threadIdx.x; + float2* res_fl2_ptr = reinterpret_cast(residual); + const float2* hs_fl2_ptr = reinterpret_cast(hidden_state); + const float2* attn_fl2_ptr = reinterpret_cast(attn); + const float2* bias_fl2_ptr = reinterpret_cast(bias); + const float2* attn_bias_fl2_ptr = reinterpret_cast(attn_bias); + const int 
offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset < total_count) { - float2 vals_vec = input_cast[offset]; - float2 out_vec = output_cast[offset]; - float2 res_vec = attn_cast[offset]; - - float2 bias_vec = bias_cast[offset % intermediate_size]; - float2 attn_bias_vec = attnbias_cast[offset % intermediate_size]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* out_half = reinterpret_cast<__half2*>(&out_vec); - __half2* res_half = reinterpret_cast<__half2*>(&res_vec); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - __half2* attnbias_half = reinterpret_cast<__half2*>(&attn_bias_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - float2 low_out = __half22float2(out_half[0]); - float2 high_out = __half22float2(out_half[1]); - - float2 low_res = __half22float2(res_half[0]); - float2 high_res = __half22float2(res_half[1]); - - float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - float2 attn_low_bias = __half22float2(attnbias_half[0]); - float2 attn_high_bias = __half22float2(attnbias_half[1]); - - low_data.x = - low_data.x * mp_size + (low_out.x + low_res.x + (low_bias.x + attn_low_bias.x)); - low_data.y = - low_data.y * mp_size + (low_out.y + low_res.y + (low_bias.y + attn_low_bias.y)); - high_data.x = - high_data.x * mp_size + (high_out.x + high_res.x + (high_bias.x + attn_high_bias.x)); - high_data.y = - high_data.y * mp_size + (high_out.y + high_res.y + (high_bias.y + attn_high_bias.y)); - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - output_cast[offset] = vals_vec; + float2 res_fl2 = res_fl2_ptr[offset]; + const float2 hs_fl2 = hs_fl2_ptr[offset]; + const float2 attn_fl2 = attn_fl2_ptr[offset]; + const float2 bias_fl2 = bias_fl2_ptr[offset % intermediate_size]; + + __half2* res_half2 = reinterpret_cast<__half2*>(&res_fl2); + const __half2* hs_half2 = 
reinterpret_cast(&hs_fl2); + const __half2* attn_half2 = reinterpret_cast(&attn_fl2); + const __half2* bias_half2 = reinterpret_cast(&bias_fl2); + + float2 res_low = __half22float2(res_half2[0]); + float2 res_high = __half22float2(res_half2[1]); + + const float2 hs_low = __half22float2(hs_half2[0]); + const float2 hs_high = __half22float2(hs_half2[1]); + + const float2 attn_low = __half22float2(attn_half2[0]); + const float2 attn_high = __half22float2(attn_half2[1]); + + const float2 bias_low = __half22float2(bias_half2[0]); + const float2 bias_high = __half22float2(bias_half2[1]); + + if (attn_bias) { + const float2 attn_bias_fl2 = attn_bias_fl2_ptr[offset % intermediate_size]; + const __half2* attn_bias_half2 = reinterpret_cast(&attn_bias_fl2); + const float2 attn_bias_low = __half22float2(attn_bias_half2[0]); + const float2 attn_bias_high = __half22float2(attn_bias_half2[1]); + // residual += attention_bias + res_low.x += attn_bias_low.x; + res_low.y += attn_bias_low.y; + res_high.x += attn_bias_high.x; + res_high.y += attn_bias_high.y; + } + // residual = hidden_state + attention + (residual + bias) * mp_scale + res_low.x = attn_low.x + hs_low.x + (res_low.x + bias_low.x) * mp_scale; + res_low.y = attn_low.y + hs_low.y + (res_low.y + bias_low.y) * mp_scale; + res_high.x = attn_high.x + hs_high.x + (res_high.x + bias_high.x) * mp_scale; + res_high.y = attn_high.y + hs_high.y + (res_high.y + bias_high.y) * mp_scale; + + res_half2[0] = __float22half2_rn(res_low); + res_half2[1] = __float22half2_rn(res_high); + + res_fl2_ptr[offset] = res_fl2; } -#endif } template -void launch_gptj_residual_add(T* input, - T* output, +void launch_gptj_residual_add(T* residual, + T* hidden_state, T* attn, T* bias, T* attn_bias, @@ -412,7 +385,7 @@ void launch_gptj_residual_add(T* input, dim3 grid_dims((total_count - 1) / 1024 + 1); // (batch_size); hipLaunchKernelGGL(( gptj_residual_add), dim3(grid_dims), dim3(block_dims), 0, stream, - input, output, attn, bias, attn_bias, 
total_count, hidden_dim / 4, 1.0 / mp_size); + residual, hidden_state, attn, bias, attn_bias, total_count, hidden_dim / 4, 1.0 / mp_size); } template void launch_gptj_residual_add(float*, @@ -433,69 +406,33 @@ template void launch_gptj_residual_add<__half>(__half*, int, int, hipStream_t); - -__global__ void moe_res_matmul(float* residual, - float* coef, - float* mlp_out, - int seq_len, - int hidden_dim) -{ - unsigned tid = threadIdx.x; - float4* residual_cast = reinterpret_cast(residual); - float4* coef_cast = reinterpret_cast(coef); - float4* mlp_out_cast = reinterpret_cast(mlp_out); - - residual_cast += blockIdx.x * hidden_dim; - mlp_out_cast += blockIdx.x * hidden_dim; - - float4* coef_cast2 = coef_cast + hidden_dim; - - while (tid < hidden_dim) { - float4 res = residual_cast[tid]; - float4 mlp = mlp_out_cast[tid]; - float4 coef1 = coef_cast[tid]; - float4 coef2 = coef_cast2[tid]; - mlp.x = mlp.x * coef2.x + res.x * coef1.x; - mlp.y = mlp.y * coef2.y + res.y * coef1.y; - mlp.z = mlp.z * coef2.z + res.z * coef1.z; - mlp.w = mlp.w * coef2.w + res.w * coef1.w; - mlp_out_cast[tid] = mlp; - tid += blockDim.x; - } -} - -__global__ void moe_res_matmul(__half* residual, - __half* coef, - __half* mlp_out, - int seq_len, - int hidden_dim) +template +__global__ void moe_res_matmul(T* residual, T* coef, T* mlp_out, int seq_len, int hidden_dim) { - unsigned tid = threadIdx.x; - - float2* residual_cast = reinterpret_cast(residual); - float2* mlp_out_cast = reinterpret_cast(mlp_out); - float2* coef_cast = reinterpret_cast(coef); - float2* coef_cast2 = coef_cast + hidden_dim; - - residual_cast += blockIdx.x * hidden_dim; - mlp_out_cast += blockIdx.x * hidden_dim; - - while (tid < hidden_dim) { - float2 res = residual_cast[tid]; - float2 coef1 = coef_cast[tid]; - float2 coef2 = coef_cast[tid]; - float2 data = mlp_out_cast[tid]; - __half* data_h = reinterpret_cast<__half*>(&data); - __half* coef1_h = reinterpret_cast<__half*>(&coef1); - __half* coef2_h = 
reinterpret_cast<__half*>(&coef2); - __half* res_h = reinterpret_cast<__half*>(&res); - data_h[0] = res_h[0] * coef1_h[0] + data_h[0] * coef2_h[0]; - data_h[1] = res_h[1] * coef1_h[1] + data_h[1] * coef2_h[1]; - data_h[2] = res_h[2] * coef1_h[2] + data_h[2] * coef2_h[2]; - data_h[3] = res_h[3] * coef1_h[3] + data_h[3] * coef2_h[3]; - - mlp_out_cast[tid] = data; - tid += blockDim.x; + constexpr int granularity = 16; + constexpr int vals_per_access = granularity / sizeof(T); + + T* residual_seq = residual + blockIdx.x * hidden_dim; + T* mlp_out_seq = mlp_out + blockIdx.x * hidden_dim; + + for (unsigned tid = threadIdx.x * vals_per_access; tid < hidden_dim; + tid += blockDim.x * vals_per_access) { + T mlp[vals_per_access]; + T res[vals_per_access]; + T coef1[vals_per_access]; + T coef2[vals_per_access]; + + mem_access::load_global(mlp, mlp_out_seq + tid); + mem_access::load_global(res, residual_seq + tid); + mem_access::load_global(coef1, coef + tid); + mem_access::load_global(coef2, coef + tid + hidden_dim); + +#pragma unroll + for (int idx = 0; idx < vals_per_access; idx++) { + mlp[idx] = mlp[idx] * coef2[idx] + res[idx] * coef1[idx]; + } + + mem_access::store_global(mlp_out_seq + tid, mlp); } } @@ -510,7 +447,7 @@ void launch_moe_res_matmul(T* residual, dim3 grid_dim(seq_len); dim3 block_dim(1024); hipLaunchKernelGGL(( moe_res_matmul), dim3(grid_dim), dim3(block_dim), 0, stream, - residual, coef, mlp_out, seq_len, hidden_dim / 4); + residual, coef, mlp_out, seq_len, hidden_dim); } template void launch_moe_res_matmul(float* residual, @@ -525,3 +462,224 @@ template void launch_moe_res_matmul(__half* residual, int seq_len, int hidden_dim, hipStream_t stream); + +__global__ void pad_data_kernel(__half* padded_output, + __half* output, + int head_size, + int padded_head_size) +{ + float4* padded_output_cast = reinterpret_cast(padded_output); + float4* output_cast = reinterpret_cast(output); + int bid = blockIdx.x * (blockDim.y) + threadIdx.y; + int idx = threadIdx.x; + 
padded_output_cast += (bid * padded_head_size); + output_cast += (bid * head_size); + float4 ZERO; + const __half2 zero_h = __float2half2_rn(0.f); + __half2* ZERO_h = reinterpret_cast<__half2*>(&ZERO); +#pragma unroll + for (int i = 0; i < 4; i++) ZERO_h[i] = zero_h; + if (idx < head_size) + padded_output_cast[idx] = output_cast[idx]; + else + padded_output_cast[idx] = ZERO; +} +__global__ void pad_data_kernel(float* padded_output, + float* output, + int head_size, + int padded_head_size) +{ +} +template +void pad_data(T* padded_output, + T* output, + int bsz, + int head_size, + int padded_head_size, + hipStream_t stream) +{ + dim3 grid_dim((bsz - 1) / 16 + 1); + dim3 block_dim(padded_head_size / 8, 16); + hipLaunchKernelGGL(( pad_data_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, + padded_output, output, head_size / 8, padded_head_size / 8); +} +template void pad_data(__half* padded_output, + __half* output, + int bsz, + int head_size, + int padded_head_size, + hipStream_t stream); +template void pad_data(float* padded_output, + float* output, + int bsz, + int head_size, + int padded_head_size, + hipStream_t stream); + +__global__ void pad_head_seq_kernel(__half* padded_output, + __half* output, + int seq_len, + int padded_seq_len, + int head_size, + int padded_head_size) +{ + float4* padded_output_cast = reinterpret_cast(padded_output); + float4* output_cast = reinterpret_cast(output); + int bsz = blockIdx.x; + int bid = blockIdx.y * (blockDim.y) + threadIdx.y; + int idx = threadIdx.x; + padded_output_cast += (bsz * padded_seq_len + bid) * padded_head_size; + output_cast += (bsz * seq_len + bid) * head_size; + float4 ZERO; + const __half2 zero_h = __float2half2_rn(0.f); + __half2* ZERO_h = reinterpret_cast<__half2*>(&ZERO); +#pragma unroll + for (int i = 0; i < 4; i++) ZERO_h[i] = zero_h; + + if (idx < head_size && bid < seq_len) + padded_output_cast[idx] = output_cast[idx]; + else + padded_output_cast[idx] = ZERO; +} +__global__ void 
pad_head_seq_kernel(float* padded_output, + float* output, + int seq_len, + int padded_seq_len, + int head_size, + int padded_head_size) +{ +} +template +void pad_head_seq(T* padded_output, + T* output, + int bsz, + int seq_len, + int padded_seq_len, + int head_size, + int padded_head_size, + hipStream_t stream) +{ + dim3 grid_dim(bsz, padded_seq_len / 16); + dim3 block_dim(padded_head_size / 8, 16); + hipLaunchKernelGGL(( pad_head_seq_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, + padded_output, output, seq_len, padded_seq_len, head_size / 8, padded_head_size / 8); +} +template void pad_head_seq(__half* padded_output, + __half* output, + int bsz, + int seq_len, + int padded_seq_len, + int head_size, + int padded_head_size, + hipStream_t stream); +template void pad_head_seq(float* padded_output, + float* output, + int bsz, + int seq_len, + int padded_seq_len, + int head_size, + int padded_head_size, + hipStream_t stream); + +// TODO(cmikeh2): evaluate different GeLU performance +__device__ __forceinline__ float old_gelu(float val) +{ + // 1 / sqrt(2) + constexpr float rsqrt_2 = 0.707106769084930419922; + return val * 0.5f * (1.0f + erff(val * rsqrt_2)); +} + +namespace fused_geglu { +constexpr int threads = 256; +constexpr int steps = 2; +constexpr int granularity = 16; +} // namespace fused_geglu + +template +__global__ void fused_bias_geglu(T* output, + const T* activation, + const T* bias, + int base_channels, + int total_elems) +{ + constexpr int T_per_access = fused_geglu::granularity / sizeof(T); + constexpr int T_per_step = T_per_access * fused_geglu::threads; + constexpr int T_per_block = T_per_step * fused_geglu::steps; + + const int id = blockIdx.x * T_per_block + threadIdx.x * T_per_access; + +#pragma unroll + for (int i = 0; i < fused_geglu::steps; i++) { + T activation_buffer_1[T_per_access]; + T activation_buffer_2[T_per_access]; + T bias_buffer_1[T_per_access]; + T bias_buffer_2[T_per_access]; + + const int iter_id = id + T_per_step * i; + if 
(iter_id < total_elems) { + const int channel_id = iter_id % base_channels; + const int seq_id = iter_id / base_channels; + const int seq_offset = seq_id * base_channels * 2; + + mem_access::load_global(activation_buffer_1, + activation + seq_offset + channel_id); + mem_access::load_global( + activation_buffer_2, activation + seq_offset + channel_id + base_channels); + mem_access::load_global(bias_buffer_1, bias + channel_id); + mem_access::load_global(bias_buffer_2, + bias + channel_id + base_channels); + + // Since the GeLU is going to happen at float, might as well + // convert +#pragma unroll + for (int v = 0; v < T_per_access; v++) { + T hidden_state = activation_buffer_1[v] + bias_buffer_1[v]; + T pre_gate = activation_buffer_2[v] + bias_buffer_2[v]; + float gate_f = old_gelu(conversion::to(pre_gate)); + T gate = conversion::to(gate_f); + activation_buffer_1[v] = hidden_state * gate; + } + + mem_access::store_global(output + iter_id, + activation_buffer_1); + } + } +} + +template +void launch_fused_bias_geglu(T* output, + const T* activation, + const T* bias, + int rows, + int elems_per_row, + hipStream_t stream) +{ + /* + Fused bias GEGLU is a variant of the gated activation functions. + The input here is a matrix of [batch, seq_len, 2 * intermediate_dim] + where the second half of the channels act as GeLU gates for the first + half. 
+ */ + + // Re-derive the above figures + constexpr int T_per_access = fused_geglu::granularity / sizeof(T); + constexpr int T_per_step = T_per_access * fused_geglu::threads; + constexpr int T_per_block = T_per_step * fused_geglu::steps; + + const int base_channels = elems_per_row / 2; + const int total_elems = base_channels * rows; + + dim3 block(fused_geglu::threads); + dim3 grid((total_elems + T_per_block - 1) / T_per_block); + + hipLaunchKernelGGL(( fused_bias_geglu), dim3(grid), dim3(block), 0, stream, + output, activation, bias, base_channels, total_elems); +} + +template void launch_fused_bias_geglu(__half*, + const __half*, + const __half*, + int, + int, + hipStream_t); +template void launch_fused_bias_geglu(float*, const float*, const float*, int, int, hipStream_t); diff --git a/csrc/transformer/inference/csrc/layer_norm.cu b/csrc/transformer/inference/csrc/layer_norm.cu new file mode 100644 index 0000000..0607851 --- /dev/null +++ b/csrc/transformer/inference/csrc/layer_norm.cu @@ -0,0 +1,529 @@ +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + +#include "conversion_utils.h" +#include "ds_kernel_utils.h" +#include "inference_cuda_layers.h" +#include "memory_access_utils.h" +#include "reduction_utils.h" + +namespace cg = cooperative_groups; +using rop = reduce::ROpType; + +namespace ln { +constexpr int granularity = 16; +} // namespace ln + +/* +Primary layer norm implementation. Assumes elems_per_row % 8 +is equal to 0. 
+ +Args: + output: buffer for output data + vals: buffer for input data + gamma: gain for normalization + beta: bias for normalization + epsilon: numeric stability + elems_per_row: number of elements each block will normalize +*/ +template +__global__ void fused_ln(T* output, + const T* vals, + const T* gamma, + const T* beta, + float epsilon, + int elems_per_row) +{ + constexpr int T_per_load = ln::granularity / sizeof(T); + + cg::thread_block tb = cg::this_thread_block(); + cg::thread_block_tile warp = cg::tiled_partition(tb); + + // X-dimension of the block + const int block_offset = (tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) + + (tb.thread_index().y * elems_per_row); + const int thread_offset = tb.thread_index().x * T_per_load; + const int base_offset = block_offset + thread_offset; + const int stride = tb.size() * T_per_load; + + float sum = reduce::init(); + + const T* input_base = vals + base_offset; + + T local_buffer[unRoll * T_per_load]; + +#pragma unRoll + for (int i = 0; i < unRoll; i++) { + T* iteration_buffer = local_buffer + i * T_per_load; + T residual_buffer[T_per_load]; + T bias_buffer[T_per_load]; + + mem_access::load_global( + iteration_buffer, input_base + i * stride, thread_offset + i * stride < elems_per_row); + +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + float vals_up_cast = conversion::to(iteration_buffer[j]); + sum = reduce::element(sum, vals_up_cast); + } + } + + reduce::partitioned_block(tb, warp, sum); + const float mean = sum / elems_per_row; + + float mean_diff = reduce::init(); + +#pragma unRoll + for (int i = 0; i < unRoll; i++) { +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + // Using a 0 value here skews the variance, have to if-guard + if (thread_offset + i * stride < elems_per_row) { + float diff = (conversion::to(local_buffer[i * T_per_load + j]) - mean); + mean_diff = reduce::element(mean_diff, diff * diff); + } + } + } + + reduce::partitioned_block(tb, warp, mean_diff); + 
const float variance = mean_diff / elems_per_row; + const float denom = __frsqrt_rn(variance + epsilon); + + const T mean_compute = conversion::to(mean); + const T denom_compute = conversion::to(denom); + + T* block_output = output + block_offset; + +#pragma unRoll + for (int i = 0; i < unRoll; i++) { + T* iteration_buffer = local_buffer + i * T_per_load; + const int iter_idx = i * stride + thread_offset; + const bool do_loads = iter_idx < elems_per_row; + + T gamma_local[T_per_load], beta_local[T_per_load]; + + mem_access::load_global(gamma_local, gamma + iter_idx, do_loads); + mem_access::load_global(beta_local, beta + iter_idx, do_loads); + +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + iteration_buffer[j] = (iteration_buffer[j] - mean_compute) * denom_compute; + iteration_buffer[j] = iteration_buffer[j] * gamma_local[j] + beta_local[j]; + } + + if (do_loads) { + mem_access::store_global(block_output + iter_idx, iteration_buffer); + } + } +} + +#define LAUNCH_FUSED_LN(unRollFactor, threadsPerGroup, maxThreads) \ + fused_ln \ + <<>>(output, vals, gamma, beta, epsilon, elems_per_row); + +template +void launch_fused_ln(T* output, + const T* vals, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream) +{ + // 8 for __half, 4 for float + constexpr int T_per_load = ln::granularity / sizeof(T); + + constexpr int maxThreads = 256; + + // For Flaoat, unRoll 4, for __half, unRoll 2 + constexpr int internal_unRoll = sizeof(T) == 4 ? 4 : 2; + + const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false; + const int h_per_step = is_subblock_schedule ? 
T_per_load : T_per_load * internal_unRoll; + + // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of + // warp-sized blocks rather than stepping up to 64/96 threads + const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step); + const int threadsPerGroup = (one_step_threads < maxThreads) ? one_step_threads : maxThreads; + + const int groups_per_block_max = + is_subblock_schedule ? (maxThreads + threadsPerGroup - 1) / threadsPerGroup : 1; + const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max; + const int groups_launch = (groups_per_block + rows - 1) / groups_per_block; + + dim3 block(threadsPerGroup, groups_per_block); + dim3 grid(groups_launch); + + const int elems_per_step = threadsPerGroup * h_per_step; + const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step; + + if (is_subblock_schedule) { + // <=128 + if (threadsPerGroup == 1) { + LAUNCH_FUSED_LN(1, 1, maxThreads); + } else if (threadsPerGroup == 2) { + LAUNCH_FUSED_LN(1, 2, maxThreads); + } else if (threadsPerGroup == 4) { + LAUNCH_FUSED_LN(1, 4, maxThreads); + } else if (threadsPerGroup == 8) { + LAUNCH_FUSED_LN(1, 8, maxThreads); + } else if (threadsPerGroup == 16) { + LAUNCH_FUSED_LN(1, 16, maxThreads); + } + } else if (external_unRoll == 1) { + // 129 - 4096 elems + // (this can launch with 1-7 warps as well) + LAUNCH_FUSED_LN(1 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 2) { + // 4097 - 8192 elems + LAUNCH_FUSED_LN(2 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 3) { + // 8193 - 12288 elems + LAUNCH_FUSED_LN(3 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 4) { + // 12289 - 16384 elems + LAUNCH_FUSED_LN(4 * internal_unRoll, maxThreads, maxThreads); + } +} + +template void launch_fused_ln(__half*, + const __half*, + const __half*, + const __half*, + float, + int, + int, + cudaStream_t); 
+template void +launch_fused_ln(float*, const float*, const float*, const float*, float, int, int, cudaStream_t); + +/* +Fused resiual + bias + layer norm implementation. Assumes elems_per_row % 8 +is equal to 0. + +TODO(cmikeh2): Goal is to deprecate this implementation. The bias + residual +need to be fused into compute-bound producer operations. + +Args: + output: buffer for output data + res_output: output of residual addition + vals: buffer for input data + residual: residual data + bias: bias of of input data + gamma: gain for normalization + beta: bias for normalization + epsilon: numeric stability + elems_per_row: number of elements each block will normalize +Template arg: + StoreResidual: controls whether the residual calculation is stored + or not. When set to false, the input `res_output` is unused. +*/ +template +__global__ void fused_residual_ln(T* output, + T* res_output, + const T* vals, + const T* residual, + const T* bias, + const T* gamma, + const T* beta, + float epsilon, + int elems_per_row) +{ + constexpr int T_per_load = ln::granularity / sizeof(T); + + cg::thread_block tb = cg::this_thread_block(); + cg::thread_block_tile warp = cg::tiled_partition(tb); + + // X-dimension of the block + const int block_offset = (tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) + + (tb.thread_index().y * elems_per_row); + const int thread_offset = tb.thread_index().x * T_per_load; + const int base_offset = block_offset + thread_offset; + const int stride = tb.size() * T_per_load; + + float sum = reduce::init(); + + const T* input_base = vals + base_offset; + const T* residual_base = residual + base_offset; + const T* bias_base = bias + thread_offset; + + T local_buffer[unRoll * T_per_load]; + + // Unlike a vanilla layernorm, since we're fusing the two adds as well + // an inner unRoll seems to be less valuable. If anything, a double unRoll + // makes the most sense if we find we are having performance issues. 
+#pragma unRoll + for (int i = 0; i < unRoll; i++) { + T* iteration_buffer = local_buffer + i * T_per_load; + T residual_buffer[T_per_load]; + T bias_buffer[T_per_load]; + + mem_access::load_global( + iteration_buffer, input_base + i * stride, thread_offset + i * stride < elems_per_row); + mem_access::load_global(residual_buffer, + residual_base + i * stride, + thread_offset + i * stride < elems_per_row); + mem_access::load_global( + bias_buffer, bias_base + i * stride, thread_offset + i * stride < elems_per_row); + +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + float vals_up_cast = conversion::to(iteration_buffer[j]); + float res_up_cast = conversion::to(residual_buffer[j]); + float bias_up_cast = conversion::to(bias_buffer[j]); + vals_up_cast += res_up_cast + bias_up_cast; + sum = reduce::element(sum, vals_up_cast); + iteration_buffer[j] = conversion::to(vals_up_cast); + } + + if (preLnResidual && (thread_offset + i * stride < elems_per_row)) { + mem_access::store_global(res_output + base_offset + i * stride, + iteration_buffer); + } + } + + reduce::partitioned_block(tb, warp, sum); + const float mean = sum / elems_per_row; + + float mean_diff = reduce::init(); +#pragma unRoll + for (int i = 0; i < unRoll; i++) { +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + // Using a 0 value here skews the variance, have to if-guard + if (thread_offset + i * stride < elems_per_row) { + float diff = (conversion::to(local_buffer[i * T_per_load + j]) - mean); + mean_diff = reduce::element(mean_diff, diff * diff); + } + } + } + + reduce::partitioned_block(tb, warp, mean_diff); + const float variance = mean_diff / elems_per_row; + const float denom = __frsqrt_rn(variance + epsilon); + + const T mean_compute = conversion::to(mean); + const T denom_compute = conversion::to(denom); + + T* block_output = output + block_offset; + +#pragma unRoll + for (int i = 0; i < unRoll; i++) { + T* iteration_buffer = local_buffer + i * T_per_load; + const int iter_idx = i * 
stride + thread_offset; + const bool do_loads = iter_idx < elems_per_row; + + T gamma_local[T_per_load], beta_local[T_per_load]; + + mem_access::load_global(gamma_local, gamma + iter_idx, do_loads); + mem_access::load_global(beta_local, beta + iter_idx, do_loads); + +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + iteration_buffer[j] = (iteration_buffer[j] - mean_compute) * denom_compute; + iteration_buffer[j] = iteration_buffer[j] * gamma_local[j] + beta_local[j]; + } + + if (do_loads) { + mem_access::store_global(block_output + iter_idx, iteration_buffer); + } + } +} + +// TODO(cmikeh2): There's a bunch of redundancy here that needs to be removed/simplified. +#define LAUNCH_FUSED_RES_LN(unRollFactor, threadsPerGroup, maxThreads) \ + fused_residual_ln \ + <<>>( \ + output, nullptr, vals, residual, bias, gamma, beta, epsilon, elems_per_row); + +template +void launch_fused_residual_ln(T* output, + const T* vals, + const T* residual, + const T* bias, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream) +{ + // 8 for __half, 4 for float + constexpr int T_per_load = ln::granularity / sizeof(T); + + constexpr int maxThreads = 256; + + // For Flaoat, unRoll 4, for __half, unRoll 2 + constexpr int internal_unRoll = sizeof(T) == 4 ? 4 : 2; + + const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false; + const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internal_unRoll; + + // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of + // warp-sized blocks rather than stepping up to 64/96 threads + const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step); + const int threadsPerGroup = (one_step_threads < maxThreads) ? one_step_threads : maxThreads; + + const int groups_per_block_max = + is_subblock_schedule ? 
(maxThreads + threadsPerGroup - 1) / threadsPerGroup : 1; + const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max; + const int groups_launch = (groups_per_block + rows - 1) / groups_per_block; + + dim3 block(threadsPerGroup, groups_per_block); + dim3 grid(groups_launch); + + const int elems_per_step = threadsPerGroup * h_per_step; + const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step; + + if (is_subblock_schedule) { + // <=128 + if (threadsPerGroup == 1) { + LAUNCH_FUSED_RES_LN(1, 1, maxThreads); + } else if (threadsPerGroup == 2) { + LAUNCH_FUSED_RES_LN(1, 2, maxThreads); + } else if (threadsPerGroup == 4) { + LAUNCH_FUSED_RES_LN(1, 4, maxThreads); + } else if (threadsPerGroup == 8) { + LAUNCH_FUSED_RES_LN(1, 8, maxThreads); + } else if (threadsPerGroup == 16) { + LAUNCH_FUSED_RES_LN(1, 16, maxThreads); + } + } else if (external_unRoll == 1) { + // 129 - 4096 elems + // (this can launch with 1-7 warps as well) + LAUNCH_FUSED_RES_LN(1 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 2) { + // 4097 - 8192 elems + LAUNCH_FUSED_RES_LN(2 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 3) { + // 8193 - 12288 elems + LAUNCH_FUSED_RES_LN(3 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 4) { + // 12289 - 16384 elems + LAUNCH_FUSED_RES_LN(4 * internal_unRoll, maxThreads, maxThreads); + } +} + +#define LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(unRollFactor, threadsPerGroup, maxThreads) \ + fused_residual_ln \ + <<>>( \ + norm_output, res_output, vals, residual, bias, gamma, beta, epsilon, elems_per_row); + +template +void launch_fused_residual_ln_store_pre_ln_res(T* norm_output, + T* res_output, + const T* vals, + const T* residual, + const T* bias, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream) +{ + // 8 for __half, 4 for float + constexpr int T_per_load = ln::granularity 
/ sizeof(T); + + constexpr int maxThreads = 256; + + // For Flaoat, unRoll 4, for __half, unRoll 2 + constexpr int internal_unRoll = sizeof(T) == 4 ? 4 : 2; + + const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false; + const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internal_unRoll; + + // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of + // warp-sized blocks rather than stepping up to 64/96 threads + const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step); + const int threadsPerGroup = (one_step_threads < maxThreads) ? one_step_threads : maxThreads; + + const int groups_per_block_max = + is_subblock_schedule ? (maxThreads + threadsPerGroup - 1) / threadsPerGroup : 1; + const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max; + const int groups_launch = (groups_per_block + rows - 1) / groups_per_block; + + dim3 block(threadsPerGroup, groups_per_block); + dim3 grid(groups_launch); + + const int elems_per_step = threadsPerGroup * h_per_step; + const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step; + + if (is_subblock_schedule) { + // <=128 + if (threadsPerGroup == 1) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 1, maxThreads); + } else if (threadsPerGroup == 2) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 2, maxThreads); + } else if (threadsPerGroup == 4) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 4, maxThreads); + } else if (threadsPerGroup == 8) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 8, maxThreads); + } else if (threadsPerGroup == 16) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 16, maxThreads); + } + } else if (external_unRoll == 1) { + // 129 - 4096 elems + // (this can launch with 1-7 warps as well) + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 2) { + // 4097 - 8192 elems + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(2 * 
internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 3) { + // 8193 - 12288 elems + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(3 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 4) { + // 12289 - 16384 elems + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(4 * internal_unRoll, maxThreads, maxThreads); + } +} + +// No-store specializations +template void launch_fused_residual_ln(__half*, + const __half*, + const __half*, + const __half*, + const __half*, + const __half*, + float, + int, + int, + cudaStream_t); + +template void launch_fused_residual_ln(float*, + const float*, + const float*, + const float*, + const float*, + const float*, + float, + int, + int, + cudaStream_t); + +// Store specializations +template void launch_fused_residual_ln_store_pre_ln_res(__half*, + __half*, + const __half*, + const __half*, + const __half*, + const __half*, + const __half*, + float, + int, + int, + cudaStream_t); + +template void launch_fused_residual_ln_store_pre_ln_res(float*, + float*, + const float*, + const float*, + const float*, + const float*, + const float*, + float, + int, + int, + cudaStream_t); diff --git a/csrc/transformer/inference/csrc/layer_norm.hip b/csrc/transformer/inference/csrc/layer_norm.hip new file mode 100644 index 0000000..7855217 --- /dev/null +++ b/csrc/transformer/inference/csrc/layer_norm.hip @@ -0,0 +1,531 @@ +// !!! This is a file automatically generated by hipify!!! +#include "hip/hip_runtime.h" +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + +#include "conversion_utils.h" +#include "ds_kernel_utils.h" +#include "inference_cuda_layers.h" +#include "memory_access_utils.h" +#include "reduction_utils.h" + +namespace cg = cooperative_groups; +using rop = reduce::ROpType; + +namespace ln { +constexpr int granularity = 16; +} // namespace ln + +/* +Primary layer norm implementation. Assumes elems_per_row % 8 +is equal to 0. 
+ +Args: + output: buffer for output data + vals: buffer for input data + gamma: gain for normalization + beta: bias for normalization + epsilon: numeric stability + elems_per_row: number of elements each block will normalize +*/ +template +__global__ void fused_ln(T* output, + const T* vals, + const T* gamma, + const T* beta, + float epsilon, + int elems_per_row) +{ + constexpr int T_per_load = ln::granularity / sizeof(T); + + cg::thread_block tb = cg::this_thread_block(); + cg::thread_block_tile warp = cg::tiled_partition(tb); + + // X-dimension of the block + const int block_offset = (tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) + + (tb.thread_index().y * elems_per_row); + const int thread_offset = tb.thread_index().x * T_per_load; + const int base_offset = block_offset + thread_offset; + const int stride = tb.size() * T_per_load; + + float sum = reduce::init(); + + const T* input_base = vals + base_offset; + + T local_buffer[unRoll * T_per_load]; + +#pragma unRoll + for (int i = 0; i < unRoll; i++) { + T* iteration_buffer = local_buffer + i * T_per_load; + T residual_buffer[T_per_load]; + T bias_buffer[T_per_load]; + + mem_access::load_global( + iteration_buffer, input_base + i * stride, thread_offset + i * stride < elems_per_row); + +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + float vals_up_cast = conversion::to(iteration_buffer[j]); + sum = reduce::element(sum, vals_up_cast); + } + } + + reduce::partitioned_block(tb, warp, sum); + const float mean = sum / elems_per_row; + + float mean_diff = reduce::init(); + +#pragma unRoll + for (int i = 0; i < unRoll; i++) { +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + // Using a 0 value here skews the variance, have to if-guard + if (thread_offset + i * stride < elems_per_row) { + float diff = (conversion::to(local_buffer[i * T_per_load + j]) - mean); + mean_diff = reduce::element(mean_diff, diff * diff); + } + } + } + + reduce::partitioned_block(tb, warp, mean_diff); + 
const float variance = mean_diff / elems_per_row; + const float denom = __frsqrt_rn(variance + epsilon); + + const T mean_compute = conversion::to(mean); + const T denom_compute = conversion::to(denom); + + T* block_output = output + block_offset; + +#pragma unRoll + for (int i = 0; i < unRoll; i++) { + T* iteration_buffer = local_buffer + i * T_per_load; + const int iter_idx = i * stride + thread_offset; + const bool do_loads = iter_idx < elems_per_row; + + T gamma_local[T_per_load], beta_local[T_per_load]; + + mem_access::load_global(gamma_local, gamma + iter_idx, do_loads); + mem_access::load_global(beta_local, beta + iter_idx, do_loads); + +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + iteration_buffer[j] = (iteration_buffer[j] - mean_compute) * denom_compute; + iteration_buffer[j] = iteration_buffer[j] * gamma_local[j] + beta_local[j]; + } + + if (do_loads) { + mem_access::store_global(block_output + iter_idx, iteration_buffer); + } + } +} + +#define LAUNCH_FUSED_LN(unRollFactor, threadsPerGroup, maxThreads) \ + hipLaunchKernelGGL(( fused_ln) \ + , dim3(grid), dim3(block), 0, stream, output, vals, gamma, beta, epsilon, elems_per_row); + +template +void launch_fused_ln(T* output, + const T* vals, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + hipStream_t stream) +{ + // 8 for __half, 4 for float + constexpr int T_per_load = ln::granularity / sizeof(T); + + constexpr int maxThreads = 256; + + // For Flaoat, unRoll 4, for __half, unRoll 2 + constexpr int internal_unRoll = sizeof(T) == 4 ? 4 : 2; + + const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false; + const int h_per_step = is_subblock_schedule ? 
T_per_load : T_per_load * internal_unRoll; + + // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of + // warp-sized blocks rather than stepping up to 64/96 threads + const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step); + const int threadsPerGroup = (one_step_threads < maxThreads) ? one_step_threads : maxThreads; + + const int groups_per_block_max = + is_subblock_schedule ? (maxThreads + threadsPerGroup - 1) / threadsPerGroup : 1; + const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max; + const int groups_launch = (groups_per_block + rows - 1) / groups_per_block; + + dim3 block(threadsPerGroup, groups_per_block); + dim3 grid(groups_launch); + + const int elems_per_step = threadsPerGroup * h_per_step; + const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step; + + if (is_subblock_schedule) { + // <=128 + if (threadsPerGroup == 1) { + LAUNCH_FUSED_LN(1, 1, maxThreads); + } else if (threadsPerGroup == 2) { + LAUNCH_FUSED_LN(1, 2, maxThreads); + } else if (threadsPerGroup == 4) { + LAUNCH_FUSED_LN(1, 4, maxThreads); + } else if (threadsPerGroup == 8) { + LAUNCH_FUSED_LN(1, 8, maxThreads); + } else if (threadsPerGroup == 16) { + LAUNCH_FUSED_LN(1, 16, maxThreads); + } + } else if (external_unRoll == 1) { + // 129 - 4096 elems + // (this can launch with 1-7 warps as well) + LAUNCH_FUSED_LN(1 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 2) { + // 4097 - 8192 elems + LAUNCH_FUSED_LN(2 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 3) { + // 8193 - 12288 elems + LAUNCH_FUSED_LN(3 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 4) { + // 12289 - 16384 elems + LAUNCH_FUSED_LN(4 * internal_unRoll, maxThreads, maxThreads); + } +} + +template void launch_fused_ln(__half*, + const __half*, + const __half*, + const __half*, + float, + int, + int, + hipStream_t); 
+template void +launch_fused_ln(float*, const float*, const float*, const float*, float, int, int, hipStream_t); + +/* +Fused resiual + bias + layer norm implementation. Assumes elems_per_row % 8 +is equal to 0. + +TODO(cmikeh2): Goal is to deprecate this implementation. The bias + residual +need to be fused into compute-bound producer operations. + +Args: + output: buffer for output data + res_output: output of residual addition + vals: buffer for input data + residual: residual data + bias: bias of of input data + gamma: gain for normalization + beta: bias for normalization + epsilon: numeric stability + elems_per_row: number of elements each block will normalize +Template arg: + StoreResidual: controls whether the residual calculation is stored + or not. When set to false, the input `res_output` is unused. +*/ +template +__global__ void fused_residual_ln(T* output, + T* res_output, + const T* vals, + const T* residual, + const T* bias, + const T* gamma, + const T* beta, + float epsilon, + int elems_per_row) +{ + constexpr int T_per_load = ln::granularity / sizeof(T); + + cg::thread_block tb = cg::this_thread_block(); + cg::thread_block_tile warp = cg::tiled_partition(tb); + + // X-dimension of the block + const int block_offset = (tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) + + (tb.thread_index().y * elems_per_row); + const int thread_offset = tb.thread_index().x * T_per_load; + const int base_offset = block_offset + thread_offset; + const int stride = tb.size() * T_per_load; + + float sum = reduce::init(); + + const T* input_base = vals + base_offset; + const T* residual_base = residual + base_offset; + const T* bias_base = bias + thread_offset; + + T local_buffer[unRoll * T_per_load]; + + // Unlike a vanilla layernorm, since we're fusing the two adds as well + // an inner unRoll seems to be less valuable. If anything, a double unRoll + // makes the most sense if we find we are having performance issues. 
+#pragma unRoll + for (int i = 0; i < unRoll; i++) { + T* iteration_buffer = local_buffer + i * T_per_load; + T residual_buffer[T_per_load]; + T bias_buffer[T_per_load]; + + mem_access::load_global( + iteration_buffer, input_base + i * stride, thread_offset + i * stride < elems_per_row); + mem_access::load_global(residual_buffer, + residual_base + i * stride, + thread_offset + i * stride < elems_per_row); + mem_access::load_global( + bias_buffer, bias_base + i * stride, thread_offset + i * stride < elems_per_row); + +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + float vals_up_cast = conversion::to(iteration_buffer[j]); + float res_up_cast = conversion::to(residual_buffer[j]); + float bias_up_cast = conversion::to(bias_buffer[j]); + vals_up_cast += res_up_cast + bias_up_cast; + sum = reduce::element(sum, vals_up_cast); + iteration_buffer[j] = conversion::to(vals_up_cast); + } + + if (preLnResidual && (thread_offset + i * stride < elems_per_row)) { + mem_access::store_global(res_output + base_offset + i * stride, + iteration_buffer); + } + } + + reduce::partitioned_block(tb, warp, sum); + const float mean = sum / elems_per_row; + + float mean_diff = reduce::init(); +#pragma unRoll + for (int i = 0; i < unRoll; i++) { +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + // Using a 0 value here skews the variance, have to if-guard + if (thread_offset + i * stride < elems_per_row) { + float diff = (conversion::to(local_buffer[i * T_per_load + j]) - mean); + mean_diff = reduce::element(mean_diff, diff * diff); + } + } + } + + reduce::partitioned_block(tb, warp, mean_diff); + const float variance = mean_diff / elems_per_row; + const float denom = __frsqrt_rn(variance + epsilon); + + const T mean_compute = conversion::to(mean); + const T denom_compute = conversion::to(denom); + + T* block_output = output + block_offset; + +#pragma unRoll + for (int i = 0; i < unRoll; i++) { + T* iteration_buffer = local_buffer + i * T_per_load; + const int iter_idx = i * 
stride + thread_offset; + const bool do_loads = iter_idx < elems_per_row; + + T gamma_local[T_per_load], beta_local[T_per_load]; + + mem_access::load_global(gamma_local, gamma + iter_idx, do_loads); + mem_access::load_global(beta_local, beta + iter_idx, do_loads); + +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + iteration_buffer[j] = (iteration_buffer[j] - mean_compute) * denom_compute; + iteration_buffer[j] = iteration_buffer[j] * gamma_local[j] + beta_local[j]; + } + + if (do_loads) { + mem_access::store_global(block_output + iter_idx, iteration_buffer); + } + } +} + +// TODO(cmikeh2): There's a bunch of redundancy here that needs to be removed/simplified. +#define LAUNCH_FUSED_RES_LN(unRollFactor, threadsPerGroup, maxThreads) \ + hipLaunchKernelGGL(( fused_residual_ln) \ + , dim3(grid), dim3(block), 0, stream, \ + output, nullptr, vals, residual, bias, gamma, beta, epsilon, elems_per_row); + +template +void launch_fused_residual_ln(T* output, + const T* vals, + const T* residual, + const T* bias, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + hipStream_t stream) +{ + // 8 for __half, 4 for float + constexpr int T_per_load = ln::granularity / sizeof(T); + + constexpr int maxThreads = 256; + + // For Flaoat, unRoll 4, for __half, unRoll 2 + constexpr int internal_unRoll = sizeof(T) == 4 ? 4 : 2; + + const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false; + const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internal_unRoll; + + // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of + // warp-sized blocks rather than stepping up to 64/96 threads + const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step); + const int threadsPerGroup = (one_step_threads < maxThreads) ? one_step_threads : maxThreads; + + const int groups_per_block_max = + is_subblock_schedule ? 
(maxThreads + threadsPerGroup - 1) / threadsPerGroup : 1; + const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max; + const int groups_launch = (groups_per_block + rows - 1) / groups_per_block; + + dim3 block(threadsPerGroup, groups_per_block); + dim3 grid(groups_launch); + + const int elems_per_step = threadsPerGroup * h_per_step; + const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step; + + if (is_subblock_schedule) { + // <=128 + if (threadsPerGroup == 1) { + LAUNCH_FUSED_RES_LN(1, 1, maxThreads); + } else if (threadsPerGroup == 2) { + LAUNCH_FUSED_RES_LN(1, 2, maxThreads); + } else if (threadsPerGroup == 4) { + LAUNCH_FUSED_RES_LN(1, 4, maxThreads); + } else if (threadsPerGroup == 8) { + LAUNCH_FUSED_RES_LN(1, 8, maxThreads); + } else if (threadsPerGroup == 16) { + LAUNCH_FUSED_RES_LN(1, 16, maxThreads); + } + } else if (external_unRoll == 1) { + // 129 - 4096 elems + // (this can launch with 1-7 warps as well) + LAUNCH_FUSED_RES_LN(1 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 2) { + // 4097 - 8192 elems + LAUNCH_FUSED_RES_LN(2 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 3) { + // 8193 - 12288 elems + LAUNCH_FUSED_RES_LN(3 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 4) { + // 12289 - 16384 elems + LAUNCH_FUSED_RES_LN(4 * internal_unRoll, maxThreads, maxThreads); + } +} + +#define LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(unRollFactor, threadsPerGroup, maxThreads) \ + hipLaunchKernelGGL(( fused_residual_ln) \ + , dim3(grid), dim3(block), 0, stream, \ + norm_output, res_output, vals, residual, bias, gamma, beta, epsilon, elems_per_row); + +template +void launch_fused_residual_ln_store_pre_ln_res(T* norm_output, + T* res_output, + const T* vals, + const T* residual, + const T* bias, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + hipStream_t stream) +{ + // 8 for __half, 4 
for float + constexpr int T_per_load = ln::granularity / sizeof(T); + + constexpr int maxThreads = 256; + + // For Flaoat, unRoll 4, for __half, unRoll 2 + constexpr int internal_unRoll = sizeof(T) == 4 ? 4 : 2; + + const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false; + const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internal_unRoll; + + // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of + // warp-sized blocks rather than stepping up to 64/96 threads + const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step); + const int threadsPerGroup = (one_step_threads < maxThreads) ? one_step_threads : maxThreads; + + const int groups_per_block_max = + is_subblock_schedule ? (maxThreads + threadsPerGroup - 1) / threadsPerGroup : 1; + const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max; + const int groups_launch = (groups_per_block + rows - 1) / groups_per_block; + + dim3 block(threadsPerGroup, groups_per_block); + dim3 grid(groups_launch); + + const int elems_per_step = threadsPerGroup * h_per_step; + const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step; + + if (is_subblock_schedule) { + // <=128 + if (threadsPerGroup == 1) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 1, maxThreads); + } else if (threadsPerGroup == 2) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 2, maxThreads); + } else if (threadsPerGroup == 4) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 4, maxThreads); + } else if (threadsPerGroup == 8) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 8, maxThreads); + } else if (threadsPerGroup == 16) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 16, maxThreads); + } + } else if (external_unRoll == 1) { + // 129 - 4096 elems + // (this can launch with 1-7 warps as well) + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 2) { + // 4097 - 8192 
elems + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(2 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 3) { + // 8193 - 12288 elems + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(3 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 4) { + // 12289 - 16384 elems + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(4 * internal_unRoll, maxThreads, maxThreads); + } +} + +// No-store specializations +template void launch_fused_residual_ln(__half*, + const __half*, + const __half*, + const __half*, + const __half*, + const __half*, + float, + int, + int, + hipStream_t); + +template void launch_fused_residual_ln(float*, + const float*, + const float*, + const float*, + const float*, + const float*, + float, + int, + int, + hipStream_t); + +// Store specializations +template void launch_fused_residual_ln_store_pre_ln_res(__half*, + __half*, + const __half*, + const __half*, + const __half*, + const __half*, + const __half*, + float, + int, + int, + hipStream_t); + +template void launch_fused_residual_ln_store_pre_ln_res(float*, + float*, + const float*, + const float*, + const float*, + const float*, + const float*, + float, + int, + int, + hipStream_t); diff --git a/csrc/transformer/inference/csrc/pt_binding.cpp b/csrc/transformer/inference/csrc/pt_binding.cpp index 320e649..1d68570 100644 --- a/csrc/transformer/inference/csrc/pt_binding.cpp +++ b/csrc/transformer/inference/csrc/pt_binding.cpp @@ -1,39 +1,89 @@ +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ -#include +#include #include +#include #include -#include "context.h" -#include "cublas_wrappers.h" -#include "custom_cuda_layers.h" +#include "inference_context.h" +#include "inference_cublas_wrappers.h" +#include "inference_cuda_layers.h" std::array gemm_algos = std::array({99, 99, 99}); -#define MAX_OUT_TOKES 10 +// NOTE: This activation function type enum should be always in sync +// with the python counterpart, otherwise the casting from python binding +// will be incorrect. 
+enum class ActivationFuncType { UNKNOWN = 0, GELU = 1, ReLU = 2 }; + +enum class TransformerType : uint8_t { UNKNOWN = 0, GPTType = 1, BERTType = 2 }; + +// NOTE: this is a temporary and dodgy solution to distinguish GPT and BERT style models +// based on the dimensions of the corresponding attention mask. +inline auto infer_transformer_type(at::Tensor& attn_mask) -> TransformerType +{ + auto attn_mask_num_dims = attn_mask.sizes().size(); + + if (attn_mask_num_dims > 2) { + return TransformerType::GPTType; + } else if (attn_mask_num_dims == 2) { + return TransformerType::BERTType; + } else { + return TransformerType::UNKNOWN; + } +} + +// infer stride of attention mask memory layout based on the model type. +inline auto get_attn_mask_stride(at::Tensor& attn_mask) -> int +{ + auto trnsfrmr_type = infer_transformer_type(attn_mask); + + if (trnsfrmr_type == TransformerType::GPTType) { + return attn_mask.size(2); + } else if (trnsfrmr_type == TransformerType::BERTType) { + // Bert style models have always a mask stride of 1. + return 1; + } else if (trnsfrmr_type == TransformerType::UNKNOWN) { + return 0; + } + + // this is just to make the compiler happy. 
+ return 0; +} template at::Tensor ds_softmax(at::Tensor& attn_scores, at::Tensor& attn_mask, + at::Tensor& alibi, bool triangular, bool recompute, bool local_attention, int window_size, - bool async_op) + bool async_op, + float layer_scale, + int head_offset, + int mp_size) { auto attn_scores_c = attn_scores.contiguous(); int bsz = attn_scores_c.size(0); int seq_len = attn_scores_c.size(1); int len = attn_scores_c.sizes().size(); - if (len > 3) seq_len = attn_scores_c.size(2); + if (len > 2) seq_len = attn_scores_c.size(2); int soft_len = attn_scores_c.size(2); if (len > 3) soft_len = attn_scores_c.size(3); int heads = 1; - if (len > 3) heads = attn_scores_c.size(1); + if (len > 1) heads = attn_scores_c.size(1); + + auto mask_stride = get_attn_mask_stride(attn_mask); launch_attn_softmax_v2((T*)attn_scores_c.data_ptr(), (attn_mask.sizes().size() > 1 ? (T*)attn_mask.data_ptr() : nullptr), + (alibi.sizes().size() > 1 ? (T*)alibi.data_ptr() : nullptr), + layer_scale, triangular, recompute, local_attention, @@ -42,20 +92,35 @@ at::Tensor ds_softmax(at::Tensor& attn_scores, heads, seq_len, soft_len, - 1.0, + head_offset, + mask_stride, + mp_size, Context::Instance().GetCurrentStream(async_op)); return attn_scores_c; } template -void allocate_workspace(size_t hidden_dim, - size_t max_seq_len, - size_t batch_size, - size_t head_size = 128) +void allocate_workspace(unsigned hidden_dim, + unsigned num_heads, + unsigned prompt_length, + unsigned batch_size, + unsigned num_layers, + unsigned mp_size = 1, + bool external_cache = false, + unsigned rank = 0, + unsigned max_out_tokens = 1024) { - size_t _workSpaceSize = (hidden_dim * batch_size * max_seq_len); - Context::Instance().GenWorkSpace(_workSpaceSize * sizeof(T)); + Context::Instance().GenWorkSpace(num_layers, + num_heads, + batch_size, + prompt_length, + hidden_dim, + mp_size, + external_cache, + sizeof(T), + rank, + max_out_tokens); } template @@ -70,10 +135,13 @@ at::Tensor einsum_sec_sm_ecm(at::Tensor& Q, at::Tensor& 
W) float alpha = 1; float gemm_beta = 0.0; - if (!workspace) { - allocate_workspace(W.size(1), MAX_OUT_TOKES, Q.size(0)); - workspace = (T*)Context::Instance().GetWorkSpace(); + /* + // Reallocate memory if we received a new prompt + if (!workspace || input.size(1) != 1) { + allocate_workspace(W.size(1), Context::Instance().GetMaxTokenLenght(), Q.size(0), 1, + head_size); workspace = (T*)Context::Instance().GetWorkSpace(); } + */ auto O = at::from_blob(workspace, {Q.size(1), Q.size(2), W.size(1)}, options); unsigned m = W.size(1); @@ -123,6 +191,9 @@ void attention_unfused(at::Tensor& prev_key_cont, float gemm_beta = 0.0; auto attn_score = at::empty({bsz, heads, seq_len, soft_len}, options); int k = prev_value_cont.size(2) / heads; + + auto mask_stride = get_attn_mask_stride(attn_mask); + cublasSetStream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); cublas_strided_batched_gemm(Context::Instance().GetCublasHandle(), soft_len, @@ -144,8 +215,22 @@ void attention_unfused(at::Tensor& prev_key_cont, #else CUBLAS_GEMM_DEFAULT_TENSOR_OP); #endif - attn_score = ds_softmax( - attn_score, attn_mask, triangular, recompute, local_attention, window_size, false); + launch_attn_softmax_v2((T*)attn_score.data_ptr(), + (T*)(attn_mask.sizes().size() > 1 ? 
attn_mask.data_ptr() : nullptr), + (T*)nullptr, + 1.0, + triangular, + recompute, + local_attention, + window_size, + bsz, + heads, + seq_len, + soft_len, + 0, + mask_stride, + 1, + Context::Instance().GetCurrentStream(false)); alpha = 1.0; cublas_strided_batched_gemm(Context::Instance().GetCublasHandle(), k, @@ -170,19 +255,19 @@ void attention_unfused(at::Tensor& prev_key_cont, } template -std::vector ds_softmax_context(at::Tensor& query, - at::Tensor& prev_key, - at::Tensor& new_key, - at::Tensor& attn_mask, - at::Tensor& prev_value, - at::Tensor& new_value, - int heads, - float norm_factor, - bool merging, - bool triangular, - bool local_attention, - int window_size, - bool no_masking) +std::vector ds_softmax_context1(at::Tensor& query, + at::Tensor& prev_key, + at::Tensor& new_key, + at::Tensor& attn_mask, + at::Tensor& prev_value, + at::Tensor& new_value, + int heads, + float norm_factor, + bool merging, + bool triangular, + bool local_attention, + int window_size, + bool no_masking) { auto query_cont = query.contiguous(); auto prev_key_cont = prev_key.contiguous(); @@ -222,6 +307,230 @@ std::vector ds_softmax_context(at::Tensor& query, return {output, prev_key, prev_value}; } +template +void ds_softmax_internal(T* attn_scores, + at::Tensor& attn_mask, + at::Tensor& alibi, + float& layer_scale, + bool triangular, + bool recompute, + bool local_attention, + int window_size, + int bsz, + int seq_len, + int soft_len, + int heads) +{ + auto mask_stride = get_attn_mask_stride(attn_mask); + + launch_attn_softmax_v2((T*)attn_scores, + (attn_mask.sizes().size() > 1 ? (T*)attn_mask.data_ptr() : nullptr), + (alibi.sizes().size() > 1 ? 
(T*)alibi.data_ptr() : nullptr), + layer_scale, + triangular, + recompute, + local_attention, + window_size, + bsz, + heads, + seq_len, + soft_len, + 0, + mask_stride, + 1, + at::cuda::getCurrentCUDAStream()); +} + +template +void attention_unfused(T* prev_key_cont, + T* query_cont, + at::Tensor& attn_mask, + T* prev_value_cont, + T* output, + unsigned& bsz, + int& k, + unsigned& seq_len, + unsigned& soft_len, + int& heads, + float& norm_factor, + bool triangular, + bool recompute, + bool local_attention, + int window_size, + at::Tensor& alibi, + int layer_id) +{ + float layer_scale = alibi.sizes().size() > 1 ? std::max(1, layer_id) : 1.0; + float alpha = norm_factor * norm_factor / layer_scale; + float gemm_beta = 0.0; + T* workspace = (T*)Context::Instance().GetAttentionUnfusedWorkspace(); + + cublasSetStream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); + cublas_strided_batched_gemm(Context::Instance().GetCublasHandle(), + soft_len, + seq_len, + k, + &alpha, + &gemm_beta, + (T*)prev_key_cont, + (T*)query_cont, + workspace, + CUBLAS_OP_T, + CUBLAS_OP_N, + Context::Instance().GetMaxTokenLenght() * k, + seq_len * k, + seq_len * soft_len, + bsz * heads, +#ifdef __HIP_PLATFORM_HCC__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + ds_softmax_internal(workspace, + attn_mask, + alibi, + layer_scale, + triangular, + recompute, + local_attention, + window_size, + bsz, + seq_len, + soft_len, + heads); + alpha = 1.0; + cublas_strided_batched_gemm(Context::Instance().GetCublasHandle(), + k, + seq_len, + soft_len, + &alpha, + &gemm_beta, + (T*)prev_value_cont, + workspace, + (T*)output, + CUBLAS_OP_N, + CUBLAS_OP_N, + Context::Instance().GetMaxTokenLenght() * k, + seq_len * soft_len, + seq_len * k, + bsz * heads, +#ifdef __HIP_PLATFORM_HCC__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif +} + +void reset_cache() { Context::Instance().reset_tokens(); } + +template +std::vector 
ds_softmax_context(at::Tensor& query_key_value, + at::Tensor& attn_mask, + int rotary_dim, + bool rotate_half, + bool rotate_every_two, + int heads, + float norm_factor, + bool triangular, + bool local_attention, + int window_size, + bool no_masking, + unsigned layer_id, + unsigned num_layers, + at::Tensor& alibi) +{ + unsigned bsz = query_key_value.size(0); + unsigned seq_len = query_key_value.size(1); + unsigned hidden_dim = query_key_value.size(2) / 3; + + bool is_prompt = (seq_len > 1); + + if (is_prompt) Context::Instance().reset_tokens(seq_len); + unsigned soft_len = Context::Instance().current_tokens(); + + int k = hidden_dim / heads; + auto options = at::TensorOptions() + .dtype(query_key_value.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + + T* workspace = (T*)Context::Instance().GetWorkSpace(); + size_t buf_size = bsz * seq_len * hidden_dim; + auto output = torch::from_blob(workspace + 4 * buf_size, {bsz, seq_len, hidden_dim}, options); + + auto query_cont = workspace + 8 * buf_size; + size_t offset = 16 * (hidden_dim * bsz * Context::Instance().GetMaxTokenLenght()) + + layer_id * 2 * bsz * Context::Instance().GetMaxTokenLenght() * hidden_dim; + unsigned all_tokens = soft_len; + auto kv_cache = workspace + offset + (hidden_dim / heads) * (is_prompt ? 0 : soft_len - 1); + size_t value_offset = bsz * Context::Instance().GetMaxTokenLenght() * hidden_dim; + + T* temp_buf = (T*)output.data_ptr() + at::numel(output); + launch_bias_add_transform_0213((T*)query_cont, + kv_cache, + kv_cache + value_offset, + (T*)query_key_value.data_ptr(), + nullptr, + bsz, + seq_len, + (is_prompt ? 0 : soft_len - 1), + soft_len, + hidden_dim, + heads, + rotary_dim, + rotate_half, + rotate_every_two, + Context::Instance().GetCurrentStream(), + 3, + Context::Instance().GetMaxTokenLenght()); + if (rotary_dim > 0 && rotate_half) + launch_apply_rotary_pos_emb(query_cont, + kv_cache, + k, + seq_len, + rotary_dim, + (is_prompt ? 
0 : soft_len - 1), + heads, + bsz, + rotate_half, + rotate_every_two, + Context::Instance().GetCurrentStream(), + Context::Instance().GetMaxTokenLenght()); + + attention_unfused(workspace + offset, + (T*)query_cont, + attn_mask, + workspace + offset + value_offset, + temp_buf, + bsz, + k, + seq_len, + all_tokens, + heads, + norm_factor, + (triangular && is_prompt), + is_prompt, + local_attention, + window_size, + alibi, + layer_id); + launch_transform4d_0213((T*)output.data_ptr(), + temp_buf, + bsz, + heads, + seq_len, + output.size(2), + Context::Instance().GetCurrentStream(false), + 1); + + if (layer_id == num_layers - 1) Context::Instance().advance_tokens(); + auto prev_key = torch::from_blob(workspace + offset, {bsz, heads, all_tokens, k}, options); + auto prev_value = + torch::from_blob(workspace + offset + value_offset, {bsz, heads, all_tokens, k}, options); + return {output, prev_key, prev_value}; +} + template at::Tensor ds_bias_gelu(at::Tensor& input, at::Tensor& bias) { @@ -238,6 +547,73 @@ at::Tensor ds_bias_gelu(at::Tensor& input, at::Tensor& bias) return input_cont; } +at::Tensor ds_bias_geglu(at::Tensor& activation, at::Tensor& bias) +{ + /* + Used in FF of Stable diffusion + */ + + const int batch_size = activation.size(0); + const int seq_len = activation.size(1); + const int channels = activation.size(2); + + const int rows = batch_size * seq_len; + // Dimensionality is cut in half + const int out_channels = channels / 2; + + auto output = at::empty({batch_size, seq_len, out_channels}, activation.options()); + + if (activation.options().dtype() == torch::kFloat32) { + launch_fused_bias_geglu((float*)output.data_ptr(), + (const float*)activation.data_ptr(), + (const float*)bias.data_ptr(), + rows, + channels, + Context::Instance().GetCurrentStream()); + } else { + launch_fused_bias_geglu((__half*)output.data_ptr(), + (const __half*)activation.data_ptr(), + (const __half*)bias.data_ptr(), + rows, + channels, + Context::Instance().GetCurrentStream()); 
+ } + + return output; +} + +template +at::Tensor ds_bias_relu(at::Tensor& input, at::Tensor& bias) +{ + auto input_cont = input.contiguous(); + + int bsz = input_cont.size(0) * input_cont.size(1); + int intermediate_size = input_cont.size(2); + + launch_bias_relu((T*)input_cont.data_ptr(), + (T*)bias.data_ptr(), + intermediate_size, + bsz, + Context::Instance().GetCurrentStream()); + return input_cont; +} + +template +at::Tensor ds_bias_add(at::Tensor& input, at::Tensor& bias) +{ + auto input_cont = input.contiguous(); + + int bsz = input_cont.size(0) * input_cont.size(1); + int hidden_size = input_cont.size(2); + + launch_bias_add((T*)input_cont.data_ptr(), + (T*)bias.data_ptr(), + hidden_size, + bsz, + Context::Instance().GetCurrentStream()); + return input_cont; +} + template at::Tensor ds_bias_residual(at::Tensor& input, at::Tensor& residual, at::Tensor& bias) { @@ -255,85 +631,260 @@ at::Tensor ds_bias_residual(at::Tensor& input, at::Tensor& residual, at::Tensor& return input_cont; } +at::Tensor ds_layer_norm(at::Tensor& input, at::Tensor& gamma, at::Tensor& beta, float epsilon) +{ + const int rows = input.size(0) * input.size(1); + const int elems_per_row = input.size(2); + auto output = at::empty_like(input); + + if (input.options().dtype() == torch::kFloat16) { + launch_fused_ln((__half*)output.data_ptr(), + (const __half*)input.data_ptr(), + (const __half*)gamma.data_ptr(), + (const __half*)beta.data_ptr(), + epsilon, + rows, + elems_per_row, + Context::Instance().GetCurrentStream()); + } else { + launch_fused_ln((float*)output.data_ptr(), + (const float*)input.data_ptr(), + (const float*)gamma.data_ptr(), + (const float*)beta.data_ptr(), + epsilon, + rows, + elems_per_row, + Context::Instance().GetCurrentStream()); + } + + return output; +} + template -at::Tensor ds_layernorm(at::Tensor& input_cont, at::Tensor& gamma, at::Tensor& betta, float epsilon) +void ds_layer_norm_internal(T* workspace, + at::Tensor& input, + at::Tensor& gamma, + at::Tensor& beta, 
+ float epsilon) { - int bsz = input_cont.size(0) * input_cont.size(1); - auto inp_norm = at::empty_like(input_cont); - launch_layer_norm((T*)inp_norm.data_ptr(), - (T*)input_cont.data_ptr(), - (T*)gamma.data_ptr(), - (T*)betta.data_ptr(), - epsilon, - bsz, - input_cont.size(2), - Context::Instance().GetCurrentStream()); - return inp_norm; + int bsz = input.size(0) * input.size(1); + launch_fused_ln(workspace, + (const T*)input.data_ptr(), + (const T*)gamma.data_ptr(), + (const T*)beta.data_ptr(), + epsilon, + bsz, + input.size(2), + Context::Instance().GetCurrentStream()); +} + +/* Currently only used in unit testing */ +at::Tensor ds_layer_norm_residual(at::Tensor& input, + at::Tensor& bias, + at::Tensor& residual, + at::Tensor& gamma, + at::Tensor& beta, + float epsilon) +{ + const int rows = input.size(0) * input.size(1); + const int elems_per_row = input.size(2); + auto output = at::empty_like(input); + + if (input.options().dtype() == torch::kFloat16) { + launch_fused_residual_ln((__half*)output.data_ptr(), + (const __half*)input.data_ptr(), + (const __half*)residual.data_ptr(), + (const __half*)bias.data_ptr(), + (const __half*)gamma.data_ptr(), + (const __half*)beta.data_ptr(), + epsilon, + rows, + elems_per_row, + Context::Instance().GetCurrentStream()); + } else { + launch_fused_residual_ln((float*)output.data_ptr(), + (const float*)input.data_ptr(), + (const float*)residual.data_ptr(), + (const float*)bias.data_ptr(), + (const float*)gamma.data_ptr(), + (const float*)beta.data_ptr(), + epsilon, + rows, + elems_per_row, + Context::Instance().GetCurrentStream()); + } + + return output; +} + +/* Currently only used in unit testing */ +std::vector ds_layer_norm_residual_store_pre_ln_res(at::Tensor& input, + at::Tensor& bias, + at::Tensor& residual, + at::Tensor& gamma, + at::Tensor& beta, + float epsilon) +{ + const int rows = input.size(0) * input.size(1); + const int elems_per_row = input.size(2); + auto norm_output = at::empty_like(input); + auto 
res_output = at::empty_like(input); + + if (input.options().dtype() == torch::kFloat16) { + launch_fused_residual_ln_store_pre_ln_res((__half*)norm_output.data_ptr(), + (__half*)res_output.data_ptr(), + (const __half*)input.data_ptr(), + (const __half*)residual.data_ptr(), + (const __half*)bias.data_ptr(), + (const __half*)gamma.data_ptr(), + (const __half*)beta.data_ptr(), + epsilon, + rows, + elems_per_row, + Context::Instance().GetCurrentStream()); + } else { + launch_fused_residual_ln_store_pre_ln_res((float*)norm_output.data_ptr(), + (float*)res_output.data_ptr(), + (const float*)input.data_ptr(), + (const float*)residual.data_ptr(), + (const float*)bias.data_ptr(), + (const float*)gamma.data_ptr(), + (const float*)beta.data_ptr(), + epsilon, + rows, + elems_per_row, + Context::Instance().GetCurrentStream()); + } + + return {norm_output, res_output}; } template -at::Tensor qkv_unfused_cublas(at::Tensor& output, - at::Tensor& input, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - bool add_bias) +void quantized_gemm(void* output, + T* input, + at::Tensor& weight, + at::Tensor& qscale, + int groups, + int bsz, + int hidden_size) { - auto inp_norm = ds_layernorm(input, gamma, beta, epsilon); + // T* weight16 = (T*)Context::Instance().GetWorkSpace() + 12 * hidden_size * bsz; - // cudaEventRecord(Context::Instance().GetCompEvent(1), Context::Instance().GetCurrentStream()); + auto options = at::TensorOptions() + .dtype(at::kHalf) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + auto tmp = torch::empty(weight.sizes(), options); + T* weight16 = (T*)tmp.data_ptr(); + launch_dequantize(weight16, + (int8_t*)weight.data_ptr(), + (float*)qscale.data_ptr(), + weight.size(0), + weight.size(1), + groups, + Context::Instance().GetCurrentStream()); float alpha = (T)1.0; float gemm_beta = (T)0.0; - int bsz = input.size(0) * input.size(1); - cublasSetStream(Context::Instance().GetCublasHandle(), 
Context::Instance().GetCurrentStream()); cublas_gemm_ex(Context::Instance().GetCublasHandle(), + CUBLAS_OP_T, CUBLAS_OP_N, - CUBLAS_OP_N, - weight.size(1), + weight.size(0), bsz, - input.size(2), + weight.size(1), &alpha, &gemm_beta, - (T*)weight.data_ptr(), - (T*)inp_norm.data_ptr(), - (T*)output.data_ptr(), + weight16, + (T*)input, + (T*)output, #ifdef __HIP_PLATFORM_HCC__ rocblas_gemm_algo_standard); #else CUBLAS_GEMM_DEFAULT_TENSOR_OP); #endif +} + +template +at::Tensor qkv_unfused_cublas(at::Tensor& output, + at::Tensor& input, + at::Tensor& weight, + at::Tensor& q_scale, + at::Tensor& bias, + at::Tensor& gamma, + at::Tensor& beta, + const float epsilon, + bool add_bias, + bool q_int8) +{ + int bsz = input.size(0) * input.size(1); + T* workspace = (T*)Context::Instance().GetWorkSpace(); + workspace += (3 * bsz * input.size(2)); + ds_layer_norm_internal(workspace, input, gamma, beta, epsilon); + + if (q_int8) { + quantized_gemm( + output.data_ptr(), workspace, weight, q_scale, q_scale.size(0), bsz, input.size(2)); + } else { + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + + cublasSetStream(Context::Instance().GetCublasHandle(), + Context::Instance().GetCurrentStream()); + cublas_gemm_ex(Context::Instance().GetCublasHandle(), + CUBLAS_OP_N, + CUBLAS_OP_N, + weight.size(1), + bsz, + input.size(2), + &alpha, + &gemm_beta, + (T*)weight.data_ptr(), + workspace, + (T*)output.data_ptr(), +#ifdef __HIP_PLATFORM_HCC__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + } if (add_bias) launch_bias_add((T*)output.data_ptr(), (T*)bias.data_ptr(), - weight.size(1), + q_int8 ? 
weight.size(0) : weight.size(1), bsz, Context::Instance().GetCurrentStream()); - return inp_norm; + return torch::from_blob(workspace, input.sizes(), input.options()); } template std::vector ds_qkv_gemm(at::Tensor& input, at::Tensor& weight, + at::Tensor& q_scale, at::Tensor& bias, at::Tensor& gamma, at::Tensor& beta, const float epsilon, - bool add_bias) + bool add_bias, + unsigned num_layers, + bool external_cache, + unsigned mp_size, + unsigned rank, + bool q_int8) { - auto input_cont = input.contiguous(); + int bsz = input.size(0) * input.size(1); + T* workspace = (T*)Context::Instance().GetWorkSpace(); + int out_size = q_int8 ? weight.size(0) : weight.size(1); + auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) + .dtype(input.options().dtype()) .layout(at::kStrided) .device(at::kCUDA) .requires_grad(false); - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); - auto inp_norm = - qkv_unfused_cublas(output, input_cont, weight, bias, gamma, beta, epsilon, add_bias); + auto output = at::from_blob(workspace, {input.size(0), input.size(1), out_size}, options); + auto inp_norm = qkv_unfused_cublas( + output, input, weight, q_scale, bias, gamma, beta, epsilon, add_bias, q_int8); return {output, inp_norm}; } @@ -357,20 +908,18 @@ void quantized_gemm(at::Tensor& output, launch_dequantize((T*)weight16.data_ptr(), (int8_t*)weight.data_ptr(), (float*)qscale.data_ptr(), - weight.size(1), weight.size(0), + weight.size(1), groups, merge_count, Context::Instance().GetCurrentStream()); - cublasSetStream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - float alpha = (T)1.0; float gemm_beta = (T)0.0; cublas_gemm_ex(Context::Instance().GetCublasHandle(), + CUBLAS_OP_T, CUBLAS_OP_N, - CUBLAS_OP_N, - weight.size(1), + weight.size(0), bsz, input.size(2), &alpha, @@ -406,7 +955,7 @@ at::Tensor ds_qkv_gemm_int8(at::Tensor& input, auto 
output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - auto inp_norm = ds_layernorm(input_cont, gamma, beta, epsilon); + auto inp_norm = ds_layer_norm(input_cont, gamma, beta, epsilon); quantized_gemm(output, inp_norm, weight, q_scale, groups, 0); if (add_bias) @@ -420,7 +969,12 @@ at::Tensor ds_qkv_gemm_int8(at::Tensor& input, } template -at::Tensor ds_linear_layer(at::Tensor& input, at::Tensor& weight, at::Tensor& bias) +at::Tensor ds_linear_layer(at::Tensor& input, + at::Tensor& weight, + at::Tensor& bias, + bool add_bias, + bool do_flash_attn, + int num_heads) { auto input_cont = input.contiguous(); auto options = at::TensorOptions() @@ -429,8 +983,10 @@ at::Tensor ds_linear_layer(at::Tensor& input, at::Tensor& weight, at::Tensor& bi .device(at::kCUDA) .requires_grad(false); - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); + int head_size = input_cont.size(2) / num_heads; + int bsz = input.size(0) * input.size(1); + T* workspace = (T*)Context::Instance().GetWorkSpace(); + auto output = at::from_blob(workspace, {input.size(0), input.size(1), weight.size(1)}, options); float alpha = (T)1.0; float gemm_beta = (T)0.0; @@ -452,16 +1008,172 @@ at::Tensor ds_linear_layer(at::Tensor& input, at::Tensor& weight, at::Tensor& bi #else CUBLAS_GEMM_DEFAULT_TENSOR_OP); #endif + if (add_bias) + launch_bias_add((T*)output.data_ptr(), + (T*)bias.data_ptr(), + weight.size(1), + bsz, + Context::Instance().GetCurrentStream()); + bool add_padding = (head_size % 32 != 0 && head_size < 64) || (head_size % 64 != 0); + if (do_flash_attn) { + if (add_padding) { + int padded_head_size = head_size < 32 ? 32 : (head_size < 64 ? 
64 : 128); + auto padded_output = workspace + output.numel(); + auto final_output = + padded_output + (input.size(0) * input.size(1) * 3 * num_heads * padded_head_size); + pad_data(padded_output, + workspace, + 3 * bsz * num_heads, + head_size, + padded_head_size, + Context::Instance().GetCurrentStream()); - launch_bias_add((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); + launch_bias_add_transform_0213( + final_output, + final_output + (input.size(0) * input.size(1) * num_heads * padded_head_size), + final_output + (input.size(0) * input.size(1) * 2 * num_heads * padded_head_size), + padded_output, + nullptr, + input.size(0), + input.size(1), + 0, + input.size(1), + (num_heads * padded_head_size), + num_heads, + -1, + false, + false, + Context::Instance().GetCurrentStream(), + 3, + input.size(1)); + return at::from_blob(final_output, + {3, input.size(0), num_heads, input.size(1), padded_head_size}, + options); + // return at::from_blob(padded_output, {input.size(0) * input.size(1), 3, num_heads, + // padded_head_size}, options); + } else { + auto final_output = workspace + output.numel(); + launch_bias_add_transform_0213( + final_output, + final_output + (input.size(0) * input.size(1) * input_cont.size(2)), + final_output + (input.size(0) * input.size(1) * 2 * input_cont.size(2)), + workspace, + nullptr, + input.size(0), + input.size(1), + 0, + input.size(1), + input_cont.size(2), + num_heads, + -1, + false, + false, + Context::Instance().GetCurrentStream(), + 3, + input.size(1)); + return at::from_blob( + final_output, {3, input.size(0), num_heads, input.size(1), head_size}, options); + // return at::from_blob(workspace, {input.size(0) * input.size(1), 3, num_heads, + // head_size}, options); + } + + } else + return output; +} - return output; +template +std::vector add_padding(at::Tensor& query, at::Tensor& key, at::Tensor& value) +{ + int head_size = query.size(3); + int padded_head_size = 
head_size < 32 ? 32 : (head_size < 64 ? 64 : 128); + T* workspace = (T*)Context::Instance().GetWorkSpace(); + T* key_pad_ptr = workspace + padded_head_size * query.size(0) * query.size(1) * query.size(2); + T* value_pad_ptr = key_pad_ptr + padded_head_size * query.size(0) * query.size(1) * 128; + pad_head_seq(workspace, + (T*)query.data_ptr(), + query.size(0) * query.size(1), + query.size(2), + query.size(2), + head_size, + padded_head_size, + Context::Instance().GetCurrentStream()); + pad_head_seq(key_pad_ptr, + (T*)key.data_ptr(), + query.size(0) * query.size(1), + key.size(2), + 128, + head_size, + padded_head_size, + Context::Instance().GetCurrentStream()); + pad_head_seq(value_pad_ptr, + (T*)value.data_ptr(), + query.size(0) * query.size(1), + key.size(2), + 128, + head_size, + padded_head_size, + Context::Instance().GetCurrentStream()); + return { + at::from_blob(workspace, + {query.size(0), query.size(1), query.size(2), padded_head_size}, + query.options()), + at::from_blob( + key_pad_ptr, {query.size(0), query.size(1), 128, padded_head_size}, query.options()), + at::from_blob( + value_pad_ptr, {query.size(0), query.size(1), 128, padded_head_size}, query.options())}; } +template +std::vector padd_add_transform(at::Tensor& query, + at::Tensor& key, + at::Tensor& value, + int heads, + bool add_padding) +{ + int head_size = query.size(2) / heads; + int key_value_length = add_padding ? 128 : key.size(1); + int padded_head_size = add_padding ? (head_size < 32 ? 32 : (head_size < 64 ? 
64 : 128)) + : head_size; + T* workspace = (T*)Context::Instance().GetWorkSpace(); + T* key_pad_ptr = workspace + padded_head_size * query.size(0) * heads * query.size(1); + T* value_pad_ptr = key_pad_ptr + padded_head_size * query.size(0) * heads * key_value_length; + launch_pad_add_transform_0213(workspace, + (T*)query.data_ptr(), + query.size(0), + query.size(2), + query.size(1), + query.size(1), + heads, + padded_head_size, + Context::Instance().GetCurrentStream()); + launch_pad_add_transform_0213(key_pad_ptr, + (T*)key.data_ptr(), + key.size(0), + key.size(2), + key.size(1), + key_value_length, + heads, + padded_head_size, + Context::Instance().GetCurrentStream()); + launch_pad_add_transform_0213(value_pad_ptr, + (T*)value.data_ptr(), + value.size(0), + value.size(2), + value.size(1), + key_value_length, + heads, + padded_head_size, + Context::Instance().GetCurrentStream()); + return { + at::from_blob( + workspace, {query.size(0), heads, query.size(1), padded_head_size}, query.options()), + at::from_blob(key_pad_ptr, + {query.size(0), heads, key_value_length, padded_head_size}, + query.options()), + at::from_blob(value_pad_ptr, + {query.size(0), heads, key_value_length, padded_head_size}, + query.options())}; +} template at::Tensor ds_linear_layer_int8(at::Tensor& input, at::Tensor& weight, @@ -489,37 +1201,52 @@ at::Tensor ds_linear_layer_int8(at::Tensor& input, } template -at::Tensor ds_vector_matmul(at::Tensor& input, at::Tensor& weight, bool async_op) +at::Tensor ds_vector_matmul(at::Tensor& input, + at::Tensor& weight, + bool async_op, + at::Tensor& q_scale, + bool q_int8) { - auto input_cont = input.contiguous(); auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) + .dtype(input.options().dtype()) .layout(at::kStrided) .device(at::kCUDA) .requires_grad(false); + int out_size = q_int8 ? 
weight.size(0) : weight.size(1); + int bsz = input.size(0) * input.size(1); - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); - float alpha = (T)1.0; - float gemm_beta = (T)0.0; - cublasSetStream(Context::Instance().GetCublasHandle(), - Context::Instance().GetCurrentStream(async_op)); - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - CUBLAS_OP_N, - CUBLAS_OP_N, - weight.size(1), - bsz, - input_cont.size(2), - &alpha, - &gemm_beta, - (T*)weight.data_ptr(), - (T*)input_cont.data_ptr(), - (T*)output.data_ptr(), + T* workspace = (T*)Context::Instance().GetWorkSpace(); + auto output = at::from_blob(workspace, {input.size(0), input.size(1), out_size}, options); + if (q_int8) { + quantized_gemm(output.data_ptr(), + (T*)input.data_ptr(), + weight, + q_scale, + q_scale.size(0), + bsz, + input.size(2)); + } else { + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + cublasSetStream(Context::Instance().GetCublasHandle(), + Context::Instance().GetCurrentStream(async_op)); + cublas_gemm_ex(Context::Instance().GetCublasHandle(), + CUBLAS_OP_N, + CUBLAS_OP_N, + weight.size(1), + bsz, + input.size(2), + &alpha, + &gemm_beta, + (T*)weight.data_ptr(), + (T*)input.data_ptr(), + (T*)output.data_ptr(), #ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); + rocblas_gemm_algo_standard); #else - CUBLAS_GEMM_DEFAULT_TENSOR_OP); + CUBLAS_GEMM_DEFAULT_TENSOR_OP); #endif + } return output; } @@ -544,95 +1271,163 @@ at::Tensor ds_vector_matmul_int8(at::Tensor& input, } template -void mlp_unfused_cublas(at::Tensor& output, - at::Tensor& input, - at::Tensor& residual, - at::Tensor& input_bias, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - bool preLayerNorm, - bool mlp_after_attn) +at::Tensor mlp_unfused_cublas(at::Tensor& output, + at::Tensor& input, + at::Tensor& residual, + at::Tensor& input_bias, + at::Tensor& weight, + 
at::Tensor& weight1, + at::Tensor& bias, + at::Tensor& gamma, + at::Tensor& beta, + const float epsilon, + bool preLayerNorm, + bool mlp_after_attn, + at::Tensor& q_scale, + at::Tensor& q_scale1, + bool q_int8, + ActivationFuncType act_func_type) { int bsz = input.size(0) * input.size(1); - auto inp_norm = at::empty_like(input); - - launch_residual_layer_norm((T*)inp_norm.data_ptr(), - (T*)nullptr, - (T*)input.data_ptr(), - (T*)residual.data_ptr(), - (T*)input_bias.data_ptr(), - (T*)gamma.data_ptr(), - (T*)beta.data_ptr(), - epsilon, - bsz, - input.size(2), - preLayerNorm, - mlp_after_attn, - Context::Instance().GetCurrentStream()); + T* inp_norm = + (T*)Context::Instance().GetWorkSpace() + torch::numel(input) + torch::numel(output); + T* intermediate = inp_norm + torch::numel(input); + + if (mlp_after_attn) { + launch_fused_residual_ln((T*)inp_norm, + (const T*)input.data_ptr(), + (const T*)residual.data_ptr(), + (const T*)input_bias.data_ptr(), + (const T*)gamma.data_ptr(), + (const T*)beta.data_ptr(), + epsilon, + bsz, + input.size(2), + Context::Instance().GetCurrentStream()); + } else { + ds_layer_norm_internal(inp_norm, input, gamma, beta, epsilon); + } + if (q_int8) { + quantized_gemm( + intermediate, inp_norm, weight, q_scale, q_scale.size(0), bsz, input.size(2)); + } else { + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + cublasSetStream(Context::Instance().GetCublasHandle(), + Context::Instance().GetCurrentStream()); + cublas_gemm_ex(Context::Instance().GetCublasHandle(), + CUBLAS_OP_N, + CUBLAS_OP_N, + weight.size(1), + bsz, + input.size(2), + &alpha, + &gemm_beta, + (T*)weight.data_ptr(), + inp_norm, + intermediate, +#ifdef __HIP_PLATFORM_HCC__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + } + if (act_func_type == ActivationFuncType::GELU) { + launch_bias_gelu(intermediate, + (T*)bias.data_ptr(), + q_int8 ? 
weight.size(0) : weight.size(1), + bsz, + Context::Instance().GetCurrentStream()); + } else if (act_func_type == ActivationFuncType::ReLU) { + launch_bias_relu(intermediate, + (T*)bias.data_ptr(), + q_int8 ? weight.size(0) : weight.size(1), + bsz, + Context::Instance().GetCurrentStream()); + } - float alpha = (T)1.0; - float gemm_beta = (T)0.0; - cublasSetStream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - CUBLAS_OP_N, - CUBLAS_OP_N, - weight.size(1), - bsz, - input.size(2), - &alpha, - &gemm_beta, - (T*)weight.data_ptr(), - (T*)inp_norm.data_ptr(), - (T*)output.data_ptr(), + if (q_int8) { + quantized_gemm(output.data_ptr(), + intermediate, + weight1, + q_scale1, + q_scale1.size(0), + bsz, + input.size(2)); + } else { + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + cublasSetStream(Context::Instance().GetCublasHandle(), + Context::Instance().GetCurrentStream()); + cublas_gemm_ex(Context::Instance().GetCublasHandle(), + CUBLAS_OP_N, + CUBLAS_OP_N, + weight1.size(1), + bsz, + weight1.size(0), + &alpha, + &gemm_beta, + (T*)weight1.data_ptr(), + intermediate, + (T*)output.data_ptr(), #ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); + rocblas_gemm_algo_standard); #else - CUBLAS_GEMM_DEFAULT_TENSOR_OP); + CUBLAS_GEMM_DEFAULT_TENSOR_OP); #endif - launch_bias_gelu((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); + } + + return torch::from_blob(inp_norm, input.sizes(), input.options()); } + template -at::Tensor ds_mlp_gemm(at::Tensor& input, - at::Tensor& residual, - at::Tensor& input_bias, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - bool preLayerNorm, - bool mlp_after_attn) +std::vector ds_mlp_gemm(at::Tensor& input, + at::Tensor& residual, + at::Tensor& input_bias, + at::Tensor& weight_interm, + at::Tensor& weight_out, + at::Tensor& bias, 
+ at::Tensor& gamma, + at::Tensor& beta, + const float epsilon, + bool preLayerNorm, + bool mlp_after_attn, + at::Tensor& q_scale, + at::Tensor& q_scale1, + bool q_int8, + int activation_type) { - auto input_cont = input.contiguous(); auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) + .dtype(input.options().dtype()) .layout(at::kStrided) .device(at::kCUDA) .requires_grad(false); - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); - - mlp_unfused_cublas(output, - mlp_after_attn ? input : residual, - residual, - input_bias, - weight, - bias, - gamma, - beta, - epsilon, - preLayerNorm, - mlp_after_attn); + int out_size = q_int8 ? weight_out.size(0) : weight_out.size(1); + auto output = at::from_blob((T*)Context::Instance().GetWorkSpace() + torch::numel(input), + {input.size(0), input.size(1), out_size}, + options); + int bsz = input.size(0) * input.size(1); - return output; + auto act_func_type = static_cast(activation_type); + auto res_add = mlp_unfused_cublas(output, + mlp_after_attn ? input : residual, + residual, + input_bias, + weight_interm, + weight_out, + bias, + gamma, + beta, + epsilon, + preLayerNorm, + mlp_after_attn, + q_scale, + q_scale1, + q_int8, + act_func_type); + + return {output, res_add}; } template @@ -661,20 +1456,6 @@ std::vector ds_mlp_gemm_int8(at::Tensor& input, auto inp_norm = at::empty_like(input_cont); auto residual_add = (preLayerNorm ? 
at::empty_like(input_cont) : inp_norm); - // computing the blocking across K dimension - // launch_residual_layer_norm((T*)inp_norm.data_ptr(), - // (T*)residual_add.data_ptr(), - // (T*)input_cont.data_ptr(), - // (T*)residual.data_ptr(), - // (T*)input_bias.data_ptr(), - // (T*)gamma.data_ptr(), - // (T*)beta.data_ptr(), - // epsilon, - // bsz, - // input_cont.size(2), - // preLayerNorm, - // Context::Instance().GetCurrentStream()); - quantized_gemm(output, inp_norm, weight, q_scale, groups, 0); launch_bias_gelu((T*)output.data_ptr(), (T*)bias.data_ptr(), @@ -688,122 +1469,136 @@ std::vector ds_mlp_gemm_int8(at::Tensor& input, template at::Tensor fused_gemm_gelu(at::Tensor& input, at::Tensor& weight, + at::Tensor& weight_scale, at::Tensor& bias, at::Tensor& weight_out, + at::Tensor& weight_out_scale, const float epsilon, bool preLayerNorm, + bool q_int8, bool async_op) { - auto input_cont = input.contiguous(); auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) + .dtype(input.options().dtype()) .layout(at::kStrided) .device(at::kCUDA) .requires_grad(false); - auto intermediate = - at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight_out.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); + int intm_dim = q_int8 ? 
weight.size(0) : weight.size(1); + + // auto output = at::from_blob((T*)Context::Instance().GetWorkSpace() + torch::numel(input), + // {input.size(0), input.size(1), out_size}, + // options); + // T* intermediate = (T*)input.data_ptr() + torch::numel(input); + auto intermediate = at::empty({input.size(0), input.size(1), intm_dim}, options); + + int bsz = input.size(0) * input.size(1); + float alpha = (T)1.0; float gemm_beta = (T)0.0; - cublasSetStream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - CUBLAS_OP_N, - CUBLAS_OP_N, - weight.size(1), - bsz, - input.size(2), - &alpha, - &gemm_beta, - (T*)weight.data_ptr(), - (T*)input_cont.data_ptr(), - (T*)intermediate.data_ptr(), + if (q_int8) { + quantized_gemm(intermediate.data_ptr(), + (T*)input.data_ptr(), + weight, + weight_scale, + weight_scale.size(0), + bsz, + input.size(2)); + } else { + cublasSetStream(Context::Instance().GetCublasHandle(), + Context::Instance().GetCurrentStream()); + cublas_gemm_ex(Context::Instance().GetCublasHandle(), + CUBLAS_OP_N, + CUBLAS_OP_N, + intm_dim, + bsz, + input.size(2), + &alpha, + &gemm_beta, + (T*)weight.data_ptr(), + (T*)input.data_ptr(), + (T*)intermediate.data_ptr(), #ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); + rocblas_gemm_algo_standard); #else - CUBLAS_GEMM_DEFAULT_TENSOR_OP); + CUBLAS_GEMM_DEFAULT_TENSOR_OP); #endif + } launch_bias_gelu((T*)intermediate.data_ptr(), (T*)bias.data_ptr(), - weight.size(1), + intm_dim, bsz, Context::Instance().GetCurrentStream()); - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - CUBLAS_OP_N, - CUBLAS_OP_N, - weight_out.size(1), - bsz, - intermediate.size(2), - &alpha, - &gemm_beta, - (T*)weight_out.data_ptr(), - (T*)intermediate.data_ptr(), - (T*)output.data_ptr(), + int out_size = q_int8 ? 
weight_out.size(0) : weight_out.size(1); + auto output = at::empty({input.size(0), input.size(1), out_size}, options); + if (q_int8) { + quantized_gemm(output.data_ptr(), + (T*)intermediate.data_ptr(), + weight_out, + weight_out_scale, + weight_out_scale.size(0), + bsz, + input.size(2)); + } else { + cublas_gemm_ex(Context::Instance().GetCublasHandle(), + CUBLAS_OP_N, + CUBLAS_OP_N, + out_size, + bsz, + intm_dim, + &alpha, + &gemm_beta, + (T*)weight_out.data_ptr(), + (T*)intermediate.data_ptr(), + (T*)output.data_ptr(), #ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); + rocblas_gemm_algo_standard); #else - CUBLAS_GEMM_DEFAULT_TENSOR_OP); + CUBLAS_GEMM_DEFAULT_TENSOR_OP); #endif + } // cudaEventRecord(Context::Instance().GetCompEvent(2), // Context::Instance().GetCurrentStream(true)); return output; } -void residual_add_bias(at::Tensor& output, - at::Tensor& input, - at::Tensor& attention_output, - at::Tensor& output_b, - at::Tensor& attention_b, - int mp_size, - bool mlp_after_attn) +template +at::Tensor& residual_add_bias(at::Tensor& hidden_state, + at::Tensor& residual, + const at::Tensor& attention_output, + const at::Tensor& attention_bias, + const at::Tensor& final_bias, + const int mp_size, + const bool mlp_after_attn, + const bool add_bias, + const bool preln) { - int bsz = input.size(0) * input.size(1); - int hidden_size = input.size(2); - // cudaStreamWaitEvent( - // Context::Instance().GetCurrentStream(), Context::Instance().GetCompEvent(2), 0); - if (input.scalar_type() == at::kFloat) - if (mlp_after_attn) - launch_bias_residual((float*)input.data_ptr(), - (float*)output.data_ptr(), - (float*)attention_output.data_ptr(), - (float*)output_b.data_ptr(), - (float*)attention_b.data_ptr(), - bsz, - hidden_size, - mp_size, - Context::Instance().GetCurrentStream()); - else - launch_gptj_residual_add((float*)input.data_ptr(), - (float*)output.data_ptr(), - (float*)attention_output.data_ptr(), - (float*)output_b.data_ptr(), - 
(float*)attention_b.data_ptr(), - hidden_size, - bsz, - mp_size, - Context::Instance().GetCurrentStream()); - else if (mlp_after_attn) - launch_bias_residual((__half*)input.data_ptr(), - (__half*)output.data_ptr(), - (__half*)attention_output.data_ptr(), - (__half*)output_b.data_ptr(), - (__half*)attention_b.data_ptr(), + int bsz = residual.size(0) * residual.size(1); + int hidden_size = residual.size(2); + if (mlp_after_attn) + launch_bias_residual(static_cast(residual.data_ptr()), + static_cast(hidden_state.data_ptr()), + static_cast(attention_output.data_ptr()), + static_cast(final_bias.data_ptr()), + static_cast(attention_bias.data_ptr()), bsz, hidden_size, mp_size, + preln, Context::Instance().GetCurrentStream()); else - launch_gptj_residual_add<__half>((__half*)input.data_ptr(), - (__half*)output.data_ptr(), - (__half*)attention_output.data_ptr(), - (__half*)output_b.data_ptr(), - (__half*)attention_b.data_ptr(), - hidden_size, - bsz, - mp_size, - Context::Instance().GetCurrentStream()); + launch_gptj_residual_add( + static_cast(residual.data_ptr()), + static_cast(hidden_state.data_ptr()), + static_cast(attention_output.data_ptr()), + static_cast(final_bias.data_ptr()), + static_cast((add_bias ? 
attention_bias.data_ptr() : nullptr)), + hidden_size, + bsz, + mp_size, + Context::Instance().GetCurrentStream()); + return residual; } std::vector apply_rotary_pos_emb(at::Tensor& mixed_query, @@ -832,7 +1627,8 @@ std::vector apply_rotary_pos_emb(at::Tensor& mixed_query, bsz, rotate_half, rotate_every_two, - Context::Instance().GetCurrentStream()); + Context::Instance().GetCurrentStream(), + Context::Instance().GetMaxTokenLenght()); else launch_apply_rotary_pos_emb<__half>((__half*)query_cont.data_ptr(), (__half*)key_cont.data_ptr(), @@ -844,7 +1640,8 @@ std::vector apply_rotary_pos_emb(at::Tensor& mixed_query, bsz, rotate_half, rotate_every_two, - Context::Instance().GetCurrentStream()); + Context::Instance().GetCurrentStream(), + Context::Instance().GetMaxTokenLenght()); return {query_cont, key_cont}; } @@ -904,22 +1701,34 @@ at::Tensor moe_res_matmul(at::Tensor& moe_res, at::Tensor& coef, at::Tensor& out PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("softmax_fp32", &ds_softmax, "DeepSpeed SoftMax with fp32 (CUDA)"); - m.def("softmax_fp16", &ds_softmax<__half>, "DeepSpeed SoftMax with fp32 (CUDA)"); + m.def("softmax_fp16", &ds_softmax<__half>, "DeepSpeed SoftMax with fp16 (CUDA)"); m.def( "softmax_context_fp32", &ds_softmax_context, "DeepSpeed attention with fp32 (CUDA)"); m.def("softmax_context_fp16", &ds_softmax_context<__half>, - "DeepSpeed attention with fp32 (CUDA)"); + "DeepSpeed attention with fp16 (CUDA)"); + m.def("softmax_context_int8", + &ds_softmax_context1<__half>, + "DeepSpeed attention with int8 (CUDA)"); m.def("bias_gelu_fp32", &ds_bias_gelu, "DeepSpeed Gelu with fp32 (CUDA)"); - m.def("bias_gelu_fp16", &ds_bias_gelu<__half>, "DeepSpeed Gelu with fp32 (CUDA)"); + m.def("bias_gelu_fp16", &ds_bias_gelu<__half>, "DeepSpeed Gelu with fp16 (CUDA)"); + m.def("bias_geglu", &ds_bias_geglu, "DeepSpeed Bias GEGLU (CUDA)"); + m.def("bias_add_fp32", &ds_bias_add, "DeepSpeed Bias Add with fp32 (CUDA)"); + m.def("bias_add_fp16", &ds_bias_add<__half>, 
"DeepSpeed Gelu with fp16 (CUDA)"); + m.def("bias_relu_fp32", &ds_bias_relu, "DeepSpeed ReLU with fp32 (CUDA)"); + m.def("bias_relu_fp16", &ds_bias_relu<__half>, "DeepSpeed ReLU with fp16 (CUDA)"); m.def("bias_residual_fp32", &ds_bias_residual, "DeepSpeed residual-bias add with fp32 (CUDA)"); m.def("bias_residual_fp16", &ds_bias_residual<__half>, - "DeepSpeed residual-bias add with fp32 (CUDA)"); - m.def("layer_norm_fp32", &ds_layernorm, "DeepSpeed layer-norm with fp32 (CUDA)"); - m.def("layer_norm_fp16", &ds_layernorm<__half>, "DeepSpeed layer-norm with fp16 (CUDA)"); + "DeepSpeed residual-bias add with fp16 (CUDA)"); + m.def("layer_norm", &ds_layer_norm, "DeepSpeed layer norm (CUDA)"); + m.def( + "_layer_norm_residual", &ds_layer_norm_residual, "DeepSpeed layer norm + residual (CUDA)"); + m.def("layer_norm_residual_store_pre_ln_res", + &ds_layer_norm_residual_store_pre_ln_res, + "DeepSpeed layer norm + store pre Layernorm residual (CUDA)"); m.def("qkv_gemm_fp32", &ds_qkv_gemm, "DeepSpeed qkv gemm with fp32 (CUDA)"); m.def("qkv_gemm_fp16", &ds_qkv_gemm<__half>, "DeepSpeed qkv gemm with fp16 (CUDA)"); m.def("qkv_gemm_int8", &ds_qkv_gemm_int8<__half>, "DeepSpeed qkv gemm with int8 (CUDA)"); @@ -938,7 +1747,12 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) "DeepSpeed linear_layer with int8 (CUDA)"); m.def("fused_gemm_gelu_fp32", &fused_gemm_gelu, "DeepSpeed mlp with fp32 (CUDA)"); m.def("fused_gemm_gelu_fp16", &fused_gemm_gelu<__half>, "DeepSpeed mlp with fp16 (CUDA)"); - m.def("residual_add", &residual_add_bias, "DeepSpeed mlp with fp16 (CUDA)"); + m.def("residual_add_bias_fp32", + &residual_add_bias, + "DeepSpeed residual add with fp32 (CUDA)"); + m.def("residual_add_bias_fp16", + &residual_add_bias<__half>, + "DeepSpeed residual add with fp16 (CUDA)"); m.def("apply_rotary_pos_emb", &apply_rotary_pos_emb, "DeepSpeed mlp with fp16 (CUDA)"); m.def("einsum_sec_sm_ecm_fp32", &einsum_sec_sm_ecm, @@ -948,4 +1762,19 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) 
&einsum_sec_sm_ecm<__half>, "DeepSpeed vector-MM with fp16 (CUDA)"); m.def("moe_res_matmul", &moe_res_matmul, "DeepSpeed moe residual matmul (CUDA)"); + m.def("add_padding_fp32", &add_padding, "DeepSpeed residual add with fp32 (CUDA)"); + m.def("add_padding_fp16", &add_padding<__half>, "DeepSpeed residual add with fp16 (CUDA)"); + m.def("pad_transform_fp32", + &padd_add_transform, + "DeepSpeed residual add with fp32 (CUDA)"); + m.def("pad_transform_fp16", + &padd_add_transform<__half>, + "DeepSpeed residual add with fp16 (CUDA)"); + m.def("allocate_workspace_fp32", + &allocate_workspace, + "DeepSpeed memory allocation for GPT inference with fp32 (CUDA)"); + m.def("allocate_workspace_fp16", + &allocate_workspace<__half>, + "DeepSpeed memory allocation for GPT inference with fp16 (CUDA)"); + m.def("reset_cache", &reset_cache, "Reset Cache for generation tasks"); } diff --git a/csrc/transformer/inference/csrc/pt_binding_hip.cpp b/csrc/transformer/inference/csrc/pt_binding_hip.cpp index 6fed126..a85291c 100644 --- a/csrc/transformer/inference/csrc/pt_binding_hip.cpp +++ b/csrc/transformer/inference/csrc/pt_binding_hip.cpp @@ -1,40 +1,90 @@ // !!! This is a file automatically generated by hipify!!! +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ -#include +#include #include +#include #include -#include "context_hip.h" -#include "cublas_wrappers_hip.h" -#include "custom_hip_layers.h" +#include "inference_context.h" +#include "inference_cublas_wrappers.h" +#include "inference_cuda_layers.h" std::array gemm_algos = std::array({99, 99, 99}); -#define MAX_OUT_TOKES 10 +// NOTE: This activation function type enum should be always in sync +// with the python counterpart, otherwise the casting from python binding +// will be incorrect. 
+enum class ActivationFuncType { UNKNOWN = 0, GELU = 1, ReLU = 2 }; + +enum class TransformerType : uint8_t { UNKNOWN = 0, GPTType = 1, BERTType = 2 }; + +// NOTE: this is a temporary and dodgy solution to distinguish GPT and BERT style models +// based on the dimensions of the corresponding attention mask. +inline auto infer_transformer_type(at::Tensor& attn_mask) -> TransformerType +{ + auto attn_mask_num_dims = attn_mask.sizes().size(); + + if (attn_mask_num_dims > 2) { + return TransformerType::GPTType; + } else if (attn_mask_num_dims == 2) { + return TransformerType::BERTType; + } else { + return TransformerType::UNKNOWN; + } +} + +// infer stride of attention mask memory layout based on the model type. +inline auto get_attn_mask_stride(at::Tensor& attn_mask) -> int +{ + auto trnsfrmr_type = infer_transformer_type(attn_mask); + + if (trnsfrmr_type == TransformerType::GPTType) { + return attn_mask.size(2); + } else if (trnsfrmr_type == TransformerType::BERTType) { + // Bert style models have always a mask stride of 1. + return 1; + } else if (trnsfrmr_type == TransformerType::UNKNOWN) { + return 0; + } + + // this is just to make the compiler happy. 
+ return 0; +} template at::Tensor ds_softmax(at::Tensor& attn_scores, at::Tensor& attn_mask, + at::Tensor& alibi, bool triangular, bool recompute, bool local_attention, int window_size, - bool async_op) + bool async_op, + float layer_scale, + int head_offset, + int mp_size) { auto attn_scores_c = attn_scores.contiguous(); int bsz = attn_scores_c.size(0); int seq_len = attn_scores_c.size(1); int len = attn_scores_c.sizes().size(); - if (len > 3) seq_len = attn_scores_c.size(2); + if (len > 2) seq_len = attn_scores_c.size(2); int soft_len = attn_scores_c.size(2); if (len > 3) soft_len = attn_scores_c.size(3); int heads = 1; - if (len > 3) heads = attn_scores_c.size(1); + if (len > 1) heads = attn_scores_c.size(1); + + auto mask_stride = get_attn_mask_stride(attn_mask); launch_attn_softmax_v2((T*)attn_scores_c.data_ptr(), (attn_mask.sizes().size() > 1 ? (T*)attn_mask.data_ptr() : nullptr), + (alibi.sizes().size() > 1 ? (T*)alibi.data_ptr() : nullptr), + layer_scale, triangular, recompute, local_attention, @@ -43,20 +93,35 @@ at::Tensor ds_softmax(at::Tensor& attn_scores, heads, seq_len, soft_len, - 1.0, + head_offset, + mask_stride, + mp_size, Context::Instance().GetCurrentStream(async_op)); return attn_scores_c; } template -void allocate_workspace(size_t hidden_dim, - size_t max_seq_len, - size_t batch_size, - size_t head_size = 128) +void allocate_workspace(unsigned hidden_dim, + unsigned num_heads, + unsigned prompt_length, + unsigned batch_size, + unsigned num_layers, + unsigned mp_size = 1, + bool external_cache = false, + unsigned rank = 0, + unsigned max_out_tokens = 1024) { - size_t _workSpaceSize = (hidden_dim * batch_size * max_seq_len); - Context::Instance().GenWorkSpace(_workSpaceSize * sizeof(T)); + Context::Instance().GenWorkSpace(num_layers, + num_heads, + batch_size, + prompt_length, + hidden_dim, + mp_size, + external_cache, + sizeof(T), + rank, + max_out_tokens); } template @@ -71,10 +136,13 @@ at::Tensor einsum_sec_sm_ecm(at::Tensor& Q, at::Tensor& 
W) float alpha = 1; float gemm_beta = 0.0; - if (!workspace) { - allocate_workspace(W.size(1), MAX_OUT_TOKES, Q.size(0)); - workspace = (T*)Context::Instance().GetWorkSpace(); + /* + // Reallocate memory if we received a new prompt + if (!workspace || input.size(1) != 1) { + allocate_workspace(W.size(1), Context::Instance().GetMaxTokenLenght(), Q.size(0), 1, + head_size); workspace = (T*)Context::Instance().GetWorkSpace(); } + */ auto O = at::from_blob(workspace, {Q.size(1), Q.size(2), W.size(1)}, options); unsigned m = W.size(1); @@ -124,6 +192,9 @@ void attention_unfused(at::Tensor& prev_key_cont, float gemm_beta = 0.0; auto attn_score = at::empty({bsz, heads, seq_len, soft_len}, options); int k = prev_value_cont.size(2) / heads; + + auto mask_stride = get_attn_mask_stride(attn_mask); + rocblas_set_stream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); cublas_strided_batched_gemm(Context::Instance().GetCublasHandle(), soft_len, @@ -145,8 +216,22 @@ void attention_unfused(at::Tensor& prev_key_cont, #else CUBLAS_GEMM_DEFAULT_TENSOR_OP); #endif - attn_score = ds_softmax( - attn_score, attn_mask, triangular, recompute, local_attention, window_size, false); + launch_attn_softmax_v2((T*)attn_score.data_ptr(), + (T*)(attn_mask.sizes().size() > 1 ? 
attn_mask.data_ptr() : nullptr), + (T*)nullptr, + 1.0, + triangular, + recompute, + local_attention, + window_size, + bsz, + heads, + seq_len, + soft_len, + 0, + mask_stride, + 1, + Context::Instance().GetCurrentStream(false)); alpha = 1.0; cublas_strided_batched_gemm(Context::Instance().GetCublasHandle(), k, @@ -171,19 +256,19 @@ void attention_unfused(at::Tensor& prev_key_cont, } template -std::vector ds_softmax_context(at::Tensor& query, - at::Tensor& prev_key, - at::Tensor& new_key, - at::Tensor& attn_mask, - at::Tensor& prev_value, - at::Tensor& new_value, - int heads, - float norm_factor, - bool merging, - bool triangular, - bool local_attention, - int window_size, - bool no_masking) +std::vector ds_softmax_context1(at::Tensor& query, + at::Tensor& prev_key, + at::Tensor& new_key, + at::Tensor& attn_mask, + at::Tensor& prev_value, + at::Tensor& new_value, + int heads, + float norm_factor, + bool merging, + bool triangular, + bool local_attention, + int window_size, + bool no_masking) { auto query_cont = query.contiguous(); auto prev_key_cont = prev_key.contiguous(); @@ -223,6 +308,230 @@ std::vector ds_softmax_context(at::Tensor& query, return {output, prev_key, prev_value}; } +template +void ds_softmax_internal(T* attn_scores, + at::Tensor& attn_mask, + at::Tensor& alibi, + float& layer_scale, + bool triangular, + bool recompute, + bool local_attention, + int window_size, + int bsz, + int seq_len, + int soft_len, + int heads) +{ + auto mask_stride = get_attn_mask_stride(attn_mask); + + launch_attn_softmax_v2((T*)attn_scores, + (attn_mask.sizes().size() > 1 ? (T*)attn_mask.data_ptr() : nullptr), + (alibi.sizes().size() > 1 ? 
(T*)alibi.data_ptr() : nullptr), + layer_scale, + triangular, + recompute, + local_attention, + window_size, + bsz, + heads, + seq_len, + soft_len, + 0, + mask_stride, + 1, + at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); +} + +template +void attention_unfused(T* prev_key_cont, + T* query_cont, + at::Tensor& attn_mask, + T* prev_value_cont, + T* output, + unsigned& bsz, + int& k, + unsigned& seq_len, + unsigned& soft_len, + int& heads, + float& norm_factor, + bool triangular, + bool recompute, + bool local_attention, + int window_size, + at::Tensor& alibi, + int layer_id) +{ + float layer_scale = alibi.sizes().size() > 1 ? std::max(1, layer_id) : 1.0; + float alpha = norm_factor * norm_factor / layer_scale; + float gemm_beta = 0.0; + T* workspace = (T*)Context::Instance().GetAttentionUnfusedWorkspace(); + + rocblas_set_stream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); + cublas_strided_batched_gemm(Context::Instance().GetCublasHandle(), + soft_len, + seq_len, + k, + &alpha, + &gemm_beta, + (T*)prev_key_cont, + (T*)query_cont, + workspace, + rocblas_operation_transpose, + rocblas_operation_none, + Context::Instance().GetMaxTokenLenght() * k, + seq_len * k, + seq_len * soft_len, + bsz * heads, +#ifdef __HIP_PLATFORM_HCC__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + ds_softmax_internal(workspace, + attn_mask, + alibi, + layer_scale, + triangular, + recompute, + local_attention, + window_size, + bsz, + seq_len, + soft_len, + heads); + alpha = 1.0; + cublas_strided_batched_gemm(Context::Instance().GetCublasHandle(), + k, + seq_len, + soft_len, + &alpha, + &gemm_beta, + (T*)prev_value_cont, + workspace, + (T*)output, + rocblas_operation_none, + rocblas_operation_none, + Context::Instance().GetMaxTokenLenght() * k, + seq_len * soft_len, + seq_len * k, + bsz * heads, +#ifdef __HIP_PLATFORM_HCC__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif +} + +void 
reset_cache() { Context::Instance().reset_tokens(); } + +template +std::vector ds_softmax_context(at::Tensor& query_key_value, + at::Tensor& attn_mask, + int rotary_dim, + bool rotate_half, + bool rotate_every_two, + int heads, + float norm_factor, + bool triangular, + bool local_attention, + int window_size, + bool no_masking, + unsigned layer_id, + unsigned num_layers, + at::Tensor& alibi) +{ + unsigned bsz = query_key_value.size(0); + unsigned seq_len = query_key_value.size(1); + unsigned hidden_dim = query_key_value.size(2) / 3; + + bool is_prompt = (seq_len > 1); + + if (is_prompt) Context::Instance().reset_tokens(seq_len); + unsigned soft_len = Context::Instance().current_tokens(); + + int k = hidden_dim / heads; + auto options = at::TensorOptions() + .dtype(query_key_value.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + + T* workspace = (T*)Context::Instance().GetWorkSpace(); + size_t buf_size = bsz * seq_len * hidden_dim; + auto output = torch::from_blob(workspace + 4 * buf_size, {bsz, seq_len, hidden_dim}, options); + + auto query_cont = workspace + 8 * buf_size; + size_t offset = 16 * (hidden_dim * bsz * Context::Instance().GetMaxTokenLenght()) + + layer_id * 2 * bsz * Context::Instance().GetMaxTokenLenght() * hidden_dim; + unsigned all_tokens = soft_len; + auto kv_cache = workspace + offset + (hidden_dim / heads) * (is_prompt ? 0 : soft_len - 1); + size_t value_offset = bsz * Context::Instance().GetMaxTokenLenght() * hidden_dim; + + T* temp_buf = (T*)output.data_ptr() + at::numel(output); + launch_bias_add_transform_0213((T*)query_cont, + kv_cache, + kv_cache + value_offset, + (T*)query_key_value.data_ptr(), + nullptr, + bsz, + seq_len, + (is_prompt ? 
0 : soft_len - 1), + soft_len, + hidden_dim, + heads, + rotary_dim, + rotate_half, + rotate_every_two, + Context::Instance().GetCurrentStream(), + 3, + Context::Instance().GetMaxTokenLenght()); + if (rotary_dim > 0 && rotate_half) + launch_apply_rotary_pos_emb(query_cont, + kv_cache, + k, + seq_len, + rotary_dim, + (is_prompt ? 0 : soft_len - 1), + heads, + bsz, + rotate_half, + rotate_every_two, + Context::Instance().GetCurrentStream(), + Context::Instance().GetMaxTokenLenght()); + + attention_unfused(workspace + offset, + (T*)query_cont, + attn_mask, + workspace + offset + value_offset, + temp_buf, + bsz, + k, + seq_len, + all_tokens, + heads, + norm_factor, + (triangular && is_prompt), + is_prompt, + local_attention, + window_size, + alibi, + layer_id); + launch_transform4d_0213((T*)output.data_ptr(), + temp_buf, + bsz, + heads, + seq_len, + output.size(2), + Context::Instance().GetCurrentStream(false), + 1); + + if (layer_id == num_layers - 1) Context::Instance().advance_tokens(); + auto prev_key = torch::from_blob(workspace + offset, {bsz, heads, all_tokens, k}, options); + auto prev_value = + torch::from_blob(workspace + offset + value_offset, {bsz, heads, all_tokens, k}, options); + return {output, prev_key, prev_value}; +} + template at::Tensor ds_bias_gelu(at::Tensor& input, at::Tensor& bias) { @@ -239,6 +548,73 @@ at::Tensor ds_bias_gelu(at::Tensor& input, at::Tensor& bias) return input_cont; } +at::Tensor ds_bias_geglu(at::Tensor& activation, at::Tensor& bias) +{ + /* + Used in FF of Stable diffusion + */ + + const int batch_size = activation.size(0); + const int seq_len = activation.size(1); + const int channels = activation.size(2); + + const int rows = batch_size * seq_len; + // Dimensionality is cut in half + const int out_channels = channels / 2; + + auto output = at::empty({batch_size, seq_len, out_channels}, activation.options()); + + if (activation.options().dtype() == torch::kFloat32) { + launch_fused_bias_geglu((float*)output.data_ptr(), + 
(const float*)activation.data_ptr(), + (const float*)bias.data_ptr(), + rows, + channels, + Context::Instance().GetCurrentStream()); + } else { + launch_fused_bias_geglu((__half*)output.data_ptr(), + (const __half*)activation.data_ptr(), + (const __half*)bias.data_ptr(), + rows, + channels, + Context::Instance().GetCurrentStream()); + } + + return output; +} + +template +at::Tensor ds_bias_relu(at::Tensor& input, at::Tensor& bias) +{ + auto input_cont = input.contiguous(); + + int bsz = input_cont.size(0) * input_cont.size(1); + int intermediate_size = input_cont.size(2); + + launch_bias_relu((T*)input_cont.data_ptr(), + (T*)bias.data_ptr(), + intermediate_size, + bsz, + Context::Instance().GetCurrentStream()); + return input_cont; +} + +template +at::Tensor ds_bias_add(at::Tensor& input, at::Tensor& bias) +{ + auto input_cont = input.contiguous(); + + int bsz = input_cont.size(0) * input_cont.size(1); + int hidden_size = input_cont.size(2); + + launch_bias_add((T*)input_cont.data_ptr(), + (T*)bias.data_ptr(), + hidden_size, + bsz, + Context::Instance().GetCurrentStream()); + return input_cont; +} + template at::Tensor ds_bias_residual(at::Tensor& input, at::Tensor& residual, at::Tensor& bias) { @@ -256,85 +632,260 @@ at::Tensor ds_bias_residual(at::Tensor& input, at::Tensor& residual, at::Tensor& return input_cont; } +at::Tensor ds_layer_norm(at::Tensor& input, at::Tensor& gamma, at::Tensor& beta, float epsilon) +{ + const int rows = input.size(0) * input.size(1); + const int elems_per_row = input.size(2); + auto output = at::empty_like(input); + + if (input.options().dtype() == torch::kFloat16) { + launch_fused_ln((__half*)output.data_ptr(), + (const __half*)input.data_ptr(), + (const __half*)gamma.data_ptr(), + (const __half*)beta.data_ptr(), + epsilon, + rows, + elems_per_row, + Context::Instance().GetCurrentStream()); + } else { + launch_fused_ln((float*)output.data_ptr(), + (const float*)input.data_ptr(), + (const float*)gamma.data_ptr(), + (const 
float*)beta.data_ptr(), + epsilon, + rows, + elems_per_row, + Context::Instance().GetCurrentStream()); + } + + return output; +} + template -at::Tensor ds_layernorm(at::Tensor& input_cont, at::Tensor& gamma, at::Tensor& betta, float epsilon) +void ds_layer_norm_internal(T* workspace, + at::Tensor& input, + at::Tensor& gamma, + at::Tensor& beta, + float epsilon) { - int bsz = input_cont.size(0) * input_cont.size(1); - auto inp_norm = at::empty_like(input_cont); - launch_layer_norm((T*)inp_norm.data_ptr(), - (T*)input_cont.data_ptr(), - (T*)gamma.data_ptr(), - (T*)betta.data_ptr(), - epsilon, - bsz, - input_cont.size(2), - Context::Instance().GetCurrentStream()); - return inp_norm; + int bsz = input.size(0) * input.size(1); + launch_fused_ln(workspace, + (const T*)input.data_ptr(), + (const T*)gamma.data_ptr(), + (const T*)beta.data_ptr(), + epsilon, + bsz, + input.size(2), + Context::Instance().GetCurrentStream()); +} + +/* Currently only used in unit testing */ +at::Tensor ds_layer_norm_residual(at::Tensor& input, + at::Tensor& bias, + at::Tensor& residual, + at::Tensor& gamma, + at::Tensor& beta, + float epsilon) +{ + const int rows = input.size(0) * input.size(1); + const int elems_per_row = input.size(2); + auto output = at::empty_like(input); + + if (input.options().dtype() == torch::kFloat16) { + launch_fused_residual_ln((__half*)output.data_ptr(), + (const __half*)input.data_ptr(), + (const __half*)residual.data_ptr(), + (const __half*)bias.data_ptr(), + (const __half*)gamma.data_ptr(), + (const __half*)beta.data_ptr(), + epsilon, + rows, + elems_per_row, + Context::Instance().GetCurrentStream()); + } else { + launch_fused_residual_ln((float*)output.data_ptr(), + (const float*)input.data_ptr(), + (const float*)residual.data_ptr(), + (const float*)bias.data_ptr(), + (const float*)gamma.data_ptr(), + (const float*)beta.data_ptr(), + epsilon, + rows, + elems_per_row, + Context::Instance().GetCurrentStream()); + } + + return output; +} + +/* Currently only used 
in unit testing */ +std::vector ds_layer_norm_residual_store_pre_ln_res(at::Tensor& input, + at::Tensor& bias, + at::Tensor& residual, + at::Tensor& gamma, + at::Tensor& beta, + float epsilon) +{ + const int rows = input.size(0) * input.size(1); + const int elems_per_row = input.size(2); + auto norm_output = at::empty_like(input); + auto res_output = at::empty_like(input); + + if (input.options().dtype() == torch::kFloat16) { + launch_fused_residual_ln_store_pre_ln_res((__half*)norm_output.data_ptr(), + (__half*)res_output.data_ptr(), + (const __half*)input.data_ptr(), + (const __half*)residual.data_ptr(), + (const __half*)bias.data_ptr(), + (const __half*)gamma.data_ptr(), + (const __half*)beta.data_ptr(), + epsilon, + rows, + elems_per_row, + Context::Instance().GetCurrentStream()); + } else { + launch_fused_residual_ln_store_pre_ln_res((float*)norm_output.data_ptr(), + (float*)res_output.data_ptr(), + (const float*)input.data_ptr(), + (const float*)residual.data_ptr(), + (const float*)bias.data_ptr(), + (const float*)gamma.data_ptr(), + (const float*)beta.data_ptr(), + epsilon, + rows, + elems_per_row, + Context::Instance().GetCurrentStream()); + } + + return {norm_output, res_output}; } template -at::Tensor qkv_unfused_cublas(at::Tensor& output, - at::Tensor& input, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - bool add_bias) +void quantized_gemm(void* output, + T* input, + at::Tensor& weight, + at::Tensor& qscale, + int groups, + int bsz, + int hidden_size) { - auto inp_norm = ds_layernorm(input, gamma, beta, epsilon); + // T* weight16 = (T*)Context::Instance().GetWorkSpace() + 12 * hidden_size * bsz; - // hipEventRecord(Context::Instance().GetCompEvent(1), Context::Instance().GetCurrentStream()); + auto options = at::TensorOptions() + .dtype(at::kHalf) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + auto tmp = torch::empty(weight.sizes(), options); + T* weight16 = 
(T*)tmp.data_ptr(); + launch_dequantize(weight16, + (int8_t*)weight.data_ptr(), + (float*)qscale.data_ptr(), + weight.size(0), + weight.size(1), + groups, + Context::Instance().GetCurrentStream()); float alpha = (T)1.0; float gemm_beta = (T)0.0; - int bsz = input.size(0) * input.size(1); - rocblas_set_stream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); cublas_gemm_ex(Context::Instance().GetCublasHandle(), + rocblas_operation_transpose, rocblas_operation_none, - rocblas_operation_none, - weight.size(1), + weight.size(0), bsz, - input.size(2), + weight.size(1), &alpha, &gemm_beta, - (T*)weight.data_ptr(), - (T*)inp_norm.data_ptr(), - (T*)output.data_ptr(), + weight16, + (T*)input, + (T*)output, #ifdef __HIP_PLATFORM_HCC__ rocblas_gemm_algo_standard); #else CUBLAS_GEMM_DEFAULT_TENSOR_OP); #endif +} + +template +at::Tensor qkv_unfused_cublas(at::Tensor& output, + at::Tensor& input, + at::Tensor& weight, + at::Tensor& q_scale, + at::Tensor& bias, + at::Tensor& gamma, + at::Tensor& beta, + const float epsilon, + bool add_bias, + bool q_int8) +{ + int bsz = input.size(0) * input.size(1); + T* workspace = (T*)Context::Instance().GetWorkSpace(); + workspace += (3 * bsz * input.size(2)); + ds_layer_norm_internal(workspace, input, gamma, beta, epsilon); + + if (q_int8) { + quantized_gemm( + output.data_ptr(), workspace, weight, q_scale, q_scale.size(0), bsz, input.size(2)); + } else { + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + + rocblas_set_stream(Context::Instance().GetCublasHandle(), + Context::Instance().GetCurrentStream()); + cublas_gemm_ex(Context::Instance().GetCublasHandle(), + rocblas_operation_none, + rocblas_operation_none, + weight.size(1), + bsz, + input.size(2), + &alpha, + &gemm_beta, + (T*)weight.data_ptr(), + workspace, + (T*)output.data_ptr(), +#ifdef __HIP_PLATFORM_HCC__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + } if (add_bias) launch_bias_add((T*)output.data_ptr(), 
(T*)bias.data_ptr(), - weight.size(1), + q_int8 ? weight.size(0) : weight.size(1), bsz, Context::Instance().GetCurrentStream()); - return inp_norm; + return torch::from_blob(workspace, input.sizes(), input.options()); } template std::vector ds_qkv_gemm(at::Tensor& input, at::Tensor& weight, + at::Tensor& q_scale, at::Tensor& bias, at::Tensor& gamma, at::Tensor& beta, const float epsilon, - bool add_bias) + bool add_bias, + unsigned num_layers, + bool external_cache, + unsigned mp_size, + unsigned rank, + bool q_int8) { - auto input_cont = input.contiguous(); + int bsz = input.size(0) * input.size(1); + T* workspace = (T*)Context::Instance().GetWorkSpace(); + int out_size = q_int8 ? weight.size(0) : weight.size(1); + auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) + .dtype(input.options().dtype()) .layout(at::kStrided) .device(at::kCUDA) .requires_grad(false); - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); - auto inp_norm = - qkv_unfused_cublas(output, input_cont, weight, bias, gamma, beta, epsilon, add_bias); + auto output = at::from_blob(workspace, {input.size(0), input.size(1), out_size}, options); + auto inp_norm = qkv_unfused_cublas( + output, input, weight, q_scale, bias, gamma, beta, epsilon, add_bias, q_int8); return {output, inp_norm}; } @@ -358,20 +909,18 @@ void quantized_gemm(at::Tensor& output, launch_dequantize((T*)weight16.data_ptr(), (int8_t*)weight.data_ptr(), (float*)qscale.data_ptr(), - weight.size(1), weight.size(0), + weight.size(1), groups, merge_count, Context::Instance().GetCurrentStream()); - rocblas_set_stream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - float alpha = (T)1.0; float gemm_beta = (T)0.0; cublas_gemm_ex(Context::Instance().GetCublasHandle(), + rocblas_operation_transpose, rocblas_operation_none, - rocblas_operation_none, - weight.size(1), + weight.size(0), bsz, 
input.size(2), &alpha, @@ -407,7 +956,7 @@ at::Tensor ds_qkv_gemm_int8(at::Tensor& input, auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - auto inp_norm = ds_layernorm(input_cont, gamma, beta, epsilon); + auto inp_norm = ds_layer_norm(input_cont, gamma, beta, epsilon); quantized_gemm(output, inp_norm, weight, q_scale, groups, 0); if (add_bias) @@ -421,7 +970,12 @@ at::Tensor ds_qkv_gemm_int8(at::Tensor& input, } template -at::Tensor ds_linear_layer(at::Tensor& input, at::Tensor& weight, at::Tensor& bias) +at::Tensor ds_linear_layer(at::Tensor& input, + at::Tensor& weight, + at::Tensor& bias, + bool add_bias, + bool do_flash_attn, + int num_heads) { auto input_cont = input.contiguous(); auto options = at::TensorOptions() @@ -430,8 +984,10 @@ at::Tensor ds_linear_layer(at::Tensor& input, at::Tensor& weight, at::Tensor& bi .device(at::kCUDA) .requires_grad(false); - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); + int head_size = input_cont.size(2) / num_heads; + int bsz = input.size(0) * input.size(1); + T* workspace = (T*)Context::Instance().GetWorkSpace(); + auto output = at::from_blob(workspace, {input.size(0), input.size(1), weight.size(1)}, options); float alpha = (T)1.0; float gemm_beta = (T)0.0; @@ -453,16 +1009,172 @@ at::Tensor ds_linear_layer(at::Tensor& input, at::Tensor& weight, at::Tensor& bi #else CUBLAS_GEMM_DEFAULT_TENSOR_OP); #endif + if (add_bias) + launch_bias_add((T*)output.data_ptr(), + (T*)bias.data_ptr(), + weight.size(1), + bsz, + Context::Instance().GetCurrentStream()); + bool add_padding = (head_size % 32 != 0 && head_size < 64) || (head_size % 64 != 0); + if (do_flash_attn) { + if (add_padding) { + int padded_head_size = head_size < 32 ? 32 : (head_size < 64 ? 
64 : 128); + auto padded_output = workspace + output.numel(); + auto final_output = + padded_output + (input.size(0) * input.size(1) * 3 * num_heads * padded_head_size); + pad_data(padded_output, + workspace, + 3 * bsz * num_heads, + head_size, + padded_head_size, + Context::Instance().GetCurrentStream()); - launch_bias_add((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); + launch_bias_add_transform_0213( + final_output, + final_output + (input.size(0) * input.size(1) * num_heads * padded_head_size), + final_output + (input.size(0) * input.size(1) * 2 * num_heads * padded_head_size), + padded_output, + nullptr, + input.size(0), + input.size(1), + 0, + input.size(1), + (num_heads * padded_head_size), + num_heads, + -1, + false, + false, + Context::Instance().GetCurrentStream(), + 3, + input.size(1)); + return at::from_blob(final_output, + {3, input.size(0), num_heads, input.size(1), padded_head_size}, + options); + // return at::from_blob(padded_output, {input.size(0) * input.size(1), 3, num_heads, + // padded_head_size}, options); + } else { + auto final_output = workspace + output.numel(); + launch_bias_add_transform_0213( + final_output, + final_output + (input.size(0) * input.size(1) * input_cont.size(2)), + final_output + (input.size(0) * input.size(1) * 2 * input_cont.size(2)), + workspace, + nullptr, + input.size(0), + input.size(1), + 0, + input.size(1), + input_cont.size(2), + num_heads, + -1, + false, + false, + Context::Instance().GetCurrentStream(), + 3, + input.size(1)); + return at::from_blob( + final_output, {3, input.size(0), num_heads, input.size(1), head_size}, options); + // return at::from_blob(workspace, {input.size(0) * input.size(1), 3, num_heads, + // head_size}, options); + } + + } else + return output; +} - return output; +template +std::vector add_padding(at::Tensor& query, at::Tensor& key, at::Tensor& value) +{ + int head_size = query.size(3); + int padded_head_size = 
head_size < 32 ? 32 : (head_size < 64 ? 64 : 128); + T* workspace = (T*)Context::Instance().GetWorkSpace(); + T* key_pad_ptr = workspace + padded_head_size * query.size(0) * query.size(1) * query.size(2); + T* value_pad_ptr = key_pad_ptr + padded_head_size * query.size(0) * query.size(1) * 128; + pad_head_seq(workspace, + (T*)query.data_ptr(), + query.size(0) * query.size(1), + query.size(2), + query.size(2), + head_size, + padded_head_size, + Context::Instance().GetCurrentStream()); + pad_head_seq(key_pad_ptr, + (T*)key.data_ptr(), + query.size(0) * query.size(1), + key.size(2), + 128, + head_size, + padded_head_size, + Context::Instance().GetCurrentStream()); + pad_head_seq(value_pad_ptr, + (T*)value.data_ptr(), + query.size(0) * query.size(1), + key.size(2), + 128, + head_size, + padded_head_size, + Context::Instance().GetCurrentStream()); + return { + at::from_blob(workspace, + {query.size(0), query.size(1), query.size(2), padded_head_size}, + query.options()), + at::from_blob( + key_pad_ptr, {query.size(0), query.size(1), 128, padded_head_size}, query.options()), + at::from_blob( + value_pad_ptr, {query.size(0), query.size(1), 128, padded_head_size}, query.options())}; } +template +std::vector padd_add_transform(at::Tensor& query, + at::Tensor& key, + at::Tensor& value, + int heads, + bool add_padding) +{ + int head_size = query.size(2) / heads; + int key_value_length = add_padding ? 128 : key.size(1); + int padded_head_size = add_padding ? (head_size < 32 ? 32 : (head_size < 64 ? 
64 : 128)) + : head_size; + T* workspace = (T*)Context::Instance().GetWorkSpace(); + T* key_pad_ptr = workspace + padded_head_size * query.size(0) * heads * query.size(1); + T* value_pad_ptr = key_pad_ptr + padded_head_size * query.size(0) * heads * key_value_length; + launch_pad_add_transform_0213(workspace, + (T*)query.data_ptr(), + query.size(0), + query.size(2), + query.size(1), + query.size(1), + heads, + padded_head_size, + Context::Instance().GetCurrentStream()); + launch_pad_add_transform_0213(key_pad_ptr, + (T*)key.data_ptr(), + key.size(0), + key.size(2), + key.size(1), + key_value_length, + heads, + padded_head_size, + Context::Instance().GetCurrentStream()); + launch_pad_add_transform_0213(value_pad_ptr, + (T*)value.data_ptr(), + value.size(0), + value.size(2), + value.size(1), + key_value_length, + heads, + padded_head_size, + Context::Instance().GetCurrentStream()); + return { + at::from_blob( + workspace, {query.size(0), heads, query.size(1), padded_head_size}, query.options()), + at::from_blob(key_pad_ptr, + {query.size(0), heads, key_value_length, padded_head_size}, + query.options()), + at::from_blob(value_pad_ptr, + {query.size(0), heads, key_value_length, padded_head_size}, + query.options())}; +} template at::Tensor ds_linear_layer_int8(at::Tensor& input, at::Tensor& weight, @@ -490,37 +1202,52 @@ at::Tensor ds_linear_layer_int8(at::Tensor& input, } template -at::Tensor ds_vector_matmul(at::Tensor& input, at::Tensor& weight, bool async_op) +at::Tensor ds_vector_matmul(at::Tensor& input, + at::Tensor& weight, + bool async_op, + at::Tensor& q_scale, + bool q_int8) { - auto input_cont = input.contiguous(); auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) + .dtype(input.options().dtype()) .layout(at::kStrided) .device(at::kCUDA) .requires_grad(false); + int out_size = q_int8 ? 
weight.size(0) : weight.size(1); + int bsz = input.size(0) * input.size(1); - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); - float alpha = (T)1.0; - float gemm_beta = (T)0.0; - rocblas_set_stream(Context::Instance().GetCublasHandle(), - Context::Instance().GetCurrentStream(async_op)); - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - rocblas_operation_none, - rocblas_operation_none, - weight.size(1), - bsz, - input_cont.size(2), - &alpha, - &gemm_beta, - (T*)weight.data_ptr(), - (T*)input_cont.data_ptr(), - (T*)output.data_ptr(), + T* workspace = (T*)Context::Instance().GetWorkSpace(); + auto output = at::from_blob(workspace, {input.size(0), input.size(1), out_size}, options); + if (q_int8) { + quantized_gemm(output.data_ptr(), + (T*)input.data_ptr(), + weight, + q_scale, + q_scale.size(0), + bsz, + input.size(2)); + } else { + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + rocblas_set_stream(Context::Instance().GetCublasHandle(), + Context::Instance().GetCurrentStream(async_op)); + cublas_gemm_ex(Context::Instance().GetCublasHandle(), + rocblas_operation_none, + rocblas_operation_none, + weight.size(1), + bsz, + input.size(2), + &alpha, + &gemm_beta, + (T*)weight.data_ptr(), + (T*)input.data_ptr(), + (T*)output.data_ptr(), #ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); + rocblas_gemm_algo_standard); #else - CUBLAS_GEMM_DEFAULT_TENSOR_OP); + CUBLAS_GEMM_DEFAULT_TENSOR_OP); #endif + } return output; } @@ -545,95 +1272,163 @@ at::Tensor ds_vector_matmul_int8(at::Tensor& input, } template -void mlp_unfused_cublas(at::Tensor& output, - at::Tensor& input, - at::Tensor& residual, - at::Tensor& input_bias, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - bool preLayerNorm, - bool mlp_after_attn) +at::Tensor mlp_unfused_cublas(at::Tensor& output, + at::Tensor& input, + at::Tensor& residual, + 
at::Tensor& input_bias, + at::Tensor& weight, + at::Tensor& weight1, + at::Tensor& bias, + at::Tensor& gamma, + at::Tensor& beta, + const float epsilon, + bool preLayerNorm, + bool mlp_after_attn, + at::Tensor& q_scale, + at::Tensor& q_scale1, + bool q_int8, + ActivationFuncType act_func_type) { int bsz = input.size(0) * input.size(1); - auto inp_norm = at::empty_like(input); - - launch_residual_layer_norm((T*)inp_norm.data_ptr(), - (T*)nullptr, - (T*)input.data_ptr(), - (T*)residual.data_ptr(), - (T*)input_bias.data_ptr(), - (T*)gamma.data_ptr(), - (T*)beta.data_ptr(), - epsilon, - bsz, - input.size(2), - preLayerNorm, - mlp_after_attn, - Context::Instance().GetCurrentStream()); + T* inp_norm = + (T*)Context::Instance().GetWorkSpace() + torch::numel(input) + torch::numel(output); + T* intermediate = inp_norm + torch::numel(input); + + if (mlp_after_attn) { + launch_fused_residual_ln((T*)inp_norm, + (const T*)input.data_ptr(), + (const T*)residual.data_ptr(), + (const T*)input_bias.data_ptr(), + (const T*)gamma.data_ptr(), + (const T*)beta.data_ptr(), + epsilon, + bsz, + input.size(2), + Context::Instance().GetCurrentStream()); + } else { + ds_layer_norm_internal(inp_norm, input, gamma, beta, epsilon); + } + if (q_int8) { + quantized_gemm( + intermediate, inp_norm, weight, q_scale, q_scale.size(0), bsz, input.size(2)); + } else { + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + rocblas_set_stream(Context::Instance().GetCublasHandle(), + Context::Instance().GetCurrentStream()); + cublas_gemm_ex(Context::Instance().GetCublasHandle(), + rocblas_operation_none, + rocblas_operation_none, + weight.size(1), + bsz, + input.size(2), + &alpha, + &gemm_beta, + (T*)weight.data_ptr(), + inp_norm, + intermediate, +#ifdef __HIP_PLATFORM_HCC__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + } + if (act_func_type == ActivationFuncType::GELU) { + launch_bias_gelu(intermediate, + (T*)bias.data_ptr(), + q_int8 ? 
weight.size(0) : weight.size(1), + bsz, + Context::Instance().GetCurrentStream()); + } else if (act_func_type == ActivationFuncType::ReLU) { + launch_bias_relu(intermediate, + (T*)bias.data_ptr(), + q_int8 ? weight.size(0) : weight.size(1), + bsz, + Context::Instance().GetCurrentStream()); + } - float alpha = (T)1.0; - float gemm_beta = (T)0.0; - rocblas_set_stream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - rocblas_operation_none, - rocblas_operation_none, - weight.size(1), - bsz, - input.size(2), - &alpha, - &gemm_beta, - (T*)weight.data_ptr(), - (T*)inp_norm.data_ptr(), - (T*)output.data_ptr(), + if (q_int8) { + quantized_gemm(output.data_ptr(), + intermediate, + weight1, + q_scale1, + q_scale1.size(0), + bsz, + input.size(2)); + } else { + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + rocblas_set_stream(Context::Instance().GetCublasHandle(), + Context::Instance().GetCurrentStream()); + cublas_gemm_ex(Context::Instance().GetCublasHandle(), + rocblas_operation_none, + rocblas_operation_none, + weight1.size(1), + bsz, + weight1.size(0), + &alpha, + &gemm_beta, + (T*)weight1.data_ptr(), + intermediate, + (T*)output.data_ptr(), #ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); + rocblas_gemm_algo_standard); #else - CUBLAS_GEMM_DEFAULT_TENSOR_OP); + CUBLAS_GEMM_DEFAULT_TENSOR_OP); #endif - launch_bias_gelu((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); + } + + return torch::from_blob(inp_norm, input.sizes(), input.options()); } + template -at::Tensor ds_mlp_gemm(at::Tensor& input, - at::Tensor& residual, - at::Tensor& input_bias, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - bool preLayerNorm, - bool mlp_after_attn) +std::vector ds_mlp_gemm(at::Tensor& input, + at::Tensor& residual, + at::Tensor& input_bias, + at::Tensor& 
weight_interm, + at::Tensor& weight_out, + at::Tensor& bias, + at::Tensor& gamma, + at::Tensor& beta, + const float epsilon, + bool preLayerNorm, + bool mlp_after_attn, + at::Tensor& q_scale, + at::Tensor& q_scale1, + bool q_int8, + int activation_type) { - auto input_cont = input.contiguous(); auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) + .dtype(input.options().dtype()) .layout(at::kStrided) .device(at::kCUDA) .requires_grad(false); - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); - - mlp_unfused_cublas(output, - mlp_after_attn ? input : residual, - residual, - input_bias, - weight, - bias, - gamma, - beta, - epsilon, - preLayerNorm, - mlp_after_attn); + int out_size = q_int8 ? weight_out.size(0) : weight_out.size(1); + auto output = at::from_blob((T*)Context::Instance().GetWorkSpace() + torch::numel(input), + {input.size(0), input.size(1), out_size}, + options); + int bsz = input.size(0) * input.size(1); - return output; + auto act_func_type = static_cast(activation_type); + auto res_add = mlp_unfused_cublas(output, + mlp_after_attn ? input : residual, + residual, + input_bias, + weight_interm, + weight_out, + bias, + gamma, + beta, + epsilon, + preLayerNorm, + mlp_after_attn, + q_scale, + q_scale1, + q_int8, + act_func_type); + + return {output, res_add}; } template @@ -662,20 +1457,6 @@ std::vector ds_mlp_gemm_int8(at::Tensor& input, auto inp_norm = at::empty_like(input_cont); auto residual_add = (preLayerNorm ? 
at::empty_like(input_cont) : inp_norm); - // computing the blocking across K dimension - // launch_residual_layer_norm((T*)inp_norm.data_ptr(), - // (T*)residual_add.data_ptr(), - // (T*)input_cont.data_ptr(), - // (T*)residual.data_ptr(), - // (T*)input_bias.data_ptr(), - // (T*)gamma.data_ptr(), - // (T*)beta.data_ptr(), - // epsilon, - // bsz, - // input_cont.size(2), - // preLayerNorm, - // Context::Instance().GetCurrentStream()); - quantized_gemm(output, inp_norm, weight, q_scale, groups, 0); launch_bias_gelu((T*)output.data_ptr(), (T*)bias.data_ptr(), @@ -689,122 +1470,136 @@ std::vector ds_mlp_gemm_int8(at::Tensor& input, template at::Tensor fused_gemm_gelu(at::Tensor& input, at::Tensor& weight, + at::Tensor& weight_scale, at::Tensor& bias, at::Tensor& weight_out, + at::Tensor& weight_out_scale, const float epsilon, bool preLayerNorm, + bool q_int8, bool async_op) { - auto input_cont = input.contiguous(); auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) + .dtype(input.options().dtype()) .layout(at::kStrided) .device(at::kCUDA) .requires_grad(false); - auto intermediate = - at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight_out.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); + int intm_dim = q_int8 ? 
weight.size(0) : weight.size(1); + + // auto output = at::from_blob((T*)Context::Instance().GetWorkSpace() + torch::numel(input), + // {input.size(0), input.size(1), out_size}, + // options); + // T* intermediate = (T*)input.data_ptr() + torch::numel(input); + auto intermediate = at::empty({input.size(0), input.size(1), intm_dim}, options); + + int bsz = input.size(0) * input.size(1); + float alpha = (T)1.0; float gemm_beta = (T)0.0; - rocblas_set_stream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - rocblas_operation_none, - rocblas_operation_none, - weight.size(1), - bsz, - input.size(2), - &alpha, - &gemm_beta, - (T*)weight.data_ptr(), - (T*)input_cont.data_ptr(), - (T*)intermediate.data_ptr(), + if (q_int8) { + quantized_gemm(intermediate.data_ptr(), + (T*)input.data_ptr(), + weight, + weight_scale, + weight_scale.size(0), + bsz, + input.size(2)); + } else { + rocblas_set_stream(Context::Instance().GetCublasHandle(), + Context::Instance().GetCurrentStream()); + cublas_gemm_ex(Context::Instance().GetCublasHandle(), + rocblas_operation_none, + rocblas_operation_none, + intm_dim, + bsz, + input.size(2), + &alpha, + &gemm_beta, + (T*)weight.data_ptr(), + (T*)input.data_ptr(), + (T*)intermediate.data_ptr(), #ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); + rocblas_gemm_algo_standard); #else - CUBLAS_GEMM_DEFAULT_TENSOR_OP); + CUBLAS_GEMM_DEFAULT_TENSOR_OP); #endif + } launch_bias_gelu((T*)intermediate.data_ptr(), (T*)bias.data_ptr(), - weight.size(1), + intm_dim, bsz, Context::Instance().GetCurrentStream()); - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - rocblas_operation_none, - rocblas_operation_none, - weight_out.size(1), - bsz, - intermediate.size(2), - &alpha, - &gemm_beta, - (T*)weight_out.data_ptr(), - (T*)intermediate.data_ptr(), - (T*)output.data_ptr(), + int out_size = q_int8 ? 
weight_out.size(0) : weight_out.size(1); + auto output = at::empty({input.size(0), input.size(1), out_size}, options); + if (q_int8) { + quantized_gemm(output.data_ptr(), + (T*)intermediate.data_ptr(), + weight_out, + weight_out_scale, + weight_out_scale.size(0), + bsz, + input.size(2)); + } else { + cublas_gemm_ex(Context::Instance().GetCublasHandle(), + rocblas_operation_none, + rocblas_operation_none, + out_size, + bsz, + intm_dim, + &alpha, + &gemm_beta, + (T*)weight_out.data_ptr(), + (T*)intermediate.data_ptr(), + (T*)output.data_ptr(), #ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); + rocblas_gemm_algo_standard); #else - CUBLAS_GEMM_DEFAULT_TENSOR_OP); + CUBLAS_GEMM_DEFAULT_TENSOR_OP); #endif + } // hipEventRecord(Context::Instance().GetCompEvent(2), // Context::Instance().GetCurrentStream(true)); return output; } -void residual_add_bias(at::Tensor& output, - at::Tensor& input, - at::Tensor& attention_output, - at::Tensor& output_b, - at::Tensor& attention_b, - int mp_size, - bool mlp_after_attn) +template +at::Tensor& residual_add_bias(at::Tensor& hidden_state, + at::Tensor& residual, + const at::Tensor& attention_output, + const at::Tensor& attention_bias, + const at::Tensor& final_bias, + const int mp_size, + const bool mlp_after_attn, + const bool add_bias, + const bool preln) { - int bsz = input.size(0) * input.size(1); - int hidden_size = input.size(2); - // hipStreamWaitEvent( - // Context::Instance().GetCurrentStream(), Context::Instance().GetCompEvent(2), 0); - if (input.scalar_type() == at::kFloat) - if (mlp_after_attn) - launch_bias_residual((float*)input.data_ptr(), - (float*)output.data_ptr(), - (float*)attention_output.data_ptr(), - (float*)output_b.data_ptr(), - (float*)attention_b.data_ptr(), - bsz, - hidden_size, - mp_size, - Context::Instance().GetCurrentStream()); - else - launch_gptj_residual_add((float*)input.data_ptr(), - (float*)output.data_ptr(), - (float*)attention_output.data_ptr(), - (float*)output_b.data_ptr(), - 
(float*)attention_b.data_ptr(), - hidden_size, - bsz, - mp_size, - Context::Instance().GetCurrentStream()); - else if (mlp_after_attn) - launch_bias_residual((__half*)input.data_ptr(), - (__half*)output.data_ptr(), - (__half*)attention_output.data_ptr(), - (__half*)output_b.data_ptr(), - (__half*)attention_b.data_ptr(), + int bsz = residual.size(0) * residual.size(1); + int hidden_size = residual.size(2); + if (mlp_after_attn) + launch_bias_residual(static_cast(residual.data_ptr()), + static_cast(hidden_state.data_ptr()), + static_cast(attention_output.data_ptr()), + static_cast(final_bias.data_ptr()), + static_cast(attention_bias.data_ptr()), bsz, hidden_size, mp_size, + preln, Context::Instance().GetCurrentStream()); else - launch_gptj_residual_add<__half>((__half*)input.data_ptr(), - (__half*)output.data_ptr(), - (__half*)attention_output.data_ptr(), - (__half*)output_b.data_ptr(), - (__half*)attention_b.data_ptr(), - hidden_size, - bsz, - mp_size, - Context::Instance().GetCurrentStream()); + launch_gptj_residual_add( + static_cast(residual.data_ptr()), + static_cast(hidden_state.data_ptr()), + static_cast(attention_output.data_ptr()), + static_cast(final_bias.data_ptr()), + static_cast((add_bias ? 
attention_bias.data_ptr() : nullptr)), + hidden_size, + bsz, + mp_size, + Context::Instance().GetCurrentStream()); + return residual; } std::vector apply_rotary_pos_emb(at::Tensor& mixed_query, @@ -833,7 +1628,8 @@ std::vector apply_rotary_pos_emb(at::Tensor& mixed_query, bsz, rotate_half, rotate_every_two, - Context::Instance().GetCurrentStream()); + Context::Instance().GetCurrentStream(), + Context::Instance().GetMaxTokenLenght()); else launch_apply_rotary_pos_emb<__half>((__half*)query_cont.data_ptr(), (__half*)key_cont.data_ptr(), @@ -845,7 +1641,8 @@ std::vector apply_rotary_pos_emb(at::Tensor& mixed_query, bsz, rotate_half, rotate_every_two, - Context::Instance().GetCurrentStream()); + Context::Instance().GetCurrentStream(), + Context::Instance().GetMaxTokenLenght()); return {query_cont, key_cont}; } @@ -905,22 +1702,34 @@ at::Tensor moe_res_matmul(at::Tensor& moe_res, at::Tensor& coef, at::Tensor& out PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("softmax_fp32", &ds_softmax, "DeepSpeed SoftMax with fp32 (CUDA)"); - m.def("softmax_fp16", &ds_softmax<__half>, "DeepSpeed SoftMax with fp32 (CUDA)"); + m.def("softmax_fp16", &ds_softmax<__half>, "DeepSpeed SoftMax with fp16 (CUDA)"); m.def( "softmax_context_fp32", &ds_softmax_context, "DeepSpeed attention with fp32 (CUDA)"); m.def("softmax_context_fp16", &ds_softmax_context<__half>, - "DeepSpeed attention with fp32 (CUDA)"); + "DeepSpeed attention with fp16 (CUDA)"); + m.def("softmax_context_int8", + &ds_softmax_context1<__half>, + "DeepSpeed attention with int8 (CUDA)"); m.def("bias_gelu_fp32", &ds_bias_gelu, "DeepSpeed Gelu with fp32 (CUDA)"); - m.def("bias_gelu_fp16", &ds_bias_gelu<__half>, "DeepSpeed Gelu with fp32 (CUDA)"); + m.def("bias_gelu_fp16", &ds_bias_gelu<__half>, "DeepSpeed Gelu with fp16 (CUDA)"); + m.def("bias_geglu", &ds_bias_geglu, "DeepSpeed Bias GEGLU (CUDA)"); + m.def("bias_add_fp32", &ds_bias_add, "DeepSpeed Bias Add with fp32 (CUDA)"); + m.def("bias_add_fp16", &ds_bias_add<__half>, 
"DeepSpeed Gelu with fp16 (CUDA)"); + m.def("bias_relu_fp32", &ds_bias_relu, "DeepSpeed ReLU with fp32 (CUDA)"); + m.def("bias_relu_fp16", &ds_bias_relu<__half>, "DeepSpeed ReLU with fp16 (CUDA)"); m.def("bias_residual_fp32", &ds_bias_residual, "DeepSpeed residual-bias add with fp32 (CUDA)"); m.def("bias_residual_fp16", &ds_bias_residual<__half>, - "DeepSpeed residual-bias add with fp32 (CUDA)"); - m.def("layer_norm_fp32", &ds_layernorm, "DeepSpeed layer-norm with fp32 (CUDA)"); - m.def("layer_norm_fp16", &ds_layernorm<__half>, "DeepSpeed layer-norm with fp16 (CUDA)"); + "DeepSpeed residual-bias add with fp16 (CUDA)"); + m.def("layer_norm", &ds_layer_norm, "DeepSpeed layer norm (CUDA)"); + m.def( + "_layer_norm_residual", &ds_layer_norm_residual, "DeepSpeed layer norm + residual (CUDA)"); + m.def("layer_norm_residual_store_pre_ln_res", + &ds_layer_norm_residual_store_pre_ln_res, + "DeepSpeed layer norm + store pre Layernorm residual (CUDA)"); m.def("qkv_gemm_fp32", &ds_qkv_gemm, "DeepSpeed qkv gemm with fp32 (CUDA)"); m.def("qkv_gemm_fp16", &ds_qkv_gemm<__half>, "DeepSpeed qkv gemm with fp16 (CUDA)"); m.def("qkv_gemm_int8", &ds_qkv_gemm_int8<__half>, "DeepSpeed qkv gemm with int8 (CUDA)"); @@ -939,7 +1748,12 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) "DeepSpeed linear_layer with int8 (CUDA)"); m.def("fused_gemm_gelu_fp32", &fused_gemm_gelu, "DeepSpeed mlp with fp32 (CUDA)"); m.def("fused_gemm_gelu_fp16", &fused_gemm_gelu<__half>, "DeepSpeed mlp with fp16 (CUDA)"); - m.def("residual_add", &residual_add_bias, "DeepSpeed mlp with fp16 (CUDA)"); + m.def("residual_add_bias_fp32", + &residual_add_bias, + "DeepSpeed residual add with fp32 (CUDA)"); + m.def("residual_add_bias_fp16", + &residual_add_bias<__half>, + "DeepSpeed residual add with fp16 (CUDA)"); m.def("apply_rotary_pos_emb", &apply_rotary_pos_emb, "DeepSpeed mlp with fp16 (CUDA)"); m.def("einsum_sec_sm_ecm_fp32", &einsum_sec_sm_ecm, @@ -949,4 +1763,19 @@ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) 
&einsum_sec_sm_ecm<__half>, "DeepSpeed vector-MM with fp16 (CUDA)"); m.def("moe_res_matmul", &moe_res_matmul, "DeepSpeed moe residual matmul (CUDA)"); + m.def("add_padding_fp32", &add_padding, "DeepSpeed residual add with fp32 (CUDA)"); + m.def("add_padding_fp16", &add_padding<__half>, "DeepSpeed residual add with fp16 (CUDA)"); + m.def("pad_transform_fp32", + &padd_add_transform, + "DeepSpeed residual add with fp32 (CUDA)"); + m.def("pad_transform_fp16", + &padd_add_transform<__half>, + "DeepSpeed residual add with fp16 (CUDA)"); + m.def("allocate_workspace_fp32", + &allocate_workspace, + "DeepSpeed memory allocation for GPT inference with fp32 (CUDA)"); + m.def("allocate_workspace_fp16", + &allocate_workspace<__half>, + "DeepSpeed memory allocation for GPT inference with fp16 (CUDA)"); + m.def("reset_cache", &reset_cache, "Reset Cache for generation tasks"); } diff --git a/csrc/transformer/inference/csrc/relu.cu b/csrc/transformer/inference/csrc/relu.cu new file mode 100644 index 0000000..87e169a --- /dev/null +++ b/csrc/transformer/inference/csrc/relu.cu @@ -0,0 +1,63 @@ +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + +#include "conversion_utils.h" +#include "inference_cuda_layers.h" +#include "memory_access_utils.h" + +namespace cg = cooperative_groups; +#define MAX_CAP 4 +#define MAX_SEQ 2048 + +inline __device__ float relu(const float x) { return x < 0 ? 
0 : x; } + +/* +In-place relu(biasAdd(x)) for channels last +*/ +template +__global__ void fused_bias_relu(T* input, const T* bias, int total_count, int intermediate_size) +{ + // Input restriction: intermediate_size % vals_per_access == 0 + constexpr int granularity = 16; + constexpr int values_per_access = granularity / sizeof(T); + const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * values_per_access; + + if (offset < total_count) { + T data[values_per_access]; + T data_bias[values_per_access]; + mem_access::load_global(data, input + offset); + mem_access::load_global(data_bias, bias + (offset % intermediate_size)); + +#pragma unroll + for (int i = 0; i < values_per_access; i++) { + float data_f = conversion::to(data[i]); + float bias_f = conversion::to(data_bias[i]); + data[i] = conversion::to(relu(data_f + bias_f)); + } + + mem_access::store_global(input + offset, data); + } +} + +template +void launch_bias_relu(T* input, + const T* bias, + int intermediate_size, + int batch_size, + cudaStream_t stream) +{ + constexpr int threads = 1024; + constexpr int granularity = 16; + + const int total_count = batch_size * intermediate_size; + const int elems_per_block = threads * (granularity / sizeof(T)); + dim3 block_dims(threads); + dim3 grid_dims((total_count + elems_per_block - 1) / elems_per_block); + + fused_bias_relu<<>>( + input, bias, total_count, intermediate_size); +} + +template void launch_bias_relu(float*, const float*, int, int, cudaStream_t); +template void launch_bias_relu<__half>(__half*, const __half*, int, int, cudaStream_t); diff --git a/csrc/transformer/inference/csrc/relu.hip b/csrc/transformer/inference/csrc/relu.hip new file mode 100644 index 0000000..a34d3fb --- /dev/null +++ b/csrc/transformer/inference/csrc/relu.hip @@ -0,0 +1,65 @@ +// !!! This is a file automatically generated by hipify!!! 
+#include "hip/hip_runtime.h" +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + +#include "conversion_utils.h" +#include "inference_cuda_layers.h" +#include "memory_access_utils.h" + +namespace cg = cooperative_groups; +#define MAX_CAP 4 +#define MAX_SEQ 2048 + +inline __device__ float relu(const float x) { return x < 0 ? 0 : x; } + +/* +In-place relu(biasAdd(x)) for channels last +*/ +template +__global__ void fused_bias_relu(T* input, const T* bias, int total_count, int intermediate_size) +{ + // Input restriction: intermediate_size % vals_per_access == 0 + constexpr int granularity = 16; + constexpr int values_per_access = granularity / sizeof(T); + const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * values_per_access; + + if (offset < total_count) { + T data[values_per_access]; + T data_bias[values_per_access]; + mem_access::load_global(data, input + offset); + mem_access::load_global(data_bias, bias + (offset % intermediate_size)); + +#pragma unroll + for (int i = 0; i < values_per_access; i++) { + float data_f = conversion::to(data[i]); + float bias_f = conversion::to(data_bias[i]); + data[i] = conversion::to(relu(data_f + bias_f)); + } + + mem_access::store_global(input + offset, data); + } +} + +template +void launch_bias_relu(T* input, + const T* bias, + int intermediate_size, + int batch_size, + hipStream_t stream) +{ + constexpr int threads = 1024; + constexpr int granularity = 16; + + const int total_count = batch_size * intermediate_size; + const int elems_per_block = threads * (granularity / sizeof(T)); + dim3 block_dims(threads); + dim3 grid_dims((total_count + elems_per_block - 1) / elems_per_block); + + hipLaunchKernelGGL(( fused_bias_relu), dim3(grid_dims), dim3(block_dims), 0, stream, + input, bias, total_count, intermediate_size); +} + +template void launch_bias_relu(float*, const float*, int, int, hipStream_t); +template void launch_bias_relu<__half>(__half*, const __half*, int, int, hipStream_t); diff --git 
a/csrc/transformer/inference/csrc/softmax.cu b/csrc/transformer/inference/csrc/softmax.cu index bf3c8bc..c5f0417 100644 --- a/csrc/transformer/inference/csrc/softmax.cu +++ b/csrc/transformer/inference/csrc/softmax.cu @@ -1,5 +1,9 @@ +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + #include -#include "custom_cuda_layers.h" +#include "inference_cuda_layers.h" #ifndef __HIP_PLATFORM_HCC__ #include @@ -8,7 +12,7 @@ #include #include -#define ATTN_THREADS 1024 +#define ATTN_THREADS 256 #define MAX_REG_SIZE 8 #define minus_infinity -10000.0 @@ -28,6 +32,8 @@ namespace cg = cooperative_groups; __global__ void attn_softmax_v2(__half* vals, __half* mask, + __half* alibi, + float layer_scale, bool triangular, bool recompute, bool local_attention, @@ -36,19 +42,18 @@ __global__ void attn_softmax_v2(__half* vals, int heads, int sequence_length, int num_seq, - float scale, + int head_offset, + int mask_stride, + int mp_size, int iterations, int reduceWidth) { -#ifdef HALF_PRECISION_AVAILABLE - cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile g = cg::tiled_partition(b); float2 low_data[MAX_REG_SIZE]; float2 high_data[MAX_REG_SIZE]; - - __half2 h_scale = __float2half2_rn(scale); + const __half zero_h = __float2half(0.f); int wid = threadIdx.x >> 5; int lane = threadIdx.x & 0x1f; @@ -60,11 +65,15 @@ __global__ void attn_softmax_v2(__half* vals, __shared__ float partialSum[MAX_WARP_NUM]; int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks); + int batch_idx = iter_offset / (num_seq * heads); + int alibi_offset = batch_idx * heads * mp_size + head_offset; + int mask_offset = batch_idx * mask_stride + (iter_offset % mask_stride); if (iter_offset < total_count) { vals += (iter_offset * sequence_length); - int mask_offset = (iter_offset / (heads * num_seq)) * (sequence_length); + alibi_offset = (alibi_offset + ((iter_offset / num_seq) % heads)) * sequence_length; + mask_offset = mask_offset * sequence_length; int seq_id = 
iter_offset % num_seq; int seq_id4 = seq_id >> 2; @@ -76,47 +85,67 @@ __global__ void attn_softmax_v2(__half* vals, (local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1; float max_val = minus_infinity; - + // if (lane == 0) printf("%d, %d: %d \n", wid, blockIdx.x, mask_offset); for (int i = 0; i < iterations; i++) { int data_id = i * (reduceWidth << 2) + (seq_lane << 2); if ((!triangular || ((data_id >> 2) <= seq_id4)) && (data_id >> 2) >= window_stride4 && data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { - low_data[i].x = data_id > window_stride ? __half2float(vals[data_id]) - : minus_infinity; + low_data[i].x = data_id > window_stride + ? __half2float(vals[data_id]) * layer_scale + : minus_infinity; low_data[i].y = ((!triangular || ((data_id + 1) <= seq_id)) && (data_id + 1) > window_stride) - ? __half2float(vals[data_id + 1]) + ? __half2float(vals[data_id + 1]) * layer_scale : minus_infinity; high_data[i].x = ((!triangular || ((data_id + 2) <= seq_id)) && (data_id + 2) > window_stride) - ? __half2float(vals[data_id + 2]) + ? __half2float(vals[data_id + 2]) * layer_scale : minus_infinity; high_data[i].y = ((!triangular || ((data_id + 3) <= seq_id)) && (data_id + 3) > window_stride) - ? __half2float(vals[data_id + 3]) + ? 
__half2float(vals[data_id + 3]) * layer_scale : minus_infinity; - if (mask && recompute) { + if (alibi) { + low_data[i].x = low_data[i].x + __half2float(alibi[data_id + alibi_offset]); + low_data[i].y = + low_data[i].y + __half2float(alibi[data_id + alibi_offset + 1]); + high_data[i].x = + high_data[i].x + __half2float(alibi[data_id + alibi_offset + 2]); + high_data[i].y = + high_data[i].y + __half2float(alibi[data_id + alibi_offset + 3]); + } + if (mask) { low_data[i].x += __half2float(mask[data_id + mask_offset]); low_data[i].y += __half2float(mask[data_id + mask_offset + 1]); high_data[i].x += __half2float(mask[data_id + mask_offset + 2]); high_data[i].y += __half2float(mask[data_id + mask_offset + 3]); } } else { - low_data[i].x = data_id > window_stride ? __half2float(vals[data_id]) - : minus_infinity; + low_data[i].x = data_id > window_stride + ? __half2float(vals[data_id]) * layer_scale + : minus_infinity; low_data[i].y = (((!triangular || (data_id + 1) <= seq_id) && (data_id + 1) > window_stride) && (data_id + 1) < sequence_length) - ? __half2float(vals[data_id + 1]) + ? __half2float(vals[data_id + 1]) * layer_scale : minus_infinity; high_data[i].x = (((!triangular || (data_id + 2) <= seq_id) && (data_id + 2) > window_stride) && (data_id + 2) < sequence_length) - ? __half2float(vals[data_id + 2]) + ? 
__half2float(vals[data_id + 2]) * layer_scale : minus_infinity; + if (alibi) { + low_data[i].x = low_data[i].x + __half2float(alibi[data_id + alibi_offset]); + if ((data_id + 1) < sequence_length) + low_data[i].y = + low_data[i].y + __half2float(alibi[data_id + alibi_offset + 1]); + if ((data_id + 2) < sequence_length) + high_data[i].x = + high_data[i].x + __half2float(alibi[data_id + alibi_offset + 2]); + } high_data[i].y = minus_infinity; - if (mask && recompute) { + if (mask) { low_data[i].x += __half2float(mask[data_id + mask_offset]); if ((data_id + 1) < sequence_length) low_data[i].y += __half2float(mask[data_id + mask_offset + 1]); @@ -187,23 +216,26 @@ __global__ void attn_softmax_v2(__half* vals, if (data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { - vals[data_id] = low_data[i].x / sum; - vals[data_id + 1] = low_data[i].y / sum; - vals[data_id + 2] = high_data[i].x / sum; - vals[data_id + 3] = high_data[i].y / sum; + vals[data_id] = __float2half(low_data[i].x / sum); + vals[data_id + 1] = __float2half(low_data[i].y / sum); + vals[data_id + 2] = __float2half(high_data[i].x / sum); + vals[data_id + 3] = __float2half(high_data[i].y / sum); } else { - vals[data_id] = low_data[i].x / sum; - if ((data_id + 1) < sequence_length) vals[data_id + 1] = low_data[i].y / sum; - if ((data_id + 2) < sequence_length) vals[data_id + 2] = high_data[i].x / sum; + vals[data_id] = __float2half(low_data[i].x / sum); + if ((data_id + 1) < sequence_length) + vals[data_id + 1] = __float2half(low_data[i].y / sum); + if ((data_id + 2) < sequence_length) + vals[data_id + 2] = __float2half(high_data[i].x / sum); } } } } -#endif } __global__ void attn_softmax_v2(float* vals, float* attn_mask, + float* alibi, + float layer_scale, bool triangular, bool recompute, bool local_attention, @@ -212,7 +244,9 @@ __global__ void attn_softmax_v2(float* vals, int heads, int sequence_length, int num_seq, - float scale, + int head_offset, + int mask_stride, + int mp_size, int 
iterations, int reduceWidth) { @@ -234,7 +268,10 @@ __global__ void attn_softmax_v2(float* vals, if (iter_offset < total_count) { vals += (iter_offset * sequence_length); - int mask_offset = (iter_offset / (heads * num_seq)) * (sequence_length); + int batch_idx = iter_offset / (num_seq * heads); + int alibi_offset = batch_idx * heads * mp_size + head_offset; + int mask_offset = batch_idx * mask_stride + (iter_offset % mask_stride); + mask_offset = mask_offset * sequence_length; int seq_id = iter_offset % num_seq; int seq_id4 = seq_id >> 2; @@ -265,7 +302,7 @@ __global__ void attn_softmax_v2(float* vals, (data_id + 3) > window_stride) ? vals[data_id + 3] : minus_infinity; - if (attn_mask && recompute) { + if (attn_mask) { data[i].x += attn_mask[data_id + mask_offset]; data[i].y += attn_mask[data_id + mask_offset + 1]; data[i].z += attn_mask[data_id + mask_offset + 2]; @@ -282,7 +319,7 @@ __global__ void attn_softmax_v2(float* vals, ? (vals[data_id + 2]) : minus_infinity; data[i].w = minus_infinity; - if (attn_mask && recompute) { + if (attn_mask) { data[i].x += attn_mask[data_id + mask_offset]; if ((data_id + 1) < sequence_length) data[i].y += attn_mask[data_id + mask_offset + 1]; @@ -371,6 +408,8 @@ __global__ void attn_softmax_v2(float* vals, template void launch_attn_softmax_v2(T* vals, T* mask, + T* alibi, + float layer_scale, bool triangular, bool recompute, bool local_attention, @@ -379,37 +418,46 @@ void launch_attn_softmax_v2(T* vals, int heads, int num_seq, int sequence_length, - float scale, + int head_offset, + int mask_stride, + int mp_size, cudaStream_t stream) { int total_count = batch_size * heads * num_seq; - dim3 grid_dim((total_count - 1) / (WARP_SIZE / ((sequence_length - 1) / ATTN_THREADS + 1)) + 1); + int warp_num = ATTN_THREADS / WARP_SIZE; + int reduce_width = ((sequence_length - 1) / ATTN_THREADS + 1); + reduce_width = (int)pow(2.0, floor(log2((float)(reduce_width)))) * WARP_SIZE; + dim3 grid_dim((total_count - 1) / (ATTN_THREADS / 
reduce_width) + 1); dim3 block_dim(ATTN_THREADS); - const int reduce_width = ((sequence_length - 1) / ATTN_THREADS + 1) * WARP_SIZE; const int iterations = (sequence_length - 1) / (reduce_width << 2) + 1; if (sequence_length <= 32768) - attn_softmax_v2<<>>( - vals, - mask, - triangular, - recompute, - local_attention, - window_size, - total_count, - (triangular ? (heads * batch_size) : heads), - sequence_length, - num_seq, - scale, - iterations, - reduce_width); + attn_softmax_v2<<>>(vals, + mask, + alibi, + layer_scale, + triangular, + recompute, + local_attention, + window_size, + total_count, + heads, + sequence_length, + num_seq, + head_offset, + mask_stride, + mp_size, + iterations, + reduce_width); else throw std::runtime_error("Unsupport Seq_Length!"); } template void launch_attn_softmax_v2(float* vals, float* mask, + float* alibi, + float layer_scale, bool triangular, bool recompute, bool local_attention, @@ -418,10 +466,14 @@ template void launch_attn_softmax_v2(float* vals, int heads, int num_seq, int sequence_length, - float scale, + int head_offset, + int mask_stride, + int mp_size, cudaStream_t stream); template void launch_attn_softmax_v2(__half* vals, __half* mask, + __half* alibi, + float layer_scale, bool triangular, bool recompute, bool local_attention, @@ -430,5 +482,7 @@ template void launch_attn_softmax_v2(__half* vals, int heads, int num_seq, int sequence_length, - float scale, + int head_offset, + int mask_stride, + int mp_size, cudaStream_t stream); diff --git a/csrc/transformer/inference/csrc/softmax.hip b/csrc/transformer/inference/csrc/softmax.hip index 51d5bef..770120c 100644 --- a/csrc/transformer/inference/csrc/softmax.hip +++ b/csrc/transformer/inference/csrc/softmax.hip @@ -1,7 +1,11 @@ // !!! This is a file automatically generated by hipify!!! 
#include "hip/hip_runtime.h" +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + #include -#include "custom_hip_layers.h" +#include "inference_cuda_layers.h" #ifndef __HIP_PLATFORM_HCC__ #include @@ -10,7 +14,7 @@ #include #include -#define ATTN_THREADS 1024 +#define ATTN_THREADS 256 #define MAX_REG_SIZE 8 #define minus_infinity -10000.0 @@ -30,6 +34,8 @@ namespace cg = cooperative_groups; __global__ void attn_softmax_v2(__half* vals, __half* mask, + __half* alibi, + float layer_scale, bool triangular, bool recompute, bool local_attention, @@ -38,19 +44,18 @@ __global__ void attn_softmax_v2(__half* vals, int heads, int sequence_length, int num_seq, - float scale, + int head_offset, + int mask_stride, + int mp_size, int iterations, int reduceWidth) { -#ifdef HALF_PRECISION_AVAILABLE - cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile g = cg::tiled_partition(b); float2 low_data[MAX_REG_SIZE]; float2 high_data[MAX_REG_SIZE]; - - __half2 h_scale = __float2half2_rn(scale); + const __half zero_h = __float2half(0.f); int wid = threadIdx.x >> 5; int lane = threadIdx.x & 0x1f; @@ -62,11 +67,15 @@ __global__ void attn_softmax_v2(__half* vals, __shared__ float partialSum[MAX_WARP_NUM]; int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks); + int batch_idx = iter_offset / (num_seq * heads); + int alibi_offset = batch_idx * heads * mp_size + head_offset; + int mask_offset = batch_idx * mask_stride + (iter_offset % mask_stride); if (iter_offset < total_count) { vals += (iter_offset * sequence_length); - int mask_offset = (iter_offset / (heads * num_seq)) * (sequence_length); + alibi_offset = (alibi_offset + ((iter_offset / num_seq) % heads)) * sequence_length; + mask_offset = mask_offset * sequence_length; int seq_id = iter_offset % num_seq; int seq_id4 = seq_id >> 2; @@ -78,47 +87,67 @@ __global__ void attn_softmax_v2(__half* vals, (local_attention && real_seq_id >= window_size) ? 
real_seq_id - window_size : -1; float max_val = minus_infinity; - + // if (lane == 0) printf("%d, %d: %d \n", wid, blockIdx.x, mask_offset); for (int i = 0; i < iterations; i++) { int data_id = i * (reduceWidth << 2) + (seq_lane << 2); if ((!triangular || ((data_id >> 2) <= seq_id4)) && (data_id >> 2) >= window_stride4 && data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { - low_data[i].x = data_id > window_stride ? __half2float(vals[data_id]) - : minus_infinity; + low_data[i].x = data_id > window_stride + ? __half2float(vals[data_id]) * layer_scale + : minus_infinity; low_data[i].y = ((!triangular || ((data_id + 1) <= seq_id)) && (data_id + 1) > window_stride) - ? __half2float(vals[data_id + 1]) + ? __half2float(vals[data_id + 1]) * layer_scale : minus_infinity; high_data[i].x = ((!triangular || ((data_id + 2) <= seq_id)) && (data_id + 2) > window_stride) - ? __half2float(vals[data_id + 2]) + ? __half2float(vals[data_id + 2]) * layer_scale : minus_infinity; high_data[i].y = ((!triangular || ((data_id + 3) <= seq_id)) && (data_id + 3) > window_stride) - ? __half2float(vals[data_id + 3]) + ? __half2float(vals[data_id + 3]) * layer_scale : minus_infinity; - if (mask && recompute) { + if (alibi) { + low_data[i].x = low_data[i].x + __half2float(alibi[data_id + alibi_offset]); + low_data[i].y = + low_data[i].y + __half2float(alibi[data_id + alibi_offset + 1]); + high_data[i].x = + high_data[i].x + __half2float(alibi[data_id + alibi_offset + 2]); + high_data[i].y = + high_data[i].y + __half2float(alibi[data_id + alibi_offset + 3]); + } + if (mask) { low_data[i].x += __half2float(mask[data_id + mask_offset]); low_data[i].y += __half2float(mask[data_id + mask_offset + 1]); high_data[i].x += __half2float(mask[data_id + mask_offset + 2]); high_data[i].y += __half2float(mask[data_id + mask_offset + 3]); } } else { - low_data[i].x = data_id > window_stride ? __half2float(vals[data_id]) - : minus_infinity; + low_data[i].x = data_id > window_stride + ? 
__half2float(vals[data_id]) * layer_scale + : minus_infinity; low_data[i].y = (((!triangular || (data_id + 1) <= seq_id) && (data_id + 1) > window_stride) && (data_id + 1) < sequence_length) - ? __half2float(vals[data_id + 1]) + ? __half2float(vals[data_id + 1]) * layer_scale : minus_infinity; high_data[i].x = (((!triangular || (data_id + 2) <= seq_id) && (data_id + 2) > window_stride) && (data_id + 2) < sequence_length) - ? __half2float(vals[data_id + 2]) + ? __half2float(vals[data_id + 2]) * layer_scale : minus_infinity; + if (alibi) { + low_data[i].x = low_data[i].x + __half2float(alibi[data_id + alibi_offset]); + if ((data_id + 1) < sequence_length) + low_data[i].y = + low_data[i].y + __half2float(alibi[data_id + alibi_offset + 1]); + if ((data_id + 2) < sequence_length) + high_data[i].x = + high_data[i].x + __half2float(alibi[data_id + alibi_offset + 2]); + } high_data[i].y = minus_infinity; - if (mask && recompute) { + if (mask) { low_data[i].x += __half2float(mask[data_id + mask_offset]); if ((data_id + 1) < sequence_length) low_data[i].y += __half2float(mask[data_id + mask_offset + 1]); @@ -189,23 +218,26 @@ __global__ void attn_softmax_v2(__half* vals, if (data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { - vals[data_id] = low_data[i].x / sum; - vals[data_id + 1] = low_data[i].y / sum; - vals[data_id + 2] = high_data[i].x / sum; - vals[data_id + 3] = high_data[i].y / sum; + vals[data_id] = __float2half(low_data[i].x / sum); + vals[data_id + 1] = __float2half(low_data[i].y / sum); + vals[data_id + 2] = __float2half(high_data[i].x / sum); + vals[data_id + 3] = __float2half(high_data[i].y / sum); } else { - vals[data_id] = low_data[i].x / sum; - if ((data_id + 1) < sequence_length) vals[data_id + 1] = low_data[i].y / sum; - if ((data_id + 2) < sequence_length) vals[data_id + 2] = high_data[i].x / sum; + vals[data_id] = __float2half(low_data[i].x / sum); + if ((data_id + 1) < sequence_length) + vals[data_id + 1] = __float2half(low_data[i].y 
/ sum); + if ((data_id + 2) < sequence_length) + vals[data_id + 2] = __float2half(high_data[i].x / sum); } } } } -#endif } __global__ void attn_softmax_v2(float* vals, float* attn_mask, + float* alibi, + float layer_scale, bool triangular, bool recompute, bool local_attention, @@ -214,7 +246,9 @@ __global__ void attn_softmax_v2(float* vals, int heads, int sequence_length, int num_seq, - float scale, + int head_offset, + int mask_stride, + int mp_size, int iterations, int reduceWidth) { @@ -236,7 +270,10 @@ __global__ void attn_softmax_v2(float* vals, if (iter_offset < total_count) { vals += (iter_offset * sequence_length); - int mask_offset = (iter_offset / (heads * num_seq)) * (sequence_length); + int batch_idx = iter_offset / (num_seq * heads); + int alibi_offset = batch_idx * heads * mp_size + head_offset; + int mask_offset = batch_idx * mask_stride + (iter_offset % mask_stride); + mask_offset = mask_offset * sequence_length; int seq_id = iter_offset % num_seq; int seq_id4 = seq_id >> 2; @@ -267,7 +304,7 @@ __global__ void attn_softmax_v2(float* vals, (data_id + 3) > window_stride) ? vals[data_id + 3] : minus_infinity; - if (attn_mask && recompute) { + if (attn_mask) { data[i].x += attn_mask[data_id + mask_offset]; data[i].y += attn_mask[data_id + mask_offset + 1]; data[i].z += attn_mask[data_id + mask_offset + 2]; @@ -284,7 +321,7 @@ __global__ void attn_softmax_v2(float* vals, ? 
(vals[data_id + 2]) : minus_infinity; data[i].w = minus_infinity; - if (attn_mask && recompute) { + if (attn_mask) { data[i].x += attn_mask[data_id + mask_offset]; if ((data_id + 1) < sequence_length) data[i].y += attn_mask[data_id + mask_offset + 1]; @@ -373,6 +410,8 @@ __global__ void attn_softmax_v2(float* vals, template void launch_attn_softmax_v2(T* vals, T* mask, + T* alibi, + float layer_scale, bool triangular, bool recompute, bool local_attention, @@ -381,37 +420,46 @@ void launch_attn_softmax_v2(T* vals, int heads, int num_seq, int sequence_length, - float scale, + int head_offset, + int mask_stride, + int mp_size, hipStream_t stream) { int total_count = batch_size * heads * num_seq; - dim3 grid_dim((total_count - 1) / (WARP_SIZE / ((sequence_length - 1) / ATTN_THREADS + 1)) + 1); + int warp_num = ATTN_THREADS / WARP_SIZE; + int reduce_width = ((sequence_length - 1) / ATTN_THREADS + 1); + reduce_width = (int)pow(2.0, floor(log2((float)(reduce_width)))) * WARP_SIZE; + dim3 grid_dim((total_count - 1) / (ATTN_THREADS / reduce_width) + 1); dim3 block_dim(ATTN_THREADS); - const int reduce_width = ((sequence_length - 1) / ATTN_THREADS + 1) * WARP_SIZE; const int iterations = (sequence_length - 1) / (reduce_width << 2) + 1; if (sequence_length <= 32768) - hipLaunchKernelGGL(( attn_softmax_v2), dim3(grid_dim), dim3(block_dim), 0, stream, - vals, - mask, - triangular, - recompute, - local_attention, - window_size, - total_count, - (triangular ? 
(heads * batch_size) : heads), - sequence_length, - num_seq, - scale, - iterations, - reduce_width); + hipLaunchKernelGGL(( attn_softmax_v2), dim3(grid_dim), dim3(block_dim), 0, stream, vals, + mask, + alibi, + layer_scale, + triangular, + recompute, + local_attention, + window_size, + total_count, + heads, + sequence_length, + num_seq, + head_offset, + mask_stride, + mp_size, + iterations, + reduce_width); else throw std::runtime_error("Unsupport Seq_Length!"); } template void launch_attn_softmax_v2(float* vals, float* mask, + float* alibi, + float layer_scale, bool triangular, bool recompute, bool local_attention, @@ -420,10 +468,14 @@ template void launch_attn_softmax_v2(float* vals, int heads, int num_seq, int sequence_length, - float scale, + int head_offset, + int mask_stride, + int mp_size, hipStream_t stream); template void launch_attn_softmax_v2(__half* vals, __half* mask, + __half* alibi, + float layer_scale, bool triangular, bool recompute, bool local_attention, @@ -432,5 +484,7 @@ template void launch_attn_softmax_v2(__half* vals, int heads, int num_seq, int sequence_length, - float scale, + int head_offset, + int mask_stride, + int mp_size, hipStream_t stream); diff --git a/deepspeed/ops/csrc/transformer_bak/transform_kernels.cu b/csrc/transformer/inference/csrc/transform.cu similarity index 55% rename from deepspeed/ops/csrc/transformer_bak/transform_kernels.cu rename to csrc/transformer/inference/csrc/transform.cu index 15a2219..023e02f 100644 --- a/deepspeed/ops/csrc/transformer_bak/transform_kernels.cu +++ b/csrc/transformer/inference/csrc/transform.cu @@ -1,159 +1,345 @@ -#include "custom_cuda_layers.h" +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ -#define rows_trans 16 -#define cols_trans 16 - -template -__global__ void Transpose_Kernel(const T* inp, T* out, int row_width, int col_width) -{ - __shared__ T data_block[rows_trans * (cols_trans + 1)]; +#ifndef __HIP_PLATFORM_HCC__ +#include +#endif +#include "inference_cuda_layers.h" 
+namespace cg = cooperative_groups; - int r = threadIdx.x / cols_trans; - int c = threadIdx.x % cols_trans; +// Bias add - int m = row_width / cols_trans; +__global__ void bias_add_transform_0213(float* output, + float* k_cache, + float* v_cache, + const float* vals, + const float* bias, + int hidden_dim, + int seq_length, + unsigned seq_offset, + int heads, + int rotary_dim, + bool rotate_half, + bool rotate_every_two, + int head_ext, + int max_out_tokens) +{ + int d0_stride = hidden_dim * seq_length; + int d1_stride = hidden_dim; + int d2_stride = hidden_dim / heads; - int i = blockIdx.x / m * rows_trans + r; - int j = blockIdx.x % m * cols_trans + c; + int d0 = blockIdx.x; // Batch + int d1 = blockIdx.y; // Sequence ID (0-127) + int cnt = blockIdx.z / head_ext; // Hidden count + int d2 = threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext); // Head (0-11) + int d3 = threadIdx.x; // Values (groups of 4) - int row_stride = rows_trans / ((rows_trans * cols_trans + THREADS - 1) / THREADS); + int d2_out_stride = d2_stride * (cnt == 0 ? seq_length : max_out_tokens); + int d0_out_stride = hidden_dim * (cnt == 0 ? seq_length : max_out_tokens); - for (int k = 0; k < rows_trans; k += row_stride) - data_block[(k + r) * cols_trans + c] = inp[(i + k) * row_width + j]; + const float4* vals_vec = reinterpret_cast(vals); + float4* output_vec = + reinterpret_cast(cnt == 0 ? output : (cnt == 1 ? 
k_cache : v_cache)); - __syncthreads(); + vals_vec += (d0 * d0_stride * (gridDim.z / head_ext)); + vals_vec += (d1 * d1_stride * (gridDim.z / head_ext)); + vals_vec += (cnt * d1_stride); + vals_vec += (d2 * d2_stride); - i = blockIdx.x % m * rows_trans + r; - j = blockIdx.x / m * cols_trans + c; + output_vec += (d1 * d2_stride); + output_vec += (d0 * d0_out_stride); + output_vec += (d2 * d2_out_stride); - for (int k = 0; k < rows_trans; k += row_stride) - out[(i + k) * col_width + j] = data_block[c * cols_trans + r + k]; + unsigned seq_id = d1 + seq_offset; + float4 inputs = vals_vec[d3]; + int lane = d3 & 0x1f; + if (cnt < 2 && rotary_dim > 0 && d3 < rotary_dim) { + float4 q = vals_vec[d3]; + float2* q_f = reinterpret_cast(&q); + if (rotate_every_two) { +#pragma unroll + for (int o = 0; o < 2; o++) { + float inv_freq = (float)(((d3 << 1) + o) * 2) / (float)(rotary_dim << 2); + inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; + q_f[o].x = (-1.0 * q_f[o].y * sinf(inv_freq) + q_f[o].x * cosf(inv_freq)); + q_f[o].y = (q_f[o].x * sinf(inv_freq) + q_f[o].y * cosf(inv_freq)); + } + } + output_vec[d3] = q; + } else + output_vec[d3] = inputs; } -template <> -void Transpose<__half>(const __half* inp_mat, - __half* out_mat, - int rows, - int cols, - cudaStream_t stream) +#define ATTN_H 3 +#define MAX_SEQ_LINE 10 + +__global__ void bias_add_transform_0213(__half* output, // q + __half* k_cache, + __half* v_cache, + const __half* vals, // qkv + const __half* bias, + int hidden_dim, + int seq_length, + unsigned seq_offset, + int all_tokens, + int heads, + int rotary_dim, + bool rotate_half, + bool rotate_every_two, + int head_ext, + int max_out_tokens) { - int threads = THREADS; + unsigned half_dim = (rotary_dim << 3) >> 1; + int d0_stride = hidden_dim * seq_length; + int d1_stride = hidden_dim; + int d2_stride = hidden_dim / heads; + + int d0 = blockIdx.x; // Batch + int d1 = blockIdx.y; // Sequence ID (0-127) + int cnt = blockIdx.z / head_ext; // Hidden count + int 
d2 = threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext); // Head (0-11) + int d3 = threadIdx.x; // Values (groups of 4) + + int d2_out_stride = d2_stride * (cnt == 0 ? seq_length : max_out_tokens); + int d0_out_stride = hidden_dim * (cnt == 0 ? seq_length : max_out_tokens); + + float4 vals_arr; + float4 output_arr; + + __half2* vals_half = reinterpret_cast<__half2*>(&vals_arr); + __half2* output_half = reinterpret_cast<__half2*>(&output_arr); - Transpose_Kernel<__half><<<(rows * cols + threads - 1) / threads, threads, 0, stream>>>( - inp_mat, out_mat, cols, rows); + const float4* vals_vec = reinterpret_cast(vals); + float4* output_vec = + reinterpret_cast(cnt == 0 ? output : (cnt == 1 ? k_cache : v_cache)); + + vals_vec += (d0 * d0_stride * (gridDim.z / head_ext)); + vals_vec += (d1 * d1_stride * (gridDim.z / head_ext)); + vals_vec += (cnt * d1_stride); + vals_vec += (d2 * d2_stride); + + output_vec += (d1 * d2_stride); + output_vec += (d0 * d0_out_stride); + output_vec += (d2 * d2_out_stride); + + unsigned seq_id = d1 + seq_offset; + + int lane = d3 & 0x1f; + if (cnt < 2 && rotary_dim > 0 && d3 < rotary_dim) { + float4 q = vals_vec[d3]; + __half2* q_h = reinterpret_cast<__half2*>(&q); + if (rotate_every_two) { +#pragma unroll + for (int o = 0; o < 4; o++) { + float inv_freq = (float)(((d3 << 2) + o) * 2) / (float)(rotary_dim << 3); + inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; + float q_data[2]; + q_data[0] = (float)q_h[o].x; + q_data[1] = (float)q_h[o].y; + q_h[o].x = (__half)(-1.0 * q_data[1] * sinf(inv_freq) + q_data[0] * cosf(inv_freq)); + q_h[o].y = (__half)(q_data[0] * sinf(inv_freq) + q_data[1] * cosf(inv_freq)); + } + } + output_vec[d3] = q; + } else + output_vec[d3] = vals_vec[d3]; } +// [B S C*H] - > C * [B A S N] template <> -void Transpose(const float* inp_mat, float* out_mat, int rows, int cols, cudaStream_t stream) +void launch_bias_add_transform_0213(float* output, + float* k_cache, + float* v_cache, + const float* vals, + 
const float* bias, + int batch_size, + int seq_length, + unsigned seq_offset, + int all_tokens, + int hidden_dim, + int heads, + int rotary_dim, + bool rotate_half, + bool rotate_every_two, + cudaStream_t stream, + int trans_count, + int max_out_tokens) { - int threads = THREADS; + hidden_dim >>= 2; + int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; - Transpose_Kernel<<<(rows * cols + threads - 1) / threads, threads, 0, stream>>>( - inp_mat, out_mat, cols, rows); -} + dim3 block_dim(hidden_dim / heads, (heads / head_ext)); + dim3 grid_dim(batch_size, seq_length, (trans_count * head_ext)); + bias_add_transform_0213<<>>(output, + k_cache, + v_cache, + vals, + bias, + hidden_dim, + seq_length, + seq_offset, + heads, + rotary_dim >> 2, + rotate_half, + rotate_every_two, + head_ext, + max_out_tokens); +} template -__global__ void transform_0213(T* output, - const T* vals, - int hidden_dim, - int seq_length, - int heads, - int head_ext); - +void launch_bias_add_transform_0213(T* outputs, + T* vals, + T* vals1, + const T* vals2, + const T* bias, + int batch_size, + int seq_length, + unsigned seq_offset, + int seq_length1, + int hidden_dim, + int heads, + int rotary_dim, + bool rotate_half, + bool rotate_every_two, + cudaStream_t stream, + int trans_count, + int max_out_tokens); template <> -__global__ void transform_0213(float* output, - const float* vals, - int hidden_dim, - int seq_length, - int heads, - int head_ext) +void launch_bias_add_transform_0213<__half>(__half* output, + __half* k_cache, + __half* v_cache, + const __half* vals, + const __half* bias, + int batch_size, + int seq_length, + unsigned seq_offset, + int all_tokens, + int hidden_dim, + int heads, + int rotary_dim, + bool rotate_half, + bool rotate_every_two, + cudaStream_t stream, + int trans_count, + int max_out_tokens) { - int d0_stride = hidden_dim * seq_length; - int d1_stride = hidden_dim; - int d2_stride = hidden_dim / heads; - - int d0_out_stride = d0_stride; - int d1_out_stride = d2_stride; - 
int d2_out_stride = d2_stride * seq_length; - - int d0 = blockIdx.x; // Batch - int d1 = blockIdx.y / head_ext; // Sequence ID (0-127) - int d2 = threadIdx.y + (blockIdx.y % head_ext) * (heads / head_ext); // Head (0-11) - int d3 = threadIdx.x; // Values (groups of 4) + hidden_dim >>= 3; + int head_ext = 1; // (hidden_dim - 1) / MAX_THREADS + 1; + dim3 block_dim(hidden_dim / heads, (heads / head_ext)); + dim3 grid_dim(batch_size, seq_length, (trans_count * head_ext)); + bias_add_transform_0213<<>>(output, + k_cache, + v_cache, + vals, + bias, + hidden_dim, + seq_length, + seq_offset, + all_tokens, + heads, + rotary_dim >> 3, + rotate_half, + rotate_every_two, + head_ext, + max_out_tokens); +} - const float4* vals_vec = reinterpret_cast(vals); - float4* output_vec = reinterpret_cast(output); +// Bias add - float4 inputs = vals_vec[d0 * d0_stride + d1 * d1_stride + d2 * d2_stride + d3]; - output_vec[d0 * d0_out_stride + d1 * d1_out_stride + d2 * d2_out_stride + d3] = inputs; +__global__ void pad_add_transform_0213(float* output, + const float* vals, + int hidden_dim, + int seq_length, + int padded_seq_len, + int heads, + int padded_head_size) +{ } -template <> -__global__ void transform_0213<__half>(__half* output, +__global__ void pad_add_transform_0213(__half* output, const __half* vals, int hidden_dim, int seq_length, + int padded_seq_len, int heads, - int head_ext) + int padded_head_size) { -#ifdef HALF_PRECISION_AVAILABLE + float4 ZERO; + const __half2 zero_h = __float2half2_rn(0.f); + __half2* ZERO_h = reinterpret_cast<__half2*>(&ZERO); +#pragma unroll + for (int i = 0; i < 4; i++) ZERO_h[i] = zero_h; int d0_stride = hidden_dim * seq_length; int d1_stride = hidden_dim; int d2_stride = hidden_dim / heads; - int d0_out_stride = d0_stride; - int d1_out_stride = d2_stride; - int d2_out_stride = d2_stride * seq_length; - - int d0 = blockIdx.x; // Batch - int d1 = blockIdx.y / head_ext; // Sequence ID (0-127) - int d2 = threadIdx.y + (blockIdx.y % head_ext) * (heads 
/ head_ext); // Head (0-11) - int d3 = threadIdx.x; // Values (groups of 4) + int d0 = blockIdx.x; // Batch + int d1 = blockIdx.y * blockDim.z + threadIdx.z; // Sequence ID (0-127) + int d2 = threadIdx.y; // Head (0-11) + int d3 = threadIdx.x; // Values (groups of 4) - float4 vals_arr[1]; + int d2_out_stride = padded_head_size * padded_seq_len; + int d0_out_stride = heads * d2_out_stride; const float4* vals_vec = reinterpret_cast(vals); float4* output_vec = reinterpret_cast(output); - vals_arr[0] = vals_vec[d0 * d0_stride + d1 * d1_stride + d2 * d2_stride + d3]; - output_vec[d0 * d0_out_stride + d1 * d1_out_stride + d2 * d2_out_stride + d3] = vals_arr[0]; -#endif -} + vals_vec += (d0 * d0_stride); + vals_vec += (d1 * d1_stride); + vals_vec += (d2 * d2_stride); -template <> -void launch_transform_0213(float* output, - const float* vals, - int batch_size, - int seq_length, - int hidden_dim, - int heads, - cudaStream_t stream) -{ - hidden_dim >>= 2; - int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; - dim3 block_dim(hidden_dim / heads, (heads / head_ext)); - dim3 grid_dim(batch_size, (seq_length * head_ext)); + output_vec += (d1 * padded_head_size); + output_vec += (d0 * d0_out_stride); + output_vec += (d2 * d2_out_stride); - transform_0213 - <<>>(output, vals, hidden_dim, seq_length, heads, head_ext); + if (d3 < d2_stride && d1 < seq_length) + output_vec[d3] = vals_vec[d3]; + else + output_vec[d3] = ZERO; } -template <> -void launch_transform_0213<__half>(__half* output, - const __half* vals, +template +void launch_pad_add_transform_0213(T* output, + const T* vals, int batch_size, - int seq_length, int hidden_dim, + int seq_length, + int padded_seq_len, int heads, - cudaStream_t stream) + int padded_head_size, + cudaStream_t stream); + +// [B S C*H] - > C * [B A S N] +template <> +void launch_pad_add_transform_0213(float* output, + const float* vals, + int batch_size, + int hidden_dim, + int seq_length, + int padded_seq_len, + int heads, + int padded_head_size, + 
cudaStream_t stream) +{ +} +template <> +void launch_pad_add_transform_0213<__half>(__half* output, + const __half* vals, + int batch_size, + int hidden_dim, + int seq_length, + int padded_seq_len, + int heads, + int padded_head_size, + cudaStream_t stream) { hidden_dim >>= 3; - int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; - dim3 block_dim(hidden_dim / heads, (heads / head_ext)); - dim3 grid_dim(batch_size, (seq_length * head_ext)); - transform_0213<__half> - <<>>(output, vals, hidden_dim, seq_length, heads, head_ext); + dim3 block_dim((padded_head_size >> 3), heads, 2); + dim3 grid_dim(batch_size, padded_seq_len / 2); + pad_add_transform_0213<<>>( + output, vals, hidden_dim, seq_length, padded_seq_len, heads, padded_head_size >> 3); } // Bias add @@ -207,9 +393,6 @@ __global__ void bias_add_transform_0213(float* output, d2 * d2_out_stride + d3] = outputs; } -#define ATTN_H 3 -#define MAX_SEQ_LINE 10 - template <> __global__ void bias_add_transform_0213<__half>(__half* output, const __half* vals, @@ -219,8 +402,6 @@ __global__ void bias_add_transform_0213<__half>(__half* output, int heads, int head_ext) { -#ifdef HALF_PRECISION_AVAILABLE - int d0_stride = hidden_dim * seq_length; int d1_stride = hidden_dim; int d2_stride = hidden_dim / heads; @@ -260,26 +441,11 @@ __global__ void bias_add_transform_0213<__half>(__half* output, bias_arr = bias_vec[d3]; vals_arr = vals_vec[d3]; -#if defined(__ACC_HALF__) output_half[0] = vals_half[0] + bias_half[0]; output_half[1] = vals_half[1] + bias_half[1]; output_half[2] = vals_half[2] + bias_half[2]; output_half[3] = vals_half[3] + bias_half[3]; -#else - float2 bias_arr_f[4]; - float2 vals_arr_f[4]; -#pragma unroll - for (int l = 0; l < 4; l++) { - bias_arr_f[l] = __half22float2(bias_half[l]); - vals_arr_f[l] = __half22float2(vals_half[l]); - vals_arr_f[l].x += bias_arr_f[l].x; - vals_arr_f[l].y += bias_arr_f[l].y; - output_half[l] = __float22half2_rn(vals_arr_f[l]); - } -#endif output_vec[d3] = output_arr; - -#endif } 
__global__ void bias_add_transform_0213_v2(__half* output, @@ -289,7 +455,6 @@ __global__ void bias_add_transform_0213_v2(__half* output, int seq_length, int heads) { -#ifdef HALF_PRECISION_AVAILABLE __shared__ float4 in_data[3072]; int d0_stride = hidden_dim * seq_length; @@ -351,55 +516,6 @@ __global__ void bias_add_transform_0213_v2(__half* output, output_vec[out_index + iter_offset] = in_data[iter_row * d2_stride + d3 + (d2 % 2) * (d1_stride * blockDim.z)]; } -#endif -} - -// [B S C*H] - > C * [B A S N] -template <> -void launch_bias_add_transform_0213(float* output, - const float* vals, - const float* bias, - int batch_size, - int seq_length, - int hidden_dim, - int heads, - cudaStream_t stream, - int trans_count) -{ - hidden_dim >>= 2; - int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; - - dim3 block_dim(hidden_dim / heads, (heads / head_ext)); - dim3 grid_dim(batch_size, seq_length, (trans_count * head_ext)); - - bias_add_transform_0213<<>>( - output, vals, bias, hidden_dim, seq_length, heads, head_ext); -} - -template <> -void launch_bias_add_transform_0213<__half>(__half* output, - const __half* vals, - const __half* bias, - int batch_size, - int seq_length, - int hidden_dim, - int heads, - cudaStream_t stream, - int trans_count) -{ - hidden_dim >>= 3; - if (hidden_dim > 128 || hidden_dim < 16) { - int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; - dim3 block_dim(hidden_dim / heads, (heads / head_ext)); - dim3 grid_dim(batch_size, seq_length, (trans_count * head_ext)); - bias_add_transform_0213<__half><<>>( - output, vals, bias, hidden_dim, seq_length, heads, head_ext); - } else { - dim3 block_dim(hidden_dim / heads, heads, trans_count); - dim3 grid_dim(batch_size, seq_length / 2); - bias_add_transform_0213_v2<<>>( - output, vals, bias, hidden_dim, seq_length, heads); - } } template @@ -451,8 +567,6 @@ __global__ void transform4d_0213<__half>(__half* out, int hidden_dim, int head_ext) { -#ifdef HALF_PRECISION_AVAILABLE - int d0_stride = hidden_dim * 
(seq_length / head_ext); int d1_stride = hidden_dim; int d2_stride = hidden_dim / heads; @@ -477,8 +591,6 @@ __global__ void transform4d_0213<__half>(__half* out, out_vec += (d2 * d1_stride * gridDim.y); out_vec[d3] = in_vec[d3]; - -#endif } __global__ void transform4d_0213_v2(__half* out, @@ -487,7 +599,6 @@ __global__ void transform4d_0213_v2(__half* out, int seq_length, int hidden_dim) { -#ifdef HALF_PRECISION_AVAILABLE __shared__ float4 in_data[3072]; int d0_stride = hidden_dim * seq_length; @@ -528,7 +639,6 @@ __global__ void transform4d_0213_v2(__half* out, int iter_id = iter * iteration_stride + iter_index; out_vec[output_offset + iter_id] = in_data[iter_id]; } -#endif } // 3 * [B A S N] - > [B S C*H] @@ -560,16 +670,9 @@ void launch_transform4d_0213<__half>(__half* out, int trans_count) { hidden_dim >>= 3; - if (hidden_dim > 128 || hidden_dim < 16) { - int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; - dim3 grid_dims(batch_size, trans_count, (seq_length * head_ext)); - dim3 block_dims(hidden_dim / heads, (heads / head_ext)); - transform4d_0213<__half><<>>( - out, in, heads, seq_length, hidden_dim, head_ext); - } else { - dim3 grid_dims(batch_size, seq_length / 2); - dim3 block_dims(hidden_dim / heads, heads, trans_count); - transform4d_0213_v2<<>>( - out, in, heads, seq_length, hidden_dim); - } + int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; + dim3 grid_dims(batch_size, trans_count, (seq_length * head_ext)); + dim3 block_dims(hidden_dim / heads, (heads / head_ext)); + transform4d_0213<__half> + <<>>(out, in, heads, seq_length, hidden_dim, head_ext); } diff --git a/deepspeed/ops/csrc/transformer_bak/transform_kernels.hip b/csrc/transformer/inference/csrc/transform.hip similarity index 55% rename from deepspeed/ops/csrc/transformer_bak/transform_kernels.hip rename to csrc/transformer/inference/csrc/transform.hip index 0aaa4cc..c9ff334 100644 --- a/deepspeed/ops/csrc/transformer_bak/transform_kernels.hip +++ 
b/csrc/transformer/inference/csrc/transform.hip @@ -1,161 +1,347 @@ // !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" -#include "custom_hip_layers.h" +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ -#define rows_trans 16 -#define cols_trans 16 - -template -__global__ void Transpose_Kernel(const T* inp, T* out, int row_width, int col_width) -{ - __shared__ T data_block[rows_trans * (cols_trans + 1)]; +#ifndef __HIP_PLATFORM_HCC__ +#include +#endif +#include "inference_cuda_layers.h" +namespace cg = cooperative_groups; - int r = threadIdx.x / cols_trans; - int c = threadIdx.x % cols_trans; +// Bias add - int m = row_width / cols_trans; +__global__ void bias_add_transform_0213(float* output, + float* k_cache, + float* v_cache, + const float* vals, + const float* bias, + int hidden_dim, + int seq_length, + unsigned seq_offset, + int heads, + int rotary_dim, + bool rotate_half, + bool rotate_every_two, + int head_ext, + int max_out_tokens) +{ + int d0_stride = hidden_dim * seq_length; + int d1_stride = hidden_dim; + int d2_stride = hidden_dim / heads; - int i = blockIdx.x / m * rows_trans + r; - int j = blockIdx.x % m * cols_trans + c; + int d0 = blockIdx.x; // Batch + int d1 = blockIdx.y; // Sequence ID (0-127) + int cnt = blockIdx.z / head_ext; // Hidden count + int d2 = threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext); // Head (0-11) + int d3 = threadIdx.x; // Values (groups of 4) - int row_stride = rows_trans / ((rows_trans * cols_trans + THREADS - 1) / THREADS); + int d2_out_stride = d2_stride * (cnt == 0 ? seq_length : max_out_tokens); + int d0_out_stride = hidden_dim * (cnt == 0 ? seq_length : max_out_tokens); - for (int k = 0; k < rows_trans; k += row_stride) - data_block[(k + r) * cols_trans + c] = inp[(i + k) * row_width + j]; + const float4* vals_vec = reinterpret_cast(vals); + float4* output_vec = + reinterpret_cast(cnt == 0 ? output : (cnt == 1 ? 
k_cache : v_cache)); - __syncthreads(); + vals_vec += (d0 * d0_stride * (gridDim.z / head_ext)); + vals_vec += (d1 * d1_stride * (gridDim.z / head_ext)); + vals_vec += (cnt * d1_stride); + vals_vec += (d2 * d2_stride); - i = blockIdx.x % m * rows_trans + r; - j = blockIdx.x / m * cols_trans + c; + output_vec += (d1 * d2_stride); + output_vec += (d0 * d0_out_stride); + output_vec += (d2 * d2_out_stride); - for (int k = 0; k < rows_trans; k += row_stride) - out[(i + k) * col_width + j] = data_block[c * cols_trans + r + k]; + unsigned seq_id = d1 + seq_offset; + float4 inputs = vals_vec[d3]; + int lane = d3 & 0x1f; + if (cnt < 2 && rotary_dim > 0 && d3 < rotary_dim) { + float4 q = vals_vec[d3]; + float2* q_f = reinterpret_cast(&q); + if (rotate_every_two) { +#pragma unroll + for (int o = 0; o < 2; o++) { + float inv_freq = (float)(((d3 << 1) + o) * 2) / (float)(rotary_dim << 2); + inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; + q_f[o].x = (-1.0 * q_f[o].y * sinf(inv_freq) + q_f[o].x * cosf(inv_freq)); + q_f[o].y = (q_f[o].x * sinf(inv_freq) + q_f[o].y * cosf(inv_freq)); + } + } + output_vec[d3] = q; + } else + output_vec[d3] = inputs; } -template <> -void Transpose<__half>(const __half* inp_mat, - __half* out_mat, - int rows, - int cols, - hipStream_t stream) +#define ATTN_H 3 +#define MAX_SEQ_LINE 10 + +__global__ void bias_add_transform_0213(__half* output, // q + __half* k_cache, + __half* v_cache, + const __half* vals, // qkv + const __half* bias, + int hidden_dim, + int seq_length, + unsigned seq_offset, + int all_tokens, + int heads, + int rotary_dim, + bool rotate_half, + bool rotate_every_two, + int head_ext, + int max_out_tokens) { - int threads = THREADS; + unsigned half_dim = (rotary_dim << 3) >> 1; + int d0_stride = hidden_dim * seq_length; + int d1_stride = hidden_dim; + int d2_stride = hidden_dim / heads; + + int d0 = blockIdx.x; // Batch + int d1 = blockIdx.y; // Sequence ID (0-127) + int cnt = blockIdx.z / head_ext; // Hidden count + int d2 
= threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext); // Head (0-11) + int d3 = threadIdx.x; // Values (groups of 4) + + int d2_out_stride = d2_stride * (cnt == 0 ? seq_length : max_out_tokens); + int d0_out_stride = hidden_dim * (cnt == 0 ? seq_length : max_out_tokens); + + float4 vals_arr; + float4 output_arr; + + __half2* vals_half = reinterpret_cast<__half2*>(&vals_arr); + __half2* output_half = reinterpret_cast<__half2*>(&output_arr); - hipLaunchKernelGGL(( Transpose_Kernel<__half>), dim3((rows * cols + threads - 1) / threads), dim3(threads), 0, stream, - inp_mat, out_mat, cols, rows); + const float4* vals_vec = reinterpret_cast(vals); + float4* output_vec = + reinterpret_cast(cnt == 0 ? output : (cnt == 1 ? k_cache : v_cache)); + + vals_vec += (d0 * d0_stride * (gridDim.z / head_ext)); + vals_vec += (d1 * d1_stride * (gridDim.z / head_ext)); + vals_vec += (cnt * d1_stride); + vals_vec += (d2 * d2_stride); + + output_vec += (d1 * d2_stride); + output_vec += (d0 * d0_out_stride); + output_vec += (d2 * d2_out_stride); + + unsigned seq_id = d1 + seq_offset; + + int lane = d3 & 0x1f; + if (cnt < 2 && rotary_dim > 0 && d3 < rotary_dim) { + float4 q = vals_vec[d3]; + __half2* q_h = reinterpret_cast<__half2*>(&q); + if (rotate_every_two) { +#pragma unroll + for (int o = 0; o < 4; o++) { + float inv_freq = (float)(((d3 << 2) + o) * 2) / (float)(rotary_dim << 3); + inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; + float q_data[2]; + q_data[0] = (float)q_h[o].x; + q_data[1] = (float)q_h[o].y; + q_h[o].x = (__half)(-1.0 * q_data[1] * sinf(inv_freq) + q_data[0] * cosf(inv_freq)); + q_h[o].y = (__half)(q_data[0] * sinf(inv_freq) + q_data[1] * cosf(inv_freq)); + } + } + output_vec[d3] = q; + } else + output_vec[d3] = vals_vec[d3]; } +// [B S C*H] - > C * [B A S N] template <> -void Transpose(const float* inp_mat, float* out_mat, int rows, int cols, hipStream_t stream) +void launch_bias_add_transform_0213(float* output, + float* k_cache, + float* v_cache, + 
const float* vals, + const float* bias, + int batch_size, + int seq_length, + unsigned seq_offset, + int all_tokens, + int hidden_dim, + int heads, + int rotary_dim, + bool rotate_half, + bool rotate_every_two, + hipStream_t stream, + int trans_count, + int max_out_tokens) { - int threads = THREADS; + hidden_dim >>= 2; + int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; - hipLaunchKernelGGL(( Transpose_Kernel), dim3((rows * cols + threads - 1) / threads), dim3(threads), 0, stream, - inp_mat, out_mat, cols, rows); -} + dim3 block_dim(hidden_dim / heads, (heads / head_ext)); + dim3 grid_dim(batch_size, seq_length, (trans_count * head_ext)); + hipLaunchKernelGGL(( bias_add_transform_0213), dim3(grid_dim), dim3(block_dim), 0, stream, output, + k_cache, + v_cache, + vals, + bias, + hidden_dim, + seq_length, + seq_offset, + heads, + rotary_dim >> 2, + rotate_half, + rotate_every_two, + head_ext, + max_out_tokens); +} template -__global__ void transform_0213(T* output, - const T* vals, - int hidden_dim, - int seq_length, - int heads, - int head_ext); - +void launch_bias_add_transform_0213(T* outputs, + T* vals, + T* vals1, + const T* vals2, + const T* bias, + int batch_size, + int seq_length, + unsigned seq_offset, + int seq_length1, + int hidden_dim, + int heads, + int rotary_dim, + bool rotate_half, + bool rotate_every_two, + hipStream_t stream, + int trans_count, + int max_out_tokens); template <> -__global__ void transform_0213(float* output, - const float* vals, - int hidden_dim, - int seq_length, - int heads, - int head_ext) +void launch_bias_add_transform_0213<__half>(__half* output, + __half* k_cache, + __half* v_cache, + const __half* vals, + const __half* bias, + int batch_size, + int seq_length, + unsigned seq_offset, + int all_tokens, + int hidden_dim, + int heads, + int rotary_dim, + bool rotate_half, + bool rotate_every_two, + hipStream_t stream, + int trans_count, + int max_out_tokens) { - int d0_stride = hidden_dim * seq_length; - int d1_stride = 
hidden_dim; - int d2_stride = hidden_dim / heads; - - int d0_out_stride = d0_stride; - int d1_out_stride = d2_stride; - int d2_out_stride = d2_stride * seq_length; - - int d0 = blockIdx.x; // Batch - int d1 = blockIdx.y / head_ext; // Sequence ID (0-127) - int d2 = threadIdx.y + (blockIdx.y % head_ext) * (heads / head_ext); // Head (0-11) - int d3 = threadIdx.x; // Values (groups of 4) + hidden_dim >>= 3; + int head_ext = 1; // (hidden_dim - 1) / MAX_THREADS + 1; + dim3 block_dim(hidden_dim / heads, (heads / head_ext)); + dim3 grid_dim(batch_size, seq_length, (trans_count * head_ext)); + hipLaunchKernelGGL(( bias_add_transform_0213), dim3(grid_dim), dim3(block_dim), 0, stream, output, + k_cache, + v_cache, + vals, + bias, + hidden_dim, + seq_length, + seq_offset, + all_tokens, + heads, + rotary_dim >> 3, + rotate_half, + rotate_every_two, + head_ext, + max_out_tokens); +} - const float4* vals_vec = reinterpret_cast(vals); - float4* output_vec = reinterpret_cast(output); +// Bias add - float4 inputs = vals_vec[d0 * d0_stride + d1 * d1_stride + d2 * d2_stride + d3]; - output_vec[d0 * d0_out_stride + d1 * d1_out_stride + d2 * d2_out_stride + d3] = inputs; +__global__ void pad_add_transform_0213(float* output, + const float* vals, + int hidden_dim, + int seq_length, + int padded_seq_len, + int heads, + int padded_head_size) +{ } -template <> -__global__ void transform_0213<__half>(__half* output, +__global__ void pad_add_transform_0213(__half* output, const __half* vals, int hidden_dim, int seq_length, + int padded_seq_len, int heads, - int head_ext) + int padded_head_size) { -#ifdef HALF_PRECISION_AVAILABLE + float4 ZERO; + const __half2 zero_h = __float2half2_rn(0.f); + __half2* ZERO_h = reinterpret_cast<__half2*>(&ZERO); +#pragma unroll + for (int i = 0; i < 4; i++) ZERO_h[i] = zero_h; int d0_stride = hidden_dim * seq_length; int d1_stride = hidden_dim; int d2_stride = hidden_dim / heads; - int d0_out_stride = d0_stride; - int d1_out_stride = d2_stride; - int 
d2_out_stride = d2_stride * seq_length; - - int d0 = blockIdx.x; // Batch - int d1 = blockIdx.y / head_ext; // Sequence ID (0-127) - int d2 = threadIdx.y + (blockIdx.y % head_ext) * (heads / head_ext); // Head (0-11) - int d3 = threadIdx.x; // Values (groups of 4) + int d0 = blockIdx.x; // Batch + int d1 = blockIdx.y * blockDim.z + threadIdx.z; // Sequence ID (0-127) + int d2 = threadIdx.y; // Head (0-11) + int d3 = threadIdx.x; // Values (groups of 4) - float4 vals_arr[1]; + int d2_out_stride = padded_head_size * padded_seq_len; + int d0_out_stride = heads * d2_out_stride; const float4* vals_vec = reinterpret_cast(vals); float4* output_vec = reinterpret_cast(output); - vals_arr[0] = vals_vec[d0 * d0_stride + d1 * d1_stride + d2 * d2_stride + d3]; - output_vec[d0 * d0_out_stride + d1 * d1_out_stride + d2 * d2_out_stride + d3] = vals_arr[0]; -#endif -} + vals_vec += (d0 * d0_stride); + vals_vec += (d1 * d1_stride); + vals_vec += (d2 * d2_stride); -template <> -void launch_transform_0213(float* output, - const float* vals, - int batch_size, - int seq_length, - int hidden_dim, - int heads, - hipStream_t stream) -{ - hidden_dim >>= 2; - int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; - dim3 block_dim(hidden_dim / heads, (heads / head_ext)); - dim3 grid_dim(batch_size, (seq_length * head_ext)); + output_vec += (d1 * padded_head_size); + output_vec += (d0 * d0_out_stride); + output_vec += (d2 * d2_out_stride); - hipLaunchKernelGGL(( transform_0213) - , dim3(grid_dim), dim3(block_dim), 0, stream, output, vals, hidden_dim, seq_length, heads, head_ext); + if (d3 < d2_stride && d1 < seq_length) + output_vec[d3] = vals_vec[d3]; + else + output_vec[d3] = ZERO; } -template <> -void launch_transform_0213<__half>(__half* output, - const __half* vals, +template +void launch_pad_add_transform_0213(T* output, + const T* vals, int batch_size, - int seq_length, int hidden_dim, + int seq_length, + int padded_seq_len, int heads, - hipStream_t stream) + int padded_head_size, + 
hipStream_t stream); + +// [B S C*H] - > C * [B A S N] +template <> +void launch_pad_add_transform_0213(float* output, + const float* vals, + int batch_size, + int hidden_dim, + int seq_length, + int padded_seq_len, + int heads, + int padded_head_size, + hipStream_t stream) +{ +} +template <> +void launch_pad_add_transform_0213<__half>(__half* output, + const __half* vals, + int batch_size, + int hidden_dim, + int seq_length, + int padded_seq_len, + int heads, + int padded_head_size, + hipStream_t stream) { hidden_dim >>= 3; - int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; - dim3 block_dim(hidden_dim / heads, (heads / head_ext)); - dim3 grid_dim(batch_size, (seq_length * head_ext)); - hipLaunchKernelGGL(( transform_0213<__half>) - , dim3(grid_dim), dim3(block_dim), 0, stream, output, vals, hidden_dim, seq_length, heads, head_ext); + dim3 block_dim((padded_head_size >> 3), heads, 2); + dim3 grid_dim(batch_size, padded_seq_len / 2); + hipLaunchKernelGGL(( pad_add_transform_0213), dim3(grid_dim), dim3(block_dim), 0, stream, + output, vals, hidden_dim, seq_length, padded_seq_len, heads, padded_head_size >> 3); } // Bias add @@ -209,9 +395,6 @@ __global__ void bias_add_transform_0213(float* output, d2 * d2_out_stride + d3] = outputs; } -#define ATTN_H 3 -#define MAX_SEQ_LINE 10 - template <> __global__ void bias_add_transform_0213<__half>(__half* output, const __half* vals, @@ -221,8 +404,6 @@ __global__ void bias_add_transform_0213<__half>(__half* output, int heads, int head_ext) { -#ifdef HALF_PRECISION_AVAILABLE - int d0_stride = hidden_dim * seq_length; int d1_stride = hidden_dim; int d2_stride = hidden_dim / heads; @@ -262,26 +443,11 @@ __global__ void bias_add_transform_0213<__half>(__half* output, bias_arr = bias_vec[d3]; vals_arr = vals_vec[d3]; -#if defined(__ACC_HALF__) output_half[0] = vals_half[0] + bias_half[0]; output_half[1] = vals_half[1] + bias_half[1]; output_half[2] = vals_half[2] + bias_half[2]; output_half[3] = vals_half[3] + bias_half[3]; 
-#else - float2 bias_arr_f[4]; - float2 vals_arr_f[4]; -#pragma unroll - for (int l = 0; l < 4; l++) { - bias_arr_f[l] = __half22float2(bias_half[l]); - vals_arr_f[l] = __half22float2(vals_half[l]); - vals_arr_f[l].x += bias_arr_f[l].x; - vals_arr_f[l].y += bias_arr_f[l].y; - output_half[l] = __float22half2_rn(vals_arr_f[l]); - } -#endif output_vec[d3] = output_arr; - -#endif } __global__ void bias_add_transform_0213_v2(__half* output, @@ -291,7 +457,6 @@ __global__ void bias_add_transform_0213_v2(__half* output, int seq_length, int heads) { -#ifdef HALF_PRECISION_AVAILABLE __shared__ float4 in_data[3072]; int d0_stride = hidden_dim * seq_length; @@ -353,55 +518,6 @@ __global__ void bias_add_transform_0213_v2(__half* output, output_vec[out_index + iter_offset] = in_data[iter_row * d2_stride + d3 + (d2 % 2) * (d1_stride * blockDim.z)]; } -#endif -} - -// [B S C*H] - > C * [B A S N] -template <> -void launch_bias_add_transform_0213(float* output, - const float* vals, - const float* bias, - int batch_size, - int seq_length, - int hidden_dim, - int heads, - hipStream_t stream, - int trans_count) -{ - hidden_dim >>= 2; - int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; - - dim3 block_dim(hidden_dim / heads, (heads / head_ext)); - dim3 grid_dim(batch_size, seq_length, (trans_count * head_ext)); - - hipLaunchKernelGGL(( bias_add_transform_0213), dim3(grid_dim), dim3(block_dim), 0, stream, - output, vals, bias, hidden_dim, seq_length, heads, head_ext); -} - -template <> -void launch_bias_add_transform_0213<__half>(__half* output, - const __half* vals, - const __half* bias, - int batch_size, - int seq_length, - int hidden_dim, - int heads, - hipStream_t stream, - int trans_count) -{ - hidden_dim >>= 3; - if (hidden_dim > 128 || hidden_dim < 16) { - int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; - dim3 block_dim(hidden_dim / heads, (heads / head_ext)); - dim3 grid_dim(batch_size, seq_length, (trans_count * head_ext)); - hipLaunchKernelGGL(( 
bias_add_transform_0213<__half>), dim3(grid_dim), dim3(block_dim), 0, stream, - output, vals, bias, hidden_dim, seq_length, heads, head_ext); - } else { - dim3 block_dim(hidden_dim / heads, heads, trans_count); - dim3 grid_dim(batch_size, seq_length / 2); - hipLaunchKernelGGL(( bias_add_transform_0213_v2), dim3(grid_dim), dim3(block_dim), 0, stream, - output, vals, bias, hidden_dim, seq_length, heads); - } } template @@ -453,8 +569,6 @@ __global__ void transform4d_0213<__half>(__half* out, int hidden_dim, int head_ext) { -#ifdef HALF_PRECISION_AVAILABLE - int d0_stride = hidden_dim * (seq_length / head_ext); int d1_stride = hidden_dim; int d2_stride = hidden_dim / heads; @@ -479,8 +593,6 @@ __global__ void transform4d_0213<__half>(__half* out, out_vec += (d2 * d1_stride * gridDim.y); out_vec[d3] = in_vec[d3]; - -#endif } __global__ void transform4d_0213_v2(__half* out, @@ -489,7 +601,6 @@ __global__ void transform4d_0213_v2(__half* out, int seq_length, int hidden_dim) { -#ifdef HALF_PRECISION_AVAILABLE __shared__ float4 in_data[3072]; int d0_stride = hidden_dim * seq_length; @@ -530,7 +641,6 @@ __global__ void transform4d_0213_v2(__half* out, int iter_id = iter * iteration_stride + iter_index; out_vec[output_offset + iter_id] = in_data[iter_id]; } -#endif } // 3 * [B A S N] - > [B S C*H] @@ -562,16 +672,9 @@ void launch_transform4d_0213<__half>(__half* out, int trans_count) { hidden_dim >>= 3; - if (hidden_dim > 128 || hidden_dim < 16) { - int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; - dim3 grid_dims(batch_size, trans_count, (seq_length * head_ext)); - dim3 block_dims(hidden_dim / heads, (heads / head_ext)); - hipLaunchKernelGGL(( transform4d_0213<__half>), dim3(grid_dims), dim3(block_dims), 0, stream, - out, in, heads, seq_length, hidden_dim, head_ext); - } else { - dim3 grid_dims(batch_size, seq_length / 2); - dim3 block_dims(hidden_dim / heads, heads, trans_count); - hipLaunchKernelGGL(( transform4d_0213_v2), dim3(grid_dims), dim3(block_dims), 0, stream, - 
out, in, heads, seq_length, hidden_dim); - } + int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; + dim3 grid_dims(batch_size, trans_count, (seq_length * head_ext)); + dim3 block_dims(hidden_dim / heads, (heads / head_ext)); + hipLaunchKernelGGL(( transform4d_0213<__half>) + , dim3(grid_dims), dim3(block_dims), 0, stream, out, in, heads, seq_length, hidden_dim, head_ext); } diff --git a/deepspeed/ops/csrc/transformer/inference/includes/context.h b/csrc/transformer/inference/includes/inference_context.h similarity index 53% rename from deepspeed/ops/csrc/transformer/inference/includes/context.h rename to csrc/transformer/inference/includes/inference_context.h index 21f0b3c..b3851ca 100644 --- a/deepspeed/ops/csrc/transformer/inference/includes/context.h +++ b/csrc/transformer/inference/includes/inference_context.h @@ -1,14 +1,21 @@ +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + #pragma once -#include +#include #include #include #include #include #include "cublas_v2.h" #include "cuda.h" -#include "curand.h" +#define MEGABYTE (1024 * 1024) +#define GIGABYTE (1024 * 1024 * 1024) + +// TODO: refactor out #define WARP_SIZE 32 #define CUDA_CHECK(callstr) \ @@ -40,10 +47,15 @@ inline int DS_GET_BLOCKS(const int N) class Context { public: - Context() : _workspace(nullptr), _seed(42), _curr_offset(0), _stream(0) + Context() + : _workspace(nullptr), + _seed(42), + _curr_offset(0), + _stream(0), + _free_memory_size(0), + _num_tokens(1), + _attention_unfused_workspace_offset(0) { - curandCreateGenerator(&_gen, CURAND_RNG_PSEUDO_DEFAULT); - curandSetPseudoRandomGeneratorSeed(_gen, 123); if (cublasCreate(&_cublasHandle) != CUBLAS_STATUS_SUCCESS) { auto message = std::string("Fail to create cublas handle."); std::cerr << message << std::endl; @@ -51,16 +63,11 @@ public: } #ifndef __HIP_PLATFORM_HCC__ cublasSetMathMode(_cublasHandle, CUBLAS_TENSOR_OP_MATH); - cudaEventCreate(&_comp1_event, (cudaEventDisableTiming | cudaEventBlockingSync)); - cudaEventCreate(&_comp2_event, 
(cudaEventDisableTiming | cudaEventBlockingSync)); - cudaEventCreate(&_comp_event, (cudaEventDisableTiming | cudaEventBlockingSync)); - cudaEventCreate(&_comm_event, (cudaEventDisableTiming | cudaEventBlockingSync)); -#else +#endif cudaEventCreate(&_comp1_event); cudaEventCreate(&_comp2_event); cudaEventCreate(&_comp_event); cudaEventCreate(&_comm_event); -#endif } virtual ~Context() @@ -79,23 +86,88 @@ public: return _ctx; } - void GenWorkSpace(size_t size) + void GenWorkSpace(const unsigned& num_layers, + const unsigned& num_heads, + const size_t& batch_size, + const size_t& prompt_len, + const size_t& hidden_dim, + const unsigned& mp_size, + const bool& external_cache, + const size_t& elem_size, + const unsigned& rank, + unsigned max_out_tokens) { + size_t total_size; + if (!_free_memory_size) { cudaMemGetInfo(&_free_memory_size, &total_size); } + + // Flash attention requires padded heads and we'll conservatively allocate + // for that here. Flash attention is only enabled for head size <= 128 right now + const int head_size = hidden_dim / num_heads; + const int padded_head_size = head_size <= 32 ? 32 : (head_size <= 64 ? 64 : 128); + const int effective_head_size = (head_size > 128) ? head_size : padded_head_size; + + size_t activation_size = 16 * (num_heads * effective_head_size) * batch_size; + // Other sequence length dimension is added when the final workSpaceSize is calculated + size_t temp_size = batch_size * num_heads * max_out_tokens * 2; + size_t cache_size = + num_layers * batch_size * ((num_heads * effective_head_size) / mp_size) * 2; + size_t minimal_requirements = + temp_size + (_free_memory_size > GIGABYTE ? 
500 : 100) * MEGABYTE; + if (_free_memory_size < minimal_requirements) { + printf("Requested:\t%lu\nFree:\t%lu\nTotal:\t%lu\n", + minimal_requirements, + _free_memory_size, + total_size); + throw std::runtime_error("Workspace can't be allocated, no enough memory."); + } + + _max_seq_len = ((_free_memory_size - minimal_requirements) / elem_size) / + (activation_size + temp_size + cache_size); + _max_seq_len = std::min((size_t)max_out_tokens, _max_seq_len); + size_t workSpaceSize = ((external_cache ? (activation_size + temp_size) + : (activation_size + temp_size + cache_size))) * + _max_seq_len * elem_size; + temp_size *= _max_seq_len * elem_size; + if (rank == 0 && !_workspace) + printf( + "------------------------------------------------------\n" + "Free memory : %f (GigaBytes) \n" + "Total memory: %f (GigaBytes) \n" + "Requested memory: %f (GigaBytes) \n" + "Setting maximum total tokens (input + output) to %lu \n" + "------------------------------------------------------\n", + (float)_free_memory_size / GIGABYTE, + (float)total_size / GIGABYTE, + (float)workSpaceSize / GIGABYTE, + _max_seq_len); if (!_workspace) { assert(_workspace == nullptr); - cudaMalloc(&_workspace, size); - } else if (_workSpaceSize < size) { + cudaMalloc(&_workspace, workSpaceSize); + } else if (_workSpaceSize < workSpaceSize) { cudaFree(_workspace); - cudaMalloc(&_workspace, size); + cudaMalloc(&_workspace, workSpaceSize); } - _workSpaceSize = size; + if (!_workspace) { + printf("Requested:\t%lu\nFree:\t%lu\nTotal:\t%lu\n", + workSpaceSize, + _free_memory_size, + total_size); + throw std::runtime_error("Workspace is null."); + } + _workSpaceSize = workSpaceSize; + _attention_unfused_workspace_offset = workSpaceSize - temp_size; } + inline size_t GetMaxTokenLenght() const { return _max_seq_len; } cudaEvent_t GetCompEvent(int id) { return id == 1 ? 
_comp1_event : _comp2_event; } size_t get_workspace_size() const { return _workSpaceSize; } void* GetWorkSpace() { return _workspace; } + void* GetAttentionUnfusedWorkspace() + { + return (char*)_workspace + _attention_unfused_workspace_offset; + } inline unsigned new_token(unsigned layer_id) { @@ -103,7 +175,7 @@ public: return _token_length; } - inline void reset_tokens(unsigned initial_tokens = 0) + inline void reset_tokens(unsigned initial_tokens = 1) { _num_tokens = initial_tokens; } //_token_length = 0; } @@ -112,8 +184,6 @@ public: inline void advance_tokens() { _num_tokens++; } - curandGenerator_t& GetRandGenerator() { return _gen; } - cudaStream_t GetCommStream(bool async_op = false) { if (!_comm_stream) @@ -157,16 +227,21 @@ public: } private: - curandGenerator_t _gen; cublasHandle_t _cublasHandle; cudaEvent_t _comp_event; cudaEvent_t _comm_event; void* _workspace; + // offset from _workspace for attention unfused memory + size_t _attention_unfused_workspace_offset; uint64_t _seed; uint64_t _curr_offset; + size_t _workSpaceSize; + size_t _free_memory_size; + + size_t _max_seq_len; cudaEvent_t _comp1_event; cudaEvent_t _comp2_event; diff --git a/deepspeed/ops/csrc/transformer/inference/includes/context_hip.h b/csrc/transformer/inference/includes/inference_context_hip.h similarity index 54% rename from deepspeed/ops/csrc/transformer/inference/includes/context_hip.h rename to csrc/transformer/inference/includes/inference_context_hip.h index 738e2dc..5bb968d 100644 --- a/deepspeed/ops/csrc/transformer/inference/includes/context_hip.h +++ b/csrc/transformer/inference/includes/inference_context_hip.h @@ -1,15 +1,22 @@ // !!! This is a file automatically generated by hipify!!! 
+/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + #pragma once -#include +#include #include #include #include #include #include "rocblas.h" #include "hip/hip_runtime.h" -#include "hiprand/hiprand.h" +#define MEGABYTE (1024 * 1024) +#define GIGABYTE (1024 * 1024 * 1024) + +// TODO: refactor out #define WARP_SIZE 32 #define CUDA_CHECK(callstr) \ @@ -41,10 +48,15 @@ inline int DS_GET_BLOCKS(const int N) class Context { public: - Context() : _workspace(nullptr), _seed(42), _curr_offset(0), _stream(0) + Context() + : _workspace(nullptr), + _seed(42), + _curr_offset(0), + _stream(0), + _free_memory_size(0), + _num_tokens(1), + _attention_unfused_workspace_offset(0) { - hiprandCreateGenerator(&_gen, HIPRAND_RNG_PSEUDO_DEFAULT); - hiprandSetPseudoRandomGeneratorSeed(_gen, 123); if (rocblas_create_handle(&_cublasHandle) != rocblas_status_success) { auto message = std::string("Fail to create cublas handle."); std::cerr << message << std::endl; @@ -52,16 +64,11 @@ public: } #ifndef __HIP_PLATFORM_HCC__ rocblas_set_math_mode(_cublasHandle, CUBLAS_TENSOR_OP_MATH); - hipEventCreate(&_comp1_event, (hipEventDisableTiming | hipEventBlockingSync)); - hipEventCreate(&_comp2_event, (hipEventDisableTiming | hipEventBlockingSync)); - hipEventCreate(&_comp_event, (hipEventDisableTiming | hipEventBlockingSync)); - hipEventCreate(&_comm_event, (hipEventDisableTiming | hipEventBlockingSync)); -#else +#endif hipEventCreate(&_comp1_event); hipEventCreate(&_comp2_event); hipEventCreate(&_comp_event); hipEventCreate(&_comm_event); -#endif } virtual ~Context() @@ -80,23 +87,88 @@ public: return _ctx; } - void GenWorkSpace(size_t size) + void GenWorkSpace(const unsigned& num_layers, + const unsigned& num_heads, + const size_t& batch_size, + const size_t& prompt_len, + const size_t& hidden_dim, + const unsigned& mp_size, + const bool& external_cache, + const size_t& elem_size, + const unsigned& rank, + unsigned max_out_tokens) { + size_t total_size; + if (!_free_memory_size) { 
hipMemGetInfo(&_free_memory_size, &total_size); } + + // Flash attention requires padded heads and we'll conservatively allocate + // for that here. Flash attention is only enabled for head size <= 128 right now + const int head_size = hidden_dim / num_heads; + const int padded_head_size = head_size <= 32 ? 32 : (head_size <= 64 ? 64 : 128); + const int effective_head_size = (head_size > 128) ? head_size : padded_head_size; + + size_t activation_size = 16 * (num_heads * effective_head_size) * batch_size; + // Other sequence length dimension is added when the final workSpaceSize is calculated + size_t temp_size = batch_size * num_heads * max_out_tokens * 2; + size_t cache_size = + num_layers * batch_size * ((num_heads * effective_head_size) / mp_size) * 2; + size_t minimal_requirements = + temp_size + (_free_memory_size > GIGABYTE ? 500 : 100) * MEGABYTE; + if (_free_memory_size < minimal_requirements) { + printf("Requested:\t%lu\nFree:\t%lu\nTotal:\t%lu\n", + minimal_requirements, + _free_memory_size, + total_size); + throw std::runtime_error("Workspace can't be allocated, no enough memory."); + } + + _max_seq_len = ((_free_memory_size - minimal_requirements) / elem_size) / + (activation_size + temp_size + cache_size); + _max_seq_len = std::min((size_t)max_out_tokens, _max_seq_len); + size_t workSpaceSize = ((external_cache ? 
(activation_size + temp_size) + : (activation_size + temp_size + cache_size))) * + _max_seq_len * elem_size; + temp_size *= _max_seq_len * elem_size; + if (rank == 0 && !_workspace) + printf( + "------------------------------------------------------\n" + "Free memory : %f (GigaBytes) \n" + "Total memory: %f (GigaBytes) \n" + "Requested memory: %f (GigaBytes) \n" + "Setting maximum total tokens (input + output) to %lu \n" + "------------------------------------------------------\n", + (float)_free_memory_size / GIGABYTE, + (float)total_size / GIGABYTE, + (float)workSpaceSize / GIGABYTE, + _max_seq_len); if (!_workspace) { assert(_workspace == nullptr); - hipMalloc(&_workspace, size); - } else if (_workSpaceSize < size) { + hipMalloc(&_workspace, workSpaceSize); + } else if (_workSpaceSize < workSpaceSize) { hipFree(_workspace); - hipMalloc(&_workspace, size); + hipMalloc(&_workspace, workSpaceSize); } - _workSpaceSize = size; + if (!_workspace) { + printf("Requested:\t%lu\nFree:\t%lu\nTotal:\t%lu\n", + workSpaceSize, + _free_memory_size, + total_size); + throw std::runtime_error("Workspace is null."); + } + _workSpaceSize = workSpaceSize; + _attention_unfused_workspace_offset = workSpaceSize - temp_size; } + inline size_t GetMaxTokenLenght() const { return _max_seq_len; } hipEvent_t GetCompEvent(int id) { return id == 1 ? 
_comp1_event : _comp2_event; } size_t get_workspace_size() const { return _workSpaceSize; } void* GetWorkSpace() { return _workspace; } + void* GetAttentionUnfusedWorkspace() + { + return (char*)_workspace + _attention_unfused_workspace_offset; + } inline unsigned new_token(unsigned layer_id) { @@ -104,7 +176,7 @@ public: return _token_length; } - inline void reset_tokens(unsigned initial_tokens = 0) + inline void reset_tokens(unsigned initial_tokens = 1) { _num_tokens = initial_tokens; } //_token_length = 0; } @@ -113,8 +185,6 @@ public: inline void advance_tokens() { _num_tokens++; } - hiprandGenerator_t& GetRandGenerator() { return _gen; } - hipStream_t GetCommStream(bool async_op = false) { if (!_comm_stream) @@ -158,16 +228,21 @@ public: } private: - hiprandGenerator_t _gen; rocblas_handle _cublasHandle; hipEvent_t _comp_event; hipEvent_t _comm_event; void* _workspace; + // offset from _workspace for attention unfused memory + size_t _attention_unfused_workspace_offset; uint64_t _seed; uint64_t _curr_offset; + size_t _workSpaceSize; + size_t _free_memory_size; + + size_t _max_seq_len; hipEvent_t _comp1_event; hipEvent_t _comp2_event; diff --git a/deepspeed/ops/csrc/transformer/inference/includes/cublas_wrappers.h b/csrc/transformer/inference/includes/inference_cublas_wrappers.h similarity index 99% rename from deepspeed/ops/csrc/transformer/inference/includes/cublas_wrappers.h rename to csrc/transformer/inference/includes/inference_cublas_wrappers.h index 75d18a4..9e55cc1 100644 --- a/deepspeed/ops/csrc/transformer/inference/includes/cublas_wrappers.h +++ b/csrc/transformer/inference/includes/inference_cublas_wrappers.h @@ -1,3 +1,7 @@ +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + #pragma once #include diff --git a/deepspeed/ops/csrc/transformer/inference/includes/cublas_wrappers_hip.h b/csrc/transformer/inference/includes/inference_cublas_wrappers_hip.h similarity index 99% rename from 
deepspeed/ops/csrc/transformer/inference/includes/cublas_wrappers_hip.h rename to csrc/transformer/inference/includes/inference_cublas_wrappers_hip.h index e7c8190..1493398 100644 --- a/deepspeed/ops/csrc/transformer/inference/includes/cublas_wrappers_hip.h +++ b/csrc/transformer/inference/includes/inference_cublas_wrappers_hip.h @@ -1,4 +1,8 @@ // !!! This is a file automatically generated by hipify!!! +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + #pragma once #include diff --git a/csrc/transformer/inference/includes/inference_cuda_layers.h b/csrc/transformer/inference/includes/inference_cuda_layers.h new file mode 100644 index 0000000..211f56b --- /dev/null +++ b/csrc/transformer/inference/includes/inference_cuda_layers.h @@ -0,0 +1,222 @@ +/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + +#pragma once + +#include "ds_kernel_utils.h" + +#include +#include +#include +#include +#include +#include + +#define MAX_WARP_NUM 32 +#define WARP_SIZE 32 + +#define MAX_THREADS 1024 +#define SMs 80 + +#define MAX_REGISTERS 256 + +template +void launch_attn_softmax_v2(T* vals, + T* mask, + T* alibi, + float layer_scale, + bool triangular, + bool recompute, + bool local_attention, + int window_size, + int batch_size, + int heads, + int num_seq, + int sequence_length, + int offset, + int mask_stride, + int mp_size, + cudaStream_t stream); + +// Fused bias add with gelu activation +template +void launch_bias_gelu(T* input, + const T* bias, + int intermediate_size, + int batch_size, + cudaStream_t stream); + +template +void launch_fused_bias_geglu(T* output, + const T* activation, + const T* bias, + int rows, + int elems_per_row, + cudaStream_t stream); + +// Fused bias add with relu activation +template +void launch_bias_relu(T* input, + const T* bias, + int intermediate_size, + int batch_size, + cudaStream_t stream); + +template +void launch_bias_add(T* input, const T* bias, int hidden_size, int batch_size, cudaStream_t stream); + +template +void 
launch_bias_residual(T* input, + T* output, + T* attn, + T* bias, + T* attn_bias, + int batch, + int hidden_dim, + int mp_size, + bool preln, + cudaStream_t stream); + +template +void launch_fused_ln(T* output, + const T* vals, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream); + +template +void launch_fused_residual_ln(T* output, + const T* vals, + const T* residual, + const T* bias, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream); + +template +void launch_fused_residual_ln_store_pre_ln_res(T* norm_output, + T* res_output, + const T* vals, + const T* residual, + const T* bias, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream); + +template +void launch_dequantize(T* output, + const int8_t* input, + const float* qscale, + unsigned output_size, + unsigned hidden_dim, + unsigned groups, + unsigned merge_count, + cudaStream_t stream); + +template +void launch_dequantize(T* output, + const int8_t* input, + const float* qscale, + unsigned output_size, + unsigned hidden_dim, + unsigned groups, + cudaStream_t stream); +template +void launch_gptj_residual_add(T* input, + T* output, + T* attn, + T* bias, + T* attn_bias, + int batch, + int head_size, + int mp_size, + cudaStream_t stream); + +template +void launch_apply_rotary_pos_emb(T* mixed_query, + T* key_layer, + unsigned head_size, + unsigned seq_len, + unsigned rotary_dim, + unsigned offset, + unsigned num_heads, + unsigned batch, + bool rotate_half, + bool rotate_every_two, + cudaStream_t stream, + int max_out_tokens); + +template +void launch_moe_res_matmul(T* residual, + T* coef, + T* mlp_out, + int seq_len, + int hidden_dim, + cudaStream_t stream); + +// 4D transform [0, 1, 2, 3] -> [0, 2, 1, 3] +template +void launch_transform4d_0213(T* out, + const T* in, + int batch_size, + int heads, + int seq_length, + int hidden_dim, + cudaStream_t 
stream, + int trans_count); +template +void launch_bias_add_transform_0213(T* outputs, + T* vals, + T* vals1, + const T* vals2, + const T* bias, + int batch_size, + int seq_length, + unsigned seq_offset, + int seq_length1, + int hidden_dim, + int heads, + int rotary_dim, + bool rotate_half, + bool rotate_every_two, + cudaStream_t stream, + int trans_count, + int max_out_tokens); +template +void pad_data(T* padded_output, + T* output, + int bsz, + int head_size, + int padded_head_size, + cudaStream_t stream); + +template +void pad_head_seq(T* padded_output, + T* output, + int bsz, + int seq_len, + int padded_seq_len, + int head_size, + int padded_head_size, + cudaStream_t stream); + +template +void launch_pad_add_transform_0213(T* output, + const T* vals, + int batch_size, + int hidden_dim, + int seq_length, + int padded_seq_len, + int heads, + int padded_head_size, + cudaStream_t stream); diff --git a/csrc/transformer/inference/includes/inference_hip_layers.h b/csrc/transformer/inference/includes/inference_hip_layers.h new file mode 100644 index 0000000..1059266 --- /dev/null +++ b/csrc/transformer/inference/includes/inference_hip_layers.h @@ -0,0 +1,223 @@ +// !!! This is a file automatically generated by hipify!!! 
+/* +Copyright 2022 The Microsoft DeepSpeed Team +*/ + +#pragma once + +#include "ds_kernel_utils_hip.h" + +#include +#include +#include +#include +#include +#include + +#define MAX_WARP_NUM 32 +#define WARP_SIZE 32 + +#define MAX_THREADS 1024 +#define SMs 80 + +#define MAX_REGISTERS 256 + +template +void launch_attn_softmax_v2(T* vals, + T* mask, + T* alibi, + float layer_scale, + bool triangular, + bool recompute, + bool local_attention, + int window_size, + int batch_size, + int heads, + int num_seq, + int sequence_length, + int offset, + int mask_stride, + int mp_size, + hipStream_t stream); + +// Fused bias add with gelu activation +template +void launch_bias_gelu(T* input, + const T* bias, + int intermediate_size, + int batch_size, + hipStream_t stream); + +template +void launch_fused_bias_geglu(T* output, + const T* activation, + const T* bias, + int rows, + int elems_per_row, + hipStream_t stream); + +// Fused bias add with relu activation +template +void launch_bias_relu(T* input, + const T* bias, + int intermediate_size, + int batch_size, + hipStream_t stream); + +template +void launch_bias_add(T* input, const T* bias, int hidden_size, int batch_size, hipStream_t stream); + +template +void launch_bias_residual(T* input, + T* output, + T* attn, + T* bias, + T* attn_bias, + int batch, + int hidden_dim, + int mp_size, + bool preln, + hipStream_t stream); + +template +void launch_fused_ln(T* output, + const T* vals, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + hipStream_t stream); + +template +void launch_fused_residual_ln(T* output, + const T* vals, + const T* residual, + const T* bias, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + hipStream_t stream); + +template +void launch_fused_residual_ln_store_pre_ln_res(T* norm_output, + T* res_output, + const T* vals, + const T* residual, + const T* bias, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int 
elems_per_row, + hipStream_t stream); + +template +void launch_dequantize(T* output, + const int8_t* input, + const float* qscale, + unsigned output_size, + unsigned hidden_dim, + unsigned groups, + unsigned merge_count, + hipStream_t stream); + +template +void launch_dequantize(T* output, + const int8_t* input, + const float* qscale, + unsigned output_size, + unsigned hidden_dim, + unsigned groups, + hipStream_t stream); +template +void launch_gptj_residual_add(T* input, + T* output, + T* attn, + T* bias, + T* attn_bias, + int batch, + int head_size, + int mp_size, + hipStream_t stream); + +template +void launch_apply_rotary_pos_emb(T* mixed_query, + T* key_layer, + unsigned head_size, + unsigned seq_len, + unsigned rotary_dim, + unsigned offset, + unsigned num_heads, + unsigned batch, + bool rotate_half, + bool rotate_every_two, + hipStream_t stream, + int max_out_tokens); + +template +void launch_moe_res_matmul(T* residual, + T* coef, + T* mlp_out, + int seq_len, + int hidden_dim, + hipStream_t stream); + +// 4D transform [0, 1, 2, 3] -> [0, 2, 1, 3] +template +void launch_transform4d_0213(T* out, + const T* in, + int batch_size, + int heads, + int seq_length, + int hidden_dim, + hipStream_t stream, + int trans_count); +template +void launch_bias_add_transform_0213(T* outputs, + T* vals, + T* vals1, + const T* vals2, + const T* bias, + int batch_size, + int seq_length, + unsigned seq_offset, + int seq_length1, + int hidden_dim, + int heads, + int rotary_dim, + bool rotate_half, + bool rotate_every_two, + hipStream_t stream, + int trans_count, + int max_out_tokens); +template +void pad_data(T* padded_output, + T* output, + int bsz, + int head_size, + int padded_head_size, + hipStream_t stream); + +template +void pad_head_seq(T* padded_output, + T* output, + int bsz, + int seq_len, + int padded_seq_len, + int head_size, + int padded_head_size, + hipStream_t stream); + +template +void launch_pad_add_transform_0213(T* output, + const T* vals, + int batch_size, + int 
hidden_dim, + int seq_length, + int padded_seq_len, + int heads, + int padded_head_size, + hipStream_t stream); diff --git a/csrc/transformer/normalize_kernels.cu b/csrc/transformer/normalize_kernels.cu index d634c7f..84f27b2 100644 --- a/csrc/transformer/normalize_kernels.cu +++ b/csrc/transformer/normalize_kernels.cu @@ -1,3 +1,7 @@ +/* +Copyright The Microsoft DeepSpeed Team +*/ + #include "custom_cuda_layers.h" namespace cg = cooperative_groups; @@ -862,6 +866,7 @@ __global__ void LayerNormBackward2(const __half* out_grad, bool invertible, int row_stride) { +#ifdef HALF_PRECISION_AVAILABLE int iteration_stride = blockDim.x; int iterations = row_stride / iteration_stride; @@ -985,6 +990,7 @@ __global__ void LayerNormBackward2(const __half* out_grad, inp_grad_h[high_index] = temp; } +#endif } template <> @@ -1172,6 +1178,7 @@ __global__ void LayerNormBackward2(const __half* out_grad, __half* inp_grad, int row_stride) { +#ifdef HALF_PRECISION_AVAILABLE int iteration_stride = blockDim.x; int iterations = row_stride / iteration_stride; @@ -1290,6 +1297,7 @@ __global__ void LayerNormBackward2(const __half* out_grad, __half2 temp = __float22half2_rn(vals_arr_f[iterations]); inp_grad_h[high_index] = temp; } +#endif } template <> @@ -1601,6 +1609,7 @@ __global__ void LayerNormBackward2_fused_add(const __half* out_grad1, bool invertible, int row_stride) { +#ifdef HALF_PRECISION_AVAILABLE int iteration_stride = blockDim.x; int iterations = row_stride / iteration_stride; @@ -1727,6 +1736,7 @@ __global__ void LayerNormBackward2_fused_add(const __half* out_grad1, inp_grad_h[high_index] = temp + out_grad_h2[high_index]; } +#endif } template <> @@ -1922,6 +1932,7 @@ __global__ void LayerNormBackward2_fused_add(const __half* out_grad1, __half* inp_grad, int row_stride) { +#ifdef HALF_PRECISION_AVAILABLE int iteration_stride = blockDim.x; int iterations = row_stride / iteration_stride; @@ -2044,6 +2055,7 @@ __global__ void LayerNormBackward2_fused_add(const __half* out_grad1, 
__half2 temp = __float22half2_rn(vals_arr_f[iterations]); inp_grad_h[high_index] = temp + out_grad_h2[high_index]; } +#endif } template <> diff --git a/csrc/transformer/softmax_kernels.cu b/csrc/transformer/softmax_kernels.cu index 34487cb..bad814b 100644 --- a/csrc/transformer/softmax_kernels.cu +++ b/csrc/transformer/softmax_kernels.cu @@ -1,3 +1,7 @@ +/* +Copyright The Microsoft DeepSpeed Team +*/ + #include #include "custom_cuda_layers.h" #include "general_kernels.h" @@ -536,6 +540,102 @@ __global__ void softmax_backward_kernel_v2(T* grad /* input & output*/, } } +__global__ void softmax_backward_kernel_arbitrary_length(__half* grad /* input & output*/, + const __half* output, + int softmax_length) +{ + int batch_idx = blockIdx.x * blockDim.y + threadIdx.y; + int offset = batch_idx * softmax_length + threadIdx.x; + + const float4* output_cast = reinterpret_cast(output); + float4* grad_cast = reinterpret_cast(grad); + + grad_cast += offset; + output_cast += offset; + + float sum = 0.0; + int curr_idx = threadIdx.x; + while (curr_idx < softmax_length) { + float4 out_reg = output_cast[curr_idx]; + float4 grad_reg = grad_cast[curr_idx]; + __half2* out_h = reinterpret_cast<__half2*>(&out_reg); + __half2* grad_h = reinterpret_cast<__half2*>(&grad_reg); +#pragma unroll + for (int m = 0; m < 4; m++) grad_h[m] *= out_h[m]; + sum += ((float)grad_h[0].x + (float)grad_h[0].y + (float)grad_h[1].x + (float)grad_h[1].y) + + ((float)grad_h[2].x + (float)grad_h[2].y + (float)grad_h[3].x + (float)grad_h[3].y); + curr_idx += WARP_SIZE; + } + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + +#pragma unroll + for (int i = 1; i < WARP_SIZE; i <<= 1) sum += g.shfl_xor(sum, i); + + curr_idx = threadIdx.x; + while (curr_idx < softmax_length) { + float4 out_reg = output_cast[curr_idx]; + float4 grad_reg = grad_cast[curr_idx]; + __half* grad_h = reinterpret_cast<__half*>(&grad_reg); + __half* out_h = 
reinterpret_cast<__half*>(&out_reg); + +#pragma unroll + for (int m = 0; m < 8; m++) grad_h[m] = (float)out_h[m] * ((float)grad_h[m] - sum); + + grad_cast[curr_idx] = grad_reg; + curr_idx += WARP_SIZE; + } +} + +__global__ void softmax_backward_kernel_arbitrary_length(float* grad /* input & output*/, + const float* output, + int softmax_length) +{ + int batch_idx = blockIdx.x * blockDim.y + threadIdx.y; + int offset = batch_idx * softmax_length + threadIdx.x; + + const float4* output_cast = reinterpret_cast(output); + float4* grad_cast = reinterpret_cast(grad); + + grad_cast += offset; + output_cast += offset; + + float sum = 0.0; + int curr_idx = threadIdx.x; + while (curr_idx < softmax_length) { + float4 out_reg = output_cast[curr_idx]; + float4 grad_reg = grad_cast[curr_idx]; + + grad_reg.x *= out_reg.x; + grad_reg.y *= out_reg.y; + grad_reg.z *= out_reg.z; + grad_reg.w *= out_reg.w; + sum += (grad_reg.x + grad_reg.y + grad_reg.z + grad_reg.w); + + curr_idx += WARP_SIZE; + } + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + +#pragma unroll + for (int i = 1; i < WARP_SIZE; i <<= 1) sum += g.shfl_xor(sum, i); + + curr_idx = threadIdx.x; + while (curr_idx < softmax_length) { + float4 out_reg = output_cast[curr_idx]; + float4 grad_reg = grad_cast[curr_idx]; + grad_reg.x = out_reg.x * (grad_reg.x - sum); + grad_reg.y = out_reg.y * (grad_reg.y - sum); + grad_reg.z = out_reg.z * (grad_reg.z - sum); + grad_reg.w = out_reg.w * (grad_reg.w - sum); + + grad_cast[curr_idx] = grad_reg; + curr_idx += WARP_SIZE; + } +} + template void launch_attn_softmax_backward_v2(T* out_grad, const T* soft_inp, @@ -575,10 +675,15 @@ void launch_attn_softmax_backward_v2(T* out_grad, else if (seq_length <= 2048) softmax_backward_kernel_v2 <<>>(out_grad, soft_inp, seq_length); + else if (seq_length <= 4096) + softmax_backward_kernel_v2 + <<>>(out_grad, soft_inp, seq_length); + else if (seq_length <= 8192) + softmax_backward_kernel_v2 + 
<<>>(out_grad, soft_inp, seq_length); else - throw std::runtime_error( - std::string("Special sequence length found in softmax backward, seq_length: ") + - std::to_string(seq_length)); + softmax_backward_kernel_arbitrary_length<<>>( + out_grad, soft_inp, seq_length / (4 << ((sizeof(T) & 2) >> 1))); } template void launch_attn_softmax_backward_v2<__half>(__half* out_grad, diff --git a/csrc/transformer/transform_kernels.cu b/csrc/transformer/transform_kernels.cu old mode 100644 new mode 100755 index 15a2219..37a6e79 --- a/csrc/transformer/transform_kernels.cu +++ b/csrc/transformer/transform_kernels.cu @@ -1,3 +1,7 @@ +/* +Copyright The Microsoft DeepSpeed Team +*/ + #include "custom_cuda_layers.h" #define rows_trans 16 diff --git a/deepspeed/__init__.py b/deepspeed/__init__.py old mode 100644 new mode 100755 index 2292b41..9e2e255 --- a/deepspeed/__init__.py +++ b/deepspeed/__init__.py @@ -4,6 +4,7 @@ Copyright 2020 The Microsoft DeepSpeed Team import sys import types +import json from typing import Optional, Union import torch from torch.optim import Optimizer @@ -17,17 +18,18 @@ from .runtime.engine import DeepSpeedEngine, DeepSpeedOptimizerCallable, DeepSpe from .runtime.engine import ADAM_OPTIMIZER, LAMB_OPTIMIZER from .runtime.pipe.engine import PipelineEngine from .inference.engine import InferenceEngine - +from .inference.config import DeepSpeedInferenceConfig from .runtime.lr_schedules import add_tuning_arguments from .runtime.config import DeepSpeedConfig, DeepSpeedConfigError from .runtime.activation_checkpointing import checkpointing from .ops.transformer import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig from .module_inject import replace_transformer_layer, revert_transformer_layer -from .utils import log_dist -from .utils.distributed import init_distributed +from .utils import log_dist, OnDevice +from .comm.comm import init_distributed from .runtime import zero +from .runtime import DeepSpeedOptimizer, ZeROOptimizer from .pipe import 
PipelineModule @@ -82,7 +84,7 @@ def initialize(args=None, mpu: Optional: A model parallelism unit object that implements get_{model,data}_parallel_{rank,group,world_size}() - dist_init_required: Optional: None will auto-initialize torch.distributed if needed, + dist_init_required: Optional: None will auto-initialize torch distributed if needed, otherwise the user can force it to be initialized or not via boolean. collate_fn: Optional: Merges a list of samples to form a @@ -113,6 +115,10 @@ def initialize(args=None, __git_hash__, __git_branch__), ranks=[0]) + + # Disable zero.Init context if it's currently enabled + zero.partition_parameters.shutdown_init_context() + assert model is not None, "deepspeed.initialize requires a model" if not isinstance(model, PipelineModule): @@ -217,61 +223,57 @@ def add_config_arguments(parser): return parser -def init_inference(model, - triangular_masking=True, - mp_size=1, - training_mp_size=1, - mpu=None, - ep_group=None, - expert_mp_group=None, - checkpoint=None, - dtype=None, - injection_policy=None, - replace_method='auto', - quantization_setting=None, - replace_with_kernel_inject=False, - return_tuple=True, - ep_size=1, - moe=False, - moe_experts=1, - moe_type='standard', - args=None): +def default_inference_config(): + """ + Return a default DeepSpeed inference configuration dictionary. + """ + return DeepSpeedInferenceConfig().dict() + + +def init_inference(model, config=None, **kwargs): """Initialize the DeepSpeed InferenceEngine. - Arguments: - model: Required: nn.module class before apply any wrappers + Description: all four cases are valid and supported in DS init_inference() API. - triangular_masking: Required: this shows the type of masking for attention scores in transformer layer - note that the masking is application specific. + # Case 1: user provides no config and no kwargs. Default config will be used. - mp_size: Optional: Desired model parallel size, default is 1 meaning no - model parallelism. + .. 
code-block:: python - training_mp_size: Optional: if loading a checkpoint this is the mp size that it was trained with, - it may be different than what the mp size that you want to use during inference. + generator.model = deepspeed.init_inference(generator.model) + string = generator("DeepSpeed is") + print(string) - mpu: Optional: A model parallelism unit object that implements - get_{model,data}_parallel_{rank,group,world_size}() + # Case 2: user provides a config and no kwargs. User supplied config will be used. + + .. code-block:: python + + generator.model = deepspeed.init_inference(generator.model, config=config) + string = generator("DeepSpeed is") + print(string) + + # Case 3: user provides no config and uses keyword arguments (kwargs) only. - checkpoint: Optional: Path to deepspeed compatible checkpoint or path to - JSON with load policy. + .. code-block:: python - dtype: Optional: Desired model data type, will convert model to this type. - Supported target types: torch.half, torch.int8, torch.float + generator.model = deepspeed.init_inference(generator.model, + mp_size=world_size, + dtype=torch.half, + replace_with_kernel_inject=True) + string = generator("DeepSpeed is") + print(string) - injection_policy: Optional: Dictionary mapping a client nn.Module to its corresponding - injection policy. e.g., {BertLayer : deepspeed.inference.HFBertLayerPolicy} + # Case 4: user provides config and keyword arguments (kwargs). Both config and kwargs are merged and kwargs take precedence. - replace_method: Optional: If 'auto' DeepSpeed will automatically try and replace - model modules with its optimized versions. If an injection_policy is set this will - override the automatic replacement behavior. + .. code-block:: python - quantization_setting: Optional: Quantization settings used for quantizing your model using the MoQ. - The setting can be one element or a tuple. If one value is passed in, we consider it as the number - of groups used in quantization. 
A tuple is passed in if we want to mention that there is extra-grouping - for the MLP part of a Transformer layer (e.g. (True, 8) shows we quantize the model using 8 groups for - all the network except the MLP part that we use 8 extra grouping). - replace_with_kernel_inject: If set we inject kernel as we initialize the inference-engine + generator.model = deepspeed.init_inference(generator.model, config={"dtype": torch.half}, replace_with_kernel_inject=True) + string = generator("DeepSpeed is") + print(string) + + Arguments: + model: Required: original nn.module object without any wrappers + + config: Optional: instead of arguments, you can pass in a DS inference config dict or path to JSON file Returns: A deepspeed.InferenceEngine wrapped model. @@ -282,24 +284,30 @@ def init_inference(model, __git_branch__), ranks=[0]) - engine = InferenceEngine(model, - triangular_masking, - mp_size, - training_mp_size, - ep_size, - mpu, - ep_group, - expert_mp_group, - checkpoint, - dtype, - injection_policy, - return_tuple, - replace_method, - quantization_setting, - replace_with_kernel_inject, - moe, - moe_experts, - moe_type, - args) + # Load config_dict from config first + if config is None: + config = {} + if isinstance(config, str): + with open(config, "r") as f: + config_dict = json.load(f) + elif isinstance(config, dict): + config_dict = config + else: + raise ValueError( + f"'config' argument expected string or dictionary, got {type(config)}") + + # Update with values from kwargs, ensuring no conflicting overlap between config and kwargs + overlap_keys = set(config_dict.keys()).intersection(kwargs.keys()) + # If there is overlap, error out if values are different + for key in overlap_keys: + if config_dict[key] != kwargs[key]: + raise ValueError( + f"Conflicting argument '{key}' in 'config':{config_dict[key]} and kwargs:{kwargs[key]}" + ) + config_dict.update(kwargs) + + ds_inference_config = DeepSpeedInferenceConfig(**config_dict) + + engine = InferenceEngine(model, 
config=ds_inference_config) return engine diff --git a/deepspeed/accelerator b/deepspeed/accelerator new file mode 120000 index 0000000..b61fffa --- /dev/null +++ b/deepspeed/accelerator @@ -0,0 +1 @@ +../accelerator/ \ No newline at end of file diff --git a/deepspeed/autotuning/README.md b/deepspeed/autotuning/README.md old mode 100644 new mode 100755 diff --git a/deepspeed/autotuning/__init__.py b/deepspeed/autotuning/__init__.py index deea832..11ad470 100644 --- a/deepspeed/autotuning/__init__.py +++ b/deepspeed/autotuning/__init__.py @@ -1 +1,3 @@ +'''Copyright The Microsoft DeepSpeed Team''' + from .autotuner import Autotuner diff --git a/deepspeed/autotuning/autotuner.py b/deepspeed/autotuning/autotuner.py old mode 100644 new mode 100755 index 4ff85e6..569b626 --- a/deepspeed/autotuning/autotuner.py +++ b/deepspeed/autotuning/autotuner.py @@ -1,30 +1,40 @@ -import copy -import json -import os -from random import sample +'''Copyright The Microsoft DeepSpeed Team''' + import shutil import subprocess -import hjson -import torch import time import datetime import math +import hjson from ..runtime.config_utils import dict_raise_error_on_duplicate_keys from ..runtime.constants import * -from ..runtime.zero.constants import * + +from ..runtime.zero.config import ZERO_OPTIMIZATION, ZeroStageEnum from ..utils import logger from .config import DeepSpeedAutotuningConfig from .constants import * -from .scheduler import ResourceManager, run_experiment +from .scheduler import ResourceManager from .tuner import GridSearchTuner, RandomTuner, ModelBasedTuner from .utils import * +from deepspeed.accelerator import get_accelerator try: from tabulate import tabulate except ImportError: tabulate = None +try: + import mlflow + has_mlflow = True +except Exception as e: + has_mlflow = False + +ZERO_OPTIMIZATION_STAGE = "stage" +OFFLOAD_OPTIMIZER = "offload_optimizer" +OFFLOAD_PARAM = "offload_param" +ZERO_OPTIMIZATION_STAGE_DEFAULT = ZeroStageEnum.disabled + class Autotuner: """The 
DeepSpeed Autotuner automatically discovers the optimal DeepSpeed configuration that delivers good training speed. The Autotuner uses model information, system information, and heuristics to efficiently tune system knobs that affect compute and memory efficiencies, such as ZeRO optimization stages, micro-batch sizes, and many other ZeRO optimization configurations. It not only reduces the time and resources user spend on tuning, but also can discover configurations better than hand-tuned methods. @@ -42,22 +52,37 @@ class Autotuner: assert self.user_config is not None, "DeepSpeed configuration is not provided" self.autotuning_config = DeepSpeedAutotuningConfig(self.user_config) + if self.user_config[AUTOTUNING]: + if AUTOTUNING_EXPS_DIR in self.user_config[AUTOTUNING].keys(): + del self.user_config[AUTOTUNING][AUTOTUNING_EXPS_DIR] + if AUTOTUNING_RESULTS_DIR in self.user_config[AUTOTUNING].keys(): + del self.user_config[AUTOTUNING][AUTOTUNING_RESULTS_DIR] - self.exps_dir = DEFAULT_EXPRS_DIR - if self.autotuning_config.exps_dir and self.autotuning_config.exps_dir != "": - self.exps_dir = self.autotuning_config.exps_dir + self.exps_dir = self.autotuning_config.exps_dir if self.autotuning_config.overwrite and os.path.exists(self.exps_dir): shutil.rmtree(self.exps_dir, ignore_errors=True) if not os.path.exists(self.exps_dir): - os.makedirs(self.exps_dir, exist_ok=True) + try: + os.makedirs(self.exps_dir, exist_ok=True) + logger.info(f"Created autotuning experiments directory: {self.exps_dir}") + except: + logger.error( + f"Failed to create {self.exps_dir}, please check `exps_dir` in the autotuning config file is accessible by all the nodes in the job." 
+ ) + exit(-1) - self.results_dir = DEFAULT_RESULTS_DIR - if self.autotuning_config.results_dir and self.autotuning_config.results_dir != "": - self.results_dir = self.autotuning_config.results_dir + self.results_dir = self.autotuning_config.results_dir if self.autotuning_config.overwrite and os.path.exists(self.results_dir): shutil.rmtree(self.results_dir, ignore_errors=True) if not os.path.exists(self.results_dir): - os.makedirs(self.results_dir, exist_ok=True) + try: + os.makedirs(self.results_dir, exist_ok=True) + logger.info(f"Created autotuning resutls directory: {self.exps_dir}") + except: + logger.error( + f"Failed to create {self.results_dir}, please check `results_dir` in the autotuning config file is accessible by all the nodes in the job." + ) + exit(-1) # set the active resource for the autotuner resource manager self.rm = self._get_resource_manager(active_resources) @@ -70,6 +95,10 @@ class Autotuner: self.rm.nodes), "num_nodes in the autotuning configuration must not be less than the --num_nodes value in the train script if any" self.records = {} + self.optimal_cmd = None + self.optmal_ds_config = None + + self.mlflow_parent_id = None def print_tuning_results(self): """Print the autotuning results in tabular format. 
@@ -252,7 +281,7 @@ class Autotuner: return False def get_gpu_memory_info(self): - return torch.cuda.get_device_properties(0).total_memory + return get_accelerator().total_memory() def get_activation_memory_per_gpu(self): if self.model_info and "activation_mem_per_gpu" in self.model_info: @@ -266,18 +295,18 @@ class Autotuner: if not num_params: return 0 # assume the model uses Adam optimizer - # ZERO_OPTIMIZATION_DISABLED: + # ZeroStageEnum.disabled: params_mem = num_params * (2 if fp16_enabled else 4) gradients_mem = num_params * (2 if fp16_enabled else 4) optimizer_mem = num_params * (16 if fp16_enabled else 8) - if zero_stage >= ZERO_OPTIMIZATION_OPTIMIZER_STATES: + if zero_stage >= ZeroStageEnum.optimizer_states: optimizer_mem = optimizer_mem / total_gpus - if zero_stage >= ZERO_OPTIMIZATION_GRADIENTS: + if zero_stage >= ZeroStageEnum.gradients: gradients_mem = gradients_mem / total_gpus - if zero_stage >= ZERO_OPTIMIZATION_WEIGHTS: + if zero_stage >= ZeroStageEnum.weights: params_mem = params_mem / total_gpus mem_per_gpu = (params_mem + gradients_mem + optimizer_mem) / self.mp_size() @@ -308,7 +337,7 @@ class Autotuner: # each zero stage uses a different template configuration file config_zero = tuning_space.get(ZERO_OPTIMIZATION, {}) - stage = config_zero.get(ZERO_OPTIMIZATION_STAGE, None) + stage = config_zero.get(ZERO_OPTIMIZATION_STAGE, ZERO_OPTIMIZATION_STAGE_DEFAULT) template_config = {} if stage == 0: template_path = DEFAULT_TEMPLATE_PATH_ZERO_0 @@ -331,12 +360,11 @@ class Autotuner: model_info = self.model_info if model_info and "hidden_size" in model_info: hs = model_info["hidden_size"] + template_config[ZERO_OPTIMIZATION]['reduce_bucket_size'] = hs * hs template_config[ZERO_OPTIMIZATION][ - ZERO_OPTIMIZATION_REDUCE_BUCKET_SIZE] = hs * hs - template_config[ZERO_OPTIMIZATION][ - ZERO_OPTIMIZATION_PREFETCH_BUCKET_SIZE] = 0.9 * hs * hs + 'stage3_prefetch_bucket_size'] = 0.9 * hs * hs template_config[ZERO_OPTIMIZATION][ - 
ZERO_OPTIMIZATION_PARAM_PERSISTENCE_THRESHOLD] = 10 * hs + 'stage3_param_persistence_threshold'] = 10 * hs prefix = "z3_" else: return exps @@ -355,11 +383,11 @@ class Autotuner: logger.debug(f"tuning_keys = {tuning_keys}") - logger.debug(f"before prunning total configs = {len(all_configs)}") + logger.debug(f"before pruning total configs = {len(all_configs)}") pruned_list = prune_configs(all_configs) - logger.debug(f"after prunning total configs = {len(pruned_list)}") + logger.debug(f"after pruning total configs = {len(pruned_list)}") for config in pruned_list: exp_config = copy.deepcopy(template_config) @@ -375,7 +403,6 @@ class Autotuner: if OFFLOAD_PARAM not in config_zero and OFFLOAD_PARAM in exp_config[ ZERO_OPTIMIZATION]: del exp_config[ZERO_OPTIMIZATION][OFFLOAD_PARAM] - # set gradient accumulation steps according to max_train_batch_size_per_gpu mbs = exp_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] gas = max_train_batch_size_per_gpu // mbs @@ -396,6 +423,10 @@ class Autotuner: def tune(self): """ Tunes Zero stages, micro batch size per GPU, and other Zero configurations. Performance metrics of different tuning spaces are recorded in self.records. """ + if has_mlflow: + self.mlflow_parent_id = os.environ['MLFLOW_RUN_ID'] + mlflow.start_run(run_id=self.mlflow_parent_id) + self.start_time = time.time() if self.fast_enabled(): logger.info(f"Fast mode is enabled. Tuning micro batch size only.") @@ -420,9 +451,11 @@ class Autotuner: f"The model requires at least {memory_to_string(self.activation_mem, postfix='B')} activation memory for micro batch size 1." 
) + #TODO: FIX THIS stage = self.user_config.get(ZERO_OPTIMIZATION, {}).get(ZERO_OPTIMIZATION_STAGE, "all") + stage = "all" user_zero_stages = [stage] if not isinstance(stage, list) else stage logger.info(f"User-defined zero stages are {stage}.") @@ -431,9 +464,9 @@ class Autotuner: metric_val = 0 required_gpu_mem = self.get_instantiation_memory_required_per_gpu( - ZERO_OPTIMIZATION_DISABLED) + self.activation_mem + ZeroStageEnum.disabled) + self.activation_mem if self.gpu_mem > required_gpu_mem: - if "all" in user_zero_stages or ZERO_OPTIMIZATION_DISABLED in user_zero_stages: + if "all" in user_zero_stages or ZeroStageEnum.disabled in user_zero_stages: logger.info( f"The model might be runable with ZERO 0 (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory with mbs = 1), adding DEFAULT_TUNING_SPACE_ZERO_0 to the global tuning space" ) @@ -443,15 +476,17 @@ class Autotuner: mbs = next_mbs max_mbs = next_max_mbs metric_val = next_metric_val + if has_mlflow: + mlflow.log_metric(f"z0{self.metric()}", next_metric_val) else: logger.info( - f"The model is not runable with ZERO stage {ZERO_OPTIMIZATION_DISABLED} (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory with mbs = 1)" + f"The model is not runable with ZERO stage {ZeroStageEnum.disabled} (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory with mbs = 1)" ) required_gpu_mem = self.get_instantiation_memory_required_per_gpu( - ZERO_OPTIMIZATION_OPTIMIZER_STATES) + self.activation_mem + ZeroStageEnum.optimizer_states) + self.activation_mem if self.gpu_mem > required_gpu_mem: - if "all" in user_zero_stages or ZERO_OPTIMIZATION_OPTIMIZER_STATES in user_zero_stages: + if "all" in user_zero_stages or ZeroStageEnum.optimizer_states in user_zero_stages: logger.info( f"The model might be runable with ZERO 1 (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory), adding DEFAULT_TUNING_SPACE_ZERO_1 to the 
global tuning space" ) @@ -461,15 +496,17 @@ class Autotuner: mbs = next_mbs max_mbs = next_max_mbs metric_val = next_metric_val + if has_mlflow: + mlflow.log_metric(f"z1{self.metric()}", next_metric_val) else: logger.info( - f"The model is not runable with ZERO stage {ZERO_OPTIMIZATION_OPTIMIZER_STATES} (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory with mbs = 1)" + f"The model is not runable with ZERO stage {ZeroStageEnum.optimizer_states} (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory with mbs = 1)" ) required_gpu_mem = self.get_instantiation_memory_required_per_gpu( - ZERO_OPTIMIZATION_GRADIENTS) + self.activation_mem + ZeroStageEnum.gradients) + self.activation_mem if self.gpu_mem > required_gpu_mem: - if "all" in user_zero_stages or ZERO_OPTIMIZATION_GRADIENTS in user_zero_stages: + if "all" in user_zero_stages or ZeroStageEnum.gradients in user_zero_stages: logger.info( f"The model might be runable with ZERO 2 (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory), adding DEFAULT_TUNING_SPACE_ZERO_2 to the global tuning space" ) @@ -479,25 +516,31 @@ class Autotuner: mbs = next_mbs max_mbs = next_max_mbs metric_val = next_metric_val + if has_mlflow: + mlflow.log_metric(f"z2{self.metric()}", next_metric_val) else: logger.info( - f"The model is not runable with ZERO stage {ZERO_OPTIMIZATION_GRADIENTS} (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory with mbs = 1)" + f"The model is not runable with ZERO stage {ZeroStageEnum.gradients} (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory with mbs = 1)" ) required_gpu_mem = self.get_instantiation_memory_required_per_gpu( - ZERO_OPTIMIZATION_WEIGHTS) + self.activation_mem + ZeroStageEnum.weights) + self.activation_mem if self.gpu_mem > required_gpu_mem: - if "all" in user_zero_stages or ZERO_OPTIMIZATION_WEIGHTS in user_zero_stages: + if "all" in 
user_zero_stages or ZeroStageEnum.weights in user_zero_stages: logger.info( f"The model might be runable with ZERO 3 (which requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory), adding DEFAULT_TUNING_SPACE_ZERO_3 to the global tuning space" ) - _, _, _ = self.tune_space( + _, _, next_metric_val = self.tune_space( DEFAULT_TUNING_SPACE_ZERO_3, prev_max_mbs = max_mbs, prev_best_mbs=mbs, prev_best_metric_val=metric_val) + if has_mlflow: + mlflow.log_metric(f"z3{self.metric()}", next_metric_val) else: logger.info( - f"The model has {self.get_model_num_params()} parameters and requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory per GPU with DeepSpeed Zero stage {ZERO_OPTIMIZATION_WEIGHTS} optimization. Memory per GPU in system is {memory_to_string(self.gpu_mem)}. No tuning is performed." + f"The model has {self.get_model_num_params()} parameters and requires at least {memory_to_string(required_gpu_mem, postfix='B')} memory per GPU with DeepSpeed Zero stage {ZeroStageEnum.weights} optimization. Memory per GPU in system is {memory_to_string(self.gpu_mem)}. No tuning is performed." 
) return + if has_mlflow: + mlflow.end_run() def tune_space(self, tuning_space, @@ -505,7 +548,7 @@ class Autotuner: prev_best_mbs=0, prev_best_metric_val=0): config_zero = tuning_space.get(ZERO_OPTIMIZATION, {}) - stage = config_zero.get(ZERO_OPTIMIZATION_STAGE, ZERO_OPTIMIZATION_STAGE_DEFAULT) + stage = config_zero.get(ZERO_OPTIMIZATION_STAGE, None) tuning_space_name = TUNING_MICRO_BATCH_SIZE_PREFIX + str(stage) tuning_micro_batch_sizes = [] max_train_batch_size_per_gpu = 0 @@ -785,11 +828,12 @@ class Autotuner: self.rm.schedule_experiments(exp_paths) self.rm.run() + for exp_id, (exp, err) in self.rm.finished_experiments.items(): if exp: metric_file = exp[DS_CONFIG][AUTOTUNING][AUTOTUNING_METRIC_PATH] - if os.path.exists(metric_file): + with open(metric_file, 'r') as f: results = hjson.load(f) metric_val = results[self.metric()] @@ -797,11 +841,19 @@ class Autotuner: if max_micro_batch_size == exp[DS_CONFIG][ TRAIN_MICRO_BATCH_SIZE_PER_GPU]: max_micro_batch_size_metric_val = metric_val + if has_mlflow: + os.environ.pop('MLFLOW_RUN_ID') + mlflow.start_run(nested=True, run_name=exp['name']) + for metric in results: + mlflow.log_metric(metric, results[metric]) + mlflow.end_run() + os.environ['MLFLOW_RUN_ID'] = self.mlflow_parent_id else: self.update_records(tuning_space_name, exp, 0, 1) else: mbs = exp[DS_CONFIG][TRAIN_MICRO_BATCH_SIZE_PER_GPU] logger.info(f"micro batch size = {mbs} was not run successfully") + self.rm.clear() if tuning_micro_batch_sizes_overwritten: @@ -831,7 +883,18 @@ class Autotuner: self.exp_num_gpus * self.exp_num_nodes // self.mp_size() exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(mbs) exp, metric_val = self.run_ds_config(ds_config, exp_name) + if metric_val: + with open(metric_file, 'r') as f: + results = hjson.load(f) + metric_val = results[self.metric()] + if has_mlflow: + os.environ.pop('MLFLOW_RUN_ID') + mlflow.start_run(nested=True, run_name=exp_name) + for metric in results: + mlflow.log_metric(metric, 
results[metric]) + mlflow.end_run() + os.environ['MLFLOW_RUN_ID'] = self.mlflow_parent_id self.update_records(tuning_space_name, exp, metric_val, 1) if metric_val > prev_best_metric_val * (1 + METRIC_PERCENT_DIFF_CONST): prev_best_metric_val = metric_val @@ -843,7 +906,6 @@ class Autotuner: break if prev_best_mbs != max_micro_batch_size: tuning_micro_batch_sizes[-1] = prev_best_mbs - return tuning_micro_batch_sizes def get_min_max_micro_batch_size(self, @@ -961,11 +1023,10 @@ class Autotuner: low = min_micro_batch_size high = max_micro_batch_size - while low < high: + # binary search until low is the smallest micro batch size that OOMs. + while low <= high: mid = int((low + high) // 2) logger.debug(f"trying mbs = {mid}, low = {low}, high = {high}") - if mid == low: - break if mid not in used_micro_batch_sizes: ds_config[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = mid ds_config[TRAIN_BATCH_SIZE] = mid * gas * \ @@ -973,7 +1034,7 @@ class Autotuner: exp_name = tuning_space_name + "_gas" + str(gas) + "_tmbspg" + str(mid) exp, metric_val = self.run_ds_config(ds_config, exp_name) if metric_val: - low = mid + low = mid + 1 self.update_records(tuning_space_name, exp, metric_val, 1) used_micro_batch_sizes.append(mid) if prev_metric_val and ((metric_val - prev_metric_val) / @@ -985,8 +1046,8 @@ class Autotuner: self.update_records(tuning_space_name, exp, 0, 1) high = mid - 1 else: - low = mid - max_micro_batch_size = low + low = mid + 1 + max_micro_batch_size = low - 1 logger.info( f"min_micro_batch_size = {min_micro_batch_size}, max_micro_batch_size = {max_micro_batch_size}." @@ -1084,26 +1145,18 @@ class Autotuner: json.dump(exp_config, fd) fd.flush() os.fsync(fd) - self.rm.schedule_experiments([exp_path]) self.rm.run() exp, metric_val = self.rm.parse_results(self.metric()) self.rm.clear() return exp, metric_val - def run_after_tuning(self): - """ Launches the training with the optmimal DeepSpeed configuration found through the autotuning process. 
- "ds_config_optimal.json" describing the optmimal DeepSpeed configuration as well the command used to launch training "cmd_optimal.txt" are saved to self.results_dir. - """ + def write_optimal_config(self): best_space_records = self.get_best_space_records() if GLOBAL_TUNING_SPACE not in best_space_records: return best_exp, best_metric_val, _ = best_space_records[GLOBAL_TUNING_SPACE] if best_exp: - logger.info( - "Start training with the optmimal DeepSpeed configuration found through the tuning process" - ) - exp_dir = best_exp["result_dir"] cmd = None with open(os.path.join(exp_dir, "cmd.txt"), "r") as f: @@ -1115,18 +1168,27 @@ class Autotuner: ds_config_path = os.path.join(self.results_dir, "ds_config_optimal.json") json.dump(ds_config, open(ds_config_path, "w")) - idx = cmd.index(os.path.join(exp_dir, "ds_config.json")) - cmd[idx] = ds_config_path - cmd_path = os.path.join(self.results_dir, "cmd_optimal.txt") with open(cmd_path, "w") as fd: fd.write(" ".join(cmd)) fd.write("\n") fd.flush() + self.optimal_cmd = cmd + self.optmal_ds_config = ds_config + logger.info( + f"Wrote the optimal DeepSpeed configuration found by autotuning to {ds_config_path}, and the corresponding DeepSpeed command to {cmd_path}" + ) - result = subprocess.Popen(cmd) + def run_after_tuning(self): + """ Launches the training with the optimal DeepSpeed configuration found through the autotuning process. + "ds_config_optimal.json" describing the optmimal DeepSpeed configuration as well the command used to launch training "cmd_optimal.txt" are saved to self.results_dir. 
+ """ + if self.optimal_cmd: + result = subprocess.Popen(self.optimal_cmd) result.wait() logger.info( - f"Done running with the optimal DeepSpeed configuration found by autotuning: {ds_config_path}" + f"Done running with the optimal DeepSpeed configuration using {self.optimal_cmd}" ) + else: + logger.info(f"No optimal DeepSpeed configuration found by autotuning.") diff --git a/deepspeed/autotuning/config.py b/deepspeed/autotuning/config.py index dea36f0..6f6b690 100644 --- a/deepspeed/autotuning/config.py +++ b/deepspeed/autotuning/config.py @@ -1,3 +1,4 @@ +'''Copyright The Microsoft DeepSpeed Team''' """ Copyright (c) Microsoft Corporation Licensed under the MIT license. @@ -41,11 +42,11 @@ class DeepSpeedAutotuningConfig(DeepSpeedConfigObject): self.results_dir = get_scalar_param(autotuning_dict, AUTOTUNING_RESULTS_DIR, AUTOTUNING_RESULTS_DIR_DEFAULT) - + assert self.results_dir, "results_dir cannot be empty" self.exps_dir = get_scalar_param(autotuning_dict, AUTOTUNING_EXPS_DIR, AUTOTUNING_EXPS_DIR_DEFAULT) - + assert self.exps_dir, "exps_dir cannot be empty" self.overwrite = get_scalar_param(autotuning_dict, AUTOTUNING_OVERWRITE, AUTOTUNING_OVERWRITE_DEFAULT) diff --git a/deepspeed/autotuning/constants.py b/deepspeed/autotuning/constants.py index 3bfcd27..d0306bb 100644 --- a/deepspeed/autotuning/constants.py +++ b/deepspeed/autotuning/constants.py @@ -1,3 +1,4 @@ +'''Copyright The Microsoft DeepSpeed Team''' """ Copyright (c) Microsoft Corporation Licensed under the MIT license. 
@@ -22,9 +23,6 @@ DEFAULT_TEMPLATE_PATH_ZERO_3 = os.path.join(os.path.dirname(os.path.realpath(__f "config_templates", "template_zero3.json") -DEFAULT_EXPRS_DIR = os.path.join(os.getcwd(), "autotuning_exps") -DEFAULT_RESULTS_DIR = os.path.join(os.getcwd(), "autotuning_results") - METRIC_PERCENT_DIFF_CONST = 0.05 DS_CONFIG = "ds_config" BUFSIZE = 1 # line buffer size for writing files @@ -54,10 +52,10 @@ AUTOTUNING_FAST = "fast" AUTOTUNING_FAST_DEFAULT = True AUTOTUNING_RESULTS_DIR = "results_dir" -AUTOTUNING_RESULTS_DIR_DEFAULT = None +AUTOTUNING_RESULTS_DIR_DEFAULT = "autotuning_results" AUTOTUNING_EXPS_DIR = "exps_dir" -AUTOTUNING_EXPS_DIR_DEFAULT = None +AUTOTUNING_EXPS_DIR_DEFAULT = "autotuning_exps" AUTOTUNING_OVERWRITE = "overwrite" AUTOTUNING_OVERWRITE_DEFAULT = True diff --git a/deepspeed/autotuning/scheduler.py b/deepspeed/autotuning/scheduler.py old mode 100644 new mode 100755 index e4090b3..2a4c0c7 --- a/deepspeed/autotuning/scheduler.py +++ b/deepspeed/autotuning/scheduler.py @@ -1,23 +1,20 @@ +'''Copyright The Microsoft DeepSpeed Team''' + import copy -from re import I from numpy import BUFSIZE -from deepspeed.env_report import SUCCESS -from enum import Flag import json -import os import subprocess import sys import threading import time -from pathlib import Path -from typing import List +import base64 +import os import hjson from tqdm import tqdm from ..utils import logger -from .constants import * from .constants import AUTOTUNING, AUTOTUNING_METRIC_PATH from .utils import get_val_by_key, search_error, was_interruptted """ @@ -25,9 +22,7 @@ thread-0: loop over experiment queue dispatching experiments if they become avai thread-N: start each experiment in its own thread """ -import torch.distributed as dist - -from datetime import datetime +from deepspeed import comm as dist TIMEOUT = 5 @@ -188,7 +183,6 @@ class ResourceManager: logger.debug(f'Put exp_id = {exp["exp_id"]} back into the queue') self.experiment_check(pbar) else: - desc = "" for 
reservation in reservations: reservation.slots.sort() @@ -344,19 +338,27 @@ def run_experiment(exp: dict, reservations, user_script, user_args): exp["job_id"] = get_job_id() exp_dir = exp["result_dir"] os.makedirs(exp_dir, exist_ok=True) - - exp["ds_config_path"] = os.path.join(exp_dir, "ds_config.json") + ds_config_path = os.path.join(exp_dir, "ds_config.json") + exp["ds_config_path"] = ds_config_path ds_config = copy.deepcopy(exp["ds_config"]) + ds_config_json = json.dumps(ds_config).encode('utf-8') + + exp["ds_config_base64"] = base64.urlsafe_b64encode(ds_config_json).decode('utf-8') with open(exp["ds_config_path"], "w", buffering=BUFSIZE) as fd: json.dump(ds_config, fd) fd.flush() os.fsync(fd) + path = exp["ds_config_path"] + logger.info(f"Scheduler wrote ds_config to {path}, {os.path.abspath(path)}") + with open(os.path.join(exp_dir, "exp.json"), "w", buffering=BUFSIZE) as fd: json.dump(exp, fd) fd.flush() os.fsync(fd) + path = os.path.join(exp_dir, "exp.json") + logger.info(f"Scheduler wrote exp to {path}, {os.path.abspath(path)}") # remove "--deepspeed_config ds_config.json" from user_args if user_args: @@ -365,9 +367,10 @@ def run_experiment(exp: dict, reservations, user_script, user_args): # "--deepspeed_config" is omitted in HF elif "--deepspeed" in user_args: idx = user_args.index("--deepspeed") - assert idx < len(user_args) and ".json" in user_args[idx + - 1], "there is no ds_config file specified after --deepspeed_config or --deepspeed" - user_args[idx + 1] = exp["ds_config_path"] + assert idx < len(user_args), "there is no ds_config file specified after --deepspeed_config or --deepspeed" + # user_args[idx + 1] = exp["ds_config_path"] + # pass base64 serialized ds_config to launcher + user_args[idx + 1] = exp["ds_config_base64"] exp["user_script"] = user_script exp["user_args"] = user_args @@ -382,7 +385,9 @@ def run_experiment(exp: dict, reservations, user_script, user_args): fd.flush() os.fsync(fd) - logger.info(f"Launching exp_id = {exp['exp_id']}, 
exp_name = {exp['name']}") + logger.info( + f"Launching exp_id = {exp['exp_id']}, exp_name = {exp['name']}, with resource = {include_str}, and ds_config = {os.path.abspath(ds_config_path)}" + ) with open(os.path.join(exp_dir, "stdout.log"), "wb") as out, open( os.path.join(exp_dir, "stderr.log"), "wb" @@ -396,7 +401,9 @@ def run_experiment(exp: dict, reservations, user_script, user_args): clean_up(exp, reservations) - logger.info(f"Done running exp_id = {exp['exp_id']}, exp_name = {exp['name']}") + logger.info( + f"Done running exp_id = {exp['exp_id']}, exp_name = {exp['name']}, with resource = {include_str}" + ) PDSH_MAX_FAN_OUT = 1024 diff --git a/deepspeed/autotuning/tuner/__init__.py b/deepspeed/autotuning/tuner/__init__.py old mode 100644 new mode 100755 index 7ce9fe4..9f2e567 --- a/deepspeed/autotuning/tuner/__init__.py +++ b/deepspeed/autotuning/tuner/__init__.py @@ -1,3 +1,5 @@ +'''Copyright The Microsoft DeepSpeed Team''' + from .index_based_tuner import RandomTuner, GridSearchTuner # from .ga_tuner import GATuner from .model_based_tuner import ModelBasedTuner diff --git a/deepspeed/autotuning/tuner/base_tuner.py b/deepspeed/autotuning/tuner/base_tuner.py old mode 100644 new mode 100755 index fbdb16d..89eb2d3 --- a/deepspeed/autotuning/tuner/base_tuner.py +++ b/deepspeed/autotuning/tuner/base_tuner.py @@ -1,12 +1,11 @@ -import atexit +'''Copyright The Microsoft DeepSpeed Team''' + import sys from deepspeed.autotuning.constants import * from deepspeed.autotuning.utils import write_experiments from deepspeed.utils import logger -import json - class BaseTuner: def __init__(self, exps, resource_manager, metric): diff --git a/deepspeed/autotuning/tuner/cost_model.py b/deepspeed/autotuning/tuner/cost_model.py old mode 100644 new mode 100755 index c311659..858ab6d --- a/deepspeed/autotuning/tuner/cost_model.py +++ b/deepspeed/autotuning/tuner/cost_model.py @@ -1,4 +1,4 @@ -import numpy as np +'''Copyright The Microsoft DeepSpeed Team''' from .utils import * diff 
--git a/deepspeed/autotuning/tuner/index_based_tuner.py b/deepspeed/autotuning/tuner/index_based_tuner.py old mode 100644 new mode 100755 index f7233f2..f196948 --- a/deepspeed/autotuning/tuner/index_based_tuner.py +++ b/deepspeed/autotuning/tuner/index_based_tuner.py @@ -1,6 +1,6 @@ -import random +'''Copyright The Microsoft DeepSpeed Team''' -from deepspeed.utils import logger +import random from .base_tuner import BaseTuner diff --git a/deepspeed/autotuning/tuner/model_based_tuner.py b/deepspeed/autotuning/tuner/model_based_tuner.py old mode 100644 new mode 100755 index d8bc2b4..ec47500 --- a/deepspeed/autotuning/tuner/model_based_tuner.py +++ b/deepspeed/autotuning/tuner/model_based_tuner.py @@ -1,8 +1,8 @@ +'''Copyright The Microsoft DeepSpeed Team''' + import hjson -import numpy as np -from deepspeed.utils import logger -from ..constants import AUTOTUNING, AUTOTUNING_METRIC_PATH, AUTOTUNING_METRIC_DEFAULT +from ..constants import AUTOTUNING, AUTOTUNING_METRIC_PATH from .base_tuner import BaseTuner from .cost_model import XGBoostCostModel from .utils import * diff --git a/deepspeed/autotuning/tuner/utils.py b/deepspeed/autotuning/tuner/utils.py old mode 100644 new mode 100755 index f87c756..f6719f0 --- a/deepspeed/autotuning/tuner/utils.py +++ b/deepspeed/autotuning/tuner/utils.py @@ -1,3 +1,5 @@ +'''Copyright The Microsoft DeepSpeed Team''' + import numpy as np import itertools from ..utils import * diff --git a/deepspeed/autotuning/utils.py b/deepspeed/autotuning/utils.py index 5cfa92c..40da7ec 100644 --- a/deepspeed/autotuning/utils.py +++ b/deepspeed/autotuning/utils.py @@ -1,10 +1,10 @@ +'''Copyright The Microsoft DeepSpeed Team''' + import re import collections.abc import os import json from deepspeed.runtime.constants import GRADIENT_ACCUMULATION_STEPS, TRAIN_MICRO_BATCH_SIZE_PER_GPU -import hjson -import sys import itertools import copy @@ -35,23 +35,11 @@ def was_interruptted(filename): return False -def was_interruptted(filename): - if not 
os.path.exists(filename): - return "stderr.log does not exist" - with open(filename) as f: - for line in f: - s = "KeyboardInterrupt" - idx = line.find(s) - if idx != -1: - return True - return False - - def find_replace_str(value, replace_dict): if not isinstance(value, str): return str(value) - matches = re.findall("\$[A-Za-z0-9_]+", value) + matches = re.findall(r"\$[A-Za-z0-9_]+", value) for var in matches: var_key = var.replace("$", "").lower() if var_key == "nvme_path": diff --git a/deepspeed/checkpoint/__init__.py b/deepspeed/checkpoint/__init__.py index e69de29..2ef0817 100644 --- a/deepspeed/checkpoint/__init__.py +++ b/deepspeed/checkpoint/__init__.py @@ -0,0 +1,19 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +from .reshape_meg_2d import reshape_meg_2d_parallel + +from .deepspeed_checkpoint import DeepSpeedCheckpoint + +from .utils import (get_layer_ckpt_name_for_rank, + get_model_ckpt_name_for_rank, + get_zero_ckpt_name_for_rank) + +from .reshape_utils import (merge_state) + +from .reshape_3d_utils import (model_3d_desc, get_model_3d_descriptor) + +from .zero_checkpoint import ZeROCheckpoint + +from .universal_checkpoint import enable_universal_checkpoint + +from .constants import * diff --git a/deepspeed/checkpoint/constants.py b/deepspeed/checkpoint/constants.py index f457770..7873984 100644 --- a/deepspeed/checkpoint/constants.py +++ b/deepspeed/checkpoint/constants.py @@ -1,3 +1,4 @@ +'''Copyright The Microsoft DeepSpeed Team''' ''' Various symbolic constants used for model checkpointing ''' @@ -11,15 +12,54 @@ FP32_FLAT_GROUPS = 'fp32_flat_groups' BASE_OPTIMIZER_STATE = 'base_optimizer_state' SINGLE_PARTITION_OF_FP32_GROUPS = "single_partition_of_fp32_groups" -GROUPS_PADDING = 'groups_padding' - +GROUP_PADDINGS = 'group_paddings' PARTITION_COUNT = 'partition_count' ZERO_STAGE = 'zero_stage' CLIP_GRAD = 'clip_grad' +FP32_WEIGHT_KEY = "fp32" ######################################### # Module checkpoint keys 
######################################### +PARAM = 'param' PARAM_SHAPES = 'param_shapes' BUFFER_NAMES = 'buffer_names' + +######################################### +# Checkpoint naming constants +######################################### +MODEL_FILE_PREFIX = 'mp_rank_' +ZERO_FILE_PREFIX = 'zero_pp_rank_' +OPTIM_FILE_SUFFIX = '_optim_states.pt' +MODEL_FILE_SUFFIX = '_model_states.pt' +LAYER_FILE_PREFIX = 'layer_' +BF16_ZERO_FILE_PREFIX = 'bf16_' + ZERO_FILE_PREFIX +FP16_ZERO_FILE_PREFIX = 'fp16_' + ZERO_FILE_PREFIX + +######################################### +# Checkpoint utility keys +######################################### DS_VERSION = 'ds_version' + +######################################### +# Universal Checkpoint keys +######################################### +UNIVERSAL_CHECKPOINT_INFO = 'universal_checkpoint_info' +UNIVERSAL_CHECKPOINT_VERSION_KEY = 'universal_checkpoint_version' +# Reserve version 0.1 for the hardcoded logic used in BLOOM-176B training +UNIVERSAL_CHECKPOINT_VERSION_VALUE = 0.2 + +# Vocabulary padding +VOCAB_DIVISIBILITY_PADDING_TENSOR = 'vocab_divisibility_padding_tensor' +PADDED_VOCAB_SIZE = 'padded_vocab_size' +ORIGINAL_VOCAB_SIZE = 'original_vocab_size' + +# Parameter splitting/merging +PARAM_SLICE_MAPPINGS = 'param_slice_mappings' +CAT_DIM = "cat_dim" + +# Regex list of parameters that require special handling +VOCABULARY_PARAMETER_PATTERNS = 'vocabulary_parameter_patterns' +PIPELINE_REPLICATED_PARAMETER_PATTERNS = 'pipeline_replicated_parameter_patterns' +PARAMETER_TO_AVERAGE_PATTERNS = 'parameter_to_average_patterns' +PARAMETER_WITH_ROW_PARALLELISM_PATTERNS = 'parameter_with_row_parallelism_patterns' diff --git a/deepspeed/checkpoint/deepspeed_checkpoint.py b/deepspeed/checkpoint/deepspeed_checkpoint.py new file mode 100644 index 0000000..c1a31b0 --- /dev/null +++ b/deepspeed/checkpoint/deepspeed_checkpoint.py @@ -0,0 +1,317 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import os +from typing import Dict +import torch + +from 
.reshape_3d_utils import model_3d_desc +from .reshape_utils import (basic_folder_validation, + merge_state, + partition_data, + get_files, + get_files_with_prefix) + +from .constants import (MODEL_FILE_PREFIX, LAYER_FILE_PREFIX) + +from .reshape_meg_2d import reshape_meg_2d_parallel, meg_2d_parallel_map +from .zero_checkpoint import ZeROCheckpoint +from .constants import * + +EMBEDDING_LAYER_INDEX = 0 +FINAL_LAYER_NORM_INDEX = -1 +ARGS_KEY = 'args' +CHECKPOINT_INFO_KEY = 'checkpoint_info' +ITERATION_KEY = 'iteration' + +SEQUENTIAL_LAYERS = [ + 'input_layernorm.weight', + 'input_layernorm.bias', + 'self_attention.dense.bias', + 'post_attention_layernorm.weight', + 'post_attention_layernorm.bias', + 'mlp.dense_4h_to_h.bias', + 'position_embeddings.weight' +] + +LAYER_CONCAT_DIM = {'self_attention.dense.weight': 1, 'mlp.dense_4h_to_h.weight': 1} + + +class DeepSpeedCheckpoint(object): + def __init__(self, dir, tp_degree=None, pp_degree=None, dp_degree=None): + self.dir = dir + self._validate_folder(dir) + + self.zero_checkpoint = ZeROCheckpoint(dir) + + self.file_list = get_files(dir) + self.layer_files = get_files_with_prefix(self.file_list, LAYER_FILE_PREFIX) + self.mp_rank_files = get_files_with_prefix(self.file_list, MODEL_FILE_PREFIX) + + self.layer_keys = self._get_layer_keys() + self.layer_count = len(self.layer_keys) + + self.tp_degree = self.zero_checkpoint.get_src_tp_degree( + ) if tp_degree is None else tp_degree + self.pp_degree = self.zero_checkpoint.get_src_pp_degree( + ) if pp_degree is None else pp_degree + self.dp_degree = self.zero_checkpoint.get_src_dp_degree( + ) if dp_degree is None else dp_degree + + self.original_world_size = self.zero_checkpoint.get_src_tp_degree( + ) * self.zero_checkpoint.get_src_pp_degree( + ) * self.zero_checkpoint.get_src_dp_degree() + self.world_size = self.tp_degree * self.pp_degree * self.dp_degree + + self.old_2d_map = meg_2d_parallel_map(self.zero_checkpoint.get_src_pp_degree(), + 
self.zero_checkpoint.get_src_tp_degree()) + self.old_2d_map.simple_init() + self.new_2d_map = reshape_meg_2d_parallel( + old_pp_degree=self.zero_checkpoint.get_src_pp_degree(), + old_tp_degree=self.zero_checkpoint.get_src_tp_degree(), + new_pp_degree=self.pp_degree, + new_tp_degree=self.tp_degree) + + if self.is_change_pp_degree() or self.is_change_tp_degree( + ) or self.is_change_dp_degree(): + self.zero_checkpoint.reshape( + model_3d_desc(self.pp_degree, + self.tp_degree, + self.dp_degree)) + + self.global_state = {} + + self._sanity_check() + self.pp_to_transformer_map = self._build_pp_transformer_map() + self.transformer_file_map = self._build_transformer_file_map() + self.tp_to_embedding_map = self._build_tp_other_layer_map(EMBEDDING_LAYER_INDEX) + self.tp_to_final_norm_map = self._build_tp_other_layer_map( + FINAL_LAYER_NORM_INDEX) + self._build_global_state() + + def is_change_tp_degree(self): + return self.tp_degree != self.zero_checkpoint.get_src_tp_degree() + + def is_change_pp_degree(self): + return self.pp_degree != self.zero_checkpoint.get_src_pp_degree() + + def is_change_dp_degree(self): + return self.dp_degree != self.zero_checkpoint.get_src_dp_degree() + + def show_2d_mapping(self): + print(f'reshaped 2d map ---- begin') + + for i in range(self.pp_degree): + for j in range(self.tp_degree): + file_list = self.get_2d_parallel_files(pp_index=i, tp_index=j) + print(f'[{i}, {j}] = {file_list}') + + print(f'reshaped 2d map ---- end') + + def show_tp_embedding_map(self): + self._dump_mapping(self.tp_to_embedding_map, 'tp_to_embedding_layers') + + def show_tp_final_norm_map(self): + self._dump_mapping(self.tp_to_final_norm_map, 'tp_to_final_norm_layers') + + def show_pp_tranformer_map(self): + self._dump_mapping(self.pp_to_transformer_map, 'pp_to_tranformer_layers') + + def show_transformer_file_map(self): + self._dump_mapping(self.transformer_file_map, 'rank_to_tranformer_files') + + def _build_global_state(self): + sd = torch.load(self.mp_rank_files[0], 
map_location=torch.device('cpu')) + self.global_state[ITERATION_KEY] = sd.get(ITERATION_KEY, 0) + self.global_state[ARGS_KEY] = sd.get(ARGS_KEY, None) + + def get_zero_checkpoint_state(self, pp_index, tp_index, dp_index) -> dict: + return self.zero_checkpoint.get_state_for_rank(pp_index=pp_index, + tp_index=tp_index, + dp_index=dp_index, + keys_to_ignore=[PARAM_SHAPES]) + + def get_zero_files(self, pp_index, tp_index, dp_index) -> list: + return self.zero_checkpoint.get_files_for_rank(pp_index=pp_index, + tp_index=tp_index, + dp_index=dp_index) + + def get_embedding_layer_id(self): + return self.layer_keys[EMBEDDING_LAYER_INDEX] + + def get_final_norm_layer_id(self): + return self.layer_keys[FINAL_LAYER_NORM_INDEX] + + def get_iteration(self): + if not ITERATION_KEY in self.global_state: + sd = torch.load(self.mp_rank_files[0], map_location=torch.device('cpu')) + self.global_state[ITERATION_KEY] = sd.get(ITERATION_KEY, 0) + + return self.global_state[ITERATION_KEY] + + def get_embedding_state(self, tp_index: int) -> Dict: + assert tp_index in self.tp_to_embedding_map.keys() + sd_list = [ + torch.load(fname, + map_location=torch.device('cpu')) + for fname in self.tp_to_embedding_map[tp_index] + ] + sd = self._merge_state_dicts(sd_list) + return sd + + def get_embedding_files(self, tp_index: int) -> list: + assert tp_index in self.tp_to_embedding_map.keys() + return self.tp_to_embedding_map[tp_index] + + def _get_checkpoint_value(self, key): + if not key in self.global_state: + sd = torch.load(self.mp_rank_files[0], map_location=torch.device('cpu')) + self.global_state[key] = sd.get(key, None) + + return self.global_state[key] + + def get_args(self): + return self._get_checkpoint_value(ARGS_KEY) + + def get_checkpoint_info(self, info_key=CHECKPOINT_INFO_KEY): + return self._get_checkpoint_value(info_key) + + def get_2d_parallel_state(self, tp_index: int, pp_index: int) -> dict: + assert tp_index < self.tp_degree + assert pp_index < self.pp_degree + fname_list = 
self.get_2d_parallel_files(tp_index=tp_index, pp_index=pp_index) + sd_list = [ + torch.load(fname, + map_location=torch.device('cpu')) for fname in fname_list + ] + + merged_sd = None + for sd in sd_list: + if merged_sd is None: + merged_sd = sd + else: + merged_sd = merge_state(merged_sd, sd) + + return merged_sd + + def get_transformer_state(self, tp_index: int, pp_index: int) -> list: + assert tp_index < self.tp_degree + assert pp_index < self.pp_degree + t_list = [] + for fname_list in self.transformer_file_map[(tp_index, pp_index)]: + sd_list = [ + torch.load(fname, + map_location=torch.device('cpu')) for fname in fname_list + ] + sd = self._merge_state_dicts(sd_list) + t_list.append(sd) + return t_list + + def get_pp_transformer_map(self, pp_index: int) -> list: + assert pp_index < self.pp_degree + return self.pp_to_transformer_map[pp_index] + + def get_final_norm_state(self, tp_index: int) -> Dict: + assert tp_index in self.tp_to_final_norm_map.keys() + sd = torch.load(self.tp_to_final_norm_map[tp_index][0], + map_location=torch.device('cpu')) + return sd + + def get_final_norm_files(self, tp_index: int) -> list: + assert tp_index in self.tp_to_final_norm_map.keys() + return self.tp_to_final_norm_map[tp_index] + + def _build_tp_other_layer_map(self, layer_index: int): + assert layer_index < len(self.layer_files) + layer_files = get_files_with_prefix(self.layer_files, + self.layer_keys[layer_index]) + layer_file_partitions = partition_data(layer_files, self.tp_degree) + data_map = {i: flist for i, flist in enumerate(layer_file_partitions)} + return data_map + + def get_2d_parallel_files(self, tp_index: int, pp_index: int) -> list: + assert tp_index < self.tp_degree + assert pp_index < self.pp_degree + file_indices = self.new_2d_map.get_data(pp_index=pp_index, tp_index=tp_index) + return [self.mp_rank_files[i] for i in file_indices] + + def _build_pp_transformer_map(self): + data_map = {} + transformer_layers = self.layer_keys[1:-1] + layers_per_pp = 
len(transformer_layers) // self.pp_degree + data_map = { + i: transformer_layers[i * layers_per_pp:(i + 1) * layers_per_pp] + for i in range(0, + self.pp_degree) + } + return data_map + + def _dump_mapping(self, data_map, map_tag=None): + if map_tag is not None: + print(f'Dump mapping: {map_tag}') + for k, v in data_map.items(): + print(f'{k} = {v}') + + def _build_transformer_file_map(self): + transformer_layer_keys = self.layer_keys[1:-1] + file_map = {} + # XXX: this is not guaranteed + layers_per_pp = len(transformer_layer_keys) // self.pp_degree + if layers_per_pp == 0: + layers_per_pp = 1 + #print(f"{transformer_layer_keys} {layers_per_pp}") + for key_index, layer_key in enumerate(transformer_layer_keys): + pp_index = key_index // layers_per_pp + layer_files = get_files_with_prefix(self.layer_files, layer_key) + layer_file_partitions = partition_data(layer_files, self.tp_degree) + for tp_index in range(self.tp_degree): + map_key = (tp_index, pp_index) + if not map_key in file_map.keys(): + file_map[map_key] = [] + file_map[map_key].append(layer_file_partitions[tp_index]) + + return file_map + + def _sanity_check(self): + assert len(self.mp_rank_files) % self.tp_degree == 0 + assert len(self.layer_keys) > 2 + assert self.zero_checkpoint.num_files % (self.pp_degree * self.tp_degree) == 0 + # XXX: fix me - isn't always the case + # only true with --pp-partition-method 'type:transformer|embedding' \ + # assert (len(self.layer_keys) - 2) % self.pp_degree == 0 + + def validate_files(self): + for file in self.file_list: + if not os.path.isfile(file): + print(f'Error: {file} is not existent') + + def _get_layer_keys(self): + key_set = set() + key_len = len(LAYER_FILE_PREFIX) + 2 + for file_path in self.layer_files: + _, fname = os.path.split(file_path) + key_set.add(fname[:key_len]) + return sorted(list(key_set)) + + def _merge_state_dicts(self, sd_list): + merged_sd = {} + for key in sd_list[0].keys(): + if not key in SEQUENTIAL_LAYERS: + cat_dim = 
LAYER_CONCAT_DIM.get(key, 0) + merged_sd[key] = torch.cat([sd[key] for sd in sd_list], dim=cat_dim) + else: + merged_sd[key] = sd_list[0][key] + + return merged_sd + + def _validate_folder(self, dir): + basic_folder_validation(dir) + + file_list = get_files(dir) + + for file_prefix in [ + MODEL_FILE_PREFIX, + LAYER_FILE_PREFIX, + f'{LAYER_FILE_PREFIX}01' + ]: + ckpt_files = get_files_with_prefix(file_list, file_prefix) + assert len(ckpt_files) > 0, f'{dir} seems a bogus DeepSpeed checkpoint folder: Cannot find {file_prefix}* files in there.' diff --git a/deepspeed/checkpoint/reshape_3d_utils.py b/deepspeed/checkpoint/reshape_3d_utils.py new file mode 100644 index 0000000..15faffb --- /dev/null +++ b/deepspeed/checkpoint/reshape_3d_utils.py @@ -0,0 +1,120 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +from .reshape_utils import (get_files, + get_files_with_prefix, + partition_data, + get_zero_files) + +from .constants import (MODEL_FILE_PREFIX, LAYER_FILE_PREFIX) + +from .reshape_meg_2d import (reshape_meg_2d_parallel, meg_2d_parallel_map) + +PP_DIM = 'PP' +TP_DIM = 'TP' +DP_DIM = 'DP' + + +class model_3d_desc(object): + def __init__(self, pp_degree=1, tp_degree=1, dp_degree=1): + self.pp_degree = pp_degree + self.tp_degree = tp_degree + self.dp_degree = dp_degree + + def reshape(self, target_3d_desc, verbose=False): + valid_reshape, reshape_errors = self.can_reshape(target_3d_desc) + assert valid_reshape, ','.join(reshape_errors) + tgt_2d_map = reshape_meg_2d_parallel(old_pp_degree=self.pp_degree, + old_tp_degree=self.tp_degree, + new_pp_degree=target_3d_desc.pp_degree, + new_tp_degree=target_3d_desc.tp_degree, + verbose=verbose) + + flat_3d_map = flatten_dp_dimension(meg_2d_map=tgt_2d_map, + src_2d_size=self.pp_degree * self.tp_degree, + dp_degree=self.dp_degree) + + return unflatten_dp_dimension(meg_2d_map=flat_3d_map, + dp_degree=target_3d_desc.dp_degree) + + def get_desc(self): + return f'{PP_DIM},{TP_DIM},{DP_DIM} = ({self.pp_degree}, {self.tp_degree}, 
{self.dp_degree})' + + def world_size(self): + return self.pp_degree * self.tp_degree * self.dp_degree + + def is_valid(self, pp_index, tp_index, dp_index): + err_msg = [] + valid = True + for index, degree, dim_name in [ + (pp_index, self.pp_degree, PP_DIM), + (tp_index, self.tp_degree, TP_DIM), + (dp_index, self.dp_degree, DP_DIM)]: + if index >= degree: + valid = False + err_msg.append( + f'{dim_name} indexing error: index {index} >= degree {degree}') + + return valid, err_msg + + def can_reshape(self, target_3d_desc): + err_msg = [] + if target_3d_desc.pp_degree > self.pp_degree: + err_msg.append( + f'Expansion reshape not supported - {PP_DIM}: {self.pp_degree} ---> {target_3d_desc.pp_degree}' + ) + + if target_3d_desc.tp_degree > self.tp_degree: + err_msg.append( + f'Expansion reshape not supported - {TP_DIM}: {self.tp_degree} ---> {target_3d_desc.tp_degree}' + ) + + if target_3d_desc.dp_degree > self.dp_degree: + err_msg.append( + f'Expansion reshape not supported - {DP_DIM}: {self.dp_degree} ---> {target_3d_desc.dp_degree}' + ) + + return len(err_msg) == 0, err_msg + + +def get_model_3d_descriptor(dir): + file_list = get_files(dir) + zero_file_list = get_zero_files(dir) + num_pp0_files = len(get_files_with_prefix(file_list, f'{LAYER_FILE_PREFIX}01')) + if num_pp0_files > 0: + tp_degree = num_pp0_files + pp_degree = len(get_files_with_prefix(file_list, MODEL_FILE_PREFIX)) // tp_degree + dp_degree = max(1, len(zero_file_list) // (pp_degree * tp_degree)) + else: + tp_degree = len(get_files_with_prefix(file_list, MODEL_FILE_PREFIX)) + dp_degree = max(1, len(zero_file_list) // tp_degree) + pp_degree = 0 + + return model_3d_desc(pp_degree, tp_degree, dp_degree) + + +def flatten_dp_dimension(meg_2d_map, src_2d_size, dp_degree): + new_meg_2d_map = meg_2d_parallel_map(meg_2d_map.pp_degree, meg_2d_map.tp_degree) + for pp_index in range(meg_2d_map.pp_degree): + for tp_index in range(meg_2d_map.tp_degree): + dp0_indices = meg_2d_map.get_data(pp_index, tp_index) + for 
idx in dp0_indices: + dpX_indices = [idx + (i * src_2d_size) for i in range(dp_degree)] + new_meg_2d_map.add_data(pp_index, tp_index, dpX_indices) + return new_meg_2d_map + + +def unflatten_dp_dimension(meg_2d_map, dp_degree): + pp_degree = meg_2d_map.pp_degree + tp_degree = meg_2d_map.tp_degree + meg_2d_map_list = [ + meg_2d_parallel_map(pp_degree=pp_degree, + tp_degree=tp_degree) for _ in range(dp_degree) + ] + for pp_index in range(pp_degree): + for tp_index in range(tp_degree): + flat_dp_indices = meg_2d_map.get_data(pp_index, tp_index) + partitioned_dp_indices = partition_data(flat_dp_indices, dp_degree) + for dp_indices, _2d_map in zip(partitioned_dp_indices, meg_2d_map_list): + _2d_map.add_data(pp_index, tp_index, dp_indices) + + return meg_2d_map_list diff --git a/deepspeed/checkpoint/reshape_meg_2d.py b/deepspeed/checkpoint/reshape_meg_2d.py new file mode 100644 index 0000000..4800b55 --- /dev/null +++ b/deepspeed/checkpoint/reshape_meg_2d.py @@ -0,0 +1,228 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +from .reshape_utils import partition_data + + +class meg_2d_parallel_map(object): + def __init__(self, pp_degree, tp_degree): + self.pp_degree = pp_degree + self.tp_degree = tp_degree + self.map = {} + + def simple_init(self): + self.map = { + self._make_key(i // self.tp_degree, + i % self.tp_degree): [i] + for i in range(self.pp_degree * self.tp_degree) + } + + def add_data(self, pp_index, tp_index, data): + self._validate_indices(pp_index, tp_index) + assert type(data) is list + + key = self._make_key(pp_index, tp_index) + if not key in self.map.keys(): + self.map[key] = [] + self.map[key] += data + + def get_data(self, pp_index=None, tp_index=None): + self._validate_indices(pp_index, tp_index) + pp_indices = list(range(self.pp_degree)) if pp_index is None else [pp_index] + tp_indices = list(range(self.tp_degree)) if tp_index is None else [tp_index] + + result = [] + for i in pp_indices: + for j in tp_indices: + result += self.map[self._make_key(i, 
j)] + + return result + + def print_data(self, tag): + print(f'{tag}') + for key, value in self.map.items(): + print(f'{key} = {value}') + + def _validate_indices(self, pp_index, tp_index): + assert pp_index is None or pp_index < self.pp_degree + assert tp_index is None or tp_index < self.tp_degree + + def _make_key(self, i, j): + return f'{i},{j}' + + +def _reshape_tp_dimension(old_2d_map, new_tp_degree): + old_pp_degree = old_2d_map.pp_degree + new_2d_map = meg_2d_parallel_map(old_pp_degree, new_tp_degree) + for i in range(old_pp_degree): + ranks_for_pp_index = old_2d_map.get_data(pp_index=i, tp_index=None) + split_ranks = partition_data(ranks_for_pp_index, new_tp_degree) + for j in range(new_tp_degree): + new_2d_map.add_data(i, j, split_ranks[j]) + + return new_2d_map + + +def _reshape_pp_dimension(old_2d_map, new_pp_degree): + old_tp_degree = old_2d_map.tp_degree + new_2d_map = meg_2d_parallel_map(new_pp_degree, old_tp_degree) + for i in range(old_tp_degree): + ranks_for_tp_index = old_2d_map.get_data(pp_index=None, tp_index=i) + split_ranks = partition_data(ranks_for_tp_index, new_pp_degree) + for j in range(new_pp_degree): + new_2d_map.add_data(j, i, split_ranks[j]) + + return new_2d_map + + +def reshape_meg_2d_parallel(old_pp_degree, + old_tp_degree, + new_pp_degree, + new_tp_degree, + verbose=False): + assert new_pp_degree <= old_pp_degree + assert new_tp_degree <= old_tp_degree + + old_2d_map = meg_2d_parallel_map(old_pp_degree, old_tp_degree) + old_2d_map.simple_init() + if verbose: + old_2d_map.print_data(f'original_2d_map:') + + if old_tp_degree != new_tp_degree: + new_tp_map = _reshape_tp_dimension(old_2d_map, new_tp_degree) + else: + new_tp_map = old_2d_map + if verbose: + new_tp_map.print_data(f'after_tp_reshape:') + + if old_pp_degree != new_pp_degree: + final_map = _reshape_pp_dimension(new_tp_map, new_pp_degree) + else: + final_map = new_tp_map + + if verbose: + final_map.print_data(f'final_2d_map:') + + return final_map + + +def 
get_mpu_ranks(tp_size=1, pp_size=1, dp_size=1, virtual_pp_size=None): + """ + Initialize model data parallel groups. + + Arguments: + tp_size: number of GPUs used to parallelize model tensor. + pp_size: number of GPUs used to parallelize model pipeline. + dp_size: number of GPUs used to parallelize model data. + + Let's say we have a total of 16 GPUs denoted by g0 ... g15 and we + use 2 GPUs to parallelize the model tensor, and 4 GPUs to parallelize + the model pipeline. The present function will + create 8 tensor model-parallel groups, 4 pipeline model-parallel groups + and 8 data-parallel groups as: + 8 data_parallel groups: + [g0, g2], [g1, g3], [g4, g6], [g5, g7], [g8, g10], [g9, g11], [g12, g14], [g13, g15] + 8 tensor model-parallel groups: + [g0, g1], [g2, g3], [g4, g5], [g6, g7], [g8, g9], [g10, g11], [g12, g13], [g14, g15] + 4 pipeline model-parallel groups: + [g0, g4, g8, g12], [g1, g5, g9, g13], [g2, g6, g10, g14], [g3, g7, g11, g15] + Note that for efficiency, the caller should make sure adjacent ranks + are on the same DGX box. For example if we are using 2 DGX-1 boxes + with a total of 16 GPUs, rank 0 to 7 belong to the first box and + ranks 8 to 15 belong to the second box. + """ + + world_size = tp_size * pp_size * dp_size + + print(f"\n\n*** tp={tp_size}, pp={pp_size}, dp={dp_size}, world={world_size}") + + tensor_model_parallel_size = min(tp_size, world_size) + pipeline_model_parallel_size = min(pp_size, world_size) + data_parallel_size = world_size // (tensor_model_parallel_size * + pipeline_model_parallel_size) + + num_tensor_model_parallel_groups = world_size // tensor_model_parallel_size + num_pipeline_model_parallel_groups = world_size // pipeline_model_parallel_size + num_data_parallel_groups = world_size // data_parallel_size + + # Build the data-parallel groups. 
+ all_dp_group_ranks = [] + for i in range(pipeline_model_parallel_size): + start_rank = i * num_pipeline_model_parallel_groups + end_rank = (i + 1) * num_pipeline_model_parallel_groups + for j in range(tensor_model_parallel_size): + ranks = range(start_rank + j, end_rank, tensor_model_parallel_size) + all_dp_group_ranks.append(list(ranks)) + + print("DP", all_dp_group_ranks) + + # Build the model-parallel groups. + all_pp_group_ranks = [] + for i in range(data_parallel_size): + ranks = [ + data_parallel_group_ranks[i] + for data_parallel_group_ranks in all_dp_group_ranks + ] + all_pp_group_ranks.append(list(ranks)) + + print(f"PP", all_pp_group_ranks) + + # Build the tensor model-parallel groups. + all_tp_group_ranks = [] + for i in range(num_tensor_model_parallel_groups): + ranks = range(i * tensor_model_parallel_size, + (i + 1) * tensor_model_parallel_size) + all_tp_group_ranks.append(list(ranks)) + + print(f"TP", all_tp_group_ranks) + + return all_tp_group_ranks, all_pp_group_ranks, all_dp_group_ranks + + # # Build the pipeline model-parallel groups and embedding groups + # # (first and last rank in each pipeline model-parallel group). 
+ # for i in range(num_pipeline_model_parallel_groups): + # ranks = range(i, world_size, + # num_pipeline_model_parallel_groups) + # print(f"EMB{i}", list(ranks)) + + +def reshape(src, tgt): + """ + reshape([tp_size_src, pp_size_src, dp_size_src], + [tp_size_tgt, pp_size_tgt, dp_size_tgt]) + """ + + print(f"\n\n*** Reshaping: {src} => {tgt}") + + tp_size_src, pp_size_src, dp_size_src = src + tp_size_tgt, pp_size_tgt, dp_size_tgt = tgt + + tp_ranks1, pp_ranks1, dp_ranks1 = get_mpu_ranks(tp_size=tp_size_src, pp_size=pp_size_src, dp_size=dp_size_src) + tp_ranks2, pp_ranks2, dp_ranks2 = get_mpu_ranks(tp_size=tp_size_tgt, pp_size=pp_size_src, dp_size=dp_size_src) + tp_ranks3, pp_ranks3, dp_ranks3 = get_mpu_ranks(tp_size=tp_size_tgt, pp_size=pp_size_tgt, dp_size=dp_size_src) + + # handle tp contraction first + print("\n*** TP contraction:") + + for i, r in enumerate(tp_ranks1): + print(f'{tp_ranks1[i]} => {tp_ranks2[i]}') + + # handle pp contraction next + + print("\n*** PP contraction:") + + for i, r in enumerate(pp_ranks1): + print(f'{pp_ranks2[i]} => {pp_ranks3[i]}') + + +# easy +#reshape([2,2,1],[1,1,1]) + +# probably need more logic to suggest how to pack +#reshape([4,4,1],[2,2,1]) + +#reshape([2,4,2], [8,32,1]) + +# get_mpu_ranks(2,2,2) +# get_mpu_ranks(4,2,1) +# get_mpu_ranks(2,4,1) +# get_mpu_ranks(1,1,8) diff --git a/deepspeed/checkpoint/reshape_utils.py b/deepspeed/checkpoint/reshape_utils.py new file mode 100644 index 0000000..4d6b272 --- /dev/null +++ b/deepspeed/checkpoint/reshape_utils.py @@ -0,0 +1,100 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import os +import torch +from collections import OrderedDict +from .constants import (ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX) + + +def basic_folder_validation(dir): + assert os.path.exists(dir), f'{dir} path does not exist' + assert os.path.isdir(dir), f'{dir} is not a folder' + + +def get_files_with_prefix(all_files, prefix): + file_list = [] + for file_path in all_files: + _, fname 
= os.path.split(file_path) + if fname.startswith(prefix): + file_list.append(file_path) + + return sorted(file_list) + + +def validate_files(file_list): + for file in file_list: + if not os.path.isfile(file): + print(f'Error: {file} is not existent') + + +def get_files(dir): + file_list = [] + for root, _, files in os.walk(dir): + for file in files: + file_list.append(os.path.join(root, file)) + return file_list + + +def get_zero_files(dir): + file_list = get_files(dir) + for prefix in [ZERO_FILE_PREFIX, FP16_ZERO_FILE_PREFIX, BF16_ZERO_FILE_PREFIX]: + zero_files = get_files_with_prefix(file_list, prefix) + if len(zero_files) > 0: + return zero_files + + return [] + + +def partition_data(data_list, num_partitions): + num_elems = len(data_list) + assert num_elems % num_partitions == 0 + partition_size = num_elems // num_partitions + partitions_list = [ + data_list[i:i + partition_size] for i in range(0, + num_elems, + partition_size) + ] + return partitions_list + + +def _key_list_to_string(key_list): + return '.'.join(key_list) + + +def merge_state_dict(dict_a, dict_b, key_list): + merged_dict = type(dict_a)({}) + + for key, value in dict_b.items(): + if key in dict_a.keys(): + merged_dict[key] = merge_state(dict_a[key], dict_b[key], [str(key)]) + else: + merged_dict[key] = value + + return merged_dict + + +def merge_state_list(list_a, list_b, key_list): + if len(list_a) != len(list_b): + print(f'{_key_list_to_string(key_list)}') + raise ValueError( + f'Cannot merge lists of different lengths, a = {len(list_a)} b = {len(list_b)}' + ) + + return [merge_state(a, b, key_list) for a, b in zip(list_a, list_b)] + + +def merge_state(state_a, state_b, key_list=[]): + if type(state_a) != type(state_b): + key_list_string = _key_list_to_string(key_list) + print(f'key_list = {key_list_string}') + raise ValueError( + f'Cannot merge two states of types {type(state_a)} and type {type(state_b)}') + + if type(state_a) in (dict, OrderedDict): + return merge_state_dict(state_a, 
state_b, key_list) + elif type(state_a) in (list, tuple): + return type(state_a)(merge_state_list(state_a, state_b, key_list)) + elif torch.is_tensor(state_a): + return torch.cat([state_a, state_b], 0) + else: + return state_a diff --git a/deepspeed/checkpoint/universal_checkpoint.py b/deepspeed/checkpoint/universal_checkpoint.py new file mode 100644 index 0000000..b58de48 --- /dev/null +++ b/deepspeed/checkpoint/universal_checkpoint.py @@ -0,0 +1,108 @@ +""" +Copyright 2022 The Microsoft DeepSpeed Team +""" +import os +import torch +import types +from .constants import (FP32_WEIGHT_KEY, + PARAM, + VOCAB_DIVISIBILITY_PADDING_TENSOR, + CAT_DIM) + + +def load_hp_checkpoint_state(self, folder, tp_rank, tp_world_size): + hp_mapping = self._hp_mapping + optim_state_keys = hp_mapping.get_optim_state_keys() + hp_keys = [FP32_WEIGHT_KEY] + optim_state_keys + checkpoint_files = {key: os.path.join(folder, f"{key}.pt") for key in hp_keys} + + for file in checkpoint_files.values(): + assert os.path.isfile(file), f'{file} is not a valid file' + + for key in hp_keys: + ckpt_file = checkpoint_files[key] + ckpt_dict = torch.load(ckpt_file) + full_hp_param = ckpt_dict[PARAM] + + # need to deal with slices that were averaged. + # the opposite of averaging here becomes an exact copy of the first slice + # I thought of 2 ways: + # implementation a. find a way for a client to pass a dict with patterns + # if any(re.search(pattern, folder) for pattern in WEIGHTS_TO_AVERAGE_PATTERNS): + # tp_rank = 0 + # tp_world_size = 1 + # the other approach is to assume that the saved data is correct and if full_hp_param.shape == + # self.shape that means we automatically copy? + # implementation b. 
+ # this version requires no additional data passed from the client + # if the shapes already match it must be slices that were averaged - so we just hack around those + if full_hp_param.shape == self.shape: + tp_rank = 0 + tp_world_size = 1 + + # special case for word_embeddings weights which get padded differently depending on TP degree. + # the converter to universal currently strips the original padding completely so the saved + # weight is padding-free and we just need to add new padding depending on the target TP + # degree + vocab_divisibility_padding_tensor = ckpt_dict.get( + VOCAB_DIVISIBILITY_PADDING_TENSOR, + None) + if vocab_divisibility_padding_tensor is not None: + # In the absence of data passed from the user wrt new padded vocab specific to tp degree + # we can again derive that data by reverse engineering the target shapes like so: + padded_target_vocab_size = self.shape[0] * tp_world_size + if padded_target_vocab_size > full_hp_param.shape[0]: + # Need to expand + padding_size = padded_target_vocab_size - full_hp_param.shape[0] + # Implement the following concat in efficient way using pad + #full_hp_param = torch.cat((full_hp_param, padding_tensor), 0) + full_hp_param = torch.nn.functional.pad(full_hp_param, + (0, + 0, + 0, + padding_size), + "constant", + 0) + full_hp_param[:-padding_size, :] = vocab_divisibility_padding_tensor + else: + # Need to shrink or keep the same + full_hp_param = full_hp_param[:padded_target_vocab_size, :] + + full_param_numel = full_hp_param.numel() + tp_slice_numel = self.numel() + # if key == FP32_WEIGHT_KEY and 'word_embeddings.weight' in folder: + # print_rank_0(f'{full_hp_param[:10]=}', force=True) + + + assert full_param_numel == tp_world_size * tp_slice_numel, \ + f'Loading {ckpt_file} full param numel {full_param_numel} != tensor slice numel {tp_slice_numel} * tp_world_size {tp_world_size}' + dst_tensor = hp_mapping.hp_fragment if key == FP32_WEIGHT_KEY else hp_mapping.get_optim_state_fragment( + key) + + # 
print(f"{full_hp_param.shape=} {full_param_numel=} {folder=}") + # print(f"{dst_tensor.shape=} {dst_tensor.numel()=}{folder=}") + + # since when we do many to 1 on tp we cat sometimes on dim=0 and other times on dim=1 we have to do exactly the same in reverse + chunk_dim = ckpt_dict.get(CAT_DIM, 0) + + # this performs the opposite of cat when merging TP slices + tp_hp_slice = full_hp_param.chunk(tp_world_size, chunk_dim)[tp_rank] + tp_hp_slice = tp_hp_slice.flatten() + + lp_frag_address = hp_mapping.lp_fragment_address + tp_hp_fragment = tp_hp_slice.narrow(0, + lp_frag_address.start, + lp_frag_address.numel) + assert dst_tensor.numel() == lp_frag_address.numel, \ + f'Load checkpoint {key} dst_tensor numel {dst_tensor.numel()} != src numel {lp_frag_address.numel}' + + # print(f"{key} SHAPE: {tp_hp_slice.shape=}") + # print(f"{key} SHAPE: {dst_tensor.shape=}") + # print(f"{key} SHAPE: {tp_hp_fragment.shape=}") + dst_tensor.data.copy_(tp_hp_fragment.data) + + +def enable_universal_checkpoint(param_list): + for param in param_list: + param.load_hp_checkpoint_state = types.MethodType(load_hp_checkpoint_state, + param) diff --git a/deepspeed/checkpoint/utils.py b/deepspeed/checkpoint/utils.py new file mode 100644 index 0000000..4e96d07 --- /dev/null +++ b/deepspeed/checkpoint/utils.py @@ -0,0 +1,31 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import os +from .constants import (MODEL_FILE_PREFIX, + MODEL_FILE_SUFFIX, + OPTIM_FILE_SUFFIX, + ZERO_FILE_PREFIX) + + +def get_model_ckpt_name_for_rank(base_folder, mp_rank_str): + ckpt_name = os.path.join( + base_folder, + MODEL_FILE_PREFIX + mp_rank_str + MODEL_FILE_SUFFIX, + ) + return ckpt_name + + +def get_zero_ckpt_name_for_rank(base_folder, dp_rank, mp_rank): + zero_prefix = f'{ZERO_FILE_PREFIX}{dp_rank}' + mp_rank_string = f'_{MODEL_FILE_PREFIX}{mp_rank:02d}' + zero_ckpt_name = os.path.join( + base_folder, + zero_prefix + mp_rank_string + OPTIM_FILE_SUFFIX, + ) + return zero_ckpt_name + + +def 
get_layer_ckpt_name_for_rank(base_folder, layer_id, tp_rank): + ckpt_file = f'{layer_id}-model_{tp_rank:02d}{MODEL_FILE_SUFFIX}' + ckpt_path = os.path.join(base_folder, ckpt_file) + return ckpt_path diff --git a/deepspeed/checkpoint/zero_checkpoint.py b/deepspeed/checkpoint/zero_checkpoint.py new file mode 100644 index 0000000..cb33e8e --- /dev/null +++ b/deepspeed/checkpoint/zero_checkpoint.py @@ -0,0 +1,148 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import torch + +from .constants import (BASE_OPTIMIZER_STATE, + GROUP_PADDINGS, + OPTIMIZER_STATE_DICT, + PARTITION_COUNT) + +from .reshape_utils import (basic_folder_validation, get_zero_files, merge_state) + +from .reshape_3d_utils import (model_3d_desc, get_model_3d_descriptor) + +GROUP_STATE_KEY = 'state' + + +class ZeROCheckpoint(object): + def __init__(self, dir): + basic_folder_validation(dir) + self.dir = dir + self.file_list = get_zero_files(dir) + self.num_files = len(self.file_list) + assert self.num_files > 0, f'No ZeRO files found in {dir}' + + self.src_3d = get_model_3d_descriptor(dir) + self.target_3d = model_3d_desc(pp_degree=self.src_3d.pp_degree, + tp_degree=self.src_3d.tp_degree, + dp_degree=self.src_3d.dp_degree) + self._3d_file_map = self.src_3d.reshape(self.target_3d) + + def get_src_world_size(self): + return self.src_3d.world_size() + + def get_src_tp_degree(self): + return self.src_3d.tp_degree + + def get_src_pp_degree(self): + return self.src_3d.pp_degree + + def get_src_dp_degree(self): + return self.src_3d.dp_degree + + def get_file_indices_for_rank(self, pp_index, tp_index, dp_index): + assert dp_index < len(self._3d_file_map), f'DP index {dp_index} >= DP degree {len(self._3d_file_map)}' + dp_2d_map = self._3d_file_map[dp_index] + return dp_2d_map.get_data(pp_index, tp_index) + + def get_files_for_rank(self, pp_index, tp_index, dp_index): + file_idx_list = self.get_file_indices_for_rank(pp_index, tp_index, dp_index) + return [self.file_list[idx] for idx in file_idx_list] + + def 
get_state_for_rank(self, + pp_index, + tp_index, + dp_index, + keys_to_ignore=[], + strip_tensor_paddings=True): + state_file_list = self.get_files_for_rank(pp_index, tp_index, dp_index) + merged_sd = None + for state_file in state_file_list: + sd = torch.load(state_file, map_location=torch.device('cpu')) + for key in keys_to_ignore: + sd.pop(key, None) + + if strip_tensor_paddings: + self._strip_tensor_paddings(sd) + + if merged_sd is None: + merged_sd = sd + else: + merged_sd = merge_state(merged_sd, sd) + + self._update_partition_count(merged_sd) + if strip_tensor_paddings: + self._clear_group_paddings(merged_sd) + + return merged_sd + + def print_3d_index_map(self, tag=None): + if tag: + print(f'3D index map: {tag}') + for dp_index, _2d_map in enumerate(self._3d_file_map): + _2d_map.print_data(f'dp = {dp_index}') + + def print_3d_file_map(self, tag=None): + if tag: + print(f'3D file map: {tag}') + for dp_index, _2d_map in enumerate(self._3d_file_map): + for pp_index in _2d_map.pp_degree: + for tp_index in _2d_map.tp_degree: + file_index_list = _2d_map.get_data(pp_index, tp_index) + file_list = [self.file_list[idx] for idx in file_index_list] + print(f'{pp_index}, {tp_index}, {dp_index} => {file_list}') + + def reshape(self, target_3d_desc: model_3d_desc): + self.target_3d = target_3d_desc + self._3d_file_map = self.src_3d.reshape(self.target_3d) + + def _strip_tensor_paddings(self, sd): + param_group_states = self._get_param_group_states(sd) + if param_group_states is None: + return + + group_paddings = self._get_optimizer_state(sd, GROUP_PADDINGS) + if group_paddings is None: + return + + for key, group_state in param_group_states.items(): + if group_paddings[key] == 0: + continue + for state_name, state_value in group_state.items(): + if torch.is_tensor(state_value): + raw_length = state_value.numel() - group_paddings[key] + group_state[state_name] = torch.narrow(state_value, + 0, + 0, + raw_length).clone() + + def _clear_group_paddings(self, sd): + 
group_paddings = self._get_optimizer_state(sd, GROUP_PADDINGS) + if group_paddings: + num_groups = len(group_paddings) + sd[OPTIMIZER_STATE_DICT][GROUP_PADDINGS] = [0] * num_groups + + def _get_optimizer_state(self, sd, state_key): + optimizer_state = sd.get(OPTIMIZER_STATE_DICT, None) + if optimizer_state is None: + return None + + return optimizer_state.get(state_key, None) + + def _get_param_group_states(self, sd): + optimizer_state = sd.get(OPTIMIZER_STATE_DICT, None) + if optimizer_state is None: + return None + + base_optimizer_state = optimizer_state.get(BASE_OPTIMIZER_STATE, None) + if base_optimizer_state is None: + return None + + return base_optimizer_state.get(GROUP_STATE_KEY, None) + + def _update_partition_count(self, sd): + partition_counts = self._get_optimizer_state(sd, PARTITION_COUNT) + if partition_counts: + num_groups = len(partition_counts) + sd[OPTIMIZER_STATE_DICT][PARTITION_COUNT] = [self.target_3d.dp_degree + ] * num_groups diff --git a/deepspeed/comm/__init__.py b/deepspeed/comm/__init__.py new file mode 100644 index 0000000..84d0f54 --- /dev/null +++ b/deepspeed/comm/__init__.py @@ -0,0 +1,52 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import torch +from .utils import * +from deepspeed import utils + +supported_torch_version = False + +# See more details at: https://github.com/pytorch/pytorch/pull/48767 +# The PG API in torch versions lesser than 1.8 are different so it is +# non-trivial to support both in the same API. We will just use the +# DS comm. backend in deepspeed/comm/comm.py if torch version if 1.8+. + +if older_torch(): + # Add custom deepspeed torch comm functions here since we can't import deepspeed.comm + # NOTE: We can't call torch.distributed directly here. Current hack is to import functions before calling them. 
+ supported_torch_version = False + from torch.distributed import * + + def get_world_group(): + return group.WORLD + + def get_global_rank(group, group_rank): + if hasattr(torch.distributed.distributed_c10d, "get_global_rank"): + from torch.distributed.distributed_c10d import get_global_rank as _get_global_rank + else: + from torch.distributed.distributed_c10d import _get_global_rank + return _get_global_rank(group, group_rank) + + def allgather_fn(output_tensor, input_tensor, group=None, async_op=False): + from torch.distributed import all_gather, get_world_size + from torch import chunk + output_tensors = list(chunk(output_tensor, get_world_size(group))) + return all_gather(output_tensors, input_tensor, group=group, async_op=async_op) + + def reduce_scatter_fn(output_tensor, input_tensor, group=None, async_op=False): + from torch.distributed import reduce_scatter, get_world_size + from torch import chunk + input_tensor_lst = list(chunk(input_tensor, get_world_size(group))) + return reduce_scatter(output_tensor, input_tensor_lst, group=group) + + def configure(deepspeed_config=None, + enabled=None, + prof_all=None, + prof_ops=None, + verbose=None): + utils.logger.warn( + "Communication logging is not supported in torch versions older than 1.8") + +else: + supported_torch_version = True + from .comm import * diff --git a/deepspeed/comm/backend.py b/deepspeed/comm/backend.py new file mode 100644 index 0000000..cd43686 --- /dev/null +++ b/deepspeed/comm/backend.py @@ -0,0 +1,43 @@ +'''Copyright The Microsoft DeepSpeed Team''' +''' DeepSpeed Communication Backend. + +In the future, directly use NCCL/MPI/Gloo/etc without requiring torch.distributed. Simply wrap torch.distributed for now. 
+ +# Custom DS Backends -- Direct C/Ops + - NCCL -- [EXPERIMENTAL] + - MPI -- [EXPERIMENTAL] + - RCCL -- [EXPERIMENTAL] + - GLOO -- [EXPERIMENTAL] + +# DS backend wrapper for torch.distributed [DEFAULT] + - T-NCCL -- [DEFAULT] + - T-GLOO + - T-MPI +''' +''' Backend is the base class + -- NcclBackend, MpiBackend, and TorchBackend are the main subclasses. TorchBackend is the only officially supported backend for now. +''' + + +class Backend(object): + def __init__(self, name='backend', rank=0, size=1): + self.name = name + # The world size and rank of the world process group + self.world_group = None + self.world_size = rank + self.world_rank = size + # Single process group (pg) implementation for now but keep a list for future + self.process_groups = [] + self.initialized = False + + def is_initialized(self): + return self.initialized + + def new_group(self): + # create a new pg and add it to pg list + pass + + def init_process_group(self): + # subclasses will initialize them fully + # - initialize a default world process group and add it to pg list + self.initialized = True diff --git a/deepspeed/comm/comm.py b/deepspeed/comm/comm.py new file mode 100644 index 0000000..302e3ea --- /dev/null +++ b/deepspeed/comm/comm.py @@ -0,0 +1,778 @@ +""" + Copyright 2021 The Microsoft DeepSpeed Team + + DeepSpeed Communication Package: deepspeed.comm + + deepspeed.comm + -- import and use deepspeeed.ops.comm + -- use torch.distributed directly if both this package and torch.distributed use the same NCCL version + -- use custom collectives + -- can either use torch.dist or ds.ops.comm? + + Note: the old 1-bit compressed allreduce variants that resided in deepspeed.runtime.comm will be moved here as well. + + deepspeed.comm API + -- must be kept fully compatible (same signatures) as torch.dist API to ensure backward/cross-framework compatibility. + -- e.g. 
if a client code used + from deepspeed import comm as dist + + instead of + import torch.distributed as dist + + The code should work without breaking any of the public torch.distributed functionality + + Future: + -- deepspeed groups API should be brought into ds.comm +""" + +from enum import Enum +import torch +import os + +from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout +from .constants import * +from deepspeed.accelerator import get_accelerator + + +class ReduceOp(Enum): + SUM = 0 + PRODUCT = 1 + MIN = 2 + MAX = 3 + BAND = 4 + BOR = 5 + BXOR = 6 + AVG = 7 + UNUSED = 8 + + +from deepspeed.utils.comms_logging import CommsLogger +from deepspeed.utils import timer, get_caller_func +from deepspeed.comm.torch import TorchBackend + +from deepspeed import utils +from datetime import timedelta + +# Current deepspeed.comm backend (cdb) global object for simple access by client code +use_ds_backend = False +cdb = None + +# Create global timer for ops +timers = timer.SynchronizedWallClockTimer() +timer_summary = {} + +comms_logger = CommsLogger() + +# Ensure we don't warn about base collectives more than once +has_warned_all_gather = False +has_warned_reduce_scatter = False + +# Maintain objects of all initialized ds backends and assign them using the API functions in this file +nccl_backend = None +mpi_backend = None + +# This should be set here so all rank/size information from the launcher can be propagated +from deepspeed.comm.utils import * + + +def _configure_using_config_file(config): + if config.comms_logger_enabled: + comms_logger.configure(config) + + +def configure( + deepspeed_config=None, + enabled=None, + prof_all=None, + prof_ops=None, + verbose=None, + debug=None, +): + + if deepspeed_config is not None: + _configure_using_config_file(deepspeed_config.comms_config) + + if enabled is not None: + comms_logger.enabled = enabled + + if prof_all is not None: + comms_logger.prof_all = prof_all + + if prof_ops is not None: + 
comms_logger.prof_ops = prof_ops + + if verbose is not None: + comms_logger.verbose = verbose + + if debug is not None: + comms_logger.debug = debug + + +# Logging wrapper for timing ops +def timed_op(func): + def log_wrapper(*args, **kwargs): + # Add enabled flag so that overhead to each comm op is two if conditions at most + if comms_logger.enabled: + if ('prof' in kwargs and kwargs['prof']) or comms_logger.prof_all or ( + 'log_name' in kwargs + and kwargs['log_name'] in comms_logger.prof_ops): + # Need func args for their defaults + func_args = get_default_args(func) + func_args.update(kwargs) + msg_size = get_msg_size_from_args(func, *args, **kwargs) + log_name = get_debug_log_name(func_args, comms_logger.debug) + timers(log_name).start() + # Return the op, then stop the op's timer + try: + return func(*args, **kwargs) + finally: + if comms_logger.enabled: + # Need to make op blocking for accurate logging + get_accelerator().synchronize() + # If we're using MPI, we can't simply sync the stream + if cdb.using_mpi: + cdb.barrier() + if ('prof' in kwargs and kwargs['prof']) or comms_logger.prof_all or ( + 'log_name' in kwargs + and kwargs['log_name'] in comms_logger.prof_ops): + log_name = get_debug_log_name(func_args, comms_logger.debug) + raw_name = func.__name__ + timers(log_name).stop() + # need temp var since 'elapsed' resets events + time_elapsed = timers(log_name).elapsed(reset=False) + comms_logger.append(raw_name, log_name, time_elapsed, msg_size) + + return log_wrapper + + +# For compatibility with torch distributed's init_process_group, we shall retain the signature from PyTorch code. +# DeepSpeed NCCL/MPI backend may not need all these params as we will have our own implementation. 
+# Please read full torch.distributed API docs from https://pytorch.org/docs/stable/distributed.html + + +# UNUSED: Future helper function to initialize DS backends +def init_deepspeed_backend(ds_backend): + global cdb + global nccl_backend + global mpi_backend + global use_ds_backend + + if ds_backend == NCCL_BACKEND: + utils.logger.warn("NCCL backend in DeepSpeed not yet implemented") + elif ds_backend == MPI_BACKEND: + utils.logger.warn("MPI backend in DeepSpeed not yet implemented") + elif ds_backend == GLOO_BACKEND: + utils.logger.warn("Gloo backend in DeepSpeed not yet implemented") + else: + utils.logger.warn(f"DeepSpeed does not support {ds_backend} backend") + + +def is_initialized(): + #assert cdb is not None, 'DeepSpeed backend not set, please initialize it using init_process_group()' + if cdb is None: + return False + else: + return cdb.is_initialized() + + +def destroy_process_group(group=None): + global cdb + return cdb.destroy_process_group(group=group) + + +def new_group(ranks): + global cdb + assert cdb is not None and cdb.is_initialized(), 'DeepSpeed backend not set, please initialize it using init_process_group()' + return cdb.new_group(ranks) + + +def is_available() -> bool: + + # Returns ``True`` if the deepspeed comm package is available. + + # TODO: load other ops. Clients including deepspeed itself should use deepspeed.comm to import + # any communication related primitives from this package. + # use hasattr(deepspeed.csrc.ops, "_comm") or something + return True + + +def set_backend(backend): + if not use_ds_backend: + utils.logger.error( + "DeepSpeed communication backend is required. Please use deepspeed.comm.init_distributed(backend, use_deepspeed=True) to use this functionality" + ) + raise RuntimeError( + 'Error: Custom DeepSpeed backend called without initializing DeepSpeed distributed.' 
+ ) + + global cdb + global nccl_backend + global mpi_backend + + try: + if backend_name == NCCL_BACKEND: + if nccl_backend is not None and nccl_backend.is_initialized(): + cdb = nccl_backend + elif backend_name == MPI_BACKEND: + if mpi_backend is not None and mpi_backend.is_initialized(): + cdb = mpi_backend + except Exception as inst: + print(inst) + + +@timed_op +def broadcast(tensor, + src, + group=None, + async_op=False, + prof=False, + log_name='broadcast', + debug=get_caller_func()): + global cdb + return cdb.broadcast(tensor=tensor, src=src, group=group, async_op=async_op) + + +@timed_op +def all_gather(tensor_list, + tensor, + group=None, + async_op=False, + prof=False, + log_name='all_gather', + debug=get_caller_func()): + global cdb + return cdb.all_gather(tensor_list=tensor_list, + tensor=tensor, + group=group, + async_op=async_op) + + +def has_reduce_scatter_base(): + global cdb + assert cdb is not None and cdb.is_initialized(), 'DeepSpeed backend not set, please initialize it using init_process_group()' + assert cdb.has_reduce_scatter_base is not None, 'has_reduce_scatter_base is not yet defined' + return cdb.has_reduce_scatter_base + + +def reduce_scatter_fn(output_tensor, + tensor, + op=ReduceOp.SUM, + group=None, + async_op=False, + prof=False, + debug=get_caller_func()): + global cdb + global has_warned_reduce_scatter + assert cdb is not None and cdb.is_initialized(), 'DeepSpeed backend not set, please initialize it using init_process_group()' + if cdb.has_reduce_scatter_base: + return reduce_scatter_base(output_tensor, + tensor, + op=op, + group=group, + async_op=async_op, + prof=prof, + debug=debug) + else: + if not has_warned_reduce_scatter: + utils.logger.warning( + "unable to find torch.distributed._reduce_scatter_base. will fall back to " + "torch.distributed.all_gather which will result in suboptimal performance. 
" + "please consider upgrading your pytorch installation.") + has_warned_reduce_scatter = True + input_tensor_lst = list(torch.chunk(tensor, cdb.get_world_size(group))) + return reduce_scatter(output_tensor, + input_tensor_lst, + op=op, + group=group, + async_op=async_op, + prof=prof, + debug=debug) + + +@timed_op +def reduce_scatter_base(output_tensor, + tensor, + op=ReduceOp.SUM, + group=None, + async_op=False, + prof=False, + log_name='reduce_scatter_base', + debug=get_caller_func()): + global cdb + return cdb.reduce_scatter_base(output_tensor=output_tensor, + input_tensor=tensor, + op=op, + group=group, + async_op=async_op) + + +@timed_op +def all_gather_base(output_tensor, + tensor, + group=None, + async_op=False, + prof=False, + log_name='all_gather_base', + debug=get_caller_func()): + global cdb + return cdb.all_gather_base(output_tensor=output_tensor, + input_tensor=tensor, + group=group, + async_op=async_op) + + +def has_allgather_base(): + global cdb + assert cdb is not None and cdb.is_initialized(), 'DeepSpeed backend not set, please initialize it using init_process_group()' + assert cdb.has_allgather_base is not None, 'has_allgather_base is not yet defined' + return cdb.has_allgather_base + + +def allgather_fn(output_tensor, + input_tensor, + group=None, + async_op=False, + debug=get_caller_func()): + global cdb + global has_warned_all_gather + assert cdb is not None and cdb.is_initialized(), 'DeepSpeed backend not set, please initialize it using init_process_group()' + if cdb.has_allgather_base: + return all_gather_base(output_tensor, + input_tensor, + group=group, + async_op=async_op, + debug=debug) + else: + if not has_warned_all_gather and get_rank() == 0: + utils.logger.warning( + "unable to find torch.distributed._all_gather_base. will fall back to " + "torch.distributed.all_gather which will result in suboptimal performance. 
" + "please consider upgrading your pytorch installation.") + has_warned_all_gather = True + output_tensors = list(torch.chunk(output_tensor, cdb.get_world_size(group))) + return all_gather(output_tensors, + input_tensor, + group=group, + async_op=async_op, + debug=debug) + + +@timed_op +def all_to_all_single(output, + tensor, + output_split_sizes=None, + input_split_sizes=None, + group=None, + async_op=False, + prof=False, + log_name='all_to_all_single', + debug=get_caller_func()): + global cdb + return cdb.all_to_all_single(output=output, + input=tensor, + output_split_sizes=output_split_sizes, + input_split_sizes=input_split_sizes, + group=group, + async_op=async_op) + + +@timed_op +def send(tensor, + dst, + group=None, + tag=0, + prof=False, + log_name='send', + debug=get_caller_func()): + global cdb + return cdb.send(tensor=tensor, dst=dst, group=group, tag=tag) + + +@timed_op +def recv(tensor, + src=None, + group=None, + tag=0, + prof=False, + log_name='recv', + debug=get_caller_func()): + global cdb + return cdb.recv(tensor=tensor, src=src, group=group, tag=tag) + + +@timed_op +def isend(tensor, + dst, + group=None, + tag=0, + prof=False, + log_name='isend', + debug=get_caller_func()): + global cdb + return cdb.send(tensor=tensor, dst=dst, group=group, tag=tag) + + +@timed_op +def irecv(tensor, + src=None, + group=None, + tag=0, + prof=False, + log_name='irecv', + debug=get_caller_func()): + global cdb + return cdb.recv(tensor=tensor, src=src, group=group, tag=tag) + + +@timed_op +def gather(tensor, + gather_list=None, + dst=0, + group=None, + async_op=False, + prof=False, + log_name='gather', + debug=get_caller_func()): + global cdb + return cdb.gather(tensor=tensor, + gather_list=gather_list, + dst=dst, + group=group, + async_op=async_op) + + +@timed_op +def scatter(tensor, + scatter_list=None, + src=0, + group=None, + async_op=False, + prof=False, + log_name='scatter', + debug=get_caller_func()): + global cdb + return cdb.scatter(tensor=tensor, + 
scatter_list=scatter_list,
+                       src=src,
+                       group=group,
+                       async_op=async_op)
+
+
+@timed_op
+def barrier(group=None,
+            async_op=False,
+            device_ids=None,
+            prof=False,
+            log_name='barrier',
+            debug=get_caller_func()):
+    global cdb
+    return cdb.barrier(group=group, async_op=async_op, device_ids=device_ids)
+
+
+@timed_op
+def monitored_barrier(group=None,
+                      timeout=None,
+                      wait_all_ranks=False,
+                      prof=False,
+                      log_name='monitored_barrier',
+                      debug=get_caller_func()):
+    global cdb
+    return cdb.monitored_barrier(group=group, timeout=timeout, wait_all_ranks=wait_all_ranks)
+
+
+def log_summary():
+    global cdb
+    barrier(log_name='log_summary_barrier')
+    if cdb.get_rank() == 0:
+        comms_logger.log_all()
+    barrier(log_name='log_summary_barrier')
+
+
+@timed_op
+def reduce(tensor,
+           dst,
+           op=ReduceOp.SUM,
+           group=None,
+           async_op=False,
+           prof=False,
+           log_name='reduce',
+           debug=get_caller_func()):
+    global cdb
+    return cdb.reduce(tensor=tensor, dst=dst, op=op, group=group, async_op=async_op)
+
+
+@timed_op
+def reduce_scatter(output,
+                   input_list,
+                   op=ReduceOp.SUM,
+                   group=None,
+                   async_op=False,
+                   prof=False,
+                   log_name='reduce_scatter',
+                   debug=get_caller_func()):
+    global cdb
+    return cdb.reduce_scatter(output=output,
+                              input_list=input_list,
+                              op=op,
+                              group=group,
+                              async_op=async_op)
+
+
+@timed_op
+def all_reduce(tensor,
+               op=ReduceOp.SUM,
+               group=None,
+               async_op=False,
+               prof=False,
+               log_name='all_reduce',
+               debug=get_caller_func()):
+    #if profile_comm:
+    # context of the timers?
+    # timers.start()
+    # TensorBoard logging for comm calls.? 
+ global cdb + #print(f'op = {op}, cdb= {cdb.name}') + return cdb.all_reduce(tensor, op, group, async_op) + + +def get_world_group(): + global cdb + assert cdb is not None and cdb.is_initialized(), 'DeepSpeed backend not set, please initialize it using init_process_group()' + return cdb.get_world_group() + + +def get_world_size(group=None) -> int: + """ + Returns the number of processes in the current process group + Args: + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + Returns: + The world size of the process group + -1, if not part of the group + """ + global cdb + + assert cdb is not None and cdb.is_initialized(), 'DeepSpeed backend not set, please initialize it using init_process_group()' + return cdb.get_world_size(group) + + +def get_rank(group=None): + """ + Returns the rank of the current process in the provided ``group`` or the + default group if none was provided. + Rank is a unique identifier assigned to each process within a distributed + process group. They are always consecutive integers ranging from 0 to + ``world_size``. + Args: + group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. 
+ Returns: + The rank of the process group + -1, if not part of the group + """ + global cdb + assert cdb is not None and cdb.is_initialized(), 'DeepSpeed backend not set, please initialize it using init_process_group()' + return cdb.get_rank(group) + + +def get_local_rank(): + """ + Helper function to get local rank after a backend has been set and initialized + Args: + None + Returns: + local rank (= GPU device ID) + """ + global cdb + assert cdb is not None and cdb.is_initialized(), 'DeepSpeed backend not set, please initialize it using init_process_group()' + return get_local_rank_from_launcher() + + +def get_global_rank(group=None, group_rank=0): + global cdb + assert cdb is not None and cdb.is_initialized(), 'DeepSpeed backend not set, please initialize it using init_process_group()' + return cdb.get_global_rank(group, group_rank) + + +# Main DeepSpeed Comms. public API. +def init_distributed(dist_backend=None, + auto_mpi_discovery=True, + distributed_port=TORCH_DISTRIBUTED_DEFAULT_PORT, + verbose=True, + timeout=default_pg_timeout, + init_method=None, + dist_init_required=None, + config=None, + rank=-1, + world_size=-1): + ''' Initialize dist backend, potentially performing MPI discovery if needed + + Arguments: + dist_backend: Optional (str). torch distributed backend, e.g., nccl, mpi, gloo + auto_mpi_discovery Optional (bool). if distributed environment variables are not set, attempt to discover them from MPI + distributed_port: Optional (int). torch distributed backend port + verbose: Optional (bool). verbose logging + timeout: Optional (timedelta). Timeout for operations executed against the process group. Default value equals 30 minutes. + init_method: Optional (string). Torch distributed, URL specifying how to initialize the process group. Default is “env://” if no init_method or store is specified. + config: Optional (dict). DeepSpeed configuration for setting up comms options (e.g. Comms profiling) + rank: Optional (int). 
The current manually specified rank. Some init_method like “tcp://” need the rank and world_size as well (see: https://pytorch.org/docs/stable/distributed.html#tcp-initialization) + world_size: Optional (int). Desired world_size for the TCP or Shared file-system initialization. + ''' + global cdb + + configure(deepspeed_config=config) + + if dist_init_required is None: + dist_init_required = cdb is None or not cdb.is_initialized() + + if cdb is None and torch.distributed.is_initialized(): + # The user initialized torch.dist themselves, create cdb and short-circuit + cdb = TorchBackend(dist_backend, timeout, init_method) + return + + if dist_init_required is False: + assert ( + cdb is not None and cdb.is_initialized() is True + ), "Distributed backend is not initialized. Please set dist_init_required to True or initialize before calling deepspeed.initialize()" + else: + # Initialize torch distributed if needed + required_env = ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"] + if auto_mpi_discovery and not all(map(lambda v: v in os.environ, required_env)): + if verbose: + utils.logger.info( + "Not using the DeepSpeed or dist launchers, attempting to detect MPI environment..." 
+ ) + if in_aml() and not in_dlts(): + patch_aml_env_for_torch_nccl_backend(verbose=verbose) + elif in_aws_sm(): + patch_aws_sm_env_for_torch_nccl_backend(verbose=verbose) + else: + mpi_discovery(distributed_port=distributed_port, verbose=verbose) + + if cdb is not None and cdb.is_initialized(): + if int(os.getenv('RANK', '0')) == 0: + utils.logger.info('Distributed backend already initialized') + else: + assert isinstance(timeout, timedelta) + if dist_backend == None: + dist_backend = get_accelerator().communication_backend_name() + if int(os.getenv('RANK', '0')) == 0: + utils.logger.info( + 'Initializing TorchBackend in DeepSpeed with backend {}'.format( + dist_backend)) + # Create a torch backend object, initialize torch distributed, and assign to cdb + cdb = TorchBackend(dist_backend, timeout, init_method, rank, world_size) + + +def mpi_discovery(distributed_port=TORCH_DISTRIBUTED_DEFAULT_PORT, verbose=True): + ''' + Discovery MPI environment via mpi4py and map to relevant dist state + ''' + from mpi4py import MPI + import subprocess + comm = MPI.COMM_WORLD + rank = comm.Get_rank() + world_size = comm.Get_size() + + master_addr = None + if rank == 0: + hostname_cmd = ["hostname -I"] + result = subprocess.check_output(hostname_cmd, shell=True) + master_addr = result.decode('utf-8').split()[0] + master_addr = comm.bcast(master_addr, root=0) + + # Determine local rank by assuming hostnames are unique + proc_name = MPI.Get_processor_name() + all_procs = comm.allgather(proc_name) + local_rank = sum([i == proc_name for i in all_procs[:rank]]) + + os.environ['RANK'] = str(rank) + os.environ['WORLD_SIZE'] = str(world_size) + os.environ['LOCAL_RANK'] = str(local_rank) + os.environ['MASTER_ADDR'] = master_addr + os.environ['MASTER_PORT'] = str(distributed_port) + + if verbose: + utils.logger.info( + "Discovered MPI settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}" + .format(os.environ['RANK'], + os.environ['LOCAL_RANK'], + 
os.environ['WORLD_SIZE'], + os.environ['MASTER_ADDR'], + os.environ['MASTER_PORT'])) + + if cdb is not None and cdb.is_initialized(): + assert cdb.get_rank() == rank, "MPI rank {} does not match torch rank {}".format( + rank, cdb.get_rank()) + assert cdb.get_world_size() == world_size, "MPI world size {} does not match torch world size {}".format( + world_size, cdb.get_world_size()) + + +def in_aml(): + # Are we running inside an Azure Machine Learning (AML) environment? + return 'AZUREML_EXPERIMENT_ID' in os.environ + + +def in_aws_sm(): + # Are we running inside an AWS SageMaker environment? + return 'SM_TRAINING_ENV' in os.environ + + +def in_dlts(): + # Are we running on a DLTS cluster? + return 'DLTS_JOB_ID' in os.environ + + +def patch_aml_env_for_torch_nccl_backend(master_port=6105, verbose=True): + """Helper routine to get and set environment variables. + This is adapted from Azure ML's documentation available from: + https://azure.github.io/azureml-web/docs/cheatsheet/distributed-training/#environment-variables-from-openmpi + """ + os.environ["RANK"] = os.environ["OMPI_COMM_WORLD_RANK"] + os.environ["WORLD_SIZE"] = os.environ["OMPI_COMM_WORLD_SIZE"] + single_node = int(os.environ["OMPI_COMM_WORLD_LOCAL_SIZE"]) == int( + os.environ["WORLD_SIZE"]) + + if not single_node: + master_node_params = os.environ["AZ_BATCH_MASTER_NODE"].split(":") + os.environ["MASTER_ADDR"] = master_node_params[0] + # Do not overwrite master port with that defined in AZ_BATCH_MASTER_NODE + if "MASTER_PORT" not in os.environ: + os.environ["MASTER_PORT"] = str(master_port) + else: + os.environ["MASTER_ADDR"] = os.environ["AZ_BATCHAI_MPI_MASTER_NODE"] + os.environ["MASTER_PORT"] = DEFAULT_AML_MASTER_PORT + + if verbose: + utils.logger.info("NCCL_SOCKET_IFNAME original value = {}".format( + os.environ["NCCL_SOCKET_IFNAME"])) + + os.environ["NCCL_SOCKET_IFNAME"] = DEFAULT_AML_NCCL_SOCKET_IFNAME + os.environ['LOCAL_RANK'] = os.environ["OMPI_COMM_WORLD_LOCAL_RANK"] + + if verbose: + 
utils.logger.info( + "Discovered AzureML settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}" + .format(os.environ['RANK'], + os.environ['LOCAL_RANK'], + os.environ['WORLD_SIZE'], + os.environ['MASTER_ADDR'], + os.environ['MASTER_PORT'])) + + +def patch_aws_sm_env_for_torch_nccl_backend(verbose=True): + """Helper routine to get and set environment variables when running inside an AWS SageMaker environment. + """ + os.environ["RANK"] = os.environ["OMPI_COMM_WORLD_RANK"] + os.environ['LOCAL_RANK'] = os.environ["OMPI_COMM_WORLD_LOCAL_RANK"] + os.environ["WORLD_SIZE"] = os.environ["OMPI_COMM_WORLD_SIZE"] + + if verbose: + utils.logger.info( + "Discovered AWS SageMaker settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}" + .format(os.environ['RANK'], + os.environ['LOCAL_RANK'], + os.environ['WORLD_SIZE'], + os.environ['MASTER_ADDR'], + os.environ['MASTER_PORT'])) diff --git a/deepspeed/comm/config.py b/deepspeed/comm/config.py new file mode 100644 index 0000000..2583951 --- /dev/null +++ b/deepspeed/comm/config.py @@ -0,0 +1,32 @@ +'''Copyright The Microsoft DeepSpeed Team''' +""" +Copyright (c) Microsoft Corporation +Licensed under the MIT license. 
+""" + +from pydantic import BaseModel +from .constants import * + + +class CommsConfig(BaseModel): + class Config: + validate_all = True + validate_assignment = True + use_enum_values = True + extra = 'forbid' + + +class CommsLoggerConfig(CommsConfig): + enabled: bool = COMMS_LOGGER_ENABLED_DEFAULT + prof_all: bool = COMMS_LOGGER_PROF_ALL_DEFAULT + prof_ops: list = COMMS_LOGGER_PROF_OPS_DEFAULT + verbose: bool = COMMS_LOGGER_VERBOSE_DEFAULT + debug: bool = COMMS_LOGGER_DEBUG_DEFAULT + + +class DeepSpeedCommsConfig: + def __init__(self, ds_config): + self.comms_logger_enabled = 'comms_logger' in ds_config + + if self.comms_logger_enabled: + self.comms_logger = CommsLoggerConfig(**ds_config['comms_logger']) diff --git a/deepspeed/comm/constants.py b/deepspeed/comm/constants.py new file mode 100644 index 0000000..45fd5ac --- /dev/null +++ b/deepspeed/comm/constants.py @@ -0,0 +1,46 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +NCCL_BACKEND = 'nccl' +MPI_BACKEND = 'mpi' +GLOO_BACKEND = 'gloo' +SCCL_BACKEND = 'sccl' + +DEFAULT_AML_MASTER_PORT = "54965" +DEFAULT_AML_NCCL_SOCKET_IFNAME = "^docker0,lo" + +######################################### +# Comms Logger +######################################### +# Comms Logger. By default, this feature is not enabled. 
+# Users can configure in ds_config.json as below example: +COMMS_LOGGER_FORMAT = ''' +The Comms Logger can be specified as: +"comms_logger": { + "enabled": true, + "verbose": false, + "prof_all": true, + "debug": false, + "prof_ops": ["all_reduce", "custom_all_reduce_name"] +} +''' +COMMS_LOGGER = "comms_logger" + +# Comms logger enable signal +COMMS_LOGGER_ENABLED = "enabled" +COMMS_LOGGER_ENABLED_DEFAULT = False + +# Comms logger verbose signal +COMMS_LOGGER_VERBOSE = "verbose" +COMMS_LOGGER_VERBOSE_DEFAULT = False + +# comms logger profile all ops signal +COMMS_LOGGER_PROF_ALL = "prof_all" +COMMS_LOGGER_PROF_ALL_DEFAULT = True + +# comms logger show all ops signal +COMMS_LOGGER_DEBUG = "debug" +COMMS_LOGGER_DEBUG_DEFAULT = False + +# comms logger profile specific ops in list +COMMS_LOGGER_PROF_OPS = "prof_ops" +COMMS_LOGGER_PROF_OPS_DEFAULT = [] diff --git a/deepspeed/comm/torch.py b/deepspeed/comm/torch.py new file mode 100644 index 0000000..0753e7c --- /dev/null +++ b/deepspeed/comm/torch.py @@ -0,0 +1,237 @@ +''' +Copyright 2021 The Microsoft DeepSpeed Team +''' +from deepspeed import utils + +from .utils import * +from .backend import * +from .comm import * + + +class TorchBackend(Backend): + """ + A light-weight wrapper class for torch.distributed API. + Only a subset of functions are wrapped. Once the init_process_group + is initialized, standard torch.distributed.* can be used directly + so no need to wrap all the functions. We can keep adding wrappers as + needed. 
+ """ + def __init__(self, + backend, + timeout, + init_method, + rank=-1, + world_size=-1, + name='torch'): + super(TorchBackend, self).__init__() + self.torch_version_before_18 = older_torch() + self.has_allgather_base = has_allgather_base() + self.has_reduce_scatter_base = has_reduce_scatter_base() + self.initialized = True + self.name = name + # Future functionality to support ds.initialize() on a single GPU + # The idea is to fake that dist backend is initialized even when + # it is not so we can run on a single GPU without doing any init_process_group + self.single_gpu_mode = True + self.init_process_group(backend, timeout, init_method, rank, world_size) + + def init_process_group(self, backend, timeout, init_method, rank, world_size): + if not torch.distributed.is_initialized(): + torch.distributed.init_process_group(backend, + timeout=timeout, + init_method=init_method, + rank=rank, + world_size=world_size) + self.using_mpi = torch.distributed.get_backend() == 'mpi' + + def all_reduce(self, + tensor, + op=torch.distributed.ReduceOp.SUM, + group=None, + async_op=False): + op = self._reduce_op(op) + return torch.distributed.all_reduce(tensor=tensor, + op=op, + group=group, + async_op=async_op) + + def reduce(self, tensor, dst, op=ReduceOp.SUM, group=None, async_op=False): + return torch.distributed.reduce(tensor=tensor, + dst=dst, + op=self._reduce_op(op), + group=group, + async_op=async_op) + + def reduce_scatter(self, + output, + input_list, + op=ReduceOp.SUM, + group=None, + async_op=False): + return torch.distributed.reduce_scatter(output=output, + input_list=input_list, + op=self._reduce_op(op), + group=group, + async_op=async_op) + + def broadcast(self, tensor, src, group=None, async_op=False): + return torch.distributed.broadcast(tensor=tensor, + src=src, + group=group, + async_op=async_op) + + def all_gather(self, tensor_list, tensor, group=None, async_op=False): + return torch.distributed.all_gather(tensor_list=tensor_list, + tensor=tensor, + 
group=group,
+                                            async_op=async_op)
+
+    def all_gather_base(self, output_tensor, input_tensor, group=None, async_op=False):
+        if self.has_allgather_base:
+            return torch.distributed.distributed_c10d._all_gather_base(
+                output_tensor=output_tensor,
+                input_tensor=input_tensor,
+                group=group,
+                async_op=async_op)
+        else:
+            utils.logger.warning(
+                "unable to find torch.distributed._all_gather_base. will fall back to "
+                "torch.distributed.all_gather which will result in suboptimal performance. "
+                "please consider upgrading your pytorch installation.")
+            pass
+
+    def reduce_scatter_base(self,
+                            output_tensor,
+                            input_tensor,
+                            op=ReduceOp.SUM,
+                            group=None,
+                            async_op=False):
+        if self.has_reduce_scatter_base:
+            return torch.distributed._reduce_scatter_base(output_tensor,
+                                                          input_tensor,
+                                                          op=self._reduce_op(op),
+                                                          group=group,
+                                                          async_op=async_op)
+        else:
+            utils.logger.warning(
+                "unable to find torch.distributed._reduce_scatter_base. will fall back to "
+                "torch.distributed.reduce_scatter which will result in suboptimal performance. 
" + "please consider upgrading your pytorch installation.") + pass + + def all_to_all_single(self, + output, + input, + output_split_sizes=None, + input_split_sizes=None, + group=None, + async_op=False): + return torch.distributed.all_to_all_single(output=output, + input=input, + output_split_sizes=output_split_sizes, + input_split_sizes=input_split_sizes, + group=group, + async_op=async_op) + + def send(self, tensor, dst, group=None, tag=0): + return torch.distributed.send(tensor=tensor, dst=dst, group=group, tag=tag) + + def recv(self, tensor, src=None, group=None, tag=0): + return torch.distributed.recv(tensor=tensor, src=src, group=group, tag=tag) + + def isend(self, tensor, dst, group=None, tag=0): + return torch.distributed.isend(tensor=tensor, dst=dst, group=group, tag=tag) + + def irecv(self, tensor, src=None, group=None, tag=0): + return torch.distributed.irecv(tensor=tensor, src=src, group=group, tag=tag) + + def gather(self, tensor, gather_list=None, dst=0, group=None, async_op=False): + return torch.distributed.gather(tensor=tensor, + gather_list=gather_list, + dst=dst, + group=group, + async_op=async_op) + + def scatter(self, tensor, scatter_list=None, src=0, group=None, async_op=False): + return torch.distributed.scatter(tensor=tensor, + scatter_list=scatter_list, + src=src, + group=group, + async_op=async_op) + + def barrier(self, + group=torch.distributed.GroupMember.WORLD, + async_op=False, + device_ids=None): + if group is None: + group = torch.distributed.GroupMember.WORLD + return torch.distributed.barrier(group=group, + async_op=async_op, + device_ids=device_ids) + + def monitored_barrier(self, + group=torch.distributed.GroupMember.WORLD, + timeout=None, + wait_all_ranks=False): + if group is None: + group = torch.distributed.GroupMember.WORLD + return torch.distributed.monitored_barrier(group=group, + timeout=timeout, + wait_all_ranks=wait_all_ranks) + + def get_rank(self, group=None): + return torch.distributed.get_rank(group=group) + + def 
get_world_size(self, group=None): + return torch.distributed.get_world_size(group=group) + + def is_initialized(self): + return torch.distributed.is_initialized() + + def get_backend(self, group=None): + return torch.distributed.get_backend(group=group) + + def new_group(self, ranks): + return torch.distributed.new_group(ranks) + + def get_global_rank(self, group, group_rank): + if hasattr(torch.distributed.distributed_c10d, "get_global_rank"): + from torch.distributed.distributed_c10d import get_global_rank as _get_global_rank + else: + from torch.distributed.distributed_c10d import _get_global_rank + return _get_global_rank(group, group_rank) + + def get_world_group(self): + return torch.distributed.group.WORLD + + def destroy_process_group(self, group=None): + return torch.distributed.destroy_process_group(group=group) + + def _reduce_op(self, op): + ''' + Helper function. If the op provided is not a torch.dist.ReduceOp, convert it and return + ''' + if not isinstance(op, torch.distributed.ReduceOp): + if op == ReduceOp.SUM: + op = torch.distributed.ReduceOp.SUM + elif op == ReduceOp.PRODUCT: + op = torch.distributed.ReduceOp.PRODUCT + elif op == ReduceOp.AVG: + op = torch.distributed.ReduceOp.AVG + elif op == ReduceOp.MIN: + op = torch.distributed.ReduceOp.MIN + elif op == ReduceOp.MAX: + op = torch.distributed.ReduceOp.MAX + elif op == ReduceOp.BAND: + op = torch.distributed.ReduceOp.BAND + elif op == ReduceOp.BOR: + op = torch.distributed.ReduceOp.BOR + elif op == ReduceOp.BXOR: + op = torch.distributed.ReduceOp.BXOR + return op + + +# This will become a light-weight wrapper around torch.distributed functions +# TODO: create some example to show how this wrapper can help profile communication +# TODO: make sure there is no performance regression with this approach +# TODO: explore monkey-patching if this does not work diff --git a/deepspeed/comm/utils.py b/deepspeed/comm/utils.py new file mode 100644 index 0000000..120838e --- /dev/null +++ 
b/deepspeed/comm/utils.py @@ -0,0 +1,158 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import os +import torch +import inspect +from deepspeed.utils import get_caller_func + + +def older_torch(): + ''' + Helper to lookup torch version. For versions less than 1.8, torch.dist + used torch.distributed.group.WORLD as the default group argument instead of None. + See more details at: https://github.com/pytorch/pytorch/pull/48767 + ''' + TORCH_MAJOR = int(torch.__version__.split('.')[0]) + TORCH_MINOR = int(torch.__version__.split('.')[1]) + if TORCH_MAJOR == 1 and TORCH_MINOR < 8: + return True + else: + return False + + +def has_allgather_base(): + ''' + Helper to check if torch.distributed has _all_gather_base + ''' + return hasattr(torch.distributed, "_all_gather_base") + + +def has_reduce_scatter_base(): + ''' + Helper to check if torch.distributed has _reduce_scatter_base + ''' + return hasattr(torch.distributed, "_reduce_scatter_base") + + +def get_local_rank_from_launcher(): + + # DeepSpeed launcher will set it so get from there + rank = os.environ.get('LOCAL_RANK') + + if rank is None: + rank = os.environ.get('OMPI_COMM_WORLD_LOCAL_RANK') + + # Make it a single process job and set rank to 0 + if rank is None: + rank = 0 + + return int(rank) + + +def get_world_rank_from_launcher(): + + # DeepSpeed launcher will set it so get from there + rank = os.environ.get('RANK') + + if rank is None: + rank = os.environ.get('OMPI_COMM_WORLD_RANK') + + # Make it a single process job and set rank to 0 + if rank is None: + rank = 0 + + return int(rank) + + +def get_world_size_from_launcher(): + # DeepSpeed launcher will set it so get from there + size = os.environ.get('WORLD_SIZE') + rank = os.environ.get('RANK') + + if size is None: + size = os.environ.get('OMPI_COMM_WORLD_SIZE') + + # Make it a single process job and set size to 1 + if size is None: + size = 1 + + if rank == 0: + print(f"set world size to {size}") + + return int(size) + + +def get_default_args(func): + 
signature = inspect.signature(func) + return { + k: v.default + for k, + v in signature.parameters.items() if v.default is not inspect.Parameter.empty + } + + +# We need this hacky function since torch doesn't consistently name or place the input tensor args +def get_tensor_position(func): + sig_params = inspect.signature(func).parameters + arg = None + # most colls + if 'tensor' in sig_params: + arg = 'tensor' + # reduce scatter coll + elif 'input_list' in sig_params: + arg = 'input_list' + # all_to_all and torch multiGPU colls + elif 'input_tensor_list' in sig_params: + arg = 'input_tensor_list' + if arg is None: + return -1 + else: + return list(sig_params).index(arg) + + +def get_tensor_kwarg(func, kwargs): + func_args = get_default_args(func) + func_args.update(kwargs) + arg = None + + if 'tensor' in func_args: + arg = func_args['tensor'] + elif 'input_list' in func_args: + arg = func_args['input_list'] + elif 'input_tensor_list' in func_args: + arg = func_args['input_tensor_list'] + return arg + + +def get_msg_size_from_args(func, *args, **kwargs): + # 3 cases: + # - tensor arg is in args + # - tensor arg is in kwargs + # - tensor arg is not present (e.g. barrier) + tensor_arg_position = -1 + tensor_arg = None + # check if tensor arg is in args + if len(args) > 0: + tensor_arg_position = get_tensor_position(func) + if tensor_arg_position > -1: + tensor_arg = args[get_tensor_position(func)] + # check if tensor arg is in kwargs + if tensor_arg is None and len(kwargs) > 0: + tensor_arg = get_tensor_kwarg(func, kwargs) + # if tensor arg is not present, no data is being transmitted + if tensor_arg is None: + return 0 + else: + # Sum of tensor sizes for list colls such as torch's all_to_all + # NOTE: msg_size for list colls will not be the actual size transmitted by a given MPI/NCCL call within the coll op. Instead, it's the total amount of data transmitted. 
+ if type(tensor_arg) is list: + return sum(x.element_size() * x.nelement() for x in tensor_arg) + else: + return tensor_arg.element_size() * tensor_arg.nelement() + + +def get_debug_log_name(func_args, debug): + if debug: + return func_args['log_name'] + ' | [Caller Func: ' + get_caller_func() + ']' + else: + return func_args['log_name'] diff --git a/deepspeed/compression/__init__.py b/deepspeed/compression/__init__.py new file mode 100644 index 0000000..8e4974e --- /dev/null +++ b/deepspeed/compression/__init__.py @@ -0,0 +1,5 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +from .compress import init_compression, redundancy_clean +from .scheduler import compression_scheduler +from .helper import convert_conv1d_to_linear diff --git a/deepspeed/compression/basic_layer.py b/deepspeed/compression/basic_layer.py new file mode 100644 index 0000000..a15f80d --- /dev/null +++ b/deepspeed/compression/basic_layer.py @@ -0,0 +1,925 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import torch +import math +from torch import nn +from torch.nn import init +import deepspeed.comm as dist +from .utils import TopKBinarizer, SymQuantizer, AsymQuantizer, TernaryQuantizer, BinaryQuantizer +from deepspeed.utils import logger + +g_mpu = None + + +class QuantAct(nn.Module): + """ + Class to quantize given activations. Note that when using this function, the input acttivation quantization range will be fixed for all + tokens/images for inference. This generally will affect some accuracy but achieve better latency performance. + Parameters: + ---------- + act_range_momentum : float, default 0.95 + Momentum for updating the activation quantization range. 
+ quant_mode : str, default 'symmetric' + """ + def __init__(self, act_range_momentum=0.95, quant_mode='symmetric'): + super(QuantAct, self).__init__() + + self.act_range_momentum = act_range_momentum + self.quant_mode = quant_mode + if quant_mode == 'symmetric': + self.act_function = SymQuantizer.apply + else: + self.act_function = AsymQuantizer.apply + + self.register_buffer('x_min_max', torch.zeros(2)) + + def forward(self, x, num_bits, *args): + """ + x: the activation that we need to quantize + num_bits: the number of bits we need to quantize the activation to + *args: some extra arguments that are useless but needed for align with the interface of other quantization functions + """ + + if self.training: + x_min = x.data.min() + x_max = x.data.max() + + # Initialization + if self.x_min_max[0] == self.x_min_max[1]: + self.x_min_max[0] = x_min + self.x_min_max[1] = x_max + + # if do not need momentum, please set self.act_range_momentum = 0 + self.x_min_max[0] = self.x_min_max[0] * self.act_range_momentum + x_min * ( + 1 - self.act_range_momentum) + self.x_min_max[1] = self.x_min_max[1] * self.act_range_momentum + x_max * ( + 1 - self.act_range_momentum) + + x_q = self.act_function(x, num_bits, self.x_min_max[0], self.x_min_max[1]) + + return x_q + + +class Embedding_Compress(nn.Embedding): + def __init__(self, *kargs): + super(Embedding_Compress, self).__init__(*kargs) + self.weight.start_bits = None + self.weight.target_bits = None + self.weight.q_period = None + self.weight_quantization_enabled_in_forward = False + self.weight_quantization_enabled = False + + def extra_repr(self): + return 'num_embeddings={}, embedding_dim={}, weight_quantization={}'.format( + self.num_embeddings, + self.embedding_dim, + self.weight.target_bits) + + def enable_weight_quantization(self, + start_bits, + target_bits, + quantization_period, + weight_quantization_enabled_in_forward, + quantization_type, + num_groups): + self.weight.start_bits = start_bits + self.weight.target_bits 
= target_bits + self.weight.q_period = quantization_period + self.weight_quantization_enabled_in_forward = weight_quantization_enabled_in_forward + if self.weight_quantization_enabled_in_forward: + logger.warning( + "************ A lot of MoQ features are not supported in quantize_weight_in_forward mode, please consider to use DS-FP16 optimizer************" + ) + if self.weight.target_bits >= 3: + if quantization_type == 'symmetric': + self.weight_quantizer = SymQuantizer.apply + else: + self.weight_quantizer = AsymQuantizer.apply + elif self.weight.target_bits == 2: + assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for ternary weight quantization' + self.weight_quantizer = TernaryQuantizer.apply + elif self.weight.target_bits == 1: + assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for binary weight quantization' + self.weight_quantizer = BinaryQuantizer.apply + # for embedding, we always use token-wise quantization + self.weight_quantize_num_groups = self.weight.size(0) + + def fix_weight_quantization(self): + self.weight.data = self.weight_quantizer(self.weight, + self.weight.target_bits, + None, + None, + self.weight_quantize_num_groups).data + self.weight_quantization_enabled_in_forward = False + return None + + def forward(self, input): + if self.weight_quantization_enabled_in_forward and self.weight_quantization_enabled: + weight = self.weight_quantizer(self.weight, + self.weight.target_bits, + None, + None, + self.weight_quantize_num_groups) + else: + weight = self.weight + + out = nn.functional.embedding(input, + weight, + self.padding_idx, + self.max_norm, + self.norm_type, + self.scale_grad_by_freq, + self.sparse) + return out + + +class LinearLayer_Compress(nn.Linear): + """ + Linear layer with compression. 
+ """ + def __init__(self, *kargs, bias=True): + super(LinearLayer_Compress, self).__init__(*kargs, bias=bias) + self.sparse_pruning_method = None + self.row_pruning_method = None + self.head_pruning_method = None + self.activation_quantization_method = None + self.weight.start_bits = None + self.weight.target_bits = None + self.weight.q_period = None + self.weight_quantization_enabled_in_forward = False + self.weight_quantization_enabled = False + self.sparse_pruning_enabled = False + self.row_pruning_enabled = False + self.head_pruning_enabled = False + self.activation_quantization_enabled = False + + def extra_repr(self): + return 'in_features={}, out_features={}, bias={}, sparse pruning={}, row pruning={}, head pruning={}, activation quantization={}, weight_quantization={}'.format( + self.in_features, self.out_features, self.bias is not None, self.sparse_pruning_method is not None, \ + self.row_pruning_method is not None, self.head_pruning_method is not None, self.activation_quantization_method is not None, self.weight.target_bits) + + def enable_sparse_pruning(self, ratio, method): + # Here, we support two cases: L1 norm based pruning and topk based pruning + self.sparse_pruning_ratio = ratio + self.sparse_pruning_method = method + if method == 'l1': + weight_norm = torch.abs(self.weight.data) + mask = TopKBinarizer.apply(weight_norm, self.sparse_pruning_ratio, False) + mask = mask.view(self.weight.size()) + mask = mask.to(self.weight.device) + elif method == 'topk': + self.sparse_mask_scores = nn.Parameter(torch.Tensor(self.weight.size())) + self.sparse_mask_scores.data = self.sparse_mask_scores.data.to( + self.weight.device) + init.kaiming_uniform_(self.sparse_mask_scores, a=math.sqrt(5)) + mask = None + else: + raise NotImplementedError + + self.register_buffer('sparse_pruning_mask', mask) + + def enable_row_pruning(self, ratio, method): + # Here, we support two cases: L1 norm based pruning and topk based pruning + self.row_pruning_ratio = ratio + 
self.row_pruning_method = method + + if method == 'l1': + # compute the l1 norm of each column + weight_norm = torch.norm(self.weight.data, p=1, dim=1) + mask = TopKBinarizer.apply(weight_norm, self.row_pruning_ratio, False) + mask = mask.view(-1, 1) + mask = mask.to(self.weight.device) + elif method == 'topk': + self.row_mask_scores = nn.Parameter(torch.Tensor(self.weight.size(0), 1)) + self.row_mask_scores.data = self.row_mask_scores.data.to(self.weight.device) + init.kaiming_uniform_(self.row_mask_scores, a=math.sqrt(5)) + mask = None + else: + raise NotImplementedError + + self.register_buffer('row_pruning_mask', mask) + + def enable_head_pruning(self, ratio, method, num_heads): + # Here, we support only topk based pruning + self.num_heads = num_heads + self.head_pruning_ratio = ratio + self.head_pruning_method = method + + if method not in ['topk']: + raise NotImplementedError + else: + self.head_pruning_ratio = ratio + self.head_pruning_scores = nn.Parameter(torch.Tensor( + 1, + self.num_heads)) # we apply the pruning to O matrix + self.head_pruning_scores.data = self.head_pruning_scores.data.to( + self.weight.device) + init.kaiming_uniform_(self.head_pruning_scores, a=math.sqrt(5)) + + def fix_sparse_pruning_helper(self): + mask = self.get_mask(pruning_type='sparse') + self.weight.data = self.weight.data * mask + del self.sparse_pruning_mask + if self.sparse_pruning_method == 'topk': + del self.sparse_mask_scores + self.sparse_pruning_method = None + self.sparse_pruning_enabled = False + return None + + def fix_row_col_pruning_helper(self, mask=None, dim_reduction=False): + # This function is used for row/col pruning + # particularly, if we have two back-to-back layers, F1 and F2; when + # we remove rows from F1, we also need to remove columns from F2 + # However, if we only have one layer, F1, then we only need to mask pruned + # rows as 0 in F1 + if mask is None: + mask = self.get_mask(pruning_type='row').bool() + if dim_reduction: + start_bits = 
self.weight.start_bits + target_bits = self.weight.target_bits + q_period = self.weight.q_period + self.weight = nn.Parameter(self.weight.data[mask.view(-1), :]) + self.weight.start_bits = start_bits + self.weight.target_bits = target_bits + self.weight.q_period = q_period + if self.bias is not None: + self.bias = nn.Parameter(self.bias.data[mask.view(-1)]) + self.out_features = self.weight.size(0) + else: + self.weight.data = self.weight.data * mask.view(-1, 1) + if self.bias is not None: + self.bias.data = self.bias.data * mask.view(-1) + + del self.row_pruning_mask + if self.row_pruning_method == 'topk': + del self.row_mask_scores + self.row_pruning_method = None + else: + # this is generally for column pruning + start_bits = self.weight.start_bits + target_bits = self.weight.target_bits + q_period = self.weight.q_period + self.weight = nn.Parameter(self.weight.data[:, mask.view(-1)]) + self.weight.start_bits = start_bits + self.weight.target_bits = target_bits + self.weight.q_period = q_period + self.in_features = self.weight.size(1) + mask = None + self.row_pruning_enabled = False + return mask + + def fix_head_pruning_helper(self, mask=None, num_heads=None, dim_reduction=False): + # similar as row/col pruning, head pruning also needs to prune QKV which is associated with O matrix + num_heads = num_heads if num_heads else self.num_heads + if mask is None: + if self.head_pruning_method == 'topk': + mask = self.get_mask(pruning_type='head').bool() + if dim_reduction: + shape = self.weight.size(0) + start_bits = self.weight.start_bits + target_bits = self.weight.target_bits + q_period = self.weight.q_period + self.weight = nn.Parameter(self.weight.data.t().reshape(num_heads, -1)[mask.view(-1), :].reshape(-1, shape).t()) + self.weight.start_bits = start_bits + self.weight.target_bits = target_bits + self.weight.q_period = q_period + else: + + shape = self.weight.size() + self.weight.data = (self.weight.data.t().reshape(self.num_heads, + -1) * + mask.view(-1, + 
1)).reshape(shape[1], + shape[0]).t() + + if self.head_pruning_method == 'topk': + del self.head_pruning_scores + self.head_pruning_method = None + else: + raise NotImplementedError + else: + start_bits = self.weight.start_bits + target_bits = self.weight.target_bits + q_period = self.weight.q_period + shape = self.weight.size(1) + self.weight = nn.Parameter(self.weight.data.reshape(num_heads, -1)[mask.view(-1), :].reshape(-1, shape)) + self.weight.start_bits = start_bits + self.weight.target_bits = target_bits + self.weight.q_period = q_period + if self.bias is not None: + self.bias = nn.Parameter(self.bias.data.reshape(num_heads, -1)[mask.view(-1), :].reshape(-1)) + self.head_pruning_enabled = False + return mask + + def get_mask(self, pruning_type='row'): + if pruning_type == 'sparse': + if self.sparse_pruning_method == 'l1': + return self.sparse_pruning_mask.to(self.weight.device) + elif self.sparse_pruning_method == 'topk': + return TopKBinarizer.apply(self.sparse_mask_scores, + self.sparse_pruning_ratio, + False) + else: + raise NotImplementedError + if pruning_type == 'row': + if self.row_pruning_method == 'l1': + return self.row_pruning_mask.to(self.weight.device) + elif self.row_pruning_method == 'topk': + return TopKBinarizer.apply(self.row_mask_scores, + self.row_pruning_ratio, + False) + else: + raise NotImplementedError + elif pruning_type == 'head': + if self.head_pruning_method == 'topk': + return TopKBinarizer.apply(self.head_pruning_scores, + self.head_pruning_ratio, + False) + else: + raise NotImplementedError + else: + raise NotImplementedError + + def enable_weight_quantization(self, + start_bits, + target_bits, + quantization_period, + weight_quantization_enabled_in_forward, + quantization_type, + num_groups): + self.weight.start_bits = start_bits + self.weight.target_bits = target_bits + self.weight.q_period = quantization_period + self.weight_quantization_enabled_in_forward = weight_quantization_enabled_in_forward + if 
self.weight_quantization_enabled_in_forward: + logger.warning( + "************ A lot of MoQ features are not supported in quantize_weight_in_forward mode, please consider to use DS-FP16 optimizer************" + ) + if self.weight.target_bits >= 3: + if quantization_type == 'symmetric': + self.weight_quantizer = SymQuantizer.apply + else: + self.weight_quantizer = AsymQuantizer.apply + elif self.weight.target_bits == 2: + assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for ternary weight quantization' + self.weight_quantizer = TernaryQuantizer.apply + elif self.weight.target_bits == 1: + assert quantization_type == 'symmetric', 'Only symmetric quantization is supported for binary weight quantization' + self.weight_quantizer = BinaryQuantizer.apply + self.weight_quantize_num_groups = num_groups + + def fix_weight_quantization(self): + self.weight.data = self.weight_quantizer(self.weight, + self.weight.target_bits, + None, + None, + self.weight_quantize_num_groups).data + self.weight_quantization_enabled_in_forward = False + return None + + def enable_activation_quantization(self, bits, quantization_type, range_calibration): + assert bits in [4, 8], 'Only 4/8 bits activation quantization are supported for now' + self.activation_quantization_bits = bits + self.activation_quantization_method = f"{quantization_type}_{range_calibration}" + if range_calibration == 'static': + self.activation_quantizer = QuantAct(quant_mode=quantization_type) + else: + if quantization_type == 'symmetric': + self.activation_quantizer = SymQuantizer.apply + else: + self.activation_quantizer = AsymQuantizer.apply + + def head_pruning_reshape(self, w, mask): + shape = w.shape + return (w.t().reshape(self.num_heads, + -1) * mask.view(-1, + 1)).reshape(shape[1], + shape[0]).t() + + def forward(self, input, skip_bias_add=False): + + if self.weight_quantization_enabled_in_forward and self.weight_quantization_enabled: + weight = self.weight_quantizer(self.weight, + 
self.weight.target_bits, + None, + None, + self.weight_quantize_num_groups) + bias = self.bias + else: + weight = self.weight + bias = self.bias + + if self.sparse_pruning_enabled and self.sparse_pruning_method: + mask = self.get_mask(pruning_type='sparse') + weight = weight * mask.view(self.weight.size()) + + if self.row_pruning_enabled and self.row_pruning_method: + mask = self.get_mask(pruning_type='row') + weight = weight * mask.view(-1, 1) + if bias is not None: + bias = bias * mask.view(-1) + + if self.head_pruning_enabled and self.head_pruning_method: + mask = self.get_mask(pruning_type='head') + weight = self.head_pruning_reshape(weight, mask) + + if self.activation_quantization_enabled: + if 'dynamic' in self.activation_quantization_method: + num_groups = input.numel() // input.size(-1) + else: + num_groups = 1 + input = self.activation_quantizer(input, + self.activation_quantization_bits, + None, + None, + num_groups) + + if skip_bias_add: + # used for mpu linear layers + output = nn.functional.linear(input, weight, None) + return output, bias + else: + output = nn.functional.linear(input, weight, bias) + return output + + +class Conv2dLayer_Compress(nn.Conv2d): + """ + Conv2D layer with compression. 
+ """ + def __init__(self, *kargs): + super(Conv2dLayer_Compress, self).__init__(*kargs) + self.sparse_pruning_method = None + self.channel_pruning_method = None + self.activation_quantization_method = None + self.weight.start_bits = None + self.weight.target_bits = None + self.weight.q_period = None + self.weight_quantization_enabled_in_forward = False + self.sparse_pruning_enabled = False + self.channel_pruning_enabled = False + self.activation_quantization_enabled = False + + def __repr__(self): + s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}' + ', stride={stride}') + if self.padding != (0, ) * len(self.padding): + s += ', padding={padding}' + if self.dilation != (1, ) * len(self.dilation): + s += ', dilation={dilation}' + if self.output_padding != (0, ) * len(self.output_padding): + s += ', output_padding={output_padding}' + if self.groups != 1: + s += ', groups={groups}' + if self.bias is None: + s += ', bias=False' + if self.padding_mode != 'zeros': + s += ', padding_mode={padding_mode}' + output = s.format(**self.__dict__) + + return output + ' sparse pruning={}, channel pruning={}, activation quantization={}, weight_quantization={}'.format( + self.sparse_pruning_method is not None, + self.channel_pruning_method is not None, + self.activation_quantization_method is not None, + self.weight.target_bits) + + def enable_sparse_pruning(self, ratio, method): + self.sparse_pruning_ratio = ratio + self.sparse_pruning_method = method + if method == 'l1': + weight_norm = torch.abs(self.weight.data) + mask = TopKBinarizer.apply(weight_norm, self.sparse_pruning_ratio, False) + mask = mask.view(self.weight.size()) + mask = mask.to(self.weight.device) + elif method == 'topk': + self.sparse_mask_scores = nn.Parameter(torch.Tensor(self.weight.size())) + self.sparse_mask_scores.data = self.sparse_mask_scores.data.to( + self.weight.device) + init.kaiming_uniform_(self.sparse_mask_scores, a=math.sqrt(5)) + mask = None + else: + raise NotImplementedError + + 
self.register_buffer('sparse_pruning_mask', mask) + + def enable_channel_pruning(self, ratio, method): + # Here, we support two cases: L1 norm based pruning and topk based pruning + self.channel_pruning_ratio = ratio + self.channel_pruning_method = method + + if method == 'l1': + # compute the l1 norm of each conv2d kernel (the last three dimension) + weight_norm = torch.norm(self.weight.data, p=1, dim=[1, 2, 3]) + mask = TopKBinarizer.apply(weight_norm, self.channel_pruning_ratio, False) + mask = mask.view(-1, 1, 1, 1) + mask = mask.to(self.weight.device) + elif method == 'topk': + self.channel_mask_scores = nn.Parameter( + torch.Tensor(self.weight.size(0), + 1, + 1, + 1)) + self.channel_mask_scores.data = self.channel_mask_scores.data.to( + self.weight.device) + init.kaiming_uniform_(self.channel_mask_scores, a=math.sqrt(5)) + mask = None + else: + raise NotImplementedError + + self.register_buffer('channel_pruning_mask', mask) + + def fix_sparse_pruning_helper(self): + mask = self.get_mask(pruning_type='sparse') + self.weight.data = self.weight.data * mask + del self.sparse_pruning_mask + if self.sparse_pruning_method == 'topk': + del self.sparse_mask_scores + self.sparse_pruning_method = None + self.sparse_pruning_enabled = False + return None + + def fix_channel_pruning_helper(self, mask=None, dim_reduction=False): + if mask is None: + if self.channel_pruning_method in ['l1', 'topk']: + mask = self.get_mask(pruning_type='channel').bool() + if dim_reduction: + start_bits = self.weight.start_bits + target_bits = self.weight.target_bits + q_period = self.weight.q_period + self.weight = nn.Parameter(self.weight.data[mask.view(-1), ...]) + self.weight.start_bits = start_bits + self.weight.target_bits = target_bits + self.weight.q_period = q_period + if self.bias is not None: + self.bias = nn.Parameter(self.bias.data[mask.view(-1)]) + else: + self.weight.data = self.weight.data * mask.view(-1, 1, 1, 1) + if self.bias is not None: + self.bias.data = self.bias.data * 
mask.view(-1) + del self.channel_pruning_mask + if self.channel_pruning_method == 'topk': + del self.channel_mask_scores + self.channel_pruning_method = None + else: + raise NotImplementedError + else: + start_bits = self.weight.start_bits + target_bits = self.weight.target_bits + q_period = self.weight.q_period + self.weight = nn.Parameter(self.weight.data[:, mask.view(-1), ...]) + self.weight.start_bits = start_bits + self.weight.target_bits = target_bits + self.weight.q_period = q_period + mask = None + self.channel_pruning_enabled = False + return mask + + def get_mask(self, pruning_type='sparse'): + if pruning_type == 'sparse': + if self.sparse_pruning_method == 'l1': + return self.sparse_pruning_mask.to(self.weight.device) + elif self.sparse_pruning_method == 'topk': + return TopKBinarizer.apply(self.sparse_mask_scores, + self.sparse_pruning_ratio, + False) + else: + raise NotImplementedError + elif pruning_type == 'channel': + if self.channel_pruning_method == 'l1': + return self.channel_pruning_mask.to(self.weight.device) + elif self.channel_pruning_method == 'topk': + return TopKBinarizer.apply(self.channel_mask_scores, + self.channel_pruning_ratio, + False) + else: + raise NotImplementedError + else: + raise NotImplementedError + + def fix_weight_quantization(self): + self.weight.data = self.weight_quantizer(self.weight, + self.weight.target_bits, + None, + None, + self.weight_quantize_num_groups).data + self.weight_quantization_enabled_in_forward = False + return None + + def enable_weight_quantization(self, + start_bits, + target_bits, + quantization_period, + weight_quantization_enabled_in_forward, + quantization_type, + num_groups): + self.weight.start_bits = start_bits + self.weight.target_bits = target_bits + self.weight.q_period = quantization_period + self.weight_quantization_enabled_in_forward = weight_quantization_enabled_in_forward + if self.weight_quantization_enabled_in_forward: + assert self.weight.target_bits >= 4, 'Only >=4 bits weight 
quantization are supported during forward pass for now' + logger.warning( + "************ A lot of MoQ features are not supported in quantize_weight_in_forward mode, please consider to use DS-FP16 optimizer************" + ) + if quantization_type == 'symmetric': + self.weight_quantizer = SymQuantizer.apply + else: + self.weight_quantizer = AsymQuantizer.apply + self.weight_quantize_num_groups = num_groups + + def enable_activation_quantization(self, bits, quantization_type, range_calibration): + assert bits in [4, 8], 'Only 4/8 bits activation quantization are supported for now' + self.activation_quantization_bits = bits + self.activation_quantization_method = f"{quantization_type}_{range_calibration}" + if range_calibration == 'static': + self.activation_quantizer = QuantAct(quant_mode=quantization_type) + else: + if quantization_type == 'symmetric': + self.activation_quantizer = SymQuantizer.apply + else: + self.activation_quantizer = AsymQuantizer.apply + + def forward(self, input): + + if self.weight_quantization_enabled_in_forward and self.weight_quantization_enabled: + weight = self.weight_quantizer(self.weight, + self.weight.target_bits, + None, + None, + self.weight_quantize_num_groups) + bias = self.bias + else: + weight = self.weight + bias = self.bias + + if self.sparse_pruning_enabled and self.sparse_pruning_method: + mask = self.get_mask(pruning_type='sparse') + weight = weight * mask.view(self.weight.size()) + + if self.channel_pruning_enabled: + mask = self.get_mask(pruning_type='channel') + weight = weight * mask.view(-1, 1, 1, 1) + if bias is not None: + bias = bias * mask.view(-1) + + if self.activation_quantization_enabled: + if 'dynamic' in self.activation_quantization_method: + num_groups = input.numel() // input[0].numel() + else: + num_groups = 1 + input = self.activation_quantizer(input, + self.activation_quantization_bits, + None, + None, + num_groups) + + return nn.functional.conv2d(input, + weight, + bias, + self.stride, + self.padding, + 
self.dilation, + self.groups) + + +class BNLayer_Compress(nn.BatchNorm2d): + def fix_channel_pruning_helper(self, mask, dim_reduction=True): + self.weight = nn.Parameter(self.weight.data[mask.view(-1)]) + self.bias = nn.Parameter(self.bias.data[mask.view(-1)]) + self.running_mean = self.running_mean[mask.view(-1)] + self.running_var = self.running_var[mask.view(-1)] + + +def _reduce(input_): + """All-reduce the the input tensor across model parallel group.""" + group = g_mpu.get_model_parallel_group() + + # Bypass the function if we are using only 1 GPU. + if dist.get_world_size(group=group) == 1: + return input_ + + # All-reduce. + dist.all_reduce(input_, group=group) + + return input_ + + +def split_tensor_along_last_dim(tensor, num_partitions, contiguous_split_chunks=False): + """Split a tensor along its last dimension. + Arguments: + tensor: input tensor. + num_partitions: number of partitions to split the tensor + contiguous_split_chunks: If True, make each chunk contiguous + in memory. + """ + # Get the size and dimension. + last_dim = tensor.dim() - 1 + assert tensor.size()[last_dim] % num_partitions == 0 + last_dim_size = tensor.size()[last_dim] // num_partitions + # Split. + tensor_list = torch.split(tensor, last_dim_size, dim=last_dim) + # Note: torch.split does not create contiguous tensors by default. + if contiguous_split_chunks: + return tuple(chunk.contiguous() for chunk in tensor_list) + + return tensor_list + + +def _split(input_): + """Split the tensor along its last dimension and keep the + corresponding slice.""" + group = g_mpu.get_model_parallel_group() + + # Bypass the function if we are using only 1 GPU. + if dist.get_world_size(group=group) == 1: + return input_ + + # Split along last dimension. + world_size = dist.get_world_size(group=group) + input_list = split_tensor_along_last_dim(input_, world_size) + + # Note: torch.split does not create contiguous tensors by default. 
+ rank = dist.get_rank(group=group) + output = input_list[rank].contiguous() + + return output + + +def _gather(input_): + """Gather tensors and concatinate along the last dimension.""" + group = g_mpu.get_model_parallel_group() + + # Bypass the function if we are using only 1 GPU. + if dist.get_world_size(group=group) == 1: + return input_ + + # Size and dimension. + last_dim = input_.dim() - 1 + rank = dist.get_rank(group=group) + world_size = dist.get_world_size(group=group) + + tensor_list = [torch.empty_like(input_) for _ in range(world_size)] + tensor_list[rank] = input_ + dist.all_gather(tensor_list, input_, group=group) + + # Note: torch.cat already creates a contiguous tensor. + output = torch.cat(tensor_list, dim=last_dim).contiguous() + + return output + + +class _CopyToModelParallelRegion(torch.autograd.Function): + """Pass the input to the model parallel region.""" + @staticmethod + def forward(ctx, input_): + return input_ + + @staticmethod + def backward(ctx, grad_output): + return _reduce(grad_output) + + +class _ReduceFromModelParallelRegion(torch.autograd.Function): + """All-redcue the input from the model parallel region.""" + @staticmethod + def forward(ctx, input_): + return _reduce(input_) + + @staticmethod + def backward(ctx, grad_output): + return grad_output + + +class _ScatterToModelParallelRegion(torch.autograd.Function): + """Split the input and keep only the corresponding chuck to the rank.""" + @staticmethod + def forward(ctx, input_): + return _split(input_) + + @staticmethod + def backward(ctx, grad_output): + return _gather(grad_output) + + +class _GatherFromModelParallelRegion(torch.autograd.Function): + """Gather the input from model parallel region and concatinate.""" + @staticmethod + def forward(ctx, input_): + return _gather(input_) + + @staticmethod + def backward(ctx, grad_output): + return _split(grad_output) + + +# ----------------- +# Helper functions. 
+# ----------------- + + +def copy_to_model_parallel_region(input_): + return _CopyToModelParallelRegion.apply(input_) + + +def reduce_from_model_parallel_region(input_): + return _ReduceFromModelParallelRegion.apply(input_) + + +def scatter_to_model_parallel_region(input_): + return _ScatterToModelParallelRegion.apply(input_) + + +def gather_from_model_parallel_region(input_): + return _GatherFromModelParallelRegion.apply(input_) + + +class ColumnParallelLinear_Compress(LinearLayer_Compress): + def __init__(self, + mpu, + input_size, + output_size, + bias=True, + gather_output=True, + skip_bias_add=False): + # Keep input parameters + global g_mpu + g_mpu = mpu + self.input_size = input_size + self.output_size = output_size + self.gather_output = gather_output + self.skip_bias_add = skip_bias_add + + # Divide the weight matrix along the last dimension. + world_size = mpu.get_model_parallel_world_size() + assert output_size % world_size == 0 + self.output_size_per_partition = output_size // world_size + + super(ColumnParallelLinear_Compress, + self).__init__(self.input_size, + self.output_size_per_partition, + bias=bias) + + def forward(self, input_): + # Set up backprop all-reduce. + input_parallel = copy_to_model_parallel_region(input_) + # Matrix multiply. + if self.skip_bias_add: + output_parallel, bias = super().forward(input_parallel, True) + else: + output_parallel = super().forward(input_parallel) + bias = None + if self.gather_output: + # All-gather across the partitions. 
+ output = gather_from_model_parallel_region(output_parallel) + else: + output = output_parallel + return output, bias + + +class RowParallelLinear_Compress(LinearLayer_Compress): + def __init__(self, + mpu, + input_size, + output_size, + bias=True, + input_is_parallel=False, + skip_bias_add=False): + # Keep input parameters + global g_mpu + g_mpu = mpu + self.input_size = input_size + self.output_size = output_size + self.input_is_parallel = input_is_parallel + self.skip_bias_add = skip_bias_add + + # Divide the weight matrix along the last dimension. + world_size = mpu.get_model_parallel_world_size() + assert input_size % world_size == 0 + self.input_size_per_partition = input_size // world_size + + super(RowParallelLinear_Compress, + self).__init__(self.input_size_per_partition, + self.output_size, + bias=bias) + + def forward(self, input_): + # Set up backprop all-reduce. + if self.input_is_parallel: + input_parallel = input_ + else: + input_parallel = scatter_to_model_parallel_region(input_) + # Matrix multiply. + output_parallel, bias = super().forward(input_parallel, True) + + # All-reduce across all the partitions. 
+ output_ = reduce_from_model_parallel_region(output_parallel) + if not self.skip_bias_add: + if bias is not None: + output = output_ + bias + else: + output = output_ + output_bias = None + else: + output = output_ + output_bias = bias + return output, output_bias diff --git a/deepspeed/compression/compress.py b/deepspeed/compression/compress.py new file mode 100644 index 0000000..bf3b6c2 --- /dev/null +++ b/deepspeed/compression/compress.py @@ -0,0 +1,233 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import re +from .helper import compression_preparation, fix_compression, recursive_getattr, is_module_compressible +from .config import get_compression_config +from ..runtime.config_utils import dict_raise_error_on_duplicate_keys +from .constants import * +import os +import json + + +def check_deepspeed_config(config): + if isinstance(config, dict): + return config + elif os.path.exists(config): + return json.load(open(config, + "r"), + object_pairs_hook=dict_raise_error_on_duplicate_keys) + else: + raise ValueError( + f"Expected a string path to an existing deepspeed config, or a dictionary. Received: {config}" + ) + + +def get_module_name(group_name, + model, + key_word, + exist_module_name, + mpu=None, + verbose=True): + ''' + get the associated module name from the model based on the key_word provided by users + ''' + return_module_name = [] + for name, module in model.named_modules(): + + module_check = is_module_compressible(module, mpu) + + if re.search(key_word, name) is not None and module_check: + if name in exist_module_name and verbose: + # logger.warning + raise ValueError( + f"{name} is already added to compression, please check your config file for {group_name}." 
+ ) + if name not in exist_module_name: + exist_module_name.add(name) + return_module_name.append(name) + return return_module_name, exist_module_name + + +def get_compress_methods(model, compress_methods, mpu=None): + # extract the compression module for each method in compress_methods + layer_added_compress_methods = [] + for method, method_content in compress_methods.items(): + if LAYER_REDUCTION in method: + continue + # for loop different methods, i.e., weight quantization, activation quantization etc + exist_module_name = set() + shared_parameters = method_content[ + SHARED_PARAMETERS] # get all the shared parameters + for group_name, method_parameters in method_content[DIFFERENT_GROUPS].items(): + # for loop different groups, i.e., weight quantization group 1, weight quantization group 2 etc + module_name_list = [] + related_module_name_list = [] + if method_parameters[DIFFERENT_GROUPS_RELATED_MODULE_SCOPE]: + # this is used for head/row/channel pruning, if users provide the related module scope, we can shrink the layer dim for them + # otherwise we just mask those as zeros + for key_word, related_key_words in zip(method_parameters[DIFFERENT_GROUPS_MODULE_SCOPE], method_parameters[DIFFERENT_GROUPS_RELATED_MODULE_SCOPE]): + module_name, exist_module_name = get_module_name(group_name, model, key_word, exist_module_name, mpu=mpu) + module_name_list.append(module_name) + tmp_related_module_name_list = [] + for rkw in related_key_words: + # related key word can be a list, for instance the QKV for O matrix in Attention + module_name, _ = get_module_name(group_name, model, rkw, set(), mpu=mpu) + tmp_related_module_name_list.append(module_name) + related_module_name_list.append(tmp_related_module_name_list) + else: + for key_word in method_parameters[DIFFERENT_GROUPS_MODULE_SCOPE]: + module_name, exist_module_name = get_module_name(group_name, model, key_word, exist_module_name, mpu=mpu) + module_name_list.append(module_name) + + if module_name_list: + # combine 
shared parameters with each group + combined_method_parameters = { + **(method_parameters.copy().pop(DIFFERENT_GROUPS_PARAMETERS)), + **shared_parameters + } + compression_item = [ + module_name_list, + related_module_name_list, + { + method: combined_method_parameters + } + ] + layer_added_compress_methods.append(compression_item) + return layer_added_compress_methods + + +def init_compression(model, deepspeed_config, teacher_model=None, mpu=None): + """ + Compress a model: replace linear/conv2d layer with deepspeed compression-aware modules + Args: + model (`torch.nn.Module`) + The model to compress. + deepspeed_config (`DeepSpeedConfig`) + The path of ds_config + mpu + The mpu module for Row/Column parallelism + """ + compress_methods = get_compression_config(check_deepspeed_config(deepspeed_config)) + if hasattr(model, 'module'): + c_model = model.module + else: + c_model = model + + # For layer reduction + if compress_methods[LAYER_REDUCTION][LAYER_REDUCTION_ENABLED]: + assert teacher_model is not None, "Teacher model is required for layer reduction" + student_initialization(c_model, teacher_model, deepspeed_config) + + layer_added_compress_methods = get_compress_methods(c_model, + compress_methods, + mpu=mpu) + compression_preparation(c_model, layer_added_compress_methods, mpu) + + return model + + +def redundancy_clean(model, deepspeed_config, mpu=None): + """ + Remove the redundancy of a model + Args: + model (`torch.nn.Module`) + The model to compress. 
def redundancy_clean(model, deepspeed_config, mpu=None):
    """
    Remove the redundancy of a model (physically shrink pruned dimensions).
    Args:
        model (`torch.nn.Module`)
            The model to compress.
        deepspeed_config (`DeepSpeedConfig`)
            The path of ds_config.
        mpu
            The mpu module for Row/Column parallelism.
    """
    compress_methods = get_compression_config(check_deepspeed_config(deepspeed_config))
    if hasattr(model, 'module'):
        c_model = model.module
    else:
        c_model = model

    layer_added_compress_methods_tmp = get_compress_methods(c_model,
                                                            compress_methods,
                                                            mpu=mpu)
    # Apply methods in a fixed order so that dimension reductions from one
    # technique are seen consistently by the next.
    order_list = [
        WEIGHT_QUANTIZATION,
        SPARSE_PRUNING,
        ROW_PRUNING,
        HEAD_PRUNING,
        CHANNEL_PRUNING,
        ACTIVATION_QUANTIZATION
    ]
    layer_added_compress_methods = sorted(
        layer_added_compress_methods_tmp,
        key=lambda x: order_list.index(list(x[2].keys())[0]))

    for module_name_lists, related_module_name_lists, compression_technique in layer_added_compress_methods:
        stored_mask = []
        # Masks only need to be kept when related modules must be shrunk too.
        need_mask = bool(related_module_name_lists)
        for i, mnl in enumerate(module_name_lists):
            for module_name in mnl:
                mask = fix_compression(c_model,
                                       module_name,
                                       compression_technique,
                                       dim_reduction=need_mask)
                if need_mask:
                    stored_mask.append(mask)
            if need_mask:
                # NOTE(review): stored_mask is never reset between the i
                # iterations, so stored_mask[j] assumes each group contributes
                # masks in lock-step with its related modules — confirm with
                # callers before changing.
                for rmnl in related_module_name_lists[i]:
                    for j, module_name in enumerate(rmnl):
                        mask = fix_compression(c_model,
                                               module_name,
                                               compression_technique,
                                               mask=stored_mask[j],
                                               dim_reduction=True)
    return model


def student_initialization(student_model, teacher_model, deepspeed_config):
    '''
    Given a student model and a teacher model, copy the selected teacher
    layers/modules into the student.
    Args:
        student_model (`torch.nn.Module`)
            The model whose weights will be updated.
        teacher_model (`torch.nn.Module`)
            The model that guides the student.
        deepspeed_config (`DeepSpeedConfig`)
            The path of ds_config.

    Config semantics:
        module_name_prefix (`str`)
            The prefix name before the layer number, e.g.
            "bert.encoder.layer" for BERT-base or "transformer.h" for GPT-2.
        teacher_layer (`list of integers`)
            The teacher layers used for the student's re-initialization,
            e.g. [1,3,5,7,9] matches the 2nd/4th/6th/8th/10th teacher layers
            to the first 5 student layers.
        other_module_name (`list of strings`)
            Extra modules copied verbatim from teacher to student, e.g.
            ['bert.pooler', 'bert.embeddings', 'classifier'].
    '''
    config = get_compression_config(check_deepspeed_config(deepspeed_config))
    compress_methods = config[LAYER_REDUCTION]

    module_name_prefix = compress_methods[MODULE_NAME_PREFIX]
    teacher_layer = compress_methods[TEACHER_LAYER]
    # The first len(teacher_layer) student layers are re-initialized.
    student_layer = list(range(len(teacher_layer)))
    other_module_name = compress_methods[OTHER_MODULE_NAME]

    assert len(student_layer) == len(teacher_layer)
    for s_name, t_name in zip(student_layer, teacher_layer):
        s_module = recursive_getattr(student_model,
                                     module_name_prefix + '.' + str(s_name))
        t_module = recursive_getattr(teacher_model,
                                     module_name_prefix + '.' + str(t_name))
        for s_param, t_param in zip(s_module.parameters(), t_module.parameters()):
            s_param.data.copy_(t_param.data)
    for name in other_module_name:
        s_module = recursive_getattr(student_model, name)
        t_module = recursive_getattr(teacher_model, name)
        # (removed leftover debug print of the module name)
        for s_param, t_param in zip(s_module.parameters(), t_module.parameters()):
            s_param.data.copy_(t_param.data)


def get_compression_config(param_dict):
    """Parse the `compression_training` section of a DeepSpeed config dict
    into one normalized sub-config per compression technique."""
    output = {}

    if COMPRESSION_TRAINING not in param_dict.keys():
        param_dict[COMPRESSION_TRAINING] = {}
    sub_param_dict = param_dict[COMPRESSION_TRAINING]
    output[WEIGHT_QUANTIZATION] = get_weight_quantization(sub_param_dict)
    output[ACTIVATION_QUANTIZATION] = get_activation_quantization(sub_param_dict)
    output[SPARSE_PRUNING] = get_sparse_pruning(sub_param_dict)
    output[ROW_PRUNING] = get_row_pruning(sub_param_dict)
    output[HEAD_PRUNING] = get_head_pruning(sub_param_dict)
    output[CHANNEL_PRUNING] = get_channel_pruning(sub_param_dict)

    output[LAYER_REDUCTION] = get_layer_reduction(sub_param_dict)

    return output


def get_layer_reduction(param_dict):
    """Return the layer-reduction sub-config (enabled flag plus its
    user-provided parameters when enabled)."""
    output = {}
    output[LAYER_REDUCTION_ENABLED] = LAYER_REDUCTION_ENABLED_DEFAULT
    # Hoisted: was previously computed twice.
    enabled = get_layer_reduction_enabled(param_dict)
    if enabled:
        output[LAYER_REDUCTION_ENABLED] = enabled
        for key, val in get_layer_reduction_params(param_dict).items():
            output[key] = val
    return output


def get_layer_reduction_enabled(param_dict):
    """Return the user's `layer_reduction.enabled` flag, or False when the
    section is absent."""
    if LAYER_REDUCTION in param_dict.keys():
        return get_scalar_param(param_dict[LAYER_REDUCTION],
                                LAYER_REDUCTION_ENABLED,
                                LAYER_REDUCTION_ENABLED_DEFAULT)
    else:
        return False
def get_layer_reduction_params(param_dict):
    """Return the layer-reduction parameters (minus the 'enabled' flag),
    or an empty dict when the section is absent."""
    if LAYER_REDUCTION in param_dict.keys():
        layer_reduction_params = copy.copy(param_dict[LAYER_REDUCTION])
        # Drop the enable flag; everything else is a tuning parameter.
        # Default added so a missing 'enabled' key no longer raises KeyError.
        layer_reduction_params.pop(LAYER_REDUCTION_ENABLED, None)
        return layer_reduction_params
    else:
        # Bug fix: previously returned False, which breaks callers that
        # iterate the result with .items(); an empty dict is the correct
        # "no parameters" value.
        return {}


def get_quantize_enabled(param_dict):
    """Return True when weight quantization is enabled in the compression
    section of the config."""
    if COMPRESSION_TRAINING not in param_dict.keys():
        return False

    sub_param_dict = param_dict[COMPRESSION_TRAINING]
    output = get_weight_quantization_shared_parameters(sub_param_dict)
    return output[WEIGHT_QUANTIZE_ENABLED]


def get_weight_quantization(param_dict):
    """Parse the weight-quantization sub-config (shared parameters plus the
    per-group settings when the technique is enabled)."""
    output = {}
    if WEIGHT_QUANTIZATION not in param_dict.keys():
        param_dict[WEIGHT_QUANTIZATION] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
    sub_param_dict = param_dict[WEIGHT_QUANTIZATION]
    # shared parameters
    output[SHARED_PARAMETERS] = get_weight_quantization_shared_parameters(sub_param_dict)
    # each sub-group
    if output[SHARED_PARAMETERS][WEIGHT_QUANTIZE_ENABLED]:
        # Bug fix: message previously read "Weigh Quantization".
        assert DIFFERENT_GROUPS in sub_param_dict.keys(), f"Weight Quantization is enabled, {DIFFERENT_GROUPS} must be specified"
        output[DIFFERENT_GROUPS] = get_weight_quantization_different_groups(sub_param_dict)
    return output


def get_weight_quantization_shared_parameters(param_dict):
    """Read the shared weight-quantization knobs, falling back to the
    documented defaults when the `shared_parameters` section is absent."""
    output = {}
    if SHARED_PARAMETERS in param_dict.keys():
        sub_param_dict = param_dict[SHARED_PARAMETERS]
        output[WEIGHT_QUANTIZE_ENABLED] = get_scalar_param(
            sub_param_dict,
            WEIGHT_QUANTIZE_ENABLED,
            WEIGHT_QUANTIZE_ENABLED_DEFAULT)
        output[WEIGHT_QUANTIZE_KERNEL] = get_scalar_param(
            sub_param_dict,
            WEIGHT_QUANTIZE_KERNEL,
            WEIGHT_QUANTIZE_KERNEL_DEFAULT)
        output[WEIGHT_QUANTIZE_SCHEDULE_OFFSET] = get_scalar_param(
            sub_param_dict,
            WEIGHT_QUANTIZE_SCHEDULE_OFFSET,
            WEIGHT_QUANTIZE_SCHEDULE_OFFSET_DEFAULT)
        output[WEIGHT_QUANTIZE_GROUPS] = get_scalar_param(
            sub_param_dict,
            WEIGHT_QUANTIZE_GROUPS,
            WEIGHT_QUANTIZE_GROUPS_DEFAULT)
        output[WEIGHT_QUANTIZE_VERBOSE] = get_scalar_param(
            sub_param_dict,
            WEIGHT_QUANTIZE_VERBOSE,
            WEIGHT_QUANTIZE_VERBOSE_DEFAULT)
        output[WEIGHT_QUANTIZE_TYPE] = get_scalar_param(sub_param_dict,
                                                        WEIGHT_QUANTIZE_TYPE,
                                                        WEIGHT_QUANTIZE_TYPE_DEFAULT)
        output[WEIGHT_QUANTIZE_IN_FORWARD_ENABLED] = get_scalar_param(
            sub_param_dict,
            WEIGHT_QUANTIZE_IN_FORWARD_ENABLED,
            WEIGHT_QUANTIZE_IN_FORWARD_ENABLED_DEFAULT)
        assert output[WEIGHT_QUANTIZE_TYPE] in [WEIGHT_QUANTIZE_SYMMETRIC, WEIGHT_QUANTIZE_ASYMMETRIC], f"Invalid weight quantize type. Supported types: [{WEIGHT_QUANTIZE_SYMMETRIC}, {WEIGHT_QUANTIZE_ASYMMETRIC}]"
        output[WEIGHT_QUANTIZE_ROUNDING] = get_scalar_param(
            sub_param_dict,
            WEIGHT_QUANTIZE_ROUNDING,
            WEIGHT_QUANTIZE_ROUNDING_DEFAULT)
        assert output[WEIGHT_QUANTIZE_ROUNDING] in [WEIGHT_QUANTIZE_NEAREST_ROUNDING, WEIGHT_QUANTIZE_STOCHASTIC_ROUNDING], f"Invalid weight quantize rounding. Supported types: [{WEIGHT_QUANTIZE_NEAREST_ROUNDING}, {WEIGHT_QUANTIZE_STOCHASTIC_ROUNDING}]"
        # fp16 mixed quantize is a nested section with its own enable flag.
        if WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE in sub_param_dict.keys():
            output[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE] = get_scalar_param(
                sub_param_dict[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE],
                WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED,
                WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED_DEFAULT)
            output[WEIGHT_QUANTIZE_CHANGE_RATIO] = get_scalar_param(
                sub_param_dict[WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE],
                WEIGHT_QUANTIZE_CHANGE_RATIO,
                WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT)
        else:
            output[
                WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE] = WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED_DEFAULT
            output[WEIGHT_QUANTIZE_CHANGE_RATIO] = WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT
    else:
        output[WEIGHT_QUANTIZE_ENABLED] = WEIGHT_QUANTIZE_ENABLED_DEFAULT
        output[WEIGHT_QUANTIZE_KERNEL] = WEIGHT_QUANTIZE_KERNEL_DEFAULT
        output[WEIGHT_QUANTIZE_SCHEDULE_OFFSET] = WEIGHT_QUANTIZE_SCHEDULE_OFFSET_DEFAULT
        output[WEIGHT_QUANTIZE_GROUPS] = WEIGHT_QUANTIZE_GROUPS_DEFAULT
        output[WEIGHT_QUANTIZE_VERBOSE] = WEIGHT_QUANTIZE_VERBOSE_DEFAULT
        output[WEIGHT_QUANTIZE_TYPE] = WEIGHT_QUANTIZE_TYPE_DEFAULT
        output[WEIGHT_QUANTIZE_ROUNDING] = WEIGHT_QUANTIZE_ROUNDING_DEFAULT
        output[
            WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE] = WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED_DEFAULT
        output[WEIGHT_QUANTIZE_CHANGE_RATIO] = WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT
    return output


def get_weight_quantization_different_groups(param_dict):
    """Validate and normalize each weight-quantization group entry."""
    output = {}
    sub_param_dict = param_dict[DIFFERENT_GROUPS]

    def get_params(name, group_dict):
        # Per-group required keys plus the optional quantization period.
        assert WEIGHT_QUANTIZE_START_BITS in group_dict.keys(), f"{WEIGHT_QUANTIZE_START_BITS} must be specified for weight quantization group {name}"
        assert WEIGHT_QUANTIZE_TARGET_BITS in group_dict.keys(), f"{WEIGHT_QUANTIZE_TARGET_BITS} must be specified for weight quantization group {name}"
        group_dict[WEIGHT_QUANTIZATION_PERIOD] = get_scalar_param(
            group_dict,
            WEIGHT_QUANTIZATION_PERIOD,
            WEIGHT_QUANTIZATION_PERIOD_DEFAULT)
        return group_dict

    for k, v in sub_param_dict.items():
        output[k] = {}
        output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(
            k,
            sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
        output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(
            sub_param_dict[k],
            DIFFERENT_GROUPS_MODULE_SCOPE,
            DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
        output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
            sub_param_dict[k],
            DIFFERENT_GROUPS_RELATED_MODULE_SCOPE,
            DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)

    return output


def get_activation_quantization(param_dict):
    """Parse the activation-quantization sub-config (shared parameters plus
    per-group settings when enabled)."""
    output = {}
    if ACTIVATION_QUANTIZATION not in param_dict.keys():
        param_dict[ACTIVATION_QUANTIZATION] = {
            SHARED_PARAMETERS: {},
            DIFFERENT_GROUPS: {}
        }
    sub_param_dict = param_dict[ACTIVATION_QUANTIZATION]
    # shared parameters
    output[SHARED_PARAMETERS] = get_activation_quantization_shared_parameters(
        sub_param_dict)
    # each sub-group
    if output[SHARED_PARAMETERS][ACTIVATION_QUANTIZATION_ENABLED]:
        assert DIFFERENT_GROUPS in sub_param_dict.keys(), f"Activation Quantization is enabled, {DIFFERENT_GROUPS} must be specified"
        output[DIFFERENT_GROUPS] = get_activation_quantization_different_groups(
            sub_param_dict)
    return output
def get_activation_quantization_shared_parameters(param_dict):
    """Read the shared activation-quantization knobs, falling back to the
    documented defaults when the `shared_parameters` section is absent."""
    output = {}
    if SHARED_PARAMETERS in param_dict.keys():
        sub_param_dict = param_dict[SHARED_PARAMETERS]
        output[ACTIVATION_QUANTIZATION_ENABLED] = get_scalar_param(
            sub_param_dict,
            ACTIVATION_QUANTIZATION_ENABLED,
            ACTIVATION_QUANTIZATION_ENABLED_DEFAULT)
        output[ACTIVATION_QUANTIZE_TYPE] = get_scalar_param(
            sub_param_dict,
            ACTIVATION_QUANTIZE_TYPE,
            ACTIVATION_QUANTIZE_TYPE_DEFAULT)
        # Only symmetric/asymmetric quantization is supported.
        assert output[ACTIVATION_QUANTIZE_TYPE] in [ACTIVATION_QUANTIZE_SYMMETRIC, ACTIVATION_QUANTIZE_ASYMMETRIC], f"Invalid activation quantize type. Supported types: [{ACTIVATION_QUANTIZE_SYMMETRIC}, {ACTIVATION_QUANTIZE_ASYMMETRIC}]"
        output[ACTIVATION_QUANTIZE_RANGE] = get_scalar_param(
            sub_param_dict,
            ACTIVATION_QUANTIZE_RANGE,
            ACTIVATION_QUANTIZE_RANGE_DEFAULT)
        # Range calibration is either dynamic (per batch) or static.
        assert output[ACTIVATION_QUANTIZE_RANGE] in [ACTIVATION_QUANTIZE_RANGE_DYNAMIC, ACTIVATION_QUANTIZE_RANGE_STATIC], f"Invalid activation quantize range calibration. Supported types: [{ACTIVATION_QUANTIZE_RANGE_DYNAMIC}, {ACTIVATION_QUANTIZE_RANGE_STATIC}]"
        output[ACTIVATION_QUANTIZE_SCHEDULE_OFFSET] = get_scalar_param(
            sub_param_dict,
            ACTIVATION_QUANTIZE_SCHEDULE_OFFSET,
            ACTIVATION_QUANTIZE_SCHEDULE_OFFSET_DEFAULT)
    else:
        output[ACTIVATION_QUANTIZATION_ENABLED] = ACTIVATION_QUANTIZATION_ENABLED_DEFAULT
        output[ACTIVATION_QUANTIZE_TYPE] = ACTIVATION_QUANTIZE_TYPE_DEFAULT
        output[ACTIVATION_QUANTIZE_RANGE] = ACTIVATION_QUANTIZE_RANGE_DEFAULT
        output[
            ACTIVATION_QUANTIZE_SCHEDULE_OFFSET] = ACTIVATION_QUANTIZE_SCHEDULE_OFFSET_DEFAULT
    return output


def get_activation_quantization_different_groups(param_dict):
    """Validate and normalize each activation-quantization group entry."""
    output = {}
    sub_param_dict = param_dict[DIFFERENT_GROUPS]

    def get_params(name, group_dict):
        # Each group must declare its bit width.
        assert ACTIVATION_QUANTIZE_BITS in group_dict.keys(), f"{ACTIVATION_QUANTIZE_BITS} must be specified for activation quantization group {name}"
        return group_dict

    for k, v in sub_param_dict.items():
        output[k] = {}
        output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(
            k,
            sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
        output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(
            sub_param_dict[k],
            DIFFERENT_GROUPS_MODULE_SCOPE,
            DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
        output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
            sub_param_dict[k],
            DIFFERENT_GROUPS_RELATED_MODULE_SCOPE,
            DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)

    return output


def get_sparse_pruning(param_dict):
    """Parse the sparse-pruning sub-config (shared parameters plus per-group
    settings when enabled)."""
    output = {}
    if SPARSE_PRUNING not in param_dict.keys():
        param_dict[SPARSE_PRUNING] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
    sub_param_dict = param_dict[SPARSE_PRUNING]
    # shared parameters
    output[SHARED_PARAMETERS] = get_sparse_pruning_shared_parameters(sub_param_dict)
    # each sub-group
    if output[SHARED_PARAMETERS][SPARSE_PRUNING_ENABLED]:
        assert DIFFERENT_GROUPS in sub_param_dict.keys(), f"Sparse Pruning is enabled, {DIFFERENT_GROUPS} must be specified"
        output[DIFFERENT_GROUPS] = get_sparse_pruning_different_groups(sub_param_dict)
    return output


def get_sparse_pruning_shared_parameters(param_dict):
    """Read the shared sparse-pruning knobs (enable flag, method, schedule
    offset), falling back to defaults when the section is absent."""
    output = {}
    if SHARED_PARAMETERS in param_dict.keys():
        sub_param_dict = param_dict[SHARED_PARAMETERS]
        output[SPARSE_PRUNING_ENABLED] = get_scalar_param(
            sub_param_dict,
            SPARSE_PRUNING_ENABLED,
            SPARSE_PRUNING_ENABLED_DEFAULT)
        output[SPARSE_PRUNING_METHOD] = get_scalar_param(sub_param_dict,
                                                         SPARSE_PRUNING_METHOD,
                                                         SPARSE_PRUNING_METHOD_DEFAULT)
        assert output[SPARSE_PRUNING_METHOD] in [SPARSE_PRUNING_METHOD_L1, SPARSE_PRUNING_METHOD_TOPK], f"Invalid sparse pruning method. Supported types: [{SPARSE_PRUNING_METHOD_L1}, {SPARSE_PRUNING_METHOD_TOPK}]"
        output[SPARSE_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(
            sub_param_dict,
            SPARSE_PRUNING_SCHEDULE_OFFSET,
            SPARSE_PRUNING_SCHEDULE_OFFSET_DEFAULT)
    else:
        output[SPARSE_PRUNING_ENABLED] = SPARSE_PRUNING_ENABLED_DEFAULT
        output[SPARSE_PRUNING_METHOD] = SPARSE_PRUNING_METHOD_DEFAULT
        output[SPARSE_PRUNING_SCHEDULE_OFFSET] = SPARSE_PRUNING_SCHEDULE_OFFSET_DEFAULT
    return output


def get_sparse_pruning_different_groups(param_dict):
    """Validate and normalize each sparse-pruning group entry."""
    output = {}
    sub_param_dict = param_dict[DIFFERENT_GROUPS]

    def get_params(name, group_dict):
        # Each group must declare its dense ratio.
        assert SPARSE_PRUNING_DENSE_RATIO in group_dict.keys(), f"{SPARSE_PRUNING_DENSE_RATIO} must be specified for sparse pruning group {name}"
        return group_dict

    for k, v in sub_param_dict.items():
        output[k] = {}
        output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(
            k,
            sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
        output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(
            sub_param_dict[k],
            DIFFERENT_GROUPS_MODULE_SCOPE,
            DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
        output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
            sub_param_dict[k],
            DIFFERENT_GROUPS_RELATED_MODULE_SCOPE,
            DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)

    return output
def get_row_pruning(param_dict):
    """Parse the row-pruning sub-config (shared parameters plus per-group
    settings when enabled)."""
    # Ensure the section exists so downstream readers always find both keys.
    section = param_dict.setdefault(ROW_PRUNING,
                                    {SHARED_PARAMETERS: {},
                                     DIFFERENT_GROUPS: {}})
    output = {SHARED_PARAMETERS: get_row_pruning_shared_parameters(section)}
    if output[SHARED_PARAMETERS][ROW_PRUNING_ENABLED]:
        assert DIFFERENT_GROUPS in section.keys(), f"Row Pruning is enabled, {DIFFERENT_GROUPS} must be specified"
        output[DIFFERENT_GROUPS] = get_row_pruning_different_groups(section)
    return output


def get_row_pruning_shared_parameters(param_dict):
    """Read the shared row-pruning knobs (enable flag, method, schedule
    offset), falling back to defaults when the section is absent."""
    output = {}
    if SHARED_PARAMETERS not in param_dict.keys():
        # No user section: everything takes its documented default.
        output[ROW_PRUNING_ENABLED] = ROW_PRUNING_ENABLED_DEFAULT
        output[ROW_PRUNING_METHOD] = ROW_PRUNING_METHOD_DEFAULT
        output[ROW_PRUNING_SCHEDULE_OFFSET] = ROW_PRUNING_SCHEDULE_OFFSET_DEFAULT
        return output

    shared = param_dict[SHARED_PARAMETERS]
    output[ROW_PRUNING_ENABLED] = get_scalar_param(shared,
                                                   ROW_PRUNING_ENABLED,
                                                   ROW_PRUNING_ENABLED_DEFAULT)
    output[ROW_PRUNING_METHOD] = get_scalar_param(shared,
                                                  ROW_PRUNING_METHOD,
                                                  ROW_PRUNING_METHOD_DEFAULT)
    assert output[ROW_PRUNING_METHOD] in [ROW_PRUNING_METHOD_L1, ROW_PRUNING_METHOD_TOPK], f"Invalid row pruning method. Supported types: [{ROW_PRUNING_METHOD_L1}, {ROW_PRUNING_METHOD_TOPK}]"
    output[ROW_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(
        shared,
        ROW_PRUNING_SCHEDULE_OFFSET,
        ROW_PRUNING_SCHEDULE_OFFSET_DEFAULT)
    return output


def get_row_pruning_different_groups(param_dict):
    """Validate and normalize each row-pruning group entry."""
    groups = param_dict[DIFFERENT_GROUPS]
    output = {}
    for name, group in groups.items():
        params = group[DIFFERENT_GROUPS_PARAMETERS]
        assert ROW_PRUNING_DENSE_RATIO in params.keys(), f"{ROW_PRUNING_DENSE_RATIO} must be specified for row pruning group {name}"
        output[name] = {
            DIFFERENT_GROUPS_PARAMETERS:
            params,
            DIFFERENT_GROUPS_MODULE_SCOPE:
            get_scalar_param(group,
                             DIFFERENT_GROUPS_MODULE_SCOPE,
                             DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT),
            DIFFERENT_GROUPS_RELATED_MODULE_SCOPE:
            get_scalar_param(group,
                             DIFFERENT_GROUPS_RELATED_MODULE_SCOPE,
                             DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT),
        }
    return output


def get_head_pruning(param_dict):
    """Parse the head-pruning sub-config (shared parameters plus per-group
    settings when enabled)."""
    section = param_dict.setdefault(HEAD_PRUNING,
                                    {SHARED_PARAMETERS: {},
                                     DIFFERENT_GROUPS: {}})
    output = {SHARED_PARAMETERS: get_head_pruning_shared_parameters(section)}
    if output[SHARED_PARAMETERS][HEAD_PRUNING_ENABLED]:
        assert DIFFERENT_GROUPS in section.keys(), f"Head Pruning is enabled, {DIFFERENT_GROUPS} must be specified"
        output[DIFFERENT_GROUPS] = get_head_pruning_different_groups(section)
    return output


def get_head_pruning_shared_parameters(param_dict):
    """Read the shared head-pruning knobs; `num_heads` is required only when
    head pruning is actually enabled."""
    output = {}
    if SHARED_PARAMETERS not in param_dict.keys():
        output[HEAD_PRUNING_ENABLED] = HEAD_PRUNING_ENABLED_DEFAULT
        output[HEAD_PRUNING_METHOD] = HEAD_PRUNING_METHOD_DEFAULT
        output[HEAD_PRUNING_SCHEDULE_OFFSET] = HEAD_PRUNING_SCHEDULE_OFFSET_DEFAULT
        return output

    shared = param_dict[SHARED_PARAMETERS]
    output[HEAD_PRUNING_ENABLED] = get_scalar_param(shared,
                                                    HEAD_PRUNING_ENABLED,
                                                    HEAD_PRUNING_ENABLED_DEFAULT)
    output[HEAD_PRUNING_METHOD] = get_scalar_param(shared,
                                                   HEAD_PRUNING_METHOD,
                                                   HEAD_PRUNING_METHOD_DEFAULT)
    assert output[HEAD_PRUNING_METHOD] in [HEAD_PRUNING_METHOD_L1, HEAD_PRUNING_METHOD_TOPK], f"Invalid head pruning method. Supported types: [{HEAD_PRUNING_METHOD_L1}, {HEAD_PRUNING_METHOD_TOPK}]"
    output[HEAD_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(
        shared,
        HEAD_PRUNING_SCHEDULE_OFFSET,
        HEAD_PRUNING_SCHEDULE_OFFSET_DEFAULT)
    if output[HEAD_PRUNING_ENABLED]:
        assert HEAD_PRUNING_NUM_HEADS in shared.keys(), f"{HEAD_PRUNING_NUM_HEADS} must be specified for head pruning"
        output[HEAD_PRUNING_NUM_HEADS] = shared[HEAD_PRUNING_NUM_HEADS]
    return output


def get_head_pruning_different_groups(param_dict):
    """Validate and normalize each head-pruning group entry."""
    groups = param_dict[DIFFERENT_GROUPS]
    output = {}
    for name, group in groups.items():
        params = group[DIFFERENT_GROUPS_PARAMETERS]
        assert HEAD_PRUNING_DENSE_RATIO in params.keys(), f"dense_ratio must be specified for head pruning group {name}"
        output[name] = {
            DIFFERENT_GROUPS_PARAMETERS:
            params,
            DIFFERENT_GROUPS_MODULE_SCOPE:
            get_scalar_param(group,
                             DIFFERENT_GROUPS_MODULE_SCOPE,
                             DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT),
            DIFFERENT_GROUPS_RELATED_MODULE_SCOPE:
            get_scalar_param(group,
                             DIFFERENT_GROUPS_RELATED_MODULE_SCOPE,
                             DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT),
        }
    return output
def get_channel_pruning(param_dict):
    """Parse the channel-pruning sub-config (shared parameters plus per-group
    settings when enabled)."""
    output = {}
    if CHANNEL_PRUNING not in param_dict.keys():
        param_dict[CHANNEL_PRUNING] = {SHARED_PARAMETERS: {}, DIFFERENT_GROUPS: {}}
    sub_param_dict = param_dict[CHANNEL_PRUNING]
    # shared parameters
    output[SHARED_PARAMETERS] = get_channel_pruning_shared_parameters(sub_param_dict)
    # each sub-group
    if output[SHARED_PARAMETERS][CHANNEL_PRUNING_ENABLED]:
        # Bug fix: message previously said "Sparse Pruning is enabled"
        # (copy-paste from get_sparse_pruning).
        assert DIFFERENT_GROUPS in sub_param_dict.keys(), f"Channel Pruning is enabled, {DIFFERENT_GROUPS} must be specified"
        output[DIFFERENT_GROUPS] = get_channel_pruning_different_groups(sub_param_dict)
    return output


def get_channel_pruning_shared_parameters(param_dict):
    """Read the shared channel-pruning knobs (enable flag, method, schedule
    offset), falling back to defaults when the section is absent."""
    output = {}
    if SHARED_PARAMETERS in param_dict.keys():
        sub_param_dict = param_dict[SHARED_PARAMETERS]
        output[CHANNEL_PRUNING_ENABLED] = get_scalar_param(
            sub_param_dict,
            CHANNEL_PRUNING_ENABLED,
            CHANNEL_PRUNING_ENABLED_DEFAULT)
        output[CHANNEL_PRUNING_METHOD] = get_scalar_param(
            sub_param_dict,
            CHANNEL_PRUNING_METHOD,
            CHANNEL_PRUNING_METHOD_DEFAULT)
        assert output[CHANNEL_PRUNING_METHOD] in [CHANNEL_PRUNING_METHOD_L1, CHANNEL_PRUNING_METHOD_TOPK], f"Invalid channel pruning method. Supported types: [{CHANNEL_PRUNING_METHOD_L1}, {CHANNEL_PRUNING_METHOD_TOPK}]"
        output[CHANNEL_PRUNING_SCHEDULE_OFFSET] = get_scalar_param(
            sub_param_dict,
            CHANNEL_PRUNING_SCHEDULE_OFFSET,
            CHANNEL_PRUNING_SCHEDULE_OFFSET_DEFAULT)
    else:
        output[CHANNEL_PRUNING_ENABLED] = CHANNEL_PRUNING_ENABLED_DEFAULT
        output[CHANNEL_PRUNING_METHOD] = CHANNEL_PRUNING_METHOD_DEFAULT
        output[CHANNEL_PRUNING_SCHEDULE_OFFSET] = CHANNEL_PRUNING_SCHEDULE_OFFSET_DEFAULT
    return output


def get_channel_pruning_different_groups(param_dict):
    """Validate and normalize each channel-pruning group entry."""
    output = {}
    sub_param_dict = param_dict[DIFFERENT_GROUPS]

    def get_params(name, group_dict):
        # Each group must declare its dense ratio.
        assert CHANNEL_PRUNING_DENSE_RATIO in group_dict.keys(), f"{CHANNEL_PRUNING_DENSE_RATIO} must be specified for channel pruning group {name}"
        return group_dict

    for k, v in sub_param_dict.items():
        output[k] = {}
        output[k][DIFFERENT_GROUPS_PARAMETERS] = get_params(
            k,
            sub_param_dict[k][DIFFERENT_GROUPS_PARAMETERS])
        output[k][DIFFERENT_GROUPS_MODULE_SCOPE] = get_scalar_param(
            sub_param_dict[k],
            DIFFERENT_GROUPS_MODULE_SCOPE,
            DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT)
        output[k][DIFFERENT_GROUPS_RELATED_MODULE_SCOPE] = get_scalar_param(
            sub_param_dict[k],
            DIFFERENT_GROUPS_RELATED_MODULE_SCOPE,
            DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT)

    return output


#########################################
# Compression method config keys
# (shared across all techniques below)
#########################################
COMPRESSION_TRAINING = "compression_training"
SHARED_PARAMETERS = "shared_parameters"
DIFFERENT_GROUPS = "different_groups"
TECHNIQUE_ENABLED = "enabled"
TECHNIQUE_SCHEDULE_OFFSET = "schedule_offset"
DIFFERENT_GROUPS_PARAMETERS = "params"
"modules" +DIFFERENT_GROUPS_MODULE_SCOPE_DEFAULT = "*" +DIFFERENT_GROUPS_RELATED_MODULE_SCOPE = "related_modules" +DIFFERENT_GROUPS_RELATED_MODULE_SCOPE_DEFAULT = None +# COMPRESSION_TRAINING_ENABLED = "enabled" +# COMPRESSION_TRAINING_ENABLED_DEFAULT = False + +#### +# Layer Reduction +#### +LAYER_REDUCTION = "layer_reduction" +LAYER_REDUCTION_ENABLED = "enabled" +LAYER_REDUCTION_ENABLED_DEFAULT = False +KEEP_NUMBER_LAYER = "keep_number_layer" +MODULE_NAME_PREFIX = "module_name_prefix" +TEACHER_LAYER = "teacher_layer" +OTHER_MODULE_NAME = "other_module_name" + +#### +# Weight Quantzation +#### +WEIGHT_QUANTIZATION = "weight_quantization" + +WEIGHT_QUANTIZATION_PERIOD = "quantization_period" +WEIGHT_QUANTIZATION_PERIOD_DEFAULT = 1 + +WEIGHT_QUANTIZE_IN_FORWARD_ENABLED = "quantize_weight_in_forward" +WEIGHT_QUANTIZE_IN_FORWARD_ENABLED_DEFAULT = False + +WEIGHT_QUANTIZE_ENABLED = TECHNIQUE_ENABLED +WEIGHT_QUANTIZE_ENABLED_DEFAULT = False + +WEIGHT_QUANTIZE_KERNEL = "quantizer_kernel" +WEIGHT_QUANTIZE_KERNEL_DEFAULT = False + +WEIGHT_QUANTIZE_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET +WEIGHT_QUANTIZE_SCHEDULE_OFFSET_DEFAULT = 0 + +WEIGHT_QUANTIZE_GROUPS = "quantize_groups" +WEIGHT_QUANTIZE_GROUPS_DEFAULT = 1 + +WEIGHT_QUANTIZE_VERBOSE = "quantize_verbose" +WEIGHT_QUANTIZE_VERBOSE_DEFAULT = False + +WEIGHT_QUANTIZE_TYPE = "quantization_type" +WEIGHT_QUANTIZE_TYPE_DEFAULT = "symmetric" +WEIGHT_QUANTIZE_SYMMETRIC = "symmetric" +WEIGHT_QUANTIZE_ASYMMETRIC = "asymmetric" + +WEIGHT_QUANTIZE_ROUNDING = "rounding" +WEIGHT_QUANTIZE_ROUNDING_DEFAULT = "nearest" +WEIGHT_QUANTIZE_STOCHASTIC_ROUNDING = "stochastic" +WEIGHT_QUANTIZE_NEAREST_ROUNDING = "nearest" +# maybe deleted for a cleaner version +WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE = "fp16_mixed_quantize" + +WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED = "enabled" +WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE_ENABLED_DEFAULT = False + +WEIGHT_QUANTIZE_CHANGE_RATIO = "quantize_change_ratio" +WEIGHT_QUANTIZE_CHANGE_RATIO_DEFAULT = 0.001 
+ +WEIGHT_QUANTIZE_START_BITS = "start_bits" +WEIGHT_QUANTIZE_TARGET_BITS = "target_bits" +### +# Activation Quantization +### +ACTIVATION_QUANTIZATION = "activation_quantization" + +ACTIVATION_QUANTIZATION_ENABLED = TECHNIQUE_ENABLED +ACTIVATION_QUANTIZATION_ENABLED_DEFAULT = False + +ACTIVATION_QUANTIZE_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET +ACTIVATION_QUANTIZE_SCHEDULE_OFFSET_DEFAULT = 1000 + +ACTIVATION_QUANTIZE_TYPE = "quantization_type" +ACTIVATION_QUANTIZE_TYPE_DEFAULT = "symmetric" +ACTIVATION_QUANTIZE_SYMMETRIC = "symmetric" +ACTIVATION_QUANTIZE_ASYMMETRIC = "asymmetric" + +ACTIVATION_QUANTIZE_RANGE = 'range_calibration' +ACTIVATION_QUANTIZE_RANGE_DEFAULT = 'dynamic' +ACTIVATION_QUANTIZE_RANGE_STATIC = 'static' +ACTIVATION_QUANTIZE_RANGE_DYNAMIC = 'dynamic' + +ACTIVATION_QUANTIZE_BITS = "bits" +### +# Sparse Pruning +### +SPARSE_PRUNING = "sparse_pruning" + +SPARSE_PRUNING_ENABLED = TECHNIQUE_ENABLED +SPARSE_PRUNING_ENABLED_DEFAULT = False + +SPARSE_PRUNING_METHOD = "method" +SPARSE_PRUNING_METHOD_DEFAULT = "l1" +SPARSE_PRUNING_METHOD_L1 = "l1" +SPARSE_PRUNING_METHOD_TOPK = "topk" + +SPARSE_PRUNING_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET +SPARSE_PRUNING_SCHEDULE_OFFSET_DEFAULT = 1000 + +SPARSE_PRUNING_DENSE_RATIO = "dense_ratio" +### +# Row Pruning +### +ROW_PRUNING = "row_pruning" + +ROW_PRUNING_ENABLED = TECHNIQUE_ENABLED +ROW_PRUNING_ENABLED_DEFAULT = False + +ROW_PRUNING_METHOD = "method" +ROW_PRUNING_METHOD_DEFAULT = "l1" +ROW_PRUNING_METHOD_L1 = "l1" +ROW_PRUNING_METHOD_TOPK = "topk" + +ROW_PRUNING_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET +ROW_PRUNING_SCHEDULE_OFFSET_DEFAULT = 1000 + +ROW_PRUNING_DENSE_RATIO = "dense_ratio" + +### +# Head Pruning +### +HEAD_PRUNING = "head_pruning" + +HEAD_PRUNING_ENABLED = TECHNIQUE_ENABLED +HEAD_PRUNING_ENABLED_DEFAULT = False + +HEAD_PRUNING_METHOD = "method" +HEAD_PRUNING_METHOD_DEFAULT = "topk" +HEAD_PRUNING_METHOD_L1 = "l1" +HEAD_PRUNING_METHOD_TOPK = "topk" + +HEAD_PRUNING_SCHEDULE_OFFSET = 
TECHNIQUE_SCHEDULE_OFFSET +HEAD_PRUNING_SCHEDULE_OFFSET_DEFAULT = 1000 + +HEAD_PRUNING_NUM_HEADS = "num_heads" + +HEAD_PRUNING_DENSE_RATIO = "dense_ratio" + +### +# Channel Pruning +### +CHANNEL_PRUNING = "channel_pruning" + +CHANNEL_PRUNING_ENABLED = TECHNIQUE_ENABLED +CHANNEL_PRUNING_ENABLED_DEFAULT = False + +CHANNEL_PRUNING_METHOD = "method" +CHANNEL_PRUNING_METHOD_DEFAULT = "l1" +CHANNEL_PRUNING_METHOD_L1 = "l1" +CHANNEL_PRUNING_METHOD_TOPK = "topk" + +CHANNEL_PRUNING_SCHEDULE_OFFSET = TECHNIQUE_SCHEDULE_OFFSET +CHANNEL_PRUNING_SCHEDULE_OFFSET_DEFAULT = 1000 + +CHANNEL_PRUNING_DENSE_RATIO = "dense_ratio" diff --git a/deepspeed/compression/helper.py b/deepspeed/compression/helper.py new file mode 100644 index 0000000..e839a5d --- /dev/null +++ b/deepspeed/compression/helper.py @@ -0,0 +1,283 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import torch +from .basic_layer import Embedding_Compress, LinearLayer_Compress, Conv2dLayer_Compress, BNLayer_Compress, ColumnParallelLinear_Compress, RowParallelLinear_Compress +from .constants import * + + +def recursive_getattr(model, module_name): + """ + Recursively get the attribute of a module. + Args: + model (`torch.nn.Module`) + The model to get the attribute from. + module_name (`str`) + The name of the module to get the attribute from. + """ + split_list = module_name.split('.') + output = model + for name in split_list: + output = getattr(output, name) + return output + + +def recursive_setattr(model, module_name, module): + """ + Recursively set the attribute of a module. + Args: + model (`torch.nn.Module`) + The model to set the attribute in. + module_name (`str`) + The name of the module to set the attribute in. + module (`torch.nn.Module`) + The module to set the attribute to. 
+ """ + split_list = module_name.split('.') + output = model + for name in split_list[:-1]: + output = getattr(output, name) + output.__setattr__(split_list[-1], module) + + +def module_replacement(model, module_name, compression_technique=None, mpu=None): + """ + Replace a module with a new module. + Args: + model (`torch.nn.Module`) + The model to replace the module in. + module_name (`str`) + The name of the module to replace. + compression_technique (`str`) + The compression technique to use for the new module. + """ + + # Get the old module + old_module = recursive_getattr(model, module_name) + + need_bias = False + if hasattr(old_module, 'bias') and old_module.bias is not None: + need_bias = True + + # Initialize the new module + if isinstance(old_module, + LinearLayer_Compress) or isinstance(old_module, + torch.nn.Linear): + if isinstance(old_module, LinearLayer_Compress): + new_module = old_module + else: + new_module = LinearLayer_Compress(old_module.in_features, + old_module.out_features, + bias=need_bias).to( + device=old_module.weight.device, + dtype=old_module.weight.dtype) + new_module.weight.data = old_module.weight.data + if need_bias: + new_module.bias.data = old_module.bias.data + elif isinstance(old_module, + Conv2dLayer_Compress) or isinstance(old_module, + torch.nn.Conv2d): + if isinstance(old_module, Conv2dLayer_Compress): + new_module = old_module + else: + new_module = Conv2dLayer_Compress(old_module.in_channels, old_module.out_channels, old_module.kernel_size, old_module.stride, old_module.padding, \ + old_module.dilation, old_module.groups, need_bias, \ + old_module.padding_mode).to(device=old_module.weight.device, dtype=old_module.weight.dtype) + new_module.weight.data = old_module.weight.data + if need_bias: + new_module.bias.data = old_module.bias.data + elif isinstance(old_module, torch.nn.BatchNorm2d): + new_module = BNLayer_Compress(old_module.num_features, + old_module.eps, + old_module.momentum, + old_module.affine, + 
old_module.track_running_stats).to( + old_module.weight.device, + old_module.weight.dtype) + new_module.weight.data = old_module.weight.data + if need_bias: + new_module.bias.data = old_module.bias.data + new_module.running_mean.data = old_module.running_mean.data + new_module.running_var.data = old_module.running_var.data + elif isinstance(old_module, + Embedding_Compress) or isinstance(old_module, + torch.nn.Embedding): + if isinstance(old_module, Embedding_Compress): + new_module = old_module + else: + new_module = Embedding_Compress(old_module.num_embeddings, old_module.embedding_dim, old_module.padding_idx, old_module.max_norm, old_module.norm_type, \ + old_module.scale_grad_by_freq, old_module.sparse).to(device=old_module.weight.device, dtype=old_module.weight.dtype) + new_module.weight.data = old_module.weight.data + elif mpu is not None and (isinstance(old_module, + ColumnParallelLinear_Compress) + or isinstance(old_module, + mpu.ColumnParallelLinear)): + if isinstance(old_module, ColumnParallelLinear_Compress): + new_module = old_module + else: + new_module = ColumnParallelLinear_Compress( + mpu, + old_module.input_size, + old_module.output_size, + gather_output=old_module.gather_output, + skip_bias_add=old_module.skip_bias_add, + bias=need_bias).to(device=old_module.weight.device, + dtype=old_module.weight.dtype) + new_module.weight.data = old_module.weight.data + if need_bias: + new_module.bias.data = old_module.bias.data + elif mpu is not None and (isinstance(old_module, + RowParallelLinear_Compress) + or isinstance(old_module, + mpu.RowParallelLinear)): + if isinstance(old_module, RowParallelLinear_Compress): + new_module = old_module + else: + new_module = RowParallelLinear_Compress( + mpu, + old_module.input_size, + old_module.output_size, + input_is_parallel=old_module.input_is_parallel, + skip_bias_add=old_module.skip_bias_add, + bias=need_bias).to(device=old_module.weight.device, + dtype=old_module.weight.dtype) + new_module.weight.data = 
old_module.weight.data + if need_bias: + new_module.bias.data = old_module.bias.data + else: + new_module = None + + if compression_technique is not None: + for k, v in compression_technique.items(): + if k == SPARSE_PRUNING: + if v[SPARSE_PRUNING_ENABLED]: + new_module.enable_sparse_pruning(v[SPARSE_PRUNING_DENSE_RATIO], + v[SPARSE_PRUNING_METHOD]) + elif k == ROW_PRUNING: + if v[ROW_PRUNING_ENABLED]: + new_module.enable_row_pruning(v[ROW_PRUNING_DENSE_RATIO], + v[ROW_PRUNING_METHOD]) + elif k == HEAD_PRUNING: + if v[HEAD_PRUNING_ENABLED]: + new_module.enable_head_pruning(v[HEAD_PRUNING_DENSE_RATIO], + v[HEAD_PRUNING_METHOD], + v[HEAD_PRUNING_NUM_HEADS]) + elif k == ACTIVATION_QUANTIZATION: + if v[ACTIVATION_QUANTIZATION_ENABLED]: + new_module.enable_activation_quantization( + v[ACTIVATION_QUANTIZE_BITS], + v[ACTIVATION_QUANTIZE_TYPE], + v[ACTIVATION_QUANTIZE_RANGE]) + elif k == WEIGHT_QUANTIZATION: + if v[WEIGHT_QUANTIZE_ENABLED]: + new_module.enable_weight_quantization( + v[WEIGHT_QUANTIZE_START_BITS], + v[WEIGHT_QUANTIZE_TARGET_BITS], + v[WEIGHT_QUANTIZATION_PERIOD], + v[WEIGHT_QUANTIZE_IN_FORWARD_ENABLED], + v[WEIGHT_QUANTIZE_TYPE], + v[WEIGHT_QUANTIZE_GROUPS]) + elif k == CHANNEL_PRUNING: + if v[CHANNEL_PRUNING_ENABLED]: + new_module.enable_channel_pruning(v[CHANNEL_PRUNING_DENSE_RATIO], + v[CHANNEL_PRUNING_METHOD]) + else: + raise NotImplementedError( + 'Compression technique {} is not implemented'.format(k)) + + # Replace the old module with the new one + recursive_setattr(model, module_name, new_module) + + +def is_module_compressible(module, mpu=None): + ret = isinstance(module, torch.nn.Linear) or \ + isinstance(module, torch.nn.Conv2d) or \ + isinstance(module, torch.nn.Embedding) or \ + isinstance(module, torch.nn.BatchNorm2d) + + if mpu is not None: + ret = ret or isinstance(module, + mpu.RowParallelLinear) or isinstance( + module, + mpu.ColumnParallelLinear) + + return ret + + +def compression_preparation(model, compression_techinique_list, mpu): + 
""" + Prepare the compression techniques of a model. + Args: + model (`torch.nn.Module`) + The model to prepare the compression techniques of. + compression_techinique_list (`list`) + The list of compression techniques to prepare the model to. + list[] + """ + # Here we first replace all module with our linear wrapper + for module_name, module in model.named_modules(): + if is_module_compressible(module, mpu): + module_replacement(model, module_name, mpu=mpu) + for module_name_lists, _, compression_technique in compression_techinique_list: + for mnl in module_name_lists: + for module_name in mnl: + module_replacement(model, module_name, compression_technique) + + return model + + +def fix_compression(model, + module_name, + compression_technique, + mask=None, + dim_reduction=False): + """ + Fix the compression technique of a module. + Args: + model (`torch.nn.Module`) + The model to fix the compression technique of. + module_name (`str`) + The name of the module to fix the compression technique of. + compression_technique (`str`) + The compression technique to fix the module to. 
+ """ + # Here we can make things much simpler by just replacing the module + module = recursive_getattr(model, module_name) + for k, v in compression_technique.items(): + if k == WEIGHT_QUANTIZATION and v[WEIGHT_QUANTIZE_IN_FORWARD_ENABLED] and v[ + WEIGHT_QUANTIZE_ENABLED]: + return module.fix_weight_quantization() + elif k == SPARSE_PRUNING and v[SPARSE_PRUNING_ENABLED]: + return module.fix_sparse_pruning_helper() + elif k == ROW_PRUNING and (v[ROW_PRUNING_ENABLED] or mask is not None): + return module.fix_row_col_pruning_helper(mask, dim_reduction=dim_reduction) + elif k == HEAD_PRUNING and (v[HEAD_PRUNING_ENABLED] or mask is not None): + return module.fix_head_pruning_helper(mask, + v[HEAD_PRUNING_NUM_HEADS], + dim_reduction=dim_reduction) + elif k == CHANNEL_PRUNING and (v[CHANNEL_PRUNING_ENABLED] or mask is not None): + return module.fix_channel_pruning_helper(mask, dim_reduction=dim_reduction) + + +def convert_conv1d_to_linear(model, convert_type): + ''' + This is a help function to convert conv1d to linear (e.g., convert GPT2 from HF) + ''' + if hasattr(model, 'module'): + c_model = model.module + else: + c_model = model + + for name, module in c_model.named_modules(): + if isinstance(module, convert_type): + old_module = recursive_getattr(c_model, name) + new_module = torch.nn.Linear( + old_module.weight.data.size(0), + old_module.weight.data.size(1), + bias=True if old_module.bias is not None else False) + new_module.weight.data = old_module.weight.data.t().contiguous() + if new_module.bias is not None: + new_module.bias.data = old_module.bias.data.view(-1) + + recursive_setattr(c_model, name, new_module) + + return model diff --git a/deepspeed/compression/scheduler.py b/deepspeed/compression/scheduler.py new file mode 100644 index 0000000..67955a8 --- /dev/null +++ b/deepspeed/compression/scheduler.py @@ -0,0 +1,173 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +from .compress import get_module_name +from .constants import * +from .helper import 
class compression_scheduler():
    '''
    Schedules when each configured compression technique becomes active,
    based on the number of completed training steps.
    '''
    def __init__(self, model, compression_config):
        self.model = model
        self.compression_config = compression_config
        self.make_init()
        self.training_steps = 0
        self.weight_quantization_enabled = False

        # One "already announced" flag per technique so activation is logged
        # only once.
        self.verbose = {
            WEIGHT_QUANTIZATION: False,
            ACTIVATION_QUANTIZATION: False,
            SPARSE_PRUNING: False,
            HEAD_PRUNING: False,
            ROW_PRUNING: False,
            CHANNEL_PRUNING: False
        }

    def make_init(self):
        """Index the compression config by technique and resolve target module names."""
        self.different_compression_methods = {}
        for method, method_content in self.compression_config.items():
            # Layer reduction is not step-scheduled; skip it here.
            if LAYER_REDUCTION in method:
                continue
            shared_parameters = method_content[SHARED_PARAMETERS]
            self.different_compression_methods[method] = {
                TECHNIQUE_ENABLED: shared_parameters[TECHNIQUE_ENABLED],
                SHARED_PARAMETERS: shared_parameters,
                DIFFERENT_GROUPS: []
            }

            exist_module_name = set()
            for group_name, method_parameters in method_content[DIFFERENT_GROUPS].items():
                module_name_list = []
                for key_word in method_parameters[DIFFERENT_GROUPS_MODULE_SCOPE]:
                    module_name, exist_module_name = get_module_name(
                        group_name,
                        self.model,
                        key_word,
                        exist_module_name,
                        verbose=False)
                    module_name_list.extend(module_name)
                if module_name_list:
                    self.different_compression_methods[method][DIFFERENT_GROUPS].append(
                        [group_name,
                         module_name_list,
                         method_parameters.copy().pop('params')])

    def _check_technique(self, method, module_flag, description, is_weight_quantization=False):
        """Once `method`'s schedule offset is reached, set `module_flag` True on
        every module of every group and log the activation once."""
        state = self.different_compression_methods[method]
        if not state[TECHNIQUE_ENABLED]:
            return
        shared_parameters = state[SHARED_PARAMETERS]
        if self.training_steps >= shared_parameters[TECHNIQUE_SCHEDULE_OFFSET]:
            for group_name, module_name_list, method_parameters in state[DIFFERENT_GROUPS]:
                for module_name in module_name_list:
                    module = recursive_getattr(self.model, module_name)
                    setattr(module, module_flag, True)

            if not self.verbose[method]:
                logger.info(f'{description} is enabled at step {self.training_steps}')
                if is_weight_quantization:
                    self.weight_quantization_enabled = True
                self.verbose[method] = True

    def check_weight_quantization(self):
        # check weight quantization
        self._check_technique(WEIGHT_QUANTIZATION,
                              'weight_quantization_enabled',
                              'Weight quantization',
                              is_weight_quantization=True)

    def check_activation_quantization(self):
        # check activation quantization
        self._check_technique(ACTIVATION_QUANTIZATION,
                              'activation_quantization_enabled',
                              'Activation quantization')

    def check_sparse_pruning(self):
        # check sparse pruning
        self._check_technique(SPARSE_PRUNING,
                              'sparse_pruning_enabled',
                              'Sparse pruning')

    def check_head_pruning(self):
        # check head pruning
        self._check_technique(HEAD_PRUNING, 'head_pruning_enabled', 'Head pruning')

    def check_row_pruning(self):
        # check row pruning
        self._check_technique(ROW_PRUNING, 'row_pruning_enabled', 'Row pruning')

    def check_channel_pruning(self):
        # check channel pruning
        self._check_technique(CHANNEL_PRUNING,
                              'channel_pruning_enabled',
                              'Channel pruning')

    def check_all_modules(self):
        # Evaluate every technique against the current step count.
        self.check_weight_quantization()
        self.check_activation_quantization()
        self.check_sparse_pruning()
        self.check_head_pruning()
        self.check_row_pruning()
        self.check_channel_pruning()

    def step(self, step_zero_check=False):
        # `step_zero_check` lets callers re-evaluate without advancing time.
        if not step_zero_check:
            self.training_steps += 1
        self.check_all_modules()
class SymQuantizer(torch.autograd.Function):
    """
    Symmetric (zero-centred) fake quantization with a straight-through
    gradient estimator.
    """
    @staticmethod
    def forward(ctx, input, num_bits, min_value=None, max_value=None, num_groups=1):
        """
        Args:
            input (`torch.FloatTensor`)
                Tensor to fake-quantize.
            num_bits (int, >=4)
                Quantization bit width.
            min_value/max_value (`torch.FloatTensor`, optional)
                Static activation range; must be given together and only with
                ``num_groups == 1``.
            num_groups (int)
                Number of groups the tensor is partitioned into for
                per-group scaling.
        Returns:
            `torch.FloatTensor`: dequantized values, original shape.
        """
        assert (min_value is None
                and max_value is None) or (min_value is not None
                                           and max_value is not None and num_groups == 1)
        q_range = 2**num_bits
        orig_shape = input.shape
        if min_value is None:
            # Dynamic range: per-group max absolute value.
            input = input.reshape(num_groups, -1)
            max_input = torch.amax(torch.abs(input), dim=-1).view(num_groups, -1)
        else:
            # Static range: widest magnitude of the provided bounds.
            max_input = torch.max(min_value.abs(), max_value).view(-1)

        scale = 2 * max_input / q_range
        quantized = (input / scale).round().clamp(-q_range // 2, q_range // 2 - 1) * scale
        return quantized.reshape(orig_shape).contiguous()

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through estimator: gradient passes through unchanged.
        return grad_output.clone(), None, None, None, None
class TernaryQuantizer(torch.autograd.Function):
    """
    Ternary quantization: maps each group to {-alpha, 0, +alpha} with a
    straight-through gradient estimator.
    """
    @staticmethod
    def forward(ctx, input, num_bits, min_value=None, max_value=None, num_groups=1):
        """
        Args:
            input (`torch.FloatTensor`)
                Tensor to quantize.
            num_bits (int)
                Unused; kept for a uniform quantizer interface.
            min_value/max_value (`torch.FloatTensor`)
                Unused for now; must both be None.
            num_groups (int)
                Number of groups to partition the quantization into.
        Returns:
            `torch.FloatTensor`: ternarized tensor of the original shape.
        """
        assert (min_value is None and max_value is None)
        flat = input.reshape(num_groups, -1)
        count = flat.shape[1]
        # Per-group threshold of 0.7 * mean(|x|) (TWN-style heuristic).
        mean_abs = flat.norm(p=1, dim=1).div(count)
        threshold = (0.7 * mean_abs).view(-1, 1)
        positive = (flat > threshold).type(input.type())
        negative = (flat < -threshold).type(input.type())
        above = (flat.abs() > threshold).type(input.type())
        # alpha is the mean magnitude of the surviving (non-zeroed) entries.
        alpha = ((above * flat).abs().sum(dim=1) / above.sum(dim=1)).view(-1, 1)
        ternary = alpha * positive - alpha * negative
        return ternary.reshape(input.shape).contiguous()

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through estimator.
        return grad_output.clone(), None, None, None, None
default_pg_timeout = timedelta(minutes=30) - INFERENCE_GENERIC_MODE = 'generic' INFERENCE_SPECIALIZED_MODE = 'specialized' diff --git a/deepspeed/elasticity/__init__.py b/deepspeed/elasticity/__init__.py index be517de..be95e3a 100644 --- a/deepspeed/elasticity/__init__.py +++ b/deepspeed/elasticity/__init__.py @@ -1 +1,7 @@ +'''Copyright The Microsoft DeepSpeed Team''' + from .elasticity import compute_elastic_config, elasticity_enabled, ensure_immutable_elastic_config +from .utils import is_torch_elastic_compatible +from .constants import ENABLED, ENABLED_DEFAULT, ELASTICITY +if is_torch_elastic_compatible(): + from .elastic_agent import DSElasticAgent diff --git a/deepspeed/elasticity/config.py b/deepspeed/elasticity/config.py index 67db58c..ffbce70 100644 --- a/deepspeed/elasticity/config.py +++ b/deepspeed/elasticity/config.py @@ -79,6 +79,7 @@ class ElasticityConfig: self.min_gpus = param_dict.get(MIN_GPUS, MIN_GPUS_DEFAULT) self.max_gpus = param_dict.get(MAX_GPUS, MAX_GPUS_DEFAULT) + if self.min_gpus < 1 or self.max_gpus < 1: raise ElasticityConfigError( "Elasticity min/max gpus must be > 0, " @@ -88,6 +89,20 @@ class ElasticityConfig: "Elasticity min_gpus cannot be greater than max_gpus, " f"given min_gpus: {self.min_gpus}, max_gpus: {self.max_gpus}") + self.model_parallel_size = param_dict.get(MODEL_PARLLEL_SIZE, + MODEL_PARLLEL_SIZE_DEFAULT) + if self.model_parallel_size < 1: + raise ElasticityConfigError( + "Model-Parallel size cannot be less than 1, " + f"given model-parallel size: {self.model_parallel_size}") + + self.num_gpus_per_node = param_dict.get(NUM_GPUS_PER_NODE, + NUM_GPUS_PER_NODE_DEFAULT) + if self.num_gpus_per_node < 1: + raise ElasticityConfigError( + "Number of GPUs per node cannot be less than 1, " + f"given number of GPUs per node: {self.num_gpus_per_node}") + self.min_time = param_dict.get(MIN_TIME, MIN_TIME_DEFAULT) if self.min_time < 0: raise ElasticityConfigError( diff --git a/deepspeed/elasticity/constants.py 
class DSElasticAgent(LocalElasticAgent):
    """
    LocalElasticAgent variant that injects a DeepSpeed-supplied environment
    into every worker process and restarts the worker group on rendezvous
    membership changes.
    """
    def __init__(
        self,
        spec: WorkerSpec,
        env: Dict,
        start_method="spawn",
        exit_barrier_timeout: float = 300,
        log_dir: Optional[str] = None,
    ):
        super().__init__(spec, start_method, exit_barrier_timeout, log_dir)
        # Base environment variables (from the DeepSpeed launcher) applied to
        # every worker.
        self.ds_env = env

    @staticmethod
    def _set_master_addr_port(store: Store,
                              master_addr: Optional[str],
                              master_port: Optional[int]):
        if master_port is None:
            # Grab any free port on this host.
            sock = _get_socket_with_port()
            with closing(sock):
                master_port = sock.getsockname()[1]

        if master_addr is None:
            # Prefer the first routable IP over the FQDN, which may not be
            # resolvable from the other nodes.
            result = subprocess.check_output("hostname -I", shell=True)
            master_addr = result.decode('utf-8').split()[0]

        store.set("MASTER_ADDR", master_addr.encode(encoding="UTF-8"))
        store.set("MASTER_PORT", str(master_port).encode(encoding="UTF-8"))

    def _start_workers(self, worker_group: WorkerGroup) -> Dict[int, Any]:
        spec = worker_group.spec
        store = worker_group.store
        assert store is not None
        master_addr, master_port = super()._get_master_addr_port(store)
        restart_count = spec.max_restarts - self._remaining_restarts

        use_agent_store = spec.rdzv_handler.get_backend() == "static"

        args: Dict[int, Tuple] = {}
        envs: Dict[int, Dict[str, str]] = {}
        for worker in worker_group.workers:
            local_rank = worker.local_rank

            # DeepSpeed base environment first, torch-elastic bookkeeping on top.
            worker_env = copy.deepcopy(self.ds_env)
            worker_env.update({
                "LOCAL_RANK": str(local_rank),
                "RANK": str(worker.global_rank),
                "GROUP_RANK": str(worker_group.group_rank),
                "ROLE_RANK": str(worker.role_rank),
                "ROLE_NAME": spec.role,
                "LOCAL_WORLD_SIZE": str(spec.local_world_size),
                "WORLD_SIZE": str(worker.world_size),
                "GROUP_WORLD_SIZE": str(worker_group.group_world_size),
                "ROLE_WORLD_SIZE": str(worker.role_world_size),
                "MASTER_ADDR": master_addr,
                "MASTER_PORT": str(master_port),
                "TORCHELASTIC_RESTART_COUNT": str(restart_count),
                "TORCHELASTIC_MAX_RESTARTS": str(spec.max_restarts),
                "TORCHELASTIC_RUN_ID": spec.rdzv_handler.get_run_id(),
                "TORCHELASTIC_USE_AGENT_STORE": str(use_agent_store),
                "NCCL_ASYNC_ERROR_HANDLING": os.getenv("NCCL_ASYNC_ERROR_HANDLING",
                                                       str(1)),
            })
            if "OMP_NUM_THREADS" in os.environ:
                worker_env["OMP_NUM_THREADS"] = os.environ["OMP_NUM_THREADS"]

            envs[local_rank] = worker_env
            worker_args = macros.substitute(list(spec.args), str(local_rank))
            args[local_rank] = tuple(worker_args)

        # Scaling events do not count towards restarts (same attempt number);
        # remove any stale log dir left by a previous incarnation.
        attempt_log_dir = os.path.join(self._log_dir, f"attempt_{restart_count}")
        shutil.rmtree(attempt_log_dir, ignore_errors=True)
        os.makedirs(attempt_log_dir)

        assert spec.entrypoint is not None
        self._pcontext = start_processes(
            name=spec.role,
            entrypoint=spec.entrypoint,
            args=args,
            envs=envs,
            log_dir=attempt_log_dir,
            start_method=self._start_method,
            redirects=spec.redirects,
            tee=spec.tee,
        )

        return self._pcontext.pids()

    def _invoke_run(self, role: str = "default") -> RunResult:
        # NOTE: currently only works for a single role
        spec = self._worker_group.spec
        role = spec.role

        log.info(
            f"[{role}] starting workers for entrypoint: {spec.get_entrypoint_name()}")

        self._initialize_workers(self._worker_group)
        monitor_interval = spec.monitor_interval
        rdzv_handler = spec.rdzv_handler

        # Snapshot of the rendezvous participants; a shrinking set signals a
        # scale-down that must trigger a worker-group restart.
        participants = rdzv_handler._state_holder.state.participants

        while True:
            assert self._worker_group.state != WorkerState.INIT
            time.sleep(monitor_interval)
            run_result = self._monitor_workers(self._worker_group)
            state = run_result.state
            self._worker_group.state = state

            expire_time = datetime.utcnow() - (
                rdzv_handler._settings.keep_alive_interval *
                rdzv_handler._settings.keep_alive_max_attempt)
            # NOTE(review): computed but never consumed below — kept for parity
            # with the original implementation.
            _dead_nodes = [
                node for node,
                last_heartbeat in
                rdzv_handler._state_holder.state.last_heartbeats.items()
                if last_heartbeat < expire_time
            ]

            put_metric(f"workers.{role}.remaining_restarts", self._remaining_restarts)
            put_metric(f"workers.{role}.{state.name.lower()}", 1)

            if state == WorkerState.SUCCEEDED:
                log.info(
                    f"[{role}] worker group successfully finished."
                    f" Waiting {self._exit_barrier_timeout} seconds for other agents to finish."
                )
                self._exit_barrier()
                return run_result
            elif state in {
                    WorkerState.UNHEALTHY,
                    WorkerState.FAILED
            } or len(participants) > len(rdzv_handler._state_holder.state.participants):
                if self._remaining_restarts > 0:
                    log.info(
                        f"[{role}] Worker group {state.name}. "
                        f"{self._remaining_restarts}/{spec.max_restarts} attempts left;"
                        f" will restart worker group")
                    self._remaining_restarts -= 1
                    self._restart_workers(self._worker_group)
                    participants = rdzv_handler._state_holder.state.participants
                else:
                    self._stop_workers(self._worker_group)
                    self._worker_group.state = WorkerState.FAILED
                    self._exit_barrier()
                    return run_result
            elif state == WorkerState.HEALTHY:
                # Membership changes do not count as retries.
                num_nodes_waiting = rdzv_handler.num_nodes_waiting()
                group_rank = self._worker_group.group_rank
                if num_nodes_waiting > 0:
                    log.info(f"[{role}] Detected {num_nodes_waiting} "
                             f"new nodes from group_rank={group_rank}; "
                             f"will restart worker group")
                    self._restart_workers(self._worker_group)
                    participants = rdzv_handler._state_holder.state.participants
            else:
                raise Exception(f"[{role}] Worker group in {state.name} state")
def _get_compatible_gpus_v02(micro_batches,
                             max_acceptable_batch_size,
                             current_num_gpus,
                             min_gpus=None,
                             max_gpus=None,
                             prefer_larger=True,
                             num_gpus_per_node=1,
                             model_parallel_size=1):
    '''
    Elasticity v0.2: node-level variant of v0.1 that is aware of model
    parallelism.

    Returns:
        final_batch_size (int): chosen global train batch size
        valid_gpus (list[int]): data-parallel world sizes compatible with it
        candidate_microbatch (int or None): preferred micro-batch size
    '''
    if num_gpus_per_node % model_parallel_size != 0:
        raise ElasticityError(
            f"In Elasticity v0.2, number of GPUs per node:" \
            f"{num_gpus_per_node} should be divisible by " \
            f"model parallel size {model_parallel_size}")

    def get_microbatch(final_batch_size):
        # Pick a micro-batch that evenly divides the per-GPU batch share,
        # preferring the largest one when requested.
        candidate_microbatch = None
        for micro_batch in micro_batches:
            if final_batch_size // current_num_gpus % micro_batch == 0:
                # Fixed: compare to None with `is`, not `==` (PEP 8).
                if candidate_microbatch is None:
                    candidate_microbatch = micro_batch
                if prefer_larger and candidate_microbatch < micro_batch:
                    candidate_microbatch = micro_batch
        return candidate_microbatch

    dp_size_per_node = num_gpus_per_node // model_parallel_size

    final_batch_size, valid_world_size = _get_compatible_gpus_v01(
        micro_batches,
        int(max_acceptable_batch_size / dp_size_per_node),
        int(min_gpus / num_gpus_per_node),
        int(max_gpus / num_gpus_per_node),  # Elasticity v0.2 works at node granularity
        prefer_larger=prefer_larger)

    final_batch_size = int(final_batch_size) * dp_size_per_node
    valid_dp_world_size = [i * dp_size_per_node for i in valid_world_size]
    if current_num_gpus // model_parallel_size in valid_dp_world_size:
        candidate_microbatch = get_microbatch(final_batch_size)
        return final_batch_size, valid_dp_world_size, candidate_microbatch

    # Fallback: the current world size is not in the valid set; choose the
    # largest (or smallest) batch reachable with the current DP size.
    current_dp_size = (current_num_gpus / num_gpus_per_node) * dp_size_per_node
    candidate_batch_sizes = []
    for micro_batch in micro_batches:
        min_batch_size = micro_batch * current_dp_size

        factor = math.floor(max_acceptable_batch_size / float(min_batch_size))
        candidate_batch_sizes.append(factor * min_batch_size)

    # Fixed: removed dead local `used_microbatch = None` (assigned, never read).
    if prefer_larger:
        candidate_batch_size = max(candidate_batch_sizes)
    else:
        candidate_batch_size = min(candidate_batch_sizes)

    candidate_microbatch = get_microbatch(candidate_batch_size)

    return candidate_batch_size, [int(current_dp_size)], candidate_microbatch
- world_size (int, optional): Intended/current world size, will do some sanity + world_size (int, optional): Intended/current DP world size, will do some sanity checks to ensure world size is actually valid with the config. + return_microbatch (bool, optional): whether to return micro batch size or not. Raises: ElasticityConfigError: Missing required elasticity config or elasticity disabled @@ -277,6 +342,13 @@ def compute_elastic_config(ds_config: dict, target_deepspeed_version: str, world "('enabled':true) if running an elastic training job.") elastic_config = ElasticityConfig(elastic_config_dict) + model_parallel_size = elastic_config.model_parallel_size + num_gpus_per_node = elastic_config.num_gpus_per_node + + if model_parallel_size > 1 and float(elastic_config.version) != 0.2: + raise ElasticityConfigError(f"Elasticity V{elastic_config.version} " \ + f"does not support model-parallel training. Given model-parallel size: " \ + f"{model_parallel_size}") if float(elastic_config.version) > LATEST_ELASTICITY_VERSION: raise ElasticityConfigError("Attempting to run elasticity version " \ @@ -297,10 +369,39 @@ def compute_elastic_config(ds_config: dict, target_deepspeed_version: str, world prefer_larger=elastic_config.prefer_larger_batch_size) # ensure batch size is int dtype final_batch_size = int(final_batch_size) + elif float(elastic_config.version) == 0.2: + if world_size != 0: + current_num_gpus = world_size + else: + if "WORLD_SIZE" in os.environ and \ + os.getenv('WORLD_SIZE').isnumeric(): + current_num_gpus = int(os.getenv('WORLD_SIZE')) + else: + WORLD_SIZE = os.getenv('WORLD_SIZE') + raise ElasticityConfigError( + 'Elasticity V 0.2 needs WORLD_SIZE '\ + 'to compute valid batch size. '\ + 'Either give it as argument to function compute_elastic_config '\ + 'or set it as an environment variable. 
'\ + f'Value of WORLD_SIZE as environment variable is {WORLD_SIZE}') + + final_batch_size, valid_gpus, candidate_microbatch_size = _get_compatible_gpus_v02( + micro_batches=elastic_config.micro_batches, + max_acceptable_batch_size=elastic_config.max_acceptable_batch_size, + current_num_gpus=current_num_gpus, + min_gpus=elastic_config.min_gpus, + max_gpus=elastic_config.max_gpus, + prefer_larger=elastic_config.prefer_larger_batch_size, + num_gpus_per_node=num_gpus_per_node, + model_parallel_size=model_parallel_size) + # ensure batch size is int dtype + final_batch_size = int(final_batch_size) else: raise NotImplementedError( f"Unable to find elastic logic for version: {elastic_config.version}") + logger.info(f"Valid World Size (GPUs / Model Parallel Size): {valid_gpus}") + if world_size > 0: if world_size not in valid_gpus: raise ElasticityIncompatibleWorldSize(f"World size ({world_size}) is not valid " \ @@ -317,4 +418,19 @@ def compute_elastic_config(ds_config: dict, target_deepspeed_version: str, world f" micro_batches={elastic_config.micro_batches}." return final_batch_size, valid_gpus, micro_batch_size + if return_microbatch: + # Pick a valid micro batch size + if float(elastic_config.version) == 0.2: + return final_batch_size, valid_gpus, candidate_microbatch_size + else: + micro_batch_size = None + for mbsz in sorted(list(set(elastic_config.micro_batches)), reverse=True): + if final_batch_size // world_size % mbsz == 0: + micro_batch_size = mbsz + break + assert micro_batch_size is not None, "Unable to find divisible micro batch size" \ + f" world_size={world_size}, final_batch_size={final_batch_size}, and " \ + f" micro_batches={elastic_config.micro_batches}." 
+ return final_batch_size, valid_gpus, micro_batch_size + return final_batch_size, valid_gpus diff --git a/deepspeed/elasticity/utils.py b/deepspeed/elasticity/utils.py new file mode 100644 index 0000000..3fba694 --- /dev/null +++ b/deepspeed/elasticity/utils.py @@ -0,0 +1,16 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import torch + + +def is_torch_elastic_compatible(): + ''' + Helper to lookup torch version. Elastic training is + introduced in 1.11.x + ''' + TORCH_MAJOR = int(torch.__version__.split('.')[0]) + TORCH_MINOR = int(torch.__version__.split('.')[1]) + if TORCH_MAJOR == 1 and TORCH_MINOR >= 11: + return True + else: + return False diff --git a/deepspeed/env_report.py b/deepspeed/env_report.py index aad1836..e8be40a 100644 --- a/deepspeed/env_report.py +++ b/deepspeed/env_report.py @@ -1,10 +1,12 @@ +'''Copyright The Microsoft DeepSpeed Team''' + import torch import deepspeed import subprocess import argparse -from .ops.op_builder import ALL_OPS +from .ops.op_builder.all_ops import ALL_OPS from .git_version_info import installed_ops, torch_info -from .ops import __compatible_ops__ as compatible_ops +from deepspeed.accelerator import get_accelerator GREEN = '\033[92m' RED = '\033[91m' @@ -54,7 +56,7 @@ def op_report(verbose=True): def ninja_installed(): try: - import ninja + import ninja # noqa: F401 except ImportError: return False return True @@ -80,31 +82,33 @@ def nvcc_version(): def debug_report(): max_dots = 33 - hip_version = None - if hasattr(torch.version, 'hip'): - hip_version = torch.version.hip - report = [ ("torch install path", torch.__path__), ("torch version", torch.__version__), - ("torch cuda version", - torch.version.cuda), - ("torch hip version", - hip_version), - ("nvcc version", - (None if hip_version else nvcc_version())), ("deepspeed install path", deepspeed.__path__), ("deepspeed info", f"{deepspeed.__version__}, {deepspeed.__git_hash__}, {deepspeed.__git_branch__}" - ), - ("deepspeed wheel compiled w.", - f"torch 
{torch_info['version']}, " + - (f"hip {torch_info['hip_version']}" - if hip_version else f"cuda {torch_info['cuda_version']}")), + ) ] + if get_accelerator().device_name() == 'cuda': + hip_version = getattr(torch.version, "hip", None) + report.extend([("torch cuda version", + torch.version.cuda), + ("torch hip version", + hip_version), + ("nvcc version", + (None if hip_version else nvcc_version())), + ("deepspeed wheel compiled w.", + f"torch {torch_info['version']}, " + + (f"hip {torch_info['hip_version']}" + if hip_version else f"cuda {torch_info['cuda_version']}"))]) + else: + report.extend([("deepspeed wheel compiled w.", + f"torch {torch_info['version']} ")]) + print("DeepSpeed general environment info:") for name, value in report: print(name, "." * (max_dots - len(name)), value) diff --git a/deepspeed/git_version_info.py b/deepspeed/git_version_info.py index a806475..dfb8864 100644 --- a/deepspeed/git_version_info.py +++ b/deepspeed/git_version_info.py @@ -1,6 +1,8 @@ +'''Copyright The Microsoft DeepSpeed Team''' + try: # This is populated by setup.py - from .git_version_info_installed import * + from .git_version_info_installed import * # noqa: F401 except ModuleNotFoundError: import os if os.path.isfile('version.txt'): @@ -11,7 +13,7 @@ except ModuleNotFoundError: git_hash = '[none]' git_branch = '[none]' - from .ops.op_builder import ALL_OPS + from .ops.op_builder.all_ops import ALL_OPS installed_ops = dict.fromkeys(ALL_OPS.keys(), False) compatible_ops = dict.fromkeys(ALL_OPS.keys(), False) torch_info = {'version': "0.0", "cuda_version": "0.0", "hip_version": "0.0"} diff --git a/deepspeed/inference/__init__.py b/deepspeed/inference/__init__.py index 8ee60d6..449e665 100644 --- a/deepspeed/inference/__init__.py +++ b/deepspeed/inference/__init__.py @@ -1 +1,3 @@ +'''Copyright The Microsoft DeepSpeed Team''' + from .engine import InferenceEngine diff --git a/deepspeed/inference/config.py b/deepspeed/inference/config.py new file mode 100644 index 
0000000..61298db --- /dev/null +++ b/deepspeed/inference/config.py @@ -0,0 +1,278 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import torch +from deepspeed.runtime.config_utils import DeepSpeedConfigModel +from deepspeed.runtime.zero.config import DeepSpeedZeroConfig +from pydantic import Field +from pydantic import validator +from typing import Dict, Union +from enum import Enum + + +class DtypeEnum(Enum): + # The torch dtype must always be the first value (so we return torch.dtype) + fp16 = torch.float16, "torch.float16", "fp16", "float16", "half" + bf16 = torch.bfloat16, "torch.bfloat16", "bf16", "bfloat16" + fp32 = torch.float32, "torch.float32", "fp32", "float32", "float" + int8 = torch.int8, "torch.int8", "int8" + + # Copied from https://stackoverflow.com/a/43210118 + # Allows us to use multiple values for each Enum index and returns first + # listed value when Enum is called + def __new__(cls, *values): + obj = object.__new__(cls) + # first value is canonical value + obj._value_ = values[0] + for other_value in values[1:]: + cls._value2member_map_[other_value] = obj + obj._all_values = values + return obj + + def __repr__(self): + return "<%s.%s: %s>" % ( + self.__class__.__name__, + self._name_, + ", ".join([repr(v) for v in self._all_values]), + ) + + +class MoETypeEnum(str, Enum): + residual = "residual" + standard = "standard" + + +class DeepSpeedTPConfig(DeepSpeedConfigModel): + """ Configure tensor parallelism settings """ + + enabled: bool = True + """ Turn tensor parallelism on/off. """ + + tp_size: int = 1 + """ Number of devices to split the model across using tensor parallelism. """ + + mpu: object = None + """ + A model parallelism unit object that implements + ``get_{model,data}_parallel_{rank,group,world_size}()``. 
+ """ + + tp_group: object = None + + +class DeepSpeedMoEConfig(DeepSpeedConfigModel): + """ Sets parameters for MoE """ + + enabled: bool = True + ep_size: int = 1 + """ + The expert-parallelism size which is used for partitioning the experts + across the GPUs in the expert-parallel group. + """ + + moe_experts: list = Field([1], alias="num_experts") + """ The global number of experts used in an MoE layer. """ + + type: MoETypeEnum = MoETypeEnum.standard + """ + Specify the type of MoE layer. We have two types of MoE layer: 'Standard' + and 'Residual'. + """ + + ep_mp_group: object = None + ep_group: object = Field(None, alias="expert_group") + + +class QuantTypeEnum(str, Enum): + asym = "asymmetric" + sym = "symmetric" + + +class BaseQuantConfig(DeepSpeedConfigModel): + enabled = True + num_bits = 8 + q_type: QuantTypeEnum = QuantTypeEnum.sym + q_groups: int = 1 + + +class WeightQuantConfig(BaseQuantConfig): + enabled = True + + +class ActivationQuantConfig(BaseQuantConfig): + enabled = True + + +class QKVQuantConfig(DeepSpeedConfigModel): + enabled = True + + +class QuantizationConfig(DeepSpeedConfigModel): + enabled: bool = True + activation: ActivationQuantConfig = ActivationQuantConfig() + weight: WeightQuantConfig = WeightQuantConfig() + qkv: QKVQuantConfig = QKVQuantConfig() + + +# todo: brainstorm on how to do ckpt loading for DS inference +class InferenceCheckpointConfig(DeepSpeedConfigModel): + checkpoint_dir: str = None + save_mp_checkpoint_path: str = None + base_dir: str = None + + +class DeepSpeedInferenceConfig(DeepSpeedConfigModel): + """ Sets parameters for DeepSpeed Inference Engine. """ + + replace_with_kernel_inject: bool = Field(False, alias="kernel_inject") + """ + Set to true to inject inference kernels for models such as, Bert, GPT2, + GPT-Neo and GPT-J. 
Otherwise, the injection_dict provides the names of two + linear layers as a tuple: + `(attention_output projection, transformer output projection)` + """ + + dtype: DtypeEnum = torch.float16 + """ + Desired model data type, will convert model to this type. + Supported target types: `torch.half`, `torch.int8`, `torch.float` + """ + + tensor_parallel: DeepSpeedTPConfig = Field({}, alias="tp") + """ + Configuration for tensor parallelism used to split the model across several + GPUs. Expects a dictionary containing values for :any:`DeepSpeedTPConfig`. + """ + + enable_cuda_graph: bool = False + """ + Use this flag for capturing the CUDA-Graph of the inference ops, so that it + can run faster using the graph replay method. + """ + + zero: DeepSpeedZeroConfig = {} + """ + ZeRO configuration to use with the Inference Engine. Expects a dictionary + containing values for :any:`DeepSpeedZeroConfig`. + """ + + triangular_masking: bool = Field(True, alias="tm") + """ + Controls the type of masking for attention scores in transformer layer. + Note that the masking is application specific. + """ + + moe: Union[bool, DeepSpeedMoEConfig] = {} + """ + Specify if the type of Transformer is MoE. Expects a dictionary containing + values for :any:`DeepSpeedMoEConfig`. + """ + + quant: QuantizationConfig = {} + """ + NOTE: only works for int8 dtype. + Quantization settings used for quantizing your model using the MoQ. The + setting can be one element or a tuple. If one value is passed in, we + consider it as the number of groups used in quantization. A tuple is passed + in if we want to mention that there is extra-grouping for the MLP part of a + Transformer layer (e.g. (True, 8) shows we quantize the model using 8 + groups for all the network except the MLP part that we use 8 extra + grouping). Expects a dictionary containing values for + :any:`QuantizationConfig`. 
+ """ + + #todo: refactor the following 3 into the new checkpoint_config + checkpoint: str = None + """ + Path to deepspeed compatible checkpoint or path to JSON with load policy. + """ + + base_dir: str = None + """ + This shows the root directory under which all the checkpoint files exists. + This can be passed through the json config too. + """ + + save_mp_checkpoint_path: str = None + """ + The path for which we want to save the loaded model with a checkpoint. This + feature is used for adjusting the parallelism degree to help alleviate the + model loading overhead. It does not save any new checkpoint if no path is + passed. + """ + + checkpoint_config: InferenceCheckpointConfig = Field({}, alias="ckpt_config") + """ + TODO: Add docs. Expects a dictionary containing values for + :any:`InferenceCheckpointConfig`. + """ + + return_tuple: bool = True + """ + Specify whether or not the transformer layers need to return a tuple or a + Tensor. + """ + + training_mp_size: int = 1 + """ + If loading a checkpoint this is the mp size that it was trained with, it + may be different than what the mp size that you want to use during + inference. + """ + + replace_method: str = Field( + "auto", + deprecated=True, + deprecated_msg= + "This parameter is no longer needed, please remove from your call to DeepSpeed-inference" + ) + + injection_policy: Dict = Field(None, alias="injection_dict") + """ + Dictionary mapping a client nn.Module to its corresponding injection + policy. e.g., `{BertLayer : deepspeed.inference.HFBertLayerPolicy}` + """ + + injection_policy_tuple: tuple = None + """ TODO: Add docs """ + + config: Dict = Field( + None, + alias="args") # todo: really no need for this field if we can refactor + + max_out_tokens: int = Field(1024, alias="max_tokens") + """ + This argument shows the maximum number of tokens inference-engine can work + with, including the input and output tokens. 
Please consider increasing it + to the required token-length required for your use-case. + """ + + mp_size: int = Field(1, deprecated=True, new_param="tensor_parallel.tp_size") + """ + Desired model parallel size, default is 1 meaning no model parallelism. + Deprecated, please use the ``tensor_parallel` config to control model + parallelism. + """ + mpu: object = Field(None, deprecated=True, new_param="tensor_parallel.mpu") + ep_size: int = Field(1, deprecated=True, new_param="moe.ep_size") + ep_group: object = Field(None, + alias="expert_group", + deprecated=True, + new_param="moe.ep_group") + ep_mp_group: object = Field(None, + alias="expert_mp_group", + deprecated=True, + new_param="moe.ep_mp_group") + moe_experts: list = Field([1], deprecated=True, new_param="moe.moe_experts") + moe_type: MoETypeEnum = Field(MoETypeEnum.standard, + deprecated=True, + new_param="moe.type") + + @validator("moe") + def moe_backward_compat(cls, field_value, values): + if isinstance(field_value, bool): + return DeepSpeedMoEConfig(moe=field_value) + return field_value + + class Config: + # Get the str representation of the datatype for serialization + json_encoders = {torch.dtype: lambda x: str(x)} diff --git a/deepspeed/inference/engine.py b/deepspeed/inference/engine.py old mode 100644 new mode 100755 index a37f7c2..123a028 --- a/deepspeed/inference/engine.py +++ b/deepspeed/inference/engine.py @@ -2,22 +2,34 @@ Copyright 2021 The Microsoft DeepSpeed Team ''' import torch +import time import os + +from deepspeed import comm as dist +from deepspeed.utils.logging import log_dist + from torch.nn.modules import Module -import torch.distributed as dist +from packaging import version as pkg_version +from deepspeed.runtime.checkpoint_engine.torch_checkpoint_engine import TorchCheckpointEngine +from deepspeed.utils.timer import SynchronizedWallClockTimer + from ..runtime.state_dict_factory import SDLoaderFactory from ..runtime.weight_quantizer import WeightQuantization -from 
..module_inject.replace_module import replace_transformer_layer -from ..utils import logger, init_distributed - +from ..module_inject import replace_transformer_layer, generic_injection +from ..comm.comm import init_distributed from ..pipe import PipelineModule from ..moe.utils import has_moe_layers -from ..moe.layer import MoE +from ..module_inject import LinearAllreduce, LinearLayer, Normalize, ReplaceWithTensorSlicing +from deepspeed.accelerator import get_accelerator +from ..module_inject.policy import TransformerPolicy +from ..module_inject.auto_tp import AutoTP -import torch.distributed as dist -import deepspeed.utils.groups as groups +from ..module_inject.replace_policy import generic_policies DS_INFERENCE_ENABLED = False +from torch import nn + +INFERENCE_MODEL_TIMER = "model-forward-inference" class InferenceEngine(Module): @@ -25,42 +37,11 @@ class InferenceEngine(Module): inference_ep_group = None expert_mp_group = None - def __init__(self, - model, - triangular_masking=True, - mp_size=1, - training_mp_size=1, - ep_size=1, - mpu=None, - ep_group=None, - expert_mp_group=None, - checkpoint=None, - dtype=None, - injection_dict=None, - return_tuple=True, - replace_method='auto', - quantization_setting=None, - replace_with_kernel_inject=False, - moe=False, - moe_experts=1, - moe_type='standard', - config=None): + def __init__(self, model, config): """ Args: model: torch.nn.Module - mp_size: model-parallel size - mpu: model-parallel unit (used for Megatron-type models) - checkpoint: the json-path, showing the address of model-checkpoints - Example: {type: 'Megatron', 'checkpoints': [ckpt_mp0.pt, ckpt_mp1.pt], 'version': 1.0} - dtype: data-type by which inference is executed - injection_dict: the dictionary that shows the injection policy: - Example: {BertLayer: HFBertLayerPolicy} - return_tuple: if true, inference-API returns a tuple, otherwise a tensor - replace_method: the injection method, this can be passed as auto if no injection-policy is defined, in 
which case the injection is automatic based on the available policies - quantization_setting: - one of None, Tuple(mlp_extra_grouping, quantize_groups), quantize_groups - replace_with_kernel_inject: this flag need to be set to true to inject inference kernels for models such as, Bert, GPT2, GPT-Neo and GPT-J. Otherwise, - the injection_dict provides the names of two linear layers as a tuple: (attention_output projection, transformer output projection) + config: DeepSpeedInferenceConfig """ global DS_INFERENCE_ENABLED DS_INFERENCE_ENABLED = True @@ -68,90 +49,165 @@ class InferenceEngine(Module): super().__init__() self.module = model + self._config = config + + self._get_model_config_generate(config) # keep for weird backward compatibility - self._get_model_config_generate(config) + # patch model generate with ours if model uses it + if hasattr(self.module, "generate"): + self.generate = self._generate - self.mp_world_size = mp_size - self.checkpoint = checkpoint - self.dtype = dtype - self.injection_dict = injection_dict - self.mp_group = None - self.mpu = mpu - self._validate_args(mpu) - self.replace_method = replace_method + if hasattr(self.module, "config"): + TransformerPolicy.hf_model_config = self.module.config + + # todo: keep this self.injection_dict because we don't use to change config.injection_policy API + # todo: this will get changed when Molly's PR on auto injection dict is merged + self.injection_dict = config.injection_policy + + # todo: refactor the mp_group and mp_size related in the next refactor + self.mp_group = config.tensor_parallel.tp_group + self.mpu = config.tensor_parallel.mpu + + #self._validate_args(self.mpu, config.replace_with_kernel_inject) self.quantize_merge_count = 1 self.quantization_scales = None - self.triangular_masking = triangular_masking - self.ep_size = ep_size - self.ep_group = ep_group - self.expert_mp_group = expert_mp_group - self._init_quantization_setting(quantization_setting) + # these are not needed in the config 
as we are creating them ourselves in the inference engine + self.ep_group = None # config.moe.ep_group + self.expert_mp_group = None # config.moe.ep_mp_group + + self.cuda_graph_created = False + self.checkpoint_engine = TorchCheckpointEngine() + quantization_setting = None + self._init_quantization_setting( + quantization_setting + ) # todo: update with the new quant config for weight quant + self.model_profile_enabled = False + self._model_times = [] + + # This is a hack to remove the prepare_mask function on HF side for BLOOM architecture + self.remove_mask_prepare_for_bloom() - if self.checkpoint: - self._load_checkpoint(self.checkpoint) + if get_accelerator().device_name() == 'cuda' and config.enable_cuda_graph: + assert pkg_version.parse(torch.__version__) >= pkg_version.parse("1.10"), \ + "If you want to use cuda graph, please upgrade torch to at least v1.10" + + if config.checkpoint and not config.replace_with_kernel_inject: + self._load_checkpoint(config.checkpoint) # convert model to intended dtype - if self.dtype: - self._convert_to_dtype() + if config.dtype: + self._convert_to_dtype(config) if self.mpu: - self.mp_world_size = dist.get_world_size( + config.tensor_parallel.tp_size = dist.get_world_size( group=self.mpu.get_model_parallel_group()) - self.mp_group = mpu.get_model_parallel_group() - elif self.mp_world_size > 1: - self._create_model_parallel_group() + self.mp_group = self.mpu.get_model_parallel_group() + elif config.tensor_parallel.tp_size > 1: + self._create_model_parallel_group(config) + config.tensor_parallel.tp_group = self.mp_group - moe, _ = has_moe_layers(self.module) + if isinstance(self.module, torch.nn.Module): + moe, _ = has_moe_layers(self.module) + else: + moe = False if moe and dist.get_world_size() > 1: - self._create_ep_parallel_group(moe_experts) + self._create_ep_parallel_group(config.moe.moe_experts) + + # retain this from the old conditional argument being passed to apply_injection_policy() + if not 
config.replace_with_kernel_inject: + config.checkpoint = None + # We only support three modes: 1) user specified policy for tensor-parallelism, 2) kernel injection (replace_with_kernel_inject), and 3) automatic tensor parallelism. if self.injection_dict: + # 1. User specified Tensor Parallelism + assert not config.replace_with_kernel_inject, "Cannot use both user specified injection policy and kernel injection" for client_module, injection_policy in self.injection_dict.items(): - self._apply_injection_policy(client_module, - injection_policy, - return_tuple, - replace_with_kernel_inject, - moe, - moe_experts, - moe_type, - training_mp_size) - elif replace_method == 'auto': - self._apply_injection_policy( - return_tuple=return_tuple, - replace_with_kernel_inject=replace_with_kernel_inject, - moe=moe, - moe_experts=moe_experts, - moe_type=moe_type, - training_mp_size=training_mp_size) - - device = torch.cuda.current_device() - logger.info(f"Place model to device: {device}") + # construct the tuple and pass that instead of a string or dict. + if isinstance(injection_policy, str): + config.injection_policy_tuple = (injection_policy, ) + else: + config.injection_policy_tuple = injection_policy + self._apply_injection_policy(config, client_module) + else: + if config.replace_with_kernel_inject: + # 2. DeepSpeed Kernel Injection + self._apply_injection_policy(config) + else: + # 3. 
Automatic Tensor Parallelism + parser_dict = AutoTP.tp_parser(model) + print("AutoTP: ", parser_dict) + for client_module, injection_policy in parser_dict: + if isinstance(injection_policy, str): + config.injection_policy_tuple = (injection_policy, ) + else: + config.injection_policy_tuple = injection_policy + self._apply_injection_policy(config, client_module) + + device = get_accelerator().current_device_name() self.module.to(device) - if self.mp_world_size > 1: - self.model_orig_fwd = self.module.forward - self.module.forward = self.forward - else: + if config.tensor_parallel.tp_size > 1: + _rng_state = get_accelerator().get_rng_state().to( + get_accelerator().current_device_name()) + dist.broadcast(_rng_state, 0) + get_accelerator().set_rng_state(_rng_state.cpu()) + + if config.tensor_parallel.tp_size > 1: + assert not config.enable_cuda_graph, "Cuda graph is not supported for model parallelism" + + # Check if local CUDA graphs can be created in replacement modules + self.local_cuda_graph = self._local_cuda_graph_used(self.module) + + def profile_model_time(self, use_cuda_events=True): + if not self.model_profile_enabled and not self._config.enable_cuda_graph: self.module.register_forward_pre_hook(self._pre_forward_hook) + self.module.register_forward_hook(self._post_forward_hook) + self.model_profile_enabled = True + self.use_cuda_events = use_cuda_events + if self.use_cuda_events: + self.timers = SynchronizedWallClockTimer() + # todo: remove this once all the config dicts are centralized from top level pydantic config def _get_model_config_generate(self, config): - self.config = getattr(self.module, 'config', None) if config is None else config - self.generate = getattr(self.module, 'generate', None) + # this is being passed to replace_transformer_layer(config=self.user_model_config_dict) + self.config = getattr(self.module, + 'config', + None) if config.config is None else config.config + + def remove_mask_prepare_for_bloom(self): + if hasattr(self.module, 
'transformer'): + if hasattr(self.module.transformer, '_prepare_attn_mask'): + self.module.transformer._prepare_attn_mask = lambda attention_mask, *args, **kwargs: attention_mask + + def _pre_forward_hook(self, module, *inputs, **kwargs): + if self.use_cuda_events: + self.timers(INFERENCE_MODEL_TIMER).start() + else: + get_accelerator().synchronize() + self._start = time.time() + + def _post_forward_hook(self, module, input, output): + if self.use_cuda_events: + self.timers(INFERENCE_MODEL_TIMER).stop() + elapsed_time = self.timers(INFERENCE_MODEL_TIMER).elapsed(reset=True) + else: + get_accelerator().synchronize() + self._end = time.time() + elapsed_time = self._end - self._start + self._model_times.append(elapsed_time) - def _create_model_parallel_group(self): + def _create_model_parallel_group(self, config): # Call the init process if InferenceEngine.inference_mp_group is None: init_distributed() - local_rank = int(os.getenv('LOCAL_RANK', '0')) - torch.cuda.set_device(local_rank) + get_accelerator().set_device(local_rank) - ranks = [i for i in range(self.mp_world_size)] + ranks = [i for i in range(config.tensor_parallel.tp_size)] self.mp_group = dist.new_group(ranks) InferenceEngine.inference_mp_group = self.mp_group - else: self.mp_group = InferenceEngine.inference_mp_group @@ -194,66 +250,121 @@ class InferenceEngine(Module): self.quantize_groups = quantization_setting elif quantization_setting is not None: self.quantize_groups = quantization_setting - logger.info(f"quantize_bits = {self.quantize_bits} " - f"mlp_extra_grouping = {self.mlp_extra_grouping}, " - f"quantize_groups = {self.quantize_groups}") - - def _validate_args(self, mpu): - if not isinstance(self.module, Module): + log_dist( + f"quantize_bits = {self.quantize_bits} " + f"mlp_extra_grouping = {self.mlp_extra_grouping}, " + f"quantize_groups = {self.quantize_groups}", + [0]) + + # TODO: remove this function and add this functionality to pydantic config checking + def _validate_args(self, mpu, 
replace_with_kernel_inject): + # TODO: to support SD pipeline we need to avoid this check for now + if replace_with_kernel_inject and not isinstance(self.module, Module): raise ValueError(f"model must be a torch.nn.Module, got {type(self.module)}") - if not isinstance(self.mp_world_size, int) or self.mp_world_size < 1: - raise ValueError(f"mp_size must be an int >= 1, got {self.mp_world_size}") + if not isinstance(self._config.tensor_parallel.tp_size, + int) or self._config.tensor_parallel.tp_size < 1: + raise ValueError( + f"mp_size must be an int >= 1, got {self._config.tensor_parallel.tp_size}" + ) if mpu: methods = ["get_model_parallel_group", "get_data_parallel_group"] for method in methods: if not hasattr(mpu, method): raise ValueError(f"mpu is missing {method}") - if self.checkpoint is not None and not isinstance(self.checkpoint, str): + if self._config.checkpoint is not None and not isinstance( + self._config.checkpoint, + (str, + dict)): raise ValueError( - f"checkpoint must be None or a str, got {type(self.checkpoint)}") + f"checkpoint must be None, str or dict, got {type(self._config.checkpoint)}" + ) supported_dtypes = [None, torch.half, torch.int8, torch.float] - if self.dtype not in supported_dtypes: + if self._config.dtype not in supported_dtypes: raise ValueError( - f"{self.dtype} not supported, valid dtype: {supported_dtypes}") + f"{self._config.dtype} not supported, valid dtype: {supported_dtypes}") if self.injection_dict is not None and not isinstance(self.injection_dict, dict): raise ValueError( f"injection_dict must be None or a dict, got: {self.injection_dict}") - def _apply_injection_policy(self, - client_module=None, - injection_policy=None, - return_tuple=True, - replace_with_kernel_inject=False, - moe=False, - moe_experts=1, - moe_type='standard', - training_mp_size=1): - - replace_transformer_layer(client_module, - self.module, - triangular_masking=self.triangular_masking, - policy=injection_policy, - mp_size=self.mp_world_size, - 
mp_group=self.mp_group, - ep_group=self.ep_group, - expert_mp_group=self.expert_mp_group, - config=self.config, - fp16=(self.dtype == torch.half), - training=False, - return_tuple=return_tuple, - quantize=(self.dtype == torch.int8), - quantize_settings=(self.quantization_scales, - self.quantize_merge_count, - self.mlp_extra_grouping, - self.quantize_groups), - replace_with_kernel_inject=replace_with_kernel_inject, - moe=moe, - moe_experts=moe_experts, - moe_type=moe_type, - training_mp_size=training_mp_size) + def load_model_with_checkpoint(self, r_module): + self.mp_replace = ReplaceWithTensorSlicing( + mp_group=self.mp_group, + mp_size=self._config.tensor_parallel.tp_size) #, out_dim=0, in_dim=1) + error_msgs = [] + + def load(module, state_dict, prefix): + args = (state_dict, prefix, {}, True, [], [], error_msgs) + if hasattr(module, 'weight'): + if 'query_key_value' in prefix: + module.weight = self.mp_replace.qkv_copy( + module.weight.data, + state_dict[prefix + 'weight']) + else: + module.weight = self.mp_replace.copy(module.weight.data, + state_dict[prefix + 'weight']) + else: + module.norm.weight = self.mp_replace.copy(module.norm.weight.data, + state_dict[prefix + 'weight']) + if prefix + 'bias' in self.key_list: + if hasattr(module, 'norm'): + module.norm.bias = self.mp_replace.copy(module.norm.bias, + state_dict[prefix + 'bias']) + else: + data = state_dict[prefix + 'bias'] + data = data.to(get_accelerator().current_device_name()) + module.bias = self.mp_replace.copy(module.bias, data) + + layer_policies = { + nn.Linear: load, + nn.Embedding: load, + nn.LayerNorm: load, + LinearLayer: load, + LinearAllreduce: load + } + + def load_module_recursive(module, prefix='', level=0): + for name, child in module.named_children(): + if child.__class__ in layer_policies: + checking_key = prefix + name + '.' 
+ if not any(checking_key in item for item in self.key_list): + continue + if len(list(child.parameters())) > 0 and list( + child.parameters())[0].numel() == 0: + if len(child.weight.ds_shape) == 1: + child = Normalize(dim=child.weight.ds_shape[-1], + dtype=child.weight.dtype, + eps=child.eps) + setattr(module, name, child) + load(child, self.sd, prefix + name + '.') + else: + load_module_recursive(child, + prefix if level == 0 else prefix + name + '.', + level + 1) + + load_module_recursive(r_module) + + def _apply_injection_policy(self, config, client_module=None): + # client_module is only passed when using the injection_dict method. + checkpoint_dir = config.checkpoint + checkpoint = SDLoaderFactory.get_sd_loader_json( + checkpoint_dir, + self.checkpoint_engine) if checkpoint_dir is not None else None + + generic_injection(self.module, + fp16=(config.dtype == torch.half) + or (config.dtype == torch.int8), + enable_cuda_graph=config.enable_cuda_graph) + + if isinstance(self.module, torch.nn.Module): + # config is our DeepSpeedInferenceConfig and self.config is the HF model config + replace_transformer_layer(client_module, + self.module, + checkpoint, + config, + self.config) def _get_all_ckpt_names(self, checkpoints_path, tag): ckpt_file_pattern = self._get_ckpt_name(checkpoints_path, @@ -283,7 +394,7 @@ class InferenceEngine(Module): if is_pipe_parallel: raise RuntimeError( 'pipeline parallelism is currently not supported in inference.') - if os.path.isdir(load_dir): + if not isinstance(load_dir, dict) and os.path.isdir(load_dir): if tag is None: latest_path = os.path.join(load_dir, "latest") if os.path.isfile(latest_path): @@ -291,38 +402,54 @@ class InferenceEngine(Module): tag = fd.read().strip() ckpt_list = self._get_all_ckpt_names(load_dir, tag) - sd_loader = SDLoaderFactory.get_sd_loader(ckpt_list) + sd_loader = SDLoaderFactory.get_sd_loader(ckpt_list, self.checkpoint_engine) else: - sd_loader = SDLoaderFactory.get_sd_loader_json(load_dir) - - mp_rank = 0 
if self.mpu is None else self.mpu.get_model_parallel_rank() - - load_path, checkpoint, quantize_config = sd_loader.load(self.mp_world_size, - mp_rank, - is_pipe_parallel=is_pipe_parallel, - quantize=(self.dtype is torch.int8), - quantize_groups=self.quantize_groups, - mlp_extra_grouping=self.mlp_extra_grouping) - - self.quantization_scales, self.quantize_merge_count = quantize_config - - moe, _ = has_moe_layers(self.module) - if moe: - from deepspeed.runtime.engine import DeepSpeedEngine - old_moe_load = False - if not isinstance(checkpoint['num_experts'], list): - old_moe_load = True - DeepSpeedEngine.load_moe_state_dict( - load_dir, - tag, - state_dict=checkpoint[self._choose_module_key(checkpoint)], - old_moe_load=old_moe_load, - model=self.module, - mpu=self.mpu) + sd_loader = SDLoaderFactory.get_sd_loader_json(load_dir, + self.checkpoint_engine) + + if type(sd_loader) is list: + self.sd = torch.load(sd_loader[0], map_location='cpu') + self.key_list = list(self.sd.keys()) + + self.load_model_with_checkpoint(self.module) + + for i in range(1, len(sd_loader)): + if not dist.is_initialized() or dist.get_rank() == 0: + print(f"loading checkpoint ({i})") + self.sd = torch.load(sd_loader[i], + map_location=get_accelerator().device_name()) + self.key_list = list(self.sd.keys()) + self.load_model_with_checkpoint(self.module) + else: + mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank() - self.module.load_state_dict( - state_dict=checkpoint[self._choose_module_key(checkpoint)], - strict=load_module_strict) + load_path, checkpoint, quantize_config = sd_loader.load(self._config.tensor_parallel.tp_size, + mp_rank, + is_pipe_parallel=is_pipe_parallel, + quantize=(self._config.dtype is torch.int8), + quantize_groups=self.quantize_groups, + mlp_extra_grouping=self.mlp_extra_grouping) + + self.quantization_scales, self.quantize_merge_count = quantize_config + + moe, _ = has_moe_layers(self.module) + if moe: + from deepspeed.runtime.engine import 
DeepSpeedEngine + old_moe_load = False + if not isinstance(checkpoint['num_experts'], list): + old_moe_load = True + DeepSpeedEngine.load_moe_state_dict( + load_dir, + tag, + state_dict=checkpoint[self._choose_module_key(checkpoint)], + old_moe_load=old_moe_load, + model=self.module, + mpu=self.mpu, + checkpoint_engine=self.checkpoint_engine) + + self.module.load_state_dict( + state_dict=checkpoint[self._choose_module_key(checkpoint)], + strict=load_module_strict) def _choose_module_key(self, sd): assert not ('module' in sd and 'model' in sd), "checkpoint has both 'model' and 'module' keys, not sure how to proceed" @@ -332,25 +459,84 @@ class InferenceEngine(Module): elif 'model' in sd: return 'model' - def _convert_to_dtype(self): - if self.dtype is torch.int8 and self.quantization_scales is None: + def _convert_to_dtype(self, config): + if not isinstance(self.module, torch.nn.Module): + return + + if False: #config.dtype is torch.int8 and self.quantization_scales is None: quantizer = WeightQuantization(mlp_extra_grouping=self.mlp_extra_grouping) model, self.quantization_scales = quantizer.model_quantize(self.module, self.injection_dict, self.quantize_bits, self.quantize_groups) - elif self.dtype == torch.half: + elif config.dtype == torch.half: self.module.half() - elif self.dtype == torch.float: + elif config.dtype == torch.bfloat16: + self.module.bfloat16() + elif config.dtype == torch.float: self.module.float() - def _pre_forward_hook(self, module, *inputs, **kwargs): - for input in inputs: - if torch.is_tensor(input): - input = input.to(torch.cuda.current_device()) + def _create_cuda_graph(self, *inputs, **kwargs): + # warmup to create the workspace and cublas handle + cuda_stream = get_accelerator().Stream() + cuda_stream.wait_stream(get_accelerator().current_stream()) + with get_accelerator().stream(cuda_stream): + for i in range(3): + ret = self.module(*inputs, **kwargs) + get_accelerator().current_stream().wait_stream(cuda_stream) + + # create cuda_graph 
and assign static_inputs and static_outputs + self._cuda_graphs = torch.cuda.CUDAGraph() + self.static_inputs = inputs + self.static_kwargs = kwargs + + with torch.cuda.graph(self._cuda_graphs): + self.static_output = self.module(*self.static_inputs, **self.static_kwargs) + + self.cuda_graph_created = True + + def _graph_replay(self, *inputs, **kwargs): + for i in range(len(inputs)): + if torch.is_tensor(inputs[i]): + self.static_inputs[i].copy_(inputs[i]) for k in kwargs: if torch.is_tensor(kwargs[k]): - kwargs[k] = kwargs[k].to(torch.cuda.current_device()) + self.static_kwargs[k].copy_(kwargs[k]) + self._cuda_graphs.replay() + return self.static_output + + def model_times(self): + assert self.model_profile_enabled, "model profiling is not enabled" + model_times = self._model_times + if self._config.enable_cuda_graph and len(self._model_times) == 0: + raise ValueError( + "Model times are empty and cuda graph is enabled. If " + "this is a GPT-style model this combo is not supported. If this is a " + "BERT-style model this is a bug, please report it. 
" + f"Model type is: {type(self.module)}") + self._model_times = [] + return model_times + + def _module_match(self, module): + for policy in generic_policies: + policy = policy() + if policy.match_replaced(module): + return True + return False + + def _local_cuda_graph_used(self, module): + if isinstance(module, torch.nn.Module): + return False + else: + sub_module_cuda_graph = False + for name in module.__dict__.keys(): + sub_module = getattr(module, name) + + if self._module_match(sub_module) and hasattr(sub_module, + "enable_cuda_graph"): + sub_module_cuda_graph = True + + return sub_module_cuda_graph def forward(self, *inputs, **kwargs): """Execute forward propagation @@ -359,22 +545,44 @@ class InferenceEngine(Module): *inputs: Variable length input list **kwargs: variable length keyword arguments """ - if self.mp_world_size > 1: - if self.mpu is None: - for input in inputs: - if torch.is_tensor(input): - input = input.to(torch.cuda.current_device()) - if not input.is_contiguous(): - input = input.contiguous() - dist.broadcast(input, 0) - for k in kwargs: - if torch.is_tensor(kwargs[k]): - kwargs[k] = kwargs[k].to(torch.cuda.current_device()) - if not kwargs[k].is_contiguous(): - kwargs[k] = kwargs[k].contiguous() - dist.broadcast(kwargs[k], 0) - - outputs = self.model_orig_fwd(*inputs, **kwargs) + start = None + if self.model_profile_enabled and get_accelerator().device_name( + ) == 'cuda' and self._config.enable_cuda_graph: + get_accelerator().synchronize() + start = time.time() + + if get_accelerator().device_name( + ) == 'cuda' and self._config.enable_cuda_graph and not self.local_cuda_graph: + if self.cuda_graph_created: + outputs = self._graph_replay(*inputs, **kwargs) + else: + self._create_cuda_graph(*inputs, **kwargs) + outputs = self._graph_replay(*inputs, **kwargs) else: outputs = self.module(*inputs, **kwargs) + + if self.model_profile_enabled and self._config.enable_cuda_graph: + get_accelerator().synchronize() + duration = time.time() - start + 
self._model_times.append(duration) + return outputs + + def _generate(self, *inputs, **kwargs): + # Reset KV-cache at the beginning of generate + if hasattr(self.module, 'reset_cache'): + self.module.reset_cache() + num_beams = 1 + if "generation_config" in kwargs: + gen_config = kwargs["generation_config"] + num_beams = getattr(gen_config, "num_beams", 1) + if "num_beams" in kwargs: + num_beams = kwargs["num_beams"] + + if num_beams > 1: + raise NotImplementedError( + "DeepSpeed does not support `num_beams` > 1, if this is important to you please " + "add your request to: https://github.com/microsoft/DeepSpeed/issues/2506" + ) + + return self.module.generate(*inputs, **kwargs) diff --git a/deepspeed/launcher/__init__.py b/deepspeed/launcher/__init__.py index e69de29..fcb45ab 100644 --- a/deepspeed/launcher/__init__.py +++ b/deepspeed/launcher/__init__.py @@ -0,0 +1 @@ +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/deepspeed/launcher/constants.py b/deepspeed/launcher/constants.py index 15b2623..09dd1bd 100644 --- a/deepspeed/launcher/constants.py +++ b/deepspeed/launcher/constants.py @@ -4,6 +4,9 @@ PDSH_LAUNCHER = 'pdsh' PDSH_MAX_FAN_OUT = 1024 OPENMPI_LAUNCHER = 'openmpi' - +MPICH_LAUNCHER = 'mpich' +SLURM_LAUNCHER = 'slurm' MVAPICH_LAUNCHER = 'mvapich' MVAPICH_TMP_HOSTFILE = '/tmp/deepspeed_mvapich_hostfile' + +ELASTIC_TRAINING_ID_DEFAULT = "123456789" diff --git a/deepspeed/launcher/launch.py b/deepspeed/launcher/launch.py old mode 100644 new mode 100755 index 21ef04b..d9e6fda --- a/deepspeed/launcher/launch.py +++ b/deepspeed/launcher/launch.py @@ -1,6 +1,6 @@ # Copyright 2020 The Microsoft DeepSpeed Team """ -DeepSpeed launcher, this is similar to torch.distributed.launch but supports +DeepSpeed launcher, this is similar to torch's distributed.launch but supports additional features such as arbitrary gpu exclusion. 
deepspeed.launcher.launch is intended to be run on a single worker node and @@ -15,11 +15,15 @@ import json import base64 import time import signal +import psutil from collections import defaultdict +from typing import Dict from argparse import ArgumentParser, REMAINDER - from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT +from ..nebula.constants import DLTS_POD_ENV_PATH from ..utils import logger +from ..elasticity import is_torch_elastic_compatible +from .constants import ELASTIC_TRAINING_ID_DEFAULT PID_FILE_BASEPATH = "/tmp" @@ -64,6 +68,20 @@ def parse_args(): help="Skip prepending the training script with " "'python' - just execute it directly.") + parser.add_argument("--enable_elastic_training", + action="store_true", + help="Enable elastic training support.") + + parser.add_argument("--min_elastic_nodes", + type=int, + default=-1, + help="Min number of nodes in elastic training.") + + parser.add_argument("--max_elastic_nodes", + type=int, + default=-1, + help="Max number of nodes in elastic training.") + parser.add_argument("--no_local_rank", action="store_true", help="Do not pass local_rank as an argument when calling " @@ -74,6 +92,12 @@ def parse_args(): default=0, help="main launching process pid, for internal pid tracking") + parser.add_argument( + "--enable_each_rank_log", + default="None", + type=str, + help="redirect the stdout and stderr from each rank into different log files") + # positional parser.add_argument("training_script", type=str, @@ -87,6 +111,21 @@ def parse_args(): return parser.parse_args() +# Adapted from https://psutil.readthedocs.io/en/latest/#kill-process-tree +def terminate_process_tree(pid): + process = psutil.Process(pid) + children = process.children(recursive=True) + children.append(process) + for child in children: + try: + child.terminate() + except psutil.NoSuchProcess: + pass + gone, alive = psutil.wait_procs(children, timeout=30) + for p in alive: + p.kill() + + def main(): args = parse_args() current_env = 
os.environ.copy() @@ -143,15 +182,93 @@ def main(): with open(pid_file, 'w') as fd: fd.write(f"{launcher_pid}") + if not is_torch_elastic_compatible(): + if args.enable_elastic_training: + logger.info(f"Disabling elastic training support as \ + PyTorch version should be greater than 1.11.x") + args.enable_elastic_training = False + + if os.path.exists(DLTS_POD_ENV_PATH): + with open(DLTS_POD_ENV_PATH) as file: + lines = file.readlines() + lines = [line.rstrip() for line in lines] + for line in lines: + if line.startswith('export FC_TASKROLE_NAME') or line.startswith( + 'export FC_TASK_INDEX'): + key_val = line.split()[1] + key, val = key_val.split('=') + current_env[key] = val + processes = [] cmd = [] - for local_rank in range(0, num_local_procs): - # each process's rank - dist_rank = global_rank_mapping[local_node][local_rank] - current_env["RANK"] = str(dist_rank) - current_env["LOCAL_RANK"] = str(local_rank) - # spawn the processes + if not args.enable_elastic_training: + if args.enable_each_rank_log != "None": + # prepare the log path and the file name prefix + if os.path.isfile(args.enable_each_rank_log): + raise ValueError( + f"{args.enable_each_rank_log} should not be a file, it should be a directory." + ) + if not os.path.exists(args.enable_each_rank_log): + try: + os.makedirs(args.enable_each_rank_log) + except Exception as e: + print(e) + raise ValueError( + f"unable to create directory {args.enable_each_rank_log} for each rank log." 
+ ) + log_name_prefix = time.strftime("%Y%m%d%H%M%S", time.localtime()) + + for local_rank in range(0, num_local_procs): + # each process's rank + dist_rank = global_rank_mapping[local_node][local_rank] + current_env["RANK"] = str(dist_rank) + current_env["LOCAL_RANK"] = str(local_rank) + + # spawn the processes + cmd = [] + if not args.no_python: + cmd = [sys.executable, "-u"] + if args.module: + cmd.append("-m") + else: + if args.module: + raise ValueError("Don't use both the '--no_python' flag" + " and the '--module' flag at the same time.") + cmd.append(args.training_script) + # A user may not want to pass local_rank as a keyword arg so we make this optional. + if not args.no_local_rank: + cmd.append(f"--local_rank={local_rank}") + cmd += args.training_script_args + + if args.enable_each_rank_log != "None": + log_file = os.path.join(args.enable_each_rank_log, + f"{log_name_prefix}_rank{dist_rank}.log") + log_fd = open(log_file, 'w') + process = subprocess.Popen(cmd, + env=current_env, + stdout=log_fd, + stderr=log_fd) + else: + process = subprocess.Popen(cmd, env=current_env) + + processes.append(process) + else: + from ..elasticity import DSElasticAgent + from torch.distributed.elastic.rendezvous import RendezvousParameters + from torch.distributed.elastic.agent.server.api import WorkerSpec + import torch.distributed.elastic.rendezvous.registry as rdzv_registry + from torch.distributed.elastic.multiprocessing import Std + + if args.min_elastic_nodes == -1: + args.min_elastic_nodes = 1 + if args.max_elastic_nodes == -1: + args.max_elastic_nodes = args.nnodes + assert args.max_elastic_nodes > 0 and args.min_elastic_nodes > 0 , "Max and Min nodes should be positive" + + current_env["NCCL_ASYNC_ERROR_HANDLING"] = str(1) + + # Get config and arguments cmd = [] if not args.no_python: cmd = [sys.executable, "-u"] @@ -162,13 +279,36 @@ def main(): raise ValueError("Don't use both the '--no_python' flag" " and the '--module' flag at the same time.") 
cmd.append(args.training_script) - # A user may not want to pass local_rank as a keyword arg so we make this optional. - if not args.no_local_rank: - cmd.append(f"--local_rank={local_rank}") cmd += args.training_script_args - - process = subprocess.Popen(cmd, env=current_env) - processes.append(process) + cmd_args = cmd[1:] + + rdzv_configs: Dict[str, str] = {'timeout': 100} + run_id = os.environ.get("ELASTIC_RUN_ID", ELASTIC_TRAINING_ID_DEFAULT) + + # Creating config for rendezvous class + rdzv_parameters = RendezvousParameters(backend='c10d', + endpoint=args.master_addr + ":" + + str(args.master_port), + run_id=run_id, + min_nodes=args.min_elastic_nodes, + max_nodes=args.max_elastic_nodes, + **rdzv_configs) + + spec = WorkerSpec( + role='trainer', + local_world_size=num_local_procs, + entrypoint=cmd[0], + args=cmd[1:], + rdzv_handler=rdzv_registry.get_rendezvous_handler(rdzv_parameters), + max_restarts=100, + monitor_interval=5, + redirects=Std.from_str("0"), + tee=Std.from_str("0"), + master_addr=None, + master_port=None, + ) + agent = DSElasticAgent(spec, current_env) + agent.run() sig_names = {2: "SIGINT", 15: "SIGTERM"} last_return_code = None @@ -177,7 +317,7 @@ def main(): for process in processes: logger.info(f"Killing subprocess {process.pid}") try: - process.kill() + terminate_process_tree(process.pid) except Exception: pass if last_return_code is not None: diff --git a/deepspeed/launcher/multinode_runner.py b/deepspeed/launcher/multinode_runner.py index a962a8a..6027d10 100644 --- a/deepspeed/launcher/multinode_runner.py +++ b/deepspeed/launcher/multinode_runner.py @@ -1,11 +1,13 @@ +'''Copyright The Microsoft DeepSpeed Team''' + import os import sys import shutil import subprocess import warnings -from shlex import quote +from shlex import split from abc import ABC, abstractmethod - +from deepspeed.accelerator import get_accelerator from ..utils import logger from .constants import PDSH_MAX_FAN_OUT, MVAPICH_TMP_HOSTFILE @@ -66,7 +68,14 @@ class 
PDSHRunner(MultiNodeRunner): # PDSH flags for max node fan out and specific hosts to launch on # See https://linux.die.net/man/1/pdsh for flag details - pdsh_cmd_args = ['pdsh', '-f', str(PDSH_MAX_FAN_OUT), '-w', active_workers] + pdsh_cmd_args = [ + 'pdsh', + '-S', + '-f', + str(PDSH_MAX_FAN_OUT), + '-w', + active_workers + ] + split(self.args.launcher_args) exports = "" for key, val in self.exports.items(): @@ -94,8 +103,16 @@ class PDSHRunner(MultiNodeRunner): deepspeed_launch.append("--no_local_rank") if self.args.save_pid: deepspeed_launch += ["--save_pid", f"{os.getpid()}"] + if self.args.elastic_training: + deepspeed_launch.append("--enable_elastic_training") + deepspeed_launch.append(f"--max_elastic_nodes={self.args.max_elastic_nodes}") + deepspeed_launch.append(f"--min_elastic_nodes={self.args.min_elastic_nodes}") + + cmd_to_search = [i + "\\" for i in deepspeed_launch[2:6]] + + kill_command = pdsh_cmd_args + ["pkill -f ", " ".join(cmd_to_search)[:-2]] return pdsh_cmd_args + deepspeed_launch + [self.user_script - ] + self.user_arguments + ] + self.user_arguments, kill_command class OpenMPIRunner(MultiNodeRunner): @@ -137,7 +154,7 @@ class OpenMPIRunner(MultiNodeRunner): '--mca', 'btl_tcp_if_include', 'eth0', - ] + ] + split(self.args.launcher_args) export_cmd = [] for k, v in self.exports.items(): @@ -153,6 +170,102 @@ class OpenMPIRunner(MultiNodeRunner): ] + self.user_arguments +class MPICHRunner(MultiNodeRunner): + def __init__(self, args, world_info_base64, resource_pool): + super().__init__(args, world_info_base64) + self.resource_pool = resource_pool + + def backend_exists(self): + #TODO: if IB is available we should suggestion mpich + return shutil.which('mpirun') #mpich_info + + @property + def name(self): + return "mpich" + + def validate_args(self): + super().validate_args() + #TODO: Allow for include/exclude at node-level but not gpu-level + if self.args.include != "" or self.args.exclude != "": + raise ValueError( + f"{self.name} backend does 
not support worker include/exclusion") + + if self.args.num_nodes != -1 or self.args.num_gpus != -1: + raise ValueError( + f"{self.name} backend does not support limiting num nodes/gpus") + + def get_cmd(self, environment, active_resources): + devices_per_node = self.resource_pool.values() + total_process_count = sum(devices_per_node) + process_per_node = list(devices_per_node)[0] + + mpirun_cmd = [ + 'mpirun', + '-n', + f'{total_process_count}', + '-ppn', + f'{process_per_node}', + ] + split(self.args.launcher_args) + export_cmd = [] + + for k, v in self.exports.items(): + export_cmd += ['-x', "{}={}".format(k, v)] + + python_exec = [] + if not self.args.no_python: + python_exec = [sys.executable, "-u"] + if self.args.module: + python_exec.append("-m") + return mpirun_cmd + python_exec + [self.user_script] + self.user_arguments + + +class SlurmRunner(MultiNodeRunner): + def __init__(self, args, world_info_base64, resource_pool): + super().__init__(args, world_info_base64) + self.resource_pool = resource_pool + + def backend_exists(self): + return shutil.which('sinfo') + + @property + def name(self): + return 'slurm' + + def get_cmd(self, environment, active_resources): + assert not getattr(self.args, 'detect_nvlink_pairs', False), "slurm backend does not support remapping visible devices" + total_process_count = sum(self.resource_pool.values()) + srun_cmd = [ + 'srun', + '-n', + f'{total_process_count}', + ] + split(self.args.launcher_args) + + if getattr(self.args, 'slurm_comment', ''): + srun_cmd += ['--comment', self.args.slurm_comment] + + if self.args.include != "": + srun_cmd.append('--include') + srun_cmd.append(f'{self.args.include}') + if self.args.exclude != "": + srun_cmd.append('--exclude') + srun_cmd.append(f'{self.args.exclude}') + if self.args.num_nodes > 0: + srun_cmd.append('--nodes') + srun_cmd.append(f'{self.args.num_nodes}') + if self.args.num_gpus > 0: + srun_cmd.append('--gpus') + srun_cmd.append(f'{self.args.num_gpus}') + + exports = 
'--export=ALL' + for key, val in self.exports.items(): + exports += f",{key}={val}" + + python_exec = [sys.executable, "-u"] + command = srun_cmd + [exports] + python_exec + [self.user_script + ] + self.user_arguments + return command + + class MVAPICHRunner(MultiNodeRunner): def __init__(self, args, world_info_base64, resource_pool): super().__init__(args, world_info_base64) @@ -165,7 +278,8 @@ class MVAPICHRunner(MultiNodeRunner): self.add_export('MV2_DEBUG_SHOW_BACKTRACE', '1') # Enabled cuda-aware communication - self.add_export('MV2_USE_CUDA', '1') + if get_accelerator().device_name() == 'cuda': + self.add_export('MV2_USE_CUDA', '1') # Support deep learning frameworks: http://hidl.cse.ohio-state.edu/userguide/horovod/ self.add_export('MV2_SUPPORT_DL', '1') @@ -227,7 +341,7 @@ class MVAPICHRunner(MultiNodeRunner): f'{process_per_node}', '--hostfile', f'{MVAPICH_TMP_HOSTFILE}', - ] + ] + split(self.args.launcher_args) export_cmd = [] for k, v in self.exports.items(): diff --git a/deepspeed/launcher/runner.py b/deepspeed/launcher/runner.py old mode 100644 new mode 100755 index 551d24e..044f6ff --- a/deepspeed/launcher/runner.py +++ b/deepspeed/launcher/runner.py @@ -7,6 +7,7 @@ per rank for training. 
""" import os +import re import sys import json import base64 @@ -14,18 +15,21 @@ import argparse import subprocess import collections from copy import deepcopy +import signal +import time -import torch.cuda - -from .multinode_runner import PDSHRunner, OpenMPIRunner, MVAPICHRunner -from .constants import PDSH_LAUNCHER, OPENMPI_LAUNCHER, MVAPICH_LAUNCHER +from .multinode_runner import PDSHRunner, OpenMPIRunner, MVAPICHRunner, SlurmRunner, MPICHRunner +from .constants import PDSH_LAUNCHER, OPENMPI_LAUNCHER, MVAPICH_LAUNCHER, SLURM_LAUNCHER, MPICH_LAUNCHER from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT +from ..nebula.constants import NEBULA_EXPORT_ENVS from ..utils import logger from ..autotuning import Autotuner +from deepspeed.accelerator import get_accelerator DLTS_HOSTFILE = "/job/hostfile" -EXPORT_ENVS = ["NCCL", "PYTHON", "MV2", "UCX"] +EXPORT_ENVS = ['MLFLOW', 'NCCL', 'PYTHON', 'MV2', 'UCX'] +EXPORT_ENVS += NEBULA_EXPORT_ENVS DEEPSPEED_ENVIRONMENT_NAME = ".deepspeed_env" DEEPSPEED_ENVIRONMENT_PATHS = [os.path.expanduser("~"), '.'] PDSH_MAX_FAN_OUT = 1024 @@ -75,6 +79,18 @@ def parse_args(args=None): help="Total number of worker nodes to run on, this will use " "the top N hosts from the given hostfile.") + parser.add_argument("--min_elastic_nodes", + type=int, + default=-1, + help="Minimum number of nodes to run elastic training on. " + "Default is 1 when elastic training is enabled") + + parser.add_argument("--max_elastic_nodes", + type=int, + default=-1, + help="Maximum number of nodes to run elastic training on. " + "Default is num_nodes when elastic training is enabled") + parser.add_argument("--num_gpus", type=int, default=-1, @@ -93,11 +109,12 @@ def parse_args(args=None): help="(optional) IP address of node 0, will be " "inferred via 'hostname -I' if not specified.") - parser.add_argument("--launcher", - default=PDSH_LAUNCHER, - type=str, - help="(optional) choose launcher backend for multi-node " - "training. 
Options currently include PDSH, OpenMPI, MVAPICH.") + parser.add_argument( + "--launcher", + default=PDSH_LAUNCHER, + type=str, + help="(optional) choose launcher backend for multi-node " + "training. Options currently include PDSH, OpenMPI, MVAPICH, SLURM, MPICH.") parser.add_argument("--launcher_args", default="", @@ -121,6 +138,10 @@ def parse_args(args=None): help="Do not pass local_rank as an argument when calling " "the user's training script.") + parser.add_argument("--no_ssh_check", + action="store_true", + help="Do not perform ssh check in multi-node launcher model") + parser.add_argument("--force_multi", action="store_true", help="Force multi-node launcher mode, helps in cases where user " @@ -133,6 +154,12 @@ def parse_args(args=None): "where is the pid of the first process that invoked `deepspeed`. " "Useful when launching deepspeed processes programmatically.") + parser.add_argument( + "--enable_each_rank_log", + default="None", + type=str, + help="redirect the stdout and stderr from each rank into different log files") + parser.add_argument( "--autotuning", default="", @@ -142,6 +169,10 @@ def parse_args(args=None): help="Run DeepSpeed autotuner to discover optimal configuration parameters " "before running job.") + parser.add_argument("--elastic_training", + action="store_true", + help="Enable elastic training support in DeepSpeed.") + parser.add_argument("user_script", type=str, help="User script to launch, followed by any required " @@ -158,25 +189,45 @@ def fetch_hostfile(hostfile_path): # e.g., worker-0 slots=16 with open(hostfile_path, 'r') as fd: - resource_pool = collections.OrderedDict() - for line in fd.readlines(): - line = line.strip() - if line == '': - # skip empty lines - continue - try: - hostname, slots = line.split() - _, slot_count = slots.split("=") - slot_count = int(slot_count) - except ValueError as err: - logger.error("Hostfile is not formatted correctly, unable to " - "proceed with training.") - raise err - if hostname in 
resource_pool: - logger.error("Hostfile contains duplicate hosts, unable to " - "proceed with training.") - raise ValueError(f"host {hostname} is already defined") - resource_pool[hostname] = slot_count + hostfile_text = fd.readlines() + + return _parse_hostfile(hostfile_text) + + +def _parse_hostfile(hostfile_lines): + # Regex matches one or more non-whitespace characters (\S+) at the start of + # the line, followed by one or more whitespace characters (\s+), followed + # by the string "slots=", followed by one or more digits (\d+). + pattern = r'^(\S+)\s+slots=(\d+)' + + resource_pool = collections.OrderedDict() + + for line in hostfile_lines: + line = line.strip() + match = re.search(pattern, line) + if line.startswith("#") or line == "": + # hostfile comment or empty line, ignore + continue + elif match: + host = match.group(1) + num_slots = int(match.group(2)) + if host in resource_pool: + logger.error(f"Bad hostfile text: {hostfile_lines}") + raise ValueError( + f"Hostfile contains multiple entries for {host}, unable to proceed with launching" + ) + resource_pool[host] = num_slots + else: + logger.error(f"Bad hostfile text: {hostfile_lines}") + raise ValueError( + "Hostfile contains a bad entry: {line}, unable to proceed with launching" + ) + + if len(resource_pool) == 0: + logger.error(f"Bad hostfile text: {hostfile_lines}") + raise ValueError( + "Hostfile is empty or not formatted correctly, unable to proceed with launching." 
+ ) return resource_pool @@ -305,14 +356,33 @@ def run_autotuning(args, active_resources): tuner.print_tuning_results() logger.info("[End] Running autotuning") + tuner.write_optimal_config() if args.autotuning == "run": tuner.run_after_tuning() +def parse_num_nodes(str_num_nodes: str, elastic_training: bool): + node_list = str_num_nodes.split(":") + + if len(node_list) == 1: + min_nodes, max_nodes = int(node_list[0]), -1 + elif len(node_list) == 2 and elastic_training: + min_nodes, max_nodes = int(node_list[0]), int(node_list[1]) + elif len(node_list) == 2 and not elastic_training: + raise RuntimeError("MIN:MAX format is only supported in elastic training") + else: + raise RuntimeError("num_nodes {} is not in MIN:MAX format".format(str_num_nodes)) + + return min_nodes, max_nodes + + def main(args=None): args = parse_args(args) + if args.elastic_training: + assert args.master_addr != "", "Master Addr is required when elastic training is enabled" + resource_pool = fetch_hostfile(args.hostfile) # respect CUDA_VISIBLE_DEVICES for a single node and no explicit resource filters @@ -336,7 +406,7 @@ def main(args=None): multi_node_exec = True if not resource_pool: resource_pool = {} - device_count = torch.cuda.device_count() + device_count = get_accelerator().device_count() if device_count == 0: raise RuntimeError("Unable to proceed, no GPU resources available") resource_pool['localhost'] = device_count @@ -352,7 +422,7 @@ def main(args=None): env = os.environ.copy() # validate that passwordless-ssh is workly properly with this hostfile - if multi_node_exec: + if multi_node_exec and not args.no_ssh_check: first_host = list(active_resources.keys())[0] try: subprocess.check_call( @@ -369,8 +439,18 @@ def main(args=None): assert multi_node_exec first_host = list(active_resources.keys())[0] hostname_cmd = [f"ssh {first_host} hostname -I"] - result = subprocess.check_output(hostname_cmd, shell=True) + try: + result = subprocess.check_output(hostname_cmd, shell=True) + except 
subprocess.CalledProcessError as err: + logger.error( + "Unable to detect suitable master address via `hostname -I`, please manually specify one via --master_addr" + ) + raise err args.master_addr = result.decode('utf-8').split()[0] + if not args.master_addr: + raise RuntimeError( + f"Unable to detect suitable master address via `hostname -I`, please manually specify one via --master_addr" + ) logger.info(f"Using IP address of {args.master_addr} for node {first_host}") if args.autotuning != "": @@ -391,6 +471,9 @@ def main(args=None): updated_active_resources[hostname] = list(range(args.num_gpus)) active_resources = updated_active_resources + if args.elastic_training: + assert not args.no_local_rank, "--no_local_rank argument is not supported in Elastic training" + # encode world info as base64 to make it easier to pass via command line world_info_base64 = encode_world_info(active_resources) @@ -414,6 +497,13 @@ def main(args=None): deepspeed_launch.append("--no_local_rank") if args.save_pid: deepspeed_launch += ["--save_pid", f"{os.getpid()}"] + if args.enable_each_rank_log: + deepspeed_launch.append( + f"--enable_each_rank_log={args.enable_each_rank_log}") + if args.elastic_training: + deepspeed_launch.append("--enable_elastic_training") + deepspeed_launch.append(f"--max_elastic_nodes={args.max_elastic_nodes}") + deepspeed_launch.append(f"--min_elastic_nodes={args.min_elastic_nodes}") cmd = deepspeed_launch + [args.user_script] + args.user_args else: args.launcher = args.launcher.lower() @@ -421,8 +511,12 @@ def main(args=None): runner = PDSHRunner(args, world_info_base64) elif args.launcher == OPENMPI_LAUNCHER: runner = OpenMPIRunner(args, world_info_base64, resource_pool) + elif args.launcher == MPICH_LAUNCHER: + runner = MPICHRunner(args, world_info_base64, resource_pool) elif args.launcher == MVAPICH_LAUNCHER: runner = MVAPICHRunner(args, world_info_base64, resource_pool) + elif args.launcher == SLURM_LAUNCHER: + runner = SlurmRunner(args, world_info_base64, 
resource_pool) else: raise NotImplementedError(f"Unknown launcher {args.launcher}") @@ -448,11 +542,26 @@ def main(args=None): key, val = var.split('=', maxsplit=1) runner.add_export(key, val) - cmd = runner.get_cmd(env, active_resources) + if args.launcher == PDSH_LAUNCHER: + cmd, kill_cmd = runner.get_cmd(env, active_resources) + else: + cmd = runner.get_cmd(env, active_resources) logger.info(f"cmd = {' '.join(cmd)}") result = subprocess.Popen(cmd, env=env) + def sigkill_handler(signum, frame): + result.send_signal(signal.SIGINT) + time.sleep(0.1) + result.send_signal(signal.SIGTERM) + result_kill = subprocess.Popen(kill_cmd, env=env) + result_kill.wait() + time.sleep(1) + sys.exit(1) + + if args.launcher == PDSH_LAUNCHER: + signal.signal(signal.SIGINT, sigkill_handler) + result.wait() # In case of failure must propagate the error-condition back to the caller (usually shell). The diff --git a/deepspeed/model_implementations/__init__.py b/deepspeed/model_implementations/__init__.py new file mode 100644 index 0000000..77d1c5b --- /dev/null +++ b/deepspeed/model_implementations/__init__.py @@ -0,0 +1,4 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +from .transformers.ds_transformer import DeepSpeedTransformerInference +from .transformers.clip_encoder import DSClipEncoder diff --git a/deepspeed/model_implementations/diffusers/__init__.py b/deepspeed/model_implementations/diffusers/__init__.py new file mode 100644 index 0000000..fcb45ab --- /dev/null +++ b/deepspeed/model_implementations/diffusers/__init__.py @@ -0,0 +1 @@ +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/deepspeed/model_implementations/diffusers/unet.py b/deepspeed/model_implementations/diffusers/unet.py new file mode 100644 index 0000000..8ac9794 --- /dev/null +++ b/deepspeed/model_implementations/diffusers/unet.py @@ -0,0 +1,63 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +''' +import torch +from ..features.cuda_graph import CUDAGraph + + +class DSUNet(CUDAGraph, 
torch.nn.Module): + def __init__(self, unet, enable_cuda_graph=True): + super().__init__(enable_cuda_graph=enable_cuda_graph) + self.unet = unet + # SD pipeline accesses this attribute + self.in_channels = unet.in_channels + self.device = self.unet.device + self.dtype = self.unet.dtype + self.config = self.unet.config + self.fwd_count = 0 + self.unet.requires_grad_(requires_grad=False) + self.unet.to(memory_format=torch.channels_last) + self.cuda_graph_created = False + + def _graph_replay(self, *inputs, **kwargs): + for i in range(len(inputs)): + if torch.is_tensor(inputs[i]): + self.static_inputs[i].copy_(inputs[i]) + for k in kwargs: + if torch.is_tensor(kwargs[k]): + self.static_kwargs[k].copy_(kwargs[k]) + self._cuda_graphs.replay() + return self.static_output + + def forward(self, *inputs, **kwargs): + if self.enable_cuda_graph: + if self.cuda_graph_created: + outputs = self._graph_replay(*inputs, **kwargs) + else: + self._create_cuda_graph(*inputs, **kwargs) + outputs = self._graph_replay(*inputs, **kwargs) + return outputs + else: + return self._forward(*inputs, **kwargs) + + def _create_cuda_graph(self, *inputs, **kwargs): + # warmup to create the workspace and cublas handle + cuda_stream = torch.cuda.Stream() + cuda_stream.wait_stream(torch.cuda.current_stream()) + with torch.cuda.stream(cuda_stream): + for i in range(3): + ret = self._forward(*inputs, **kwargs) + torch.cuda.current_stream().wait_stream(cuda_stream) + + # create cuda_graph and assign static_inputs and static_outputs + self._cuda_graphs = torch.cuda.CUDAGraph() + self.static_inputs = inputs + self.static_kwargs = kwargs + + with torch.cuda.graph(self._cuda_graphs): + self.static_output = self._forward(*self.static_inputs, **self.static_kwargs) + + self.cuda_graph_created = True + + def _forward(self, sample, timestamp, encoder_hidden_states, return_dict=True): + return self.unet(sample, timestamp, encoder_hidden_states, return_dict) diff --git 
a/deepspeed/model_implementations/diffusers/vae.py b/deepspeed/model_implementations/diffusers/vae.py new file mode 100644 index 0000000..8f8d13c --- /dev/null +++ b/deepspeed/model_implementations/diffusers/vae.py @@ -0,0 +1,148 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +''' +import torch +from ..features.cuda_graph import CUDAGraph + + +class DSVAE(CUDAGraph, torch.nn.Module): + def __init__(self, vae, enable_cuda_graph=True): + super().__init__(enable_cuda_graph=enable_cuda_graph) + self.vae = vae + self.device = self.vae.device + self.dtype = self.vae.dtype + self.vae.requires_grad_(requires_grad=False) + self.decoder_cuda_graph_created = False + self.encoder_cuda_graph_created = False + self.all_cuda_graph_created = False + + def _graph_replay_decoder(self, *inputs, **kwargs): + for i in range(len(inputs)): + if torch.is_tensor(inputs[i]): + self.static_decoder_inputs[i].copy_(inputs[i]) + for k in kwargs: + if torch.is_tensor(kwargs[k]): + self.static_decoder_kwargs[k].copy_(kwargs[k]) + self._decoder_cuda_graph.replay() + return self.static_decoder_output + + def _decode(self, x, return_dict=True): + return self.vae.decode(x, return_dict=return_dict) + + def _create_cuda_graph_decoder(self, *inputs, **kwargs): + # warmup to create the workspace and cublas handle + cuda_stream = torch.cuda.Stream() + cuda_stream.wait_stream(torch.cuda.current_stream()) + with torch.cuda.stream(cuda_stream): + for i in range(3): + ret = self._decode(*inputs, **kwargs) + torch.cuda.current_stream().wait_stream(cuda_stream) + + # create cuda_graph and assign static_inputs and static_outputs + self._decoder_cuda_graph = torch.cuda.CUDAGraph() + self.static_decoder_inputs = inputs + self.static_decoder_kwargs = kwargs + + with torch.cuda.graph(self._decoder_cuda_graph): + self.static_decoder_output = self._decode(*self.static_decoder_inputs, + **self.static_decoder_kwargs) + + self.decoder_cuda_graph_created = True + + def decode(self, *inputs, **kwargs): + if 
self.enable_cuda_graph: + if self.decoder_cuda_graph_created: + outputs = self._graph_replay_decoder(*inputs, **kwargs) + else: + self._create_cuda_graph_decoder(*inputs, **kwargs) + outputs = self._graph_replay_decoder(*inputs, **kwargs) + return outputs + else: + return self._decode(*inputs, **kwargs) + + def _graph_replay_encoder(self, *inputs, **kwargs): + for i in range(len(inputs)): + if torch.is_tensor(inputs[i]): + self.static_encoder_inputs[i].copy_(inputs[i]) + for k in kwargs: + if torch.is_tensor(kwargs[k]): + self.static_encoder_kwargs[k].copy_(kwargs[k]) + self._encoder_cuda_graph.replay() + return self.static_encoder_output + + def _encode(self, x, return_dict=True): + return self.vae.encode(x, return_dict=return_dict) + + def _create_cuda_graph_encoder(self, *inputs, **kwargs): + # warmup to create the workspace and cublas handle + cuda_stream = torch.cuda.Stream() + cuda_stream.wait_stream(torch.cuda.current_stream()) + with torch.cuda.stream(cuda_stream): + for i in range(3): + ret = self._encode(*inputs, **kwargs) + torch.cuda.current_stream().wait_stream(cuda_stream) + + # create cuda_graph and assign static_inputs and static_outputs + self._encoder_cuda_graph = torch.cuda.CUDAGraph() + self.static_encoder_inputs = inputs + self.static_encoder_kwargs = kwargs + + with torch.cuda.graph(self._encoder_cuda_graph): + self.static_encoder_output = self._encode(*self.static_encoder_inputs, + **self.static_encoder_kwargs) + + self.encoder_cuda_graph_created = True + + def encode(self, *inputs, **kwargs): + if self.enable_cuda_graph: + if self.encoder_cuda_graph_created: + outputs = self._graph_replay_encoder(*inputs, **kwargs) + else: + self._create_cuda_graph_encoder(*inputs, **kwargs) + outputs = self._graph_replay_encoder(*inputs, **kwargs) + return outputs + else: + return self._encode(*inputs, **kwargs) + + def _graph_replay(self, *inputs, **kwargs): + for i in range(len(inputs)): + if torch.is_tensor(inputs[i]): + 
self.static_inputs[i].copy_(inputs[i]) + for k in kwargs: + if torch.is_tensor(kwargs[k]): + self.static_kwargs[k].copy_(kwargs[k]) + self._all_cuda_graph.replay() + return self.static_output + + def forward(self, *inputs, **kwargs): + if self.enable_cuda_graph: + if self.cuda_graph_created: + outputs = self._graph_replay(*inputs, **kwargs) + else: + self._create_cuda_graph(*inputs, **kwargs) + outputs = self._graph_replay(*inputs, **kwargs) + return outputs + else: + return self._forward(*inputs, **kwargs) + + def _create_cuda_graph(self, *inputs, **kwargs): + # warmup to create the workspace and cublas handle + cuda_stream = torch.cuda.Stream() + cuda_stream.wait_stream(torch.cuda.current_stream()) + with torch.cuda.stream(cuda_stream): + for i in range(3): + ret = self._forward(*inputs, **kwargs) + torch.cuda.current_stream().wait_stream(cuda_stream) + + # create cuda_graph and assign static_inputs and static_outputs + self._all_cuda_graph = torch.cuda.CUDAGraph() + self.static_inputs = inputs + self.static_kwargs = kwargs + + with torch.cuda.graph(self._all_cuda_graph): + self.static_output = self._forward(*self.static_inputs, **self.static_kwargs) + + self.all_cuda_graph_created = True + + def _forward(self, sample, timestamp, encoder_hidden_states, return_dict=True): + return self.vae(sample, timestamp, encoder_hidden_states, return_dict) diff --git a/deepspeed/model_implementations/features/__init__.py b/deepspeed/model_implementations/features/__init__.py new file mode 100644 index 0000000..fcb45ab --- /dev/null +++ b/deepspeed/model_implementations/features/__init__.py @@ -0,0 +1 @@ +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/deepspeed/model_implementations/features/cuda_graph.py b/deepspeed/model_implementations/features/cuda_graph.py new file mode 100644 index 0000000..3224f62 --- /dev/null +++ b/deepspeed/model_implementations/features/cuda_graph.py @@ -0,0 +1,24 @@ +''' +Copyright 2023 The Microsoft DeepSpeed Team +''' +from abc import 
ABC, abstractmethod + + +class CUDAGraph(ABC): + def __init__(self, enable_cuda_graph=False): + super().__init__() + self.enable_cuda_graph = enable_cuda_graph + + @abstractmethod + def _create_cuda_graph(self): + """ + Create CUDA graph(s) + """ + raise NotImplementedError + + @abstractmethod + def _graph_replay(self): + """ + Replay CUDA graph(s) + """ + raise NotImplementedError diff --git a/deepspeed/model_implementations/transformers/__init__.py b/deepspeed/model_implementations/transformers/__init__.py new file mode 100644 index 0000000..fcb45ab --- /dev/null +++ b/deepspeed/model_implementations/transformers/__init__.py @@ -0,0 +1 @@ +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/deepspeed/model_implementations/transformers/clip_encoder.py b/deepspeed/model_implementations/transformers/clip_encoder.py new file mode 100644 index 0000000..efa282c --- /dev/null +++ b/deepspeed/model_implementations/transformers/clip_encoder.py @@ -0,0 +1,79 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +''' +import torch +from deepspeed.accelerator import get_accelerator +from ..features.cuda_graph import CUDAGraph + + +class DSClipEncoder(CUDAGraph, torch.nn.Module): + def __init__(self, enc, enable_cuda_graph=False): + super().__init__(enable_cuda_graph=enable_cuda_graph) + enc.text_model._build_causal_attention_mask = self._build_causal_attention_mask + self.enc = enc + self.device = self.enc.device + self.dtype = self.enc.dtype + self.cuda_graph_created = [False, False] + self.static_inputs = [None, None] + self.static_kwargs = [None, None] + self.static_output = [None, None] + self._cuda_graphs = [None, None] + self.iter = 0 + self.config = self.enc.config + + def _build_causal_attention_mask(self, bsz, seq_len, dtype): + mask = torch.empty(bsz, + seq_len, + seq_len, + dtype=dtype, + device=get_accelerator().current_device_name()) + mask.fill_(torch.tensor(torch.finfo(dtype).min)) + mask.triu_(1) + mask = mask.unsqueeze(1) + return mask + + def 
_graph_replay(self, *inputs, **kwargs): + for i in range(len(inputs)): + if torch.is_tensor(inputs[i]): + self.static_inputs[self.iter][i].copy_(inputs[i]) + for k in kwargs: + if torch.is_tensor(kwargs[k]): + self.static_kwargs[self.iter][k].copy_(kwargs[k]) + self._cuda_graphs[self.iter].replay() + return self.static_output[self.iter] + + def forward(self, *inputs, **kwargs): + if self.enable_cuda_graph: + if self.cuda_graph_created[self.iter]: + outputs = self._graph_replay(*inputs, **kwargs) + else: + self._create_cuda_graph(*inputs, **kwargs) + outputs = self._graph_replay(*inputs, **kwargs) + self.iter = (self.iter + 1) % 2 + return outputs + else: + return self.enc(*inputs, **kwargs) + + def _create_cuda_graph(self, *inputs, **kwargs): + # warmup to create the workspace and cublas handle + cuda_stream = torch.cuda.Stream() + cuda_stream.wait_stream(torch.cuda.current_stream()) + with torch.cuda.stream(cuda_stream): + for i in range(3): + ret = self._forward(*inputs, **kwargs) + torch.cuda.current_stream().wait_stream(cuda_stream) + + # create cuda_graph and assign static_inputs and static_outputs + self._cuda_graphs[self.iter] = torch.cuda.CUDAGraph() + self.static_inputs[self.iter] = inputs + self.static_kwargs[self.iter] = kwargs + + with torch.cuda.graph(self._cuda_graphs[self.iter]): + self.static_output[self.iter] = self._forward( + *self.static_inputs[self.iter], + **self.static_kwargs[self.iter]) + + self.cuda_graph_created[self.iter] = True + + def _forward(self, *inputs, **kwargs): + return self.enc(*inputs, **kwargs) diff --git a/deepspeed/model_implementations/transformers/ds_base.py b/deepspeed/model_implementations/transformers/ds_base.py new file mode 100644 index 0000000..9a848ea --- /dev/null +++ b/deepspeed/model_implementations/transformers/ds_base.py @@ -0,0 +1,11 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import torch.nn as nn + + +class DeepSpeedTransformerBase(nn.module): + def __init__(self): + pass + + # this would be the new 
clean base class that will replace DeepSpeedTransformerInference. + # we currently don't know how this will look like but keeping it here as a placeholder. diff --git a/deepspeed/model_implementations/transformers/ds_bert.py b/deepspeed/model_implementations/transformers/ds_bert.py new file mode 100644 index 0000000..3e7a7b7 --- /dev/null +++ b/deepspeed/model_implementations/transformers/ds_bert.py @@ -0,0 +1,23 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +''' + +from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference + + +class DeepSpeedBERTInference(DeepSpeedTransformerInference): + """Initialize the DeepSpeed BERT Transformer Layer. + """ + def __init__(self, + config, + mp_group=None, + quantize_scales=None, + quantize_groups=1, + merge_count=1, + mlp_extra_grouping=False): + super().__init__(config, + mp_group, + quantize_scales, + quantize_groups, + merge_count, + mlp_extra_grouping) diff --git a/deepspeed/model_implementations/transformers/ds_bloom.py b/deepspeed/model_implementations/transformers/ds_bloom.py new file mode 100644 index 0000000..386352f --- /dev/null +++ b/deepspeed/model_implementations/transformers/ds_bloom.py @@ -0,0 +1,23 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +''' + +from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference + + +class DeepSpeedBloomInference(DeepSpeedTransformerInference): + """Initialize the DeepSpeed Bloom Transformer Layer. 
+ """ + def __init__(self, + config, + mp_group=None, + quantize_scales=None, + quantize_groups=1, + merge_count=1, + mlp_extra_grouping=False): + super().__init__(config, + mp_group, + quantize_scales, + quantize_groups, + merge_count, + mlp_extra_grouping) diff --git a/deepspeed/model_implementations/transformers/ds_gpt.py b/deepspeed/model_implementations/transformers/ds_gpt.py new file mode 100644 index 0000000..86cc9fd --- /dev/null +++ b/deepspeed/model_implementations/transformers/ds_gpt.py @@ -0,0 +1,23 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +''' + +from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference + + +class DeepSpeedGPTInference(DeepSpeedTransformerInference): + """Initialize the DeepSpeed GPT Transformer Layer. + """ + def __init__(self, + config, + mp_group=None, + quantize_scales=None, + quantize_groups=1, + merge_count=1, + mlp_extra_grouping=False): + super().__init__(config, + mp_group, + quantize_scales, + quantize_groups, + merge_count, + mlp_extra_grouping) diff --git a/deepspeed/model_implementations/transformers/ds_megatron_gpt.py b/deepspeed/model_implementations/transformers/ds_megatron_gpt.py new file mode 100644 index 0000000..aca6b80 --- /dev/null +++ b/deepspeed/model_implementations/transformers/ds_megatron_gpt.py @@ -0,0 +1,23 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +''' + +from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference + + +class DeepSpeedMegatronGPTInference(DeepSpeedTransformerInference): + """Initialize the DeepSpeed Megatron GPT Transformer Layer. 
+ """ + def __init__(self, + config, + mp_group=None, + quantize_scales=None, + quantize_groups=1, + merge_count=1, + mlp_extra_grouping=False): + super().__init__(config, + mp_group, + quantize_scales, + quantize_groups, + merge_count, + mlp_extra_grouping) diff --git a/deepspeed/model_implementations/transformers/ds_opt.py b/deepspeed/model_implementations/transformers/ds_opt.py new file mode 100644 index 0000000..a5209a3 --- /dev/null +++ b/deepspeed/model_implementations/transformers/ds_opt.py @@ -0,0 +1,23 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +''' + +from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference + + +class DeepSpeedOPTInference(DeepSpeedTransformerInference): + """Initialize the DeepSpeed OPT Transformer Layer. + """ + def __init__(self, + config, + mp_group=None, + quantize_scales=None, + quantize_groups=1, + merge_count=1, + mlp_extra_grouping=False): + super().__init__(config, + mp_group, + quantize_scales, + quantize_groups, + merge_count, + mlp_extra_grouping) diff --git a/deepspeed/model_implementations/transformers/ds_transformer.py b/deepspeed/model_implementations/transformers/ds_transformer.py new file mode 100644 index 0000000..ee5a9bd --- /dev/null +++ b/deepspeed/model_implementations/transformers/ds_transformer.py @@ -0,0 +1,188 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +''' + +import torch +import torch.nn as nn +from deepspeed import comm as dist +from deepspeed.utils.logging import log_dist + +from deepspeed.ops.transformer.inference.ds_mlp import DeepSpeedMLP +from deepspeed.ops.transformer.inference.ds_attention import DeepSpeedSelfAttention, BloomSelfAttention +from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import InferenceBuilder + +inference_cuda_module = None + + +class DeepSpeedTransformerInference(nn.Module): + """Initialize the DeepSpeed Transformer Layer. + Arguments: + layer_id: The layer index starting from 0, e.g. 
if model has 24 transformer layers, + layer_id will be 0,1,2...23 when each layer object is instantiated + config: An object of DeepSpeedInferenceConfig + mp_group: Model parallelism group initialized on the modeling side. + quantize_scales: This argument groups all the layers' scales used for quantization + quantize_groups: Number of groups used for quantizing the model + merge_count: Shows the number of model-parallel checkpoints merged before running inference. + We use this argument to control the quantization scale for the model parameters if a bigger + quantize-grouping than 1 is used. + mlp_extra_grouping: This flag is used to show a 2x higher number of groups used for the MLP part + of a Transformer layer. We use this feature for quantization to reduce the convergence impact + for specific downstream tasks. + """ + layer_id = 0 + + def __init__(self, + config, + mp_group=None, + quantize_scales=None, + quantize_groups=1, + merge_count=1, + mlp_extra_grouping=False): + super(DeepSpeedTransformerInference, self).__init__() + + self.config = config + self.config.layer_id = DeepSpeedTransformerInference.layer_id + DeepSpeedTransformerInference.layer_id += 1 + + data_type = torch.half if config.fp16 else torch.float + global inference_cuda_module + if inference_cuda_module is None: + builder = InferenceBuilder() + inference_cuda_module = builder.load() + + if DeepSpeedTransformerInference.layer_id == 1: + log_dist(f"DeepSpeed-Inference config: {self.config.__dict__}", [0]) + + if self.config.bigscience_bloom: + self.attention = BloomSelfAttention(self.config, + mp_group, + quantize_scales, + quantize_groups, + merge_count) + else: + self.attention = DeepSpeedSelfAttention(self.config, + mp_group, + quantize_scales, + quantize_groups, + merge_count) + self.mlp = DeepSpeedMLP(self.config, + mp_group, + quantize_scales, + quantize_groups, + merge_count, + mlp_extra_grouping) + + device = get_accelerator().current_device_name( + ) # if config.bigscience_bloom else 
'cpu' + self.norm_w = nn.Parameter(torch.empty(self.config.hidden_size, + dtype=data_type, + device=device), + requires_grad=False) + self.norm_b = nn.Parameter(torch.empty(self.config.hidden_size, + dtype=data_type, + device=device), + requires_grad=False) + self.layer_past = None + self.allocate_workspace = inference_cuda_module.allocate_workspace_fp32 if (not config.fp16) else \ + inference_cuda_module.allocate_workspace_fp16 + + @classmethod + def reset_cache(cls): + if inference_cuda_module is not None: + inference_cuda_module.reset_cache() + + def forward( + self, + input=None, + input_mask=None, + attention_mask=None, + attn_mask=None, + head_mask=None, + layer_past=None, + get_key_value=False, + get_present=False, + encoder_output=None, + enc_dec_attn_mask=None, + x=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + use_cache=False, + alibi=None, + output_attentions=False, + # TODO(arashb): 'layer_head_mask' and 'past_key_value' are only added to satisfy the OPT models API. + # This needs to be redesigned later! 
+ layer_head_mask=None, + past_key_value=None): + + if x is not None: + input = x + + input_mask = (input_mask if attn_mask is None else + attn_mask) if attention_mask is None else attention_mask + + # Allocate memory only on first layer forward + if self.config.layer_id == 0: + self.allocate_workspace(self.config.hidden_size, + self.config.heads, + input.size()[1], + input.size()[0], + DeepSpeedTransformerInference.layer_id, + self.config.mp_size, + self.config.bigscience_bloom, + dist.get_rank() if dist.is_initialized() else 0, + self.config.max_out_tokens) + + get_present = (get_present or get_key_value or use_cache) + input_mask = input_mask if attention_mask is None else attention_mask + + # We set the prev key/value to None when there is a prompt + if input.shape[1] > 1: + self.layer_past = None + layer_past = layer_past if layer_past is not None else self.layer_past + head_mask = layer_head_mask if layer_head_mask is not None else head_mask + + attn_mask = None + if isinstance(input, tuple): + attn_mask = input[1] + input = input[0] + input_type = input.dtype + + if (self.config.fp16 or self.config.q_int8) \ + and input.dtype == torch.float: + input = input.half() + with torch.no_grad(): + attention_output, key, value, context_outputtn_ctx, inp_norm = \ + self.attention(input, + input_mask, + head_mask, + layer_past, + get_present, + encoder_hidden_states, + encoder_attention_mask, + output_attentions, + self.norm_w, + self.norm_b, + alibi) + + presents = (key, value) + self.layer_past = presents if layer_past is None else None + output = self.mlp(attention_output, input, inp_norm, self.attention.attn_ob) + + if not self.config.pre_layer_norm: + output = inference_cuda_module.layer_norm(output, + self.norm_w, + self.norm_b, + self.config.epsilon) + + output = output.to(input_type) + if get_present: + output = (output, presents) + + if self.config.return_single_tuple: + return (output, ) + elif self.config.return_tuple: + return output if type(output) is 
tuple else (output, attn_mask) + else: + return output diff --git a/deepspeed/module_inject/__init__.py b/deepspeed/module_inject/__init__.py old mode 100644 new mode 100755 index 7fe1a3b..aab3028 --- a/deepspeed/module_inject/__init__.py +++ b/deepspeed/module_inject/__init__.py @@ -1,3 +1,7 @@ -from .replace_module import replace_transformer_layer, revert_transformer_layer +'''Copyright The Microsoft DeepSpeed Team''' + +from .replace_module import replace_transformer_layer, revert_transformer_layer, ReplaceWithTensorSlicing, GroupQuantizer, generic_injection from .module_quantize import quantize_transformer_layer -from .replace_policy import DSPolicy, HFBertLayerPolicy +from .replace_policy import HFBertLayerPolicy +from .layers import LinearAllreduce, LinearLayer, EmbeddingLayer, Normalize +from .policy import DSPolicy diff --git a/deepspeed/module_inject/auto_tp.py b/deepspeed/module_inject/auto_tp.py new file mode 100644 index 0000000..a2c570f --- /dev/null +++ b/deepspeed/module_inject/auto_tp.py @@ -0,0 +1,124 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +# Automatic Tensor Parallelism +import re + +from torch import nn +from .replace_policy import replace_policies + + +class AutoTP(): + def in_module_list(module, module_list): + for item in module_list: + if type(item).__name__ == type(module).__name__: + return True + return False + + def get_module_list(model): + mlist = [] + for child in model.children(): + if isinstance(child, nn.ModuleList): + for module in child.children(): + if not mlist: + mlist = [module] + elif not AutoTP.in_module_list(module, mlist): + mlist = mlist + [module] + else: + mlist = mlist + AutoTP.get_module_list(child) + return mlist + + def supported(model): + unsupported = [ + 'bloom', + 'codegen', + 'deberta', + 'flaubert', + 'fsmt', + 'gpt2', + 'led', + 'longformer', + 'xlm', + 'xlnet' + ] + model = str(model) + key = re.search(r": (.*?)Model", model) + if key is None: + key = re.search(r": (.*?)Stack", model) + if key is 
None: + key = re.match(r"(.*?)Model", model) + assert key is not None, "Not able to determine model policy automatically. Please provide policy." + if key.group(1).lower() in unsupported: + return False + return True + + def get_layers(parent, module): + layer_list = [] + for key, submodule in module._modules.items(): + if isinstance(submodule, nn.Linear): + layer_list = layer_list + [parent + "." + key] + elif isinstance(submodule, + nn.LayerNorm) or key == 'LayerNorm' or key == 'layer_norm': + layer_list = layer_list + ["ln"] + else: + layer_list = layer_list + AutoTP.get_layers(key, submodule) + return layer_list + + def update_policy_list(policy_list, new_module, new_gems): + if len(policy_list): + for i, policy in enumerate(policy_list): + # if module already exists in policy, combine gems and remove duplicates + if policy[0] == type(new_module): + new_gems = set(new_gems + policy[1]) + policy_list[i] = tuple([type(new_module), new_gems]) + return policy_list + policy_list.append(tuple([type(new_module), new_gems])) + return policy_list + + def kernel_supported(module_list): + policy = [] + for plcy in replace_policies: + # instantiate a throw-away policy in order to populate the _orig_layer_class + _ = plcy(None) + if isinstance(plcy._orig_layer_class, list): + for orig_layer_class in plcy._orig_layer_class: + policy.append(orig_layer_class) + elif plcy._orig_layer_class is not None: + policy.append(plcy._orig_layer_class) + for child in module_list: + if child.__class__ in policy: + return True + return False + + def tp_parser(model): + policy_list = [] + module_list = [] + layer_list = [] + gem_list = [] + + module_list = AutoTP.get_module_list(model) + assert AutoTP.supported(model), "AutoTP not supported for model. Please use kernel injection since container policy for model exists." \ + if AutoTP.kernel_supported(module_list) else "AutoTP not supported for model. Please provide policy." 
+ for module in module_list: + for key, submodule in module._modules.items(): + if isinstance(submodule, nn.Linear): + layer_list = layer_list + ["." + key] + elif isinstance( + submodule, + nn.LayerNorm) or key == 'LayerNorm' or key == 'layer_norm': + layer_list = layer_list + ["ln"] + else: + layer_list = layer_list + AutoTP.get_layers(key, submodule) + for i, layer in enumerate(layer_list): + if layer == 'ln': + if layer_list[i - 1] != 'ln': + gem_list = gem_list + [layer_list[i - 1]] + elif 'out_proj' in layer: + gem_list = gem_list + [layer] + layer_list = [] + if gem_list != []: + gem_list = list(set(gem_list)) + policy_list = AutoTP.update_policy_list(policy_list, module, gem_list) + gem_list = [] + assert len(policy_list), "AutoTP not supported for model. Please use kernel injection since container policy for model exists." \ + if AutoTP.kernel_supported(module_list) else "Not able to determine model policy automatically. Please provide policy." + return policy_list diff --git a/deepspeed/module_inject/containers/__init__.py b/deepspeed/module_inject/containers/__init__.py new file mode 100644 index 0000000..6abef51 --- /dev/null +++ b/deepspeed/module_inject/containers/__init__.py @@ -0,0 +1,15 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +from .bert import DS_BERTContainer, HFBertLayerPolicy +from .bloom import DS_BloomContainer, BLOOMLayerPolicy, supported_models +from .distil_bert import DS_DistilBERTContainer, HFDistilBertLayerPolicy +from .gpt2 import DS_GPT2Container, HFGPT2LayerPolicy +from .gptj import DS_GPTJContainer, HFGPTJLayerPolicy +from .gptneo import DS_GPTNEOContainer, HFGPTNEOLayerPolicy +from .gptneox import DS_GPTNEOXContainer, GPTNEOXLayerPolicy +from .megatron_gpt import DS_MegatronGPTContainer, MegatronLayerPolicy +from .megatron_gpt_moe import DS_MegatronGPTMoEContainer, MegatronMoELayerPolicy +from .opt import DS_OPTContainer, HFOPTLayerPolicy +from .clip import DS_CLIPContainer, HFCLIPLayerPolicy +from .unet import 
# Create a container object to save model-specific tensors using the policy file above.
class BaseConvolutionContainer(ABC):
    # not implemented
    def __init__(self):
        pass


class BaseTransformerContainer(ABC):
    """Base container holding one transformer layer's tensors and config.

    A policy object extracts weights from the user (HF/Megatron) module; this
    container stores them, converts/partitions them, and builds the matching
    DeepSpeed inference module.
    """

    def __init__(self, policy, config, model_config, layer_id, child):
        self.policy = policy
        self.config = config
        self.model_config = model_config
        self.layer_id = layer_id
        self.child = child

        self.megatron_v2 = self.policy.is_megatron_v2
        self.scale_attention = self.policy.scale_attention
        self.ckpt_load_enabled = False

        # configuration for models. todo: can this be moved to a pydantic model config?
        self.hidden_size = None
        self.num_attention_heads = None
        self.mp_size = self.config.tensor_parallel.tp_size
        self.pre_layer_norm = self.policy.pre_attn_norm
        self.fp16 = False
        self.attn_linear_layer = self.policy.linear_layer
        self.mlp_linear_layer = self.policy.linear_layer
        # First epsilon attribute found on the HF model config wins;
        # 1e-12 is the BERT-style fallback.
        self.layer_norm_eps = getattr(
            self.model_config, 'layer_norm_eps',
            getattr(self.model_config, 'layer_norm_epsilon',
                    getattr(self.model_config, 'layernorm_epsilon', 1.0e-12)))
        self.return_tuple = self.config.return_tuple
        self.triangular_masking = True
        # GPT-Neo style configs mark per-layer local attention.
        self.local_attention = ((self.model_config.attention_layers[self.layer_id] == "local")
                                if hasattr(self.model_config, 'attention_layers') else False)
        self.window_size = getattr(self.model_config, "window_size", 1)
        self.mlp_act_func_type = self.policy.mlp_act_func_type
        self.training_mp_size = self.config.training_mp_size
        self.bigscience_bloom = False
        self.max_out_tokens = self.config.max_out_tokens
        self.scale_attn_by_inverse_layer_idx = getattr(self.config,
                                                       "scale_attn_by_inverse_layer_idx",
                                                       False)
        self.use_mup = self.policy.use_mup
        self.return_single_tuple = False
        # Rotary dim: GPT-J style model config, GPT-NeoX style child attribute,
        # or -1 (rotary disabled).
        self.rotary_dim = self.model_config.rotary_dim if hasattr(self.model_config, 'rotary_dim') \
            else self.child.attention.rotary_ndims if \
            hasattr(self.child, 'attention') and hasattr(self.child.attention, 'rotary_ndims') else -1
        self.mlp_after_attn = (self.rotary_dim is None or self.rotary_dim < 0)

        # Attention tensors
        self.qkvw = None
        self.qkvb = None
        self.dense_w = None
        self.dense_b = None
        # MLP tensors
        self._h4h_w = None
        self._h4h_b = None
        self._4hh_w = None
        self._4hh_b = None
        # LayerNorm tensors
        self.attn_nw = None
        self.attn_nb = None
        self.input_nw = None
        self.input_nb = None

    def create_ds_model_config(self):
        """Build the DeepSpeedInferenceConfig for this layer from container state."""
        self.set_hidden_heads(*self.policy.get_hidden_heads())
        assert self.num_attention_heads % self.mp_size == 0,\
            "To run the model parallel across the GPUs, the attention_heads require to be divisible by the world_size!" +\
            "This is because the attention computation is partitioned evenly among the parallel GPUs."

        self.ds_model_config = DeepSpeedInferenceConfig(
            hidden_size=self.hidden_size,
            heads=self.num_attention_heads,
            layer_norm_eps=self.layer_norm_eps,
            fp16=self.fp16,
            pre_layer_norm=self.pre_layer_norm,
            mp_size=self.mp_size,
            q_int8=self.quantize,
            return_tuple=self.return_tuple,
            triangular_masking=self.triangular_masking,
            local_attention=self.local_attention,
            window_size=self.window_size,
            rotary_dim=self.rotary_dim,
            mlp_after_attn=self.mlp_after_attn,
            mlp_act_func_type=self.mlp_act_func_type,
            training_mp_size=self.training_mp_size,
            bigscience_bloom=self.bigscience_bloom,
            max_out_tokens=self.max_out_tokens,
            scale_attn_by_inverse_layer_idx=self.scale_attn_by_inverse_layer_idx,
            use_mup=self.use_mup,
            return_single_tuple=self.return_single_tuple,
        )
        return self.ds_model_config

    def initialize_tensors(self):
        # Set the tensors from policy (user module) to container (DS module)
        self.set_attention(*self.policy.attention())
        self.set_mlp(*self.policy.mlp())
        self.set_layernorm(*self.policy.layernorm())

    def convert_to_required_dtype(self, dtype):
        # Note: converting tensors to fp16 requires that we do it in-place
        # using self.__dict__ and not make a list/dict copy
        if dtype == torch.half:
            for k, v in self.__dict__.items():
                # The list comprehension is used for MoE tensor lists
                if isinstance(v, list) and all((isinstance(tensor, torch.Tensor) \
                   or isinstance(tensor, torch.nn.Parameter)) for tensor in v):
                    self.__dict__[k] = [moe_tensor.half() for moe_tensor in v]

                if isinstance(v, torch.Tensor) or isinstance(v, torch.nn.Parameter):
                    self.__dict__[k] = v.half()

    def set_dtype(self, fp16=False):
        self.fp16 = fp16

    def set_moe(self, moe=False):
        self.moe = moe

    def set_tensor_parallel_config(self, mp_size, mp_group):
        self.mp_size = mp_size
        self.mp_group = mp_group

    def set_quantization_config(self, quantize, quantizer):
        self.quantize = quantize
        self.quantizer = quantizer

    def set_hidden_heads(self, hidden_size, num_attention_heads):
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads

    def set_attention(self, qkvw, qkvb, dense_w, dense_b):
        self.qkvw = qkvw
        self.qkvb = qkvb
        self.dense_w = dense_w
        self.dense_b = dense_b

    def set_mlp(self, _h4h_w, _h4h_b, _4hh_w, _4hh_b):
        self._h4h_w = _h4h_w
        self._h4h_b = _h4h_b
        self._4hh_w = _4hh_w
        self._4hh_b = _4hh_b

    def set_layernorm(self, attn_nw, attn_nb, input_nw, input_nb):
        self.attn_nw = attn_nw
        self.attn_nb = attn_nb
        self.input_nw = input_nw
        self.input_nb = input_nb

    def apply_weight_quantization(self):
        # quantize attention weights
        self.attention_quantization()
        # quantize mlp weights
        self.mlp_quantization()

    def attention_quantization(self):
        self.module.attention.attn_qkvw = self.quantizer.quantize(
            self.module.attention.attn_qkvw)
        self.module.attention.attn_ow = self.quantizer.quantize(
            self.module.attention.attn_ow)

    def mlp_quantization(self):
        self.module.mlp.inter_w = self.quantizer.quantize(self.module.mlp.inter_w)
        self.module.mlp.output_w = self.quantizer.quantize(self.module.mlp.output_w)

    def apply_tensor_parallelism(self, mp_replace):
        # setup the new Attention module
        self.attention_qkv_mp(mp_replace)
        self.attention_o_mp(mp_replace)
        # setup the new MLP module
        self.mlp_inter_mp(mp_replace)
        self.mlp_output_mp(mp_replace)
        # Apply weight quantization
        self.apply_weight_quantization()

    def attention_qkv_mp(self, mp_replace):
        self.module.attention.attn_qkvw = mp_replace.qkv_copy(
            self.module.attention.attn_qkvw, self.qkvw)
        self.module.attention.attn_qkvb = mp_replace.qkv_copy(
            self.module.attention.attn_qkvb, self.qkvb)

    def attention_o_mp(self, mp_replace):
        self.module.attention.attn_ow = mp_replace.copy(self.module.attention.attn_ow,
                                                        self.dense_w)
        self.module.attention.attn_ob = mp_replace.copy(self.module.attention.attn_ob,
                                                        self.dense_b)

    def mlp_inter_mp(self, mp_replace):
        self.module.mlp.inter_w = mp_replace.copy(self.module.mlp.inter_w, self._h4h_w)
        self.module.mlp.inter_b = mp_replace.copy(self.module.mlp.inter_b, self._h4h_b)

    def mlp_output_mp(self, mp_replace):
        self.module.mlp.output_w = mp_replace.copy(self.module.mlp.output_w, self._4hh_w)
        self.module.mlp.output_b = mp_replace.copy(self.module.mlp.output_b, self._4hh_b)

    def copy_data_to_new_module(self):
        if self.attn_nw is None:
            # Policy supplied no attention layernorm: propagate the Nones so the
            # DS module knows the norm is absent.
            self.module.mlp.attn_nw = self.attn_nw
            self.module.mlp.attn_nb = self.attn_nb
        else:
            self.module.mlp.attn_nw.data.copy_(
                self.attn_nw.to(get_accelerator().current_device_name()))
            self.module.mlp.attn_nb.data.copy_(
                self.attn_nb.to(get_accelerator().current_device_name()))

        self.module.norm_w.data.copy_(
            self.input_nw.to(get_accelerator().current_device_name()))
        self.module.norm_b.data.copy_(
            self.input_nb.to(get_accelerator().current_device_name()))

    def transpose(self):
        self.transpose_attention()
        self.transpose_mlp()

    def transpose_attention(self):
        if self.attn_linear_layer:
            self.qkvw = self.transpose_impl(self.qkvw.data)
            self.dense_w = self.transpose_impl(self.dense_w.data)

    def transpose_mlp(self):
        if self.mlp_linear_layer:
            self._h4h_w = self.transpose_impl(self._h4h_w.data)
            self._4hh_w = self.transpose_impl(self._4hh_w.data)

    def transpose_impl(self, data):
        """Transpose a 2-D weight in place and move it to the accelerator."""
        data = data.contiguous()
        data.reshape(-1).copy_(data.transpose(-1, -2).contiguous().reshape(-1))
        data = data.reshape(data.shape[-1], data.shape[-2])
        # BUGFIX: Tensor.to() is not in-place; the original discarded its
        # result, leaving `data` on the old device.
        data = data.to(get_accelerator().current_device_name())
        return data
# Create a container object to save model-specific tensors using the policy file above.
class BaseTransformerMoEContainer(BaseTransformerContainer):
    """Container for a Mixture-of-Experts transformer layer."""

    def __init__(self, **kwargs):
        # Call the init function of the parent class to initialize the tensors
        # and configs from parent class
        super().__init__(**kwargs)

        self.num_experts = self.policy.get_num_experts()
        self.ep_world_size = dist.get_world_size()
        # Number of experts hosted on this rank (at least one).
        self.local_ep_size = 1 if self.num_experts < self.ep_world_size else self.num_experts // self.ep_world_size

        # BUGFIX: the original assignment ended with a stray trailing comma,
        # which made layer_norm_eps a 1-element tuple instead of a float.
        self.layer_norm_eps = self.config.layer_norm_eps if hasattr(
            self.config, 'layer_norm_eps') else 1e-12

        # MoE models will have a list of mlp related tensors
        self._h4h_w = []
        self._h4h_b = []
        self._4hh_w = []
        self._4hh_b = []

        # Residual MoE needs extra parameters
        self._res_h4h_w = None
        self._res_h4h_b = None
        self._res_4hh_w = None
        self._res_4hh_b = None
        self._res_coef = None

    def create_ds_model_config(self):
        """Build the DeepSpeedMoEInferenceConfig for this layer."""
        self.set_hidden_heads(*self.policy.get_hidden_heads())
        assert self.num_attention_heads % self.mp_size == 0,\
            "To run the model parallel across the GPUs, the attention_heads require to be divisible by the world_size!" +\
            "This is because the attention computation is partitioned evenly among the parallel GPUs."

        self.ds_model_config = transformer_inference.DeepSpeedMoEInferenceConfig(
            hidden_size=self.hidden_size,
            heads=self.num_attention_heads,
            layer_norm_eps=self.layer_norm_eps,
            fp16=self.fp16,
            pre_layer_norm=self.pre_layer_norm,
            mp_size=self.mp_size,
            q_int8=self.quantize,
            moe_experts=self.local_ep_size,
            global_experts=self.num_experts,
            mlp_type=self.config.moe.type,
            scale_attn_by_inverse_layer_idx=self.scale_attn_by_inverse_layer_idx,
        )
        return self.ds_model_config

    def initialize_tensors(self):
        # Set the tensors from policy (user module) to container (DS module)
        self.set_attention(*self.policy.attention())
        self.set_mlp(self.config.moe.type)
        self.set_layernorm(*self.policy.layernorm())

    def set_mlp(self, config_moe_type):
        # 'standard' MoE returns only the expert tensors; residual MoE also
        # returns the residual-branch MLP and gating coefficient.
        if config_moe_type == 'standard':
            self._h4h_w, self._h4h_b, \
                self._4hh_w, self._4hh_b = self.policy.mlp()
        else:
            self._h4h_w, self._h4h_b, self._4hh_w, \
                self._4hh_b, self._res_h4h_w, self._res_h4h_b, \
                self._res_4hh_w, self._res_4hh_b, \
                self._res_coef = self.policy.mlp(config_moe_type)

    def transpose(self):
        self.transpose_attention()
        self.transpose_mlp()
        if self.config.moe.type == 'residual':
            self.transpose_residual()

    def transpose_mlp(self):
        self._h4h_w = [self.transpose_impl(moe_w1.data) for moe_w1 in self._h4h_w]
        self._4hh_w = [self.transpose_impl(moe_w1.data) for moe_w1 in self._4hh_w]

    def transpose_residual(self):
        self._res_h4h_w.data = self.transpose_impl(self._res_h4h_w.data)
        self._res_4hh_w.data = self.transpose_impl(self._res_4hh_w.data)
        self._res_coef.data = self.transpose_impl(self._res_coef.data)

    def apply_tensor_parallelism(self, mp_replace):
        # setup the new Attention module
        self.attention_qkv_mp(mp_replace)
        self.attention_o_mp(mp_replace)
        # quantize attention weights
        self.attention_quantization()
        # setup the new MLP module
        self.mlp_mp()

    def mlp_mp(self):
        # Each rank owns local_ep_size experts; pick this rank's slice of the
        # global expert tensor lists.
        gpu_index = dist.get_rank()
        for ep_index in range(self.local_ep_size):
            # mlp inter
            self.module.mlp[ep_index].inter_w.data = self._h4h_w[
                gpu_index * self.local_ep_size + ep_index].to(
                    get_accelerator().current_device_name())
            self.module.mlp[ep_index].inter_b.data = self._h4h_b[
                gpu_index * self.local_ep_size + ep_index].to(
                    get_accelerator().current_device_name())
            # mlp output
            self.module.mlp[ep_index].output_w.data = self._4hh_w[
                gpu_index * self.local_ep_size + ep_index].to(
                    get_accelerator().current_device_name())
            self.module.mlp[ep_index].output_b.data = self._4hh_b[
                gpu_index * self.local_ep_size + ep_index].to(
                    get_accelerator().current_device_name())

    def copy_data_to_new_module(self):
        self.module.attn_nw.data = self.attn_nw.to(
            get_accelerator().current_device_name())
        self.module.attn_nb.data = self.attn_nb.to(
            get_accelerator().current_device_name())

        self.module.norm_w.data.copy_(
            self.input_nw.to(get_accelerator().current_device_name()))
        self.module.norm_b.data.copy_(
            self.input_nb.to(get_accelerator().current_device_name()))

        if self.config.moe.type == 'residual':
            self.module.res_mlp.inter_w.data = self._res_h4h_w.to(
                get_accelerator().current_device_name())
            self.module.res_mlp.inter_b.data = self._res_h4h_b.to(
                get_accelerator().current_device_name())
            self.module.res_mlp.output_w.data = self._res_4hh_w.to(
                get_accelerator().current_device_name())
            self.module.res_mlp.output_b.data = self._res_4hh_b.to(
                get_accelerator().current_device_name())
            self.module.res_coef.data = self._res_coef.to(
                get_accelerator().current_device_name())
class DS_BERTContainer(BaseTransformerContainer):
    """Inference container for BERT-family layers."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # BERT returns tuples and uses bidirectional (non-causal) attention.
        self.return_tuple = True
        self.triangular_masking = False

    def create_module(self, config=None):
        _config = self.ds_model_config if config is None else config
        self.module = DeepSpeedBERTInference(_config, mp_group=self.mp_group)
        self.module.config.scale_attention = self.scale_attention
        return self.module


class HFBertLayerPolicy(TransformerPolicy):
    """Policy mapping HF BertLayer/RobertaLayer weights onto the DS container."""

    def __init__(self, client_module, inference=False):
        super().__init__(inference, pre_attn_norm=False)
        self.client_module = client_module
        self.cuda_graph_supported = True

        if HFBertLayerPolicy._orig_layer_class is None:
            try:
                import transformers
                HFBertLayerPolicy._orig_layer_class = [
                    transformers.models.bert.modeling_bert.BertLayer,
                    transformers.models.roberta.modeling_roberta.RobertaLayer
                ]
            except:
                HFBertLayerPolicy._orig_layer_class = None

    def get_hidden_heads(self):
        attn_self = self.client_module.attention.self
        return attn_self.query.weight.shape[1], attn_self.num_attention_heads

    def attention(self):
        # Fuse the separate q/k/v projections into single qkv tensors.
        attn_self = self.client_module.attention.self
        qkvw = Parameter(torch.cat((attn_self.query.weight,
                                    attn_self.key.weight,
                                    attn_self.value.weight),
                                   dim=0),
                         requires_grad=False)
        qkvb = Parameter(torch.cat((attn_self.query.bias,
                                    attn_self.key.bias,
                                    attn_self.value.bias),
                                   dim=0),
                         requires_grad=False)
        attn_out = self.client_module.attention.output
        return qkvw, qkvb, attn_out.dense.weight, attn_out.dense.bias

    def mlp(self):
        if self.pre_attn_norm:
            intermediate_ff = self.client_module.intermediate.dense_act
        else:
            intermediate_ff = self.client_module.intermediate.dense
        return intermediate_ff.weight, intermediate_ff.bias, \
            self.client_module.output.dense.weight, \
            self.client_module.output.dense.bias

    def layernorm(self):
        if self.pre_attn_norm:
            attention_layernorm = self.client_module.PostAttentionLayerNorm
            transformer_layernorm = self.client_module.PreAttentionLayerNorm
        else:
            attention_layernorm = self.client_module.attention.output.LayerNorm
            transformer_layernorm = self.client_module.output.LayerNorm
        return attention_layernorm.weight, \
            attention_layernorm.bias, \
            transformer_layernorm.weight, \
            transformer_layernorm.bias
# Model classes for which AutoTP is known to work with BLOOM checkpoints.
supported_models = {None}


class DS_BloomContainer(MetaTensorContainer, BaseTransformerContainer):
    """Inference container for BLOOM layers."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # BLOOM uses its alibi-based attention path inside the DS kernel.
        self.bigscience_bloom = True

    def create_module(self, config=None):
        _config = self.ds_model_config if config is None else config
        self.module = DeepSpeedBloomInference(_config, mp_group=self.mp_group)
        self.module.config.scale_attention = self.scale_attention
        return self.module

    def attention_qkv_mp(self, mp_replace):
        # BLOOM stores qkv already fused, so a plain copy (not qkv_copy) is used.
        self.module.attention.attn_qkvw = mp_replace.copy(
            self.module.attention.attn_qkvw, self.qkvw)
        self.module.attention.attn_qkvb = mp_replace.copy(
            self.module.attention.attn_qkvb, self.qkvb)

    def load_params(self, module, sd, weight_quantizer, mp_replace, prefix):
        param_names = (
            'self_attention.query_key_value.weight',
            'self_attention.query_key_value.bias',
            'self_attention.dense.weight',
            'self_attention.dense.bias',
            'mlp.dense_h_to_4h.weight',
            'mlp.dense_h_to_4h.bias',
            'mlp.dense_4h_to_h.weight',
            'mlp.dense_4h_to_h.bias',
            'post_attention_layernorm.weight',
            'post_attention_layernorm.bias',
            'input_layernorm.weight',
            'input_layernorm.bias',
        )
        # Indices 0-1: fused qkv; 2-3: attention output; 4-9: mlp; 10-11: input norm.
        for i in range(0, 2):
            maybe_copy(module.attention,
                       sd,
                       weight_quantizer,
                       mp_replace,
                       transformer_param_names[i],
                       prefix + param_names[i],
                       qkv=True,
                       megatron_v2=self.policy.is_megatron_v2,
                       split_qkv=self.policy.split_qkv)
        for i in range(2, 4):
            maybe_copy(module.attention, sd, weight_quantizer, mp_replace,
                       transformer_param_names[i], prefix + param_names[i])
        for i in range(4, 10):
            maybe_copy(module.mlp, sd, weight_quantizer, mp_replace,
                       transformer_param_names[i], prefix + param_names[i])
        for i in range(10, 12):
            maybe_copy(module, sd, weight_quantizer, mp_replace,
                       transformer_param_names[i], prefix + param_names[i])


class BLOOMLayerPolicy(TransformerPolicy):
    _orig_layer_class = None

    def __init__(self,
                 client_module,
                 inference=True,
                 use_load_prefix=True,
                 split_qkv=False):
        super().__init__(inference,
                         linear_layer=True,
                         use_load_prefix=use_load_prefix,
                         split_qkv=split_qkv)
        self.client_module = client_module
        try:
            import transformers
            BLOOMLayerPolicy._orig_layer_class = transformers.models.bloom.modeling_bloom.BloomBlock
            global supported_models
            supported_models.update(
                {transformers.models.bloom.modeling_bloom.BloomModel})
        except Exception as e:
            print(
                f"WARNING! Setting BLOOMLayerPolicy._orig_layer_class to None due to Exception: {e}"
            )
            BLOOMLayerPolicy._orig_layer_class = None

    def get_hidden_heads(self):
        attn = self.client_module.self_attention
        return attn.hidden_size, attn.num_heads

    def attention(self):
        attn = self.client_module.self_attention
        return attn.query_key_value.weight, \
            attn.query_key_value.bias, \
            attn.dense.weight, \
            attn.dense.bias

    def mlp(self):
        mlp = self.client_module.mlp
        return mlp.dense_h_to_4h.weight, \
            mlp.dense_h_to_4h.bias, \
            mlp.dense_4h_to_h.weight, \
            mlp.dense_4h_to_h.bias

    def layernorm(self):
        return self.client_module.post_attention_layernorm.weight, \
            self.client_module.post_attention_layernorm.bias, \
            self.client_module.input_layernorm.weight, \
            self.client_module.input_layernorm.bias
class DS_CLIPContainer(BaseTransformerContainer):
    """Inference container for CLIP encoder layers (reuses the GPT module)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def create_module(self, config=None):
        _config = self.ds_model_config if config is None else config
        self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group)
        self.module.config.scale_attention = self.scale_attention
        return self.module


class HFCLIPLayerPolicy(TransformerPolicy):
    def __init__(self, client_module, inference=False):
        super().__init__(inference, pre_attn_norm=True, scale_attention=True)
        self.client_module = client_module
        self.cuda_graph_supported = True

        if HFCLIPLayerPolicy._orig_layer_class is None:
            try:
                import transformers
                HFCLIPLayerPolicy._orig_layer_class = transformers.models.clip.modeling_clip.CLIPEncoderLayer
            except:
                HFCLIPLayerPolicy._orig_layer_class = None

    def get_hidden_heads(self):
        attn = self.client_module.self_attn
        return attn.q_proj.weight.shape[1], attn.num_heads

    def attention(self):
        # Fuse q/k/v projections into single frozen qkv parameters.
        attn = self.client_module.self_attn
        qkvw = Parameter(torch.cat((attn.q_proj.weight,
                                    attn.k_proj.weight,
                                    attn.v_proj.weight),
                                   dim=0),
                         requires_grad=False)
        qkvb = Parameter(torch.cat((attn.q_proj.bias,
                                    attn.k_proj.bias,
                                    attn.v_proj.bias),
                                   dim=0),
                         requires_grad=False)
        return qkvw, qkvb, attn.out_proj.weight, attn.out_proj.bias

    def mlp(self):
        mlp = self.client_module.mlp
        return mlp.fc1.weight, mlp.fc1.bias, mlp.fc2.weight, mlp.fc2.bias

    def layernorm(self):
        return self.client_module.layer_norm2.weight, \
            self.client_module.layer_norm2.bias, \
            self.client_module.layer_norm1.weight, \
            self.client_module.layer_norm1.bias
class DS_DistilBERTContainer(BaseTransformerContainer):
    """Inference container for DistilBERT layers."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # DistilBERT: bidirectional attention, single-tuple return convention.
        self.triangular_masking = False
        self.return_single_tuple = True

    def create_module(self, config=None):
        _config = self.ds_model_config if config is None else config
        self.module = DeepSpeedBERTInference(_config, mp_group=self.mp_group)
        self.module.config.scale_attention = self.scale_attention
        return self.module


class HFDistilBertLayerPolicy(TransformerPolicy):
    _orig_layer_class = None

    def __init__(self, client_module, inference=False, preln=False):
        super().__init__(inference)
        self.client_module = client_module
        self.preln = preln
        self.cuda_graph_supported = True
        if HFDistilBertLayerPolicy._orig_layer_class is None:
            try:
                import transformers
                HFDistilBertLayerPolicy._orig_layer_class = [
                    transformers.models.distilbert.modeling_distilbert.TransformerBlock,
                ]
            except:
                HFDistilBertLayerPolicy._orig_layer_class = None

    def get_hidden_heads(self):
        attn = self.client_module.attention
        return attn.q_lin.weight.shape[1], attn.n_heads

    def attention(self):
        attn = self.client_module.attention
        # Note: these fused parameters keep requires_grad (matches original code).
        qkvw = Parameter(torch.cat((attn.q_lin.weight,
                                    attn.k_lin.weight,
                                    attn.v_lin.weight),
                                   dim=0))
        qkvb = Parameter(torch.cat((attn.q_lin.bias,
                                    attn.k_lin.bias,
                                    attn.v_lin.bias),
                                   dim=0))
        return qkvw, qkvb, attn.out_lin.weight, attn.out_lin.bias

    def mlp(self):
        intermediate_ff = self.client_module.ffn.lin1
        return intermediate_ff.weight, intermediate_ff.bias, \
            self.client_module.ffn.lin2.weight, \
            self.client_module.ffn.lin2.bias

    def layernorm(self):
        attention_layernorm = self.client_module.sa_layer_norm
        transformer_layernorm = self.client_module.output_layer_norm
        return attention_layernorm.weight, \
            attention_layernorm.bias, \
            transformer_layernorm.weight, \
            transformer_layernorm.bias
class MegatronContainer(ABC):
    """Mixin adding Megatron-v2 qkv re-alignment to a transformer container."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.megatron_v2 = self.policy.is_megatron_v2

    def transpose_qkv_alignment(self, x):
        # Megatron-v2 interleaves q/k/v per attention head; regroup so all q
        # rows come first, then k, then v, preserving the input shape.
        head_dim = x.shape[-1] // self.num_attention_heads
        per_head = x.view(*(x.size()[:-1] + (self.num_attention_heads, head_dim)))
        q, k, v = torch.split(per_head, per_head.shape[-1] // 3, dim=per_head.dim() - 1)
        if q.dim() > 2:
            rows = q.shape[0]
            flat = (q.reshape(rows, -1), k.reshape(rows, -1), v.reshape(rows, -1))
        else:
            flat = (q.reshape(-1), k.reshape(-1), v.reshape(-1))
        return torch.cat(flat, dim=-1).reshape(x.shape)

    def transpose(self):
        super().transpose()
        if self.megatron_v2:
            self.qkvw = torch.nn.parameter.Parameter(
                self.transpose_qkv_alignment(self.qkvw).contiguous())
            self.qkvb = torch.nn.parameter.Parameter(
                self.transpose_qkv_alignment(self.qkvb).contiguous())


class MetaTensorContainer(ABC):
    """Mixin for containers whose client module may hold meta tensors.

    When the weights are meta tensors there is no data to copy or transpose;
    real parameters are filled in later via load_params().
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.is_meta = False
        self.ckpt_load_enabled = True

    def initialize_tensors(self):
        super().initialize_tensors()
        self.is_meta = self.qkvw.is_meta

    def apply_tensor_parallelism(self, mp_replace):
        if self.is_meta:
            # Meta tensors carry no data; only propagate which biases are absent.
            if self.qkvb is None:
                self.module.attention.attn_qkvb = None
            if self.dense_b is None:
                self.module.attention.attn_ob = None
        else:
            super().apply_tensor_parallelism(mp_replace)

    def copy_data_to_new_module(self):
        if self.is_meta:
            if self.attn_nw is None:
                self.module.mlp.attn_nw = self.attn_nw
                self.module.mlp.attn_nb = self.attn_nb
        else:
            super().copy_data_to_new_module()

    def transpose(self):
        if not self.is_meta:
            super().transpose()

    @abstractmethod
    def load_params(self, module, sd, weight_quantizer, mp_replace, prefix):
        """
        Load all the transformer parameter from the checkpoint file (sd).
        In addition to the parameter names, we require two
        more parameters to help read the the data correctly
        from the checkpoint and split the qkv heads in the
        right order:
        1. `use_load_prefix` (Default: False): this specifies
            whether we need to use the name of first abstraction
            layer of the model for searching the parameter's name
            in a checkpoint file. For more information of how this
            is used please see
            https://github.com/microsoft/DeepSpeed/blob/master/deepspeed/module_inject/load_checkpoint.py
        2. `split_qkv` (Default: True): we use this flag when splitting
            the qkv parameter into heads. If it is False, it means the heads
            of q, k, and v are stored together and needs to split in the
            DeepSpeed-Inference API.
        """
        raise NotImplementedError(
            "A load_params() function must be defined in the model container \
            when inheriting the MetaTensorContainer feature")
class DS_GPT2Container(BaseTransformerContainer):
    """Inference container for GPT-2 layers."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def create_module(self, config=None):
        _config = self.ds_model_config if config is None else config
        self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group)
        self.module.config.scale_attention = self.scale_attention
        return self.module


class HFGPT2LayerPolicy(TransformerPolicy):
    _orig_layer_class = None

    def __init__(self, client_module, inference=True):
        # HuggingFace GPT2 uses convolutional layer instead of linear layer
        super().__init__(inference, linear_layer=False)
        self.client_module = client_module
        try:
            import transformers
            HFGPT2LayerPolicy._orig_layer_class = transformers.models.gpt2.modeling_gpt2.GPT2Block
        except:
            HFGPT2LayerPolicy._orig_layer_class = None

    def get_hidden_heads(self):
        attn = self.client_module.attn
        return attn.embed_dim, attn.num_heads

    def attention(self):
        # GPT-2's Conv1D c_attn already stores fused qkv weights.
        attn = self.client_module.attn
        return attn.c_attn.weight, attn.c_attn.bias, \
            attn.c_proj.weight, attn.c_proj.bias

    def mlp(self):
        mlp = self.client_module.mlp
        return mlp.c_fc.weight, mlp.c_fc.bias, \
            mlp.c_proj.weight, mlp.c_proj.bias

    def layernorm(self):
        return self.client_module.ln_2.weight, \
            self.client_module.ln_2.bias, \
            self.client_module.ln_1.weight, \
            self.client_module.ln_1.bias
+from ..policy import maybe_copy_qkv + + +class DS_GPTJContainer(MetaTensorContainer, BaseTransformerContainer): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + # All model specific things should be defined here instead of the base class. + + def create_module(self, config=None): + _config = config if config is not None else self.ds_model_config + self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group) + self.module.config.scale_attention = self.scale_attention + return self.module + + def load_params(self, module, sd, weight_quantizer, mp_replace, prefix): + param_names = ( + 'attn.q_proj.weight', \ + 'attn.k_proj.weight', \ + 'attn.v_proj.weight', \ + 'attn.out_proj.weight', \ + 'mlp.fc_in.weight', \ + 'mlp.fc_in.bias', \ + 'mlp.fc_out.weight', \ + 'mlp.fc_out.bias', \ + 'ln_1.weight', \ + 'ln_1.bias' + ) + maybe_copy_qkv( + module.attention, + sd, + weight_quantizer, + mp_replace, + 'attn_qkvw', + [prefix + param_names[0], + prefix + param_names[1], + prefix + param_names[2]], + split_qkv=self.policy.split_qkv) + for i in range(3, 4): + maybe_copy(module.attention, + sd, + weight_quantizer, + mp_replace, + transformer_param_names[i - 1], + prefix + param_names[i]) + for i in range(4, 8): + maybe_copy(module.mlp, + sd, + weight_quantizer, + mp_replace, + transformer_param_names[i], + prefix + param_names[i]) + for i in range(8, 10): + maybe_copy(module, + sd, + weight_quantizer, + mp_replace, + transformer_param_names[i + 2], + prefix + param_names[i]) + + +class HFGPTJLayerPolicy(TransformerPolicy): + _orig_layer_class = None + + def __init__(self, client_module, inference=True): + super().__init__(inference, scale_attention=True) + self.client_module = client_module + try: + import transformers + HFGPTJLayerPolicy._orig_layer_class = transformers.models.gptj.modeling_gptj.GPTJBlock + except: + HFGPTJLayerPolicy._orig_layer_class = None + + def get_hidden_heads(self): + return self.client_module.attn.q_proj.weight.shape[1], \ + 
self.client_module.attn.num_attention_heads + + def attention(self): + qw = self.client_module.attn.q_proj.weight + kw = self.client_module.attn.k_proj.weight + vw = self.client_module.attn.v_proj.weight + + qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=False) + + return qkvw, \ + None, \ + self.client_module.attn.out_proj.weight, \ + None, + + def mlp(self): + return self.client_module.mlp.fc_in.weight, \ + self.client_module.mlp.fc_in.bias, \ + self.client_module.mlp.fc_out.weight, \ + self.client_module.mlp.fc_out.bias + + def layernorm(self): + return None, \ + None, \ + self.client_module.ln_1.weight, \ + self.client_module.ln_1.bias diff --git a/deepspeed/module_inject/containers/gptneo.py b/deepspeed/module_inject/containers/gptneo.py new file mode 100644 index 0000000..a8f206f --- /dev/null +++ b/deepspeed/module_inject/containers/gptneo.py @@ -0,0 +1,111 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +from .base import * +from .features.meta_tensor import MetaTensorContainer +from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference +import torch +from torch.nn.parameter import Parameter +from ..policy import TransformerPolicy +from ..policy import transformer_param_names +from ..policy import maybe_copy +from ..policy import maybe_copy_qkv + + +class DS_GPTNEOContainer(MetaTensorContainer, BaseTransformerContainer): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + # All model specific things should be defined here instead of the base class. 
+ + def create_module(self, config=None): + _config = config if config is not None else self.ds_model_config + self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group) + self.module.config.scale_attention = self.scale_attention + return self.module + + def load_params(self, module, sd, weight_quantizer, mp_replace, prefix): + param_names = ( + 'attn.attention.q_proj.weight', \ + 'attn.attention.k_proj.weight', \ + 'attn.attention.v_proj.weight', \ + 'attn.attention.out_proj.weight', \ + 'attn.attention.out_proj.bias', \ + 'mlp.c_fc.weight', \ + 'mlp.c_fc.bias', \ + 'mlp.c_proj.weight', \ + 'mlp.c_proj.bias', \ + 'ln_2.weight', \ + 'ln_2.bias', \ + 'ln_1.weight', \ + 'ln_1.bias' + ) + maybe_copy_qkv( + module.attention, + sd, + weight_quantizer, + mp_replace, + 'attn_qkvw', + [prefix + param_names[0], + prefix + param_names[1], + prefix + param_names[2]], + split_qkv=self.policy.split_qkv) + for i in range(3, 5): + maybe_copy(module.attention, + sd, + weight_quantizer, + mp_replace, + transformer_param_names[i - 1], + prefix + param_names[i]) + for i in range(5, 11): + maybe_copy(module.mlp, + sd, + weight_quantizer, + mp_replace, + transformer_param_names[i - 1], + prefix + param_names[i]) + for i in range(11, 13): + maybe_copy(module, + sd, + weight_quantizer, + mp_replace, + transformer_param_names[i - 1], + prefix + param_names[i]) + + +class HFGPTNEOLayerPolicy(TransformerPolicy): + def __init__(self, client_module, inference=True): + super().__init__(inference, scale_attention=False) + self.client_module = client_module + try: + import transformers + HFGPTNEOLayerPolicy._orig_layer_class = transformers.models.gpt_neo.modeling_gpt_neo.GPTNeoBlock + except: + HFGPTNEOLayerPolicy._orig_layer_class = None + + def get_hidden_heads(self): + return self.client_module.attn.attention.q_proj.weight.shape[1], \ + self.client_module.attn.attention.num_heads + + def attention(self): + qw = self.client_module.attn.attention.q_proj.weight + kw = 
self.client_module.attn.attention.k_proj.weight + vw = self.client_module.attn.attention.v_proj.weight + + qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=False) + + return qkvw, \ + None, \ + self.client_module.attn.attention.out_proj.weight, \ + self.client_module.attn.attention.out_proj.bias + + def mlp(self): + return self.client_module.mlp.c_fc.weight, \ + self.client_module.mlp.c_fc.bias, \ + self.client_module.mlp.c_proj.weight, \ + self.client_module.mlp.c_proj.bias + + def layernorm(self): + return self.client_module.ln_2.weight, \ + self.client_module.ln_2.bias, \ + self.client_module.ln_1.weight, \ + self.client_module.ln_1.bias diff --git a/deepspeed/module_inject/containers/gptneox.py b/deepspeed/module_inject/containers/gptneox.py new file mode 100644 index 0000000..ebf2db0 --- /dev/null +++ b/deepspeed/module_inject/containers/gptneox.py @@ -0,0 +1,129 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +from .base import * +from .features.meta_tensor import MetaTensorContainer +from .features.megatron import MegatronContainer +from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference +import torch +from ..policy import TransformerPolicy +from ..policy import transformer_param_names +from ..policy import maybe_copy +from packaging import version as pkg_version + + +class DS_GPTNEOXContainer(MetaTensorContainer, + MegatronContainer, + BaseTransformerContainer): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + # All model specific things should be defined here instead of the base class. 
+ + def create_module(self, config=None): + _config = config if config is not None else self.ds_model_config + self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group) + self.module.config.scale_attention = self.scale_attention + + if self.megatron_v2: + self.module.config.rotate_half = True + self.module.config.rotate_every_two = False + + return self.module + + def load_params(self, module, sd, weight_quantizer, mp_replace, prefix): + param_names = ( + 'attention.query_key_value.weight', \ + 'attention.query_key_value.bias', \ + 'attention.dense.weight', \ + 'attention.dense.bias', \ + 'mlp.dense_h_to_4h.weight', \ + 'mlp.dense_h_to_4h.bias', \ + 'mlp.dense_4h_to_h.weight', \ + 'mlp.dense_4h_to_h.bias', \ + 'post_attention_layernorm.weight', \ + 'post_attention_layernorm.bias', \ + 'input_layernorm.weight', \ + 'input_layernorm.bias' + ) + for i in range(0, 2): + maybe_copy(module.attention, + sd, + weight_quantizer, + mp_replace, + transformer_param_names[i], + prefix + param_names[i], + qkv=True, + megatron_v2=self.policy.is_megatron_v2, + split_qkv=self.policy.split_qkv, + heads=self.policy.client_module.attention.num_attention_heads) + for i in range(2, 4): + maybe_copy(module.attention, + sd, + weight_quantizer, + mp_replace, + transformer_param_names[i], + prefix + param_names[i]) + for i in range(4, 10): + maybe_copy(module.mlp, + sd, + weight_quantizer, + mp_replace, + transformer_param_names[i], + prefix + param_names[i]) + for i in range(10, 12): + maybe_copy(module, + sd, + weight_quantizer, + mp_replace, + transformer_param_names[i], + prefix + param_names[i]) + + +class GPTNEOXLayerPolicy(TransformerPolicy): + _orig_layer_class = None + version = 0 + + def __init__(self, client_module, inference=True, megatron_v2=True, split_qkv=False): + super().__init__(inference, megatron_v2=megatron_v2, split_qkv=split_qkv) + self.client_module = client_module + if GPTNEOXLayerPolicy._orig_layer_class is None: + if pkg_version.parse(torch.__version__) 
 <= pkg_version.parse("1.2"): + GPTNEOXLayerPolicy._orig_layer_class = None + else: + try: + from transformers import GPTNeoXLayer + GPTNEOXLayerPolicy._orig_layer_class = GPTNeoXLayer + except ImportError: + GPTNEOXLayerPolicy._orig_layer_class = None + + def get_hidden_heads(self): + if GPTNEOXLayerPolicy.version == 0: + attention = self.client_module.attention + else: + attention = self.client_module.self_attention + + return attention.query_key_value.weight.shape[1], \ + attention.num_attention_heads + + def attention(self): + if GPTNEOXLayerPolicy.version == 0: + attention = self.client_module.attention + else: + attention = self.client_module.self_attention + + return attention.query_key_value.weight, \ + attention.query_key_value.bias, \ + attention.dense.weight, \ + attention.dense.bias + + def mlp(self): + return self.client_module.mlp.dense_h_to_4h.weight, \ + self.client_module.mlp.dense_h_to_4h.bias, \ + self.client_module.mlp.dense_4h_to_h.weight, \ + self.client_module.mlp.dense_4h_to_h.bias + + def layernorm(self): + return self.client_module.post_attention_layernorm.weight, \ + self.client_module.post_attention_layernorm.bias, \ + self.client_module.input_layernorm.weight, \ + self.client_module.input_layernorm.bias diff --git a/deepspeed/module_inject/containers/megatron_gpt.py b/deepspeed/module_inject/containers/megatron_gpt.py new file mode 100644 index 0000000..7a8db91 --- /dev/null +++ b/deepspeed/module_inject/containers/megatron_gpt.py @@ -0,0 +1,106 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +from .base import * +from .features.megatron import MegatronContainer +from deepspeed.model_implementations.transformers.ds_megatron_gpt import DeepSpeedMegatronGPTInference +import torch +from ..policy import TransformerPolicy +from packaging import version as pkg_version + + +class DS_MegatronGPTContainer(MegatronContainer, BaseTransformerContainer): + def __init__(self, **kwargs): + 
super().__init__(**kwargs) + + # All model specific things should be defined here instead of the base class. + + def create_module(self, config=None): + _config = config if config is not None else self.ds_model_config + self.module = DeepSpeedMegatronGPTInference(_config, mp_group=self.mp_group) + self.module.config.scale_attention = self.scale_attention + + if self.megatron_v2: + self.module.config.rotate_half = True + self.module.config.rotate_every_two = False + + return self.module + + +# TODO: Megatron GPT MoE inherits from Megatron policy and replaces mlp +# TODO: Generalize MoE overall goal, expand beyond Megatron +class MegatronLayerPolicy(TransformerPolicy): + _orig_layer_class = None + version = 0 + moe_type = 'standard' + megatron_v2 = True + use_mup = False + + def __init__(self, client_module, inference=True): + super().__init__(inference, + megatron_v2=MegatronLayerPolicy.megatron_v2, + use_mup=MegatronLayerPolicy.use_mup) + self.client_module = client_module + # we use megatron version to differentiate between the old and new + # megatron-lm source code + if MegatronLayerPolicy._orig_layer_class is None: + if pkg_version.parse(torch.__version__) <= pkg_version.parse("1.2"): + MegatronLayerPolicy._orig_layer_class = None + else: + try: + from megatron.model.transformer import ParallelTransformerLayer + MegatronLayerPolicy._orig_layer_class = ParallelTransformerLayer + except ImportError: + MegatronLayerPolicy._orig_layer_class = None + + def get_hidden_heads(self): + return self.client_module.attention.query_key_value.weight.shape[1], \ + self.client_module.attention.num_attention_heads + + def attention(self): + if self.inference: + if MegatronLayerPolicy.version == 0: + attention = self.client_module.attention + else: + attention = self.client_module.self_attention + + return attention.query_key_value.weight, \ + attention.query_key_value.bias, \ + attention.dense.weight, \ + attention.dense.bias + + def mlp(self, moe_type='standard'): + from 
deepspeed.moe.utils import has_moe_layers + moe, _ = has_moe_layers(self.client_module) + + if moe: + moe_experts = self.client_module.mlp.deepspeed_moe.experts.deepspeed_experts if moe_type == 'standard' else \ + self.client_module.mlp.moe.deepspeed_moe.experts.deepspeed_experts + num_experts = len(moe_experts) + if moe_type == 'standard': + return [moe_experts[i].dense_h_to_4h.weight for i in range(num_experts)], \ + [moe_experts[i].dense_h_to_4h.bias for i in range(num_experts)], \ + [moe_experts[i].dense_4h_to_h.weight for i in range(num_experts)], \ + [moe_experts[i].dense_4h_to_h.bias for i in range(num_experts)] + else: + + return [moe_experts[i].dense_h_to_4h.weight for i in range(num_experts)], \ + [moe_experts[i].dense_h_to_4h.bias for i in range(num_experts)], \ + [moe_experts[i].dense_4h_to_h.weight for i in range(num_experts)], \ + [moe_experts[i].dense_4h_to_h.bias for i in range(num_experts)], \ + self.client_module.mlp.mlp.dense_h_to_4h.weight, \ + self.client_module.mlp.mlp.dense_h_to_4h.bias, \ + self.client_module.mlp.mlp.dense_4h_to_h.weight, \ + self.client_module.mlp.mlp.dense_4h_to_h.bias, \ + self.client_module.mlp.coefficient.weight + + else: + return self.client_module.mlp.dense_h_to_4h.weight, \ + self.client_module.mlp.dense_h_to_4h.bias, \ + self.client_module.mlp.dense_4h_to_h.weight, \ + self.client_module.mlp.dense_4h_to_h.bias + + def layernorm(self): + return self.client_module.post_attention_layernorm.weight, \ + self.client_module.post_attention_layernorm.bias, \ + self.client_module.input_layernorm.weight, \ + self.client_module.input_layernorm.bias diff --git a/deepspeed/module_inject/containers/megatron_gpt_moe.py b/deepspeed/module_inject/containers/megatron_gpt_moe.py new file mode 100644 index 0000000..2968161 --- /dev/null +++ b/deepspeed/module_inject/containers/megatron_gpt_moe.py @@ -0,0 +1,82 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +from .base import * +from .base_moe import * +from .features.megatron import 
MegatronContainer +from deepspeed.model_implementations.transformers.ds_megatron_gpt import DeepSpeedMegatronGPTInference +import torch +from .megatron_gpt import MegatronLayerPolicy +from packaging import version as pkg_version + + +class DS_MegatronGPTMoEContainer(MegatronContainer, BaseTransformerMoEContainer): + def __init__(self, policy, config, model_config, layer_id): + super().__init__(policy, config, model_config, layer_id) + + # All model specific things should be defined here instead of the base class. + + def create_module(self, config=None): + _config = config if config is not None else self.ds_model_config + self.module = DeepSpeedMegatronGPTInference(_config, mp_group=self.mp_group) + self.module.config.scale_attention = self.scale_attention + + if self.megatron_v2: + self.module.config.rotate_half = True + self.module.config.rotate_every_two = False + + return self.module + + +# TODO: Megatron GPT MoE inherits from Megatron policy and replaces mlp +# TODO: Generalize MoE overall goal, expand beyond Megatron +class MegatronMoELayerPolicy(MegatronLayerPolicy): + _orig_layer_class = None + version = 0 + moe_type = 'standard' + num_experts = 1 + + def __init__(self, client_module, inference=True): + super().__init__(inference) + self.client_module = client_module + # we use megatron version to differentiate between the old and new + # megatron-lm source code + if MegatronMoELayerPolicy._orig_layer_class is None: + if pkg_version.parse(torch.__version__) <= pkg_version.parse("1.2"): + MegatronMoELayerPolicy._orig_layer_class = None + else: + try: + from megatron.model.transformer import ParallelTransformerLayer + MegatronMoELayerPolicy._orig_layer_class = ParallelTransformerLayer + except ImportError: + MegatronMoELayerPolicy._orig_layer_class = None + + def get_num_experts(self): + return self.num_experts + + def mlp(self, moe_type='standard'): + # for now, all of this is tightly coupled to megatron-deepspeed moe implementation + # todo: think and 
refactor this to be more general + + #from deepspeed.moe.utils import has_moe_layers + #moe, _ = has_moe_layers(self.client_module) + + moe_experts = self.client_module.mlp.deepspeed_moe.experts.deepspeed_experts if moe_type == 'standard' else \ + self.client_module.mlp.moe.deepspeed_moe.experts.deepspeed_experts + num_experts = len(moe_experts) + self.num_experts = num_experts + + if moe_type == 'standard': + return [moe_experts[i].dense_h_to_4h.weight for i in range(num_experts)], \ + [moe_experts[i].dense_h_to_4h.bias for i in range(num_experts)], \ + [moe_experts[i].dense_4h_to_h.weight for i in range(num_experts)], \ + [moe_experts[i].dense_4h_to_h.bias for i in range(num_experts)] + else: + return [moe_experts[i].dense_h_to_4h.weight for i in range(num_experts)], \ + [moe_experts[i].dense_h_to_4h.bias for i in range(num_experts)], \ + [moe_experts[i].dense_4h_to_h.weight for i in range(num_experts)], \ + [moe_experts[i].dense_4h_to_h.bias for i in range(num_experts)], \ + self.client_module.mlp.mlp.dense_h_to_4h.weight, \ + self.client_module.mlp.mlp.dense_h_to_4h.bias, \ + self.client_module.mlp.mlp.dense_4h_to_h.weight, \ + self.client_module.mlp.mlp.dense_4h_to_h.bias, \ + self.client_module.mlp.coefficient.weight diff --git a/deepspeed/module_inject/containers/opt.py b/deepspeed/module_inject/containers/opt.py new file mode 100644 index 0000000..8f9c30b --- /dev/null +++ b/deepspeed/module_inject/containers/opt.py @@ -0,0 +1,134 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +from .base import * +from .features.meta_tensor import MetaTensorContainer +from deepspeed.model_implementations.transformers.ds_opt import DeepSpeedOPTInference +import torch +from torch.nn.parameter import Parameter +from ..policy import TransformerPolicy +from ..policy import transformer_param_names +from ..policy import maybe_copy +from ..policy import maybe_copy_qkv +from deepspeed.utils.types import ActivationFuncType + + +class DS_OPTContainer(MetaTensorContainer, 
BaseTransformerContainer): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + # All model specific things should be defined here instead of the base class. + + def create_module(self, config=None): + _config = config if config is not None else self.ds_model_config + self.module = DeepSpeedOPTInference(_config, mp_group=self.mp_group) + self.module.config.scale_attention = self.scale_attention + return self.module + + def load_params(self, module, sd, weight_quantizer, mp_replace, prefix): + param_names = ( + 'self_attn.q_proj.weight', \ + 'self_attn.k_proj.weight', \ + 'self_attn.v_proj.weight', \ + 'self_attn.q_proj.bias', \ + 'self_attn.k_proj.bias', \ + 'self_attn.v_proj.bias', \ + 'self_attn.out_proj.weight', \ + 'self_attn.out_proj.bias', \ + 'fc1.weight', \ + 'fc1.bias', \ + 'fc2.weight', \ + 'fc2.bias', \ + 'final_layer_norm.weight', \ + 'final_layer_norm.bias', \ + 'self_attn_layer_norm.weight', \ + 'self_attn_layer_norm.bias' + ) + + for i in range(0, 6, 3): + maybe_copy_qkv(module.attention, + sd, + weight_quantizer, + mp_replace, + transformer_param_names[i // 3], + [ + prefix + param_names[i], + prefix + param_names[i + 1], + prefix + param_names[i + 2] + ], + split_qkv=self.policy.split_qkv) + for i in range(6, 8): + maybe_copy(module.attention, + sd, + weight_quantizer, + mp_replace, + transformer_param_names[i - 4], + prefix + param_names[i]) + for i in range(8, 14): + maybe_copy(module.mlp, + sd, + weight_quantizer, + mp_replace, + transformer_param_names[i - 4], + prefix + param_names[i]) + for i in range(14, 16): + maybe_copy(module, + sd, + weight_quantizer, + mp_replace, + transformer_param_names[i - 4], + prefix + param_names[i]) + + +class HFOPTLayerPolicy(TransformerPolicy): + _orig_layer_class = None + + def __init__(self, client_module, inference=True, use_load_prefix=True): + super().__init__(inference, + linear_layer=True, + mlp_act_func_type=ActivationFuncType.ReLU, + pre_attn_norm=True, + use_load_prefix=use_load_prefix) + 
self.client_module = client_module + + try: + import transformers + HFOPTLayerPolicy._orig_layer_class = transformers.models.opt.modeling_opt.OPTDecoderLayer + if isinstance(TransformerPolicy.hf_model_config, + transformers.models.opt.configuration_opt.OPTConfig): + self.pre_attn_norm = TransformerPolicy.hf_model_config.do_layer_norm_before + except: + HFOPTLayerPolicy._orig_layer_class = None + + def get_hidden_heads(self): + return self.client_module.self_attn.embed_dim, \ + self.client_module.self_attn.num_heads + + def attention(self): + qw = self.client_module.self_attn.q_proj.weight + qb = self.client_module.self_attn.q_proj.bias + + kw = self.client_module.self_attn.k_proj.weight + kb = self.client_module.self_attn.k_proj.bias + + vw = self.client_module.self_attn.v_proj.weight + vb = self.client_module.self_attn.v_proj.bias + + qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=False) + qkvb = Parameter(torch.cat((qb, kb, vb), dim=0), requires_grad=False) + + return qkvw, \ + qkvb, \ + self.client_module.self_attn.out_proj.weight, \ + self.client_module.self_attn.out_proj.bias + + def mlp(self): + return self.client_module.fc1.weight, \ + self.client_module.fc1.bias, \ + self.client_module.fc2.weight, \ + self.client_module.fc2.bias + + def layernorm(self): + return self.client_module.final_layer_norm.weight, \ + self.client_module.final_layer_norm.bias, \ + self.client_module.self_attn_layer_norm.weight, \ + self.client_module.self_attn_layer_norm.bias diff --git a/deepspeed/module_inject/containers/unet.py b/deepspeed/module_inject/containers/unet.py new file mode 100644 index 0000000..461ca12 --- /dev/null +++ b/deepspeed/module_inject/containers/unet.py @@ -0,0 +1,51 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +''' +import torch +from torch.nn.parameter import Parameter + +from ..policy import DSPolicy +from ...model_implementations.diffusers.unet import DSUNet + + +class UNetPolicy(DSPolicy): + def __init__(self): + 
super().__init__() + try: + import diffusers + self._orig_layer_class = diffusers.models.unet_2d_condition.UNet2DConditionModel + except ImportError: + self._orig_layer_class = None + + def match(self, module): + return isinstance(module, self._orig_layer_class) + + def match_replaced(self, module): + return isinstance(module, DSUNet) + + def apply(self, module, enable_cuda_graph=True): + # TODO(cmikeh2): Enable cuda graph should be an inference configuration + return DSUNet(module, enable_cuda_graph=enable_cuda_graph) + + def attention(self, client_module): + qw = client_module.to_q.weight + kw = client_module.to_k.weight + vw = client_module.to_v.weight + + if qw.shape[1] == kw.shape[1]: + qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=False) + + return qkvw, \ + client_module.to_out[0].weight, \ + client_module.to_out[0].bias, \ + qw.shape[-1], \ + client_module.heads + else: + #return None + #kvw = Parameter(torch.cat((kw, vw), dim=0), requires_grad=False) + return qw, \ + kw, vw, \ + client_module.to_out[0].weight, \ + client_module.to_out[0].bias, \ + qw.shape[-1], \ + client_module.heads diff --git a/deepspeed/module_inject/containers/vae.py b/deepspeed/module_inject/containers/vae.py new file mode 100644 index 0000000..c873f97 --- /dev/null +++ b/deepspeed/module_inject/containers/vae.py @@ -0,0 +1,33 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +''' +from ..policy import DSPolicy +from ...model_implementations.diffusers.vae import DSVAE + + +class VAEPolicy(DSPolicy): + def __init__(self): + super().__init__() + try: + import diffusers + if hasattr(diffusers.models.vae, "AutoencoderKL"): + self._orig_layer_class = diffusers.models.vae.AutoencoderKL + else: + # Diffusers >= 0.12.0 changes location of AutoencoderKL + self._orig_layer_class = diffusers.models.autoencoder_kl.AutoencoderKL + except ImportError: + self._orig_layer_class = None + + def match(self, module): + return isinstance(module, self._orig_layer_class) + + def 
match_replaced(self, module): + return isinstance(module, DSVAE) + + def apply(self, module, enable_cuda_graph=True): + # TODO(cmikeh2): Enable cuda graph should be an inference configuration + return DSVAE(module, enable_cuda_graph=enable_cuda_graph) + + # NOTE (lekurile): Should we have a diffusers policy class? + def attention(self): + pass diff --git a/deepspeed/module_inject/inject.py b/deepspeed/module_inject/inject.py old mode 100644 new mode 100755 index a601ef1..384bb72 --- a/deepspeed/module_inject/inject.py +++ b/deepspeed/module_inject/inject.py @@ -1,3 +1,5 @@ +'''Copyright The Microsoft DeepSpeed Team''' + import copy import torch from deepspeed.ops.transformer import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig diff --git a/deepspeed/module_inject/layers.py b/deepspeed/module_inject/layers.py new file mode 100644 index 0000000..0636e89 --- /dev/null +++ b/deepspeed/module_inject/layers.py @@ -0,0 +1,101 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import torch +from deepspeed import comm as dist +from torch import nn +from torch.nn import functional as F + +from torch.nn.parameter import Parameter +from deepspeed.accelerator import get_accelerator + + +class LinearAllreduce(nn.Module): + def __init__(self, weight, bias=None, mp_group=None): + super(LinearAllreduce, self).__init__() + self.weight = weight + self.bias = bias + self.mp_group = mp_group + + def forward(self, input): + output = torch.matmul(input, self.weight.transpose(-1, -2)) + if self.mp_group is not None: + dist.all_reduce(output, group=self.mp_group) + if self.bias is not None: + output += self.bias + return output + + +class LinearLayer(nn.Module): + def __init__(self, weight_shape=None, dtype=torch.half, weight=None, bias=None): + super(LinearLayer, self).__init__() + if weight is not None: + self.weight = weight + self.bias = bias + else: + self.weight = Parameter( + torch.empty(weight_shape, + dtype=dtype, + device=get_accelerator().current_device_name())) + + 
self.bias = Parameter( + torch.empty(weight_shape[0], + dtype=dtype, + device=get_accelerator().current_device_name())) \ + if bias is not None else None + + def forward(self, input): + output = torch.matmul(input, self.weight.transpose(-1, -2)) + if self.bias is not None: + output += self.bias + return output + + +class Normalize(nn.Module): + def __init__(self, dim, dtype=torch.float, eps=1e-5): + super(Normalize, self).__init__() + self.norm = nn.LayerNorm(dim, + eps=eps).to(dtype).to( + get_accelerator().current_device_name()) + self.weight = self.norm.weight + self.bias = self.norm.bias + + def forward(self, input): + return self.norm(input) + + +class EmbeddingLayer(nn.Module): + def __init__(self, weight_shape, dtype=torch.half): + super(EmbeddingLayer, self).__init__() + self.weight = Parameter( + torch.empty(weight_shape[0], + weight_shape[1], + dtype=dtype, + device=get_accelerator().current_device_name())) + + def forward(self, input): + return F.embedding(input, self.weight) + + +class OPTEmbedding(EmbeddingLayer): + """ + This module learns positional embeddings up to a fixed maximum size. + """ + def __init__(self, weight_shape): + # OPT is set up so that if padding_idx is specified then offset the embedding ids by 2 + # and adjust num_embeddings appropriately. 
Other models don't have this hack + self.offset = 2 + super().__init__(weight_shape) + + def forward(self, attention_mask: torch.LongTensor, past_key_values_length: int = 0): + """`input_ids_shape` is expected to be [bsz x seqlen].""" + attention_mask = attention_mask.long() + + # create positions depending on attention_mask + positions = (torch.cumsum(attention_mask, + dim=1).type_as(attention_mask) * + attention_mask).long() - 1 + + # cut positions if `past_key_values_length` is > 0 + positions = positions[:, past_key_values_length:] + + return super().forward(positions + self.offset) diff --git a/deepspeed/module_inject/load_checkpoint.py b/deepspeed/module_inject/load_checkpoint.py new file mode 100644 index 0000000..ff8f454 --- /dev/null +++ b/deepspeed/module_inject/load_checkpoint.py @@ -0,0 +1,289 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +from torch import nn +from deepspeed.model_implementations.transformers.ds_bloom import DeepSpeedBloomInference +from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference +from deepspeed.model_implementations.transformers.ds_bert import DeepSpeedBERTInference +from deepspeed.model_implementations.transformers.ds_megatron_gpt import DeepSpeedMegatronGPTInference +from deepspeed.model_implementations.transformers.ds_opt import DeepSpeedOPTInference + +import deepspeed.ops.transformer as transformer_inference +from .layers import LinearLayer, Normalize, EmbeddingLayer, OPTEmbedding +import torch +import gc +from deepspeed.accelerator import get_accelerator + + +def load_model_with_checkpoint(r_module, + sd, + mp_replace, + ckpt_type, + ckpt_mp_size, + weight_quantizer=None, + rank=0, + container=None): + error_msgs = [] + + def transpose(data): + with torch.no_grad(): + data = data.contiguous() + data1 = data.transpose(-1, -2).reshape(-1) + data.reshape(-1).copy_(data1) + data1 = None + return data.reshape(data.shape[-1], data.shape[-2]) + + def load(module, prefix): + args = (sd[0], 
prefix, {}, True, [], [], error_msgs) + + if hasattr(module, 'weight'): + module.weight = mp_replace.copy(module.weight.data, sd[0][prefix + 'weight']) + if prefix + 'bias' in sd[0].keys(): + if module.bias.data.is_meta: + # meta tensor cannot be casted or copied to, so we need to replace it with a normal tensor here + module.bias = torch.nn.parameter.Parameter( + data=torch.empty_like(module.bias.data, + device="cpu"), + requires_grad=module.bias.data.requires_grad) + module.bias = mp_replace.copy(module.bias.data, sd[0][prefix + 'bias']) + args = None + gc.collect() + + def load_transformer_layer(module, prefix): + if ckpt_type == "tp": + + def load_parameters(module, prefix): + for n, p in module.named_parameters(): + if prefix + n in sd[0] and len(n.split('.')) == 1: + if type(sd[0][prefix + n]) is list: + tmp_data, scale = sd[0][prefix + n] + tmp_data = tmp_data + scale = scale.to(get_accelerator().current_device_name()) + # set the quantizer number of groups using the checkpoint scale shape + weight_quantizer.num_groups = scale.shape[0] + else: + tmp_data = sd[0][prefix + n].to( + get_accelerator().current_device_name()) + scale = None + src_shape = tmp_data.shape + dst_shape = p.shape + inner_dim = 1 if tmp_data.dtype == torch.int8 else 0 + outer_dim = 0 if tmp_data.dtype == torch.int8 else 1 + if (len(src_shape) == 2 and len(dst_shape) == 2): + if (src_shape[inner_dim] == dst_shape[0] + and src_shape[outer_dim] == dst_shape[1]): + if tmp_data.dtype != torch.int8: + p = weight_quantizer.quantize( + transpose(tmp_data) if weight_quantizer. 
+ q_int8 else tmp_data) + else: + p = torch.nn.parameter.Parameter(tmp_data, + requires_grad=False) + p.scale = scale + setattr(module, n, p) + else: + dim = inner_dim if src_shape[inner_dim] != dst_shape[ + 0] else outer_dim + dim1 = 0 if src_shape[inner_dim] != dst_shape[0] else 1 + if src_shape[dim] > dst_shape[dim1]: + weight_partition = torch.split( + tmp_data, + dst_shape[dim1], + dim=dim)[rank].to( + get_accelerator().current_device_name()) + assert tmp_data.dtype != torch.int8 or scale.numel() > weight_quantizer.num_groups * (rank+1), \ + '''ERROR: We require the quantization scales for larger TP-size when loading INT8 checkpoint!\ + Please use the FP16 checkpoint to generate INT8 checkpoint with the sharding parameters!''' + scale = scale.view( + -1)[weight_quantizer.num_groups * + (rank + 1):].reshape( + weight_quantizer.num_groups, + -1).contiguous() + else: + assert tmp_data.dtype != torch.int8, \ + '''Merging of the checkpoints are not supported when using INT8 checkpoint! \ + Please use a as many GPUs as TP-size for the checkpoint''' + all_data = [ + sd[j][prefix + + n] if type(sd[j][prefix + n]) is list else + sd[j][prefix + n].to( + get_accelerator().current_device_name()) + for j in range(len(sd)) + ] + # Check if the weight tensor is for the QKV parameter + if src_shape[1] == (3 * + src_shape[0]) // ckpt_mp_size: + qkv_size = src_shape[outer_dim] // 3 + src_split = [ + torch.split(src[0].data, + qkv_size, + dim=outer_dim) + for src in all_data + ] + + weight_partition = torch.cat([ + torch.cat([qkv_s[i] for qkv_s in src_split], + axis=outer_dim) + for i in range(len(src_split[0])) + ], + dim=dim) + else: + weight_partition = torch.cat([ + ad[0].to( + get_accelerator().current_device_name()) + if type(ad) is list else ad + for ad in all_data + ], + dim=dim) + if tmp_data.dtype == torch.int8: + scale = torch.cat([ + ad[1].to( + get_accelerator().current_device_name()) + for ad in all_data + ], + dim=dim) + + if tmp_data.dtype != torch.int8: + 
weight_partition = weight_quantizer.quantize( + transpose(weight_partition), \ + parallel_dim=(0 if dim == 1 else 1)) if weight_quantizer.q_int8 else \ + weight_quantizer.quantize(weight_partition) + else: + weight_partition = torch.nn.parameter.Parameter( + weight_partition, + requires_grad=False) + weight_partition.scale = scale + setattr(module, n, weight_partition) + else: + if src_shape[0] == dst_shape[0]: + p.data.copy_(tmp_data) + else: + if src_shape[0] > dst_shape[0]: + bias_split = torch.split( + tmp_data, + dst_shape[-1])[rank].to(get_accelerator( + ).current_device_name()).contiguous() + p.data.copy_(bias_split) + else: + # Check if the weight tensor is for the QKV parameter + if src_shape[0] == (3 * r_module.config.hidden_size + ) // ckpt_mp_size: + qkv_size = src_shape[0] // 3 + src_split = [ + torch.split(sd[j][prefix + n], + qkv_size, + dim=0) for j in range(len(sd)) + ] + + p.data.copy_( + torch.cat( + [ + torch.cat([ + qkv_s[i] for qkv_s in src_split + ], + axis=0) + for i in range(len(src_split[0])) + ], + dim=0).to(get_accelerator( + ).current_device_name()).contiguous()) + else: + p.data.copy_( + torch.cat( + [ + sd[j][prefix + n] + for j in range(len(sd)) + ], + dim=0).to(get_accelerator( + ).current_device_name()).contiguous()) + + load_parameters(module, prefix) + for n, child in module.named_children(): + load_parameters(child, prefix + n + '.') + else: + container.load_params(module, sd[0], weight_quantizer, mp_replace, prefix) + + try: + import transformers + OPTLearnedPositionalEmbedding = transformers.models.opt.modeling_opt.OPTLearnedPositionalEmbedding + except: + OPTLearnedPositionalEmbedding = None + layer_policies = { + nn.Linear: load, + nn.Embedding: load, + nn.LayerNorm: load, + EmbeddingLayer: load, + LinearLayer: load, + Normalize: load, + transformer_inference.DeepSpeedTransformerInference: load_transformer_layer, + DeepSpeedBloomInference: load_transformer_layer, + DeepSpeedGPTInference: load_transformer_layer, + 
DeepSpeedBERTInference: load_transformer_layer, + DeepSpeedMegatronGPTInference: load_transformer_layer, + DeepSpeedOPTInference: load_transformer_layer, + OPTLearnedPositionalEmbedding: load, + OPTEmbedding: load + } + + all_ds_ids = {} + + def load_module_recursive(module, prefix='', level=0): + for name, child in module.named_children(): + if child.__class__ in layer_policies: + checking_key = prefix + name + '.' + if not any(checking_key in item for item in sd[0].keys()): + if hasattr(child, 'weight') and \ + (hasattr(child.weight, 'ds_id') and \ + child.weight.ds_id in all_ds_ids): + prefix1 = all_ds_ids[child.weight.ds_id] + if child.__class__ is nn.Linear: + child = LinearLayer(weight=all_ds_ids[child.weight.ds_id]) + setattr(module, name, child) + continue + child_params = list(child.parameters()) + if len(child_params) > 0 and (child_params[0].numel() == 0 + or child_params[0].is_meta): + if child.weight.is_meta: + ds_shape = child.weight.shape + else: + ds_shape = child.weight.ds_shape + if child.__class__ is nn.LayerNorm: + child = Normalize(dim=ds_shape[-1], + dtype=child.weight.dtype, + eps=child.eps) + setattr(module, name, child) + elif child.__class__ is nn.Linear: + child = LinearLayer(weight_shape=child.weight.shape, + bias=child.bias) + setattr(module, name, child) + elif child.__class__ is OPTLearnedPositionalEmbedding: + child = OPTEmbedding(weight_shape=ds_shape) + setattr(module, name, child) + else: + ds_id = None + if hasattr(child.weight, 'ds_id'): + ds_id = child.weight.ds_id + child = EmbeddingLayer(weight_shape=ds_shape, + dtype=child.weight.dtype) + if ds_id is not None: + all_ds_ids[ds_id] = child.weight + setattr(module, name, child) + layer_policies[child.__class__](child, prefix + name + '.') + else: + load_module_recursive( + child, + prefix if (level == 0 and ckpt_type == 'pp') and container.policy.use_load_prefix else \ + prefix + name + '.', + level + 1) + + load_module_recursive(r_module) + + embedding_weight = None + + for n, 
p in r_module.named_parameters(): + if "word_embeddings." in n or "embed_tokens." in n or "wte." in n: + embedding_weight = p + if embedding_weight is not None and r_module.lm_head.weight.is_meta: + r_module.lm_head.weight = embedding_weight + for sd_ in sd: + del sd_ + sd = None + gc.collect() diff --git a/deepspeed/module_inject/module_quantize.py b/deepspeed/module_inject/module_quantize.py old mode 100644 new mode 100755 index fde6990..4123a12 --- a/deepspeed/module_inject/module_quantize.py +++ b/deepspeed/module_inject/module_quantize.py @@ -1,6 +1,6 @@ -import copy +'''Copyright The Microsoft DeepSpeed Team''' + import torch -import deepspeed def quantize_transformer_layer(orig_layer_impl, model, megatron=False, preln=False): diff --git a/deepspeed/module_inject/policy.py b/deepspeed/module_inject/policy.py new file mode 100644 index 0000000..dfd3343 --- /dev/null +++ b/deepspeed/module_inject/policy.py @@ -0,0 +1,205 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +''' +from abc import ABC, abstractmethod +from deepspeed.utils.types import ActivationFuncType +import torch +from deepspeed.accelerator import get_accelerator + +transformer_param_names = ( + 'attn_qkvw', \ + 'attn_qkvb', \ + 'attn_ow' , \ + 'attn_ob', \ + 'inter_w', \ + 'inter_b', \ + 'output_w', \ + 'output_b', \ + 'attn_nw', \ + 'attn_nb', \ + 'norm_w', \ + 'norm_b') + + +class DSPolicy(ABC): + _orig_layer_class = None + + def __init__(self): + self.cuda_graph_supported = False + + @abstractmethod + def attention(self): + """ + Returns attention qkv and dense parameters + weight: (3*hidden, hidden) and (hidden, hidden) + bias: (3*hidden) and (hidden) + """ + raise NotImplementedError + + +class TransformerPolicy(DSPolicy): + # a static class variable containing the HuggingFace model configuration. 
+ # see e.g., transformers.models.opt.configuration_opt.OPTConfig + hf_model_config = None + + def __init__( + self, + inference=True, + linear_layer=True, + scale_attention=True, + megatron_v2=False, + use_mup=False, + # the type of activation function used in MLP + mlp_act_func_type=ActivationFuncType.GELU, + # applies layer norm before attention if `pre_attn_norm` is set to True + pre_attn_norm=True, + # this flag shows whether or not using prefix in loading the checkpoint + use_load_prefix=False, + # whether or not the qkv is stored in the split-format + split_qkv=True): + super().__init__() + self.cuda_graph_supported = False + self.inference = inference + self.linear_layer = linear_layer + self.scale_attention = scale_attention + self.is_megatron_v2 = megatron_v2 + self.use_mup = use_mup + self.mlp_act_func_type = mlp_act_func_type + self.pre_attn_norm = pre_attn_norm + self.use_load_prefix = use_load_prefix + self.split_qkv = split_qkv + + @abstractmethod + def attention(self): + """ + Returns attention qkv and dense parameters + weight: (3*hidden, hidden) and (hidden, hidden) + bias: (3*hidden) and (hidden) + """ + raise NotImplementedError + + @abstractmethod + def get_hidden_heads(self): + """ + return hidden_size and number of heads + """ + raise NotImplementedError + + @abstractmethod + def mlp(self): + """ + Returns mlp intermediate and output + weight: (intermediate, hidden) and (hidden, intermediate) + bias: (intermediate) and (hidden) + """ + raise NotImplementedError + + @abstractmethod + def layernorm(self): + """ + Returns LayerNorms used in transformer layer + Post-Attention and pre/post layer norm + gamma and beta with shape: (hidden) + """ + raise NotImplementedError + + +# TODO (lekurile): This function exists in base container as well, consolidate as some point +def transpose(data): + with torch.no_grad(): + data = data.contiguous() + data1 = data.transpose(-1, -2).reshape(-1) + data.reshape(-1).copy_(data1) + data1 = None + return 
data.reshape(data.shape[-1], data.shape[-2]) + + +# TODO (lekurile): This function exists in megatron feature container as well, consolidate as some point +def _transpose(x, heads=1, mp_replace=None): + heads = heads // mp_replace.mp_size + outer_dim = -1 + attention_head_size = x.shape[outer_dim] // heads + new_x_shape = x.size()[:outer_dim] + (heads, attention_head_size) + x_1 = x.view(*new_x_shape) + (q, k, v) = torch.split(x_1, (x_1.shape[-1] // 3), dim=-1) + if len(q.shape) > 2: + new_shape = (q.shape[0], ) + (-1, ) + return torch.cat((q.reshape(new_shape), + k.reshape(new_shape), + v.reshape(new_shape)), + dim=outer_dim).reshape(x.shape) + else: + return torch.cat((q.reshape(-1), + k.reshape(-1), + v.reshape(-1)), + dim=-1).reshape(x.shape) + + +# This checks if the parameter exits in the checkpoint file and maybe copies it into the corresponding destination tensor. +# Note that not all parameters are saved in one checkpoint, that's why we always need to check if they exist! +def maybe_copy(module, + sd, + weight_quantizer, + mp_replace, + dst_name, + src_name, + qkv=False, + megatron_v2=False, + split_qkv=False, + heads=1): + if src_name in sd: + dst = getattr(module, dst_name) + tmp = sd[src_name] + if len(dst.shape) == 1: + if split_qkv: + dst = mp_replace.qkv_copy(dst, tmp) + else: + dst = mp_replace.copy(dst, tmp) + if qkv and megatron_v2: + dst = torch.nn.parameter.Parameter( + _transpose(dst, + heads=heads, + mp_replace=mp_replace).contiguous()) + else: + if split_qkv: + dst = mp_replace.qkv_copy(dst, weight_quantizer.quantize(tmp if weight_quantizer.q_int8 else \ + (transpose(tmp).contiguous())), int8=weight_quantizer.q_int8) + else: + if qkv and megatron_v2: + tmp = _transpose(transpose(tmp), + heads=heads, + mp_replace=mp_replace).contiguous() + if weight_quantizer.q_int8: + tmp = transpose(tmp) + dst = mp_replace.copy(dst, weight_quantizer.quantize(tmp if weight_quantizer.q_int8 else \ + transpose(tmp)), int8=weight_quantizer.q_int8) + 
setattr(module, dst_name, dst) + + +# Extending the maybe_copy function for when the q, k, and v are in separate parameters! +def maybe_copy_qkv(module, + sd, + weight_quantizer, + mp_replace, + dst_name, + src_names, + split_qkv=False): + if src_names[0] in sd: + q = sd[src_names[0]] + k = sd[src_names[1]] + v = sd[src_names[2]] + qkv_data = torch.cat((q, k, v), dim=0) + dst = getattr(module, dst_name) + if len(dst.shape) == 1: + if split_qkv: + dst = mp_replace.qkv_copy(dst, qkv_data.contiguous()) + else: + dst = mp_replace.copy(dst, qkv_data) + else: + if split_qkv: + dst = mp_replace.qkv_copy(dst, weight_quantizer.quantize(qkv_data.to(get_accelerator().device_name()) if weight_quantizer.q_int8 else \ + ((transpose(qkv_data)).contiguous())), int8=weight_quantizer.q_int8) + else: + dst = mp_replace.copy(dst, weight_quantizer.quantize(qkv_data.to(get_accelerator().device_name()) if weight_quantizer.q_int8 else \ + transpose(qkv_data)), int8=weight_quantizer.q_int8) + setattr(module, dst_name, dst) diff --git a/deepspeed/module_inject/replace_module.py b/deepspeed/module_inject/replace_module.py index 62d5cd7..c9032f9 100644 --- a/deepspeed/module_inject/replace_module.py +++ b/deepspeed/module_inject/replace_module.py @@ -1,49 +1,36 @@ -import copy +'''Copyright The Microsoft DeepSpeed Team''' + +import os import torch +import tqdm import deepspeed import deepspeed.ops.transformer as transformer_inference -from .replace_policy import HFBertLayerPolicy, HFGPT2LayerPolicy, HFGPTJLayerPolicy -from .replace_policy import replace_policies -from ..constants import INFERENCE_GENERIC_MODE, INFERENCE_SPECIALIZED_MODE -from ..runtime.weight_quantizer import WeightQuantization +from deepspeed.ops.transformer.inference.diffusers_attention import DeepSpeedDiffusersAttention +from deepspeed.ops.transformer.inference.diffusers_transformer_block import DeepSpeedDiffusersTransformerBlock +from deepspeed.ops.transformer.inference.diffusers_2d_transformer import 
Diffusers2DTransformerConfig +from deepspeed.accelerator import get_accelerator +from .replace_policy import HFGPT2LayerPolicy +from .replace_policy import replace_policies, generic_policies + +from deepspeed import comm as dist from torch import nn +from .layers import LinearAllreduce, LinearLayer +from .load_checkpoint import load_model_with_checkpoint +import time -class LinearAllreduce(nn.Module): - def __init__(self, weight, bias=None, mp_group=None): - super(LinearAllreduce, self).__init__() - self.weight = weight - self.bias = bias - self.mp_group = mp_group - - def forward(self, input): - output = torch.matmul(input, self.weight) - if self.mp_group is not None: - torch.distributed.all_reduce(output, group=self.mp_group) - if self.bias is not None: - output += self.bias - return output - - -class LinearLayer(nn.Module): - def __init__(self, weight, bias=None): - super(LinearLayer, self).__init__() - self.weight = weight - self.bias = bias - - def forward(self, input): - output = torch.matmul(input, self.weight) - if self.bias is not None: - output += self.bias - return output +from .utils import policy_to_ds_container class ReplaceWithTensorSlicing: - def __init__(self, mp_group=None): + def __init__(self, mp_group=None, mp_size=1, out_dim=1, in_dim=0): if mp_group is not None: - self.gpu_index = torch.distributed.get_rank(group=mp_group) + self.gpu_index = dist.get_rank(group=mp_group) else: self.gpu_index = 0 + self.out_dim = out_dim + self.in_dim = in_dim + self.mp_size = mp_size def merge_assert(self, dim1, dim2): assert dim1 > dim2, \ @@ -51,495 +38,415 @@ class ReplaceWithTensorSlicing: for merging your checkpoints before replacing the transformer layer with\ inference-kernels' - def qkv_copy(self, dst, src): + def qkv_copy(self, dst, src, int8=False): if src is None: - return torch.nn.Parameter(src) + return src src_shape = src.shape dst_shape = dst.shape - src_split = torch.split(src.data, src.shape[-1] // 3, dim=-1) + outer_dim = 0 if int8 else -1 + 
inner_dim = -1 if int8 else 0 + src_split = torch.split(src.data, src.shape[outer_dim] // 3, dim=outer_dim) if (len(src_shape) == 2 and len(dst_shape) == 2): - if src_shape[1] == dst_shape[1]: - return torch.nn.Parameter(src) - - self.merge_assert(src_shape[1], dst_shape[1]) - qkv_size = dst_shape[1] // 3 - qkv_split = [torch.split(src_s, qkv_size, dim=1) for src_s in src_split] - - weight_split = [ - torch.cat([qkv_s[i] for qkv_s in qkv_split], - axis=1) for i in range(len(qkv_split[0])) - ] - dst.data.copy_(weight_split[self.gpu_index].to( - torch.cuda.current_device()).contiguous()) + if src_shape[outer_dim] == dst_shape[self.out_dim]: + dst = dst.reshape(-1).data.copy_(src.data.reshape(-1)).reshape(src.shape) + dst = torch.nn.parameter.Parameter(dst, requires_grad=False) + if hasattr(src, 'scale'): + dst.scale = src.scale + return dst + if self.out_dim == 1: + self.merge_assert(src_shape[outer_dim], dst_shape[self.out_dim]) + qkv_size = dst_shape[self.out_dim] // 3 + qkv_split = [ + torch.split(src_s, + qkv_size, + dim=outer_dim) for src_s in src_split + ] + + weight_split = [ + torch.cat([qkv_s[i] for qkv_s in qkv_split], + axis=outer_dim) for i in range(len(qkv_split[0])) + ] + dst = dst.reshape(-1).data.copy_( + weight_split[self.gpu_index].contiguous().reshape(-1)).reshape( + weight_split[self.gpu_index].shape) + else: + dst.data.copy_(src_split[self.gpu_index].to( + get_accelerator().current_device_name()).contiguous()) else: if src_shape[0] == dst_shape[0]: - return torch.nn.Parameter(src) - - qkv_size = dst_shape[0] // 3 - qkv_split = [torch.split(src_s, qkv_size, dim=0) for src_s in src_split] - bias_split = [ - torch.cat([qkv_s[i] for qkv_s in qkv_split], - axis=0) for i in range(len(qkv_split[0])) - ] - dst.data.copy_(bias_split[self.gpu_index].to( - torch.cuda.current_device()).contiguous()) + return torch.nn.parameter.Parameter(src) + if self.out_dim == 1: + qkv_size = dst_shape[0] // 3 + qkv_split = [torch.split(src_s, qkv_size, dim=0) for src_s in 
src_split] + bias_split = [ + torch.cat([qkv_s[i] for qkv_s in qkv_split], + axis=0) for i in range(len(qkv_split[0])) + ] + dst.data.copy_(bias_split[self.gpu_index].contiguous()) + else: + dst.data.copy_(src_split[self.gpu_index].contiguous()) - return torch.nn.Parameter(dst) + dst = torch.nn.parameter.Parameter(dst, requires_grad=False) + if hasattr(src, 'scale'): + dst.scale = src.scale + return dst - def copy(self, dst, src): + def copy(self, dst, src, int8=False): if src is None: - return torch.nn.Parameter(src) - + return src + assert not dst.data.is_meta # the torch.Tensor.copy_ method used below will silently fail on meta tensors + outer_dim = 0 if int8 else 1 + inner_dim = 1 if int8 else 0 src_shape = src.shape dst_shape = dst.shape - if (len(src_shape) == 2 and len(dst_shape) == 2): - if src_shape[0] == dst_shape[0] and src_shape[1] == dst_shape[1]: - return torch.nn.Parameter(src) - - if src_shape[0] != dst_shape[0]: - self.merge_assert(src_shape[0], dst_shape[0]) - weight_split = torch.split(src, dst_shape[0]) + if src_shape[inner_dim] == dst_shape[ + self.in_dim] and src_shape[outer_dim] == dst_shape[self.out_dim]: + dst = dst.reshape(-1).data.copy_(src.data.reshape(-1)).reshape(src.shape) else: - self.merge_assert(src_shape[1], dst_shape[1]) - weight_split = torch.split(src.data, dst_shape[1], dim=1) - - dst.data.copy_(weight_split[self.gpu_index].to( - torch.cuda.current_device()).contiguous()) + if src_shape[inner_dim] != dst_shape[self.in_dim]: + self.merge_assert(src_shape[inner_dim], dst_shape[self.in_dim]) + weight_split = torch.split( + src, + dst_shape[self.in_dim], + dim=inner_dim)[self.gpu_index].contiguous() + else: + self.merge_assert(src_shape[outer_dim], dst_shape[self.out_dim]) + weight_split = torch.split( + src.data, + dst_shape[self.out_dim], + dim=outer_dim)[self.gpu_index].contiguous() + dst = dst.reshape(-1).data.copy_(weight_split.reshape(-1)).reshape( + weight_split.shape) else: if src_shape[0] == dst_shape[0]: - return 
torch.nn.Parameter(src) - - bias_split = torch.split(src.data, dst_shape[-1]) - dst.data.copy_(bias_split[self.gpu_index].to( - torch.cuda.current_device()).contiguous()) - - return torch.nn.Parameter(dst) + dst.data.copy_(src) + else: + bias_split = torch.split(src.data, + dst_shape[-1])[self.gpu_index].contiguous() + dst.data.copy_(bias_split) + dst = torch.nn.parameter.Parameter(dst, requires_grad=False) + if hasattr(src, 'scale'): + dst.scale = src.scale + return dst + + +def get_transformer_name(replaced_module): + from .containers import supported_models + from torch.nn import ModuleList + transformer_name = '' + for n, c in replaced_module.named_children(): + if c.__class__ in supported_models: + transformer_name += n + '.' + for name, child in c.named_children(): + if child.__class__ is ModuleList: + transformer_name += name + break + break + return transformer_name + + +class GroupQuantizer: + def __init__(self, q_int8=True, group_size=1, num_bits=8, num_groups=0): + self.group_size = group_size + self.num_bits = num_bits + self.q_int8 = q_int8 + + self.num_groups = num_groups + + def quantize(self, inputs, qkv=True, count=1, parallel_dim=0): + if not self.q_int8 or not qkv: + inputs = torch.nn.Parameter(inputs, requires_grad=False) + inputs.scale = torch.empty(1) + return inputs + q_range = 2**self.num_bits + num_groups = self.num_groups if self.num_groups > 0 else inputs.shape[ + 0] // self.group_size + inputs = inputs.to(get_accelerator().current_device_name()) + input_flat = inputs.reshape(num_groups, -1).contiguous() + input_min = torch.min(input_flat, dim=1, keepdim=True)[0].float() + input_max = torch.max(input_flat, dim=1, keepdim=True)[0].float() + scale = torch.max(input_min.abs(), input_max.abs()) * 2.0 / (q_range) + input_flat = (input_flat / scale).round().clamp(-q_range // 2, q_range // 2 - 1) + inputs_q = input_flat.reshape(inputs.shape).to(torch.int8).contiguous() + out = torch.nn.Parameter(inputs_q, requires_grad=False) + inputs_split = 
inputs.split(inputs.shape[parallel_dim] // 2, dim=parallel_dim) + input_flat = [ + inputs_split[i].reshape(num_groups, + -1).contiguous() for i in range(2) + ] + input_min = [ + torch.min(input_flat[i], + dim=1, + keepdim=True)[0].float() for i in range(2) + ] + input_max = [ + torch.max(input_flat[i], + dim=1, + keepdim=True)[0].float() for i in range(2) + ] + scale1 = [ + (torch.max(input_min[i].abs(), + input_max[i].abs()) * 2.0 / (q_range)).squeeze().unsqueeze(0) + for i in range(2) + ] + + out.scale = torch.cat([scale.squeeze().unsqueeze(0), + scale1[0], + scale1[1]], + dim=0).reshape(num_groups, + -1).contiguous() + return out + + +def _module_match(module): + for policy in generic_policies: + policy = policy() + if policy.match(module): + return policy + return None + + +def generic_injection(module, fp16=False, enable_cuda_graph=True): + def replace_attn(child, policy): + policy_attn = policy.attention(child) + if policy_attn is None: + return child + if len(policy_attn) == 5: + qkvw, attn_ow, attn_ob, hidden_size, heads = policy_attn + else: + qw, kw, vw, attn_ow, attn_ob, hidden_size, heads = policy_attn + + config = transformer_inference.DeepSpeedInferenceConfig( + hidden_size=hidden_size, + heads=heads, + fp16=fp16, + triangular_masking=False, + max_out_tokens=4096, + ) + attn_module = DeepSpeedDiffusersAttention(config) + + def transpose(data): + data = data.contiguous() + data.reshape(-1).copy_(data.transpose(-1, -2).contiguous().reshape(-1)) + data = data.reshape(data.shape[-1], data.shape[-2]) + data.to(get_accelerator().current_device_name()) + return data + + if len(policy_attn) == 5: + attn_module.attn_qkvw.data = transpose(qkvw.data) + else: + attn_module.attn_qkvw = None + attn_module.attn_qw.data = transpose(qw.data) + attn_module.attn_kw.data = transpose(kw.data) + attn_module.attn_vw.data = transpose(vw.data) + + attn_module.attn_qkvb = None + attn_module.attn_ow.data = transpose(attn_ow.data) + attn_module.attn_ob.data.copy_( + 
attn_ob.data.to(get_accelerator().current_device_name())) + return attn_module + + def replace_attn_block(child, policy): + config = Diffusers2DTransformerConfig() + return DeepSpeedDiffusersTransformerBlock(child, config) + + if isinstance(module, torch.nn.Module): + pass + else: + if fp16 is False: + raise ValueError("Generic injection only supported with FP16") + + try: + import diffusers + cross_attention = diffusers.models.attention.CrossAttention + attention_block = diffusers.models.attention.BasicTransformerBlock + new_policies = { + cross_attention: replace_attn, + attention_block: replace_attn_block, + } + except ImportError: + new_policies = {} + + #replace_transformer_layer(None, + # module.text_encoder, + # training=False, + # replace_with_kernel_inject=True, + # triangular_masking=True, + # max_out_tokens=8192) + from ..model_implementations.transformers.clip_encoder import DSClipEncoder + cg_encoder = DSClipEncoder(module.text_encoder, + enable_cuda_graph=enable_cuda_graph) + setattr(module, 'text_encoder', cg_encoder) + for name in module.__dict__.keys(): + sub_module = getattr(module, name) + policy = _module_match(sub_module) + + if policy is not None: + + def _replace_module(module, policy): + for name, child in module.named_children(): + _replace_module(child, policy) + if child.__class__ in new_policies: + replaced_module = new_policies[child.__class__](child, + policy) + setattr(module, name, replaced_module) + + _replace_module(sub_module, policy) + new_module = policy.apply(sub_module, + enable_cuda_graph=enable_cuda_graph) + print(f"**** found and replaced {name} w. 
{type(new_module)}") + setattr(module, name, new_module) + + +container_g = None def replace_transformer_layer(orig_layer_impl, model, - policy=None, - micro_batch_size=-1, - config=None, - seed=-1, - hidden_size=-1, - num_attention_heads=-1, - mp_size=1, - training_mp_size=1, - mp_group=None, - ep_group=None, - expert_mp_group=None, - preln=True, - fp16=True, - local_rank=-1, - stochastic_mode=True, - training=True, - quantize=False, - quantize_settings=None, - triangular_masking=False, - return_tuple=True, - replace_with_kernel_inject=False, - linear_layer_setting=None, - moe=False, - moe_experts=1, - moe_type='standard'): + checkpoint_dict, + config, + model_config): """ Replace bert-style transformer layers with DeepSpeed's transformer layer Arguments: orig_layer_impl (torch.nn.Module): the original transformer layer implementation to look for, e.g., transformers.modeling_bert.BertLayer. model (torch.nn.Module): user's nn.module representing their model - policy: shows the policy for mapping from the orig_layer_impl to transformer parameters when - replace_with_kernel_inject is set, otherwise, it provides the names of two linear layers as - a tuple: (attention_output projection, transformer output projection) - micro_batch_size (int): micro batch size per gpu used during training/eval - config (dict): model config containing hidden size, attention heads, etc. - seed (int): random seed value - max_seq_length (int): max sequence length for training - hidden_size (int): hidden dimension - num_attention_heads (int): number of attention heads - mp_size (int): model_parallelism degree - mp_group : model_parallel group initialized on the modeling side - preln (bool): does the original layer implementation do pre or post layer norm? 
- fp16 (bool): fp16 or fp32 - local_rank (int): GPU rank (optional), - stochastic_mode (bool): whether to use stochastic mode - training (bool): specifying whether kernel-injection is done for training/inference (set to false for inference-mode injection) - quantize_settings (tuple): this setting shows how we can quantize a model for running it through the inference kernels. - It includes (quantization_scales, merge_count, mlp_extra_grouping, quantize_groups). - return_tuple (bool): if set, transformer layer returns a tuple as the output. - Note: this flag needs to be set for huggingface models. - replace_with_kernel_inject (bool): injection_mode, if true, kernels will be add along with configuring - Tensor-Parallelism - linear_layer_setting (tuple of modules) [Optional]: shows which two classes are used for linear layers - and embedding layers - attention_params: (list of strings) [Optional]: shows the parameters in the attention part that needs to - be adjusted based on the model-parallelism + checkpoint_dict: Dictionary for checkpoint passed from the Inference Engine + config: top-level DS Inference config defined in inference/config.py + model_config: HuggingFace model config passed from the inference/engine.py Returns: Updated nn.module with replaced transformer layers """ + # defining globals as internally defined functions inherit these everywhere + fp16 = (config.dtype == torch.float16 or config.dtype == torch.int8) + quantize = (config.dtype == torch.int8) + # todo: Refactor later. 
In future, let's minimize the style used above and use config.** instead + + linear_layer_setting = None + ''' + linear_layer_setting (tuple of modules) [Optional]: shows which two classes are used for linear layers and embedding layers + ''' + micro_batch_size = -1 + seed = -1 + local_rank = -1 + + mp_replace = ReplaceWithTensorSlicing( + mp_group=config.tensor_parallel.tp_group, + mp_size=config.tensor_parallel.tp_size) #, out_dim=0, in_dim=1) + def replace_with_policy(child, policy_cls, triangular_masking, inference=False, - preln=True, layer_id=0): - preln = False if policy_cls is HFBertLayerPolicy else preln - if policy_cls is HFBertLayerPolicy: - policy = policy_cls(child, inference=inference, preln=preln) - else: - policy = policy_cls(child, inference=inference) + policy = policy_cls(child, inference=inference) + if not policy.cuda_graph_supported: + # policy says cuda graph is not supported raise an error if set + assert not config.enable_cuda_graph, "cuda graph is not supported with this model, please disable" - if inference: - hidden_size, num_attention_heads = policy.get_hidden_heads() - assert num_attention_heads % mp_size == 0,\ - "To run the model parallel across the GPUs, the attention_heads require to be divisible by the world_size!" +\ - "This is because the attention computation is partitioned evenly among the parallel GPUs." 
from deepspeed.moe.layer import MoE moe = False if hasattr(child, 'mlp') and isinstance(child.mlp, MoE): num_experts = child.mlp.num_experts moe = True - attn_linear_layer, qkvw, qkvb, dense_w, dense_b, scale_attention, megatron_v2 = policy.attention() - if not moe or moe_type == 'standard': - mlp_linear_layer, _h4h_w, _h4h_b, _4hh_w, _4hh_b = policy.mlp() - else: - mlp_linear_layer, _h4h_w, _h4h_b, _4hh_w, _4hh_b, \ - _res_h4h_w, _res_h4h_b, _res_4hh_w, _res_4hh_b, _res_coef = policy.mlp(moe_type) - - attn_nw, attn_nb, input_nw, input_nb = policy.layerNorm() - if quantize: - if policy_cls is not HFBertLayerPolicy: - qkvw = qkvw.to(torch.int8) - dense_w = dense_w.to(torch.int8) - _h4h_w = [moe_w1.to(torch.int8) - for moe_w1 in _h4h_w] if moe else _h4h_w.to(torch.int8) - _4hh_w = [moe_w1.to(torch.int8) - for moe_w1 in _4hh_w] if moe else _4hh_w.to(torch.int8) - elif fp16: - qkvw = qkvw.half() - dense_w = dense_w.half() - _h4h_w = [moe_w1.half() for moe_w1 in _h4h_w] if moe else _h4h_w.half() - _4hh_w = [moe_w1.half() for moe_w1 in _4hh_w] if moe else _4hh_w.half() - if quantize or fp16: - qkvb = qkvb if qkvb is None else qkvb.half() - dense_b = dense_b if dense_b is None else dense_b.half() - _h4h_b = [moe_b1.half() for moe_b1 in _h4h_b] if moe else _h4h_b.half() - _4hh_b = [moe_b1.half() for moe_b1 in _4hh_b] if moe else _4hh_b.half() - attn_nw = attn_nw if attn_nw is None else attn_nw.half() - attn_nb = attn_nb if attn_nb is None else attn_nb.half() - input_nw = input_nw.half() - input_nb = input_nb.half() - - if moe and moe_type == 'residual' and fp16: - _res_h4h_b = _res_h4h_b.half() - _res_4hh_b = _res_4hh_b.half() - _res_h4h_w = _res_h4h_w.half() - _res_4hh_w = _res_4hh_w.half() - _res_coef = _res_coef.half() - - mp_replace = ReplaceWithTensorSlicing(mp_group=mp_group) - #expert_mp_replace = ReplaceWithTensorSlicing(mp_group=expert_mp_group) - - if inference: - if moe: - ep_world_size = torch.distributed.get_world_size() - local_ep_size = 1 if num_experts < 
ep_world_size else num_experts // ep_world_size - - transformer_config = transformer_inference.DeepSpeedMoEInferenceConfig( - hidden_size=hidden_size, - heads=num_attention_heads, - layer_norm_eps=config.layer_norm_eps if hasattr( - config, - 'layer_norm_eps') else 1e-12, - fp16=fp16, - pre_layer_norm=preln, - mp_size=mp_size, - q_int8=quantize, - moe_experts=local_ep_size, - global_experts=num_experts, - mlp_type=moe_type) - else: - rotary_dim = config.rotary_dim if hasattr(config, 'rotary_dim') else child.attention.rotary_ndims \ - if hasattr(child, 'attention') and hasattr(child.attention,'rotary_ndims') else -1 - transformer_config = transformer_inference.DeepSpeedInferenceConfig( - hidden_size=hidden_size, - heads=num_attention_heads, - layer_norm_eps=config.layer_norm_eps if hasattr( - config, - 'layer_norm_eps') else - (config.layer_norm_epsilon - if hasattr(config, - 'layer_norm_epsilon') else config.layernorm_epsilon - if hasattr(config, - 'layernorm_epsilon') else 1.0e-12), - fp16=fp16, - pre_layer_norm=preln, - mp_size=mp_size, - q_int8=quantize, - return_tuple=(return_tuple or (policy_cls is HFBertLayerPolicy)), - triangular_masking=(policy_cls is not HFBertLayerPolicy), - local_attention=((config.attention_layers[layer_id] == "local") - if hasattr(config, - 'attention_layers') else False), - window_size=(config.window_size if hasattr(config, - 'window_size') else 1), - rotary_dim=rotary_dim, - mlp_after_attn=(rotary_dim is None or rotary_dim < 0), - training_mp_size=training_mp_size) - - if quantize and quantize_settings is not None: - (quantization_scales, - merge_count, - mlp_extra_grouping, - quantize_groups) = quantize_settings - if moe: - new_module = transformer_inference.DeepSpeedMoEInference( - transformer_config, - mp_group=mp_group, - ep_group=None if ep_group is None else ep_group[num_experts], - expert_mp_group=None - if expert_mp_group is None else expert_mp_group[num_experts], - quantize_scales=quantization_scales[layer_id], - 
quantize_groups=quantize_groups, - merge_count=merge_count, - mlp_extra_grouping=mlp_extra_grouping, - qkv_merging=(policy_cls is HFBertLayerPolicy)) + # 1. Create a model-specific container object using the policy object. + _container = policy_to_ds_container(policy=policy, + config=config, + model_config=model_config, + layer_id=layer_id, + child=child) + _container.set_dtype(fp16) + _container.set_moe(moe) - else: - new_module = transformer_inference.DeepSpeedTransformerInference( - transformer_config, - mp_group=mp_group, - quantize_scales=quantization_scales[layer_id], - quantize_groups=quantize_groups, - merge_count=merge_count, - mlp_extra_grouping=mlp_extra_grouping, - qkv_merging=(policy_cls is HFBertLayerPolicy)) - - if quantize and qkvw.dtype != torch.int8: - quantize_bits = 8 - quantizer = WeightQuantization() - if policy_cls is HFBertLayerPolicy: - data_quantized, _ = quantizer.quantize_data(qkvw.data, quantize_bits, quantize_groups * 3) - else: - data_quantized, _ = quantizer.quantize_data(qkvw.data, quantize_bits, quantize_groups) - qkvw.data.copy_(data_quantized) - qkvw.data = qkvw.data.to(torch.int8) - else: + # 2. Set the tensor parallelism config + _container.set_tensor_parallel_config(config.tensor_parallel.tp_size, + config.tensor_parallel.tp_group) - if moe: - new_module = transformer_inference.DeepSpeedMoEInference( - transformer_config, - mp_group=mp_group, - ep_group=None if ep_group is None else ep_group[num_experts], - expert_mp_group=None - if expert_mp_group is None else expert_mp_group[num_experts], - ) + # 3. Initialize tensors + _container.initialize_tensors() - else: - new_module = transformer_inference.DeepSpeedTransformerInference( - transformer_config, - mp_group=mp_group, - ) - new_module.config.scale_attention = scale_attention - - # we want the weights in [input, output] shape - # linear layer is created with [input, output] shape - # transpose it here to reduce inference cost! 
- def transpose(data): - data.view(-1).copy_(data.transpose(-1, -2).contiguous().view(-1)) - data = data.reshape(data.shape[-1], data.shape[-2]) - return data - - if attn_linear_layer: - qkvw.data = transpose(qkvw.data) - dense_w.data = transpose(dense_w.data) - - if megatron_v2: - new_module.config.rotate_half = True - new_module.config.rotate_every_two = False - - def _transpose(x): - num_attention_heads_per_partition = transformer_config.heads // transformer_config.mp_size - attention_head_size = x.shape[-1] // num_attention_heads_per_partition - new_x_shape = x.size()[:-1] + (num_attention_heads_per_partition, - attention_head_size) - x_1 = x.view(*new_x_shape) - (q, - k, - v) = torch.split(x_1, - (x_1.shape[-1] // 3), - dim=(x_1.dim() - 1)) - if len(q.shape) > 2: - return torch.cat((q.reshape(q.shape[0], - -1), - k.reshape(q.shape[0], - -1), - v.reshape(q.shape[0], - -1)), - dim=-1).reshape(x.shape) - else: - return torch.cat((q.reshape(-1), - k.reshape(-1), - v.reshape(-1)), - dim=-1).reshape(x.shape) - - qkvw = torch.nn.Parameter(_transpose(qkvw).contiguous()) - qkvb = torch.nn.Parameter(_transpose(qkvb).contiguous()) - - dense_b = dense_b * (transformer_config.training_mp_size / - transformer_config.mp_size) - _4hh_b = _4hh_b * (transformer_config.training_mp_size / - transformer_config.mp_size) - - if mlp_linear_layer: - _h4h_w = [transpose(moe_w1.data) - for moe_w1 in _h4h_w] if moe else transpose(_h4h_w.data) - _4hh_w = [transpose(moe_w1.data) - for moe_w1 in _4hh_w] if moe else transpose(_4hh_w.data) - - if moe and moe_type == 'residual': - _res_h4h_w.data = transpose(_res_h4h_w.data) - _res_4hh_w.data = transpose(_res_4hh_w.data) - _res_coef.data = transpose(_res_coef.data) - - attn_block = new_module.attention - attn_block.attn_qkvw = mp_replace.qkv_copy(attn_block.attn_qkvw, qkvw) - attn_block.attn_qkvb = mp_replace.qkv_copy(attn_block.attn_qkvb, qkvb) - - attn_block.attn_ow = mp_replace.copy(attn_block.attn_ow, dense_w) - attn_block.attn_ob = 
mp_replace.copy(attn_block.attn_ob, dense_b) - - mpl_block = new_module.mlp - if moe: - gpu_index = torch.distributed.get_rank() - gpu_index = 0 - for ep_index in range(local_ep_size): - mpl_block[ep_index].inter_w.data = _h4h_w[ - gpu_index * local_ep_size + ep_index].to( - torch.cuda.current_device()) - mpl_block[ep_index].inter_b.data = _h4h_b[ - gpu_index * local_ep_size + ep_index].to( - torch.cuda.current_device()) - mpl_block[ep_index].output_w.data = _4hh_w[ - gpu_index * local_ep_size + ep_index].to( - torch.cuda.current_device()) - mpl_block[ep_index].output_b.data = _4hh_b[ - gpu_index * local_ep_size + ep_index].to( - torch.cuda.current_device()) - new_module.attn_nw.data = attn_nw.to(torch.cuda.current_device()) - new_module.attn_nb.data = attn_nb.to(torch.cuda.current_device()) - if moe_type == 'residual': - new_module.res_mlp.inter_w.data = _res_h4h_w.to( - torch.cuda.current_device()) - new_module.res_mlp.inter_b.data = _res_h4h_b.to( - torch.cuda.current_device()) - new_module.res_mlp.output_w.data = _res_4hh_w.to( - torch.cuda.current_device()) - new_module.res_mlp.output_b.data = _res_4hh_b.to( - torch.cuda.current_device()) - new_module.res_coef.data = _res_coef.to(torch.cuda.current_device()) - else: - mpl_block.inter_w.data = mp_replace.copy(mpl_block.inter_w, _h4h_w) - mpl_block.inter_b.data = mp_replace.copy(mpl_block.inter_b, _h4h_b) - mpl_block.output_w.data = mp_replace.copy(mpl_block.output_w, _4hh_w) - mpl_block.output_b.data = mp_replace.copy(mpl_block.output_b, _4hh_b) - if attn_nw is None: - new_module.mlp.attn_nw = attn_nw - else: - new_module.mlp.attn_nw.data = attn_nw.to(torch.cuda.current_device()) - if attn_nb is None: - new_module.mlp.attn_nb = attn_nb - else: - new_module.mlp.attn_nb.data = attn_nb.to(torch.cuda.current_device()) - new_module.norm_w.data = input_nw.to(torch.cuda.current_device()) - new_module.norm_b.data = input_nb.to(torch.cuda.current_device()) - else: - transformer_config = 
deepspeed.DeepSpeedTransformerConfig( - batch_size=micro_batch_size, - hidden_size=config.hidden_size, - heads=config.num_attention_heads, - attn_dropout_ratio=config.attention_probs_dropout_prob, - hidden_dropout_ratio=config.hidden_dropout_prob, - num_hidden_layers=config.num_hidden_layers, - initializer_range=config.initializer_range, - layer_norm_eps=config.layer_norm_eps if hasattr( - config, - 'layer_norm_eps') else 1e-12, - seed=seed, - fp16=fp16, - pre_layer_norm=(False if policy_cls is HFBertLayerPolicy else preln), - return_tuple=return_tuple, - local_rank=local_rank, - stochastic_mode=stochastic_mode, - normalize_invertible=True, - training=training) - new_module = deepspeed.DeepSpeedTransformerLayer(transformer_config) - new_module.attn_qkvw.data = qkvw - new_module.attn_qkvb.data = qkvb - new_module.attn_ow.data = dense_w - new_module.attn_ob.data = dense_b - - new_module.attn_nw.data = attn_nw - new_module.attn_nb.data = attn_nb - new_module.norm_w.data = input_nw - new_module.norm_b.data = input_nb - - new_module.inter_w.data = _h4h_w - new_module.inter_b.data = _h4h_b - new_module.output_w.data = _4hh_w - new_module.output_b.data = _4hh_b - return new_module + # 4. deal with data types -- needs refactor to use dtype instead of fp16 + if fp16: + _container.convert_to_required_dtype(dtype=torch.half) + + # 5. Set the quantization config + quantizer = GroupQuantizer(q_int8=quantize) + _container.set_quantization_config(quantize, quantizer) + + # 6. create a DS Inference config object + _container.create_ds_model_config() + + # 7. use the config and create the module + _container.create_module() + + # 8. transpose the weights and bias if needed + _container.transpose() + + # 9. deal with tensor parallelism. + _container.apply_tensor_parallelism(mp_replace) + + # 10. copy the tensors from the model-specific container to the new module + _container.copy_data_to_new_module() + + # 11. 
set global for generic checkpoint loading + global container_g + + if container_g is None: + container_g = _container + + return _container.module def replace_wo_policy(module, all_reduce_linears): + mp_size = config.tensor_parallel.tp_size + mp_group = config.tensor_parallel.tp_group + def _replace(child, name, conv_linear_layer): mp_replace = ReplaceWithTensorSlicing(mp_group=mp_group) + weight_shape = child.weight.shape if name in all_reduce_linears: - new_weight = torch.empty( - (child.weight.shape[0] - if conv_linear_layer else child.weight.shape[1] // mp_size, - child.weight.shape[1] - if conv_linear_layer else child.weight.shape[0]), - device=child.weight.device, - dtype=torch.half if fp16 else torch.float) - if not conv_linear_layer: - child.weight.data.view(-1).copy_( - child.weight.data.transpose(-1, - -2).contiguous().view(-1)) - child.weight.data = child.weight.data.reshape( - child.weight.data.shape[-1], - child.weight.data.shape[-2]) - data = mp_replace.copy(new_weight, - child.weight.data).to(torch.cuda.current_device()) + new_weight = torch.empty(( + weight_shape[1] if conv_linear_layer else weight_shape[0], + (weight_shape[0] if conv_linear_layer else weight_shape[1]) // + mp_size, + ), + device=child.weight.device, + dtype=child.weight.dtype) + if conv_linear_layer: + child.weight.data = child.weight.data.transpose(-1, -2).contiguous() + data = mp_replace.copy(new_weight, child.weight.data) + new_bias = torch.empty((weight_shape[0]), + device=child.weight.device, + dtype=child.weight.dtype) + if child.bias is not None: + new_bias.data.copy_(child.bias.data) return LinearAllreduce(data, child.bias if child.bias is None else \ - child.bias.to(torch.cuda.current_device()), mp_group) + torch.nn.parameter.Parameter(new_bias.to(get_accelerator().current_device_name())), mp_group) else: - new_weight = torch.empty( - (child.weight.shape[0] // - mp_size if conv_linear_layer else child.weight.shape[1], - child.weight.shape[1] - if conv_linear_layer else 
child.weight.shape[0] // mp_size), - device=child.weight.device, - dtype=torch.half if fp16 else torch.float) - if not conv_linear_layer: - child.weight.data.view(-1).copy_( - child.weight.data.transpose(-1, - -2).contiguous().view(-1)) - child.weight.data = child.weight.data.reshape( - child.weight.data.shape[-1], - child.weight.data.shape[-2]) + new_weight = torch.empty(( + (weight_shape[1] if conv_linear_layer else weight_shape[0]) // + mp_size, + weight_shape[0] // mp_size if conv_linear_layer else weight_shape[1], + ), + device=child.weight.device, + dtype=child.weight.dtype) + if conv_linear_layer: + child.weight.data = child.weight.data.transpose(-1, -2).contiguous() data = mp_replace.copy(new_weight, child.weight.data) - new_bias = torch.empty((child.weight.shape[1] // mp_size), + + new_bias = torch.empty((weight_shape[0] // mp_size), device=child.weight.device, - dtype=torch.half if fp16 else torch.float) + dtype=child.weight.dtype) bias_data = None if child.bias is None else mp_replace.copy( new_bias, - child.bias.data).to(torch.cuda.current_device()) - return LinearLayer(data.to(torch.cuda.current_device()), bias_data) + child.bias.data).to(get_accelerator().current_device_name()) + return LinearLayer(weight=data.to( + get_accelerator().current_device_name()), + bias=bias_data) def _slice_embedding(child, name, conv_linear_layer): mp_replace = ReplaceWithTensorSlicing(mp_group=mp_group) @@ -547,7 +454,9 @@ def replace_transformer_layer(orig_layer_impl, child.weight.shape[1] // mp_size), device=child.weight.device, dtype=child.weight.dtype) - data = mp_replace.copy(new_weight, child.weight.data) + data = mp_replace.copy(new_weight, + child.weight.ds_tensor.data if hasattr(child.weight, 'ds_tensor') else \ + child.weight.data) new_embedding = nn.Embedding(child.weight.shape[0], child.weight.shape[1] // mp_size) new_embedding.weight.data.copy_(data) @@ -562,10 +471,14 @@ def replace_transformer_layer(orig_layer_impl, child.num_heads = child.num_heads // 
mp_size if hasattr(child, 'num_attention_heads'): child.num_attention_heads = child.num_attention_heads // mp_size + if hasattr(child, 'num_attn_heads'): + child.num_attn_heads = child.num_attn_heads // mp_size if hasattr(child, 'all_head_size'): child.all_head_size = child.all_head_size // mp_size if hasattr(child, 'embed_dim'): child.embed_dim = child.embed_dim // mp_size + if hasattr(child, 'hidden_size'): + child.hidden_size = child.hidden_size // mp_size conv_linear_layer = False if linear_layer_setting is not None: @@ -600,32 +513,193 @@ def replace_transformer_layer(orig_layer_impl, return _replace_module(module) def replace_fn(child, _policy, layer_id=0): + training = False # todo: refactor this part to go in the config if training: # copy relevant state from child -> new module - new_module = replace_with_policy(child, - _policy, - triangular_masking, - preln=preln) + new_module = replace_with_policy(child, _policy, config.triangular_masking) else: # copy relevant state from child -> new module - if replace_with_kernel_inject: + if config.replace_with_kernel_inject: new_module = replace_with_policy(child, _policy, - triangular_masking, + config.triangular_masking, inference=True, - preln=(_policy - is not HFBertLayerPolicy), layer_id=layer_id) else: new_module = replace_wo_policy(child, _policy) return new_module - return replace_module(model=model, - orig_class=orig_layer_impl, - replace_fn=replace_fn, - _replace_policy=policy) + replaced_module = replace_module(model=model, + orig_class=orig_layer_impl, + replace_fn=replace_fn, + _replace_policy=config.injection_policy_tuple) + + quantizer = GroupQuantizer(q_int8=quantize) + world_size = dist.get_world_size() if dist.is_initialized() else 1 + rank = dist.get_rank() if dist.is_initialized() else 0 + if checkpoint_dict is not None: + assert container_g.ckpt_load_enabled, \ + f"Meta Tensor checkpoint loading not supported in {container_g.__class__.__name__} container" + start_time = time.time() + checkpoint 
= checkpoint_dict['checkpoints'] + ckpt_list = checkpoint["tp"] if type(checkpoint) is dict else checkpoint + ckpt_type = checkpoint_dict.get('parallelization', 'pp') + ckpt_mp_size = checkpoint_dict.get('tp_size', len(ckpt_list)) + ckpt_mp_size = checkpoint_dict.get('mp_size', ckpt_mp_size) + base_dir1 = checkpoint_dict.get('base_dir', config.base_dir) + + if ckpt_type == 'pp' and type(checkpoint) is list: + pbar = tqdm.tqdm(total=len(checkpoint), + desc=f"Loading {len(checkpoint)} checkpoint shards") + + for i in range(len(checkpoint)): + sd = [ + torch.load(os.path.join(base_dir1, + checkpoint[i]), + map_location='cpu') + ] + load_model_with_checkpoint(replaced_module, + sd, + mp_replace, + ckpt_type, + ckpt_mp_size, + quantizer, + container=container_g) + pbar.update(1) + else: + import gc + num_checkpoints = len(ckpt_list) // ckpt_mp_size + tp_split_size = (world_size / ckpt_mp_size) + sd_offset = int(rank / tp_split_size) + sd_count = int((rank + max(1, tp_split_size)) / tp_split_size) - sd_offset + pbar = tqdm.tqdm(total=num_checkpoints, + desc=f"Loading {num_checkpoints} checkpoint shards") + for i in range(num_checkpoints): + pbar.update(1) + ckpt_index = i * ckpt_mp_size + sd_offset + ckpt_files = [ + os.path.join(base_dir1, + ckpt_list[ckpt_index + + j]) if base_dir1 else ckpt_list[ckpt_index + + j] + for j in range(sd_count) + ] + sds = [ + torch.load(ckpt_file, + map_location='cpu') for ckpt_file in ckpt_files + ] + load_model_with_checkpoint(replaced_module, + sds, + mp_replace, + ckpt_type, + ckpt_mp_size, + quantizer, + int(rank % tp_split_size), + container=container_g) + sds = [None for _ in sds] + gc.collect() + + if "non_tp" in checkpoint: + pbar = tqdm.tqdm( + total=len(checkpoint["non_tp"]), + desc=f"Loading {len(checkpoint['non_tp'])} checkpoint shards") + + for i in range(len(checkpoint["non_tp"])): + pbar.update(1) + ckpt_file = os.path.join(base_dir1, + checkpoint["non_tp"][i] + ) if base_dir1 else checkpoint["non_tp"][i] + sds = 
[torch.load(ckpt_file, map_location='cpu')] + load_model_with_checkpoint(replaced_module, + sds, + mp_replace, + ckpt_type, + ckpt_mp_size, + quantizer, + int(rank % tp_split_size), + container=container_g) + sds = [None for _ in sds] + gc.collect() + print(f"checkpoint loading time at rank {rank}: {time.time()-start_time} sec") + + if config.save_mp_checkpoint_path is not None: + from collections import OrderedDict + import json + num_partitions = 8 + + if checkpoint_dict is None: + ckpt_name = "ds_model" + try: + from transformers.models.bloom.modeling_bloom import BloomForCausalLM + if isinstance(model, BloomForCausalLM): + ckpt_name = "bloom" + except ImportError: + ckpt_name = "ds_model" + else: + ckpt_name = checkpoint_dict['type'] + if dist.is_initialized(): + dist.barrier() + transformer_name = get_transformer_name(replaced_module) + non_tp_ckpt_name = f'non-tp.pt' + ckpt_files = [non_tp_ckpt_name] + os.makedirs(config.save_mp_checkpoint_path, exist_ok=True) + + if not dist.is_initialized() or dist.get_rank() == 0: + print("Saving tp-sharded checkpoints") + torch.save( + OrderedDict({ + k: v + for k, + v in dict(replaced_module.state_dict()).items() + if transformer_name not in k + }), + f'{config.save_mp_checkpoint_path}/{non_tp_ckpt_name}') + ckpt_config = json.dumps({ + 'type': + ckpt_name, + 'base_dir': + f'{config.save_mp_checkpoint_path}', + 'checkpoints': { + "non_tp": + ckpt_files, + "tp": [ + f'tp_{r:0>2d}_{m:0>2d}.pt' for m in range(num_partitions) + for r in range(world_size) + ] + }, + 'version': + 1.0, + 'parallelization': + 'tp', + 'tp_size': + world_size, + 'dtype': + 'int8' if quantize else ('float16' if fp16 else 'float32') + }) + with open(f"{config.save_mp_checkpoint_path}/ds_inference_config.json", + "w") as cfg: + cfg.write(ckpt_config) + + rep_sd = replaced_module.state_dict() + for n, p in replaced_module.named_parameters(): + if hasattr(p, 'scale'): + rep_sd[n] = [p, p.scale] + keys = list(rep_sd.keys()) + partition_size = (len(keys) 
// num_partitions + 1) + for m in range(num_partitions): + torch.save( + OrderedDict({ + k: [rep_sd[k], + rep_sd[k].scale] if hasattr(rep_sd[k], + 'scale') else rep_sd[k] + for k in keys[m * partition_size:(m + 1) * partition_size] + if transformer_name in k + }), + f'{config.save_mp_checkpoint_path}/tp_{rank:0>2d}_{m:0>2d}.pt') + + return replaced_module def revert_transformer_layer(orig_layer_impl, model, config, preln=False): @@ -751,4 +825,6 @@ def _replace_module(model, policies, layer_id=0): else: _, layer_id = _replace_module(child, policies, layer_id=layer_id) + # Add the reset_cache func to the model, so that it can be called in the beginning of text-generation. + model.reset_cache = transformer_inference.DeepSpeedTransformerInference.reset_cache return model, layer_id diff --git a/deepspeed/module_inject/replace_policy.py b/deepspeed/module_inject/replace_policy.py old mode 100644 new mode 100755 index c8d14e4..65dadcc --- a/deepspeed/module_inject/replace_policy.py +++ b/deepspeed/module_inject/replace_policy.py @@ -1,374 +1,20 @@ -from abc import ABC - -import torch -from torch.nn.parameter import Parameter - - -class DSPolicy(ABC): - def __init__(self, - inference=True, - linear_layer=True, - scale_attention=True, - megatron_v2=False): - self.inference = inference - self.linear_layer = linear_layer - self.scale_attention = scale_attention - self.is_megatron_v2 = megatron_v2 - - def attention(self): - """ - Returns attention qkv and dense parameters - weight: (3*hidden, hidden) and (hidden, hidden) - bias: (3*hidden) and (hidden) - """ - raise NotImplementedError - - def get_hidden_heads(self): - """ - return hidden_size and number of heads - """ - raise NotImplementedError - - def mlp(self): - """ - Returns mlp intermediate and output - weight: (intermediate, hidden) and (hidden, intermediate) - bias: (intermediate) and (hidden) - """ - raise NotImplementedError - - def layerNorm(self): - """ - Returns LayerNorms used in transformer layer - 
Post-Attention and pre/post layer norm - gamma and beta with shape: (hidden) - """ - raise NotImplementedError - - -class HFBertLayerPolicy(DSPolicy): - _orig_layer_class = None - - def __init__(self, client_module, inference=False, preln=False): - super().__init__(inference) - self.client_module = client_module - self.preln = preln - if HFBertLayerPolicy._orig_layer_class is None: - try: - import transformers - HFBertLayerPolicy._orig_layer_class = [ - transformers.models.bert.modeling_bert.BertLayer, - transformers.models.roberta.modeling_roberta.RobertaLayer - ] - except: - HFBertLayerPolicy._orig_layer_class = None - - def get_hidden_heads(self): - return self.client_module.attention.self.query.weight.shape[1], \ - self.client_module.attention.self.num_attention_heads - - def attention(self): - qw = self.client_module.attention.self.query.weight - qb = self.client_module.attention.self.query.bias - kw = self.client_module.attention.self.key.weight - kb = self.client_module.attention.self.key.bias - vw = self.client_module.attention.self.value.weight - vb = self.client_module.attention.self.value.bias - - qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=False) - qkvb = Parameter(torch.cat((qb, kb, vb), dim=0), requires_grad=False) - - return self.linear_layer, \ - qkvw, \ - qkvb, \ - self.client_module.attention.output.dense.weight, \ - self.client_module.attention.output.dense.bias, \ - self.scale_attention, \ - self.is_megatron_v2 - - def mlp(self): - if self.preln: - intermediate_ff = self.client_module.intermediate.dense_act - else: - intermediate_ff = self.client_module.intermediate.dense - - return self.linear_layer, intermediate_ff.weight, intermediate_ff.bias, \ - self.client_module.output.dense.weight, \ - self.client_module.output.dense.bias - - def layerNorm(self): - if self.preln: - attention_layernorm = self.client_module.PostAttentionLayerNorm - transformer_layernorm = self.client_module.PreAttentionLayerNorm - else: - 
attention_layernorm = self.client_module.attention.output.LayerNorm - transformer_layernorm = self.client_module.output.LayerNorm - return attention_layernorm.weight, \ - attention_layernorm.bias, \ - transformer_layernorm.weight, \ - transformer_layernorm.bias - - -class HFGPTNEOLayerPolicy(DSPolicy): - _orig_layer_class = None - - def __init__(self, client_module, inference=True): - super().__init__(inference, scale_attention=False) - self.client_module = client_module - try: - import transformers - HFGPTNEOLayerPolicy._orig_layer_class = transformers.models.gpt_neo.modeling_gpt_neo.GPTNeoBlock - except: - HFGPTNEOLayerPolicy._orig_layer_class = None - - def get_hidden_heads(self): - return self.client_module.attn.attention.q_proj.weight.shape[1], \ - self.client_module.attn.attention.num_heads - - def attention(self): - qw = self.client_module.attn.attention.q_proj.weight - kw = self.client_module.attn.attention.k_proj.weight - vw = self.client_module.attn.attention.v_proj.weight - - qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=False) - - return self.linear_layer, \ - qkvw, \ - None, \ - self.client_module.attn.attention.out_proj.weight, \ - self.client_module.attn.attention.out_proj.bias, \ - self.scale_attention, \ - self.is_megatron_v2 - - def mlp(self): - return self.linear_layer, \ - self.client_module.mlp.c_fc.weight, \ - self.client_module.mlp.c_fc.bias, \ - self.client_module.mlp.c_proj.weight, \ - self.client_module.mlp.c_proj.bias - - def layerNorm(self): - return self.client_module.ln_2.weight, \ - self.client_module.ln_2.bias, \ - self.client_module.ln_1.weight, \ - self.client_module.ln_1.bias - - -class HFGPTJLayerPolicy(DSPolicy): - _orig_layer_class = None - - def __init__(self, client_module, inference=True): - super().__init__(inference, scale_attention=True) - self.client_module = client_module - try: - import transformers - HFGPTJLayerPolicy._orig_layer_class = transformers.models.gptj.modeling_gptj.GPTJBlock - except: - 
HFGPTJLayerPolicy._orig_layer_class = None - - def get_hidden_heads(self): - return self.client_module.attn.q_proj.weight.shape[1], \ - self.client_module.attn.num_attention_heads - - def attention(self): - qw = self.client_module.attn.q_proj.weight - kw = self.client_module.attn.k_proj.weight - vw = self.client_module.attn.v_proj.weight - - qkvw = Parameter(torch.cat((qw, kw, vw), dim=0), requires_grad=False) - - return self.linear_layer, \ - qkvw, \ - None, \ - self.client_module.attn.out_proj.weight, \ - None, \ - self.scale_attention, \ - self.is_megatron_v2 - - def mlp(self): - return self.linear_layer, \ - self.client_module.mlp.fc_in.weight, \ - self.client_module.mlp.fc_in.bias, \ - self.client_module.mlp.fc_out.weight, \ - self.client_module.mlp.fc_out.bias - - def layerNorm(self): - return None, \ - None, \ - self.client_module.ln_1.weight, \ - self.client_module.ln_1.bias - - -class MegatronLayerPolicy(DSPolicy): - _orig_layer_class = None - version = 0 - moe_type = 'standard' - - def __init__(self, client_module, inference=True): - super().__init__(inference) - self.client_module = client_module - # we use megatron version to differentiate between the old and new - # megatron-lm source code - if MegatronLayerPolicy._orig_layer_class is None: - try: - import megatron - from megatron.model.transformer import ParallelTransformerLayer - MegatronLayerPolicy._orig_layer_class = ParallelTransformerLayer - except ImportError: - MegatronLayerPolicy._orig_layer_class = None - - def get_hidden_heads(self): - return self.client_module.attention.query_key_value.weight.shape[1], \ - self.client_module.attention.num_attention_heads - - def attention(self): - if self.inference: - if MegatronLayerPolicy.version == 0: - attention = self.client_module.attention - else: - attention = self.client_module.self_attention - - return self.linear_layer, \ - attention.query_key_value.weight, \ - attention.query_key_value.bias, \ - attention.dense.weight, \ - attention.dense.bias, 
\ - self.scale_attention, \ - self.is_megatron_v2 - - def mlp(self, moe_type='standard'): - from deepspeed.moe.utils import has_moe_layers - moe, _ = has_moe_layers(self.client_module) - - if moe: - moe_experts = self.client_module.mlp.deepspeed_moe.experts.deepspeed_experts if moe_type == 'standard' else \ - self.client_module.mlp.moe.deepspeed_moe.experts.deepspeed_experts - num_experts = len(moe_experts) - if moe_type == 'standard': - return self.linear_layer, \ - [moe_experts[i].dense_h_to_4h.weight for i in range(num_experts)], \ - [moe_experts[i].dense_h_to_4h.bias for i in range(num_experts)], \ - [moe_experts[i].dense_4h_to_h.weight for i in range(num_experts)], \ - [moe_experts[i].dense_4h_to_h.bias for i in range(num_experts)] - else: - - return self.linear_layer, \ - [moe_experts[i].dense_h_to_4h.weight for i in range(num_experts)], \ - [moe_experts[i].dense_h_to_4h.bias for i in range(num_experts)], \ - [moe_experts[i].dense_4h_to_h.weight for i in range(num_experts)], \ - [moe_experts[i].dense_4h_to_h.bias for i in range(num_experts)], \ - self.client_module.mlp.mlp.dense_h_to_4h.weight, \ - self.client_module.mlp.mlp.dense_h_to_4h.bias, \ - self.client_module.mlp.mlp.dense_4h_to_h.weight, \ - self.client_module.mlp.mlp.dense_4h_to_h.bias, \ - self.client_module.mlp.coefficient.weight - - else: - return self.linear_layer, \ - self.client_module.mlp.dense_h_to_4h.weight, \ - self.client_module.mlp.dense_h_to_4h.bias, \ - self.client_module.mlp.dense_4h_to_h.weight, \ - self.client_module.mlp.dense_4h_to_h.bias - - def layerNorm(self): - return self.client_module.post_attention_layernorm.weight, \ - self.client_module.post_attention_layernorm.bias, \ - self.client_module.input_layernorm.weight, \ - self.client_module.input_layernorm.bias - - -class HFGPT2LayerPolicy(DSPolicy): - _orig_layer_class = None - - def __init__(self, client_module, inference=True): - # HuggingFace GPT2 uses convolutional layer instead of linear layer - 
super().__init__(inference, linear_layer=False) - self.client_module = client_module - try: - import transformers - HFGPT2LayerPolicy._orig_layer_class = transformers.models.gpt2.modeling_gpt2.GPT2Block - except: - HFGPT2LayerPolicy._orig_layer_class = None - - def get_hidden_heads(self): - return self.client_module.attn.embed_dim, \ - self.client_module.attn.num_heads - - def attention(self): - return self.linear_layer, \ - self.client_module.attn.c_attn.weight, \ - self.client_module.attn.c_attn.bias, \ - self.client_module.attn.c_proj.weight, \ - self.client_module.attn.c_proj.bias, \ - self.scale_attention, \ - self.is_megatron_v2 - - def mlp(self): - return self.linear_layer, \ - self.client_module.mlp.c_fc.weight, \ - self.client_module.mlp.c_fc.bias, \ - self.client_module.mlp.c_proj.weight, \ - self.client_module.mlp.c_proj.bias - - def layerNorm(self): - return self.client_module.ln_2.weight, \ - self.client_module.ln_2.bias, \ - self.client_module.ln_1.weight, \ - self.client_module.ln_1.bias - - -class GPTNEOXLayerPolicy(DSPolicy): - _orig_layer_class = None - version = 0 - - def __init__(self, client_module, inference=True, megatron_v2=True): - super().__init__(inference, megatron_v2=megatron_v2) - self.client_module = client_module - if GPTNEOXLayerPolicy._orig_layer_class is None: - try: - import megatron - from megatron.model.transformer import ParallelTransformerLayerPipe - GPTNEOXLayerPolicy._orig_layer_class = ParallelTransformerLayerPipe - except ImportError: - GPTNEOXLayerPolicy._orig_layer_class = None - - def get_hidden_heads(self): - if GPTNEOXLayerPolicy.version == 0: - attention = self.client_module.attention - else: - attention = self.client_module.self_attention - - return self.client_module.attention.query_key_value.weight.shape[1], \ - self.client_module.attention.num_attention_heads - - def attention(self): - if GPTNEOXLayerPolicy.version == 0: - attention = self.client_module.attention - else: - attention = 
self.client_module.self_attention - - return self.linear_layer, \ - attention.query_key_value.weight, \ - attention.query_key_value.bias, \ - attention.dense.weight, \ - attention.dense.bias, \ - self.scale_attention, \ - self.is_megatron_v2 - - def mlp(self): - return self.linear_layer, \ - self.client_module.mlp.dense_h_to_4h.weight, \ - self.client_module.mlp.dense_h_to_4h.bias, \ - self.client_module.mlp.dense_4h_to_h.weight, \ - self.client_module.mlp.dense_4h_to_h.bias - - def layerNorm(self): - return self.client_module.post_attention_layernorm.weight, \ - self.client_module.post_attention_layernorm.bias, \ - self.client_module.input_layernorm.weight, \ - self.client_module.input_layernorm.bias - - +''' +Copyright 2020 The Microsoft DeepSpeed Team +''' +from .containers import HFGPT2LayerPolicy +from .containers import HFBertLayerPolicy +from .containers import BLOOMLayerPolicy +from .containers import HFGPTJLayerPolicy +from .containers import HFGPTNEOLayerPolicy +from .containers import GPTNEOXLayerPolicy +from .containers import HFOPTLayerPolicy +from .containers import MegatronLayerPolicy +from .containers import HFDistilBertLayerPolicy +from .containers import HFCLIPLayerPolicy +from .containers import UNetPolicy +from .containers import VAEPolicy + +# transformer-based policies replace_policies = [ HFBertLayerPolicy, HFGPTNEOLayerPolicy, @@ -376,4 +22,11 @@ replace_policies = [ HFGPTJLayerPolicy, MegatronLayerPolicy, HFGPT2LayerPolicy, + BLOOMLayerPolicy, + HFOPTLayerPolicy, + HFCLIPLayerPolicy, + HFDistilBertLayerPolicy ] + +# non-transformer-based policies +generic_policies = [UNetPolicy, VAEPolicy] diff --git a/deepspeed/module_inject/utils.py b/deepspeed/module_inject/utils.py new file mode 100644 index 0000000..7ebd797 --- /dev/null +++ b/deepspeed/module_inject/utils.py @@ -0,0 +1,40 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +from deepspeed.utils import log_dist + + +# helper function to map between DS policies and DS containers +def 
policy_to_ds_container(**kwargs): + from .containers import HFGPT2LayerPolicy, DS_GPT2Container + from .containers import HFBertLayerPolicy, DS_BERTContainer + from .containers import BLOOMLayerPolicy, DS_BloomContainer + from .containers import HFGPTJLayerPolicy, DS_GPTJContainer + from .containers import HFGPTNEOLayerPolicy, DS_GPTNEOContainer + from .containers import GPTNEOXLayerPolicy, DS_GPTNEOXContainer + from .containers import HFOPTLayerPolicy, DS_OPTContainer + from .containers import MegatronLayerPolicy, DS_MegatronGPTContainer + from .containers import HFDistilBertLayerPolicy, DS_DistilBERTContainer + + policy_to_container = { + HFGPT2LayerPolicy: DS_GPT2Container, + HFBertLayerPolicy: DS_BERTContainer, + BLOOMLayerPolicy: DS_BloomContainer, + HFGPTJLayerPolicy: DS_GPTJContainer, + HFGPTNEOLayerPolicy: DS_GPTNEOContainer, + GPTNEOXLayerPolicy: DS_GPTNEOXContainer, + HFOPTLayerPolicy: DS_OPTContainer, + MegatronLayerPolicy: DS_MegatronGPTContainer, + HFDistilBertLayerPolicy: DS_DistilBERTContainer, + } + + container = None + policy = kwargs['policy'] + assert policy is not None, "Policy cannot be None" + policy_type = type(policy) + + if policy_type not in policy_to_container: + log_dist(f"Policy type {policy_type} not supported", [0]) + else: + container = policy_to_container[policy_type](**kwargs) + + return container diff --git a/deepspeed/moe/__init__.py b/deepspeed/moe/__init__.py index e69de29..fcb45ab 100644 --- a/deepspeed/moe/__init__.py +++ b/deepspeed/moe/__init__.py @@ -0,0 +1 @@ +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/deepspeed/moe/layer.py b/deepspeed/moe/layer.py index c596da4..6b4a076 100644 --- a/deepspeed/moe/layer.py +++ b/deepspeed/moe/layer.py @@ -2,20 +2,35 @@ Copyright 2020 The Microsoft DeepSpeed Team ''' -import torch.nn.init as init import torch -import torch.distributed as dist -from deepspeed.utils import logger, log_dist +from deepspeed.utils import log_dist -import deepspeed.utils.groups as groups +from 
deepspeed.utils import groups from .sharded_moe import MOELayer, TopKGate from .experts import Experts -import copy import typing class MoE(torch.nn.Module): + """Initialize an MoE layer. + + Arguments: + hidden_size (int): the hidden dimension of the model, importantly this is also the input and output dimension. + expert (torch.nn.Module): the torch module that defines the expert (e.g., MLP, torch.linear). + num_experts (int, optional): default=1, the total number of experts per layer. + ep_size (int, optional): default=1, number of ranks in the expert parallel world or group. + k (int, optional): default=1, top-k gating value, only supports k=1 or k=2. + capacity_factor (float, optional): default=1.0, the capacity of the expert at training time. + eval_capacity_factor (float, optional): default=1.0, the capacity of the expert at eval time. + min_capacity (int, optional): default=4, the minimum capacity per expert regardless of the capacity_factor. + use_residual (bool, optional): default=False, make this MoE layer a Residual MoE (https://arxiv.org/abs/2201.05596) layer. + noisy_gate_policy (str, optional): default=None, noisy gate policy, valid options are 'Jitter', 'RSample' or 'None'. + drop_tokens (bool, optional): default=True, whether to drop tokens - (setting to False is equivalent to infinite capacity). + use_rts (bool, optional): default=True, whether to use Random Token Selection. + use_tutel (bool, optional): default=False, whether to use Tutel optimizations (if installed). + enable_expert_tensor_parallelism (bool, optional): default=False, whether to use tensor parallelism for experts + """ def __init__(self, hidden_size, expert, @@ -29,37 +44,21 @@ class MoE(torch.nn.Module): noisy_gate_policy: typing.Optional[str] = None, drop_tokens: bool = True, use_rts=True, - use_tutel: bool = False): - """Initialize an MoE layer. - - Arguments: - hidden_size (int): the hidden dimension of the model, importantly this is also the input and output dimension. 
- expert (torch.nn.Module): the torch module that defines the expert (e.g., MLP, torch.linear). - num_experts (int, optional): default=1, the total number of experts per layer. - ep_size (int, optional): default=1, number of ranks in the expert parallel world or group. - k (int, optional): default=1, top-k gating value, only supports k=1 or k=2. - capacity_factor (float, optional): default=1.0, the capacity of the expert at training time. - eval_capacity_factor (float, optional): default=1.0, the capacity of the expert at eval time. - min_capacity (int, optional): default=4, the minimum capacity per expert regardless of the capacity_factor. - use_residual (bool, optional): default=False, make this MoE layer a Residual MoE (https://arxiv.org/abs/2201.05596) layer. - noisy_gate_policy (str, optional): default=None, noisy gate policy, valid options are 'Jitter', 'RSample' or 'None'. - drop_tokens (bool, optional): default=True, whether to drop tokens - (setting to False is equivalent to infinite capacity). - use_rts (bool, optional): default=True, whether to use Random Token Selection. - use_tutel (bool, optional): default=False, whether to use Tutel optimizations (if installed). 
- """ + use_tutel: bool = False, + enable_expert_tensor_parallelism: bool = False): super(MoE, self).__init__() self.use_residual = use_residual - self.ep_size = min( - ep_size, - num_experts) # the ep size should be less than the number of experts + self.enable_expert_tensor_parallelism = enable_expert_tensor_parallelism + assert num_experts % ep_size == 0, f"Number of experts ({num_experts}) should be divisible by expert parallel size ({ep_size})" + self.ep_size = ep_size self.expert_group_name = f"ep_size_{self.ep_size}" self.num_experts = num_experts - self.num_local_experts = 1 if num_experts < ep_size else num_experts // ep_size + self.num_local_experts = num_experts // self.ep_size log_dist( - f'Creating MoE layer with num_experts: {num_experts} | num_local_experts: {self.num_local_experts} | expert_parallel_size: {ep_size}', + f'Creating MoE layer with num_experts: {num_experts} | num_local_experts: {self.num_local_experts} | expert_parallel_size: {self.ep_size}', [0]) assert noisy_gate_policy is None or noisy_gate_policy in ['None', 'Jitter', 'RSample'], \ @@ -94,9 +93,12 @@ class MoE(torch.nn.Module): print( f"No existing process group found, creating a new group named: {self.expert_group_name}" ) - if groups.mpu is None: + if (groups.mpu is None) or (not self.enable_expert_tensor_parallelism): + # Condition 1 - no groups.mpu means no tensor parallelism + # Condition 2 - disabling expert tensor parallelism on purpose groups._create_expert_and_data_parallel(self.ep_size) else: + # expert tensor parallelism is enabled groups._create_expert_data_and_model_parallel(self.ep_size, mpu=groups.mpu) # Set the group handle for the MOELayer (deepspeed_moe) object @@ -126,6 +128,6 @@ class MoE(torch.nn.Module): if type(output_mlp) is tuple: output_mlp = output_mlp[0] # Ignore the bias term for now coef = self.coefficient(hidden_states) - coef = torch.nn.functional.softmax(coef, dim=1) + coef = torch.nn.functional.softmax(coef, dim=-1) output = output * coef[..., 0:1] 
+ output_mlp * coef[..., 1:] return output, self.deepspeed_moe.l_aux, self.deepspeed_moe.exp_counts diff --git a/deepspeed/moe/mappings.py b/deepspeed/moe/mappings.py new file mode 100644 index 0000000..38f1630 --- /dev/null +++ b/deepspeed/moe/mappings.py @@ -0,0 +1,108 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +''' + +# The file has been adapted from the following Megatron-LM file: +# https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/mpu/mappings.py +# Git commit hash: 9dc3c42a84aa656f583703cf8b6b4f79f712b796 +# We retain the following copyright from the original files: + +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import deepspeed + + +def _gather_tokens(input_, dim=0): + """Gather tensors and concatenate them along a dimension""" + mpu = deepspeed.utils.groups.mpu + + input_ = input_.contiguous() + # Size and dimension. + rank = mpu.get_tensor_model_parallel_rank() + + tensor_list = [ + torch.empty_like(input_) + for _ in range(mpu.get_tensor_model_parallel_world_size()) + ] + tensor_list[rank] = input_ + deepspeed.comm.all_gather(tensor_list, + input_, + group=mpu.get_tensor_model_parallel_group()) + + # Note: torch.cat already creates a contiguous tensor. 
+ output = torch.cat(tensor_list, dim=dim).contiguous() + + return output + + +def _drop_tokens(input_, dim=0): + """Divide a tensor among the tensor parallel ranks""" + mpu = deepspeed.utils.groups.mpu + + total_chunks = mpu.get_tensor_model_parallel_world_size() + this_chunk = mpu.get_tensor_model_parallel_rank() + assert input_.shape[dim] % total_chunks == 0, f"input dimension {dim} ({input_.shape[dim]}) is not divisible by tensor parallel world size ({total_chunks})" + chunk_size = input_.shape[dim] // total_chunks + + return torch.narrow(input_, dim, this_chunk * chunk_size, chunk_size) + + +class _GatherTokens(torch.autograd.Function): + """All gather tokens among the tensor parallel ranks""" + @staticmethod + def symbolic(graph, input_, dim): + return _gather_tokens(input_, dim) + + @staticmethod + def forward(ctx, input_, dim): + ctx.dim = dim + return _gather_tokens(input_, dim) + + @staticmethod + def backward(ctx, grad_output): + return _drop_tokens(grad_output, ctx.dim), None + + +class _DropTokens(torch.autograd.Function): + "Divide tokens equally among the tensor parallel ranks" + + @staticmethod + def symbolic(graph, input_, dim): + return _drop_tokens(input_, dim) + + @staticmethod + def forward(ctx, input_, dim): + ctx.dim = dim + return _drop_tokens(input_, dim) + + @staticmethod + def backward(ctx, input_): + return _gather_tokens(input_, ctx.dim), None + + +def gather_tokens(input_, dim=0): + mpu = deepspeed.utils.groups.mpu + if mpu is None or mpu.get_tensor_model_parallel_world_size() == 1: + # no tensor parallelism for non-experts + return input_ + return _GatherTokens.apply(input_, dim) + + +def drop_tokens(input_, dim=0): + mpu = deepspeed.utils.groups.mpu + if mpu is None or mpu.get_tensor_model_parallel_world_size() == 1: + # no tensor parallelism for non-experts + return input_ + return _DropTokens.apply(input_, dim) diff --git a/deepspeed/moe/sharded_moe.py b/deepspeed/moe/sharded_moe.py index 024de2f..211b212 100644 --- 
a/deepspeed/moe/sharded_moe.py +++ b/deepspeed/moe/sharded_moe.py @@ -12,17 +12,16 @@ Copyright 2021 The Microsoft DeepSpeed Team # This source code is licensed under the BSD license found in the # LICENSE file in the root directory of this source tree. -from deepspeed.utils.timer import ThroughputTimer, SynchronizedWallClockTimer -from deepspeed.utils import logger, log_dist -from typing import Callable, Dict, TYPE_CHECKING, Any, Optional, Tuple, Union, cast +from deepspeed.utils.timer import SynchronizedWallClockTimer +from deepspeed.utils import logger +from typing import Callable, Dict, TYPE_CHECKING, Any, Optional, Tuple -import time -from time import perf_counter import torch from torch import Tensor -import torch.distributed as dist -from torch.nn import Module, ModuleList +from torch.nn import Module import torch.nn.functional as F +from deepspeed.utils import groups +from .mappings import drop_tokens, gather_tokens if TYPE_CHECKING: Base = Module[Tensor] @@ -80,12 +79,20 @@ def gumbel_rsample(shape: Tuple, device: torch.device) -> Tensor: return gumbel(shape) +from deepspeed import comm as dist + +# einsum dimensions: (g)roup, (s)equence, (e)xpert, (m)odel, (c)apacity +# See https://arxiv.org/pdf/2006.16668.pdf for details. 
+ + # Based on https://github.com/pytorch/pytorch/pull/40762 class _AllToAll(torch.autograd.Function): @staticmethod - def forward(ctx: Any, - group: dist.ProcessGroup, - input: Tensor) -> Tensor: # type: ignore + def forward( + ctx: Any, + # TODO: replace with DS process group + group: torch.distributed.ProcessGroup, + input: Tensor) -> Tensor: # type: ignore ctx.group = group input = input.contiguous() output = torch.empty_like(input) @@ -206,7 +213,7 @@ def top1gating(logits: Tensor, # if we don't want to drop any tokens if not drop_tokens: new_capacity = torch.max(exp_counts).to(logits.device) - dist.all_reduce(new_capacity, op=dist.ReduceOp.MAX, group=dist.group.WORLD) + dist.all_reduce(new_capacity, op=dist.ReduceOp.MAX, group=dist.get_world_group()) capacity = new_capacity # Compute l_aux @@ -424,7 +431,7 @@ class TopKGate(Module): if self.wall_clock_breakdown: self.timers('TopKGate').stop() - self.gate_time = self.timers('TopKGate').elapsed(reset=False) * 1000 + self.gate_time = self.timers('TopKGate').elapsed(reset=False) return gate_output @@ -466,13 +473,17 @@ class MOELayer(Base): self.timers = SynchronizedWallClockTimer() self.wall_clock_breakdown = False - self.use_tutel = use_tutel and TUTEL_INSTALLED + self.use_tutel = use_tutel and TUTEL_INSTALLED and gate.k == 1 if self.use_tutel: logger.info('Using Tutel optimizations.') elif use_tutel and not TUTEL_INSTALLED: logger.warning("Tutel optimization requested but not installed. " "Proceeding without Tutel.") + elif use_tutel and TUTEL_INSTALLED and gate.k != 1: + logger.warning( + "To enable Tutel optimization, use top-1 instead of top-2 gate. 
" + "Proceeding without Tutel.") def _set_ep_group(self, ep_group): self.ep_group = ep_group @@ -511,11 +522,20 @@ class MOELayer(Base): if self.wall_clock_breakdown: self.timers('falltoall').start() + if groups._get_expert_model_parallel_world_size() == 1: + # If the non-expert is tensor-parallel, it will create + # duplicate tokens on the tensor-parallel ranks. + # Since our experts are not tensor-parallel, these duplicates + # need to be dropped to ensure correctness. + # this also doubles up as a communication optimization as we are + # reducing the all-to-all communication volume. + dispatched_input = drop_tokens(dispatched_input, dim=1) + dispatched_input = _AllToAll.apply(self.ep_group, dispatched_input) if self.wall_clock_breakdown: self.timers('falltoall').stop() - self.time_falltoall = self.timers('falltoall').elapsed(reset=False) * 1000 + self.time_falltoall = self.timers('falltoall').elapsed(reset=False) # Re-shape after all-to-all: ecm -> gecm dispatched_input = dispatched_input.reshape(self.ep_size, @@ -532,13 +552,19 @@ class MOELayer(Base): if self.wall_clock_breakdown: self.timers('salltoall').stop() - self.time_salltoall = self.timers('salltoall').elapsed(reset=False) * 1000 + self.time_salltoall = self.timers('salltoall').elapsed(reset=False) # Re-shape back: gecm -> ecm expert_output = expert_output.reshape(self.ep_size * self.num_local_experts, -1, d_model) + if groups._get_expert_model_parallel_world_size() == 1: + # the dropped duplicate tokens need to be gathered on each + # tensor parallel rank again for the tensor-parallel + # non-expert of the next layer. 
+ expert_output = gather_tokens(expert_output, dim=1) + if self.use_tutel: combined_output = self._tutel_dispatcher.decode(expert_output.view(E * C, M)) else: @@ -550,6 +576,6 @@ class MOELayer(Base): if self.wall_clock_breakdown: self.timers('moe').stop() - self.time_moe = self.timers('moe').elapsed(reset=False) * 1000 + self.time_moe = self.timers('moe').elapsed(reset=False) return a diff --git a/deepspeed/moe/utils.py b/deepspeed/moe/utils.py index 0992293..1bf5279 100644 --- a/deepspeed/moe/utils.py +++ b/deepspeed/moe/utils.py @@ -1,12 +1,14 @@ +'''Copyright The Microsoft DeepSpeed Team''' + from typing import List, Tuple, Dict import torch -import deepspeed.utils.groups as groups from .layer import MoE def has_moe_layers(m): has_moe = False num_experts = 0 + for _, module in m.named_modules(): if isinstance(module, MoE): has_moe = True @@ -59,8 +61,9 @@ def split_params_grads_into_shared_and_expert_params( return shared_grads, expert_grads -def split_params_into_different_moe_groups_for_optimizer( - param_groups: Tuple[Dict]) -> Tuple[Dict]: +def split_params_into_different_moe_groups_for_optimizer(param_groups: Tuple[Dict], + max_group_size=178956971 + ) -> Tuple[Dict]: """Split parameters into different MoE groups for optimizer Args: @@ -112,8 +115,32 @@ def split_params_into_different_moe_groups_for_optimizer( param_group['params'] = new_params # Flatten the moe groups - for k, v in group_moe.items(): - for k1, v1 in v.items(): - param_groups.append(v1) + if max_group_size is not None: + for k, v in group_moe.items(): + for k1, v1 in v.items(): + cur_group = [] + all_groups = [] + size_of_cur_group = 0 + for param in v1['params']: + if size_of_cur_group + param.numel() <= max_group_size: + cur_group.append(param) + size_of_cur_group += param.numel() + else: + all_groups.append(cur_group) + cur_group = [param] + size_of_cur_group = param.numel() + if cur_group: + all_groups.append(cur_group) + for group in all_groups: + new_dict = {} + for key, val in 
v1.items(): + if key != 'params': + new_dict[key] = val + new_dict['params'] = group + param_groups.append(new_dict) + else: + for k, v in group_moe.items(): + for k1, v1 in v.items(): + param_groups.append(v1) return tuple(param_groups) diff --git a/deepspeed/monitor/__init__.py b/deepspeed/monitor/__init__.py new file mode 100644 index 0000000..fcb45ab --- /dev/null +++ b/deepspeed/monitor/__init__.py @@ -0,0 +1 @@ +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/deepspeed/monitor/config.py b/deepspeed/monitor/config.py new file mode 100644 index 0000000..09ba7ef --- /dev/null +++ b/deepspeed/monitor/config.py @@ -0,0 +1,87 @@ +'''Copyright The Microsoft DeepSpeed Team''' +""" +Copyright (c) Microsoft Corporation +Licensed under the MIT license. +""" + +from pydantic import root_validator +from deepspeed.runtime.config_utils import DeepSpeedConfigModel + + +def get_monitor_config(param_dict): + monitor_dict = { + key: param_dict.get(key, + {}) + for key in ("tensorboard", + "wandb", + "csv_monitor") + } + return DeepSpeedMonitorConfig(**monitor_dict) + + +class TensorBoardConfig(DeepSpeedConfigModel): + """Sets parameters for TensorBoard monitor.""" + + enabled: bool = False + """ Whether logging to Tensorboard is enabled. Requires `tensorboard` package is installed. """ + + output_path: str = "" + """ + Path to where the Tensorboard logs will be written. If not provided, the + output path is set under the training script’s launching path. + """ + + job_name: str = "DeepSpeedJobName" + """ Name for the current job. This will become a new directory inside `output_path`. """ + + +class WandbConfig(DeepSpeedConfigModel): + """Sets parameters for WandB monitor.""" + + enabled: bool = False + """ Whether logging to WandB is enabled. Requires `wandb` package is installed. """ + + group: str = None + """ Name for the WandB group. This can be used to group together runs. """ + + team: str = None + """ Name for the WandB team. 
""" + + project: str = "deepspeed" + """ Name for the WandB project. """ + + +class CSVConfig(DeepSpeedConfigModel): + """Sets parameters for CSV monitor.""" + + enabled: bool = False + """ Whether logging to local CSV files is enabled. """ + + output_path: str = "" + """ + Path to where the csv files will be written. If not provided, the output + path is set under the training script’s launching path. + """ + + job_name: str = "DeepSpeedJobName" + """ Name for the current job. This will become a new directory inside `output_path`. """ + + +class DeepSpeedMonitorConfig(DeepSpeedConfigModel): + """Sets parameters for various monitoring methods.""" + + tensorboard: TensorBoardConfig = {} + """ TensorBoard monitor, requires `tensorboard` package is installed. """ + + wandb: WandbConfig = {} + """ WandB monitor, requires `wandb` package is installed. """ + + csv_monitor: CSVConfig = {} + """ Local CSV output of monitoring data. """ + @root_validator + def check_enabled(cls, values): + values["enabled"] = False + if (values.get("tensorboard").enabled or values.get("wandb").enabled + or values.get("csv_monitor").enabled): + values["enabled"] = True + return values diff --git a/deepspeed/monitor/csv_monitor.py b/deepspeed/monitor/csv_monitor.py new file mode 100644 index 0000000..7de4fbc --- /dev/null +++ b/deepspeed/monitor/csv_monitor.py @@ -0,0 +1,63 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +from .monitor import Monitor +import os + +import deepspeed.comm as dist + + +class csvMonitor(Monitor): + def __init__(self, csv_config): + super().__init__(csv_config) + self.filenames = [] + self.enabled = csv_config.enabled + self.output_path = csv_config.output_path + self.job_name = csv_config.job_name + self.log_dir = self.setup_log_dir() + + def setup_log_dir(self, base=os.path.join(os.path.expanduser("~"), "csv_monitor")): + if self.enabled and dist.get_rank() == 0: + if self.output_path is not None: + log_dir = os.path.join(self.output_path, self.job_name) + # 
NOTE: This code path currently is never used since the default tensorboard_output_path is an empty string and not None. Saving it in case we want this functionality in the future. + else: + if "DLWS_JOB_ID" in os.environ: + infra_job_id = os.environ["DLWS_JOB_ID"] + elif "DLTS_JOB_ID" in os.environ: + infra_job_id = os.environ["DLTS_JOB_ID"] + else: + infra_job_id = "unknown-job-id" + + csv_monitor_dir_name = os.path.join(infra_job_id, "logs") + log_dir = os.path.join(base, csv_monitor_dir_name, self.job_name) + os.makedirs(log_dir, exist_ok=True) + return log_dir + + def write_events(self, event_list): + if self.enabled and dist.get_rank() == 0: + import csv + # We assume each event_list element is a tensorboard-style tuple in the format: (log_name: String, value, step: Int) + for event in event_list: + log_name = event[0] + value = event[1] + step = event[2] + + # Set the header to the log_name + # Need this check because the deepspeed engine currently formats log strings to separate with '/' + if '/' in log_name: + record_splits = log_name.split('/') + header = record_splits[len(record_splits) - 1] + else: + header = log_name + + # sanitize common naming conventions into filename + filename = log_name.replace('/', '_').replace(' ', '_') + fname = self.log_dir + '/' + filename + '.csv' + + # Open file and record event. 
Insert header if this is the first time writing + with open(fname, 'a+') as csv_monitor_file: + csv_monitor_writer = csv.writer(csv_monitor_file) + if filename not in self.filenames: + self.filenames.append(filename) + csv_monitor_writer.writerow(['step', header]) + csv_monitor_writer.writerow([step, value]) diff --git a/deepspeed/monitor/monitor.py b/deepspeed/monitor/monitor.py new file mode 100644 index 0000000..504c3da --- /dev/null +++ b/deepspeed/monitor/monitor.py @@ -0,0 +1,48 @@ +'''Copyright The Microsoft DeepSpeed Team''' +""" + Support different forms of monitoring such as wandb and tensorboard +""" + +from abc import ABC, abstractmethod +import deepspeed.comm as dist + + +class Monitor(ABC): + @abstractmethod + def __init__(self, monitor_config): + self.monitor_config = monitor_config + + @abstractmethod + def write_events(self, event_list): + pass + + +from .wandb import WandbMonitor +from .tensorboard import TensorBoardMonitor +from .csv_monitor import csvMonitor + + +class MonitorMaster(Monitor): + def __init__(self, monitor_config): + super().__init__(monitor_config) + self.tb_monitor = None + self.wandb_monitor = None + self.csv_monitor = None + self.enabled = monitor_config.enabled + + if dist.get_rank() == 0: + if monitor_config.tensorboard.enabled: + self.tb_monitor = TensorBoardMonitor(monitor_config.tensorboard) + if monitor_config.wandb.enabled: + self.wandb_monitor = WandbMonitor(monitor_config.wandb) + if monitor_config.csv_monitor.enabled: + self.csv_monitor = csvMonitor(monitor_config.csv_monitor) + + def write_events(self, event_list): + if dist.get_rank() == 0: + if self.tb_monitor is not None: + self.tb_monitor.write_events(event_list) + if self.wandb_monitor is not None: + self.wandb_monitor.write_events(event_list) + if self.csv_monitor is not None: + self.csv_monitor.write_events(event_list) diff --git a/deepspeed/monitor/tensorboard.py b/deepspeed/monitor/tensorboard.py new file mode 100644 index 0000000..db3a50d --- /dev/null +++ 
b/deepspeed/monitor/tensorboard.py @@ -0,0 +1,54 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +from .utils import check_tb_availability +from .monitor import Monitor +import os + +import deepspeed.comm as dist + + +class TensorBoardMonitor(Monitor): + def __init__(self, tensorboard_config): + super().__init__(tensorboard_config) + check_tb_availability() + + self.summary_writer = None + self.enabled = tensorboard_config.enabled + self.output_path = tensorboard_config.output_path + self.job_name = tensorboard_config.job_name + + if self.enabled and dist.get_rank() == 0: + self.get_summary_writer() + + def get_summary_writer(self, + base=os.path.join(os.path.expanduser("~"), + "tensorboard")): + if self.enabled and dist.get_rank() == 0: + from torch.utils.tensorboard import SummaryWriter + if self.output_path is not None: + log_dir = os.path.join(self.output_path, self.job_name) + # NOTE: This code path currently is never used since the default output_path is an empty string and not None. Saving it in case we want this functionality in the future. 
+ else: + if "DLWS_JOB_ID" in os.environ: + infra_job_id = os.environ["DLWS_JOB_ID"] + elif "DLTS_JOB_ID" in os.environ: + infra_job_id = os.environ["DLTS_JOB_ID"] + else: + infra_job_id = "unknown-job-id" + + summary_writer_dir_name = os.path.join(infra_job_id, "logs") + log_dir = os.path.join(base, summary_writer_dir_name, self.output_path) + os.makedirs(log_dir, exist_ok=True) + self.summary_writer = SummaryWriter(log_dir=log_dir) + return self.summary_writer + + def write_events(self, event_list, flush=True): + if self.enabled and self.summary_writer is not None and dist.get_rank() == 0: + for event in event_list: + self.summary_writer.add_scalar(*event) + if flush: + self.summary_writer.flush() + + def flush(self): + if self.enabled and self.summary_writer is not None and dist.get_rank() == 0: + self.summary_writer.flush() diff --git a/deepspeed/monitor/utils.py b/deepspeed/monitor/utils.py new file mode 100644 index 0000000..aa37e8d --- /dev/null +++ b/deepspeed/monitor/utils.py @@ -0,0 +1,21 @@ +'''Copyright The Microsoft DeepSpeed Team''' + + +def check_tb_availability(): + try: + # torch.utils.tensorboard will fail if `tensorboard` is not available, + # see their docs for more details: https://pytorch.org/docs/1.8.0/tensorboard.html + import tensorboard # noqa: F401 + except ImportError: + print('If you want to use tensorboard logging, please `pip install tensorboard`') + raise + + +def check_wandb_availability(): + try: + import wandb # noqa: F401 + except ImportError: + print( + 'If you want to use wandb logging, please `pip install wandb` and follow the instructions at https://docs.wandb.ai/quickstart' + ) + raise diff --git a/deepspeed/monitor/wandb.py b/deepspeed/monitor/wandb.py new file mode 100644 index 0000000..49fc033 --- /dev/null +++ b/deepspeed/monitor/wandb.py @@ -0,0 +1,34 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +from .utils import check_wandb_availability +from .monitor import Monitor + +import deepspeed.comm as dist + + +class 
WandbMonitor(Monitor): + def __init__(self, wandb_config): + super().__init__(wandb_config) + check_wandb_availability() + import wandb + + self.enabled = wandb_config.enabled + self.group = wandb_config.group + self.team = wandb_config.team + self.project = wandb_config.project + + if self.enabled and dist.get_rank() == 0: + wandb.init(project=self.project, group=self.group, entity=self.team) + + def log(self, data, step=None, commit=None, sync=None): + if self.enabled and dist.get_rank() == 0: + import wandb + return wandb.log(data, step=step, commit=commit, sync=sync) + + def write_events(self, event_list): + if self.enabled and dist.get_rank() == 0: + for event in event_list: + label = event[0] + value = event[1] + step = event[2] + self.log({label: value}, step=step) diff --git a/deepspeed/nebula/__init__.py b/deepspeed/nebula/__init__.py new file mode 100644 index 0000000..fcb45ab --- /dev/null +++ b/deepspeed/nebula/__init__.py @@ -0,0 +1 @@ +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/deepspeed/nebula/config.py b/deepspeed/nebula/config.py new file mode 100644 index 0000000..93b879f --- /dev/null +++ b/deepspeed/nebula/config.py @@ -0,0 +1,54 @@ +'''Copyright The Microsoft DeepSpeed Team''' +""" +Copyright (c) Microsoft Corporation +Licensed under the MIT license. 
+""" + +from deepspeed.runtime.config_utils import get_scalar_param, DeepSpeedConfigObject +from deepspeed.nebula.constants import * + + +class DeepSpeedNebulaConfig(DeepSpeedConfigObject): + def __init__(self, param_dict): + super(DeepSpeedNebulaConfig, self).__init__() + + self.enabled = None + self.persistent_storage_path = None + self.persistent_time_interval = None + self.num_of_version_in_retention = None + self.enable_nebula_load = None + + if NEBULA in param_dict.keys(): + nebula_dict = param_dict[NEBULA] + else: + nebula_dict = {} + + self._initialize(nebula_dict) + + def _initialize(self, nebula_dict): + self.enabled = get_scalar_param(nebula_dict, + NEBULA_ENABLED, + NEBULA_ENABLED_DEFAULT) + + self.load_path = get_scalar_param(nebula_dict, + NEBULA_LOAD_PATH, + NEBULA_LOAD_PATH_DEFAULT) + + self.enable_nebula_load = get_scalar_param(nebula_dict, + NEBULA_ENABLE_NEBULA_LOAD, + NEBULA_ENABLE_NEBULA_LOAD_DEFAULT) + + self.persistent_storage_path = get_scalar_param( + nebula_dict, + NEBULA_PERSISTENT_STORAGE_PATH, + NEBULA_PERSISTENT_STORAGE_PATH_DEFAULT) + + self.persistent_time_interval = get_scalar_param( + nebula_dict, + NEBULA_PERSISTENT_TIME_INTERVAL, + NEBULA_PERSISTENT_TIME_INTERVAL_DEFAULT) + + self.num_of_version_in_retention = get_scalar_param( + nebula_dict, + NEBULA_NUM_OF_VERSION_IN_RETENTION, + NEBULA_NUM_OF_VERSION_IN_RETENTION_DEFAULT) diff --git a/deepspeed/nebula/constants.py b/deepspeed/nebula/constants.py new file mode 100644 index 0000000..6ad876a --- /dev/null +++ b/deepspeed/nebula/constants.py @@ -0,0 +1,87 @@ +'''Copyright The Microsoft DeepSpeed Team''' +""" +Copyright (c) Microsoft Corporation +Licensed under the MIT license. +""" + +######################################### +# nebula +######################################### +# Nebula. By default, this feature is not enabled. 
+# Users can configure in ds_config.json as below example: +NEBULA_FORMAT = ''' +nebula should be enabled as: +"session_params": { + "nebula": { + "enabled": true, + "persistent_storage_path": "/foo/bar", + "persistent_time_interval": 100, + "num_of_version_in_retention": 2, + "enable_nebula_load": true + } +} +''' + +NEBULA = "nebula" + +NEBULA_ENABLED = "enabled" +NEBULA_ENABLED_DEFAULT = False + +# There is a case where customer want to load the checkpoint saved +# by raw torch. Because nebula cannot load torch checkpoint directly +# as they have different folder structures to bring the gap for +# loading(the data are totaly same in bytes for torch and enbula s +# aving). +# In this case, we must disable nebula load to use raw torch load. +# Customer can just set NEBULA_ENABLE_NEBULA_LOAD to False. Then use +# original way of deepspeed to load, i.e. set the value of "--load". +NEBULA_ENABLE_NEBULA_LOAD = "enable_nebula_load" +NEBULA_ENABLE_NEBULA_LOAD_DEFAULT = True + +# When you want to resume the previous checkpoint saved by nebula, +# you can set NEBULA_LOAD_PATH as the parent folder of checkpoint. +# If NEBULA_LOAD_PATH is None, the NEBULA_PERSISTENT_STORAGE_PATH +# will be the default path to load. +NEBULA_LOAD_PATH = "nebula_load_path" +NEBULA_LOAD_PATH_DEFAULT = None + +# Nebula will save the checkpoint under NEBULA_LOAD_PATH in the +# asynchronous way. +NEBULA_PERSISTENT_STORAGE_PATH = "persistent_storage_path" +NEBULA_PERSISTENT_STORAGE_PATH_DEFAULT = None + +# Time interval to trigger the nebula persistence. +NEBULA_PERSISTENT_TIME_INTERVAL = "persistent_time_interval" +NEBULA_PERSISTENT_TIME_INTERVAL_DEFAULT = 100 + +# Checkpoint number which will be kept in memory. Let us say, +# if the value is 2. Then we have checkpoints 1 and 2 are ready +# now. When it comes to checkpoint 3, the 1 will be removed if +# 1 has been persisted to disk. 
+NEBULA_NUM_OF_VERSION_IN_RETENTION = "num_of_version_in_retention" +NEBULA_NUM_OF_VERSION_IN_RETENTION_DEFAULT = 2 + +# Neubla envs +NEBULA_EXPORT_ENVS = [ + 'DLTS_JOB_ID', + 'DLTS_NUM_WORKER', + 'NEBULA_PERSISTENT_STORAGE_PATH', + 'NEBULA_PERSISTENT_TIME_INTERVAL', + 'AML_RUN_ID', + 'AZUREML_RUN_TOKEN', + 'AZUREML_WORKSPACE_SCOPE', + 'AZUREML_EXPERIMENT_SCOPE', + 'AZUREML_RUN_HISTORY_SERVICE_ENDPOINT', + 'AZUREML_RUN_ID', + 'NEBULA_MEMORY_BUFFER_SIZE', + 'AZUREML_PARAMETER_ITPJOB_NAME', + 'FC_TASKROLE_NAME', + 'FC_TASK_INDEX', + 'MASTER_HOST', + 'LOCAL_HOST', + 'AZUREML_BLOB_ACCOUNT_NAME', + 'AZUREML_BLOB_ACCOUNT_KEY' +] + +# ITP env files +DLTS_POD_ENV_PATH = '/dlts-runtime/env/pod.env' diff --git a/deepspeed/ops/__init__.py b/deepspeed/ops/__init__.py old mode 100644 new mode 100755 index 698f959..efec4e6 --- a/deepspeed/ops/__init__.py +++ b/deepspeed/ops/__init__.py @@ -1,3 +1,5 @@ +'''Copyright The Microsoft DeepSpeed Team''' + from . import adam from . import adagrad from . import lamb diff --git a/deepspeed/ops/adagrad/__init__.py b/deepspeed/ops/adagrad/__init__.py index 3067c5c..a5ab6de 100644 --- a/deepspeed/ops/adagrad/__init__.py +++ b/deepspeed/ops/adagrad/__init__.py @@ -1 +1,3 @@ +'''Copyright The Microsoft DeepSpeed Team''' + from .cpu_adagrad import DeepSpeedCPUAdagrad diff --git a/deepspeed/ops/adagrad/cpu_adagrad.py b/deepspeed/ops/adagrad/cpu_adagrad.py old mode 100644 new mode 100755 index e3f70a6..07cdaa4 --- a/deepspeed/ops/adagrad/cpu_adagrad.py +++ b/deepspeed/ops/adagrad/cpu_adagrad.py @@ -2,11 +2,8 @@ Copyright 2020 The Microsoft DeepSpeed Team ''' -import math import torch -import time -from pathlib import Path -from ..op_builder import CPUAdagradBuilder +from deepspeed.ops.op_builder import CPUAdagradBuilder from deepspeed.utils.logging import should_log_le diff --git a/deepspeed/ops/adam/__init__.py b/deepspeed/ops/adam/__init__.py old mode 100644 new mode 100755 index 6ab6cbd..111d317 --- a/deepspeed/ops/adam/__init__.py +++ 
b/deepspeed/ops/adam/__init__.py @@ -1,2 +1,4 @@ +'''Copyright The Microsoft DeepSpeed Team''' + from .cpu_adam import DeepSpeedCPUAdam from .fused_adam import FusedAdam diff --git a/deepspeed/ops/adam/cpu_adam.py b/deepspeed/ops/adam/cpu_adam.py old mode 100644 new mode 100755 index 1c7dd5d..04c5ac3 --- a/deepspeed/ops/adam/cpu_adam.py +++ b/deepspeed/ops/adam/cpu_adam.py @@ -2,12 +2,11 @@ Copyright 2020 The Microsoft DeepSpeed Team ''' -import math import torch -import time -from pathlib import Path -from ..op_builder import CPUAdamBuilder +from cpuinfo import get_cpu_info +from deepspeed.utils import logger from deepspeed.utils.logging import should_log_le +from deepspeed.ops.op_builder import CPUAdamBuilder class DeepSpeedCPUAdam(torch.optim.Optimizer): @@ -76,6 +75,20 @@ class DeepSpeedCPUAdam(torch.optim.Optimizer): amsgrad=amsgrad) super(DeepSpeedCPUAdam, self).__init__(model_params, default_args) + cpu_info = get_cpu_info() + self.cpu_vendor = cpu_info["vendor_id_raw"].lower( + ) if "vendor_id_raw" in cpu_info else "unknown" + if "amd" in self.cpu_vendor: + for group_id, group in enumerate(self.param_groups): + for param_id, p in enumerate(group['params']): + if p.dtype == torch.half: + logger.warning( + "FP16 params for CPUAdam may not work on AMD CPUs") + break + else: + continue + break + self.opt_id = DeepSpeedCPUAdam.optimizer_id DeepSpeedCPUAdam.optimizer_id = DeepSpeedCPUAdam.optimizer_id + 1 self.adam_w_mode = adamw_mode diff --git a/deepspeed/ops/adam/fused_adam.py b/deepspeed/ops/adam/fused_adam.py index 80e5bf5..169fde6 100644 --- a/deepspeed/ops/adam/fused_adam.py +++ b/deepspeed/ops/adam/fused_adam.py @@ -6,11 +6,11 @@ This file is adapted from fused adam in NVIDIA/apex, commit a109f85 ''' import torch -import importlib from .multi_tensor_apply import MultiTensorApply multi_tensor_applier = MultiTensorApply(2048 * 32) -from ..op_builder import FusedAdamBuilder +from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder 
import FusedAdamBuilder class FusedAdam(torch.optim.Optimizer): @@ -72,7 +72,7 @@ class FusedAdam(torch.optim.Optimizer): fused_adam_cuda = FusedAdamBuilder().load() # Skip buffer - self._dummy_overflow_buf = torch.cuda.IntTensor([0]) + self._dummy_overflow_buf = get_accelerator().IntTensor([0]) self.multi_tensor_adam = fused_adam_cuda.multi_tensor_adam def zero_grad(self): @@ -109,12 +109,8 @@ class FusedAdam(torch.optim.Optimizer): bias_correction = 1 if group['bias_correction'] else 0 beta1, beta2 = group['betas'] - # assume same step across group now to simplify things - # per parameter step can be easily support by making it tensor, or pass list into kernel - if 'step' in group: - group['step'] += 1 - else: - group['step'] = 1 + if 'step' not in group: + group['step'] = 0 # create lists for multi-tensor apply g_16, p_16, m_16, v_16 = [], [], [], [] @@ -131,6 +127,10 @@ class FusedAdam(torch.optim.Optimizer): state = self.state[p] # State initialization if len(state) == 0: + # DeepSpeed ZeRO 3 processes each subgroup a time, so we need to keep tracking step count for each tensor separately. + # While this is not an issue for ZeRO 1 & 2, since they apply a single optimizatin step to the whole param group at the same time. + # In order to keep backward compatibility for the existing checkpoints, we use group['state'] to initialize state['step'] if it exists. 
+ state['step'] = group.get('step', 0) # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p.data) # Exponential moving average of squared gradient values @@ -150,6 +150,7 @@ class FusedAdam(torch.optim.Optimizer): raise RuntimeError('FusedAdam only support fp16 and fp32.') if (len(g_16) > 0): + state['step'] += 1 multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_16, @@ -160,11 +161,12 @@ class FusedAdam(torch.optim.Optimizer): beta1, beta2, group['eps'], - group['step'], + state['step'], self.adam_w_mode, bias_correction, group['weight_decay']) if (len(g_32) > 0): + state['step'] += 1 multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_32, @@ -175,7 +177,7 @@ class FusedAdam(torch.optim.Optimizer): beta1, beta2, group['eps'], - group['step'], + state['step'], self.adam_w_mode, bias_correction, group['weight_decay']) diff --git a/deepspeed/ops/adam/multi_tensor_apply.py b/deepspeed/ops/adam/multi_tensor_apply.py index 173288e..e837309 100644 --- a/deepspeed/ops/adam/multi_tensor_apply.py +++ b/deepspeed/ops/adam/multi_tensor_apply.py @@ -4,7 +4,6 @@ Copyright 2020 The Microsoft DeepSpeed Team Copyright NVIDIA/apex This file is adapted from NVIDIA/apex, commit a109f85 ''' -import torch class MultiTensorApply(object): diff --git a/deepspeed/ops/aio/__init__.py b/deepspeed/ops/aio/__init__.py old mode 100644 new mode 100755 diff --git a/deepspeed/ops/csrc b/deepspeed/ops/csrc new file mode 120000 index 0000000..3386523 --- /dev/null +++ b/deepspeed/ops/csrc @@ -0,0 +1 @@ +../../csrc \ No newline at end of file diff --git a/deepspeed/ops/csrc/adagrad/cpu_adagrad.cpp b/deepspeed/ops/csrc/adagrad/cpu_adagrad.cpp deleted file mode 100644 index 4f2a9b6..0000000 --- a/deepspeed/ops/csrc/adagrad/cpu_adagrad.cpp +++ /dev/null @@ -1,227 +0,0 @@ -#include "cpu_adagrad.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include "cublas_v2.h" -#include "cuda.h" -#include 
"curand.h" -#include "custom_cuda_layers.h" - -static std::unordered_map> s_optimizers; - -// C++ interface - -void Adagrad_Optimizer::Step_1(float* _params, - float* grads, - float* _exp_avg_sq, - size_t _param_size, - __half* dev_params, - bool half_precision) -{ - size_t rounded_size = 0; -#if defined(__AVX512__) or defined(__AVX256__) - Step_AVX<1>( - &rounded_size, _params, grads, _exp_avg_sq, _param_size, dev_params, half_precision); -#endif - if (_param_size > rounded_size) { - float step_size = -1 * _alpha; - __half* grads_cast_h; - __half* params_cast_h; - if (half_precision) { - grads_cast_h = reinterpret_cast<__half*>(grads); - params_cast_h = reinterpret_cast<__half*>(_params); - } - for (size_t t = rounded_size; t < _param_size; t += TILE) { - size_t copy_size = TILE; - if ((t + TILE) > _param_size) copy_size = _param_size - t; - size_t offset = copy_size + t; - if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); } -#pragma omp parallel for - for (size_t k = t; k < offset; k++) { - float grad = half_precision ? (float)grads_cast_h[k] : grads[k]; - float param = half_precision ? 
(float)params_cast_h[k] : _params[k]; - float momentum = grads[k]; - float variance = _exp_avg_sq[k]; - if (_weight_decay > 0) { grad = param * _weight_decay + grad; } - - variance += grad * grad; - - grad = sqrt(variance); - grad += _eps; - grad = momentum / grad; - param = grad * step_size + param; - if (dev_params) _doubled_buffer[_buf_index][k - t] = param; - - if (half_precision) - params_cast_h[k] = (__half)param; - else - _params[k] = param; - // STORE UPDATE TERM TO GRAD'S MEMORY - grads[k] = grad * step_size; - _exp_avg_sq[k] = variance; - } - if (dev_params) { - launch_param_update( - _doubled_buffer[_buf_index], dev_params + t, (copy_size), _streams[_buf_index]); - _buf_index = !_buf_index; - } - } - } -} - -void Adagrad_Optimizer::Step_4(float* _params, - float* grads, - float* _exp_avg_sq, - size_t _param_size, - __half* dev_params, - bool half_precision) -{ - size_t rounded_size = 0; -#if defined(__AVX512__) or defined(__AVX256__) - Step_AVX<4>( - &rounded_size, _params, grads, _exp_avg_sq, _param_size, dev_params, half_precision); -#endif - if (_param_size > rounded_size) - Step_1((_params + rounded_size), - (grads + rounded_size), - (_exp_avg_sq + rounded_size), - (_param_size - rounded_size), - (dev_params != nullptr ? 
(dev_params + rounded_size) : dev_params), - half_precision); -} - -int create_adagrad_optimizer(int optimizer_id, - float alpha = 1e-2, - float eps = 1e-8, - float weight_decay = 0, - bool should_log = false) -{ - auto opt = std::make_shared(alpha, eps, weight_decay); - - s_optimizers[optimizer_id] = opt; - - if (should_log) { - std::string avx_type = ""; -#if defined(__AVX512__) - avx_type = "AVX512"; -#else -#if defined(__AVX256__) - avx_type = "AVX2"; -#else - avx_type = "scalar"; -#endif -#endif - - printf("Adagrad Optimizer #%d is created with %s arithmetic capability.\n", - optimizer_id, - avx_type.c_str()); - printf("Config: alpha=%f, weight_decay=%f\n", alpha, weight_decay); - } - - return 0; -} - -void Adagrad_Optimizer::Step_8(float* _params, - float* grads, - float* _exp_avg_sq, - size_t _param_size, - __half* dev_params, - bool half_precision) -{ - size_t rounded_size = 0; -#if defined(__AVX512__) or defined(__AVX256__) - Step_AVX<8>( - &rounded_size, _params, grads, _exp_avg_sq, _param_size, dev_params, half_precision); -#endif - if (_param_size > rounded_size) - Step_4((_params + rounded_size), - (grads + rounded_size), - (_exp_avg_sq + rounded_size), - (_param_size - rounded_size), - (dev_params != nullptr ? 
(dev_params + rounded_size) : dev_params), - half_precision); -} - -int ds_adagrad_step(int optimizer_id, - size_t step, - float lr, - float epsilon, - float weight_decay, - torch::Tensor& params, - torch::Tensor& grads, - torch::Tensor& exp_avg_sq) -{ - auto params_c = params.contiguous(); - auto grads_c = grads.contiguous(); - auto exp_avg_sq_c = exp_avg_sq.contiguous(); - - float* params_ptr = (float*)params_c.data_ptr(); - float* grads_ptr = (float*)grads_c.data_ptr(); - float* exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr(); - - std::shared_ptr opt = - std::static_pointer_cast(s_optimizers[optimizer_id]); - opt->IncrementStep(step); - opt->update_state(lr, epsilon, weight_decay); - opt->Step_8(params_ptr, grads_ptr, exp_avg_sq_ptr, params_c.size(0)); - - opt->SynchronizeStreams(); - return 0; -} - -int ds_adagrad_step_plus_copy(int optimizer_id, - size_t step, - float lr, - float epsilon, - float weight_decay, - torch::Tensor& params, - torch::Tensor& grads, - torch::Tensor& exp_avg_sq, - torch::Tensor& gpu_params) -{ - auto params_c = params.contiguous(); - auto gpu_params_c = gpu_params.contiguous(); - auto exp_avg_sq_c = exp_avg_sq.contiguous(); - auto grads_c = grads.contiguous(); - - float* params_ptr = (float*)params_c.data_ptr(); - float* grads_ptr = (float*)grads_c.data_ptr(); - __half* gpu_params_ptr = (__half*)gpu_params_c.data_ptr(); - float* exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr(); - - std::shared_ptr opt = - std::static_pointer_cast(s_optimizers[optimizer_id]); - opt->IncrementStep(step); - opt->update_state(lr, epsilon, weight_decay); - opt->Step_8(params_ptr, - grads_ptr, - exp_avg_sq_ptr, - params_c.size(0), - gpu_params_ptr, - (params.options().dtype() == at::kHalf)); - - opt->SynchronizeStreams(); - return 0; -} - -int destroy_adagrad_optimizer(int optimizer_id) -{ - s_optimizers.erase(optimizer_id); - - return 0; -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("adagrad_update", &ds_adagrad_step, "DeepSpeed CPU Adagrad 
update (C++)"); - m.def("adagrad_update_copy", - &ds_adagrad_step_plus_copy, - "DeepSpeed CPU Adagrad update and param copy (C++)"); - m.def("create_adagrad", &create_adagrad_optimizer, "DeepSpeed CPU Adagrad (C++)"); - m.def("destroy_adagrad", &destroy_adagrad_optimizer, "DeepSpeed CPU Adagrad destroy (C++)"); -} diff --git a/deepspeed/ops/csrc/adagrad/cpu_adagrad_hip.cpp b/deepspeed/ops/csrc/adagrad/cpu_adagrad_hip.cpp deleted file mode 100644 index 6bbe9a9..0000000 --- a/deepspeed/ops/csrc/adagrad/cpu_adagrad_hip.cpp +++ /dev/null @@ -1,228 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! -#include "cpu_adagrad_hip.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include "rocblas.h" -#include "hip/hip_runtime.h" -#include "hiprand/hiprand.h" -#include "custom_hip_layers.h" - -static std::unordered_map> s_optimizers; - -// C++ interface - -void Adagrad_Optimizer::Step_1(float* _params, - float* grads, - float* _exp_avg_sq, - size_t _param_size, - __half* dev_params, - bool half_precision) -{ - size_t rounded_size = 0; -#if defined(__AVX512__) or defined(__AVX256__) - Step_AVX<1>( - &rounded_size, _params, grads, _exp_avg_sq, _param_size, dev_params, half_precision); -#endif - if (_param_size > rounded_size) { - float step_size = -1 * _alpha; - __half* grads_cast_h; - __half* params_cast_h; - if (half_precision) { - grads_cast_h = reinterpret_cast<__half*>(grads); - params_cast_h = reinterpret_cast<__half*>(_params); - } - for (size_t t = rounded_size; t < _param_size; t += TILE) { - size_t copy_size = TILE; - if ((t + TILE) > _param_size) copy_size = _param_size - t; - size_t offset = copy_size + t; - if ((t / TILE) >= 2) { hipStreamSynchronize(_streams[_buf_index]); } -#pragma omp parallel for - for (size_t k = t; k < offset; k++) { - float grad = half_precision ? (float)grads_cast_h[k] : grads[k]; - float param = half_precision ? 
(float)params_cast_h[k] : _params[k]; - float momentum = grads[k]; - float variance = _exp_avg_sq[k]; - if (_weight_decay > 0) { grad = param * _weight_decay + grad; } - - variance += grad * grad; - - grad = sqrt(variance); - grad += _eps; - grad = momentum / grad; - param = grad * step_size + param; - if (dev_params) _doubled_buffer[_buf_index][k - t] = param; - - if (half_precision) - params_cast_h[k] = (__half)param; - else - _params[k] = param; - // STORE UPDATE TERM TO GRAD'S MEMORY - grads[k] = grad * step_size; - _exp_avg_sq[k] = variance; - } - if (dev_params) { - launch_param_update( - _doubled_buffer[_buf_index], dev_params + t, (copy_size), _streams[_buf_index]); - _buf_index = !_buf_index; - } - } - } -} - -void Adagrad_Optimizer::Step_4(float* _params, - float* grads, - float* _exp_avg_sq, - size_t _param_size, - __half* dev_params, - bool half_precision) -{ - size_t rounded_size = 0; -#if defined(__AVX512__) or defined(__AVX256__) - Step_AVX<4>( - &rounded_size, _params, grads, _exp_avg_sq, _param_size, dev_params, half_precision); -#endif - if (_param_size > rounded_size) - Step_1((_params + rounded_size), - (grads + rounded_size), - (_exp_avg_sq + rounded_size), - (_param_size - rounded_size), - (dev_params != nullptr ? 
(dev_params + rounded_size) : dev_params), - half_precision); -} - -int create_adagrad_optimizer(int optimizer_id, - float alpha = 1e-2, - float eps = 1e-8, - float weight_decay = 0, - bool should_log = false) -{ - auto opt = std::make_shared(alpha, eps, weight_decay); - - s_optimizers[optimizer_id] = opt; - - if (should_log) { - std::string avx_type = ""; -#if defined(__AVX512__) - avx_type = "AVX512"; -#else -#if defined(__AVX256__) - avx_type = "AVX2"; -#else - avx_type = "scalar"; -#endif -#endif - - printf("Adagrad Optimizer #%d is created with %s arithmetic capability.\n", - optimizer_id, - avx_type.c_str()); - printf("Config: alpha=%f, weight_decay=%f\n", alpha, weight_decay); - } - - return 0; -} - -void Adagrad_Optimizer::Step_8(float* _params, - float* grads, - float* _exp_avg_sq, - size_t _param_size, - __half* dev_params, - bool half_precision) -{ - size_t rounded_size = 0; -#if defined(__AVX512__) or defined(__AVX256__) - Step_AVX<8>( - &rounded_size, _params, grads, _exp_avg_sq, _param_size, dev_params, half_precision); -#endif - if (_param_size > rounded_size) - Step_4((_params + rounded_size), - (grads + rounded_size), - (_exp_avg_sq + rounded_size), - (_param_size - rounded_size), - (dev_params != nullptr ? 
(dev_params + rounded_size) : dev_params), - half_precision); -} - -int ds_adagrad_step(int optimizer_id, - size_t step, - float lr, - float epsilon, - float weight_decay, - torch::Tensor& params, - torch::Tensor& grads, - torch::Tensor& exp_avg_sq) -{ - auto params_c = params.contiguous(); - auto grads_c = grads.contiguous(); - auto exp_avg_sq_c = exp_avg_sq.contiguous(); - - float* params_ptr = (float*)params_c.data_ptr(); - float* grads_ptr = (float*)grads_c.data_ptr(); - float* exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr(); - - std::shared_ptr opt = - std::static_pointer_cast(s_optimizers[optimizer_id]); - opt->IncrementStep(step); - opt->update_state(lr, epsilon, weight_decay); - opt->Step_8(params_ptr, grads_ptr, exp_avg_sq_ptr, params_c.size(0)); - - opt->SynchronizeStreams(); - return 0; -} - -int ds_adagrad_step_plus_copy(int optimizer_id, - size_t step, - float lr, - float epsilon, - float weight_decay, - torch::Tensor& params, - torch::Tensor& grads, - torch::Tensor& exp_avg_sq, - torch::Tensor& gpu_params) -{ - auto params_c = params.contiguous(); - auto gpu_params_c = gpu_params.contiguous(); - auto exp_avg_sq_c = exp_avg_sq.contiguous(); - auto grads_c = grads.contiguous(); - - float* params_ptr = (float*)params_c.data_ptr(); - float* grads_ptr = (float*)grads_c.data_ptr(); - __half* gpu_params_ptr = (__half*)gpu_params_c.data_ptr(); - float* exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr(); - - std::shared_ptr opt = - std::static_pointer_cast(s_optimizers[optimizer_id]); - opt->IncrementStep(step); - opt->update_state(lr, epsilon, weight_decay); - opt->Step_8(params_ptr, - grads_ptr, - exp_avg_sq_ptr, - params_c.size(0), - gpu_params_ptr, - (params.options().dtype() == at::kHalf)); - - opt->SynchronizeStreams(); - return 0; -} - -int destroy_adagrad_optimizer(int optimizer_id) -{ - s_optimizers.erase(optimizer_id); - - return 0; -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("adagrad_update", &ds_adagrad_step, "DeepSpeed CPU Adagrad 
update (C++)"); - m.def("adagrad_update_copy", - &ds_adagrad_step_plus_copy, - "DeepSpeed CPU Adagrad update and param copy (C++)"); - m.def("create_adagrad", &create_adagrad_optimizer, "DeepSpeed CPU Adagrad (C++)"); - m.def("destroy_adagrad", &destroy_adagrad_optimizer, "DeepSpeed CPU Adagrad destroy (C++)"); -} diff --git a/deepspeed/ops/csrc/adam/cpu_adam.cpp b/deepspeed/ops/csrc/adam/cpu_adam.cpp deleted file mode 100644 index 727eec8..0000000 --- a/deepspeed/ops/csrc/adam/cpu_adam.cpp +++ /dev/null @@ -1,292 +0,0 @@ -#include "cpu_adam.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include "cublas_v2.h" -#include "cuda.h" -#include "curand.h" -#include "custom_cuda_layers.h" - -static std::unordered_map> s_optimizers; - -// C++ interface - -void Adam_Optimizer::Step_1(float* _params, - float* grads, - float* _exp_avg, - float* _exp_avg_sq, - size_t _param_size, - __half* dev_params, - bool half_precision) -{ - size_t rounded_size = 0; -#if defined(__AVX512__) or defined(__AVX256__) - Step_AVX<1>(&rounded_size, - _params, - grads, - _exp_avg, - _exp_avg_sq, - _param_size, - dev_params, - half_precision); -#endif - if (_param_size > rounded_size) { - float betta1_minus1 = 1 - _betta1; - float betta2_minus1 = 1 - _betta2; - - float step_size = -1 * _alpha / _bias_correction1; - float w_decay = -1 * _alpha * _weight_decay; - __half* grads_cast_h; - __half* params_cast_h; - if (half_precision) { - grads_cast_h = reinterpret_cast<__half*>(grads); - params_cast_h = reinterpret_cast<__half*>(_params); - } - - for (size_t t = rounded_size; t < _param_size; t += TILE) { - size_t copy_size = TILE; - if ((t + TILE) > _param_size) copy_size = _param_size - t; - size_t offset = copy_size + t; - if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); } - -#pragma omp parallel for - for (size_t k = t; k < offset; k++) { - float grad = half_precision ? (float)grads_cast_h[k] : grads[k]; - float param = half_precision ? 
(float)params_cast_h[k] : _params[k]; - float momentum = _exp_avg[k]; - float variance = _exp_avg_sq[k]; - if (_weight_decay > 0 && !_adamw_mode) { grad = param * _weight_decay + grad; } - momentum = momentum * _betta1; - momentum = grad * betta1_minus1 + momentum; - - variance = variance * _betta2; - grad = grad * grad; - variance = grad * betta2_minus1 + variance; - - grad = sqrt(variance); - grad = grad * _bias_correction2 + _eps; - grad = momentum / grad; - if (_weight_decay > 0 && _adamw_mode) { param += w_decay * param; } - param = grad * step_size + param; - if (dev_params) _doubled_buffer[_buf_index][k - t] = param; - - if (half_precision) - params_cast_h[k] = (__half)param; - else - _params[k] = param; - _exp_avg[k] = momentum; - _exp_avg_sq[k] = variance; - } - if (dev_params) { - launch_param_update( - _doubled_buffer[_buf_index], dev_params + t, (copy_size), _streams[_buf_index]); - - _buf_index = !_buf_index; - } - } - } -} - -void Adam_Optimizer::Step_4(float* _params, - float* grads, - float* _exp_avg, - float* _exp_avg_sq, - size_t _param_size, - __half* dev_params, - bool half_precision) -{ - size_t rounded_size = 0; -#if defined(__AVX512__) or defined(__AVX256__) - Step_AVX<4>(&rounded_size, - _params, - grads, - _exp_avg, - _exp_avg_sq, - _param_size, - dev_params, - half_precision); -#endif - if (_param_size > rounded_size) - Step_1((_params + rounded_size), - (grads + rounded_size), - (_exp_avg + rounded_size), - (_exp_avg_sq + rounded_size), - (_param_size - rounded_size), - (dev_params != nullptr ? 
(dev_params + rounded_size) : dev_params), - half_precision); -} - -int create_adam_optimizer(int optimizer_id, - float alpha = 1e-3, - float betta1 = 0.9, - float betta2 = 0.999, - float eps = 1e-8, - float weight_decay = 0, - bool adamw_mode = true, - bool should_log = false) -{ - auto opt = - std::make_shared(alpha, betta1, betta2, eps, weight_decay, adamw_mode); - - s_optimizers[optimizer_id] = opt; - - if (should_log) { - std::string avx_type = ""; -#if defined(__AVX512__) - avx_type = "AVX512"; -#else -#if defined(__AVX256__) - avx_type = "AVX2"; -#else - avx_type = "scalar"; -#endif -#endif - - printf("Adam Optimizer #%d is created with %s arithmetic capability.\n", - optimizer_id, - avx_type.c_str()); - printf("Config: alpha=%f, betas=(%f, %f), weight_decay=%f, adam_w=%d\n", - alpha, - betta1, - betta2, - weight_decay, - (int)adamw_mode); - } - - return 0; -} - -void Adam_Optimizer::Step_8(float* _params, - float* grads, - float* _exp_avg, - float* _exp_avg_sq, - size_t _param_size, - __half* dev_params, - bool half_precision) -{ - size_t rounded_size = 0; -#if defined(__AVX512__) or defined(__AVX256__) - Step_AVX<8>(&rounded_size, - _params, - grads, - _exp_avg, - _exp_avg_sq, - _param_size, - dev_params, - half_precision); -#endif - if (_param_size > rounded_size) - Step_4((_params + rounded_size), - (grads + rounded_size), - (_exp_avg + rounded_size), - (_exp_avg_sq + rounded_size), - (_param_size - rounded_size), - (dev_params != nullptr ? 
(dev_params + rounded_size) : dev_params), - half_precision); -} - -int ds_adam_step(int optimizer_id, - size_t step, - float lr, - float beta1, - float beta2, - float epsilon, - float weight_decay, - bool bias_correction, - torch::Tensor& params, - torch::Tensor& grads, - torch::Tensor& exp_avg, - torch::Tensor& exp_avg_sq) -{ - auto params_c = params.contiguous(); - auto grads_c = grads.contiguous(); - auto exp_avg_c = exp_avg.contiguous(); - auto exp_avg_sq_c = exp_avg_sq.contiguous(); - - // assert(params.options().dtype() == grads.options().dtype()); - - float* params_ptr = (float*)params_c.data_ptr(); - float* grads_ptr = (float*)grads_c.data_ptr(); - float* exp_avg_ptr = (float*)exp_avg_c.data_ptr(); - float* exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr(); - - std::shared_ptr opt = - std::static_pointer_cast(s_optimizers[optimizer_id]); - opt->IncrementStep(step, beta1, beta2); - opt->update_state(lr, epsilon, weight_decay, bias_correction); - - opt->Step_8(params_ptr, - grads_ptr, - exp_avg_ptr, - exp_avg_sq_ptr, - params_c.size(0), - nullptr, - (params.options().dtype() == at::kHalf)); - - opt->SynchronizeStreams(); - return 0; -} - -int ds_adam_step_plus_copy(int optimizer_id, - size_t step, - float lr, - float beta1, - float beta2, - float epsilon, - float weight_decay, - bool bias_correction, - torch::Tensor& params, - torch::Tensor& grads, - torch::Tensor& exp_avg, - torch::Tensor& exp_avg_sq, - torch::Tensor& gpu_params) -{ - auto params_c = params.contiguous(); - auto gpu_params_c = gpu_params.contiguous(); - auto exp_avg_c = exp_avg.contiguous(); - auto exp_avg_sq_c = exp_avg_sq.contiguous(); - auto grads_c = grads.contiguous(); - - float* params_ptr = (float*)params_c.data_ptr(); - float* grads_ptr = (float*)grads_c.data_ptr(); - __half* gpu_params_ptr = (__half*)gpu_params_c.data_ptr(); - float* exp_avg_ptr = (float*)exp_avg_c.data_ptr(); - float* exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr(); - - std::shared_ptr opt = - 
std::static_pointer_cast(s_optimizers[optimizer_id]); - opt->IncrementStep(step, beta1, beta2); - opt->update_state(lr, epsilon, weight_decay, bias_correction); - opt->Step_8(params_ptr, - grads_ptr, - exp_avg_ptr, - exp_avg_sq_ptr, - params_c.size(0), - gpu_params_ptr, - (params.options().dtype() == at::kHalf)); - - opt->SynchronizeStreams(); - return 0; -} - -int destroy_adam_optimizer(int optimizer_id) -{ - s_optimizers.erase(optimizer_id); - - return 0; -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("adam_update", &ds_adam_step, "DeepSpeed CPU Adam update (C++)"); - m.def("adam_update_copy", - &ds_adam_step_plus_copy, - "DeepSpeed CPU Adam update and param copy (C++)"); - m.def("create_adam", &create_adam_optimizer, "DeepSpeed CPU Adam (C++)"); - m.def("destroy_adam", &destroy_adam_optimizer, "DeepSpeed CPU Adam destroy (C++)"); -} diff --git a/deepspeed/ops/csrc/adam/cpu_adam_hip.cpp b/deepspeed/ops/csrc/adam/cpu_adam_hip.cpp deleted file mode 100644 index 6716397..0000000 --- a/deepspeed/ops/csrc/adam/cpu_adam_hip.cpp +++ /dev/null @@ -1,293 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
-#include "cpu_adam_hip.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include "rocblas.h" -#include "hip/hip_runtime.h" -#include "hiprand/hiprand.h" -#include "custom_hip_layers.h" - -static std::unordered_map> s_optimizers; - -// C++ interface - -void Adam_Optimizer::Step_1(float* _params, - float* grads, - float* _exp_avg, - float* _exp_avg_sq, - size_t _param_size, - __half* dev_params, - bool half_precision) -{ - size_t rounded_size = 0; -#if defined(__AVX512__) or defined(__AVX256__) - Step_AVX<1>(&rounded_size, - _params, - grads, - _exp_avg, - _exp_avg_sq, - _param_size, - dev_params, - half_precision); -#endif - if (_param_size > rounded_size) { - float betta1_minus1 = 1 - _betta1; - float betta2_minus1 = 1 - _betta2; - - float step_size = -1 * _alpha / _bias_correction1; - float w_decay = -1 * _alpha * _weight_decay; - __half* grads_cast_h; - __half* params_cast_h; - if (half_precision) { - grads_cast_h = reinterpret_cast<__half*>(grads); - params_cast_h = reinterpret_cast<__half*>(_params); - } - - for (size_t t = rounded_size; t < _param_size; t += TILE) { - size_t copy_size = TILE; - if ((t + TILE) > _param_size) copy_size = _param_size - t; - size_t offset = copy_size + t; - if ((t / TILE) >= 2) { hipStreamSynchronize(_streams[_buf_index]); } - -#pragma omp parallel for - for (size_t k = t; k < offset; k++) { - float grad = half_precision ? (float)grads_cast_h[k] : grads[k]; - float param = half_precision ? 
(float)params_cast_h[k] : _params[k]; - float momentum = _exp_avg[k]; - float variance = _exp_avg_sq[k]; - if (_weight_decay > 0 && !_adamw_mode) { grad = param * _weight_decay + grad; } - momentum = momentum * _betta1; - momentum = grad * betta1_minus1 + momentum; - - variance = variance * _betta2; - grad = grad * grad; - variance = grad * betta2_minus1 + variance; - - grad = sqrt(variance); - grad = grad * _bias_correction2 + _eps; - grad = momentum / grad; - if (_weight_decay > 0 && _adamw_mode) { param += w_decay * param; } - param = grad * step_size + param; - if (dev_params) _doubled_buffer[_buf_index][k - t] = param; - - if (half_precision) - params_cast_h[k] = (__half)param; - else - _params[k] = param; - _exp_avg[k] = momentum; - _exp_avg_sq[k] = variance; - } - if (dev_params) { - launch_param_update( - _doubled_buffer[_buf_index], dev_params + t, (copy_size), _streams[_buf_index]); - - _buf_index = !_buf_index; - } - } - } -} - -void Adam_Optimizer::Step_4(float* _params, - float* grads, - float* _exp_avg, - float* _exp_avg_sq, - size_t _param_size, - __half* dev_params, - bool half_precision) -{ - size_t rounded_size = 0; -#if defined(__AVX512__) or defined(__AVX256__) - Step_AVX<4>(&rounded_size, - _params, - grads, - _exp_avg, - _exp_avg_sq, - _param_size, - dev_params, - half_precision); -#endif - if (_param_size > rounded_size) - Step_1((_params + rounded_size), - (grads + rounded_size), - (_exp_avg + rounded_size), - (_exp_avg_sq + rounded_size), - (_param_size - rounded_size), - (dev_params != nullptr ? 
(dev_params + rounded_size) : dev_params), - half_precision); -} - -int create_adam_optimizer(int optimizer_id, - float alpha = 1e-3, - float betta1 = 0.9, - float betta2 = 0.999, - float eps = 1e-8, - float weight_decay = 0, - bool adamw_mode = true, - bool should_log = false) -{ - auto opt = - std::make_shared(alpha, betta1, betta2, eps, weight_decay, adamw_mode); - - s_optimizers[optimizer_id] = opt; - - if (should_log) { - std::string avx_type = ""; -#if defined(__AVX512__) - avx_type = "AVX512"; -#else -#if defined(__AVX256__) - avx_type = "AVX2"; -#else - avx_type = "scalar"; -#endif -#endif - - printf("Adam Optimizer #%d is created with %s arithmetic capability.\n", - optimizer_id, - avx_type.c_str()); - printf("Config: alpha=%f, betas=(%f, %f), weight_decay=%f, adam_w=%d\n", - alpha, - betta1, - betta2, - weight_decay, - (int)adamw_mode); - } - - return 0; -} - -void Adam_Optimizer::Step_8(float* _params, - float* grads, - float* _exp_avg, - float* _exp_avg_sq, - size_t _param_size, - __half* dev_params, - bool half_precision) -{ - size_t rounded_size = 0; -#if defined(__AVX512__) or defined(__AVX256__) - Step_AVX<8>(&rounded_size, - _params, - grads, - _exp_avg, - _exp_avg_sq, - _param_size, - dev_params, - half_precision); -#endif - if (_param_size > rounded_size) - Step_4((_params + rounded_size), - (grads + rounded_size), - (_exp_avg + rounded_size), - (_exp_avg_sq + rounded_size), - (_param_size - rounded_size), - (dev_params != nullptr ? 
(dev_params + rounded_size) : dev_params), - half_precision); -} - -int ds_adam_step(int optimizer_id, - size_t step, - float lr, - float beta1, - float beta2, - float epsilon, - float weight_decay, - bool bias_correction, - torch::Tensor& params, - torch::Tensor& grads, - torch::Tensor& exp_avg, - torch::Tensor& exp_avg_sq) -{ - auto params_c = params.contiguous(); - auto grads_c = grads.contiguous(); - auto exp_avg_c = exp_avg.contiguous(); - auto exp_avg_sq_c = exp_avg_sq.contiguous(); - - // assert(params.options().dtype() == grads.options().dtype()); - - float* params_ptr = (float*)params_c.data_ptr(); - float* grads_ptr = (float*)grads_c.data_ptr(); - float* exp_avg_ptr = (float*)exp_avg_c.data_ptr(); - float* exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr(); - - std::shared_ptr opt = - std::static_pointer_cast(s_optimizers[optimizer_id]); - opt->IncrementStep(step, beta1, beta2); - opt->update_state(lr, epsilon, weight_decay, bias_correction); - - opt->Step_8(params_ptr, - grads_ptr, - exp_avg_ptr, - exp_avg_sq_ptr, - params_c.size(0), - nullptr, - (params.options().dtype() == at::kHalf)); - - opt->SynchronizeStreams(); - return 0; -} - -int ds_adam_step_plus_copy(int optimizer_id, - size_t step, - float lr, - float beta1, - float beta2, - float epsilon, - float weight_decay, - bool bias_correction, - torch::Tensor& params, - torch::Tensor& grads, - torch::Tensor& exp_avg, - torch::Tensor& exp_avg_sq, - torch::Tensor& gpu_params) -{ - auto params_c = params.contiguous(); - auto gpu_params_c = gpu_params.contiguous(); - auto exp_avg_c = exp_avg.contiguous(); - auto exp_avg_sq_c = exp_avg_sq.contiguous(); - auto grads_c = grads.contiguous(); - - float* params_ptr = (float*)params_c.data_ptr(); - float* grads_ptr = (float*)grads_c.data_ptr(); - __half* gpu_params_ptr = (__half*)gpu_params_c.data_ptr(); - float* exp_avg_ptr = (float*)exp_avg_c.data_ptr(); - float* exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr(); - - std::shared_ptr opt = - 
std::static_pointer_cast(s_optimizers[optimizer_id]); - opt->IncrementStep(step, beta1, beta2); - opt->update_state(lr, epsilon, weight_decay, bias_correction); - opt->Step_8(params_ptr, - grads_ptr, - exp_avg_ptr, - exp_avg_sq_ptr, - params_c.size(0), - gpu_params_ptr, - (params.options().dtype() == at::kHalf)); - - opt->SynchronizeStreams(); - return 0; -} - -int destroy_adam_optimizer(int optimizer_id) -{ - s_optimizers.erase(optimizer_id); - - return 0; -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("adam_update", &ds_adam_step, "DeepSpeed CPU Adam update (C++)"); - m.def("adam_update_copy", - &ds_adam_step_plus_copy, - "DeepSpeed CPU Adam update and param copy (C++)"); - m.def("create_adam", &create_adam_optimizer, "DeepSpeed CPU Adam (C++)"); - m.def("destroy_adam", &destroy_adam_optimizer, "DeepSpeed CPU Adam destroy (C++)"); -} diff --git a/deepspeed/ops/csrc/adam/fused_adam_frontend.cpp b/deepspeed/ops/csrc/adam/fused_adam_frontend.cpp deleted file mode 100644 index b06531c..0000000 --- a/deepspeed/ops/csrc/adam/fused_adam_frontend.cpp +++ /dev/null @@ -1,20 +0,0 @@ -#include - -void multi_tensor_adam_cuda(int chunk_size, - at::Tensor noop_flag, - std::vector> tensor_lists, - const float lr, - const float beta1, - const float beta2, - const float epsilon, - const int step, - const int mode, - const int bias_correction, - const float weight_decay); - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("multi_tensor_adam", - &multi_tensor_adam_cuda, - "Compute and apply gradient update to parameters for Adam optimizer"); -} diff --git a/deepspeed/ops/csrc/adam/multi_tensor_adam.cu b/deepspeed/ops/csrc/adam/multi_tensor_adam.cu deleted file mode 100644 index 3cb9763..0000000 --- a/deepspeed/ops/csrc/adam/multi_tensor_adam.cu +++ /dev/null @@ -1,163 +0,0 @@ -/* Copyright 2020 The Microsoft DeepSpeed Team - Copyright NVIDIA/apex - This file is adapted from fused adam in NVIDIA/apex, commit a109f85 -*/ - -#include -#include -#include -#include -// 
Another possibility: -// #include - -#include - -#include "multi_tensor_apply.cuh" -#include "type_shim.h" - -#define BLOCK_SIZE 512 -#define ILP 4 - -typedef enum { - ADAM_MODE_0 = 0, // L2 regularization mode - ADAM_MODE_1 = 1 // Decoupled weight decay mode(AdamW) -} adamMode_t; - -using MATH_T = float; - -template -struct AdamFunctor { - __device__ __forceinline__ void operator()(int chunk_size, - volatile int* noop_gmem, - TensorListMetadata<4>& tl, - const float beta1, - const float beta2, - const float beta1_correction, - const float beta2_correction, - const float epsilon, - const float lr, - adamMode_t mode, - const float decay) - { - // I'd like this kernel to propagate infs/nans. - // if(*noop_gmem == 1) - // return; - - int tensor_loc = tl.block_to_tensor[blockIdx.x]; - - // potentially use to pass in list of scalar - // int tensor_num = tl.start_tensor_this_launch + tensor_loc; - - int chunk_idx = tl.block_to_chunk[blockIdx.x]; - int n = tl.sizes[tensor_loc]; - - T* g = (T*)tl.addresses[0][tensor_loc]; - g += chunk_idx * chunk_size; - - T* p = (T*)tl.addresses[1][tensor_loc]; - p += chunk_idx * chunk_size; - - T* m = (T*)tl.addresses[2][tensor_loc]; - m += chunk_idx * chunk_size; - - T* v = (T*)tl.addresses[3][tensor_loc]; - v += chunk_idx * chunk_size; - - n -= chunk_idx * chunk_size; - - // see note in multi_tensor_scale_kernel.cu - for (int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * ILP) { - MATH_T r_g[ILP]; - MATH_T r_p[ILP]; - MATH_T r_m[ILP]; - MATH_T r_v[ILP]; -#pragma unroll - for (int ii = 0; ii < ILP; ii++) { - int i = i_start + threadIdx.x + ii * blockDim.x; - if (i < n && i < chunk_size) { - r_g[ii] = g[i]; - r_p[ii] = p[i]; - r_m[ii] = m[i]; - r_v[ii] = v[i]; - } else { - r_g[ii] = MATH_T(0); - r_p[ii] = MATH_T(0); - r_m[ii] = MATH_T(0); - r_v[ii] = MATH_T(0); - } - } -#pragma unroll - for (int ii = 0; ii < ILP; ii++) { - if (mode == ADAM_MODE_0) { // L2 - r_g[ii] = r_g[ii] + (decay * r_p[ii]); - r_m[ii] = 
beta1 * r_m[ii] + (1 - beta1) * r_g[ii]; - r_v[ii] = beta2 * r_v[ii] + (1 - beta2) * r_g[ii] * r_g[ii]; - MATH_T next_m_unbiased = r_m[ii] / beta1_correction; - MATH_T next_v_unbiased = r_v[ii] / beta2_correction; - MATH_T denom = sqrtf(next_v_unbiased) + epsilon; - MATH_T update = next_m_unbiased / denom; - r_p[ii] = r_p[ii] - (lr * update); - } else { // weight decay - r_m[ii] = beta1 * r_m[ii] + (1 - beta1) * r_g[ii]; - r_v[ii] = beta2 * r_v[ii] + (1 - beta2) * r_g[ii] * r_g[ii]; - MATH_T next_m_unbiased = r_m[ii] / beta1_correction; - MATH_T next_v_unbiased = r_v[ii] / beta2_correction; - MATH_T denom = sqrtf(next_v_unbiased) + epsilon; - MATH_T update = (next_m_unbiased / denom) + (decay * r_p[ii]); - r_p[ii] = r_p[ii] - (lr * update); - } - } -#pragma unroll - for (int ii = 0; ii < ILP; ii++) { - int i = i_start + threadIdx.x + ii * blockDim.x; - if (i < n && i < chunk_size) { - p[i] = r_p[ii]; - m[i] = r_m[ii]; - v[i] = r_v[ii]; - } - } - } - } -}; - -void multi_tensor_adam_cuda(int chunk_size, - at::Tensor noop_flag, - std::vector> tensor_lists, - const float lr, - const float beta1, - const float beta2, - const float epsilon, - const int step, - const int mode, - const int bias_correction, - const float weight_decay) -{ - using namespace at; - - // Handle bias correction mode - float bias_correction1 = 1.0f, bias_correction2 = 1.0f; - if (bias_correction == 1) { - bias_correction1 = 1 - std::pow(beta1, step); - bias_correction2 = 1 - std::pow(beta2, step); - } - - // Assume single type across p,g,m1,m2 now - DISPATCH_DOUBLE_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), - 0, - "adam", - multi_tensor_apply<4>(BLOCK_SIZE, - chunk_size, - noop_flag, - tensor_lists, - AdamFunctor(), - beta1, - beta2, - bias_correction1, - bias_correction2, - epsilon, - lr, - (adamMode_t)mode, - weight_decay);) - - AT_CUDA_CHECK(cudaGetLastError()); -} diff --git a/deepspeed/ops/csrc/adam/multi_tensor_adam.hip b/deepspeed/ops/csrc/adam/multi_tensor_adam.hip deleted file mode 
100644 index f0b7ced..0000000 --- a/deepspeed/ops/csrc/adam/multi_tensor_adam.hip +++ /dev/null @@ -1,164 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! -/* Copyright 2020 The Microsoft DeepSpeed Team - Copyright NVIDIA/apex - This file is adapted from fused adam in NVIDIA/apex, commit a109f85 -*/ - -#include -#include -#include -#include -// Another possibility: -// #include - -#include - -#include "multi_tensor_apply_hip.cuh" -#include "type_shim_hip.h" - -#define BLOCK_SIZE 512 -#define ILP 4 - -typedef enum { - ADAM_MODE_0 = 0, // L2 regularization mode - ADAM_MODE_1 = 1 // Decoupled weight decay mode(AdamW) -} adamMode_t; - -using MATH_T = float; - -template -struct AdamFunctor { - __device__ __forceinline__ void operator()(int chunk_size, - volatile int* noop_gmem, - TensorListMetadata<4>& tl, - const float beta1, - const float beta2, - const float beta1_correction, - const float beta2_correction, - const float epsilon, - const float lr, - adamMode_t mode, - const float decay) - { - // I'd like this kernel to propagate infs/nans. 
- // if(*noop_gmem == 1) - // return; - - int tensor_loc = tl.block_to_tensor[blockIdx.x]; - - // potentially use to pass in list of scalar - // int tensor_num = tl.start_tensor_this_launch + tensor_loc; - - int chunk_idx = tl.block_to_chunk[blockIdx.x]; - int n = tl.sizes[tensor_loc]; - - T* g = (T*)tl.addresses[0][tensor_loc]; - g += chunk_idx * chunk_size; - - T* p = (T*)tl.addresses[1][tensor_loc]; - p += chunk_idx * chunk_size; - - T* m = (T*)tl.addresses[2][tensor_loc]; - m += chunk_idx * chunk_size; - - T* v = (T*)tl.addresses[3][tensor_loc]; - v += chunk_idx * chunk_size; - - n -= chunk_idx * chunk_size; - - // see note in multi_tensor_scale_kernel.cu - for (int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * ILP) { - MATH_T r_g[ILP]; - MATH_T r_p[ILP]; - MATH_T r_m[ILP]; - MATH_T r_v[ILP]; -#pragma unroll - for (int ii = 0; ii < ILP; ii++) { - int i = i_start + threadIdx.x + ii * blockDim.x; - if (i < n && i < chunk_size) { - r_g[ii] = g[i]; - r_p[ii] = p[i]; - r_m[ii] = m[i]; - r_v[ii] = v[i]; - } else { - r_g[ii] = MATH_T(0); - r_p[ii] = MATH_T(0); - r_m[ii] = MATH_T(0); - r_v[ii] = MATH_T(0); - } - } -#pragma unroll - for (int ii = 0; ii < ILP; ii++) { - if (mode == ADAM_MODE_0) { // L2 - r_g[ii] = r_g[ii] + (decay * r_p[ii]); - r_m[ii] = beta1 * r_m[ii] + (1 - beta1) * r_g[ii]; - r_v[ii] = beta2 * r_v[ii] + (1 - beta2) * r_g[ii] * r_g[ii]; - MATH_T next_m_unbiased = r_m[ii] / beta1_correction; - MATH_T next_v_unbiased = r_v[ii] / beta2_correction; - MATH_T denom = sqrtf(next_v_unbiased) + epsilon; - MATH_T update = next_m_unbiased / denom; - r_p[ii] = r_p[ii] - (lr * update); - } else { // weight decay - r_m[ii] = beta1 * r_m[ii] + (1 - beta1) * r_g[ii]; - r_v[ii] = beta2 * r_v[ii] + (1 - beta2) * r_g[ii] * r_g[ii]; - MATH_T next_m_unbiased = r_m[ii] / beta1_correction; - MATH_T next_v_unbiased = r_v[ii] / beta2_correction; - MATH_T denom = sqrtf(next_v_unbiased) + epsilon; - MATH_T update = (next_m_unbiased / denom) + (decay 
* r_p[ii]); - r_p[ii] = r_p[ii] - (lr * update); - } - } -#pragma unroll - for (int ii = 0; ii < ILP; ii++) { - int i = i_start + threadIdx.x + ii * blockDim.x; - if (i < n && i < chunk_size) { - p[i] = r_p[ii]; - m[i] = r_m[ii]; - v[i] = r_v[ii]; - } - } - } - } -}; - -void multi_tensor_adam_cuda(int chunk_size, - at::Tensor noop_flag, - std::vector> tensor_lists, - const float lr, - const float beta1, - const float beta2, - const float epsilon, - const int step, - const int mode, - const int bias_correction, - const float weight_decay) -{ - using namespace at; - - // Handle bias correction mode - float bias_correction1 = 1.0f, bias_correction2 = 1.0f; - if (bias_correction == 1) { - bias_correction1 = 1 - ::pow(beta1, step); - bias_correction2 = 1 - ::pow(beta2, step); - } - - // Assume single type across p,g,m1,m2 now - DISPATCH_DOUBLE_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), - 0, - "adam", - multi_tensor_apply<4>(BLOCK_SIZE, - chunk_size, - noop_flag, - tensor_lists, - AdamFunctor(), - beta1, - beta2, - bias_correction1, - bias_correction2, - epsilon, - lr, - (adamMode_t)mode, - weight_decay);) - - AT_CUDA_CHECK(hipGetLastError()); -} diff --git a/deepspeed/ops/csrc/adam/multi_tensor_apply.cuh b/deepspeed/ops/csrc/adam/multi_tensor_apply.cuh deleted file mode 100644 index 13af4b7..0000000 --- a/deepspeed/ops/csrc/adam/multi_tensor_apply.cuh +++ /dev/null @@ -1,127 +0,0 @@ -/* Copyright 2020 The Microsoft DeepSpeed Team - Copyright NVIDIA/apex - This file is adapted from fused adam in NVIDIA/apex, commit a109f85 -*/ - -#include -#include -#include -#include -#include -#include "compat.h" - -#include - -// #include - -// This header is the one-stop shop for all your multi-tensor apply needs. 
- -// TODO: Kernel arg size limit may be <4KB for some other cards (ie Jetson) -constexpr int depth_to_max_tensors[5] = {110, 64, 48, 36, 30}; -constexpr int depth_to_max_blocks[5] = {320, 320, 320, 320, 320}; - -template -struct TensorListMetadata { - void* addresses[n][depth_to_max_tensors[n - 1]]; - int sizes[depth_to_max_tensors[n - 1]]; - unsigned char block_to_tensor[depth_to_max_blocks[n - 1]]; - int block_to_chunk[depth_to_max_blocks[n - 1]]; // I fear this needs to be a full int. - int start_tensor_this_launch; -}; - -template -__global__ void multi_tensor_apply_kernel(int chunk_size, - volatile int* noop_flag, - T tl, - U callable, - ArgTypes... args) -{ - // Hand the chunk information to the user-supplied functor to process however it likes. - callable(chunk_size, noop_flag, tl, args...); -} - -template -void multi_tensor_apply(int block_size, - int chunk_size, - const at::Tensor& noop_flag, - const std::vector>& tensor_lists, - T callable, - ArgTypes... args) -{ - TORCH_CHECK(tensor_lists.size() == depth, "tensor_lists.size() != depth"); - int len0 = tensor_lists[0].size(); - TORCH_CHECK(len0 > 0, "tensor_lists[0].size() is not > 0"); - auto ref_device = tensor_lists[0][0].device(); - TORCH_CHECK(ref_device.type() == at::kCUDA, "expected input to be on cuda"); - for (int l = 0; l < tensor_lists.size(); l++) // No range-based for because I need indices - { - TORCH_CHECK(tensor_lists[l].size() == len0, "Size mismatch among tensor lists"); - for (int t = 0; t < tensor_lists[l].size(); t++) { - // TODO: Print which tensor fails. 
- bool contiguous_memory = tensor_lists[l][t].is_contiguous(); -#ifdef VERSION_GE_1_5 - contiguous_memory = (contiguous_memory || - tensor_lists[l][t].is_contiguous(at::MemoryFormat::ChannelsLast)); -#endif - TORCH_CHECK(contiguous_memory, "A tensor was not contiguous."); - TORCH_CHECK(tensor_lists[l][t].device() == ref_device, - "A tensor was not on the same device as the first tensor"); - TORCH_CHECK(tensor_lists[l][t].numel() == tensor_lists[0][t].numel(), "Size mismatch"); - } - } - - int ntensors = tensor_lists[0].size(); - - TensorListMetadata tl; - - const at::cuda::OptionalCUDAGuard device_guard(device_of(tensor_lists[0][0])); - auto stream = at::cuda::getCurrentCUDAStream(); - - tl.start_tensor_this_launch = 0; - int loc_block_info = 0; - int loc_tensor_info = 0; - for (int t = 0; t < ntensors; t++) { - tl.sizes[loc_tensor_info] = tensor_lists[0][t].numel(); - for (int d = 0; d < depth; d++) - tl.addresses[d][loc_tensor_info] = tensor_lists[d][t].data_ptr(); - loc_tensor_info++; - - int chunks_this_tensor = (tensor_lists[0][t].numel() + chunk_size - 1) / chunk_size; - - for (int chunk = 0; chunk < chunks_this_tensor; chunk++) { - // std::cout << chunks_this_tensor << std::endl; - tl.block_to_tensor[loc_block_info] = loc_tensor_info - 1; - tl.block_to_chunk[loc_block_info] = chunk; - loc_block_info++; - - bool tensors_full = (loc_tensor_info == depth_to_max_tensors[depth - 1] && - chunk == chunks_this_tensor - 1); - bool blocks_full = (loc_block_info == depth_to_max_blocks[depth - 1]); - bool last_chunk = (t == ntensors - 1 && chunk == chunks_this_tensor - 1); - if (tensors_full || blocks_full || last_chunk) { - // using accscalar_t = acc_type; - multi_tensor_apply_kernel<<>>( - chunk_size, noop_flag.DATA_PTR(), tl, callable, args...); - - AT_CUDA_CHECK(cudaGetLastError()); - - // Reset. The control flow possibilities here make my brain hurt. 
- loc_block_info = 0; - if (chunk == chunks_this_tensor - 1) { - // std::cout << "Hit case 1 " << cond1 << " " << cond2 << " " << cond3 << - // std::endl; - loc_tensor_info = 0; - tl.start_tensor_this_launch = t + 1; - } else { - // std::cout << "Hit case 2 " << cond1 << " " << cond2 << " " << cond3 << - // std::endl; - tl.sizes[0] = tl.sizes[loc_tensor_info - 1]; - for (int d = 0; d < depth; d++) - tl.addresses[d][0] = tl.addresses[d][loc_tensor_info - 1]; - loc_tensor_info = 1; - tl.start_tensor_this_launch = t; - } - } - } - } -} diff --git a/deepspeed/ops/csrc/adam/multi_tensor_apply_hip.cuh b/deepspeed/ops/csrc/adam/multi_tensor_apply_hip.cuh deleted file mode 100644 index 09bc997..0000000 --- a/deepspeed/ops/csrc/adam/multi_tensor_apply_hip.cuh +++ /dev/null @@ -1,129 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! -#include "hip/hip_runtime.h" -/* Copyright 2020 The Microsoft DeepSpeed Team - Copyright NVIDIA/apex - This file is adapted from fused adam in NVIDIA/apex, commit a109f85 -*/ - -#include -#include -#include -#include -#include -#include "compat.h" - -#include - -// #include - -// This header is the one-stop shop for all your multi-tensor apply needs. - -// TODO: Kernel arg size limit may be <4KB for some other cards (ie Jetson) -constexpr int depth_to_max_tensors[5] = {110, 64, 48, 36, 30}; -constexpr int depth_to_max_blocks[5] = {320, 320, 320, 320, 320}; - -template -struct TensorListMetadata { - void* addresses[n][depth_to_max_tensors[n - 1]]; - int sizes[depth_to_max_tensors[n - 1]]; - unsigned char block_to_tensor[depth_to_max_blocks[n - 1]]; - int block_to_chunk[depth_to_max_blocks[n - 1]]; // I fear this needs to be a full int. - int start_tensor_this_launch; -}; - -template -__global__ void multi_tensor_apply_kernel(int chunk_size, - volatile int* noop_flag, - T tl, - U callable, - ArgTypes... args) -{ - // Hand the chunk information to the user-supplied functor to process however it likes. 
- callable(chunk_size, noop_flag, tl, args...); -} - -template -void multi_tensor_apply(int block_size, - int chunk_size, - const at::Tensor& noop_flag, - const std::vector>& tensor_lists, - T callable, - ArgTypes... args) -{ - TORCH_CHECK(tensor_lists.size() == depth, "tensor_lists.size() != depth"); - int len0 = tensor_lists[0].size(); - TORCH_CHECK(len0 > 0, "tensor_lists[0].size() is not > 0"); - auto ref_device = tensor_lists[0][0].device(); - TORCH_CHECK(ref_device.type() == at::kCUDA, "expected input to be on cuda"); - for (int l = 0; l < tensor_lists.size(); l++) // No range-based for because I need indices - { - TORCH_CHECK(tensor_lists[l].size() == len0, "Size mismatch among tensor lists"); - for (int t = 0; t < tensor_lists[l].size(); t++) { - // TODO: Print which tensor fails. - bool contiguous_memory = tensor_lists[l][t].is_contiguous(); -#ifdef VERSION_GE_1_5 - contiguous_memory = (contiguous_memory || - tensor_lists[l][t].is_contiguous(at::MemoryFormat::ChannelsLast)); -#endif - TORCH_CHECK(contiguous_memory, "A tensor was not contiguous."); - TORCH_CHECK(tensor_lists[l][t].device() == ref_device, - "A tensor was not on the same device as the first tensor"); - TORCH_CHECK(tensor_lists[l][t].numel() == tensor_lists[0][t].numel(), "Size mismatch"); - } - } - - int ntensors = tensor_lists[0].size(); - - TensorListMetadata tl; - - const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(device_of(tensor_lists[0][0])); - auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); - - tl.start_tensor_this_launch = 0; - int loc_block_info = 0; - int loc_tensor_info = 0; - for (int t = 0; t < ntensors; t++) { - tl.sizes[loc_tensor_info] = tensor_lists[0][t].numel(); - for (int d = 0; d < depth; d++) - tl.addresses[d][loc_tensor_info] = tensor_lists[d][t].data_ptr(); - loc_tensor_info++; - - int chunks_this_tensor = (tensor_lists[0][t].numel() + chunk_size - 1) / chunk_size; - - for (int chunk = 0; chunk < chunks_this_tensor; chunk++) { - // 
std::cout << chunks_this_tensor << std::endl; - tl.block_to_tensor[loc_block_info] = loc_tensor_info - 1; - tl.block_to_chunk[loc_block_info] = chunk; - loc_block_info++; - - bool tensors_full = (loc_tensor_info == depth_to_max_tensors[depth - 1] && - chunk == chunks_this_tensor - 1); - bool blocks_full = (loc_block_info == depth_to_max_blocks[depth - 1]); - bool last_chunk = (t == ntensors - 1 && chunk == chunks_this_tensor - 1); - if (tensors_full || blocks_full || last_chunk) { - // using accscalar_t = acc_type; - hipLaunchKernelGGL(( multi_tensor_apply_kernel), dim3(loc_block_info), dim3(block_size), 0, stream, - chunk_size, noop_flag.DATA_PTR(), tl, callable, args...); - - AT_CUDA_CHECK(hipGetLastError()); - - // Reset. The control flow possibilities here make my brain hurt. - loc_block_info = 0; - if (chunk == chunks_this_tensor - 1) { - // std::cout << "Hit case 1 " << cond1 << " " << cond2 << " " << cond3 << - // std::endl; - loc_tensor_info = 0; - tl.start_tensor_this_launch = t + 1; - } else { - // std::cout << "Hit case 2 " << cond1 << " " << cond2 << " " << cond3 << - // std::endl; - tl.sizes[0] = tl.sizes[loc_tensor_info - 1]; - for (int d = 0; d < depth; d++) - tl.addresses[d][0] = tl.addresses[d][loc_tensor_info - 1]; - loc_tensor_info = 1; - tl.start_tensor_this_launch = t; - } - } - } - } -} diff --git a/deepspeed/ops/csrc/aio/common/deepspeed_aio_common.cpp b/deepspeed/ops/csrc/aio/common/deepspeed_aio_common.cpp deleted file mode 100644 index 9e405d8..0000000 --- a/deepspeed/ops/csrc/aio/common/deepspeed_aio_common.cpp +++ /dev/null @@ -1,333 +0,0 @@ -/* -Copyright 2020 The Microsoft DeepSpeed Team -Licensed under the MIT license. - -Functionality for swapping optimizer tensors to/from (NVMe) storage devices. 
-*/ - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "deepspeed_aio_common.h" - -using namespace std; -using namespace std::chrono; - -#define DEBUG_DS_AIO_PERF 0 -#define DEBUG_DS_AIO_SUBMIT_PERF 0 - -static const std::string c_library_name = "deepspeed_aio"; - -static void _report_aio_statistics(const char* tag, - const std::vector>& latencies) - __attribute__((unused)); - -static void _report_aio_statistics(const char* tag, - const std::vector>& latencies) -{ - std::vector lat_usec; - for (auto& lat : latencies) { lat_usec.push_back(lat.count() * 1e6); } - const auto min_lat = *(std::min_element(lat_usec.begin(), lat_usec.end())); - const auto max_lat = *(std::max_element(lat_usec.begin(), lat_usec.end())); - const auto avg_lat = std::accumulate(lat_usec.begin(), lat_usec.end(), 0) / lat_usec.size(); - - std::cout << c_library_name << ": latency statistics(usec) " << tag - << " min/max/avg = " << min_lat << " " << max_lat << " " << avg_lat << std::endl; -} - -static void _get_aio_latencies(std::vector>& raw_latencies, - struct deepspeed_aio_latency_t& summary_latencies) -{ - std::vector lat_usec; - for (auto& lat : raw_latencies) { lat_usec.push_back(lat.count() * 1e6); } - summary_latencies._min_usec = *(std::min_element(lat_usec.begin(), lat_usec.end())); - summary_latencies._max_usec = *(std::max_element(lat_usec.begin(), lat_usec.end())); - summary_latencies._avg_usec = - std::accumulate(lat_usec.begin(), lat_usec.end(), 0) / lat_usec.size(); -} - -static void _do_io_submit_singles(const long long int n_iocbs, - const long long int iocb_index, - std::unique_ptr& aio_ctxt, - std::vector>& submit_times) -{ - for (auto i = 0; i < n_iocbs; ++i) { - const auto st = std::chrono::high_resolution_clock::now(); - const auto submit_ret = io_submit(aio_ctxt->_io_ctxt, 1, aio_ctxt->_iocbs.data() + i); - 
submit_times.push_back(std::chrono::high_resolution_clock::now() - st); -#if DEBUG_DS_AIO_SUBMIT_PERF - printf("submit(usec) %f io_index=%lld buf=%p len=%lu off=%llu \n", - submit_times.back().count() * 1e6, - iocb_index, - aio_ctxt->_iocbs[i]->u.c.buf, - aio_ctxt->_iocbs[i]->u.c.nbytes, - aio_ctxt->_iocbs[i]->u.c.offset); -#endif - assert(submit_ret > 0); - } -} - -static void _do_io_submit_block(const long long int n_iocbs, - const long long int iocb_index, - std::unique_ptr& aio_ctxt, - std::vector>& submit_times) -{ - const auto st = std::chrono::high_resolution_clock::now(); - const auto submit_ret = io_submit(aio_ctxt->_io_ctxt, n_iocbs, aio_ctxt->_iocbs.data()); - submit_times.push_back(std::chrono::high_resolution_clock::now() - st); -#if DEBUG_DS_AIO_SUBMIT_PERF - printf("submit(usec) %f io_index=%lld nr=%lld buf=%p len=%lu off=%llu \n", - submit_times.back().count() * 1e6, - iocb_index, - n_iocbs, - aio_ctxt->_iocbs[0]->u.c.buf, - aio_ctxt->_iocbs[0]->u.c.nbytes, - aio_ctxt->_iocbs[0]->u.c.offset); -#endif - assert(submit_ret > 0); -} - -static int _do_io_complete(const long long int min_completes, - const long long int max_completes, - std::unique_ptr& aio_ctxt, - std::vector>& reap_times) -{ - const auto start_time = std::chrono::high_resolution_clock::now(); - const auto n_completes = io_getevents( - aio_ctxt->_io_ctxt, min_completes, max_completes, aio_ctxt->_io_events.data(), nullptr); - reap_times.push_back(std::chrono::high_resolution_clock::now() - start_time); - - assert(n_completes >= min_completes); - return n_completes; -} - -void do_aio_operation_sequential(const bool read_op, - std::unique_ptr& aio_ctxt, - std::unique_ptr& xfer_ctxt, - deepspeed_aio_config_t* config, - deepspeed_aio_perf_t* perf) -{ - struct io_prep_context prep_ctxt(read_op, xfer_ctxt, aio_ctxt->_block_size, &aio_ctxt->_iocbs); - - const auto num_io_blocks = static_cast( - ceil(static_cast(xfer_ctxt->_num_bytes) / aio_ctxt->_block_size)); -#if DEBUG_DS_AIO_PERF - const auto 
io_op_name = std::string(read_op ? "read" : "write"); - std::cout << c_library_name << ": start " << io_op_name << " " << xfer_ctxt->_num_bytes - << " bytes with " << num_io_blocks << " io blocks" << std::endl; -#endif - - std::vector> submit_times; - std::vector> reap_times; - const auto max_queue_bytes = - static_cast(aio_ctxt->_queue_depth * aio_ctxt->_block_size); - - auto start = std::chrono::high_resolution_clock::now(); - for (long long iocb_index = 0; iocb_index < num_io_blocks; - iocb_index += aio_ctxt->_queue_depth) { - const auto start_offset = iocb_index * aio_ctxt->_block_size; - const auto start_buffer = (char*)xfer_ctxt->_mem_buffer + start_offset; - const auto n_iocbs = - min(static_cast(aio_ctxt->_queue_depth), (num_io_blocks - iocb_index)); - const auto num_bytes = min(max_queue_bytes, (xfer_ctxt->_num_bytes - start_offset)); - prep_ctxt.prep_iocbs(n_iocbs, num_bytes, start_buffer, start_offset); - - if (config->_single_submit) { - _do_io_submit_singles(n_iocbs, iocb_index, aio_ctxt, submit_times); - } else { - _do_io_submit_block(n_iocbs, iocb_index, aio_ctxt, submit_times); - } - - _do_io_complete(n_iocbs, n_iocbs, aio_ctxt, reap_times); - } - const std::chrono::duration elapsed = std::chrono::high_resolution_clock::now() - start; - - if (perf) { - _get_aio_latencies(submit_times, perf->_submit); - _get_aio_latencies(reap_times, perf->_complete); - perf->_e2e_usec = elapsed.count() * 1e6; - perf->_e2e_rate_GB = (xfer_ctxt->_num_bytes / elapsed.count() / 1e9); - } - -#if DEBUG_DS_AIO_PERF - _report_aio_statistics("submit", submit_times); - _report_aio_statistics("complete", reap_times); -#endif - -#if DEBUG_DS_AIO_PERF - std::cout << c_library_name << ": runtime(usec) " << elapsed.count() * 1e6 - << " rate(GB/sec) = " << (xfer_ctxt->_num_bytes / elapsed.count() / 1e9) << std::endl; -#endif - -#if DEBUG_DS_AIO_PERF - std::cout << c_library_name << ": finish " << io_op_name << " " << xfer_ctxt->_num_bytes - << " bytes " << std::endl; -#endif -} - 
-void do_aio_operation_overlap(const bool read_op, - std::unique_ptr& aio_ctxt, - std::unique_ptr& xfer_ctxt, - deepspeed_aio_config_t* config, - deepspeed_aio_perf_t* perf) -{ - struct io_prep_generator io_gen(read_op, xfer_ctxt, aio_ctxt->_block_size); - -#if DEBUG_DS_AIO_PERF - const auto io_op_name = std::string(read_op ? "read" : "write"); - std::cout << c_library_name << ": start " << io_op_name << " " << xfer_ctxt->_num_bytes - << " bytes with " << io_gen._num_io_blocks << " io blocks" << std::endl; -#endif - - std::vector> submit_times; - std::vector> reap_times; - - auto request_iocbs = aio_ctxt->_queue_depth; - auto n_pending_iocbs = 0; - const auto min_completes = 1; - auto start = std::chrono::high_resolution_clock::now(); - while (true) { - const auto n_iocbs = io_gen.prep_iocbs(request_iocbs - n_pending_iocbs, &aio_ctxt->_iocbs); - if (n_iocbs > 0) { - if (config->_single_submit) { - _do_io_submit_singles( - n_iocbs, (io_gen._next_iocb_index - n_iocbs), aio_ctxt, submit_times); - } else { - _do_io_submit_block( - n_iocbs, (io_gen._next_iocb_index - n_iocbs), aio_ctxt, submit_times); - } - } - - n_pending_iocbs += n_iocbs; - assert(n_pending_iocbs <= aio_ctxt->_queue_depth); - - if (n_pending_iocbs == 0) { break; } - - const auto n_complete = - _do_io_complete(min_completes, n_pending_iocbs, aio_ctxt, reap_times); - n_pending_iocbs -= n_complete; - } - - const std::chrono::duration elapsed = std::chrono::high_resolution_clock::now() - start; - - if (perf) { - _get_aio_latencies(submit_times, perf->_submit); - _get_aio_latencies(reap_times, perf->_complete); - perf->_e2e_usec = elapsed.count() * 1e6; - perf->_e2e_rate_GB = (xfer_ctxt->_num_bytes / elapsed.count() / 1e9); - } - -#if DEBUG_DS_AIO_PERF - _report_aio_statistics("submit", submit_times); - _report_aio_statistics("complete", reap_times); -#endif - -#if DEBUG_DS_AIO_PERF - std::cout << c_library_name << ": runtime(usec) " << elapsed.count() * 1e6 - << " rate(GB/sec) = " << 
(xfer_ctxt->_num_bytes / elapsed.count() / 1e9) << std::endl; -#endif - -#if DEBUG_DS_AIO_PERF - std::cout << c_library_name << ": finish " << io_op_name << " " << xfer_ctxt->_num_bytes - << " bytes " << std::endl; -#endif -} - -void report_file_error(const char* filename, const std::string file_op, const int error_code) -{ - std::string err_msg = file_op + std::string(" failed on ") + std::string(filename) + - " error = " + std::to_string(error_code); - std::cerr << c_library_name << ": " << err_msg << std::endl; -} - -int open_file(const char* filename, const bool read_op) -{ - const int flags = read_op ? (O_RDONLY | __O_DIRECT) : (O_WRONLY | O_CREAT | __O_DIRECT); - const int mode = 0600; - const auto fd = open(filename, flags, mode); - if (fd == -1) { - const auto error_code = errno; - const auto error_msg = read_op ? " open for read " : " open for write "; - report_file_error(filename, error_msg, error_code); - return -1; - } - return fd; -} - -int regular_read(const char* filename, std::vector& buffer) -{ - long long int num_bytes; - const auto f_size = get_file_size(filename, num_bytes); - assert(f_size != -1); - buffer.resize(num_bytes); - const auto fd = open(filename, O_RDONLY, 0600); - assert(fd != -1); - long long int read_bytes = 0; - auto r = 0; - do { - const auto buffer_ptr = buffer.data() + read_bytes; - const auto bytes_to_read = num_bytes - read_bytes; - r = read(fd, buffer_ptr, bytes_to_read); - read_bytes += r; - } while (r > 0); - - if (read_bytes != num_bytes) { - std::cerr << "read error " - << " read_bytes (read) = " << read_bytes << " num_bytes (fstat) = " << num_bytes - << std::endl; - } - assert(read_bytes == num_bytes); - close(fd); - return 0; -} - -static bool _validate_buffer(const char* filename, void* aio_buffer, const long long int num_bytes) -{ - std::vector regular_buffer; - const auto reg_ret = regular_read(filename, regular_buffer); - assert(0 == reg_ret); - std::cout << "regular read of " << filename << " returned " << 
regular_buffer.size() << " bytes" - << std::endl; - - if (static_cast(regular_buffer.size()) != num_bytes) { return false; } - - return (0 == memcmp(aio_buffer, regular_buffer.data(), regular_buffer.size())); -} - -bool validate_aio_operation(const bool read_op, - const char* filename, - void* aio_buffer, - const long long int num_bytes) -{ - const auto msg_suffix = std::string("deepspeed_aio_") + - std::string(read_op ? "read()" : "write()") + - std::string("using read()"); - - if (false == _validate_buffer(filename, aio_buffer, num_bytes)) { - std::cout << "Fail: correctness of " << msg_suffix << std::endl; - return false; - } - - std::cout << "Pass: correctness of " << msg_suffix << std::endl; - return true; -} diff --git a/deepspeed/ops/csrc/aio/common/deepspeed_aio_common.h b/deepspeed/ops/csrc/aio/common/deepspeed_aio_common.h deleted file mode 100644 index cc62d33..0000000 --- a/deepspeed/ops/csrc/aio/common/deepspeed_aio_common.h +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2020 The Microsoft DeepSpeed Team -Licensed under the MIT license. - -Functionality for swapping optimizer tensors to/from (NVMe) storage devices. 
-*/ - -#include -#include -#include -#include - -using namespace std; - -void do_aio_operation_sequential(const bool read_op, - std::unique_ptr& aio_ctxt, - std::unique_ptr& xfer_ctxt, - deepspeed_aio_config_t* config, - deepspeed_aio_perf_t* perf); - -void do_aio_operation_overlap(const bool read_op, - std::unique_ptr& aio_ctxt, - std::unique_ptr& xfer_ctxt, - deepspeed_aio_config_t* config, - deepspeed_aio_perf_t* perf); - -int open_file(const char* filename, const bool read_op); - -void report_file_error(const char* filename, const std::string file_op, const int error_code); - -int regular_read(const char* filename, std::vector& buffer); - -bool validate_aio_operation(const bool read_op, - const char* filename, - void* aio_buffer, - const long long int num_bytes); diff --git a/deepspeed/ops/csrc/aio/common/deepspeed_aio_types.cpp b/deepspeed/ops/csrc/aio/common/deepspeed_aio_types.cpp deleted file mode 100644 index e5811bb..0000000 --- a/deepspeed/ops/csrc/aio/common/deepspeed_aio_types.cpp +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2020 The Microsoft DeepSpeed Team -Licensed under the MIT license. - -Functionality for swapping optimizer tensors to/from (NVMe) storage devices. 
-*/ - -#include - -#include "deepspeed_aio_utils.h" - -using namespace std; - -const int c_block_size = 128 * 1024; -const int c_io_queue_depth = 8; - -deepspeed_aio_config_t::deepspeed_aio_config_t() - : _block_size(c_block_size), - _queue_depth(c_io_queue_depth), - _single_submit(false), - _overlap_events(false), - _lock_memory(false) -{ -} - -deepspeed_aio_config_t::deepspeed_aio_config_t(const int block_size, - const int queue_depth, - const bool single_submit, - const bool overlap_events, - const bool lock_memory) - : _block_size(block_size), - _queue_depth(queue_depth), - _single_submit(single_submit), - _overlap_events(overlap_events), - _lock_memory(lock_memory) -{ -} - -void deepspeed_aio_latency_t::dump(const std::string tag) -{ - std::cout << tag << _min_usec << " " << _max_usec << " " << _avg_usec << " " << std::endl; -} - -void deepspeed_aio_latency_t::accumulate(const struct deepspeed_aio_latency_t& other) -{ - _min_usec += other._min_usec; - _max_usec += other._max_usec; - _avg_usec += other._avg_usec; -} - -void deepspeed_aio_latency_t::scale(const float scaler) -{ - _min_usec *= scaler; - _max_usec *= scaler; - _avg_usec *= scaler; -} - -aio_context::aio_context(const int block_size, const int queue_depth) -{ - _block_size = block_size; - _queue_depth = queue_depth; - for (auto i = 0; i < queue_depth; ++i) { - _iocbs.push_back((struct iocb*)calloc(1, sizeof(struct iocb))); - } - _io_events.resize(queue_depth); - io_queue_init(queue_depth, &_io_ctxt); -} - -aio_context::~aio_context() -{ - for (auto& iocb : _iocbs) { free(iocb); } - _io_events.resize(0); - io_queue_release(_io_ctxt); -} diff --git a/deepspeed/ops/csrc/aio/common/deepspeed_aio_types.h b/deepspeed/ops/csrc/aio/common/deepspeed_aio_types.h deleted file mode 100644 index be3b352..0000000 --- a/deepspeed/ops/csrc/aio/common/deepspeed_aio_types.h +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2020 The Microsoft DeepSpeed Team -Licensed under the MIT license. 
- -Functionality for swapping optimizer tensors to/from (NVMe) storage devices. -*/ - -#include -#include - -#include -#include - -using namespace std; - -struct deepspeed_aio_latency_t { - double _min_usec; - double _max_usec; - double _avg_usec; - - void dump(const std::string tag); - void accumulate(const deepspeed_aio_latency_t&); - void scale(const float value); -}; - -struct deepspeed_aio_perf_t { - deepspeed_aio_latency_t _submit; - deepspeed_aio_latency_t _complete; - double _e2e_usec; - double _e2e_rate_GB; -}; - -struct deepspeed_aio_config_t { - const int _block_size; - const int _queue_depth; - const bool _single_submit; - const bool _overlap_events; - const bool _lock_memory; - - deepspeed_aio_config_t(); - deepspeed_aio_config_t(const int block_size, - const int queue_depth, - const bool single_submit, - const bool overlap_events, - const bool lock_memory); -}; - -struct aio_context { - io_context_t _io_ctxt; - std::vector _io_events; - std::vector _iocbs; - int _block_size; - int _queue_depth; - - aio_context(const int block_size, const int queue_depth); - ~aio_context(); -}; diff --git a/deepspeed/ops/csrc/aio/common/deepspeed_aio_utils.cpp b/deepspeed/ops/csrc/aio/common/deepspeed_aio_utils.cpp deleted file mode 100644 index 200c703..0000000 --- a/deepspeed/ops/csrc/aio/common/deepspeed_aio_utils.cpp +++ /dev/null @@ -1,123 +0,0 @@ -/* -Copyright 2020 The Microsoft DeepSpeed Team -Licensed under the MIT license. - -Functionality for swapping optimizer tensors to/from (NVMe) storage devices. 
-*/ - -#include - -#include "deepspeed_aio_utils.h" - -using namespace std; - -const int c_block_size = 128 * 1024; -const int c_io_queue_depth = 8; - -io_xfer_ctxt::io_xfer_ctxt(const int fd, - const long long int file_offset, - const long long int num_bytes, - const void* buffer) - : _fd(fd), _base_offset(file_offset), _mem_buffer(buffer), _num_bytes(num_bytes) -{ -} - -io_prep_context::io_prep_context(const bool read_op, - const std::unique_ptr& xfer_ctxt, - const size_t block_size, - const std::vector* iocbs) - : _read_op(read_op), _xfer_ctxt(xfer_ctxt), _block_size(block_size), _iocbs(iocbs) -{ -} - -void io_prep_context::prep_iocbs(const int n_iocbs, - const size_t num_bytes, - const void* start_buffer, - const long long int start_offset) -{ - assert(static_cast(n_iocbs) <= _iocbs->size()); - for (auto i = 0; i < n_iocbs; ++i) { - const auto shift = i * _block_size; - const auto xfer_buffer = (char*)start_buffer + _xfer_ctxt->_base_offset + shift; - const auto xfer_offset = _xfer_ctxt->_base_offset + start_offset + shift; - auto byte_count = _block_size; - if ((shift + _block_size) > num_bytes) { byte_count = num_bytes - shift; } - - if (_read_op) { - io_prep_pread(_iocbs->at(i), _xfer_ctxt->_fd, xfer_buffer, byte_count, xfer_offset); - } else { - io_prep_pwrite(_iocbs->at(i), _xfer_ctxt->_fd, xfer_buffer, byte_count, xfer_offset); - } - } -} - -io_prep_generator::io_prep_generator(const bool read_op, - const std::unique_ptr& xfer_ctxt, - const size_t block_size) - : _read_op(read_op), - _xfer_ctxt(xfer_ctxt), - _block_size(block_size), - _remaining_bytes(xfer_ctxt->_num_bytes), - _next_iocb_index(0) -{ - _num_io_blocks = - static_cast(ceil(static_cast(xfer_ctxt->_num_bytes) / block_size)); - _remaining_io_blocks = _num_io_blocks; -} - -int io_prep_generator::prep_iocbs(const int n_iocbs, std::vector* iocbs) -{ - if ((_remaining_bytes) == 0 || (_remaining_io_blocks == 0)) { - assert(static_cast(_remaining_bytes) == _remaining_io_blocks); - return 0; - } - - 
assert(static_cast(n_iocbs) <= iocbs->size()); - - auto actual_n_iocbs = min(static_cast(n_iocbs), _remaining_io_blocks); - for (auto i = 0; i < actual_n_iocbs; ++i, ++_next_iocb_index) { - const auto xfer_offset = _xfer_ctxt->_base_offset + (_next_iocb_index * _block_size); - const auto xfer_buffer = (char*)_xfer_ctxt->_mem_buffer + xfer_offset; - const auto num_bytes = min(static_cast(_block_size), _remaining_bytes); - - if (_read_op) { - io_prep_pread(iocbs->at(i), _xfer_ctxt->_fd, xfer_buffer, num_bytes, xfer_offset); - } else { - io_prep_pwrite(iocbs->at(i), _xfer_ctxt->_fd, xfer_buffer, num_bytes, xfer_offset); - } - _remaining_bytes -= num_bytes; - } - _remaining_io_blocks -= actual_n_iocbs; - - return actual_n_iocbs; -} - -int get_file_size(const char* filename, long long int& size) -{ - struct stat st; - if (stat(filename, &st) == -1) { return -1; } - size = st.st_size; - return 0; -} - -void* ds_page_aligned_alloc(const size_t size, const bool lock) -{ - void* ptr; - int retval; - - retval = posix_memalign(&ptr, (size_t)sysconf(_SC_PAGESIZE), size); - if (retval) { return nullptr; } - - if (lock == false) { return ptr; } - - auto mlock_ret = mlock(ptr, size); - if (mlock_ret != 0) { - auto mlock_error = errno; - printf("mlock failed with %d %s\n", mlock_error, strerror(mlock_error)); - - free(ptr); - return nullptr; - } - - return ptr; -} diff --git a/deepspeed/ops/csrc/aio/common/deepspeed_aio_utils.h b/deepspeed/ops/csrc/aio/common/deepspeed_aio_utils.h deleted file mode 100644 index 6c59527..0000000 --- a/deepspeed/ops/csrc/aio/common/deepspeed_aio_utils.h +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright 2020 The Microsoft DeepSpeed Team -Licensed under the MIT license. - -Functionality for swapping optimizer tensors to/from (NVMe) storage devices. 
-*/ - -#pragma once - -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -struct io_xfer_ctxt { - const int _fd; - const long long int _base_offset; - const void* _mem_buffer; - const long long int _num_bytes; - - io_xfer_ctxt(const int fd, - const long long int file_offset, - const long long int num_bytes, - const void* buffer); -}; - -struct io_prep_context { - const bool _read_op; - const std::unique_ptr& _xfer_ctxt; - const size_t _block_size; - const std::vector* _iocbs; - - io_prep_context(const bool read_op, - const std::unique_ptr& xfer_ctxt, - const size_t block_size, - const std::vector* iocbs); - - void prep_iocbs(const int n_iocbs, - const size_t num_bytes, - const void* start_buffer, - const long long int start_offset); -}; - -struct io_prep_generator { - const bool _read_op; - const std::unique_ptr& _xfer_ctxt; - const size_t _block_size; - - long long int _remaining_bytes; - long long int _num_io_blocks; - long long int _remaining_io_blocks; - long long int _next_iocb_index; - - io_prep_generator(const bool read_op, - const std::unique_ptr& xfer_ctxt, - const size_t block_size); - - int prep_iocbs(const int n_iocbs, std::vector* iocbs); -}; - -void* ds_page_aligned_alloc(const size_t size, const bool lock = false); - -int get_file_size(const char* filename, long long int& size); diff --git a/deepspeed/ops/csrc/aio/py_lib/deepspeed_aio_thread.cpp b/deepspeed/ops/csrc/aio/py_lib/deepspeed_aio_thread.cpp deleted file mode 100644 index a2670fb..0000000 --- a/deepspeed/ops/csrc/aio/py_lib/deepspeed_aio_thread.cpp +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright 2020 The Microsoft DeepSpeed Team -Licensed under the MIT license. - -Functionality for swapping optimizer tensors to/from (NVMe) storage devices. 
-*/ - -#include "deepspeed_aio_thread.h" - -using namespace std; - -io_op_desc_t::io_op_desc_t(const bool read_op, - const torch::Tensor& buffer, - const int fd, - const char* filename, - const long long int num_bytes, - const bool validate) - : _read_op(read_op), - _buffer(buffer), - _fd(fd), - _filename(filename), - _num_bytes(num_bytes), - _validate(validate) -{ - _cpu_buffer = _buffer.is_cuda() ? _buffer.to(torch::kCPU).pin_memory() : _buffer; - _contiguous_buffer = _cpu_buffer.contiguous(); -} - -char* io_op_desc_t::data_ptr() const { return (char*)_contiguous_buffer.data_ptr(); } - -void io_op_desc_t::fini() -{ - if (_read_op && _buffer.is_cuda()) { _buffer.copy_(_cpu_buffer.to(torch::kCUDA)); } -} - -deepspeed_aio_thread_t::deepspeed_aio_thread_t(const int tid, deepspeed_aio_config_t& aio_config) - : _tid(tid), - _aio_config(aio_config), - _aio_ctxt(new aio_context(aio_config._block_size, aio_config._queue_depth)), - _time_to_exit(false) -{ -} - -deepspeed_aio_thread_t::~deepspeed_aio_thread_t() {} - -void deepspeed_aio_thread_t::run() -{ - while (true) { - std::shared_ptr next_io_op = nullptr; - - { - std::unique_lock lock(_work_sync._mutex); - _work_sync._cond_var.wait(lock, - [this] { return (!_work_queue.empty() || _time_to_exit); }); - if (!_work_queue.empty()) { - next_io_op = _work_queue.front(); - _work_queue.pop(); - } - } - - if (next_io_op) { - const auto base_offset = next_io_op->_num_bytes * _tid; - - std::unique_ptr xfer_ctxt(new io_xfer_ctxt( - next_io_op->_fd, base_offset, next_io_op->_num_bytes, next_io_op->data_ptr())); - - if (_aio_config._overlap_events) { - do_aio_operation_overlap( - next_io_op->_read_op, _aio_ctxt, xfer_ctxt, &_aio_config, nullptr); - } else { - do_aio_operation_sequential( - next_io_op->_read_op, _aio_ctxt, xfer_ctxt, &_aio_config, nullptr); - } - - { - std::lock_guard lock(_complete_sync._mutex); - _complete_queue.push(next_io_op); - } - _complete_sync._cond_var.notify_one(); - } - - if (_time_to_exit) { break; } - } 
-} diff --git a/deepspeed/ops/csrc/aio/py_lib/deepspeed_aio_thread.h b/deepspeed/ops/csrc/aio/py_lib/deepspeed_aio_thread.h deleted file mode 100644 index d1cfcab..0000000 --- a/deepspeed/ops/csrc/aio/py_lib/deepspeed_aio_thread.h +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2020 The Microsoft DeepSpeed Team -Licensed under the MIT license. - -Functionality for swapping optimizer tensors to/from (NVMe) storage devices. -*/ - -#include -#include -#include -#include "deepspeed_py_aio.h" - -struct io_op_desc_t { - const bool _read_op; - torch::Tensor _buffer; - int _fd; - const std::string _filename; - const long long int _num_bytes; - torch::Tensor _cpu_buffer; - torch::Tensor _contiguous_buffer; - const bool _validate; - - io_op_desc_t(const bool read_op, - const torch::Tensor& buffer, - const int fd, - const char* filename, - const long long int num_bytes, - const bool validate); - - char* data_ptr() const; - void fini(); -}; - -struct thread_sync_t { - std::mutex _mutex; - std::condition_variable _cond_var; -}; - -struct deepspeed_aio_thread_t { - const int _tid; - deepspeed_aio_config_t& _aio_config; - - std::unique_ptr _aio_ctxt; - std::queue> _work_queue; - std::queue> _complete_queue; - - bool _time_to_exit; - - struct thread_sync_t _work_sync; - struct thread_sync_t _complete_sync; - - deepspeed_aio_thread_t(const int tid, deepspeed_aio_config_t& aio_config); - - ~deepspeed_aio_thread_t(); - - void run(); -}; diff --git a/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio.cpp b/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio.cpp deleted file mode 100644 index 49ff1f2..0000000 --- a/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio.cpp +++ /dev/null @@ -1,121 +0,0 @@ - -/* -Copyright 2020 The Microsoft DeepSpeed Team -Licensed under the MIT license. - -Functionality for swapping optimizer tensors to/from (NVMe) storage devices. 
-*/ - -#include -#include -#include - -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "deepspeed_py_aio.h" - -using namespace std; -using namespace std::chrono; - -#define DEBUG_DS_AIO_READ 0 -#define DEBUG_DS_AIO_WRITE 0 - -static const std::string c_library_name = "deepspeed_aio"; - -int deepspeed_py_aio_write(const torch::Tensor& buffer, - const char* filename, - const int block_size, - const int queue_depth, - const bool single_submit, - const bool overlap_events, - const bool validate) -{ - const auto start_time = std::chrono::high_resolution_clock::now(); - deepspeed_aio_config_t config(block_size, queue_depth, single_submit, overlap_events, false); - - const auto fd = open_file(filename, false); - if (fd == -1) { return -1; } - - auto write_buffer = (char*)buffer.data_ptr(); - const auto num_write_bytes = static_cast(buffer.nbytes()); - std::unique_ptr xfer_ctxt(new io_xfer_ctxt(fd, 0, num_write_bytes, write_buffer)); - std::unique_ptr aio_ctxt(new aio_context(config._block_size, config._queue_depth)); - - if (config._overlap_events) { - do_aio_operation_overlap(false, aio_ctxt, xfer_ctxt, &config, nullptr); - } else { - do_aio_operation_sequential(false, aio_ctxt, xfer_ctxt, &config, nullptr); - } - const std::chrono::duration aio_time = - std::chrono::high_resolution_clock::now() - start_time; - - close(fd); - - if (validate) { validate_aio_operation(false, filename, write_buffer, num_write_bytes); } - - const std::chrono::duration fn_time = - std::chrono::high_resolution_clock::now() - start_time; - std::cout << "Elapsed time(usec): " - << "aio = " << aio_time.count() * 1e6 << " call = " << fn_time.count() * 1e6 - << std::endl; - return 0; -} - -int deepspeed_py_aio_read(torch::Tensor& buffer, - const char* filename, - const int block_size, - const int queue_depth, - const bool single_submit, - const bool overlap_events, - const bool validate) -{ - const auto 
start_time = std::chrono::high_resolution_clock::now(); - long long num_file_bytes; - if (-1 == get_file_size(filename, num_file_bytes)) { - const auto error_code = errno; - report_file_error(filename, " fstat for read", error_code); - return -1; - } - - deepspeed_aio_config_t config(block_size, queue_depth, single_submit, overlap_events, false); - const auto fd = open_file(filename, true); - if (fd == -1) { return -1; } - - auto read_buffer = (char*)buffer.data_ptr(); - assert(static_cast(buffer.nbytes()) == num_file_bytes); - - std::unique_ptr xfer_ctxt(new io_xfer_ctxt(fd, 0, num_file_bytes, read_buffer)); - std::unique_ptr aio_ctxt(new aio_context(config._block_size, config._queue_depth)); - - if (config._overlap_events) { - do_aio_operation_overlap(true, aio_ctxt, xfer_ctxt, &config, nullptr); - } else { - do_aio_operation_sequential(true, aio_ctxt, xfer_ctxt, &config, nullptr); - } - const std::chrono::duration aio_time = - std::chrono::high_resolution_clock::now() - start_time; - - close(fd); - - if (validate) { validate_aio_operation(true, filename, read_buffer, num_file_bytes); } - - const std::chrono::duration fn_time = - std::chrono::high_resolution_clock::now() - start_time; - std::cout << "Elapsed time(usec): " - << "aio = " << aio_time.count() * 1e6 << " call = " << fn_time.count() * 1e6 - << std::endl; - return 0; -} diff --git a/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio.h b/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio.h deleted file mode 100644 index 230d88d..0000000 --- a/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio.h +++ /dev/null @@ -1,27 +0,0 @@ - -/* -Copyright 2020 The Microsoft DeepSpeed Team -Licensed under the MIT license. - -Functionality for swapping optimizer tensors to/from (NVMe) storage devices. 
-*/ - -#include -#include -#include - -int deepspeed_py_aio_write(const torch::Tensor& buffer, - const char* filename, - const int block_size, - const int queue_depth, - const bool single_submit, - const bool overlap_events, - const bool validate); - -int deepspeed_py_aio_read(torch::Tensor& buffer, - const char* filename, - const int block_size, - const int queue_depth, - const bool single_submit, - const bool overlap_events, - const bool validate); diff --git a/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio_handle.cpp b/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio_handle.cpp deleted file mode 100644 index 417319f..0000000 --- a/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio_handle.cpp +++ /dev/null @@ -1,282 +0,0 @@ - -/* -Copyright 2020 The Microsoft DeepSpeed Team -Licensed under the MIT license. - -Functionality for swapping optimizer tensors to/from (NVMe) storage devices. -*/ - -#include "deepspeed_py_aio_handle.h" - -using namespace std; - -static void _start_aio_thread(std::shared_ptr ctxt) { ctxt->run(); } - -deepspeed_aio_handle_t::deepspeed_aio_handle_t(const int block_size, - const int queue_depth, - const bool single_submit, - const bool overlap_events, - const int num_threads) - : _aio_ctxt(new aio_context(block_size, queue_depth)), - _single_submit(single_submit), - _overlap_events(overlap_events), - _num_threads(num_threads), - _aio_config(block_size, queue_depth, single_submit, overlap_events, false), - _num_pending_ops(0) -{ - for (auto i = 0; i < num_threads; ++i) { - _thread_contexts.push_back(std::make_shared(i, _aio_config)); - } - - for (auto& ctxt : _thread_contexts) { - _threads.push_back(std::thread(_start_aio_thread, ctxt)); - } -} - -deepspeed_aio_handle_t::~deepspeed_aio_handle_t() -{ - _stop_threads(); - for (auto& thr : _threads) { thr.join(); } -} - -const int deepspeed_aio_handle_t::get_block_size() const -{ - return _aio_ctxt ? 
_aio_ctxt->_block_size : -1; -} - -const int deepspeed_aio_handle_t::get_queue_depth() const -{ - return _aio_ctxt ? _aio_ctxt->_queue_depth : -1; -} - -const bool deepspeed_aio_handle_t::get_single_submit() const { return _single_submit; } - -const bool deepspeed_aio_handle_t::get_overlap_events() const { return _overlap_events; } - -const int deepspeed_aio_handle_t::get_thread_count() const { return _num_threads; } - -int deepspeed_aio_handle_t::read(torch::Tensor& buffer, const char* filename, const bool validate) -{ - const auto start_time = std::chrono::high_resolution_clock::now(); - - assert(_aio_ctxt); - - long long num_file_bytes; - if (-1 == get_file_size(filename, num_file_bytes)) { - const auto error_code = errno; - report_file_error(filename, " fstat for read", error_code); - return -1; - } - assert(static_cast(buffer.nbytes()) == num_file_bytes); - - const auto fd = open_file(filename, true); - if (fd == -1) { return -1; } - - auto read_buffer = (char*)buffer.data_ptr(); - std::unique_ptr xfer_ctxt(new io_xfer_ctxt(fd, 0, num_file_bytes, read_buffer)); - - if (_aio_config._overlap_events) { - do_aio_operation_overlap(true, _aio_ctxt, xfer_ctxt, &_aio_config, nullptr); - } else { - do_aio_operation_sequential(true, _aio_ctxt, xfer_ctxt, &_aio_config, nullptr); - } - - close(fd); - const std::chrono::duration aio_time = - std::chrono::high_resolution_clock::now() - start_time; - - if (validate) { validate_aio_operation(true, filename, read_buffer, num_file_bytes); } - const std::chrono::duration fn_time = - std::chrono::high_resolution_clock::now() - start_time; - std::cout << "Elapsed time(usec): " - << "aio = " << aio_time.count() * 1e6 << " call = " << fn_time.count() * 1e6 - << std::endl; - return 0; -} - -int deepspeed_aio_handle_t::write(const torch::Tensor& buffer, - const char* filename, - const bool validate) -{ - assert(_aio_ctxt); - - const auto start_time = std::chrono::high_resolution_clock::now(); - - const auto fd = open_file(filename, 
false); - if (fd == -1) { return -1; } - - auto write_buffer = (char*)buffer.data_ptr(); - const auto num_write_bytes = static_cast(buffer.nbytes()); - std::unique_ptr xfer_ctxt(new io_xfer_ctxt(fd, 0, num_write_bytes, write_buffer)); - - if (_aio_config._overlap_events) { - do_aio_operation_overlap(false, _aio_ctxt, xfer_ctxt, &_aio_config, nullptr); - } else { - do_aio_operation_sequential(false, _aio_ctxt, xfer_ctxt, &_aio_config, nullptr); - } - const std::chrono::duration aio_time = - std::chrono::high_resolution_clock::now() - start_time; - - close(fd); - - if (validate) { validate_aio_operation(false, filename, write_buffer, num_write_bytes); } - - const std::chrono::duration fn_time = - std::chrono::high_resolution_clock::now() - start_time; - std::cout << "Elapsed time(usec): " - << "aio = " << aio_time.count() * 1e6 << " call = " << fn_time.count() * 1e6 - << std::endl; - return 0; -} - -void deepspeed_aio_handle_t::_schedule_aio_work(std::shared_ptr scheduled_op) -{ - for (auto& ctxt : _thread_contexts) { - { - std::lock_guard lock(ctxt->_work_sync._mutex); - ctxt->_work_queue.push(scheduled_op); - } - ctxt->_work_sync._cond_var.notify_one(); - } - _num_pending_ops++; -} - -std::shared_ptr deepspeed_aio_handle_t::_wait_for_aio_work() -{ - std::shared_ptr completed_op = nullptr; - for (auto& ctxt : _thread_contexts) { - std::unique_lock lock(ctxt->_complete_sync._mutex); - ctxt->_complete_sync._cond_var.wait(lock, - [ctxt] { return !ctxt->_complete_queue.empty(); }); - completed_op = ctxt->_complete_queue.front(); - ctxt->_complete_queue.pop(); - } - return completed_op; -} - -void deepspeed_aio_handle_t::_stop_threads() -{ - assert(0 == _num_pending_ops); - for (auto& ctxt : _thread_contexts) { - { - std::lock_guard lock(ctxt->_work_sync._mutex); - ctxt->_time_to_exit = true; - } - ctxt->_work_sync._cond_var.notify_one(); - } -} - -int deepspeed_aio_handle_t::wait() -{ - assert(_num_pending_ops > 0); - auto num_completed_ops = 0; - - while 
(_num_pending_ops > 0) { - auto completed_op = _wait_for_aio_work(); - - completed_op->fini(); - - close(completed_op->_fd); - - if (completed_op->_validate) { - validate_aio_operation(completed_op->_read_op, - completed_op->_filename.c_str(), - completed_op->data_ptr(), - _num_threads * completed_op->_num_bytes); - } - --_num_pending_ops; - ++num_completed_ops; - } - - return num_completed_ops; -} - -bool deepspeed_aio_handle_t::_is_valid_parallel_aio_op(const bool read_op, - const long long int num_bytes) -{ - const auto op_string = read_op ? "Read" : "Write"; - if (num_bytes % get_thread_count()) { - std::cout << "deepspeed_aio failure: parallel " << op_string << " num_bytes = " << num_bytes - << " not divisible by thread count = " << get_thread_count() << std::endl; - return false; - } - - return true; -} - -int deepspeed_aio_handle_t::pread(const torch::Tensor& buffer, - const char* filename, - const bool validate, - const bool async) -{ - long long num_file_bytes; - if (-1 == get_file_size(filename, num_file_bytes)) { - const auto error_code = errno; - report_file_error(filename, " fstat for read", error_code); - return -1; - } - const auto buffer_bytes = static_cast(buffer.nbytes()); - if (buffer_bytes != num_file_bytes) { - std::cout << filename << ": buffer nbytes != file bytes " << buffer_bytes - << " != " << num_file_bytes << std::endl; - } - assert(static_cast(buffer.nbytes()) == num_file_bytes); - assert((num_file_bytes % _num_threads) == 0); - - if (!_is_valid_parallel_aio_op(true, num_file_bytes)) { return -1; } - - const auto fd = open_file(filename, true); - if (fd == -1) { return -1; } - - auto scheduled_op = std::make_shared( - true, buffer, fd, filename, (num_file_bytes / _num_threads), validate); - - _schedule_aio_work(scheduled_op); - - if (async) { return 0; } - - return wait(); -} - -int deepspeed_aio_handle_t::pwrite(const torch::Tensor& buffer, - const char* filename, - const bool validate, - const bool async) -{ - const auto 
num_write_bytes = static_cast(buffer.nbytes()); - assert((num_write_bytes % _num_threads) == 0); - - if (!_is_valid_parallel_aio_op(false, num_write_bytes)) { return -1; } - - const auto fd = open_file(filename, false); - if (fd == -1) { return -1; } - - auto scheduled_op = std::make_shared( - false, buffer, fd, filename, (num_write_bytes / _num_threads), validate); - - _schedule_aio_work(scheduled_op); - - if (async) { return 0; } - - return wait(); -} - -int deepspeed_aio_handle_t::sync_pread(torch::Tensor& buffer, const char* filename) -{ - return pread(buffer, filename, false, false); -} - -int deepspeed_aio_handle_t::sync_pwrite(const torch::Tensor& buffer, const char* filename) -{ - return pwrite(buffer, filename, false, false); -} - -int deepspeed_aio_handle_t::async_pread(torch::Tensor& buffer, const char* filename) -{ - return pread(buffer, filename, false, true); -} - -int deepspeed_aio_handle_t::async_pwrite(const torch::Tensor& buffer, const char* filename) -{ - return pwrite(buffer, filename, false, true); -} diff --git a/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio_handle.h b/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio_handle.h deleted file mode 100644 index 22de4c3..0000000 --- a/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio_handle.h +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright 2020 The Microsoft DeepSpeed Team -Licensed under the MIT license. - -Functionality for swapping optimizer tensors to/from (NVMe) storage devices. 
-*/ - -#include -#include -#include "deepspeed_aio_thread.h" - -struct deepspeed_aio_handle_t { - std::unique_ptr _aio_ctxt; - const bool _single_submit; - const bool _overlap_events; - const int _num_threads; - deepspeed_aio_config_t _aio_config; - - std::vector> _thread_contexts; - std::vector _threads; - int _num_pending_ops; - - deepspeed_aio_handle_t(const int block_size, - const int queue_depth, - const bool single_submit, - const bool overlap_events, - const int num_threads); - - ~deepspeed_aio_handle_t(); - - const int get_block_size() const; - const int get_queue_depth() const; - const bool get_single_submit() const; - const bool get_overlap_events() const; - const int get_thread_count() const; - - int read(torch::Tensor& buffer, const char* filename, const bool validate); - - int write(const torch::Tensor& buffer, const char* filename, const bool validate); - - int pread(const torch::Tensor& buffer, - const char* filename, - const bool validate, - const bool async); - - int pwrite(const torch::Tensor& buffer, - const char* filename, - const bool validate, - const bool async); - - int sync_pread(torch::Tensor& buffer, const char* filename); - - int sync_pwrite(const torch::Tensor& buffer, const char* filename); - - int async_pread(torch::Tensor& buffer, const char* filename); - - int async_pwrite(const torch::Tensor& buffer, const char* filename); - - int wait(); - - void _stop_threads(); - - void _schedule_aio_work(std::shared_ptr scheduled_op); - - std::shared_ptr _wait_for_aio_work(); - - bool _is_valid_parallel_aio_op(const bool read_op, const long long int num_bytes); -}; diff --git a/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_copy.cpp b/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_copy.cpp deleted file mode 100644 index ee51147..0000000 --- a/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_copy.cpp +++ /dev/null @@ -1,133 +0,0 @@ -/* -Copyright 2020 The Microsoft DeepSpeed Team -Licensed under the MIT license. 
- -Functionality for swapping optimizer tensors to/from (NVMe) storage devices. -*/ - -#include "deepspeed_py_copy.h" -#include - -#define ROUND_DOWN(size, step) ((size) & ~((step)-1)) - -#if defined(__AVX512__) or defined(__AVX256__) -union AVX_Data { -#if defined(__AVX512__) - __m512 data; -#else - __m256 data; -#endif -}; -#endif - -static void helper_memcpy_1(float* dest, float* src, size_t param_size) -{ - size_t rounded_size = 0; - -#if defined(__AVX512__) or defined(__AVX256__) - - rounded_size = ROUND_DOWN(param_size, SIMD_WIDTH); - - for (size_t t = 0; t < rounded_size; t += TILE) { - size_t copy_size = TILE; - if ((t + TILE) > rounded_size) copy_size = rounded_size - t; - size_t offset = copy_size + t; -#pragma omp parallel for - for (size_t i = t; i < offset; i += SIMD_WIDTH) { - AVX_Data src_4; - src_4.data = SIMD_LOAD(src + i); - - SIMD_STORE(dest + i, src_4.data); - } - } - -#endif - - if (param_size > rounded_size) { -#pragma omp parallel for - for (size_t k = rounded_size; k < param_size; k++) { dest[k] = src[k]; } - } -} - -static void helper_memcpy_4(float* dest, float* src, size_t param_size) -{ - size_t rounded_size = 0; - -#if defined(__AVX512__) or defined(__AVX256__) - - rounded_size = ROUND_DOWN(param_size, (SIMD_WIDTH << 2)); - - for (size_t t = 0; t < rounded_size; t += TILE) { - size_t copy_size = TILE; - if ((t + TILE) > rounded_size) copy_size = rounded_size - t; - size_t offset = copy_size + t; -#pragma omp parallel for - for (size_t i = t; i < offset; i += (SIMD_WIDTH << 2)) { - AVX_Data src_4[4]; - src_4[0].data = SIMD_LOAD(src + i); - src_4[1].data = SIMD_LOAD(src + i + SIMD_WIDTH); - src_4[2].data = SIMD_LOAD(src + i + (SIMD_WIDTH << 1)); - src_4[3].data = SIMD_LOAD(src + i + SIMD_WIDTH * 3); - - SIMD_STORE(dest + i, src_4[0].data); - SIMD_STORE(dest + i + SIMD_WIDTH, src_4[1].data); - SIMD_STORE(dest + i + (SIMD_WIDTH << 1), src_4[2].data); - SIMD_STORE(dest + i + SIMD_WIDTH * 3, src_4[3].data); - } - } -#endif - if (param_size > 
rounded_size) - helper_memcpy_1((dest + rounded_size), (src + rounded_size), (param_size - rounded_size)); -} - -static void helper_mempcy_8(float* dest, float* src, size_t param_size) -{ - size_t rounded_size = 0; - -#if defined(__AVX512__) or defined(__AVX256__) - - rounded_size = ROUND_DOWN(param_size, (SIMD_WIDTH << 2)); - - for (size_t t = 0; t < rounded_size; t += TILE) { - size_t copy_size = TILE; - if ((t + TILE) > rounded_size) copy_size = rounded_size - t; - size_t offset = copy_size + t; -#pragma omp parallel for - for (size_t i = t; i < offset; i += (SIMD_WIDTH << 3)) { - AVX_Data src_4[8]; - src_4[0].data = SIMD_LOAD(src + i); - src_4[1].data = SIMD_LOAD(src + i + SIMD_WIDTH); - src_4[2].data = SIMD_LOAD(src + i + (SIMD_WIDTH << 1)); - src_4[3].data = SIMD_LOAD(src + i + SIMD_WIDTH * 3); - src_4[4].data = SIMD_LOAD(src + i + (SIMD_WIDTH << 2)); - src_4[5].data = SIMD_LOAD(src + i + SIMD_WIDTH * 5); - src_4[6].data = SIMD_LOAD(src + i + SIMD_WIDTH * 6); - src_4[7].data = SIMD_LOAD(src + i + SIMD_WIDTH * 7); - - SIMD_STORE(dest + i, src_4[0].data); - SIMD_STORE(dest + i + SIMD_WIDTH, src_4[1].data); - SIMD_STORE(dest + i + (SIMD_WIDTH << 1), src_4[2].data); - SIMD_STORE(dest + i + SIMD_WIDTH * 3, src_4[3].data); - SIMD_STORE(dest + i + (SIMD_WIDTH << 2), src_4[4].data); - SIMD_STORE(dest + i + SIMD_WIDTH * 5, src_4[5].data); - SIMD_STORE(dest + i + SIMD_WIDTH * 6, src_4[6].data); - SIMD_STORE(dest + i + SIMD_WIDTH * 7, src_4[7].data); - } - } -#endif - if (param_size > rounded_size) - helper_memcpy_4((dest + rounded_size), (src + rounded_size), (param_size - rounded_size)); -} - -int deepspeed_py_memcpy(torch::Tensor& dest, const torch::Tensor& src) -{ - auto dest_c = dest.contiguous(); - auto src_c = src.contiguous(); - - float* dest_ptr = (float*)dest_c.data_ptr(); - float* src_ptr = (float*)src_c.data_ptr(); - - helper_mempcy_8(dest_ptr, src_ptr, dest_c.size(0)); - - return 0; -} diff --git a/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_copy.h 
b/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_copy.h deleted file mode 100644 index 69b0448..0000000 --- a/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_copy.h +++ /dev/null @@ -1,42 +0,0 @@ - -/* -Copyright 2020 The Microsoft DeepSpeed Team -Licensed under the MIT license. - -Functionality for swapping optimizer tensors to/from (NVMe) storage devices. -*/ - -#if (__x86_64__ || __i386__) -#include -#include -#endif - -#include -#include -#include - -#define TILE (1024 * 1024 * 1024) - -#if defined(__AVX512__) -#define SIMD_STORE(a, d) _mm512_storeu_ps(a, d) -#define SIMD_LOAD(x) _mm512_loadu_ps(x) -#define SIMD_SET(x) _mm512_set1_ps(x) -#define SIMD_MUL(x, y) _mm512_mul_ps(x, y) -#define SIMD_FMA(x, y, c) _mm512_fmadd_ps(x, y, c) -#define SIMD_SQRT(x) _mm512_sqrt_ps(x) -#define SIMD_DIV(x, y) _mm512_div_ps(x, y) -#define SIMD_WIDTH 16 -#else -#if defined(__AVX256__) -#define SIMD_STORE(a, d) _mm256_storeu_ps(a, d) -#define SIMD_LOAD(x) _mm256_loadu_ps(x) -#define SIMD_SET(x) _mm256_set1_ps(x) -#define SIMD_MUL(x, y) _mm256_mul_ps(x, y) -#define SIMD_FMA(x, y, c) _mm256_fmadd_ps(x, y, c) -#define SIMD_SQRT(x) _mm256_sqrt_ps(x) -#define SIMD_DIV(x, y) _mm256_div_ps(x, y) -#define SIMD_WIDTH 8 -#endif -#endif - -int deepspeed_py_memcpy(torch::Tensor& dest, const torch::Tensor& src); diff --git a/deepspeed/ops/csrc/aio/py_lib/py_ds_aio.cpp b/deepspeed/ops/csrc/aio/py_lib/py_ds_aio.cpp deleted file mode 100644 index 6859058..0000000 --- a/deepspeed/ops/csrc/aio/py_lib/py_ds_aio.cpp +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright 2020 The Microsoft DeepSpeed Team -Licensed under the MIT license. - -Functionality for swapping optimizer tensors to/from (NVMe) storage devices. 
-*/ - -#include -#include "deepspeed_py_aio_handle.h" -#include "deepspeed_py_copy.h" - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("aio_read", &deepspeed_py_aio_read, "DeepSpeed Asynchronous I/O Read"); - - m.def("aio_write", &deepspeed_py_aio_write, "DeepSpeed Asynchronous I/O Write"); - - m.def("deepspeed_memcpy", &deepspeed_py_memcpy, "DeepSpeed Memory Copy"); - - py::class_(m, "aio_handle") - .def(py::init()) - - .def("get_block_size", &deepspeed_aio_handle_t::get_block_size) - .def("get_queue_depth", &deepspeed_aio_handle_t::get_queue_depth) - .def("get_single_submit", &deepspeed_aio_handle_t::get_single_submit) - .def("get_overlap_events", &deepspeed_aio_handle_t::get_overlap_events) - .def("get_thread_count", &deepspeed_aio_handle_t::get_thread_count) - - .def("read", &deepspeed_aio_handle_t::read) - .def("write", &deepspeed_aio_handle_t::write) - - .def("pread", &deepspeed_aio_handle_t::pread) - .def("pwrite", &deepspeed_aio_handle_t::pwrite) - - .def("sync_pread", &deepspeed_aio_handle_t::sync_pread) - .def("sync_pwrite", &deepspeed_aio_handle_t::sync_pwrite) - .def("async_pread", &deepspeed_aio_handle_t::async_pread) - .def("async_pwrite", &deepspeed_aio_handle_t::async_pwrite) - - .def("wait", &deepspeed_aio_handle_t::wait); -} diff --git a/deepspeed/ops/csrc/aio/py_test/aio_bench_generate_param.py b/deepspeed/ops/csrc/aio/py_test/aio_bench_generate_param.py deleted file mode 100644 index caa833f..0000000 --- a/deepspeed/ops/csrc/aio/py_test/aio_bench_generate_param.py +++ /dev/null @@ -1,96 +0,0 @@ -""" -Copyright 2021 The Microsoft DeepSpeed Team -Licensed under the MIT license. - -Functionality of swapping optimizer tensors to/from (NVMe) storage devices. 
-""" -import os -import argparse -import json -from parse_aio_stats import READ_SPEED, WRITE_SPEED, get_sorted_results -from perf_sweep_utils import BENCH_LOG_DIR, READ_LOG_DIR, WRITE_LOG_DIR - - -def parse_arguments(): - parser = argparse.ArgumentParser() - - parser.add_argument( - '--log_dir', - type=str, - default=BENCH_LOG_DIR, - help= - f'Folder of performance sweep logs. Default is {os.path.join(".", BENCH_LOG_DIR)}' - ) - - args = parser.parse_args() - print(f'args = {args}') - - return args - - -def validate_args(args): - for d in [READ_LOG_DIR, WRITE_LOG_DIR]: - log_dir = os.path.join(args.log_dir, d) - if not os.path.isdir(log_dir): - print(f'{log_dir} folder is not existent') - return False - - return True - - -def convert_to_param(key): - assert len(key) == 6 - return { - "single_submit": "true" if key[0] == "single" else "false", - "overlap_events": "true" if key[1] == "overlap" else "false", - "thread_count": int(key[3]), - "queue_depth": int(key[4]), - "block_size": int(key[5]) - } - - -def generate_aio_param(read_log_dir, write_log_dir): - _, read_results = get_sorted_results(read_log_dir, READ_SPEED) - _, write_results = get_sorted_results(write_log_dir, WRITE_SPEED) - combined_perf = {key[1:]: value for key, value in read_results.items()} - - for key, value in write_results.items(): - new_key = key[1:] - if new_key in combined_perf: - combined_perf[new_key] += value - else: - combined_perf[new_key] = 0 - - optimal_key = None - optimal_perf = 0.0 - for key, value in combined_perf.items(): - if value > optimal_perf: - optimal_perf = value - optimal_key = key - - aio_param = {"aio": convert_to_param(optimal_key)} - - read_perf_keys = {key[1:]: key for key in read_results.keys()} - write_perf_keys = {key[1:]: key for key in write_results.keys()} - optimal_config_read = read_results.get(read_perf_keys[optimal_key], None) - optimal_config_write = write_results.get(write_perf_keys[optimal_key], None) - - print( - f'Best performance (GB/sec): read = 
{optimal_config_read:5.2f}, write = {optimal_config_write:5.2f}' - ) - print(json.dumps(aio_param, indent=3)) - - -def main(): - print('Generate aio param') - args = parse_arguments() - if not validate_args(args): - quit() - - read_log_dir = os.path.join(args.log_dir, READ_LOG_DIR) - write_log_dir = os.path.join(args.log_dir, WRITE_LOG_DIR) - generate_aio_param(read_log_dir, write_log_dir) - - -if __name__ == "__main__": - main() diff --git a/deepspeed/ops/csrc/aio/py_test/aio_bench_perf_sweep.py b/deepspeed/ops/csrc/aio/py_test/aio_bench_perf_sweep.py deleted file mode 100644 index be6cd74..0000000 --- a/deepspeed/ops/csrc/aio/py_test/aio_bench_perf_sweep.py +++ /dev/null @@ -1,397 +0,0 @@ -""" -Copyright 2021 The Microsoft DeepSpeed Team -Licensed under the MIT license. - -Functionality of swapping optimizer tensors to/from (NVMe) storage devices. -""" -import os -import sys -import argparse -import json -import itertools -import subprocess -import shutil - -from test_ds_aio_utils import refine_integer_value -from perf_sweep_utils import READ_OP_DESC, WRITE_OP_DESC, BENCH_LOG_DIR, \ - READ_IO_DIR, WRITE_IO_DIR, READ_LOG_DIR, WRITE_LOG_DIR - -OTHER_OPTIONS = '--handle' -PERF_SCRIPT = 'test_ds_aio.py' -DEFAULT_SWEEP_CONFIG = { - "block_size": ["128K", - "256K"], - "queue_depth": [4, - 16, - 32], - "overlap_events": [True, - False], - "io_parallel": [2, - 8], - "single_submit": [False] -} - - -class Job(object): - def __init__(self, cmd_line, output_file=None, work_dir=None): - self.cmd_line = cmd_line - self.output_file = output_file - self.work_dir = work_dir - self.output_fd = None - - def cmd(self): - return self.cmd_line - - def get_stdout(self): - return self.output_fd - - def get_stderr(self): - return self.output_fd - - def get_cwd(self): - return self.work_dir - - def open_output_file(self): - if self.output_file is not None: - self.output_fd = open(self.output_file, 'w') - - def close_output_file(self): - if self.output_fd is not None: - 
self.output_fd.close() - self.output_fd = None - - -class SweepConfig(object): - def __init__(self, args): - self.nvme_dir = args.nvme_dir - self.io_size = args.io_size - self.search_space = get_sweep_config_dict(args.sweep_config) - self.read = not args.no_read - self.write = not args.no_write - self.flush_cache = not args.no_sudo - self.log_dir = args.log_dir - self.loops = args.loops - self.other_options = f'{OTHER_OPTIONS} --loops {args.loops}' - - -def parse_arguments(): - parser = argparse.ArgumentParser() - - parser.add_argument( - '--nvme_dir', - required=True, - type=str, - help= - 'Directory in which to perform I/O tests. A writeable directory on a NVMe device.' - ) - - parser.add_argument('--sweep_config', - type=str, - default=None, - help='Performance sweep configuration json file.') - - parser.add_argument('--no_read', - action='store_true', - help='Disable read performance measurements.') - - parser.add_argument('--no_write', - action='store_true', - help='Disable write performance measurements.') - - parser.add_argument( - '--io_size', - type=str, - default="400M", - help='Number of I/O bytes to read/write for performance measurements.') - - parser.add_argument( - '--no_sudo', - action='store_true', - help= - 'Run without sudo access. Page cache will not be flushed and reported read speeds may be higher than actual.' - ) - - parser.add_argument( - '--log_dir', - type=str, - default=BENCH_LOG_DIR, - help= - f'Output directory for performance log files. 
Default is {os.path.join(".", BENCH_LOG_DIR)}' - ) - - parser.add_argument('--loops', - type=int, - default=1, - help='Count of operation repetitions') - - args = parser.parse_args() - print(f'args = {args}') - - return args - - -def dump_cmd_lines(cmd_lines): - print(f'cmd line count = {len(cmd_lines)}') - for i, cmd in enumerate(cmd_lines): - print(f'{i}: {cmd}') - - -def get_sweep_config_dict(sweep_config_json): - if sweep_config_json is None: - return DEFAULT_SWEEP_CONFIG - - with open(sweep_config_json) as fp: - sweep_config = json.load(fp) - return sweep_config - - -def get_sweep_cmd_lines(sweep_config_dict): - def flatten_options(key, value_list): - flat_list = [] - for v in value_list: - if not type(v) is bool: - flat_list.append(f'--{key} {v}') - elif v: - flat_list.append(f'--{key}') - else: - flat_list.append(' ') - - return flat_list - - flat_list = [flatten_options(key, value) for key, value in sweep_config_dict.items()] - cmd_list = list(itertools.product(*flat_list)) - cmd_list = [list(cmd) for cmd in cmd_list] - #dump_cmd_lines(cmd_list) - return cmd_list - - -def run_job(job): - args = ' '.join(job.cmd()) - print(f'args = {args}') - job.open_output_file() - proc = subprocess.run(args=args, - shell=True, - stdout=job.get_stdout(), - stderr=job.get_stderr(), - cwd=job.get_cwd()) - job.close_output_file() - assert proc.returncode == 0, \ - f"This command failed: {job.cmd()}" - - -def launch_sweep(sweep_jobs, sync_job, flush_cache_job): - for perf_job in sweep_jobs: - if flush_cache_job is not None: - run_job(sync_job) - run_job(flush_cache_job) - - run_job(perf_job) - - run_job(sync_job) - - -def create_cmd_tags(cmd_line): - tags = {} - for param_value in cmd_line: - fields = param_value.split() - if len(fields) == 1: - tags[fields[0]] = None - elif len(fields) == 2: - tags[fields[0]] = fields[1] - return tags - - -def get_log_file(io_op_desc, cmd_line): - QUEUE_DEPTH = "--queue_depth" - BLOCK_SIZE = "--block_size" - SINGLE_SUBMIT = "--single_submit" 
- OVERLAP_EVENTS = "--overlap_events" - THREAD_COUNT = "--threads" - IO_PARALLEL = "--io_parallel" - - tag_map = { - QUEUE_DEPTH: "d", - BLOCK_SIZE: "bs", - SINGLE_SUBMIT: "single", - OVERLAP_EVENTS: "overlap", - THREAD_COUNT: "t", - IO_PARALLEL: "p" - } - - tag_default = { - QUEUE_DEPTH: 1, - BLOCK_SIZE: "1M", - SINGLE_SUBMIT: "block", - OVERLAP_EVENTS: "sequential", - THREAD_COUNT: 1, - IO_PARALLEL: 1 - } - - def get_default_value(tag): - value = tag_default[tag] - if tag in [SINGLE_SUBMIT, OVERLAP_EVENTS]: - return value - return f'{tag_map[tag]}{value}' - - def get_config_value(tag, value): - tag_key = tag_map[tag] - if value is None: - return tag_key - return f'{tag_key}{value}' - - tag_list = [ - SINGLE_SUBMIT, - OVERLAP_EVENTS, - THREAD_COUNT, - IO_PARALLEL, - QUEUE_DEPTH, - BLOCK_SIZE - ] - log_tags = [io_op_desc] - cmd_tags = create_cmd_tags(cmd_line) - for tag in tag_list: - if tag in cmd_tags: - log_tags.append(get_config_value(tag, cmd_tags[tag])) - else: - log_tags.append(get_default_value(tag)) - - log_file = '_'.join(log_tags) - log_file += '.txt' - return log_file - - -def create_perf_jobs(io_op_desc, log_dir, cmd_lines): - py_cmd = ['python', os.path.join(script_path(), PERF_SCRIPT)] - - perf_jobs = [] - for cmd in cmd_lines: - log_file = os.path.join(log_dir, get_log_file(io_op_desc, cmd)) - job = Job(cmd_line=py_cmd + cmd, output_file=log_file) - perf_jobs.append(job) - - return perf_jobs - - -def script_path(): - return os.path.dirname(os.path.realpath(sys.argv[0])) - - -def async_io_setup(): - import deepspeed - from deepspeed.ops.aio import AsyncIOBuilder - return AsyncIOBuilder().is_compatible() - - -def get_block_size_and_count(io_bytes): - block_size = 1 - block_count = io_bytes - bytes_in_KB = 1024 - - while block_count % bytes_in_KB == 0: - block_size *= bytes_in_KB - block_count /= bytes_in_KB - - return int(block_size), int(block_count) - - -def create_read_file(sweep_config): - read_folder = os.path.join(sweep_config.nvme_dir, 
f'{READ_IO_DIR}') - os.makedirs(read_folder, exist_ok=True) - read_file_name = os.path.join(read_folder, f'random_{sweep_config.io_size}B.pt') - block_size, block_count = get_block_size_and_count(refine_integer_value(sweep_config.io_size)) - dd_job = Job(cmd_line=[ - f'dd if=/dev/urandom of={read_file_name} bs={block_size} count={block_count}' - ]) - print( - f'[Start] Create read file of {sweep_config.io_size} bytes by running {dd_job.cmd()} ....' - ) - run_job(dd_job) - print( - f'[Done] Create read file of {sweep_config.io_size} bytes by running {dd_job.cmd()} ....' - ) - return read_folder, read_file_name - - -def remove_folder(folder): - assert os.path.isdir(folder), f"Error: cannot remove {folder} - folder not found" - shutil.rmtree(folder) - - -def run_read_sweep(sweep_config, flush_cache_job, sync_job, cmd_lines): - read_folder, read_file_name = create_read_file(sweep_config) - read_option = f'--read_file {read_file_name}' - read_cmd_lines = [[f'{read_option} {sweep_config.other_options}'] + cmd - for cmd in cmd_lines] - #dump_cmd_lines(read_cmd_lines) - - log_folder = os.path.join(sweep_config.log_dir, f'{READ_LOG_DIR}') - os.makedirs(log_folder, exist_ok=True) - - perf_jobs = create_perf_jobs(io_op_desc=READ_OP_DESC, - log_dir=log_folder, - cmd_lines=read_cmd_lines) - - launch_sweep(sweep_jobs=perf_jobs, - sync_job=sync_job, - flush_cache_job=flush_cache_job) - - remove_folder(read_folder) - - -def run_write_sweep(sweep_config, flush_cache_job, sync_job, cmd_lines): - write_folder = os.path.join(sweep_config.nvme_dir, f'{WRITE_IO_DIR}') - os.makedirs(write_folder, exist_ok=True) - write_file_name = os.path.join(write_folder, f'random_{sweep_config.io_size}B.pt') - write_option = f'--write_size {sweep_config.io_size} --write_file {write_file_name}' - write_cmd_lines = [[f'{write_option} {sweep_config.other_options}'] + cmd - for cmd in cmd_lines] - #dump_cmd_lines(write_cmd_lines) - - log_folder = os.path.join(sweep_config.log_dir, f'{WRITE_LOG_DIR}') - 
os.makedirs(log_folder, exist_ok=True) - - perf_jobs = create_perf_jobs(io_op_desc=WRITE_OP_DESC, - log_dir=log_folder, - cmd_lines=write_cmd_lines) - - launch_sweep(sweep_jobs=perf_jobs, - sync_job=sync_job, - flush_cache_job=flush_cache_job) - - remove_folder(write_folder) - - -def main(): - print("Running performance sweep of deepspeed nvme library") - - if not async_io_setup(): - error_msg = """ - Failing because environment is not properly configured for deepspeed async i/o module. - Possible fix: apt install libaio-dev. - """ - print(error_msg) - quit() - - args = parse_arguments() - sweep_config = SweepConfig(args) - cmd_lines = get_sweep_cmd_lines(sweep_config.search_space) - - if sweep_config.flush_cache: - flush_cache_job = Job( - cmd_line=['sudo', - 'bash -c', - "'echo 1 > /proc/sys/vm/drop_caches'"]) - else: - flush_cache_job = None - - sync_job = Job(cmd_line=['sync']) - - if sweep_config.read: - run_read_sweep(sweep_config, flush_cache_job, sync_job, cmd_lines) - - if sweep_config.write: - run_write_sweep(sweep_config, flush_cache_job, sync_job, cmd_lines) - - -if __name__ == "__main__": - main() diff --git a/deepspeed/ops/csrc/aio/py_test/ds_aio_basic.py b/deepspeed/ops/csrc/aio/py_test/ds_aio_basic.py deleted file mode 100644 index cf70b66..0000000 --- a/deepspeed/ops/csrc/aio/py_test/ds_aio_basic.py +++ /dev/null @@ -1,144 +0,0 @@ -""" -Copyright 2020 The Microsoft DeepSpeed Team -Licensed under the MIT license. - -Functionality of swapping optimizer tensors to/from (NVMe) storage devices. 
-""" - -import torch -import os -import time -from deepspeed.ops.aio import AsyncIOBuilder -from multiprocessing import Pool, Barrier -from test_ds_aio_utils import report_results, task_log, task_barrier - - -def pre_basic(args, tid, read_op): - io_string = "Read" if read_op else "Write" - num_bytes = os.path.getsize(args.read_file) if read_op else args.write_size - file = args.read_file if read_op else f'{args.write_file}.{tid}' - - task_log(tid, f'Allocate tensor of size {num_bytes} bytes') - buffer = torch.empty(num_bytes, dtype=torch.uint8, device='cpu').pin_memory() - task_log( - tid, - f'{io_string} file {file} of size {num_bytes} bytes from buffer on device {buffer.device}' - ) - - ctxt = {} - ctxt['file'] = file - ctxt['num_bytes'] = num_bytes - ctxt['buffer'] = buffer - ctxt['elapsed_sec'] = 0 - - return ctxt - - -def pre_basic_read(pool_params): - args, tid = pool_params - ctxt = pre_basic(args, tid, True) - return ctxt - - -def pre_basic_write(pool_params): - args, tid = pool_params - ctxt = pre_basic(args, tid, False) - return ctxt - - -def post_basic(pool_params): - _, _, ctxt = pool_params - ctxt["buffer"].detach() - ctxt["buffer"] = None - return ctxt - - -def main_basic_read(pool_params): - args, tid, ctxt = pool_params - start_time = time.time() - AsyncIOBuilder().load().aio_read(ctxt['buffer'], - ctxt['file'], - args.block_size, - args.queue_depth, - args.single_submit, - args.overlap_events, - args.validate) - end_time = time.time() - ctxt['elapsed_sec'] += end_time - start_time - - return ctxt - - -def main_basic_write(pool_params): - args, tid, ctxt = pool_params - start_time = time.time() - AsyncIOBuilder().load().aio_write(ctxt['buffer'], - ctxt['file'], - args.block_size, - args.queue_depth, - args.single_submit, - args.overlap_events, - args.validate) - end_time = time.time() - ctxt['elapsed_sec'] += end_time - start_time - - return ctxt - - -def get_schedule(args, read_op): - schedule = {} - if read_op: - schedule['pre'] = pre_basic_read - 
schedule['post'] = post_basic - schedule['main'] = main_basic_read - else: - schedule['pre'] = pre_basic_write - schedule['post'] = post_basic - schedule['main'] = main_basic_write - - return schedule - - -def _aio_handle_tasklet(pool_params): - args, tid, read_op = pool_params - - # Create schedule - schedule = get_schedule(args, read_op) - task_log(tid, f'schedule = {schedule}') - task_barrier(aio_barrier, args.threads) - - # Run pre task - task_log(tid, f'running pre-task') - ctxt = schedule["pre"]((args, tid)) - task_barrier(aio_barrier, args.threads) - - # Run main tasks in a loop - ctxt["main_task_sec"] = 0 - for i in range(args.loops): - task_log(tid, f'running main task {i}') - start_time = time.time() - ctxt = schedule["main"]((args, tid, ctxt)) - task_barrier(aio_barrier, args.threads) - stop_time = time.time() - ctxt["main_task_sec"] += stop_time - start_time - - # Run post task - task_log(tid, f'running post-task') - ctxt = schedule["post"]((args, tid, ctxt)) - task_barrier(aio_barrier, args.threads) - - return ctxt["main_task_sec"], ctxt["elapsed_sec"], ctxt["num_bytes"] * args.loops - - -def _init_tasklet(b): - global aio_barrier - aio_barrier = b - - -def aio_basic_multiprocessing(args, read_op): - b = Barrier(args.threads) - pool_params = [(args, p, read_op) for p in range(args.threads)] - with Pool(processes=args.threads, initializer=_init_tasklet, initargs=(b, )) as p: - pool_results = p.map(_aio_handle_tasklet, pool_params) - - report_results(args, read_op, pool_results) diff --git a/deepspeed/ops/csrc/aio/py_test/ds_aio_handle.py b/deepspeed/ops/csrc/aio/py_test/ds_aio_handle.py deleted file mode 100644 index 947ee2e..0000000 --- a/deepspeed/ops/csrc/aio/py_test/ds_aio_handle.py +++ /dev/null @@ -1,176 +0,0 @@ -""" -Copyright 2020 The Microsoft DeepSpeed Team -Licensed under the MIT license. - -Functionality of swapping optimizer tensors to/from (NVMe) storage devices. 
-""" - -import torch -import os -import time -from multiprocessing import Pool, Barrier -from deepspeed.ops.aio import AsyncIOBuilder -from test_ds_aio_utils import report_results, task_log, task_barrier - - -def pre_handle(args, tid, read_op): - io_string = "Read" if read_op else "Write" - num_bytes = os.path.getsize(args.read_file) if read_op else args.write_size - file = args.read_file if read_op else f'{args.write_file}.{tid}' - - task_log(tid, f'Allocate tensor of size {num_bytes} bytes') - if args.gpu: - buffer = torch.empty(num_bytes, dtype=torch.uint8, device='cuda') - else: - buffer = torch.empty(num_bytes, dtype=torch.uint8, device='cpu').pin_memory() - task_log( - tid, - f'{io_string} file {file} of size {num_bytes} bytes from buffer on device {buffer.device}' - ) - - io_parallel = args.io_parallel if args.io_parallel else 1 - handle = AsyncIOBuilder().load().aio_handle(args.block_size, - args.queue_depth, - args.single_submit, - args.overlap_events, - io_parallel) - task_log(tid, f'created deepspeed aio handle') - - ctxt = {} - ctxt['file'] = file - ctxt['num_bytes'] = num_bytes - ctxt['handle'] = handle - ctxt['buffer'] = buffer - ctxt['elapsed_sec'] = 0 - - return ctxt - - -def pre_handle_read(pool_params): - args, tid = pool_params - ctxt = pre_handle(args, tid, True) - return ctxt - - -def pre_handle_write(pool_params): - args, tid = pool_params - ctxt = pre_handle(args, tid, False) - return ctxt - - -def post_handle(pool_params): - _, _, ctxt = pool_params - ctxt["buffer"].detach() - ctxt["buffer"] = None - return ctxt - - -def main_parallel_read(pool_params): - args, tid, ctxt = pool_params - handle = ctxt['handle'] - - start_time = time.time() - ret = handle.pread(ctxt['buffer'], ctxt['file'], args.validate, True) - assert ret != -1 - handle.wait() - end_time = time.time() - ctxt['elapsed_sec'] += end_time - start_time - - return ctxt - - -def main_parallel_write(pool_params): - args, tid, ctxt = pool_params - handle = ctxt['handle'] - start_time 
= time.time() - ret = handle.pwrite(ctxt['buffer'], ctxt['file'], args.validate, True) - assert ret != -1 - handle.wait() - end_time = time.time() - ctxt['elapsed_sec'] += end_time - start_time - - return ctxt - - -def main_handle_read(pool_parms): - args, tid, ctxt = pool_parms - handle = ctxt['handle'] - - start_time = time.time() - ret = handle.read(ctxt['buffer'], ctxt['file'], args.validate) - assert ret != -1 - end_time = time.time() - ctxt['elapsed_sec'] += end_time - start_time - - return ctxt - - -def main_handle_write(pool_parms): - args, tid, ctxt = pool_parms - handle = ctxt['handle'] - start_time = time.time() - ret = handle.write(ctxt['buffer'], ctxt['file'], args.validate) - assert ret != -1 - end_time = time.time() - ctxt['elapsed_sec'] += end_time - start_time - - return ctxt - - -def get_schedule(args, read_op): - schedule = {} - if read_op: - schedule['pre'] = pre_handle_read - schedule['post'] = post_handle - schedule['main'] = main_parallel_read if args.io_parallel else main_handle_read - else: - schedule['pre'] = pre_handle_write - schedule['post'] = post_handle - schedule['main'] = main_parallel_write if args.io_parallel else main_handle_write - - return schedule - - -def _aio_handle_tasklet(pool_params): - args, tid, read_op = pool_params - - # Create schedule - schedule = get_schedule(args, read_op) - task_log(tid, f'schedule = {schedule}') - task_barrier(aio_barrier, args.threads) - - # Run pre task - task_log(tid, f'running pre-task') - ctxt = schedule["pre"]((args, tid)) - task_barrier(aio_barrier, args.threads) - - # Run main tasks in a loop - ctxt["main_task_sec"] = 0 - for i in range(args.loops): - task_log(tid, f'running main task {i}') - start_time = time.time() - ctxt = schedule["main"]((args, tid, ctxt)) - task_barrier(aio_barrier, args.threads) - stop_time = time.time() - ctxt["main_task_sec"] += stop_time - start_time - - # Run post task - task_log(tid, f'running post-task') - ctxt = schedule["post"]((args, tid, ctxt)) - 
task_barrier(aio_barrier, args.threads) - - return ctxt["main_task_sec"], ctxt["elapsed_sec"], ctxt["num_bytes"] * args.loops - - -def _init_tasklet(b): - global aio_barrier - aio_barrier = b - - -def aio_handle_multiprocessing(args, read_op): - b = Barrier(args.threads) - pool_params = [(args, p, read_op) for p in range(args.threads)] - with Pool(processes=args.threads, initializer=_init_tasklet, initargs=(b, )) as p: - pool_results = p.map(_aio_handle_tasklet, pool_params) - - report_results(args, read_op, pool_results) diff --git a/deepspeed/ops/csrc/aio/py_test/parse_aio_stats.py b/deepspeed/ops/csrc/aio/py_test/parse_aio_stats.py deleted file mode 100644 index 1921973..0000000 --- a/deepspeed/ops/csrc/aio/py_test/parse_aio_stats.py +++ /dev/null @@ -1,154 +0,0 @@ -""" -Copyright 2020 The Microsoft DeepSpeed Team -Licensed under the MIT license. - -Functionality of swapping optimizer tensors to/from (NVMe) storage devices. -""" - -import os -import argparse -import re - -READ_SPEED = 'read_speed' -WRITE_SPEED = 'write_speed' - -PERF_METRICS = [READ_SPEED, WRITE_SPEED] - -METRIC_SEARCH = {READ_SPEED: 'E2E Read Speed', WRITE_SPEED: 'E2E Write Speed'} - - -def parse_arguments(): - parser = argparse.ArgumentParser() - - parser.add_argument('--log_dir', - type=str, - required=True, - help='Folder of statistics logs') - - parser.add_argument('--metric', - type=str, - required=True, - help='Performance metric to report: [read_speed|write_speed]') - - args = parser.parse_args() - print(f'args = {args}') - - return args - - -def extract_value(key, file): - INVALID_PREFIXES = ["ds"] - for p in INVALID_PREFIXES: - if key.startswith(p): - return key - try: - if key[0] in ['t', 'd', 'p']: - return int(key[1:]) - if key.startswith("bs"): - if key.endswith('K'): - v = key[2:].split('K') - return int(v[0]) * 1024 - elif key.endswith('M'): - v = key[2:].split('M') - return int(v[0]) * 1024 * 1024 - else: - return int(key[2:]) - except: - print(f"{file}: extract_value fails on 
{key}") - return None - - return key - - -def get_file_key(file): - f, _ = os.path.splitext(os.path.basename(file)) - fields = f.split('_') - values = [extract_value(k, file) for k in fields] - return tuple(values) - - -def get_thread_count(file): - f, _ = os.path.splitext(os.path.basename(file)) - fields = f.split('_') - for key in fields: - if key[0] == 't': - return int(key[1:]) - return 1 - - -""" -Extract performance metric from log file. -Sample file lines are: -Task Read Latency = 0.031647682189941406 sec -Task Read Speed = 12.342926020792527 GB/sec -E2E Read Latency = 0.031697988510131836 sec -E2E Read Speed = 12.323337169333062 GB/sec - -For the above sample, -metric = "read_speed" corresponds to "E2E Read Speed", and 12.32 will be returned -""" - - -def get_metric(file, metric): - thread_count = get_thread_count(file) - with open(file) as f: - for line in f.readlines(): - if line.startswith(METRIC_SEARCH[metric]): - if metric in [READ_SPEED, WRITE_SPEED]: - fields = line.split() - return float(fields[-2]) - else: - fields = line.split('=') - return float(fields[-1]) - - return None - - -def validate_args(args): - if not args.metric in PERF_METRICS: - print(f'{args.metric} is not a valid performance metrics') - return False - - if not os.path.isdir(args.log_dir): - print(f'{args.log_dir} folder is not existent') - return False - - return True - - -def get_results(log_files, metric): - results = {} - for f in log_files: - file_key = get_file_key(f) - value = get_metric(f, metric) - results[file_key] = value - - return results - - -def get_sorted_results(log_dir, metric): - log_files = [ - f for f in os.listdir(log_dir) if os.path.isfile(os.path.join(log_dir, - f)) - ] - - log_files_path = [os.path.join(log_dir, f) for f in log_files] - results = get_results(log_files_path, metric) - result_keys = list(results.keys()) - sorted_keys = sorted(result_keys) - return sorted_keys, results - - -def main(): - print("Parsing aio statistics") - args = 
parse_arguments() - - if not validate_args(args): - quit() - - sorted_keys, results = get_sorted_results(args.log_dir, args.metric) - for k in sorted_keys: - print(f'{k} = {results[k]}') - - -if __name__ == "__main__": - main() diff --git a/deepspeed/ops/csrc/aio/py_test/perf_sweep_utils.py b/deepspeed/ops/csrc/aio/py_test/perf_sweep_utils.py deleted file mode 100644 index 2fd1a4c..0000000 --- a/deepspeed/ops/csrc/aio/py_test/perf_sweep_utils.py +++ /dev/null @@ -1,8 +0,0 @@ -SCRIPT_PREFIX = '_aio_bench' -WRITE_OP_DESC = 'write' -READ_OP_DESC = 'read' -READ_IO_DIR = f'{SCRIPT_PREFIX}_{READ_OP_DESC}_io' -WRITE_IO_DIR = f'{SCRIPT_PREFIX}_{WRITE_OP_DESC}_io' -BENCH_LOG_DIR = f'{SCRIPT_PREFIX}_logs' -READ_LOG_DIR = f'{SCRIPT_PREFIX}_{READ_OP_DESC}_logs' -WRITE_LOG_DIR = f'{SCRIPT_PREFIX}_{WRITE_OP_DESC}_logs' diff --git a/deepspeed/ops/csrc/aio/py_test/run_read_sweep.sh b/deepspeed/ops/csrc/aio/py_test/run_read_sweep.sh deleted file mode 100644 index b9d7e05..0000000 --- a/deepspeed/ops/csrc/aio/py_test/run_read_sweep.sh +++ /dev/null @@ -1,75 +0,0 @@ -#!/bin/bash -if [[ $# -ne 2 ]]; then - echo "Usage: $0 " - exit 1 -fi - - -function validate_environment() -{ - validate_cmd="python ./validate_async_io.py" - eval ${validate_cmd} - res=$? - if [[ $res != 0 ]]; then - echo "Failing because environment is not properly configured" - echo "Possible fix: sudo apt-get install libaio-dev" - exit 1 - fi -} - - -validate_environment - -INPUT_FILE=$1 -if [[ ! 
-f ${INPUT_FILE} ]]; then - echo "Input file not found: ${INPUT_FILE}" - exit 1 -fi - -LOG_DIR=$2/aio_perf_sweep -RUN_SCRIPT=./test_ds_aio.py -READ_OPT="--read_file ${INPUT_FILE}" - -if [[ -d ${LOG_DIR} ]]; then - rm -f ${LOG_DIR}/* -else - mkdir -p ${LOG_DIR} -fi - -DISABLE_CACHE="sync; sudo bash -c 'echo 1 > /proc/sys/vm/drop_caches' " -SYNC="sync" - -for sub in single block; do - if [[ $sub == "single" ]]; then - sub_opt="--single_submit" - else - sub_opt="" - fi - for ov in overlap sequential; do - if [[ $ov == "overlap" ]]; then - ov_opt="--overlap_events" - else - ov_opt="" - fi - for t in 1 2 4 8; do - for p in 1 ; do - for d in 1 2 4 8 16 32; do - for bs in 128K 256K 512K 1M; do - SCHED_OPTS="${sub_opt} ${ov_opt} --handle --threads ${t}" - OPTS="--io_parallel ${p} --queue_depth ${d} --block_size ${bs}" - LOG="${LOG_DIR}/read_${sub}_${ov}_t${t}_p${p}_d${d}_bs${bs}.txt" - cmd="python ${RUN_SCRIPT} ${READ_OPT} ${OPTS} ${SCHED_OPTS} &> ${LOG}" - echo ${DISABLE_CACHE} - echo ${cmd} - echo ${SYNC} - - eval ${DISABLE_CACHE} - eval ${cmd} - eval ${SYNC} - sleep 2 - done - done - done - done - done -done diff --git a/deepspeed/ops/csrc/aio/py_test/run_write_sweep.sh b/deepspeed/ops/csrc/aio/py_test/run_write_sweep.sh deleted file mode 100644 index 99f2113..0000000 --- a/deepspeed/ops/csrc/aio/py_test/run_write_sweep.sh +++ /dev/null @@ -1,82 +0,0 @@ -#!/bin/bash -function prep_folder() -{ - folder=$1 - if [[ -d ${folder} ]]; then - rm -f ${folder}/* - else - mkdir -p ${folder} - fi -} - -function validate_environment() -{ - validate_cmd="python ./validate_async_io.py" - eval ${validate_cmd} - res=$? 
- if [[ $res != 0 ]]; then - echo "Failing because environment is not properly configured" - echo "Possible fix: sudo apt-get install libaio-dev" - exit 1 - fi -} - - - -validate_environment - -if [[ $# -ne 3 ]]; then - echo "Usage: $0 " - exit 1 -fi - -SIZE="$1M" -WRITE_DIR=$2 -LOG_DIR=$3/aio_perf_sweep - -OUTPUT_FILE=${WRITE_DIR}/ds_aio_write_${SIZE}B.pt -WRITE_OPT="--write_file ${OUTPUT_FILE} --write_size ${SIZE}" - - -prep_folder ${WRITE_DIR} -prep_folder ${LOG_DIR} - -RUN_SCRIPT=./test_ds_aio.py - -DISABLE_CACHE="sync; sudo bash -c 'echo 1 > /proc/sys/vm/drop_caches' " -SYNC="sync" - -for sub in single block; do - if [[ $sub == "single" ]]; then - sub_opt="--single_submit" - else - sub_opt="" - fi - for ov in overlap sequential; do - if [[ $ov == "overlap" ]]; then - ov_opt="--overlap_events" - else - ov_opt="" - fi - for t in 1 2 4 8; do - for p in 1; do - for d in 1 2 4 8 16 32; do - for bs in 128K 256K 512K 1M; do - SCHED_OPTS="${sub_opt} ${ov_opt} --handle --threads ${t}" - OPTS="--io_parallel ${p} --queue_depth ${d} --block_size ${bs}" - LOG="${LOG_DIR}/write_${sub}_${ov}_t${t}_p${p}_d${d}_bs${bs}.txt" - cmd="python ${RUN_SCRIPT} ${WRITE_OPT} ${OPTS} ${SCHED_OPTS} &> ${LOG}" - echo ${DISABLE_CACHE} - echo ${cmd} - echo ${SYNC} - - eval ${DISABLE_CACHE} - eval ${cmd} - eval ${SYNC} - sleep 2 - done - done - done - done - done -done diff --git a/deepspeed/ops/csrc/aio/py_test/single_process_config.json b/deepspeed/ops/csrc/aio/py_test/single_process_config.json deleted file mode 100644 index 275c541..0000000 --- a/deepspeed/ops/csrc/aio/py_test/single_process_config.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "block_size": [ - "128K", - "256K", - "1M" - ], - "queue_depth": [ - 4, - 16, - 32 - ], - "io_parallel": [ - 1, - 2, - 4, - 8 - ], - "single_submit": [ - true, - false - ], - "overlap_events": [ - true, - false - ], - "threads": [ - 1 - ] -} diff --git a/deepspeed/ops/csrc/aio/py_test/test_ds_aio.py b/deepspeed/ops/csrc/aio/py_test/test_ds_aio.py deleted 
file mode 100644 index f97d3e6..0000000 --- a/deepspeed/ops/csrc/aio/py_test/test_ds_aio.py +++ /dev/null @@ -1,101 +0,0 @@ -""" -Copyright 2020 The Microsoft DeepSpeed Team -Licensed under the MIT license. - -Functionality of swapping optimizer tensors to/from (NVMe) storage devices. -""" - -import os -import torch -import argparse -import time -import sys -from multiprocessing import Pool -import multiprocessing as mp -from ds_aio_basic import aio_basic_multiprocessing -from ds_aio_handle import aio_handle_multiprocessing -from test_ds_aio_utils import refine_args - - -def parse_arguments(): - parser = argparse.ArgumentParser() - - parser.add_argument('--read_file', type=str, default=None, help='Read file.') - - parser.add_argument('--write_file', type=str, default=None, help='Write file.') - - parser.add_argument('--write_size', - type=str, - default=None, - help='Number of bytes to write.') - - parser.add_argument('--block_size', type=str, default='1M', help='I/O block size.') - - parser.add_argument('--queue_depth', type=int, default=32, help='I/O queue depth.') - - parser.add_argument('--threads', - type=int, - default=1, - help='Thread parallelism count.') - - parser.add_argument( - '--single_submit', - action='store_true', - help= - 'Submit I/O requests in singles (default is submit queue_depth amount at once.).' 
- ) - - parser.add_argument('--overlap_events', - action='store_true', - help='Overlap I/O submission and completion requests.') - - parser.add_argument('--validate', - action='store_true', - help='Perform validation in library.') - - parser.add_argument('--handle', action='store_true', help='Use AIO handle.') - - parser.add_argument('--loops', - type=int, - default=1, - help='Count of operation repetitions') - - parser.add_argument('--io_parallel', - type=int, - default=None, - help='Per iop parallelism') - - parser.add_argument('--gpu', action='store_true', help='Use GPU memory') - - args = parser.parse_args() - print(f'args = {args}') - return args - - -def validate_args(args): - if args.read_file and not os.path.isfile(args.read_file): - print(f'args validation error: {args.read_file} not found') - return False - - return True - - -def main(): - print(f'Testing deepspeed_aio python frontend') - - args = parse_arguments() - refine_args(args) - if not validate_args(args): - quit() - - mp.set_start_method('spawn') - multiprocess_function = aio_handle_multiprocessing if args.handle else aio_basic_multiprocessing - if args.read_file: - multiprocess_function(args, True) - - if args.write_file: - multiprocess_function(args, False) - - -if __name__ == "__main__": - main() diff --git a/deepspeed/ops/csrc/aio/py_test/test_ds_aio_utils.py b/deepspeed/ops/csrc/aio/py_test/test_ds_aio_utils.py deleted file mode 100644 index c68dfdd..0000000 --- a/deepspeed/ops/csrc/aio/py_test/test_ds_aio_utils.py +++ /dev/null @@ -1,59 +0,0 @@ -""" -Copyright 2020 The Microsoft DeepSpeed Team -Licensed under the MIT license. - -Functionality of swapping optimizer tensors to/from (NVMe) storage devices. 
-""" - -import os - -BYTES_PER_GB = 1024**3 -LOG_TIDS = [0] - - -def task_log(tid, msg): - if tid in LOG_TIDS: - print(f'tid {tid}: {msg}') - - -def task_barrier(barrier, num_parties): - assert barrier.parties == num_parties - barrier.wait() - assert barrier.broken == False - - -def report_results(args, read_op, pool_results): - #print(f'pool_results = {pool_results}') - io_string = 'Read' if read_op else 'Write' - if None in pool_results: - print(f'Failure in one of {args.threads} {io_string} processes') - return - - total_bytes = sum([num_bytes for _, _, num_bytes in pool_results]) - - task_latency_sec = max([sec for _, sec, _ in pool_results]) - task_speed_GB = total_bytes / task_latency_sec / BYTES_PER_GB - print(f'Task {io_string} Latency = {task_latency_sec} sec') - print(f'Task {io_string} Speed = {task_speed_GB} GB/sec') - - e2e_latency_sec = max([sec for sec, _, _ in pool_results]) - e2e_speed_GB = total_bytes / e2e_latency_sec / BYTES_PER_GB - print(f'E2E {io_string} Latency = {e2e_latency_sec} sec') - print(f'E2E {io_string} Speed = {e2e_speed_GB} GB/sec') - - -def refine_integer_value(value): - unit_dict = {'K': 1024, 'M': 1024**2, 'G': 1024**3} - - if value[-1] in list(unit_dict.keys()): - int_value = int(value[:-1]) * unit_dict[value[-1]] - return int_value - return int(value) - - -def refine_args(args): - if args.write_size and type(args.write_size) == str: - args.write_size = refine_integer_value(args.write_size) - - if args.block_size and type(args.block_size) == str: - args.block_size = refine_integer_value(args.block_size) diff --git a/deepspeed/ops/csrc/aio/py_test/validate_async_io.py b/deepspeed/ops/csrc/aio/py_test/validate_async_io.py deleted file mode 100644 index ceae84c..0000000 --- a/deepspeed/ops/csrc/aio/py_test/validate_async_io.py +++ /dev/null @@ -1,9 +0,0 @@ -""" -Copyright 2021 The Microsoft DeepSpeed Team -Licensed under the MIT license. - -Functionality of swapping optimizer tensors to/from (NVMe) storage devices. 
-""" -import deepspeed -from deepspeed.ops.aio import AsyncIOBuilder -assert AsyncIOBuilder().is_compatible() diff --git a/deepspeed/ops/csrc/common/custom_cuda_kernel.cu b/deepspeed/ops/csrc/common/custom_cuda_kernel.cu deleted file mode 100644 index f7a2b5d..0000000 --- a/deepspeed/ops/csrc/common/custom_cuda_kernel.cu +++ /dev/null @@ -1,39 +0,0 @@ -#include "custom_cuda_layers.h" - -__global__ void param_update_kernel(const float* input, __half* output, int size) -{ - int id = blockIdx.x * blockDim.x + threadIdx.x; - - if (id < size) { output[id] = (__half)input[id]; } -} - -void launch_param_update(const float* input, __half* output, int size, cudaStream_t stream) -{ - int threads = 1024; - - dim3 grid_dim((size - 1) / threads + 1); - dim3 block_dim(threads); - - param_update_kernel<<>>(input, output, size); -} - -__global__ void param_update_kernel_half(const float* input, __half* output, int size) -{ - int id = blockIdx.x * blockDim.x + threadIdx.x; - __half2* output_cast = reinterpret_cast<__half2*>(output); - if (id < size) { - float input_f = input[id]; - __half2* input_h = reinterpret_cast<__half2*>(&input_f); - output_cast[id] = *input_h; - } -} - -void launch_param_update_half(const float* input, __half* output, int size, cudaStream_t stream) -{ - int threads = 1024; - size /= 2; - dim3 grid_dim((size - 1) / threads + 1); - dim3 block_dim(threads); - - param_update_kernel_half<<>>(input, output, size); -} diff --git a/deepspeed/ops/csrc/common/custom_hip_kernel.hip b/deepspeed/ops/csrc/common/custom_hip_kernel.hip deleted file mode 100644 index 119647c..0000000 --- a/deepspeed/ops/csrc/common/custom_hip_kernel.hip +++ /dev/null @@ -1,41 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
-#include "hip/hip_runtime.h" -#include "custom_hip_layers.h" - -__global__ void param_update_kernel(const float* input, __half* output, int size) -{ - int id = blockIdx.x * blockDim.x + threadIdx.x; - - if (id < size) { output[id] = (__half)input[id]; } -} - -void launch_param_update(const float* input, __half* output, int size, hipStream_t stream) -{ - int threads = 1024; - - dim3 grid_dim((size - 1) / threads + 1); - dim3 block_dim(threads); - - hipLaunchKernelGGL(( param_update_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, input, output, size); -} - -__global__ void param_update_kernel_half(const float* input, __half* output, int size) -{ - int id = blockIdx.x * blockDim.x + threadIdx.x; - __half2* output_cast = reinterpret_cast<__half2*>(output); - if (id < size) { - float input_f = input[id]; - __half2* input_h = reinterpret_cast<__half2*>(&input_f); - output_cast[id] = *input_h; - } -} - -void launch_param_update_half(const float* input, __half* output, int size, hipStream_t stream) -{ - int threads = 1024; - size /= 2; - dim3 grid_dim((size - 1) / threads + 1); - dim3 block_dim(threads); - - hipLaunchKernelGGL(( param_update_kernel_half), dim3(grid_dim), dim3(block_dim), 0, stream, input, output, size); -} diff --git a/deepspeed/ops/csrc/includes/StopWatch.h b/deepspeed/ops/csrc/includes/StopWatch.h deleted file mode 100644 index 9bf0401..0000000 --- a/deepspeed/ops/csrc/includes/StopWatch.h +++ /dev/null @@ -1,98 +0,0 @@ -#pragma once -#ifdef _WIN32 -#include -#else -#include -#endif - -#ifdef _WIN32 - -class Stopwatch { -private: - double m_total_time; - LARGE_INTEGER m_start_time; - -public: - Stopwatch() { m_total_time = 0.0; } - - ~Stopwatch() {} - - void Reset() { m_total_time = 0.0; } - - void Start() { QueryPerformanceCounter(&m_start_time); } - - void Restart() - { - m_total_time = 0.0; - QueryPerformanceCounter(&m_start_time); - } - - void Stop() - { - LARGE_INTEGER frequency; - LARGE_INTEGER stop_time; - 
QueryPerformanceFrequency(&frequency); - QueryPerformanceCounter(&stop_time); - m_total_time += - ((double)(stop_time.QuadPart - m_start_time.QuadPart) / (double)frequency.QuadPart); - } - - double GetTimeInSeconds() { return m_total_time; } -}; - -#else - -class Stopwatch { -private: - double m_total_time; - struct timespec m_start_time; - bool m_is_started; - -public: - Stopwatch() - { - m_total_time = 0.0; - m_is_started = false; - } - - ~Stopwatch() {} - - void Reset() { m_total_time = 0.0; } - - void Start() - { - clock_gettime(CLOCK_MONOTONIC, &m_start_time); - m_is_started = true; - } - - void Restart() - { - m_total_time = 0.0; - clock_gettime(CLOCK_MONOTONIC, &m_start_time); - m_is_started = true; - } - - void Stop() - { - if (m_is_started) { - m_is_started = false; - - struct timespec end_time; - clock_gettime(CLOCK_MONOTONIC, &end_time); - - m_total_time += (double)(end_time.tv_sec - m_start_time.tv_sec) + - (double)(end_time.tv_nsec - m_start_time.tv_nsec) / 1e9; - } - } - - double GetTimeInSeconds() - { - if (m_is_started) { - Stop(); - Start(); - } - return m_total_time; - } -}; - -#endif diff --git a/deepspeed/ops/csrc/includes/Timer.h b/deepspeed/ops/csrc/includes/Timer.h deleted file mode 100644 index efc7fff..0000000 --- a/deepspeed/ops/csrc/includes/Timer.h +++ /dev/null @@ -1,47 +0,0 @@ - -#ifndef __TIMER_H__ -#define __TIMER_H__ - -#include -#include -#include "cuda.h" - -class GPUTimer { - cudaEvent_t start, stop; - -public: - GPUTimer() - { - cudaEventCreate(&start); - cudaEventCreate(&stop); - } - ~GPUTimer() - { - cudaEventDestroy(start); - cudaEventDestroy(stop); - } - inline void Record() { cudaEventRecord(start); } - inline void Elapsed(float& time_elapsed) - { - cudaEventRecord(stop); - cudaEventSynchronize(stop); - cudaEventElapsedTime(&time_elapsed, start, stop); - } -}; - -class CPUTimer { - std::chrono::high_resolution_clock::time_point start; - -public: - CPUTimer() : start(std::chrono::high_resolution_clock::now()) {} - inline 
void Reset() { start = std::chrono::high_resolution_clock::now(); } - inline float Elapsed() - { - auto temp = start; - start = std::chrono::high_resolution_clock::now(); - return (float)(std::chrono::duration_cast(start - temp).count() / - 1e3); - } -}; - -#endif diff --git a/deepspeed/ops/csrc/includes/Timer_hip.h b/deepspeed/ops/csrc/includes/Timer_hip.h deleted file mode 100644 index 2ee3d6f..0000000 --- a/deepspeed/ops/csrc/includes/Timer_hip.h +++ /dev/null @@ -1,48 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! - -#ifndef __TIMER_H__ -#define __TIMER_H__ - -#include -#include -#include "hip/hip_runtime.h" - -class GPUTimer { - hipEvent_t start, stop; - -public: - GPUTimer() - { - hipEventCreate(&start); - hipEventCreate(&stop); - } - ~GPUTimer() - { - hipEventDestroy(start); - hipEventDestroy(stop); - } - inline void Record() { hipEventRecord(start); } - inline void Elapsed(float& time_elapsed) - { - hipEventRecord(stop); - hipEventSynchronize(stop); - hipEventElapsedTime(&time_elapsed, start, stop); - } -}; - -class CPUTimer { - std::chrono::high_resolution_clock::time_point start; - -public: - CPUTimer() : start(std::chrono::high_resolution_clock::now()) {} - inline void Reset() { start = std::chrono::high_resolution_clock::now(); } - inline float Elapsed() - { - auto temp = start; - start = std::chrono::high_resolution_clock::now(); - return (float)(std::chrono::duration_cast(start - temp).count() / - 1e3); - } -}; - -#endif diff --git a/deepspeed/ops/csrc/includes/compat.h b/deepspeed/ops/csrc/includes/compat.h deleted file mode 100644 index 86f84a8..0000000 --- a/deepspeed/ops/csrc/includes/compat.h +++ /dev/null @@ -1,14 +0,0 @@ -/* Copyright 2020 The Microsoft DeepSpeed Team - Copyright NVIDIA/apex - This file is adapted from fused adam in NVIDIA/apex, commit a109f85 -*/ - -#ifndef TORCH_CHECK -#define TORCH_CHECK AT_CHECK -#endif - -#ifdef VERSION_GE_1_3 -#define DATA_PTR data_ptr -#else -#define DATA_PTR data -#endif diff --git 
a/deepspeed/ops/csrc/includes/context.h b/deepspeed/ops/csrc/includes/context.h deleted file mode 100644 index 5f04241..0000000 --- a/deepspeed/ops/csrc/includes/context.h +++ /dev/null @@ -1,171 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include "cublas_v2.h" -#include "cuda.h" -#include "curand.h" -#include "gemm_test.h" - -#define WARP_SIZE 32 - -#define CUDA_CHECK(callstr) \ - { \ - cudaError_t error_code = callstr; \ - if (error_code != cudaSuccess) { \ - std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__; \ - assert(0); \ - } \ - } - -#define CUDA_1D_KERNEL_LOOP(i, n) \ - for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) - -#define CUDA_2D_KERNEL_LOOP(i, n, j, m) \ - for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) \ - for (size_t j = blockIdx.y * blockDim.y + threadIdx.y; j < (m); j += blockDim.y * gridDim.y) - -#define DS_CUDA_NUM_THREADS 512 -#define DS_MAXIMUM_NUM_BLOCKS 262144 - -inline int DS_GET_BLOCKS(const int N) -{ - return (std::max)( - (std::min)((N + DS_CUDA_NUM_THREADS - 1) / DS_CUDA_NUM_THREADS, DS_MAXIMUM_NUM_BLOCKS), - // Use at least 1 block, since CUDA does not allow empty block - 1); -} - -class Context { -public: - Context() : _workspace(nullptr), _seed(42), _curr_offset(0) - { - curandCreateGenerator(&_gen, CURAND_RNG_PSEUDO_DEFAULT); - curandSetPseudoRandomGeneratorSeed(_gen, 123); - if (cublasCreate(&_cublasHandle) != CUBLAS_STATUS_SUCCESS) { - auto message = std::string("Fail to create cublas handle."); - std::cerr << message << std::endl; - throw std::runtime_error(message); - } - } - - virtual ~Context() - { - cublasDestroy(_cublasHandle); - cudaFree(_workspace); - } - - static Context& Instance() - { - static Context _ctx; - return _ctx; - } - - void SetWorkSpace(void* workspace) - { - if (!workspace) { throw std::runtime_error("Workspace is null."); } - _workspace = workspace; - } - 
- void* GetWorkSpace() { return _workspace; } - - curandGenerator_t& GetRandGenerator() { return _gen; } - - cudaStream_t GetCurrentStream() - { - // get current pytorch stream. - cudaStream_t stream = at::cuda::getCurrentCUDAStream(); - return stream; - } - - cudaStream_t GetNewStream() { return at::cuda::getStreamFromPool(); } - - cublasHandle_t GetCublasHandle() { return _cublasHandle; } - - std::pair IncrementOffset(uint64_t offset_inc) - { - uint64_t offset = _curr_offset; - _curr_offset += offset_inc; - return std::pair(_seed, offset); - } - - void SetSeed(uint64_t new_seed) { _seed = new_seed; } - - void TestGemmFP16(bool test_gemm, int batch_size, int seq_len, int head_num, int size_per_head) - { - // avoid rerun. - if (_gemm_algos.size() > 0) return; - - if (test_gemm) { - cublasHandle_t handle = GetCublasHandle(); - - std::unique_ptr> test_qkv_fw( - new GemmTest<__half>(batch_size * seq_len, // M - head_num * size_per_head, // N - head_num * size_per_head, // K - CUBLAS_OP_T, - CUBLAS_OP_N, - handle)); - - std::unique_ptr> test_inter( - new GemmTest<__half>(batch_size * seq_len, // M - 4 * head_num * size_per_head, // N - head_num * size_per_head, // K - CUBLAS_OP_T, - CUBLAS_OP_N, - handle)); - - std::unique_ptr> test_output( - new GemmTest<__half>(batch_size * seq_len, // M - head_num * size_per_head, // N - 4 * head_num * size_per_head, // K - CUBLAS_OP_T, - CUBLAS_OP_N, - handle)); - - std::unique_ptr> test_attn_scores( - new StridedGemmTest<__half>(batch_size * head_num, // batch - seq_len, // M - seq_len, // N - size_per_head, // K - CUBLAS_OP_T, - CUBLAS_OP_N, - handle)); - - std::unique_ptr> test_attn_context( - new StridedGemmTest<__half>(batch_size * head_num, // batch - size_per_head, // M - seq_len, // N - seq_len, // K - CUBLAS_OP_N, - CUBLAS_OP_N, - handle)); - - _gemm_algos.push_back(test_qkv_fw->TestAlgo(100)); - _gemm_algos.push_back(test_inter->TestAlgo(100)); - _gemm_algos.push_back(test_output->TestAlgo(100)); - 
_gemm_algos.push_back(test_attn_scores->TestAlgo(100)); - _gemm_algos.push_back(test_attn_context->TestAlgo(100)); - } else { - // Use default algo. - _gemm_algos.push_back(std::array({99, 99, 99})); - _gemm_algos.push_back(std::array({99, 99, 99})); - _gemm_algos.push_back(std::array({99, 99, 99})); - _gemm_algos.push_back(std::array({99, 99, 99})); - _gemm_algos.push_back(std::array({99, 99, 99})); - } - } - - const std::vector>& GetGemmAlgos() const { return _gemm_algos; } - -private: - curandGenerator_t _gen; - cublasHandle_t _cublasHandle; - void* _workspace; - uint64_t _seed; - uint64_t _curr_offset; - std::vector> _gemm_algos; -}; diff --git a/deepspeed/ops/csrc/includes/context_hip.h b/deepspeed/ops/csrc/includes/context_hip.h deleted file mode 100644 index 258b2bc..0000000 --- a/deepspeed/ops/csrc/includes/context_hip.h +++ /dev/null @@ -1,172 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! -#pragma once - -#include -#include -#include -#include -#include -#include "rocblas.h" -#include "hip/hip_runtime.h" -#include "hiprand/hiprand.h" -#include "gemm_test_hip.h" - -#define WARP_SIZE 32 - -#define CUDA_CHECK(callstr) \ - { \ - hipError_t error_code = callstr; \ - if (error_code != hipSuccess) { \ - std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__; \ - assert(0); \ - } \ - } - -#define CUDA_1D_KERNEL_LOOP(i, n) \ - for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) - -#define CUDA_2D_KERNEL_LOOP(i, n, j, m) \ - for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) \ - for (size_t j = blockIdx.y * blockDim.y + threadIdx.y; j < (m); j += blockDim.y * gridDim.y) - -#define DS_CUDA_NUM_THREADS 512 -#define DS_MAXIMUM_NUM_BLOCKS 262144 - -inline int DS_GET_BLOCKS(const int N) -{ - return (std::max)( - (std::min)((N + DS_CUDA_NUM_THREADS - 1) / DS_CUDA_NUM_THREADS, DS_MAXIMUM_NUM_BLOCKS), - // Use at least 1 block, since CUDA does 
not allow empty block - 1); -} - -class Context { -public: - Context() : _workspace(nullptr), _seed(42), _curr_offset(0) - { - hiprandCreateGenerator(&_gen, HIPRAND_RNG_PSEUDO_DEFAULT); - hiprandSetPseudoRandomGeneratorSeed(_gen, 123); - if (rocblas_create_handle(&_cublasHandle) != rocblas_status_success) { - auto message = std::string("Fail to create cublas handle."); - std::cerr << message << std::endl; - throw std::runtime_error(message); - } - } - - virtual ~Context() - { - rocblas_destroy_handle(_cublasHandle); - hipFree(_workspace); - } - - static Context& Instance() - { - static Context _ctx; - return _ctx; - } - - void SetWorkSpace(void* workspace) - { - if (!workspace) { throw std::runtime_error("Workspace is null."); } - _workspace = workspace; - } - - void* GetWorkSpace() { return _workspace; } - - hiprandGenerator_t& GetRandGenerator() { return _gen; } - - hipStream_t GetCurrentStream() - { - // get current pytorch stream. - hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); - return stream; - } - - hipStream_t GetNewStream() { return at::hip::getStreamFromPoolMasqueradingAsCUDA(); } - - rocblas_handle GetCublasHandle() { return _cublasHandle; } - - std::pair IncrementOffset(uint64_t offset_inc) - { - uint64_t offset = _curr_offset; - _curr_offset += offset_inc; - return std::pair(_seed, offset); - } - - void SetSeed(uint64_t new_seed) { _seed = new_seed; } - - void TestGemmFP16(bool test_gemm, int batch_size, int seq_len, int head_num, int size_per_head) - { - // avoid rerun. 
- if (_gemm_algos.size() > 0) return; - - if (test_gemm) { - rocblas_handle handle = GetCublasHandle(); - - std::unique_ptr> test_qkv_fw( - new GemmTest<__half>(batch_size * seq_len, // M - head_num * size_per_head, // N - head_num * size_per_head, // K - rocblas_operation_transpose, - rocblas_operation_none, - handle)); - - std::unique_ptr> test_inter( - new GemmTest<__half>(batch_size * seq_len, // M - 4 * head_num * size_per_head, // N - head_num * size_per_head, // K - rocblas_operation_transpose, - rocblas_operation_none, - handle)); - - std::unique_ptr> test_output( - new GemmTest<__half>(batch_size * seq_len, // M - head_num * size_per_head, // N - 4 * head_num * size_per_head, // K - rocblas_operation_transpose, - rocblas_operation_none, - handle)); - - std::unique_ptr> test_attn_scores( - new StridedGemmTest<__half>(batch_size * head_num, // batch - seq_len, // M - seq_len, // N - size_per_head, // K - rocblas_operation_transpose, - rocblas_operation_none, - handle)); - - std::unique_ptr> test_attn_context( - new StridedGemmTest<__half>(batch_size * head_num, // batch - size_per_head, // M - seq_len, // N - seq_len, // K - rocblas_operation_none, - rocblas_operation_none, - handle)); - - _gemm_algos.push_back(test_qkv_fw->TestAlgo(100)); - _gemm_algos.push_back(test_inter->TestAlgo(100)); - _gemm_algos.push_back(test_output->TestAlgo(100)); - _gemm_algos.push_back(test_attn_scores->TestAlgo(100)); - _gemm_algos.push_back(test_attn_context->TestAlgo(100)); - } else { - // Use default algo. 
- _gemm_algos.push_back(std::array({99, 99, 99})); - _gemm_algos.push_back(std::array({99, 99, 99})); - _gemm_algos.push_back(std::array({99, 99, 99})); - _gemm_algos.push_back(std::array({99, 99, 99})); - _gemm_algos.push_back(std::array({99, 99, 99})); - } - } - - const std::vector>& GetGemmAlgos() const { return _gemm_algos; } - -private: - hiprandGenerator_t _gen; - rocblas_handle _cublasHandle; - void* _workspace; - uint64_t _seed; - uint64_t _curr_offset; - std::vector> _gemm_algos; -}; diff --git a/deepspeed/ops/csrc/includes/cpu_adagrad.h b/deepspeed/ops/csrc/includes/cpu_adagrad.h deleted file mode 100644 index 6c21b7c..0000000 --- a/deepspeed/ops/csrc/includes/cpu_adagrad.h +++ /dev/null @@ -1,150 +0,0 @@ -#pragma once - -#define NOMINMAX // Windows idiosyncrasy - // https://stackoverflow.com/questions/4913922/possible-problems-with-nominmax-on-visual-c - -#include -#include -#include -#include -#include "cuda.h" -#include "custom_cuda_layers.h" -#include "simd.h" - -#define STEP(SPAN) \ - void Step_##SPAN(float* _params, \ - float* grads, \ - float* _exp_avg_sq, \ - size_t _param_size, \ - __half* dev_param = nullptr, \ - bool half_precision = false); - -class Adagrad_Optimizer { -public: - Adagrad_Optimizer(float alpha = 1e-2, float eps = 1e-8, float weight_decay = 0) - : _alpha(alpha), _eps(eps), _weight_decay(weight_decay), _buf_index(false) - { - cudaMallocHost((void**)_doubled_buffer, TILE * sizeof(float)); - cudaMallocHost((void**)(_doubled_buffer + 1), TILE * sizeof(float)); - - _streams[0] = Context::Instance().GetCurrentStream(); - _streams[1] = Context::Instance().GetNewStream(); - } - ~Adagrad_Optimizer() - { - cudaFreeHost(_doubled_buffer[0]); - cudaFreeHost(_doubled_buffer[1]); - } -#if defined(__AVX512__) or defined(__AVX256__) - template - void Step_AVX(size_t* rounded_size, - float* _params, - float* grads, - float* _exp_avg_sq, - size_t param_size, - __half* dev_param = nullptr, - bool half_precision = false); -#endif - STEP(1) - STEP(4) 
- STEP(8) - inline void SynchronizeStreams() - { - for (int i = 0; i < 2; i++) cudaStreamSynchronize(_streams[i]); - } - inline void IncrementStep(size_t step) - { - _step++; - if (_step != step) { _step = step; } - } - inline void update_state(float lr, float epsilon, float weight_decay) - { - _alpha = lr; - _eps = epsilon; - _weight_decay = weight_decay; - } - -private: - float _alpha; - float _eps; - float _weight_decay; - - float _betta1_t; - float _betta2_t; - size_t _step; - - float* _doubled_buffer[2]; - bool _buf_index; - - cudaStream_t _streams[2]; -}; - -#if defined(__AVX512__) or defined(__AVX256__) -template -void Adagrad_Optimizer::Step_AVX(size_t* rounded_size, - float* _params, - float* grads, - float* _exp_avg_sq, - size_t _param_size, - __half* dev_params, - bool half_precision) -{ - size_t new_rounded_size = 0; - AVX_Data eps_4; - eps_4.data = SIMD_SET(_eps); - - float step_size = -1 * _alpha; - AVX_Data step_size_4; - step_size_4.data = SIMD_SET(step_size); - - AVX_Data weight_decay4; - if (_weight_decay > 0) weight_decay4.data = SIMD_SET(_weight_decay); - new_rounded_size = ROUND_DOWN(_param_size, SIMD_WIDTH * span); - for (size_t t = 0; t < new_rounded_size; t += TILE) { - size_t copy_size = TILE; - if ((t + TILE) > new_rounded_size) copy_size = new_rounded_size - t; - size_t offset = copy_size + t; - if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); } -#pragma omp parallel for - for (size_t i = t; i < offset; i += SIMD_WIDTH * span) { - AVX_Data grad_4[span]; - simd_load(grad_4, grads + i, half_precision); - - AVX_Data momentum_4[span]; - simd_load(momentum_4, grads + i, false); - - AVX_Data variance_4[span]; - simd_load(variance_4, _exp_avg_sq + i, false); - - AVX_Data param_4[span]; - simd_load(param_4, _params + i, half_precision); - - if (_weight_decay > 0) { simd_fma(grad_4, param_4, weight_decay4, grad_4); } - - simd_fma(variance_4, grad_4, grad_4, variance_4); - simd_sqrt(grad_4, variance_4); - simd_add(grad_4, grad_4, 
eps_4); - simd_div(grad_4, momentum_4, grad_4); - simd_fma(param_4, grad_4, step_size_4, param_4); - - simd_store(_params + i, param_4, half_precision); - if (dev_params) { - simd_store(_doubled_buffer[_buf_index] + (i - t), param_4, half_precision); - } - simd_store(_exp_avg_sq + i, variance_4, false); - } - - if (dev_params) { - if (half_precision) - launch_param_update_half( - _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]); - else - launch_param_update( - _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]); - - _buf_index = !_buf_index; - } - } - *rounded_size = new_rounded_size; -} -#endif diff --git a/deepspeed/ops/csrc/includes/cpu_adagrad_hip.h b/deepspeed/ops/csrc/includes/cpu_adagrad_hip.h deleted file mode 100644 index cb012a1..0000000 --- a/deepspeed/ops/csrc/includes/cpu_adagrad_hip.h +++ /dev/null @@ -1,151 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! -#pragma once - -#define NOMINMAX // Windows idiosyncrasy - // https://stackoverflow.com/questions/4913922/possible-problems-with-nominmax-on-visual-c - -#include -#include -#include -#include -#include "hip/hip_runtime.h" -#include "custom_hip_layers.h" -#include "simd.h" - -#define STEP(SPAN) \ - void Step_##SPAN(float* _params, \ - float* grads, \ - float* _exp_avg_sq, \ - size_t _param_size, \ - __half* dev_param = nullptr, \ - bool half_precision = false); - -class Adagrad_Optimizer { -public: - Adagrad_Optimizer(float alpha = 1e-2, float eps = 1e-8, float weight_decay = 0) - : _alpha(alpha), _eps(eps), _weight_decay(weight_decay), _buf_index(false) - { - hipHostMalloc((void**)_doubled_buffer, TILE * sizeof(float)); - hipHostMalloc((void**)(_doubled_buffer + 1), TILE * sizeof(float)); - - _streams[0] = Context::Instance().GetCurrentStream(); - _streams[1] = Context::Instance().GetNewStream(); - } - ~Adagrad_Optimizer() - { - hipHostFree(_doubled_buffer[0]); - hipHostFree(_doubled_buffer[1]); - } -#if defined(__AVX512__) 
or defined(__AVX256__) - template - void Step_AVX(size_t* rounded_size, - float* _params, - float* grads, - float* _exp_avg_sq, - size_t param_size, - __half* dev_param = nullptr, - bool half_precision = false); -#endif - STEP(1) - STEP(4) - STEP(8) - inline void SynchronizeStreams() - { - for (int i = 0; i < 2; i++) hipStreamSynchronize(_streams[i]); - } - inline void IncrementStep(size_t step) - { - _step++; - if (_step != step) { _step = step; } - } - inline void update_state(float lr, float epsilon, float weight_decay) - { - _alpha = lr; - _eps = epsilon; - _weight_decay = weight_decay; - } - -private: - float _alpha; - float _eps; - float _weight_decay; - - float _betta1_t; - float _betta2_t; - size_t _step; - - float* _doubled_buffer[2]; - bool _buf_index; - - hipStream_t _streams[2]; -}; - -#if defined(__AVX512__) or defined(__AVX256__) -template -void Adagrad_Optimizer::Step_AVX(size_t* rounded_size, - float* _params, - float* grads, - float* _exp_avg_sq, - size_t _param_size, - __half* dev_params, - bool half_precision) -{ - size_t new_rounded_size = 0; - AVX_Data eps_4; - eps_4.data = SIMD_SET(_eps); - - float step_size = -1 * _alpha; - AVX_Data step_size_4; - step_size_4.data = SIMD_SET(step_size); - - AVX_Data weight_decay4; - if (_weight_decay > 0) weight_decay4.data = SIMD_SET(_weight_decay); - new_rounded_size = ROUND_DOWN(_param_size, SIMD_WIDTH * span); - for (size_t t = 0; t < new_rounded_size; t += TILE) { - size_t copy_size = TILE; - if ((t + TILE) > new_rounded_size) copy_size = new_rounded_size - t; - size_t offset = copy_size + t; - if ((t / TILE) >= 2) { hipStreamSynchronize(_streams[_buf_index]); } -#pragma omp parallel for - for (size_t i = t; i < offset; i += SIMD_WIDTH * span) { - AVX_Data grad_4[span]; - simd_load(grad_4, grads + i, half_precision); - - AVX_Data momentum_4[span]; - simd_load(momentum_4, grads + i, false); - - AVX_Data variance_4[span]; - simd_load(variance_4, _exp_avg_sq + i, false); - - AVX_Data param_4[span]; - 
simd_load(param_4, _params + i, half_precision); - - if (_weight_decay > 0) { simd_fma(grad_4, param_4, weight_decay4, grad_4); } - - simd_fma(variance_4, grad_4, grad_4, variance_4); - simd_sqrt(grad_4, variance_4); - simd_add(grad_4, grad_4, eps_4); - simd_div(grad_4, momentum_4, grad_4); - simd_fma(param_4, grad_4, step_size_4, param_4); - - simd_store(_params + i, param_4, half_precision); - if (dev_params) { - simd_store(_doubled_buffer[_buf_index] + (i - t), param_4, half_precision); - } - simd_store(_exp_avg_sq + i, variance_4, false); - } - - if (dev_params) { - if (half_precision) - launch_param_update_half( - _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]); - else - launch_param_update( - _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]); - - _buf_index = !_buf_index; - } - } - *rounded_size = new_rounded_size; -} -#endif diff --git a/deepspeed/ops/csrc/includes/cpu_adam.h b/deepspeed/ops/csrc/includes/cpu_adam.h deleted file mode 100644 index 09677c6..0000000 --- a/deepspeed/ops/csrc/includes/cpu_adam.h +++ /dev/null @@ -1,225 +0,0 @@ -#pragma once - -#define NOMINMAX // Windows idiosyncrasy - // https://stackoverflow.com/questions/4913922/possible-problems-with-nominmax-on-visual-c - -#include -#include -#include -#include -#include "cuda.h" -#include "custom_cuda_layers.h" -#include "simd.h" - -#define STEP(SPAN) \ - void Step_##SPAN(float* _params, \ - float* grads, \ - float* _exp_avg, \ - float* _exp_avg_sq, \ - size_t _param_size, \ - __half* dev_param = nullptr, \ - bool half_precision = false); - -class Adam_Optimizer { -public: - Adam_Optimizer(float alpha = 1e-3, - float betta1 = 0.9, - float betta2 = 0.999, - float eps = 1e-8, - float weight_decay = 0, - bool adamw_mode = true) - : _alpha(alpha), - _betta1(betta1), - _betta2(betta2), - _eps(eps), - _weight_decay(weight_decay), - _betta1_t(1.0), - _betta2_t(1.0), - _step(0), - _buf_index(false), - _adamw_mode(adamw_mode) - { - 
cudaMallocHost((void**)_doubled_buffer, TILE * sizeof(float)); - cudaMallocHost((void**)(_doubled_buffer + 1), TILE * sizeof(float)); - - _streams[0] = Context::Instance().GetCurrentStream(); - _streams[1] = Context::Instance().GetNewStream(); - } - ~Adam_Optimizer() - { - cudaFreeHost(_doubled_buffer[0]); - cudaFreeHost(_doubled_buffer[1]); - } -#if defined(__AVX512__) or defined(__AVX256__) - template - void Step_AVX(size_t* rounded_size, - float* _params, - float* grads, - float* _exp_avg, - float* _exp_avg_sq, - size_t param_size, - __half* dev_param = nullptr, - bool half_precision = false); -#endif - STEP(1) - STEP(4) - STEP(8) - inline void SynchronizeStreams() - { - for (int i = 0; i < 2; i++) cudaStreamSynchronize(_streams[i]); - } - inline void IncrementStep(size_t step, float beta1, float beta2) - { - if (beta1 != _betta1 || beta2 != _betta2) { - _step = step; - _betta1 = beta1; - _betta2 = beta2; - _betta1_t = std::pow(_betta1, step); - _betta2_t = std::pow(_betta2, step); - } else { - _step++; - if (_step != step) { - _betta1_t = std::pow(_betta1, step); - _betta2_t = std::pow(_betta2, step); - _step = step; - } else { - _betta1_t *= _betta1; - _betta2_t *= _betta2; - } - } - } - inline void update_state(float lr, float epsilon, float weight_decay, bool bias_correction) - { - _alpha = lr; - _eps = epsilon; - _weight_decay = weight_decay; - - _bias_correction1 = 1.0f; - _bias_correction2 = 1.0f; - if (bias_correction == 1) { - _bias_correction1 = 1 - _betta1_t; - _bias_correction2 = 1 / sqrt(1 - _betta2_t); - } - } - -private: - float _alpha; - float _betta1; - float _betta2; - float _eps; - float _weight_decay; - - float _betta1_t; - float _betta2_t; - size_t _step; - - float _bias_correction1; - float _bias_correction2; - - float* _doubled_buffer[2]; - bool _buf_index; - bool _adamw_mode; - - cudaStream_t _streams[2]; -}; - -#if defined(__AVX512__) or defined(__AVX256__) -template -void Adam_Optimizer::Step_AVX(size_t* rounded_size, - float* _params, 
- float* grads, - float* _exp_avg, - float* _exp_avg_sq, - size_t _param_size, - __half* dev_params, - bool half_precision) -{ - size_t new_rounded_size = 0; - - AVX_Data betta1_4; - betta1_4.data = SIMD_SET(_betta1); - AVX_Data betta2_4; - betta2_4.data = SIMD_SET(_betta2); - - float betta1_minus1 = 1 - _betta1; - float betta2_minus1 = 1 - _betta2; - AVX_Data betta1_minus1_4; - betta1_minus1_4.data = SIMD_SET(betta1_minus1); - AVX_Data betta2_minus1_4; - betta2_minus1_4.data = SIMD_SET(betta2_minus1); - - AVX_Data bias2_sqrt; - bias2_sqrt.data = SIMD_SET(_bias_correction2); - - AVX_Data eps_4; - eps_4.data = SIMD_SET(_eps); - - float step_size = -1 * _alpha / _bias_correction1; - AVX_Data step_size_4; - step_size_4.data = SIMD_SET(step_size); - - float w_decay = -1 * _alpha * _weight_decay; - AVX_Data weight_decay4; - if (_weight_decay > 0) - weight_decay4.data = (_adamw_mode ? SIMD_SET(w_decay) : SIMD_SET(_weight_decay)); - new_rounded_size = ROUND_DOWN(_param_size, SIMD_WIDTH * span); - for (size_t t = 0; t < new_rounded_size; t += TILE) { - size_t copy_size = TILE; - if ((t + TILE) > new_rounded_size) copy_size = new_rounded_size - t; - size_t offset = copy_size + t; - if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); } -#pragma omp parallel for - for (size_t i = t; i < offset; i += SIMD_WIDTH * span) { - AVX_Data grad_4[span]; - simd_load(grad_4, grads + i, half_precision); - - AVX_Data momentum_4[span]; - simd_load(momentum_4, _exp_avg + i, false); - - AVX_Data variance_4[span]; - simd_load(variance_4, _exp_avg_sq + i, false); - - AVX_Data param_4[span]; - simd_load(param_4, _params + i, half_precision); - - if (_weight_decay > 0 && !_adamw_mode) { - simd_fma(grad_4, param_4, weight_decay4, grad_4); - } - - simd_mul(momentum_4, momentum_4, betta1_4); - simd_fma(momentum_4, grad_4, betta1_minus1_4, momentum_4); - simd_mul(variance_4, variance_4, betta2_4); - simd_mul(grad_4, grad_4, grad_4); - simd_fma(variance_4, grad_4, betta2_minus1_4, 
variance_4); - simd_sqrt(grad_4, variance_4); - simd_fma(grad_4, grad_4, bias2_sqrt, eps_4); - simd_div(grad_4, momentum_4, grad_4); - - if (_weight_decay > 0 && _adamw_mode) { - simd_fma(param_4, param_4, weight_decay4, param_4); - } - - simd_fma(param_4, grad_4, step_size_4, param_4); - - simd_store(_params + i, param_4, half_precision); - if (dev_params) { - simd_store(_doubled_buffer[_buf_index] + (i - t), param_4, half_precision); - } - simd_store(_exp_avg + i, momentum_4, false); - simd_store(_exp_avg_sq + i, variance_4, false); - } - - if (dev_params) { - if (half_precision) - launch_param_update_half( - _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]); - else - launch_param_update( - _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]); - - _buf_index = !_buf_index; - } - } - *rounded_size = new_rounded_size; -} -#endif diff --git a/deepspeed/ops/csrc/includes/cpu_adam_hip.h b/deepspeed/ops/csrc/includes/cpu_adam_hip.h deleted file mode 100644 index 3622f34..0000000 --- a/deepspeed/ops/csrc/includes/cpu_adam_hip.h +++ /dev/null @@ -1,226 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
-#pragma once - -#define NOMINMAX // Windows idiosyncrasy - // https://stackoverflow.com/questions/4913922/possible-problems-with-nominmax-on-visual-c - -#include -#include -#include -#include -#include "hip/hip_runtime.h" -#include "custom_hip_layers.h" -#include "simd.h" - -#define STEP(SPAN) \ - void Step_##SPAN(float* _params, \ - float* grads, \ - float* _exp_avg, \ - float* _exp_avg_sq, \ - size_t _param_size, \ - __half* dev_param = nullptr, \ - bool half_precision = false); - -class Adam_Optimizer { -public: - Adam_Optimizer(float alpha = 1e-3, - float betta1 = 0.9, - float betta2 = 0.999, - float eps = 1e-8, - float weight_decay = 0, - bool adamw_mode = true) - : _alpha(alpha), - _betta1(betta1), - _betta2(betta2), - _eps(eps), - _weight_decay(weight_decay), - _betta1_t(1.0), - _betta2_t(1.0), - _step(0), - _buf_index(false), - _adamw_mode(adamw_mode) - { - hipHostMalloc((void**)_doubled_buffer, TILE * sizeof(float)); - hipHostMalloc((void**)(_doubled_buffer + 1), TILE * sizeof(float)); - - _streams[0] = Context::Instance().GetCurrentStream(); - _streams[1] = Context::Instance().GetNewStream(); - } - ~Adam_Optimizer() - { - hipHostFree(_doubled_buffer[0]); - hipHostFree(_doubled_buffer[1]); - } -#if defined(__AVX512__) or defined(__AVX256__) - template - void Step_AVX(size_t* rounded_size, - float* _params, - float* grads, - float* _exp_avg, - float* _exp_avg_sq, - size_t param_size, - __half* dev_param = nullptr, - bool half_precision = false); -#endif - STEP(1) - STEP(4) - STEP(8) - inline void SynchronizeStreams() - { - for (int i = 0; i < 2; i++) hipStreamSynchronize(_streams[i]); - } - inline void IncrementStep(size_t step, float beta1, float beta2) - { - if (beta1 != _betta1 || beta2 != _betta2) { - _step = step; - _betta1 = beta1; - _betta2 = beta2; - _betta1_t = std::pow(_betta1, step); - _betta2_t = std::pow(_betta2, step); - } else { - _step++; - if (_step != step) { - _betta1_t = std::pow(_betta1, step); - _betta2_t = std::pow(_betta2, step); - 
_step = step; - } else { - _betta1_t *= _betta1; - _betta2_t *= _betta2; - } - } - } - inline void update_state(float lr, float epsilon, float weight_decay, bool bias_correction) - { - _alpha = lr; - _eps = epsilon; - _weight_decay = weight_decay; - - _bias_correction1 = 1.0f; - _bias_correction2 = 1.0f; - if (bias_correction == 1) { - _bias_correction1 = 1 - _betta1_t; - _bias_correction2 = 1 / sqrt(1 - _betta2_t); - } - } - -private: - float _alpha; - float _betta1; - float _betta2; - float _eps; - float _weight_decay; - - float _betta1_t; - float _betta2_t; - size_t _step; - - float _bias_correction1; - float _bias_correction2; - - float* _doubled_buffer[2]; - bool _buf_index; - bool _adamw_mode; - - hipStream_t _streams[2]; -}; - -#if defined(__AVX512__) or defined(__AVX256__) -template -void Adam_Optimizer::Step_AVX(size_t* rounded_size, - float* _params, - float* grads, - float* _exp_avg, - float* _exp_avg_sq, - size_t _param_size, - __half* dev_params, - bool half_precision) -{ - size_t new_rounded_size = 0; - - AVX_Data betta1_4; - betta1_4.data = SIMD_SET(_betta1); - AVX_Data betta2_4; - betta2_4.data = SIMD_SET(_betta2); - - float betta1_minus1 = 1 - _betta1; - float betta2_minus1 = 1 - _betta2; - AVX_Data betta1_minus1_4; - betta1_minus1_4.data = SIMD_SET(betta1_minus1); - AVX_Data betta2_minus1_4; - betta2_minus1_4.data = SIMD_SET(betta2_minus1); - - AVX_Data bias2_sqrt; - bias2_sqrt.data = SIMD_SET(_bias_correction2); - - AVX_Data eps_4; - eps_4.data = SIMD_SET(_eps); - - float step_size = -1 * _alpha / _bias_correction1; - AVX_Data step_size_4; - step_size_4.data = SIMD_SET(step_size); - - float w_decay = -1 * _alpha * _weight_decay; - AVX_Data weight_decay4; - if (_weight_decay > 0) - weight_decay4.data = (_adamw_mode ? 
SIMD_SET(w_decay) : SIMD_SET(_weight_decay)); - new_rounded_size = ROUND_DOWN(_param_size, SIMD_WIDTH * span); - for (size_t t = 0; t < new_rounded_size; t += TILE) { - size_t copy_size = TILE; - if ((t + TILE) > new_rounded_size) copy_size = new_rounded_size - t; - size_t offset = copy_size + t; - if ((t / TILE) >= 2) { hipStreamSynchronize(_streams[_buf_index]); } -#pragma omp parallel for - for (size_t i = t; i < offset; i += SIMD_WIDTH * span) { - AVX_Data grad_4[span]; - simd_load(grad_4, grads + i, half_precision); - - AVX_Data momentum_4[span]; - simd_load(momentum_4, _exp_avg + i, false); - - AVX_Data variance_4[span]; - simd_load(variance_4, _exp_avg_sq + i, false); - - AVX_Data param_4[span]; - simd_load(param_4, _params + i, half_precision); - - if (_weight_decay > 0 && !_adamw_mode) { - simd_fma(grad_4, param_4, weight_decay4, grad_4); - } - - simd_mul(momentum_4, momentum_4, betta1_4); - simd_fma(momentum_4, grad_4, betta1_minus1_4, momentum_4); - simd_mul(variance_4, variance_4, betta2_4); - simd_mul(grad_4, grad_4, grad_4); - simd_fma(variance_4, grad_4, betta2_minus1_4, variance_4); - simd_sqrt(grad_4, variance_4); - simd_fma(grad_4, grad_4, bias2_sqrt, eps_4); - simd_div(grad_4, momentum_4, grad_4); - - if (_weight_decay > 0 && _adamw_mode) { - simd_fma(param_4, param_4, weight_decay4, param_4); - } - - simd_fma(param_4, grad_4, step_size_4, param_4); - - simd_store(_params + i, param_4, half_precision); - if (dev_params) { - simd_store(_doubled_buffer[_buf_index] + (i - t), param_4, half_precision); - } - simd_store(_exp_avg + i, momentum_4, false); - simd_store(_exp_avg_sq + i, variance_4, false); - } - - if (dev_params) { - if (half_precision) - launch_param_update_half( - _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]); - else - launch_param_update( - _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]); - - _buf_index = !_buf_index; - } - } - *rounded_size = new_rounded_size; -} -#endif 
diff --git a/deepspeed/ops/csrc/includes/cublas_wrappers.h b/deepspeed/ops/csrc/includes/cublas_wrappers.h deleted file mode 100644 index 9bb6cc3..0000000 --- a/deepspeed/ops/csrc/includes/cublas_wrappers.h +++ /dev/null @@ -1,87 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#ifndef __HIP_PLATFORM_HCC__ -#include -#endif -#include - -int cublas_gemm_ex(cublasHandle_t handle, - cublasOperation_t transa, - cublasOperation_t transb, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const float* A, - const float* B, - float* C, -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo algo = rocblas_gemm_algo_standard); -#else - cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT); -#endif - -int cublas_gemm_ex(cublasHandle_t handle, - cublasOperation_t transa, - cublasOperation_t transb, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const __half* A, - const __half* B, - __half* C, -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo algo = rocblas_gemm_algo_standard); -#else - cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP); -#endif - -int cublas_strided_batched_gemm(cublasHandle_t handle, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const float* A, - const float* B, - float* C, - cublasOperation_t op_A, - cublasOperation_t op_B, - int stride_A, - int stride_B, - int stride_C, - int batch, -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo algo = rocblas_gemm_algo_standard); -#else - cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT); -#endif - -int cublas_strided_batched_gemm(cublasHandle_t handle, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const __half* A, - const __half* B, - __half* C, - cublasOperation_t op_A, - cublasOperation_t op_B, - int stride_A, - int stride_B, - int stride_C, - int batch, -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo algo = rocblas_gemm_algo_standard); -#else - cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP); -#endif 
diff --git a/deepspeed/ops/csrc/includes/cublas_wrappers_hip.h b/deepspeed/ops/csrc/includes/cublas_wrappers_hip.h deleted file mode 100644 index ddfa186..0000000 --- a/deepspeed/ops/csrc/includes/cublas_wrappers_hip.h +++ /dev/null @@ -1,88 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! -#pragma once - -#include -#include -#include -#include -#include -#ifndef __HIP_PLATFORM_HCC__ -#include -#endif -#include - -int cublas_gemm_ex(rocblas_handle handle, - rocblas_operation transa, - rocblas_operation transb, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const float* A, - const float* B, - float* C, -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo algo = rocblas_gemm_algo_standard); -#else - cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT); -#endif - -int cublas_gemm_ex(rocblas_handle handle, - rocblas_operation transa, - rocblas_operation transb, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const __half* A, - const __half* B, - __half* C, -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo algo = rocblas_gemm_algo_standard); -#else - cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP); -#endif - -int cublas_strided_batched_gemm(rocblas_handle handle, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const float* A, - const float* B, - float* C, - rocblas_operation op_A, - rocblas_operation op_B, - int stride_A, - int stride_B, - int stride_C, - int batch, -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo algo = rocblas_gemm_algo_standard); -#else - cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT); -#endif - -int cublas_strided_batched_gemm(rocblas_handle handle, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const __half* A, - const __half* B, - __half* C, - rocblas_operation op_A, - rocblas_operation op_B, - int stride_A, - int stride_B, - int stride_C, - int batch, -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo algo = rocblas_gemm_algo_standard); 
-#else - cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP); -#endif diff --git a/deepspeed/ops/csrc/includes/custom_cuda_layers.h b/deepspeed/ops/csrc/includes/custom_cuda_layers.h deleted file mode 100644 index 30c633f..0000000 --- a/deepspeed/ops/csrc/includes/custom_cuda_layers.h +++ /dev/null @@ -1,303 +0,0 @@ -#pragma once - -#include -#include -#include -#include - -#ifdef __HIP_PLATFORM_HCC__ -#define HALF_PRECISION_AVAILABLE = 1 -#include -#else -#if __CUDA_ARCH__ >= 700 -#define HALF_PRECISION_AVAILABLE = 1 -#endif -#include -#endif -#include - -#include "context.h" -#include "cublas_wrappers.h" - -#define CUDA_CHECK(callstr) \ - { \ - cudaError_t error_code = callstr; \ - if (error_code != cudaSuccess) { \ - std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__; \ - assert(0); \ - } \ - } - -#define MAX_THREADS 1024 -#define THREADS 256 - -#define MAX_THREAD_STRIDE 32 -#define TILE_DIM 32 - -// Maximum sequence-length support based on the number of threads (2048) allowed in each block and -// this MAX is 8K For higher sequence length we need to use higher Max, like for 64K : 32 -#define MAX_THREAD_ITERATIONS 8 // Maximum 8K -#define MAX_WARP_NUM 32 - -#define MAX_REGISTERS 256 - -#define MAX_REG 256 - -#define WARP_SIZE_BITS 5 - -template -void launch_quantize_kernel(T* vals, - int total_count, - int group_num, - int num_bits, - cudaStream_t stream); -template -void launch_sr_quantize_kernel(T* vals, - int total_count, - int group_num, - int num_bits, - cudaStream_t stream); -template -void launch_quantize_kernel_asym(T* vals, - int total_count, - int group_num, - int num_bits, - cudaStream_t stream); -template -void launch_sr_quantize_kernel_asym(T* vals, - int total_count, - int group_num, - int num_bits, - cudaStream_t stream); -// Fused bias add with gelu activation -template -void launch_bias_gelu(const T* input, - const T* bias, - T* output, - int intermediate_size, - int batch_size, - cudaStream_t stream); - 
-template -void launch_gelu(const T* input, - T* output, - int intermediate_size, - int batch_size, - cudaStream_t stream); - -template -void launch_d_gelu(T* d_output, - const T* input, - const T* bias, - int intermediate_size, - int batch_size, - cudaStream_t stream); - -// Custom fused bias add with layer normalization -template -void launch_bias_residual_layer_norm(T* vals, - const T* residual, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - int hidden_dim, - cudaStream_t stream, - bool preLayerNorm, - bool training, - T* vars, - T* means); - -template -void launch_bias_residual_layer_norm(T* vals, - const T* residual, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - int hidden_dim, - cudaStream_t stream, - bool preLayerNorm, - bool training, - T* vars); - -template -void launch_layerNorm_backward_fused_add(const T* out_grad1, - const T* out_grad2, - const T* X_data, - const T* vars, - const T* means, - const T* gamma, - T* gamma_grad, - T* betta_grad, - T* inp_grad, - int batch_size, - int hidden_dim, - cudaStream_t stream[2]); -template -void launch_layerNorm_backward_fused_add(const T* out_grad1, - const T* out_grad2, - const T* vals_hat, - const T* vars, - const T* gamma, - T* gamma_grad, - T* betta_grad, - T* inp_grad, - int batch_size, - int hidden_dim, - cudaStream_t stream[2], - bool invertible = false, - const T* betta = nullptr); - -template -void launch_layerNorm_backward(const T* out_grad, - const T* X_data, - const T* vars, - const T* means, - const T* gamma, - T* gamma_grad, - T* betta_grad, - T* inp_grad, - int batch_size, - int hidden_dim, - cudaStream_t stream[2]); - -template -void launch_layerNorm_backward(const T* out_grad, - const T* vals_hat, - const T* vars, - const T* gamma, - T* gamma_grad, - T* betta_grad, - T* inp_grad, - int batch_size, - int hidden_dim, - cudaStream_t stream[2], - bool invertible = false, - const T* betta = nullptr); - -template -void 
launch_layerNorm_backward_nreversible(const T* out_grad, - const T* vals, - const T* out_grad_trans, - const T* vals_trans, - const T* means, - const T* vars, - const T* gamma, - T* gamma_grad, - T* betta_grad, - T* inp_grad, - int batch_size, - int hidden_dim, - cudaStream_t stream[2]); - -template -void Transpose(const T* inp_mat, T* out_mat, int rows, int cols, cudaStream_t stream); - -template -void launch_attn_softmax_backward(T* out_grad, - const T* soft_inp, - int batch_size, - int heads, - int seq_length, - cudaStream_t stream); - -template -void launch_attn_softmax_backward_v2(T* out_grad, - const T* soft_inp, - int batch_size, - int heads, - int seq_length, - cudaStream_t stream); - -// Custom softmax with scaling and attention mask addition -template -void launch_attn_softmax(T* vals, - const T* attn_mask, - int batch_size, - int heads, - int sequence_length, - cudaStream_t stream); - -template -void launch_transform_0213(T* output, - const T* vals, - int batch_size, - int seq_length, - int hidden_dim, - int heads, - cudaStream_t stream); - -// Custom bias add -template -void launch_bias_add_transform_0213(T* outputs, - const T* vals, - const T* bias, - int batch_size, - int seq_length, - int hidden_dim, - int heads, - cudaStream_t stream, - int trans_count); - -// 4D transform [0, 1, 2, 3] -> [0, 2, 1, 3] -template -void launch_transform4d_0213(T* out, - const T* in, - int batch_size, - int heads, - int seq_length, - int hidden_dim, - cudaStream_t stream, - int trans_count); - -template -void launch_dropout(T* vals, - const T* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - cudaStream_t stream); - -template -void launch_dropout(T* vals_out, - const T* vals, - uint8_t* mask, - int total_count, - int dim, - float ratio, - cudaStream_t stream, - bool bwd = false); - -template -void launch_dropout(T* out, - const T* vals, - const T* residual, - const T* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - cudaStream_t stream); - 
-template -void launch_dropout_grad(T* vals, uint8_t* mask, int total_count, float ratio, cudaStream_t stream); - -template -void launch_dropout_grad(T* vals_out, - const T* vals, - uint8_t* mask, - int total_count, - float ratio, - cudaStream_t stream); - -template -void launch_fuse_transpose_bias_kernel(const T* inp, - T* out, - int rows, - int cols, - cudaStream_t stream); - -void launch_param_update(const float* input, __half* output, int size, cudaStream_t stream); -void launch_param_update_half(const float* input, __half* output, int size, cudaStream_t stream); diff --git a/deepspeed/ops/csrc/includes/custom_hip_layers.h b/deepspeed/ops/csrc/includes/custom_hip_layers.h deleted file mode 100644 index 9f48b31..0000000 --- a/deepspeed/ops/csrc/includes/custom_hip_layers.h +++ /dev/null @@ -1,304 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! -#pragma once - -#include -#include -#include -#include - -#ifdef __HIP_PLATFORM_HCC__ -#define HALF_PRECISION_AVAILABLE = 1 -#include -#else -#if __CUDA_ARCH__ >= 700 -#define HALF_PRECISION_AVAILABLE = 1 -#endif -#include -#endif -#include - -#include "context_hip.h" -#include "cublas_wrappers_hip.h" - -#define CUDA_CHECK(callstr) \ - { \ - hipError_t error_code = callstr; \ - if (error_code != hipSuccess) { \ - std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__; \ - assert(0); \ - } \ - } - -#define MAX_THREADS 1024 -#define THREADS 256 - -#define MAX_THREAD_STRIDE 32 -#define TILE_DIM 32 - -// Maximum sequence-length support based on the number of threads (2048) allowed in each block and -// this MAX is 8K For higher sequence length we need to use higher Max, like for 64K : 32 -#define MAX_THREAD_ITERATIONS 8 // Maximum 8K -#define MAX_WARP_NUM 32 - -#define MAX_REGISTERS 256 - -#define MAX_REG 256 - -#define WARP_SIZE_BITS 5 - -template -void launch_quantize_kernel(T* vals, - int total_count, - int group_num, - int num_bits, - hipStream_t stream); -template -void 
launch_sr_quantize_kernel(T* vals, - int total_count, - int group_num, - int num_bits, - hipStream_t stream); -template -void launch_quantize_kernel_asym(T* vals, - int total_count, - int group_num, - int num_bits, - hipStream_t stream); -template -void launch_sr_quantize_kernel_asym(T* vals, - int total_count, - int group_num, - int num_bits, - hipStream_t stream); -// Fused bias add with gelu activation -template -void launch_bias_gelu(const T* input, - const T* bias, - T* output, - int intermediate_size, - int batch_size, - hipStream_t stream); - -template -void launch_gelu(const T* input, - T* output, - int intermediate_size, - int batch_size, - hipStream_t stream); - -template -void launch_d_gelu(T* d_output, - const T* input, - const T* bias, - int intermediate_size, - int batch_size, - hipStream_t stream); - -// Custom fused bias add with layer normalization -template -void launch_bias_residual_layer_norm(T* vals, - const T* residual, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - int hidden_dim, - hipStream_t stream, - bool preLayerNorm, - bool training, - T* vars, - T* means); - -template -void launch_bias_residual_layer_norm(T* vals, - const T* residual, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - int hidden_dim, - hipStream_t stream, - bool preLayerNorm, - bool training, - T* vars); - -template -void launch_layerNorm_backward_fused_add(const T* out_grad1, - const T* out_grad2, - const T* X_data, - const T* vars, - const T* means, - const T* gamma, - T* gamma_grad, - T* betta_grad, - T* inp_grad, - int batch_size, - int hidden_dim, - hipStream_t stream[2]); -template -void launch_layerNorm_backward_fused_add(const T* out_grad1, - const T* out_grad2, - const T* vals_hat, - const T* vars, - const T* gamma, - T* gamma_grad, - T* betta_grad, - T* inp_grad, - int batch_size, - int hidden_dim, - hipStream_t stream[2], - bool invertible = false, - const T* betta = nullptr); - -template -void 
launch_layerNorm_backward(const T* out_grad, - const T* X_data, - const T* vars, - const T* means, - const T* gamma, - T* gamma_grad, - T* betta_grad, - T* inp_grad, - int batch_size, - int hidden_dim, - hipStream_t stream[2]); - -template -void launch_layerNorm_backward(const T* out_grad, - const T* vals_hat, - const T* vars, - const T* gamma, - T* gamma_grad, - T* betta_grad, - T* inp_grad, - int batch_size, - int hidden_dim, - hipStream_t stream[2], - bool invertible = false, - const T* betta = nullptr); - -template -void launch_layerNorm_backward_nreversible(const T* out_grad, - const T* vals, - const T* out_grad_trans, - const T* vals_trans, - const T* means, - const T* vars, - const T* gamma, - T* gamma_grad, - T* betta_grad, - T* inp_grad, - int batch_size, - int hidden_dim, - hipStream_t stream[2]); - -template -void Transpose(const T* inp_mat, T* out_mat, int rows, int cols, hipStream_t stream); - -template -void launch_attn_softmax_backward(T* out_grad, - const T* soft_inp, - int batch_size, - int heads, - int seq_length, - hipStream_t stream); - -template -void launch_attn_softmax_backward_v2(T* out_grad, - const T* soft_inp, - int batch_size, - int heads, - int seq_length, - hipStream_t stream); - -// Custom softmax with scaling and attention mask addition -template -void launch_attn_softmax(T* vals, - const T* attn_mask, - int batch_size, - int heads, - int sequence_length, - hipStream_t stream); - -template -void launch_transform_0213(T* output, - const T* vals, - int batch_size, - int seq_length, - int hidden_dim, - int heads, - hipStream_t stream); - -// Custom bias add -template -void launch_bias_add_transform_0213(T* outputs, - const T* vals, - const T* bias, - int batch_size, - int seq_length, - int hidden_dim, - int heads, - hipStream_t stream, - int trans_count); - -// 4D transform [0, 1, 2, 3] -> [0, 2, 1, 3] -template -void launch_transform4d_0213(T* out, - const T* in, - int batch_size, - int heads, - int seq_length, - int hidden_dim, - 
hipStream_t stream, - int trans_count); - -template -void launch_dropout(T* vals, - const T* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - hipStream_t stream); - -template -void launch_dropout(T* vals_out, - const T* vals, - uint8_t* mask, - int total_count, - int dim, - float ratio, - hipStream_t stream, - bool bwd = false); - -template -void launch_dropout(T* out, - const T* vals, - const T* residual, - const T* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - hipStream_t stream); - -template -void launch_dropout_grad(T* vals, uint8_t* mask, int total_count, float ratio, hipStream_t stream); - -template -void launch_dropout_grad(T* vals_out, - const T* vals, - uint8_t* mask, - int total_count, - float ratio, - hipStream_t stream); - -template -void launch_fuse_transpose_bias_kernel(const T* inp, - T* out, - int rows, - int cols, - hipStream_t stream); - -void launch_param_update(const float* input, __half* output, int size, hipStream_t stream); -void launch_param_update_half(const float* input, __half* output, int size, hipStream_t stream); diff --git a/deepspeed/ops/csrc/includes/dropout.h b/deepspeed/ops/csrc/includes/dropout.h deleted file mode 100644 index a72572d..0000000 --- a/deepspeed/ops/csrc/includes/dropout.h +++ /dev/null @@ -1,76 +0,0 @@ -#pragma once - -#include -#include -#include - -template -class Dropout { -public: - struct Config { - float ratio; - uint32_t dim; - bool training; - - Config(float r, uint32_t d) : ratio(r), dim(d), training(true) {} - - float RATIO() const { return training ? 
ratio : 0.0; } - inline void SetDim(uint32_t d) { dim = d; } - }; - - Dropout(const Config& config) : _config(config), _mask(nullptr) {} - - virtual ~Dropout() {} - - void Forward(int bsz, T* out, const T* vals, cudaStream_t stream, bool bwd = false) - { - launch_dropout( - out, vals, _mask, bsz * _config.dim, _config.dim, _config.RATIO(), stream, bwd); - } - - void ForwardWithBias(int bsz, T* vals, const T* bias, cudaStream_t stream) - { - launch_dropout(vals, bias, _mask, bsz, _config.dim, _config.RATIO(), stream); - } - - void ForwardWithBias(int bsz, - T* out, - const T* vals, - const T* residual, - const T* bias, - cudaStream_t stream) - { - launch_dropout( - out, vals, residual, bias, _mask, bsz, _config.dim, _config.RATIO(), stream); - } - - void Backward(int bsz, T* d_vals, cudaStream_t stream) - { - launch_dropout_grad(d_vals, _mask, bsz * _config.dim, _config.RATIO(), stream); - } - - void Backward(int bsz, T* d_vals_out, const T* d_vals, cudaStream_t stream) - { - launch_dropout_grad( - d_vals_out, d_vals, _mask, bsz * _config.dim, _config.RATIO(), stream); - } - - bool HasDropout() const { return _config.RATIO() > 0.0; } - - void SetTrainingMode(bool training) { _config.training = training; } - - void SetMask(uint8_t* mask) - { - if (!mask) { throw std::runtime_error("Dropout mask is null."); } - - _mask = mask; - } - - Config GetConfig() const { return _config; } - - inline void SetDimension(uint32_t dim) { _config.SetDim(dim); } - -private: - uint8_t* _mask; - Config _config; -}; diff --git a/deepspeed/ops/csrc/includes/dropout_hip.h b/deepspeed/ops/csrc/includes/dropout_hip.h deleted file mode 100644 index 1bf352f..0000000 --- a/deepspeed/ops/csrc/includes/dropout_hip.h +++ /dev/null @@ -1,77 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
-#pragma once - -#include -#include -#include - -template -class Dropout { -public: - struct Config { - float ratio; - uint32_t dim; - bool training; - - Config(float r, uint32_t d) : ratio(r), dim(d), training(true) {} - - float RATIO() const { return training ? ratio : 0.0; } - inline void SetDim(uint32_t d) { dim = d; } - }; - - Dropout(const Config& config) : _config(config), _mask(nullptr) {} - - virtual ~Dropout() {} - - void Forward(int bsz, T* out, const T* vals, hipStream_t stream, bool bwd = false) - { - launch_dropout( - out, vals, _mask, bsz * _config.dim, _config.dim, _config.RATIO(), stream, bwd); - } - - void ForwardWithBias(int bsz, T* vals, const T* bias, hipStream_t stream) - { - launch_dropout(vals, bias, _mask, bsz, _config.dim, _config.RATIO(), stream); - } - - void ForwardWithBias(int bsz, - T* out, - const T* vals, - const T* residual, - const T* bias, - hipStream_t stream) - { - launch_dropout( - out, vals, residual, bias, _mask, bsz, _config.dim, _config.RATIO(), stream); - } - - void Backward(int bsz, T* d_vals, hipStream_t stream) - { - launch_dropout_grad(d_vals, _mask, bsz * _config.dim, _config.RATIO(), stream); - } - - void Backward(int bsz, T* d_vals_out, const T* d_vals, hipStream_t stream) - { - launch_dropout_grad( - d_vals_out, d_vals, _mask, bsz * _config.dim, _config.RATIO(), stream); - } - - bool HasDropout() const { return _config.RATIO() > 0.0; } - - void SetTrainingMode(bool training) { _config.training = training; } - - void SetMask(uint8_t* mask) - { - if (!mask) { throw std::runtime_error("Dropout mask is null."); } - - _mask = mask; - } - - Config GetConfig() const { return _config; } - - inline void SetDimension(uint32_t dim) { _config.SetDim(dim); } - -private: - uint8_t* _mask; - Config _config; -}; diff --git a/deepspeed/ops/csrc/includes/ds_transformer_cuda.h b/deepspeed/ops/csrc/includes/ds_transformer_cuda.h deleted file mode 100644 index 09afeb9..0000000 --- a/deepspeed/ops/csrc/includes/ds_transformer_cuda.h 
+++ /dev/null @@ -1,184 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include "cublas_v2.h" -#include "cuda.h" -#include "dropout.h" -#include "feed_forward.h" -#include "gelu.h" -#include "general_kernels.h" -#include "normalize_layer.h" -#include "softmax.h" -#include "strided_batch_gemm.h" - -struct BertGemmAlgos { - int m_gemm_qkv_algo; - int m_gemm_inter_algo; - int m_gemm_output_algo; - int m_gemm_batch1_algo; - int m_gemm_batch2_algo; - - BertGemmAlgos() - : m_gemm_qkv_algo(-1), - m_gemm_inter_algo(-1), - m_gemm_output_algo(-1), - m_gemm_batch1_algo(-1), - m_gemm_batch2_algo(-1) - { - } -}; - -template -class BertTransformerLayer { -public: - BertTransformerLayer(unsigned layer_id, - unsigned batch_size, - unsigned hidden_size, - unsigned num_heads, - unsigned intermediate_size, - unsigned seq_length, - float attn_dropout_ratio, - float hidden_output_dropout_ratio, - float layer_norm_eps, - bool pre_or_postLayerNorm, - const std::vector>& gemm_algos, - bool attn_dropout_checkpoint, - bool normalize_invertible, - bool gelu_checkpoint, - bool stochastic_mode); - - virtual ~BertTransformerLayer(); - - void Forward(unsigned bsz, - const T* input_ptr, - const T* input_mask_ptr, - const T* attn_qkvw_ptr, - const T* attn_qkvb_ptr, - const T* attn_ow_ptr, - const T* attn_ob_ptr, - const T* attn_nw_ptr, - const T* attn_nb_ptr, - const T* inter_w_ptr, - const T* inter_b_ptr, - const T* output_w_ptr, - const T* output_b_ptr, - const T* norm_w_ptr, - const T* norm_b_ptr, - T* out_ptr, - T* inp_norm_ptr, - T* q_tf_ptr, - T* k_tf_ptr, - T* v_tf_ptr, - T* softmax_output_ptr, - T* ctx_bufB_ptr, - T* attn_o_inp_ptr, - T* add_res_ptr, - T* ff1_inp_ptr, - T* gelu_inp_ptr, - T* ff2_inp_ptr); - - void Backward(unsigned bsz, - const T* grad_output_ptr, - const T* input_ptr, - const T* output_ptr, - const T* inp_norm_ptr, - const T* q_tf_ptr, - const T* k_tf_ptr, - const T* v_tf_ptr, - const T* softmax_output_ptr, - const T* ctx_bufB_ptr, - const T* 
attn_o_inp_ptr, - const T* add_res_ptr, - const T* ff1_inp_ptr, - const T* gelu_inp_ptr, - const T* ff2_inp_ptr, - const T* input_mask_ptr, - const T* attn_qkvw_ptr, - const T* attn_ow_ptr, - const T* attn_nw_ptr, - const T* attn_nb_ptr, - const T* inter_w_ptr, - const T* inter_b_ptr, - const T* output_w_ptr, - const T* norm_w_ptr, - const T* norm_b_ptr, - - T* grad_input_ptr, - T* grad_attn_qkvw_ptr, - T* grad_attn_qkvb_ptr, - T* grad_attn_ow_ptr, - T* grad_attn_ob_ptr, - T* grad_attn_nw_ptr, - T* grad_attn_nb_ptr, - T* grad_inter_w_ptr, - T* grad_inter_b_ptr, - T* grad_output_w_ptr, - T* grad_output_b_ptr, - T* grad_norm_w_ptr, - T* grad_norm_b_ptr); - - void SetIntermediateBuffers(uint8_t* attn_prob_dropout_mask_ptr, - uint8_t* attn_output_dropout_mask_ptr, - uint8_t* layer_output_dropout_mask_ptr, - T* layer_norm_var, - T* layer_norm_mean, - T* attn_layer_norm_var, - T* attn_layer_norm_mean); - - inline unsigned GetBatchSize() const { return _batch_size; } - inline unsigned GetNumHeads() const { return _heads; } - inline unsigned GetSeqLength() const { return _seq_length; } - inline unsigned GetIntermediateSize() const { return _intermediate_size; } - - void SetSeqLength(unsigned seq_len); - inline unsigned GetHiddenSize() const { return _hidden_size; } - void SetTrainingMode(bool training); - inline bool IsTrainingMode() const { return _training; } - inline bool GeluCheckpoint() const { return _gelu_checkpoint; } - -private: - void Initialize(); - size_t getWorkspaceSize(int maxBatchSize) const; - - // Params - unsigned _layer_id; - unsigned _batch_size; - unsigned _hidden_size; - unsigned _heads; - unsigned _size_per_head; - unsigned _intermediate_size; - unsigned _seq_length; - - bool _pre_or_postLayerNorm; - - cublasHandle_t _cublasHandle; - cudaStream_t _stream; - - // layers - FeedForward _qkv_linear; - FeedForward _attn_out_linear; - Normalize_Layer _attn_layer_norm; - Normalize_Layer _layer_norm; - Normalize_Layer* _last_normalize; - FeedForward _ff1, 
_ff2; - Softmax _softmax; - Gelu _gelu; - Dropout _attn_prob_dropout; - Dropout _attn_output_dropout; - Dropout _layer_output_dropout; - StridedBatchGemm _attn_scores; - StridedBatchGemm _attn_context; - - bool _training; - - // Memory saving flags - bool _attn_dropout_checkpoint; - bool _normalize_invertible; - bool _gelu_checkpoint; - - // High Performance flags - bool _stochastic_mode; -}; diff --git a/deepspeed/ops/csrc/includes/ds_transformer_hip.h b/deepspeed/ops/csrc/includes/ds_transformer_hip.h deleted file mode 100644 index 502f2f3..0000000 --- a/deepspeed/ops/csrc/includes/ds_transformer_hip.h +++ /dev/null @@ -1,185 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! -#pragma once - -#include -#include -#include -#include -#include "rocblas.h" -#include "hip/hip_runtime.h" -#include "dropout_hip.h" -#include "feed_forward_hip.h" -#include "gelu_hip.h" -#include "general_kernels_hip.h" -#include "normalize_layer_hip.h" -#include "softmax_hip.h" -#include "strided_batch_gemm_hip.h" - -struct BertGemmAlgos { - int m_gemm_qkv_algo; - int m_gemm_inter_algo; - int m_gemm_output_algo; - int m_gemm_batch1_algo; - int m_gemm_batch2_algo; - - BertGemmAlgos() - : m_gemm_qkv_algo(-1), - m_gemm_inter_algo(-1), - m_gemm_output_algo(-1), - m_gemm_batch1_algo(-1), - m_gemm_batch2_algo(-1) - { - } -}; - -template -class BertTransformerLayer { -public: - BertTransformerLayer(unsigned layer_id, - unsigned batch_size, - unsigned hidden_size, - unsigned num_heads, - unsigned intermediate_size, - unsigned seq_length, - float attn_dropout_ratio, - float hidden_output_dropout_ratio, - float layer_norm_eps, - bool pre_or_postLayerNorm, - const std::vector>& gemm_algos, - bool attn_dropout_checkpoint, - bool normalize_invertible, - bool gelu_checkpoint, - bool stochastic_mode); - - virtual ~BertTransformerLayer(); - - void Forward(unsigned bsz, - const T* input_ptr, - const T* input_mask_ptr, - const T* attn_qkvw_ptr, - const T* attn_qkvb_ptr, - const T* 
attn_ow_ptr, - const T* attn_ob_ptr, - const T* attn_nw_ptr, - const T* attn_nb_ptr, - const T* inter_w_ptr, - const T* inter_b_ptr, - const T* output_w_ptr, - const T* output_b_ptr, - const T* norm_w_ptr, - const T* norm_b_ptr, - T* out_ptr, - T* inp_norm_ptr, - T* q_tf_ptr, - T* k_tf_ptr, - T* v_tf_ptr, - T* softmax_output_ptr, - T* ctx_bufB_ptr, - T* attn_o_inp_ptr, - T* add_res_ptr, - T* ff1_inp_ptr, - T* gelu_inp_ptr, - T* ff2_inp_ptr); - - void Backward(unsigned bsz, - const T* grad_output_ptr, - const T* input_ptr, - const T* output_ptr, - const T* inp_norm_ptr, - const T* q_tf_ptr, - const T* k_tf_ptr, - const T* v_tf_ptr, - const T* softmax_output_ptr, - const T* ctx_bufB_ptr, - const T* attn_o_inp_ptr, - const T* add_res_ptr, - const T* ff1_inp_ptr, - const T* gelu_inp_ptr, - const T* ff2_inp_ptr, - const T* input_mask_ptr, - const T* attn_qkvw_ptr, - const T* attn_ow_ptr, - const T* attn_nw_ptr, - const T* attn_nb_ptr, - const T* inter_w_ptr, - const T* inter_b_ptr, - const T* output_w_ptr, - const T* norm_w_ptr, - const T* norm_b_ptr, - - T* grad_input_ptr, - T* grad_attn_qkvw_ptr, - T* grad_attn_qkvb_ptr, - T* grad_attn_ow_ptr, - T* grad_attn_ob_ptr, - T* grad_attn_nw_ptr, - T* grad_attn_nb_ptr, - T* grad_inter_w_ptr, - T* grad_inter_b_ptr, - T* grad_output_w_ptr, - T* grad_output_b_ptr, - T* grad_norm_w_ptr, - T* grad_norm_b_ptr); - - void SetIntermediateBuffers(uint8_t* attn_prob_dropout_mask_ptr, - uint8_t* attn_output_dropout_mask_ptr, - uint8_t* layer_output_dropout_mask_ptr, - T* layer_norm_var, - T* layer_norm_mean, - T* attn_layer_norm_var, - T* attn_layer_norm_mean); - - inline unsigned GetBatchSize() const { return _batch_size; } - inline unsigned GetNumHeads() const { return _heads; } - inline unsigned GetSeqLength() const { return _seq_length; } - inline unsigned GetIntermediateSize() const { return _intermediate_size; } - - void SetSeqLength(unsigned seq_len); - inline unsigned GetHiddenSize() const { return _hidden_size; } - void 
SetTrainingMode(bool training); - inline bool IsTrainingMode() const { return _training; } - inline bool GeluCheckpoint() const { return _gelu_checkpoint; } - -private: - void Initialize(); - size_t getWorkspaceSize(int maxBatchSize) const; - - // Params - unsigned _layer_id; - unsigned _batch_size; - unsigned _hidden_size; - unsigned _heads; - unsigned _size_per_head; - unsigned _intermediate_size; - unsigned _seq_length; - - bool _pre_or_postLayerNorm; - - rocblas_handle _cublasHandle; - hipStream_t _stream; - - // layers - FeedForward _qkv_linear; - FeedForward _attn_out_linear; - Normalize_Layer _attn_layer_norm; - Normalize_Layer _layer_norm; - Normalize_Layer* _last_normalize; - FeedForward _ff1, _ff2; - Softmax _softmax; - Gelu _gelu; - Dropout _attn_prob_dropout; - Dropout _attn_output_dropout; - Dropout _layer_output_dropout; - StridedBatchGemm _attn_scores; - StridedBatchGemm _attn_context; - - bool _training; - - // Memory saving flags - bool _attn_dropout_checkpoint; - bool _normalize_invertible; - bool _gelu_checkpoint; - - // High Performance flags - bool _stochastic_mode; -}; diff --git a/deepspeed/ops/csrc/includes/feed_forward.h b/deepspeed/ops/csrc/includes/feed_forward.h deleted file mode 100644 index de7a9cf..0000000 --- a/deepspeed/ops/csrc/includes/feed_forward.h +++ /dev/null @@ -1,105 +0,0 @@ -#ifndef __FEEDFORWARD_H__ -#define __FEEDFORWARD_H__ - -#include -#include -#include -#include "custom_cuda_layers.h" - -template -class FeedForward { -public: - struct Config { - int batchSize, outputSize; - int inputSize; - std::array gemm_algos; - Config(int batch, int outputs, int inputs, const std::array& algos) - : batchSize(batch), outputSize(outputs), inputSize(inputs), gemm_algos(algos) - { - } - }; - - FeedForward(Config config) : config_(config) {} - - ~FeedForward() {} - - void Forward(int bsz, - const T* input_ptr, - const T* weights, - T* out, - cublasHandle_t& _cublasHandle) - { - float alpha = T(1.); - float beta = T(0.); - - 
cublas_gemm_ex(_cublasHandle, - CUBLAS_OP_T, - CUBLAS_OP_N, - config_.outputSize, - bsz, - config_.inputSize, - &alpha, - &beta, - weights, - input_ptr, - out, -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo(config_.gemm_algos[0])); -#else - cublasGemmAlgo_t(config_.gemm_algos[0])); -#endif - } - void Backward(int bsz, - const T* out_grad, - const T* input_ptr, - const T* weights, - T* weights_grad, - T* bias_grad, - cublasHandle_t& _cublasHandle, - cudaStream_t& stream, - T* inp_grad_out = nullptr, - T* out_grad_trans_out = nullptr) - { - float alpha = (T)1.0, beta = (T)0.0; - cublas_gemm_ex(_cublasHandle, - CUBLAS_OP_N, - CUBLAS_OP_T, - config_.inputSize, - config_.outputSize, - bsz, - &alpha, - &beta, - input_ptr, - out_grad, - weights_grad, -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo(config_.gemm_algos[1])); -#else - cublasGemmAlgo_t(config_.gemm_algos[1])); -#endif - - cublas_gemm_ex(_cublasHandle, - CUBLAS_OP_N, - CUBLAS_OP_N, - config_.inputSize, - bsz, - config_.outputSize, - &alpha, - &beta, - weights, - out_grad, - inp_grad_out, -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo(config_.gemm_algos[2])); -#else - cublasGemmAlgo_t(config_.gemm_algos[2])); -#endif - - launch_fuse_transpose_bias_kernel(out_grad, bias_grad, bsz, config_.outputSize, stream); - } - -private: - Config config_; -}; - -#endif diff --git a/deepspeed/ops/csrc/includes/feed_forward_hip.h b/deepspeed/ops/csrc/includes/feed_forward_hip.h deleted file mode 100644 index e7e0600..0000000 --- a/deepspeed/ops/csrc/includes/feed_forward_hip.h +++ /dev/null @@ -1,106 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
-#ifndef __FEEDFORWARD_H__ -#define __FEEDFORWARD_H__ - -#include -#include -#include -#include "custom_hip_layers.h" - -template -class FeedForward { -public: - struct Config { - int batchSize, outputSize; - int inputSize; - std::array gemm_algos; - Config(int batch, int outputs, int inputs, const std::array& algos) - : batchSize(batch), outputSize(outputs), inputSize(inputs), gemm_algos(algos) - { - } - }; - - FeedForward(Config config) : config_(config) {} - - ~FeedForward() {} - - void Forward(int bsz, - const T* input_ptr, - const T* weights, - T* out, - rocblas_handle& _cublasHandle) - { - float alpha = T(1.); - float beta = T(0.); - - cublas_gemm_ex(_cublasHandle, - rocblas_operation_transpose, - rocblas_operation_none, - config_.outputSize, - bsz, - config_.inputSize, - &alpha, - &beta, - weights, - input_ptr, - out, -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo(config_.gemm_algos[0])); -#else - cublasGemmAlgo_t(config_.gemm_algos[0])); -#endif - } - void Backward(int bsz, - const T* out_grad, - const T* input_ptr, - const T* weights, - T* weights_grad, - T* bias_grad, - rocblas_handle& _cublasHandle, - hipStream_t& stream, - T* inp_grad_out = nullptr, - T* out_grad_trans_out = nullptr) - { - float alpha = (T)1.0, beta = (T)0.0; - cublas_gemm_ex(_cublasHandle, - rocblas_operation_none, - rocblas_operation_transpose, - config_.inputSize, - config_.outputSize, - bsz, - &alpha, - &beta, - input_ptr, - out_grad, - weights_grad, -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo(config_.gemm_algos[1])); -#else - cublasGemmAlgo_t(config_.gemm_algos[1])); -#endif - - cublas_gemm_ex(_cublasHandle, - rocblas_operation_none, - rocblas_operation_none, - config_.inputSize, - bsz, - config_.outputSize, - &alpha, - &beta, - weights, - out_grad, - inp_grad_out, -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo(config_.gemm_algos[2])); -#else - cublasGemmAlgo_t(config_.gemm_algos[2])); -#endif - - launch_fuse_transpose_bias_kernel(out_grad, bias_grad, bsz, 
config_.outputSize, stream); - } - -private: - Config config_; -}; - -#endif diff --git a/deepspeed/ops/csrc/includes/gelu.h b/deepspeed/ops/csrc/includes/gelu.h deleted file mode 100644 index 560f414..0000000 --- a/deepspeed/ops/csrc/includes/gelu.h +++ /dev/null @@ -1,36 +0,0 @@ -#pragma once - -#include -#include -#include -#include "custom_cuda_layers.h" - -template -class Gelu { -public: - struct Config { - uint32_t intermediate_size; - Config(uint32_t inter_size) : intermediate_size(inter_size) {} - }; - - Gelu(const Config& config) : _config(config) {} - - virtual ~Gelu() {} - - void ForwardWithBiasAdd(int bsz, - const T* input_buf, - const T* bias, - T* output, - cudaStream_t stream) - { - launch_bias_gelu(input_buf, bias, output, _config.intermediate_size, bsz, stream); - } - - void Backward(int bsz, T* d_output, const T* input_buf, const T* bias, cudaStream_t stream) - { - launch_d_gelu(d_output, input_buf, bias, _config.intermediate_size, bsz, stream); - } - -private: - Config _config; -}; diff --git a/deepspeed/ops/csrc/includes/gelu_hip.h b/deepspeed/ops/csrc/includes/gelu_hip.h deleted file mode 100644 index 0297b66..0000000 --- a/deepspeed/ops/csrc/includes/gelu_hip.h +++ /dev/null @@ -1,37 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
-#pragma once - -#include -#include -#include -#include "custom_hip_layers.h" - -template -class Gelu { -public: - struct Config { - uint32_t intermediate_size; - Config(uint32_t inter_size) : intermediate_size(inter_size) {} - }; - - Gelu(const Config& config) : _config(config) {} - - virtual ~Gelu() {} - - void ForwardWithBiasAdd(int bsz, - const T* input_buf, - const T* bias, - T* output, - hipStream_t stream) - { - launch_bias_gelu(input_buf, bias, output, _config.intermediate_size, bsz, stream); - } - - void Backward(int bsz, T* d_output, const T* input_buf, const T* bias, hipStream_t stream) - { - launch_d_gelu(d_output, input_buf, bias, _config.intermediate_size, bsz, stream); - } - -private: - Config _config; -}; diff --git a/deepspeed/ops/csrc/includes/gemm_test.h b/deepspeed/ops/csrc/includes/gemm_test.h deleted file mode 100644 index 22c3512..0000000 --- a/deepspeed/ops/csrc/includes/gemm_test.h +++ /dev/null @@ -1,327 +0,0 @@ - -#pragma once - -#include -#ifndef __HIP_PLATFORM_HCC__ -#include -#endif -#include -#include -#include -#include -#include -#include -#include "StopWatch.h" -#include "cublas_wrappers.h" - -template -void check(T result, char const* const func, const char* const file, int const line) -{ - if (result) { - std::cout << (std::string("CUDA runtime error: ") + +file + ":" + std::to_string(line) + - " \n"); - } -} - -#define check_cuda_error(val) check((val), #val, __FILE__, __LINE__) - -template -class GemmTest { -public: - GemmTest(int m, int n, int k, cublasOperation_t ta, cublasOperation_t tb, cublasHandle_t h) - : M(m), N(n), K(k), transa(ta), transb(tb), handle(h) - { - check_cuda_error(cudaMalloc((void**)&A, sizeof(T) * M * K)); - check_cuda_error(cudaMalloc((void**)&B, sizeof(T) * K * N)); - check_cuda_error(cudaMalloc((void**)&C, sizeof(T) * M * N)); - } - - ~GemmTest() - { - check_cuda_error(cudaFree(A)); - check_cuda_error(cudaFree(B)); - check_cuda_error(cudaFree(C)); - } - - std::array TestAlgo(int loops) - { - float 
alpha = (T)1.0f; - float beta = (T)0.0f; - - int algo_fw = Run(loops, [=](int algo) { - cublas_gemm_ex(handle, - CUBLAS_OP_T, - CUBLAS_OP_N, - N, - M, - K, - &alpha, - &beta, - B, - A, - C, -#ifdef __HIP_PLATFORM_HCC__ - static_cast(algo)); -#else - static_cast(algo)); -#endif - }); - - int algo_bw1 = Run(loops, [=](int algo) { - cublas_gemm_ex(handle, - CUBLAS_OP_N, - CUBLAS_OP_T, - K, - N, - M, - &alpha, - &beta, - A, - C, - B, -#ifdef __HIP_PLATFORM_HCC__ - static_cast(algo)); -#else - static_cast(algo)); -#endif - }); - - int algo_bw2 = Run(loops, [=](int algo) { - cublas_gemm_ex(handle, - CUBLAS_OP_N, - CUBLAS_OP_N, - K, - M, - N, - &alpha, - &beta, - B, - C, - A, -#ifdef __HIP_PLATFORM_HCC__ - static_cast(algo)); -#else - static_cast(algo)); -#endif - }); - - return std::array({algo_fw, algo_bw1, algo_bw2}); - } - - template - int Run(int loops, Func f) - { - float fast_latency = (std::numeric_limits::max)(); - int fast_algo = 0; - -#ifdef __HIP_PLATFORM_HCC__ - for (int algo = (int)rocblas_gemm_algo_standard; algo <= (int)rocblas_gemm_algo_standard; -#else - for (int algo = (int)CUBLAS_GEMM_DEFAULT_TENSOR_OP; - algo <= (int)CUBLAS_GEMM_ALGO15_TENSOR_OP; -#endif - algo++) { - int warm_up = 5; - for (int i = 0; i < warm_up; ++i) f(algo); - - cudaDeviceSynchronize(); - Stopwatch timer; - timer.Restart(); - - for (int i = 0; i < loops; ++i) f(algo); - - cudaDeviceSynchronize(); - timer.Stop(); - - float avg_latency = (float)timer.GetTimeInSeconds() * 1000 / loops; - - printf("algo-%d: %.3fms\n", algo, avg_latency); - - if (avg_latency < fast_latency) { - fast_latency = avg_latency; - fast_algo = algo; - } - } - - printf("fast_algo %d: %.3f ms\n", fast_algo, fast_latency); - - return fast_algo; - } - -private: - int M, N, K; - cublasHandle_t handle; - cublasOperation_t transa, transb; - T *A, *B, *C; -}; - -template -class StridedGemmTest { -public: - StridedGemmTest(int b, - int m, - int n, - int k, - cublasOperation_t ta, - cublasOperation_t tb, - 
cublasHandle_t h) - : bsz(b), M(m), N(n), K(k), transa(ta), transb(tb), handle(h) - { - check_cuda_error(cudaMalloc((void**)&A, sizeof(T) * M * K * bsz)); - check_cuda_error(cudaMalloc((void**)&B, sizeof(T) * K * N * bsz)); - check_cuda_error(cudaMalloc((void**)&C, sizeof(T) * M * N * bsz)); - } - - ~StridedGemmTest() - { - check_cuda_error(cudaFree(A)); - check_cuda_error(cudaFree(B)); - check_cuda_error(cudaFree(C)); - } - - std::array TestAlgo(int loops) - { - float alpha = (T)1.0f; - float beta = (T)0.0f; - - int algo_fw = Run(loops, [=](int algo) { - int stride_a = M * K; - int stride_b = N * K; - int stride_c = M * N; - - cublas_strided_batched_gemm(handle, - M, - N, - K, - &alpha, - &beta, - A, - B, - C, - transa, - transb, - stride_a, - stride_b, - stride_c, - bsz, -#ifdef __HIP_PLATFORM_HCC__ - static_cast(algo)); -#else - static_cast(algo)); -#endif - }); - - int algo_bw1 = Run(loops, [=](int algo) { - int mb = (transa == CUBLAS_OP_T ? K : M); - int kb = (transa == CUBLAS_OP_T ? M : K); - - int stride_a = mb * N; - int stride_b = N * kb; - int stride_c = M * K; - - // B need to transpose. - cublasOperation_t op_b = (transb == CUBLAS_OP_T ? CUBLAS_OP_N : CUBLAS_OP_T); - - // Calculate d_A. - cublas_strided_batched_gemm(handle, - mb, - kb, - N, - &alpha, - &beta, - (transa == CUBLAS_OP_T ? B : C), - (transa == CUBLAS_OP_T ? C : B), - A, - CUBLAS_OP_N, - op_b, - stride_a, - stride_b, - stride_c, - bsz, -#ifdef __HIP_PLATFORM_HCC__ - static_cast(algo)); -#else - static_cast(algo)); -#endif - }); - - int algo_bw2 = Run(loops, [=](int algo) { - // A need to transpose. - cublasOperation_t op_a = (transa == CUBLAS_OP_T ? CUBLAS_OP_N : CUBLAS_OP_T); - - int stride_a = M * K; - int stride_b = M * N; - int stride_c = N * K; - - // Calculate d_B. 
- cublas_strided_batched_gemm(handle, - K, - N, - M, - &alpha, - &beta, - A, - C, - B, - op_a, - CUBLAS_OP_N, - stride_a, - stride_b, - stride_c, - bsz, -#ifdef __HIP_PLATFORM_HCC__ - static_cast(algo)); -#else - static_cast(algo)); -#endif - }); - - return std::array({algo_fw, algo_bw1, algo_bw2}); - } - - template - int Run(int loops, Func f) - { - float fast_latency = (std::numeric_limits::max)(); - int fast_algo = 0; - -#ifdef __HIP_PLATFORM_HCC__ - for (int algo = (int)rocblas_gemm_algo_standard; algo <= (int)rocblas_gemm_algo_standard; -#else - for (int algo = (int)CUBLAS_GEMM_DEFAULT_TENSOR_OP; - algo <= (int)CUBLAS_GEMM_ALGO15_TENSOR_OP; -#endif - algo++) { - int warm_up = 5; - for (int i = 0; i < warm_up; ++i) f(algo); - - cudaDeviceSynchronize(); - Stopwatch timer; - timer.Restart(); - - for (int i = 0; i < loops; ++i) f(algo); - - cudaDeviceSynchronize(); - timer.Stop(); - - float avg_latency = (float)timer.GetTimeInSeconds() * 1000 / loops; - - printf("algo-%d: %.3fms\n", algo, avg_latency); - - if (avg_latency < fast_latency) { - fast_latency = avg_latency; - fast_algo = algo; - } - } - - printf("fast_algo %d: %.3f ms\n", fast_algo, fast_latency); - - return fast_algo; - } - -private: - int bsz, M, N, K; - cublasHandle_t handle; - cublasOperation_t transa, transb; - T *A, *B, *C; -}; diff --git a/deepspeed/ops/csrc/includes/gemm_test_hip.h b/deepspeed/ops/csrc/includes/gemm_test_hip.h deleted file mode 100644 index 117302d..0000000 --- a/deepspeed/ops/csrc/includes/gemm_test_hip.h +++ /dev/null @@ -1,328 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
- -#pragma once - -#include -#ifndef __HIP_PLATFORM_HCC__ -#include -#endif -#include -#include -#include -#include -#include -#include -#include "StopWatch.h" -#include "cublas_wrappers_hip.h" - -template -void check(T result, char const* const func, const char* const file, int const line) -{ - if (result) { - std::cout << (std::string("CUDA runtime error: ") + +file + ":" + std::to_string(line) + - " \n"); - } -} - -#define check_cuda_error(val) check((val), #val, __FILE__, __LINE__) - -template -class GemmTest { -public: - GemmTest(int m, int n, int k, rocblas_operation ta, rocblas_operation tb, rocblas_handle h) - : M(m), N(n), K(k), transa(ta), transb(tb), handle(h) - { - check_cuda_error(hipMalloc((void**)&A, sizeof(T) * M * K)); - check_cuda_error(hipMalloc((void**)&B, sizeof(T) * K * N)); - check_cuda_error(hipMalloc((void**)&C, sizeof(T) * M * N)); - } - - ~GemmTest() - { - check_cuda_error(hipFree(A)); - check_cuda_error(hipFree(B)); - check_cuda_error(hipFree(C)); - } - - std::array TestAlgo(int loops) - { - float alpha = (T)1.0f; - float beta = (T)0.0f; - - int algo_fw = Run(loops, [=](int algo) { - cublas_gemm_ex(handle, - rocblas_operation_transpose, - rocblas_operation_none, - N, - M, - K, - &alpha, - &beta, - B, - A, - C, -#ifdef __HIP_PLATFORM_HCC__ - static_cast(algo)); -#else - static_cast(algo)); -#endif - }); - - int algo_bw1 = Run(loops, [=](int algo) { - cublas_gemm_ex(handle, - rocblas_operation_none, - rocblas_operation_transpose, - K, - N, - M, - &alpha, - &beta, - A, - C, - B, -#ifdef __HIP_PLATFORM_HCC__ - static_cast(algo)); -#else - static_cast(algo)); -#endif - }); - - int algo_bw2 = Run(loops, [=](int algo) { - cublas_gemm_ex(handle, - rocblas_operation_none, - rocblas_operation_none, - K, - M, - N, - &alpha, - &beta, - B, - C, - A, -#ifdef __HIP_PLATFORM_HCC__ - static_cast(algo)); -#else - static_cast(algo)); -#endif - }); - - return std::array({algo_fw, algo_bw1, algo_bw2}); - } - - template - int Run(int loops, Func f) - { - 
float fast_latency = (std::numeric_limits::max)(); - int fast_algo = 0; - -#ifdef __HIP_PLATFORM_HCC__ - for (int algo = (int)rocblas_gemm_algo_standard; algo <= (int)rocblas_gemm_algo_standard; -#else - for (int algo = (int)CUBLAS_GEMM_DEFAULT_TENSOR_OP; - algo <= (int)CUBLAS_GEMM_ALGO15_TENSOR_OP; -#endif - algo++) { - int warm_up = 5; - for (int i = 0; i < warm_up; ++i) f(algo); - - hipDeviceSynchronize(); - Stopwatch timer; - timer.Restart(); - - for (int i = 0; i < loops; ++i) f(algo); - - hipDeviceSynchronize(); - timer.Stop(); - - float avg_latency = (float)timer.GetTimeInSeconds() * 1000 / loops; - - printf("algo-%d: %.3fms\n", algo, avg_latency); - - if (avg_latency < fast_latency) { - fast_latency = avg_latency; - fast_algo = algo; - } - } - - printf("fast_algo %d: %.3f ms\n", fast_algo, fast_latency); - - return fast_algo; - } - -private: - int M, N, K; - rocblas_handle handle; - rocblas_operation transa, transb; - T *A, *B, *C; -}; - -template -class StridedGemmTest { -public: - StridedGemmTest(int b, - int m, - int n, - int k, - rocblas_operation ta, - rocblas_operation tb, - rocblas_handle h) - : bsz(b), M(m), N(n), K(k), transa(ta), transb(tb), handle(h) - { - check_cuda_error(hipMalloc((void**)&A, sizeof(T) * M * K * bsz)); - check_cuda_error(hipMalloc((void**)&B, sizeof(T) * K * N * bsz)); - check_cuda_error(hipMalloc((void**)&C, sizeof(T) * M * N * bsz)); - } - - ~StridedGemmTest() - { - check_cuda_error(hipFree(A)); - check_cuda_error(hipFree(B)); - check_cuda_error(hipFree(C)); - } - - std::array TestAlgo(int loops) - { - float alpha = (T)1.0f; - float beta = (T)0.0f; - - int algo_fw = Run(loops, [=](int algo) { - int stride_a = M * K; - int stride_b = N * K; - int stride_c = M * N; - - cublas_strided_batched_gemm(handle, - M, - N, - K, - &alpha, - &beta, - A, - B, - C, - transa, - transb, - stride_a, - stride_b, - stride_c, - bsz, -#ifdef __HIP_PLATFORM_HCC__ - static_cast(algo)); -#else - static_cast(algo)); -#endif - }); - - int algo_bw1 = 
Run(loops, [=](int algo) { - int mb = (transa == rocblas_operation_transpose ? K : M); - int kb = (transa == rocblas_operation_transpose ? M : K); - - int stride_a = mb * N; - int stride_b = N * kb; - int stride_c = M * K; - - // B need to transpose. - rocblas_operation op_b = (transb == rocblas_operation_transpose ? rocblas_operation_none : rocblas_operation_transpose); - - // Calculate d_A. - cublas_strided_batched_gemm(handle, - mb, - kb, - N, - &alpha, - &beta, - (transa == rocblas_operation_transpose ? B : C), - (transa == rocblas_operation_transpose ? C : B), - A, - rocblas_operation_none, - op_b, - stride_a, - stride_b, - stride_c, - bsz, -#ifdef __HIP_PLATFORM_HCC__ - static_cast(algo)); -#else - static_cast(algo)); -#endif - }); - - int algo_bw2 = Run(loops, [=](int algo) { - // A need to transpose. - rocblas_operation op_a = (transa == rocblas_operation_transpose ? rocblas_operation_none : rocblas_operation_transpose); - - int stride_a = M * K; - int stride_b = M * N; - int stride_c = N * K; - - // Calculate d_B. 
- cublas_strided_batched_gemm(handle, - K, - N, - M, - &alpha, - &beta, - A, - C, - B, - op_a, - rocblas_operation_none, - stride_a, - stride_b, - stride_c, - bsz, -#ifdef __HIP_PLATFORM_HCC__ - static_cast(algo)); -#else - static_cast(algo)); -#endif - }); - - return std::array({algo_fw, algo_bw1, algo_bw2}); - } - - template - int Run(int loops, Func f) - { - float fast_latency = (std::numeric_limits::max)(); - int fast_algo = 0; - -#ifdef __HIP_PLATFORM_HCC__ - for (int algo = (int)rocblas_gemm_algo_standard; algo <= (int)rocblas_gemm_algo_standard; -#else - for (int algo = (int)CUBLAS_GEMM_DEFAULT_TENSOR_OP; - algo <= (int)CUBLAS_GEMM_ALGO15_TENSOR_OP; -#endif - algo++) { - int warm_up = 5; - for (int i = 0; i < warm_up; ++i) f(algo); - - hipDeviceSynchronize(); - Stopwatch timer; - timer.Restart(); - - for (int i = 0; i < loops; ++i) f(algo); - - hipDeviceSynchronize(); - timer.Stop(); - - float avg_latency = (float)timer.GetTimeInSeconds() * 1000 / loops; - - printf("algo-%d: %.3fms\n", algo, avg_latency); - - if (avg_latency < fast_latency) { - fast_latency = avg_latency; - fast_algo = algo; - } - } - - printf("fast_algo %d: %.3f ms\n", fast_algo, fast_latency); - - return fast_algo; - } - -private: - int bsz, M, N, K; - rocblas_handle handle; - rocblas_operation transa, transb; - T *A, *B, *C; -}; diff --git a/deepspeed/ops/csrc/includes/general_kernels.h b/deepspeed/ops/csrc/includes/general_kernels.h deleted file mode 100644 index e949309..0000000 --- a/deepspeed/ops/csrc/includes/general_kernels.h +++ /dev/null @@ -1,51 +0,0 @@ -#include -#include -#include -#include - -#ifdef __HIP_PLATFORM_HCC__ -#include -#else -#include -#endif -#include - -#include "context.h" -#include "cublas_wrappers.h" - -#define THREADS 256 -#define TILE_DIM 32 - -#define minus_infinity -1 * std::numeric_limits::infinity() - -#define FINAL_MASK 0xffffffff - -template -void launch_fused_add2(T* out, - const T* inp1, - const T* inp2, - int batch_size, - int seq_length, - int 
hidden_size, - cudaStream_t& stream); - -template -void launch_fused_add4(T* out, - const T* inp1, - const T* inp2, - const T* inp3, - const T* inp4, - int batch_size, - int seq_length, - int hidden_size, - cudaStream_t& stream); - -template -void launch_fused_add3(T* out, - const T* inp1, - const T* inp2, - const T* inp3, - int batch_size, - int seq_length, - int hidden_size, - cudaStream_t& stream); diff --git a/deepspeed/ops/csrc/includes/general_kernels_hip.h b/deepspeed/ops/csrc/includes/general_kernels_hip.h deleted file mode 100644 index 2cafd95..0000000 --- a/deepspeed/ops/csrc/includes/general_kernels_hip.h +++ /dev/null @@ -1,52 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! -#include -#include -#include -#include - -#ifdef __HIP_PLATFORM_HCC__ -#include -#else -#include -#endif -#include - -#include "context_hip.h" -#include "cublas_wrappers_hip.h" - -#define THREADS 256 -#define TILE_DIM 32 - -#define minus_infinity -1 * std::numeric_limits::infinity() - -#define FINAL_MASK 0xffffffff - -template -void launch_fused_add2(T* out, - const T* inp1, - const T* inp2, - int batch_size, - int seq_length, - int hidden_size, - hipStream_t& stream); - -template -void launch_fused_add4(T* out, - const T* inp1, - const T* inp2, - const T* inp3, - const T* inp4, - int batch_size, - int seq_length, - int hidden_size, - hipStream_t& stream); - -template -void launch_fused_add3(T* out, - const T* inp1, - const T* inp2, - const T* inp3, - int batch_size, - int seq_length, - int hidden_size, - hipStream_t& stream); diff --git a/deepspeed/ops/csrc/includes/normalize_layer.h b/deepspeed/ops/csrc/includes/normalize_layer.h deleted file mode 100644 index b4d135e..0000000 --- a/deepspeed/ops/csrc/includes/normalize_layer.h +++ /dev/null @@ -1,202 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include "custom_cuda_layers.h" - -using namespace std; - -template -class Normalize_Layer { -public: - struct Config { - uint32_t batchSize; - 
uint32_t seqLength; - uint32_t hiddenDim; - float epsilon; - bool training; - bool useMean; - Config(uint32_t batch, - uint32_t seq, - uint32_t h, - float epsilon = 1e-12, - bool training = true, - bool useMean = true) - : batchSize(batch), - seqLength(seq), - hiddenDim(h), - epsilon(epsilon), - training(training), - useMean(useMean) - { - } - }; - - Normalize_Layer(Config config) - : config_(config), vars(nullptr), means(nullptr), vals_hat(nullptr) - { - } - - ~Normalize_Layer() {} - - void ForwardCheckpoint(int bsz, // batch * seq - T* vals, - const T* residual, - const T* gamma, - const T* betta, - cudaStream_t& stream, - bool preLayerNorm = false) - { - launch_bias_residual_layer_norm(vals, - residual, - gamma, - betta, - config_.epsilon, - bsz, - config_.hiddenDim, - stream, - preLayerNorm, - config_.training, - vars, - means); - } - - void Forward(int bsz, - T* vals, - const T* residual, - const T* gamma, - const T* betta, - cudaStream_t& stream, - bool preLayerNorm = false) - { - launch_bias_residual_layer_norm(vals, - residual, - gamma, - betta, - config_.epsilon, - bsz, - config_.hiddenDim, - stream, - preLayerNorm, - config_.training, - vars); - } - - void Backward(int bsz, - const T* out_grad, - const T* gamma, - T* gamma_grad, - T* betta_grad, - cudaStream_t stream[2], - T* inp_grad_out, - const T* norm_in = nullptr) - { - launch_layerNorm_backward(out_grad, - norm_in, - vars, - means, - gamma, - gamma_grad, - betta_grad, - inp_grad_out, - bsz, - config_.hiddenDim, - stream); - } - - void Backward(int bsz, - const T* out_grad, - const T* gamma, - const T* betta, - T* gamma_grad, - T* betta_grad, - cudaStream_t stream[2], - T* inp_grad_out, - const T* norm_out) - { - launch_layerNorm_backward(out_grad, - norm_out, - vars, - gamma, - gamma_grad, - betta_grad, - inp_grad_out, - bsz, - config_.hiddenDim, - stream, - !config_.useMean, - betta); - } - - void BackwardFusedAdd(int bsz, - const T* out_grad1, - const T* out_grad2, - const T* gamma, - T* 
gamma_grad, - T* betta_grad, - cudaStream_t stream[2], - T* inp_grad_out, - const T* norm_in = nullptr) - { - launch_layerNorm_backward_fused_add(out_grad1, - out_grad2, - norm_in, - vars, - means, - gamma, - gamma_grad, - betta_grad, - inp_grad_out, - bsz, - config_.hiddenDim, - stream); - } - - void BackwardFusedAdd(int bsz, - const T* out_grad1, - const T* out_grad2, - const T* gamma, - const T* betta, - T* gamma_grad, - T* betta_grad, - cudaStream_t stream[2], - T* inp_grad_out, - const T* norm_out) - { - launch_layerNorm_backward_fused_add(out_grad1, - out_grad2, - norm_out, - vars, - gamma, - gamma_grad, - betta_grad, - inp_grad_out, - bsz, - config_.hiddenDim, - stream, - !config_.useMean, - betta); - } - - inline bool UseMean() const { return config_.useMean; } - - inline void SetVar(T* variance) - { - if (!variance) { throw std::runtime_error("Normalize variance is null."); } - vars = variance; - } - - inline void SetMean(T* mean) - { - if (!mean) { throw std::runtime_error("Normalize mean is null."); } - means = mean; - } - -private: - Config config_; - T* vars; - T* means; - T* vals_hat; -}; diff --git a/deepspeed/ops/csrc/includes/normalize_layer_hip.h b/deepspeed/ops/csrc/includes/normalize_layer_hip.h deleted file mode 100644 index 4170276..0000000 --- a/deepspeed/ops/csrc/includes/normalize_layer_hip.h +++ /dev/null @@ -1,203 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
-#pragma once - -#include -#include -#include -#include -#include "custom_hip_layers.h" - -using namespace std; - -template -class Normalize_Layer { -public: - struct Config { - uint32_t batchSize; - uint32_t seqLength; - uint32_t hiddenDim; - float epsilon; - bool training; - bool useMean; - Config(uint32_t batch, - uint32_t seq, - uint32_t h, - float epsilon = 1e-12, - bool training = true, - bool useMean = true) - : batchSize(batch), - seqLength(seq), - hiddenDim(h), - epsilon(epsilon), - training(training), - useMean(useMean) - { - } - }; - - Normalize_Layer(Config config) - : config_(config), vars(nullptr), means(nullptr), vals_hat(nullptr) - { - } - - ~Normalize_Layer() {} - - void ForwardCheckpoint(int bsz, // batch * seq - T* vals, - const T* residual, - const T* gamma, - const T* betta, - hipStream_t& stream, - bool preLayerNorm = false) - { - launch_bias_residual_layer_norm(vals, - residual, - gamma, - betta, - config_.epsilon, - bsz, - config_.hiddenDim, - stream, - preLayerNorm, - config_.training, - vars, - means); - } - - void Forward(int bsz, - T* vals, - const T* residual, - const T* gamma, - const T* betta, - hipStream_t& stream, - bool preLayerNorm = false) - { - launch_bias_residual_layer_norm(vals, - residual, - gamma, - betta, - config_.epsilon, - bsz, - config_.hiddenDim, - stream, - preLayerNorm, - config_.training, - vars); - } - - void Backward(int bsz, - const T* out_grad, - const T* gamma, - T* gamma_grad, - T* betta_grad, - hipStream_t stream[2], - T* inp_grad_out, - const T* norm_in = nullptr) - { - launch_layerNorm_backward(out_grad, - norm_in, - vars, - means, - gamma, - gamma_grad, - betta_grad, - inp_grad_out, - bsz, - config_.hiddenDim, - stream); - } - - void Backward(int bsz, - const T* out_grad, - const T* gamma, - const T* betta, - T* gamma_grad, - T* betta_grad, - hipStream_t stream[2], - T* inp_grad_out, - const T* norm_out) - { - launch_layerNorm_backward(out_grad, - norm_out, - vars, - gamma, - gamma_grad, - betta_grad, - 
inp_grad_out, - bsz, - config_.hiddenDim, - stream, - !config_.useMean, - betta); - } - - void BackwardFusedAdd(int bsz, - const T* out_grad1, - const T* out_grad2, - const T* gamma, - T* gamma_grad, - T* betta_grad, - hipStream_t stream[2], - T* inp_grad_out, - const T* norm_in = nullptr) - { - launch_layerNorm_backward_fused_add(out_grad1, - out_grad2, - norm_in, - vars, - means, - gamma, - gamma_grad, - betta_grad, - inp_grad_out, - bsz, - config_.hiddenDim, - stream); - } - - void BackwardFusedAdd(int bsz, - const T* out_grad1, - const T* out_grad2, - const T* gamma, - const T* betta, - T* gamma_grad, - T* betta_grad, - hipStream_t stream[2], - T* inp_grad_out, - const T* norm_out) - { - launch_layerNorm_backward_fused_add(out_grad1, - out_grad2, - norm_out, - vars, - gamma, - gamma_grad, - betta_grad, - inp_grad_out, - bsz, - config_.hiddenDim, - stream, - !config_.useMean, - betta); - } - - inline bool UseMean() const { return config_.useMean; } - - inline void SetVar(T* variance) - { - if (!variance) { throw std::runtime_error("Normalize variance is null."); } - vars = variance; - } - - inline void SetMean(T* mean) - { - if (!mean) { throw std::runtime_error("Normalize mean is null."); } - means = mean; - } - -private: - Config config_; - T* vars; - T* means; - T* vals_hat; -}; diff --git a/deepspeed/ops/csrc/includes/quantizer.h b/deepspeed/ops/csrc/includes/quantizer.h deleted file mode 100644 index 79eeb14..0000000 --- a/deepspeed/ops/csrc/includes/quantizer.h +++ /dev/null @@ -1,9 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include -#include diff --git a/deepspeed/ops/csrc/includes/quantizer_hip.h b/deepspeed/ops/csrc/includes/quantizer_hip.h deleted file mode 100644 index 7fbbbaf..0000000 --- a/deepspeed/ops/csrc/includes/quantizer_hip.h +++ /dev/null @@ -1,10 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
-#pragma once - -#include -#include -#include -#include -#include -#include -#include diff --git a/deepspeed/ops/csrc/includes/simd.h b/deepspeed/ops/csrc/includes/simd.h deleted file mode 100644 index 44c4da8..0000000 --- a/deepspeed/ops/csrc/includes/simd.h +++ /dev/null @@ -1,137 +0,0 @@ -#pragma once - -#if (__x86_64__ || __i386__) -#include -#include -#endif - -#define TILE (128 * 1024 * 1024) -#if defined(__AVX512__) or defined(__AVX256__) - -#define ROUND_DOWN(size, step) ((size) & ~((step)-1)) - -#if defined(__AVX512__) -#define SIMD_STORE(a, d) _mm512_storeu_ps(a, d) -#define SIMD_LOAD(x) _mm512_loadu_ps(x) -#define SIMD_SET(x) _mm512_set1_ps(x) -#define SIMD_ADD(x, y) _mm512_add_ps(x, y) -#define SIMD_MUL(x, y) _mm512_mul_ps(x, y) -#define SIMD_FMA(x, y, c) _mm512_fmadd_ps(x, y, c) -#define SIMD_SQRT(x) _mm512_sqrt_ps(x) -#define SIMD_DIV(x, y) _mm512_div_ps(x, y) -#define SIMD_WIDTH 16 - -#define SIMD_LOAD2(x, h) \ - ((h) ? _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i*)x)) : _mm512_loadu_ps(x)) -#define SIMD_STORE2(x, d, h) \ - ((h) ? _mm256_store_ps(x, _mm256_castsi256_ps(_mm512_cvtps_ph(d, _MM_FROUND_TO_NEAREST_INT))) \ - : _mm512_storeu_ps(x, d)) - -#define INTV __m256i -#elif defined(__AVX256__) -#define SIMD_STORE(a, d) _mm256_storeu_ps(a, d) -#define SIMD_LOAD(x) _mm256_loadu_ps(x) -#define SIMD_SET(x) _mm256_set1_ps(x) -#define SIMD_ADD(x, y) _mm256_add_ps(x, y) -#define SIMD_MUL(x, y) _mm256_mul_ps(x, y) -#define SIMD_FMA(x, y, c) _mm256_fmadd_ps(x, y, c) -#define SIMD_SQRT(x) _mm256_sqrt_ps(x) -#define SIMD_DIV(x, y) _mm256_div_ps(x, y) -#define SIMD_WIDTH 8 -#define SIMD_LOAD2(x, h) \ - ((h) ? _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)x)) : _mm256_loadu_ps(x)) - -#define SIMD_STORE2(x, d, h) \ - ((h) ? 
_mm_store_ps(x, _mm_castsi128_ps(_mm256_cvtps_ph(d, _MM_FROUND_TO_NEAREST_INT))) \ - : _mm256_storeu_ps(x, d)) - -#define INTV __m128i -#endif - -union AVX_Data { -#if defined(__AVX512__) - __m512 data; -#elif defined(__AVX256__) - __m256 data; -#endif - // float data_f[16]; -}; - -template -inline void simd_store(float* dst, AVX_Data* src, bool half_precision) -{ -#pragma unroll - for (size_t i = 0; i < span; ++i) { - SIMD_STORE2(dst + SIMD_WIDTH * i, src[i].data, half_precision); - } -} -template -inline void simd_load(AVX_Data* dst, float* src, bool half_precision) -{ -#pragma unroll - for (size_t i = 0; i < span; ++i) { - dst[i].data = SIMD_LOAD2(src + SIMD_WIDTH * i, half_precision); - } -} -template -inline void simd_fma(AVX_Data* dst, AVX_Data* src_m_l, AVX_Data src_m_r, AVX_Data* src_a) -{ -#pragma unroll - for (size_t i = 0; i < span; ++i) { - dst[i].data = SIMD_FMA(src_m_l[i].data, src_m_r.data, src_a[i].data); - } -} -template -inline void simd_fma(AVX_Data* dst, AVX_Data* src_m_l, AVX_Data src_m_r, AVX_Data src_a) -{ -#pragma unroll - for (size_t i = 0; i < span; ++i) { - dst[i].data = SIMD_FMA(src_m_l[i].data, src_m_r.data, src_a.data); - } -} -template -inline void simd_fma(AVX_Data* dst, AVX_Data* src_m_l, AVX_Data* src_m_r, AVX_Data* src_a) -{ -#pragma unroll - for (size_t i = 0; i < span; ++i) { - dst[i].data = SIMD_FMA(src_m_l[i].data, src_m_r[i].data, src_a[i].data); - } -} -template -inline void simd_sqrt(AVX_Data* dst, AVX_Data* src) -{ -#pragma unroll - for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_SQRT(src[i].data); } -} -template -inline void simd_add(AVX_Data* dst, AVX_Data* src_a_l, AVX_Data src_a_r) -{ -#pragma unroll - for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_ADD(src_a_l[i].data, src_a_r.data); } -} -template -inline void simd_add(AVX_Data* dst, AVX_Data* src_a_l, AVX_Data* src_a_r) -{ -#pragma unroll - for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_ADD(src_a_l[i].data, src_a_r[i].data); } -} -template 
-inline void simd_mul(AVX_Data* dst, AVX_Data* src_a_l, AVX_Data src_a_r) -{ -#pragma unroll - for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_MUL(src_a_l[i].data, src_a_r.data); } -} -template -inline void simd_mul(AVX_Data* dst, AVX_Data* src_a_l, AVX_Data* src_a_r) -{ -#pragma unroll - for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_MUL(src_a_l[i].data, src_a_r[i].data); } -} -template -inline void simd_div(AVX_Data* dst, AVX_Data* src_a_l, AVX_Data* src_a_r) -{ -#pragma unroll - for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_DIV(src_a_l[i].data, src_a_r[i].data); } -} - -#endif diff --git a/deepspeed/ops/csrc/includes/softmax.h b/deepspeed/ops/csrc/includes/softmax.h deleted file mode 100644 index 8d541a6..0000000 --- a/deepspeed/ops/csrc/includes/softmax.h +++ /dev/null @@ -1,60 +0,0 @@ -#pragma once - -#include -#include -#include -#include "custom_cuda_layers.h" - -#include - -using namespace std; - -template -class Softmax { -public: - struct Config { - size_t batchSize; - size_t heads; - size_t seq_length; - size_t prob_depth; - float temperature; - bool mem_alloc; - Config(size_t batch, size_t h, size_t seq, int prob_size = 0, bool mem_alloc = false) - : batchSize(batch), - heads(h), - seq_length(seq), - prob_depth(prob_size), - temperature(1.0), - mem_alloc(mem_alloc) - { - } - }; - - Softmax(Config config) : config_(config) {} - - ~Softmax() {} - - void Forward(int bsz, T* vals, const T* attn_mask, cudaStream_t& stream) - { - launch_attn_softmax(vals, attn_mask, bsz, config_.heads, config_.seq_length, stream); - } - - void Backward(int bsz, T* out_grad, const T* soft_out, cudaStream_t stream) - { - launch_attn_softmax_backward_v2( - out_grad, soft_out, bsz, config_.heads, config_.seq_length, stream); - } - - inline size_t GetProbDepth() const { return config_.prob_depth; } - - inline size_t GetBatchSize() const { return config_.batchSize; } - - inline size_t GetNumHeads() const { return config_.heads; } - - inline size_t 
GetSeqLength() const { return config_.seq_length; } - - inline void SetSeqLength(size_t seq_len) { config_.seq_length = seq_len; } - -private: - Config config_; -}; diff --git a/deepspeed/ops/csrc/includes/softmax_hip.h b/deepspeed/ops/csrc/includes/softmax_hip.h deleted file mode 100644 index 47822e6..0000000 --- a/deepspeed/ops/csrc/includes/softmax_hip.h +++ /dev/null @@ -1,61 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! -#pragma once - -#include -#include -#include -#include "custom_hip_layers.h" - -#include - -using namespace std; - -template -class Softmax { -public: - struct Config { - size_t batchSize; - size_t heads; - size_t seq_length; - size_t prob_depth; - float temperature; - bool mem_alloc; - Config(size_t batch, size_t h, size_t seq, int prob_size = 0, bool mem_alloc = false) - : batchSize(batch), - heads(h), - seq_length(seq), - prob_depth(prob_size), - temperature(1.0), - mem_alloc(mem_alloc) - { - } - }; - - Softmax(Config config) : config_(config) {} - - ~Softmax() {} - - void Forward(int bsz, T* vals, const T* attn_mask, hipStream_t& stream) - { - launch_attn_softmax(vals, attn_mask, bsz, config_.heads, config_.seq_length, stream); - } - - void Backward(int bsz, T* out_grad, const T* soft_out, hipStream_t stream) - { - launch_attn_softmax_backward_v2( - out_grad, soft_out, bsz, config_.heads, config_.seq_length, stream); - } - - inline size_t GetProbDepth() const { return config_.prob_depth; } - - inline size_t GetBatchSize() const { return config_.batchSize; } - - inline size_t GetNumHeads() const { return config_.heads; } - - inline size_t GetSeqLength() const { return config_.seq_length; } - - inline void SetSeqLength(size_t seq_len) { config_.seq_length = seq_len; } - -private: - Config config_; -}; diff --git a/deepspeed/ops/csrc/includes/strided_batch_gemm.h b/deepspeed/ops/csrc/includes/strided_batch_gemm.h deleted file mode 100644 index 037319b..0000000 --- a/deepspeed/ops/csrc/includes/strided_batch_gemm.h +++ 
/dev/null @@ -1,195 +0,0 @@ -#pragma once - -#include -#include -#include -#include "context.h" - -template -class StridedBatchGemm { -public: - struct Config { - int batch_size; - int m; - int n; - int k; - float alpha; - float beta; - cublasOperation_t op_A; - cublasOperation_t op_B; - std::array gemm_algos; - - Config(int batch, - int mm, - int nn, - int kk, - float param_alpha, - float param_beta, - cublasOperation_t opA, - cublasOperation_t opB, - const std::array& algos) - : batch_size(batch), - m(mm), - n(nn), - k(kk), - alpha(param_alpha), - beta(param_beta), - op_A(opA), - op_B(opB), - gemm_algos(algos) - { - } - void SetConfig(int mm, int nn, int kk) - { - m = mm; - n = nn; - k = kk; - } - }; - - StridedBatchGemm(const Config& config) : _config(config) {} - - virtual ~StridedBatchGemm() {} - - void Forward(int bsz, T* output, const T* _buffer_a, const T* _buffer_b, cublasHandle_t handle) - { - int stride_a = _config.m * _config.k; - int stride_b = _config.n * _config.k; - int stride_c = _config.m * _config.n; - - cublas_strided_batched_gemm(handle, - _config.m, - _config.n, - _config.k, - &_config.alpha, - &_config.beta, - _buffer_a, - _buffer_b, - output, - _config.op_A, - _config.op_B, - stride_a, - stride_b, - stride_c, - bsz, -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo(_config.gemm_algos[0])); -#else - cublasGemmAlgo_t(_config.gemm_algos[0])); -#endif - } - - void ForwardPlusSave(T* output, const T* _buffer_a, const T* _buffer_b, cublasHandle_t handle) - { - int stride_a = _config.m * _config.k; - int stride_b = _config.n * _config.k; - int stride_c = _config.m * _config.n; - - cublas_strided_batched_gemm(handle, - _config.m, - _config.n, - _config.k, - &_config.alpha, - &_config.beta, - _buffer_a, - _buffer_b, - output, - _config.op_A, - _config.op_B, - stride_a, - stride_b, - stride_c, - _config.batch_size, -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo(_config.gemm_algos[0])); -#else - cublasGemmAlgo_t(_config.gemm_algos[0])); -#endif - - 
k_buf = _buffer_a; - q_buf = _buffer_b; - } - - void Backward(int bsz, - const T* d_output, - const T* _buffer_a, - const T* _buffer_b, - cublasHandle_t handle, - T* inpGradA = nullptr, - T* inpGradB = nullptr) - { - int mb = (_config.op_A == CUBLAS_OP_T ? _config.k : _config.m); - int kb = (_config.op_A == CUBLAS_OP_T ? _config.m : _config.k); - - int stride_a = mb * _config.n; - int stride_b = _config.n * kb; - int stride_c = _config.m * _config.k; - - // B need to transpose. - cublasOperation_t op_b = (_config.op_B == CUBLAS_OP_T ? CUBLAS_OP_N : CUBLAS_OP_T); - - // Calculate d_A. - cublas_strided_batched_gemm(handle, - mb, - kb, - _config.n, - &_config.alpha, - &_config.beta, - (_config.op_A == CUBLAS_OP_T ? _buffer_b : d_output), - (_config.op_A == CUBLAS_OP_T ? d_output : _buffer_b), - inpGradA, - CUBLAS_OP_N, - op_b, - stride_a, - stride_b, - stride_c, - bsz, -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo(_config.gemm_algos[1])); -#else - cublasGemmAlgo_t(_config.gemm_algos[1])); -#endif - - // A need to transpose. - cublasOperation_t op_a = (_config.op_A == CUBLAS_OP_T ? CUBLAS_OP_N : CUBLAS_OP_T); - - stride_a = _config.m * _config.k; - stride_b = _config.m * _config.n; - stride_c = _config.n * _config.k; - - // Calculate d_B. 
- cublas_strided_batched_gemm(handle, - _config.k, - _config.n, - _config.m, - &_config.alpha, - &_config.beta, - _buffer_a, - d_output, - inpGradB, - op_a, - CUBLAS_OP_N, - stride_a, - stride_b, - stride_c, - bsz, -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo(_config.gemm_algos[2])); -#else - cublasGemmAlgo_t(_config.gemm_algos[2])); -#endif - } - - inline int GetN() const { return _config.k; } - - inline const T* GetBufferA() const { return k_buf; } - - inline const T* GetBufferB() const { return q_buf; } - - inline void SetConfig(int m, int n, int k) { _config.SetConfig(m, n, k); } - -private: - Config _config; - const T* q_buf; - const T* k_buf; -}; diff --git a/deepspeed/ops/csrc/includes/strided_batch_gemm_hip.h b/deepspeed/ops/csrc/includes/strided_batch_gemm_hip.h deleted file mode 100644 index 9db208d..0000000 --- a/deepspeed/ops/csrc/includes/strided_batch_gemm_hip.h +++ /dev/null @@ -1,196 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! -#pragma once - -#include -#include -#include -#include "context_hip.h" - -template -class StridedBatchGemm { -public: - struct Config { - int batch_size; - int m; - int n; - int k; - float alpha; - float beta; - rocblas_operation op_A; - rocblas_operation op_B; - std::array gemm_algos; - - Config(int batch, - int mm, - int nn, - int kk, - float param_alpha, - float param_beta, - rocblas_operation opA, - rocblas_operation opB, - const std::array& algos) - : batch_size(batch), - m(mm), - n(nn), - k(kk), - alpha(param_alpha), - beta(param_beta), - op_A(opA), - op_B(opB), - gemm_algos(algos) - { - } - void SetConfig(int mm, int nn, int kk) - { - m = mm; - n = nn; - k = kk; - } - }; - - StridedBatchGemm(const Config& config) : _config(config) {} - - virtual ~StridedBatchGemm() {} - - void Forward(int bsz, T* output, const T* _buffer_a, const T* _buffer_b, rocblas_handle handle) - { - int stride_a = _config.m * _config.k; - int stride_b = _config.n * _config.k; - int stride_c = _config.m * _config.n; - - 
cublas_strided_batched_gemm(handle, - _config.m, - _config.n, - _config.k, - &_config.alpha, - &_config.beta, - _buffer_a, - _buffer_b, - output, - _config.op_A, - _config.op_B, - stride_a, - stride_b, - stride_c, - bsz, -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo(_config.gemm_algos[0])); -#else - cublasGemmAlgo_t(_config.gemm_algos[0])); -#endif - } - - void ForwardPlusSave(T* output, const T* _buffer_a, const T* _buffer_b, rocblas_handle handle) - { - int stride_a = _config.m * _config.k; - int stride_b = _config.n * _config.k; - int stride_c = _config.m * _config.n; - - cublas_strided_batched_gemm(handle, - _config.m, - _config.n, - _config.k, - &_config.alpha, - &_config.beta, - _buffer_a, - _buffer_b, - output, - _config.op_A, - _config.op_B, - stride_a, - stride_b, - stride_c, - _config.batch_size, -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo(_config.gemm_algos[0])); -#else - cublasGemmAlgo_t(_config.gemm_algos[0])); -#endif - - k_buf = _buffer_a; - q_buf = _buffer_b; - } - - void Backward(int bsz, - const T* d_output, - const T* _buffer_a, - const T* _buffer_b, - rocblas_handle handle, - T* inpGradA = nullptr, - T* inpGradB = nullptr) - { - int mb = (_config.op_A == rocblas_operation_transpose ? _config.k : _config.m); - int kb = (_config.op_A == rocblas_operation_transpose ? _config.m : _config.k); - - int stride_a = mb * _config.n; - int stride_b = _config.n * kb; - int stride_c = _config.m * _config.k; - - // B need to transpose. - rocblas_operation op_b = (_config.op_B == rocblas_operation_transpose ? rocblas_operation_none : rocblas_operation_transpose); - - // Calculate d_A. - cublas_strided_batched_gemm(handle, - mb, - kb, - _config.n, - &_config.alpha, - &_config.beta, - (_config.op_A == rocblas_operation_transpose ? _buffer_b : d_output), - (_config.op_A == rocblas_operation_transpose ? 
d_output : _buffer_b), - inpGradA, - rocblas_operation_none, - op_b, - stride_a, - stride_b, - stride_c, - bsz, -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo(_config.gemm_algos[1])); -#else - cublasGemmAlgo_t(_config.gemm_algos[1])); -#endif - - // A need to transpose. - rocblas_operation op_a = (_config.op_A == rocblas_operation_transpose ? rocblas_operation_none : rocblas_operation_transpose); - - stride_a = _config.m * _config.k; - stride_b = _config.m * _config.n; - stride_c = _config.n * _config.k; - - // Calculate d_B. - cublas_strided_batched_gemm(handle, - _config.k, - _config.n, - _config.m, - &_config.alpha, - &_config.beta, - _buffer_a, - d_output, - inpGradB, - op_a, - rocblas_operation_none, - stride_a, - stride_b, - stride_c, - bsz, -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo(_config.gemm_algos[2])); -#else - cublasGemmAlgo_t(_config.gemm_algos[2])); -#endif - } - - inline int GetN() const { return _config.k; } - - inline const T* GetBufferA() const { return k_buf; } - - inline const T* GetBufferB() const { return q_buf; } - - inline void SetConfig(int m, int n, int k) { _config.SetConfig(m, n, k); } - -private: - Config _config; - const T* q_buf; - const T* k_buf; -}; diff --git a/deepspeed/ops/csrc/includes/type_shim.h b/deepspeed/ops/csrc/includes/type_shim.h deleted file mode 100644 index 4f4e7a5..0000000 --- a/deepspeed/ops/csrc/includes/type_shim.h +++ /dev/null @@ -1,119 +0,0 @@ -/* Taken from NVIDIA/apex commit 855808f3fc268e9715d613f3c2e56469d8c986d8 */ -#include - -// Forward/backward compatibility hack around -// https://github.com/pytorch/pytorch/commit/3aeb78079bcd68282fe9117088e138b77318e288 -// pending more future-proof guidance from upstream. 
-// struct TypeShim -// { -// const at::Type& payload; -// TypeShim(const at::Type& type) : payload(type) {} -// // Enable trivial conversion to a const at::Type& for pre-3aeb78 -// operator const at::Type&(){ return payload; }; -// // Enable dispatch switch statements to take *this directly for post-3aeb78 -// //operator at::ScalarType(){ return payload.; }; -// }; - -#define DISPATCH_FLOAT_AND_HALF(TYPE, LEVEL, NAME, ...) \ - switch (TYPE) { \ - case at::ScalarType::Float: { \ - using scalar_t_##LEVEL = float; \ - __VA_ARGS__; \ - break; \ - } \ - case at::ScalarType::Half: { \ - using scalar_t_##LEVEL = at::Half; \ - __VA_ARGS__; \ - break; \ - } \ - case at::ScalarType::BFloat16: { \ - using scalar_t_##LEVEL = at::BFloat16; \ - __VA_ARGS__; \ - break; \ - } \ - default: AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ - } - -#define DISPATCH_DOUBLE_FLOAT_AND_HALF(TYPE, LEVEL, NAME, ...) \ - switch (TYPE) { \ - case at::ScalarType::Double: { \ - using scalar_t_##LEVEL = double; \ - __VA_ARGS__; \ - break; \ - } \ - case at::ScalarType::Float: { \ - using scalar_t_##LEVEL = float; \ - __VA_ARGS__; \ - break; \ - } \ - case at::ScalarType::Half: { \ - using scalar_t_##LEVEL = at::Half; \ - __VA_ARGS__; \ - break; \ - } \ - case at::ScalarType::BFloat16: { \ - using scalar_t_##LEVEL = at::BFloat16; \ - __VA_ARGS__; \ - break; \ - } \ - default: AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ - } - -#define DISPATCH_DOUBLE_AND_FLOAT(TYPE, LEVEL, NAME, ...) \ - switch (TYPE) { \ - case at::ScalarType::Double: { \ - using scalar_t_##LEVEL = double; \ - __VA_ARGS__; \ - break; \ - } \ - case at::ScalarType::Float: { \ - using scalar_t_##LEVEL = float; \ - __VA_ARGS__; \ - break; \ - } \ - default: AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ - } - -template -__device__ __forceinline__ T -reduce_block_into_lanes(T* x, - T val, - int lanes = 1, - bool share_result = false) // lanes is intended to be <= 32. 
-{ - int tid = threadIdx.x + threadIdx.y * blockDim.x; - int blockSize = blockDim.x * blockDim.y; // blockSize is intended to be a multiple of 32. - - if (blockSize >= 64) { - x[tid] = val; - __syncthreads(); - } - -#pragma unroll - for (int i = (blockSize >> 1); i >= 64; i >>= 1) { - if (tid < i) x[tid] = x[tid] + x[tid + i]; - __syncthreads(); - } - - T final; - - if (tid < 32) { - if (blockSize >= 64) - final = x[tid] + x[tid + 32]; - else - final = val; - // __SYNCWARP(); - -#pragma unroll - for (int i = 16; i >= lanes; i >>= 1) - final = final + __shfl_down_sync(0xffffffff, final, i); - } - - if (share_result) { - if (tid < lanes) x[tid] = final; // EpilogueOp - // Make sure the smem result is visible to all warps. - __syncthreads(); - } - - return final; -} diff --git a/deepspeed/ops/csrc/includes/type_shim_hip.h b/deepspeed/ops/csrc/includes/type_shim_hip.h deleted file mode 100644 index 3bd86d5..0000000 --- a/deepspeed/ops/csrc/includes/type_shim_hip.h +++ /dev/null @@ -1,121 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! -#include "hip/hip_runtime.h" -/* Taken from NVIDIA/apex commit 855808f3fc268e9715d613f3c2e56469d8c986d8 */ -#include - -// Forward/backward compatibility hack around -// https://github.com/pytorch/pytorch/commit/3aeb78079bcd68282fe9117088e138b77318e288 -// pending more future-proof guidance from upstream. -// struct TypeShim -// { -// const at::Type& payload; -// TypeShim(const at::Type& type) : payload(type) {} -// // Enable trivial conversion to a const at::Type& for pre-3aeb78 -// operator const at::Type&(){ return payload; }; -// // Enable dispatch switch statements to take *this directly for post-3aeb78 -// //operator at::ScalarType(){ return payload.; }; -// }; - -#define DISPATCH_FLOAT_AND_HALF(TYPE, LEVEL, NAME, ...) 
\ - switch (TYPE) { \ - case at::ScalarType::Float: { \ - using scalar_t_##LEVEL = float; \ - __VA_ARGS__; \ - break; \ - } \ - case at::ScalarType::Half: { \ - using scalar_t_##LEVEL = at::Half; \ - __VA_ARGS__; \ - break; \ - } \ - case at::ScalarType::BFloat16: { \ - using scalar_t_##LEVEL = at::BFloat16; \ - __VA_ARGS__; \ - break; \ - } \ - default: AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ - } - -#define DISPATCH_DOUBLE_FLOAT_AND_HALF(TYPE, LEVEL, NAME, ...) \ - switch (TYPE) { \ - case at::ScalarType::Double: { \ - using scalar_t_##LEVEL = double; \ - __VA_ARGS__; \ - break; \ - } \ - case at::ScalarType::Float: { \ - using scalar_t_##LEVEL = float; \ - __VA_ARGS__; \ - break; \ - } \ - case at::ScalarType::Half: { \ - using scalar_t_##LEVEL = at::Half; \ - __VA_ARGS__; \ - break; \ - } \ - case at::ScalarType::BFloat16: { \ - using scalar_t_##LEVEL = at::BFloat16; \ - __VA_ARGS__; \ - break; \ - } \ - default: AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ - } - -#define DISPATCH_DOUBLE_AND_FLOAT(TYPE, LEVEL, NAME, ...) \ - switch (TYPE) { \ - case at::ScalarType::Double: { \ - using scalar_t_##LEVEL = double; \ - __VA_ARGS__; \ - break; \ - } \ - case at::ScalarType::Float: { \ - using scalar_t_##LEVEL = float; \ - __VA_ARGS__; \ - break; \ - } \ - default: AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ - } - -template -__device__ __forceinline__ T -reduce_block_into_lanes(T* x, - T val, - int lanes = 1, - bool share_result = false) // lanes is intended to be <= 32. -{ - int tid = threadIdx.x + threadIdx.y * blockDim.x; - int blockSize = blockDim.x * blockDim.y; // blockSize is intended to be a multiple of 32. 
- - if (blockSize >= 64) { - x[tid] = val; - __syncthreads(); - } - -#pragma unroll - for (int i = (blockSize >> 1); i >= 64; i >>= 1) { - if (tid < i) x[tid] = x[tid] + x[tid + i]; - __syncthreads(); - } - - T final; - - if (tid < 32) { - if (blockSize >= 64) - final = x[tid] + x[tid + 32]; - else - final = val; - // __SYNCWARP(); - -#pragma unroll - for (int i = 16; i >= lanes; i >>= 1) - final = final + __shfl_down_sync(0xffffffff, final, i); - } - - if (share_result) { - if (tid < lanes) x[tid] = final; // EpilogueOp - // Make sure the smem result is visible to all warps. - __syncthreads(); - } - - return final; -} diff --git a/deepspeed/ops/csrc/lamb/fused_lamb_cuda.cpp b/deepspeed/ops/csrc/lamb/fused_lamb_cuda.cpp deleted file mode 100644 index 7a142b1..0000000 --- a/deepspeed/ops/csrc/lamb/fused_lamb_cuda.cpp +++ /dev/null @@ -1,109 +0,0 @@ -/* Copyright 2019 The Microsoft DeepSpeed Team */ -#include - -// CUDA forward declaration -void fused_lamb_cuda(at::Tensor& p, - at::Tensor& p_copy, - at::Tensor& m, - at::Tensor& v, - at::Tensor& g, - float lr, - float beta1, - float beta2, - float max_coeff, - float min_coeff, - float eps, - float grad_scale, - int step, - int mode, - int bias_correction, - float decay, - at::Tensor& w_l2_i, - at::Tensor& u_l2_i, - at::Tensor& lamb_coeff_val); - -#define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor") -#define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous") -#define CHECK_INPUT(x) \ - CHECK_CUDA(x); \ - CHECK_CONTIGUOUS(x) - -// C++ interface -at::Tensor lamb(at::Tensor& p, - at::Tensor& p_copy, - at::Tensor& m, - at::Tensor& v, - at::Tensor& g, - float lr, - float beta1, - float beta2, - float max_coeff, - float min_coeff, - float eps, - float grad_scale, - int step, - int mode, - int bias_correction, - float decay) -{ - CHECK_INPUT(p); - if (p_copy.numel() > 0) CHECK_INPUT(p_copy); - CHECK_INPUT(m); - CHECK_INPUT(v); - CHECK_INPUT(g); - int64_t num_elem = 
p.numel(); - AT_ASSERTM(m.numel() == num_elem, "number of elements in m and p tensors should be equal"); - AT_ASSERTM(v.numel() == num_elem, "number of elements in v and p tensors should be equal"); - AT_ASSERTM(g.numel() == num_elem, "number of elements in g and p tensors should be equal"); - AT_ASSERTM( - p_copy.numel() == num_elem || p_copy.numel() == 0, - "number of elements in p_copy and p tensors should be equal, or p_copy should be empty"); - - // intermediate for weight L2 reduction - // make sure that the threads per block is at least 512 during the kernel launch otherwise the - // behaviour is unexpected - at::Tensor w_l2_i = at::empty( - {512}, - p.options().dtype(p.type().scalarType() == at::ScalarType::Half ? at::ScalarType::Float - : p.type().scalarType())); - - // intermediate for update L2 reduction - // make sure that the threads per block is at least 512 during the kernel launch otherwise the - // behaviour is unexpected - at::Tensor u_l2_i = at::empty( - {512}, - p.options().dtype(p.type().scalarType() == at::ScalarType::Half ? at::ScalarType::Float - : p.type().scalarType())); - - at::Tensor lamb_coeff_val = at::empty( - {1}, - p.options().dtype(p.type().scalarType() == at::ScalarType::Half ? 
at::ScalarType::Float - : p.type().scalarType())); - - fused_lamb_cuda(p, - p_copy, - m, - v, - g, - lr, - beta1, - beta2, - max_coeff, - min_coeff, - eps, - grad_scale, - step, - mode, - bias_correction, - decay, - w_l2_i, - u_l2_i, - lamb_coeff_val); - - return lamb_coeff_val; -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("lamb", &lamb, "Adam optimized CUDA implementation with LAMB."); -} diff --git a/deepspeed/ops/csrc/lamb/fused_lamb_cuda_kernel.cu b/deepspeed/ops/csrc/lamb/fused_lamb_cuda_kernel.cu deleted file mode 100644 index c766323..0000000 --- a/deepspeed/ops/csrc/lamb/fused_lamb_cuda_kernel.cu +++ /dev/null @@ -1,474 +0,0 @@ -/* Copyright 2019 The Microsoft DeepSpeed Team */ -#include -#include -#include -#include -#include "ATen/ATen.h" -#include "ATen/TensorUtils.h" -#include "ATen/cuda/CUDAContext.h" -#include "ATen/cuda/detail/IndexUtils.cuh" -//#include "ATen/Type.h" -#include "ATen/AccumulateType.h" - -#include - -//#include -#if defined(__HIP_PLATFORM_HCC__) && HIP_VERSION > 305 -#include -#else -#include -#endif -#include -#include - -namespace cg = cooperative_groups; - -// Utility class used to avoid linker errors with extern -// unsized shared memory arrays with templated type -namespace { -// This is the un-specialized struct. Note that we prevent instantiation of this -// struct by putting an undefined symbol in the function body so it won't compile. 
-template -struct SharedMemory { - // Ensure that we won't compile any un-specialized types - __device__ inline operator T*() - { -#ifndef _WIN32 - extern __device__ void error(void); - error(); -#endif - return NULL; - } -}; - -template <> -struct SharedMemory { - __device__ inline operator float*() - { - extern __shared__ float s_float[]; - return s_float; - } -}; - -template <> -struct SharedMemory { - __device__ inline operator double*() - { - extern __shared__ double s_double[]; - return s_double; - } -}; -} // namespace - -#include "type_shim.h" - -typedef enum { - ADAM_MODE_0 = 0, // eps under square root - ADAM_MODE_1 = 1 // eps outside square root -} adamMode_t; - -// s_a and s_b are in shared memory -// g_a and g_b are in shared memory -template -__device__ void reduce_block_in_shared_memory(T* s_a, T* s_b, T* g_a, T* g_b) -{ - // Handle to thread block group - cg::thread_block cta = cg::this_thread_block(); - - // perform block reduction in shared memory, - unsigned int tid = cta.thread_rank(); - - T a_sum = s_a[tid]; - T b_sum = s_b[tid]; - - cg::sync(cta); - - // do reduction in shared mem - if ((blockSize >= 512) && (tid < 256)) { - s_a[tid] = a_sum = a_sum + s_a[tid + 256]; - s_b[tid] = b_sum = b_sum + s_b[tid + 256]; - } - - cg::sync(cta); - - if ((blockSize >= 256) && (tid < 128)) { - s_a[tid] = a_sum = a_sum + s_a[tid + 128]; - s_b[tid] = b_sum = b_sum + s_b[tid + 128]; - } - - cg::sync(cta); - - if ((blockSize >= 128) && (tid < 64)) { - s_a[tid] = a_sum = a_sum + s_a[tid + 64]; - s_b[tid] = b_sum = b_sum + s_b[tid + 64]; - } - - cg::sync(cta); - -#if (__CUDA_ARCH__ >= 300) - if (tid < 32) { - cg::coalesced_group active = cg::coalesced_threads(); - - // Fetch final intermediate sum from 2nd warp - if (blockSize >= 64) { - a_sum = a_sum + s_a[tid + 32]; - b_sum = b_sum + s_b[tid + 32]; - } - - // Reduce final warp using shuffle - for (int offset = warpSize / 2; offset > 0; offset /= 2) { - a_sum += active.shfl_down(a_sum, offset); - b_sum += 
active.shfl_down(b_sum, offset); - } - } -#else - if ((blockSize >= 64) && (tid < 32)) { - s_a[tid] = a_sum = a_sum + s_a[tid + 32]; - s_b[tid] = b_sum = b_sum + s_b[tid + 32]; - } - - cg::sync(cta); - - if ((blockSize >= 32) && (tid < 16)) { - s_a[tid] = a_sum = a_sum + s_a[tid + 16]; - s_b[tid] = b_sum = b_sum + s_b[tid + 16]; - } - - cg::sync(cta); - - if ((blockSize >= 16) && (tid < 8)) { - s_a[tid] = a_sum = a_sum + s_a[tid + 8]; - s_b[tid] = b_sum = b_sum + s_b[tid + 8]; - } - - cg::sync(cta); - - if ((blockSize >= 8) && (tid < 4)) { - s_a[tid] = a_sum = a_sum + s_a[tid + 4]; - s_b[tid] = b_sum = b_sum + s_b[tid + 4]; - } - - cg::sync(cta); - - if ((blockSize >= 4) && (tid < 2)) { - s_a[tid] = a_sum = a_sum + s_a[tid + 2]; - s_b[tid] = b_sum = b_sum + s_b[tid + 2]; - } - - cg::sync(cta); - - if ((blockSize >= 2) && (tid < 1)) { - s_a[tid] = a_sum = a_sum + s_a[tid + 1]; - s_b[tid] = b_sum = b_sum + s_b[tid + 1]; - } - - cg::sync(cta); - -#endif - - // write result for this block to global mem - if (tid == 0) { - g_a[blockIdx.x] = (T)a_sum; - g_b[blockIdx.x] = (T)b_sum; - } -} - -template -__device__ void reduce_two_vectors_in_register(T a, T b, T* g_a, T* g_b) -{ - const int threadIdInBlock = cg::this_thread_block().thread_rank(); - - T* s_a = SharedMemory(); - T* s_b = SharedMemory() + cg::this_thread_block().size(); - - s_a[threadIdInBlock] = a; - s_b[threadIdInBlock] = b; - - reduce_block_in_shared_memory(s_a, s_b, g_a, g_b); -} - -template -__global__ void lamb_cuda_kernel_part1( - T* __restrict__ p, - GRAD_T* __restrict__ p_copy, // For mixed precision training, pass NULL if not needed - T* __restrict__ m, - T* __restrict__ v, - const GRAD_T* __restrict__ g, - const float b1, - const float b2, - const float eps, - const float grad_scale, - const float step_size, - const size_t tsize, - adamMode_t mode, - const float decay, - T* __restrict__ w_l2_i, - T* __restrict__ u_l2_i) -{ - // Assuming 2D grids and 2D blocks - const int blockId = gridDim.x * 
blockIdx.y + blockIdx.x; - const int threadsPerBlock = blockDim.x * blockDim.y; - const int threadIdInBlock = cg::this_thread_block().thread_rank(); - const int i = (blockId * threadsPerBlock + threadIdInBlock); - const int totThreads = gridDim.x * gridDim.y * threadsPerBlock; - - T reg_w = 0; - T reg_u = 0; - - for (int j = i; j < tsize; j += totThreads) { - T scaled_grad = g[j] / grad_scale; - T pj = p[j]; - m[j] = b1 * m[j] + (1 - b1) * scaled_grad; - v[j] = b2 * v[j] + (1 - b2) * scaled_grad * scaled_grad; - float denom; - if (mode == ADAM_MODE_0) - denom = sqrtf(v[j] + eps); - else // Mode 1 - denom = sqrtf(v[j]) + eps; - T update = (m[j] / denom) + (decay * p[j]); - - reg_u += update * update; - reg_w += pj * pj; - } - - reduce_two_vectors_in_register(reg_w, reg_u, w_l2_i, u_l2_i); -} - -template -__global__ void lamb_cuda_kernel_part2(const size_t tsize, T* __restrict__ g_a, T* __restrict__ g_b) -{ - T* s_a = SharedMemory(); - T* s_b = SharedMemory() + cg::this_thread_block().size(); - - const int threadIdInBlock = cg::this_thread_block().thread_rank(); - - s_a[threadIdInBlock] = g_a[threadIdInBlock]; - s_b[threadIdInBlock] = g_b[threadIdInBlock]; - - if (threadIdInBlock >= tsize) { - s_a[threadIdInBlock] = 0.0; - s_b[threadIdInBlock] = 0.0; - } - - reduce_block_in_shared_memory(s_a, s_b, g_a, g_b); -} - -template -__global__ void lamb_cuda_kernel_part3( - T* __restrict__ p, - GRAD_T* __restrict__ p_copy, // For mixed precision training, pass NULL if not needed - T* __restrict__ m, - T* __restrict__ v, - const GRAD_T* __restrict__ g, - const float b1, - const float b2, - const float max_coeff, - const float min_coeff, - const float eps, - const float grad_scale, - const float step_size, - const size_t tsize, - adamMode_t mode, - const float decay, - T* __restrict__ w_l2_i, - T* __restrict__ u_l2_i, - T* __restrict__ lamb_coeff_val) -{ - // Assuming 2D grids and 2D blocks - const int blockId = gridDim.x * blockIdx.y + blockIdx.x; - const int threadsPerBlock = 
blockDim.x * blockDim.y; - const int threadIdInBlock = cg::this_thread_block().thread_rank(); - const int i = (blockId * threadsPerBlock + threadIdInBlock); - const int totThreads = gridDim.x * gridDim.y * threadsPerBlock; - - T reg_w = sqrtf(w_l2_i[0]); - T reg_u = sqrtf(u_l2_i[0]); - - float lamb_coeff = 1.0; - - if (reg_w != 0 && reg_u != 0) { - lamb_coeff = reg_w / reg_u; - if (lamb_coeff > max_coeff) { lamb_coeff = max_coeff; } - if (lamb_coeff < min_coeff) { lamb_coeff = min_coeff; } - } - - if (blockId == 0 && threadIdInBlock == 0) { - lamb_coeff_val[0] = lamb_coeff; - // printf("Cuda Lamb Coeff is %.6f \n",lamb_coeff); - } - - for (int j = i; j < tsize; j += totThreads) { - T pj = (float)p[j]; - T mj = m[j]; - T vj = v[j]; - float denom; - if (mode == ADAM_MODE_0) - denom = sqrtf(vj + eps); - else // Mode 1 - denom = sqrtf(vj) + eps; - T update = (mj / denom) + (decay * pj); - - pj = pj - (step_size * lamb_coeff * update); - p[j] = pj; - if (p_copy != NULL) p_copy[j] = (GRAD_T)pj; - } -} - -void fused_lamb_cuda(at::Tensor& p, - at::Tensor& p_copy, - at::Tensor& m, - at::Tensor& v, - at::Tensor& g, - float lr, - float beta1, - float beta2, - float max_coeff, - float min_coeff, - float eps, - float grad_scale, - int step, - int mode, - int bias_correction, - float decay, - at::Tensor& w_l2_i, - at::Tensor& u_l2_i, - at::Tensor& lamb_coeff) -{ - // using namespace at; - - // Get tensor size - int tsize = p.numel(); - // Determine #threads and #blocks - const int threadsPerBlock = 512; - int num_blocks = (tsize + threadsPerBlock - 1) / threadsPerBlock; - if (num_blocks > 512) num_blocks = 512; - - int smemsize = 0; - - if (p.type().scalarType() == at::ScalarType::Double) - smemsize = 2 * threadsPerBlock * sizeof(double); - else - smemsize = 2 * threadsPerBlock * sizeof(float); - - const dim3 blocks(num_blocks); - const dim3 threads(threadsPerBlock); - - AT_ASSERTM(at::cuda::detail::canUse32BitIndexMath(p), - "parameter tensor is too large to be indexed with 
int32"); - // Constants - float step_size = 0; - if (bias_correction == 1) { - const float bias_correction1 = 1 - std::pow(beta1, step); - const float bias_correction2 = 1 - std::pow(beta2, step); - step_size = lr * std::sqrt(bias_correction2) / bias_correction1; - } else { - step_size = lr; - } - cudaStream_t stream = at::cuda::getCurrentCUDAStream(); - - if (g.type().scalarType() == at::ScalarType::Half) { - // all other values should be fp32 for half gradients - AT_ASSERTM(p.type().scalarType() == at::ScalarType::Float, - "expected parameter to be of float type"); - // dispatch is done on the gradient type - using namespace at; // prevents "toString is undefined" errors - AT_DISPATCH_FLOATING_TYPES_AND_HALF( - g.scalar_type(), "lamb_cuda_kernel", ([&] { - using accscalar_t = at::acc_type; - - lamb_cuda_kernel_part1 - <<>>( - p.data(), - p_copy.numel() ? p_copy.data() : NULL, - m.data(), - v.data(), - g.data(), - beta1, - beta2, - eps, - grad_scale, - step_size, - tsize, - (adamMode_t)mode, - decay, - w_l2_i.data(), - u_l2_i.data()); - - lamb_cuda_kernel_part2 - <<<1, threadsPerBlock, smemsize, stream>>>( - num_blocks, w_l2_i.data(), u_l2_i.data()); - - lamb_cuda_kernel_part3 - <<>>( - p.data(), - p_copy.numel() ? 
p_copy.data() : NULL, - m.data(), - v.data(), - g.data(), - beta1, - beta2, - max_coeff, - min_coeff, - eps, - grad_scale, - step_size, - tsize, - (adamMode_t)mode, - decay, - w_l2_i.data(), - u_l2_i.data(), - lamb_coeff.data()); - })); - } else { - using namespace at; - AT_DISPATCH_FLOATING_TYPES( - g.scalar_type(), "lamb_cuda_kernel", ([&] { - lamb_cuda_kernel_part1 - <<>>( - p.data(), - NULL, // don't output p_copy for fp32, it's wasted write - m.data(), - v.data(), - g.data(), - beta1, - beta2, - eps, - grad_scale, - step_size, - tsize, - (adamMode_t)mode, - decay, - w_l2_i.data(), - u_l2_i.data()); - - lamb_cuda_kernel_part2 - <<<1, threadsPerBlock, smemsize, stream>>>( - num_blocks, w_l2_i.data(), u_l2_i.data()); - - lamb_cuda_kernel_part3 - <<>>( - p.data(), - NULL, // don't output p_copy for fp32, it's wasted write - m.data(), - v.data(), - g.data(), - beta1, - beta2, - max_coeff, - min_coeff, - eps, - grad_scale, - step_size, - tsize, - (adamMode_t)mode, - decay, - w_l2_i.data(), - u_l2_i.data(), - lamb_coeff.data()); - })); - } - C10_CUDA_CHECK(cudaGetLastError()); -} - -// template __device__ void reduce_two_vectors_in_register(float a, float b, float* g_a, -// float* g_b, cg::grid_group &cgg); diff --git a/deepspeed/ops/csrc/lamb/fused_lamb_hip_kernel.hip b/deepspeed/ops/csrc/lamb/fused_lamb_hip_kernel.hip deleted file mode 100644 index 2e2bc69..0000000 --- a/deepspeed/ops/csrc/lamb/fused_lamb_hip_kernel.hip +++ /dev/null @@ -1,475 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
-/* Copyright 2019 The Microsoft DeepSpeed Team */ -#include -#include -#include -#include -#include "ATen/ATen.h" -#include "ATen/TensorUtils.h" -#include "ATen/hip/HIPContext.h" -#include "ATen/hip/detail/IndexUtils.cuh" -//#include "ATen/Type.h" -#include "ATen/AccumulateType.h" - -#include - -//#include -#if defined(__HIP_PLATFORM_HCC__) && HIP_VERSION > 305 -#include -#else -#include -#endif -#include -#include - -namespace cg = cooperative_groups; - -// Utility class used to avoid linker errors with extern -// unsized shared memory arrays with templated type -namespace { -// This is the un-specialized struct. Note that we prevent instantiation of this -// struct by putting an undefined symbol in the function body so it won't compile. -template -struct SharedMemory { - // Ensure that we won't compile any un-specialized types - __device__ inline operator T*() - { -#ifndef _WIN32 - extern __device__ void error(void); - error(); -#endif - return NULL; - } -}; - -template <> -struct SharedMemory { - __device__ inline operator float*() - { - HIP_DYNAMIC_SHARED( float, s_float) - return s_float; - } -}; - -template <> -struct SharedMemory { - __device__ inline operator double*() - { - HIP_DYNAMIC_SHARED( double, s_double) - return s_double; - } -}; -} // namespace - -#include "type_shim_hip.h" - -typedef enum { - ADAM_MODE_0 = 0, // eps under square root - ADAM_MODE_1 = 1 // eps outside square root -} adamMode_t; - -// s_a and s_b are in shared memory -// g_a and g_b are in shared memory -template -__device__ void reduce_block_in_shared_memory(T* s_a, T* s_b, T* g_a, T* g_b) -{ - // Handle to thread block group - cg::thread_block cta = cg::this_thread_block(); - - // perform block reduction in shared memory, - unsigned int tid = cta.thread_rank(); - - T a_sum = s_a[tid]; - T b_sum = s_b[tid]; - - cg::sync(cta); - - // do reduction in shared mem - if ((blockSize >= 512) && (tid < 256)) { - s_a[tid] = a_sum = a_sum + s_a[tid + 256]; - s_b[tid] = b_sum = b_sum + 
s_b[tid + 256]; - } - - cg::sync(cta); - - if ((blockSize >= 256) && (tid < 128)) { - s_a[tid] = a_sum = a_sum + s_a[tid + 128]; - s_b[tid] = b_sum = b_sum + s_b[tid + 128]; - } - - cg::sync(cta); - - if ((blockSize >= 128) && (tid < 64)) { - s_a[tid] = a_sum = a_sum + s_a[tid + 64]; - s_b[tid] = b_sum = b_sum + s_b[tid + 64]; - } - - cg::sync(cta); - -#if (__CUDA_ARCH__ >= 300) - if (tid < 32) { - cg::coalesced_group active = cg::coalesced_threads(); - - // Fetch final intermediate sum from 2nd warp - if (blockSize >= 64) { - a_sum = a_sum + s_a[tid + 32]; - b_sum = b_sum + s_b[tid + 32]; - } - - // Reduce final warp using shuffle - for (int offset = warpSize / 2; offset > 0; offset /= 2) { - a_sum += active.shfl_down(a_sum, offset); - b_sum += active.shfl_down(b_sum, offset); - } - } -#else - if ((blockSize >= 64) && (tid < 32)) { - s_a[tid] = a_sum = a_sum + s_a[tid + 32]; - s_b[tid] = b_sum = b_sum + s_b[tid + 32]; - } - - cg::sync(cta); - - if ((blockSize >= 32) && (tid < 16)) { - s_a[tid] = a_sum = a_sum + s_a[tid + 16]; - s_b[tid] = b_sum = b_sum + s_b[tid + 16]; - } - - cg::sync(cta); - - if ((blockSize >= 16) && (tid < 8)) { - s_a[tid] = a_sum = a_sum + s_a[tid + 8]; - s_b[tid] = b_sum = b_sum + s_b[tid + 8]; - } - - cg::sync(cta); - - if ((blockSize >= 8) && (tid < 4)) { - s_a[tid] = a_sum = a_sum + s_a[tid + 4]; - s_b[tid] = b_sum = b_sum + s_b[tid + 4]; - } - - cg::sync(cta); - - if ((blockSize >= 4) && (tid < 2)) { - s_a[tid] = a_sum = a_sum + s_a[tid + 2]; - s_b[tid] = b_sum = b_sum + s_b[tid + 2]; - } - - cg::sync(cta); - - if ((blockSize >= 2) && (tid < 1)) { - s_a[tid] = a_sum = a_sum + s_a[tid + 1]; - s_b[tid] = b_sum = b_sum + s_b[tid + 1]; - } - - cg::sync(cta); - -#endif - - // write result for this block to global mem - if (tid == 0) { - g_a[blockIdx.x] = (T)a_sum; - g_b[blockIdx.x] = (T)b_sum; - } -} - -template -__device__ void reduce_two_vectors_in_register(T a, T b, T* g_a, T* g_b) -{ - const int threadIdInBlock = 
cg::this_thread_block().thread_rank(); - - T* s_a = SharedMemory(); - T* s_b = SharedMemory() + cg::this_thread_block().size(); - - s_a[threadIdInBlock] = a; - s_b[threadIdInBlock] = b; - - reduce_block_in_shared_memory(s_a, s_b, g_a, g_b); -} - -template -__global__ void lamb_cuda_kernel_part1( - T* __restrict__ p, - GRAD_T* __restrict__ p_copy, // For mixed precision training, pass NULL if not needed - T* __restrict__ m, - T* __restrict__ v, - const GRAD_T* __restrict__ g, - const float b1, - const float b2, - const float eps, - const float grad_scale, - const float step_size, - const size_t tsize, - adamMode_t mode, - const float decay, - T* __restrict__ w_l2_i, - T* __restrict__ u_l2_i) -{ - // Assuming 2D grids and 2D blocks - const int blockId = gridDim.x * blockIdx.y + blockIdx.x; - const int threadsPerBlock = blockDim.x * blockDim.y; - const int threadIdInBlock = cg::this_thread_block().thread_rank(); - const int i = (blockId * threadsPerBlock + threadIdInBlock); - const int totThreads = gridDim.x * gridDim.y * threadsPerBlock; - - T reg_w = 0; - T reg_u = 0; - - for (int j = i; j < tsize; j += totThreads) { - T scaled_grad = g[j] / grad_scale; - T pj = p[j]; - m[j] = b1 * m[j] + (1 - b1) * scaled_grad; - v[j] = b2 * v[j] + (1 - b2) * scaled_grad * scaled_grad; - float denom; - if (mode == ADAM_MODE_0) - denom = sqrtf(v[j] + eps); - else // Mode 1 - denom = sqrtf(v[j]) + eps; - T update = (m[j] / denom) + (decay * p[j]); - - reg_u += update * update; - reg_w += pj * pj; - } - - reduce_two_vectors_in_register(reg_w, reg_u, w_l2_i, u_l2_i); -} - -template -__global__ void lamb_cuda_kernel_part2(const size_t tsize, T* __restrict__ g_a, T* __restrict__ g_b) -{ - T* s_a = SharedMemory(); - T* s_b = SharedMemory() + cg::this_thread_block().size(); - - const int threadIdInBlock = cg::this_thread_block().thread_rank(); - - s_a[threadIdInBlock] = g_a[threadIdInBlock]; - s_b[threadIdInBlock] = g_b[threadIdInBlock]; - - if (threadIdInBlock >= tsize) { - 
s_a[threadIdInBlock] = 0.0; - s_b[threadIdInBlock] = 0.0; - } - - reduce_block_in_shared_memory(s_a, s_b, g_a, g_b); -} - -template -__global__ void lamb_cuda_kernel_part3( - T* __restrict__ p, - GRAD_T* __restrict__ p_copy, // For mixed precision training, pass NULL if not needed - T* __restrict__ m, - T* __restrict__ v, - const GRAD_T* __restrict__ g, - const float b1, - const float b2, - const float max_coeff, - const float min_coeff, - const float eps, - const float grad_scale, - const float step_size, - const size_t tsize, - adamMode_t mode, - const float decay, - T* __restrict__ w_l2_i, - T* __restrict__ u_l2_i, - T* __restrict__ lamb_coeff_val) -{ - // Assuming 2D grids and 2D blocks - const int blockId = gridDim.x * blockIdx.y + blockIdx.x; - const int threadsPerBlock = blockDim.x * blockDim.y; - const int threadIdInBlock = cg::this_thread_block().thread_rank(); - const int i = (blockId * threadsPerBlock + threadIdInBlock); - const int totThreads = gridDim.x * gridDim.y * threadsPerBlock; - - T reg_w = sqrtf(w_l2_i[0]); - T reg_u = sqrtf(u_l2_i[0]); - - float lamb_coeff = 1.0; - - if (reg_w != 0 && reg_u != 0) { - lamb_coeff = reg_w / reg_u; - if (lamb_coeff > max_coeff) { lamb_coeff = max_coeff; } - if (lamb_coeff < min_coeff) { lamb_coeff = min_coeff; } - } - - if (blockId == 0 && threadIdInBlock == 0) { - lamb_coeff_val[0] = lamb_coeff; - // printf("Cuda Lamb Coeff is %.6f \n",lamb_coeff); - } - - for (int j = i; j < tsize; j += totThreads) { - T pj = (float)p[j]; - T mj = m[j]; - T vj = v[j]; - float denom; - if (mode == ADAM_MODE_0) - denom = sqrtf(vj + eps); - else // Mode 1 - denom = sqrtf(vj) + eps; - T update = (mj / denom) + (decay * pj); - - pj = pj - (step_size * lamb_coeff * update); - p[j] = pj; - if (p_copy != NULL) p_copy[j] = (GRAD_T)pj; - } -} - -void fused_lamb_cuda(at::Tensor& p, - at::Tensor& p_copy, - at::Tensor& m, - at::Tensor& v, - at::Tensor& g, - float lr, - float beta1, - float beta2, - float max_coeff, - float min_coeff, - float 
eps, - float grad_scale, - int step, - int mode, - int bias_correction, - float decay, - at::Tensor& w_l2_i, - at::Tensor& u_l2_i, - at::Tensor& lamb_coeff) -{ - // using namespace at; - - // Get tensor size - int tsize = p.numel(); - // Determine #threads and #blocks - const int threadsPerBlock = 512; - int num_blocks = (tsize + threadsPerBlock - 1) / threadsPerBlock; - if (num_blocks > 512) num_blocks = 512; - - int smemsize = 0; - - if (p.type().scalarType() == at::ScalarType::Double) - smemsize = 2 * threadsPerBlock * sizeof(double); - else - smemsize = 2 * threadsPerBlock * sizeof(float); - - const dim3 blocks(num_blocks); - const dim3 threads(threadsPerBlock); - - AT_ASSERTM(at::cuda::detail::canUse32BitIndexMath(p), - "parameter tensor is too large to be indexed with int32"); - // Constants - float step_size = 0; - if (bias_correction == 1) { - const float bias_correction1 = 1 - ::pow(beta1, step); - const float bias_correction2 = 1 - ::pow(beta2, step); - step_size = lr * std::sqrt(bias_correction2) / bias_correction1; - } else { - step_size = lr; - } - hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); - - if (g.type().scalarType() == at::ScalarType::Half) { - // all other values should be fp32 for half gradients - AT_ASSERTM(p.type().scalarType() == at::ScalarType::Float, - "expected parameter to be of float type"); - // dispatch is done on the gradient type - using namespace at; // prevents "toString is undefined" errors - AT_DISPATCH_FLOATING_TYPES_AND_HALF( - g.scalar_type(), "lamb_cuda_kernel", ([&] { - using accscalar_t = at::acc_type; - - hipLaunchKernelGGL(( lamb_cuda_kernel_part1) - , dim3(blocks), dim3(threadsPerBlock), smemsize, stream, - p.data(), - p_copy.numel() ? 
p_copy.data() : NULL, - m.data(), - v.data(), - g.data(), - beta1, - beta2, - eps, - grad_scale, - step_size, - tsize, - (adamMode_t)mode, - decay, - w_l2_i.data(), - u_l2_i.data()); - - hipLaunchKernelGGL(( lamb_cuda_kernel_part2) - , dim3(1), dim3(threadsPerBlock), smemsize, stream, - num_blocks, w_l2_i.data(), u_l2_i.data()); - - hipLaunchKernelGGL(( lamb_cuda_kernel_part3) - , dim3(blocks), dim3(threadsPerBlock), smemsize, stream, - p.data(), - p_copy.numel() ? p_copy.data() : NULL, - m.data(), - v.data(), - g.data(), - beta1, - beta2, - max_coeff, - min_coeff, - eps, - grad_scale, - step_size, - tsize, - (adamMode_t)mode, - decay, - w_l2_i.data(), - u_l2_i.data(), - lamb_coeff.data()); - })); - } else { - using namespace at; - AT_DISPATCH_FLOATING_TYPES( - g.scalar_type(), "lamb_cuda_kernel", ([&] { - hipLaunchKernelGGL(( lamb_cuda_kernel_part1) - , dim3(blocks), dim3(threadsPerBlock), smemsize, stream, - p.data(), - NULL, // don't output p_copy for fp32, it's wasted write - m.data(), - v.data(), - g.data(), - beta1, - beta2, - eps, - grad_scale, - step_size, - tsize, - (adamMode_t)mode, - decay, - w_l2_i.data(), - u_l2_i.data()); - - hipLaunchKernelGGL(( lamb_cuda_kernel_part2) - , dim3(1), dim3(threadsPerBlock), smemsize, stream, - num_blocks, w_l2_i.data(), u_l2_i.data()); - - hipLaunchKernelGGL(( lamb_cuda_kernel_part3) - , dim3(blocks), dim3(threadsPerBlock), smemsize, stream, - p.data(), - NULL, // don't output p_copy for fp32, it's wasted write - m.data(), - v.data(), - g.data(), - beta1, - beta2, - max_coeff, - min_coeff, - eps, - grad_scale, - step_size, - tsize, - (adamMode_t)mode, - decay, - w_l2_i.data(), - u_l2_i.data(), - lamb_coeff.data()); - })); - } - C10_HIP_CHECK(hipGetLastError()); -} - -// template __device__ void reduce_two_vectors_in_register(float a, float b, float* g_a, -// float* g_b, cg::grid_group &cgg); diff --git a/deepspeed/ops/csrc/quantization/pt_binding.cpp b/deepspeed/ops/csrc/quantization/pt_binding.cpp deleted file mode 
100644 index f76c436..0000000 --- a/deepspeed/ops/csrc/quantization/pt_binding.cpp +++ /dev/null @@ -1,77 +0,0 @@ -#include -#include -#include -#include "custom_cuda_layers.h" - -template -at::Tensor ds_quantize(at::Tensor& vals, int groups, int bits) -{ - auto t_size = vals.sizes(); - int size = 1; - for (auto dim : t_size) size *= dim; - - if ((((size / groups) - 1) / 4096 + 1) <= MAX_REG) { - launch_quantize_kernel( - (T*)vals.data_ptr(), size, groups, bits, at::cuda::getCurrentCUDAStream()); - } - return vals; -} - -template -at::Tensor ds_sr_quantize(at::Tensor& vals, int groups, int bits) -{ - auto t_size = vals.sizes(); - int size = 1; - for (auto dim : t_size) size *= dim; - - if (((size / groups) / 4 / 1024) <= 256) { - launch_sr_quantize_kernel( - (T*)vals.data_ptr(), size, groups, bits, at::cuda::getCurrentCUDAStream()); - } - return vals; -} - -template -at::Tensor ds_quantize_asym(at::Tensor& vals, int groups, int bits) -{ - auto t_size = vals.sizes(); - int size = 1; - for (auto dim : t_size) size *= dim; - - if ((((size / groups) - 1) / 4096 + 1) <= MAX_REG) { - launch_quantize_kernel_asym( - (T*)vals.data_ptr(), size, groups, bits, at::cuda::getCurrentCUDAStream()); - } - return vals; -} - -template -at::Tensor ds_sr_quantize_asym(at::Tensor& vals, int groups, int bits) -{ - auto t_size = vals.sizes(); - int size = 1; - for (auto dim : t_size) size *= dim; - - if (((size / groups) / 4 / 1024) <= 256) { - launch_sr_quantize_kernel_asym( - (T*)vals.data_ptr(), size, groups, bits, at::cuda::getCurrentCUDAStream()); - } - return vals; -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("ds_quantize_fp32", &ds_quantize, "DeepSpeed Quantize with fp32 (CUDA)"); - m.def("ds_quantize_fp16", &ds_quantize<__half>, "DeepSpeed Quantize with fp16 (CUDA)"); - m.def("ds_sr_quantize_fp32", &ds_sr_quantize, "DeepSpeed Quantize with fp32 (CUDA)"); - m.def("ds_sr_quantize_fp16", &ds_sr_quantize<__half>, "DeepSpeed Quantize with fp16 (CUDA)"); - 
m.def("ds_quantize_asym_fp32", &ds_quantize_asym, "DeepSpeed Quantize with fp32 (CUDA)"); - m.def( - "ds_quantize_asym_fp16", &ds_quantize_asym<__half>, "DeepSpeed Quantize with fp16 (CUDA)"); - m.def("ds_sr_quantize_asym_fp32", - &ds_sr_quantize_asym, - "DeepSpeed Quantize with fp32 (CUDA)"); - m.def("ds_sr_quantize_asym_fp16", - &ds_sr_quantize_asym<__half>, - "DeepSpeed Quantize with fp16 (CUDA)"); -} diff --git a/deepspeed/ops/csrc/quantization/pt_binding_hip.cpp b/deepspeed/ops/csrc/quantization/pt_binding_hip.cpp deleted file mode 100644 index 25ddba1..0000000 --- a/deepspeed/ops/csrc/quantization/pt_binding_hip.cpp +++ /dev/null @@ -1,78 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! -#include -#include -#include -#include "custom_hip_layers.h" - -template -at::Tensor ds_quantize(at::Tensor& vals, int groups, int bits) -{ - auto t_size = vals.sizes(); - int size = 1; - for (auto dim : t_size) size *= dim; - - if ((((size / groups) - 1) / 4096 + 1) <= MAX_REG) { - launch_quantize_kernel( - (T*)vals.data_ptr(), size, groups, bits, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); - } - return vals; -} - -template -at::Tensor ds_sr_quantize(at::Tensor& vals, int groups, int bits) -{ - auto t_size = vals.sizes(); - int size = 1; - for (auto dim : t_size) size *= dim; - - if (((size / groups) / 4 / 1024) <= 256) { - launch_sr_quantize_kernel( - (T*)vals.data_ptr(), size, groups, bits, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); - } - return vals; -} - -template -at::Tensor ds_quantize_asym(at::Tensor& vals, int groups, int bits) -{ - auto t_size = vals.sizes(); - int size = 1; - for (auto dim : t_size) size *= dim; - - if ((((size / groups) - 1) / 4096 + 1) <= MAX_REG) { - launch_quantize_kernel_asym( - (T*)vals.data_ptr(), size, groups, bits, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); - } - return vals; -} - -template -at::Tensor ds_sr_quantize_asym(at::Tensor& vals, int groups, int bits) -{ - auto t_size = vals.sizes(); - 
int size = 1; - for (auto dim : t_size) size *= dim; - - if (((size / groups) / 4 / 1024) <= 256) { - launch_sr_quantize_kernel_asym( - (T*)vals.data_ptr(), size, groups, bits, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); - } - return vals; -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("ds_quantize_fp32", &ds_quantize, "DeepSpeed Quantize with fp32 (CUDA)"); - m.def("ds_quantize_fp16", &ds_quantize<__half>, "DeepSpeed Quantize with fp16 (CUDA)"); - m.def("ds_sr_quantize_fp32", &ds_sr_quantize, "DeepSpeed Quantize with fp32 (CUDA)"); - m.def("ds_sr_quantize_fp16", &ds_sr_quantize<__half>, "DeepSpeed Quantize with fp16 (CUDA)"); - m.def("ds_quantize_asym_fp32", &ds_quantize_asym, "DeepSpeed Quantize with fp32 (CUDA)"); - m.def( - "ds_quantize_asym_fp16", &ds_quantize_asym<__half>, "DeepSpeed Quantize with fp16 (CUDA)"); - m.def("ds_sr_quantize_asym_fp32", - &ds_sr_quantize_asym, - "DeepSpeed Quantize with fp32 (CUDA)"); - m.def("ds_sr_quantize_asym_fp16", - &ds_sr_quantize_asym<__half>, - "DeepSpeed Quantize with fp16 (CUDA)"); -} diff --git a/deepspeed/ops/csrc/quantization/quantizer.hip b/deepspeed/ops/csrc/quantization/quantizer.hip deleted file mode 100644 index 9134593..0000000 --- a/deepspeed/ops/csrc/quantization/quantizer.hip +++ /dev/null @@ -1,1039 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
-#include "hip/hip_runtime.h" -#include -#include "custom_hip_layers.h" - -namespace cg = cooperative_groups; - -__global__ void quantize_kernel(__half* vals, int group_size, int num_bits) -{ -#if __CUDA_ARCH__ >= 700 || defined(__HIP_PLATFORM_HCC__) - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int gid = threadIdx.x >> 5; - int lane = threadIdx.x & 0x1f; - int warp_num = blockDim.x >> 5; - int id = threadIdx.x; - - float2* vals_cast = reinterpret_cast(vals); - - float2 data[MAX_REG]; - - int group_id = blockIdx.x; - - { - int group_index = id; - int reg_count = 0; - int offset = group_id * group_size; - float max = -10000.0; - - while (group_index < group_size && reg_count < MAX_REG) { - data[reg_count] = vals_cast[offset + group_index]; - __half* data_h = reinterpret_cast<__half*>(&data[reg_count]); - - if (abs((float)data_h[0]) > max) max = abs((float)data_h[0]); - if (abs((float)data_h[1]) > max) max = abs((float)data_h[1]); - if (abs((float)data_h[2]) > max) max = abs((float)data_h[2]); - if (abs((float)data_h[3]) > max) max = abs((float)data_h[3]); - - group_index += blockDim.x; - reg_count++; - } - -#pragma unroll - for (int i = 1; i < WARP_SIZE; i <<= 1) { - auto temp = g.shfl_xor(max, i); - if (max < temp) max = temp; - } - __shared__ float partialMax[WARP_SIZE]; - - if (lane == 0) partialMax[gid] = max; - - b.sync(); - - if (lane < warp_num) max = partialMax[lane]; - -#pragma unroll - for (int i = 1; i < WARP_SIZE; i <<= 1) { - auto temp = g.shfl_down(max, i); - if (max < temp) max = temp; - } - - max = g.shfl(max, 0); - - float q_scale = (1 << num_bits) / (2 * max + 1e-5); - float q_scale_inv = 1 / q_scale; - for (int i = 0; i < reg_count; i++) { - group_index = i * blockDim.x + id; - if (group_index < group_size) { - __half2* data_h = reinterpret_cast<__half2*>(&data[i]); - float2 q_data[2]; - q_data[0] = __half22float2(data_h[0]); - q_data[1] = __half22float2(data_h[1]); - - float2 
q_data_int[2]; - - q_data_int[0].x = roundf(q_data[0].x * q_scale); - q_data_int[0].y = roundf(q_data[0].y * q_scale); - q_data_int[1].x = roundf(q_data[1].x * q_scale); - q_data_int[1].y = roundf(q_data[1].y * q_scale); - - q_data_int[0].x *= q_scale_inv; - q_data_int[0].y *= q_scale_inv; - q_data_int[1].x *= q_scale_inv; - q_data_int[1].y *= q_scale_inv; - - data_h[0] = __float22half2_rn(q_data_int[0]); - data_h[1] = __float22half2_rn(q_data_int[1]); - - vals_cast[offset + group_index] = data[i]; - } - } - } -#endif -} - -__global__ void quantize_kernel(float* vals, int group_size, int num_bits) -{ - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int gid = threadIdx.x >> 5; - int lane = threadIdx.x & 0x1f; - int warp_num = blockDim.x >> 5; - int id = threadIdx.x; - - float4* vals_cast = reinterpret_cast(vals); - - float4 data[MAX_REG]; - - int bid = blockIdx.x; - - int group_index = bid * group_size + id; - int reg_count = 0; - - float max = -10000.0; - - while (id < group_size && reg_count < MAX_REG) { - float4 data_reg = vals_cast[group_index]; - data[reg_count] = data_reg; - - if (abs(data_reg.x) > max) max = abs(data_reg.x); - if (abs(data_reg.y) > max) max = abs(data_reg.y); - if (abs(data_reg.z) > max) max = abs(data_reg.z); - if (abs(data_reg.w) > max) max = abs(data_reg.w); - - group_index += blockDim.x; - id += blockDim.x; - reg_count++; - } - id = threadIdx.x; -#pragma unroll - for (int i = 1; i < WARP_SIZE; i <<= 1) { - auto temp = g.shfl_xor(max, i); - if (max < temp) max = temp; - } - __shared__ float partialMax[WARP_SIZE]; - - if (lane == 0) partialMax[gid] = max; - - b.sync(); - - if (lane < warp_num) max = partialMax[lane]; - - b.sync(); - -#pragma unroll - for (int i = 1; i < warp_num; i <<= 1) { - auto temp = g.shfl_down(max, i); - if (max < temp) max = temp; - } - - max = g.shfl(max, 0); - - float q_scale = (1 << num_bits) / (2 * max + 1e-5); - float q_scale_inv = 1 / q_scale; - for 
(int i = 0; i < reg_count; i++) { - group_index = i * blockDim.x + id; - if (group_index < group_size) { - float4 q_data; - q_data = data[i]; - - float4 q_data_int; - q_data_int.x = roundf(q_data.x * q_scale); - q_data_int.y = roundf(q_data.y * q_scale); - q_data_int.w = roundf(q_data.w * q_scale); - q_data_int.z = roundf(q_data.z * q_scale); - - q_data.x = q_data_int.x * q_scale_inv; - q_data.y = q_data_int.y * q_scale_inv; - q_data.w = q_data_int.w * q_scale_inv; - q_data.z = q_data_int.z * q_scale_inv; - - vals_cast[group_index + bid * group_size] = q_data; - } - } -} - -template -void launch_quantize_kernel(T* vals, - int total_count, - int group_num, - int num_bits, - hipStream_t stream) -{ - dim3 grid_dim(group_num); - dim3 block_dim(1024); - - hipLaunchKernelGGL(( quantize_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, - vals, (total_count / group_num) / 4, num_bits); -} - -template void launch_quantize_kernel(float* vals, - int total_count, - int group_num, - int num_bits, - hipStream_t stream); -template void launch_quantize_kernel(__half* vals, - int total_count, - int group_num, - int num_bits, - hipStream_t stream); - -__global__ void sr_quantize_kernel(__half* vals, - int token_size, - int token_num, - int num_bits, - std::pair seed) -{ -#if __CUDA_ARCH__ >= 700 || defined(__HIP_PLATFORM_HCC__) - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int gid = threadIdx.x >> 5; - int lane = threadIdx.x & 0x1f; - int warp_num = blockDim.x >> 5; - - int idx = blockIdx.x * blockDim.x + threadIdx.x; - - float2* vals_cast = reinterpret_cast(vals); - - __half2 data_low[128]; - __half2 data_high[128]; - - int bid = blockIdx.x; - - hiprandStatePhilox4_32_10_t state; - hiprand_init(seed.first, idx, seed.second, &state); - unsigned int tid = threadIdx.x; - int reg_count = 0; - int offset = bid * token_size; - int group_index = bid * token_size + tid; - - int total_count = token_size * token_num; - if 
(group_index < total_count) { - // float min = 10000.0; - float max = -10000.0; - while (tid < token_size) { - float2 data = vals_cast[offset + tid]; - __half2* data_h = reinterpret_cast<__half2*>(&data); - data_low[reg_count] = data_h[0]; - data_high[reg_count] = data_h[1]; - - float2 data_f[2]; - data_f[0] = __half22float2(data_h[0]); - data_f[1] = __half22float2(data_h[1]); - - if (abs((float)data_f[0].x) > max) max = abs((float)data_f[0].x); - if (abs((float)data_f[0].y) > max) max = abs((float)data_f[0].y); - if (abs((float)data_f[1].x) > max) max = abs((float)data_f[1].x); - if (abs((float)data_f[1].y) > max) max = abs((float)data_f[1].y); - - tid += blockDim.x; - reg_count++; - } - -#pragma unroll - for (int i = 1; i < WARP_SIZE; i <<= 1) { - auto temp = g.shfl_xor(max, i); - if (max < temp) max = temp; - } - - __shared__ float partialMax[WARP_SIZE]; - - if (lane == 0) partialMax[gid] = max; - - b.sync(); - - if (lane < warp_num) max = partialMax[lane]; - -#pragma unroll - for (int i = 1; i < warp_num; i <<= 1) { - auto temp = g.shfl_down(max, i); - if (max < temp) max = temp; - } - - max = g.shfl(max, 0); - - float q_scale_val = (float)(1 << num_bits) / (max * 2 + 1e-5); - float high_q = (float)((1 << (num_bits - 1)) - 1); - float low_q = (float)(-((1 << (num_bits - 1)))); - - for (int i = 0; i < reg_count; i++) { - int token_index = i * blockDim.x + threadIdx.x; - if (token_index < token_size) { - float2 data_f[2]; - data_f[0] = __half22float2(data_low[i]); - data_f[1] = __half22float2(data_high[i]); - - float2 q_data_int[2]; - q_data_int[0].x = (float)((int)(data_f[0].x * q_scale_val)); - q_data_int[0].y = (float)((int)(data_f[0].y * q_scale_val)); - q_data_int[1].x = (float)((int)(data_f[1].x * q_scale_val)); - q_data_int[1].y = (float)((int)(data_f[1].y * q_scale_val)); - - // Stochastic rounding - float4 rand = hiprand_uniform4(&state); - - float q_error[4]; - q_error[0] = abs(data_f[0].x - (q_data_int[0].x / q_scale_val)) * q_scale_val; - q_error[1] = 
abs(data_f[0].y - (q_data_int[0].y / q_scale_val)) * q_scale_val; - q_error[2] = abs(data_f[1].x - (q_data_int[1].x / q_scale_val)) * q_scale_val; - q_error[3] = abs(data_f[1].y - (q_data_int[1].y / q_scale_val)) * q_scale_val; - - q_data_int[0].x = - (rand.x < q_error[0] && q_data_int[0].x > low_q && q_data_int[0].x < high_q) - ? (q_data_int[0].x + (data_f[0].x > 0 ? 1 : -1)) - : q_data_int[0].x; - q_data_int[0].y = - (rand.y < q_error[1] && q_data_int[0].y > low_q && q_data_int[0].y < high_q) - ? (q_data_int[0].y + (data_f[0].y > 0 ? 1 : -1)) - : q_data_int[0].y; - q_data_int[1].x = - (rand.w < q_error[2] && q_data_int[1].x > low_q && q_data_int[1].x < high_q) - ? (q_data_int[1].x + (data_f[1].x > 0 ? 1 : -1)) - : q_data_int[1].x; - q_data_int[1].y = - (rand.z < q_error[3] && q_data_int[1].y > low_q && q_data_int[1].y < high_q) - ? (q_data_int[1].y + (data_f[1].y > 0 ? 1 : -1)) - : q_data_int[1].y; - - data_f[0].x = q_data_int[0].x / q_scale_val; - data_f[0].y = q_data_int[0].y / q_scale_val; - data_f[1].x = q_data_int[1].x / q_scale_val; - data_f[1].y = q_data_int[1].y / q_scale_val; - - float2 result; - __half2* result_h = reinterpret_cast<__half2*>(&result); - result_h[0] = __float22half2_rn(data_f[0]); - result_h[1] = __float22half2_rn(data_f[1]); - - vals_cast[offset + token_index] = result; - } - } - } -#endif -} - -__global__ void sr_quantize_kernel(float* vals, - int token_size, - int token_num, - int num_bits, - std::pair seed) -{ - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int gid = threadIdx.x >> 5; - int lane = threadIdx.x & 0x1f; - int warp_num = blockDim.x >> 5; - int id = threadIdx.x; - - int idx = blockIdx.x * blockDim.x + id; - - float4* vals_cast = reinterpret_cast(vals); - - float4 data[128]; - - int bid = blockIdx.x; - int tid = threadIdx.x; - hiprandStatePhilox4_32_10_t state; - hiprand_init(seed.first, idx, seed.second, &state); - - int group_index = bid * token_size + 
threadIdx.x; - int reg_count = 0; - int total_count = token_size * token_num; - if (group_index < total_count) { - // float min = 10000.0; - float max = -10000.0; - - while (tid < token_size) { - data[reg_count] = vals_cast[group_index]; - - if (abs(data[reg_count].x) > max) max = abs(data[reg_count].x); - if (abs(data[reg_count].y) > max) max = abs(data[reg_count].y); - if (abs(data[reg_count].z) > max) max = abs(data[reg_count].z); - if (abs(data[reg_count].w) > max) max = abs(data[reg_count].w); - - group_index += blockDim.x; - tid += blockDim.x; - reg_count++; - } - -#pragma unroll - for (int i = 1; i < WARP_SIZE; i <<= 1) { - auto temp = g.shfl_xor(max, i); - if (max < temp) max = temp; - } - __shared__ float partialMax[WARP_SIZE]; - - if (lane == 0) partialMax[gid] = max; - - b.sync(); - - if (lane < warp_num) max = partialMax[lane]; - -#pragma unroll - for (int i = 1; i < warp_num; i <<= 1) { - auto temp = g.shfl_down(max, i); - if (max < temp) max = temp; - } - - max = g.shfl(max, 0); - - float q_scale_val = (float)(1 << num_bits) / (max * 2 + 1e-5); - float high_q = (float)((1 << (num_bits - 1)) - 1); - float low_q = (float)(-((1 << (num_bits - 1)))); - - int offset = (bid)*token_size; - for (int i = 0; i < reg_count; i++) { - group_index = i * blockDim.x + threadIdx.x; - if (group_index < token_size) { - float4 q_data = data[i]; - - float4 q_data_int; - q_data_int.x = (float)((int)(q_data.x * q_scale_val)); - q_data_int.y = (float)((int)(q_data.y * q_scale_val)); - q_data_int.w = (float)((int)(q_data.w * q_scale_val)); - q_data_int.z = (float)((int)(q_data.z * q_scale_val)); - - // Stochastic rounding - float4 rand = hiprand_uniform4(&state); - - float q_error[4]; - q_error[0] = abs(q_data.x - (q_data_int.x / q_scale_val)) * q_scale_val; - q_error[1] = abs(q_data.y - (q_data_int.y / q_scale_val)) * q_scale_val; - q_error[2] = abs(q_data.w - (q_data_int.w / q_scale_val)) * q_scale_val; - q_error[3] = abs(q_data.z - (q_data_int.z / q_scale_val)) * 
q_scale_val; - - q_data_int.x = - (rand.x < q_error[0] && q_data_int.x > low_q && q_data_int.x < high_q) - ? (q_data_int.x + (q_data.x > 0 ? 1 : -1)) - : q_data_int.x; - q_data_int.y = - (rand.y < q_error[1] && q_data_int.y > low_q && q_data_int.y < high_q) - ? (q_data_int.y + (q_data.y > 0 ? 1 : -1)) - : q_data_int.y; - q_data_int.w = - (rand.w < q_error[2] && q_data_int.w > low_q && q_data_int.w < high_q) - ? (q_data_int.w + (q_data.w > 0 ? 1 : -1)) - : q_data_int.w; - q_data_int.z = - (rand.z < q_error[3] && q_data_int.z > low_q && q_data_int.z < high_q) - ? (q_data_int.z + (q_data.z > 0 ? 1 : -1)) - : q_data_int.z; - - q_data_int.x /= q_scale_val; - q_data_int.y /= q_scale_val; - q_data_int.w /= q_scale_val; - q_data_int.z /= q_scale_val; - - vals_cast[group_index + offset] = q_data_int; - } - } - } -} - -template -void launch_sr_quantize_kernel(T* vals, - int total_count, - int group_num, - int num_bits, - hipStream_t stream) -{ - dim3 block_dim(1024); - dim3 grid_dim(group_num); - - uint64_t inc = total_count / grid_dim.x / block_dim.x; - std::pair seed = Context::Instance().IncrementOffset(inc); - - hipLaunchKernelGGL(( sr_quantize_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, - vals, (total_count / group_num) / 4, group_num, num_bits, seed); -} -template void launch_sr_quantize_kernel(float* vals, - int total_count, - int group_num, - int num_bits, - hipStream_t stream); -template void launch_sr_quantize_kernel(__half* vals, - int total_count, - int group_num, - int num_bits, - hipStream_t stream); - -__global__ void quantize_kernel_asym(__half* vals, int group_size, int num_bits) -{ -#if __CUDA_ARCH__ >= 700 || defined(__HIP_PLATFORM_HCC__) - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int gid = threadIdx.x >> 5; - int lane = threadIdx.x & 0x1f; - int warp_num = blockDim.x >> 5; - int id = threadIdx.x; - - float2* vals_cast = reinterpret_cast(vals); - - float2 data[MAX_REG]; - - int 
group_id = blockIdx.x; - - { - int group_index = id; - int reg_count = 0; - int offset = group_id * group_size; - float max = -10000.0; - float min = 10000.0; - - while (group_index < group_size && reg_count < MAX_REG) { - data[reg_count] = vals_cast[offset + group_index]; - __half* data_h = reinterpret_cast<__half*>(&data[reg_count]); - - if (((float)data_h[0]) > max) max = (float)data_h[0]; - if (((float)data_h[1]) > max) max = (float)data_h[1]; - if (((float)data_h[2]) > max) max = (float)data_h[2]; - if (((float)data_h[3]) > max) max = (float)data_h[3]; - - if (((float)data_h[0]) < min) min = (float)data_h[0]; - if (((float)data_h[1]) < min) min = (float)data_h[1]; - if (((float)data_h[2]) < min) min = (float)data_h[2]; - if (((float)data_h[3]) < min) min = (float)data_h[3]; - - group_index += blockDim.x; - reg_count++; - } - -#pragma unroll - for (int i = 1; i < WARP_SIZE; i <<= 1) { - auto temp = g.shfl_xor(max, i); - if (max < temp) max = temp; - } - -#pragma unroll - for (int i = 1; i < WARP_SIZE; i <<= 1) { - auto temp = g.shfl_xor(min, i); - if (min > temp) min = temp; - } - - __shared__ float partialMax[WARP_SIZE]; - __shared__ float partialMin[WARP_SIZE]; - - if (lane == 0) partialMax[gid] = max; - if (lane == 0) partialMin[gid] = min; - - b.sync(); - - if (lane < warp_num) max = partialMax[lane]; - if (lane < warp_num) min = partialMin[lane]; - -#pragma unroll - for (int i = 1; i < warp_num; i <<= 1) { - auto temp = g.shfl_down(max, i); - if (max < temp) max = temp; - } -#pragma unroll - for (int i = 1; i < warp_num; i <<= 1) { - auto temp = g.shfl_down(min, i); - if (min > temp) min = temp; - } - - max = g.shfl(max, 0); - min = g.shfl(min, 0); - - float q_scale = ((max - min) + 1e-5) / (float)(1 << num_bits); - float q_scale_inv = 1 / q_scale; - - for (int i = 0; i < reg_count; i++) { - group_index = i * blockDim.x + id; - if (group_index < group_size) { - __half2* data_h = reinterpret_cast<__half2*>(&data[i]); - float2 q_data[2]; - q_data[0] = 
__half22float2(data_h[0]); - q_data[1] = __half22float2(data_h[1]); - - float2 q_data_int[2]; - - q_data_int[0].x = roundf((q_data[0].x - min) * q_scale_inv); - q_data_int[0].y = roundf((q_data[0].y - min) * q_scale_inv); - q_data_int[1].x = roundf((q_data[1].x - min) * q_scale_inv); - q_data_int[1].y = roundf((q_data[1].y - min) * q_scale_inv); - - q_data_int[0].x = q_data_int[0].x * q_scale + min; - q_data_int[0].y = q_data_int[0].y * q_scale + min; - q_data_int[1].x = q_data_int[1].x * q_scale + min; - q_data_int[1].y = q_data_int[1].y * q_scale + min; - - data_h[0] = __float22half2_rn(q_data_int[0]); - data_h[1] = __float22half2_rn(q_data_int[1]); - - vals_cast[offset + group_index] = data[i]; - } - } - } -#endif -} - -__global__ void quantize_kernel_asym(float* vals, int group_size, int num_bits) -{ - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int gid = threadIdx.x >> 5; - int lane = threadIdx.x & 0x1f; - int warp_num = blockDim.x >> 5; - int id = threadIdx.x; - - float4* vals_cast = reinterpret_cast(vals); - - float4 data[MAX_REG]; - - int bid = blockIdx.x; - - int group_index = bid * group_size + id; - int reg_count = 0; - - float max = -10000.0; - float min = 10000.0; - - while (id < group_size && reg_count < MAX_REG) { - float4 data_reg = vals_cast[group_index]; - data[reg_count] = data_reg; - - if (data_reg.x > max) max = data_reg.x; - if (data_reg.y > max) max = data_reg.y; - if (data_reg.w > max) max = data_reg.w; - if (data_reg.z > max) max = data_reg.z; - - if (data_reg.x < min) min = data_reg.x; - if (data_reg.y < min) min = data_reg.y; - if (data_reg.w < min) min = data_reg.w; - if (data_reg.z < min) min = data_reg.z; - - group_index += blockDim.x; - id += blockDim.x; - reg_count++; - } - id = threadIdx.x; - -#pragma unroll - for (int i = 1; i < WARP_SIZE; i <<= 1) { - auto temp = g.shfl_xor(max, i); - if (max < temp) max = temp; - } - -#pragma unroll - for (int i = 1; i < WARP_SIZE; 
i <<= 1) { - auto temp = g.shfl_xor(min, i); - if (min > temp) min = temp; - } - - __shared__ float partialMax[WARP_SIZE]; - __shared__ float partialMin[WARP_SIZE]; - - if (lane == 0) partialMax[gid] = max; - if (lane == 0) partialMin[gid] = min; - - b.sync(); - - if (lane < warp_num) max = partialMax[lane]; - if (lane < warp_num) min = partialMin[lane]; - -#pragma unroll - for (int i = 1; i < warp_num; i <<= 1) { - auto temp = g.shfl_down(max, i); - if (max < temp) max = temp; - } -#pragma unroll - for (int i = 1; i < warp_num; i <<= 1) { - auto temp = g.shfl_down(min, i); - if (min > temp) min = temp; - } - - max = g.shfl(max, 0); - min = g.shfl(min, 0); - - float q_scale = ((max - min) + 1e-5) / (float)(1 << num_bits); - float q_scale_inv = 1 / q_scale; - for (int i = 0; i < reg_count; i++) { - group_index = i * blockDim.x + id; - if (group_index < group_size) { - float4 q_data; - q_data = data[i]; - - float4 q_data_int; - q_data_int.x = roundf((q_data.x - min) * q_scale_inv); - q_data_int.y = roundf((q_data.y - min) * q_scale_inv); - q_data_int.w = roundf((q_data.w - min) * q_scale_inv); - q_data_int.z = roundf((q_data.z - min) * q_scale_inv); - - q_data.x = q_data_int.x * q_scale + min; - q_data.y = q_data_int.y * q_scale + min; - q_data.w = q_data_int.w * q_scale + min; - q_data.z = q_data_int.z * q_scale + min; - - vals_cast[group_index + bid * group_size] = q_data; - } - } -} - -template -void launch_quantize_kernel_asym(T* vals, - int total_count, - int group_num, - int num_bits, - hipStream_t stream) -{ - dim3 grid_dim(group_num); - dim3 block_dim(1024); - - hipLaunchKernelGGL(( quantize_kernel_asym), dim3(grid_dim), dim3(block_dim), 0, stream, - vals, (total_count / group_num) / 4, num_bits); -} - -template void launch_quantize_kernel_asym(float* vals, - int total_count, - int group_num, - int num_bits, - hipStream_t stream); -template void launch_quantize_kernel_asym(__half* vals, - int total_count, - int group_num, - int num_bits, - hipStream_t 
stream); - -__global__ void sr_quantize_kernel_asym(__half* vals, - int token_size, - int token_num, - int num_bits, - std::pair seed) -{ -#if __CUDA_ARCH__ >= 700 || defined(__HIP_PLATFORM_HCC__) - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int gid = threadIdx.x >> 5; - int lane = threadIdx.x & 0x1f; - int warp_num = blockDim.x >> 5; - - int idx = blockIdx.x * blockDim.x + threadIdx.x; - - float2* vals_cast = reinterpret_cast(vals); - - __half2 data_low[128]; - __half2 data_high[128]; - - int bid = blockIdx.x; - - hiprandStatePhilox4_32_10_t state; - hiprand_init(seed.first, idx, seed.second, &state); - unsigned int tid = threadIdx.x; - int reg_count = 0; - int offset = bid * token_size; - int group_index = bid * token_size + tid; - - int total_count = token_size * token_num; - if (group_index < total_count) { - float min = 10000.0; - float max = -10000.0; - while (tid < token_size) { - float2 data = vals_cast[offset + tid]; - __half2* data_h = reinterpret_cast<__half2*>(&data); - data_low[reg_count] = data_h[0]; - data_high[reg_count] = data_h[1]; - - float2 data_f[2]; - data_f[0] = __half22float2(data_h[0]); - data_f[1] = __half22float2(data_h[1]); - - if (((float)data_f[0].x) > max) max = (float)data_f[0].x; - if (((float)data_f[0].y) > max) max = (float)data_f[0].y; - if (((float)data_f[1].x) > max) max = (float)data_f[1].x; - if (((float)data_f[1].y) > max) max = (float)data_f[1].y; - - if (((float)data_f[0].x) < min) min = (float)data_f[0].x; - if (((float)data_f[0].y) < min) min = (float)data_f[0].y; - if (((float)data_f[1].x) < min) min = (float)data_f[1].x; - if (((float)data_f[1].y) < min) min = (float)data_f[1].y; - - tid += blockDim.x; - reg_count++; - } - -#pragma unroll - for (int i = 1; i < WARP_SIZE; i <<= 1) { - auto temp = g.shfl_xor(max, i); - if (max < temp) max = temp; - } - -#pragma unroll - for (int i = 1; i < WARP_SIZE; i <<= 1) { - auto temp = g.shfl_xor(min, i); - if 
(min > temp) min = temp; - } - - __shared__ float partialMax[WARP_SIZE]; - __shared__ float partialMin[WARP_SIZE]; - - if (lane == 0) partialMax[gid] = max; - if (lane == 0) partialMin[gid] = min; - - b.sync(); - - if (lane < warp_num) max = partialMax[lane]; - if (lane < warp_num) min = partialMin[lane]; - -#pragma unroll - for (int i = 1; i < warp_num; i <<= 1) { - auto temp = g.shfl_down(max, i); - if (max < temp) max = temp; - } -#pragma unroll - for (int i = 1; i < warp_num; i <<= 1) { - auto temp = g.shfl_down(min, i); - if (min > temp) min = temp; - } - - max = g.shfl(max, 0); - min = g.shfl(min, 0); - - float q_scale_val = ((max - min) + 1e-5) / (float)(1 << num_bits); - float q_scale_val_inv = 1 / q_scale_val; - float high_q = (float)((1 << num_bits) - 1); - - for (int i = 0; i < reg_count; i++) { - int token_index = i * blockDim.x + threadIdx.x; - if (token_index < token_size) { - float2 data_f[2]; - data_f[0] = __half22float2(data_low[i]); - data_f[1] = __half22float2(data_high[i]); - - float2 q_data_int[2]; - q_data_int[0].x = (float)((unsigned int)((data_f[0].x - min) * q_scale_val_inv)); - q_data_int[0].y = (float)((unsigned int)((data_f[0].y - min) * q_scale_val_inv)); - q_data_int[1].x = (float)((unsigned int)((data_f[1].x - min) * q_scale_val_inv)); - q_data_int[1].y = (float)((unsigned int)((data_f[1].y - min) * q_scale_val_inv)); - - // Stochastic rounding - float4 rand = hiprand_uniform4(&state); - - float q_error[4]; - q_error[0] = - abs(data_f[0].x - ((q_data_int[0].x * q_scale_val) + min)) * q_scale_val_inv; - q_error[1] = - abs(data_f[0].y - ((q_data_int[0].y * q_scale_val) + min)) * q_scale_val_inv; - q_error[2] = - abs(data_f[1].x - ((q_data_int[1].x * q_scale_val) + min)) * q_scale_val_inv; - q_error[3] = - abs(data_f[1].y - ((q_data_int[1].y * q_scale_val) + min)) * q_scale_val_inv; - - q_data_int[0].x = (rand.x < q_error[0] && q_data_int[0].x < high_q) - ? 
(q_data_int[0].x + 1) - : q_data_int[0].x; - q_data_int[0].y = (rand.y < q_error[1] && q_data_int[0].y < high_q) - ? (q_data_int[0].y + 1) - : q_data_int[0].y; - q_data_int[1].x = (rand.w < q_error[2] && q_data_int[1].x < high_q) - ? (q_data_int[1].x + 1) - : q_data_int[1].x; - q_data_int[1].y = (rand.z < q_error[3] && q_data_int[1].y < high_q) - ? (q_data_int[1].y + 1) - : q_data_int[1].y; - - data_f[0].x = q_data_int[0].x * q_scale_val + min; - data_f[0].y = q_data_int[0].y * q_scale_val + min; - data_f[1].x = q_data_int[1].x * q_scale_val + min; - data_f[1].y = q_data_int[1].y * q_scale_val + min; - - float2 result; - __half2* result_h = reinterpret_cast<__half2*>(&result); - result_h[0] = __float22half2_rn(data_f[0]); - result_h[1] = __float22half2_rn(data_f[1]); - - vals_cast[offset + token_index] = result; - } - } - } -#endif -} - -__global__ void sr_quantize_kernel_asym(float* vals, - int token_size, - int token_num, - int num_bits, - std::pair seed) -{ - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int gid = threadIdx.x >> 5; - int lane = threadIdx.x & 0x1f; - int warp_num = blockDim.x >> 5; - int id = threadIdx.x; - - int idx = blockIdx.x * blockDim.x + id; - - float4* vals_cast = reinterpret_cast(vals); - - float4 data[128]; - - int bid = blockIdx.x; - int tid = threadIdx.x; - hiprandStatePhilox4_32_10_t state; - hiprand_init(seed.first, idx, seed.second, &state); - - int group_index = bid * token_size + threadIdx.x; - int reg_count = 0; - int total_count = token_size * token_num; - if (group_index < total_count) { - float min = 10000.0; - float max = -10000.0; - - while (tid < token_size) { - float4 data_reg = vals_cast[group_index]; - data[reg_count] = data_reg; - if (data_reg.x > max) max = data_reg.x; - if (data_reg.y > max) max = data_reg.y; - if (data_reg.w > max) max = data_reg.w; - if (data_reg.z > max) max = data_reg.z; - - if (data_reg.x < min) min = data_reg.x; - if (data_reg.y < 
min) min = data_reg.y; - if (data_reg.w < min) min = data_reg.w; - if (data_reg.z < min) min = data_reg.z; - - group_index += blockDim.x; - tid += blockDim.x; - reg_count++; - } - -#pragma unroll - for (int i = 1; i < WARP_SIZE; i <<= 1) { - auto temp = g.shfl_xor(max, i); - if (max < temp) max = temp; - } - -#pragma unroll - for (int i = 1; i < WARP_SIZE; i <<= 1) { - auto temp = g.shfl_xor(min, i); - if (min > temp) min = temp; - } - - __shared__ float partialMax[WARP_SIZE]; - __shared__ float partialMin[WARP_SIZE]; - - if (lane == 0) partialMax[gid] = max; - if (lane == 0) partialMin[gid] = min; - - b.sync(); - - if (lane < warp_num) max = partialMax[lane]; - if (lane < warp_num) min = partialMin[lane]; - -#pragma unroll - for (int i = 1; i < warp_num; i <<= 1) { - auto temp = g.shfl_down(max, i); - if (max < temp) max = temp; - } -#pragma unroll - for (int i = 1; i < warp_num; i <<= 1) { - auto temp = g.shfl_down(min, i); - if (min > temp) min = temp; - } - - max = g.shfl(max, 0); - min = g.shfl(min, 0); - - float q_scale_val = ((max - min) + 1e-5) / (float)(1 << num_bits); - float high_q = (float)((1 << num_bits) - 1); - - int offset = (bid)*token_size; - for (int i = 0; i < reg_count; i++) { - group_index = i * blockDim.x + threadIdx.x; - if (group_index < token_size) { - float4 q_data = data[i]; - - float4 q_data_int; - q_data_int.x = (float)((int)((q_data.x - min) / q_scale_val)); - q_data_int.y = (float)((int)((q_data.y - min) / q_scale_val)); - q_data_int.w = (float)((int)((q_data.w - min) / q_scale_val)); - q_data_int.z = (float)((int)((q_data.z - min) / q_scale_val)); - - // Stochastic rounding - float4 rand = hiprand_uniform4(&state); - - float q_error[4]; - q_error[0] = abs(q_data.x - ((q_data_int.x * q_scale_val) + min)) / q_scale_val; - q_error[1] = abs(q_data.y - ((q_data_int.y * q_scale_val) + min)) / q_scale_val; - q_error[2] = abs(q_data.w - ((q_data_int.w * q_scale_val) + min)) / q_scale_val; - q_error[3] = abs(q_data.z - ((q_data_int.z * 
q_scale_val) + min)) / q_scale_val; - - q_data_int.x = (rand.x < q_error[0] && q_data_int.x < high_q) ? (q_data_int.x + 1) - : q_data_int.x; - q_data_int.y = (rand.y < q_error[1] && q_data_int.y < high_q) ? (q_data_int.y + 1) - : q_data_int.y; - q_data_int.w = (rand.w < q_error[2] && q_data_int.w < high_q) ? (q_data_int.w + 1) - : q_data_int.w; - q_data_int.z = (rand.z < q_error[3] && q_data_int.z < high_q) ? (q_data_int.z + 1) - : q_data_int.z; - - q_data_int.x = q_data_int.x * q_scale_val + min; - q_data_int.y = q_data_int.y * q_scale_val + min; - q_data_int.w = q_data_int.w * q_scale_val + min; - q_data_int.z = q_data_int.z * q_scale_val + min; - - vals_cast[group_index + offset] = q_data_int; - } - } - } -} -template -void launch_sr_quantize_kernel_asym(T* vals, - int total_count, - int group_num, - int num_bits, - hipStream_t stream) -{ - dim3 block_dim(1024); - dim3 grid_dim(group_num); - - uint64_t inc = total_count / grid_dim.x / block_dim.x; - std::pair seed = Context::Instance().IncrementOffset(inc); - - hipLaunchKernelGGL(( sr_quantize_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, - vals, (total_count / group_num) / 4, group_num, num_bits, seed); -} -template void launch_sr_quantize_kernel_asym(float* vals, - int total_count, - int group_num, - int num_bits, - hipStream_t stream); -template void launch_sr_quantize_kernel_asym(__half* vals, - int total_count, - int group_num, - int num_bits, - hipStream_t stream); diff --git a/deepspeed/ops/csrc/sparse_attention/utils.cpp b/deepspeed/ops/csrc/sparse_attention/utils.cpp deleted file mode 100644 index 8e4346b..0000000 --- a/deepspeed/ops/csrc/sparse_attention/utils.cpp +++ /dev/null @@ -1,120 +0,0 @@ -// DeepSpeed note, code taken & adapted from commit 9aa94789f13ada713af36cfd8cca2fc9a7f6b79a -// https://github.com/ptillet/torch-blocksparse/blob/master/csrc/utils.cpp - -#include -#include -#include -#include -#ifdef _OPENMP -#include -#endif - -typedef std::vector> ret_t; - -void 
segment_blocks(torch::Tensor layout, - torch::Tensor idx, - torch::Tensor scratch, - int max_width, - ret_t& ret) -{ - size_t H = layout.size(0); - size_t M = layout.size(1); - size_t N = layout.size(2); - torch::Tensor tmp = torch::zeros_like(layout); - - auto _tmp = tmp.accessor(); - auto _layout = layout.accessor(); - auto _idx = idx.accessor(); - auto _scratch = scratch.accessor(); - std::vector current(H, 0); - -#ifdef _OPENMP -#pragma omp parallel for -#endif - for (size_t h = 0; h < H; h++) { - // surrounding indices - std::vector ii_left(max_width, -1); - std::vector> ii_top(max_width, std::vector(N, -1)); - - for (size_t m = 0; m < M; m++) { - for (size_t n = 0; n < N; n++) { - int v = _layout[h][m][n]; - if (v == 0) continue; - int n_left = ii_left[max_width - 1]; - int m_top = ii_top[max_width - 1][n]; - int top = (m_top >= 0) ? _tmp[h][m_top][n] : 0; - int left = (n_left >= 0) ? _tmp[h][m][n_left] : 0; - int topleft = (m_top >= 0 && n_left >= 0) ? _tmp[h][m_top][n_left] : 0; - int width = std::min(left, std::min(top, topleft)) + 1; - - // reset width if blocks cannot be - // packed together (i.e., there's a 1 "in the middle") - for (int nn = n_left + 1; nn < n; nn++) - if (ii_top[max_width - 1][nn] > ii_top[max_width - 1][n]) width = 1; - _tmp[h][m][n] = width; - - // update n_left ring buffer - for (int k = 0; k < max_width - 1; k++) ii_left[k] = ii_left[k + 1]; - ii_left[max_width - 1] = n; - - // update ii_top ring buffer - for (int k = 0; k < max_width - 1; k++) ii_top[k][n] = ii_top[k + 1][n]; - ii_top[max_width - 1][n] = m; - - // block is too small -- skip - if (width != max_width) continue; - - // retained blocks are set to zeros - for (size_t km = 0; km < max_width; km++) - for (size_t kn = 0; kn < max_width; kn++) { - int mm = ii_top[km][n]; - int nn = ii_left[kn]; - if (mm < 0 || nn < 0) continue; - _layout[h][mm][nn] = 0; - _tmp[h][mm][nn] = 0; - _scratch[h][current[h]][0] = (int)h; - _scratch[h][current[h]][1] = (int)mm; - 
_scratch[h][current[h]][2] = (int)nn; - _scratch[h][current[h]][3] = _idx[h][mm][nn]; - current[h]++; - } - } - } - } - std::vector to_cat; - for (size_t h = 0; h < H; h++) - if (current[h] > 0) to_cat.push_back(scratch[h].slice(0, 0, current[h])); - if (!to_cat.empty()) ret.push_back({max_width, torch::cat(to_cat)}); -} - -ret_t sdd_segment(torch::Tensor layout, int start_width) -{ - ret_t ret; - - // block index - torch::Tensor idx = torch::zeros_like(layout); - int current = 0; - int64_t H = layout.size(0); - int64_t M = layout.size(1); - int64_t N = layout.size(2); - auto _layout = layout.accessor(); - auto _idx = idx.accessor(); - for (int64_t h = 0; h < H; h++) - for (int64_t m = 0; m < M; m++) - for (int64_t n = 0; n < N; n++) { - if (_layout[h][m][n] == 0) continue; - _idx[h][m][n] = current++; - } - - // scratch memory - torch::Tensor scratch = torch::empty({H, layout.sum().item(), 4}, layout.dtype()); - - for (int max_width = start_width; max_width > 0; max_width /= 2) - segment_blocks(layout, idx, scratch, max_width, ret); - return ret; -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("sdd_segment", &sdd_segment, "SDD segmentation handler"); -} diff --git a/deepspeed/ops/csrc/transformer/cublas_wrappers.cu b/deepspeed/ops/csrc/transformer/cublas_wrappers.cu deleted file mode 100644 index 75ecd3f..0000000 --- a/deepspeed/ops/csrc/transformer/cublas_wrappers.cu +++ /dev/null @@ -1,403 +0,0 @@ -#include "cublas_wrappers.h" - -#ifdef __HIP_PLATFORM_HCC__ -int cublas_gemm_ex(rocblas_handle handle, - rocblas_operation transa, - rocblas_operation transb, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const float* A, - const float* B, - float* C, - rocblas_gemm_algo algo) -#else -int cublas_gemm_ex(cublasHandle_t handle, - cublasOperation_t transa, - cublasOperation_t transb, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const float* A, - const float* B, - float* C, - cublasGemmAlgo_t algo) -#endif -{ 
-#ifdef __HIP_PLATFORM_HCC__ - rocblas_status status = rocblas_gemm_ex(handle, - transa, - transb, - m, - n, - k, - (const void*)alpha, - (const void*)A, - rocblas_datatype_f32_r, - (transa == rocblas_operation_none) ? m : k, - (const void*)B, - rocblas_datatype_f32_r, - (transb == rocblas_operation_none) ? k : n, - (const void*)beta, - C, - rocblas_datatype_f32_r, - m, - C, - rocblas_datatype_f32_r, - m, - rocblas_datatype_f32_r, - algo, - 0, - 0); -#else - cublasStatus_t status = cublasGemmEx(handle, - transa, - transb, - m, - n, - k, - (const void*)alpha, - (const void*)A, - CUDA_R_32F, - (transa == CUBLAS_OP_N) ? m : k, - (const void*)B, - CUDA_R_32F, - (transb == CUBLAS_OP_N) ? k : n, - (const void*)beta, - C, - CUDA_R_32F, - m, - CUDA_R_32F, - algo); -#endif - -#ifdef __HIP_PLATFORM_HCC__ - if (status != rocblas_status_success) { -#else - if (status != CUBLAS_STATUS_SUCCESS) { -#endif - fprintf(stderr, - "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", - m, - n, - k, - (int)status); - return EXIT_FAILURE; - } - return 0; -} - -#ifdef __HIP_PLATFORM_HCC__ -int cublas_gemm_ex(rocblas_handle handle, - rocblas_operation transa, - rocblas_operation transb, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const __half* A, - const __half* B, - __half* C, - rocblas_gemm_algo algo) -#else -int cublas_gemm_ex(cublasHandle_t handle, - cublasOperation_t transa, - cublasOperation_t transb, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const __half* A, - const __half* B, - __half* C, - cublasGemmAlgo_t algo) -#endif -{ -#ifdef __HIP_PLATFORM_HCC__ - rocblas_status status = rocblas_gemm_ex(handle, - transa, - transb, - m, - n, - k, - (const void*)alpha, - (const void*)A, - rocblas_datatype_f16_r, - (transa == rocblas_operation_none) ? m : k, - (const void*)B, - rocblas_datatype_f16_r, - (transb == rocblas_operation_none) ? 
k : n, - (const void*)beta, - (void*)C, - rocblas_datatype_f16_r, - m, - (void*)C, - rocblas_datatype_f16_r, - m, - rocblas_datatype_f32_r, - algo, - 0, - 0); -#else - cublasStatus_t status = cublasGemmEx(handle, - transa, - transb, - m, - n, - k, - (const void*)alpha, - (const void*)A, - CUDA_R_16F, - (transa == CUBLAS_OP_N) ? m : k, - (const void*)B, - CUDA_R_16F, - (transb == CUBLAS_OP_N) ? k : n, - (const void*)beta, - (void*)C, - CUDA_R_16F, - m, - CUDA_R_32F, - algo); -#endif - -#ifdef __HIP_PLATFORM_HCC__ - if (status != rocblas_status_success) { -#else - if (status != CUBLAS_STATUS_SUCCESS) { -#endif - fprintf(stderr, - "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", - m, - n, - k, - (int)status); - return EXIT_FAILURE; - } - return 0; -} - -#ifdef __HIP_PLATFORM_HCC__ -int cublas_strided_batched_gemm(rocblas_handle handle, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const float* A, - const float* B, - float* C, - rocblas_operation op_A, - rocblas_operation op_B, - int stride_A, - int stride_B, - int stride_C, - int batch, - rocblas_gemm_algo algo) -#else -int cublas_strided_batched_gemm(cublasHandle_t handle, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const float* A, - const float* B, - float* C, - cublasOperation_t op_A, - cublasOperation_t op_B, - int stride_A, - int stride_B, - int stride_C, - int batch, - cublasGemmAlgo_t algo) -#endif -{ -#ifdef __HIP_PLATFORM_HCC__ - rocblas_status status = - rocblas_gemm_strided_batched_ex(handle, - op_A, - op_B, - m, - n, - k, - alpha, - A, - rocblas_datatype_f32_r, - (op_A == rocblas_operation_none) ? m : k, - stride_A, - B, - rocblas_datatype_f32_r, - (op_B == rocblas_operation_none) ? 
k : n, - stride_B, - beta, - C, - rocblas_datatype_f32_r, - m, - stride_C, - C, - rocblas_datatype_f32_r, - m, - stride_C, - batch, - rocblas_datatype_f32_r, - algo, - 0, - 0); -#else - cublasStatus_t status = cublasGemmStridedBatchedEx(handle, - op_A, - op_B, - m, - n, - k, - alpha, - A, - CUDA_R_32F, - (op_A == CUBLAS_OP_N) ? m : k, - stride_A, - B, - CUDA_R_32F, - (op_B == CUBLAS_OP_N) ? k : n, - stride_B, - beta, - C, - CUDA_R_32F, - m, - stride_C, - batch, - CUDA_R_32F, - algo); -#endif - -#ifdef __HIP_PLATFORM_HCC__ - if (status != rocblas_status_success) { -#else - if (status != CUBLAS_STATUS_SUCCESS) { -#endif - fprintf(stderr, - "!!!! kernel execution error. (batch: %d, m: %d, n: %d, k: %d, error: %d) \n", - batch, - m, - n, - k, - (int)status); - return EXIT_FAILURE; - } - return 0; -} - -#ifdef __HIP_PLATFORM_HCC__ -int cublas_strided_batched_gemm(rocblas_handle handle, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const __half* A, - const __half* B, - __half* C, - rocblas_operation op_A, - rocblas_operation op_B, - int stride_A, - int stride_B, - int stride_C, - int batch, - rocblas_gemm_algo algo) -#else -int cublas_strided_batched_gemm(cublasHandle_t handle, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const __half* A, - const __half* B, - __half* C, - cublasOperation_t op_A, - cublasOperation_t op_B, - int stride_A, - int stride_B, - int stride_C, - int batch, - cublasGemmAlgo_t algo) -#endif -{ -#ifdef __HIP_PLATFORM_HCC__ - rocblas_status status = - rocblas_gemm_strided_batched_ex(handle, - op_A, - op_B, - m, - n, - k, - alpha, - A, - rocblas_datatype_f16_r, - (op_A == rocblas_operation_none) ? m : k, - stride_A, - B, - rocblas_datatype_f16_r, - (op_B == rocblas_operation_none) ? 
k : n, - stride_B, - beta, - C, - rocblas_datatype_f16_r, - m, - stride_C, - C, - rocblas_datatype_f16_r, - m, - stride_C, - batch, - rocblas_datatype_f32_r, - algo, - 0, - 0); -#else - cublasStatus_t status = cublasGemmStridedBatchedEx(handle, - op_A, - op_B, - m, - n, - k, - alpha, - A, - CUDA_R_16F, - (op_A == CUBLAS_OP_N) ? m : k, - stride_A, - B, - CUDA_R_16F, - (op_B == CUBLAS_OP_N) ? k : n, - stride_B, - beta, - C, - CUDA_R_16F, - m, - stride_C, - batch, - CUDA_R_32F, - algo); -#endif - -#ifdef __HIP_PLATFORM_HCC__ - if (status != rocblas_status_success) { -#else - if (status != CUBLAS_STATUS_SUCCESS) { -#endif - fprintf(stderr, - "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", - m, - n, - k, - (int)status); - return EXIT_FAILURE; - } - - return 0; -} diff --git a/deepspeed/ops/csrc/transformer/cublas_wrappers.hip b/deepspeed/ops/csrc/transformer/cublas_wrappers.hip deleted file mode 100644 index 04aa0ef..0000000 --- a/deepspeed/ops/csrc/transformer/cublas_wrappers.hip +++ /dev/null @@ -1,404 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! -#include "cublas_wrappers_hip.h" - -#ifdef __HIP_PLATFORM_HCC__ -int cublas_gemm_ex(rocblas_handle handle, - rocblas_operation transa, - rocblas_operation transb, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const float* A, - const float* B, - float* C, - rocblas_gemm_algo algo) -#else -int cublas_gemm_ex(rocblas_handle handle, - rocblas_operation transa, - rocblas_operation transb, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const float* A, - const float* B, - float* C, - cublasGemmAlgo_t algo) -#endif -{ -#ifdef __HIP_PLATFORM_HCC__ - rocblas_status status = rocblas_gemm_ex(handle, - transa, - transb, - m, - n, - k, - (const void*)alpha, - (const void*)A, - rocblas_datatype_f32_r, - (transa == rocblas_operation_none) ? m : k, - (const void*)B, - rocblas_datatype_f32_r, - (transb == rocblas_operation_none) ? 
k : n, - (const void*)beta, - C, - rocblas_datatype_f32_r, - m, - C, - rocblas_datatype_f32_r, - m, - rocblas_datatype_f32_r, - algo, - 0, - 0); -#else - rocblas_status status = rocblas_gemmex(handle, - transa, - transb, - m, - n, - k, - (const void*)alpha, - (const void*)A, - hipR32F, - (transa == rocblas_operation_none) ? m : k, - (const void*)B, - hipR32F, - (transb == rocblas_operation_none) ? k : n, - (const void*)beta, - C, - hipR32F, - m, - hipR32F, - algo); -#endif - -#ifdef __HIP_PLATFORM_HCC__ - if (status != rocblas_status_success) { -#else - if (status != rocblas_status_success) { -#endif - fprintf(stderr, - "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", - m, - n, - k, - (int)status); - return EXIT_FAILURE; - } - return 0; -} - -#ifdef __HIP_PLATFORM_HCC__ -int cublas_gemm_ex(rocblas_handle handle, - rocblas_operation transa, - rocblas_operation transb, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const __half* A, - const __half* B, - __half* C, - rocblas_gemm_algo algo) -#else -int cublas_gemm_ex(rocblas_handle handle, - rocblas_operation transa, - rocblas_operation transb, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const __half* A, - const __half* B, - __half* C, - cublasGemmAlgo_t algo) -#endif -{ -#ifdef __HIP_PLATFORM_HCC__ - rocblas_status status = rocblas_gemm_ex(handle, - transa, - transb, - m, - n, - k, - (const void*)alpha, - (const void*)A, - rocblas_datatype_f16_r, - (transa == rocblas_operation_none) ? m : k, - (const void*)B, - rocblas_datatype_f16_r, - (transb == rocblas_operation_none) ? k : n, - (const void*)beta, - (void*)C, - rocblas_datatype_f16_r, - m, - (void*)C, - rocblas_datatype_f16_r, - m, - rocblas_datatype_f32_r, - algo, - 0, - 0); -#else - rocblas_status status = rocblas_gemmex(handle, - transa, - transb, - m, - n, - k, - (const void*)alpha, - (const void*)A, - hipR16F, - (transa == rocblas_operation_none) ? 
m : k, - (const void*)B, - hipR16F, - (transb == rocblas_operation_none) ? k : n, - (const void*)beta, - (void*)C, - hipR16F, - m, - hipR32F, - algo); -#endif - -#ifdef __HIP_PLATFORM_HCC__ - if (status != rocblas_status_success) { -#else - if (status != rocblas_status_success) { -#endif - fprintf(stderr, - "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", - m, - n, - k, - (int)status); - return EXIT_FAILURE; - } - return 0; -} - -#ifdef __HIP_PLATFORM_HCC__ -int cublas_strided_batched_gemm(rocblas_handle handle, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const float* A, - const float* B, - float* C, - rocblas_operation op_A, - rocblas_operation op_B, - int stride_A, - int stride_B, - int stride_C, - int batch, - rocblas_gemm_algo algo) -#else -int cublas_strided_batched_gemm(rocblas_handle handle, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const float* A, - const float* B, - float* C, - rocblas_operation op_A, - rocblas_operation op_B, - int stride_A, - int stride_B, - int stride_C, - int batch, - cublasGemmAlgo_t algo) -#endif -{ -#ifdef __HIP_PLATFORM_HCC__ - rocblas_status status = - rocblas_gemm_strided_batched_ex(handle, - op_A, - op_B, - m, - n, - k, - alpha, - A, - rocblas_datatype_f32_r, - (op_A == rocblas_operation_none) ? m : k, - stride_A, - B, - rocblas_datatype_f32_r, - (op_B == rocblas_operation_none) ? k : n, - stride_B, - beta, - C, - rocblas_datatype_f32_r, - m, - stride_C, - C, - rocblas_datatype_f32_r, - m, - stride_C, - batch, - rocblas_datatype_f32_r, - algo, - 0, - 0); -#else - rocblas_status status = cublasGemmStridedBatchedEx(handle, - op_A, - op_B, - m, - n, - k, - alpha, - A, - hipR32F, - (op_A == rocblas_operation_none) ? m : k, - stride_A, - B, - hipR32F, - (op_B == rocblas_operation_none) ? 
k : n, - stride_B, - beta, - C, - hipR32F, - m, - stride_C, - batch, - hipR32F, - algo); -#endif - -#ifdef __HIP_PLATFORM_HCC__ - if (status != rocblas_status_success) { -#else - if (status != rocblas_status_success) { -#endif - fprintf(stderr, - "!!!! kernel execution error. (batch: %d, m: %d, n: %d, k: %d, error: %d) \n", - batch, - m, - n, - k, - (int)status); - return EXIT_FAILURE; - } - return 0; -} - -#ifdef __HIP_PLATFORM_HCC__ -int cublas_strided_batched_gemm(rocblas_handle handle, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const __half* A, - const __half* B, - __half* C, - rocblas_operation op_A, - rocblas_operation op_B, - int stride_A, - int stride_B, - int stride_C, - int batch, - rocblas_gemm_algo algo) -#else -int cublas_strided_batched_gemm(rocblas_handle handle, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const __half* A, - const __half* B, - __half* C, - rocblas_operation op_A, - rocblas_operation op_B, - int stride_A, - int stride_B, - int stride_C, - int batch, - cublasGemmAlgo_t algo) -#endif -{ -#ifdef __HIP_PLATFORM_HCC__ - rocblas_status status = - rocblas_gemm_strided_batched_ex(handle, - op_A, - op_B, - m, - n, - k, - alpha, - A, - rocblas_datatype_f16_r, - (op_A == rocblas_operation_none) ? m : k, - stride_A, - B, - rocblas_datatype_f16_r, - (op_B == rocblas_operation_none) ? k : n, - stride_B, - beta, - C, - rocblas_datatype_f16_r, - m, - stride_C, - C, - rocblas_datatype_f16_r, - m, - stride_C, - batch, - rocblas_datatype_f32_r, - algo, - 0, - 0); -#else - rocblas_status status = cublasGemmStridedBatchedEx(handle, - op_A, - op_B, - m, - n, - k, - alpha, - A, - hipR16F, - (op_A == rocblas_operation_none) ? m : k, - stride_A, - B, - hipR16F, - (op_B == rocblas_operation_none) ? 
k : n, - stride_B, - beta, - C, - hipR16F, - m, - stride_C, - batch, - hipR32F, - algo); -#endif - -#ifdef __HIP_PLATFORM_HCC__ - if (status != rocblas_status_success) { -#else - if (status != rocblas_status_success) { -#endif - fprintf(stderr, - "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", - m, - n, - k, - (int)status); - return EXIT_FAILURE; - } - - return 0; -} diff --git a/deepspeed/ops/csrc/transformer/dropout_kernels.cu b/deepspeed/ops/csrc/transformer/dropout_kernels.cu deleted file mode 100644 index d1ba135..0000000 --- a/deepspeed/ops/csrc/transformer/dropout_kernels.cu +++ /dev/null @@ -1,868 +0,0 @@ -#include "custom_cuda_layers.h" - -const int unroll_factor = 4; - -__global__ void dropout_kernel(const int N, - const float ratio, - float* out, - const float* Xdata, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. - ratio); - int idx = blockIdx.x * blockDim.x + threadIdx.x; - - curandStatePhilox4_32_10_t state; - curand_init(seed.first, idx, seed.second, &state); - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - float4 rand = curand_uniform4(&state); - uint8_t m[unroll_factor]; - - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - int i = j * unroll_factor; - - mask[i] = (uint8_t)m[0]; - mask[i + 1] = (uint8_t)m[1]; - mask[i + 2] = (uint8_t)m[2]; - mask[i + 3] = (uint8_t)m[3]; - - out[i] = Xdata[i] * scale * m[0]; - out[i + 1] = Xdata[i + 1] * scale * m[1]; - out[i + 2] = Xdata[i + 2] * scale * m[2]; - out[i + 3] = Xdata[i + 3] * scale * m[3]; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - float4 rand = curand_uniform4(&state); - float* rand_data = &(rand.x); - int k = 0; - for (int i = high_index; i < N; i++) { - uint8_t m = (uint8_t)(rand_data[k++] > ratio); - out[i] = Xdata[i] * scale * m; - mask[i] = m; - } - } -} - 
-__global__ void dropout_kernel(const int N, - const float ratio, - __half* out, - const __half* Xdata, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. - ratio); - - int idx = blockIdx.x * blockDim.x + threadIdx.x; - - curandStatePhilox4_32_10_t state; - curand_init(seed.first, idx, seed.second, &state); - -#ifdef __STOCHASTIC_MODE__ - - const __half2 h_scale = __float2half2_rn(scale); - const float2* x_cast = reinterpret_cast(Xdata); - float2* out_cast = reinterpret_cast(out); - uint32_t* mask_cast = reinterpret_cast(mask); - - uint32_t m_32; - uint8_t* m = reinterpret_cast(&m_32); - - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - __half2 mask_h[2]; - float2 mask_f[2]; - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - float2 x_f = x_cast[j]; - __half2* x_h = reinterpret_cast<__half2*>(&x_f); - - float4 rand = curand_uniform4(&state); - - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - float* mask_f_data = &mask_f[0].x; -#pragma unroll - for (int i = 0; i < unroll_factor; i++) mask_f_data[i] = (float)(m[i]); - - mask_h[0] = __float22half2_rn(mask_f[0]); - mask_h[1] = __float22half2_rn(mask_f[1]); - - result_h[0] = x_h[0] * h_scale * mask_h[0]; - result_h[1] = x_h[1] * h_scale * mask_h[1]; - - out_cast[j] = result_f; - - mask_cast[j] = m_32; - } - -#else - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - int i = j * unroll_factor; - - const __half2* vals_half = reinterpret_cast(Xdata + i); - float2 vals_half_f[2]; - vals_half_f[0] = __half22float2(vals_half[0]); - vals_half_f[1] = __half22float2(vals_half[1]); - - uint8_t m[unroll_factor]; - float4 rand = curand_uniform4(&state); - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - out[i] = __float2half(vals_half_f[0].x * scale * m[0]); - out[i + 1] = 
__float2half(vals_half_f[0].y * scale * m[1]); - out[i + 2] = __float2half(vals_half_f[1].x * scale * m[2]); - out[i + 3] = __float2half(vals_half_f[1].y * scale * m[3]); - - mask[i] = m[0]; - mask[i + 1] = m[1]; - mask[i + 2] = m[2]; - mask[i + 3] = m[3]; - } - -#endif - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - float4 rand = curand_uniform4(&state); - float* rand_data = &(rand.x); - int k = 0; - for (int i = high_index; i < N; i++) { - uint8_t m = (uint8_t)(rand_data[k++] > ratio); - out[i] = __float2half((float)Xdata[i] * scale * m); - mask[i] = m; - } - } -} - -__global__ void dropout_kernel_bwd(const int N, - const float ratio, - const float* Xdata, - float* out, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. - ratio); - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - int i = j * unroll_factor; - - out[i] = mask[i] ? Xdata[i] * scale : 0.0; - out[i + 1] = mask[i + 1] ? Xdata[i + 1] * scale : 0.0; - out[i + 2] = mask[i + 2] ? Xdata[i + 2] * scale : 0.0; - out[i + 3] = mask[i + 3] ? Xdata[i + 3] * scale : 0.0; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - for (int i = high_index; i < N; i++) { out[i] = mask[i] ? Xdata[i] * scale : 0.0; } - } -} - -__global__ void dropout_kernel_bwd(const int N, - const float ratio, - const __half* Xdata, - __half* out, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. 
- ratio); - -#ifdef __STOCHASTIC_MODE__ - - const __half2 h_scale = __float2half2_rn(scale); - - const float2* x_cast = reinterpret_cast(Xdata); - float2* out_cast = reinterpret_cast(out); - uint32_t* mask_cast = reinterpret_cast(mask); - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - float2 x_f = x_cast[j]; - __half2* x_h = reinterpret_cast<__half2*>(&x_f); - - uint32_t m_32 = mask_cast[j]; - uint8_t* m = (uint8_t*)&m_32; - - __half2 mask_h[2]; - float2 mask_f[2]; - - float* mask_f_data = &mask_f[0].x; -#pragma unroll - for (int i = 0; i < unroll_factor; i++) mask_f_data[i] = (float)(m[i]); - -#pragma unroll - for (int i = 0; i < 2; i++) mask_h[i] = __float22half2_rn(mask_f[i]); - - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - - result_h[0] = x_h[0] * h_scale * mask_h[0]; - result_h[1] = x_h[1] * h_scale * mask_h[1]; - - out_cast[j] = result_f; - } - -#else - - const __half h_scale = __float2half(scale); - const __half h_zero = __float2half(0.0); - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - int i = j * unroll_factor; - - const __half2* vals_half = reinterpret_cast(Xdata + i); - - uint8_t* m = mask + i; - - float2 vals_half_f[2]; - - vals_half_f[0] = __half22float2(vals_half[0]); - vals_half_f[1] = __half22float2(vals_half[1]); - - out[i] = __float2half(vals_half_f[0].x * scale * m[0]); - out[i + 1] = __float2half(vals_half_f[0].y * scale * m[1]); - out[i + 2] = __float2half(vals_half_f[1].x * scale * m[2]); - out[i + 3] = __float2half(vals_half_f[1].y * scale * m[3]); - } - -#endif - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - for (int i = high_index; i < N; i++) { - out[i] = __float2half((float)Xdata[i] * scale * mask[i]); - } - } -} - -template -void launch_dropout(T* out, - const T* vals, - uint8_t* mask, - int total_count, - int dim, - float ratio, - cudaStream_t stream, - bool bwd) -{ - assert(unroll_factor == 4); - - dim3 
grid_dim = DS_GET_BLOCKS(total_count / unroll_factor); - dim3 block_dim = DS_CUDA_NUM_THREADS; - - if (dim > 512) { - block_dim.x >>= 1; - grid_dim.x <<= 1; - } - uint64_t inc = total_count / grid_dim.x / block_dim.x; - std::pair seed = Context::Instance().IncrementOffset(inc); - if (bwd) - dropout_kernel_bwd<<>>( - total_count, ratio, vals, out, mask, seed); - else - dropout_kernel<<>>( - total_count, ratio, out, vals, mask, seed); -} - -template void launch_dropout(float* out, - const float* vals, - uint8_t* mask, - int total_count, - int dim, - float ratio, - cudaStream_t stream, - bool); -template void launch_dropout(__half* out, - const __half* vals, - uint8_t* mask, - int total_count, - int dim, - float ratio, - cudaStream_t stream, - bool); - -__global__ void dropout_grad_kernel(const int N, const float scale, float* Xdata, uint8_t* mask) -{ - CUDA_1D_KERNEL_LOOP(i, N) { Xdata[i] *= scale * mask[i]; } -} - -__global__ void dropout_grad_kernel(const int N, const float scale, __half* Xdata, uint8_t* mask) -{ - const __half2 h_scale = __float2half2_rn(scale); - float2* x_cast = reinterpret_cast(Xdata); - uint32_t* mask_cast = reinterpret_cast(mask); - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - float2 x_data = x_cast[j]; - uint32_t m_32 = mask_cast[j]; - uint8_t* m = (uint8_t*)&m_32; - - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - -#ifdef __STOCHASTIC_MODE__ - - __half2* x_data_h = reinterpret_cast<__half2*>(&x_data); - __half2 mask_h[2]; - float2 mask_f[2]; - - float* mask_f_data = &mask_f[0].x; -#pragma unroll - for (int i = 0; i < unroll_factor; i++) *(mask_f_data++) = (float)(m[i]); - - mask_h[0] = __float22half2_rn(mask_f[0]); - mask_h[1] = __float22half2_rn(mask_f[1]); - - result_h[0] = x_data_h[0] * h_scale * mask_h[0]; - result_h[1] = x_data_h[1] * h_scale * mask_h[1]; - -#else - - __half* x_data_h = reinterpret_cast<__half*>(&x_data); - float2 result[2]; - - result[0].x = (float)x_data_h[0] * scale * m[0]; - 
result[0].y = (float)x_data_h[1] * scale * m[1]; - result[1].x = (float)x_data_h[2] * scale * m[2]; - result[1].y = (float)x_data_h[3] * scale * m[3]; - - result_h[0] = __float22half2_rn(result[0]); - result_h[1] = __float22half2_rn(result[1]); - -#endif - x_cast[j] = result_f; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - for (int i = high_index; i < N; i++) { - Xdata[i] = __float2half((float)Xdata[i] * scale * mask[i]); - } - } -} - -template -void launch_dropout_grad(T* vals, uint8_t* mask, int total_count, float ratio, cudaStream_t stream) -{ - assert(unroll_factor == 4); - - const float scale = 1. / (1. - ratio); - dropout_grad_kernel<<>>(total_count, scale, vals, mask); -} - -template void launch_dropout_grad(float* vals, - uint8_t* mask, - int total_count, - float ratio, - cudaStream_t stream); -template void launch_dropout_grad(__half* vals, - uint8_t* mask, - int total_count, - float ratio, - cudaStream_t stream); - -__global__ void dropout_grad_kernel(const int N, - const float scale, - const float* Xdata, - float* out, - uint8_t* mask) -{ - CUDA_1D_KERNEL_LOOP(i, N) { out[i] = Xdata[i] * scale * mask[i]; } -} - -__global__ void dropout_grad_kernel(const int N, - const float scale, - const __half* Xdata, - __half* out, - uint8_t* mask) -{ - const float2* x_cast = reinterpret_cast(Xdata); - float2* out_cast = reinterpret_cast(out); - const uint32_t* mask_cast = reinterpret_cast(mask); - - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - float2 x_data = x_cast[j]; - uint32_t m_32 = mask_cast[j]; - uint8_t* m = (uint8_t*)&m_32; - - __half* x_data_h = reinterpret_cast<__half*>(&x_data); - float2 result[2]; - - result[0].x = (float)x_data_h[0] * scale * m[0]; - result[0].y = (float)x_data_h[1] * scale * m[1]; - result[1].x = (float)x_data_h[2] * scale * m[2]; - result[1].y = 
(float)x_data_h[3] * scale * m[3]; - - result_h[0] = __float22half2_rn(result[0]); - result_h[1] = __float22half2_rn(result[1]); - - out_cast[j] = result_f; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - for (int i = high_index; i < N; i++) { - out[i] = __float2half((float)Xdata[i] * scale * mask[i]); - } - } -} - -template -void launch_dropout_grad(T* vals_out, - const T* vals, - uint8_t* mask, - int total_count, - float ratio, - cudaStream_t stream) -{ - assert(unroll_factor == 4); - - const float scale = 1. / (1. - ratio); - dropout_grad_kernel<<>>(total_count, scale, vals, vals_out, mask); -} -template void launch_dropout_grad(float*, - const float* vals, - uint8_t* mask, - int total_count, - float ratio, - cudaStream_t stream); -template void launch_dropout_grad(__half*, - const __half* vals, - uint8_t* mask, - int total_count, - float ratio, - cudaStream_t stream); - -__global__ void dropout_kernel(const int N, - const int dim, - const float ratio, - const float* bias, - float* Xdata, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. 
- ratio); - int idx = blockIdx.x * blockDim.x + threadIdx.x; - int tid = threadIdx.x % (dim / unroll_factor); - - curandStatePhilox4_32_10_t state; - curand_init(seed.first, idx, seed.second, &state); - - float4* Xdata_cast = reinterpret_cast(Xdata); - uint32_t* mask_32 = reinterpret_cast(mask); - const float4* bias_cast = reinterpret_cast(bias); - - CUDA_1D_KERNEL_LOOP(j, N) - { - float4 rand = curand_uniform4(&state); - uint32_t m_32; - uint8_t* m = (uint8_t*)&m_32; - - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - float4 x_data = Xdata_cast[j]; - float4 b_data = bias_cast[j % (dim / unroll_factor)]; - - x_data.x += b_data.x; - x_data.y += b_data.y; - x_data.z += b_data.z; - x_data.w += b_data.w; - - x_data.x = x_data.x * scale * m[0]; - x_data.y = x_data.y * scale * m[1]; - x_data.z = x_data.z * scale * m[2]; - x_data.w = x_data.w * scale * m[3]; - - mask_32[j] = m_32; - Xdata_cast[j] = x_data; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - float4 rand = curand_uniform4(&state); - float* rand_data = &(rand.x); - int k = 0; - for (int i = high_index; i < N; i++) { - float x_data = Xdata[i] + bias[i % dim]; - uint8_t m = (uint8_t)(rand_data[k++] > ratio); - Xdata[i] = x_data * scale * m; - mask[i] = m; - } - } -} - -__global__ void dropout_kernel(const int N, - const int dim, - const float ratio, - const __half* bias, - __half* Xdata, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. 
- ratio); - int idx = blockIdx.x * blockDim.x + threadIdx.x; - int tid = threadIdx.x % (dim / unroll_factor); - - curandStatePhilox4_32_10_t state; - curand_init(seed.first, idx, seed.second, &state); - - float2* Xdata_cast = reinterpret_cast(Xdata); - uint32_t* mask_32 = reinterpret_cast(mask); - const float2* bias_cast = reinterpret_cast(bias); - - CUDA_1D_KERNEL_LOOP(j, N) - { - float4 rand = curand_uniform4(&state); - - float2 data_f; - __half2* data_h = reinterpret_cast<__half2*>(&data_f); - - float2 bias_f; - __half2* bias_h = reinterpret_cast<__half2*>(&bias_f); - - data_f = Xdata_cast[j]; - bias_f = bias_cast[j % (dim / unroll_factor)]; - - float2 data_h_0 = __half22float2(data_h[0]); - float2 data_h_1 = __half22float2(data_h[1]); - - float2 bias_h_0 = __half22float2(bias_h[0]); - float2 bias_h_1 = __half22float2(bias_h[1]); - - data_h_0.x += bias_h_0.x; - data_h_0.y += bias_h_0.y; - data_h_1.x += bias_h_1.x; - data_h_1.y += bias_h_1.y; - - uint32_t m_32; - uint8_t* m = (uint8_t*)&m_32; - - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - data_h_0.x = __float2half(data_h_0.x * scale * m[0]); - data_h_0.y = __float2half(data_h_0.y * scale * m[1]); - data_h_1.x = __float2half(data_h_1.x * scale * m[2]); - data_h_1.y = __float2half(data_h_1.y * scale * m[3]); - - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - - result_h[0] = __float22half2_rn(data_h_0); - result_h[1] = __float22half2_rn(data_h_1); - - Xdata_cast[j] = result_f; - mask_32[j] = m_32; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - float4 rand = curand_uniform4(&state); - float* rand_data = &(rand.x); - int k = 0; - for (int i = high_index; i < N; i++) { - float x_data = (float)Xdata[i] + (float)bias[i % dim]; - uint8_t m = (uint8_t)(rand_data[k++] > ratio); - Xdata[i] = 
__float2half(x_data * scale * m); - mask[i] = m; - } - } -} - -template -void launch_dropout(T* out, - const T* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - cudaStream_t stream) -{ - assert(unroll_factor == 4); - - int total_count = batch * dim / unroll_factor; - - dim3 grid_dim = DS_GET_BLOCKS(total_count); - dim3 block_dim = DS_CUDA_NUM_THREADS; - - uint64_t inc = (batch * dim) / grid_dim.x / block_dim.x; - std::pair seed = Context::Instance().IncrementOffset(inc); - - dropout_kernel<<>>( - total_count, dim, ratio, bias, out, mask, seed); -} - -template void launch_dropout(float*, - const float* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - cudaStream_t stream); -template void launch_dropout(__half*, - const __half* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - cudaStream_t stream); - -__global__ void dropout_kernel(const int N, - const int dim, - const float ratio, - const float* input, - const float* residual, - const float* bias, - float* out, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. 
- ratio); - int idx = blockIdx.x * blockDim.x + threadIdx.x; - int tid = threadIdx.x % (dim / unroll_factor); - - curandStatePhilox4_32_10_t state; - curand_init(seed.first, idx, seed.second, &state); - - float4* out_cast = reinterpret_cast(out); - uint32_t* mask_32 = reinterpret_cast(mask); - - const float4* bias_cast = reinterpret_cast(bias); - const float4* residual_cast = reinterpret_cast(residual); - const float4* input_cast = reinterpret_cast(input); - - CUDA_1D_KERNEL_LOOP(j, N) - { - float4 rand = curand_uniform4(&state); - - uint32_t m_32; - uint8_t* m = (uint8_t*)&m_32; - - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - float4 out_data; - float4 b_data = bias_cast[j % (dim / unroll_factor)]; - float4 res_data = residual_cast[j]; - float4 inp_data = input_cast[j]; - - out_data.x = (b_data.x + inp_data.x); - out_data.y = (b_data.y + inp_data.y); - out_data.z = (b_data.z + inp_data.z); - out_data.w = (b_data.w + inp_data.w); - - out_data.x = out_data.x * scale * m[0]; - out_data.y = out_data.y * scale * m[1]; - out_data.z = out_data.z * scale * m[2]; - out_data.w = out_data.w * scale * m[3]; - - out_data.x += res_data.x; - out_data.y += res_data.y; - out_data.z += res_data.z; - out_data.w += res_data.w; - - mask_32[j] = m_32; - out_cast[j] = out_data; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - float4 rand = curand_uniform4(&state); - float* rand_data = &(rand.x); - int k = 0; - for (int i = high_index; i < N; i++) { - float x_data = input[i] + bias[i % dim]; - uint8_t m = (uint8_t)(rand_data[k++] > ratio); - x_data = x_data * scale * m; - x_data += residual[i]; - - out[i] = x_data; - mask[i] = m; - } - } -} - -__global__ void dropout_kernel(const int N, - const int dim, - const float ratio, - const __half* input, - const __half* residual, - const __half* bias, - 
__half* out, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. - ratio); - int idx = blockIdx.x * blockDim.x + threadIdx.x; - int tid = threadIdx.x % (dim / unroll_factor); - - curandStatePhilox4_32_10_t state; - curand_init(seed.first, idx, seed.second, &state); - - float2* out_cast = reinterpret_cast(out); - uint32_t* mask_32 = reinterpret_cast(mask); - - const float2* bias_cast = reinterpret_cast(bias); - const float2* residual_cast = reinterpret_cast(residual); - const float2* input_cast = reinterpret_cast(input); - - CUDA_1D_KERNEL_LOOP(j, N) - { - float4 rand = curand_uniform4(&state); - - float2 data_f; - __half2* data_h = reinterpret_cast<__half2*>(&data_f); - - float2 bias_f; - __half2* bias_h = reinterpret_cast<__half2*>(&bias_f); - - float2 residual_f; - __half2* residual_h = reinterpret_cast<__half2*>(&residual_f); - - float2 input_f; - __half2* input_h = reinterpret_cast<__half2*>(&input_f); - - bias_f = bias_cast[j % (dim / unroll_factor)]; - residual_f = residual_cast[j]; - input_f = input_cast[j]; - - float2 data_h_0 = __half22float2(data_h[0]); - float2 data_h_1 = __half22float2(data_h[1]); - - float2 bias_h_0 = __half22float2(bias_h[0]); - float2 bias_h_1 = __half22float2(bias_h[1]); - - float2 residual_h_0 = __half22float2(residual_h[0]); - float2 residual_h_1 = __half22float2(residual_h[1]); - - float2 input_h_0 = __half22float2(input_h[0]); - float2 input_h_1 = __half22float2(input_h[1]); - - data_h_0.x = (bias_h_0.x + input_h_0.x); - data_h_0.y = (bias_h_0.y + input_h_0.y); - data_h_1.x = (bias_h_1.x + input_h_1.x); - data_h_1.y = (bias_h_1.y + input_h_1.y); - - uint32_t m_32; - uint8_t* m = (uint8_t*)&m_32; - - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - data_h_0.x = __float2half(data_h_0.x * scale * m[0]); - data_h_0.y = __float2half(data_h_0.y * scale * m[1]); - data_h_1.x = __float2half(data_h_1.x * scale * m[2]); - 
data_h_1.y = __float2half(data_h_1.y * scale * m[3]); - - data_h_0.x += residual_h_0.x; - data_h_0.y += residual_h_0.y; - data_h_1.x += residual_h_1.x; - data_h_1.y += residual_h_1.y; - - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - - result_h[0] = __float22half2_rn(data_h_0); - result_h[1] = __float22half2_rn(data_h_1); - - out_cast[j] = result_f; - mask_32[j] = m_32; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - float4 rand = curand_uniform4(&state); - float* rand_data = &(rand.x); - int k = 0; - for (int i = high_index; i < N; i++) { - float x_data = (float)input[i] + (float)bias[i % dim]; - uint8_t m = (uint8_t)(rand_data[k++] > ratio); - x_data = x_data * scale * m; - x_data += (float)residual[i]; - - out[i] = __float2half(x_data); - mask[i] = m; - } - } -} - -template -void launch_dropout(T* out, - const T* input, - const T* residual, - const T* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - cudaStream_t stream) -{ - assert(unroll_factor == 4); - - int total_count = batch * dim / unroll_factor; - dim3 grid_dim = DS_GET_BLOCKS(total_count); - dim3 block_dim = DS_CUDA_NUM_THREADS; - - uint64_t inc = (batch * dim) / grid_dim.x / block_dim.x; - std::pair seed = Context::Instance().IncrementOffset(inc); - - dropout_kernel<<>>( - total_count, dim, ratio, input, residual, bias, out, mask, seed); -} - -template void launch_dropout(float*, - const float*, - const float* residual, - const float* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - cudaStream_t stream); -template void launch_dropout(__half*, - const __half*, - const __half* residual, - const __half* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - cudaStream_t stream); diff --git a/deepspeed/ops/csrc/transformer/dropout_kernels.hip b/deepspeed/ops/csrc/transformer/dropout_kernels.hip deleted file mode 100644 index a4b880a..0000000 
--- a/deepspeed/ops/csrc/transformer/dropout_kernels.hip +++ /dev/null @@ -1,870 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! -#include "hip/hip_runtime.h" -#include "custom_hip_layers.h" - -const int unroll_factor = 4; - -__global__ void dropout_kernel(const int N, - const float ratio, - float* out, - const float* Xdata, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. - ratio); - int idx = blockIdx.x * blockDim.x + threadIdx.x; - - hiprandStatePhilox4_32_10_t state; - hiprand_init(seed.first, idx, seed.second, &state); - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - float4 rand = hiprand_uniform4(&state); - uint8_t m[unroll_factor]; - - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - int i = j * unroll_factor; - - mask[i] = (uint8_t)m[0]; - mask[i + 1] = (uint8_t)m[1]; - mask[i + 2] = (uint8_t)m[2]; - mask[i + 3] = (uint8_t)m[3]; - - out[i] = Xdata[i] * scale * m[0]; - out[i + 1] = Xdata[i + 1] * scale * m[1]; - out[i + 2] = Xdata[i + 2] * scale * m[2]; - out[i + 3] = Xdata[i + 3] * scale * m[3]; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - float4 rand = hiprand_uniform4(&state); - float* rand_data = &(rand.x); - int k = 0; - for (int i = high_index; i < N; i++) { - uint8_t m = (uint8_t)(rand_data[k++] > ratio); - out[i] = Xdata[i] * scale * m; - mask[i] = m; - } - } -} - -__global__ void dropout_kernel(const int N, - const float ratio, - __half* out, - const __half* Xdata, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. 
- ratio); - - int idx = blockIdx.x * blockDim.x + threadIdx.x; - - hiprandStatePhilox4_32_10_t state; - hiprand_init(seed.first, idx, seed.second, &state); - -#ifdef __STOCHASTIC_MODE__ - - const __half2 h_scale = __float2half2_rn(scale); - const float2* x_cast = reinterpret_cast(Xdata); - float2* out_cast = reinterpret_cast(out); - uint32_t* mask_cast = reinterpret_cast(mask); - - uint32_t m_32; - uint8_t* m = reinterpret_cast(&m_32); - - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - __half2 mask_h[2]; - float2 mask_f[2]; - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - float2 x_f = x_cast[j]; - __half2* x_h = reinterpret_cast<__half2*>(&x_f); - - float4 rand = hiprand_uniform4(&state); - - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - float* mask_f_data = &mask_f[0].x; -#pragma unroll - for (int i = 0; i < unroll_factor; i++) mask_f_data[i] = (float)(m[i]); - - mask_h[0] = __float22half2_rn(mask_f[0]); - mask_h[1] = __float22half2_rn(mask_f[1]); - - result_h[0] = x_h[0] * h_scale * mask_h[0]; - result_h[1] = x_h[1] * h_scale * mask_h[1]; - - out_cast[j] = result_f; - - mask_cast[j] = m_32; - } - -#else - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - int i = j * unroll_factor; - - const __half2* vals_half = reinterpret_cast(Xdata + i); - float2 vals_half_f[2]; - vals_half_f[0] = __half22float2(vals_half[0]); - vals_half_f[1] = __half22float2(vals_half[1]); - - uint8_t m[unroll_factor]; - float4 rand = hiprand_uniform4(&state); - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - out[i] = __float2half(vals_half_f[0].x * scale * m[0]); - out[i + 1] = __float2half(vals_half_f[0].y * scale * m[1]); - out[i + 2] = __float2half(vals_half_f[1].x * scale * m[2]); - out[i + 3] = __float2half(vals_half_f[1].y * scale * m[3]); - - mask[i] = 
m[0]; - mask[i + 1] = m[1]; - mask[i + 2] = m[2]; - mask[i + 3] = m[3]; - } - -#endif - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - float4 rand = hiprand_uniform4(&state); - float* rand_data = &(rand.x); - int k = 0; - for (int i = high_index; i < N; i++) { - uint8_t m = (uint8_t)(rand_data[k++] > ratio); - out[i] = __float2half((float)Xdata[i] * scale * m); - mask[i] = m; - } - } -} - -__global__ void dropout_kernel_bwd(const int N, - const float ratio, - const float* Xdata, - float* out, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. - ratio); - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - int i = j * unroll_factor; - - out[i] = mask[i] ? Xdata[i] * scale : 0.0; - out[i + 1] = mask[i + 1] ? Xdata[i + 1] * scale : 0.0; - out[i + 2] = mask[i + 2] ? Xdata[i + 2] * scale : 0.0; - out[i + 3] = mask[i + 3] ? Xdata[i + 3] * scale : 0.0; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - for (int i = high_index; i < N; i++) { out[i] = mask[i] ? Xdata[i] * scale : 0.0; } - } -} - -__global__ void dropout_kernel_bwd(const int N, - const float ratio, - const __half* Xdata, - __half* out, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. 
- ratio); - -#ifdef __STOCHASTIC_MODE__ - - const __half2 h_scale = __float2half2_rn(scale); - - const float2* x_cast = reinterpret_cast(Xdata); - float2* out_cast = reinterpret_cast(out); - uint32_t* mask_cast = reinterpret_cast(mask); - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - float2 x_f = x_cast[j]; - __half2* x_h = reinterpret_cast<__half2*>(&x_f); - - uint32_t m_32 = mask_cast[j]; - uint8_t* m = (uint8_t*)&m_32; - - __half2 mask_h[2]; - float2 mask_f[2]; - - float* mask_f_data = &mask_f[0].x; -#pragma unroll - for (int i = 0; i < unroll_factor; i++) mask_f_data[i] = (float)(m[i]); - -#pragma unroll - for (int i = 0; i < 2; i++) mask_h[i] = __float22half2_rn(mask_f[i]); - - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - - result_h[0] = x_h[0] * h_scale * mask_h[0]; - result_h[1] = x_h[1] * h_scale * mask_h[1]; - - out_cast[j] = result_f; - } - -#else - - const __half h_scale = __float2half(scale); - const __half h_zero = __float2half(0.0); - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - int i = j * unroll_factor; - - const __half2* vals_half = reinterpret_cast(Xdata + i); - - uint8_t* m = mask + i; - - float2 vals_half_f[2]; - - vals_half_f[0] = __half22float2(vals_half[0]); - vals_half_f[1] = __half22float2(vals_half[1]); - - out[i] = __float2half(vals_half_f[0].x * scale * m[0]); - out[i + 1] = __float2half(vals_half_f[0].y * scale * m[1]); - out[i + 2] = __float2half(vals_half_f[1].x * scale * m[2]); - out[i + 3] = __float2half(vals_half_f[1].y * scale * m[3]); - } - -#endif - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - for (int i = high_index; i < N; i++) { - out[i] = __float2half((float)Xdata[i] * scale * mask[i]); - } - } -} - -template -void launch_dropout(T* out, - const T* vals, - uint8_t* mask, - int total_count, - int dim, - float ratio, - hipStream_t stream, - bool bwd) -{ - assert(unroll_factor == 4); - - dim3 
grid_dim = DS_GET_BLOCKS(total_count / unroll_factor); - dim3 block_dim = DS_CUDA_NUM_THREADS; - - if (dim > 512) { - block_dim.x >>= 1; - grid_dim.x <<= 1; - } - uint64_t inc = total_count / grid_dim.x / block_dim.x; - std::pair seed = Context::Instance().IncrementOffset(inc); - if (bwd) - hipLaunchKernelGGL(( dropout_kernel_bwd), dim3(grid_dim), dim3(block_dim), 0, stream, - total_count, ratio, vals, out, mask, seed); - else - hipLaunchKernelGGL(( dropout_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, - total_count, ratio, out, vals, mask, seed); -} - -template void launch_dropout(float* out, - const float* vals, - uint8_t* mask, - int total_count, - int dim, - float ratio, - hipStream_t stream, - bool); -template void launch_dropout(__half* out, - const __half* vals, - uint8_t* mask, - int total_count, - int dim, - float ratio, - hipStream_t stream, - bool); - -__global__ void dropout_grad_kernel(const int N, const float scale, float* Xdata, uint8_t* mask) -{ - CUDA_1D_KERNEL_LOOP(i, N) { Xdata[i] *= scale * mask[i]; } -} - -__global__ void dropout_grad_kernel(const int N, const float scale, __half* Xdata, uint8_t* mask) -{ - const __half2 h_scale = __float2half2_rn(scale); - float2* x_cast = reinterpret_cast(Xdata); - uint32_t* mask_cast = reinterpret_cast(mask); - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - float2 x_data = x_cast[j]; - uint32_t m_32 = mask_cast[j]; - uint8_t* m = (uint8_t*)&m_32; - - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - -#ifdef __STOCHASTIC_MODE__ - - __half2* x_data_h = reinterpret_cast<__half2*>(&x_data); - __half2 mask_h[2]; - float2 mask_f[2]; - - float* mask_f_data = &mask_f[0].x; -#pragma unroll - for (int i = 0; i < unroll_factor; i++) *(mask_f_data++) = (float)(m[i]); - - mask_h[0] = __float22half2_rn(mask_f[0]); - mask_h[1] = __float22half2_rn(mask_f[1]); - - result_h[0] = x_data_h[0] * h_scale * mask_h[0]; - result_h[1] = x_data_h[1] * h_scale * mask_h[1]; - -#else - - __half* 
x_data_h = reinterpret_cast<__half*>(&x_data); - float2 result[2]; - - result[0].x = (float)x_data_h[0] * scale * m[0]; - result[0].y = (float)x_data_h[1] * scale * m[1]; - result[1].x = (float)x_data_h[2] * scale * m[2]; - result[1].y = (float)x_data_h[3] * scale * m[3]; - - result_h[0] = __float22half2_rn(result[0]); - result_h[1] = __float22half2_rn(result[1]); - -#endif - x_cast[j] = result_f; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - for (int i = high_index; i < N; i++) { - Xdata[i] = __float2half((float)Xdata[i] * scale * mask[i]); - } - } -} - -template -void launch_dropout_grad(T* vals, uint8_t* mask, int total_count, float ratio, hipStream_t stream) -{ - assert(unroll_factor == 4); - - const float scale = 1. / (1. - ratio); - hipLaunchKernelGGL(( dropout_grad_kernel), dim3(DS_GET_BLOCKS(total_count / unroll_factor)), - dim3(DS_CUDA_NUM_THREADS), - 0, - stream, total_count, scale, vals, mask); -} - -template void launch_dropout_grad(float* vals, - uint8_t* mask, - int total_count, - float ratio, - hipStream_t stream); -template void launch_dropout_grad(__half* vals, - uint8_t* mask, - int total_count, - float ratio, - hipStream_t stream); - -__global__ void dropout_grad_kernel(const int N, - const float scale, - const float* Xdata, - float* out, - uint8_t* mask) -{ - CUDA_1D_KERNEL_LOOP(i, N) { out[i] = Xdata[i] * scale * mask[i]; } -} - -__global__ void dropout_grad_kernel(const int N, - const float scale, - const __half* Xdata, - __half* out, - uint8_t* mask) -{ - const float2* x_cast = reinterpret_cast(Xdata); - float2* out_cast = reinterpret_cast(out); - const uint32_t* mask_cast = reinterpret_cast(mask); - - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - float2 x_data = x_cast[j]; - uint32_t m_32 = mask_cast[j]; - uint8_t* m = (uint8_t*)&m_32; - - __half* x_data_h = 
reinterpret_cast<__half*>(&x_data); - float2 result[2]; - - result[0].x = (float)x_data_h[0] * scale * m[0]; - result[0].y = (float)x_data_h[1] * scale * m[1]; - result[1].x = (float)x_data_h[2] * scale * m[2]; - result[1].y = (float)x_data_h[3] * scale * m[3]; - - result_h[0] = __float22half2_rn(result[0]); - result_h[1] = __float22half2_rn(result[1]); - - out_cast[j] = result_f; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - for (int i = high_index; i < N; i++) { - out[i] = __float2half((float)Xdata[i] * scale * mask[i]); - } - } -} - -template -void launch_dropout_grad(T* vals_out, - const T* vals, - uint8_t* mask, - int total_count, - float ratio, - hipStream_t stream) -{ - assert(unroll_factor == 4); - - const float scale = 1. / (1. - ratio); - hipLaunchKernelGGL(( dropout_grad_kernel), dim3(DS_GET_BLOCKS(total_count / unroll_factor)), - dim3(DS_CUDA_NUM_THREADS), - 0, - stream, total_count, scale, vals, vals_out, mask); -} -template void launch_dropout_grad(float*, - const float* vals, - uint8_t* mask, - int total_count, - float ratio, - hipStream_t stream); -template void launch_dropout_grad(__half*, - const __half* vals, - uint8_t* mask, - int total_count, - float ratio, - hipStream_t stream); - -__global__ void dropout_kernel(const int N, - const int dim, - const float ratio, - const float* bias, - float* Xdata, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. 
- ratio); - int idx = blockIdx.x * blockDim.x + threadIdx.x; - int tid = threadIdx.x % (dim / unroll_factor); - - hiprandStatePhilox4_32_10_t state; - hiprand_init(seed.first, idx, seed.second, &state); - - float4* Xdata_cast = reinterpret_cast(Xdata); - uint32_t* mask_32 = reinterpret_cast(mask); - const float4* bias_cast = reinterpret_cast(bias); - - CUDA_1D_KERNEL_LOOP(j, N) - { - float4 rand = hiprand_uniform4(&state); - uint32_t m_32; - uint8_t* m = (uint8_t*)&m_32; - - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - float4 x_data = Xdata_cast[j]; - float4 b_data = bias_cast[j % (dim / unroll_factor)]; - - x_data.x += b_data.x; - x_data.y += b_data.y; - x_data.z += b_data.z; - x_data.w += b_data.w; - - x_data.x = x_data.x * scale * m[0]; - x_data.y = x_data.y * scale * m[1]; - x_data.z = x_data.z * scale * m[2]; - x_data.w = x_data.w * scale * m[3]; - - mask_32[j] = m_32; - Xdata_cast[j] = x_data; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - float4 rand = hiprand_uniform4(&state); - float* rand_data = &(rand.x); - int k = 0; - for (int i = high_index; i < N; i++) { - float x_data = Xdata[i] + bias[i % dim]; - uint8_t m = (uint8_t)(rand_data[k++] > ratio); - Xdata[i] = x_data * scale * m; - mask[i] = m; - } - } -} - -__global__ void dropout_kernel(const int N, - const int dim, - const float ratio, - const __half* bias, - __half* Xdata, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. 
- ratio); - int idx = blockIdx.x * blockDim.x + threadIdx.x; - int tid = threadIdx.x % (dim / unroll_factor); - - hiprandStatePhilox4_32_10_t state; - hiprand_init(seed.first, idx, seed.second, &state); - - float2* Xdata_cast = reinterpret_cast(Xdata); - uint32_t* mask_32 = reinterpret_cast(mask); - const float2* bias_cast = reinterpret_cast(bias); - - CUDA_1D_KERNEL_LOOP(j, N) - { - float4 rand = hiprand_uniform4(&state); - - float2 data_f; - __half2* data_h = reinterpret_cast<__half2*>(&data_f); - - float2 bias_f; - __half2* bias_h = reinterpret_cast<__half2*>(&bias_f); - - data_f = Xdata_cast[j]; - bias_f = bias_cast[j % (dim / unroll_factor)]; - - float2 data_h_0 = __half22float2(data_h[0]); - float2 data_h_1 = __half22float2(data_h[1]); - - float2 bias_h_0 = __half22float2(bias_h[0]); - float2 bias_h_1 = __half22float2(bias_h[1]); - - data_h_0.x += bias_h_0.x; - data_h_0.y += bias_h_0.y; - data_h_1.x += bias_h_1.x; - data_h_1.y += bias_h_1.y; - - uint32_t m_32; - uint8_t* m = (uint8_t*)&m_32; - - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - data_h_0.x = __float2half(data_h_0.x * scale * m[0]); - data_h_0.y = __float2half(data_h_0.y * scale * m[1]); - data_h_1.x = __float2half(data_h_1.x * scale * m[2]); - data_h_1.y = __float2half(data_h_1.y * scale * m[3]); - - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - - result_h[0] = __float22half2_rn(data_h_0); - result_h[1] = __float22half2_rn(data_h_1); - - Xdata_cast[j] = result_f; - mask_32[j] = m_32; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - float4 rand = hiprand_uniform4(&state); - float* rand_data = &(rand.x); - int k = 0; - for (int i = high_index; i < N; i++) { - float x_data = (float)Xdata[i] + (float)bias[i % dim]; - uint8_t m = (uint8_t)(rand_data[k++] > ratio); - Xdata[i] = 
__float2half(x_data * scale * m); - mask[i] = m; - } - } -} - -template -void launch_dropout(T* out, - const T* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - hipStream_t stream) -{ - assert(unroll_factor == 4); - - int total_count = batch * dim / unroll_factor; - - dim3 grid_dim = DS_GET_BLOCKS(total_count); - dim3 block_dim = DS_CUDA_NUM_THREADS; - - uint64_t inc = (batch * dim) / grid_dim.x / block_dim.x; - std::pair seed = Context::Instance().IncrementOffset(inc); - - hipLaunchKernelGGL(( dropout_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, - total_count, dim, ratio, bias, out, mask, seed); -} - -template void launch_dropout(float*, - const float* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - hipStream_t stream); -template void launch_dropout(__half*, - const __half* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - hipStream_t stream); - -__global__ void dropout_kernel(const int N, - const int dim, - const float ratio, - const float* input, - const float* residual, - const float* bias, - float* out, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. 
- ratio); - int idx = blockIdx.x * blockDim.x + threadIdx.x; - int tid = threadIdx.x % (dim / unroll_factor); - - hiprandStatePhilox4_32_10_t state; - hiprand_init(seed.first, idx, seed.second, &state); - - float4* out_cast = reinterpret_cast(out); - uint32_t* mask_32 = reinterpret_cast(mask); - - const float4* bias_cast = reinterpret_cast(bias); - const float4* residual_cast = reinterpret_cast(residual); - const float4* input_cast = reinterpret_cast(input); - - CUDA_1D_KERNEL_LOOP(j, N) - { - float4 rand = hiprand_uniform4(&state); - - uint32_t m_32; - uint8_t* m = (uint8_t*)&m_32; - - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - float4 out_data; - float4 b_data = bias_cast[j % (dim / unroll_factor)]; - float4 res_data = residual_cast[j]; - float4 inp_data = input_cast[j]; - - out_data.x = (b_data.x + inp_data.x); - out_data.y = (b_data.y + inp_data.y); - out_data.z = (b_data.z + inp_data.z); - out_data.w = (b_data.w + inp_data.w); - - out_data.x = out_data.x * scale * m[0]; - out_data.y = out_data.y * scale * m[1]; - out_data.z = out_data.z * scale * m[2]; - out_data.w = out_data.w * scale * m[3]; - - out_data.x += res_data.x; - out_data.y += res_data.y; - out_data.z += res_data.z; - out_data.w += res_data.w; - - mask_32[j] = m_32; - out_cast[j] = out_data; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - float4 rand = hiprand_uniform4(&state); - float* rand_data = &(rand.x); - int k = 0; - for (int i = high_index; i < N; i++) { - float x_data = input[i] + bias[i % dim]; - uint8_t m = (uint8_t)(rand_data[k++] > ratio); - x_data = x_data * scale * m; - x_data += residual[i]; - - out[i] = x_data; - mask[i] = m; - } - } -} - -__global__ void dropout_kernel(const int N, - const int dim, - const float ratio, - const __half* input, - const __half* residual, - const __half* 
bias, - __half* out, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. - ratio); - int idx = blockIdx.x * blockDim.x + threadIdx.x; - int tid = threadIdx.x % (dim / unroll_factor); - - hiprandStatePhilox4_32_10_t state; - hiprand_init(seed.first, idx, seed.second, &state); - - float2* out_cast = reinterpret_cast(out); - uint32_t* mask_32 = reinterpret_cast(mask); - - const float2* bias_cast = reinterpret_cast(bias); - const float2* residual_cast = reinterpret_cast(residual); - const float2* input_cast = reinterpret_cast(input); - - CUDA_1D_KERNEL_LOOP(j, N) - { - float4 rand = hiprand_uniform4(&state); - - float2 data_f; - __half2* data_h = reinterpret_cast<__half2*>(&data_f); - - float2 bias_f; - __half2* bias_h = reinterpret_cast<__half2*>(&bias_f); - - float2 residual_f; - __half2* residual_h = reinterpret_cast<__half2*>(&residual_f); - - float2 input_f; - __half2* input_h = reinterpret_cast<__half2*>(&input_f); - - bias_f = bias_cast[j % (dim / unroll_factor)]; - residual_f = residual_cast[j]; - input_f = input_cast[j]; - - float2 data_h_0 = __half22float2(data_h[0]); - float2 data_h_1 = __half22float2(data_h[1]); - - float2 bias_h_0 = __half22float2(bias_h[0]); - float2 bias_h_1 = __half22float2(bias_h[1]); - - float2 residual_h_0 = __half22float2(residual_h[0]); - float2 residual_h_1 = __half22float2(residual_h[1]); - - float2 input_h_0 = __half22float2(input_h[0]); - float2 input_h_1 = __half22float2(input_h[1]); - - data_h_0.x = (bias_h_0.x + input_h_0.x); - data_h_0.y = (bias_h_0.y + input_h_0.y); - data_h_1.x = (bias_h_1.x + input_h_1.x); - data_h_1.y = (bias_h_1.y + input_h_1.y); - - uint32_t m_32; - uint8_t* m = (uint8_t*)&m_32; - - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - data_h_0.x = __float2half(data_h_0.x * scale * m[0]); - data_h_0.y = __float2half(data_h_0.y * scale * m[1]); - data_h_1.x = __float2half(data_h_1.x * scale * 
m[2]); - data_h_1.y = __float2half(data_h_1.y * scale * m[3]); - - data_h_0.x += residual_h_0.x; - data_h_0.y += residual_h_0.y; - data_h_1.x += residual_h_1.x; - data_h_1.y += residual_h_1.y; - - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - - result_h[0] = __float22half2_rn(data_h_0); - result_h[1] = __float22half2_rn(data_h_1); - - out_cast[j] = result_f; - mask_32[j] = m_32; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - float4 rand = hiprand_uniform4(&state); - float* rand_data = &(rand.x); - int k = 0; - for (int i = high_index; i < N; i++) { - float x_data = (float)input[i] + (float)bias[i % dim]; - uint8_t m = (uint8_t)(rand_data[k++] > ratio); - x_data = x_data * scale * m; - x_data += (float)residual[i]; - - out[i] = __float2half(x_data); - mask[i] = m; - } - } -} - -template -void launch_dropout(T* out, - const T* input, - const T* residual, - const T* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - hipStream_t stream) -{ - assert(unroll_factor == 4); - - int total_count = batch * dim / unroll_factor; - dim3 grid_dim = DS_GET_BLOCKS(total_count); - dim3 block_dim = DS_CUDA_NUM_THREADS; - - uint64_t inc = (batch * dim) / grid_dim.x / block_dim.x; - std::pair seed = Context::Instance().IncrementOffset(inc); - - hipLaunchKernelGGL(( dropout_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, - total_count, dim, ratio, input, residual, bias, out, mask, seed); -} - -template void launch_dropout(float*, - const float*, - const float* residual, - const float* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - hipStream_t stream); -template void launch_dropout(__half*, - const __half*, - const __half* residual, - const __half* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - hipStream_t stream); diff --git a/deepspeed/ops/csrc/transformer/ds_transformer_cuda.cpp 
b/deepspeed/ops/csrc/transformer/ds_transformer_cuda.cpp deleted file mode 100644 index 0e86322..0000000 --- a/deepspeed/ops/csrc/transformer/ds_transformer_cuda.cpp +++ /dev/null @@ -1,1051 +0,0 @@ -#include - -#include -#include -#include -#include -#include -#include -#include "Timer.h" -#include "context.h" -#include "cublas_wrappers.h" -#include "custom_cuda_layers.h" -#include "ds_transformer_cuda.h" - -static std::unordered_map> s_transformer_layers; - -const int init_seq_length = 128; - -// C++ interface - -template -unsigned get_workspace_size(unsigned maxBatchSize, - unsigned seq_len, - unsigned hidden_size, - unsigned intermediate_size, - unsigned heads, - bool training, - bool gelu_checkpoint) -{ - unsigned workSpacesize = 4 * (size_t(maxBatchSize) * seq_len * hidden_size); - if (training) { - workSpacesize += 2 * (size_t(maxBatchSize) * seq_len * hidden_size); - workSpacesize += ((std::max)((size_t(maxBatchSize) * seq_len * intermediate_size), - 2 * (size_t(maxBatchSize) * heads * seq_len * seq_len))); - if (gelu_checkpoint) - workSpacesize += 2 * (size_t(maxBatchSize) * seq_len * intermediate_size); - } - return workSpacesize; // * sizeof(T); -} - -// NOTE: AT_ASSERT has become AT_CHECK on master after 0.4. 
-#define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor") -#define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous") -#define CHECK_INPUT(x) \ - CHECK_CUDA(x); \ - CHECK_CONTIGUOUS(x) - -template -BertTransformerLayer::BertTransformerLayer(unsigned layer_id, - unsigned batch_size, - unsigned hidden_size, - unsigned num_heads, - unsigned intermediate_size, - unsigned seq_length, - float attn_prob_dropout_ratio, - float hidden_output_dropout_ratio, - float layer_norm_eps, - bool pre_or_postLayerNorm, - const std::vector>& gemm_algos, - bool attn_dropout_checkpoint, - bool normalize_invertible, - bool gelu_checkpoint, - bool stochastic_mode) - : _layer_id(layer_id), - _batch_size(batch_size), - _hidden_size(hidden_size), - _heads(num_heads), - _intermediate_size(intermediate_size), - _seq_length(seq_length), - _training(true), - _pre_or_postLayerNorm(pre_or_postLayerNorm), - _attn_dropout_checkpoint(attn_dropout_checkpoint), - _normalize_invertible(normalize_invertible), - _gelu_checkpoint(gelu_checkpoint), - _stochastic_mode(stochastic_mode), - _stream(Context::Instance().GetCurrentStream()), - _cublasHandle(Context::Instance().GetCublasHandle()), - _qkv_linear(typename FeedForward::Config(batch_size * seq_length, - 3 * hidden_size, - hidden_size, - gemm_algos[0])), - _attn_out_linear(typename FeedForward::Config(batch_size * seq_length, - hidden_size, - hidden_size, - gemm_algos[0])), - _attn_layer_norm(typename Normalize_Layer::Config(batch_size, - seq_length, - hidden_size, - layer_norm_eps, - true, - !normalize_invertible)), - _layer_norm(typename Normalize_Layer::Config(batch_size, - seq_length, - hidden_size, - layer_norm_eps, - true, - !normalize_invertible)), - _ff1(typename FeedForward::Config(batch_size * seq_length, - _intermediate_size, - hidden_size, - gemm_algos[1])), - _ff2(typename FeedForward::Config(batch_size * seq_length, - hidden_size, - _intermediate_size, - gemm_algos[2])), - _softmax(typename 
Softmax::Config(batch_size, num_heads, seq_length)), - _gelu(typename Gelu::Config(_intermediate_size)), - _attn_prob_dropout(typename Dropout::Config(attn_prob_dropout_ratio, _seq_length)), - _attn_output_dropout(typename Dropout::Config(hidden_output_dropout_ratio, _hidden_size)), - _layer_output_dropout(typename Dropout::Config(hidden_output_dropout_ratio, _hidden_size)), - _attn_scores(typename StridedBatchGemm::Config(_batch_size * _heads, - _seq_length, - _seq_length, - _hidden_size / _heads, - //(T(1.0) / T(sqrt(_hidden_size / _heads))), - //aiss debug 0506 - (T(1.0 / (sqrt(_hidden_size / _heads)))), - T(0.0), - CUBLAS_OP_T, - CUBLAS_OP_N, - gemm_algos[3])), - _attn_context(typename StridedBatchGemm::Config(_batch_size * _heads, - _hidden_size / _heads, - _seq_length, - _seq_length, - T(1.0), - T(0.0), - CUBLAS_OP_N, - CUBLAS_OP_N, - gemm_algos[4])) -{ - assert(_hidden_size % _heads == 0); - - Initialize(); -} - -template -BertTransformerLayer::~BertTransformerLayer() -{ -} - -template -void BertTransformerLayer::Initialize() -{ -#ifndef __HIP_PLATFORM_HCC__ - if (std::is_same::value) cublasSetMathMode(_cublasHandle, CUBLAS_TENSOR_OP_MATH); -#endif -} - -template -void BertTransformerLayer::Forward(unsigned bsz, - const T* input_ptr, - const T* input_mask_ptr, - const T* attn_qkvw_ptr, - const T* attn_qkvb_ptr, - const T* attn_ow_ptr, - const T* attn_ob_ptr, - const T* attn_nw_ptr, - const T* attn_nb_ptr, - const T* inter_w_ptr, - const T* inter_b_ptr, - const T* output_w_ptr, - const T* output_b_ptr, - const T* norm_w_ptr, - const T* norm_b_ptr, - T* out_ptr, - T* inp_norm_ptr, - T* q_tf_ptr, - T* k_tf_ptr, - T* v_tf_ptr, - T* soft_out_ptr, - T* ctx_bufB_ptr, - T* attn_o_inp_ptr, - T* add_res_ptr, - T* ff1_inp_ptr, - T* gelu_inp_ptr, - T* ff2_inp_ptr) -{ - cublasSetStream(_cublasHandle, _stream); - - if (!_stochastic_mode) cudaStreamSynchronize(_stream); - - T* workspace = static_cast(Context::Instance().GetWorkSpace()); - size_t small_buf_size = bsz * 
_seq_length * _hidden_size; - T* buf_0 = workspace; - T* buf_1 = buf_0 + small_buf_size; - T* buf_2 = buf_1; - - if (_normalize_invertible) { - add_res_ptr = buf_1 + 3 * small_buf_size; - buf_2 = add_res_ptr; - } - if (_gelu_checkpoint) buf_2 += small_buf_size; - if (_attn_dropout_checkpoint) - ctx_bufB_ptr = - (_gelu_checkpoint ? (buf_2 + (_intermediate_size / _hidden_size) * small_buf_size) - : (buf_1 + 4 * small_buf_size)); - - int bsz_seq = bsz * _seq_length; - - if (_pre_or_postLayerNorm) { - if (_layer_norm.UseMean()) - _layer_norm.ForwardCheckpoint( - bsz_seq, inp_norm_ptr, input_ptr, norm_w_ptr, norm_b_ptr, _stream, true); - - else - _layer_norm.Forward( - bsz_seq, inp_norm_ptr, input_ptr, norm_w_ptr, norm_b_ptr, _stream, true); - } - - if (_pre_or_postLayerNorm) - _qkv_linear.Forward(bsz_seq, inp_norm_ptr, attn_qkvw_ptr, buf_0, _cublasHandle); - else - _qkv_linear.Forward(bsz_seq, input_ptr, attn_qkvw_ptr, buf_0, _cublasHandle); - - launch_bias_add_transform_0213( - q_tf_ptr, buf_0, attn_qkvb_ptr, bsz, _seq_length, _hidden_size, _heads, _stream, 3); - - int bsz_heads = bsz * _heads; - - // attention scores - _attn_scores.Forward(bsz_heads, soft_out_ptr, k_tf_ptr, q_tf_ptr, _cublasHandle); - - // Softmax + Mask - _softmax.Forward(bsz, soft_out_ptr, input_mask_ptr, _stream); - - // attn prob dropout. - _attn_prob_dropout.Forward(bsz_heads * _seq_length, ctx_bufB_ptr, soft_out_ptr, _stream); - - // attention context - _attn_context.Forward(bsz_heads, buf_1, v_tf_ptr, ctx_bufB_ptr, _cublasHandle); - - launch_transform4d_0213( - attn_o_inp_ptr, buf_1, bsz, _heads, _seq_length, _hidden_size, _stream, 1); - - if (_pre_or_postLayerNorm) - _attn_out_linear.Forward(bsz_seq, attn_o_inp_ptr, attn_ow_ptr, buf_1, _cublasHandle); - else - _attn_out_linear.Forward(bsz_seq, attn_o_inp_ptr, attn_ow_ptr, ff1_inp_ptr, _cublasHandle); - - // attn output dropout. 
- if (_pre_or_postLayerNorm) - _attn_output_dropout.ForwardWithBias( - bsz_seq, add_res_ptr, buf_1, input_ptr, attn_ob_ptr, _stream); - else - _attn_output_dropout.ForwardWithBias( - bsz_seq, add_res_ptr, ff1_inp_ptr, input_ptr, attn_ob_ptr, _stream); - - if (_pre_or_postLayerNorm) { - if (_attn_layer_norm.UseMean()) - _attn_layer_norm.ForwardCheckpoint( - bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true); - else - _attn_layer_norm.Forward( - bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true); - } else { - if (_attn_layer_norm.UseMean()) - _attn_layer_norm.ForwardCheckpoint( - bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true); - else - _attn_layer_norm.Forward( - bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true); - } - - _ff1.Forward(bsz_seq, - ff1_inp_ptr, - inter_w_ptr, - (_gelu_checkpoint ? ff2_inp_ptr : gelu_inp_ptr), - _cublasHandle); - - _gelu.ForwardWithBiasAdd(bsz_seq, - (_gelu_checkpoint ? ff2_inp_ptr : gelu_inp_ptr), - inter_b_ptr, - (_gelu_checkpoint ? buf_2 : ff2_inp_ptr), - _stream); - - _ff2.Forward( - bsz_seq, (_gelu_checkpoint ? buf_2 : ff2_inp_ptr), output_w_ptr, out_ptr, _cublasHandle); - - // layer output dropout. 
- if (_pre_or_postLayerNorm) - _layer_output_dropout.ForwardWithBias( - bsz_seq, out_ptr, out_ptr, add_res_ptr, output_b_ptr, _stream); - else - _layer_output_dropout.ForwardWithBias( - bsz_seq, inp_norm_ptr, out_ptr, ff1_inp_ptr, output_b_ptr, _stream); - - if (!_pre_or_postLayerNorm) { - if (_layer_norm.UseMean()) - _layer_norm.ForwardCheckpoint( - bsz_seq, out_ptr, inp_norm_ptr, norm_w_ptr, norm_b_ptr, _stream, true); - else - _layer_norm.Forward( - bsz_seq, out_ptr, inp_norm_ptr, norm_w_ptr, norm_b_ptr, _stream, true); - } -} - -template -void BertTransformerLayer::Backward(unsigned bsz, - const T* grad_output_ptr, - const T* input_ptr, - const T* output_ptr, - const T* inp_norm_ptr, - const T* q_tf_ptr, - const T* k_tf_ptr, - const T* v_tf_ptr, - const T* soft_out_ptr, - const T* ctx_bufB_ptr, - const T* attn_o_inp_ptr, - const T* add_res_ptr, - const T* ff1_inp_ptr, - const T* gelu_inp_ptr, - const T* ff2_inp_ptr, - const T* input_mask_ptr, - const T* attn_qkvw_ptr, - const T* attn_ow_ptr, - const T* attn_nw_ptr, - const T* attn_nb_ptr, - const T* inter_w_ptr, - const T* inter_b_ptr, - const T* output_w_ptr, - const T* norm_w_ptr, - const T* norm_b_ptr, - - T* grad_input_ptr, - T* grad_attn_qkvw_ptr, - T* grad_attn_qkvb_ptr, - T* grad_attn_ow_ptr, - T* grad_attn_ob_ptr, - T* grad_attn_nw_ptr, - T* grad_attn_nb_ptr, - T* grad_inter_w_ptr, - T* grad_inter_b_ptr, - T* grad_output_w_ptr, - T* grad_output_b_ptr, - T* grad_norm_w_ptr, - T* grad_norm_b_ptr) -{ - cublasSetStream(_cublasHandle, _stream); - - if (!_stochastic_mode) cudaStreamSynchronize(_stream); - - T* workspace = static_cast(Context::Instance().GetWorkSpace()); - size_t small_buf_size = bsz * _seq_length * _hidden_size; - T* buf_0 = workspace; - T* buf_1 = buf_0 + small_buf_size; - T* buf_2 = buf_1 + small_buf_size; - T* buf_3 = buf_2 + small_buf_size; - - T* ff2_buf = (_gelu_checkpoint ? 
buf_3 + (bsz * _seq_length * _intermediate_size) - : buf_3 + small_buf_size); - T* ctx_bufB_ptr_recomp = ff2_buf + (_seq_length * _seq_length * bsz * _heads); - - cudaStream_t streams[2] = {_stream, _stream}; - - int bsz_seq = bsz * _seq_length; - int bsz_heads = bsz * _heads; - - if (!_pre_or_postLayerNorm) { - if (_layer_norm.UseMean()) - _layer_norm.Backward(bsz_seq, - grad_output_ptr, - norm_w_ptr, - grad_norm_w_ptr, - grad_norm_b_ptr, - streams, - buf_1, - inp_norm_ptr); - - else - _layer_norm.Backward(bsz_seq, - grad_output_ptr, - norm_w_ptr, - norm_b_ptr, - grad_norm_w_ptr, - grad_norm_b_ptr, - streams, - buf_1, - output_ptr); - } - - if (_pre_or_postLayerNorm) - _layer_output_dropout.Backward(bsz_seq, buf_0, grad_output_ptr, _stream); - else - _layer_output_dropout.Backward(bsz_seq, buf_0, buf_1, _stream); - - const T* layer_dropout_buf = _layer_output_dropout.HasDropout() - ? buf_0 - : (_pre_or_postLayerNorm ? grad_output_ptr : buf_1); - - if (_gelu_checkpoint) - _gelu.ForwardWithBiasAdd(bsz_seq, ff2_inp_ptr, inter_b_ptr, buf_2, _stream); - _ff2.Backward(bsz_seq, - layer_dropout_buf, - (_gelu_checkpoint ? buf_2 : ff2_inp_ptr), - output_w_ptr, - grad_output_w_ptr, - grad_output_b_ptr, - _cublasHandle, - _stream, - ff2_buf); - - _gelu.Backward( - bsz_seq, ff2_buf, (_gelu_checkpoint ? 
ff2_inp_ptr : gelu_inp_ptr), inter_b_ptr, _stream); - - _ff1.Backward(bsz_seq, - ff2_buf, - ff1_inp_ptr, - inter_w_ptr, - grad_inter_w_ptr, - grad_inter_b_ptr, - _cublasHandle, - _stream, - buf_3); - - if (!_pre_or_postLayerNorm) - launch_fused_add2(buf_2, buf_3, buf_1, bsz, _seq_length, _hidden_size, _stream); - - if (_pre_or_postLayerNorm) { - if (_attn_layer_norm.UseMean()) - _attn_layer_norm.BackwardFusedAdd(bsz_seq, - buf_3, - grad_output_ptr, - attn_nw_ptr, - grad_attn_nw_ptr, - grad_attn_nb_ptr, - streams, - buf_0, - add_res_ptr); - - else - _attn_layer_norm.BackwardFusedAdd(bsz_seq, - buf_3, - grad_output_ptr, - attn_nw_ptr, - attn_nb_ptr, - grad_attn_nw_ptr, - grad_attn_nb_ptr, - streams, - buf_0, - ff1_inp_ptr); - } else { - if (_attn_layer_norm.UseMean()) - _attn_layer_norm.Backward(bsz_seq, - buf_2, - attn_nw_ptr, - grad_attn_nw_ptr, - grad_attn_nb_ptr, - streams, - buf_0, - add_res_ptr); - - else - _attn_layer_norm.Backward(bsz_seq, - buf_2, - attn_nw_ptr, - attn_nb_ptr, - grad_attn_nw_ptr, - grad_attn_nb_ptr, - streams, - buf_0, - ff1_inp_ptr); - } - - _attn_output_dropout.Backward(bsz_seq, buf_2, buf_0, _stream); - - T* attn_output_dropout_buf = _attn_output_dropout.HasDropout() ? buf_2 : buf_0; - - _attn_out_linear.Backward(bsz_seq, - attn_output_dropout_buf, - attn_o_inp_ptr, - attn_ow_ptr, - grad_attn_ow_ptr, - grad_attn_ob_ptr, - _cublasHandle, - _stream, - buf_1); - - launch_transform_0213(buf_2, buf_1, bsz, _seq_length, _hidden_size, _heads, _stream); - - if (_attn_prob_dropout.HasDropout()) { - if (_attn_dropout_checkpoint) - _attn_prob_dropout.Forward( - bsz_heads * _seq_length, ctx_bufB_ptr_recomp, soft_out_ptr, _stream, true); - - _attn_context.Backward(bsz_heads, - buf_2, - v_tf_ptr, - (_attn_dropout_checkpoint ? 
ctx_bufB_ptr_recomp : ctx_bufB_ptr), - _cublasHandle, - buf_3, - ff2_buf); - } else - _attn_context.Backward( - bsz_heads, buf_2, v_tf_ptr, soft_out_ptr, _cublasHandle, buf_3, ff2_buf); - - _attn_prob_dropout.Backward(bsz_heads * _seq_length, ff2_buf, _stream); - - _softmax.Backward(bsz, ff2_buf, soft_out_ptr, _stream); - - _attn_scores.Backward(bsz_heads, ff2_buf, k_tf_ptr, q_tf_ptr, _cublasHandle, buf_2, buf_1); - - launch_transform4d_0213(ff2_buf, buf_1, bsz, _heads, _seq_length, _hidden_size, _stream, 3); - - if (_pre_or_postLayerNorm) - _qkv_linear.Backward(bsz_seq, - ff2_buf, - inp_norm_ptr, - attn_qkvw_ptr, - grad_attn_qkvw_ptr, - grad_attn_qkvb_ptr, - _cublasHandle, - _stream, - buf_2); - else - _qkv_linear.Backward(bsz_seq, - ff2_buf, - input_ptr, - attn_qkvw_ptr, - grad_attn_qkvw_ptr, - grad_attn_qkvb_ptr, - _cublasHandle, - _stream, - buf_2); - - if (_pre_or_postLayerNorm) { - if (_layer_norm.UseMean()) - _layer_norm.BackwardFusedAdd(bsz_seq, - buf_2, - buf_0, - norm_w_ptr, - grad_norm_w_ptr, - grad_norm_b_ptr, - streams, - grad_input_ptr, - input_ptr); - - else - _layer_norm.BackwardFusedAdd(bsz_seq, - buf_2, - buf_0, - norm_w_ptr, - norm_b_ptr, - grad_norm_w_ptr, - grad_norm_b_ptr, - streams, - grad_input_ptr, - inp_norm_ptr); - } else - launch_fused_add2(grad_input_ptr, buf_2, buf_0, bsz, _seq_length, _hidden_size, _stream); -} - -template -void BertTransformerLayer::SetTrainingMode(bool training) -{ - // Dropout will be skipped when not in training model. 
- _attn_prob_dropout.SetTrainingMode(training); - _attn_output_dropout.SetTrainingMode(training); - _layer_output_dropout.SetTrainingMode(training); -} - -template -void BertTransformerLayer::SetIntermediateBuffers(uint8_t* attn_prob_dropout_mask_ptr, - uint8_t* attn_output_dropout_mask_ptr, - uint8_t* layer_output_dropout_mask_ptr, - T* attn_layer_norm_var, - T* attn_layer_norm_mean, - T* layer_norm_var, - T* layer_norm_mean) -{ - _attn_prob_dropout.SetMask(attn_prob_dropout_mask_ptr); - _attn_output_dropout.SetMask(attn_output_dropout_mask_ptr); - _layer_output_dropout.SetMask(layer_output_dropout_mask_ptr); - - _attn_layer_norm.SetVar(attn_layer_norm_var); - _attn_layer_norm.SetMean(attn_layer_norm_mean); - _layer_norm.SetVar(layer_norm_var); - _layer_norm.SetMean(layer_norm_mean); -} - -template -void BertTransformerLayer::SetSeqLength(unsigned seq_len) -{ - _seq_length = seq_len; - - _softmax.SetSeqLength(_seq_length); - _attn_prob_dropout.SetDimension(_seq_length); - _attn_scores.SetConfig(_seq_length, _seq_length, _hidden_size / _heads); - _attn_context.SetConfig(_hidden_size / _heads, _seq_length, _seq_length); -} - -template -int create_transformer_layer(unsigned layer_id, - unsigned batch_size, - unsigned hidden_dim, - unsigned num_heads, - unsigned intermediate_size, - float attn_dropout_ratio, - float hidden_dropout_ratio, - float layer_norm_eps, - int seed, - bool pre_or_postLayerNorm, - bool test_gemm, - bool attn_dropout_checkpoint, - bool normalize_invertible, - bool gelu_checkpoint, - bool stochastic_mode) -{ - Context::Instance().SetSeed(seed); - Context::Instance().TestGemmFP16( - test_gemm, batch_size, init_seq_length, num_heads, hidden_dim / num_heads); - - auto layer = std::make_shared>(layer_id, - batch_size, - hidden_dim, - num_heads, - intermediate_size, - init_seq_length, - attn_dropout_ratio, - hidden_dropout_ratio, - layer_norm_eps, - pre_or_postLayerNorm, - Context::Instance().GetGemmAlgos(), - attn_dropout_checkpoint, - 
normalize_invertible, - gelu_checkpoint, - stochastic_mode); - - s_transformer_layers[layer_id] = layer; - - std::string dtype = (std::is_same::value) ? "half" : "float"; - - std::cout << "layer #" << layer_id << " is created with date type [" << dtype << "]." - << std::endl; - - return 0; -} - -template -std::vector ds_transformer_forward(unsigned layer_id, - const torch::Tensor& input, - const torch::Tensor& input_mask, - const torch::Tensor& attn_qkvw, - const torch::Tensor& attn_qkvb, - const torch::Tensor& attn_ow, - const torch::Tensor& attn_ob, - const torch::Tensor& attn_nw, - const torch::Tensor& attn_nb, - const torch::Tensor& inter_w, - const torch::Tensor& inter_b, - const torch::Tensor& output_w, - const torch::Tensor& output_b, - const torch::Tensor& norm_w, - const torch::Tensor& norm_b, - bool training_mode, - bool prelayernorm, - bool attn_dropout_checkpoint, - bool normalize_invertible, - bool gelu_checkpoint) -{ - CHECK_INPUT(input); - CHECK_INPUT(input_mask); - CHECK_INPUT(attn_qkvw); - CHECK_INPUT(attn_qkvb); - CHECK_INPUT(attn_ow); - CHECK_INPUT(attn_ob); - CHECK_INPUT(attn_nw); - CHECK_INPUT(attn_nb); - CHECK_INPUT(inter_w); - CHECK_INPUT(inter_b); - CHECK_INPUT(output_w); - CHECK_INPUT(output_b); - CHECK_INPUT(norm_w); - CHECK_INPUT(norm_b); - - unsigned bsz = input.size(0); - - const T* input_ptr = (const T*)input.data_ptr(); - const T* input_mask_ptr = (const T*)input_mask.data_ptr(); - const T* attn_qkvw_ptr = (const T*)attn_qkvw.data_ptr(); - const T* attn_qkvb_ptr = (const T*)attn_qkvb.data_ptr(); - const T* attn_ow_ptr = (const T*)attn_ow.data_ptr(); - const T* attn_ob_ptr = (const T*)attn_ob.data_ptr(); - const T* attn_nw_ptr = (const T*)attn_nw.data_ptr(); - const T* attn_nb_ptr = (const T*)attn_nb.data_ptr(); - const T* inter_w_ptr = (const T*)inter_w.data_ptr(); - const T* inter_b_ptr = (const T*)inter_b.data_ptr(); - const T* output_w_ptr = (const T*)output_w.data_ptr(); - const T* output_b_ptr = (const T*)output_b.data_ptr(); - 
const T* norm_w_ptr = (const T*)norm_w.data_ptr(); - const T* norm_b_ptr = (const T*)norm_b.data_ptr(); - - auto output = torch::empty_like(input); - T* out_ptr = (T*)output.data_ptr(); - - auto options = torch::TensorOptions() - .dtype(input.options().dtype()) - .layout(torch::kStrided) - .device(torch::kCUDA) - .requires_grad(true); - - auto uint8_options = torch::TensorOptions() - .dtype(torch::kInt8) - .layout(torch::kStrided) - .device(torch::kCUDA) - .requires_grad(false); - - std::shared_ptr> layer = - std::static_pointer_cast>(s_transformer_layers[layer_id]); - - unsigned seq_len = layer->GetSeqLength(); - if (input.size(1) != seq_len) { - seq_len = input.size(1); - layer->SetSeqLength(seq_len); - } - - auto workspace = torch::empty({get_workspace_size(bsz, - seq_len, - layer->GetHiddenSize(), - layer->GetIntermediateSize(), - layer->GetNumHeads(), - layer->IsTrainingMode(), - layer->GeluCheckpoint())}, - options); - Context::Instance().SetWorkSpace((T*)workspace.data_ptr()); - - auto inp_norm = ((prelayernorm || !normalize_invertible) ? torch::empty_like(input) : output); - auto add_res = (normalize_invertible ? 
inp_norm : torch::empty_like(input)); - auto attn_o_inp = torch::empty_like(input); - auto qkv_tf = torch::empty({(bsz * seq_len), output_w.size(0) * 3}, options); - - auto attn_prob_dropout_mask = - torch::empty({(bsz * layer->GetNumHeads() * seq_len), seq_len}, uint8_options); - auto attn_output_dropout_mask = - torch::empty({(bsz * seq_len), layer->GetHiddenSize()}, uint8_options); - auto layer_output_dropout_mask = - torch::empty({(bsz * seq_len), layer->GetHiddenSize()}, uint8_options); - - auto attn_layer_norm_var = torch::empty({(bsz * seq_len)}, options); - auto attn_layer_norm_mean = torch::empty({(bsz * seq_len)}, options); - auto layer_norm_var = torch::empty({(bsz * seq_len)}, options); - auto layer_norm_mean = torch::empty({(bsz * seq_len)}, options); - - T* inp_norm_ptr = (T*)inp_norm.data_ptr(); - T* add_res_ptr = (T*)add_res.data_ptr(); - T* q_tf_ptr = (T*)qkv_tf.data_ptr(); - T* k_tf_ptr = q_tf_ptr + (bsz * seq_len * output_w.size(0)); //(T*)k_tf.data_ptr(); - T* v_tf_ptr = k_tf_ptr + (bsz * seq_len * output_w.size(0)); //(T*)v_tf.data_ptr(); - T* attn_o_inp_ptr = (T*)attn_o_inp.data_ptr(); - - torch::Tensor ff2_inp = torch::empty({(bsz * seq_len), output_w.size(1)}, options); - torch::Tensor gelu_inp = - (gelu_checkpoint ? ff2_inp : torch::empty({(bsz * seq_len), output_w.size(1)}, options)); - auto ff1_inp = torch::empty_like(input); - T* ff2_inp_ptr = (T*)ff2_inp.data_ptr(); - T* gelu_inp_ptr = (T*)gelu_inp.data_ptr(); - T* ff1_inp_ptr = (T*)ff1_inp.data_ptr(); - - torch::Tensor soft_out = - torch::empty({(bsz * layer->GetNumHeads() * seq_len), seq_len}, options); - torch::Tensor ctx_bufB = - (attn_dropout_checkpoint - ? 
soft_out - : torch::empty({(bsz * layer->GetNumHeads() * seq_len), seq_len}, options)); - T* soft_out_ptr = (T*)soft_out.data_ptr(); - T* ctx_bufB_ptr = (T*)ctx_bufB.data_ptr(); - - layer->SetTrainingMode(training_mode); - layer->SetIntermediateBuffers((uint8_t*)attn_prob_dropout_mask.data_ptr(), - (uint8_t*)attn_output_dropout_mask.data_ptr(), - (uint8_t*)layer_output_dropout_mask.data_ptr(), - (T*)attn_layer_norm_var.data_ptr(), - (T*)attn_layer_norm_mean.data_ptr(), - (T*)layer_norm_var.data_ptr(), - (T*)layer_norm_mean.data_ptr()); - - layer->Forward(bsz, - input_ptr, - input_mask_ptr, - attn_qkvw_ptr, - attn_qkvb_ptr, - attn_ow_ptr, - attn_ob_ptr, - attn_nw_ptr, - attn_nb_ptr, - inter_w_ptr, - inter_b_ptr, - output_w_ptr, - output_b_ptr, - norm_w_ptr, - norm_b_ptr, - out_ptr, - inp_norm_ptr, - q_tf_ptr, - k_tf_ptr, - v_tf_ptr, - soft_out_ptr, - ctx_bufB_ptr, - attn_o_inp_ptr, - add_res_ptr, - ff1_inp_ptr, - gelu_inp_ptr, - ff2_inp_ptr); - - return {output, - inp_norm, - qkv_tf, - soft_out, - ctx_bufB, - attn_o_inp, - add_res, - ff1_inp, - gelu_inp, - ff2_inp, - attn_prob_dropout_mask, - attn_output_dropout_mask, - layer_output_dropout_mask, - attn_layer_norm_var, - attn_layer_norm_mean, - layer_norm_var, - layer_norm_mean}; -} - -template -std::vector ds_transformer_backward(unsigned layer_id, - const torch::Tensor& grad_output, - const torch::Tensor& output, - const torch::Tensor& inp_norm, - const torch::Tensor& qkv_tf, - const torch::Tensor& soft_out, - const torch::Tensor& ctx_bufB, - const torch::Tensor& attn_o_inp, - const torch::Tensor& add_res, - const torch::Tensor& ff1_inp, - const torch::Tensor& gelu_inp, - const torch::Tensor& ff2_inp, - const torch::Tensor& attn_prob_dropout_mask, - const torch::Tensor& attn_output_dropout_mask, - const torch::Tensor& layer_output_dropout_mask, - const torch::Tensor& attn_layer_norm_var, - const torch::Tensor& attn_layer_norm_mean, - const torch::Tensor& layer_norm_var, - const torch::Tensor& layer_norm_mean, - 
const torch::Tensor& input, - const torch::Tensor& input_mask, - const torch::Tensor& attn_qkvw, - const torch::Tensor& attn_qkvb, - const torch::Tensor& attn_ow, - const torch::Tensor& attn_ob, - const torch::Tensor& attn_nw, - const torch::Tensor& attn_nb, - const torch::Tensor& inter_w, - const torch::Tensor& inter_b, - const torch::Tensor& output_w, - const torch::Tensor& output_b, - const torch::Tensor& norm_w, - const torch::Tensor& norm_b) -{ - auto g_output = grad_output.contiguous(); - CHECK_INPUT(g_output); - CHECK_INPUT(output); - CHECK_INPUT(inp_norm); - CHECK_INPUT(qkv_tf); - CHECK_INPUT(add_res); - CHECK_INPUT(soft_out); - CHECK_INPUT(ctx_bufB); - CHECK_INPUT(attn_o_inp); - CHECK_INPUT(ff1_inp); - CHECK_INPUT(gelu_inp); - CHECK_INPUT(ff2_inp); - CHECK_INPUT(input); - CHECK_INPUT(input_mask); - CHECK_INPUT(attn_qkvw); - CHECK_INPUT(attn_qkvb); - CHECK_INPUT(attn_ow); - CHECK_INPUT(attn_ob); - CHECK_INPUT(attn_nw); - CHECK_INPUT(attn_nb); - CHECK_INPUT(inter_w); - CHECK_INPUT(inter_b); - CHECK_INPUT(output_w); - CHECK_INPUT(output_b); - CHECK_INPUT(norm_w); - CHECK_INPUT(norm_b); - - unsigned bsz = g_output.size(0); - - std::shared_ptr> layer = - std::static_pointer_cast>(s_transformer_layers[layer_id]); - - unsigned seq_len = layer->GetSeqLength(); - if (g_output.size(1) != seq_len) { - seq_len = g_output.size(1); - layer->SetSeqLength(seq_len); - } - auto options = torch::TensorOptions() - .dtype(g_output.options().dtype()) - .layout(torch::kStrided) - .device(torch::kCUDA) - .requires_grad(true); - auto workspace = torch::empty({get_workspace_size(bsz, - seq_len, - layer->GetHiddenSize(), - layer->GetIntermediateSize(), - layer->GetNumHeads(), - layer->IsTrainingMode(), - layer->GeluCheckpoint())}, - options); - Context::Instance().SetWorkSpace((T*)workspace.data_ptr()); - - auto grad_input = torch::empty_like(input); - auto grad_attn_qkvw = torch::empty_like(attn_qkvw); - auto grad_attn_qkvb = torch::empty_like(attn_qkvb); - auto grad_attn_ow = 
torch::empty_like(attn_ow); - auto grad_attn_ob = torch::empty_like(attn_ob); - auto grad_attn_nw = torch::empty_like(attn_nw); - auto grad_attn_nb = torch::empty_like(attn_nb); - auto grad_inter_w = torch::empty_like(inter_w); - auto grad_inter_b = torch::empty_like(inter_b); - auto grad_output_w = torch::empty_like(output_w); - auto grad_output_b = torch::empty_like(output_b); - auto grad_norm_w = torch::empty_like(norm_w); - auto grad_norm_b = torch::empty_like(norm_b); - - // inputs. - const T* grad_output_ptr = (const T*)g_output.data_ptr(); - const T* input_ptr = (const T*)input.data_ptr(); - const T* output_ptr = (const T*)output.data_ptr(); - const T* inp_norm_ptr = (const T*)inp_norm.data_ptr(); - const T* q_tf_ptr = (const T*)qkv_tf.data_ptr(); - const T* add_res_ptr = (const T*)add_res.data_ptr(); - const T* k_tf_ptr = - q_tf_ptr + (bsz * layer->GetSeqLength() * output_w.size(0)); //(const T*)k_tf.data_ptr(); - const T* v_tf_ptr = - k_tf_ptr + (bsz * layer->GetSeqLength() * output_w.size(0)); //(const T*)v_tf.data_ptr(); - const T* ff1_inp_ptr = (const T*)ff1_inp.data_ptr(); - const T* gelu_inp_ptr = (const T*)gelu_inp.data_ptr(); - const T* ff2_inp_ptr = (const T*)ff2_inp.data_ptr(); - const T* ctx_bufB_ptr = (const T*)ctx_bufB.data_ptr(); - const T* soft_out_ptr = (const T*)soft_out.data_ptr(); - const T* attn_o_inp_ptr = (const T*)attn_o_inp.data_ptr(); - const T* input_mask_ptr = (const T*)input_mask.data_ptr(); - const T* attn_qkvw_ptr = (const T*)attn_qkvw.data_ptr(); - const T* attn_ow_ptr = (const T*)attn_ow.data_ptr(); - const T* attn_nw_ptr = (const T*)attn_nw.data_ptr(); - const T* attn_nb_ptr = (const T*)attn_nb.data_ptr(); - const T* inter_w_ptr = (const T*)inter_w.data_ptr(); - const T* inter_b_ptr = (const T*)inter_b.data_ptr(); - const T* output_w_ptr = (const T*)output_w.data_ptr(); - const T* norm_w_ptr = (const T*)norm_w.data_ptr(); - const T* norm_b_ptr = (const T*)norm_b.data_ptr(); - - // outputs. 
- T* grad_input_ptr = (T*)grad_input.data_ptr(); - T* grad_attn_qkvw_ptr = (T*)grad_attn_qkvw.data_ptr(); - T* grad_attn_qkvb_ptr = (T*)grad_attn_qkvb.data_ptr(); - T* grad_attn_ow_ptr = (T*)grad_attn_ow.data_ptr(); - T* grad_attn_ob_ptr = (T*)grad_attn_ob.data_ptr(); - T* grad_attn_nw_ptr = (T*)grad_attn_nw.data_ptr(); - T* grad_attn_nb_ptr = (T*)grad_attn_nb.data_ptr(); - T* grad_inter_w_ptr = (T*)grad_inter_w.data_ptr(); - T* grad_inter_b_ptr = (T*)grad_inter_b.data_ptr(); - T* grad_output_w_ptr = (T*)grad_output_w.data_ptr(); - T* grad_output_b_ptr = (T*)grad_output_b.data_ptr(); - T* grad_norm_w_ptr = (T*)grad_norm_w.data_ptr(); - T* grad_norm_b_ptr = (T*)grad_norm_b.data_ptr(); - - layer->SetIntermediateBuffers((uint8_t*)attn_prob_dropout_mask.data_ptr(), - (uint8_t*)attn_output_dropout_mask.data_ptr(), - (uint8_t*)layer_output_dropout_mask.data_ptr(), - (T*)attn_layer_norm_var.data_ptr(), - (T*)attn_layer_norm_mean.data_ptr(), - (T*)layer_norm_var.data_ptr(), - (T*)layer_norm_mean.data_ptr()); - - layer->Backward(bsz, - grad_output_ptr, - input_ptr, - output_ptr, - inp_norm_ptr, - q_tf_ptr, - k_tf_ptr, - v_tf_ptr, - soft_out_ptr, - ctx_bufB_ptr, - attn_o_inp_ptr, - add_res_ptr, - ff1_inp_ptr, - gelu_inp_ptr, - ff2_inp_ptr, - input_mask_ptr, - attn_qkvw_ptr, - attn_ow_ptr, - attn_nw_ptr, - attn_nb_ptr, - inter_w_ptr, - inter_b_ptr, - output_w_ptr, - norm_w_ptr, - norm_b_ptr, - - grad_input_ptr, - grad_attn_qkvw_ptr, - grad_attn_qkvb_ptr, - grad_attn_ow_ptr, - grad_attn_ob_ptr, - grad_attn_nw_ptr, - grad_attn_nb_ptr, - grad_inter_w_ptr, - grad_inter_b_ptr, - grad_output_w_ptr, - grad_output_b_ptr, - grad_norm_w_ptr, - grad_norm_b_ptr); - - return {grad_input, - grad_attn_qkvw, - grad_attn_qkvb, - grad_attn_ow, - grad_attn_ob, - grad_attn_nw, - grad_attn_nb, - grad_inter_w, - grad_inter_b, - grad_output_w, - grad_output_b, - grad_norm_w, - grad_norm_b}; -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("forward_fp32", - &ds_transformer_forward, - 
"DeepSpeed Transformer forward with fp32 (CUDA)"); - m.def("forward_fp16", - &ds_transformer_forward<__half>, - "DeepSpeed Transformer forward with fp16 (CUDA)"); - m.def("backward_fp32", - &ds_transformer_backward, - "DeepSpeed Transformer backward with fp32 (CUDA)"); - m.def("backward_fp16", - &ds_transformer_backward<__half>, - "DeepSpeed Transformer backward with fp16 (CUDA)"); - m.def("create_transformer_layer_fp32", - &create_transformer_layer, - "Create DeepSpeed Transformer Transformer Layer with fp32 (CUDA)"); - m.def("create_transformer_layer_fp16", - &create_transformer_layer<__half>, - "Create DeepSpeed Transformer Transformer Layer with fp16 (CUDA)"); -} diff --git a/deepspeed/ops/csrc/transformer/ds_transformer_hip.cpp b/deepspeed/ops/csrc/transformer/ds_transformer_hip.cpp deleted file mode 100644 index f9e0a53..0000000 --- a/deepspeed/ops/csrc/transformer/ds_transformer_hip.cpp +++ /dev/null @@ -1,1052 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! -#include - -#include -#include -#include -#include -#include -#include -#include "Timer_hip.h" -#include "context_hip.h" -#include "cublas_wrappers_hip.h" -#include "custom_hip_layers.h" -#include "ds_transformer_hip.h" - -static std::unordered_map> s_transformer_layers; - -const int init_seq_length = 128; - -// C++ interface - -template -unsigned get_workspace_size(unsigned maxBatchSize, - unsigned seq_len, - unsigned hidden_size, - unsigned intermediate_size, - unsigned heads, - bool training, - bool gelu_checkpoint) -{ - unsigned workSpacesize = 4 * (size_t(maxBatchSize) * seq_len * hidden_size); - if (training) { - workSpacesize += 2 * (size_t(maxBatchSize) * seq_len * hidden_size); - workSpacesize += ((std::max)((size_t(maxBatchSize) * seq_len * intermediate_size), - 2 * (size_t(maxBatchSize) * heads * seq_len * seq_len))); - if (gelu_checkpoint) - workSpacesize += 2 * (size_t(maxBatchSize) * seq_len * intermediate_size); - } - return workSpacesize; // * sizeof(T); -} - -// 
NOTE: AT_ASSERT has become AT_CHECK on master after 0.4. -#define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor") -#define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous") -#define CHECK_INPUT(x) \ - CHECK_CUDA(x); \ - CHECK_CONTIGUOUS(x) - -template -BertTransformerLayer::BertTransformerLayer(unsigned layer_id, - unsigned batch_size, - unsigned hidden_size, - unsigned num_heads, - unsigned intermediate_size, - unsigned seq_length, - float attn_prob_dropout_ratio, - float hidden_output_dropout_ratio, - float layer_norm_eps, - bool pre_or_postLayerNorm, - const std::vector>& gemm_algos, - bool attn_dropout_checkpoint, - bool normalize_invertible, - bool gelu_checkpoint, - bool stochastic_mode) - : _layer_id(layer_id), - _batch_size(batch_size), - _hidden_size(hidden_size), - _heads(num_heads), - _intermediate_size(intermediate_size), - _seq_length(seq_length), - _training(true), - _pre_or_postLayerNorm(pre_or_postLayerNorm), - _attn_dropout_checkpoint(attn_dropout_checkpoint), - _normalize_invertible(normalize_invertible), - _gelu_checkpoint(gelu_checkpoint), - _stochastic_mode(stochastic_mode), - _stream(Context::Instance().GetCurrentStream()), - _cublasHandle(Context::Instance().GetCublasHandle()), - _qkv_linear(typename FeedForward::Config(batch_size * seq_length, - 3 * hidden_size, - hidden_size, - gemm_algos[0])), - _attn_out_linear(typename FeedForward::Config(batch_size * seq_length, - hidden_size, - hidden_size, - gemm_algos[0])), - _attn_layer_norm(typename Normalize_Layer::Config(batch_size, - seq_length, - hidden_size, - layer_norm_eps, - true, - !normalize_invertible)), - _layer_norm(typename Normalize_Layer::Config(batch_size, - seq_length, - hidden_size, - layer_norm_eps, - true, - !normalize_invertible)), - _ff1(typename FeedForward::Config(batch_size * seq_length, - _intermediate_size, - hidden_size, - gemm_algos[1])), - _ff2(typename FeedForward::Config(batch_size * seq_length, - hidden_size, - 
_intermediate_size, - gemm_algos[2])), - _softmax(typename Softmax::Config(batch_size, num_heads, seq_length)), - _gelu(typename Gelu::Config(_intermediate_size)), - _attn_prob_dropout(typename Dropout::Config(attn_prob_dropout_ratio, _seq_length)), - _attn_output_dropout(typename Dropout::Config(hidden_output_dropout_ratio, _hidden_size)), - _layer_output_dropout(typename Dropout::Config(hidden_output_dropout_ratio, _hidden_size)), - _attn_scores(typename StridedBatchGemm::Config(_batch_size * _heads, - _seq_length, - _seq_length, - _hidden_size / _heads, - //(T(1.0) / T(sqrt(_hidden_size / _heads))), - //aiss debug 0506 - (T(1.0 / (sqrt(_hidden_size / _heads)))), - T(0.0), - rocblas_operation_transpose, - rocblas_operation_none, - gemm_algos[3])), - _attn_context(typename StridedBatchGemm::Config(_batch_size * _heads, - _hidden_size / _heads, - _seq_length, - _seq_length, - T(1.0), - T(0.0), - rocblas_operation_none, - rocblas_operation_none, - gemm_algos[4])) -{ - assert(_hidden_size % _heads == 0); - - Initialize(); -} - -template -BertTransformerLayer::~BertTransformerLayer() -{ -} - -template -void BertTransformerLayer::Initialize() -{ -#ifndef __HIP_PLATFORM_HCC__ - if (std::is_same::value) rocblas_set_math_mode(_cublasHandle, CUBLAS_TENSOR_OP_MATH); -#endif -} - -template -void BertTransformerLayer::Forward(unsigned bsz, - const T* input_ptr, - const T* input_mask_ptr, - const T* attn_qkvw_ptr, - const T* attn_qkvb_ptr, - const T* attn_ow_ptr, - const T* attn_ob_ptr, - const T* attn_nw_ptr, - const T* attn_nb_ptr, - const T* inter_w_ptr, - const T* inter_b_ptr, - const T* output_w_ptr, - const T* output_b_ptr, - const T* norm_w_ptr, - const T* norm_b_ptr, - T* out_ptr, - T* inp_norm_ptr, - T* q_tf_ptr, - T* k_tf_ptr, - T* v_tf_ptr, - T* soft_out_ptr, - T* ctx_bufB_ptr, - T* attn_o_inp_ptr, - T* add_res_ptr, - T* ff1_inp_ptr, - T* gelu_inp_ptr, - T* ff2_inp_ptr) -{ - rocblas_set_stream(_cublasHandle, _stream); - - if (!_stochastic_mode) 
hipStreamSynchronize(_stream); - - T* workspace = static_cast(Context::Instance().GetWorkSpace()); - size_t small_buf_size = bsz * _seq_length * _hidden_size; - T* buf_0 = workspace; - T* buf_1 = buf_0 + small_buf_size; - T* buf_2 = buf_1; - - if (_normalize_invertible) { - add_res_ptr = buf_1 + 3 * small_buf_size; - buf_2 = add_res_ptr; - } - if (_gelu_checkpoint) buf_2 += small_buf_size; - if (_attn_dropout_checkpoint) - ctx_bufB_ptr = - (_gelu_checkpoint ? (buf_2 + (_intermediate_size / _hidden_size) * small_buf_size) - : (buf_1 + 4 * small_buf_size)); - - int bsz_seq = bsz * _seq_length; - - if (_pre_or_postLayerNorm) { - if (_layer_norm.UseMean()) - _layer_norm.ForwardCheckpoint( - bsz_seq, inp_norm_ptr, input_ptr, norm_w_ptr, norm_b_ptr, _stream, true); - - else - _layer_norm.Forward( - bsz_seq, inp_norm_ptr, input_ptr, norm_w_ptr, norm_b_ptr, _stream, true); - } - - if (_pre_or_postLayerNorm) - _qkv_linear.Forward(bsz_seq, inp_norm_ptr, attn_qkvw_ptr, buf_0, _cublasHandle); - else - _qkv_linear.Forward(bsz_seq, input_ptr, attn_qkvw_ptr, buf_0, _cublasHandle); - - launch_bias_add_transform_0213( - q_tf_ptr, buf_0, attn_qkvb_ptr, bsz, _seq_length, _hidden_size, _heads, _stream, 3); - - int bsz_heads = bsz * _heads; - - // attention scores - _attn_scores.Forward(bsz_heads, soft_out_ptr, k_tf_ptr, q_tf_ptr, _cublasHandle); - - // Softmax + Mask - _softmax.Forward(bsz, soft_out_ptr, input_mask_ptr, _stream); - - // attn prob dropout. 
- _attn_prob_dropout.Forward(bsz_heads * _seq_length, ctx_bufB_ptr, soft_out_ptr, _stream); - - // attention context - _attn_context.Forward(bsz_heads, buf_1, v_tf_ptr, ctx_bufB_ptr, _cublasHandle); - - launch_transform4d_0213( - attn_o_inp_ptr, buf_1, bsz, _heads, _seq_length, _hidden_size, _stream, 1); - - if (_pre_or_postLayerNorm) - _attn_out_linear.Forward(bsz_seq, attn_o_inp_ptr, attn_ow_ptr, buf_1, _cublasHandle); - else - _attn_out_linear.Forward(bsz_seq, attn_o_inp_ptr, attn_ow_ptr, ff1_inp_ptr, _cublasHandle); - - // attn output dropout. - if (_pre_or_postLayerNorm) - _attn_output_dropout.ForwardWithBias( - bsz_seq, add_res_ptr, buf_1, input_ptr, attn_ob_ptr, _stream); - else - _attn_output_dropout.ForwardWithBias( - bsz_seq, add_res_ptr, ff1_inp_ptr, input_ptr, attn_ob_ptr, _stream); - - if (_pre_or_postLayerNorm) { - if (_attn_layer_norm.UseMean()) - _attn_layer_norm.ForwardCheckpoint( - bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true); - else - _attn_layer_norm.Forward( - bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true); - } else { - if (_attn_layer_norm.UseMean()) - _attn_layer_norm.ForwardCheckpoint( - bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true); - else - _attn_layer_norm.Forward( - bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true); - } - - _ff1.Forward(bsz_seq, - ff1_inp_ptr, - inter_w_ptr, - (_gelu_checkpoint ? ff2_inp_ptr : gelu_inp_ptr), - _cublasHandle); - - _gelu.ForwardWithBiasAdd(bsz_seq, - (_gelu_checkpoint ? ff2_inp_ptr : gelu_inp_ptr), - inter_b_ptr, - (_gelu_checkpoint ? buf_2 : ff2_inp_ptr), - _stream); - - _ff2.Forward( - bsz_seq, (_gelu_checkpoint ? buf_2 : ff2_inp_ptr), output_w_ptr, out_ptr, _cublasHandle); - - // layer output dropout. 
- if (_pre_or_postLayerNorm) - _layer_output_dropout.ForwardWithBias( - bsz_seq, out_ptr, out_ptr, add_res_ptr, output_b_ptr, _stream); - else - _layer_output_dropout.ForwardWithBias( - bsz_seq, inp_norm_ptr, out_ptr, ff1_inp_ptr, output_b_ptr, _stream); - - if (!_pre_or_postLayerNorm) { - if (_layer_norm.UseMean()) - _layer_norm.ForwardCheckpoint( - bsz_seq, out_ptr, inp_norm_ptr, norm_w_ptr, norm_b_ptr, _stream, true); - else - _layer_norm.Forward( - bsz_seq, out_ptr, inp_norm_ptr, norm_w_ptr, norm_b_ptr, _stream, true); - } -} - -template -void BertTransformerLayer::Backward(unsigned bsz, - const T* grad_output_ptr, - const T* input_ptr, - const T* output_ptr, - const T* inp_norm_ptr, - const T* q_tf_ptr, - const T* k_tf_ptr, - const T* v_tf_ptr, - const T* soft_out_ptr, - const T* ctx_bufB_ptr, - const T* attn_o_inp_ptr, - const T* add_res_ptr, - const T* ff1_inp_ptr, - const T* gelu_inp_ptr, - const T* ff2_inp_ptr, - const T* input_mask_ptr, - const T* attn_qkvw_ptr, - const T* attn_ow_ptr, - const T* attn_nw_ptr, - const T* attn_nb_ptr, - const T* inter_w_ptr, - const T* inter_b_ptr, - const T* output_w_ptr, - const T* norm_w_ptr, - const T* norm_b_ptr, - - T* grad_input_ptr, - T* grad_attn_qkvw_ptr, - T* grad_attn_qkvb_ptr, - T* grad_attn_ow_ptr, - T* grad_attn_ob_ptr, - T* grad_attn_nw_ptr, - T* grad_attn_nb_ptr, - T* grad_inter_w_ptr, - T* grad_inter_b_ptr, - T* grad_output_w_ptr, - T* grad_output_b_ptr, - T* grad_norm_w_ptr, - T* grad_norm_b_ptr) -{ - rocblas_set_stream(_cublasHandle, _stream); - - if (!_stochastic_mode) hipStreamSynchronize(_stream); - - T* workspace = static_cast(Context::Instance().GetWorkSpace()); - size_t small_buf_size = bsz * _seq_length * _hidden_size; - T* buf_0 = workspace; - T* buf_1 = buf_0 + small_buf_size; - T* buf_2 = buf_1 + small_buf_size; - T* buf_3 = buf_2 + small_buf_size; - - T* ff2_buf = (_gelu_checkpoint ? 
buf_3 + (bsz * _seq_length * _intermediate_size) - : buf_3 + small_buf_size); - T* ctx_bufB_ptr_recomp = ff2_buf + (_seq_length * _seq_length * bsz * _heads); - - hipStream_t streams[2] = {_stream, _stream}; - - int bsz_seq = bsz * _seq_length; - int bsz_heads = bsz * _heads; - - if (!_pre_or_postLayerNorm) { - if (_layer_norm.UseMean()) - _layer_norm.Backward(bsz_seq, - grad_output_ptr, - norm_w_ptr, - grad_norm_w_ptr, - grad_norm_b_ptr, - streams, - buf_1, - inp_norm_ptr); - - else - _layer_norm.Backward(bsz_seq, - grad_output_ptr, - norm_w_ptr, - norm_b_ptr, - grad_norm_w_ptr, - grad_norm_b_ptr, - streams, - buf_1, - output_ptr); - } - - if (_pre_or_postLayerNorm) - _layer_output_dropout.Backward(bsz_seq, buf_0, grad_output_ptr, _stream); - else - _layer_output_dropout.Backward(bsz_seq, buf_0, buf_1, _stream); - - const T* layer_dropout_buf = _layer_output_dropout.HasDropout() - ? buf_0 - : (_pre_or_postLayerNorm ? grad_output_ptr : buf_1); - - if (_gelu_checkpoint) - _gelu.ForwardWithBiasAdd(bsz_seq, ff2_inp_ptr, inter_b_ptr, buf_2, _stream); - _ff2.Backward(bsz_seq, - layer_dropout_buf, - (_gelu_checkpoint ? buf_2 : ff2_inp_ptr), - output_w_ptr, - grad_output_w_ptr, - grad_output_b_ptr, - _cublasHandle, - _stream, - ff2_buf); - - _gelu.Backward( - bsz_seq, ff2_buf, (_gelu_checkpoint ? 
ff2_inp_ptr : gelu_inp_ptr), inter_b_ptr, _stream); - - _ff1.Backward(bsz_seq, - ff2_buf, - ff1_inp_ptr, - inter_w_ptr, - grad_inter_w_ptr, - grad_inter_b_ptr, - _cublasHandle, - _stream, - buf_3); - - if (!_pre_or_postLayerNorm) - launch_fused_add2(buf_2, buf_3, buf_1, bsz, _seq_length, _hidden_size, _stream); - - if (_pre_or_postLayerNorm) { - if (_attn_layer_norm.UseMean()) - _attn_layer_norm.BackwardFusedAdd(bsz_seq, - buf_3, - grad_output_ptr, - attn_nw_ptr, - grad_attn_nw_ptr, - grad_attn_nb_ptr, - streams, - buf_0, - add_res_ptr); - - else - _attn_layer_norm.BackwardFusedAdd(bsz_seq, - buf_3, - grad_output_ptr, - attn_nw_ptr, - attn_nb_ptr, - grad_attn_nw_ptr, - grad_attn_nb_ptr, - streams, - buf_0, - ff1_inp_ptr); - } else { - if (_attn_layer_norm.UseMean()) - _attn_layer_norm.Backward(bsz_seq, - buf_2, - attn_nw_ptr, - grad_attn_nw_ptr, - grad_attn_nb_ptr, - streams, - buf_0, - add_res_ptr); - - else - _attn_layer_norm.Backward(bsz_seq, - buf_2, - attn_nw_ptr, - attn_nb_ptr, - grad_attn_nw_ptr, - grad_attn_nb_ptr, - streams, - buf_0, - ff1_inp_ptr); - } - - _attn_output_dropout.Backward(bsz_seq, buf_2, buf_0, _stream); - - T* attn_output_dropout_buf = _attn_output_dropout.HasDropout() ? buf_2 : buf_0; - - _attn_out_linear.Backward(bsz_seq, - attn_output_dropout_buf, - attn_o_inp_ptr, - attn_ow_ptr, - grad_attn_ow_ptr, - grad_attn_ob_ptr, - _cublasHandle, - _stream, - buf_1); - - launch_transform_0213(buf_2, buf_1, bsz, _seq_length, _hidden_size, _heads, _stream); - - if (_attn_prob_dropout.HasDropout()) { - if (_attn_dropout_checkpoint) - _attn_prob_dropout.Forward( - bsz_heads * _seq_length, ctx_bufB_ptr_recomp, soft_out_ptr, _stream, true); - - _attn_context.Backward(bsz_heads, - buf_2, - v_tf_ptr, - (_attn_dropout_checkpoint ? 
ctx_bufB_ptr_recomp : ctx_bufB_ptr), - _cublasHandle, - buf_3, - ff2_buf); - } else - _attn_context.Backward( - bsz_heads, buf_2, v_tf_ptr, soft_out_ptr, _cublasHandle, buf_3, ff2_buf); - - _attn_prob_dropout.Backward(bsz_heads * _seq_length, ff2_buf, _stream); - - _softmax.Backward(bsz, ff2_buf, soft_out_ptr, _stream); - - _attn_scores.Backward(bsz_heads, ff2_buf, k_tf_ptr, q_tf_ptr, _cublasHandle, buf_2, buf_1); - - launch_transform4d_0213(ff2_buf, buf_1, bsz, _heads, _seq_length, _hidden_size, _stream, 3); - - if (_pre_or_postLayerNorm) - _qkv_linear.Backward(bsz_seq, - ff2_buf, - inp_norm_ptr, - attn_qkvw_ptr, - grad_attn_qkvw_ptr, - grad_attn_qkvb_ptr, - _cublasHandle, - _stream, - buf_2); - else - _qkv_linear.Backward(bsz_seq, - ff2_buf, - input_ptr, - attn_qkvw_ptr, - grad_attn_qkvw_ptr, - grad_attn_qkvb_ptr, - _cublasHandle, - _stream, - buf_2); - - if (_pre_or_postLayerNorm) { - if (_layer_norm.UseMean()) - _layer_norm.BackwardFusedAdd(bsz_seq, - buf_2, - buf_0, - norm_w_ptr, - grad_norm_w_ptr, - grad_norm_b_ptr, - streams, - grad_input_ptr, - input_ptr); - - else - _layer_norm.BackwardFusedAdd(bsz_seq, - buf_2, - buf_0, - norm_w_ptr, - norm_b_ptr, - grad_norm_w_ptr, - grad_norm_b_ptr, - streams, - grad_input_ptr, - inp_norm_ptr); - } else - launch_fused_add2(grad_input_ptr, buf_2, buf_0, bsz, _seq_length, _hidden_size, _stream); -} - -template -void BertTransformerLayer::SetTrainingMode(bool training) -{ - // Dropout will be skipped when not in training model. 
- _attn_prob_dropout.SetTrainingMode(training); - _attn_output_dropout.SetTrainingMode(training); - _layer_output_dropout.SetTrainingMode(training); -} - -template -void BertTransformerLayer::SetIntermediateBuffers(uint8_t* attn_prob_dropout_mask_ptr, - uint8_t* attn_output_dropout_mask_ptr, - uint8_t* layer_output_dropout_mask_ptr, - T* attn_layer_norm_var, - T* attn_layer_norm_mean, - T* layer_norm_var, - T* layer_norm_mean) -{ - _attn_prob_dropout.SetMask(attn_prob_dropout_mask_ptr); - _attn_output_dropout.SetMask(attn_output_dropout_mask_ptr); - _layer_output_dropout.SetMask(layer_output_dropout_mask_ptr); - - _attn_layer_norm.SetVar(attn_layer_norm_var); - _attn_layer_norm.SetMean(attn_layer_norm_mean); - _layer_norm.SetVar(layer_norm_var); - _layer_norm.SetMean(layer_norm_mean); -} - -template -void BertTransformerLayer::SetSeqLength(unsigned seq_len) -{ - _seq_length = seq_len; - - _softmax.SetSeqLength(_seq_length); - _attn_prob_dropout.SetDimension(_seq_length); - _attn_scores.SetConfig(_seq_length, _seq_length, _hidden_size / _heads); - _attn_context.SetConfig(_hidden_size / _heads, _seq_length, _seq_length); -} - -template -int create_transformer_layer(unsigned layer_id, - unsigned batch_size, - unsigned hidden_dim, - unsigned num_heads, - unsigned intermediate_size, - float attn_dropout_ratio, - float hidden_dropout_ratio, - float layer_norm_eps, - int seed, - bool pre_or_postLayerNorm, - bool test_gemm, - bool attn_dropout_checkpoint, - bool normalize_invertible, - bool gelu_checkpoint, - bool stochastic_mode) -{ - Context::Instance().SetSeed(seed); - Context::Instance().TestGemmFP16( - test_gemm, batch_size, init_seq_length, num_heads, hidden_dim / num_heads); - - auto layer = std::make_shared>(layer_id, - batch_size, - hidden_dim, - num_heads, - intermediate_size, - init_seq_length, - attn_dropout_ratio, - hidden_dropout_ratio, - layer_norm_eps, - pre_or_postLayerNorm, - Context::Instance().GetGemmAlgos(), - attn_dropout_checkpoint, - 
normalize_invertible, - gelu_checkpoint, - stochastic_mode); - - s_transformer_layers[layer_id] = layer; - - std::string dtype = (std::is_same::value) ? "half" : "float"; - - std::cout << "layer #" << layer_id << " is created with date type [" << dtype << "]." - << std::endl; - - return 0; -} - -template -std::vector ds_transformer_forward(unsigned layer_id, - const torch::Tensor& input, - const torch::Tensor& input_mask, - const torch::Tensor& attn_qkvw, - const torch::Tensor& attn_qkvb, - const torch::Tensor& attn_ow, - const torch::Tensor& attn_ob, - const torch::Tensor& attn_nw, - const torch::Tensor& attn_nb, - const torch::Tensor& inter_w, - const torch::Tensor& inter_b, - const torch::Tensor& output_w, - const torch::Tensor& output_b, - const torch::Tensor& norm_w, - const torch::Tensor& norm_b, - bool training_mode, - bool prelayernorm, - bool attn_dropout_checkpoint, - bool normalize_invertible, - bool gelu_checkpoint) -{ - CHECK_INPUT(input); - CHECK_INPUT(input_mask); - CHECK_INPUT(attn_qkvw); - CHECK_INPUT(attn_qkvb); - CHECK_INPUT(attn_ow); - CHECK_INPUT(attn_ob); - CHECK_INPUT(attn_nw); - CHECK_INPUT(attn_nb); - CHECK_INPUT(inter_w); - CHECK_INPUT(inter_b); - CHECK_INPUT(output_w); - CHECK_INPUT(output_b); - CHECK_INPUT(norm_w); - CHECK_INPUT(norm_b); - - unsigned bsz = input.size(0); - - const T* input_ptr = (const T*)input.data_ptr(); - const T* input_mask_ptr = (const T*)input_mask.data_ptr(); - const T* attn_qkvw_ptr = (const T*)attn_qkvw.data_ptr(); - const T* attn_qkvb_ptr = (const T*)attn_qkvb.data_ptr(); - const T* attn_ow_ptr = (const T*)attn_ow.data_ptr(); - const T* attn_ob_ptr = (const T*)attn_ob.data_ptr(); - const T* attn_nw_ptr = (const T*)attn_nw.data_ptr(); - const T* attn_nb_ptr = (const T*)attn_nb.data_ptr(); - const T* inter_w_ptr = (const T*)inter_w.data_ptr(); - const T* inter_b_ptr = (const T*)inter_b.data_ptr(); - const T* output_w_ptr = (const T*)output_w.data_ptr(); - const T* output_b_ptr = (const T*)output_b.data_ptr(); - 
const T* norm_w_ptr = (const T*)norm_w.data_ptr(); - const T* norm_b_ptr = (const T*)norm_b.data_ptr(); - - auto output = torch::empty_like(input); - T* out_ptr = (T*)output.data_ptr(); - - auto options = torch::TensorOptions() - .dtype(input.options().dtype()) - .layout(torch::kStrided) - .device(torch::kCUDA) - .requires_grad(true); - - auto uint8_options = torch::TensorOptions() - .dtype(torch::kInt8) - .layout(torch::kStrided) - .device(torch::kCUDA) - .requires_grad(false); - - std::shared_ptr> layer = - std::static_pointer_cast>(s_transformer_layers[layer_id]); - - unsigned seq_len = layer->GetSeqLength(); - if (input.size(1) != seq_len) { - seq_len = input.size(1); - layer->SetSeqLength(seq_len); - } - - auto workspace = torch::empty({get_workspace_size(bsz, - seq_len, - layer->GetHiddenSize(), - layer->GetIntermediateSize(), - layer->GetNumHeads(), - layer->IsTrainingMode(), - layer->GeluCheckpoint())}, - options); - Context::Instance().SetWorkSpace((T*)workspace.data_ptr()); - - auto inp_norm = ((prelayernorm || !normalize_invertible) ? torch::empty_like(input) : output); - auto add_res = (normalize_invertible ? 
inp_norm : torch::empty_like(input)); - auto attn_o_inp = torch::empty_like(input); - auto qkv_tf = torch::empty({(bsz * seq_len), output_w.size(0) * 3}, options); - - auto attn_prob_dropout_mask = - torch::empty({(bsz * layer->GetNumHeads() * seq_len), seq_len}, uint8_options); - auto attn_output_dropout_mask = - torch::empty({(bsz * seq_len), layer->GetHiddenSize()}, uint8_options); - auto layer_output_dropout_mask = - torch::empty({(bsz * seq_len), layer->GetHiddenSize()}, uint8_options); - - auto attn_layer_norm_var = torch::empty({(bsz * seq_len)}, options); - auto attn_layer_norm_mean = torch::empty({(bsz * seq_len)}, options); - auto layer_norm_var = torch::empty({(bsz * seq_len)}, options); - auto layer_norm_mean = torch::empty({(bsz * seq_len)}, options); - - T* inp_norm_ptr = (T*)inp_norm.data_ptr(); - T* add_res_ptr = (T*)add_res.data_ptr(); - T* q_tf_ptr = (T*)qkv_tf.data_ptr(); - T* k_tf_ptr = q_tf_ptr + (bsz * seq_len * output_w.size(0)); //(T*)k_tf.data_ptr(); - T* v_tf_ptr = k_tf_ptr + (bsz * seq_len * output_w.size(0)); //(T*)v_tf.data_ptr(); - T* attn_o_inp_ptr = (T*)attn_o_inp.data_ptr(); - - torch::Tensor ff2_inp = torch::empty({(bsz * seq_len), output_w.size(1)}, options); - torch::Tensor gelu_inp = - (gelu_checkpoint ? ff2_inp : torch::empty({(bsz * seq_len), output_w.size(1)}, options)); - auto ff1_inp = torch::empty_like(input); - T* ff2_inp_ptr = (T*)ff2_inp.data_ptr(); - T* gelu_inp_ptr = (T*)gelu_inp.data_ptr(); - T* ff1_inp_ptr = (T*)ff1_inp.data_ptr(); - - torch::Tensor soft_out = - torch::empty({(bsz * layer->GetNumHeads() * seq_len), seq_len}, options); - torch::Tensor ctx_bufB = - (attn_dropout_checkpoint - ? 
soft_out - : torch::empty({(bsz * layer->GetNumHeads() * seq_len), seq_len}, options)); - T* soft_out_ptr = (T*)soft_out.data_ptr(); - T* ctx_bufB_ptr = (T*)ctx_bufB.data_ptr(); - - layer->SetTrainingMode(training_mode); - layer->SetIntermediateBuffers((uint8_t*)attn_prob_dropout_mask.data_ptr(), - (uint8_t*)attn_output_dropout_mask.data_ptr(), - (uint8_t*)layer_output_dropout_mask.data_ptr(), - (T*)attn_layer_norm_var.data_ptr(), - (T*)attn_layer_norm_mean.data_ptr(), - (T*)layer_norm_var.data_ptr(), - (T*)layer_norm_mean.data_ptr()); - - layer->Forward(bsz, - input_ptr, - input_mask_ptr, - attn_qkvw_ptr, - attn_qkvb_ptr, - attn_ow_ptr, - attn_ob_ptr, - attn_nw_ptr, - attn_nb_ptr, - inter_w_ptr, - inter_b_ptr, - output_w_ptr, - output_b_ptr, - norm_w_ptr, - norm_b_ptr, - out_ptr, - inp_norm_ptr, - q_tf_ptr, - k_tf_ptr, - v_tf_ptr, - soft_out_ptr, - ctx_bufB_ptr, - attn_o_inp_ptr, - add_res_ptr, - ff1_inp_ptr, - gelu_inp_ptr, - ff2_inp_ptr); - - return {output, - inp_norm, - qkv_tf, - soft_out, - ctx_bufB, - attn_o_inp, - add_res, - ff1_inp, - gelu_inp, - ff2_inp, - attn_prob_dropout_mask, - attn_output_dropout_mask, - layer_output_dropout_mask, - attn_layer_norm_var, - attn_layer_norm_mean, - layer_norm_var, - layer_norm_mean}; -} - -template -std::vector ds_transformer_backward(unsigned layer_id, - const torch::Tensor& grad_output, - const torch::Tensor& output, - const torch::Tensor& inp_norm, - const torch::Tensor& qkv_tf, - const torch::Tensor& soft_out, - const torch::Tensor& ctx_bufB, - const torch::Tensor& attn_o_inp, - const torch::Tensor& add_res, - const torch::Tensor& ff1_inp, - const torch::Tensor& gelu_inp, - const torch::Tensor& ff2_inp, - const torch::Tensor& attn_prob_dropout_mask, - const torch::Tensor& attn_output_dropout_mask, - const torch::Tensor& layer_output_dropout_mask, - const torch::Tensor& attn_layer_norm_var, - const torch::Tensor& attn_layer_norm_mean, - const torch::Tensor& layer_norm_var, - const torch::Tensor& layer_norm_mean, - 
const torch::Tensor& input, - const torch::Tensor& input_mask, - const torch::Tensor& attn_qkvw, - const torch::Tensor& attn_qkvb, - const torch::Tensor& attn_ow, - const torch::Tensor& attn_ob, - const torch::Tensor& attn_nw, - const torch::Tensor& attn_nb, - const torch::Tensor& inter_w, - const torch::Tensor& inter_b, - const torch::Tensor& output_w, - const torch::Tensor& output_b, - const torch::Tensor& norm_w, - const torch::Tensor& norm_b) -{ - auto g_output = grad_output.contiguous(); - CHECK_INPUT(g_output); - CHECK_INPUT(output); - CHECK_INPUT(inp_norm); - CHECK_INPUT(qkv_tf); - CHECK_INPUT(add_res); - CHECK_INPUT(soft_out); - CHECK_INPUT(ctx_bufB); - CHECK_INPUT(attn_o_inp); - CHECK_INPUT(ff1_inp); - CHECK_INPUT(gelu_inp); - CHECK_INPUT(ff2_inp); - CHECK_INPUT(input); - CHECK_INPUT(input_mask); - CHECK_INPUT(attn_qkvw); - CHECK_INPUT(attn_qkvb); - CHECK_INPUT(attn_ow); - CHECK_INPUT(attn_ob); - CHECK_INPUT(attn_nw); - CHECK_INPUT(attn_nb); - CHECK_INPUT(inter_w); - CHECK_INPUT(inter_b); - CHECK_INPUT(output_w); - CHECK_INPUT(output_b); - CHECK_INPUT(norm_w); - CHECK_INPUT(norm_b); - - unsigned bsz = g_output.size(0); - - std::shared_ptr> layer = - std::static_pointer_cast>(s_transformer_layers[layer_id]); - - unsigned seq_len = layer->GetSeqLength(); - if (g_output.size(1) != seq_len) { - seq_len = g_output.size(1); - layer->SetSeqLength(seq_len); - } - auto options = torch::TensorOptions() - .dtype(g_output.options().dtype()) - .layout(torch::kStrided) - .device(torch::kCUDA) - .requires_grad(true); - auto workspace = torch::empty({get_workspace_size(bsz, - seq_len, - layer->GetHiddenSize(), - layer->GetIntermediateSize(), - layer->GetNumHeads(), - layer->IsTrainingMode(), - layer->GeluCheckpoint())}, - options); - Context::Instance().SetWorkSpace((T*)workspace.data_ptr()); - - auto grad_input = torch::empty_like(input); - auto grad_attn_qkvw = torch::empty_like(attn_qkvw); - auto grad_attn_qkvb = torch::empty_like(attn_qkvb); - auto grad_attn_ow = 
torch::empty_like(attn_ow); - auto grad_attn_ob = torch::empty_like(attn_ob); - auto grad_attn_nw = torch::empty_like(attn_nw); - auto grad_attn_nb = torch::empty_like(attn_nb); - auto grad_inter_w = torch::empty_like(inter_w); - auto grad_inter_b = torch::empty_like(inter_b); - auto grad_output_w = torch::empty_like(output_w); - auto grad_output_b = torch::empty_like(output_b); - auto grad_norm_w = torch::empty_like(norm_w); - auto grad_norm_b = torch::empty_like(norm_b); - - // inputs. - const T* grad_output_ptr = (const T*)g_output.data_ptr(); - const T* input_ptr = (const T*)input.data_ptr(); - const T* output_ptr = (const T*)output.data_ptr(); - const T* inp_norm_ptr = (const T*)inp_norm.data_ptr(); - const T* q_tf_ptr = (const T*)qkv_tf.data_ptr(); - const T* add_res_ptr = (const T*)add_res.data_ptr(); - const T* k_tf_ptr = - q_tf_ptr + (bsz * layer->GetSeqLength() * output_w.size(0)); //(const T*)k_tf.data_ptr(); - const T* v_tf_ptr = - k_tf_ptr + (bsz * layer->GetSeqLength() * output_w.size(0)); //(const T*)v_tf.data_ptr(); - const T* ff1_inp_ptr = (const T*)ff1_inp.data_ptr(); - const T* gelu_inp_ptr = (const T*)gelu_inp.data_ptr(); - const T* ff2_inp_ptr = (const T*)ff2_inp.data_ptr(); - const T* ctx_bufB_ptr = (const T*)ctx_bufB.data_ptr(); - const T* soft_out_ptr = (const T*)soft_out.data_ptr(); - const T* attn_o_inp_ptr = (const T*)attn_o_inp.data_ptr(); - const T* input_mask_ptr = (const T*)input_mask.data_ptr(); - const T* attn_qkvw_ptr = (const T*)attn_qkvw.data_ptr(); - const T* attn_ow_ptr = (const T*)attn_ow.data_ptr(); - const T* attn_nw_ptr = (const T*)attn_nw.data_ptr(); - const T* attn_nb_ptr = (const T*)attn_nb.data_ptr(); - const T* inter_w_ptr = (const T*)inter_w.data_ptr(); - const T* inter_b_ptr = (const T*)inter_b.data_ptr(); - const T* output_w_ptr = (const T*)output_w.data_ptr(); - const T* norm_w_ptr = (const T*)norm_w.data_ptr(); - const T* norm_b_ptr = (const T*)norm_b.data_ptr(); - - // outputs. 
- T* grad_input_ptr = (T*)grad_input.data_ptr(); - T* grad_attn_qkvw_ptr = (T*)grad_attn_qkvw.data_ptr(); - T* grad_attn_qkvb_ptr = (T*)grad_attn_qkvb.data_ptr(); - T* grad_attn_ow_ptr = (T*)grad_attn_ow.data_ptr(); - T* grad_attn_ob_ptr = (T*)grad_attn_ob.data_ptr(); - T* grad_attn_nw_ptr = (T*)grad_attn_nw.data_ptr(); - T* grad_attn_nb_ptr = (T*)grad_attn_nb.data_ptr(); - T* grad_inter_w_ptr = (T*)grad_inter_w.data_ptr(); - T* grad_inter_b_ptr = (T*)grad_inter_b.data_ptr(); - T* grad_output_w_ptr = (T*)grad_output_w.data_ptr(); - T* grad_output_b_ptr = (T*)grad_output_b.data_ptr(); - T* grad_norm_w_ptr = (T*)grad_norm_w.data_ptr(); - T* grad_norm_b_ptr = (T*)grad_norm_b.data_ptr(); - - layer->SetIntermediateBuffers((uint8_t*)attn_prob_dropout_mask.data_ptr(), - (uint8_t*)attn_output_dropout_mask.data_ptr(), - (uint8_t*)layer_output_dropout_mask.data_ptr(), - (T*)attn_layer_norm_var.data_ptr(), - (T*)attn_layer_norm_mean.data_ptr(), - (T*)layer_norm_var.data_ptr(), - (T*)layer_norm_mean.data_ptr()); - - layer->Backward(bsz, - grad_output_ptr, - input_ptr, - output_ptr, - inp_norm_ptr, - q_tf_ptr, - k_tf_ptr, - v_tf_ptr, - soft_out_ptr, - ctx_bufB_ptr, - attn_o_inp_ptr, - add_res_ptr, - ff1_inp_ptr, - gelu_inp_ptr, - ff2_inp_ptr, - input_mask_ptr, - attn_qkvw_ptr, - attn_ow_ptr, - attn_nw_ptr, - attn_nb_ptr, - inter_w_ptr, - inter_b_ptr, - output_w_ptr, - norm_w_ptr, - norm_b_ptr, - - grad_input_ptr, - grad_attn_qkvw_ptr, - grad_attn_qkvb_ptr, - grad_attn_ow_ptr, - grad_attn_ob_ptr, - grad_attn_nw_ptr, - grad_attn_nb_ptr, - grad_inter_w_ptr, - grad_inter_b_ptr, - grad_output_w_ptr, - grad_output_b_ptr, - grad_norm_w_ptr, - grad_norm_b_ptr); - - return {grad_input, - grad_attn_qkvw, - grad_attn_qkvb, - grad_attn_ow, - grad_attn_ob, - grad_attn_nw, - grad_attn_nb, - grad_inter_w, - grad_inter_b, - grad_output_w, - grad_output_b, - grad_norm_w, - grad_norm_b}; -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("forward_fp32", - &ds_transformer_forward, - 
"DeepSpeed Transformer forward with fp32 (CUDA)"); - m.def("forward_fp16", - &ds_transformer_forward<__half>, - "DeepSpeed Transformer forward with fp16 (CUDA)"); - m.def("backward_fp32", - &ds_transformer_backward, - "DeepSpeed Transformer backward with fp32 (CUDA)"); - m.def("backward_fp16", - &ds_transformer_backward<__half>, - "DeepSpeed Transformer backward with fp16 (CUDA)"); - m.def("create_transformer_layer_fp32", - &create_transformer_layer, - "Create DeepSpeed Transformer Transformer Layer with fp32 (CUDA)"); - m.def("create_transformer_layer_fp16", - &create_transformer_layer<__half>, - "Create DeepSpeed Transformer Transformer Layer with fp16 (CUDA)"); -} diff --git a/deepspeed/ops/csrc/transformer/gelu_kernels.cu b/deepspeed/ops/csrc/transformer/gelu_kernels.cu deleted file mode 100644 index d683cf0..0000000 --- a/deepspeed/ops/csrc/transformer/gelu_kernels.cu +++ /dev/null @@ -1,330 +0,0 @@ -#include "custom_cuda_layers.h" - -inline __device__ float gelu(const float x) -{ - const float sqrt_param = 0.79788456080286535587989211986876f; - const float mul_param = 0.044715; - return x * 0.5f * (1.0f + tanhf(sqrt_param * (x + mul_param * x * x * x))); -} - -inline __device__ float d_gelu(const float x) -{ - const float sqrt_param = 0.79788456080286535587989211986876f; - const float mul_param = 0.044715; - - float x2mul = x * x * mul_param; - float tan_h = tanhf(sqrt_param * (x + x * x2mul)); - float dg1 = 0.5f * (1.0f + tan_h); - float dg2 = x * 0.5f * sqrt_param * (1 - tan_h * tan_h); - float dg3 = dg2 * 3 * x2mul; - return (dg1 + dg2 + dg3); -} - -/* -Fused bias add with GELU - -Loads a vector of 4 elements each iteration, for stride -iterations. It was written with the intention to launch 256 thread -threadblocks, so to launch for bert-large, we would set ITERATIONS -to 4. This is currently done automatically as a heuristic, setting -the number of iterations as blocks of 1024. 
- -For FP16, the values are loaded from memory as __half, but converted -to FP32 for the arithmetic itself, to prevent numerous overflow on -the intermediate hyperbolic tangent, since there's no intrinsic -that computes it directly. -*/ - -__global__ void gelu_kernel(const float* input, float* vals, int row_stride, int iterations) -{ - int row = blockIdx.x; - int id = threadIdx.x; - int loop_stride = blockDim.x; - - const float4* input_cast = reinterpret_cast(input); - float4* vals_cast = reinterpret_cast(vals); - - for (int i = 0; i < iterations; i++) { - if (i * loop_stride + id < row_stride) { - float4 data = input_cast[row * row_stride + i * loop_stride + id]; - - data.x = gelu(data.x); - data.y = gelu(data.y); - data.z = gelu(data.z); - data.w = gelu(data.w); - - vals_cast[row * row_stride + i * loop_stride + id] = data; - } - } -} - -__global__ void gelu_kernel(const __half* input, __half* vals, int row_stride, int iterations) -{ -#ifdef HALF_PRECISION_AVAILABLE - int row = blockIdx.x; - int id = threadIdx.x; - int loop_stride = blockDim.x; - - const float2* input_cast = reinterpret_cast(input); - float2* vals_cast = reinterpret_cast(vals); - - for (int i = 0; i < iterations; i++) { - if (i * loop_stride + id < row_stride) { - float2 vals_vec = input_cast[row * row_stride + i * loop_stride + id]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - low_data.x = gelu(low_data.x); - low_data.y = gelu(low_data.y); - high_data.x = gelu(high_data.x); - high_data.y = gelu(high_data.y); - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - vals_cast[row * row_stride + i * loop_stride + id] = vals_vec; - } - } -#endif -} - -__global__ void fused_bias_gelu(const float* input, - const float* bias, - float* vals, - int row_stride, - int iterations) -{ - int row = blockIdx.x; - int id = threadIdx.x; - int 
loop_stride = blockDim.x; - - const float4* input_cast = reinterpret_cast(input); - float4* vals_cast = reinterpret_cast(vals); - const float4* bias_cast = reinterpret_cast(bias); - - for (int i = 0; i < iterations; i++) { - if (i * loop_stride + id < row_stride) { - float4 data = input_cast[row * row_stride + i * loop_stride + id]; - float4 bias_data = bias_cast[i * loop_stride + id]; - - data.x += bias_data.x; - data.y += bias_data.y; - data.z += bias_data.z; - data.w += bias_data.w; - - data.x = gelu(data.x); - data.y = gelu(data.y); - data.z = gelu(data.z); - data.w = gelu(data.w); - - vals_cast[row * row_stride + i * loop_stride + id] = data; - } - } -} - -__global__ void fused_bias_gelu(const __half* input, - const __half* bias, - __half* vals, - int row_stride, - int iterations) -{ -#ifdef HALF_PRECISION_AVAILABLE - int row = blockIdx.x; - int id = threadIdx.x; - int loop_stride = blockDim.x; - - const float2* input_cast = reinterpret_cast(input); - float2* vals_cast = reinterpret_cast(vals); - const float2* bias_cast = reinterpret_cast(bias); - - for (int i = 0; i < iterations; i++) { - if (i * loop_stride + id < row_stride) { - float2 vals_vec = input_cast[row * row_stride + i * loop_stride + id]; - float2 bias_vec = bias_cast[i * loop_stride + id]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - low_data.x += low_bias.x; - low_data.y += low_bias.y; - high_data.x += high_bias.x; - high_data.y += high_bias.y; - - low_data.x = gelu(low_data.x); - low_data.y = gelu(low_data.y); - high_data.x = gelu(high_data.x); - high_data.y = gelu(high_data.y); - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - vals_cast[row * row_stride + i * 
loop_stride + id] = vals_vec; - } - } -#endif -} - -__global__ void d_gelu_func(float* d_output, - const float* gelu_input, - const float* bias, - int row_stride, - int iterations) -{ - int row = blockIdx.x; - int id = threadIdx.x; - int loop_stride = blockDim.x; - - float4* d_output_cast = reinterpret_cast(d_output); - const float4* gelu_input_cast = reinterpret_cast(gelu_input); - const float4* bias_cast = reinterpret_cast(bias); - - for (int i = 0; i < iterations; i++) { - if (i * loop_stride + id < row_stride) { - float4 output_data = d_output_cast[row * row_stride + i * loop_stride + id]; - float4 gelu_input_data = gelu_input_cast[row * row_stride + i * loop_stride + id]; - float4 bias_data = bias_cast[i * loop_stride + id]; - - gelu_input_data.x += bias_data.x; - gelu_input_data.y += bias_data.y; - gelu_input_data.z += bias_data.z; - gelu_input_data.w += bias_data.w; - - output_data.x *= d_gelu(gelu_input_data.x); - output_data.y *= d_gelu(gelu_input_data.y); - output_data.z *= d_gelu(gelu_input_data.z); - output_data.w *= d_gelu(gelu_input_data.w); - - d_output_cast[row * row_stride + i * loop_stride + id] = output_data; - } - } -} - -__global__ void d_gelu_func(__half* d_output, - const __half* gelu_input, - const __half* bias, - int row_stride, - int iterations) -{ -#ifdef HALF_PRECISION_AVAILABLE - int row = blockIdx.x; - int id = threadIdx.x; - int loop_stride = blockDim.x; - - float2* d_output_cast = reinterpret_cast(d_output); - const float2* gelu_input_cast = reinterpret_cast(gelu_input); - const float2* bias_cast = reinterpret_cast(bias); - -#pragma unroll - for (int i = 0; i < iterations; i++) { - if (i * loop_stride + id < row_stride) { - float2 output_data = d_output_cast[row * row_stride + i * loop_stride + id]; - float2 gelu_input_data = gelu_input_cast[row * row_stride + i * loop_stride + id]; - float2 bias_vec = bias_cast[i * loop_stride + id]; - - __half2* output_data_half = reinterpret_cast<__half2*>(&output_data); - __half2* 
gelu_input_data_half = reinterpret_cast<__half2*>(&gelu_input_data); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - - float2 output_half_0 = __half22float2(output_data_half[0]); - float2 output_half_1 = __half22float2(output_data_half[1]); - - float2 gelu_input_half_0 = __half22float2(gelu_input_data_half[0]); - float2 gelu_input_half_1 = __half22float2(gelu_input_data_half[1]); - - float2 bias_half_0 = __half22float2(bias_half[0]); - float2 bias_half_1 = __half22float2(bias_half[1]); - - gelu_input_half_0.x += bias_half_0.x; - gelu_input_half_0.y += bias_half_0.y; - gelu_input_half_1.x += bias_half_1.x; - gelu_input_half_1.y += bias_half_1.y; - - output_half_0.x *= d_gelu(gelu_input_half_0.x); - output_half_0.y *= d_gelu(gelu_input_half_0.y); - output_half_1.x *= d_gelu(gelu_input_half_1.x); - output_half_1.y *= d_gelu(gelu_input_half_1.y); - - float2 result; - __half2* result_half2 = reinterpret_cast<__half2*>(&result); - - result_half2[0] = __float22half2_rn(output_half_0); - result_half2[1] = __float22half2_rn(output_half_1); - - d_output_cast[row * row_stride + i * loop_stride + id] = result; - } - } -#endif -} - -template -void launch_bias_gelu(const T* input, - const T* bias, - T* output, - int intermediate_size, - int batch_size, - cudaStream_t stream) -{ - int iterations = (intermediate_size + 1023) / 1024; - int threads = (intermediate_size - 1) / (iterations * 4) + 1; - dim3 block_dims(threads); - dim3 grid_dims(batch_size); - - fused_bias_gelu<<>>( - input, bias, output, intermediate_size / 4, iterations); -} - -template -void launch_gelu(const T* input, - T* output, - int intermediate_size, - int batch_size, - cudaStream_t stream) -{ - int iterations = (intermediate_size + 1023) / 1024; - int threads = (intermediate_size - 1) / (iterations * 4) + 1; - dim3 block_dims(threads); - dim3 grid_dims(batch_size); - - gelu_kernel<<>>( - input, output, intermediate_size / 4, iterations); -} - -template void launch_bias_gelu(const float*, const 
float*, float*, int, int, cudaStream_t); -template void launch_bias_gelu<__half>(const __half*, - const __half*, - __half*, - int, - int, - cudaStream_t); - -template void launch_gelu(const float*, float*, int, int, cudaStream_t); -template void launch_gelu<__half>(const __half*, __half*, int, int, cudaStream_t); - -template -void launch_d_gelu(T* d_output, - const T* input, - const T* bias, - int intermediate_size, - int batch_size, - cudaStream_t stream) -{ - int iterations = (intermediate_size + 1023) / 1024; - int threads = (intermediate_size - 1) / (iterations * 4) + 1; - dim3 block_dims(threads); - dim3 grid_dims(batch_size); - - d_gelu_func<<>>( - d_output, input, bias, intermediate_size / 4, iterations); -} - -template void launch_d_gelu(float*, const float*, const float*, int, int, cudaStream_t); -template void launch_d_gelu<__half>(__half*, const __half*, const __half*, int, int, cudaStream_t); diff --git a/deepspeed/ops/csrc/transformer/gelu_kernels.hip b/deepspeed/ops/csrc/transformer/gelu_kernels.hip deleted file mode 100644 index f7e7a7f..0000000 --- a/deepspeed/ops/csrc/transformer/gelu_kernels.hip +++ /dev/null @@ -1,332 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
-#include "hip/hip_runtime.h" -#include "custom_hip_layers.h" - -inline __device__ float gelu(const float x) -{ - const float sqrt_param = 0.79788456080286535587989211986876f; - const float mul_param = 0.044715; - return x * 0.5f * (1.0f + tanhf(sqrt_param * (x + mul_param * x * x * x))); -} - -inline __device__ float d_gelu(const float x) -{ - const float sqrt_param = 0.79788456080286535587989211986876f; - const float mul_param = 0.044715; - - float x2mul = x * x * mul_param; - float tan_h = tanhf(sqrt_param * (x + x * x2mul)); - float dg1 = 0.5f * (1.0f + tan_h); - float dg2 = x * 0.5f * sqrt_param * (1 - tan_h * tan_h); - float dg3 = dg2 * 3 * x2mul; - return (dg1 + dg2 + dg3); -} - -/* -Fused bias add with GELU - -Loads a vector of 4 elements each iteration, for stride -iterations. It was written with the intention to launch 256 thread -threadblocks, so to launch for bert-large, we would set ITERATIONS -to 4. This is currently done automatically as a heuristic, setting -the number of iterations as blocks of 1024. - -For FP16, the values are loaded from memory as __half, but converted -to FP32 for the arithmetic itself, to prevent numerous overflow on -the intermediate hyperbolic tangent, since there's no intrinsic -that computes it directly. 
-*/ - -__global__ void gelu_kernel(const float* input, float* vals, int row_stride, int iterations) -{ - int row = blockIdx.x; - int id = threadIdx.x; - int loop_stride = blockDim.x; - - const float4* input_cast = reinterpret_cast(input); - float4* vals_cast = reinterpret_cast(vals); - - for (int i = 0; i < iterations; i++) { - if (i * loop_stride + id < row_stride) { - float4 data = input_cast[row * row_stride + i * loop_stride + id]; - - data.x = gelu(data.x); - data.y = gelu(data.y); - data.z = gelu(data.z); - data.w = gelu(data.w); - - vals_cast[row * row_stride + i * loop_stride + id] = data; - } - } -} - -__global__ void gelu_kernel(const __half* input, __half* vals, int row_stride, int iterations) -{ -#ifdef HALF_PRECISION_AVAILABLE - int row = blockIdx.x; - int id = threadIdx.x; - int loop_stride = blockDim.x; - - const float2* input_cast = reinterpret_cast(input); - float2* vals_cast = reinterpret_cast(vals); - - for (int i = 0; i < iterations; i++) { - if (i * loop_stride + id < row_stride) { - float2 vals_vec = input_cast[row * row_stride + i * loop_stride + id]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - low_data.x = gelu(low_data.x); - low_data.y = gelu(low_data.y); - high_data.x = gelu(high_data.x); - high_data.y = gelu(high_data.y); - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - vals_cast[row * row_stride + i * loop_stride + id] = vals_vec; - } - } -#endif -} - -__global__ void fused_bias_gelu(const float* input, - const float* bias, - float* vals, - int row_stride, - int iterations) -{ - int row = blockIdx.x; - int id = threadIdx.x; - int loop_stride = blockDim.x; - - const float4* input_cast = reinterpret_cast(input); - float4* vals_cast = reinterpret_cast(vals); - const float4* bias_cast = reinterpret_cast(bias); - - for (int i = 0; i < iterations; i++) { - if (i * 
loop_stride + id < row_stride) { - float4 data = input_cast[row * row_stride + i * loop_stride + id]; - float4 bias_data = bias_cast[i * loop_stride + id]; - - data.x += bias_data.x; - data.y += bias_data.y; - data.z += bias_data.z; - data.w += bias_data.w; - - data.x = gelu(data.x); - data.y = gelu(data.y); - data.z = gelu(data.z); - data.w = gelu(data.w); - - vals_cast[row * row_stride + i * loop_stride + id] = data; - } - } -} - -__global__ void fused_bias_gelu(const __half* input, - const __half* bias, - __half* vals, - int row_stride, - int iterations) -{ -#ifdef HALF_PRECISION_AVAILABLE - int row = blockIdx.x; - int id = threadIdx.x; - int loop_stride = blockDim.x; - - const float2* input_cast = reinterpret_cast(input); - float2* vals_cast = reinterpret_cast(vals); - const float2* bias_cast = reinterpret_cast(bias); - - for (int i = 0; i < iterations; i++) { - if (i * loop_stride + id < row_stride) { - float2 vals_vec = input_cast[row * row_stride + i * loop_stride + id]; - float2 bias_vec = bias_cast[i * loop_stride + id]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - low_data.x += low_bias.x; - low_data.y += low_bias.y; - high_data.x += high_bias.x; - high_data.y += high_bias.y; - - low_data.x = gelu(low_data.x); - low_data.y = gelu(low_data.y); - high_data.x = gelu(high_data.x); - high_data.y = gelu(high_data.y); - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - vals_cast[row * row_stride + i * loop_stride + id] = vals_vec; - } - } -#endif -} - -__global__ void d_gelu_func(float* d_output, - const float* gelu_input, - const float* bias, - int row_stride, - int iterations) -{ - int row = blockIdx.x; - int id = threadIdx.x; - 
int loop_stride = blockDim.x; - - float4* d_output_cast = reinterpret_cast(d_output); - const float4* gelu_input_cast = reinterpret_cast(gelu_input); - const float4* bias_cast = reinterpret_cast(bias); - - for (int i = 0; i < iterations; i++) { - if (i * loop_stride + id < row_stride) { - float4 output_data = d_output_cast[row * row_stride + i * loop_stride + id]; - float4 gelu_input_data = gelu_input_cast[row * row_stride + i * loop_stride + id]; - float4 bias_data = bias_cast[i * loop_stride + id]; - - gelu_input_data.x += bias_data.x; - gelu_input_data.y += bias_data.y; - gelu_input_data.z += bias_data.z; - gelu_input_data.w += bias_data.w; - - output_data.x *= d_gelu(gelu_input_data.x); - output_data.y *= d_gelu(gelu_input_data.y); - output_data.z *= d_gelu(gelu_input_data.z); - output_data.w *= d_gelu(gelu_input_data.w); - - d_output_cast[row * row_stride + i * loop_stride + id] = output_data; - } - } -} - -__global__ void d_gelu_func(__half* d_output, - const __half* gelu_input, - const __half* bias, - int row_stride, - int iterations) -{ -#ifdef HALF_PRECISION_AVAILABLE - int row = blockIdx.x; - int id = threadIdx.x; - int loop_stride = blockDim.x; - - float2* d_output_cast = reinterpret_cast(d_output); - const float2* gelu_input_cast = reinterpret_cast(gelu_input); - const float2* bias_cast = reinterpret_cast(bias); - -#pragma unroll - for (int i = 0; i < iterations; i++) { - if (i * loop_stride + id < row_stride) { - float2 output_data = d_output_cast[row * row_stride + i * loop_stride + id]; - float2 gelu_input_data = gelu_input_cast[row * row_stride + i * loop_stride + id]; - float2 bias_vec = bias_cast[i * loop_stride + id]; - - __half2* output_data_half = reinterpret_cast<__half2*>(&output_data); - __half2* gelu_input_data_half = reinterpret_cast<__half2*>(&gelu_input_data); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - - float2 output_half_0 = __half22float2(output_data_half[0]); - float2 output_half_1 = 
__half22float2(output_data_half[1]); - - float2 gelu_input_half_0 = __half22float2(gelu_input_data_half[0]); - float2 gelu_input_half_1 = __half22float2(gelu_input_data_half[1]); - - float2 bias_half_0 = __half22float2(bias_half[0]); - float2 bias_half_1 = __half22float2(bias_half[1]); - - gelu_input_half_0.x += bias_half_0.x; - gelu_input_half_0.y += bias_half_0.y; - gelu_input_half_1.x += bias_half_1.x; - gelu_input_half_1.y += bias_half_1.y; - - output_half_0.x *= d_gelu(gelu_input_half_0.x); - output_half_0.y *= d_gelu(gelu_input_half_0.y); - output_half_1.x *= d_gelu(gelu_input_half_1.x); - output_half_1.y *= d_gelu(gelu_input_half_1.y); - - float2 result; - __half2* result_half2 = reinterpret_cast<__half2*>(&result); - - result_half2[0] = __float22half2_rn(output_half_0); - result_half2[1] = __float22half2_rn(output_half_1); - - d_output_cast[row * row_stride + i * loop_stride + id] = result; - } - } -#endif -} - -template -void launch_bias_gelu(const T* input, - const T* bias, - T* output, - int intermediate_size, - int batch_size, - hipStream_t stream) -{ - int iterations = (intermediate_size + 1023) / 1024; - int threads = (intermediate_size - 1) / (iterations * 4) + 1; - dim3 block_dims(threads); - dim3 grid_dims(batch_size); - - hipLaunchKernelGGL(( fused_bias_gelu), dim3(grid_dims), dim3(block_dims), 0, stream, - input, bias, output, intermediate_size / 4, iterations); -} - -template -void launch_gelu(const T* input, - T* output, - int intermediate_size, - int batch_size, - hipStream_t stream) -{ - int iterations = (intermediate_size + 1023) / 1024; - int threads = (intermediate_size - 1) / (iterations * 4) + 1; - dim3 block_dims(threads); - dim3 grid_dims(batch_size); - - hipLaunchKernelGGL(( gelu_kernel), dim3(grid_dims), dim3(block_dims), 0, stream, - input, output, intermediate_size / 4, iterations); -} - -template void launch_bias_gelu(const float*, const float*, float*, int, int, hipStream_t); -template void launch_bias_gelu<__half>(const __half*, 
- const __half*, - __half*, - int, - int, - hipStream_t); - -template void launch_gelu(const float*, float*, int, int, hipStream_t); -template void launch_gelu<__half>(const __half*, __half*, int, int, hipStream_t); - -template -void launch_d_gelu(T* d_output, - const T* input, - const T* bias, - int intermediate_size, - int batch_size, - hipStream_t stream) -{ - int iterations = (intermediate_size + 1023) / 1024; - int threads = (intermediate_size - 1) / (iterations * 4) + 1; - dim3 block_dims(threads); - dim3 grid_dims(batch_size); - - hipLaunchKernelGGL(( d_gelu_func), dim3(grid_dims), dim3(block_dims), 0, stream, - d_output, input, bias, intermediate_size / 4, iterations); -} - -template void launch_d_gelu(float*, const float*, const float*, int, int, hipStream_t); -template void launch_d_gelu<__half>(__half*, const __half*, const __half*, int, int, hipStream_t); diff --git a/deepspeed/ops/csrc/transformer/general_kernels.cu b/deepspeed/ops/csrc/transformer/general_kernels.cu deleted file mode 100644 index 1eaa94e..0000000 --- a/deepspeed/ops/csrc/transformer/general_kernels.cu +++ /dev/null @@ -1,411 +0,0 @@ -#include "general_kernels.h" - -namespace cg = cooperative_groups; - -template -__global__ void column_sum_reduce(const T* __restrict__ inp, - T* __restrict__ out, - int rows, - int width) -{ - __shared__ float tile[TILE_DIM][TILE_DIM + 1]; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int idx = blockDim.x * blockIdx.x + threadIdx.x; - - int y_stride = width * TILE_DIM; - - float localSum = 0; - - // Loop across matrix height - if (idx < width) { - int offset = threadIdx.y * width + idx; - for (int r = threadIdx.y; r < rows; r += TILE_DIM) { - localSum += (float)inp[offset]; - offset += y_stride; - } - } - - tile[threadIdx.x][threadIdx.y] = localSum; - - __syncthreads(); - - // Sum the shared buffer. 
- float sum = tile[threadIdx.y][threadIdx.x]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < TILE_DIM; i <<= 1) sum += g.shfl_down(sum, i); - - if (threadIdx.x == 0) { - int pos = blockIdx.x * TILE_DIM + threadIdx.y; - if (pos < width) out[pos] = sum; - } -} - -template -void launch_fuse_transpose_bias_kernel(const T* inp, - T* out, - int rows, - int cols, - cudaStream_t stream); - -template <> -void launch_fuse_transpose_bias_kernel(const float* inp, - float* out, - int rows, - int cols, - cudaStream_t stream) -{ - // assert(rows % TILE_DIM == 0); - // assert(cols % TILE_DIM == 0); - - dim3 grid_dim((cols - 1) / TILE_DIM + 1); - dim3 block_dim(TILE_DIM, TILE_DIM); - - column_sum_reduce<<>>(inp, out, rows, cols); -} - -template <> -void launch_fuse_transpose_bias_kernel<__half>(const __half* inp, - __half* out, - int rows, - int cols, - cudaStream_t stream) -{ - // assert(rows % TILE_DIM == 0); - // assert(cols % TILE_DIM == 0); - - dim3 grid_dim((cols - 1) / TILE_DIM + 1); - dim3 block_dim(TILE_DIM, TILE_DIM); - - column_sum_reduce<__half><<>>(inp, out, rows, cols); -} - -__global__ void fused_add2_kernel(const int N, float* out, const float* inp1, const float* inp2) -{ - const float4* inp1_4 = reinterpret_cast(inp1); - const float4* inp2_4 = reinterpret_cast(inp2); - float4* out_4 = reinterpret_cast(out); - - CUDA_1D_KERNEL_LOOP(j, N) - { - float4 val; - float4 inp1_reg = inp1_4[j]; - float4 inp2_reg = inp2_4[j]; - - val.x = inp1_reg.x + inp2_reg.x; - val.y = inp1_reg.y + inp2_reg.y; - val.z = inp1_reg.z + inp2_reg.z; - val.w = inp1_reg.w + inp2_reg.w; - - out_4[j] = val; - } -} - -__global__ void fused_add2_kernel(const int N, __half* out, const __half* inp1, const __half* inp2) -{ - float2 inp1_4; - float2 inp2_4; - - __half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4); - __half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4); - - const float2* inp1_arr = reinterpret_cast(inp1); - const float2* inp2_arr = 
reinterpret_cast(inp2); - - CUDA_1D_KERNEL_LOOP(j, N) - { - inp1_4 = inp1_arr[j]; - inp2_4 = inp2_arr[j]; - - float2 inp1_h_f_0 = __half22float2(inp1_h[0]); - float2 inp1_h_f_1 = __half22float2(inp1_h[1]); - - float2 inp2_h_f_0 = __half22float2(inp2_h[0]); - float2 inp2_h_f_1 = __half22float2(inp2_h[1]); - - inp1_h_f_0.x += inp2_h_f_0.x; - inp1_h_f_0.y += inp2_h_f_0.y; - inp1_h_f_1.x += inp2_h_f_1.x; - inp1_h_f_1.y += inp2_h_f_1.y; - - float2 val_f; - __half2* val_h = reinterpret_cast<__half2*>(&val_f); - - val_h[0] = __float22half2_rn(inp1_h_f_0); - val_h[1] = __float22half2_rn(inp1_h_f_1); - - float2* out_4 = reinterpret_cast(out); - out_4[j] = val_f; - } -} - -template <> -void launch_fused_add2(float* out, - const float* inp1, - const float* inp2, - int batch_size, - int seq_length, - int hidden_dim, - cudaStream_t& stream) -{ - int total_count = batch_size * seq_length * hidden_dim / 4; - dim3 grid_dim = DS_GET_BLOCKS(total_count); //(batch_size * seq_length); - - dim3 block_dim = DS_CUDA_NUM_THREADS; //(hidden_dim / 4); - - fused_add2_kernel<<>>(total_count, out, inp1, inp2); -} - -template <> -void launch_fused_add2<__half>(__half* out, - const __half* inp1, - const __half* inp2, - int batch_size, - int seq_length, - int hidden_dim, - cudaStream_t& stream) -{ - int total_count = batch_size * seq_length * hidden_dim / 4; - dim3 grid_dim = DS_GET_BLOCKS(total_count); //(batch_size * seq_length); - - dim3 block_dim = DS_CUDA_NUM_THREADS; //(hidden_dim / 4); - - fused_add2_kernel<<>>(total_count, out, inp1, inp2); -} - -__global__ void fused_add3_kernel(float* out, - const float* inp1, - const float* inp2, - const float* inp3, - int size, - int row_stride) -{ - int row = blockIdx.x; - int id = threadIdx.x; - - const float4* inp1_4 = reinterpret_cast(inp1); - const float4* inp2_4 = reinterpret_cast(inp2); - const float4* inp3_4 = reinterpret_cast(inp3); - - float4* out_4 = reinterpret_cast(out); - - float4 val; - float4 inp1_reg = inp1_4[row * row_stride + id]; - 
float4 inp2_reg = inp2_4[row * row_stride + id]; - float4 inp3_reg = inp3_4[row * row_stride + id]; - - val.x = inp1_reg.x + inp2_reg.x + inp3_reg.x; - val.y = inp1_reg.y + inp2_reg.y + inp3_reg.y; - val.z = inp1_reg.z + inp2_reg.z + inp3_reg.z; - val.w = inp1_reg.w + inp2_reg.w + inp3_reg.w; - - out_4[row * row_stride + id] = val; -} - -__global__ void fused_add3_kernel(__half* out, - const __half* inp1, - const __half* inp2, - const __half* inp3, - int size, - int row_stride) -{ - int row = blockIdx.x; - int id = threadIdx.x; - const float2* inp1_arr = reinterpret_cast(inp1); - const float2* inp2_arr = reinterpret_cast(inp2); - const float2* inp3_arr = reinterpret_cast(inp3); - - float2 inp1_4 = inp1_arr[row * row_stride + id]; - float2 inp2_4 = inp2_arr[row * row_stride + id]; - float2 inp3_4 = inp3_arr[row * row_stride + id]; - - __half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4); - __half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4); - __half2* inp3_h = reinterpret_cast<__half2*>(&inp3_4); - - float2 inp1_h_f_0 = __half22float2(inp1_h[0]); - float2 inp1_h_f_1 = __half22float2(inp1_h[1]); - - float2 inp2_h_f_0 = __half22float2(inp2_h[0]); - float2 inp2_h_f_1 = __half22float2(inp2_h[1]); - - float2 inp3_h_f_0 = __half22float2(inp3_h[0]); - float2 inp3_h_f_1 = __half22float2(inp3_h[1]); - - inp1_h_f_0.x += (inp2_h_f_0.x + inp3_h_f_0.x); - inp1_h_f_0.y += (inp2_h_f_0.y + inp3_h_f_0.y); - inp1_h_f_1.x += (inp2_h_f_1.x + inp3_h_f_1.x); - inp1_h_f_1.y += (inp2_h_f_1.y + inp3_h_f_1.y); - - float2 val_f; - __half2* val_h = reinterpret_cast<__half2*>(&val_f); - - val_h[0] = __float22half2_rn(inp1_h_f_0); - val_h[1] = __float22half2_rn(inp1_h_f_1); - - float2* out_4 = reinterpret_cast(out); - out_4[row * row_stride + id] = val_f; -} - -template <> -void launch_fused_add3(float* out, - const float* inp1, - const float* inp2, - const float* inp3, - int batch_size, - int seq_length, - int hidden_size, - cudaStream_t& stream) -{ - dim3 grid_dim(batch_size * seq_length); 
- - dim3 block_dim(hidden_size / 4); - - fused_add3_kernel<<>>( - out, inp1, inp2, inp3, (batch_size * seq_length * hidden_size), hidden_size / 4); -} - -template <> -void launch_fused_add3<__half>(__half* out, - const __half* inp1, - const __half* inp2, - const __half* inp3, - int batch_size, - int seq_length, - int hidden_size, - cudaStream_t& stream) -{ - dim3 grid_dim(batch_size * seq_length); - - dim3 block_dim(hidden_size / 4); - - fused_add3_kernel<<>>( - out, inp1, inp2, inp3, (batch_size * seq_length * hidden_size), hidden_size / 4); -} - -__global__ void fused_add4_kernel(float* out, - const float* inp1, - const float* inp2, - const float* inp3, - const float* inp4, - int size, - int row_stride) -{ - int row = blockIdx.x; - int id = threadIdx.x; - - const float4* inp1_4 = reinterpret_cast(inp1); - const float4* inp2_4 = reinterpret_cast(inp2); - const float4* inp3_4 = reinterpret_cast(inp3); - const float4* inp4_4 = reinterpret_cast(inp4); - float4* out_4 = reinterpret_cast(out); - - float4 val; - float4 inp1_reg = inp1_4[row * row_stride + id]; - float4 inp2_reg = inp2_4[row * row_stride + id]; - float4 inp3_reg = inp3_4[row * row_stride + id]; - float4 inp4_reg = inp4_4[row * row_stride + id]; - - val.x = inp1_reg.x + inp2_reg.x + inp3_reg.x + inp4_reg.x; - val.y = inp1_reg.y + inp2_reg.y + inp3_reg.y + inp4_reg.y; - val.z = inp1_reg.z + inp2_reg.z + inp3_reg.z + inp4_reg.z; - val.w = inp1_reg.w + inp2_reg.w + inp3_reg.w + inp4_reg.w; - - out_4[row * row_stride + id] = val; -} - -__global__ void fused_add4_kernel(__half* out, - const __half* inp1, - const __half* inp2, - const __half* inp3, - const __half* inp4, - int size, - int row_stride) -{ - int row = blockIdx.x; - int id = threadIdx.x; - const float2* inp1_arr = reinterpret_cast(inp1); - const float2* inp2_arr = reinterpret_cast(inp2); - const float2* inp3_arr = reinterpret_cast(inp3); - const float2* inp4_arr = reinterpret_cast(inp4); - - float2 inp1_4 = inp1_arr[row * row_stride + id]; - float2 
inp2_4 = inp2_arr[row * row_stride + id]; - float2 inp3_4 = inp3_arr[row * row_stride + id]; - float2 inp4_4 = inp4_arr[row * row_stride + id]; - - __half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4); - __half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4); - __half2* inp3_h = reinterpret_cast<__half2*>(&inp3_4); - __half2* inp4_h = reinterpret_cast<__half2*>(&inp4_4); - - float2 inp1_h_f_0 = __half22float2(inp1_h[0]); - float2 inp1_h_f_1 = __half22float2(inp1_h[1]); - - float2 inp2_h_f_0 = __half22float2(inp2_h[0]); - float2 inp2_h_f_1 = __half22float2(inp2_h[1]); - - float2 inp3_h_f_0 = __half22float2(inp3_h[0]); - float2 inp3_h_f_1 = __half22float2(inp3_h[1]); - - float2 inp4_h_f_0 = __half22float2(inp4_h[0]); - float2 inp4_h_f_1 = __half22float2(inp4_h[1]); - - inp1_h_f_0.x += (inp2_h_f_0.x + inp3_h_f_0.x + inp4_h_f_0.x); - inp1_h_f_0.y += (inp2_h_f_0.y + inp3_h_f_0.y + inp4_h_f_0.y); - inp1_h_f_1.x += (inp2_h_f_1.x + inp3_h_f_1.x + inp4_h_f_1.x); - inp1_h_f_1.y += (inp2_h_f_1.y + inp3_h_f_1.y + inp4_h_f_1.y); - - float2 val_f; - __half2* val_h = reinterpret_cast<__half2*>(&val_f); - - val_h[0] = __float22half2_rn(inp1_h_f_0); - val_h[1] = __float22half2_rn(inp1_h_f_1); - - float2* out_4 = reinterpret_cast(out); - out_4[row * row_stride + id] = val_f; -} - -template <> -void launch_fused_add4(float* out, - const float* inp1, - const float* inp2, - const float* inp3, - const float* inp4, - int batch_size, - int seq_length, - int hidden_size, - cudaStream_t& stream) -{ - dim3 grid_dim(batch_size * seq_length); - - dim3 block_dim(hidden_size / 4); - - fused_add4_kernel<<>>( - out, inp1, inp2, inp3, inp4, (batch_size * seq_length * hidden_size), hidden_size / 4); -} - -template <> -void launch_fused_add4<__half>(__half* out, - const __half* inp1, - const __half* inp2, - const __half* inp3, - const __half* inp4, - int batch_size, - int seq_length, - int hidden_size, - cudaStream_t& stream) -{ - dim3 grid_dim(batch_size * seq_length); - - dim3 block_dim(hidden_size 
/ 4); - - fused_add4_kernel<<>>( - out, inp1, inp2, inp3, inp4, (batch_size * seq_length * hidden_size), hidden_size / 4); -} diff --git a/deepspeed/ops/csrc/transformer/general_kernels.hip b/deepspeed/ops/csrc/transformer/general_kernels.hip deleted file mode 100644 index 5be2fc2..0000000 --- a/deepspeed/ops/csrc/transformer/general_kernels.hip +++ /dev/null @@ -1,413 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! -#include "hip/hip_runtime.h" -#include "general_kernels_hip.h" - -namespace cg = cooperative_groups; - -template -__global__ void column_sum_reduce(const T* __restrict__ inp, - T* __restrict__ out, - int rows, - int width) -{ - __shared__ float tile[TILE_DIM][TILE_DIM + 1]; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int idx = blockDim.x * blockIdx.x + threadIdx.x; - - int y_stride = width * TILE_DIM; - - float localSum = 0; - - // Loop across matrix height - if (idx < width) { - int offset = threadIdx.y * width + idx; - for (int r = threadIdx.y; r < rows; r += TILE_DIM) { - localSum += (float)inp[offset]; - offset += y_stride; - } - } - - tile[threadIdx.x][threadIdx.y] = localSum; - - __syncthreads(); - - // Sum the shared buffer. 
- float sum = tile[threadIdx.y][threadIdx.x]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < TILE_DIM; i <<= 1) sum += g.shfl_down(sum, i); - - if (threadIdx.x == 0) { - int pos = blockIdx.x * TILE_DIM + threadIdx.y; - if (pos < width) out[pos] = sum; - } -} - -template -void launch_fuse_transpose_bias_kernel(const T* inp, - T* out, - int rows, - int cols, - hipStream_t stream); - -template <> -void launch_fuse_transpose_bias_kernel(const float* inp, - float* out, - int rows, - int cols, - hipStream_t stream) -{ - // assert(rows % TILE_DIM == 0); - // assert(cols % TILE_DIM == 0); - - dim3 grid_dim((cols - 1) / TILE_DIM + 1); - dim3 block_dim(TILE_DIM, TILE_DIM); - - hipLaunchKernelGGL(( column_sum_reduce), dim3(grid_dim), dim3(block_dim), 0, stream, inp, out, rows, cols); -} - -template <> -void launch_fuse_transpose_bias_kernel<__half>(const __half* inp, - __half* out, - int rows, - int cols, - hipStream_t stream) -{ - // assert(rows % TILE_DIM == 0); - // assert(cols % TILE_DIM == 0); - - dim3 grid_dim((cols - 1) / TILE_DIM + 1); - dim3 block_dim(TILE_DIM, TILE_DIM); - - hipLaunchKernelGGL(( column_sum_reduce<__half>), dim3(grid_dim), dim3(block_dim), 0, stream, inp, out, rows, cols); -} - -__global__ void fused_add2_kernel(const int N, float* out, const float* inp1, const float* inp2) -{ - const float4* inp1_4 = reinterpret_cast(inp1); - const float4* inp2_4 = reinterpret_cast(inp2); - float4* out_4 = reinterpret_cast(out); - - CUDA_1D_KERNEL_LOOP(j, N) - { - float4 val; - float4 inp1_reg = inp1_4[j]; - float4 inp2_reg = inp2_4[j]; - - val.x = inp1_reg.x + inp2_reg.x; - val.y = inp1_reg.y + inp2_reg.y; - val.z = inp1_reg.z + inp2_reg.z; - val.w = inp1_reg.w + inp2_reg.w; - - out_4[j] = val; - } -} - -__global__ void fused_add2_kernel(const int N, __half* out, const __half* inp1, const __half* inp2) -{ - float2 inp1_4; - float2 inp2_4; - - __half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4); - __half2* inp2_h = 
reinterpret_cast<__half2*>(&inp2_4); - - const float2* inp1_arr = reinterpret_cast(inp1); - const float2* inp2_arr = reinterpret_cast(inp2); - - CUDA_1D_KERNEL_LOOP(j, N) - { - inp1_4 = inp1_arr[j]; - inp2_4 = inp2_arr[j]; - - float2 inp1_h_f_0 = __half22float2(inp1_h[0]); - float2 inp1_h_f_1 = __half22float2(inp1_h[1]); - - float2 inp2_h_f_0 = __half22float2(inp2_h[0]); - float2 inp2_h_f_1 = __half22float2(inp2_h[1]); - - inp1_h_f_0.x += inp2_h_f_0.x; - inp1_h_f_0.y += inp2_h_f_0.y; - inp1_h_f_1.x += inp2_h_f_1.x; - inp1_h_f_1.y += inp2_h_f_1.y; - - float2 val_f; - __half2* val_h = reinterpret_cast<__half2*>(&val_f); - - val_h[0] = __float22half2_rn(inp1_h_f_0); - val_h[1] = __float22half2_rn(inp1_h_f_1); - - float2* out_4 = reinterpret_cast(out); - out_4[j] = val_f; - } -} - -template <> -void launch_fused_add2(float* out, - const float* inp1, - const float* inp2, - int batch_size, - int seq_length, - int hidden_dim, - hipStream_t& stream) -{ - int total_count = batch_size * seq_length * hidden_dim / 4; - dim3 grid_dim = DS_GET_BLOCKS(total_count); //(batch_size * seq_length); - - dim3 block_dim = DS_CUDA_NUM_THREADS; //(hidden_dim / 4); - - hipLaunchKernelGGL(( fused_add2_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, total_count, out, inp1, inp2); -} - -template <> -void launch_fused_add2<__half>(__half* out, - const __half* inp1, - const __half* inp2, - int batch_size, - int seq_length, - int hidden_dim, - hipStream_t& stream) -{ - int total_count = batch_size * seq_length * hidden_dim / 4; - dim3 grid_dim = DS_GET_BLOCKS(total_count); //(batch_size * seq_length); - - dim3 block_dim = DS_CUDA_NUM_THREADS; //(hidden_dim / 4); - - hipLaunchKernelGGL(( fused_add2_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, total_count, out, inp1, inp2); -} - -__global__ void fused_add3_kernel(float* out, - const float* inp1, - const float* inp2, - const float* inp3, - int size, - int row_stride) -{ - int row = blockIdx.x; - int id = threadIdx.x; - - const float4* 
inp1_4 = reinterpret_cast(inp1); - const float4* inp2_4 = reinterpret_cast(inp2); - const float4* inp3_4 = reinterpret_cast(inp3); - - float4* out_4 = reinterpret_cast(out); - - float4 val; - float4 inp1_reg = inp1_4[row * row_stride + id]; - float4 inp2_reg = inp2_4[row * row_stride + id]; - float4 inp3_reg = inp3_4[row * row_stride + id]; - - val.x = inp1_reg.x + inp2_reg.x + inp3_reg.x; - val.y = inp1_reg.y + inp2_reg.y + inp3_reg.y; - val.z = inp1_reg.z + inp2_reg.z + inp3_reg.z; - val.w = inp1_reg.w + inp2_reg.w + inp3_reg.w; - - out_4[row * row_stride + id] = val; -} - -__global__ void fused_add3_kernel(__half* out, - const __half* inp1, - const __half* inp2, - const __half* inp3, - int size, - int row_stride) -{ - int row = blockIdx.x; - int id = threadIdx.x; - const float2* inp1_arr = reinterpret_cast(inp1); - const float2* inp2_arr = reinterpret_cast(inp2); - const float2* inp3_arr = reinterpret_cast(inp3); - - float2 inp1_4 = inp1_arr[row * row_stride + id]; - float2 inp2_4 = inp2_arr[row * row_stride + id]; - float2 inp3_4 = inp3_arr[row * row_stride + id]; - - __half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4); - __half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4); - __half2* inp3_h = reinterpret_cast<__half2*>(&inp3_4); - - float2 inp1_h_f_0 = __half22float2(inp1_h[0]); - float2 inp1_h_f_1 = __half22float2(inp1_h[1]); - - float2 inp2_h_f_0 = __half22float2(inp2_h[0]); - float2 inp2_h_f_1 = __half22float2(inp2_h[1]); - - float2 inp3_h_f_0 = __half22float2(inp3_h[0]); - float2 inp3_h_f_1 = __half22float2(inp3_h[1]); - - inp1_h_f_0.x += (inp2_h_f_0.x + inp3_h_f_0.x); - inp1_h_f_0.y += (inp2_h_f_0.y + inp3_h_f_0.y); - inp1_h_f_1.x += (inp2_h_f_1.x + inp3_h_f_1.x); - inp1_h_f_1.y += (inp2_h_f_1.y + inp3_h_f_1.y); - - float2 val_f; - __half2* val_h = reinterpret_cast<__half2*>(&val_f); - - val_h[0] = __float22half2_rn(inp1_h_f_0); - val_h[1] = __float22half2_rn(inp1_h_f_1); - - float2* out_4 = reinterpret_cast(out); - out_4[row * row_stride + id] = 
val_f; -} - -template <> -void launch_fused_add3(float* out, - const float* inp1, - const float* inp2, - const float* inp3, - int batch_size, - int seq_length, - int hidden_size, - hipStream_t& stream) -{ - dim3 grid_dim(batch_size * seq_length); - - dim3 block_dim(hidden_size / 4); - - hipLaunchKernelGGL(( fused_add3_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, - out, inp1, inp2, inp3, (batch_size * seq_length * hidden_size), hidden_size / 4); -} - -template <> -void launch_fused_add3<__half>(__half* out, - const __half* inp1, - const __half* inp2, - const __half* inp3, - int batch_size, - int seq_length, - int hidden_size, - hipStream_t& stream) -{ - dim3 grid_dim(batch_size * seq_length); - - dim3 block_dim(hidden_size / 4); - - hipLaunchKernelGGL(( fused_add3_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, - out, inp1, inp2, inp3, (batch_size * seq_length * hidden_size), hidden_size / 4); -} - -__global__ void fused_add4_kernel(float* out, - const float* inp1, - const float* inp2, - const float* inp3, - const float* inp4, - int size, - int row_stride) -{ - int row = blockIdx.x; - int id = threadIdx.x; - - const float4* inp1_4 = reinterpret_cast(inp1); - const float4* inp2_4 = reinterpret_cast(inp2); - const float4* inp3_4 = reinterpret_cast(inp3); - const float4* inp4_4 = reinterpret_cast(inp4); - float4* out_4 = reinterpret_cast(out); - - float4 val; - float4 inp1_reg = inp1_4[row * row_stride + id]; - float4 inp2_reg = inp2_4[row * row_stride + id]; - float4 inp3_reg = inp3_4[row * row_stride + id]; - float4 inp4_reg = inp4_4[row * row_stride + id]; - - val.x = inp1_reg.x + inp2_reg.x + inp3_reg.x + inp4_reg.x; - val.y = inp1_reg.y + inp2_reg.y + inp3_reg.y + inp4_reg.y; - val.z = inp1_reg.z + inp2_reg.z + inp3_reg.z + inp4_reg.z; - val.w = inp1_reg.w + inp2_reg.w + inp3_reg.w + inp4_reg.w; - - out_4[row * row_stride + id] = val; -} - -__global__ void fused_add4_kernel(__half* out, - const __half* inp1, - const __half* inp2, - const __half* inp3, 
- const __half* inp4, - int size, - int row_stride) -{ - int row = blockIdx.x; - int id = threadIdx.x; - const float2* inp1_arr = reinterpret_cast(inp1); - const float2* inp2_arr = reinterpret_cast(inp2); - const float2* inp3_arr = reinterpret_cast(inp3); - const float2* inp4_arr = reinterpret_cast(inp4); - - float2 inp1_4 = inp1_arr[row * row_stride + id]; - float2 inp2_4 = inp2_arr[row * row_stride + id]; - float2 inp3_4 = inp3_arr[row * row_stride + id]; - float2 inp4_4 = inp4_arr[row * row_stride + id]; - - __half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4); - __half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4); - __half2* inp3_h = reinterpret_cast<__half2*>(&inp3_4); - __half2* inp4_h = reinterpret_cast<__half2*>(&inp4_4); - - float2 inp1_h_f_0 = __half22float2(inp1_h[0]); - float2 inp1_h_f_1 = __half22float2(inp1_h[1]); - - float2 inp2_h_f_0 = __half22float2(inp2_h[0]); - float2 inp2_h_f_1 = __half22float2(inp2_h[1]); - - float2 inp3_h_f_0 = __half22float2(inp3_h[0]); - float2 inp3_h_f_1 = __half22float2(inp3_h[1]); - - float2 inp4_h_f_0 = __half22float2(inp4_h[0]); - float2 inp4_h_f_1 = __half22float2(inp4_h[1]); - - inp1_h_f_0.x += (inp2_h_f_0.x + inp3_h_f_0.x + inp4_h_f_0.x); - inp1_h_f_0.y += (inp2_h_f_0.y + inp3_h_f_0.y + inp4_h_f_0.y); - inp1_h_f_1.x += (inp2_h_f_1.x + inp3_h_f_1.x + inp4_h_f_1.x); - inp1_h_f_1.y += (inp2_h_f_1.y + inp3_h_f_1.y + inp4_h_f_1.y); - - float2 val_f; - __half2* val_h = reinterpret_cast<__half2*>(&val_f); - - val_h[0] = __float22half2_rn(inp1_h_f_0); - val_h[1] = __float22half2_rn(inp1_h_f_1); - - float2* out_4 = reinterpret_cast(out); - out_4[row * row_stride + id] = val_f; -} - -template <> -void launch_fused_add4(float* out, - const float* inp1, - const float* inp2, - const float* inp3, - const float* inp4, - int batch_size, - int seq_length, - int hidden_size, - hipStream_t& stream) -{ - dim3 grid_dim(batch_size * seq_length); - - dim3 block_dim(hidden_size / 4); - - hipLaunchKernelGGL(( fused_add4_kernel), 
dim3(grid_dim), dim3(block_dim), 0, stream, - out, inp1, inp2, inp3, inp4, (batch_size * seq_length * hidden_size), hidden_size / 4); -} - -template <> -void launch_fused_add4<__half>(__half* out, - const __half* inp1, - const __half* inp2, - const __half* inp3, - const __half* inp4, - int batch_size, - int seq_length, - int hidden_size, - hipStream_t& stream) -{ - dim3 grid_dim(batch_size * seq_length); - - dim3 block_dim(hidden_size / 4); - - hipLaunchKernelGGL(( fused_add4_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, - out, inp1, inp2, inp3, inp4, (batch_size * seq_length * hidden_size), hidden_size / 4); -} diff --git a/deepspeed/ops/csrc/transformer/inference/csrc/apply_rotary_pos_emb.cu b/deepspeed/ops/csrc/transformer/inference/csrc/apply_rotary_pos_emb.cu deleted file mode 100644 index 175854b..0000000 --- a/deepspeed/ops/csrc/transformer/inference/csrc/apply_rotary_pos_emb.cu +++ /dev/null @@ -1,374 +0,0 @@ -#include "custom_cuda_layers.h" - -#ifndef __HIP_PLATFORM_HCC__ -#include -#endif - -namespace cg = cooperative_groups; - -__global__ void apply_rotary_pos_emb(float* mixed_query, - float* key_layer, - unsigned rotary_dim, - unsigned seq_len, - unsigned seq_offset, - unsigned num_heads, - unsigned head_size, - unsigned total_count) -{ - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int id = threadIdx.x; - int gid = id >> 5; - int lane = id & 0x1f; - - unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid; - unsigned offset = head_id * head_size; - - unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; - - if (head_id < total_count) { - while (lane < rotary_dim) { - float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim; - inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; - float q = mixed_query[offset + lane]; - float k = key_layer[offset + lane]; - float rotary_sign = (lane % 2 == 1 ? 
-1.0 : 1.0); - float q_rot = (q * rotary_sign); - float k_rot = (k * rotary_sign); - q_rot = g.shfl_xor(q_rot, 1); - k_rot = g.shfl_xor(k_rot, 1); - q = q * cosf(inv_freq) + q_rot * sinf(inv_freq); - k = k * cosf(inv_freq) + k_rot * sinf(inv_freq); - - mixed_query[offset + lane] = q; - key_layer[offset + lane] = k; - - lane += WARP_SIZE; - } - } -} - -__global__ void apply_rotary_pos_emb(__half* mixed_query, - __half* key_layer, - unsigned rotary_dim, - unsigned seq_len, - unsigned seq_offset, - unsigned num_heads, - unsigned head_size, - unsigned total_count) -{ -#if __CUDA_ARCH__ >= 700 - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int id = threadIdx.x; - int gid = id >> 5; - int lane = id & 0x1f; - - unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid; - unsigned offset = head_id * head_size; - - unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; - - if (head_id < total_count) { - while (lane < rotary_dim) { - float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim; - inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; - float q = (float)mixed_query[offset + lane]; - float k = (float)key_layer[offset + lane]; - float rotary_sign = (lane % 2 == 1 ? 
-1.0 : 1.0); - float q_rot = (q * rotary_sign); - float k_rot = (k * rotary_sign); - q_rot = g.shfl_xor(q_rot, 1); - k_rot = g.shfl_xor(k_rot, 1); - q = q * cosf(inv_freq) + q_rot * sinf(inv_freq); - k = k * cosf(inv_freq) + k_rot * sinf(inv_freq); - - mixed_query[offset + lane] = (__half)q; - key_layer[offset + lane] = (__half)k; - - lane += WARP_SIZE; - } - } -#endif -} -__global__ void apply_rotary_pos_emb1(float* mixed_query, - float* key_layer, - unsigned rotary_dim, - unsigned seq_len, - unsigned seq_offset, - unsigned num_heads, - unsigned head_size, - unsigned total_count) -{ - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int id = threadIdx.x; - int gid = id >> 5; - int lane = id & 0x1f; - - unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid; - unsigned offset = head_id * head_size; - - unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; - - if (head_id < total_count) { - while (lane < rotary_dim) { - float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim; - inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; - float q = mixed_query[offset + lane]; - float k = key_layer[offset + lane]; - float rotary_sign = (lane % 2 == 1 ? 
-1.0 : 1.0); - float q_rot = (q * rotary_sign); - float k_rot = (k * rotary_sign); - q_rot = g.shfl_xor(q_rot, 1); - k_rot = g.shfl_xor(k_rot, 1); - q = q * cosf(inv_freq) + q_rot * sinf(inv_freq); - k = k * cosf(inv_freq) + k_rot * sinf(inv_freq); - - mixed_query[offset + lane] = q; - key_layer[offset + lane] = k; - - lane += WARP_SIZE; - } - } -} -__global__ void apply_rotary_pos_emb1(__half* mixed_query, - __half* key_layer, - unsigned rotary_dim, - unsigned seq_len, - unsigned seq_offset, - unsigned num_heads, - unsigned head_size, - unsigned total_count) -{ -#if __CUDA_ARCH__ >= 700 - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int id = threadIdx.x; - int gid = id >> 5; - int lane = id & 0x1f; - - unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid; - unsigned offset = head_id * head_size; - - constexpr unsigned mask[32] = { - 0x1 | 0x1000, 0x2 | 0x2000, 0x4 | 0x4000, 0x8 | 0x8000, 0x10 | 0x10000, - 0x20 | 0x20000, 0x40 | 0x40000, 0x80 | 0x80000, 0x100 | 0x100000, 0x200 | 0x200000, - 0x400 | 0x400000, 0x800 | 0x800000, 0x1000 | 0x1, 0x2000 | 0x2, 0x4000 | 0x4, - 0x8000 | 0x8, 0x10000 | 0x10, 0x20000 | 0x20, 0x40000 | 0x40, 0x80000 | 0x80, - 0x100000 | 0x100, 0x200000 | 0x200, 0x400000 | 0x400, 0x800000 | 0x800, 0x1000000, - 0x2000000, 0x4000000, 0x8000000, 0x10000000, 0x20000000, - 0x40000000, 0x80000000}; - - unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; - unsigned half_dim = rotary_dim >> 1; - if (head_id < total_count) { - while (lane < rotary_dim) { - float inv_freq = (float)((lane % half_dim) * 2) / (float)rotary_dim; - inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; - float q = (float)mixed_query[offset + lane]; - float k = (float)key_layer[offset + lane]; - float rotary_sign = (lane > (half_dim - 1) ? -1.0 : 1.0); - float q_rot = (q * rotary_sign); - float k_rot = (k * rotary_sign); - auto q_rot_tmp = lane < half_dim ? 
__shfl_sync(mask[lane], q_rot, lane + half_dim) - : __shfl_sync(mask[lane], q_rot, lane - half_dim); - auto k_rot_tmp = lane < half_dim ? __shfl_sync(mask[lane], k_rot, lane + half_dim) - : __shfl_sync(mask[lane], k_rot, lane - half_dim); - q = q * cosf(inv_freq) + q_rot_tmp * sinf(inv_freq); - k = k * cosf(inv_freq) + k_rot_tmp * sinf(inv_freq); - - mixed_query[offset + lane] = (__half)q; - key_layer[offset + lane] = (__half)k; - - lane += WARP_SIZE; - } - } -#endif -} - -template -void launch_apply_rotary_pos_emb(T* mixed_query, - T* key_layer, - unsigned head_size, - unsigned seq_len, - unsigned rotary_dim, - unsigned offset, - unsigned num_heads, - unsigned batch, - bool rotate_half, - bool rotate_every_two, - cudaStream_t stream) -{ - int total_count = batch * num_heads * seq_len; - dim3 block_dims(1024); - dim3 grid_dims((total_count - 1) / MAX_WARP_NUM + 1); // (batch_size); - if (rotate_every_two) - apply_rotary_pos_emb<<>>( - mixed_query, key_layer, rotary_dim, seq_len, offset, num_heads, head_size, total_count); - else if (rotate_half) - apply_rotary_pos_emb1<<>>( - mixed_query, key_layer, rotary_dim, seq_len, offset, num_heads, head_size, total_count); -} - -template void launch_apply_rotary_pos_emb(float*, - float*, - unsigned, - unsigned, - unsigned, - unsigned, - unsigned, - unsigned, - bool, - bool, - cudaStream_t); -template void launch_apply_rotary_pos_emb<__half>(__half*, - __half*, - unsigned, - unsigned, - unsigned, - unsigned, - unsigned, - unsigned, - bool, - bool, - cudaStream_t); -/* -__global__ void apply_rotary_pos_emb(float* mixed_query, -float* key_layer, -unsigned rotary_dim, -unsigned seq_len, -unsigned seq_offset, -unsigned num_heads, -unsigned head_size, -unsigned total_count) -{ -cg::thread_block b = cg::this_thread_block(); -cg::thread_block_tile g = cg::tiled_partition(b); - -int id = threadIdx.x; -int gid = id >> 5; -int lane = id & 0x1f; - -unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid; -unsigned offset = head_id * 
head_size; - -unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; - -if (head_id < total_count) { -while (lane < rotary_dim) { -float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim; -inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; -float q = mixed_query[offset + lane]; -float k = key_layer[offset + lane]; -float rotary_sign = (lane % 2 == 1 ? -1.0 : 1.0); -float q_rot = (q * rotary_sign); -float k_rot = (k * rotary_sign); -q_rot = g.shfl_xor(q_rot, 1); -k_rot = g.shfl_xor(k_rot, 1); -q = q * cosf(inv_freq) + q_rot * sinf(inv_freq); -k = k * cosf(inv_freq) + k_rot * sinf(inv_freq); - -mixed_query[offset + lane] = q; -key_layer[offset + lane] = k; - -lane += WARP_SIZE; -} -} -} - -__global__ void apply_rotary_pos_emb(__half* mixed_query, -__half* key_layer, -unsigned rotary_dim, -unsigned seq_len, -unsigned seq_offset, -unsigned num_heads, -unsigned head_size, -unsigned total_count) -{ -#if __CUDA_ARCH__ >= 700 -cg::thread_block b = cg::this_thread_block(); -cg::thread_block_tile g = cg::tiled_partition(b); - -int id = threadIdx.x; -int gid = id >> 5; -int lane = id & 0x1f; - -unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid; -unsigned offset = head_id * head_size; -constexpr unsigned mask[32] = {0x1 | 0x1000, 0x2 | 0x2000, 0x4 | 0x4000, 0x8 | 0x8000, -0x10 | 0x10000, 0x20 | 0x20000, 0x40 | 0x40000, 0x80 | 0x80000, -0x100 | 0x100000, 0x200 | 0x200000, 0x400 | 0x400000, 0x800 | 0x800000, -0x1000 | 0x1, 0x2000 | 0x2, 0x4000 | 0x4, 0x8000 | 0x8, -0x10000 | 0x10, 0x20000 | 0x20, 0x40000 | 0x40, 0x80000 | 0x80, -0x100000 | 0x100, 0x200000 | 0x200, 0x400000 | 0x400, 0x800000 | 0x800, -0x1000000, 0x2000000, 0x4000000, 0x8000000, -0x10000000, 0x20000000, 0x40000000, 0x80000000}; -unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; - -if (head_id < total_count) { -while (lane < rotary_dim) { -//float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim; -float inv_freq = (float)((lane % (rotary_dim >> 1)) * 2) / (float)rotary_dim; 
-inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; -float q = (float)mixed_query[offset + lane]; -float k = (float)key_layer[offset + lane]; -float rotary_sign = (lane > 11 ? -1.0 : 1.0); -float q_rot = (q * rotary_sign); -float k_rot = (k * rotary_sign); -auto q_rot_tmp = lane < 12 ? __shfl_sync(mask[lane], q_rot, lane + 12) : __shfl_sync(mask[lane], -q_rot, lane - 12);//g.shfl_xor(q_rot, 12); auto k_rot_tmp = lane < 12 ? __shfl_sync(mask[lane], -k_rot, lane + 12) : __shfl_sync(mask[lane], k_rot, lane - 12);//g.shfl_xor(k_rot, 12); q = q * -cosf(inv_freq) + q_rot_tmp * sinf(inv_freq); k = k * cosf(inv_freq) + k_rot_tmp * sinf(inv_freq); - -mixed_query[offset + lane] = (__half)q; -key_layer[offset + lane] = (__half)k; - -lane += WARP_SIZE; -} -} -#endif -} - -template -void launch_apply_rotary_pos_emb(T* mixed_query, -T* key_layer, -unsigned head_size, -unsigned seq_len, -unsigned rotary_dim, -unsigned offset, -unsigned num_heads, -unsigned batch, -cudaStream_t stream) -{ -int total_count = batch * num_heads * seq_len; -dim3 block_dims(1024); -dim3 grid_dims((total_count - 1) / MAX_WARP_NUM + 1); // (batch_size); - -apply_rotary_pos_emb<<>>( -mixed_query, key_layer, rotary_dim, seq_len, offset, num_heads, head_size, total_count); -} - -template void launch_apply_rotary_pos_emb(float*, -float*, -unsigned, -unsigned, -unsigned, -unsigned, -unsigned, -unsigned, -cudaStream_t); -template void launch_apply_rotary_pos_emb<__half>(__half*, -__half*, -unsigned, -unsigned, -unsigned, -unsigned, -unsigned, -unsigned, -cudaStream_t); -*/ diff --git a/deepspeed/ops/csrc/transformer/inference/csrc/apply_rotary_pos_emb.hip b/deepspeed/ops/csrc/transformer/inference/csrc/apply_rotary_pos_emb.hip deleted file mode 100644 index 4e04f7a..0000000 --- a/deepspeed/ops/csrc/transformer/inference/csrc/apply_rotary_pos_emb.hip +++ /dev/null @@ -1,376 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
-#include "hip/hip_runtime.h" -#include "custom_hip_layers.h" - -#ifndef __HIP_PLATFORM_HCC__ -#include -#endif - -namespace cg = cooperative_groups; - -__global__ void apply_rotary_pos_emb(float* mixed_query, - float* key_layer, - unsigned rotary_dim, - unsigned seq_len, - unsigned seq_offset, - unsigned num_heads, - unsigned head_size, - unsigned total_count) -{ - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int id = threadIdx.x; - int gid = id >> 5; - int lane = id & 0x1f; - - unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid; - unsigned offset = head_id * head_size; - - unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; - - if (head_id < total_count) { - while (lane < rotary_dim) { - float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim; - inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; - float q = mixed_query[offset + lane]; - float k = key_layer[offset + lane]; - float rotary_sign = (lane % 2 == 1 ? 
-1.0 : 1.0); - float q_rot = (q * rotary_sign); - float k_rot = (k * rotary_sign); - q_rot = g.shfl_xor(q_rot, 1); - k_rot = g.shfl_xor(k_rot, 1); - q = q * cosf(inv_freq) + q_rot * sinf(inv_freq); - k = k * cosf(inv_freq) + k_rot * sinf(inv_freq); - - mixed_query[offset + lane] = q; - key_layer[offset + lane] = k; - - lane += WARP_SIZE; - } - } -} - -__global__ void apply_rotary_pos_emb(__half* mixed_query, - __half* key_layer, - unsigned rotary_dim, - unsigned seq_len, - unsigned seq_offset, - unsigned num_heads, - unsigned head_size, - unsigned total_count) -{ -#if __CUDA_ARCH__ >= 700 - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int id = threadIdx.x; - int gid = id >> 5; - int lane = id & 0x1f; - - unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid; - unsigned offset = head_id * head_size; - - unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; - - if (head_id < total_count) { - while (lane < rotary_dim) { - float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim; - inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; - float q = (float)mixed_query[offset + lane]; - float k = (float)key_layer[offset + lane]; - float rotary_sign = (lane % 2 == 1 ? 
-1.0 : 1.0); - float q_rot = (q * rotary_sign); - float k_rot = (k * rotary_sign); - q_rot = g.shfl_xor(q_rot, 1); - k_rot = g.shfl_xor(k_rot, 1); - q = q * cosf(inv_freq) + q_rot * sinf(inv_freq); - k = k * cosf(inv_freq) + k_rot * sinf(inv_freq); - - mixed_query[offset + lane] = (__half)q; - key_layer[offset + lane] = (__half)k; - - lane += WARP_SIZE; - } - } -#endif -} -__global__ void apply_rotary_pos_emb1(float* mixed_query, - float* key_layer, - unsigned rotary_dim, - unsigned seq_len, - unsigned seq_offset, - unsigned num_heads, - unsigned head_size, - unsigned total_count) -{ - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int id = threadIdx.x; - int gid = id >> 5; - int lane = id & 0x1f; - - unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid; - unsigned offset = head_id * head_size; - - unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; - - if (head_id < total_count) { - while (lane < rotary_dim) { - float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim; - inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; - float q = mixed_query[offset + lane]; - float k = key_layer[offset + lane]; - float rotary_sign = (lane % 2 == 1 ? 
-1.0 : 1.0); - float q_rot = (q * rotary_sign); - float k_rot = (k * rotary_sign); - q_rot = g.shfl_xor(q_rot, 1); - k_rot = g.shfl_xor(k_rot, 1); - q = q * cosf(inv_freq) + q_rot * sinf(inv_freq); - k = k * cosf(inv_freq) + k_rot * sinf(inv_freq); - - mixed_query[offset + lane] = q; - key_layer[offset + lane] = k; - - lane += WARP_SIZE; - } - } -} -__global__ void apply_rotary_pos_emb1(__half* mixed_query, - __half* key_layer, - unsigned rotary_dim, - unsigned seq_len, - unsigned seq_offset, - unsigned num_heads, - unsigned head_size, - unsigned total_count) -{ -#if __CUDA_ARCH__ >= 700 - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int id = threadIdx.x; - int gid = id >> 5; - int lane = id & 0x1f; - - unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid; - unsigned offset = head_id * head_size; - - constexpr unsigned mask[32] = { - 0x1 | 0x1000, 0x2 | 0x2000, 0x4 | 0x4000, 0x8 | 0x8000, 0x10 | 0x10000, - 0x20 | 0x20000, 0x40 | 0x40000, 0x80 | 0x80000, 0x100 | 0x100000, 0x200 | 0x200000, - 0x400 | 0x400000, 0x800 | 0x800000, 0x1000 | 0x1, 0x2000 | 0x2, 0x4000 | 0x4, - 0x8000 | 0x8, 0x10000 | 0x10, 0x20000 | 0x20, 0x40000 | 0x40, 0x80000 | 0x80, - 0x100000 | 0x100, 0x200000 | 0x200, 0x400000 | 0x400, 0x800000 | 0x800, 0x1000000, - 0x2000000, 0x4000000, 0x8000000, 0x10000000, 0x20000000, - 0x40000000, 0x80000000}; - - unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; - unsigned half_dim = rotary_dim >> 1; - if (head_id < total_count) { - while (lane < rotary_dim) { - float inv_freq = (float)((lane % half_dim) * 2) / (float)rotary_dim; - inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; - float q = (float)mixed_query[offset + lane]; - float k = (float)key_layer[offset + lane]; - float rotary_sign = (lane > (half_dim - 1) ? -1.0 : 1.0); - float q_rot = (q * rotary_sign); - float k_rot = (k * rotary_sign); - auto q_rot_tmp = lane < half_dim ? 
__shfl_sync(mask[lane], q_rot, lane + half_dim) - : __shfl_sync(mask[lane], q_rot, lane - half_dim); - auto k_rot_tmp = lane < half_dim ? __shfl_sync(mask[lane], k_rot, lane + half_dim) - : __shfl_sync(mask[lane], k_rot, lane - half_dim); - q = q * cosf(inv_freq) + q_rot_tmp * sinf(inv_freq); - k = k * cosf(inv_freq) + k_rot_tmp * sinf(inv_freq); - - mixed_query[offset + lane] = (__half)q; - key_layer[offset + lane] = (__half)k; - - lane += WARP_SIZE; - } - } -#endif -} - -template -void launch_apply_rotary_pos_emb(T* mixed_query, - T* key_layer, - unsigned head_size, - unsigned seq_len, - unsigned rotary_dim, - unsigned offset, - unsigned num_heads, - unsigned batch, - bool rotate_half, - bool rotate_every_two, - hipStream_t stream) -{ - int total_count = batch * num_heads * seq_len; - dim3 block_dims(1024); - dim3 grid_dims((total_count - 1) / MAX_WARP_NUM + 1); // (batch_size); - if (rotate_every_two) - hipLaunchKernelGGL(( apply_rotary_pos_emb), dim3(grid_dims), dim3(block_dims), 0, stream, - mixed_query, key_layer, rotary_dim, seq_len, offset, num_heads, head_size, total_count); - else if (rotate_half) - hipLaunchKernelGGL(( apply_rotary_pos_emb1), dim3(grid_dims), dim3(block_dims), 0, stream, - mixed_query, key_layer, rotary_dim, seq_len, offset, num_heads, head_size, total_count); -} - -template void launch_apply_rotary_pos_emb(float*, - float*, - unsigned, - unsigned, - unsigned, - unsigned, - unsigned, - unsigned, - bool, - bool, - hipStream_t); -template void launch_apply_rotary_pos_emb<__half>(__half*, - __half*, - unsigned, - unsigned, - unsigned, - unsigned, - unsigned, - unsigned, - bool, - bool, - hipStream_t); -/* -__global__ void apply_rotary_pos_emb(float* mixed_query, -float* key_layer, -unsigned rotary_dim, -unsigned seq_len, -unsigned seq_offset, -unsigned num_heads, -unsigned head_size, -unsigned total_count) -{ -cg::thread_block b = cg::this_thread_block(); -cg::thread_block_tile g = cg::tiled_partition(b); - -int id = threadIdx.x; -int gid = 
id >> 5; -int lane = id & 0x1f; - -unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid; -unsigned offset = head_id * head_size; - -unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; - -if (head_id < total_count) { -while (lane < rotary_dim) { -float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim; -inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; -float q = mixed_query[offset + lane]; -float k = key_layer[offset + lane]; -float rotary_sign = (lane % 2 == 1 ? -1.0 : 1.0); -float q_rot = (q * rotary_sign); -float k_rot = (k * rotary_sign); -q_rot = g.shfl_xor(q_rot, 1); -k_rot = g.shfl_xor(k_rot, 1); -q = q * cosf(inv_freq) + q_rot * sinf(inv_freq); -k = k * cosf(inv_freq) + k_rot * sinf(inv_freq); - -mixed_query[offset + lane] = q; -key_layer[offset + lane] = k; - -lane += WARP_SIZE; -} -} -} - -__global__ void apply_rotary_pos_emb(__half* mixed_query, -__half* key_layer, -unsigned rotary_dim, -unsigned seq_len, -unsigned seq_offset, -unsigned num_heads, -unsigned head_size, -unsigned total_count) -{ -#if __CUDA_ARCH__ >= 700 -cg::thread_block b = cg::this_thread_block(); -cg::thread_block_tile g = cg::tiled_partition(b); - -int id = threadIdx.x; -int gid = id >> 5; -int lane = id & 0x1f; - -unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid; -unsigned offset = head_id * head_size; -constexpr unsigned mask[32] = {0x1 | 0x1000, 0x2 | 0x2000, 0x4 | 0x4000, 0x8 | 0x8000, -0x10 | 0x10000, 0x20 | 0x20000, 0x40 | 0x40000, 0x80 | 0x80000, -0x100 | 0x100000, 0x200 | 0x200000, 0x400 | 0x400000, 0x800 | 0x800000, -0x1000 | 0x1, 0x2000 | 0x2, 0x4000 | 0x4, 0x8000 | 0x8, -0x10000 | 0x10, 0x20000 | 0x20, 0x40000 | 0x40, 0x80000 | 0x80, -0x100000 | 0x100, 0x200000 | 0x200, 0x400000 | 0x400, 0x800000 | 0x800, -0x1000000, 0x2000000, 0x4000000, 0x8000000, -0x10000000, 0x20000000, 0x40000000, 0x80000000}; -unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; - -if (head_id < total_count) { -while (lane < rotary_dim) { -//float inv_freq = 
(float)((lane / 2) * 2) / (float)rotary_dim; -float inv_freq = (float)((lane % (rotary_dim >> 1)) * 2) / (float)rotary_dim; -inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; -float q = (float)mixed_query[offset + lane]; -float k = (float)key_layer[offset + lane]; -float rotary_sign = (lane > 11 ? -1.0 : 1.0); -float q_rot = (q * rotary_sign); -float k_rot = (k * rotary_sign); -auto q_rot_tmp = lane < 12 ? __shfl_sync(mask[lane], q_rot, lane + 12) : __shfl_sync(mask[lane], -q_rot, lane - 12);//g.shfl_xor(q_rot, 12); auto k_rot_tmp = lane < 12 ? __shfl_sync(mask[lane], -k_rot, lane + 12) : __shfl_sync(mask[lane], k_rot, lane - 12);//g.shfl_xor(k_rot, 12); q = q * -cosf(inv_freq) + q_rot_tmp * sinf(inv_freq); k = k * cosf(inv_freq) + k_rot_tmp * sinf(inv_freq); - -mixed_query[offset + lane] = (__half)q; -key_layer[offset + lane] = (__half)k; - -lane += WARP_SIZE; -} -} -#endif -} - -template -void launch_apply_rotary_pos_emb(T* mixed_query, -T* key_layer, -unsigned head_size, -unsigned seq_len, -unsigned rotary_dim, -unsigned offset, -unsigned num_heads, -unsigned batch, -hipStream_t stream) -{ -int total_count = batch * num_heads * seq_len; -dim3 block_dims(1024); -dim3 grid_dims((total_count - 1) / MAX_WARP_NUM + 1); // (batch_size); -hipLaunchKernelGGL(( -apply_rotary_pos_emb), dim3(grid_dims), dim3(block_dims), 0, stream, -mixed_query, key_layer, rotary_dim, seq_len, offset, num_heads, head_size, total_count); -} - -template void launch_apply_rotary_pos_emb(float*, -float*, -unsigned, -unsigned, -unsigned, -unsigned, -unsigned, -unsigned, -hipStream_t); -template void launch_apply_rotary_pos_emb<__half>(__half*, -__half*, -unsigned, -unsigned, -unsigned, -unsigned, -unsigned, -unsigned, -hipStream_t); -*/ diff --git a/deepspeed/ops/csrc/transformer/inference/csrc/dequantize.cu b/deepspeed/ops/csrc/transformer/inference/csrc/dequantize.cu deleted file mode 100644 index 4ddaabd..0000000 --- a/deepspeed/ops/csrc/transformer/inference/csrc/dequantize.cu +++ 
/dev/null @@ -1,110 +0,0 @@ -#include "custom_cuda_layers.h" - -#define MAX_QUANTIZE_GROUPING 1024 - -#define loop_unroll 1 -#define loop_unroll_bits 1 - -__global__ void dequantize_kernel(float* output, - const int8_t* input, - const float* qscale, - int output_size, - int hidden_dim, - int groups, - int merge_count) -{ - unsigned merge_hidden = hidden_dim >> merge_count; - unsigned quantization_stride = (merge_hidden * output_size) / groups; - - unsigned bid = blockIdx.x; - unsigned tid = threadIdx.x; - - while (tid < output_size) { - unsigned w_index = bid / merge_hidden; - unsigned q_index = tid + bid * output_size; - - auto q = input[q_index]; - - unsigned merge_hidden_total = w_index * merge_hidden; - unsigned scale_index = - ((((bid - merge_hidden_total) + tid * merge_hidden) / quantization_stride) - << merge_count) + - w_index; - - float scale_data = qscale[scale_index]; - - output[q_index] = (scale_data * (float)q); - tid += blockDim.x; - } -} - -__global__ void dequantize_kernel(__half* output, - const int8_t* input, - const float* qscale, - unsigned output_size, - unsigned hidden_dim, - unsigned groups, - unsigned merge_count) -{ -#ifdef HALF_PRECISION_AVAILABLE - - unsigned merge_hidden = hidden_dim >> merge_count; - unsigned quantization_stride = (merge_hidden * output_size) / groups; - - unsigned bid = blockIdx.x; - unsigned tid = threadIdx.x; - - while (tid < output_size) { - unsigned w_index = bid / merge_hidden; - unsigned q_index = tid + bid * output_size; - - auto q = input[q_index]; - - unsigned merge_hidden_total = w_index * merge_hidden; - unsigned scale_index = - ((((bid - merge_hidden_total) + tid * merge_hidden) / quantization_stride) - << merge_count) + - w_index; - - float scale_data = qscale[scale_index]; - - output[q_index] = __float2half(scale_data * (float)q); - tid += blockDim.x; - } -#endif -} - -template -void launch_dequantize(T* output, - const int8_t* input, - const float* qscale, - unsigned output_size, - unsigned hidden_dim, - 
unsigned groups, - unsigned merge_count, - cudaStream_t stream) -{ - unsigned threads = 1024; - dim3 block_dims(threads); - dim3 grid_dims(hidden_dim); - - dequantize_kernel<<>>( - output, input, qscale, output_size, hidden_dim, groups, merge_count); -} - -template void launch_dequantize(float*, - const int8_t*, - const float*, - unsigned, - unsigned, - unsigned, - unsigned, - cudaStream_t); -template void launch_dequantize<__half>(__half*, - const int8_t*, - const float*, - unsigned, - unsigned, - unsigned, - unsigned, - cudaStream_t); diff --git a/deepspeed/ops/csrc/transformer/inference/csrc/dequantize.hip b/deepspeed/ops/csrc/transformer/inference/csrc/dequantize.hip deleted file mode 100644 index 7c22e30..0000000 --- a/deepspeed/ops/csrc/transformer/inference/csrc/dequantize.hip +++ /dev/null @@ -1,112 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! -#include "hip/hip_runtime.h" -#include "custom_hip_layers.h" - -#define MAX_QUANTIZE_GROUPING 1024 - -#define loop_unroll 1 -#define loop_unroll_bits 1 - -__global__ void dequantize_kernel(float* output, - const int8_t* input, - const float* qscale, - int output_size, - int hidden_dim, - int groups, - int merge_count) -{ - unsigned merge_hidden = hidden_dim >> merge_count; - unsigned quantization_stride = (merge_hidden * output_size) / groups; - - unsigned bid = blockIdx.x; - unsigned tid = threadIdx.x; - - while (tid < output_size) { - unsigned w_index = bid / merge_hidden; - unsigned q_index = tid + bid * output_size; - - auto q = input[q_index]; - - unsigned merge_hidden_total = w_index * merge_hidden; - unsigned scale_index = - ((((bid - merge_hidden_total) + tid * merge_hidden) / quantization_stride) - << merge_count) + - w_index; - - float scale_data = qscale[scale_index]; - - output[q_index] = (scale_data * (float)q); - tid += blockDim.x; - } -} - -__global__ void dequantize_kernel(__half* output, - const int8_t* input, - const float* qscale, - unsigned output_size, - unsigned 
hidden_dim, - unsigned groups, - unsigned merge_count) -{ -#ifdef HALF_PRECISION_AVAILABLE - - unsigned merge_hidden = hidden_dim >> merge_count; - unsigned quantization_stride = (merge_hidden * output_size) / groups; - - unsigned bid = blockIdx.x; - unsigned tid = threadIdx.x; - - while (tid < output_size) { - unsigned w_index = bid / merge_hidden; - unsigned q_index = tid + bid * output_size; - - auto q = input[q_index]; - - unsigned merge_hidden_total = w_index * merge_hidden; - unsigned scale_index = - ((((bid - merge_hidden_total) + tid * merge_hidden) / quantization_stride) - << merge_count) + - w_index; - - float scale_data = qscale[scale_index]; - - output[q_index] = __float2half(scale_data * (float)q); - tid += blockDim.x; - } -#endif -} - -template -void launch_dequantize(T* output, - const int8_t* input, - const float* qscale, - unsigned output_size, - unsigned hidden_dim, - unsigned groups, - unsigned merge_count, - hipStream_t stream) -{ - unsigned threads = 1024; - dim3 block_dims(threads); - dim3 grid_dims(hidden_dim); - - hipLaunchKernelGGL(( dequantize_kernel), dim3(grid_dims), dim3(block_dims), 0, stream, - output, input, qscale, output_size, hidden_dim, groups, merge_count); -} - -template void launch_dequantize(float*, - const int8_t*, - const float*, - unsigned, - unsigned, - unsigned, - unsigned, - hipStream_t); -template void launch_dequantize<__half>(__half*, - const int8_t*, - const float*, - unsigned, - unsigned, - unsigned, - unsigned, - hipStream_t); diff --git a/deepspeed/ops/csrc/transformer/inference/csrc/gelu.cu b/deepspeed/ops/csrc/transformer/inference/csrc/gelu.cu deleted file mode 100644 index 70bbf42..0000000 --- a/deepspeed/ops/csrc/transformer/inference/csrc/gelu.cu +++ /dev/null @@ -1,525 +0,0 @@ -#include "custom_cuda_layers.h" - -#define MAX_CAP 4 -#define MAX_SEQ 2048 - -inline __device__ float gelu(const float x) -{ - const float sqrt_param = 0.79788456080286535587989211986876f; - const float mul_param = 0.044715; - 
return x * 0.5f * (1.0f + tanhf(sqrt_param * (x + mul_param * x * x * x))); -} - -__global__ void fused_bias_gelu(float* input, - const float* bias, - int total_count, - int intermediate_size) -{ - float4* input_cast = reinterpret_cast(input); - const float4* bias_cast = reinterpret_cast(bias); - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float4 data = input_cast[offset]; - float4 bias_data = bias_cast[offset % intermediate_size]; - - data.x += bias_data.x; - data.y += bias_data.y; - data.z += bias_data.z; - data.w += bias_data.w; - - data.x = gelu(data.x); - data.y = gelu(data.y); - data.z = gelu(data.z); - data.w = gelu(data.w); - - input_cast[offset] = data; - } -} - -__global__ void fused_bias_gelu(__half* input, - const __half* bias, - int total_count, - int intermediate_size) -{ -#ifdef HALF_PRECISION_AVAILABLE - - float2* input_cast = reinterpret_cast(input); - const float2* bias_cast = reinterpret_cast(bias); - - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float2 vals_vec = input_cast[offset]; - float2 bias_vec = bias_cast[offset % intermediate_size]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - low_data.x += low_bias.x; - low_data.y += low_bias.y; - high_data.x += high_bias.x; - high_data.y += high_bias.y; - - low_data.x = gelu(low_data.x); - low_data.y = gelu(low_data.y); - high_data.x = gelu(high_data.x); - high_data.y = gelu(high_data.y); - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - input_cast[offset] = vals_vec; - } -#endif -} - -template -void launch_bias_gelu(T* input, - const T* bias, - int intermediate_size, - int batch_size, 
- cudaStream_t stream) -{ - int total_count = batch_size * (intermediate_size / 4); - int threads = 1024; // intermediate_size / iterations / 4; - dim3 block_dims(threads); - dim3 grid_dims(((total_count - 1) / 1024 + 1)); // (batch_size); - - fused_bias_gelu<<>>( - input, bias, total_count, intermediate_size / 4); -} - -template void launch_bias_gelu(float*, const float*, int, int, cudaStream_t); -template void launch_bias_gelu<__half>(__half*, const __half*, int, int, cudaStream_t); - -__global__ void fused_bias_add(float* input, const float* bias, int total_count, int hidden_size) -{ - float4* input_cast = reinterpret_cast(input); - const float4* bias_cast = reinterpret_cast(bias); - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float4 data = input_cast[offset]; - float4 bias_data = bias_cast[offset % hidden_size]; - - data.x += bias_data.x; - data.y += bias_data.y; - data.z += bias_data.z; - data.w += bias_data.w; - - input_cast[offset] = data; - } -} - -__global__ void fused_bias_add(__half* input, const __half* bias, int total_count, int hidden_size) -{ -#ifdef HALF_PRECISION_AVAILABLE - - float2* input_cast = reinterpret_cast(input); - const float2* bias_cast = reinterpret_cast(bias); - - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float2 vals_vec = input_cast[offset]; - float2 bias_vec = bias_cast[offset % hidden_size]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - low_data.x += low_bias.x; - low_data.y += low_bias.y; - high_data.x += high_bias.x; - high_data.y += high_bias.y; - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - input_cast[offset] 
= vals_vec; - } -#endif -} - -template -void launch_bias_add(T* input, const T* bias, int hidden_size, int batch_size, cudaStream_t stream) -{ - int total_count = batch_size * (hidden_size / 4); - int threads = 1024; // hidden_size / iterations / 4; - dim3 block_dims(threads); - dim3 grid_dims(((total_count - 1) / threads + 1)); // (batch_size); - - fused_bias_add<<>>(input, bias, total_count, hidden_size / 4); -} - -template void launch_bias_add(float*, const float*, int, int, cudaStream_t); -template void launch_bias_add<__half>(__half*, const __half*, int, int, cudaStream_t); - -__global__ void fused_bias_residual(float* input, - float* output, - float* attn, - float* bias, - float* attnbias, - int total_count, - int intermediate_size, - int mp_size) -{ - float4* input_cast = reinterpret_cast(input); - float4* output_cast = reinterpret_cast(output); - float4* attn_cast = reinterpret_cast(attn); - float4* bias_cast = reinterpret_cast(bias); - float4* attnbias_cast = reinterpret_cast(attnbias); - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float4 data = input_cast[offset]; - float4 out = output_cast[offset]; - float4 res_vec = attn_cast[offset]; - float4 bias_data = bias_cast[offset % intermediate_size]; - float4 attn_bias = attnbias_cast[offset % intermediate_size]; - - data.x = (data.x + res_vec.x) * mp_size + (out.x + bias_data.x + attn_bias.x); - data.y = (data.y + res_vec.y) * mp_size + (out.y + bias_data.y + attn_bias.y); - data.z = (data.z + res_vec.z) * mp_size + (out.z + bias_data.z + attn_bias.z); - data.w = (data.w + res_vec.w) * mp_size + (out.w + bias_data.w + attn_bias.w); - - output_cast[offset] = data; - } -} - -__global__ void fused_bias_residual(__half* input, - __half* output, - __half* attn, - __half* bias, - __half* attn_bias, - int total_count, - int intermediate_size, - int mp_size) -{ -#ifdef HALF_PRECISION_AVAILABLE - - float2* input_cast = reinterpret_cast(input); - float2* output_cast = 
reinterpret_cast(output); - float2* attn_cast = reinterpret_cast(attn); - - float2* bias_cast = reinterpret_cast(bias); - float2* attnbias_cast = reinterpret_cast(attn_bias); - - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float2 vals_vec = input_cast[offset]; - float2 out_vec = output_cast[offset]; - float2 res_vec = attn_cast[offset]; - - float2 bias_vec = bias_cast[offset % intermediate_size]; - float2 attn_bias_vec = attnbias_cast[offset % intermediate_size]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* out_half = reinterpret_cast<__half2*>(&out_vec); - __half2* res_half = reinterpret_cast<__half2*>(&res_vec); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - __half2* attnbias_half = reinterpret_cast<__half2*>(&attn_bias_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - float2 low_out = __half22float2(out_half[0]); - float2 high_out = __half22float2(out_half[1]); - - float2 low_res = __half22float2(res_half[0]); - float2 high_res = __half22float2(res_half[1]); - - float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - float2 attn_low_bias = __half22float2(attnbias_half[0]); - float2 attn_high_bias = __half22float2(attnbias_half[1]); - - low_data.x = - (low_data.x + low_res.x) * mp_size + (low_out.x + (low_bias.x + attn_low_bias.x)); - low_data.y = - (low_data.y + low_res.y) * mp_size + (low_out.y + (low_bias.y + attn_low_bias.y)); - high_data.x = - (high_data.x + high_res.x) * mp_size + (high_out.x + (high_bias.x + attn_high_bias.x)); - high_data.y = - (high_data.y + high_res.y) * mp_size + (high_out.y + (high_bias.y + attn_high_bias.y)); - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - output_cast[offset] = vals_vec; - } -#endif -} - -template -void launch_bias_residual(T* input, - T* output, - T* attn, - T* bias, - 
T* attn_bias, - int batch, - int hidden_dim, - int mp_size, - cudaStream_t stream) -{ - int total_count = batch * hidden_dim / 4; - dim3 block_dims(1024); - dim3 grid_dims((total_count - 1) / 1024 + 1); // (batch_size); - - fused_bias_residual<<>>( - input, output, attn, bias, attn_bias, total_count, hidden_dim / 4, 1.0 / mp_size); -} - -template void -launch_bias_residual(float*, float*, float*, float*, float*, int, int, int, cudaStream_t); -template void launch_bias_residual<__half>(__half*, - __half*, - __half*, - __half*, - __half*, - int, - int, - int, - cudaStream_t); - -__global__ void gptj_residual_add(float* input, - float* output, - float* attn, - float* bias, - float* attnbias, - int total_count, - int intermediate_size, - float mp_size) -{ - float4* input_cast = reinterpret_cast(input); - float4* output_cast = reinterpret_cast(output); - float4* attn_cast = reinterpret_cast(attn); - float4* bias_cast = reinterpret_cast(bias); - float4* attnbias_cast = reinterpret_cast(attnbias); - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float4 data = input_cast[offset]; - float4 out = output_cast[offset]; - float4 res_vec = attn_cast[offset]; - float4 bias_data = bias_cast[offset % intermediate_size]; - float4 attn_bias = attnbias_cast[offset % intermediate_size]; - - data.x = data.x * mp_size + (out.x + res_vec.x + bias_data.x + attn_bias.x); - data.y = data.y * mp_size + (out.y + res_vec.y + bias_data.y + attn_bias.y); - data.z = data.z * mp_size + (out.z + res_vec.z + bias_data.z + attn_bias.z); - data.w = data.w * mp_size + (out.w + res_vec.w + bias_data.w + attn_bias.w); - - output_cast[offset] = data; - } -} - -__global__ void gptj_residual_add(__half* input, - __half* output, - __half* attn, - __half* bias, - __half* attn_bias, - int total_count, - int intermediate_size, - float mp_size) -{ -#if __CUDA_ARCH__ >= 700 || defined(__HIP_PLATFORM_HCC__) - - float2* input_cast = reinterpret_cast(input); - float2* 
output_cast = reinterpret_cast(output); - float2* attn_cast = reinterpret_cast(attn); - - float2* bias_cast = reinterpret_cast(bias); - float2* attnbias_cast = reinterpret_cast(attn_bias); - - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float2 vals_vec = input_cast[offset]; - float2 out_vec = output_cast[offset]; - float2 res_vec = attn_cast[offset]; - - float2 bias_vec = bias_cast[offset % intermediate_size]; - float2 attn_bias_vec = attnbias_cast[offset % intermediate_size]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* out_half = reinterpret_cast<__half2*>(&out_vec); - __half2* res_half = reinterpret_cast<__half2*>(&res_vec); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - __half2* attnbias_half = reinterpret_cast<__half2*>(&attn_bias_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - float2 low_out = __half22float2(out_half[0]); - float2 high_out = __half22float2(out_half[1]); - - float2 low_res = __half22float2(res_half[0]); - float2 high_res = __half22float2(res_half[1]); - - float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - float2 attn_low_bias = __half22float2(attnbias_half[0]); - float2 attn_high_bias = __half22float2(attnbias_half[1]); - - low_data.x = - low_data.x * mp_size + (low_out.x + low_res.x + (low_bias.x + attn_low_bias.x)); - low_data.y = - low_data.y * mp_size + (low_out.y + low_res.y + (low_bias.y + attn_low_bias.y)); - high_data.x = - high_data.x * mp_size + (high_out.x + high_res.x + (high_bias.x + attn_high_bias.x)); - high_data.y = - high_data.y * mp_size + (high_out.y + high_res.y + (high_bias.y + attn_high_bias.y)); - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - output_cast[offset] = vals_vec; - } -#endif -} - -template -void launch_gptj_residual_add(T* input, - T* output, - T* attn, - T* 
bias, - T* attn_bias, - int hidden_dim, - int batch, - int mp_size, - cudaStream_t stream) -{ - int total_count = batch * hidden_dim / 4; - dim3 block_dims(1024); - dim3 grid_dims((total_count - 1) / 1024 + 1); // (batch_size); - - gptj_residual_add<<>>( - input, output, attn, bias, attn_bias, total_count, hidden_dim / 4, 1.0 / mp_size); -} - -template void launch_gptj_residual_add(float*, - float*, - float*, - float*, - float*, - int, - int, - int, - cudaStream_t); -template void launch_gptj_residual_add<__half>(__half*, - __half*, - __half*, - __half*, - __half*, - int, - int, - int, - cudaStream_t); - -__global__ void moe_res_matmul(float* residual, - float* coef, - float* mlp_out, - int seq_len, - int hidden_dim) -{ - unsigned tid = threadIdx.x; - float4* residual_cast = reinterpret_cast(residual); - float4* coef_cast = reinterpret_cast(coef); - float4* mlp_out_cast = reinterpret_cast(mlp_out); - - residual_cast += blockIdx.x * hidden_dim; - mlp_out_cast += blockIdx.x * hidden_dim; - - float4* coef_cast2 = coef_cast + hidden_dim; - - while (tid < hidden_dim) { - float4 res = residual_cast[tid]; - float4 mlp = mlp_out_cast[tid]; - float4 coef1 = coef_cast[tid]; - float4 coef2 = coef_cast2[tid]; - mlp.x = mlp.x * coef2.x + res.x * coef1.x; - mlp.y = mlp.y * coef2.y + res.y * coef1.y; - mlp.z = mlp.z * coef2.z + res.z * coef1.z; - mlp.w = mlp.w * coef2.w + res.w * coef1.w; - mlp_out_cast[tid] = mlp; - tid += blockDim.x; - } -} - -__global__ void moe_res_matmul(__half* residual, - __half* coef, - __half* mlp_out, - int seq_len, - int hidden_dim) -{ - unsigned tid = threadIdx.x; - - float2* residual_cast = reinterpret_cast(residual); - float2* mlp_out_cast = reinterpret_cast(mlp_out); - float2* coef_cast = reinterpret_cast(coef); - float2* coef_cast2 = coef_cast + hidden_dim; - - residual_cast += blockIdx.x * hidden_dim; - mlp_out_cast += blockIdx.x * hidden_dim; - - while (tid < hidden_dim) { - float2 res = residual_cast[tid]; - float2 coef1 = coef_cast[tid]; - 
float2 coef2 = coef_cast[tid]; - float2 data = mlp_out_cast[tid]; - __half* data_h = reinterpret_cast<__half*>(&data); - __half* coef1_h = reinterpret_cast<__half*>(&coef1); - __half* coef2_h = reinterpret_cast<__half*>(&coef2); - __half* res_h = reinterpret_cast<__half*>(&res); - data_h[0] = res_h[0] * coef1_h[0] + data_h[0] * coef2_h[0]; - data_h[1] = res_h[1] * coef1_h[1] + data_h[1] * coef2_h[1]; - data_h[2] = res_h[2] * coef1_h[2] + data_h[2] * coef2_h[2]; - data_h[3] = res_h[3] * coef1_h[3] + data_h[3] * coef2_h[3]; - - mlp_out_cast[tid] = data; - tid += blockDim.x; - } -} - -template -void launch_moe_res_matmul(T* residual, - T* coef, - T* mlp_out, - int seq_len, - int hidden_dim, - cudaStream_t stream) -{ - dim3 grid_dim(seq_len); - dim3 block_dim(1024); - moe_res_matmul<<>>( - residual, coef, mlp_out, seq_len, hidden_dim / 4); -} - -template void launch_moe_res_matmul(float* residual, - float* coef, - float* mlp_out, - int seq_len, - int hidden_dim, - cudaStream_t stream); -template void launch_moe_res_matmul(__half* residual, - __half* coef, - __half* mlp_out, - int seq_len, - int hidden_dim, - cudaStream_t stream); diff --git a/deepspeed/ops/csrc/transformer/inference/csrc/gelu.hip b/deepspeed/ops/csrc/transformer/inference/csrc/gelu.hip deleted file mode 100644 index 00c03ef..0000000 --- a/deepspeed/ops/csrc/transformer/inference/csrc/gelu.hip +++ /dev/null @@ -1,527 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
-#include "hip/hip_runtime.h" -#include "custom_hip_layers.h" - -#define MAX_CAP 4 -#define MAX_SEQ 2048 - -inline __device__ float gelu(const float x) -{ - const float sqrt_param = 0.79788456080286535587989211986876f; - const float mul_param = 0.044715; - return x * 0.5f * (1.0f + tanhf(sqrt_param * (x + mul_param * x * x * x))); -} - -__global__ void fused_bias_gelu(float* input, - const float* bias, - int total_count, - int intermediate_size) -{ - float4* input_cast = reinterpret_cast(input); - const float4* bias_cast = reinterpret_cast(bias); - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float4 data = input_cast[offset]; - float4 bias_data = bias_cast[offset % intermediate_size]; - - data.x += bias_data.x; - data.y += bias_data.y; - data.z += bias_data.z; - data.w += bias_data.w; - - data.x = gelu(data.x); - data.y = gelu(data.y); - data.z = gelu(data.z); - data.w = gelu(data.w); - - input_cast[offset] = data; - } -} - -__global__ void fused_bias_gelu(__half* input, - const __half* bias, - int total_count, - int intermediate_size) -{ -#ifdef HALF_PRECISION_AVAILABLE - - float2* input_cast = reinterpret_cast(input); - const float2* bias_cast = reinterpret_cast(bias); - - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float2 vals_vec = input_cast[offset]; - float2 bias_vec = bias_cast[offset % intermediate_size]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - low_data.x += low_bias.x; - low_data.y += low_bias.y; - high_data.x += high_bias.x; - high_data.y += high_bias.y; - - low_data.x = gelu(low_data.x); - low_data.y = gelu(low_data.y); - high_data.x = gelu(high_data.x); - high_data.y = 
gelu(high_data.y); - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - input_cast[offset] = vals_vec; - } -#endif -} - -template -void launch_bias_gelu(T* input, - const T* bias, - int intermediate_size, - int batch_size, - hipStream_t stream) -{ - int total_count = batch_size * (intermediate_size / 4); - int threads = 1024; // intermediate_size / iterations / 4; - dim3 block_dims(threads); - dim3 grid_dims(((total_count - 1) / 1024 + 1)); // (batch_size); - - hipLaunchKernelGGL(( fused_bias_gelu), dim3(grid_dims), dim3(block_dims), 0, stream, - input, bias, total_count, intermediate_size / 4); -} - -template void launch_bias_gelu(float*, const float*, int, int, hipStream_t); -template void launch_bias_gelu<__half>(__half*, const __half*, int, int, hipStream_t); - -__global__ void fused_bias_add(float* input, const float* bias, int total_count, int hidden_size) -{ - float4* input_cast = reinterpret_cast(input); - const float4* bias_cast = reinterpret_cast(bias); - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float4 data = input_cast[offset]; - float4 bias_data = bias_cast[offset % hidden_size]; - - data.x += bias_data.x; - data.y += bias_data.y; - data.z += bias_data.z; - data.w += bias_data.w; - - input_cast[offset] = data; - } -} - -__global__ void fused_bias_add(__half* input, const __half* bias, int total_count, int hidden_size) -{ -#ifdef HALF_PRECISION_AVAILABLE - - float2* input_cast = reinterpret_cast(input); - const float2* bias_cast = reinterpret_cast(bias); - - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float2 vals_vec = input_cast[offset]; - float2 bias_vec = bias_cast[offset % hidden_size]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - 
float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - low_data.x += low_bias.x; - low_data.y += low_bias.y; - high_data.x += high_bias.x; - high_data.y += high_bias.y; - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - input_cast[offset] = vals_vec; - } -#endif -} - -template -void launch_bias_add(T* input, const T* bias, int hidden_size, int batch_size, hipStream_t stream) -{ - int total_count = batch_size * (hidden_size / 4); - int threads = 1024; // hidden_size / iterations / 4; - dim3 block_dims(threads); - dim3 grid_dims(((total_count - 1) / threads + 1)); // (batch_size); - - hipLaunchKernelGGL(( fused_bias_add), dim3(grid_dims), dim3(block_dims), 0, stream, input, bias, total_count, hidden_size / 4); -} - -template void launch_bias_add(float*, const float*, int, int, hipStream_t); -template void launch_bias_add<__half>(__half*, const __half*, int, int, hipStream_t); - -__global__ void fused_bias_residual(float* input, - float* output, - float* attn, - float* bias, - float* attnbias, - int total_count, - int intermediate_size, - int mp_size) -{ - float4* input_cast = reinterpret_cast(input); - float4* output_cast = reinterpret_cast(output); - float4* attn_cast = reinterpret_cast(attn); - float4* bias_cast = reinterpret_cast(bias); - float4* attnbias_cast = reinterpret_cast(attnbias); - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float4 data = input_cast[offset]; - float4 out = output_cast[offset]; - float4 res_vec = attn_cast[offset]; - float4 bias_data = bias_cast[offset % intermediate_size]; - float4 attn_bias = attnbias_cast[offset % intermediate_size]; - - data.x = (data.x + res_vec.x) * mp_size + (out.x + bias_data.x + attn_bias.x); - data.y = (data.y + res_vec.y) * mp_size + (out.y + bias_data.y + attn_bias.y); - data.z = (data.z + res_vec.z) * mp_size + (out.z + bias_data.z + attn_bias.z); - data.w = (data.w + 
res_vec.w) * mp_size + (out.w + bias_data.w + attn_bias.w); - - output_cast[offset] = data; - } -} - -__global__ void fused_bias_residual(__half* input, - __half* output, - __half* attn, - __half* bias, - __half* attn_bias, - int total_count, - int intermediate_size, - int mp_size) -{ -#ifdef HALF_PRECISION_AVAILABLE - - float2* input_cast = reinterpret_cast(input); - float2* output_cast = reinterpret_cast(output); - float2* attn_cast = reinterpret_cast(attn); - - float2* bias_cast = reinterpret_cast(bias); - float2* attnbias_cast = reinterpret_cast(attn_bias); - - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float2 vals_vec = input_cast[offset]; - float2 out_vec = output_cast[offset]; - float2 res_vec = attn_cast[offset]; - - float2 bias_vec = bias_cast[offset % intermediate_size]; - float2 attn_bias_vec = attnbias_cast[offset % intermediate_size]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* out_half = reinterpret_cast<__half2*>(&out_vec); - __half2* res_half = reinterpret_cast<__half2*>(&res_vec); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - __half2* attnbias_half = reinterpret_cast<__half2*>(&attn_bias_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - float2 low_out = __half22float2(out_half[0]); - float2 high_out = __half22float2(out_half[1]); - - float2 low_res = __half22float2(res_half[0]); - float2 high_res = __half22float2(res_half[1]); - - float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - float2 attn_low_bias = __half22float2(attnbias_half[0]); - float2 attn_high_bias = __half22float2(attnbias_half[1]); - - low_data.x = - (low_data.x + low_res.x) * mp_size + (low_out.x + (low_bias.x + attn_low_bias.x)); - low_data.y = - (low_data.y + low_res.y) * mp_size + (low_out.y + (low_bias.y + attn_low_bias.y)); - high_data.x = - (high_data.x + high_res.x) * 
mp_size + (high_out.x + (high_bias.x + attn_high_bias.x)); - high_data.y = - (high_data.y + high_res.y) * mp_size + (high_out.y + (high_bias.y + attn_high_bias.y)); - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - output_cast[offset] = vals_vec; - } -#endif -} - -template -void launch_bias_residual(T* input, - T* output, - T* attn, - T* bias, - T* attn_bias, - int batch, - int hidden_dim, - int mp_size, - hipStream_t stream) -{ - int total_count = batch * hidden_dim / 4; - dim3 block_dims(1024); - dim3 grid_dims((total_count - 1) / 1024 + 1); // (batch_size); - - hipLaunchKernelGGL(( fused_bias_residual), dim3(grid_dims), dim3(block_dims), 0, stream, - input, output, attn, bias, attn_bias, total_count, hidden_dim / 4, 1.0 / mp_size); -} - -template void -launch_bias_residual(float*, float*, float*, float*, float*, int, int, int, hipStream_t); -template void launch_bias_residual<__half>(__half*, - __half*, - __half*, - __half*, - __half*, - int, - int, - int, - hipStream_t); - -__global__ void gptj_residual_add(float* input, - float* output, - float* attn, - float* bias, - float* attnbias, - int total_count, - int intermediate_size, - float mp_size) -{ - float4* input_cast = reinterpret_cast(input); - float4* output_cast = reinterpret_cast(output); - float4* attn_cast = reinterpret_cast(attn); - float4* bias_cast = reinterpret_cast(bias); - float4* attnbias_cast = reinterpret_cast(attnbias); - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float4 data = input_cast[offset]; - float4 out = output_cast[offset]; - float4 res_vec = attn_cast[offset]; - float4 bias_data = bias_cast[offset % intermediate_size]; - float4 attn_bias = attnbias_cast[offset % intermediate_size]; - - data.x = data.x * mp_size + (out.x + res_vec.x + bias_data.x + attn_bias.x); - data.y = data.y * mp_size + (out.y + res_vec.y + bias_data.y + attn_bias.y); - data.z = data.z * mp_size + (out.z + res_vec.z + 
bias_data.z + attn_bias.z); - data.w = data.w * mp_size + (out.w + res_vec.w + bias_data.w + attn_bias.w); - - output_cast[offset] = data; - } -} - -__global__ void gptj_residual_add(__half* input, - __half* output, - __half* attn, - __half* bias, - __half* attn_bias, - int total_count, - int intermediate_size, - float mp_size) -{ -#if __CUDA_ARCH__ >= 700 || defined(__HIP_PLATFORM_HCC__) - - float2* input_cast = reinterpret_cast(input); - float2* output_cast = reinterpret_cast(output); - float2* attn_cast = reinterpret_cast(attn); - - float2* bias_cast = reinterpret_cast(bias); - float2* attnbias_cast = reinterpret_cast(attn_bias); - - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float2 vals_vec = input_cast[offset]; - float2 out_vec = output_cast[offset]; - float2 res_vec = attn_cast[offset]; - - float2 bias_vec = bias_cast[offset % intermediate_size]; - float2 attn_bias_vec = attnbias_cast[offset % intermediate_size]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* out_half = reinterpret_cast<__half2*>(&out_vec); - __half2* res_half = reinterpret_cast<__half2*>(&res_vec); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - __half2* attnbias_half = reinterpret_cast<__half2*>(&attn_bias_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - float2 low_out = __half22float2(out_half[0]); - float2 high_out = __half22float2(out_half[1]); - - float2 low_res = __half22float2(res_half[0]); - float2 high_res = __half22float2(res_half[1]); - - float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - float2 attn_low_bias = __half22float2(attnbias_half[0]); - float2 attn_high_bias = __half22float2(attnbias_half[1]); - - low_data.x = - low_data.x * mp_size + (low_out.x + low_res.x + (low_bias.x + attn_low_bias.x)); - low_data.y = - low_data.y * mp_size + (low_out.y + low_res.y + (low_bias.y + 
attn_low_bias.y)); - high_data.x = - high_data.x * mp_size + (high_out.x + high_res.x + (high_bias.x + attn_high_bias.x)); - high_data.y = - high_data.y * mp_size + (high_out.y + high_res.y + (high_bias.y + attn_high_bias.y)); - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - output_cast[offset] = vals_vec; - } -#endif -} - -template -void launch_gptj_residual_add(T* input, - T* output, - T* attn, - T* bias, - T* attn_bias, - int hidden_dim, - int batch, - int mp_size, - hipStream_t stream) -{ - int total_count = batch * hidden_dim / 4; - dim3 block_dims(1024); - dim3 grid_dims((total_count - 1) / 1024 + 1); // (batch_size); - - hipLaunchKernelGGL(( gptj_residual_add), dim3(grid_dims), dim3(block_dims), 0, stream, - input, output, attn, bias, attn_bias, total_count, hidden_dim / 4, 1.0 / mp_size); -} - -template void launch_gptj_residual_add(float*, - float*, - float*, - float*, - float*, - int, - int, - int, - hipStream_t); -template void launch_gptj_residual_add<__half>(__half*, - __half*, - __half*, - __half*, - __half*, - int, - int, - int, - hipStream_t); - -__global__ void moe_res_matmul(float* residual, - float* coef, - float* mlp_out, - int seq_len, - int hidden_dim) -{ - unsigned tid = threadIdx.x; - float4* residual_cast = reinterpret_cast(residual); - float4* coef_cast = reinterpret_cast(coef); - float4* mlp_out_cast = reinterpret_cast(mlp_out); - - residual_cast += blockIdx.x * hidden_dim; - mlp_out_cast += blockIdx.x * hidden_dim; - - float4* coef_cast2 = coef_cast + hidden_dim; - - while (tid < hidden_dim) { - float4 res = residual_cast[tid]; - float4 mlp = mlp_out_cast[tid]; - float4 coef1 = coef_cast[tid]; - float4 coef2 = coef_cast2[tid]; - mlp.x = mlp.x * coef2.x + res.x * coef1.x; - mlp.y = mlp.y * coef2.y + res.y * coef1.y; - mlp.z = mlp.z * coef2.z + res.z * coef1.z; - mlp.w = mlp.w * coef2.w + res.w * coef1.w; - mlp_out_cast[tid] = mlp; - tid += blockDim.x; - } -} - -__global__ void 
moe_res_matmul(__half* residual, - __half* coef, - __half* mlp_out, - int seq_len, - int hidden_dim) -{ - unsigned tid = threadIdx.x; - - float2* residual_cast = reinterpret_cast(residual); - float2* mlp_out_cast = reinterpret_cast(mlp_out); - float2* coef_cast = reinterpret_cast(coef); - float2* coef_cast2 = coef_cast + hidden_dim; - - residual_cast += blockIdx.x * hidden_dim; - mlp_out_cast += blockIdx.x * hidden_dim; - - while (tid < hidden_dim) { - float2 res = residual_cast[tid]; - float2 coef1 = coef_cast[tid]; - float2 coef2 = coef_cast[tid]; - float2 data = mlp_out_cast[tid]; - __half* data_h = reinterpret_cast<__half*>(&data); - __half* coef1_h = reinterpret_cast<__half*>(&coef1); - __half* coef2_h = reinterpret_cast<__half*>(&coef2); - __half* res_h = reinterpret_cast<__half*>(&res); - data_h[0] = res_h[0] * coef1_h[0] + data_h[0] * coef2_h[0]; - data_h[1] = res_h[1] * coef1_h[1] + data_h[1] * coef2_h[1]; - data_h[2] = res_h[2] * coef1_h[2] + data_h[2] * coef2_h[2]; - data_h[3] = res_h[3] * coef1_h[3] + data_h[3] * coef2_h[3]; - - mlp_out_cast[tid] = data; - tid += blockDim.x; - } -} - -template -void launch_moe_res_matmul(T* residual, - T* coef, - T* mlp_out, - int seq_len, - int hidden_dim, - hipStream_t stream) -{ - dim3 grid_dim(seq_len); - dim3 block_dim(1024); - hipLaunchKernelGGL(( moe_res_matmul), dim3(grid_dim), dim3(block_dim), 0, stream, - residual, coef, mlp_out, seq_len, hidden_dim / 4); -} - -template void launch_moe_res_matmul(float* residual, - float* coef, - float* mlp_out, - int seq_len, - int hidden_dim, - hipStream_t stream); -template void launch_moe_res_matmul(__half* residual, - __half* coef, - __half* mlp_out, - int seq_len, - int hidden_dim, - hipStream_t stream); diff --git a/deepspeed/ops/csrc/transformer/inference/csrc/normalize.cu b/deepspeed/ops/csrc/transformer/inference/csrc/normalize.cu deleted file mode 100644 index 7f3cfc1..0000000 --- a/deepspeed/ops/csrc/transformer/inference/csrc/normalize.cu +++ /dev/null @@ -1,453 
+0,0 @@ -#include -#include "custom_cuda_layers.h" - -#ifndef __HIP_PLATFORM_HCC__ -#include -#endif -#include -#include -#include - -#define NORM_REG (MAX_REGISTERS) - -namespace cg = cooperative_groups; - -__global__ void fused_bias_residual_layer_norm(float* output, - const float* vals, - const float* gamma, - const float* beta, - float epsilon, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id >> 5; - int warp_num = iteration_stride >> 5; - - float inp_reg[NORM_REG]; - - int k = 0; - float sum = 0; - int input_id = id; - while (input_id < row_stride) { - inp_reg[k] = vals[input_id + row * row_stride]; - sum += inp_reg[k++]; - input_id += iteration_stride; - } - - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - - __shared__ float shr[MAX_WARP_NUM]; - - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - - float mean = sum / (row_stride); - sum = 0.f; - for (int f = 0; f < k; f++) { - inp_reg[f] -= mean; - sum += inp_reg[f] * inp_reg[f]; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= (row_stride); - sum += epsilon; - sum = __frsqrt_rn(sum); - for (int f = 0; f < k; f++) { - int out_id = f * iteration_stride + id; - inp_reg[f] = inp_reg[f] * sum; - inp_reg[f] = inp_reg[f] * gamma[out_id] + beta[out_id]; - output[out_id + row * row_stride] = inp_reg[f]; - } -} - -__global__ void 
fused_bias_residual_layer_norm(__half* output, - const __half* vals, - const __half* gamma, - const __half* beta, - float epsilon, - int row_stride) -{ -#ifdef HALF_PRECISION_AVAILABLE - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id >> 5; - int warp_num = iteration_stride >> 5; - - __half2 inp_reg[NORM_REG]; - - const __half2* vals_cast = reinterpret_cast(vals); - __half2* out_cast = reinterpret_cast<__half2*>(output); - - int k = 0; - int input_id = id; - while (input_id < row_stride) { - inp_reg[k++] = vals_cast[input_id + row * row_stride]; - input_id += iteration_stride; - } - float sum = 0; - for (int f = k - 1; f >= 0; f--) { - float2 inp_f = __half22float2(inp_reg[f]); - sum += inp_f.x + inp_f.y; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - __shared__ float shr[MAX_WARP_NUM]; - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - float mean = sum / (row_stride << 1); - sum = 0.f; - for (int f = 0; f < k; f++) { - float2 inp_f = __half22float2(inp_reg[f]); - inp_f.x -= mean; - inp_f.y -= mean; - inp_reg[f] = __float22half2_rn(inp_f); - sum += inp_f.x * inp_f.x; - sum += inp_f.y * inp_f.y; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= (row_stride << 1); - sum += epsilon; - sum = __frsqrt_rn(sum); - __half2 variance_h = __float2half2_rn(sum); - const __half2* gamma_cast = reinterpret_cast(gamma); - const 
__half2* beta_cast = reinterpret_cast(beta); - for (int f = 0; f < k; f++) { - int out_id = f * iteration_stride + id; - inp_reg[f] = inp_reg[f] * variance_h; - inp_reg[f] = inp_reg[f] * gamma_cast[out_id] + beta_cast[out_id]; - out_cast[out_id + row * row_stride] = inp_reg[f]; - } -#endif -} - -template -void launch_layer_norm(T* out, - T* vals, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - int hidden_dim, - cudaStream_t stream); - -template <> -void launch_layer_norm(float* out, - float* vals, - const float* gamma, - const float* beta, - float epsilon, - int batch_size, - int hidden_dim, - cudaStream_t stream) -{ - constexpr int threads = 1024; - - dim3 grid_dim(batch_size); - - dim3 block_dim(threads); - - fused_bias_residual_layer_norm<<>>( - out, vals, gamma, beta, epsilon, hidden_dim); -} - -template <> -void launch_layer_norm<__half>(__half* out, - __half* vals, - const __half* gamma, - const __half* beta, - float epsilon, - int batch_size, - int hidden_dim, - cudaStream_t stream) -{ - constexpr int threads = 1024; - - dim3 grid_dim(batch_size); - dim3 block_dim(threads); - - fused_bias_residual_layer_norm<<>>( - out, vals, gamma, beta, epsilon, hidden_dim / 2); -} - -__global__ void fused_residual_layer_norm(float* norm, - float* res_add, - float* vals, - float* residual, - const float* bias, - const float* gamma, - const float* beta, - float epsilon, - int row_stride, - bool preLN, - bool mlp_after_attn) -{ - int iteration_stride = blockDim.x; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id >> 5; - int warp_num = iteration_stride >> 5; - - float inp_reg[NORM_REG]; - - int k = 0; - int input_id = id; - - float sum = 0; - while (input_id < row_stride) { - inp_reg[k] = vals[input_id + row * row_stride]; - float res_f = (residual[input_id + row * row_stride]); - float bias_f = (bias[input_id]); - if 
(mlp_after_attn) inp_reg[k] += res_f + bias_f; - // if (preLN) res_add[input_id + row * row_stride] = inp_reg[k]; - sum += inp_reg[k++]; - input_id += iteration_stride; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - - __shared__ float shr[MAX_WARP_NUM]; - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - float mean = sum / (row_stride); - sum = 0.f; - for (int f = 0; f < k; f++) { - inp_reg[f] -= mean; - sum += inp_reg[f] * inp_reg[f]; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= (row_stride); - sum += epsilon; - sum = __frsqrt_rn(sum); - - for (int f = 0; f < k; f++) { - int out_id = f * iteration_stride + id; - inp_reg[f] = inp_reg[f] * sum; - inp_reg[f] = inp_reg[f] * gamma[out_id] + beta[out_id]; - norm[out_id + row * row_stride] = inp_reg[f]; - } -} - -__global__ void fused_residual_layer_norm(__half* norm, - __half* res_add, - __half* vals, - __half* residual, - const __half* bias, - const __half* gamma, - const __half* beta, - float epsilon, - int row_stride, - bool preLN, - bool mlp_after_attn) -{ -#ifdef HALF_PRECISION_AVAILABLE - int iteration_stride = blockDim.x; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id >> 5; - int warp_num = iteration_stride >> 5; - - __half2 inp_reg[NORM_REG]; - - __half2* vals_cast = reinterpret_cast<__half2*>(vals); - __half2* norm_cast = reinterpret_cast<__half2*>(norm); - __half2* res_add_cast = reinterpret_cast<__half2*>(res_add); - __half2* 
residual_cast = reinterpret_cast<__half2*>(residual); - const __half2* bias_cast = reinterpret_cast(bias); - - int k = 0; - int input_id = id; - - float sum = 0; - while (input_id < row_stride) { - inp_reg[k] = vals_cast[input_id + row * row_stride]; - float2 inp_f = __half22float2(inp_reg[k]); - float2 res_f = __half22float2(residual_cast[input_id + row * row_stride]); - float2 bias_f = __half22float2(bias_cast[input_id]); - if (mlp_after_attn) { - inp_f.x += res_f.x + bias_f.x; - inp_f.y += res_f.y + bias_f.y; - } - inp_reg[k] = __float22half2_rn(inp_f); - // if (preLN) res_add_cast[input_id + row * row_stride] = __float22half2_rn(res_f); - // //inp_reg[k]; - sum += inp_f.x + inp_f.y; - input_id += iteration_stride; - k++; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - __shared__ float shr[MAX_WARP_NUM]; - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - float mean = sum / (row_stride << 1); - sum = 0.f; - for (int f = 0; f < k; f++) { - float2 inp_f = __half22float2(inp_reg[f]); - inp_f.x -= mean; - inp_f.y -= mean; - inp_reg[f] = __float22half2_rn(inp_f); - sum += inp_f.x * inp_f.x; - sum += inp_f.y * inp_f.y; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= (row_stride << 1); - sum += epsilon; - sum = __frsqrt_rn(sum); - __half2 variance_h = __float2half2_rn(sum); - const __half2* gamma_cast = reinterpret_cast(gamma); - const __half2* beta_cast = reinterpret_cast(beta); - for (int f = 0; f < k; f++) { - int out_id = f * iteration_stride + id; - inp_reg[f] = inp_reg[f] * variance_h; - inp_reg[f] = inp_reg[f] * 
gamma_cast[out_id] + beta_cast[out_id]; - norm_cast[out_id + row * row_stride] = inp_reg[f]; - } -#endif -} - -template -void launch_residual_layer_norm(T* norm, - T* res_add, - T* vals, - T* residual, - const T* bias, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - int hidden_dim, - bool preLN, - bool mlp_after_attn, - cudaStream_t stream); - -template <> -void launch_residual_layer_norm(float* norm, - float* res_add, - float* vals, - float* residual, - const float* bias, - const float* gamma, - const float* beta, - float epsilon, - int batch_size, - int hidden_dim, - bool preLN, - bool mlp_after_attn, - cudaStream_t stream) -{ - constexpr int threads = 1024; - - dim3 grid_dim(batch_size); - - dim3 block_dim(threads); - - fused_residual_layer_norm<<>>(norm, - res_add, - vals, - residual, - bias, - gamma, - beta, - epsilon, - hidden_dim, - preLN, - mlp_after_attn); -} - -template <> -void launch_residual_layer_norm<__half>(__half* norm, - __half* res_add, - __half* vals, - __half* residual, - const __half* bias, - const __half* gamma, - const __half* beta, - float epsilon, - int batch_size, - int hidden_dim, - bool preLN, - bool mlp_after_attn, - cudaStream_t stream) -{ - constexpr int threads = 1024; - - dim3 grid_dim(batch_size); - dim3 block_dim(threads); - - fused_residual_layer_norm<<>>(norm, - res_add, - vals, - residual, - bias, - gamma, - beta, - epsilon, - hidden_dim / 2, - preLN, - mlp_after_attn); -} diff --git a/deepspeed/ops/csrc/transformer/inference/csrc/normalize.hip b/deepspeed/ops/csrc/transformer/inference/csrc/normalize.hip deleted file mode 100644 index 333e91f..0000000 --- a/deepspeed/ops/csrc/transformer/inference/csrc/normalize.hip +++ /dev/null @@ -1,455 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
-#include "hip/hip_runtime.h" -#include -#include "custom_hip_layers.h" - -#ifndef __HIP_PLATFORM_HCC__ -#include -#endif -#include -#include -#include - -#define NORM_REG (MAX_REGISTERS) - -namespace cg = cooperative_groups; - -__global__ void fused_bias_residual_layer_norm(float* output, - const float* vals, - const float* gamma, - const float* beta, - float epsilon, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id >> 5; - int warp_num = iteration_stride >> 5; - - float inp_reg[NORM_REG]; - - int k = 0; - float sum = 0; - int input_id = id; - while (input_id < row_stride) { - inp_reg[k] = vals[input_id + row * row_stride]; - sum += inp_reg[k++]; - input_id += iteration_stride; - } - - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - - __shared__ float shr[MAX_WARP_NUM]; - - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - - float mean = sum / (row_stride); - sum = 0.f; - for (int f = 0; f < k; f++) { - inp_reg[f] -= mean; - sum += inp_reg[f] * inp_reg[f]; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= (row_stride); - sum += epsilon; - sum = __frsqrt_rn(sum); - for (int f = 0; f < k; f++) { - int out_id = f * iteration_stride + id; - inp_reg[f] = inp_reg[f] * sum; - inp_reg[f] = inp_reg[f] * gamma[out_id] + beta[out_id]; - output[out_id + row * row_stride] = inp_reg[f]; - } -} - -__global__ 
void fused_bias_residual_layer_norm(__half* output, - const __half* vals, - const __half* gamma, - const __half* beta, - float epsilon, - int row_stride) -{ -#ifdef HALF_PRECISION_AVAILABLE - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id >> 5; - int warp_num = iteration_stride >> 5; - - __half2 inp_reg[NORM_REG]; - - const __half2* vals_cast = reinterpret_cast(vals); - __half2* out_cast = reinterpret_cast<__half2*>(output); - - int k = 0; - int input_id = id; - while (input_id < row_stride) { - inp_reg[k++] = vals_cast[input_id + row * row_stride]; - input_id += iteration_stride; - } - float sum = 0; - for (int f = k - 1; f >= 0; f--) { - float2 inp_f = __half22float2(inp_reg[f]); - sum += inp_f.x + inp_f.y; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - __shared__ float shr[MAX_WARP_NUM]; - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - float mean = sum / (row_stride << 1); - sum = 0.f; - for (int f = 0; f < k; f++) { - float2 inp_f = __half22float2(inp_reg[f]); - inp_f.x -= mean; - inp_f.y -= mean; - inp_reg[f] = __float22half2_rn(inp_f); - sum += inp_f.x * inp_f.x; - sum += inp_f.y * inp_f.y; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= (row_stride << 1); - sum += epsilon; - sum = __frsqrt_rn(sum); - __half2 variance_h = __float2half2_rn(sum); - const __half2* gamma_cast = reinterpret_cast(gamma); - const 
__half2* beta_cast = reinterpret_cast(beta); - for (int f = 0; f < k; f++) { - int out_id = f * iteration_stride + id; - inp_reg[f] = inp_reg[f] * variance_h; - inp_reg[f] = inp_reg[f] * gamma_cast[out_id] + beta_cast[out_id]; - out_cast[out_id + row * row_stride] = inp_reg[f]; - } -#endif -} - -template -void launch_layer_norm(T* out, - T* vals, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - int hidden_dim, - hipStream_t stream); - -template <> -void launch_layer_norm(float* out, - float* vals, - const float* gamma, - const float* beta, - float epsilon, - int batch_size, - int hidden_dim, - hipStream_t stream) -{ - constexpr int threads = 1024; - - dim3 grid_dim(batch_size); - - dim3 block_dim(threads); - - hipLaunchKernelGGL(( fused_bias_residual_layer_norm), dim3(grid_dim), dim3(block_dim), 0, stream, - out, vals, gamma, beta, epsilon, hidden_dim); -} - -template <> -void launch_layer_norm<__half>(__half* out, - __half* vals, - const __half* gamma, - const __half* beta, - float epsilon, - int batch_size, - int hidden_dim, - hipStream_t stream) -{ - constexpr int threads = 1024; - - dim3 grid_dim(batch_size); - dim3 block_dim(threads); - - hipLaunchKernelGGL(( fused_bias_residual_layer_norm), dim3(grid_dim), dim3(block_dim), 0, stream, - out, vals, gamma, beta, epsilon, hidden_dim / 2); -} - -__global__ void fused_residual_layer_norm(float* norm, - float* res_add, - float* vals, - float* residual, - const float* bias, - const float* gamma, - const float* beta, - float epsilon, - int row_stride, - bool preLN, - bool mlp_after_attn) -{ - int iteration_stride = blockDim.x; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id >> 5; - int warp_num = iteration_stride >> 5; - - float inp_reg[NORM_REG]; - - int k = 0; - int input_id = id; - - float sum = 0; - while (input_id < row_stride) { - inp_reg[k] = vals[input_id + row * 
row_stride]; - float res_f = (residual[input_id + row * row_stride]); - float bias_f = (bias[input_id]); - if (mlp_after_attn) inp_reg[k] += res_f + bias_f; - // if (preLN) res_add[input_id + row * row_stride] = inp_reg[k]; - sum += inp_reg[k++]; - input_id += iteration_stride; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - - __shared__ float shr[MAX_WARP_NUM]; - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - float mean = sum / (row_stride); - sum = 0.f; - for (int f = 0; f < k; f++) { - inp_reg[f] -= mean; - sum += inp_reg[f] * inp_reg[f]; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= (row_stride); - sum += epsilon; - sum = __frsqrt_rn(sum); - - for (int f = 0; f < k; f++) { - int out_id = f * iteration_stride + id; - inp_reg[f] = inp_reg[f] * sum; - inp_reg[f] = inp_reg[f] * gamma[out_id] + beta[out_id]; - norm[out_id + row * row_stride] = inp_reg[f]; - } -} - -__global__ void fused_residual_layer_norm(__half* norm, - __half* res_add, - __half* vals, - __half* residual, - const __half* bias, - const __half* gamma, - const __half* beta, - float epsilon, - int row_stride, - bool preLN, - bool mlp_after_attn) -{ -#ifdef HALF_PRECISION_AVAILABLE - int iteration_stride = blockDim.x; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id >> 5; - int warp_num = iteration_stride >> 5; - - __half2 inp_reg[NORM_REG]; - - __half2* vals_cast = reinterpret_cast<__half2*>(vals); - __half2* norm_cast = 
reinterpret_cast<__half2*>(norm); - __half2* res_add_cast = reinterpret_cast<__half2*>(res_add); - __half2* residual_cast = reinterpret_cast<__half2*>(residual); - const __half2* bias_cast = reinterpret_cast(bias); - - int k = 0; - int input_id = id; - - float sum = 0; - while (input_id < row_stride) { - inp_reg[k] = vals_cast[input_id + row * row_stride]; - float2 inp_f = __half22float2(inp_reg[k]); - float2 res_f = __half22float2(residual_cast[input_id + row * row_stride]); - float2 bias_f = __half22float2(bias_cast[input_id]); - if (mlp_after_attn) { - inp_f.x += res_f.x + bias_f.x; - inp_f.y += res_f.y + bias_f.y; - } - inp_reg[k] = __float22half2_rn(inp_f); - // if (preLN) res_add_cast[input_id + row * row_stride] = __float22half2_rn(res_f); - // //inp_reg[k]; - sum += inp_f.x + inp_f.y; - input_id += iteration_stride; - k++; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - __shared__ float shr[MAX_WARP_NUM]; - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - float mean = sum / (row_stride << 1); - sum = 0.f; - for (int f = 0; f < k; f++) { - float2 inp_f = __half22float2(inp_reg[f]); - inp_f.x -= mean; - inp_f.y -= mean; - inp_reg[f] = __float22half2_rn(inp_f); - sum += inp_f.x * inp_f.x; - sum += inp_f.y * inp_f.y; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= (row_stride << 1); - sum += epsilon; - sum = __frsqrt_rn(sum); - __half2 variance_h = __float2half2_rn(sum); - const __half2* gamma_cast = reinterpret_cast(gamma); - const __half2* beta_cast = reinterpret_cast(beta); - for (int f = 0; f < k; f++) { - int 
out_id = f * iteration_stride + id; - inp_reg[f] = inp_reg[f] * variance_h; - inp_reg[f] = inp_reg[f] * gamma_cast[out_id] + beta_cast[out_id]; - norm_cast[out_id + row * row_stride] = inp_reg[f]; - } -#endif -} - -template -void launch_residual_layer_norm(T* norm, - T* res_add, - T* vals, - T* residual, - const T* bias, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - int hidden_dim, - bool preLN, - bool mlp_after_attn, - hipStream_t stream); - -template <> -void launch_residual_layer_norm(float* norm, - float* res_add, - float* vals, - float* residual, - const float* bias, - const float* gamma, - const float* beta, - float epsilon, - int batch_size, - int hidden_dim, - bool preLN, - bool mlp_after_attn, - hipStream_t stream) -{ - constexpr int threads = 1024; - - dim3 grid_dim(batch_size); - - dim3 block_dim(threads); - - hipLaunchKernelGGL(( fused_residual_layer_norm), dim3(grid_dim), dim3(block_dim), 0, stream, norm, - res_add, - vals, - residual, - bias, - gamma, - beta, - epsilon, - hidden_dim, - preLN, - mlp_after_attn); -} - -template <> -void launch_residual_layer_norm<__half>(__half* norm, - __half* res_add, - __half* vals, - __half* residual, - const __half* bias, - const __half* gamma, - const __half* beta, - float epsilon, - int batch_size, - int hidden_dim, - bool preLN, - bool mlp_after_attn, - hipStream_t stream) -{ - constexpr int threads = 1024; - - dim3 grid_dim(batch_size); - dim3 block_dim(threads); - - hipLaunchKernelGGL(( fused_residual_layer_norm), dim3(grid_dim), dim3(block_dim), 0, stream, norm, - res_add, - vals, - residual, - bias, - gamma, - beta, - epsilon, - hidden_dim / 2, - preLN, - mlp_after_attn); -} diff --git a/deepspeed/ops/csrc/transformer/inference/csrc/pt_binding.cpp b/deepspeed/ops/csrc/transformer/inference/csrc/pt_binding.cpp deleted file mode 100644 index 320e649..0000000 --- a/deepspeed/ops/csrc/transformer/inference/csrc/pt_binding.cpp +++ /dev/null @@ -1,951 +0,0 @@ - -#include -#include 
-#include -#include "context.h" -#include "cublas_wrappers.h" -#include "custom_cuda_layers.h" - -std::array gemm_algos = std::array({99, 99, 99}); - -#define MAX_OUT_TOKES 10 - -template -at::Tensor ds_softmax(at::Tensor& attn_scores, - at::Tensor& attn_mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - bool async_op) -{ - auto attn_scores_c = attn_scores.contiguous(); - int bsz = attn_scores_c.size(0); - - int seq_len = attn_scores_c.size(1); - int len = attn_scores_c.sizes().size(); - if (len > 3) seq_len = attn_scores_c.size(2); - - int soft_len = attn_scores_c.size(2); - if (len > 3) soft_len = attn_scores_c.size(3); - - int heads = 1; - if (len > 3) heads = attn_scores_c.size(1); - - launch_attn_softmax_v2((T*)attn_scores_c.data_ptr(), - (attn_mask.sizes().size() > 1 ? (T*)attn_mask.data_ptr() : nullptr), - triangular, - recompute, - local_attention, - window_size, - bsz, - heads, - seq_len, - soft_len, - 1.0, - Context::Instance().GetCurrentStream(async_op)); - - return attn_scores_c; -} - -template -void allocate_workspace(size_t hidden_dim, - size_t max_seq_len, - size_t batch_size, - size_t head_size = 128) -{ - size_t _workSpaceSize = (hidden_dim * batch_size * max_seq_len); - Context::Instance().GenWorkSpace(_workSpaceSize * sizeof(T)); -} - -template -at::Tensor einsum_sec_sm_ecm(at::Tensor& Q, at::Tensor& W) -{ - auto options = at::TensorOptions() - .dtype(Q.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - T* workspace = (T*)Context::Instance().GetWorkSpace(); - float alpha = 1; - float gemm_beta = 0.0; - - if (!workspace) { - allocate_workspace(W.size(1), MAX_OUT_TOKES, Q.size(0)); - workspace = (T*)Context::Instance().GetWorkSpace(); - } - - auto O = at::from_blob(workspace, {Q.size(1), Q.size(2), W.size(1)}, options); - unsigned m = W.size(1); - unsigned n = Q.size(1) * Q.size(2); - unsigned k = Q.size(0); - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - 
CUBLAS_OP_N, - CUBLAS_OP_T, - m, - n, - k, - &alpha, - &gemm_beta, - (T*)W.data_ptr(), - (T*)Q.data_ptr(), - (T*)O.data_ptr(), -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); -#else - CUBLAS_GEMM_DEFAULT_TENSOR_OP); -#endif - return O; -} - -template -void attention_unfused(at::Tensor& prev_key_cont, - at::Tensor& query_cont, - at::Tensor& attn_mask, - at::Tensor& prev_value_cont, - at::Tensor& output, - int& bsz, - int& seq_len, - int& soft_len, - int& heads, - float& norm_factor, - bool triangular, - bool recompute, - bool local_attention, - int window_size) -{ - auto options = at::TensorOptions() - .dtype(query_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - float alpha = norm_factor; - float gemm_beta = 0.0; - auto attn_score = at::empty({bsz, heads, seq_len, soft_len}, options); - int k = prev_value_cont.size(2) / heads; - cublasSetStream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - cublas_strided_batched_gemm(Context::Instance().GetCublasHandle(), - soft_len, - seq_len, - k, - &alpha, - &gemm_beta, - (T*)prev_key_cont.data_ptr(), - (T*)query_cont.data_ptr(), - (T*)attn_score.data_ptr(), - CUBLAS_OP_N, - CUBLAS_OP_N, - soft_len * k, - seq_len * k, - seq_len * soft_len, - bsz * heads, -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); -#else - CUBLAS_GEMM_DEFAULT_TENSOR_OP); -#endif - attn_score = ds_softmax( - attn_score, attn_mask, triangular, recompute, local_attention, window_size, false); - alpha = 1.0; - cublas_strided_batched_gemm(Context::Instance().GetCublasHandle(), - k, - seq_len, - soft_len, - &alpha, - &gemm_beta, - (T*)prev_value_cont.data_ptr(), - (T*)attn_score.data_ptr(), - (T*)output.data_ptr(), - CUBLAS_OP_N, - CUBLAS_OP_N, - soft_len * k, - seq_len * soft_len, - seq_len * k, - bsz * heads, -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); -#else - CUBLAS_GEMM_DEFAULT_TENSOR_OP); -#endif -} - -template -std::vector 
ds_softmax_context(at::Tensor& query, - at::Tensor& prev_key, - at::Tensor& new_key, - at::Tensor& attn_mask, - at::Tensor& prev_value, - at::Tensor& new_value, - int heads, - float norm_factor, - bool merging, - bool triangular, - bool local_attention, - int window_size, - bool no_masking) -{ - auto query_cont = query.contiguous(); - auto prev_key_cont = prev_key.contiguous(); - auto prev_value_cont = prev_value.contiguous(); - - int new_size = (new_value.sizes().size() > 1 ? new_value.size(1) : 0); - - // Attn_Score [ batch Head Sequence-length Softmax-length] - - int bsz = query_cont.size(0); - int seq_len = query_cont.size(1); - int soft_len = prev_value.size(1); - - auto options = at::TensorOptions() - .dtype(query_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = - at::empty({prev_value.size(0), heads, seq_len, prev_value.size(2) / heads}, options); - attention_unfused(prev_key_cont, - query_cont, - attn_mask, //(no_masking ? 
nullptr : (T*)attn_mask.data_ptr()), - prev_value_cont, - output, - bsz, - seq_len, - soft_len, - heads, - norm_factor, - (triangular && (new_size == 0)), - (new_size == 0), - local_attention, - window_size); - - return {output, prev_key, prev_value}; -} - -template -at::Tensor ds_bias_gelu(at::Tensor& input, at::Tensor& bias) -{ - auto input_cont = input.contiguous(); - - int bsz = input_cont.size(0) * input_cont.size(1); - int intermediate_size = input_cont.size(2); - - launch_bias_gelu((T*)input_cont.data_ptr(), - (T*)bias.data_ptr(), - intermediate_size, - bsz, - Context::Instance().GetCurrentStream()); - return input_cont; -} - -template -at::Tensor ds_bias_residual(at::Tensor& input, at::Tensor& residual, at::Tensor& bias) -{ - auto input_cont = input.contiguous(); - auto residual_cont = residual.contiguous(); - - int bsz = input_cont.size(0) * input_cont.size(1); - // launch_bias_residual((T*)input_cont.data_ptr(), - // (T*)residual_cont.data_ptr(), - // (T*)bias.data_ptr(), - // bsz, - // input_cont.size(2), - // (bias.size(0) > 1), - // Context::Instance().GetCurrentStream()); - return input_cont; -} - -template -at::Tensor ds_layernorm(at::Tensor& input_cont, at::Tensor& gamma, at::Tensor& betta, float epsilon) -{ - int bsz = input_cont.size(0) * input_cont.size(1); - auto inp_norm = at::empty_like(input_cont); - launch_layer_norm((T*)inp_norm.data_ptr(), - (T*)input_cont.data_ptr(), - (T*)gamma.data_ptr(), - (T*)betta.data_ptr(), - epsilon, - bsz, - input_cont.size(2), - Context::Instance().GetCurrentStream()); - return inp_norm; -} - -template -at::Tensor qkv_unfused_cublas(at::Tensor& output, - at::Tensor& input, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - bool add_bias) -{ - auto inp_norm = ds_layernorm(input, gamma, beta, epsilon); - - // cudaEventRecord(Context::Instance().GetCompEvent(1), Context::Instance().GetCurrentStream()); - - float alpha = (T)1.0; - float gemm_beta = (T)0.0; - 
int bsz = input.size(0) * input.size(1); - cublasSetStream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - CUBLAS_OP_N, - CUBLAS_OP_N, - weight.size(1), - bsz, - input.size(2), - &alpha, - &gemm_beta, - (T*)weight.data_ptr(), - (T*)inp_norm.data_ptr(), - (T*)output.data_ptr(), -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); -#else - CUBLAS_GEMM_DEFAULT_TENSOR_OP); -#endif - if (add_bias) - launch_bias_add((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - return inp_norm; -} - -template -std::vector ds_qkv_gemm(at::Tensor& input, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - bool add_bias) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); - auto inp_norm = - qkv_unfused_cublas(output, input_cont, weight, bias, gamma, beta, epsilon, add_bias); - - return {output, inp_norm}; -} - -template -void quantized_gemm(at::Tensor& output, - at::Tensor& input, - at::Tensor& weight, - at::Tensor& qscale, - int groups, - int merge_count) -{ - int bsz = input.size(0) * input.size(1); - auto options = at::TensorOptions() - .dtype(input.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - auto weight16 = at::empty({weight.size(0), weight.size(1)}, options); - - launch_dequantize((T*)weight16.data_ptr(), - (int8_t*)weight.data_ptr(), - (float*)qscale.data_ptr(), - weight.size(1), - weight.size(0), - groups, - merge_count, - Context::Instance().GetCurrentStream()); - - cublasSetStream(Context::Instance().GetCublasHandle(), 
Context::Instance().GetCurrentStream()); - - float alpha = (T)1.0; - float gemm_beta = (T)0.0; - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - CUBLAS_OP_N, - CUBLAS_OP_N, - weight.size(1), - bsz, - input.size(2), - &alpha, - &gemm_beta, - (T*)weight16.data_ptr(), - (T*)input.data_ptr(), - (T*)output.data_ptr(), -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); -#else - CUBLAS_GEMM_DEFAULT_TENSOR_OP); -#endif -} - -template -at::Tensor ds_qkv_gemm_int8(at::Tensor& input, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - at::Tensor& q_scale, - int groups, - bool add_bias) -{ - int bsz = input.size(0) * input.size(1); - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - - auto inp_norm = ds_layernorm(input_cont, gamma, beta, epsilon); - - quantized_gemm(output, inp_norm, weight, q_scale, groups, 0); - if (add_bias) - launch_bias_add((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - - return output; -} - -template -at::Tensor ds_linear_layer(at::Tensor& input, at::Tensor& weight, at::Tensor& bias) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); - - float alpha = (T)1.0; - float gemm_beta = (T)0.0; - cublasSetStream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - CUBLAS_OP_N, - CUBLAS_OP_N, - weight.size(1), - bsz, - 
input_cont.size(2), - &alpha, - &gemm_beta, - (T*)weight.data_ptr(), - (T*)input_cont.data_ptr(), - (T*)output.data_ptr(), -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); -#else - CUBLAS_GEMM_DEFAULT_TENSOR_OP); -#endif - - launch_bias_add((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - - return output; -} - -template -at::Tensor ds_linear_layer_int8(at::Tensor& input, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& q_scale, - int groups) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - int bsz = input_cont.size(0) * input_cont.size(1); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - - quantized_gemm(output, input_cont, weight, q_scale, groups, 0); - launch_bias_add((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - return output; -} - -template -at::Tensor ds_vector_matmul(at::Tensor& input, at::Tensor& weight, bool async_op) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); - float alpha = (T)1.0; - float gemm_beta = (T)0.0; - cublasSetStream(Context::Instance().GetCublasHandle(), - Context::Instance().GetCurrentStream(async_op)); - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - CUBLAS_OP_N, - CUBLAS_OP_N, - weight.size(1), - bsz, - input_cont.size(2), - &alpha, - &gemm_beta, - (T*)weight.data_ptr(), - (T*)input_cont.data_ptr(), - (T*)output.data_ptr(), -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); -#else - 
CUBLAS_GEMM_DEFAULT_TENSOR_OP); -#endif - return output; -} - -template -at::Tensor ds_vector_matmul_int8(at::Tensor& input, - at::Tensor& weight, - at::Tensor& q_scale, - int groups, - int merge_count) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - - quantized_gemm(output, input_cont, weight, q_scale, groups, merge_count); - return output; -} - -template -void mlp_unfused_cublas(at::Tensor& output, - at::Tensor& input, - at::Tensor& residual, - at::Tensor& input_bias, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - bool preLayerNorm, - bool mlp_after_attn) -{ - int bsz = input.size(0) * input.size(1); - auto inp_norm = at::empty_like(input); - - launch_residual_layer_norm((T*)inp_norm.data_ptr(), - (T*)nullptr, - (T*)input.data_ptr(), - (T*)residual.data_ptr(), - (T*)input_bias.data_ptr(), - (T*)gamma.data_ptr(), - (T*)beta.data_ptr(), - epsilon, - bsz, - input.size(2), - preLayerNorm, - mlp_after_attn, - Context::Instance().GetCurrentStream()); - - float alpha = (T)1.0; - float gemm_beta = (T)0.0; - cublasSetStream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - CUBLAS_OP_N, - CUBLAS_OP_N, - weight.size(1), - bsz, - input.size(2), - &alpha, - &gemm_beta, - (T*)weight.data_ptr(), - (T*)inp_norm.data_ptr(), - (T*)output.data_ptr(), -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); -#else - CUBLAS_GEMM_DEFAULT_TENSOR_OP); -#endif - launch_bias_gelu((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); -} -template -at::Tensor ds_mlp_gemm(at::Tensor& input, - at::Tensor& residual, - at::Tensor& input_bias, - 
at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - bool preLayerNorm, - bool mlp_after_attn) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); - - mlp_unfused_cublas(output, - mlp_after_attn ? input : residual, - residual, - input_bias, - weight, - bias, - gamma, - beta, - epsilon, - preLayerNorm, - mlp_after_attn); - - return output; -} - -template -std::vector ds_mlp_gemm_int8(at::Tensor& input, - at::Tensor& residual, - at::Tensor& input_bias, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - at::Tensor& q_scale, - int groups, - bool preLayerNorm) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - - int bsz = input_cont.size(0) * input_cont.size(1); - auto inp_norm = at::empty_like(input_cont); - - auto residual_add = (preLayerNorm ? 
at::empty_like(input_cont) : inp_norm); - // computing the blocking across K dimension - // launch_residual_layer_norm((T*)inp_norm.data_ptr(), - // (T*)residual_add.data_ptr(), - // (T*)input_cont.data_ptr(), - // (T*)residual.data_ptr(), - // (T*)input_bias.data_ptr(), - // (T*)gamma.data_ptr(), - // (T*)beta.data_ptr(), - // epsilon, - // bsz, - // input_cont.size(2), - // preLayerNorm, - // Context::Instance().GetCurrentStream()); - - quantized_gemm(output, inp_norm, weight, q_scale, groups, 0); - launch_bias_gelu((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - - return {output, residual_add}; -} - -template -at::Tensor fused_gemm_gelu(at::Tensor& input, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& weight_out, - const float epsilon, - bool preLayerNorm, - bool async_op) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto intermediate = - at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight_out.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); - float alpha = (T)1.0; - float gemm_beta = (T)0.0; - cublasSetStream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - CUBLAS_OP_N, - CUBLAS_OP_N, - weight.size(1), - bsz, - input.size(2), - &alpha, - &gemm_beta, - (T*)weight.data_ptr(), - (T*)input_cont.data_ptr(), - (T*)intermediate.data_ptr(), -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); -#else - CUBLAS_GEMM_DEFAULT_TENSOR_OP); -#endif - launch_bias_gelu((T*)intermediate.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - - 
cublas_gemm_ex(Context::Instance().GetCublasHandle(), - CUBLAS_OP_N, - CUBLAS_OP_N, - weight_out.size(1), - bsz, - intermediate.size(2), - &alpha, - &gemm_beta, - (T*)weight_out.data_ptr(), - (T*)intermediate.data_ptr(), - (T*)output.data_ptr(), -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); -#else - CUBLAS_GEMM_DEFAULT_TENSOR_OP); -#endif - // cudaEventRecord(Context::Instance().GetCompEvent(2), - // Context::Instance().GetCurrentStream(true)); - return output; -} - -void residual_add_bias(at::Tensor& output, - at::Tensor& input, - at::Tensor& attention_output, - at::Tensor& output_b, - at::Tensor& attention_b, - int mp_size, - bool mlp_after_attn) -{ - int bsz = input.size(0) * input.size(1); - int hidden_size = input.size(2); - // cudaStreamWaitEvent( - // Context::Instance().GetCurrentStream(), Context::Instance().GetCompEvent(2), 0); - if (input.scalar_type() == at::kFloat) - if (mlp_after_attn) - launch_bias_residual((float*)input.data_ptr(), - (float*)output.data_ptr(), - (float*)attention_output.data_ptr(), - (float*)output_b.data_ptr(), - (float*)attention_b.data_ptr(), - bsz, - hidden_size, - mp_size, - Context::Instance().GetCurrentStream()); - else - launch_gptj_residual_add((float*)input.data_ptr(), - (float*)output.data_ptr(), - (float*)attention_output.data_ptr(), - (float*)output_b.data_ptr(), - (float*)attention_b.data_ptr(), - hidden_size, - bsz, - mp_size, - Context::Instance().GetCurrentStream()); - else if (mlp_after_attn) - launch_bias_residual((__half*)input.data_ptr(), - (__half*)output.data_ptr(), - (__half*)attention_output.data_ptr(), - (__half*)output_b.data_ptr(), - (__half*)attention_b.data_ptr(), - bsz, - hidden_size, - mp_size, - Context::Instance().GetCurrentStream()); - else - launch_gptj_residual_add<__half>((__half*)input.data_ptr(), - (__half*)output.data_ptr(), - (__half*)attention_output.data_ptr(), - (__half*)output_b.data_ptr(), - (__half*)attention_b.data_ptr(), - hidden_size, - bsz, - mp_size, - 
Context::Instance().GetCurrentStream()); -} - -std::vector apply_rotary_pos_emb(at::Tensor& mixed_query, - at::Tensor& key_layer, - unsigned rotary_dim, - unsigned offset, - unsigned num_heads, - bool rotate_half, - bool rotate_every_two) -{ - auto query_cont = mixed_query.contiguous(); - auto key_cont = key_layer.contiguous(); - - unsigned bsz = mixed_query.size(0); - unsigned head_size = mixed_query.size(2) / num_heads; - unsigned seq_len = mixed_query.size(1); - - if (mixed_query.scalar_type() == at::kFloat) - launch_apply_rotary_pos_emb((float*)query_cont.data_ptr(), - (float*)key_cont.data_ptr(), - head_size, - seq_len, - rotary_dim, - offset, - num_heads, - bsz, - rotate_half, - rotate_every_two, - Context::Instance().GetCurrentStream()); - else - launch_apply_rotary_pos_emb<__half>((__half*)query_cont.data_ptr(), - (__half*)key_cont.data_ptr(), - head_size, - seq_len, - rotary_dim, - offset, - num_heads, - bsz, - rotate_half, - rotate_every_two, - Context::Instance().GetCurrentStream()); - return {query_cont, key_cont}; -} - -template -at::Tensor fused_gemm_gelu_int8(at::Tensor& input, - at::Tensor& weight, - at::Tensor& bias, - const float epsilon, - at::Tensor& q_scale, - int groups, - bool preLayerNorm) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - - int bsz = input_cont.size(0) * input_cont.size(1); - - quantized_gemm(output, input_cont, weight, q_scale, groups, 0); - launch_bias_gelu((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - - return output; -} - -at::Tensor moe_res_matmul(at::Tensor& moe_res, at::Tensor& coef, at::Tensor& output) -{ - int M = moe_res.size(0) * moe_res.size(1); - int N = moe_res.size(2); - Context::Instance().SynchComm(); - if 
(moe_res.scalar_type() == at::kFloat) { - launch_moe_res_matmul((float*)moe_res.data_ptr(), - (float*)coef.data_ptr(), - (float*)output.data_ptr(), - M, - N, - at::cuda::getCurrentCUDAStream()); - } else { - launch_moe_res_matmul<__half>((__half*)moe_res.data_ptr(), - (__half*)coef.data_ptr(), - (__half*)output.data_ptr(), - M, - N, - at::cuda::getCurrentCUDAStream()); - } - return output; -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("softmax_fp32", &ds_softmax, "DeepSpeed SoftMax with fp32 (CUDA)"); - m.def("softmax_fp16", &ds_softmax<__half>, "DeepSpeed SoftMax with fp32 (CUDA)"); - m.def( - "softmax_context_fp32", &ds_softmax_context, "DeepSpeed attention with fp32 (CUDA)"); - m.def("softmax_context_fp16", - &ds_softmax_context<__half>, - "DeepSpeed attention with fp32 (CUDA)"); - m.def("bias_gelu_fp32", &ds_bias_gelu, "DeepSpeed Gelu with fp32 (CUDA)"); - m.def("bias_gelu_fp16", &ds_bias_gelu<__half>, "DeepSpeed Gelu with fp32 (CUDA)"); - m.def("bias_residual_fp32", - &ds_bias_residual, - "DeepSpeed residual-bias add with fp32 (CUDA)"); - m.def("bias_residual_fp16", - &ds_bias_residual<__half>, - "DeepSpeed residual-bias add with fp32 (CUDA)"); - m.def("layer_norm_fp32", &ds_layernorm, "DeepSpeed layer-norm with fp32 (CUDA)"); - m.def("layer_norm_fp16", &ds_layernorm<__half>, "DeepSpeed layer-norm with fp16 (CUDA)"); - m.def("qkv_gemm_fp32", &ds_qkv_gemm, "DeepSpeed qkv gemm with fp32 (CUDA)"); - m.def("qkv_gemm_fp16", &ds_qkv_gemm<__half>, "DeepSpeed qkv gemm with fp16 (CUDA)"); - m.def("qkv_gemm_int8", &ds_qkv_gemm_int8<__half>, "DeepSpeed qkv gemm with int8 (CUDA)"); - m.def("mlp_gemm_fp32", &ds_mlp_gemm, "DeepSpeed mlp with fp32 (CUDA)"); - m.def("mlp_gemm_fp16", &ds_mlp_gemm<__half>, "DeepSpeed mlp with fp16 (CUDA)"); - m.def("mlp_gemm_int8", &ds_mlp_gemm_int8<__half>, "DeepSpeed mlp with int8 (CUDA)"); - m.def("vector_matmul_fp32", &ds_vector_matmul, "DeepSpeed vector-MM with fp32 (CUDA)"); - m.def("vector_matmul_fp16", 
&ds_vector_matmul<__half>, "DeepSpeed vector-MM with fp16 (CUDA)"); - m.def("vector_matmul_int8", - &ds_vector_matmul_int8<__half>, - "DeepSpeed vector-MM with int8 (CUDA)"); - m.def("linear_layer_fp32", &ds_linear_layer, "DeepSpeed linear_layer with fp32 (CUDA)"); - m.def("linear_layer_fp16", &ds_linear_layer<__half>, "DeepSpeed linear_layer with fp16 (CUDA)"); - m.def("linear_layer_int8", - &ds_linear_layer_int8<__half>, - "DeepSpeed linear_layer with int8 (CUDA)"); - m.def("fused_gemm_gelu_fp32", &fused_gemm_gelu, "DeepSpeed mlp with fp32 (CUDA)"); - m.def("fused_gemm_gelu_fp16", &fused_gemm_gelu<__half>, "DeepSpeed mlp with fp16 (CUDA)"); - m.def("residual_add", &residual_add_bias, "DeepSpeed mlp with fp16 (CUDA)"); - m.def("apply_rotary_pos_emb", &apply_rotary_pos_emb, "DeepSpeed mlp with fp16 (CUDA)"); - m.def("einsum_sec_sm_ecm_fp32", - &einsum_sec_sm_ecm, - "DeepSpeed vector-MM with fp32 (CUDA)"); - - m.def("einsum_sec_sm_ecm_fp16", - &einsum_sec_sm_ecm<__half>, - "DeepSpeed vector-MM with fp16 (CUDA)"); - m.def("moe_res_matmul", &moe_res_matmul, "DeepSpeed moe residual matmul (CUDA)"); -} diff --git a/deepspeed/ops/csrc/transformer/inference/csrc/pt_binding_hip.cpp b/deepspeed/ops/csrc/transformer/inference/csrc/pt_binding_hip.cpp deleted file mode 100644 index 6fed126..0000000 --- a/deepspeed/ops/csrc/transformer/inference/csrc/pt_binding_hip.cpp +++ /dev/null @@ -1,952 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
- -#include -#include -#include -#include "context_hip.h" -#include "cublas_wrappers_hip.h" -#include "custom_hip_layers.h" - -std::array gemm_algos = std::array({99, 99, 99}); - -#define MAX_OUT_TOKES 10 - -template -at::Tensor ds_softmax(at::Tensor& attn_scores, - at::Tensor& attn_mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - bool async_op) -{ - auto attn_scores_c = attn_scores.contiguous(); - int bsz = attn_scores_c.size(0); - - int seq_len = attn_scores_c.size(1); - int len = attn_scores_c.sizes().size(); - if (len > 3) seq_len = attn_scores_c.size(2); - - int soft_len = attn_scores_c.size(2); - if (len > 3) soft_len = attn_scores_c.size(3); - - int heads = 1; - if (len > 3) heads = attn_scores_c.size(1); - - launch_attn_softmax_v2((T*)attn_scores_c.data_ptr(), - (attn_mask.sizes().size() > 1 ? (T*)attn_mask.data_ptr() : nullptr), - triangular, - recompute, - local_attention, - window_size, - bsz, - heads, - seq_len, - soft_len, - 1.0, - Context::Instance().GetCurrentStream(async_op)); - - return attn_scores_c; -} - -template -void allocate_workspace(size_t hidden_dim, - size_t max_seq_len, - size_t batch_size, - size_t head_size = 128) -{ - size_t _workSpaceSize = (hidden_dim * batch_size * max_seq_len); - Context::Instance().GenWorkSpace(_workSpaceSize * sizeof(T)); -} - -template -at::Tensor einsum_sec_sm_ecm(at::Tensor& Q, at::Tensor& W) -{ - auto options = at::TensorOptions() - .dtype(Q.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - T* workspace = (T*)Context::Instance().GetWorkSpace(); - float alpha = 1; - float gemm_beta = 0.0; - - if (!workspace) { - allocate_workspace(W.size(1), MAX_OUT_TOKES, Q.size(0)); - workspace = (T*)Context::Instance().GetWorkSpace(); - } - - auto O = at::from_blob(workspace, {Q.size(1), Q.size(2), W.size(1)}, options); - unsigned m = W.size(1); - unsigned n = Q.size(1) * Q.size(2); - unsigned k = Q.size(0); - 
cublas_gemm_ex(Context::Instance().GetCublasHandle(), - rocblas_operation_none, - rocblas_operation_transpose, - m, - n, - k, - &alpha, - &gemm_beta, - (T*)W.data_ptr(), - (T*)Q.data_ptr(), - (T*)O.data_ptr(), -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); -#else - CUBLAS_GEMM_DEFAULT_TENSOR_OP); -#endif - return O; -} - -template -void attention_unfused(at::Tensor& prev_key_cont, - at::Tensor& query_cont, - at::Tensor& attn_mask, - at::Tensor& prev_value_cont, - at::Tensor& output, - int& bsz, - int& seq_len, - int& soft_len, - int& heads, - float& norm_factor, - bool triangular, - bool recompute, - bool local_attention, - int window_size) -{ - auto options = at::TensorOptions() - .dtype(query_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - float alpha = norm_factor; - float gemm_beta = 0.0; - auto attn_score = at::empty({bsz, heads, seq_len, soft_len}, options); - int k = prev_value_cont.size(2) / heads; - rocblas_set_stream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - cublas_strided_batched_gemm(Context::Instance().GetCublasHandle(), - soft_len, - seq_len, - k, - &alpha, - &gemm_beta, - (T*)prev_key_cont.data_ptr(), - (T*)query_cont.data_ptr(), - (T*)attn_score.data_ptr(), - rocblas_operation_none, - rocblas_operation_none, - soft_len * k, - seq_len * k, - seq_len * soft_len, - bsz * heads, -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); -#else - CUBLAS_GEMM_DEFAULT_TENSOR_OP); -#endif - attn_score = ds_softmax( - attn_score, attn_mask, triangular, recompute, local_attention, window_size, false); - alpha = 1.0; - cublas_strided_batched_gemm(Context::Instance().GetCublasHandle(), - k, - seq_len, - soft_len, - &alpha, - &gemm_beta, - (T*)prev_value_cont.data_ptr(), - (T*)attn_score.data_ptr(), - (T*)output.data_ptr(), - rocblas_operation_none, - rocblas_operation_none, - soft_len * k, - seq_len * soft_len, - seq_len * k, - bsz * heads, -#ifdef 
__HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); -#else - CUBLAS_GEMM_DEFAULT_TENSOR_OP); -#endif -} - -template -std::vector ds_softmax_context(at::Tensor& query, - at::Tensor& prev_key, - at::Tensor& new_key, - at::Tensor& attn_mask, - at::Tensor& prev_value, - at::Tensor& new_value, - int heads, - float norm_factor, - bool merging, - bool triangular, - bool local_attention, - int window_size, - bool no_masking) -{ - auto query_cont = query.contiguous(); - auto prev_key_cont = prev_key.contiguous(); - auto prev_value_cont = prev_value.contiguous(); - - int new_size = (new_value.sizes().size() > 1 ? new_value.size(1) : 0); - - // Attn_Score [ batch Head Sequence-length Softmax-length] - - int bsz = query_cont.size(0); - int seq_len = query_cont.size(1); - int soft_len = prev_value.size(1); - - auto options = at::TensorOptions() - .dtype(query_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = - at::empty({prev_value.size(0), heads, seq_len, prev_value.size(2) / heads}, options); - attention_unfused(prev_key_cont, - query_cont, - attn_mask, //(no_masking ? 
nullptr : (T*)attn_mask.data_ptr()), - prev_value_cont, - output, - bsz, - seq_len, - soft_len, - heads, - norm_factor, - (triangular && (new_size == 0)), - (new_size == 0), - local_attention, - window_size); - - return {output, prev_key, prev_value}; -} - -template -at::Tensor ds_bias_gelu(at::Tensor& input, at::Tensor& bias) -{ - auto input_cont = input.contiguous(); - - int bsz = input_cont.size(0) * input_cont.size(1); - int intermediate_size = input_cont.size(2); - - launch_bias_gelu((T*)input_cont.data_ptr(), - (T*)bias.data_ptr(), - intermediate_size, - bsz, - Context::Instance().GetCurrentStream()); - return input_cont; -} - -template -at::Tensor ds_bias_residual(at::Tensor& input, at::Tensor& residual, at::Tensor& bias) -{ - auto input_cont = input.contiguous(); - auto residual_cont = residual.contiguous(); - - int bsz = input_cont.size(0) * input_cont.size(1); - // launch_bias_residual((T*)input_cont.data_ptr(), - // (T*)residual_cont.data_ptr(), - // (T*)bias.data_ptr(), - // bsz, - // input_cont.size(2), - // (bias.size(0) > 1), - // Context::Instance().GetCurrentStream()); - return input_cont; -} - -template -at::Tensor ds_layernorm(at::Tensor& input_cont, at::Tensor& gamma, at::Tensor& betta, float epsilon) -{ - int bsz = input_cont.size(0) * input_cont.size(1); - auto inp_norm = at::empty_like(input_cont); - launch_layer_norm((T*)inp_norm.data_ptr(), - (T*)input_cont.data_ptr(), - (T*)gamma.data_ptr(), - (T*)betta.data_ptr(), - epsilon, - bsz, - input_cont.size(2), - Context::Instance().GetCurrentStream()); - return inp_norm; -} - -template -at::Tensor qkv_unfused_cublas(at::Tensor& output, - at::Tensor& input, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - bool add_bias) -{ - auto inp_norm = ds_layernorm(input, gamma, beta, epsilon); - - // hipEventRecord(Context::Instance().GetCompEvent(1), Context::Instance().GetCurrentStream()); - - float alpha = (T)1.0; - float gemm_beta = (T)0.0; - 
int bsz = input.size(0) * input.size(1); - rocblas_set_stream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - rocblas_operation_none, - rocblas_operation_none, - weight.size(1), - bsz, - input.size(2), - &alpha, - &gemm_beta, - (T*)weight.data_ptr(), - (T*)inp_norm.data_ptr(), - (T*)output.data_ptr(), -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); -#else - CUBLAS_GEMM_DEFAULT_TENSOR_OP); -#endif - if (add_bias) - launch_bias_add((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - return inp_norm; -} - -template -std::vector ds_qkv_gemm(at::Tensor& input, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - bool add_bias) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); - auto inp_norm = - qkv_unfused_cublas(output, input_cont, weight, bias, gamma, beta, epsilon, add_bias); - - return {output, inp_norm}; -} - -template -void quantized_gemm(at::Tensor& output, - at::Tensor& input, - at::Tensor& weight, - at::Tensor& qscale, - int groups, - int merge_count) -{ - int bsz = input.size(0) * input.size(1); - auto options = at::TensorOptions() - .dtype(input.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - auto weight16 = at::empty({weight.size(0), weight.size(1)}, options); - - launch_dequantize((T*)weight16.data_ptr(), - (int8_t*)weight.data_ptr(), - (float*)qscale.data_ptr(), - weight.size(1), - weight.size(0), - groups, - merge_count, - Context::Instance().GetCurrentStream()); - - 
rocblas_set_stream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - - float alpha = (T)1.0; - float gemm_beta = (T)0.0; - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - rocblas_operation_none, - rocblas_operation_none, - weight.size(1), - bsz, - input.size(2), - &alpha, - &gemm_beta, - (T*)weight16.data_ptr(), - (T*)input.data_ptr(), - (T*)output.data_ptr(), -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); -#else - CUBLAS_GEMM_DEFAULT_TENSOR_OP); -#endif -} - -template -at::Tensor ds_qkv_gemm_int8(at::Tensor& input, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - at::Tensor& q_scale, - int groups, - bool add_bias) -{ - int bsz = input.size(0) * input.size(1); - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - - auto inp_norm = ds_layernorm(input_cont, gamma, beta, epsilon); - - quantized_gemm(output, inp_norm, weight, q_scale, groups, 0); - if (add_bias) - launch_bias_add((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - - return output; -} - -template -at::Tensor ds_linear_layer(at::Tensor& input, at::Tensor& weight, at::Tensor& bias) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); - - float alpha = (T)1.0; - float gemm_beta = (T)0.0; - rocblas_set_stream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - - 
cublas_gemm_ex(Context::Instance().GetCublasHandle(), - rocblas_operation_none, - rocblas_operation_none, - weight.size(1), - bsz, - input_cont.size(2), - &alpha, - &gemm_beta, - (T*)weight.data_ptr(), - (T*)input_cont.data_ptr(), - (T*)output.data_ptr(), -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); -#else - CUBLAS_GEMM_DEFAULT_TENSOR_OP); -#endif - - launch_bias_add((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - - return output; -} - -template -at::Tensor ds_linear_layer_int8(at::Tensor& input, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& q_scale, - int groups) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - int bsz = input_cont.size(0) * input_cont.size(1); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - - quantized_gemm(output, input_cont, weight, q_scale, groups, 0); - launch_bias_add((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - return output; -} - -template -at::Tensor ds_vector_matmul(at::Tensor& input, at::Tensor& weight, bool async_op) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); - float alpha = (T)1.0; - float gemm_beta = (T)0.0; - rocblas_set_stream(Context::Instance().GetCublasHandle(), - Context::Instance().GetCurrentStream(async_op)); - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - rocblas_operation_none, - rocblas_operation_none, - weight.size(1), - bsz, - input_cont.size(2), - &alpha, - &gemm_beta, - 
(T*)weight.data_ptr(), - (T*)input_cont.data_ptr(), - (T*)output.data_ptr(), -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); -#else - CUBLAS_GEMM_DEFAULT_TENSOR_OP); -#endif - return output; -} - -template -at::Tensor ds_vector_matmul_int8(at::Tensor& input, - at::Tensor& weight, - at::Tensor& q_scale, - int groups, - int merge_count) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - - quantized_gemm(output, input_cont, weight, q_scale, groups, merge_count); - return output; -} - -template -void mlp_unfused_cublas(at::Tensor& output, - at::Tensor& input, - at::Tensor& residual, - at::Tensor& input_bias, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - bool preLayerNorm, - bool mlp_after_attn) -{ - int bsz = input.size(0) * input.size(1); - auto inp_norm = at::empty_like(input); - - launch_residual_layer_norm((T*)inp_norm.data_ptr(), - (T*)nullptr, - (T*)input.data_ptr(), - (T*)residual.data_ptr(), - (T*)input_bias.data_ptr(), - (T*)gamma.data_ptr(), - (T*)beta.data_ptr(), - epsilon, - bsz, - input.size(2), - preLayerNorm, - mlp_after_attn, - Context::Instance().GetCurrentStream()); - - float alpha = (T)1.0; - float gemm_beta = (T)0.0; - rocblas_set_stream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - rocblas_operation_none, - rocblas_operation_none, - weight.size(1), - bsz, - input.size(2), - &alpha, - &gemm_beta, - (T*)weight.data_ptr(), - (T*)inp_norm.data_ptr(), - (T*)output.data_ptr(), -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); -#else - CUBLAS_GEMM_DEFAULT_TENSOR_OP); -#endif - launch_bias_gelu((T*)output.data_ptr(), - (T*)bias.data_ptr(), - 
weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); -} -template -at::Tensor ds_mlp_gemm(at::Tensor& input, - at::Tensor& residual, - at::Tensor& input_bias, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - bool preLayerNorm, - bool mlp_after_attn) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); - - mlp_unfused_cublas(output, - mlp_after_attn ? input : residual, - residual, - input_bias, - weight, - bias, - gamma, - beta, - epsilon, - preLayerNorm, - mlp_after_attn); - - return output; -} - -template -std::vector ds_mlp_gemm_int8(at::Tensor& input, - at::Tensor& residual, - at::Tensor& input_bias, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - at::Tensor& q_scale, - int groups, - bool preLayerNorm) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - - int bsz = input_cont.size(0) * input_cont.size(1); - auto inp_norm = at::empty_like(input_cont); - - auto residual_add = (preLayerNorm ? 
at::empty_like(input_cont) : inp_norm); - // computing the blocking across K dimension - // launch_residual_layer_norm((T*)inp_norm.data_ptr(), - // (T*)residual_add.data_ptr(), - // (T*)input_cont.data_ptr(), - // (T*)residual.data_ptr(), - // (T*)input_bias.data_ptr(), - // (T*)gamma.data_ptr(), - // (T*)beta.data_ptr(), - // epsilon, - // bsz, - // input_cont.size(2), - // preLayerNorm, - // Context::Instance().GetCurrentStream()); - - quantized_gemm(output, inp_norm, weight, q_scale, groups, 0); - launch_bias_gelu((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - - return {output, residual_add}; -} - -template -at::Tensor fused_gemm_gelu(at::Tensor& input, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& weight_out, - const float epsilon, - bool preLayerNorm, - bool async_op) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto intermediate = - at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight_out.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); - float alpha = (T)1.0; - float gemm_beta = (T)0.0; - rocblas_set_stream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - rocblas_operation_none, - rocblas_operation_none, - weight.size(1), - bsz, - input.size(2), - &alpha, - &gemm_beta, - (T*)weight.data_ptr(), - (T*)input_cont.data_ptr(), - (T*)intermediate.data_ptr(), -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); -#else - CUBLAS_GEMM_DEFAULT_TENSOR_OP); -#endif - launch_bias_gelu((T*)intermediate.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - - 
cublas_gemm_ex(Context::Instance().GetCublasHandle(), - rocblas_operation_none, - rocblas_operation_none, - weight_out.size(1), - bsz, - intermediate.size(2), - &alpha, - &gemm_beta, - (T*)weight_out.data_ptr(), - (T*)intermediate.data_ptr(), - (T*)output.data_ptr(), -#ifdef __HIP_PLATFORM_HCC__ - rocblas_gemm_algo_standard); -#else - CUBLAS_GEMM_DEFAULT_TENSOR_OP); -#endif - // hipEventRecord(Context::Instance().GetCompEvent(2), - // Context::Instance().GetCurrentStream(true)); - return output; -} - -void residual_add_bias(at::Tensor& output, - at::Tensor& input, - at::Tensor& attention_output, - at::Tensor& output_b, - at::Tensor& attention_b, - int mp_size, - bool mlp_after_attn) -{ - int bsz = input.size(0) * input.size(1); - int hidden_size = input.size(2); - // hipStreamWaitEvent( - // Context::Instance().GetCurrentStream(), Context::Instance().GetCompEvent(2), 0); - if (input.scalar_type() == at::kFloat) - if (mlp_after_attn) - launch_bias_residual((float*)input.data_ptr(), - (float*)output.data_ptr(), - (float*)attention_output.data_ptr(), - (float*)output_b.data_ptr(), - (float*)attention_b.data_ptr(), - bsz, - hidden_size, - mp_size, - Context::Instance().GetCurrentStream()); - else - launch_gptj_residual_add((float*)input.data_ptr(), - (float*)output.data_ptr(), - (float*)attention_output.data_ptr(), - (float*)output_b.data_ptr(), - (float*)attention_b.data_ptr(), - hidden_size, - bsz, - mp_size, - Context::Instance().GetCurrentStream()); - else if (mlp_after_attn) - launch_bias_residual((__half*)input.data_ptr(), - (__half*)output.data_ptr(), - (__half*)attention_output.data_ptr(), - (__half*)output_b.data_ptr(), - (__half*)attention_b.data_ptr(), - bsz, - hidden_size, - mp_size, - Context::Instance().GetCurrentStream()); - else - launch_gptj_residual_add<__half>((__half*)input.data_ptr(), - (__half*)output.data_ptr(), - (__half*)attention_output.data_ptr(), - (__half*)output_b.data_ptr(), - (__half*)attention_b.data_ptr(), - hidden_size, - bsz, - 
mp_size, - Context::Instance().GetCurrentStream()); -} - -std::vector apply_rotary_pos_emb(at::Tensor& mixed_query, - at::Tensor& key_layer, - unsigned rotary_dim, - unsigned offset, - unsigned num_heads, - bool rotate_half, - bool rotate_every_two) -{ - auto query_cont = mixed_query.contiguous(); - auto key_cont = key_layer.contiguous(); - - unsigned bsz = mixed_query.size(0); - unsigned head_size = mixed_query.size(2) / num_heads; - unsigned seq_len = mixed_query.size(1); - - if (mixed_query.scalar_type() == at::kFloat) - launch_apply_rotary_pos_emb((float*)query_cont.data_ptr(), - (float*)key_cont.data_ptr(), - head_size, - seq_len, - rotary_dim, - offset, - num_heads, - bsz, - rotate_half, - rotate_every_two, - Context::Instance().GetCurrentStream()); - else - launch_apply_rotary_pos_emb<__half>((__half*)query_cont.data_ptr(), - (__half*)key_cont.data_ptr(), - head_size, - seq_len, - rotary_dim, - offset, - num_heads, - bsz, - rotate_half, - rotate_every_two, - Context::Instance().GetCurrentStream()); - return {query_cont, key_cont}; -} - -template -at::Tensor fused_gemm_gelu_int8(at::Tensor& input, - at::Tensor& weight, - at::Tensor& bias, - const float epsilon, - at::Tensor& q_scale, - int groups, - bool preLayerNorm) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - - int bsz = input_cont.size(0) * input_cont.size(1); - - quantized_gemm(output, input_cont, weight, q_scale, groups, 0); - launch_bias_gelu((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - - return output; -} - -at::Tensor moe_res_matmul(at::Tensor& moe_res, at::Tensor& coef, at::Tensor& output) -{ - int M = moe_res.size(0) * moe_res.size(1); - int N = moe_res.size(2); - 
Context::Instance().SynchComm(); - if (moe_res.scalar_type() == at::kFloat) { - launch_moe_res_matmul((float*)moe_res.data_ptr(), - (float*)coef.data_ptr(), - (float*)output.data_ptr(), - M, - N, - at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); - } else { - launch_moe_res_matmul<__half>((__half*)moe_res.data_ptr(), - (__half*)coef.data_ptr(), - (__half*)output.data_ptr(), - M, - N, - at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); - } - return output; -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("softmax_fp32", &ds_softmax, "DeepSpeed SoftMax with fp32 (CUDA)"); - m.def("softmax_fp16", &ds_softmax<__half>, "DeepSpeed SoftMax with fp32 (CUDA)"); - m.def( - "softmax_context_fp32", &ds_softmax_context, "DeepSpeed attention with fp32 (CUDA)"); - m.def("softmax_context_fp16", - &ds_softmax_context<__half>, - "DeepSpeed attention with fp32 (CUDA)"); - m.def("bias_gelu_fp32", &ds_bias_gelu, "DeepSpeed Gelu with fp32 (CUDA)"); - m.def("bias_gelu_fp16", &ds_bias_gelu<__half>, "DeepSpeed Gelu with fp32 (CUDA)"); - m.def("bias_residual_fp32", - &ds_bias_residual, - "DeepSpeed residual-bias add with fp32 (CUDA)"); - m.def("bias_residual_fp16", - &ds_bias_residual<__half>, - "DeepSpeed residual-bias add with fp32 (CUDA)"); - m.def("layer_norm_fp32", &ds_layernorm, "DeepSpeed layer-norm with fp32 (CUDA)"); - m.def("layer_norm_fp16", &ds_layernorm<__half>, "DeepSpeed layer-norm with fp16 (CUDA)"); - m.def("qkv_gemm_fp32", &ds_qkv_gemm, "DeepSpeed qkv gemm with fp32 (CUDA)"); - m.def("qkv_gemm_fp16", &ds_qkv_gemm<__half>, "DeepSpeed qkv gemm with fp16 (CUDA)"); - m.def("qkv_gemm_int8", &ds_qkv_gemm_int8<__half>, "DeepSpeed qkv gemm with int8 (CUDA)"); - m.def("mlp_gemm_fp32", &ds_mlp_gemm, "DeepSpeed mlp with fp32 (CUDA)"); - m.def("mlp_gemm_fp16", &ds_mlp_gemm<__half>, "DeepSpeed mlp with fp16 (CUDA)"); - m.def("mlp_gemm_int8", &ds_mlp_gemm_int8<__half>, "DeepSpeed mlp with int8 (CUDA)"); - m.def("vector_matmul_fp32", &ds_vector_matmul, "DeepSpeed vector-MM 
with fp32 (CUDA)"); - m.def("vector_matmul_fp16", &ds_vector_matmul<__half>, "DeepSpeed vector-MM with fp16 (CUDA)"); - m.def("vector_matmul_int8", - &ds_vector_matmul_int8<__half>, - "DeepSpeed vector-MM with int8 (CUDA)"); - m.def("linear_layer_fp32", &ds_linear_layer, "DeepSpeed linear_layer with fp32 (CUDA)"); - m.def("linear_layer_fp16", &ds_linear_layer<__half>, "DeepSpeed linear_layer with fp16 (CUDA)"); - m.def("linear_layer_int8", - &ds_linear_layer_int8<__half>, - "DeepSpeed linear_layer with int8 (CUDA)"); - m.def("fused_gemm_gelu_fp32", &fused_gemm_gelu, "DeepSpeed mlp with fp32 (CUDA)"); - m.def("fused_gemm_gelu_fp16", &fused_gemm_gelu<__half>, "DeepSpeed mlp with fp16 (CUDA)"); - m.def("residual_add", &residual_add_bias, "DeepSpeed mlp with fp16 (CUDA)"); - m.def("apply_rotary_pos_emb", &apply_rotary_pos_emb, "DeepSpeed mlp with fp16 (CUDA)"); - m.def("einsum_sec_sm_ecm_fp32", - &einsum_sec_sm_ecm, - "DeepSpeed vector-MM with fp32 (CUDA)"); - - m.def("einsum_sec_sm_ecm_fp16", - &einsum_sec_sm_ecm<__half>, - "DeepSpeed vector-MM with fp16 (CUDA)"); - m.def("moe_res_matmul", &moe_res_matmul, "DeepSpeed moe residual matmul (CUDA)"); -} diff --git a/deepspeed/ops/csrc/transformer/inference/csrc/softmax.cu b/deepspeed/ops/csrc/transformer/inference/csrc/softmax.cu deleted file mode 100644 index bf3c8bc..0000000 --- a/deepspeed/ops/csrc/transformer/inference/csrc/softmax.cu +++ /dev/null @@ -1,434 +0,0 @@ -#include -#include "custom_cuda_layers.h" - -#ifndef __HIP_PLATFORM_HCC__ -#include -#endif -#include -#include -#include - -#define ATTN_THREADS 1024 -#define MAX_REG_SIZE 8 - -#define minus_infinity -10000.0 - -void CheckCudaErrorAux(const char* file, unsigned line) -{ - cudaError_t err = cudaGetLastError(); - if (err == cudaSuccess) return; - std::cerr << cudaGetErrorString(err) << "(" << err << ") at " << file << ":" << line - << std::endl; - throw std::runtime_error("CUDA ERROR!!!\n"); -} - -#define CUDA_CHECK_ERROR() CheckCudaErrorAux(__FILE__, 
__LINE__) - -namespace cg = cooperative_groups; - -__global__ void attn_softmax_v2(__half* vals, - __half* mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - int total_count, - int heads, - int sequence_length, - int num_seq, - float scale, - int iterations, - int reduceWidth) -{ -#ifdef HALF_PRECISION_AVAILABLE - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - float2 low_data[MAX_REG_SIZE]; - float2 high_data[MAX_REG_SIZE]; - - __half2 h_scale = __float2half2_rn(scale); - - int wid = threadIdx.x >> 5; - int lane = threadIdx.x & 0x1f; - int warp_num = blockDim.x >> 5; - - int reduce_blocks = reduceWidth >> 5; - int seq_lane = threadIdx.x % reduceWidth; - - __shared__ float partialSum[MAX_WARP_NUM]; - - int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks); - - if (iter_offset < total_count) { - vals += (iter_offset * sequence_length); - - int mask_offset = (iter_offset / (heads * num_seq)) * (sequence_length); - int seq_id = iter_offset % num_seq; - int seq_id4 = seq_id >> 2; - - int real_seq_id = seq_id + (num_seq == sequence_length ? 0 : sequence_length); - int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2)) - ? (real_seq_id >> 2) - (window_size >> 2) - : 0; - int window_stride = - (local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1; - - float max_val = minus_infinity; - - for (int i = 0; i < iterations; i++) { - int data_id = i * (reduceWidth << 2) + (seq_lane << 2); - if ((!triangular || ((data_id >> 2) <= seq_id4)) && (data_id >> 2) >= window_stride4 && - data_id < sequence_length) { - if ((sequence_length - data_id) >= 4) { - low_data[i].x = data_id > window_stride ? __half2float(vals[data_id]) - : minus_infinity; - low_data[i].y = ((!triangular || ((data_id + 1) <= seq_id)) && - (data_id + 1) > window_stride) - ? 
__half2float(vals[data_id + 1]) - : minus_infinity; - high_data[i].x = ((!triangular || ((data_id + 2) <= seq_id)) && - (data_id + 2) > window_stride) - ? __half2float(vals[data_id + 2]) - : minus_infinity; - high_data[i].y = ((!triangular || ((data_id + 3) <= seq_id)) && - (data_id + 3) > window_stride) - ? __half2float(vals[data_id + 3]) - : minus_infinity; - if (mask && recompute) { - low_data[i].x += __half2float(mask[data_id + mask_offset]); - low_data[i].y += __half2float(mask[data_id + mask_offset + 1]); - high_data[i].x += __half2float(mask[data_id + mask_offset + 2]); - high_data[i].y += __half2float(mask[data_id + mask_offset + 3]); - } - } else { - low_data[i].x = data_id > window_stride ? __half2float(vals[data_id]) - : minus_infinity; - low_data[i].y = (((!triangular || (data_id + 1) <= seq_id) && - (data_id + 1) > window_stride) && - (data_id + 1) < sequence_length) - ? __half2float(vals[data_id + 1]) - : minus_infinity; - high_data[i].x = (((!triangular || (data_id + 2) <= seq_id) && - (data_id + 2) > window_stride) && - (data_id + 2) < sequence_length) - ? __half2float(vals[data_id + 2]) - : minus_infinity; - high_data[i].y = minus_infinity; - if (mask && recompute) { - low_data[i].x += __half2float(mask[data_id + mask_offset]); - if ((data_id + 1) < sequence_length) - low_data[i].y += __half2float(mask[data_id + mask_offset + 1]); - if ((data_id + 2) < sequence_length) - high_data[i].x += __half2float(mask[data_id + mask_offset + 2]); - } - } - // if(lane == 0) printf("%f , %d, %d \n", low_data[i].x, data_id, seq_id); - max_val = (low_data[i].x > max_val ? low_data[i].x : max_val); - max_val = (low_data[i].y > max_val ? low_data[i].y : max_val); - max_val = (high_data[i].x > max_val ? high_data[i].x : max_val); - max_val = (high_data[i].y > max_val ? 
high_data[i].y : max_val); - } else { - low_data[i].x = minus_infinity; - low_data[i].y = minus_infinity; - high_data[i].x = minus_infinity; - high_data[i].y = minus_infinity; - } - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? temp : max_val); - } - - if (reduceWidth > WARP_SIZE) { - if (lane == 0) partialSum[wid] = max_val; - b.sync(); - - if (lane < warp_num) max_val = partialSum[lane]; - - b.sync(); - - for (int i = 1; i < reduce_blocks; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? temp : max_val); - } - - max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE); - } - float sum = 0; - for (int i = 0; i < iterations; i++) { - low_data[i].x = __expf(low_data[i].x - max_val); - low_data[i].y = __expf(low_data[i].y - max_val); - high_data[i].x = __expf(high_data[i].x - max_val); - high_data[i].y = __expf(high_data[i].y - max_val); - - sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i); - - if (reduceWidth > WARP_SIZE) { - if (lane == 0) partialSum[wid] = sum; - b.sync(); - - if (lane < warp_num) sum = partialSum[lane]; - - b.sync(); - - for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); } - - sum = g.shfl(sum, threadIdx.x / WARP_SIZE); - } - sum += 1e-6; - for (int i = 0; i < iterations; i++) { - int data_id = i * (reduceWidth << 2) + (seq_lane << 2); - - if (data_id < sequence_length) { - if ((sequence_length - data_id) >= 4) { - vals[data_id] = low_data[i].x / sum; - vals[data_id + 1] = low_data[i].y / sum; - vals[data_id + 2] = high_data[i].x / sum; - vals[data_id + 3] = high_data[i].y / sum; - } else { - vals[data_id] = low_data[i].x / sum; - if ((data_id + 1) < sequence_length) vals[data_id + 1] = low_data[i].y / sum; - if ((data_id + 2) < sequence_length) vals[data_id + 2] = high_data[i].x / sum; - } - } - } - } -#endif -} - -__global__ void 
attn_softmax_v2(float* vals, - float* attn_mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - int total_count, - int heads, - int sequence_length, - int num_seq, - float scale, - int iterations, - int reduceWidth) -{ - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - float4 data[MAX_REG_SIZE]; - - int wid = threadIdx.x >> 5; - int lane = threadIdx.x & 0x1f; - int warp_num = blockDim.x >> 5; - - int reduce_blocks = reduceWidth >> 5; - int seq_lane = threadIdx.x % reduceWidth; - - __shared__ float partialSum[MAX_WARP_NUM]; - - int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks); - if (iter_offset < total_count) { - vals += (iter_offset * sequence_length); - - int mask_offset = (iter_offset / (heads * num_seq)) * (sequence_length); - int seq_id = iter_offset % num_seq; - int seq_id4 = seq_id >> 2; - - int real_seq_id = seq_id + (num_seq == sequence_length ? 0 : sequence_length); - int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2)) - ? (real_seq_id >> 2) - (window_size >> 2) - : 0; - int window_stride = - (local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1; - - float max_val = minus_infinity; - - for (int i = 0; i < iterations; i++) { - int data_id = i * (reduceWidth << 2) + (seq_lane << 2); - if ((!triangular || ((data_id >> 2) <= seq_id4)) && (data_id >> 2) >= window_stride4 && - data_id < sequence_length) { - if ((sequence_length - data_id) >= 4) { - data[i].x = (data_id > window_stride ? vals[data_id] : minus_infinity); - data[i].y = ((!triangular || ((data_id + 1) <= seq_id)) && - (data_id + 1) > window_stride) - ? vals[data_id + 1] - : minus_infinity; - data[i].z = ((!triangular || ((data_id + 2) <= seq_id)) && - (data_id + 2) > window_stride) - ? vals[data_id + 2] - : minus_infinity; - data[i].w = ((!triangular || ((data_id + 3) <= seq_id)) && - (data_id + 3) > window_stride) - ? 
vals[data_id + 3] - : minus_infinity; - if (attn_mask && recompute) { - data[i].x += attn_mask[data_id + mask_offset]; - data[i].y += attn_mask[data_id + mask_offset + 1]; - data[i].z += attn_mask[data_id + mask_offset + 2]; - data[i].w += attn_mask[data_id + mask_offset + 3]; - } - } else { - data[i].x = data_id > window_stride ? vals[data_id] : minus_infinity; - data[i].y = (((!triangular || (data_id + 1) <= seq_id)) && - (data_id + 1) > window_stride && (data_id + 1) < sequence_length) - ? (vals[data_id + 1]) - : minus_infinity; - data[i].z = (((!triangular || (data_id + 2) <= seq_id)) && - (data_id + 2) > window_stride && (data_id + 2) < sequence_length) - ? (vals[data_id + 2]) - : minus_infinity; - data[i].w = minus_infinity; - if (attn_mask && recompute) { - data[i].x += attn_mask[data_id + mask_offset]; - if ((data_id + 1) < sequence_length) - data[i].y += attn_mask[data_id + mask_offset + 1]; - if ((data_id + 2) < sequence_length) - data[i].z += attn_mask[data_id + mask_offset + 2]; - } - } - max_val = (data[i].x > max_val ? data[i].x : max_val); - max_val = (data[i].y > max_val ? data[i].y : max_val); - max_val = (data[i].z > max_val ? data[i].z : max_val); - max_val = (data[i].w > max_val ? data[i].w : max_val); - } else { - data[i].x = minus_infinity; - data[i].y = minus_infinity; - data[i].z = minus_infinity; - data[i].w = minus_infinity; - } - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? temp : max_val); - } - - if (reduceWidth > WARP_SIZE) { - if (lane == 0) partialSum[wid] = max_val; - b.sync(); - - if (lane < warp_num) max_val = partialSum[lane]; - - b.sync(); - - for (int i = 1; i < reduce_blocks; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? 
temp : max_val); - } - - max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE); - } - - float sum = 0; - for (int i = 0; i < iterations; i++) { - data[i].x = __expf(data[i].x - max_val); - data[i].y = __expf(data[i].y - max_val); - data[i].z = __expf(data[i].z - max_val); - data[i].w = __expf(data[i].w - max_val); - - sum += (data[i].x + data[i].y + data[i].z + data[i].w); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i); - - if (reduceWidth > WARP_SIZE) { - if (lane == 0) partialSum[wid] = sum; - b.sync(); - - if (lane < warp_num) sum = partialSum[lane]; - - b.sync(); - - for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); } - - sum = g.shfl(sum, threadIdx.x / WARP_SIZE); - } - sum += 1e-6; - - for (int i = 0; i < iterations; i++) { - int data_id = i * (reduceWidth << 2) + (seq_lane << 2); - - if (data_id < sequence_length) { - if ((sequence_length - data_id) >= 4) { - vals[data_id] = data[i].x / sum; - vals[data_id + 1] = data[i].y / sum; - vals[data_id + 2] = data[i].z / sum; - vals[data_id + 3] = data[i].w / sum; - } else { - vals[data_id] = data[i].x / sum; - if ((data_id + 1) < sequence_length) vals[data_id + 1] = data[i].y / sum; - if ((data_id + 2) < sequence_length) vals[data_id + 2] = data[i].z / sum; - } - } - } - } -} - -template -void launch_attn_softmax_v2(T* vals, - T* mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - int batch_size, - int heads, - int num_seq, - int sequence_length, - float scale, - cudaStream_t stream) -{ - int total_count = batch_size * heads * num_seq; - dim3 grid_dim((total_count - 1) / (WARP_SIZE / ((sequence_length - 1) / ATTN_THREADS + 1)) + 1); - dim3 block_dim(ATTN_THREADS); - - const int reduce_width = ((sequence_length - 1) / ATTN_THREADS + 1) * WARP_SIZE; - const int iterations = (sequence_length - 1) / (reduce_width << 2) + 1; - - if (sequence_length <= 32768) - attn_softmax_v2<<>>( - vals, - mask, - triangular, - recompute, - 
local_attention, - window_size, - total_count, - (triangular ? (heads * batch_size) : heads), - sequence_length, - num_seq, - scale, - iterations, - reduce_width); - else - throw std::runtime_error("Unsupport Seq_Length!"); -} - -template void launch_attn_softmax_v2(float* vals, - float* mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - int batch_size, - int heads, - int num_seq, - int sequence_length, - float scale, - cudaStream_t stream); -template void launch_attn_softmax_v2(__half* vals, - __half* mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - int batch_size, - int heads, - int num_seq, - int sequence_length, - float scale, - cudaStream_t stream); diff --git a/deepspeed/ops/csrc/transformer/inference/csrc/softmax.hip b/deepspeed/ops/csrc/transformer/inference/csrc/softmax.hip deleted file mode 100644 index 51d5bef..0000000 --- a/deepspeed/ops/csrc/transformer/inference/csrc/softmax.hip +++ /dev/null @@ -1,436 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
-#include "hip/hip_runtime.h" -#include -#include "custom_hip_layers.h" - -#ifndef __HIP_PLATFORM_HCC__ -#include -#endif -#include -#include -#include - -#define ATTN_THREADS 1024 -#define MAX_REG_SIZE 8 - -#define minus_infinity -10000.0 - -void CheckCudaErrorAux(const char* file, unsigned line) -{ - hipError_t err = hipGetLastError(); - if (err == hipSuccess) return; - std::cerr << hipGetErrorString(err) << "(" << err << ") at " << file << ":" << line - << std::endl; - throw std::runtime_error("CUDA ERROR!!!\n"); -} - -#define CUDA_CHECK_ERROR() CheckCudaErrorAux(__FILE__, __LINE__) - -namespace cg = cooperative_groups; - -__global__ void attn_softmax_v2(__half* vals, - __half* mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - int total_count, - int heads, - int sequence_length, - int num_seq, - float scale, - int iterations, - int reduceWidth) -{ -#ifdef HALF_PRECISION_AVAILABLE - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - float2 low_data[MAX_REG_SIZE]; - float2 high_data[MAX_REG_SIZE]; - - __half2 h_scale = __float2half2_rn(scale); - - int wid = threadIdx.x >> 5; - int lane = threadIdx.x & 0x1f; - int warp_num = blockDim.x >> 5; - - int reduce_blocks = reduceWidth >> 5; - int seq_lane = threadIdx.x % reduceWidth; - - __shared__ float partialSum[MAX_WARP_NUM]; - - int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks); - - if (iter_offset < total_count) { - vals += (iter_offset * sequence_length); - - int mask_offset = (iter_offset / (heads * num_seq)) * (sequence_length); - int seq_id = iter_offset % num_seq; - int seq_id4 = seq_id >> 2; - - int real_seq_id = seq_id + (num_seq == sequence_length ? 0 : sequence_length); - int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2)) - ? (real_seq_id >> 2) - (window_size >> 2) - : 0; - int window_stride = - (local_attention && real_seq_id >= window_size) ? 
real_seq_id - window_size : -1; - - float max_val = minus_infinity; - - for (int i = 0; i < iterations; i++) { - int data_id = i * (reduceWidth << 2) + (seq_lane << 2); - if ((!triangular || ((data_id >> 2) <= seq_id4)) && (data_id >> 2) >= window_stride4 && - data_id < sequence_length) { - if ((sequence_length - data_id) >= 4) { - low_data[i].x = data_id > window_stride ? __half2float(vals[data_id]) - : minus_infinity; - low_data[i].y = ((!triangular || ((data_id + 1) <= seq_id)) && - (data_id + 1) > window_stride) - ? __half2float(vals[data_id + 1]) - : minus_infinity; - high_data[i].x = ((!triangular || ((data_id + 2) <= seq_id)) && - (data_id + 2) > window_stride) - ? __half2float(vals[data_id + 2]) - : minus_infinity; - high_data[i].y = ((!triangular || ((data_id + 3) <= seq_id)) && - (data_id + 3) > window_stride) - ? __half2float(vals[data_id + 3]) - : minus_infinity; - if (mask && recompute) { - low_data[i].x += __half2float(mask[data_id + mask_offset]); - low_data[i].y += __half2float(mask[data_id + mask_offset + 1]); - high_data[i].x += __half2float(mask[data_id + mask_offset + 2]); - high_data[i].y += __half2float(mask[data_id + mask_offset + 3]); - } - } else { - low_data[i].x = data_id > window_stride ? __half2float(vals[data_id]) - : minus_infinity; - low_data[i].y = (((!triangular || (data_id + 1) <= seq_id) && - (data_id + 1) > window_stride) && - (data_id + 1) < sequence_length) - ? __half2float(vals[data_id + 1]) - : minus_infinity; - high_data[i].x = (((!triangular || (data_id + 2) <= seq_id) && - (data_id + 2) > window_stride) && - (data_id + 2) < sequence_length) - ? 
__half2float(vals[data_id + 2]) - : minus_infinity; - high_data[i].y = minus_infinity; - if (mask && recompute) { - low_data[i].x += __half2float(mask[data_id + mask_offset]); - if ((data_id + 1) < sequence_length) - low_data[i].y += __half2float(mask[data_id + mask_offset + 1]); - if ((data_id + 2) < sequence_length) - high_data[i].x += __half2float(mask[data_id + mask_offset + 2]); - } - } - // if(lane == 0) printf("%f , %d, %d \n", low_data[i].x, data_id, seq_id); - max_val = (low_data[i].x > max_val ? low_data[i].x : max_val); - max_val = (low_data[i].y > max_val ? low_data[i].y : max_val); - max_val = (high_data[i].x > max_val ? high_data[i].x : max_val); - max_val = (high_data[i].y > max_val ? high_data[i].y : max_val); - } else { - low_data[i].x = minus_infinity; - low_data[i].y = minus_infinity; - high_data[i].x = minus_infinity; - high_data[i].y = minus_infinity; - } - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? temp : max_val); - } - - if (reduceWidth > WARP_SIZE) { - if (lane == 0) partialSum[wid] = max_val; - b.sync(); - - if (lane < warp_num) max_val = partialSum[lane]; - - b.sync(); - - for (int i = 1; i < reduce_blocks; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? 
temp : max_val); - } - - max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE); - } - float sum = 0; - for (int i = 0; i < iterations; i++) { - low_data[i].x = __expf(low_data[i].x - max_val); - low_data[i].y = __expf(low_data[i].y - max_val); - high_data[i].x = __expf(high_data[i].x - max_val); - high_data[i].y = __expf(high_data[i].y - max_val); - - sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i); - - if (reduceWidth > WARP_SIZE) { - if (lane == 0) partialSum[wid] = sum; - b.sync(); - - if (lane < warp_num) sum = partialSum[lane]; - - b.sync(); - - for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); } - - sum = g.shfl(sum, threadIdx.x / WARP_SIZE); - } - sum += 1e-6; - for (int i = 0; i < iterations; i++) { - int data_id = i * (reduceWidth << 2) + (seq_lane << 2); - - if (data_id < sequence_length) { - if ((sequence_length - data_id) >= 4) { - vals[data_id] = low_data[i].x / sum; - vals[data_id + 1] = low_data[i].y / sum; - vals[data_id + 2] = high_data[i].x / sum; - vals[data_id + 3] = high_data[i].y / sum; - } else { - vals[data_id] = low_data[i].x / sum; - if ((data_id + 1) < sequence_length) vals[data_id + 1] = low_data[i].y / sum; - if ((data_id + 2) < sequence_length) vals[data_id + 2] = high_data[i].x / sum; - } - } - } - } -#endif -} - -__global__ void attn_softmax_v2(float* vals, - float* attn_mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - int total_count, - int heads, - int sequence_length, - int num_seq, - float scale, - int iterations, - int reduceWidth) -{ - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - float4 data[MAX_REG_SIZE]; - - int wid = threadIdx.x >> 5; - int lane = threadIdx.x & 0x1f; - int warp_num = blockDim.x >> 5; - - int reduce_blocks = reduceWidth >> 5; - int seq_lane = threadIdx.x % reduceWidth; - - __shared__ float 
partialSum[MAX_WARP_NUM]; - - int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks); - if (iter_offset < total_count) { - vals += (iter_offset * sequence_length); - - int mask_offset = (iter_offset / (heads * num_seq)) * (sequence_length); - int seq_id = iter_offset % num_seq; - int seq_id4 = seq_id >> 2; - - int real_seq_id = seq_id + (num_seq == sequence_length ? 0 : sequence_length); - int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2)) - ? (real_seq_id >> 2) - (window_size >> 2) - : 0; - int window_stride = - (local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1; - - float max_val = minus_infinity; - - for (int i = 0; i < iterations; i++) { - int data_id = i * (reduceWidth << 2) + (seq_lane << 2); - if ((!triangular || ((data_id >> 2) <= seq_id4)) && (data_id >> 2) >= window_stride4 && - data_id < sequence_length) { - if ((sequence_length - data_id) >= 4) { - data[i].x = (data_id > window_stride ? vals[data_id] : minus_infinity); - data[i].y = ((!triangular || ((data_id + 1) <= seq_id)) && - (data_id + 1) > window_stride) - ? vals[data_id + 1] - : minus_infinity; - data[i].z = ((!triangular || ((data_id + 2) <= seq_id)) && - (data_id + 2) > window_stride) - ? vals[data_id + 2] - : minus_infinity; - data[i].w = ((!triangular || ((data_id + 3) <= seq_id)) && - (data_id + 3) > window_stride) - ? vals[data_id + 3] - : minus_infinity; - if (attn_mask && recompute) { - data[i].x += attn_mask[data_id + mask_offset]; - data[i].y += attn_mask[data_id + mask_offset + 1]; - data[i].z += attn_mask[data_id + mask_offset + 2]; - data[i].w += attn_mask[data_id + mask_offset + 3]; - } - } else { - data[i].x = data_id > window_stride ? vals[data_id] : minus_infinity; - data[i].y = (((!triangular || (data_id + 1) <= seq_id)) && - (data_id + 1) > window_stride && (data_id + 1) < sequence_length) - ? 
(vals[data_id + 1]) - : minus_infinity; - data[i].z = (((!triangular || (data_id + 2) <= seq_id)) && - (data_id + 2) > window_stride && (data_id + 2) < sequence_length) - ? (vals[data_id + 2]) - : minus_infinity; - data[i].w = minus_infinity; - if (attn_mask && recompute) { - data[i].x += attn_mask[data_id + mask_offset]; - if ((data_id + 1) < sequence_length) - data[i].y += attn_mask[data_id + mask_offset + 1]; - if ((data_id + 2) < sequence_length) - data[i].z += attn_mask[data_id + mask_offset + 2]; - } - } - max_val = (data[i].x > max_val ? data[i].x : max_val); - max_val = (data[i].y > max_val ? data[i].y : max_val); - max_val = (data[i].z > max_val ? data[i].z : max_val); - max_val = (data[i].w > max_val ? data[i].w : max_val); - } else { - data[i].x = minus_infinity; - data[i].y = minus_infinity; - data[i].z = minus_infinity; - data[i].w = minus_infinity; - } - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? temp : max_val); - } - - if (reduceWidth > WARP_SIZE) { - if (lane == 0) partialSum[wid] = max_val; - b.sync(); - - if (lane < warp_num) max_val = partialSum[lane]; - - b.sync(); - - for (int i = 1; i < reduce_blocks; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? 
temp : max_val); - } - - max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE); - } - - float sum = 0; - for (int i = 0; i < iterations; i++) { - data[i].x = __expf(data[i].x - max_val); - data[i].y = __expf(data[i].y - max_val); - data[i].z = __expf(data[i].z - max_val); - data[i].w = __expf(data[i].w - max_val); - - sum += (data[i].x + data[i].y + data[i].z + data[i].w); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i); - - if (reduceWidth > WARP_SIZE) { - if (lane == 0) partialSum[wid] = sum; - b.sync(); - - if (lane < warp_num) sum = partialSum[lane]; - - b.sync(); - - for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); } - - sum = g.shfl(sum, threadIdx.x / WARP_SIZE); - } - sum += 1e-6; - - for (int i = 0; i < iterations; i++) { - int data_id = i * (reduceWidth << 2) + (seq_lane << 2); - - if (data_id < sequence_length) { - if ((sequence_length - data_id) >= 4) { - vals[data_id] = data[i].x / sum; - vals[data_id + 1] = data[i].y / sum; - vals[data_id + 2] = data[i].z / sum; - vals[data_id + 3] = data[i].w / sum; - } else { - vals[data_id] = data[i].x / sum; - if ((data_id + 1) < sequence_length) vals[data_id + 1] = data[i].y / sum; - if ((data_id + 2) < sequence_length) vals[data_id + 2] = data[i].z / sum; - } - } - } - } -} - -template -void launch_attn_softmax_v2(T* vals, - T* mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - int batch_size, - int heads, - int num_seq, - int sequence_length, - float scale, - hipStream_t stream) -{ - int total_count = batch_size * heads * num_seq; - dim3 grid_dim((total_count - 1) / (WARP_SIZE / ((sequence_length - 1) / ATTN_THREADS + 1)) + 1); - dim3 block_dim(ATTN_THREADS); - - const int reduce_width = ((sequence_length - 1) / ATTN_THREADS + 1) * WARP_SIZE; - const int iterations = (sequence_length - 1) / (reduce_width << 2) + 1; - - if (sequence_length <= 32768) - hipLaunchKernelGGL(( attn_softmax_v2), dim3(grid_dim), dim3(block_dim), 0, stream, 
- vals, - mask, - triangular, - recompute, - local_attention, - window_size, - total_count, - (triangular ? (heads * batch_size) : heads), - sequence_length, - num_seq, - scale, - iterations, - reduce_width); - else - throw std::runtime_error("Unsupport Seq_Length!"); -} - -template void launch_attn_softmax_v2(float* vals, - float* mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - int batch_size, - int heads, - int num_seq, - int sequence_length, - float scale, - hipStream_t stream); -template void launch_attn_softmax_v2(__half* vals, - __half* mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - int batch_size, - int heads, - int num_seq, - int sequence_length, - float scale, - hipStream_t stream); diff --git a/deepspeed/ops/csrc/transformer/inference/includes/custom_cuda_layers.h b/deepspeed/ops/csrc/transformer/inference/includes/custom_cuda_layers.h deleted file mode 100644 index 06b4340..0000000 --- a/deepspeed/ops/csrc/transformer/inference/includes/custom_cuda_layers.h +++ /dev/null @@ -1,124 +0,0 @@ -#pragma once - -#ifdef __HIP_PLATFORM_HCC__ -#define HALF_PRECISION_AVAILABLE = 1 -#include -#else -#if __CUDA_ARCH__ >= 700 -#define HALF_PRECISION_AVAILABLE = 1 -#endif -#include -#endif - -#include -#include -#include -#include -#include -#include - -#define MAX_WARP_NUM 32 -#define WARP_SIZE 32 -#define SMs 80 - -#define MAX_REGISTERS 256 -template -void launch_attn_softmax_v2(T* vals, - T* mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - int batch_size, - int heads, - int num_seq, - int sequence_length, - float scale, - cudaStream_t stream); - -// Fused bias add with gelu activation -template -void launch_bias_gelu(T* input, - const T* bias, - int intermediate_size, - int batch_size, - cudaStream_t stream); -template -void launch_bias_add(T* input, const T* bias, int hidden_size, int batch_size, cudaStream_t stream); - -template -void 
launch_bias_residual(T* input, - T* output, - T* attn, - T* bias, - T* attn_bias, - int batch, - int hidden_dim, - int mp_size, - cudaStream_t stream); - -template -void launch_layer_norm(T* out, - T* vals, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - int hidden_dim, - cudaStream_t stream); - -template -void launch_residual_layer_norm(T* norm, - T* res_add, - T* vals, - T* residual, - const T* bias, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - int hidden_dim, - bool preLN, - bool mlp_after_attn, - cudaStream_t stream); -template -void launch_dequantize(T* output, - const int8_t* input, - const float* qscale, - unsigned output_size, - unsigned hidden_dim, - unsigned groups, - unsigned merge_count, - cudaStream_t stream); - -template -void launch_gptj_residual_add(T* input, - T* output, - T* attn, - T* bias, - T* attn_bias, - int batch, - int head_size, - int mp_size, - cudaStream_t stream); - -template -void launch_apply_rotary_pos_emb(T* mixed_query, - T* key_layer, - unsigned head_size, - unsigned seq_len, - unsigned rotary_dim, - unsigned offset, - unsigned num_heads, - unsigned batch, - bool rotate_half, - bool rotate_every_two, - cudaStream_t stream); - -template -void launch_moe_res_matmul(T* residual, - T* coef, - T* mlp_out, - int seq_len, - int hidden_dim, - cudaStream_t stream); diff --git a/deepspeed/ops/csrc/transformer/inference/includes/custom_hip_layers.h b/deepspeed/ops/csrc/transformer/inference/includes/custom_hip_layers.h deleted file mode 100644 index 36cab34..0000000 --- a/deepspeed/ops/csrc/transformer/inference/includes/custom_hip_layers.h +++ /dev/null @@ -1,125 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
-#pragma once - -#ifdef __HIP_PLATFORM_HCC__ -#define HALF_PRECISION_AVAILABLE = 1 -#include -#else -#if __CUDA_ARCH__ >= 700 -#define HALF_PRECISION_AVAILABLE = 1 -#endif -#include -#endif - -#include -#include -#include -#include -#include -#include - -#define MAX_WARP_NUM 32 -#define WARP_SIZE 32 -#define SMs 80 - -#define MAX_REGISTERS 256 -template -void launch_attn_softmax_v2(T* vals, - T* mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - int batch_size, - int heads, - int num_seq, - int sequence_length, - float scale, - hipStream_t stream); - -// Fused bias add with gelu activation -template -void launch_bias_gelu(T* input, - const T* bias, - int intermediate_size, - int batch_size, - hipStream_t stream); -template -void launch_bias_add(T* input, const T* bias, int hidden_size, int batch_size, hipStream_t stream); - -template -void launch_bias_residual(T* input, - T* output, - T* attn, - T* bias, - T* attn_bias, - int batch, - int hidden_dim, - int mp_size, - hipStream_t stream); - -template -void launch_layer_norm(T* out, - T* vals, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - int hidden_dim, - hipStream_t stream); - -template -void launch_residual_layer_norm(T* norm, - T* res_add, - T* vals, - T* residual, - const T* bias, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - int hidden_dim, - bool preLN, - bool mlp_after_attn, - hipStream_t stream); -template -void launch_dequantize(T* output, - const int8_t* input, - const float* qscale, - unsigned output_size, - unsigned hidden_dim, - unsigned groups, - unsigned merge_count, - hipStream_t stream); - -template -void launch_gptj_residual_add(T* input, - T* output, - T* attn, - T* bias, - T* attn_bias, - int batch, - int head_size, - int mp_size, - hipStream_t stream); - -template -void launch_apply_rotary_pos_emb(T* mixed_query, - T* key_layer, - unsigned head_size, - unsigned seq_len, - unsigned rotary_dim, - unsigned 
offset, - unsigned num_heads, - unsigned batch, - bool rotate_half, - bool rotate_every_two, - hipStream_t stream); - -template -void launch_moe_res_matmul(T* residual, - T* coef, - T* mlp_out, - int seq_len, - int hidden_dim, - hipStream_t stream); diff --git a/deepspeed/ops/csrc/transformer/normalize_kernels.cu b/deepspeed/ops/csrc/transformer/normalize_kernels.cu deleted file mode 100644 index d634c7f..0000000 --- a/deepspeed/ops/csrc/transformer/normalize_kernels.cu +++ /dev/null @@ -1,2121 +0,0 @@ -#include "custom_cuda_layers.h" - -namespace cg = cooperative_groups; - -/* -Fused bias add, residual (elementwise) add, and normalization layer. - -For FP16, this kernel does not promote to FP32 in order to utilize the 2x throughput for -__half2 instructions, and avoid the conversion overhead (1/8 of __hal2 arithmetic). - -For specific launch constraints, see the launch functions. -*/ - -#define NORM_REG (MAX_REGISTERS / 4) - -__global__ void fused_bias_residual_layer_norm(float* vals, - const float* residual, - const float* gamma, - const float* beta, - float epsilon, - bool preLayerNorm, - bool training, - float* vars, - float* means, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id / WARP_SIZE; - - float vals_arr[NORM_REG]; - __shared__ float shr[MAX_WARP_NUM]; - - residual += (row * row_stride); - vals += (row * row_stride); - - float sum = 0.f; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - vals_arr[i] = residual[i * iteration_stride + id]; - sum += vals_arr[i]; - } - if (high_index < row_stride) { - vals_arr[iterations] = residual[high_index]; - sum += vals_arr[iterations]; - iterations++; - } - - for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); } - - if 
(g.thread_rank() == 0) shr[gid] = sum; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()]; - -#if !defined(__STOCHASTIC_MODE__) || __CUDA_ARCH__ < 700 - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - sum += g.shfl_down(sum, i); - } - - sum = g.shfl(sum, 0); - float mean = sum / row_stride; - if (training) - if (threadIdx.x == 0) means[row] = mean; - float variance = 0.f; - for (int i = 0; i < iterations; i++) { - vals_arr[i] -= mean; - variance += vals_arr[i] * vals_arr[i]; - } - - for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); } - - if (g.thread_rank() == 0) shr[gid] = variance; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - variance += g.shfl_down(variance, i); - } - variance = g.shfl(variance, 0); - variance /= row_stride; - variance += epsilon; - if (training) - if (threadIdx.x == 0) vars[row] = variance; - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - vals_arr[i] = vals_arr[i] * rsqrtf(variance); - vals_arr[i] = - vals_arr[i] * gamma[i * iteration_stride + id] + beta[i * iteration_stride + id]; - vals[i * iteration_stride + id] = vals_arr[i]; - } - if ((high_index) < row_stride) { - vals_arr[iterations] = vals_arr[iterations] * rsqrtf(variance); - vals_arr[iterations] = vals_arr[iterations] * gamma[high_index] + beta[high_index]; - vals[high_index] = vals_arr[iterations]; - } -} - -__global__ void fused_bias_residual_layer_norm(__half* vals, - const __half* residual, - const __half* gamma, - const __half* beta, - float epsilon, - bool preLayerNorm, - bool training, - __half* vars, - __half* means, - int row_stride) -{ -#ifdef HALF_PRECISION_AVAILABLE - int iteration_stride = blockDim.x; - int iterations = 
row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id >> WARP_SIZE_BITS; - - float2 vals_f[NORM_REG]; - __shared__ float shr[MAX_WARP_NUM]; - - __half2* vals_cast = reinterpret_cast<__half2*>(vals); - const __half2* residual_cast = reinterpret_cast(residual); - - residual_cast += (row * row_stride); - vals_cast += (row * row_stride); - - float sum = 0.f; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - vals_f[i] = __half22float2(residual_cast[i * iteration_stride + id]); - sum += vals_f[i].x; - sum += vals_f[i].y; - } - if ((high_index) < row_stride) { - vals_f[iterations] = __half22float2(residual_cast[high_index]); - sum += vals_f[iterations].x; - sum += vals_f[iterations].y; - iterations++; - } - - for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) shr[gid] = sum; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - sum += g.shfl_down(sum, i); - } - sum = g.shfl(sum, 0); - float mean = sum / (row_stride * 2); - - float variance = 0.f; - for (int i = 0; i < iterations; i++) { - vals_f[i].x -= mean; - vals_f[i].y -= mean; - variance += vals_f[i].x * vals_f[i].x; - variance += vals_f[i].y * vals_f[i].y; - } - - for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); } - - if (g.thread_rank() == 0) shr[gid] = variance; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - variance += g.shfl_down(variance, i); - } - variance = g.shfl(variance, 
0); - variance /= (row_stride * 2); - variance += epsilon; - - __half2 variance_h = __float2half2_rn(variance); - const __half2* gamma_cast = reinterpret_cast(gamma); - const __half2* beta_cast = reinterpret_cast(beta); - - if (training && threadIdx.x == 0) { - vars[row] = __float2half(variance); - means[row] = __float2half(mean); - } - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - __half2 vals_arr = __float22half2_rn(vals_f[i]); - vals_arr = vals_arr * h2rsqrt(variance_h); - vals_arr = - vals_arr * gamma_cast[i * iteration_stride + id] + beta_cast[i * iteration_stride + id]; - vals_cast[i * iteration_stride + id] = vals_arr; - } - if ((high_index) < row_stride) { - __half2 vals_arr = __float22half2_rn(vals_f[iterations]); - vals_arr = vals_arr * h2rsqrt(variance_h); - vals_arr = vals_arr * gamma_cast[high_index] + beta_cast[high_index]; - vals_cast[high_index] = vals_arr; - } -#endif -} - -template -void launch_bias_residual_layer_norm(T* vals, - const T* residual, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - int hidden_dim, - cudaStream_t stream, - bool preLayerNorm, - bool training, - T* vars, - T* means); - -template <> -void launch_bias_residual_layer_norm(float* vals, - const float* residual, - const float* gamma, - const float* beta, - float epsilon, - int batch_size, - int hidden_dim, - cudaStream_t stream, - bool preLayerNorm, - bool training, - float* vars, - float* means) -{ - int threads = THREADS; - - dim3 grid_dim(batch_size); - - if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 1; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 2; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim(threads); - - fused_bias_residual_layer_norm<<>>( - vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, means, hidden_dim); -} - -template <> -void launch_bias_residual_layer_norm<__half>(__half* vals, - 
const __half* residual, - const __half* gamma, - const __half* beta, - float epsilon, - int batch_size, - int hidden_dim, - cudaStream_t stream, - bool preLayerNorm, - bool training, - __half* vars, - __half* means) -{ - int threads = 128; - - dim3 grid_dim(batch_size); - - if (hidden_dim > 8192 && hidden_dim <= 16384) - threads <<= 1; - else if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 2; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 3; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim(threads); - - fused_bias_residual_layer_norm<<>>( - vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, means, hidden_dim / 2); -} - -__global__ void fused_bias_residual_layer_norm(float* vals, - const float* residual, - const float* gamma, - const float* beta, - float epsilon, - bool preLayerNorm, - bool training, - float* vars, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id / 32; - - float vals_arr[NORM_REG]; - __shared__ float shr[MAX_WARP_NUM]; - - residual += (row * row_stride); - vals += (row * row_stride); - - float sum = 0.f; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - vals_arr[i] = residual[i * iteration_stride + id]; - sum += vals_arr[i]; - } - if ((high_index) < row_stride) { - vals_arr[iterations] = residual[high_index]; - sum += vals_arr[iterations]; - iterations++; - } - - for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) shr[gid] = sum; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()]; - -#if !defined(__STOCHASTIC_MODE__) || __CUDA_ARCH__ < 700 - b.sync(); -#endif 
- - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - sum += g.shfl_down(sum, i); - } - - sum = g.shfl(sum, 0); - float mean = sum / row_stride; - float variance = 0.f; - for (int i = 0; i < iterations; i++) { - vals_arr[i] -= mean; - variance += vals_arr[i] * vals_arr[i]; - } - - for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); } - - if (g.thread_rank() == 0) shr[gid] = variance; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - variance += g.shfl_down(variance, i); - } - variance = g.shfl(variance, 0); - variance /= row_stride; - variance += epsilon; - if (training) - if (threadIdx.x == 0) vars[row] = variance; - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - vals_arr[i] = vals_arr[i] * rsqrtf(variance); - vals_arr[i] = - vals_arr[i] * gamma[i * iteration_stride + id] + beta[i * iteration_stride + id]; - vals[i * iteration_stride + id] = vals_arr[i]; - } - if ((high_index) < row_stride) { - vals_arr[iterations] = vals_arr[iterations] * rsqrtf(variance); - vals_arr[iterations] = vals_arr[iterations] * gamma[high_index] + beta[high_index]; - vals[high_index] = vals_arr[iterations]; - } -} - -__global__ void fused_bias_residual_layer_norm(__half* vals, - const __half* residual, - const __half* gamma, - const __half* beta, - float epsilon, - bool preLayerNorm, - bool training, - __half* vars, - int row_stride) -{ -#ifdef HALF_PRECISION_AVAILABLE - - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id >> WARP_SIZE_BITS; - - float2 vals_f[NORM_REG]; - __shared__ float shr[MAX_WARP_NUM]; - - __half2* 
vals_cast = reinterpret_cast<__half2*>(vals); - const __half2* residual_cast = reinterpret_cast(residual); - - residual_cast += (row * row_stride); - vals_cast += (row * row_stride); - - float sum = 0.f; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - vals_f[i] = __half22float2(residual_cast[i * iteration_stride + id]); - sum += vals_f[i].x; - sum += vals_f[i].y; - } - if ((high_index) < row_stride) { - vals_f[iterations] = __half22float2(residual_cast[high_index]); - sum += vals_f[iterations].x; - sum += vals_f[iterations].y; - iterations++; - } - - for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) shr[gid] = sum; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - sum += g.shfl_down(sum, i); - } - sum = g.shfl(sum, 0); - float mean = sum / (row_stride * 2); - - float variance = 0.f; - for (int i = 0; i < iterations; i++) { - vals_f[i].x -= mean; - vals_f[i].y -= mean; - variance += vals_f[i].x * vals_f[i].x; - variance += vals_f[i].y * vals_f[i].y; - } - - for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); } - - if (g.thread_rank() == 0) shr[gid] = variance; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - variance += g.shfl_down(variance, i); - } - variance = g.shfl(variance, 0); - variance /= (row_stride * 2); - variance += epsilon; - - __half2 variance_h = __float2half2_rn(variance); - const __half2* gamma_cast = reinterpret_cast(gamma); - const __half2* beta_cast = reinterpret_cast(beta); - - if (training && threadIdx.x == 0) vars[row] = __float2half(variance); - - 
iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - __half2 vals_arr = __float22half2_rn(vals_f[i]); - vals_arr = vals_arr * h2rsqrt(variance_h); - vals_arr = - vals_arr * gamma_cast[i * iteration_stride + id] + beta_cast[i * iteration_stride + id]; - vals_cast[i * iteration_stride + id] = vals_arr; - } - if ((high_index) < row_stride) { - __half2 vals_arr = __float22half2_rn(vals_f[iterations]); - vals_arr = vals_arr * h2rsqrt(variance_h); - vals_arr = vals_arr * gamma_cast[high_index] + beta_cast[high_index]; - vals_cast[high_index] = vals_arr; - } -#endif -} - -template -void launch_bias_residual_layer_norm(T* vals, - const T* residual, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - int hidden_dim, - cudaStream_t stream, - bool preLayerNorm, - bool training, - T* vars); - -/* -To tune this launch the following restrictions must be met: - -For float: -row_stride == hidden_size -threads * iterations == row_stride -threads is in [32, 64, 128, 256, 512, 1024] - -For half: -row_stride == hidden_size / 2 -threads * iterations == row_stride -threads is in [32, 64, 128, 256, 512, 1024] - -*/ - -template <> -void launch_bias_residual_layer_norm(float* vals, - const float* residual, - const float* gamma, - const float* beta, - float epsilon, - int batch_size, - int hidden_dim, - cudaStream_t stream, - bool preLayerNorm, - bool training, - float* vars) -{ - int threads = THREADS; - - dim3 grid_dim(batch_size); - - // There are some limitations to call below functions, now just enumerate the situations. 
- - if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 1; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 2; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim(threads); - - fused_bias_residual_layer_norm<<>>( - vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, hidden_dim); -} - -template <> -void launch_bias_residual_layer_norm<__half>(__half* vals, - const __half* residual, - const __half* gamma, - const __half* beta, - float epsilon, - int batch_size, - int hidden_dim, - cudaStream_t stream, - bool preLayerNorm, - bool training, - __half* vars) -{ - int threads = 128; - - dim3 grid_dim(batch_size); - - // There are some limitations to call below functions, now just enumerate the situations. - - if (hidden_dim > 8192 && hidden_dim <= 16384) - threads <<= 1; - else if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 2; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 3; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim(threads); - fused_bias_residual_layer_norm<<>>( - vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, hidden_dim / 2); -} - -/* Normalize Gamma & Betta gradients - * Compute gradients using either X_hat or - * normalize input (invertible). - * Combine transpose with gradients computation. 
- */ - -template -__global__ void LayerNormBackward1(const T* __restrict__ out_grad, - const T* __restrict__ vals_hat, - const T* __restrict__ gamma, - const T* __restrict__ betta, - T* __restrict__ gamma_grad, - T* __restrict__ betta_grad, - int rows, - int width, - bool invertible) -{ - __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1]; - __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1]; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int idx = blockDim.x * blockIdx.x + threadIdx.x; - int offset = threadIdx.y * width + idx; - int y_stride = width * TILE_DIM; - - float betta_reg = (invertible ? (float)betta[idx] : 0.0f); - float gamma_reg = (float)gamma[idx]; - - // Loop across matrix height - float betta_tmp = 0; - float gamma_tmp = 0; - for (int r = threadIdx.y; r < rows; r += TILE_DIM) { - float grad = (float)out_grad[offset]; - float val = (invertible ? ((float)vals_hat[offset] - betta_reg) / gamma_reg - : (float)vals_hat[offset]); - betta_tmp += grad; - gamma_tmp += (val * grad); - - offset += y_stride; - } - - betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp; - gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp; - - __syncthreads(); - - // Sum the shared buffer. - float s1 = betta_buffer[threadIdx.y][threadIdx.x]; - float s2 = gamma_buffer[threadIdx.y][threadIdx.x]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < TILE_DIM; i <<= 1) { - s1 += g.shfl_down(s1, i); - s2 += g.shfl_down(s2, i); - } - - if (threadIdx.x == 0) { - int pos = blockIdx.x * TILE_DIM + threadIdx.y; - betta_grad[pos] = s1; - gamma_grad[pos] = s2; - } -} - -/* Normalize Gamma & Betta gradients - * Compute gradients using the input to - * the normalize. - * Combine transpose with gradients computation. 
- */ - -template -__global__ void LayerNormBackward1(const T* __restrict__ out_grad, - const T* __restrict__ X_data, - const T* __restrict__ vars, - const T* __restrict__ means, - T* __restrict__ gamma_grad, - T* __restrict__ betta_grad, - int rows, - int width) -{ - __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1]; - __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1]; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int idx = blockDim.x * blockIdx.x + threadIdx.x; - int offset = threadIdx.y * width + idx; - int y_stride = width * TILE_DIM; - - int pos = blockIdx.x * TILE_DIM + threadIdx.y; - // Loop across matrix height - - float betta_tmp = 0; - float gamma_tmp = 0; - for (int r = threadIdx.y; r < rows; r += TILE_DIM) { - float grad = (float)out_grad[offset]; - float val = (float)X_data[offset]; - val = (val - (float)means[r]) * rsqrtf((float)vars[r]); - betta_tmp += grad; - gamma_tmp += (val * grad); - - offset += y_stride; - } - - betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp; - gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp; - - __syncthreads(); - - // Sum the shared buffer. - float s1 = betta_buffer[threadIdx.y][threadIdx.x]; - float s2 = gamma_buffer[threadIdx.y][threadIdx.x]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < TILE_DIM; i <<= 1) { - s1 += g.shfl_down(s1, i); - s2 += g.shfl_down(s2, i); - } - - if (threadIdx.x == 0) { - betta_grad[pos] = s1; - gamma_grad[pos] = s2; - } -} -/* - -/* Backward Normalize (Input-Gradient) - * Using the means and variances from the input - * This type of backward is invertible! - * We do the backward using the X_hat (X - u) / sqrt(variance) or the output of Normalization. 
- */ - -__global__ void LayerNormBackward2(const float* out_grad, - const float* vals_hat, - const float* gamma, - const float* betta, - const float* vars, - float* inp_grad, - bool invertible, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int wid = id / WARP_SIZE; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - __shared__ float partialSum[MAX_WARP_NUM]; - - out_grad += (row * row_stride); - vals_hat += (row * row_stride); - inp_grad += (row * row_stride); - - float vals_arr[NORM_REG]; - float vals_hat_arr[NORM_REG]; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - float gamma_reg = gamma[i * iteration_stride + id]; - vals_arr[i] = out_grad[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; - vals_hat_arr[i] = - (invertible ? (vals_hat[i * iteration_stride + id] - betta[i * iteration_stride + id]) / - gamma_reg - : vals_hat[i * iteration_stride + id]); - } - if ((high_index) < row_stride) { - float gamma_reg = gamma[high_index]; - vals_arr[iterations] = out_grad[high_index]; - vals_arr[iterations] *= gamma_reg; - vals_hat_arr[iterations] = - (invertible ? 
(vals_hat[high_index] - betta[high_index]) / gamma_reg - : vals_hat[high_index]); - iterations++; - } - - float var_reg = vars[row]; - - float sum = 0; - for (int i = 0; i < iterations; i++) { - sum += vals_hat_arr[i] * vals_arr[i] * - sqrtf(var_reg); // dval_hat = gamma * (x - u) * out_grad - vals_arr[i] *= rsqrtf(var_reg); // dvar_inv = gamma * out_grad / sqrt(var) - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= row_stride; - - for (int i = 0; i < iterations; i++) { vals_arr[i] += ((-sum * vals_hat_arr[i]) / var_reg); } - - sum = 0; - for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= row_stride; - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) inp_grad[i * iteration_stride + id] = (vals_arr[i] - sum); - if ((high_index) < row_stride) inp_grad[high_index] = (vals_arr[iterations] - sum); -} - -__global__ void LayerNormBackward2(const __half* out_grad, - const __half* vals_hat, - const __half* gamma, - const __half* betta, - const __half* vars, - __half* inp_grad, - bool invertible, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = 
blockIdx.x; - int id = threadIdx.x; - int wid = id / WARP_SIZE; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - __shared__ float partialSum[MAX_WARP_NUM]; - - __half2 vals_arr[NORM_REG]; - float2 vals_arr_f[NORM_REG]; - __half2 vals_hat_arr[NORM_REG]; - - __half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad); - const __half2* out_grad_h = reinterpret_cast(out_grad); - const __half2* vals_hat_h = reinterpret_cast(vals_hat); - - inp_grad_h += (row * row_stride); - out_grad_h += (row * row_stride); - vals_hat_h += (row * row_stride); - - const __half2* gamma_h = reinterpret_cast(gamma); - const __half2* betta_h = (invertible ? reinterpret_cast(betta) : nullptr); - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - __half2 gamma_reg = gamma_h[i * iteration_stride + id]; - vals_arr[i] = out_grad_h[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; - vals_hat_arr[i] = - (invertible - ? (vals_hat_h[i * iteration_stride + id] - betta_h[i * iteration_stride + id]) / - gamma_reg - : vals_hat_h[i * iteration_stride + id]); - } - if ((high_index) < row_stride) { - __half2 gamma_reg = gamma_h[high_index]; - vals_arr[iterations] = out_grad_h[high_index]; - vals_arr[iterations] *= gamma_reg; - vals_hat_arr[iterations] = - (invertible ? 
(vals_hat_h[high_index] - betta_h[high_index]) / gamma_reg - : vals_hat_h[high_index]); - iterations++; - } - __half var_h = vars[row]; - __half2 var_reg = __halves2half2(var_h, var_h); - - float sum = 0.f; - for (int i = 0; i < iterations; i++) { - __half2 result_h = (vals_hat_arr[i] * vals_arr[i] * h2sqrt(var_reg)); - float2 result_f = __half22float2(result_h); - sum += result_f.x; - sum += result_f.y; - vals_arr[i] *= h2rsqrt(var_reg); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - __half2 sum_h = __float2half2_rn(sum); - - for (int i = 0; i < iterations; i++) { - __half2 temp = ((-sum_h * vals_hat_arr[i]) / (var_reg)); - vals_arr_f[i] = __half22float2(vals_arr[i]); - float2 temp_f = __half22float2(temp); - vals_arr_f[i].x += temp_f.x; - vals_arr_f[i].y += temp_f.y; - } - sum = 0.f; - - for (int i = 0; i < iterations; i++) { - sum += (vals_arr_f[i].x); - sum += (vals_arr_f[i].y); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - vals_arr_f[i].x -= sum; - vals_arr_f[i].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[i]); - - inp_grad_h[i * iteration_stride + id] = temp; - } - if ((high_index) < row_stride) { - vals_arr_f[iterations].x -= sum; - 
vals_arr_f[iterations].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[iterations]); - - inp_grad_h[high_index] = temp; - } -} - -template <> -void launch_layerNorm_backward(const float* out_grad, - const float* vals_hat, - const float* vars, - const float* gamma, - float* gamma_grad, - float* betta_grad, - float* inp_grad, - int batch, - int hidden_dim, - cudaStream_t stream[2], - bool invertible, - const float* betta) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - LayerNormBackward1<<>>( - out_grad, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible); - - dim3 grid_dim2(batch); - - if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 1; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 2; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads); - - LayerNormBackward2<<>>( - out_grad, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim); -} - -template <> -void launch_layerNorm_backward<__half>(const __half* out_grad, - const __half* vals_hat, - const __half* vars, - const __half* gamma, - __half* gamma_grad, - __half* betta_grad, - __half* inp_grad, - int batch, - int hidden_dim, - cudaStream_t stream[2], - bool invertible, - const __half* betta) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - // LayerNormBackward1<__half><<>>( - // out_grad, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible); - - dim3 grid_dim2(batch); - - if (hidden_dim > 8192 && hidden_dim <= 16384) - threads <<= 1; - else if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 2; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 3; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads / 2); - - LayerNormBackward2<<>>( - out_grad, 
vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim / 2); -} - -/* Backward Normalize (Input-Gradient) - * Using the means and variances from the input - * This type of backward is not invertible! - * We do the backward using the input (X) - */ - -__global__ void LayerNormBackward2(const float* out_grad, - const float* X_vals, - const float* gamma, - const float* vars, - const float* means, - float* inp_grad, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int wid = id >> WARP_SIZE_BITS; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - __shared__ float partialSum[MAX_WARP_NUM]; - - out_grad += (row * row_stride); - X_vals += (row * row_stride); - inp_grad += (row * row_stride); - - float vals_arr[NORM_REG]; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - float gamma_reg = gamma[i * iteration_stride + id]; - vals_arr[i] = out_grad[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; - } - if ((high_index) < row_stride) { - float gamma_reg = gamma[high_index]; - vals_arr[iterations] = out_grad[high_index]; - vals_arr[iterations] *= gamma_reg; - iterations++; - } - - float var_reg = vars[row]; - float mean_reg = means[row]; - - float sum = 0; - float xu[NORM_REG]; - for (int i = 0; i < iterations; i++) { - xu[i] = (X_vals[i * iteration_stride + id] - mean_reg); - sum += vals_arr[i] * xu[i]; - vals_arr[i] *= rsqrtf(var_reg); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - 
sum = g.shfl(sum, 0); - sum /= row_stride; - - for (int i = 0; i < iterations; i++) { - vals_arr[i] += (-sum * xu[i] * rsqrtf(var_reg) / (var_reg)); - } - - sum = 0; - for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= row_stride; - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) inp_grad[i * iteration_stride + id] = (vals_arr[i] - sum); - if ((high_index) < row_stride) inp_grad[high_index] = (vals_arr[iterations] - sum); -} - -__global__ void LayerNormBackward2(const __half* out_grad, - const __half* X_vals, - const __half* gamma, - const __half* vars, - const __half* means, - __half* inp_grad, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int wid = id >> WARP_SIZE_BITS; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - - __shared__ float partialSum[MAX_WARP_NUM]; - - __half2 vals_arr[NORM_REG]; - float2 vals_arr_f[NORM_REG]; - __half2 xu[NORM_REG]; - - __half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad); - const __half2* out_grad_h = reinterpret_cast(out_grad); - const __half2* vals_hat_h = reinterpret_cast(X_vals); - - inp_grad_h += (row * row_stride); - out_grad_h += (row * row_stride); - vals_hat_h += (row * row_stride); - - const __half2* gamma_h = reinterpret_cast(gamma); - int high_index = iterations * iteration_stride + id; - - __half mean_h = means[row]; - __half2 mean_reg = __halves2half2(mean_h, mean_h); 
-#pragma unroll - for (int i = 0; i < iterations; i++) { - __half2 gamma_reg = gamma_h[i * iteration_stride + id]; - vals_arr[i] = out_grad_h[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; // out_grad * gamma - xu[i] = (vals_hat_h[i * iteration_stride + id] - mean_reg); - } - if ((high_index) < row_stride) { - __half2 gamma_reg = gamma_h[high_index]; - vals_arr[iterations] = out_grad_h[high_index]; - vals_arr[iterations] *= gamma_reg; // out_grad * gamma - xu[iterations] = (vals_hat_h[high_index] - mean_reg); - iterations++; - } - __half var_h = vars[row]; - __half2 var_reg = __halves2half2(var_h, var_h); - - float sum = 0.f; - for (int i = 0; i < iterations; i++) { - __half2 result_h = (xu[i] * vals_arr[i]); - float2 result_f = __half22float2(result_h); - sum += result_f.x; - sum += result_f.y; - vals_arr[i] *= h2rsqrt(var_reg); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - __half2 sum_h = __float2half2_rn(sum); - - for (int i = 0; i < iterations; i++) { - __half2 xu_grad = ((-sum_h * xu[i] * h2rsqrt(var_reg)) / (var_reg)); - vals_arr_f[i] = __half22float2(vals_arr[i]); - float2 xu_grad_f = __half22float2(xu_grad); - vals_arr_f[i].x += xu_grad_f.x; - vals_arr_f[i].y += xu_grad_f.y; - } - - sum = 0.f; - for (int i = 0; i < iterations; i++) { - sum += (vals_arr_f[i].x); - sum += (vals_arr_f[i].y); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < 
warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - vals_arr_f[i].x -= sum; - vals_arr_f[i].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[i]); - inp_grad_h[i * iteration_stride + id] = temp; - } - if ((high_index) < row_stride) { - vals_arr_f[iterations].x -= sum; - vals_arr_f[iterations].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[iterations]); - inp_grad_h[high_index] = temp; - } -} - -template <> -void launch_layerNorm_backward(const float* out_grad, - const float* X_data, - const float* vars, - const float* means, - const float* gamma, - float* gamma_grad, - float* betta_grad, - float* inp_grad, - int batch, - int hidden_dim, - cudaStream_t stream[2]) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - LayerNormBackward1<<>>( - out_grad, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim); - - dim3 grid_dim2(batch); - - if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 1; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 2; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads); - LayerNormBackward2<<>>( - out_grad, X_data, gamma, vars, means, inp_grad, hidden_dim); -} - -template <> -void launch_layerNorm_backward<__half>(const __half* out_grad, - const __half* X_data, - const __half* vars, - const __half* means, - const __half* gamma, - __half* gamma_grad, - __half* betta_grad, - __half* inp_grad, - int batch, - int hidden_dim, - cudaStream_t stream[2]) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - LayerNormBackward1<__half><<>>( - out_grad, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim); - - dim3 grid_dim2(batch); - - if (hidden_dim > 8192 && hidden_dim <= 16384) 
- threads <<= 1; - else if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 2; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 3; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads / 2); - LayerNormBackward2<<>>( - out_grad, X_data, gamma, vars, means, inp_grad, hidden_dim / 2); -} - -template -__global__ void LayerNormBackward1_fused_add(const T* __restrict__ out_grad1, - const T* __restrict__ out_grad2, - const T* __restrict__ vals_hat, - const T* __restrict__ gamma, - const T* __restrict__ betta, - T* __restrict__ gamma_grad, - T* __restrict__ betta_grad, - int rows, - int width, - bool invertible) -{ - __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1]; - __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1]; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int idx = blockDim.x * blockIdx.x + threadIdx.x; - int offset = threadIdx.y * width + idx; - int y_stride = width * TILE_DIM; - - float betta_reg = (invertible ? (float)betta[idx] : 0.0f); - float gamma_reg = (float)gamma[idx]; - - // Loop across matrix height - float betta_tmp = 0; - float gamma_tmp = 0; - for (int r = threadIdx.y; r < rows; r += TILE_DIM) { - float grad = (float)out_grad1[offset] + (float)out_grad2[offset]; - float val = (invertible ? ((float)vals_hat[offset] - betta_reg) / gamma_reg - : (float)vals_hat[offset]); - betta_tmp += grad; - gamma_tmp += (val * grad); - - offset += y_stride; - } - - betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp; - gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp; - - __syncthreads(); - - // Sum the shared buffer. 
- float s1 = betta_buffer[threadIdx.y][threadIdx.x]; - float s2 = gamma_buffer[threadIdx.y][threadIdx.x]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < TILE_DIM; i <<= 1) { - s1 += g.shfl_down(s1, i); - s2 += g.shfl_down(s2, i); - } - - if (threadIdx.x == 0) { - int pos = blockIdx.x * TILE_DIM + threadIdx.y; - betta_grad[pos] = s1; - gamma_grad[pos] = s2; - } -} - -template -__global__ void LayerNormBackward1_fused_add(const T* __restrict__ out_grad1, - const T* __restrict__ out_grad2, - const T* __restrict__ X_data, - const T* __restrict__ vars, - const T* __restrict__ means, - T* __restrict__ gamma_grad, - T* __restrict__ betta_grad, - int rows, - int width) -{ - __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1]; - __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1]; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int idx = blockDim.x * blockIdx.x + threadIdx.x; - int offset = threadIdx.y * width + idx; - int y_stride = width * TILE_DIM; - - int pos = blockIdx.x * TILE_DIM + threadIdx.y; - // Loop across matrix height - - float betta_tmp = 0; - float gamma_tmp = 0; - for (int r = threadIdx.y; r < rows; r += TILE_DIM) { - float grad = (float)out_grad1[offset] + (float)out_grad2[offset]; - float val = (float)X_data[offset]; - val = (val - (float)means[r]) * rsqrtf((float)vars[r]); - betta_tmp += grad; - gamma_tmp += (val * grad); - - offset += y_stride; - } - - betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp; - gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp; - - __syncthreads(); - - // Sum the shared buffer. 
- float s1 = betta_buffer[threadIdx.y][threadIdx.x]; - float s2 = gamma_buffer[threadIdx.y][threadIdx.x]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < TILE_DIM; i <<= 1) { - s1 += g.shfl_down(s1, i); - s2 += g.shfl_down(s2, i); - } - - if (threadIdx.x == 0) { - betta_grad[pos] = s1; - gamma_grad[pos] = s2; - } -} - -__global__ void LayerNormBackward2_fused_add(const float* out_grad1, - const float* out_grad2, - const float* vals_hat, - const float* gamma, - const float* betta, - const float* vars, - float* inp_grad, - bool invertible, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int wid = id / WARP_SIZE; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - __shared__ float partialSum[MAX_WARP_NUM]; - - out_grad1 += (row * row_stride); - out_grad2 += (row * row_stride); - vals_hat += (row * row_stride); - inp_grad += (row * row_stride); - - float vals_arr[NORM_REG]; - float vals_hat_arr[NORM_REG]; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - float gamma_reg = gamma[i * iteration_stride + id]; - vals_arr[i] = out_grad1[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; - vals_hat_arr[i] = - (invertible ? (vals_hat[i * iteration_stride + id] - betta[i * iteration_stride + id]) / - gamma_reg - : vals_hat[i * iteration_stride + id]); - } - if ((high_index) < row_stride) { - float gamma_reg = gamma[high_index]; - vals_arr[iterations] = out_grad1[high_index]; - vals_arr[iterations] *= gamma_reg; - vals_hat_arr[iterations] = - (invertible ? 
(vals_hat[high_index] - betta[high_index]) / gamma_reg - : vals_hat[high_index]); - iterations++; - } - - float var_reg = vars[row]; - - float sum = 0; - for (int i = 0; i < iterations; i++) { - sum += vals_hat_arr[i] * vals_arr[i] * sqrtf(var_reg); - vals_arr[i] *= rsqrtf(var_reg); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= row_stride; - - for (int i = 0; i < iterations; i++) { vals_arr[i] += ((-sum * vals_hat_arr[i]) / var_reg); } - - sum = 0; - for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= row_stride; - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) - inp_grad[i * iteration_stride + id] = - (vals_arr[i] - sum) + out_grad2[i * iteration_stride + id]; - if ((high_index) < row_stride) - inp_grad[high_index] = (vals_arr[iterations] - sum) + out_grad2[high_index]; -} - -__global__ void LayerNormBackward2_fused_add(const __half* out_grad1, - const __half* out_grad2, - const __half* vals_hat, - const __half* gamma, - const __half* betta, - const __half* vars, - __half* inp_grad, - bool invertible, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - 
- int row = blockIdx.x; - int id = threadIdx.x; - int wid = id / WARP_SIZE; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - __shared__ float partialSum[MAX_WARP_NUM]; - - __half2 vals_arr[NORM_REG]; - float2 vals_arr_f[NORM_REG]; - __half2 vals_hat_arr[NORM_REG]; - - // float2 result[iterations]; - - __half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad); - const __half2* out_grad_h1 = reinterpret_cast(out_grad1); - const __half2* out_grad_h2 = reinterpret_cast(out_grad2); - const __half2* vals_hat_h = reinterpret_cast(vals_hat); - - inp_grad_h += (row * row_stride); - out_grad_h1 += (row * row_stride); - out_grad_h2 += (row * row_stride); - vals_hat_h += (row * row_stride); - - const __half2* gamma_h = reinterpret_cast(gamma); - const __half2* betta_h = (invertible ? reinterpret_cast(betta) : nullptr); - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - __half2 gamma_reg = gamma_h[i * iteration_stride + id]; - vals_arr[i] = out_grad_h1[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; // out_grad * gamma - vals_hat_arr[i] = - (invertible - ? (vals_hat_h[i * iteration_stride + id] - betta_h[i * iteration_stride + id]) / - gamma_reg - : vals_hat_h[i * iteration_stride + id]); - } - if ((high_index) < row_stride) { - __half2 gamma_reg = gamma_h[high_index]; - vals_arr[iterations] = out_grad_h1[high_index]; - vals_arr[iterations] *= gamma_reg; // out_grad * gamma - vals_hat_arr[iterations] = - (invertible ? 
(vals_hat_h[high_index] - betta_h[high_index]) / gamma_reg - : vals_hat_h[high_index]); - iterations++; - } - __half var_h = vars[row]; - __half2 var_reg = __halves2half2(var_h, var_h); - - float sum = 0.f; - for (int i = 0; i < iterations; i++) { - __half2 result_h = (vals_hat_arr[i] * vals_arr[i] * h2sqrt(var_reg)); - float2 result_f = __half22float2(result_h); - sum += result_f.x; - sum += result_f.y; - vals_arr[i] *= h2rsqrt(var_reg); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - __half2 sum_h = __float2half2_rn(sum); - - for (int i = 0; i < iterations; i++) { - __half2 temp = ((-sum_h * vals_hat_arr[i]) / (var_reg)); - vals_arr_f[i] = __half22float2(vals_arr[i]); - float2 temp_f = __half22float2(temp); - vals_arr_f[i].x += temp_f.x; - vals_arr_f[i].y += temp_f.y; - } - sum = 0.f; - for (int i = 0; i < iterations; i++) { - sum += (vals_arr_f[i].x); - sum += (vals_arr_f[i].y); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - vals_arr_f[i].x -= sum; - vals_arr_f[i].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[i]); - - inp_grad_h[i * iteration_stride + id] = temp + out_grad_h2[i * iteration_stride + id]; - } - if ((high_index) < row_stride) { - 
vals_arr_f[iterations].x -= sum; - vals_arr_f[iterations].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[iterations]); - - inp_grad_h[high_index] = temp + out_grad_h2[high_index]; - } -} - -template <> -void launch_layerNorm_backward_fused_add(const float* out_grad1, - const float* out_grad2, - const float* vals_hat, - const float* vars, - const float* gamma, - float* gamma_grad, - float* betta_grad, - float* inp_grad, - int batch, - int hidden_dim, - cudaStream_t stream[2], - bool invertible, - const float* betta) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - LayerNormBackward1<<>>( - out_grad1, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible); - - dim3 grid_dim2(batch); - - if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 1; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 2; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads); - LayerNormBackward2_fused_add<<>>( - out_grad1, out_grad2, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim); -} - -template <> -void launch_layerNorm_backward_fused_add<__half>(const __half* out_grad1, - const __half* out_grad2, - const __half* vals_hat, - const __half* vars, - const __half* gamma, - __half* gamma_grad, - __half* betta_grad, - __half* inp_grad, - int batch, - int hidden_dim, - cudaStream_t stream[2], - bool invertible, - const __half* betta) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - LayerNormBackward1<__half><<>>( - out_grad1, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible); - - dim3 grid_dim2(batch); - - if (hidden_dim > 8192 && hidden_dim <= 16384) - threads <<= 1; - else if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 2; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 3; - else if 
(hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads / 2); - LayerNormBackward2_fused_add<<>>( - out_grad1, out_grad2, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim / 2); -} - -/* Backward Normalize (Input-Gradient) - * Using the means and variances from the input - * This type of backward is not invertible! - * We do the backward using the input (X) - */ - -__global__ void LayerNormBackward2_fused_add(const float* out_grad1, - const float* out_grad2, - const float* X_vals, - const float* gamma, - const float* vars, - const float* means, - float* inp_grad, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int wid = id / WARP_SIZE; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - __shared__ float partialSum[MAX_WARP_NUM]; - - float vals_arr[NORM_REG]; - float vals_hat_arr[NORM_REG]; - - out_grad1 += (row * row_stride); - out_grad2 += (row * row_stride); - X_vals += (row * row_stride); - inp_grad += (row * row_stride); - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - float gamma_reg = gamma[i * iteration_stride + id]; - vals_arr[i] = out_grad1[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; - vals_hat_arr[i] = X_vals[i * iteration_stride + id]; - } - if ((high_index) < row_stride) { - float gamma_reg = gamma[high_index]; - vals_arr[iterations] = out_grad1[high_index]; - vals_arr[iterations] *= gamma_reg; - vals_hat_arr[iterations] = X_vals[high_index]; - iterations++; - } - - float var_reg = vars[row]; - float mean_reg = means[row]; - - float sum = 0; - float xu[NORM_REG]; - for (int i = 0; i < iterations; i++) { - xu[i] = (vals_hat_arr[i] - mean_reg); - sum += vals_arr[i] * xu[i]; - vals_arr[i] *= rsqrtf(var_reg); - 
} - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= row_stride; - - for (int i = 0; i < iterations; i++) { - vals_arr[i] += (-sum * xu[i] * rsqrtf(var_reg) / (var_reg)); - } - - sum = 0; - for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= row_stride; - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) - inp_grad[i * iteration_stride + id] = - (vals_arr[i] - sum) + out_grad2[i * iteration_stride + id]; - if ((high_index) < row_stride) - inp_grad[high_index] = (vals_arr[iterations] - sum) + out_grad2[high_index]; -} - -__global__ void LayerNormBackward2_fused_add(const __half* out_grad1, - const __half* out_grad2, - const __half* X_vals, - const __half* gamma, - const __half* vars, - const __half* means, - __half* inp_grad, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int wid = id / WARP_SIZE; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - - __shared__ float partialSum[MAX_WARP_NUM]; - - __half2 vals_arr[NORM_REG]; - float2 vals_arr_f[NORM_REG]; - __half2 vals_hat_arr[NORM_REG]; - - __half2* 
inp_grad_h = reinterpret_cast<__half2*>(inp_grad); - const __half2* out_grad_h1 = reinterpret_cast(out_grad1); - const __half2* out_grad_h2 = reinterpret_cast(out_grad2); - const __half2* vals_hat_h = reinterpret_cast(X_vals); - - out_grad_h1 += (row * row_stride); - out_grad_h2 += (row * row_stride); - inp_grad_h += (row * row_stride); - vals_hat_h += (row * row_stride); - - const __half2* gamma_h = reinterpret_cast(gamma); - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - __half2 gamma_reg = gamma_h[i * iteration_stride + id]; - vals_arr[i] = out_grad_h1[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; // out_grad * gamma - vals_hat_arr[i] = vals_hat_h[i * iteration_stride + id]; - } - if ((high_index) < row_stride) { - __half2 gamma_reg = gamma_h[high_index]; - vals_arr[iterations] = out_grad_h1[high_index]; - vals_arr[iterations] *= gamma_reg; // out_grad * gamma - vals_hat_arr[iterations] = vals_hat_h[high_index]; - iterations++; - } - - __half mean_h = means[row]; - __half var_h = vars[row]; - __half2 var_reg = __halves2half2(var_h, var_h); - __half2 mean_reg = __halves2half2(mean_h, mean_h); - __half2 xu[NORM_REG]; - - float sum = 0.f; - for (int i = 0; i < iterations; i++) { - xu[i] = (vals_hat_arr[i] - mean_reg); - __half2 result_h = (xu[i] * vals_arr[i]); - float2 result_f = __half22float2(result_h); - sum += result_f.x; - sum += result_f.y; - vals_arr[i] *= h2rsqrt(var_reg); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - __half2 sum_h = __float2half2_rn(sum); - - for (int i = 0; i < iterations; i++) { - __half2 xu_grad = ((-sum_h * 
xu[i] * h2rsqrt(var_reg)) / (var_reg)); - vals_arr_f[i] = __half22float2(vals_arr[i]); - float2 xu_grad_f = __half22float2(xu_grad); - vals_arr_f[i].x += xu_grad_f.x; - vals_arr_f[i].y += xu_grad_f.y; - } - - sum = 0.f; - for (int i = 0; i < iterations; i++) { - sum += (vals_arr_f[i].x); - sum += (vals_arr_f[i].y); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - vals_arr_f[i].x -= sum; - vals_arr_f[i].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[i]); - inp_grad_h[i * iteration_stride + id] = temp + out_grad_h2[i * iteration_stride + id]; - } - if ((high_index) < row_stride) { - vals_arr_f[iterations].x -= sum; - vals_arr_f[iterations].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[iterations]); - inp_grad_h[high_index] = temp + out_grad_h2[high_index]; - } -} - -template <> -void launch_layerNorm_backward_fused_add(const float* out_grad1, - const float* out_grad2, - const float* X_data, - const float* vars, - const float* means, - const float* gamma, - float* gamma_grad, - float* betta_grad, - float* inp_grad, - int batch, - int hidden_dim, - cudaStream_t stream[2]) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - LayerNormBackward1<<>>( - out_grad1, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim); - - dim3 grid_dim2(batch); - - if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 1; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 2; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport 
hidden_dim."); - - dim3 block_dim2(threads); - LayerNormBackward2_fused_add<<>>( - out_grad1, out_grad2, X_data, gamma, vars, means, inp_grad, hidden_dim); -} - -template <> -void launch_layerNorm_backward_fused_add<__half>(const __half* out_grad1, - const __half* out_grad2, - const __half* X_data, - const __half* vars, - const __half* means, - const __half* gamma, - __half* gamma_grad, - __half* betta_grad, - __half* inp_grad, - int batch, - int hidden_dim, - cudaStream_t stream[2]) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - LayerNormBackward1<__half><<>>( - out_grad1, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim); - - dim3 grid_dim2(batch); - - if (hidden_dim > 8192 && hidden_dim <= 16384) - threads <<= 1; - else if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 2; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 3; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads / 2); - LayerNormBackward2_fused_add<<>>( - out_grad1, out_grad2, X_data, gamma, vars, means, inp_grad, hidden_dim / 2); -} diff --git a/deepspeed/ops/csrc/transformer/normalize_kernels.hip b/deepspeed/ops/csrc/transformer/normalize_kernels.hip deleted file mode 100644 index 3d1b17c..0000000 --- a/deepspeed/ops/csrc/transformer/normalize_kernels.hip +++ /dev/null @@ -1,2123 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! -#include "hip/hip_runtime.h" -#include "custom_hip_layers.h" - -namespace cg = cooperative_groups; - -/* -Fused bias add, residual (elementwise) add, and normalization layer. - -For FP16, this kernel does not promote to FP32 in order to utilize the 2x throughput for -__half2 instructions, and avoid the conversion overhead (1/8 of __hal2 arithmetic). - -For specific launch constraints, see the launch functions. 
-*/ - -#define NORM_REG (MAX_REGISTERS / 4) - -__global__ void fused_bias_residual_layer_norm(float* vals, - const float* residual, - const float* gamma, - const float* beta, - float epsilon, - bool preLayerNorm, - bool training, - float* vars, - float* means, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id / WARP_SIZE; - - float vals_arr[NORM_REG]; - __shared__ float shr[MAX_WARP_NUM]; - - residual += (row * row_stride); - vals += (row * row_stride); - - float sum = 0.f; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - vals_arr[i] = residual[i * iteration_stride + id]; - sum += vals_arr[i]; - } - if (high_index < row_stride) { - vals_arr[iterations] = residual[high_index]; - sum += vals_arr[iterations]; - iterations++; - } - - for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) shr[gid] = sum; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()]; - -#if !defined(__STOCHASTIC_MODE__) || __CUDA_ARCH__ < 700 - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - sum += g.shfl_down(sum, i); - } - - sum = g.shfl(sum, 0); - float mean = sum / row_stride; - if (training) - if (threadIdx.x == 0) means[row] = mean; - float variance = 0.f; - for (int i = 0; i < iterations; i++) { - vals_arr[i] -= mean; - variance += vals_arr[i] * vals_arr[i]; - } - - for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); } - - if (g.thread_rank() == 0) shr[gid] = variance; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - for (int i = 1; i < 
(iteration_stride >> WARP_SIZE_BITS); i *= 2) { - variance += g.shfl_down(variance, i); - } - variance = g.shfl(variance, 0); - variance /= row_stride; - variance += epsilon; - if (training) - if (threadIdx.x == 0) vars[row] = variance; - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - vals_arr[i] = vals_arr[i] * rsqrtf(variance); - vals_arr[i] = - vals_arr[i] * gamma[i * iteration_stride + id] + beta[i * iteration_stride + id]; - vals[i * iteration_stride + id] = vals_arr[i]; - } - if ((high_index) < row_stride) { - vals_arr[iterations] = vals_arr[iterations] * rsqrtf(variance); - vals_arr[iterations] = vals_arr[iterations] * gamma[high_index] + beta[high_index]; - vals[high_index] = vals_arr[iterations]; - } -} - -__global__ void fused_bias_residual_layer_norm(__half* vals, - const __half* residual, - const __half* gamma, - const __half* beta, - float epsilon, - bool preLayerNorm, - bool training, - __half* vars, - __half* means, - int row_stride) -{ -#ifdef HALF_PRECISION_AVAILABLE - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id >> WARP_SIZE_BITS; - - float2 vals_f[NORM_REG]; - __shared__ float shr[MAX_WARP_NUM]; - - __half2* vals_cast = reinterpret_cast<__half2*>(vals); - const __half2* residual_cast = reinterpret_cast(residual); - - residual_cast += (row * row_stride); - vals_cast += (row * row_stride); - - float sum = 0.f; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - vals_f[i] = __half22float2(residual_cast[i * iteration_stride + id]); - sum += vals_f[i].x; - sum += vals_f[i].y; - } - if ((high_index) < row_stride) { - vals_f[iterations] = __half22float2(residual_cast[high_index]); - sum += vals_f[iterations].x; - sum += 
vals_f[iterations].y; - iterations++; - } - - for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) shr[gid] = sum; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - sum += g.shfl_down(sum, i); - } - sum = g.shfl(sum, 0); - float mean = sum / (row_stride * 2); - - float variance = 0.f; - for (int i = 0; i < iterations; i++) { - vals_f[i].x -= mean; - vals_f[i].y -= mean; - variance += vals_f[i].x * vals_f[i].x; - variance += vals_f[i].y * vals_f[i].y; - } - - for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); } - - if (g.thread_rank() == 0) shr[gid] = variance; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - variance += g.shfl_down(variance, i); - } - variance = g.shfl(variance, 0); - variance /= (row_stride * 2); - variance += epsilon; - - __half2 variance_h = __float2half2_rn(variance); - const __half2* gamma_cast = reinterpret_cast(gamma); - const __half2* beta_cast = reinterpret_cast(beta); - - if (training && threadIdx.x == 0) { - vars[row] = __float2half(variance); - means[row] = __float2half(mean); - } - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - __half2 vals_arr = __float22half2_rn(vals_f[i]); - vals_arr = vals_arr * h2rsqrt(variance_h); - vals_arr = - vals_arr * gamma_cast[i * iteration_stride + id] + beta_cast[i * iteration_stride + id]; - vals_cast[i * iteration_stride + id] = vals_arr; - } - if ((high_index) < row_stride) { - __half2 vals_arr = __float22half2_rn(vals_f[iterations]); - vals_arr = vals_arr * h2rsqrt(variance_h); - vals_arr = vals_arr * gamma_cast[high_index] + 
beta_cast[high_index]; - vals_cast[high_index] = vals_arr; - } -#endif -} - -template -void launch_bias_residual_layer_norm(T* vals, - const T* residual, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - int hidden_dim, - hipStream_t stream, - bool preLayerNorm, - bool training, - T* vars, - T* means); - -template <> -void launch_bias_residual_layer_norm(float* vals, - const float* residual, - const float* gamma, - const float* beta, - float epsilon, - int batch_size, - int hidden_dim, - hipStream_t stream, - bool preLayerNorm, - bool training, - float* vars, - float* means) -{ - int threads = THREADS; - - dim3 grid_dim(batch_size); - - if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 1; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 2; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim(threads); - - hipLaunchKernelGGL(( fused_bias_residual_layer_norm), dim3(grid_dim), dim3(block_dim), 0, stream, - vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, means, hidden_dim); -} - -template <> -void launch_bias_residual_layer_norm<__half>(__half* vals, - const __half* residual, - const __half* gamma, - const __half* beta, - float epsilon, - int batch_size, - int hidden_dim, - hipStream_t stream, - bool preLayerNorm, - bool training, - __half* vars, - __half* means) -{ - int threads = 128; - - dim3 grid_dim(batch_size); - - if (hidden_dim > 8192 && hidden_dim <= 16384) - threads <<= 1; - else if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 2; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 3; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim(threads); - - hipLaunchKernelGGL(( fused_bias_residual_layer_norm), dim3(grid_dim), dim3(block_dim), 0, stream, - vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, means, hidden_dim / 2); -} - -__global__ void 
fused_bias_residual_layer_norm(float* vals, - const float* residual, - const float* gamma, - const float* beta, - float epsilon, - bool preLayerNorm, - bool training, - float* vars, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id / 32; - - float vals_arr[NORM_REG]; - __shared__ float shr[MAX_WARP_NUM]; - - residual += (row * row_stride); - vals += (row * row_stride); - - float sum = 0.f; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - vals_arr[i] = residual[i * iteration_stride + id]; - sum += vals_arr[i]; - } - if ((high_index) < row_stride) { - vals_arr[iterations] = residual[high_index]; - sum += vals_arr[iterations]; - iterations++; - } - - for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) shr[gid] = sum; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()]; - -#if !defined(__STOCHASTIC_MODE__) || __CUDA_ARCH__ < 700 - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - sum += g.shfl_down(sum, i); - } - - sum = g.shfl(sum, 0); - float mean = sum / row_stride; - float variance = 0.f; - for (int i = 0; i < iterations; i++) { - vals_arr[i] -= mean; - variance += vals_arr[i] * vals_arr[i]; - } - - for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); } - - if (g.thread_rank() == 0) shr[gid] = variance; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - variance += g.shfl_down(variance, i); - } - variance = g.shfl(variance, 0); - variance 
/= row_stride; - variance += epsilon; - if (training) - if (threadIdx.x == 0) vars[row] = variance; - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - vals_arr[i] = vals_arr[i] * rsqrtf(variance); - vals_arr[i] = - vals_arr[i] * gamma[i * iteration_stride + id] + beta[i * iteration_stride + id]; - vals[i * iteration_stride + id] = vals_arr[i]; - } - if ((high_index) < row_stride) { - vals_arr[iterations] = vals_arr[iterations] * rsqrtf(variance); - vals_arr[iterations] = vals_arr[iterations] * gamma[high_index] + beta[high_index]; - vals[high_index] = vals_arr[iterations]; - } -} - -__global__ void fused_bias_residual_layer_norm(__half* vals, - const __half* residual, - const __half* gamma, - const __half* beta, - float epsilon, - bool preLayerNorm, - bool training, - __half* vars, - int row_stride) -{ -#ifdef HALF_PRECISION_AVAILABLE - - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id >> WARP_SIZE_BITS; - - float2 vals_f[NORM_REG]; - __shared__ float shr[MAX_WARP_NUM]; - - __half2* vals_cast = reinterpret_cast<__half2*>(vals); - const __half2* residual_cast = reinterpret_cast(residual); - - residual_cast += (row * row_stride); - vals_cast += (row * row_stride); - - float sum = 0.f; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - vals_f[i] = __half22float2(residual_cast[i * iteration_stride + id]); - sum += vals_f[i].x; - sum += vals_f[i].y; - } - if ((high_index) < row_stride) { - vals_f[iterations] = __half22float2(residual_cast[high_index]); - sum += vals_f[iterations].x; - sum += vals_f[iterations].y; - iterations++; - } - - for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) shr[gid] = sum; - - b.sync(); - 
- if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - sum += g.shfl_down(sum, i); - } - sum = g.shfl(sum, 0); - float mean = sum / (row_stride * 2); - - float variance = 0.f; - for (int i = 0; i < iterations; i++) { - vals_f[i].x -= mean; - vals_f[i].y -= mean; - variance += vals_f[i].x * vals_f[i].x; - variance += vals_f[i].y * vals_f[i].y; - } - - for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); } - - if (g.thread_rank() == 0) shr[gid] = variance; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - variance += g.shfl_down(variance, i); - } - variance = g.shfl(variance, 0); - variance /= (row_stride * 2); - variance += epsilon; - - __half2 variance_h = __float2half2_rn(variance); - const __half2* gamma_cast = reinterpret_cast(gamma); - const __half2* beta_cast = reinterpret_cast(beta); - - if (training && threadIdx.x == 0) vars[row] = __float2half(variance); - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - __half2 vals_arr = __float22half2_rn(vals_f[i]); - vals_arr = vals_arr * h2rsqrt(variance_h); - vals_arr = - vals_arr * gamma_cast[i * iteration_stride + id] + beta_cast[i * iteration_stride + id]; - vals_cast[i * iteration_stride + id] = vals_arr; - } - if ((high_index) < row_stride) { - __half2 vals_arr = __float22half2_rn(vals_f[iterations]); - vals_arr = vals_arr * h2rsqrt(variance_h); - vals_arr = vals_arr * gamma_cast[high_index] + beta_cast[high_index]; - vals_cast[high_index] = vals_arr; - } -#endif -} - -template -void launch_bias_residual_layer_norm(T* vals, - const T* residual, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - 
int hidden_dim, - hipStream_t stream, - bool preLayerNorm, - bool training, - T* vars); - -/* -To tune this launch the following restrictions must be met: - -For float: -row_stride == hidden_size -threads * iterations == row_stride -threads is in [32, 64, 128, 256, 512, 1024] - -For half: -row_stride == hidden_size / 2 -threads * iterations == row_stride -threads is in [32, 64, 128, 256, 512, 1024] - -*/ - -template <> -void launch_bias_residual_layer_norm(float* vals, - const float* residual, - const float* gamma, - const float* beta, - float epsilon, - int batch_size, - int hidden_dim, - hipStream_t stream, - bool preLayerNorm, - bool training, - float* vars) -{ - int threads = THREADS; - - dim3 grid_dim(batch_size); - - // There are some limitations to call below functions, now just enumerate the situations. - - if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 1; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 2; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim(threads); - - hipLaunchKernelGGL(( fused_bias_residual_layer_norm), dim3(grid_dim), dim3(block_dim), 0, stream, - vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, hidden_dim); -} - -template <> -void launch_bias_residual_layer_norm<__half>(__half* vals, - const __half* residual, - const __half* gamma, - const __half* beta, - float epsilon, - int batch_size, - int hidden_dim, - hipStream_t stream, - bool preLayerNorm, - bool training, - __half* vars) -{ - int threads = 128; - - dim3 grid_dim(batch_size); - - // There are some limitations to call below functions, now just enumerate the situations. 
- - if (hidden_dim > 8192 && hidden_dim <= 16384) - threads <<= 1; - else if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 2; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 3; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim(threads); - hipLaunchKernelGGL(( fused_bias_residual_layer_norm), dim3(grid_dim), dim3(block_dim), 0, stream, - vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, hidden_dim / 2); -} - -/* Normalize Gamma & Betta gradients - * Compute gradients using either X_hat or - * normalize input (invertible). - * Combine transpose with gradients computation. - */ - -template -__global__ void LayerNormBackward1(const T* __restrict__ out_grad, - const T* __restrict__ vals_hat, - const T* __restrict__ gamma, - const T* __restrict__ betta, - T* __restrict__ gamma_grad, - T* __restrict__ betta_grad, - int rows, - int width, - bool invertible) -{ - __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1]; - __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1]; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int idx = blockDim.x * blockIdx.x + threadIdx.x; - int offset = threadIdx.y * width + idx; - int y_stride = width * TILE_DIM; - - float betta_reg = (invertible ? (float)betta[idx] : 0.0f); - float gamma_reg = (float)gamma[idx]; - - // Loop across matrix height - float betta_tmp = 0; - float gamma_tmp = 0; - for (int r = threadIdx.y; r < rows; r += TILE_DIM) { - float grad = (float)out_grad[offset]; - float val = (invertible ? ((float)vals_hat[offset] - betta_reg) / gamma_reg - : (float)vals_hat[offset]); - betta_tmp += grad; - gamma_tmp += (val * grad); - - offset += y_stride; - } - - betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp; - gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp; - - __syncthreads(); - - // Sum the shared buffer. 
- float s1 = betta_buffer[threadIdx.y][threadIdx.x]; - float s2 = gamma_buffer[threadIdx.y][threadIdx.x]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < TILE_DIM; i <<= 1) { - s1 += g.shfl_down(s1, i); - s2 += g.shfl_down(s2, i); - } - - if (threadIdx.x == 0) { - int pos = blockIdx.x * TILE_DIM + threadIdx.y; - betta_grad[pos] = s1; - gamma_grad[pos] = s2; - } -} - -/* Normalize Gamma & Betta gradients - * Compute gradients using the input to - * the normalize. - * Combine transpose with gradients computation. - */ - -template -__global__ void LayerNormBackward1(const T* __restrict__ out_grad, - const T* __restrict__ X_data, - const T* __restrict__ vars, - const T* __restrict__ means, - T* __restrict__ gamma_grad, - T* __restrict__ betta_grad, - int rows, - int width) -{ - __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1]; - __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1]; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int idx = blockDim.x * blockIdx.x + threadIdx.x; - int offset = threadIdx.y * width + idx; - int y_stride = width * TILE_DIM; - - int pos = blockIdx.x * TILE_DIM + threadIdx.y; - // Loop across matrix height - - float betta_tmp = 0; - float gamma_tmp = 0; - for (int r = threadIdx.y; r < rows; r += TILE_DIM) { - float grad = (float)out_grad[offset]; - float val = (float)X_data[offset]; - val = (val - (float)means[r]) * rsqrtf((float)vars[r]); - betta_tmp += grad; - gamma_tmp += (val * grad); - - offset += y_stride; - } - - betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp; - gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp; - - __syncthreads(); - - // Sum the shared buffer. 
- float s1 = betta_buffer[threadIdx.y][threadIdx.x]; - float s2 = gamma_buffer[threadIdx.y][threadIdx.x]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < TILE_DIM; i <<= 1) { - s1 += g.shfl_down(s1, i); - s2 += g.shfl_down(s2, i); - } - - if (threadIdx.x == 0) { - betta_grad[pos] = s1; - gamma_grad[pos] = s2; - } -} -/* - -/* Backward Normalize (Input-Gradient) - * Using the means and variances from the input - * This type of backward is invertible! - * We do the backward using the X_hat (X - u) / sqrt(variance) or the output of Normalization. - */ - -__global__ void LayerNormBackward2(const float* out_grad, - const float* vals_hat, - const float* gamma, - const float* betta, - const float* vars, - float* inp_grad, - bool invertible, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int wid = id / WARP_SIZE; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - __shared__ float partialSum[MAX_WARP_NUM]; - - out_grad += (row * row_stride); - vals_hat += (row * row_stride); - inp_grad += (row * row_stride); - - float vals_arr[NORM_REG]; - float vals_hat_arr[NORM_REG]; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - float gamma_reg = gamma[i * iteration_stride + id]; - vals_arr[i] = out_grad[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; - vals_hat_arr[i] = - (invertible ? (vals_hat[i * iteration_stride + id] - betta[i * iteration_stride + id]) / - gamma_reg - : vals_hat[i * iteration_stride + id]); - } - if ((high_index) < row_stride) { - float gamma_reg = gamma[high_index]; - vals_arr[iterations] = out_grad[high_index]; - vals_arr[iterations] *= gamma_reg; - vals_hat_arr[iterations] = - (invertible ? 
(vals_hat[high_index] - betta[high_index]) / gamma_reg - : vals_hat[high_index]); - iterations++; - } - - float var_reg = vars[row]; - - float sum = 0; - for (int i = 0; i < iterations; i++) { - sum += vals_hat_arr[i] * vals_arr[i] * - sqrtf(var_reg); // dval_hat = gamma * (x - u) * out_grad - vals_arr[i] *= rsqrtf(var_reg); // dvar_inv = gamma * out_grad / sqrt(var) - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= row_stride; - - for (int i = 0; i < iterations; i++) { vals_arr[i] += ((-sum * vals_hat_arr[i]) / var_reg); } - - sum = 0; - for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= row_stride; - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) inp_grad[i * iteration_stride + id] = (vals_arr[i] - sum); - if ((high_index) < row_stride) inp_grad[high_index] = (vals_arr[iterations] - sum); -} - -__global__ void LayerNormBackward2(const __half* out_grad, - const __half* vals_hat, - const __half* gamma, - const __half* betta, - const __half* vars, - __half* inp_grad, - bool invertible, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = 
blockIdx.x; - int id = threadIdx.x; - int wid = id / WARP_SIZE; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - __shared__ float partialSum[MAX_WARP_NUM]; - - __half2 vals_arr[NORM_REG]; - float2 vals_arr_f[NORM_REG]; - __half2 vals_hat_arr[NORM_REG]; - - __half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad); - const __half2* out_grad_h = reinterpret_cast(out_grad); - const __half2* vals_hat_h = reinterpret_cast(vals_hat); - - inp_grad_h += (row * row_stride); - out_grad_h += (row * row_stride); - vals_hat_h += (row * row_stride); - - const __half2* gamma_h = reinterpret_cast(gamma); - const __half2* betta_h = (invertible ? reinterpret_cast(betta) : nullptr); - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - __half2 gamma_reg = gamma_h[i * iteration_stride + id]; - vals_arr[i] = out_grad_h[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; - vals_hat_arr[i] = - (invertible - ? (vals_hat_h[i * iteration_stride + id] - betta_h[i * iteration_stride + id]) / - gamma_reg - : vals_hat_h[i * iteration_stride + id]); - } - if ((high_index) < row_stride) { - __half2 gamma_reg = gamma_h[high_index]; - vals_arr[iterations] = out_grad_h[high_index]; - vals_arr[iterations] *= gamma_reg; - vals_hat_arr[iterations] = - (invertible ? 
(vals_hat_h[high_index] - betta_h[high_index]) / gamma_reg - : vals_hat_h[high_index]); - iterations++; - } - __half var_h = vars[row]; - __half2 var_reg = __halves2half2(var_h, var_h); - - float sum = 0.f; - for (int i = 0; i < iterations; i++) { - __half2 result_h = (vals_hat_arr[i] * vals_arr[i] * h2sqrt(var_reg)); - float2 result_f = __half22float2(result_h); - sum += result_f.x; - sum += result_f.y; - vals_arr[i] *= h2rsqrt(var_reg); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - __half2 sum_h = __float2half2_rn(sum); - - for (int i = 0; i < iterations; i++) { - __half2 temp = ((-sum_h * vals_hat_arr[i]) / (var_reg)); - vals_arr_f[i] = __half22float2(vals_arr[i]); - float2 temp_f = __half22float2(temp); - vals_arr_f[i].x += temp_f.x; - vals_arr_f[i].y += temp_f.y; - } - sum = 0.f; - - for (int i = 0; i < iterations; i++) { - sum += (vals_arr_f[i].x); - sum += (vals_arr_f[i].y); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - vals_arr_f[i].x -= sum; - vals_arr_f[i].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[i]); - - inp_grad_h[i * iteration_stride + id] = temp; - } - if ((high_index) < row_stride) { - vals_arr_f[iterations].x -= sum; - 
vals_arr_f[iterations].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[iterations]); - - inp_grad_h[high_index] = temp; - } -} - -template <> -void launch_layerNorm_backward(const float* out_grad, - const float* vals_hat, - const float* vars, - const float* gamma, - float* gamma_grad, - float* betta_grad, - float* inp_grad, - int batch, - int hidden_dim, - hipStream_t stream[2], - bool invertible, - const float* betta) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - hipLaunchKernelGGL(( LayerNormBackward1), dim3(grid_dim), dim3(block_dim), 0, stream[0], - out_grad, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible); - - dim3 grid_dim2(batch); - - if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 1; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 2; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads); - - hipLaunchKernelGGL(( LayerNormBackward2), dim3(grid_dim2), dim3(block_dim2), 0, stream[1], - out_grad, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim); -} - -template <> -void launch_layerNorm_backward<__half>(const __half* out_grad, - const __half* vals_hat, - const __half* vars, - const __half* gamma, - __half* gamma_grad, - __half* betta_grad, - __half* inp_grad, - int batch, - int hidden_dim, - hipStream_t stream[2], - bool invertible, - const __half* betta) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - //hipLaunchKernelGGL(( LayerNormBackward1<__half>), dim3(grid_dim), dim3(block_dim), 0, stream[0], - // out_grad, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible); - - dim3 grid_dim2(batch); - - if (hidden_dim > 8192 && hidden_dim <= 16384) - threads <<= 1; - else if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 2; - else if (hidden_dim > 32768 && 
hidden_dim <= 65536) - threads <<= 3; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads / 2); - - hipLaunchKernelGGL(( LayerNormBackward2), dim3(grid_dim2), dim3(block_dim2), 0, stream[1], - out_grad, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim / 2); -} - -/* Backward Normalize (Input-Gradient) - * Using the means and variances from the input - * This type of backward is not invertible! - * We do the backward using the input (X) - */ - -__global__ void LayerNormBackward2(const float* out_grad, - const float* X_vals, - const float* gamma, - const float* vars, - const float* means, - float* inp_grad, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int wid = id >> WARP_SIZE_BITS; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - __shared__ float partialSum[MAX_WARP_NUM]; - - out_grad += (row * row_stride); - X_vals += (row * row_stride); - inp_grad += (row * row_stride); - - float vals_arr[NORM_REG]; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - float gamma_reg = gamma[i * iteration_stride + id]; - vals_arr[i] = out_grad[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; - } - if ((high_index) < row_stride) { - float gamma_reg = gamma[high_index]; - vals_arr[iterations] = out_grad[high_index]; - vals_arr[iterations] *= gamma_reg; - iterations++; - } - - float var_reg = vars[row]; - float mean_reg = means[row]; - - float sum = 0; - float xu[NORM_REG]; - for (int i = 0; i < iterations; i++) { - xu[i] = (X_vals[i * iteration_stride + id] - mean_reg); - sum += vals_arr[i] * xu[i]; - vals_arr[i] *= rsqrtf(var_reg); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if 
(g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= row_stride; - - for (int i = 0; i < iterations; i++) { - vals_arr[i] += (-sum * xu[i] * rsqrtf(var_reg) / (var_reg)); - } - - sum = 0; - for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= row_stride; - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) inp_grad[i * iteration_stride + id] = (vals_arr[i] - sum); - if ((high_index) < row_stride) inp_grad[high_index] = (vals_arr[iterations] - sum); -} - -__global__ void LayerNormBackward2(const __half* out_grad, - const __half* X_vals, - const __half* gamma, - const __half* vars, - const __half* means, - __half* inp_grad, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int wid = id >> WARP_SIZE_BITS; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - - __shared__ float partialSum[MAX_WARP_NUM]; - - __half2 vals_arr[NORM_REG]; - float2 vals_arr_f[NORM_REG]; - __half2 xu[NORM_REG]; - - __half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad); - const __half2* out_grad_h = reinterpret_cast(out_grad); - const __half2* vals_hat_h = reinterpret_cast(X_vals); - - inp_grad_h += (row * row_stride); - 
out_grad_h += (row * row_stride); - vals_hat_h += (row * row_stride); - - const __half2* gamma_h = reinterpret_cast(gamma); - int high_index = iterations * iteration_stride + id; - - __half mean_h = means[row]; - __half2 mean_reg = __halves2half2(mean_h, mean_h); -#pragma unroll - for (int i = 0; i < iterations; i++) { - __half2 gamma_reg = gamma_h[i * iteration_stride + id]; - vals_arr[i] = out_grad_h[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; // out_grad * gamma - xu[i] = (vals_hat_h[i * iteration_stride + id] - mean_reg); - } - if ((high_index) < row_stride) { - __half2 gamma_reg = gamma_h[high_index]; - vals_arr[iterations] = out_grad_h[high_index]; - vals_arr[iterations] *= gamma_reg; // out_grad * gamma - xu[iterations] = (vals_hat_h[high_index] - mean_reg); - iterations++; - } - __half var_h = vars[row]; - __half2 var_reg = __halves2half2(var_h, var_h); - - float sum = 0.f; - for (int i = 0; i < iterations; i++) { - __half2 result_h = (xu[i] * vals_arr[i]); - float2 result_f = __half22float2(result_h); - sum += result_f.x; - sum += result_f.y; - vals_arr[i] *= h2rsqrt(var_reg); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - __half2 sum_h = __float2half2_rn(sum); - - for (int i = 0; i < iterations; i++) { - __half2 xu_grad = ((-sum_h * xu[i] * h2rsqrt(var_reg)) / (var_reg)); - vals_arr_f[i] = __half22float2(vals_arr[i]); - float2 xu_grad_f = __half22float2(xu_grad); - vals_arr_f[i].x += xu_grad_f.x; - vals_arr_f[i].y += xu_grad_f.y; - } - - sum = 0.f; - for (int i = 0; i < iterations; i++) { - sum += (vals_arr_f[i].x); - sum += (vals_arr_f[i].y); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) 
{ sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - vals_arr_f[i].x -= sum; - vals_arr_f[i].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[i]); - inp_grad_h[i * iteration_stride + id] = temp; - } - if ((high_index) < row_stride) { - vals_arr_f[iterations].x -= sum; - vals_arr_f[iterations].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[iterations]); - inp_grad_h[high_index] = temp; - } -} - -template <> -void launch_layerNorm_backward(const float* out_grad, - const float* X_data, - const float* vars, - const float* means, - const float* gamma, - float* gamma_grad, - float* betta_grad, - float* inp_grad, - int batch, - int hidden_dim, - hipStream_t stream[2]) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - hipLaunchKernelGGL(( LayerNormBackward1), dim3(grid_dim), dim3(block_dim), 0, stream[0], - out_grad, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim); - - dim3 grid_dim2(batch); - - if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 1; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 2; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads); - hipLaunchKernelGGL(( LayerNormBackward2), dim3(grid_dim2), dim3(block_dim2), 0, stream[1], - out_grad, X_data, gamma, vars, means, inp_grad, hidden_dim); -} - -template <> -void launch_layerNorm_backward<__half>(const __half* out_grad, - const __half* X_data, - const __half* vars, - const __half* means, - const __half* gamma, - __half* gamma_grad, - 
__half* betta_grad, - __half* inp_grad, - int batch, - int hidden_dim, - hipStream_t stream[2]) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - hipLaunchKernelGGL(( LayerNormBackward1<__half>), dim3(grid_dim), dim3(block_dim), 0, stream[0], - out_grad, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim); - - dim3 grid_dim2(batch); - - if (hidden_dim > 8192 && hidden_dim <= 16384) - threads <<= 1; - else if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 2; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 3; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads / 2); - hipLaunchKernelGGL(( LayerNormBackward2), dim3(grid_dim2), dim3(block_dim2), 0, stream[1], - out_grad, X_data, gamma, vars, means, inp_grad, hidden_dim / 2); -} - -template -__global__ void LayerNormBackward1_fused_add(const T* __restrict__ out_grad1, - const T* __restrict__ out_grad2, - const T* __restrict__ vals_hat, - const T* __restrict__ gamma, - const T* __restrict__ betta, - T* __restrict__ gamma_grad, - T* __restrict__ betta_grad, - int rows, - int width, - bool invertible) -{ - __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1]; - __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1]; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int idx = blockDim.x * blockIdx.x + threadIdx.x; - int offset = threadIdx.y * width + idx; - int y_stride = width * TILE_DIM; - - float betta_reg = (invertible ? (float)betta[idx] : 0.0f); - float gamma_reg = (float)gamma[idx]; - - // Loop across matrix height - float betta_tmp = 0; - float gamma_tmp = 0; - for (int r = threadIdx.y; r < rows; r += TILE_DIM) { - float grad = (float)out_grad1[offset] + (float)out_grad2[offset]; - float val = (invertible ? 
((float)vals_hat[offset] - betta_reg) / gamma_reg - : (float)vals_hat[offset]); - betta_tmp += grad; - gamma_tmp += (val * grad); - - offset += y_stride; - } - - betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp; - gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp; - - __syncthreads(); - - // Sum the shared buffer. - float s1 = betta_buffer[threadIdx.y][threadIdx.x]; - float s2 = gamma_buffer[threadIdx.y][threadIdx.x]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < TILE_DIM; i <<= 1) { - s1 += g.shfl_down(s1, i); - s2 += g.shfl_down(s2, i); - } - - if (threadIdx.x == 0) { - int pos = blockIdx.x * TILE_DIM + threadIdx.y; - betta_grad[pos] = s1; - gamma_grad[pos] = s2; - } -} - -template -__global__ void LayerNormBackward1_fused_add(const T* __restrict__ out_grad1, - const T* __restrict__ out_grad2, - const T* __restrict__ X_data, - const T* __restrict__ vars, - const T* __restrict__ means, - T* __restrict__ gamma_grad, - T* __restrict__ betta_grad, - int rows, - int width) -{ - __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1]; - __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1]; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int idx = blockDim.x * blockIdx.x + threadIdx.x; - int offset = threadIdx.y * width + idx; - int y_stride = width * TILE_DIM; - - int pos = blockIdx.x * TILE_DIM + threadIdx.y; - // Loop across matrix height - - float betta_tmp = 0; - float gamma_tmp = 0; - for (int r = threadIdx.y; r < rows; r += TILE_DIM) { - float grad = (float)out_grad1[offset] + (float)out_grad2[offset]; - float val = (float)X_data[offset]; - val = (val - (float)means[r]) * rsqrtf((float)vars[r]); - betta_tmp += grad; - gamma_tmp += (val * grad); - - offset += y_stride; - } - - betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp; - gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp; - - __syncthreads(); - - // Sum the shared buffer. 
- float s1 = betta_buffer[threadIdx.y][threadIdx.x]; - float s2 = gamma_buffer[threadIdx.y][threadIdx.x]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < TILE_DIM; i <<= 1) { - s1 += g.shfl_down(s1, i); - s2 += g.shfl_down(s2, i); - } - - if (threadIdx.x == 0) { - betta_grad[pos] = s1; - gamma_grad[pos] = s2; - } -} - -__global__ void LayerNormBackward2_fused_add(const float* out_grad1, - const float* out_grad2, - const float* vals_hat, - const float* gamma, - const float* betta, - const float* vars, - float* inp_grad, - bool invertible, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int wid = id / WARP_SIZE; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - __shared__ float partialSum[MAX_WARP_NUM]; - - out_grad1 += (row * row_stride); - out_grad2 += (row * row_stride); - vals_hat += (row * row_stride); - inp_grad += (row * row_stride); - - float vals_arr[NORM_REG]; - float vals_hat_arr[NORM_REG]; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - float gamma_reg = gamma[i * iteration_stride + id]; - vals_arr[i] = out_grad1[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; - vals_hat_arr[i] = - (invertible ? (vals_hat[i * iteration_stride + id] - betta[i * iteration_stride + id]) / - gamma_reg - : vals_hat[i * iteration_stride + id]); - } - if ((high_index) < row_stride) { - float gamma_reg = gamma[high_index]; - vals_arr[iterations] = out_grad1[high_index]; - vals_arr[iterations] *= gamma_reg; - vals_hat_arr[iterations] = - (invertible ? 
(vals_hat[high_index] - betta[high_index]) / gamma_reg - : vals_hat[high_index]); - iterations++; - } - - float var_reg = vars[row]; - - float sum = 0; - for (int i = 0; i < iterations; i++) { - sum += vals_hat_arr[i] * vals_arr[i] * sqrtf(var_reg); - vals_arr[i] *= rsqrtf(var_reg); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= row_stride; - - for (int i = 0; i < iterations; i++) { vals_arr[i] += ((-sum * vals_hat_arr[i]) / var_reg); } - - sum = 0; - for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= row_stride; - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) - inp_grad[i * iteration_stride + id] = - (vals_arr[i] - sum) + out_grad2[i * iteration_stride + id]; - if ((high_index) < row_stride) - inp_grad[high_index] = (vals_arr[iterations] - sum) + out_grad2[high_index]; -} - -__global__ void LayerNormBackward2_fused_add(const __half* out_grad1, - const __half* out_grad2, - const __half* vals_hat, - const __half* gamma, - const __half* betta, - const __half* vars, - __half* inp_grad, - bool invertible, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - 
- int row = blockIdx.x; - int id = threadIdx.x; - int wid = id / WARP_SIZE; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - __shared__ float partialSum[MAX_WARP_NUM]; - - __half2 vals_arr[NORM_REG]; - float2 vals_arr_f[NORM_REG]; - __half2 vals_hat_arr[NORM_REG]; - - // float2 result[iterations]; - - __half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad); - const __half2* out_grad_h1 = reinterpret_cast(out_grad1); - const __half2* out_grad_h2 = reinterpret_cast(out_grad2); - const __half2* vals_hat_h = reinterpret_cast(vals_hat); - - inp_grad_h += (row * row_stride); - out_grad_h1 += (row * row_stride); - out_grad_h2 += (row * row_stride); - vals_hat_h += (row * row_stride); - - const __half2* gamma_h = reinterpret_cast(gamma); - const __half2* betta_h = (invertible ? reinterpret_cast(betta) : nullptr); - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - __half2 gamma_reg = gamma_h[i * iteration_stride + id]; - vals_arr[i] = out_grad_h1[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; // out_grad * gamma - vals_hat_arr[i] = - (invertible - ? (vals_hat_h[i * iteration_stride + id] - betta_h[i * iteration_stride + id]) / - gamma_reg - : vals_hat_h[i * iteration_stride + id]); - } - if ((high_index) < row_stride) { - __half2 gamma_reg = gamma_h[high_index]; - vals_arr[iterations] = out_grad_h1[high_index]; - vals_arr[iterations] *= gamma_reg; // out_grad * gamma - vals_hat_arr[iterations] = - (invertible ? 
(vals_hat_h[high_index] - betta_h[high_index]) / gamma_reg - : vals_hat_h[high_index]); - iterations++; - } - __half var_h = vars[row]; - __half2 var_reg = __halves2half2(var_h, var_h); - - float sum = 0.f; - for (int i = 0; i < iterations; i++) { - __half2 result_h = (vals_hat_arr[i] * vals_arr[i] * h2sqrt(var_reg)); - float2 result_f = __half22float2(result_h); - sum += result_f.x; - sum += result_f.y; - vals_arr[i] *= h2rsqrt(var_reg); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - __half2 sum_h = __float2half2_rn(sum); - - for (int i = 0; i < iterations; i++) { - __half2 temp = ((-sum_h * vals_hat_arr[i]) / (var_reg)); - vals_arr_f[i] = __half22float2(vals_arr[i]); - float2 temp_f = __half22float2(temp); - vals_arr_f[i].x += temp_f.x; - vals_arr_f[i].y += temp_f.y; - } - sum = 0.f; - for (int i = 0; i < iterations; i++) { - sum += (vals_arr_f[i].x); - sum += (vals_arr_f[i].y); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - vals_arr_f[i].x -= sum; - vals_arr_f[i].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[i]); - - inp_grad_h[i * iteration_stride + id] = temp + out_grad_h2[i * iteration_stride + id]; - } - if ((high_index) < row_stride) { - 
vals_arr_f[iterations].x -= sum; - vals_arr_f[iterations].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[iterations]); - - inp_grad_h[high_index] = temp + out_grad_h2[high_index]; - } -} - -template <> -void launch_layerNorm_backward_fused_add(const float* out_grad1, - const float* out_grad2, - const float* vals_hat, - const float* vars, - const float* gamma, - float* gamma_grad, - float* betta_grad, - float* inp_grad, - int batch, - int hidden_dim, - hipStream_t stream[2], - bool invertible, - const float* betta) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - hipLaunchKernelGGL(( LayerNormBackward1), dim3(grid_dim), dim3(block_dim), 0, stream[0], - out_grad1, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible); - - dim3 grid_dim2(batch); - - if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 1; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 2; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads); - hipLaunchKernelGGL(( LayerNormBackward2_fused_add), dim3(grid_dim2), dim3(block_dim2), 0, stream[1], - out_grad1, out_grad2, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim); -} - -template <> -void launch_layerNorm_backward_fused_add<__half>(const __half* out_grad1, - const __half* out_grad2, - const __half* vals_hat, - const __half* vars, - const __half* gamma, - __half* gamma_grad, - __half* betta_grad, - __half* inp_grad, - int batch, - int hidden_dim, - hipStream_t stream[2], - bool invertible, - const __half* betta) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - hipLaunchKernelGGL(( LayerNormBackward1<__half>), dim3(grid_dim), dim3(block_dim), 0, stream[0], - out_grad1, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible); - - dim3 grid_dim2(batch); - - if (hidden_dim > 
8192 && hidden_dim <= 16384) - threads <<= 1; - else if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 2; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 3; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads / 2); - hipLaunchKernelGGL(( LayerNormBackward2_fused_add), dim3(grid_dim2), dim3(block_dim2), 0, stream[1], - out_grad1, out_grad2, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim / 2); -} - -/* Backward Normalize (Input-Gradient) - * Using the means and variances from the input - * This type of backward is not invertible! - * We do the backward using the input (X) - */ - -__global__ void LayerNormBackward2_fused_add(const float* out_grad1, - const float* out_grad2, - const float* X_vals, - const float* gamma, - const float* vars, - const float* means, - float* inp_grad, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int wid = id / WARP_SIZE; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - __shared__ float partialSum[MAX_WARP_NUM]; - - float vals_arr[NORM_REG]; - float vals_hat_arr[NORM_REG]; - - out_grad1 += (row * row_stride); - out_grad2 += (row * row_stride); - X_vals += (row * row_stride); - inp_grad += (row * row_stride); - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - float gamma_reg = gamma[i * iteration_stride + id]; - vals_arr[i] = out_grad1[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; - vals_hat_arr[i] = X_vals[i * iteration_stride + id]; - } - if ((high_index) < row_stride) { - float gamma_reg = gamma[high_index]; - vals_arr[iterations] = out_grad1[high_index]; - vals_arr[iterations] *= gamma_reg; - vals_hat_arr[iterations] = X_vals[high_index]; - 
iterations++; - } - - float var_reg = vars[row]; - float mean_reg = means[row]; - - float sum = 0; - float xu[NORM_REG]; - for (int i = 0; i < iterations; i++) { - xu[i] = (vals_hat_arr[i] - mean_reg); - sum += vals_arr[i] * xu[i]; - vals_arr[i] *= rsqrtf(var_reg); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= row_stride; - - for (int i = 0; i < iterations; i++) { - vals_arr[i] += (-sum * xu[i] * rsqrtf(var_reg) / (var_reg)); - } - - sum = 0; - for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= row_stride; - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) - inp_grad[i * iteration_stride + id] = - (vals_arr[i] - sum) + out_grad2[i * iteration_stride + id]; - if ((high_index) < row_stride) - inp_grad[high_index] = (vals_arr[iterations] - sum) + out_grad2[high_index]; -} - -__global__ void LayerNormBackward2_fused_add(const __half* out_grad1, - const __half* out_grad2, - const __half* X_vals, - const __half* gamma, - const __half* vars, - const __half* means, - __half* inp_grad, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - 
int id = threadIdx.x; - int wid = id / WARP_SIZE; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - - __shared__ float partialSum[MAX_WARP_NUM]; - - __half2 vals_arr[NORM_REG]; - float2 vals_arr_f[NORM_REG]; - __half2 vals_hat_arr[NORM_REG]; - - __half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad); - const __half2* out_grad_h1 = reinterpret_cast(out_grad1); - const __half2* out_grad_h2 = reinterpret_cast(out_grad2); - const __half2* vals_hat_h = reinterpret_cast(X_vals); - - out_grad_h1 += (row * row_stride); - out_grad_h2 += (row * row_stride); - inp_grad_h += (row * row_stride); - vals_hat_h += (row * row_stride); - - const __half2* gamma_h = reinterpret_cast(gamma); - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - __half2 gamma_reg = gamma_h[i * iteration_stride + id]; - vals_arr[i] = out_grad_h1[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; // out_grad * gamma - vals_hat_arr[i] = vals_hat_h[i * iteration_stride + id]; - } - if ((high_index) < row_stride) { - __half2 gamma_reg = gamma_h[high_index]; - vals_arr[iterations] = out_grad_h1[high_index]; - vals_arr[iterations] *= gamma_reg; // out_grad * gamma - vals_hat_arr[iterations] = vals_hat_h[high_index]; - iterations++; - } - - __half mean_h = means[row]; - __half var_h = vars[row]; - __half2 var_reg = __halves2half2(var_h, var_h); - __half2 mean_reg = __halves2half2(mean_h, mean_h); - __half2 xu[NORM_REG]; - - float sum = 0.f; - for (int i = 0; i < iterations; i++) { - xu[i] = (vals_hat_arr[i] - mean_reg); - __half2 result_h = (xu[i] * vals_arr[i]); - float2 result_f = __half22float2(result_h); - sum += result_f.x; - sum += result_f.y; - vals_arr[i] *= h2rsqrt(var_reg); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - 
__syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - __half2 sum_h = __float2half2_rn(sum); - - for (int i = 0; i < iterations; i++) { - __half2 xu_grad = ((-sum_h * xu[i] * h2rsqrt(var_reg)) / (var_reg)); - vals_arr_f[i] = __half22float2(vals_arr[i]); - float2 xu_grad_f = __half22float2(xu_grad); - vals_arr_f[i].x += xu_grad_f.x; - vals_arr_f[i].y += xu_grad_f.y; - } - - sum = 0.f; - for (int i = 0; i < iterations; i++) { - sum += (vals_arr_f[i].x); - sum += (vals_arr_f[i].y); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - vals_arr_f[i].x -= sum; - vals_arr_f[i].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[i]); - inp_grad_h[i * iteration_stride + id] = temp + out_grad_h2[i * iteration_stride + id]; - } - if ((high_index) < row_stride) { - vals_arr_f[iterations].x -= sum; - vals_arr_f[iterations].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[iterations]); - inp_grad_h[high_index] = temp + out_grad_h2[high_index]; - } -} - -template <> -void launch_layerNorm_backward_fused_add(const float* out_grad1, - const float* out_grad2, - const float* X_data, - const float* vars, - const float* means, - const float* gamma, - float* gamma_grad, - float* betta_grad, - float* inp_grad, - int batch, - int hidden_dim, - hipStream_t stream[2]) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - hipLaunchKernelGGL(( LayerNormBackward1), dim3(grid_dim), dim3(block_dim), 
0, stream[0], - out_grad1, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim); - - dim3 grid_dim2(batch); - - if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 1; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 2; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads); - hipLaunchKernelGGL(( LayerNormBackward2_fused_add), dim3(grid_dim2), dim3(block_dim2), 0, stream[1], - out_grad1, out_grad2, X_data, gamma, vars, means, inp_grad, hidden_dim); -} - -template <> -void launch_layerNorm_backward_fused_add<__half>(const __half* out_grad1, - const __half* out_grad2, - const __half* X_data, - const __half* vars, - const __half* means, - const __half* gamma, - __half* gamma_grad, - __half* betta_grad, - __half* inp_grad, - int batch, - int hidden_dim, - hipStream_t stream[2]) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - hipLaunchKernelGGL(( LayerNormBackward1<__half>), dim3(grid_dim), dim3(block_dim), 0, stream[0], - out_grad1, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim); - - dim3 grid_dim2(batch); - - if (hidden_dim > 8192 && hidden_dim <= 16384) - threads <<= 1; - else if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 2; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 3; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads / 2); - hipLaunchKernelGGL(( LayerNormBackward2_fused_add), dim3(grid_dim2), dim3(block_dim2), 0, stream[1], - out_grad1, out_grad2, X_data, gamma, vars, means, inp_grad, hidden_dim / 2); -} diff --git a/deepspeed/ops/csrc/transformer/softmax_kernels.cu b/deepspeed/ops/csrc/transformer/softmax_kernels.cu deleted file mode 100644 index 34487cb..0000000 --- a/deepspeed/ops/csrc/transformer/softmax_kernels.cu +++ /dev/null @@ -1,595 +0,0 @@ -#include -#include 
"custom_cuda_layers.h" -#include "general_kernels.h" - -namespace cg = cooperative_groups; - -dim3 get_attn_softmax_grid(int batch_size, int heads, int sequence_length, int threads) -{ - int seq_length4 = sequence_length / 4; - int block_compute_size = - (seq_length4 < threads ? (int)pow(2.0, floor(log2((float)(threads / seq_length4)))) : 1); - // Note that the Y and Z dimensions are limited to 65535, while X is basically unlimited: - // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications - // The batch size is typically relatively small, while the sequence length could potentially be - // arbitrarily large. We therefore place the batch size second to avoid hitting the Y limit. - unsigned x = heads * sequence_length / block_compute_size; - unsigned y = batch_size; - return {x, y}; -} - -// Fused attention + softmax -template -__global__ void attn_softmax(float* vals, - const float* attn_mask, - int heads, - int seq_length, - int iterations) -{ - __shared__ float partialSum[MAX_WARP_NUM]; - - int warp_num = blockDim.x >> WARP_SIZE_BITS; - - int iteration_stride = blockDim.x; - int block_width = blockStride * seq_length; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int batch = blockIdx.y; - int row = blockIdx.x; - int max_threads_in_sequence = std::max(seq_length, tbSeq); - int seq_lane = threadIdx.x % max_threads_in_sequence; - - int data_offset = batch * (gridDim.x * block_width) + row * block_width + - (threadIdx.x / max_threads_in_sequence) * seq_length; - int mask_offset = batch * seq_length; - - int wid = threadIdx.x >> WARP_SIZE_BITS; - int lane = threadIdx.x & 0x1f; - - float4* val_cast = reinterpret_cast(vals); - const float4* attn_mask_cast = reinterpret_cast(attn_mask); - - float4 data[MAX_THREAD_ITERATIONS]; - - float max_val = minus_infinity; - - for (int i = 0; i < iterations; i++) { - int data_id = i * iteration_stride + seq_lane; - if 
(data_id < seq_length) { - float4 mask = attn_mask_cast[mask_offset + data_id]; - data[i] = val_cast[data_offset + data_id]; - - data[i].x += mask.x; - data[i].y += mask.y; - data[i].z += mask.z; - data[i].w += mask.w; - - max_val = (data[i].x > max_val ? data[i].x : max_val); - max_val = (data[i].y > max_val ? data[i].y : max_val); - max_val = (data[i].z > max_val ? data[i].z : max_val); - max_val = (data[i].w > max_val ? data[i].w : max_val); - } else { - data[i].x = minus_infinity; - data[i].y = minus_infinity; - data[i].z = minus_infinity; - data[i].w = minus_infinity; - } - } - - for (int i = 1; i < tbSize; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? temp : max_val); - } - - if (seq_length > tbSize) { - if (lane == 0) partialSum[wid] = max_val; - b.sync(); - - if (lane < warp_num) max_val = partialSum[lane]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - int iters = warp_num; - if (seq_length < iteration_stride) - iters = warp_num / (iteration_stride / max_threads_in_sequence); - - for (int i = 1; i < iters; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? 
temp : max_val); - } - - max_val = g.shfl(max_val, threadIdx.x / tbSize); - } - - float sum = 0; - for (int i = 0; i < iterations; i++) { - data[i].x = __expf(data[i].x - max_val); - data[i].y = __expf(data[i].y - max_val); - data[i].z = __expf(data[i].z - max_val); - data[i].w = __expf(data[i].w - max_val); - - sum += (data[i].x + data[i].y + data[i].z + data[i].w); - } - - for (int i = 1; i < tbSize; i *= 2) { sum += g.shfl_xor(sum, i); } - - if (seq_length > tbSize) { - if (lane == 0) partialSum[wid] = sum; - b.sync(); - - if (lane < warp_num) sum = partialSum[lane]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - int iters = warp_num; - if (seq_length < iteration_stride) - iters = warp_num / (iteration_stride / max_threads_in_sequence); - - for (int i = 1; i < iters; i *= 2) { sum += g.shfl_xor(sum, i); } - - sum = g.shfl(sum, threadIdx.x / tbSize); - } - - sum += 1e-6; - - for (int i = 0; i < iterations; i++) { - data[i].x /= sum; - data[i].y /= sum; - data[i].z /= sum; - data[i].w /= sum; - - int data_id = i * iteration_stride + seq_lane; - if (data_id < seq_length) val_cast[data_offset + data_id] = data[i]; - } -} - -template -__global__ void attn_softmax(__half* vals, - const __half* attn_mask, - int heads, - int seq_length, - int iterations) -{ -#ifdef HALF_PRECISION_AVAILABLE - __shared__ float partialSum[MAX_WARP_NUM]; - - int warp_num = blockDim.x >> WARP_SIZE_BITS; - - int iteration_stride = blockDim.x; - int block_width = blockStride * seq_length; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int batch = blockIdx.y; - int row = blockIdx.x; - int max_threads_in_sequence = std::max(seq_length, tbSeq); - int seq_lane = threadIdx.x % max_threads_in_sequence; - - int data_offset = batch * (gridDim.x * block_width) + row * block_width + - (threadIdx.x / max_threads_in_sequence) * seq_length; - int mask_offset = batch * seq_length; - - int wid = threadIdx.x >> WARP_SIZE_BITS; - int lane = 
threadIdx.x & 0x1f; - - float2* val_cast = reinterpret_cast(vals); - const float2* attn_mask_cast = reinterpret_cast(attn_mask); - - val_cast += data_offset; - attn_mask_cast += mask_offset; - - float2 low_data[MAX_THREAD_ITERATIONS]; - float2 high_data[MAX_THREAD_ITERATIONS]; - - float max_val = minus_infinity; - - for (int i = 0; i < iterations; i++) { - int data_id = i * iteration_stride + seq_lane; - if (data_id < seq_length) { - float2 data = val_cast[data_id]; - float2 mask = attn_mask_cast[data_id]; - - __half2* data_arr = reinterpret_cast<__half2*>(&data); - __half2* mask_arr = reinterpret_cast<__half2*>(&mask); - - low_data[i] = __half22float2(data_arr[0]); - high_data[i] = __half22float2(data_arr[1]); - float2 low_mask = __half22float2(mask_arr[0]); - float2 high_mask = __half22float2(mask_arr[1]); - - low_data[i].x += low_mask.x; - low_data[i].y += low_mask.y; - high_data[i].x += high_mask.x; - high_data[i].y += high_mask.y; - - max_val = (low_data[i].x > max_val ? low_data[i].x : max_val); - max_val = (low_data[i].y > max_val ? low_data[i].y : max_val); - max_val = (high_data[i].x > max_val ? high_data[i].x : max_val); - max_val = (high_data[i].y > max_val ? high_data[i].y : max_val); - } - } - - for (int i = 1; i < tbSize; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? temp : max_val); - } - - if (seq_length > tbSize) { - if (lane == 0) partialSum[wid] = max_val; - b.sync(); - - if (lane < warp_num) max_val = partialSum[lane]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - int iters = warp_num; - if (seq_length < iteration_stride) - iters = warp_num / (iteration_stride / max_threads_in_sequence); - - for (int i = 1; i < iters; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? 
temp : max_val); - } - - max_val = g.shfl(max_val, threadIdx.x / tbSize); - } - - float sum = 0; - for (int i = 0; i < iterations; i++) { - int data_id = i * iteration_stride + seq_lane; - if (data_id < seq_length) { - low_data[i].x = __expf(low_data[i].x - max_val); - low_data[i].y = __expf(low_data[i].y - max_val); - high_data[i].x = __expf(high_data[i].x - max_val); - high_data[i].y = __expf(high_data[i].y - max_val); - - sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y); - } - } - - for (int i = 1; i < tbSize; i *= 2) { sum += g.shfl_xor(sum, i); } - - if (seq_length > tbSize) { - if (lane == 0) partialSum[wid] = sum; - b.sync(); - - if (lane < warp_num) sum = partialSum[lane]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - int iters = warp_num; - if (seq_length < iteration_stride) - iters = warp_num / (iteration_stride / max_threads_in_sequence); - - for (int i = 1; i < iters; i *= 2) { sum += g.shfl_xor(sum, i); } - - sum = g.shfl(sum, threadIdx.x / tbSize); - } - - sum += 1e-6; - - for (int i = 0; i < iterations; i++) { - int data_id = i * iteration_stride + seq_lane; - if (data_id < seq_length) { - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - - low_data[i].x /= sum; - low_data[i].y /= sum; - high_data[i].x /= sum; - high_data[i].y /= sum; - - result_h[0] = __float22half2_rn(low_data[i]); - result_h[1] = __float22half2_rn(high_data[i]); - - val_cast[data_id] = result_f; - } - } - -#endif -} - -template -void launch_attn_softmax(T*, const T*, int, int, int, cudaStream_t); - -template <> -void launch_attn_softmax(float* vals, - const float* attn_mask, - int batch_size, - int heads, - int sequence_length, - cudaStream_t stream) -{ - const int threads = 128; - int seq_length4 = sequence_length / 4; - - dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads); - - int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads; - - dim3 block_dim(seq_length4 > threads ? 
((sequence_length + subblock_max_workload - 1) / - subblock_max_workload * threads) - : threads); - int iterations = - (sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads - : MAX_THREAD_ITERATIONS); - - if (sequence_length <= 8) - attn_softmax<2, (threads / 2), 2> - <<>>(vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 16) - attn_softmax<4, (threads / 4), 4> - <<>>(vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 32) - attn_softmax<8, (threads / 8), 8> - <<>>(vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 64) - attn_softmax<16, (threads / 16), 16> - <<>>(vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 128) - attn_softmax<32, (threads / 32), 32> - <<>>(vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 256) - attn_softmax<32, (threads / 64), 64> - <<>>(vals, attn_mask, heads, seq_length4, iterations); - else { - const int threads = 256; - dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads); - - int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads; - - dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) / - subblock_max_workload * threads) - : threads); - iterations = - (sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads - : MAX_THREAD_ITERATIONS); - if (sequence_length <= 512) - attn_softmax<32, (threads / 128), 128><<>>( - vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length < (MAX_THREADS * MAX_THREAD_ITERATIONS * 4)) - attn_softmax<32, 1, 128><<>>( - vals, attn_mask, heads, seq_length4, iterations); - else - throw std::runtime_error( - "Unsupport Seq_Length! 
Check the restriction of the max_threads and " - "max_thread_iterations!"); - } -} - -template <> -void launch_attn_softmax<__half>(__half* vals, - const __half* attn_mask, - int batch_size, - int heads, - int sequence_length, - cudaStream_t stream) -{ - const int threads = 128; - int seq_length4 = sequence_length / 4; - - dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads); - - int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads; - - dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) / - subblock_max_workload * threads) - : threads); - - int iterations = - (sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads - : MAX_THREAD_ITERATIONS); - - if (sequence_length <= 8) - attn_softmax<2, (threads / 2), 2> - <<>>(vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 16) - attn_softmax<4, (threads / 4), 4> - <<>>(vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 32) - attn_softmax<8, (threads / 8), 8> - <<>>(vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 64) - attn_softmax<16, (threads / 16), 16> - <<>>(vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 128) - attn_softmax<32, (threads / 32), 32> - <<>>(vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 256) - attn_softmax<32, (threads / 64), 64> - <<>>(vals, attn_mask, heads, seq_length4, iterations); - else { - const int threads = 256; - dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads); - - int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads; - - dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) / - subblock_max_workload * threads) - : threads); - iterations = - (sequence_length < subblock_max_workload ? 
(seq_length4 + threads - 1) / threads - : MAX_THREAD_ITERATIONS); - if (sequence_length <= 512) - attn_softmax<32, (threads / 128), 128><<>>( - vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length < (MAX_THREADS * MAX_THREAD_ITERATIONS * 4)) - attn_softmax<32, 1, 128><<>>( - vals, attn_mask, heads, seq_length4, iterations); - else - throw std::runtime_error( - "Unsupport Seq_Length! Check the restriction of the max_threads and " - "max_thread_iterations!"); - } -} - -template -__global__ void softmax_backward_kernel(T* out_grad, const T* soft_inp, int seq_length) -{ - __shared__ float partialSum[MAX_WARP_NUM]; - - int warp_num = blockDim.x >> WARP_SIZE_BITS; // warp-count = num_threads / WARP_SIZE (32) - - int iteration_stride = blockDim.x; - int block_width = blockStride * seq_length; - - int iterations = (seq_length < (MAX_THREAD_ITERATIONS * iteration_stride) - ? (seq_length + iteration_stride - 1) / iteration_stride - : MAX_THREAD_ITERATIONS); - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - - int wid = id >> WARP_SIZE_BITS; - int lane = id & 0x1f; - - T val_reg[MAX_THREAD_ITERATIONS]; - T soft_reg[MAX_THREAD_ITERATIONS]; - float grad_reg = 0.0f; - -#pragma unroll - for (int i = 0; i < iterations; i++) { - int data_id = i * iteration_stride + id; - if (data_id < block_width) { - val_reg[i] = out_grad[row * block_width + data_id]; - soft_reg[i] = soft_inp[row * block_width + data_id]; - - grad_reg += ((float)val_reg[i] * - (float)soft_reg[i]); // if done in half, the multiplication, we may lose - // 2% of accuracy in computation!! 
- } - } - for (int i = 1; i < tbSize; i *= 2) grad_reg += g.shfl_xor(grad_reg, i); - - if (seq_length > tbSize) { - if (lane == 0) partialSum[wid] = grad_reg; - b.sync(); - - if (lane < warp_num) grad_reg = partialSum[lane]; - - int iters = warp_num; - if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length); - - for (int i = 1; i < iters; i *= 2) grad_reg += g.shfl_xor(grad_reg, i); - - grad_reg = g.shfl(grad_reg, id / tbSize); - } - - for (int i = 0; i < iterations; i++) { - int data_id = i * iteration_stride + id; - if (data_id < block_width) { - float temp = (float)soft_reg[i] * ((float)val_reg[i] - grad_reg); - out_grad[row * block_width + data_id] = (T)temp; - } - } -} - -template -__global__ void softmax_backward_kernel_v2(T* grad /* input & output*/, - const T* output, - int softmax_length) -{ - int batch_idx = blockIdx.x * blockDim.y + threadIdx.y; - int offset = batch_idx * softmax_length + threadIdx.x; - - grad += offset; - output += offset; - - T grad_reg[ITERATIONS]; - T output_reg[ITERATIONS]; - float sum = 0.0; - -#pragma unroll - for (int i = 0; i < ITERATIONS; ++i) { - int curr_idx = threadIdx.x + i * WARP_SIZE; - if (curr_idx < softmax_length) { - grad_reg[i] = grad[i * WARP_SIZE]; - output_reg[i] = output[i * WARP_SIZE]; - sum += (float)grad_reg[i] * (float)output_reg[i]; - } - } - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - for (int i = 1; i < WARP_SIZE; i <<= 1) sum += g.shfl_xor(sum, i); - -#pragma unroll - for (int i = 0; i < ITERATIONS; ++i) { - int curr_idx = threadIdx.x + i * WARP_SIZE; - if (curr_idx < softmax_length) - grad[i * WARP_SIZE] = (float)output_reg[i] * ((float)grad_reg[i] - sum); - } -} - -template -void launch_attn_softmax_backward_v2(T* out_grad, - const T* soft_inp, - int batch_size, - int heads, - int seq_length, - cudaStream_t stream) -{ - const int warps_per_block = 4; - dim3 grid_dim(batch_size * heads * seq_length / 
warps_per_block); - dim3 block_dim(WARP_SIZE, warps_per_block); - - if (seq_length <= 32) - softmax_backward_kernel_v2 - <<>>(out_grad, soft_inp, seq_length); - else if (seq_length <= 64) - softmax_backward_kernel_v2 - <<>>(out_grad, soft_inp, seq_length); - else if (seq_length <= 128) - softmax_backward_kernel_v2 - <<>>(out_grad, soft_inp, seq_length); - else if (seq_length <= 256) - softmax_backward_kernel_v2 - <<>>(out_grad, soft_inp, seq_length); - else if (seq_length <= 384) - softmax_backward_kernel_v2 - <<>>(out_grad, soft_inp, seq_length); - else if (seq_length <= 512) - softmax_backward_kernel_v2 - <<>>(out_grad, soft_inp, seq_length); - else if (seq_length <= 768) - softmax_backward_kernel_v2 - <<>>(out_grad, soft_inp, seq_length); - else if (seq_length <= 1024) - softmax_backward_kernel_v2 - <<>>(out_grad, soft_inp, seq_length); - else if (seq_length <= 2048) - softmax_backward_kernel_v2 - <<>>(out_grad, soft_inp, seq_length); - else - throw std::runtime_error( - std::string("Special sequence length found in softmax backward, seq_length: ") + - std::to_string(seq_length)); -} - -template void launch_attn_softmax_backward_v2<__half>(__half* out_grad, - const __half* soft_inp, - int batch_size, - int heads, - int seq_length, - cudaStream_t stream); -template void launch_attn_softmax_backward_v2(float* out_grad, - const float* soft_inp, - int batch_size, - int heads, - int seq_length, - cudaStream_t stream); diff --git a/deepspeed/ops/csrc/transformer/softmax_kernels.hip b/deepspeed/ops/csrc/transformer/softmax_kernels.hip deleted file mode 100644 index afe65b0..0000000 --- a/deepspeed/ops/csrc/transformer/softmax_kernels.hip +++ /dev/null @@ -1,597 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
-#include "hip/hip_runtime.h" -#include -#include "custom_hip_layers.h" -#include "general_kernels_hip.h" - -namespace cg = cooperative_groups; - -dim3 get_attn_softmax_grid(int batch_size, int heads, int sequence_length, int threads) -{ - int seq_length4 = sequence_length / 4; - int block_compute_size = - (seq_length4 < threads ? (int)pow(2.0, floor(log2((float)(threads / seq_length4)))) : 1); - // Note that the Y and Z dimensions are limited to 65535, while X is basically unlimited: - // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications - // The batch size is typically relatively small, while the sequence length could potentially be - // arbitrarily large. We therefore place the batch size second to avoid hitting the Y limit. - unsigned x = heads * sequence_length / block_compute_size; - unsigned y = batch_size; - return {x, y}; -} - -// Fused attention + softmax -template -__global__ void attn_softmax(float* vals, - const float* attn_mask, - int heads, - int seq_length, - int iterations) -{ - __shared__ float partialSum[MAX_WARP_NUM]; - - int warp_num = blockDim.x >> WARP_SIZE_BITS; - - int iteration_stride = blockDim.x; - int block_width = blockStride * seq_length; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int batch = blockIdx.y; - int row = blockIdx.x; - int max_threads_in_sequence = ::max(seq_length, tbSeq); - int seq_lane = threadIdx.x % max_threads_in_sequence; - - int data_offset = batch * (gridDim.x * block_width) + row * block_width + - (threadIdx.x / max_threads_in_sequence) * seq_length; - int mask_offset = batch * seq_length; - - int wid = threadIdx.x >> WARP_SIZE_BITS; - int lane = threadIdx.x & 0x1f; - - float4* val_cast = reinterpret_cast(vals); - const float4* attn_mask_cast = reinterpret_cast(attn_mask); - - float4 data[MAX_THREAD_ITERATIONS]; - - float max_val = minus_infinity; - - for (int i = 0; i < iterations; i++) { - int 
data_id = i * iteration_stride + seq_lane; - if (data_id < seq_length) { - float4 mask = attn_mask_cast[mask_offset + data_id]; - data[i] = val_cast[data_offset + data_id]; - - data[i].x += mask.x; - data[i].y += mask.y; - data[i].z += mask.z; - data[i].w += mask.w; - - max_val = (data[i].x > max_val ? data[i].x : max_val); - max_val = (data[i].y > max_val ? data[i].y : max_val); - max_val = (data[i].z > max_val ? data[i].z : max_val); - max_val = (data[i].w > max_val ? data[i].w : max_val); - } else { - data[i].x = minus_infinity; - data[i].y = minus_infinity; - data[i].z = minus_infinity; - data[i].w = minus_infinity; - } - } - - for (int i = 1; i < tbSize; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? temp : max_val); - } - - if (seq_length > tbSize) { - if (lane == 0) partialSum[wid] = max_val; - b.sync(); - - if (lane < warp_num) max_val = partialSum[lane]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - int iters = warp_num; - if (seq_length < iteration_stride) - iters = warp_num / (iteration_stride / max_threads_in_sequence); - - for (int i = 1; i < iters; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? 
temp : max_val); - } - - max_val = g.shfl(max_val, threadIdx.x / tbSize); - } - - float sum = 0; - for (int i = 0; i < iterations; i++) { - data[i].x = __expf(data[i].x - max_val); - data[i].y = __expf(data[i].y - max_val); - data[i].z = __expf(data[i].z - max_val); - data[i].w = __expf(data[i].w - max_val); - - sum += (data[i].x + data[i].y + data[i].z + data[i].w); - } - - for (int i = 1; i < tbSize; i *= 2) { sum += g.shfl_xor(sum, i); } - - if (seq_length > tbSize) { - if (lane == 0) partialSum[wid] = sum; - b.sync(); - - if (lane < warp_num) sum = partialSum[lane]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - int iters = warp_num; - if (seq_length < iteration_stride) - iters = warp_num / (iteration_stride / max_threads_in_sequence); - - for (int i = 1; i < iters; i *= 2) { sum += g.shfl_xor(sum, i); } - - sum = g.shfl(sum, threadIdx.x / tbSize); - } - - sum += 1e-6; - - for (int i = 0; i < iterations; i++) { - data[i].x /= sum; - data[i].y /= sum; - data[i].z /= sum; - data[i].w /= sum; - - int data_id = i * iteration_stride + seq_lane; - if (data_id < seq_length) val_cast[data_offset + data_id] = data[i]; - } -} - -template -__global__ void attn_softmax(__half* vals, - const __half* attn_mask, - int heads, - int seq_length, - int iterations) -{ -#ifdef HALF_PRECISION_AVAILABLE - __shared__ float partialSum[MAX_WARP_NUM]; - - int warp_num = blockDim.x >> WARP_SIZE_BITS; - - int iteration_stride = blockDim.x; - int block_width = blockStride * seq_length; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int batch = blockIdx.y; - int row = blockIdx.x; - int max_threads_in_sequence = ::max(seq_length, tbSeq); - int seq_lane = threadIdx.x % max_threads_in_sequence; - - int data_offset = batch * (gridDim.x * block_width) + row * block_width + - (threadIdx.x / max_threads_in_sequence) * seq_length; - int mask_offset = batch * seq_length; - - int wid = threadIdx.x >> WARP_SIZE_BITS; - int lane = 
threadIdx.x & 0x1f; - - float2* val_cast = reinterpret_cast(vals); - const float2* attn_mask_cast = reinterpret_cast(attn_mask); - - val_cast += data_offset; - attn_mask_cast += mask_offset; - - float2 low_data[MAX_THREAD_ITERATIONS]; - float2 high_data[MAX_THREAD_ITERATIONS]; - - float max_val = minus_infinity; - - for (int i = 0; i < iterations; i++) { - int data_id = i * iteration_stride + seq_lane; - if (data_id < seq_length) { - float2 data = val_cast[data_id]; - float2 mask = attn_mask_cast[data_id]; - - __half2* data_arr = reinterpret_cast<__half2*>(&data); - __half2* mask_arr = reinterpret_cast<__half2*>(&mask); - - low_data[i] = __half22float2(data_arr[0]); - high_data[i] = __half22float2(data_arr[1]); - float2 low_mask = __half22float2(mask_arr[0]); - float2 high_mask = __half22float2(mask_arr[1]); - - low_data[i].x += low_mask.x; - low_data[i].y += low_mask.y; - high_data[i].x += high_mask.x; - high_data[i].y += high_mask.y; - - max_val = (low_data[i].x > max_val ? low_data[i].x : max_val); - max_val = (low_data[i].y > max_val ? low_data[i].y : max_val); - max_val = (high_data[i].x > max_val ? high_data[i].x : max_val); - max_val = (high_data[i].y > max_val ? high_data[i].y : max_val); - } - } - - for (int i = 1; i < tbSize; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? temp : max_val); - } - - if (seq_length > tbSize) { - if (lane == 0) partialSum[wid] = max_val; - b.sync(); - - if (lane < warp_num) max_val = partialSum[lane]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - int iters = warp_num; - if (seq_length < iteration_stride) - iters = warp_num / (iteration_stride / max_threads_in_sequence); - - for (int i = 1; i < iters; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? 
temp : max_val); - } - - max_val = g.shfl(max_val, threadIdx.x / tbSize); - } - - float sum = 0; - for (int i = 0; i < iterations; i++) { - int data_id = i * iteration_stride + seq_lane; - if (data_id < seq_length) { - low_data[i].x = __expf(low_data[i].x - max_val); - low_data[i].y = __expf(low_data[i].y - max_val); - high_data[i].x = __expf(high_data[i].x - max_val); - high_data[i].y = __expf(high_data[i].y - max_val); - - sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y); - } - } - - for (int i = 1; i < tbSize; i *= 2) { sum += g.shfl_xor(sum, i); } - - if (seq_length > tbSize) { - if (lane == 0) partialSum[wid] = sum; - b.sync(); - - if (lane < warp_num) sum = partialSum[lane]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - int iters = warp_num; - if (seq_length < iteration_stride) - iters = warp_num / (iteration_stride / max_threads_in_sequence); - - for (int i = 1; i < iters; i *= 2) { sum += g.shfl_xor(sum, i); } - - sum = g.shfl(sum, threadIdx.x / tbSize); - } - - sum += 1e-6; - - for (int i = 0; i < iterations; i++) { - int data_id = i * iteration_stride + seq_lane; - if (data_id < seq_length) { - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - - low_data[i].x /= sum; - low_data[i].y /= sum; - high_data[i].x /= sum; - high_data[i].y /= sum; - - result_h[0] = __float22half2_rn(low_data[i]); - result_h[1] = __float22half2_rn(high_data[i]); - - val_cast[data_id] = result_f; - } - } - -#endif -} - -template -void launch_attn_softmax(T*, const T*, int, int, int, hipStream_t); - -template <> -void launch_attn_softmax(float* vals, - const float* attn_mask, - int batch_size, - int heads, - int sequence_length, - hipStream_t stream) -{ - const int threads = 128; - int seq_length4 = sequence_length / 4; - - dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads); - - int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads; - - dim3 block_dim(seq_length4 > threads ? 
((sequence_length + subblock_max_workload - 1) / - subblock_max_workload * threads) - : threads); - int iterations = - (sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads - : MAX_THREAD_ITERATIONS); - - if (sequence_length <= 8) - hipLaunchKernelGGL(( attn_softmax<2, (threads / 2), 2>) - , dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 16) - hipLaunchKernelGGL(( attn_softmax<4, (threads / 4), 4>) - , dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 32) - hipLaunchKernelGGL(( attn_softmax<8, (threads / 8), 8>) - , dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 64) - hipLaunchKernelGGL(( attn_softmax<16, (threads / 16), 16>) - , dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 128) - hipLaunchKernelGGL(( attn_softmax<32, (threads / 32), 32>) - , dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 256) - hipLaunchKernelGGL(( attn_softmax<32, (threads / 64), 64>) - , dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations); - else { - const int threads = 256; - dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads); - - int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads; - - dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) / - subblock_max_workload * threads) - : threads); - iterations = - (sequence_length < subblock_max_workload ? 
(seq_length4 + threads - 1) / threads - : MAX_THREAD_ITERATIONS); - if (sequence_length <= 512) - hipLaunchKernelGGL(( attn_softmax<32, (threads / 128), 128>), dim3(grid_dim), dim3(block_dim), 0, stream, - vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length < (MAX_THREADS * MAX_THREAD_ITERATIONS * 4)) - hipLaunchKernelGGL(( attn_softmax<32, 1, 128>), dim3(grid_dim), dim3(block_dim), 0, stream, - vals, attn_mask, heads, seq_length4, iterations); - else - throw std::runtime_error( - "Unsupport Seq_Length! Check the restriction of the max_threads and " - "max_thread_iterations!"); - } -} - -template <> -void launch_attn_softmax<__half>(__half* vals, - const __half* attn_mask, - int batch_size, - int heads, - int sequence_length, - hipStream_t stream) -{ - const int threads = 128; - int seq_length4 = sequence_length / 4; - - dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads); - - int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads; - - dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) / - subblock_max_workload * threads) - : threads); - - int iterations = - (sequence_length < subblock_max_workload ? 
(seq_length4 + threads - 1) / threads - : MAX_THREAD_ITERATIONS); - - if (sequence_length <= 8) - hipLaunchKernelGGL(( attn_softmax<2, (threads / 2), 2>) - , dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 16) - hipLaunchKernelGGL(( attn_softmax<4, (threads / 4), 4>) - , dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 32) - hipLaunchKernelGGL(( attn_softmax<8, (threads / 8), 8>) - , dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 64) - hipLaunchKernelGGL(( attn_softmax<16, (threads / 16), 16>) - , dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 128) - hipLaunchKernelGGL(( attn_softmax<32, (threads / 32), 32>) - , dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 256) - hipLaunchKernelGGL(( attn_softmax<32, (threads / 64), 64>) - , dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations); - else { - const int threads = 256; - dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads); - - int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads; - - dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) / - subblock_max_workload * threads) - : threads); - iterations = - (sequence_length < subblock_max_workload ? 
(seq_length4 + threads - 1) / threads - : MAX_THREAD_ITERATIONS); - if (sequence_length <= 512) - hipLaunchKernelGGL(( attn_softmax<32, (threads / 128), 128>), dim3(grid_dim), dim3(block_dim), 0, stream, - vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length < (MAX_THREADS * MAX_THREAD_ITERATIONS * 4)) - hipLaunchKernelGGL(( attn_softmax<32, 1, 128>), dim3(grid_dim), dim3(block_dim), 0, stream, - vals, attn_mask, heads, seq_length4, iterations); - else - throw std::runtime_error( - "Unsupport Seq_Length! Check the restriction of the max_threads and " - "max_thread_iterations!"); - } -} - -template -__global__ void softmax_backward_kernel(T* out_grad, const T* soft_inp, int seq_length) -{ - __shared__ float partialSum[MAX_WARP_NUM]; - - int warp_num = blockDim.x >> WARP_SIZE_BITS; // warp-count = num_threads / WARP_SIZE (32) - - int iteration_stride = blockDim.x; - int block_width = blockStride * seq_length; - - int iterations = (seq_length < (MAX_THREAD_ITERATIONS * iteration_stride) - ? (seq_length + iteration_stride - 1) / iteration_stride - : MAX_THREAD_ITERATIONS); - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - - int wid = id >> WARP_SIZE_BITS; - int lane = id & 0x1f; - - T val_reg[MAX_THREAD_ITERATIONS]; - T soft_reg[MAX_THREAD_ITERATIONS]; - float grad_reg = 0.0f; - -#pragma unroll - for (int i = 0; i < iterations; i++) { - int data_id = i * iteration_stride + id; - if (data_id < block_width) { - val_reg[i] = out_grad[row * block_width + data_id]; - soft_reg[i] = soft_inp[row * block_width + data_id]; - - grad_reg += ((float)val_reg[i] * - (float)soft_reg[i]); // if done in half, the multiplication, we may lose - // 2% of accuracy in computation!! 
- } - } - for (int i = 1; i < tbSize; i *= 2) grad_reg += g.shfl_xor(grad_reg, i); - - if (seq_length > tbSize) { - if (lane == 0) partialSum[wid] = grad_reg; - b.sync(); - - if (lane < warp_num) grad_reg = partialSum[lane]; - - int iters = warp_num; - if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length); - - for (int i = 1; i < iters; i *= 2) grad_reg += g.shfl_xor(grad_reg, i); - - grad_reg = g.shfl(grad_reg, id / tbSize); - } - - for (int i = 0; i < iterations; i++) { - int data_id = i * iteration_stride + id; - if (data_id < block_width) { - float temp = (float)soft_reg[i] * ((float)val_reg[i] - grad_reg); - out_grad[row * block_width + data_id] = (T)temp; - } - } -} - -template -__global__ void softmax_backward_kernel_v2(T* grad /* input & output*/, - const T* output, - int softmax_length) -{ - int batch_idx = blockIdx.x * blockDim.y + threadIdx.y; - int offset = batch_idx * softmax_length + threadIdx.x; - - grad += offset; - output += offset; - - T grad_reg[ITERATIONS]; - T output_reg[ITERATIONS]; - float sum = 0.0; - -#pragma unroll - for (int i = 0; i < ITERATIONS; ++i) { - int curr_idx = threadIdx.x + i * WARP_SIZE; - if (curr_idx < softmax_length) { - grad_reg[i] = grad[i * WARP_SIZE]; - output_reg[i] = output[i * WARP_SIZE]; - sum += (float)grad_reg[i] * (float)output_reg[i]; - } - } - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - for (int i = 1; i < WARP_SIZE; i <<= 1) sum += g.shfl_xor(sum, i); - -#pragma unroll - for (int i = 0; i < ITERATIONS; ++i) { - int curr_idx = threadIdx.x + i * WARP_SIZE; - if (curr_idx < softmax_length) - grad[i * WARP_SIZE] = (float)output_reg[i] * ((float)grad_reg[i] - sum); - } -} - -template -void launch_attn_softmax_backward_v2(T* out_grad, - const T* soft_inp, - int batch_size, - int heads, - int seq_length, - hipStream_t stream) -{ - const int warps_per_block = 4; - dim3 grid_dim(batch_size * heads * seq_length / 
warps_per_block); - dim3 block_dim(WARP_SIZE, warps_per_block); - - if (seq_length <= 32) - hipLaunchKernelGGL(( softmax_backward_kernel_v2) - , dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length); - else if (seq_length <= 64) - hipLaunchKernelGGL(( softmax_backward_kernel_v2) - , dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length); - else if (seq_length <= 128) - hipLaunchKernelGGL(( softmax_backward_kernel_v2) - , dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length); - else if (seq_length <= 256) - hipLaunchKernelGGL(( softmax_backward_kernel_v2) - , dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length); - else if (seq_length <= 384) - hipLaunchKernelGGL(( softmax_backward_kernel_v2) - , dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length); - else if (seq_length <= 512) - hipLaunchKernelGGL(( softmax_backward_kernel_v2) - , dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length); - else if (seq_length <= 768) - hipLaunchKernelGGL(( softmax_backward_kernel_v2) - , dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length); - else if (seq_length <= 1024) - hipLaunchKernelGGL(( softmax_backward_kernel_v2) - , dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length); - else if (seq_length <= 2048) - hipLaunchKernelGGL(( softmax_backward_kernel_v2) - , dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length); - else - throw std::runtime_error( - std::string("Special sequence length found in softmax backward, seq_length: ") + - std::to_string(seq_length)); -} - -template void launch_attn_softmax_backward_v2<__half>(__half* out_grad, - const __half* soft_inp, - int batch_size, - int heads, - int seq_length, - hipStream_t stream); -template void launch_attn_softmax_backward_v2(float* out_grad, - const float* soft_inp, - int batch_size, - int heads, - int seq_length, - hipStream_t 
stream); diff --git a/deepspeed/ops/csrc/transformer/transform_kernels.cu b/deepspeed/ops/csrc/transformer/transform_kernels.cu deleted file mode 100644 index 15a2219..0000000 --- a/deepspeed/ops/csrc/transformer/transform_kernels.cu +++ /dev/null @@ -1,575 +0,0 @@ -#include "custom_cuda_layers.h" - -#define rows_trans 16 -#define cols_trans 16 - -template -__global__ void Transpose_Kernel(const T* inp, T* out, int row_width, int col_width) -{ - __shared__ T data_block[rows_trans * (cols_trans + 1)]; - - int r = threadIdx.x / cols_trans; - int c = threadIdx.x % cols_trans; - - int m = row_width / cols_trans; - - int i = blockIdx.x / m * rows_trans + r; - int j = blockIdx.x % m * cols_trans + c; - - int row_stride = rows_trans / ((rows_trans * cols_trans + THREADS - 1) / THREADS); - - for (int k = 0; k < rows_trans; k += row_stride) - data_block[(k + r) * cols_trans + c] = inp[(i + k) * row_width + j]; - - __syncthreads(); - - i = blockIdx.x % m * rows_trans + r; - j = blockIdx.x / m * cols_trans + c; - - for (int k = 0; k < rows_trans; k += row_stride) - out[(i + k) * col_width + j] = data_block[c * cols_trans + r + k]; -} - -template <> -void Transpose<__half>(const __half* inp_mat, - __half* out_mat, - int rows, - int cols, - cudaStream_t stream) -{ - int threads = THREADS; - - Transpose_Kernel<__half><<<(rows * cols + threads - 1) / threads, threads, 0, stream>>>( - inp_mat, out_mat, cols, rows); -} - -template <> -void Transpose(const float* inp_mat, float* out_mat, int rows, int cols, cudaStream_t stream) -{ - int threads = THREADS; - - Transpose_Kernel<<<(rows * cols + threads - 1) / threads, threads, 0, stream>>>( - inp_mat, out_mat, cols, rows); -} - -template -__global__ void transform_0213(T* output, - const T* vals, - int hidden_dim, - int seq_length, - int heads, - int head_ext); - -template <> -__global__ void transform_0213(float* output, - const float* vals, - int hidden_dim, - int seq_length, - int heads, - int head_ext) -{ - int d0_stride = 
hidden_dim * seq_length; - int d1_stride = hidden_dim; - int d2_stride = hidden_dim / heads; - - int d0_out_stride = d0_stride; - int d1_out_stride = d2_stride; - int d2_out_stride = d2_stride * seq_length; - - int d0 = blockIdx.x; // Batch - int d1 = blockIdx.y / head_ext; // Sequence ID (0-127) - int d2 = threadIdx.y + (blockIdx.y % head_ext) * (heads / head_ext); // Head (0-11) - int d3 = threadIdx.x; // Values (groups of 4) - - const float4* vals_vec = reinterpret_cast(vals); - float4* output_vec = reinterpret_cast(output); - - float4 inputs = vals_vec[d0 * d0_stride + d1 * d1_stride + d2 * d2_stride + d3]; - output_vec[d0 * d0_out_stride + d1 * d1_out_stride + d2 * d2_out_stride + d3] = inputs; -} - -template <> -__global__ void transform_0213<__half>(__half* output, - const __half* vals, - int hidden_dim, - int seq_length, - int heads, - int head_ext) -{ -#ifdef HALF_PRECISION_AVAILABLE - - int d0_stride = hidden_dim * seq_length; - int d1_stride = hidden_dim; - int d2_stride = hidden_dim / heads; - - int d0_out_stride = d0_stride; - int d1_out_stride = d2_stride; - int d2_out_stride = d2_stride * seq_length; - - int d0 = blockIdx.x; // Batch - int d1 = blockIdx.y / head_ext; // Sequence ID (0-127) - int d2 = threadIdx.y + (blockIdx.y % head_ext) * (heads / head_ext); // Head (0-11) - int d3 = threadIdx.x; // Values (groups of 4) - - float4 vals_arr[1]; - - const float4* vals_vec = reinterpret_cast(vals); - float4* output_vec = reinterpret_cast(output); - - vals_arr[0] = vals_vec[d0 * d0_stride + d1 * d1_stride + d2 * d2_stride + d3]; - output_vec[d0 * d0_out_stride + d1 * d1_out_stride + d2 * d2_out_stride + d3] = vals_arr[0]; -#endif -} - -template <> -void launch_transform_0213(float* output, - const float* vals, - int batch_size, - int seq_length, - int hidden_dim, - int heads, - cudaStream_t stream) -{ - hidden_dim >>= 2; - int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; - dim3 block_dim(hidden_dim / heads, (heads / head_ext)); - dim3 
grid_dim(batch_size, (seq_length * head_ext)); - - transform_0213 - <<>>(output, vals, hidden_dim, seq_length, heads, head_ext); -} - -template <> -void launch_transform_0213<__half>(__half* output, - const __half* vals, - int batch_size, - int seq_length, - int hidden_dim, - int heads, - cudaStream_t stream) -{ - hidden_dim >>= 3; - int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; - dim3 block_dim(hidden_dim / heads, (heads / head_ext)); - dim3 grid_dim(batch_size, (seq_length * head_ext)); - transform_0213<__half> - <<>>(output, vals, hidden_dim, seq_length, heads, head_ext); -} - -// Bias add -template -__global__ void bias_add_transform_0213(T* output, - const T* vals, - const T* bias, - int hidden_dim, - int seq_length, - int heads, - int head_ext); - -template <> -__global__ void bias_add_transform_0213(float* output, - const float* vals, - const float* bias, - int hidden_dim, - int seq_length, - int heads, - int head_ext) -{ - int d0_stride = hidden_dim * seq_length; - int d1_stride = hidden_dim; - int d2_stride = hidden_dim / heads; - - int d0_out_stride = d0_stride; - int d1_out_stride = d2_stride; - int d2_out_stride = d2_stride * seq_length; - - int d0 = blockIdx.x; // Batch - int d1 = blockIdx.y; // Sequence ID (0-127) - int cnt = blockIdx.z / head_ext; // Hidden count - int d2 = threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext); // Head (0-11) - int d3 = threadIdx.x; // Values (groups of 4) - - const float4* vals_vec = reinterpret_cast(vals); - const float4* bias_vec = reinterpret_cast(bias); - float4* output_vec = reinterpret_cast(output); - - float4 inputs = vals_vec[d0 * d0_stride * (gridDim.z / head_ext) + cnt * d1_stride + - d1 * d1_stride * (gridDim.z / head_ext) + d2 * d2_stride + d3]; - float4 biases = bias_vec[cnt * d1_stride + d2 * d2_stride + d3]; - - float4 outputs; - outputs.x = inputs.x + biases.x; - outputs.y = inputs.y + biases.y; - outputs.z = inputs.z + biases.z; - outputs.w = inputs.w + biases.w; - - output_vec[cnt * 
d0_out_stride * gridDim.x + d0 * d0_out_stride + d1 * d1_out_stride + - d2 * d2_out_stride + d3] = outputs; -} - -#define ATTN_H 3 -#define MAX_SEQ_LINE 10 - -template <> -__global__ void bias_add_transform_0213<__half>(__half* output, - const __half* vals, - const __half* bias, - int hidden_dim, - int seq_length, - int heads, - int head_ext) -{ -#ifdef HALF_PRECISION_AVAILABLE - - int d0_stride = hidden_dim * seq_length; - int d1_stride = hidden_dim; - int d2_stride = hidden_dim / heads; - - int d2_out_stride = d2_stride * seq_length; - - int d0 = blockIdx.x; // Batch - int d1 = blockIdx.y; // Sequence ID (0-127) - int cnt = blockIdx.z / head_ext; // Hidden count - int d2 = threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext); // Head (0-11) - int d3 = threadIdx.x; // Values (groups of 4) - - float4 vals_arr; - float4 bias_arr; - float4 output_arr; - __half2* vals_half = reinterpret_cast<__half2*>(&vals_arr); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_arr); - __half2* output_half = reinterpret_cast<__half2*>(&output_arr); - - const float4* vals_vec = reinterpret_cast(vals); - const float4* bias_vec = reinterpret_cast(bias); - float4* output_vec = reinterpret_cast(output); - - vals_vec += (d0 * d0_stride * (gridDim.z / head_ext)); - vals_vec += (d1 * d1_stride * (gridDim.z / head_ext)); - vals_vec += (cnt * d1_stride); - vals_vec += (d2 * d2_stride); - - bias_vec += (cnt * d1_stride); - bias_vec += (d2 * d2_stride); - - output_vec += (cnt * d0_stride * gridDim.x); - output_vec += (d1 * d2_stride); - output_vec += (d0 * d0_stride); - output_vec += (d2 * d2_out_stride); - - bias_arr = bias_vec[d3]; - vals_arr = vals_vec[d3]; - -#if defined(__ACC_HALF__) - output_half[0] = vals_half[0] + bias_half[0]; - output_half[1] = vals_half[1] + bias_half[1]; - output_half[2] = vals_half[2] + bias_half[2]; - output_half[3] = vals_half[3] + bias_half[3]; -#else - float2 bias_arr_f[4]; - float2 vals_arr_f[4]; -#pragma unroll - for (int l = 0; l < 4; l++) { - 
bias_arr_f[l] = __half22float2(bias_half[l]); - vals_arr_f[l] = __half22float2(vals_half[l]); - vals_arr_f[l].x += bias_arr_f[l].x; - vals_arr_f[l].y += bias_arr_f[l].y; - output_half[l] = __float22half2_rn(vals_arr_f[l]); - } -#endif - output_vec[d3] = output_arr; - -#endif -} - -__global__ void bias_add_transform_0213_v2(__half* output, - const __half* vals, - const __half* bias, - int hidden_dim, - int seq_length, - int heads) -{ -#ifdef HALF_PRECISION_AVAILABLE - __shared__ float4 in_data[3072]; - - int d0_stride = hidden_dim * seq_length; - int d1_stride = hidden_dim; - int d2_stride = hidden_dim / heads; - int iteration_stride = d1_stride * blockDim.z; // Hidden * 3 / 8 - int batch_stride = d0_stride * blockDim.z; // Hidden * S * 3 / 8 - - int d0_out_stride = d0_stride; - int d1_out_stride = d2_stride; - int d2_out_stride = d2_stride * seq_length; - - int d0 = blockIdx.x; // Batch - int d1 = blockIdx.y; // Sequence ID (0-127) - int cnt = threadIdx.z; // blockIdx.z; // Hidden count - int d2 = threadIdx.y; // Head (0-11) - int d3 = threadIdx.x; // Values (groups of 4) - - float4 vals_arr[1]; - float4 bias_arr[1]; - float4 output_arr[1]; - __half2* vals_half = reinterpret_cast<__half2*>(vals_arr); - __half2* bias_half = reinterpret_cast<__half2*>(bias_arr); - __half2* output_half = reinterpret_cast<__half2*>(output_arr); - - const float4* vals_vec = reinterpret_cast(vals); - const float4* bias_vec = reinterpret_cast(bias); - float4* output_vec = reinterpret_cast(output); - - int iter_index = cnt * d1_stride + d2 * d2_stride + d3; - int input_offset = d0 * batch_stride + d1 * (iteration_stride << 1); - bias_arr[0] = bias_vec[iter_index]; - -#pragma unroll - for (int iter = 0; iter < 2; iter++) { - int iter_id = iter * iteration_stride + iter_index; - vals_arr[0] = vals_vec[input_offset + iter_id]; - - output_half[0] = vals_half[0] + bias_half[0]; - output_half[1] = vals_half[1] + bias_half[1]; - output_half[2] = vals_half[2] + bias_half[2]; - output_half[3] = 
vals_half[3] + bias_half[3]; - - in_data[iter_id] = output_arr[0]; - } - __syncthreads(); - - iteration_stride = blockDim.z * (blockDim.y >> 1); - int matrix_stride = (d0_out_stride * gridDim.x); - int head_count = (d2 >> 1) + cnt * (blockDim.y >> 1); - - int out_index = d0 * d0_out_stride + d1 * (d1_out_stride << 1) + d3 + (d2 % 2) * d2_stride; - -#pragma unroll - for (int iter = 0; iter < 2; iter++) { - int iter_row = (iter * iteration_stride) + head_count; - int iter_offset = - (iter_row % blockDim.y) * d2_out_stride + (iter_row / blockDim.y) * matrix_stride; - output_vec[out_index + iter_offset] = - in_data[iter_row * d2_stride + d3 + (d2 % 2) * (d1_stride * blockDim.z)]; - } -#endif -} - -// [B S C*H] - > C * [B A S N] -template <> -void launch_bias_add_transform_0213(float* output, - const float* vals, - const float* bias, - int batch_size, - int seq_length, - int hidden_dim, - int heads, - cudaStream_t stream, - int trans_count) -{ - hidden_dim >>= 2; - int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; - - dim3 block_dim(hidden_dim / heads, (heads / head_ext)); - dim3 grid_dim(batch_size, seq_length, (trans_count * head_ext)); - - bias_add_transform_0213<<>>( - output, vals, bias, hidden_dim, seq_length, heads, head_ext); -} - -template <> -void launch_bias_add_transform_0213<__half>(__half* output, - const __half* vals, - const __half* bias, - int batch_size, - int seq_length, - int hidden_dim, - int heads, - cudaStream_t stream, - int trans_count) -{ - hidden_dim >>= 3; - if (hidden_dim > 128 || hidden_dim < 16) { - int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; - dim3 block_dim(hidden_dim / heads, (heads / head_ext)); - dim3 grid_dim(batch_size, seq_length, (trans_count * head_ext)); - bias_add_transform_0213<__half><<>>( - output, vals, bias, hidden_dim, seq_length, heads, head_ext); - } else { - dim3 block_dim(hidden_dim / heads, heads, trans_count); - dim3 grid_dim(batch_size, seq_length / 2); - bias_add_transform_0213_v2<<>>( - output, vals, 
bias, hidden_dim, seq_length, heads); - } -} - -template -__global__ void transform4d_0213(T* out, - const T* in, - int heads, - int seq_length, - int hidden_dim, - int head_ext); - -template <> -__global__ void transform4d_0213(float* out, - const float* in, - int heads, - int seq_length, - int hidden_dim, - int head_ext) -{ - int d0_stride = hidden_dim * seq_length; - int d1_stride = d0_stride / heads; - int d2_stride = hidden_dim / heads; - - int d0_out_stride = d0_stride; - int d1_out_stride = d2_stride; - int d2_out_stride = hidden_dim; - - int d0 = blockIdx.x; // Batch - int d1 = blockIdx.y / ((seq_length - 1) / blockDim.y + 1); // Head - int d2 = (threadIdx.y + blockDim.y * blockIdx.y) % seq_length; - int cnt = blockIdx.z; - int d3 = threadIdx.x; // Values (groups of 8) - - if (d2 < seq_length) { - const float4* in_vec = reinterpret_cast(in); - float4* out_vec = reinterpret_cast(out); - - float4 vals_vec = in_vec[cnt * d0_stride * gridDim.x + d0 * d0_stride + d1 * d1_stride + - d2 * d2_stride + d3]; - out_vec[d0 * d0_out_stride * gridDim.z + cnt * d2_out_stride + d1 * d1_out_stride + - d2 * d2_out_stride * gridDim.z + d3] = vals_vec; - } -} - -template <> -__global__ void transform4d_0213<__half>(__half* out, - const __half* in, - int heads, - int seq_length, - int hidden_dim, - int head_ext) -{ -#ifdef HALF_PRECISION_AVAILABLE - - int d0_stride = hidden_dim * (seq_length / head_ext); - int d1_stride = hidden_dim; - int d2_stride = hidden_dim / heads; - - int d0 = blockIdx.x; // Batch - int d1 = threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext); // Head - int d2 = blockIdx.z / head_ext; // Sequence - int cnt = blockIdx.y; // Hidden count - int d3 = threadIdx.x; // Values (groups of 8) - - const float4* in_vec = reinterpret_cast(in); - float4* out_vec = reinterpret_cast(out); - - in_vec += (cnt * d0_stride * gridDim.x); - in_vec += (d0 * d0_stride); - in_vec += (d2 * d2_stride); - in_vec += (d1 * d2_stride * seq_length); - - out_vec += (cnt * 
d1_stride); - out_vec += (d1 * d2_stride); - out_vec += (d0 * d0_stride * gridDim.y); - out_vec += (d2 * d1_stride * gridDim.y); - - out_vec[d3] = in_vec[d3]; - -#endif -} - -__global__ void transform4d_0213_v2(__half* out, - const __half* in, - int heads, - int seq_length, - int hidden_dim) -{ -#ifdef HALF_PRECISION_AVAILABLE - __shared__ float4 in_data[3072]; - - int d0_stride = hidden_dim * seq_length; - int d1_stride = hidden_dim; - int d2_stride = hidden_dim / heads; - - int d0 = blockIdx.x; // Batch - int d1 = threadIdx.y; // Head - int d2 = blockIdx.y; // Sequence - int cnt = threadIdx.z; // Hidden count - int d3 = threadIdx.x; // Values (groups of 8) - - const float4* in_vec = reinterpret_cast(in); - float4* out_vec = reinterpret_cast(out); - - int input_offset = d0 * d0_stride + d2 * (d2_stride << 1) + d3 + (d1 % 2) * d2_stride; - int head_count = (d1 >> 1) + cnt * (blockDim.y >> 1); - int iteration_stride = blockDim.z * (blockDim.y >> 1); - int matrix_stride = (d0_stride * gridDim.x); - -#pragma unroll - for (int iter = 0; iter < 2; iter++) { - int iter_row = iter * iteration_stride + head_count; - int iter_offset = (iter_row % blockDim.y) * d2_stride; - - in_data[d3 + iter_offset + (iter_row / blockDim.y + (d1 % 2) * blockDim.z) * d1_stride] = - in_vec[input_offset + iter_offset * seq_length + - (iter_row / blockDim.y) * matrix_stride]; - } - __syncthreads(); - - iteration_stride = d1_stride * blockDim.z; - int iter_index = cnt * d1_stride + d1 * d2_stride + d3; - int output_offset = d0 * d0_stride * blockDim.z + d2 * (iteration_stride << 1); - -#pragma unroll - for (int iter = 0; iter < 2; iter++) { - int iter_id = iter * iteration_stride + iter_index; - out_vec[output_offset + iter_id] = in_data[iter_id]; - } -#endif -} - -// 3 * [B A S N] - > [B S C*H] -template <> -void launch_transform4d_0213(float* out, - const float* in, - int batch_size, - int heads, - int seq_length, - int hidden_dim, - cudaStream_t stream, - int trans_count) -{ - hidden_dim >>= 
2; - dim3 grid_dims(batch_size, heads * ((seq_length - 1) / 8 + 1), trans_count); - dim3 block_dims(hidden_dim / heads, 8); - transform4d_0213 - <<>>(out, in, heads, seq_length, hidden_dim, 1); -} - -template <> -void launch_transform4d_0213<__half>(__half* out, - const __half* in, - int batch_size, - int heads, - int seq_length, - int hidden_dim, - cudaStream_t stream, - int trans_count) -{ - hidden_dim >>= 3; - if (hidden_dim > 128 || hidden_dim < 16) { - int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; - dim3 grid_dims(batch_size, trans_count, (seq_length * head_ext)); - dim3 block_dims(hidden_dim / heads, (heads / head_ext)); - transform4d_0213<__half><<>>( - out, in, heads, seq_length, hidden_dim, head_ext); - } else { - dim3 grid_dims(batch_size, seq_length / 2); - dim3 block_dims(hidden_dim / heads, heads, trans_count); - transform4d_0213_v2<<>>( - out, in, heads, seq_length, hidden_dim); - } -} diff --git a/deepspeed/ops/csrc/transformer/transform_kernels.hip b/deepspeed/ops/csrc/transformer/transform_kernels.hip deleted file mode 100644 index 0aaa4cc..0000000 --- a/deepspeed/ops/csrc/transformer/transform_kernels.hip +++ /dev/null @@ -1,577 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
-#include "hip/hip_runtime.h" -#include "custom_hip_layers.h" - -#define rows_trans 16 -#define cols_trans 16 - -template -__global__ void Transpose_Kernel(const T* inp, T* out, int row_width, int col_width) -{ - __shared__ T data_block[rows_trans * (cols_trans + 1)]; - - int r = threadIdx.x / cols_trans; - int c = threadIdx.x % cols_trans; - - int m = row_width / cols_trans; - - int i = blockIdx.x / m * rows_trans + r; - int j = blockIdx.x % m * cols_trans + c; - - int row_stride = rows_trans / ((rows_trans * cols_trans + THREADS - 1) / THREADS); - - for (int k = 0; k < rows_trans; k += row_stride) - data_block[(k + r) * cols_trans + c] = inp[(i + k) * row_width + j]; - - __syncthreads(); - - i = blockIdx.x % m * rows_trans + r; - j = blockIdx.x / m * cols_trans + c; - - for (int k = 0; k < rows_trans; k += row_stride) - out[(i + k) * col_width + j] = data_block[c * cols_trans + r + k]; -} - -template <> -void Transpose<__half>(const __half* inp_mat, - __half* out_mat, - int rows, - int cols, - hipStream_t stream) -{ - int threads = THREADS; - - hipLaunchKernelGGL(( Transpose_Kernel<__half>), dim3((rows * cols + threads - 1) / threads), dim3(threads), 0, stream, - inp_mat, out_mat, cols, rows); -} - -template <> -void Transpose(const float* inp_mat, float* out_mat, int rows, int cols, hipStream_t stream) -{ - int threads = THREADS; - - hipLaunchKernelGGL(( Transpose_Kernel), dim3((rows * cols + threads - 1) / threads), dim3(threads), 0, stream, - inp_mat, out_mat, cols, rows); -} - -template -__global__ void transform_0213(T* output, - const T* vals, - int hidden_dim, - int seq_length, - int heads, - int head_ext); - -template <> -__global__ void transform_0213(float* output, - const float* vals, - int hidden_dim, - int seq_length, - int heads, - int head_ext) -{ - int d0_stride = hidden_dim * seq_length; - int d1_stride = hidden_dim; - int d2_stride = hidden_dim / heads; - - int d0_out_stride = d0_stride; - int d1_out_stride = d2_stride; - int d2_out_stride = 
d2_stride * seq_length; - - int d0 = blockIdx.x; // Batch - int d1 = blockIdx.y / head_ext; // Sequence ID (0-127) - int d2 = threadIdx.y + (blockIdx.y % head_ext) * (heads / head_ext); // Head (0-11) - int d3 = threadIdx.x; // Values (groups of 4) - - const float4* vals_vec = reinterpret_cast(vals); - float4* output_vec = reinterpret_cast(output); - - float4 inputs = vals_vec[d0 * d0_stride + d1 * d1_stride + d2 * d2_stride + d3]; - output_vec[d0 * d0_out_stride + d1 * d1_out_stride + d2 * d2_out_stride + d3] = inputs; -} - -template <> -__global__ void transform_0213<__half>(__half* output, - const __half* vals, - int hidden_dim, - int seq_length, - int heads, - int head_ext) -{ -#ifdef HALF_PRECISION_AVAILABLE - - int d0_stride = hidden_dim * seq_length; - int d1_stride = hidden_dim; - int d2_stride = hidden_dim / heads; - - int d0_out_stride = d0_stride; - int d1_out_stride = d2_stride; - int d2_out_stride = d2_stride * seq_length; - - int d0 = blockIdx.x; // Batch - int d1 = blockIdx.y / head_ext; // Sequence ID (0-127) - int d2 = threadIdx.y + (blockIdx.y % head_ext) * (heads / head_ext); // Head (0-11) - int d3 = threadIdx.x; // Values (groups of 4) - - float4 vals_arr[1]; - - const float4* vals_vec = reinterpret_cast(vals); - float4* output_vec = reinterpret_cast(output); - - vals_arr[0] = vals_vec[d0 * d0_stride + d1 * d1_stride + d2 * d2_stride + d3]; - output_vec[d0 * d0_out_stride + d1 * d1_out_stride + d2 * d2_out_stride + d3] = vals_arr[0]; -#endif -} - -template <> -void launch_transform_0213(float* output, - const float* vals, - int batch_size, - int seq_length, - int hidden_dim, - int heads, - hipStream_t stream) -{ - hidden_dim >>= 2; - int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; - dim3 block_dim(hidden_dim / heads, (heads / head_ext)); - dim3 grid_dim(batch_size, (seq_length * head_ext)); - - hipLaunchKernelGGL(( transform_0213) - , dim3(grid_dim), dim3(block_dim), 0, stream, output, vals, hidden_dim, seq_length, heads, head_ext); -} - 
-template <> -void launch_transform_0213<__half>(__half* output, - const __half* vals, - int batch_size, - int seq_length, - int hidden_dim, - int heads, - hipStream_t stream) -{ - hidden_dim >>= 3; - int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; - dim3 block_dim(hidden_dim / heads, (heads / head_ext)); - dim3 grid_dim(batch_size, (seq_length * head_ext)); - hipLaunchKernelGGL(( transform_0213<__half>) - , dim3(grid_dim), dim3(block_dim), 0, stream, output, vals, hidden_dim, seq_length, heads, head_ext); -} - -// Bias add -template -__global__ void bias_add_transform_0213(T* output, - const T* vals, - const T* bias, - int hidden_dim, - int seq_length, - int heads, - int head_ext); - -template <> -__global__ void bias_add_transform_0213(float* output, - const float* vals, - const float* bias, - int hidden_dim, - int seq_length, - int heads, - int head_ext) -{ - int d0_stride = hidden_dim * seq_length; - int d1_stride = hidden_dim; - int d2_stride = hidden_dim / heads; - - int d0_out_stride = d0_stride; - int d1_out_stride = d2_stride; - int d2_out_stride = d2_stride * seq_length; - - int d0 = blockIdx.x; // Batch - int d1 = blockIdx.y; // Sequence ID (0-127) - int cnt = blockIdx.z / head_ext; // Hidden count - int d2 = threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext); // Head (0-11) - int d3 = threadIdx.x; // Values (groups of 4) - - const float4* vals_vec = reinterpret_cast(vals); - const float4* bias_vec = reinterpret_cast(bias); - float4* output_vec = reinterpret_cast(output); - - float4 inputs = vals_vec[d0 * d0_stride * (gridDim.z / head_ext) + cnt * d1_stride + - d1 * d1_stride * (gridDim.z / head_ext) + d2 * d2_stride + d3]; - float4 biases = bias_vec[cnt * d1_stride + d2 * d2_stride + d3]; - - float4 outputs; - outputs.x = inputs.x + biases.x; - outputs.y = inputs.y + biases.y; - outputs.z = inputs.z + biases.z; - outputs.w = inputs.w + biases.w; - - output_vec[cnt * d0_out_stride * gridDim.x + d0 * d0_out_stride + d1 * d1_out_stride + - d2 * 
d2_out_stride + d3] = outputs; -} - -#define ATTN_H 3 -#define MAX_SEQ_LINE 10 - -template <> -__global__ void bias_add_transform_0213<__half>(__half* output, - const __half* vals, - const __half* bias, - int hidden_dim, - int seq_length, - int heads, - int head_ext) -{ -#ifdef HALF_PRECISION_AVAILABLE - - int d0_stride = hidden_dim * seq_length; - int d1_stride = hidden_dim; - int d2_stride = hidden_dim / heads; - - int d2_out_stride = d2_stride * seq_length; - - int d0 = blockIdx.x; // Batch - int d1 = blockIdx.y; // Sequence ID (0-127) - int cnt = blockIdx.z / head_ext; // Hidden count - int d2 = threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext); // Head (0-11) - int d3 = threadIdx.x; // Values (groups of 4) - - float4 vals_arr; - float4 bias_arr; - float4 output_arr; - __half2* vals_half = reinterpret_cast<__half2*>(&vals_arr); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_arr); - __half2* output_half = reinterpret_cast<__half2*>(&output_arr); - - const float4* vals_vec = reinterpret_cast(vals); - const float4* bias_vec = reinterpret_cast(bias); - float4* output_vec = reinterpret_cast(output); - - vals_vec += (d0 * d0_stride * (gridDim.z / head_ext)); - vals_vec += (d1 * d1_stride * (gridDim.z / head_ext)); - vals_vec += (cnt * d1_stride); - vals_vec += (d2 * d2_stride); - - bias_vec += (cnt * d1_stride); - bias_vec += (d2 * d2_stride); - - output_vec += (cnt * d0_stride * gridDim.x); - output_vec += (d1 * d2_stride); - output_vec += (d0 * d0_stride); - output_vec += (d2 * d2_out_stride); - - bias_arr = bias_vec[d3]; - vals_arr = vals_vec[d3]; - -#if defined(__ACC_HALF__) - output_half[0] = vals_half[0] + bias_half[0]; - output_half[1] = vals_half[1] + bias_half[1]; - output_half[2] = vals_half[2] + bias_half[2]; - output_half[3] = vals_half[3] + bias_half[3]; -#else - float2 bias_arr_f[4]; - float2 vals_arr_f[4]; -#pragma unroll - for (int l = 0; l < 4; l++) { - bias_arr_f[l] = __half22float2(bias_half[l]); - vals_arr_f[l] = 
__half22float2(vals_half[l]); - vals_arr_f[l].x += bias_arr_f[l].x; - vals_arr_f[l].y += bias_arr_f[l].y; - output_half[l] = __float22half2_rn(vals_arr_f[l]); - } -#endif - output_vec[d3] = output_arr; - -#endif -} - -__global__ void bias_add_transform_0213_v2(__half* output, - const __half* vals, - const __half* bias, - int hidden_dim, - int seq_length, - int heads) -{ -#ifdef HALF_PRECISION_AVAILABLE - __shared__ float4 in_data[3072]; - - int d0_stride = hidden_dim * seq_length; - int d1_stride = hidden_dim; - int d2_stride = hidden_dim / heads; - int iteration_stride = d1_stride * blockDim.z; // Hidden * 3 / 8 - int batch_stride = d0_stride * blockDim.z; // Hidden * S * 3 / 8 - - int d0_out_stride = d0_stride; - int d1_out_stride = d2_stride; - int d2_out_stride = d2_stride * seq_length; - - int d0 = blockIdx.x; // Batch - int d1 = blockIdx.y; // Sequence ID (0-127) - int cnt = threadIdx.z; // blockIdx.z; // Hidden count - int d2 = threadIdx.y; // Head (0-11) - int d3 = threadIdx.x; // Values (groups of 4) - - float4 vals_arr[1]; - float4 bias_arr[1]; - float4 output_arr[1]; - __half2* vals_half = reinterpret_cast<__half2*>(vals_arr); - __half2* bias_half = reinterpret_cast<__half2*>(bias_arr); - __half2* output_half = reinterpret_cast<__half2*>(output_arr); - - const float4* vals_vec = reinterpret_cast(vals); - const float4* bias_vec = reinterpret_cast(bias); - float4* output_vec = reinterpret_cast(output); - - int iter_index = cnt * d1_stride + d2 * d2_stride + d3; - int input_offset = d0 * batch_stride + d1 * (iteration_stride << 1); - bias_arr[0] = bias_vec[iter_index]; - -#pragma unroll - for (int iter = 0; iter < 2; iter++) { - int iter_id = iter * iteration_stride + iter_index; - vals_arr[0] = vals_vec[input_offset + iter_id]; - - output_half[0] = vals_half[0] + bias_half[0]; - output_half[1] = vals_half[1] + bias_half[1]; - output_half[2] = vals_half[2] + bias_half[2]; - output_half[3] = vals_half[3] + bias_half[3]; - - in_data[iter_id] = output_arr[0]; 
- } - __syncthreads(); - - iteration_stride = blockDim.z * (blockDim.y >> 1); - int matrix_stride = (d0_out_stride * gridDim.x); - int head_count = (d2 >> 1) + cnt * (blockDim.y >> 1); - - int out_index = d0 * d0_out_stride + d1 * (d1_out_stride << 1) + d3 + (d2 % 2) * d2_stride; - -#pragma unroll - for (int iter = 0; iter < 2; iter++) { - int iter_row = (iter * iteration_stride) + head_count; - int iter_offset = - (iter_row % blockDim.y) * d2_out_stride + (iter_row / blockDim.y) * matrix_stride; - output_vec[out_index + iter_offset] = - in_data[iter_row * d2_stride + d3 + (d2 % 2) * (d1_stride * blockDim.z)]; - } -#endif -} - -// [B S C*H] - > C * [B A S N] -template <> -void launch_bias_add_transform_0213(float* output, - const float* vals, - const float* bias, - int batch_size, - int seq_length, - int hidden_dim, - int heads, - hipStream_t stream, - int trans_count) -{ - hidden_dim >>= 2; - int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; - - dim3 block_dim(hidden_dim / heads, (heads / head_ext)); - dim3 grid_dim(batch_size, seq_length, (trans_count * head_ext)); - - hipLaunchKernelGGL(( bias_add_transform_0213), dim3(grid_dim), dim3(block_dim), 0, stream, - output, vals, bias, hidden_dim, seq_length, heads, head_ext); -} - -template <> -void launch_bias_add_transform_0213<__half>(__half* output, - const __half* vals, - const __half* bias, - int batch_size, - int seq_length, - int hidden_dim, - int heads, - hipStream_t stream, - int trans_count) -{ - hidden_dim >>= 3; - if (hidden_dim > 128 || hidden_dim < 16) { - int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; - dim3 block_dim(hidden_dim / heads, (heads / head_ext)); - dim3 grid_dim(batch_size, seq_length, (trans_count * head_ext)); - hipLaunchKernelGGL(( bias_add_transform_0213<__half>), dim3(grid_dim), dim3(block_dim), 0, stream, - output, vals, bias, hidden_dim, seq_length, heads, head_ext); - } else { - dim3 block_dim(hidden_dim / heads, heads, trans_count); - dim3 grid_dim(batch_size, seq_length / 2); 
- hipLaunchKernelGGL(( bias_add_transform_0213_v2), dim3(grid_dim), dim3(block_dim), 0, stream, - output, vals, bias, hidden_dim, seq_length, heads); - } -} - -template -__global__ void transform4d_0213(T* out, - const T* in, - int heads, - int seq_length, - int hidden_dim, - int head_ext); - -template <> -__global__ void transform4d_0213(float* out, - const float* in, - int heads, - int seq_length, - int hidden_dim, - int head_ext) -{ - int d0_stride = hidden_dim * seq_length; - int d1_stride = d0_stride / heads; - int d2_stride = hidden_dim / heads; - - int d0_out_stride = d0_stride; - int d1_out_stride = d2_stride; - int d2_out_stride = hidden_dim; - - int d0 = blockIdx.x; // Batch - int d1 = blockIdx.y / ((seq_length - 1) / blockDim.y + 1); // Head - int d2 = (threadIdx.y + blockDim.y * blockIdx.y) % seq_length; - int cnt = blockIdx.z; - int d3 = threadIdx.x; // Values (groups of 8) - - if (d2 < seq_length) { - const float4* in_vec = reinterpret_cast(in); - float4* out_vec = reinterpret_cast(out); - - float4 vals_vec = in_vec[cnt * d0_stride * gridDim.x + d0 * d0_stride + d1 * d1_stride + - d2 * d2_stride + d3]; - out_vec[d0 * d0_out_stride * gridDim.z + cnt * d2_out_stride + d1 * d1_out_stride + - d2 * d2_out_stride * gridDim.z + d3] = vals_vec; - } -} - -template <> -__global__ void transform4d_0213<__half>(__half* out, - const __half* in, - int heads, - int seq_length, - int hidden_dim, - int head_ext) -{ -#ifdef HALF_PRECISION_AVAILABLE - - int d0_stride = hidden_dim * (seq_length / head_ext); - int d1_stride = hidden_dim; - int d2_stride = hidden_dim / heads; - - int d0 = blockIdx.x; // Batch - int d1 = threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext); // Head - int d2 = blockIdx.z / head_ext; // Sequence - int cnt = blockIdx.y; // Hidden count - int d3 = threadIdx.x; // Values (groups of 8) - - const float4* in_vec = reinterpret_cast(in); - float4* out_vec = reinterpret_cast(out); - - in_vec += (cnt * d0_stride * gridDim.x); - in_vec += (d0 * 
d0_stride); - in_vec += (d2 * d2_stride); - in_vec += (d1 * d2_stride * seq_length); - - out_vec += (cnt * d1_stride); - out_vec += (d1 * d2_stride); - out_vec += (d0 * d0_stride * gridDim.y); - out_vec += (d2 * d1_stride * gridDim.y); - - out_vec[d3] = in_vec[d3]; - -#endif -} - -__global__ void transform4d_0213_v2(__half* out, - const __half* in, - int heads, - int seq_length, - int hidden_dim) -{ -#ifdef HALF_PRECISION_AVAILABLE - __shared__ float4 in_data[3072]; - - int d0_stride = hidden_dim * seq_length; - int d1_stride = hidden_dim; - int d2_stride = hidden_dim / heads; - - int d0 = blockIdx.x; // Batch - int d1 = threadIdx.y; // Head - int d2 = blockIdx.y; // Sequence - int cnt = threadIdx.z; // Hidden count - int d3 = threadIdx.x; // Values (groups of 8) - - const float4* in_vec = reinterpret_cast(in); - float4* out_vec = reinterpret_cast(out); - - int input_offset = d0 * d0_stride + d2 * (d2_stride << 1) + d3 + (d1 % 2) * d2_stride; - int head_count = (d1 >> 1) + cnt * (blockDim.y >> 1); - int iteration_stride = blockDim.z * (blockDim.y >> 1); - int matrix_stride = (d0_stride * gridDim.x); - -#pragma unroll - for (int iter = 0; iter < 2; iter++) { - int iter_row = iter * iteration_stride + head_count; - int iter_offset = (iter_row % blockDim.y) * d2_stride; - - in_data[d3 + iter_offset + (iter_row / blockDim.y + (d1 % 2) * blockDim.z) * d1_stride] = - in_vec[input_offset + iter_offset * seq_length + - (iter_row / blockDim.y) * matrix_stride]; - } - __syncthreads(); - - iteration_stride = d1_stride * blockDim.z; - int iter_index = cnt * d1_stride + d1 * d2_stride + d3; - int output_offset = d0 * d0_stride * blockDim.z + d2 * (iteration_stride << 1); - -#pragma unroll - for (int iter = 0; iter < 2; iter++) { - int iter_id = iter * iteration_stride + iter_index; - out_vec[output_offset + iter_id] = in_data[iter_id]; - } -#endif -} - -// 3 * [B A S N] - > [B S C*H] -template <> -void launch_transform4d_0213(float* out, - const float* in, - int batch_size, - 
int heads, - int seq_length, - int hidden_dim, - hipStream_t stream, - int trans_count) -{ - hidden_dim >>= 2; - dim3 grid_dims(batch_size, heads * ((seq_length - 1) / 8 + 1), trans_count); - dim3 block_dims(hidden_dim / heads, 8); - hipLaunchKernelGGL(( transform4d_0213) - , dim3(grid_dims), dim3(block_dims), 0, stream, out, in, heads, seq_length, hidden_dim, 1); -} - -template <> -void launch_transform4d_0213<__half>(__half* out, - const __half* in, - int batch_size, - int heads, - int seq_length, - int hidden_dim, - hipStream_t stream, - int trans_count) -{ - hidden_dim >>= 3; - if (hidden_dim > 128 || hidden_dim < 16) { - int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; - dim3 grid_dims(batch_size, trans_count, (seq_length * head_ext)); - dim3 block_dims(hidden_dim / heads, (heads / head_ext)); - hipLaunchKernelGGL(( transform4d_0213<__half>), dim3(grid_dims), dim3(block_dims), 0, stream, - out, in, heads, seq_length, hidden_dim, head_ext); - } else { - dim3 grid_dims(batch_size, seq_length / 2); - dim3 block_dims(hidden_dim / heads, heads, trans_count); - hipLaunchKernelGGL(( transform4d_0213_v2), dim3(grid_dims), dim3(block_dims), 0, stream, - out, in, heads, seq_length, hidden_dim); - } -} diff --git a/deepspeed/ops/csrc/transformer_bak/cublas_wrappers.cu b/deepspeed/ops/csrc/transformer_bak/cublas_wrappers.cu deleted file mode 100644 index 75ecd3f..0000000 --- a/deepspeed/ops/csrc/transformer_bak/cublas_wrappers.cu +++ /dev/null @@ -1,403 +0,0 @@ -#include "cublas_wrappers.h" - -#ifdef __HIP_PLATFORM_HCC__ -int cublas_gemm_ex(rocblas_handle handle, - rocblas_operation transa, - rocblas_operation transb, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const float* A, - const float* B, - float* C, - rocblas_gemm_algo algo) -#else -int cublas_gemm_ex(cublasHandle_t handle, - cublasOperation_t transa, - cublasOperation_t transb, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const float* A, - const float* B, - 
float* C, - cublasGemmAlgo_t algo) -#endif -{ -#ifdef __HIP_PLATFORM_HCC__ - rocblas_status status = rocblas_gemm_ex(handle, - transa, - transb, - m, - n, - k, - (const void*)alpha, - (const void*)A, - rocblas_datatype_f32_r, - (transa == rocblas_operation_none) ? m : k, - (const void*)B, - rocblas_datatype_f32_r, - (transb == rocblas_operation_none) ? k : n, - (const void*)beta, - C, - rocblas_datatype_f32_r, - m, - C, - rocblas_datatype_f32_r, - m, - rocblas_datatype_f32_r, - algo, - 0, - 0); -#else - cublasStatus_t status = cublasGemmEx(handle, - transa, - transb, - m, - n, - k, - (const void*)alpha, - (const void*)A, - CUDA_R_32F, - (transa == CUBLAS_OP_N) ? m : k, - (const void*)B, - CUDA_R_32F, - (transb == CUBLAS_OP_N) ? k : n, - (const void*)beta, - C, - CUDA_R_32F, - m, - CUDA_R_32F, - algo); -#endif - -#ifdef __HIP_PLATFORM_HCC__ - if (status != rocblas_status_success) { -#else - if (status != CUBLAS_STATUS_SUCCESS) { -#endif - fprintf(stderr, - "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", - m, - n, - k, - (int)status); - return EXIT_FAILURE; - } - return 0; -} - -#ifdef __HIP_PLATFORM_HCC__ -int cublas_gemm_ex(rocblas_handle handle, - rocblas_operation transa, - rocblas_operation transb, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const __half* A, - const __half* B, - __half* C, - rocblas_gemm_algo algo) -#else -int cublas_gemm_ex(cublasHandle_t handle, - cublasOperation_t transa, - cublasOperation_t transb, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const __half* A, - const __half* B, - __half* C, - cublasGemmAlgo_t algo) -#endif -{ -#ifdef __HIP_PLATFORM_HCC__ - rocblas_status status = rocblas_gemm_ex(handle, - transa, - transb, - m, - n, - k, - (const void*)alpha, - (const void*)A, - rocblas_datatype_f16_r, - (transa == rocblas_operation_none) ? m : k, - (const void*)B, - rocblas_datatype_f16_r, - (transb == rocblas_operation_none) ? 
k : n, - (const void*)beta, - (void*)C, - rocblas_datatype_f16_r, - m, - (void*)C, - rocblas_datatype_f16_r, - m, - rocblas_datatype_f32_r, - algo, - 0, - 0); -#else - cublasStatus_t status = cublasGemmEx(handle, - transa, - transb, - m, - n, - k, - (const void*)alpha, - (const void*)A, - CUDA_R_16F, - (transa == CUBLAS_OP_N) ? m : k, - (const void*)B, - CUDA_R_16F, - (transb == CUBLAS_OP_N) ? k : n, - (const void*)beta, - (void*)C, - CUDA_R_16F, - m, - CUDA_R_32F, - algo); -#endif - -#ifdef __HIP_PLATFORM_HCC__ - if (status != rocblas_status_success) { -#else - if (status != CUBLAS_STATUS_SUCCESS) { -#endif - fprintf(stderr, - "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", - m, - n, - k, - (int)status); - return EXIT_FAILURE; - } - return 0; -} - -#ifdef __HIP_PLATFORM_HCC__ -int cublas_strided_batched_gemm(rocblas_handle handle, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const float* A, - const float* B, - float* C, - rocblas_operation op_A, - rocblas_operation op_B, - int stride_A, - int stride_B, - int stride_C, - int batch, - rocblas_gemm_algo algo) -#else -int cublas_strided_batched_gemm(cublasHandle_t handle, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const float* A, - const float* B, - float* C, - cublasOperation_t op_A, - cublasOperation_t op_B, - int stride_A, - int stride_B, - int stride_C, - int batch, - cublasGemmAlgo_t algo) -#endif -{ -#ifdef __HIP_PLATFORM_HCC__ - rocblas_status status = - rocblas_gemm_strided_batched_ex(handle, - op_A, - op_B, - m, - n, - k, - alpha, - A, - rocblas_datatype_f32_r, - (op_A == rocblas_operation_none) ? m : k, - stride_A, - B, - rocblas_datatype_f32_r, - (op_B == rocblas_operation_none) ? 
k : n, - stride_B, - beta, - C, - rocblas_datatype_f32_r, - m, - stride_C, - C, - rocblas_datatype_f32_r, - m, - stride_C, - batch, - rocblas_datatype_f32_r, - algo, - 0, - 0); -#else - cublasStatus_t status = cublasGemmStridedBatchedEx(handle, - op_A, - op_B, - m, - n, - k, - alpha, - A, - CUDA_R_32F, - (op_A == CUBLAS_OP_N) ? m : k, - stride_A, - B, - CUDA_R_32F, - (op_B == CUBLAS_OP_N) ? k : n, - stride_B, - beta, - C, - CUDA_R_32F, - m, - stride_C, - batch, - CUDA_R_32F, - algo); -#endif - -#ifdef __HIP_PLATFORM_HCC__ - if (status != rocblas_status_success) { -#else - if (status != CUBLAS_STATUS_SUCCESS) { -#endif - fprintf(stderr, - "!!!! kernel execution error. (batch: %d, m: %d, n: %d, k: %d, error: %d) \n", - batch, - m, - n, - k, - (int)status); - return EXIT_FAILURE; - } - return 0; -} - -#ifdef __HIP_PLATFORM_HCC__ -int cublas_strided_batched_gemm(rocblas_handle handle, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const __half* A, - const __half* B, - __half* C, - rocblas_operation op_A, - rocblas_operation op_B, - int stride_A, - int stride_B, - int stride_C, - int batch, - rocblas_gemm_algo algo) -#else -int cublas_strided_batched_gemm(cublasHandle_t handle, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const __half* A, - const __half* B, - __half* C, - cublasOperation_t op_A, - cublasOperation_t op_B, - int stride_A, - int stride_B, - int stride_C, - int batch, - cublasGemmAlgo_t algo) -#endif -{ -#ifdef __HIP_PLATFORM_HCC__ - rocblas_status status = - rocblas_gemm_strided_batched_ex(handle, - op_A, - op_B, - m, - n, - k, - alpha, - A, - rocblas_datatype_f16_r, - (op_A == rocblas_operation_none) ? m : k, - stride_A, - B, - rocblas_datatype_f16_r, - (op_B == rocblas_operation_none) ? 
k : n, - stride_B, - beta, - C, - rocblas_datatype_f16_r, - m, - stride_C, - C, - rocblas_datatype_f16_r, - m, - stride_C, - batch, - rocblas_datatype_f32_r, - algo, - 0, - 0); -#else - cublasStatus_t status = cublasGemmStridedBatchedEx(handle, - op_A, - op_B, - m, - n, - k, - alpha, - A, - CUDA_R_16F, - (op_A == CUBLAS_OP_N) ? m : k, - stride_A, - B, - CUDA_R_16F, - (op_B == CUBLAS_OP_N) ? k : n, - stride_B, - beta, - C, - CUDA_R_16F, - m, - stride_C, - batch, - CUDA_R_32F, - algo); -#endif - -#ifdef __HIP_PLATFORM_HCC__ - if (status != rocblas_status_success) { -#else - if (status != CUBLAS_STATUS_SUCCESS) { -#endif - fprintf(stderr, - "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", - m, - n, - k, - (int)status); - return EXIT_FAILURE; - } - - return 0; -} diff --git a/deepspeed/ops/csrc/transformer_bak/cublas_wrappers.hip b/deepspeed/ops/csrc/transformer_bak/cublas_wrappers.hip deleted file mode 100644 index 04aa0ef..0000000 --- a/deepspeed/ops/csrc/transformer_bak/cublas_wrappers.hip +++ /dev/null @@ -1,404 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! -#include "cublas_wrappers_hip.h" - -#ifdef __HIP_PLATFORM_HCC__ -int cublas_gemm_ex(rocblas_handle handle, - rocblas_operation transa, - rocblas_operation transb, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const float* A, - const float* B, - float* C, - rocblas_gemm_algo algo) -#else -int cublas_gemm_ex(rocblas_handle handle, - rocblas_operation transa, - rocblas_operation transb, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const float* A, - const float* B, - float* C, - cublasGemmAlgo_t algo) -#endif -{ -#ifdef __HIP_PLATFORM_HCC__ - rocblas_status status = rocblas_gemm_ex(handle, - transa, - transb, - m, - n, - k, - (const void*)alpha, - (const void*)A, - rocblas_datatype_f32_r, - (transa == rocblas_operation_none) ? m : k, - (const void*)B, - rocblas_datatype_f32_r, - (transb == rocblas_operation_none) ? 
k : n, - (const void*)beta, - C, - rocblas_datatype_f32_r, - m, - C, - rocblas_datatype_f32_r, - m, - rocblas_datatype_f32_r, - algo, - 0, - 0); -#else - rocblas_status status = rocblas_gemmex(handle, - transa, - transb, - m, - n, - k, - (const void*)alpha, - (const void*)A, - hipR32F, - (transa == rocblas_operation_none) ? m : k, - (const void*)B, - hipR32F, - (transb == rocblas_operation_none) ? k : n, - (const void*)beta, - C, - hipR32F, - m, - hipR32F, - algo); -#endif - -#ifdef __HIP_PLATFORM_HCC__ - if (status != rocblas_status_success) { -#else - if (status != rocblas_status_success) { -#endif - fprintf(stderr, - "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", - m, - n, - k, - (int)status); - return EXIT_FAILURE; - } - return 0; -} - -#ifdef __HIP_PLATFORM_HCC__ -int cublas_gemm_ex(rocblas_handle handle, - rocblas_operation transa, - rocblas_operation transb, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const __half* A, - const __half* B, - __half* C, - rocblas_gemm_algo algo) -#else -int cublas_gemm_ex(rocblas_handle handle, - rocblas_operation transa, - rocblas_operation transb, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const __half* A, - const __half* B, - __half* C, - cublasGemmAlgo_t algo) -#endif -{ -#ifdef __HIP_PLATFORM_HCC__ - rocblas_status status = rocblas_gemm_ex(handle, - transa, - transb, - m, - n, - k, - (const void*)alpha, - (const void*)A, - rocblas_datatype_f16_r, - (transa == rocblas_operation_none) ? m : k, - (const void*)B, - rocblas_datatype_f16_r, - (transb == rocblas_operation_none) ? k : n, - (const void*)beta, - (void*)C, - rocblas_datatype_f16_r, - m, - (void*)C, - rocblas_datatype_f16_r, - m, - rocblas_datatype_f32_r, - algo, - 0, - 0); -#else - rocblas_status status = rocblas_gemmex(handle, - transa, - transb, - m, - n, - k, - (const void*)alpha, - (const void*)A, - hipR16F, - (transa == rocblas_operation_none) ? 
m : k, - (const void*)B, - hipR16F, - (transb == rocblas_operation_none) ? k : n, - (const void*)beta, - (void*)C, - hipR16F, - m, - hipR32F, - algo); -#endif - -#ifdef __HIP_PLATFORM_HCC__ - if (status != rocblas_status_success) { -#else - if (status != rocblas_status_success) { -#endif - fprintf(stderr, - "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", - m, - n, - k, - (int)status); - return EXIT_FAILURE; - } - return 0; -} - -#ifdef __HIP_PLATFORM_HCC__ -int cublas_strided_batched_gemm(rocblas_handle handle, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const float* A, - const float* B, - float* C, - rocblas_operation op_A, - rocblas_operation op_B, - int stride_A, - int stride_B, - int stride_C, - int batch, - rocblas_gemm_algo algo) -#else -int cublas_strided_batched_gemm(rocblas_handle handle, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const float* A, - const float* B, - float* C, - rocblas_operation op_A, - rocblas_operation op_B, - int stride_A, - int stride_B, - int stride_C, - int batch, - cublasGemmAlgo_t algo) -#endif -{ -#ifdef __HIP_PLATFORM_HCC__ - rocblas_status status = - rocblas_gemm_strided_batched_ex(handle, - op_A, - op_B, - m, - n, - k, - alpha, - A, - rocblas_datatype_f32_r, - (op_A == rocblas_operation_none) ? m : k, - stride_A, - B, - rocblas_datatype_f32_r, - (op_B == rocblas_operation_none) ? k : n, - stride_B, - beta, - C, - rocblas_datatype_f32_r, - m, - stride_C, - C, - rocblas_datatype_f32_r, - m, - stride_C, - batch, - rocblas_datatype_f32_r, - algo, - 0, - 0); -#else - rocblas_status status = cublasGemmStridedBatchedEx(handle, - op_A, - op_B, - m, - n, - k, - alpha, - A, - hipR32F, - (op_A == rocblas_operation_none) ? m : k, - stride_A, - B, - hipR32F, - (op_B == rocblas_operation_none) ? 
k : n, - stride_B, - beta, - C, - hipR32F, - m, - stride_C, - batch, - hipR32F, - algo); -#endif - -#ifdef __HIP_PLATFORM_HCC__ - if (status != rocblas_status_success) { -#else - if (status != rocblas_status_success) { -#endif - fprintf(stderr, - "!!!! kernel execution error. (batch: %d, m: %d, n: %d, k: %d, error: %d) \n", - batch, - m, - n, - k, - (int)status); - return EXIT_FAILURE; - } - return 0; -} - -#ifdef __HIP_PLATFORM_HCC__ -int cublas_strided_batched_gemm(rocblas_handle handle, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const __half* A, - const __half* B, - __half* C, - rocblas_operation op_A, - rocblas_operation op_B, - int stride_A, - int stride_B, - int stride_C, - int batch, - rocblas_gemm_algo algo) -#else -int cublas_strided_batched_gemm(rocblas_handle handle, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const __half* A, - const __half* B, - __half* C, - rocblas_operation op_A, - rocblas_operation op_B, - int stride_A, - int stride_B, - int stride_C, - int batch, - cublasGemmAlgo_t algo) -#endif -{ -#ifdef __HIP_PLATFORM_HCC__ - rocblas_status status = - rocblas_gemm_strided_batched_ex(handle, - op_A, - op_B, - m, - n, - k, - alpha, - A, - rocblas_datatype_f16_r, - (op_A == rocblas_operation_none) ? m : k, - stride_A, - B, - rocblas_datatype_f16_r, - (op_B == rocblas_operation_none) ? k : n, - stride_B, - beta, - C, - rocblas_datatype_f16_r, - m, - stride_C, - C, - rocblas_datatype_f16_r, - m, - stride_C, - batch, - rocblas_datatype_f32_r, - algo, - 0, - 0); -#else - rocblas_status status = cublasGemmStridedBatchedEx(handle, - op_A, - op_B, - m, - n, - k, - alpha, - A, - hipR16F, - (op_A == rocblas_operation_none) ? m : k, - stride_A, - B, - hipR16F, - (op_B == rocblas_operation_none) ? 
k : n, - stride_B, - beta, - C, - hipR16F, - m, - stride_C, - batch, - hipR32F, - algo); -#endif - -#ifdef __HIP_PLATFORM_HCC__ - if (status != rocblas_status_success) { -#else - if (status != rocblas_status_success) { -#endif - fprintf(stderr, - "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", - m, - n, - k, - (int)status); - return EXIT_FAILURE; - } - - return 0; -} diff --git a/deepspeed/ops/csrc/transformer_bak/dropout_kernels.cu b/deepspeed/ops/csrc/transformer_bak/dropout_kernels.cu deleted file mode 100644 index d1ba135..0000000 --- a/deepspeed/ops/csrc/transformer_bak/dropout_kernels.cu +++ /dev/null @@ -1,868 +0,0 @@ -#include "custom_cuda_layers.h" - -const int unroll_factor = 4; - -__global__ void dropout_kernel(const int N, - const float ratio, - float* out, - const float* Xdata, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. - ratio); - int idx = blockIdx.x * blockDim.x + threadIdx.x; - - curandStatePhilox4_32_10_t state; - curand_init(seed.first, idx, seed.second, &state); - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - float4 rand = curand_uniform4(&state); - uint8_t m[unroll_factor]; - - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - int i = j * unroll_factor; - - mask[i] = (uint8_t)m[0]; - mask[i + 1] = (uint8_t)m[1]; - mask[i + 2] = (uint8_t)m[2]; - mask[i + 3] = (uint8_t)m[3]; - - out[i] = Xdata[i] * scale * m[0]; - out[i + 1] = Xdata[i + 1] * scale * m[1]; - out[i + 2] = Xdata[i + 2] * scale * m[2]; - out[i + 3] = Xdata[i + 3] * scale * m[3]; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - float4 rand = curand_uniform4(&state); - float* rand_data = &(rand.x); - int k = 0; - for (int i = high_index; i < N; i++) { - uint8_t m = (uint8_t)(rand_data[k++] > ratio); - out[i] = Xdata[i] * scale * m; - mask[i] = m; - 
} - } -} - -__global__ void dropout_kernel(const int N, - const float ratio, - __half* out, - const __half* Xdata, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. - ratio); - - int idx = blockIdx.x * blockDim.x + threadIdx.x; - - curandStatePhilox4_32_10_t state; - curand_init(seed.first, idx, seed.second, &state); - -#ifdef __STOCHASTIC_MODE__ - - const __half2 h_scale = __float2half2_rn(scale); - const float2* x_cast = reinterpret_cast(Xdata); - float2* out_cast = reinterpret_cast(out); - uint32_t* mask_cast = reinterpret_cast(mask); - - uint32_t m_32; - uint8_t* m = reinterpret_cast(&m_32); - - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - __half2 mask_h[2]; - float2 mask_f[2]; - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - float2 x_f = x_cast[j]; - __half2* x_h = reinterpret_cast<__half2*>(&x_f); - - float4 rand = curand_uniform4(&state); - - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - float* mask_f_data = &mask_f[0].x; -#pragma unroll - for (int i = 0; i < unroll_factor; i++) mask_f_data[i] = (float)(m[i]); - - mask_h[0] = __float22half2_rn(mask_f[0]); - mask_h[1] = __float22half2_rn(mask_f[1]); - - result_h[0] = x_h[0] * h_scale * mask_h[0]; - result_h[1] = x_h[1] * h_scale * mask_h[1]; - - out_cast[j] = result_f; - - mask_cast[j] = m_32; - } - -#else - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - int i = j * unroll_factor; - - const __half2* vals_half = reinterpret_cast(Xdata + i); - float2 vals_half_f[2]; - vals_half_f[0] = __half22float2(vals_half[0]); - vals_half_f[1] = __half22float2(vals_half[1]); - - uint8_t m[unroll_factor]; - float4 rand = curand_uniform4(&state); - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - out[i] = __float2half(vals_half_f[0].x * scale * m[0]); - out[i + 1] = 
__float2half(vals_half_f[0].y * scale * m[1]); - out[i + 2] = __float2half(vals_half_f[1].x * scale * m[2]); - out[i + 3] = __float2half(vals_half_f[1].y * scale * m[3]); - - mask[i] = m[0]; - mask[i + 1] = m[1]; - mask[i + 2] = m[2]; - mask[i + 3] = m[3]; - } - -#endif - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - float4 rand = curand_uniform4(&state); - float* rand_data = &(rand.x); - int k = 0; - for (int i = high_index; i < N; i++) { - uint8_t m = (uint8_t)(rand_data[k++] > ratio); - out[i] = __float2half((float)Xdata[i] * scale * m); - mask[i] = m; - } - } -} - -__global__ void dropout_kernel_bwd(const int N, - const float ratio, - const float* Xdata, - float* out, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. - ratio); - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - int i = j * unroll_factor; - - out[i] = mask[i] ? Xdata[i] * scale : 0.0; - out[i + 1] = mask[i + 1] ? Xdata[i + 1] * scale : 0.0; - out[i + 2] = mask[i + 2] ? Xdata[i + 2] * scale : 0.0; - out[i + 3] = mask[i + 3] ? Xdata[i + 3] * scale : 0.0; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - for (int i = high_index; i < N; i++) { out[i] = mask[i] ? Xdata[i] * scale : 0.0; } - } -} - -__global__ void dropout_kernel_bwd(const int N, - const float ratio, - const __half* Xdata, - __half* out, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. 
- ratio); - -#ifdef __STOCHASTIC_MODE__ - - const __half2 h_scale = __float2half2_rn(scale); - - const float2* x_cast = reinterpret_cast(Xdata); - float2* out_cast = reinterpret_cast(out); - uint32_t* mask_cast = reinterpret_cast(mask); - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - float2 x_f = x_cast[j]; - __half2* x_h = reinterpret_cast<__half2*>(&x_f); - - uint32_t m_32 = mask_cast[j]; - uint8_t* m = (uint8_t*)&m_32; - - __half2 mask_h[2]; - float2 mask_f[2]; - - float* mask_f_data = &mask_f[0].x; -#pragma unroll - for (int i = 0; i < unroll_factor; i++) mask_f_data[i] = (float)(m[i]); - -#pragma unroll - for (int i = 0; i < 2; i++) mask_h[i] = __float22half2_rn(mask_f[i]); - - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - - result_h[0] = x_h[0] * h_scale * mask_h[0]; - result_h[1] = x_h[1] * h_scale * mask_h[1]; - - out_cast[j] = result_f; - } - -#else - - const __half h_scale = __float2half(scale); - const __half h_zero = __float2half(0.0); - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - int i = j * unroll_factor; - - const __half2* vals_half = reinterpret_cast(Xdata + i); - - uint8_t* m = mask + i; - - float2 vals_half_f[2]; - - vals_half_f[0] = __half22float2(vals_half[0]); - vals_half_f[1] = __half22float2(vals_half[1]); - - out[i] = __float2half(vals_half_f[0].x * scale * m[0]); - out[i + 1] = __float2half(vals_half_f[0].y * scale * m[1]); - out[i + 2] = __float2half(vals_half_f[1].x * scale * m[2]); - out[i + 3] = __float2half(vals_half_f[1].y * scale * m[3]); - } - -#endif - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - for (int i = high_index; i < N; i++) { - out[i] = __float2half((float)Xdata[i] * scale * mask[i]); - } - } -} - -template -void launch_dropout(T* out, - const T* vals, - uint8_t* mask, - int total_count, - int dim, - float ratio, - cudaStream_t stream, - bool bwd) -{ - assert(unroll_factor == 4); - - dim3 
grid_dim = DS_GET_BLOCKS(total_count / unroll_factor); - dim3 block_dim = DS_CUDA_NUM_THREADS; - - if (dim > 512) { - block_dim.x >>= 1; - grid_dim.x <<= 1; - } - uint64_t inc = total_count / grid_dim.x / block_dim.x; - std::pair seed = Context::Instance().IncrementOffset(inc); - if (bwd) - dropout_kernel_bwd<<>>( - total_count, ratio, vals, out, mask, seed); - else - dropout_kernel<<>>( - total_count, ratio, out, vals, mask, seed); -} - -template void launch_dropout(float* out, - const float* vals, - uint8_t* mask, - int total_count, - int dim, - float ratio, - cudaStream_t stream, - bool); -template void launch_dropout(__half* out, - const __half* vals, - uint8_t* mask, - int total_count, - int dim, - float ratio, - cudaStream_t stream, - bool); - -__global__ void dropout_grad_kernel(const int N, const float scale, float* Xdata, uint8_t* mask) -{ - CUDA_1D_KERNEL_LOOP(i, N) { Xdata[i] *= scale * mask[i]; } -} - -__global__ void dropout_grad_kernel(const int N, const float scale, __half* Xdata, uint8_t* mask) -{ - const __half2 h_scale = __float2half2_rn(scale); - float2* x_cast = reinterpret_cast(Xdata); - uint32_t* mask_cast = reinterpret_cast(mask); - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - float2 x_data = x_cast[j]; - uint32_t m_32 = mask_cast[j]; - uint8_t* m = (uint8_t*)&m_32; - - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - -#ifdef __STOCHASTIC_MODE__ - - __half2* x_data_h = reinterpret_cast<__half2*>(&x_data); - __half2 mask_h[2]; - float2 mask_f[2]; - - float* mask_f_data = &mask_f[0].x; -#pragma unroll - for (int i = 0; i < unroll_factor; i++) *(mask_f_data++) = (float)(m[i]); - - mask_h[0] = __float22half2_rn(mask_f[0]); - mask_h[1] = __float22half2_rn(mask_f[1]); - - result_h[0] = x_data_h[0] * h_scale * mask_h[0]; - result_h[1] = x_data_h[1] * h_scale * mask_h[1]; - -#else - - __half* x_data_h = reinterpret_cast<__half*>(&x_data); - float2 result[2]; - - result[0].x = (float)x_data_h[0] * scale * m[0]; - 
result[0].y = (float)x_data_h[1] * scale * m[1]; - result[1].x = (float)x_data_h[2] * scale * m[2]; - result[1].y = (float)x_data_h[3] * scale * m[3]; - - result_h[0] = __float22half2_rn(result[0]); - result_h[1] = __float22half2_rn(result[1]); - -#endif - x_cast[j] = result_f; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - for (int i = high_index; i < N; i++) { - Xdata[i] = __float2half((float)Xdata[i] * scale * mask[i]); - } - } -} - -template -void launch_dropout_grad(T* vals, uint8_t* mask, int total_count, float ratio, cudaStream_t stream) -{ - assert(unroll_factor == 4); - - const float scale = 1. / (1. - ratio); - dropout_grad_kernel<<>>(total_count, scale, vals, mask); -} - -template void launch_dropout_grad(float* vals, - uint8_t* mask, - int total_count, - float ratio, - cudaStream_t stream); -template void launch_dropout_grad(__half* vals, - uint8_t* mask, - int total_count, - float ratio, - cudaStream_t stream); - -__global__ void dropout_grad_kernel(const int N, - const float scale, - const float* Xdata, - float* out, - uint8_t* mask) -{ - CUDA_1D_KERNEL_LOOP(i, N) { out[i] = Xdata[i] * scale * mask[i]; } -} - -__global__ void dropout_grad_kernel(const int N, - const float scale, - const __half* Xdata, - __half* out, - uint8_t* mask) -{ - const float2* x_cast = reinterpret_cast(Xdata); - float2* out_cast = reinterpret_cast(out); - const uint32_t* mask_cast = reinterpret_cast(mask); - - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - float2 x_data = x_cast[j]; - uint32_t m_32 = mask_cast[j]; - uint8_t* m = (uint8_t*)&m_32; - - __half* x_data_h = reinterpret_cast<__half*>(&x_data); - float2 result[2]; - - result[0].x = (float)x_data_h[0] * scale * m[0]; - result[0].y = (float)x_data_h[1] * scale * m[1]; - result[1].x = (float)x_data_h[2] * scale * m[2]; - result[1].y = 
(float)x_data_h[3] * scale * m[3]; - - result_h[0] = __float22half2_rn(result[0]); - result_h[1] = __float22half2_rn(result[1]); - - out_cast[j] = result_f; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - for (int i = high_index; i < N; i++) { - out[i] = __float2half((float)Xdata[i] * scale * mask[i]); - } - } -} - -template -void launch_dropout_grad(T* vals_out, - const T* vals, - uint8_t* mask, - int total_count, - float ratio, - cudaStream_t stream) -{ - assert(unroll_factor == 4); - - const float scale = 1. / (1. - ratio); - dropout_grad_kernel<<>>(total_count, scale, vals, vals_out, mask); -} -template void launch_dropout_grad(float*, - const float* vals, - uint8_t* mask, - int total_count, - float ratio, - cudaStream_t stream); -template void launch_dropout_grad(__half*, - const __half* vals, - uint8_t* mask, - int total_count, - float ratio, - cudaStream_t stream); - -__global__ void dropout_kernel(const int N, - const int dim, - const float ratio, - const float* bias, - float* Xdata, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. 
- ratio); - int idx = blockIdx.x * blockDim.x + threadIdx.x; - int tid = threadIdx.x % (dim / unroll_factor); - - curandStatePhilox4_32_10_t state; - curand_init(seed.first, idx, seed.second, &state); - - float4* Xdata_cast = reinterpret_cast(Xdata); - uint32_t* mask_32 = reinterpret_cast(mask); - const float4* bias_cast = reinterpret_cast(bias); - - CUDA_1D_KERNEL_LOOP(j, N) - { - float4 rand = curand_uniform4(&state); - uint32_t m_32; - uint8_t* m = (uint8_t*)&m_32; - - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - float4 x_data = Xdata_cast[j]; - float4 b_data = bias_cast[j % (dim / unroll_factor)]; - - x_data.x += b_data.x; - x_data.y += b_data.y; - x_data.z += b_data.z; - x_data.w += b_data.w; - - x_data.x = x_data.x * scale * m[0]; - x_data.y = x_data.y * scale * m[1]; - x_data.z = x_data.z * scale * m[2]; - x_data.w = x_data.w * scale * m[3]; - - mask_32[j] = m_32; - Xdata_cast[j] = x_data; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - float4 rand = curand_uniform4(&state); - float* rand_data = &(rand.x); - int k = 0; - for (int i = high_index; i < N; i++) { - float x_data = Xdata[i] + bias[i % dim]; - uint8_t m = (uint8_t)(rand_data[k++] > ratio); - Xdata[i] = x_data * scale * m; - mask[i] = m; - } - } -} - -__global__ void dropout_kernel(const int N, - const int dim, - const float ratio, - const __half* bias, - __half* Xdata, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. 
- ratio); - int idx = blockIdx.x * blockDim.x + threadIdx.x; - int tid = threadIdx.x % (dim / unroll_factor); - - curandStatePhilox4_32_10_t state; - curand_init(seed.first, idx, seed.second, &state); - - float2* Xdata_cast = reinterpret_cast(Xdata); - uint32_t* mask_32 = reinterpret_cast(mask); - const float2* bias_cast = reinterpret_cast(bias); - - CUDA_1D_KERNEL_LOOP(j, N) - { - float4 rand = curand_uniform4(&state); - - float2 data_f; - __half2* data_h = reinterpret_cast<__half2*>(&data_f); - - float2 bias_f; - __half2* bias_h = reinterpret_cast<__half2*>(&bias_f); - - data_f = Xdata_cast[j]; - bias_f = bias_cast[j % (dim / unroll_factor)]; - - float2 data_h_0 = __half22float2(data_h[0]); - float2 data_h_1 = __half22float2(data_h[1]); - - float2 bias_h_0 = __half22float2(bias_h[0]); - float2 bias_h_1 = __half22float2(bias_h[1]); - - data_h_0.x += bias_h_0.x; - data_h_0.y += bias_h_0.y; - data_h_1.x += bias_h_1.x; - data_h_1.y += bias_h_1.y; - - uint32_t m_32; - uint8_t* m = (uint8_t*)&m_32; - - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - data_h_0.x = __float2half(data_h_0.x * scale * m[0]); - data_h_0.y = __float2half(data_h_0.y * scale * m[1]); - data_h_1.x = __float2half(data_h_1.x * scale * m[2]); - data_h_1.y = __float2half(data_h_1.y * scale * m[3]); - - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - - result_h[0] = __float22half2_rn(data_h_0); - result_h[1] = __float22half2_rn(data_h_1); - - Xdata_cast[j] = result_f; - mask_32[j] = m_32; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - float4 rand = curand_uniform4(&state); - float* rand_data = &(rand.x); - int k = 0; - for (int i = high_index; i < N; i++) { - float x_data = (float)Xdata[i] + (float)bias[i % dim]; - uint8_t m = (uint8_t)(rand_data[k++] > ratio); - Xdata[i] = 
__float2half(x_data * scale * m); - mask[i] = m; - } - } -} - -template -void launch_dropout(T* out, - const T* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - cudaStream_t stream) -{ - assert(unroll_factor == 4); - - int total_count = batch * dim / unroll_factor; - - dim3 grid_dim = DS_GET_BLOCKS(total_count); - dim3 block_dim = DS_CUDA_NUM_THREADS; - - uint64_t inc = (batch * dim) / grid_dim.x / block_dim.x; - std::pair seed = Context::Instance().IncrementOffset(inc); - - dropout_kernel<<>>( - total_count, dim, ratio, bias, out, mask, seed); -} - -template void launch_dropout(float*, - const float* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - cudaStream_t stream); -template void launch_dropout(__half*, - const __half* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - cudaStream_t stream); - -__global__ void dropout_kernel(const int N, - const int dim, - const float ratio, - const float* input, - const float* residual, - const float* bias, - float* out, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. 
- ratio); - int idx = blockIdx.x * blockDim.x + threadIdx.x; - int tid = threadIdx.x % (dim / unroll_factor); - - curandStatePhilox4_32_10_t state; - curand_init(seed.first, idx, seed.second, &state); - - float4* out_cast = reinterpret_cast(out); - uint32_t* mask_32 = reinterpret_cast(mask); - - const float4* bias_cast = reinterpret_cast(bias); - const float4* residual_cast = reinterpret_cast(residual); - const float4* input_cast = reinterpret_cast(input); - - CUDA_1D_KERNEL_LOOP(j, N) - { - float4 rand = curand_uniform4(&state); - - uint32_t m_32; - uint8_t* m = (uint8_t*)&m_32; - - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - float4 out_data; - float4 b_data = bias_cast[j % (dim / unroll_factor)]; - float4 res_data = residual_cast[j]; - float4 inp_data = input_cast[j]; - - out_data.x = (b_data.x + inp_data.x); - out_data.y = (b_data.y + inp_data.y); - out_data.z = (b_data.z + inp_data.z); - out_data.w = (b_data.w + inp_data.w); - - out_data.x = out_data.x * scale * m[0]; - out_data.y = out_data.y * scale * m[1]; - out_data.z = out_data.z * scale * m[2]; - out_data.w = out_data.w * scale * m[3]; - - out_data.x += res_data.x; - out_data.y += res_data.y; - out_data.z += res_data.z; - out_data.w += res_data.w; - - mask_32[j] = m_32; - out_cast[j] = out_data; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - float4 rand = curand_uniform4(&state); - float* rand_data = &(rand.x); - int k = 0; - for (int i = high_index; i < N; i++) { - float x_data = input[i] + bias[i % dim]; - uint8_t m = (uint8_t)(rand_data[k++] > ratio); - x_data = x_data * scale * m; - x_data += residual[i]; - - out[i] = x_data; - mask[i] = m; - } - } -} - -__global__ void dropout_kernel(const int N, - const int dim, - const float ratio, - const __half* input, - const __half* residual, - const __half* bias, - 
__half* out, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. - ratio); - int idx = blockIdx.x * blockDim.x + threadIdx.x; - int tid = threadIdx.x % (dim / unroll_factor); - - curandStatePhilox4_32_10_t state; - curand_init(seed.first, idx, seed.second, &state); - - float2* out_cast = reinterpret_cast(out); - uint32_t* mask_32 = reinterpret_cast(mask); - - const float2* bias_cast = reinterpret_cast(bias); - const float2* residual_cast = reinterpret_cast(residual); - const float2* input_cast = reinterpret_cast(input); - - CUDA_1D_KERNEL_LOOP(j, N) - { - float4 rand = curand_uniform4(&state); - - float2 data_f; - __half2* data_h = reinterpret_cast<__half2*>(&data_f); - - float2 bias_f; - __half2* bias_h = reinterpret_cast<__half2*>(&bias_f); - - float2 residual_f; - __half2* residual_h = reinterpret_cast<__half2*>(&residual_f); - - float2 input_f; - __half2* input_h = reinterpret_cast<__half2*>(&input_f); - - bias_f = bias_cast[j % (dim / unroll_factor)]; - residual_f = residual_cast[j]; - input_f = input_cast[j]; - - float2 data_h_0 = __half22float2(data_h[0]); - float2 data_h_1 = __half22float2(data_h[1]); - - float2 bias_h_0 = __half22float2(bias_h[0]); - float2 bias_h_1 = __half22float2(bias_h[1]); - - float2 residual_h_0 = __half22float2(residual_h[0]); - float2 residual_h_1 = __half22float2(residual_h[1]); - - float2 input_h_0 = __half22float2(input_h[0]); - float2 input_h_1 = __half22float2(input_h[1]); - - data_h_0.x = (bias_h_0.x + input_h_0.x); - data_h_0.y = (bias_h_0.y + input_h_0.y); - data_h_1.x = (bias_h_1.x + input_h_1.x); - data_h_1.y = (bias_h_1.y + input_h_1.y); - - uint32_t m_32; - uint8_t* m = (uint8_t*)&m_32; - - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - data_h_0.x = __float2half(data_h_0.x * scale * m[0]); - data_h_0.y = __float2half(data_h_0.y * scale * m[1]); - data_h_1.x = __float2half(data_h_1.x * scale * m[2]); - 
data_h_1.y = __float2half(data_h_1.y * scale * m[3]); - - data_h_0.x += residual_h_0.x; - data_h_0.y += residual_h_0.y; - data_h_1.x += residual_h_1.x; - data_h_1.y += residual_h_1.y; - - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - - result_h[0] = __float22half2_rn(data_h_0); - result_h[1] = __float22half2_rn(data_h_1); - - out_cast[j] = result_f; - mask_32[j] = m_32; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - float4 rand = curand_uniform4(&state); - float* rand_data = &(rand.x); - int k = 0; - for (int i = high_index; i < N; i++) { - float x_data = (float)input[i] + (float)bias[i % dim]; - uint8_t m = (uint8_t)(rand_data[k++] > ratio); - x_data = x_data * scale * m; - x_data += (float)residual[i]; - - out[i] = __float2half(x_data); - mask[i] = m; - } - } -} - -template -void launch_dropout(T* out, - const T* input, - const T* residual, - const T* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - cudaStream_t stream) -{ - assert(unroll_factor == 4); - - int total_count = batch * dim / unroll_factor; - dim3 grid_dim = DS_GET_BLOCKS(total_count); - dim3 block_dim = DS_CUDA_NUM_THREADS; - - uint64_t inc = (batch * dim) / grid_dim.x / block_dim.x; - std::pair seed = Context::Instance().IncrementOffset(inc); - - dropout_kernel<<>>( - total_count, dim, ratio, input, residual, bias, out, mask, seed); -} - -template void launch_dropout(float*, - const float*, - const float* residual, - const float* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - cudaStream_t stream); -template void launch_dropout(__half*, - const __half*, - const __half* residual, - const __half* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - cudaStream_t stream); diff --git a/deepspeed/ops/csrc/transformer_bak/dropout_kernels.hip b/deepspeed/ops/csrc/transformer_bak/dropout_kernels.hip deleted file mode 100644 index 
a4b880a..0000000 --- a/deepspeed/ops/csrc/transformer_bak/dropout_kernels.hip +++ /dev/null @@ -1,870 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! -#include "hip/hip_runtime.h" -#include "custom_hip_layers.h" - -const int unroll_factor = 4; - -__global__ void dropout_kernel(const int N, - const float ratio, - float* out, - const float* Xdata, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. - ratio); - int idx = blockIdx.x * blockDim.x + threadIdx.x; - - hiprandStatePhilox4_32_10_t state; - hiprand_init(seed.first, idx, seed.second, &state); - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - float4 rand = hiprand_uniform4(&state); - uint8_t m[unroll_factor]; - - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - int i = j * unroll_factor; - - mask[i] = (uint8_t)m[0]; - mask[i + 1] = (uint8_t)m[1]; - mask[i + 2] = (uint8_t)m[2]; - mask[i + 3] = (uint8_t)m[3]; - - out[i] = Xdata[i] * scale * m[0]; - out[i + 1] = Xdata[i + 1] * scale * m[1]; - out[i + 2] = Xdata[i + 2] * scale * m[2]; - out[i + 3] = Xdata[i + 3] * scale * m[3]; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - float4 rand = hiprand_uniform4(&state); - float* rand_data = &(rand.x); - int k = 0; - for (int i = high_index; i < N; i++) { - uint8_t m = (uint8_t)(rand_data[k++] > ratio); - out[i] = Xdata[i] * scale * m; - mask[i] = m; - } - } -} - -__global__ void dropout_kernel(const int N, - const float ratio, - __half* out, - const __half* Xdata, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. 
- ratio); - - int idx = blockIdx.x * blockDim.x + threadIdx.x; - - hiprandStatePhilox4_32_10_t state; - hiprand_init(seed.first, idx, seed.second, &state); - -#ifdef __STOCHASTIC_MODE__ - - const __half2 h_scale = __float2half2_rn(scale); - const float2* x_cast = reinterpret_cast(Xdata); - float2* out_cast = reinterpret_cast(out); - uint32_t* mask_cast = reinterpret_cast(mask); - - uint32_t m_32; - uint8_t* m = reinterpret_cast(&m_32); - - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - __half2 mask_h[2]; - float2 mask_f[2]; - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - float2 x_f = x_cast[j]; - __half2* x_h = reinterpret_cast<__half2*>(&x_f); - - float4 rand = hiprand_uniform4(&state); - - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - float* mask_f_data = &mask_f[0].x; -#pragma unroll - for (int i = 0; i < unroll_factor; i++) mask_f_data[i] = (float)(m[i]); - - mask_h[0] = __float22half2_rn(mask_f[0]); - mask_h[1] = __float22half2_rn(mask_f[1]); - - result_h[0] = x_h[0] * h_scale * mask_h[0]; - result_h[1] = x_h[1] * h_scale * mask_h[1]; - - out_cast[j] = result_f; - - mask_cast[j] = m_32; - } - -#else - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - int i = j * unroll_factor; - - const __half2* vals_half = reinterpret_cast(Xdata + i); - float2 vals_half_f[2]; - vals_half_f[0] = __half22float2(vals_half[0]); - vals_half_f[1] = __half22float2(vals_half[1]); - - uint8_t m[unroll_factor]; - float4 rand = hiprand_uniform4(&state); - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - out[i] = __float2half(vals_half_f[0].x * scale * m[0]); - out[i + 1] = __float2half(vals_half_f[0].y * scale * m[1]); - out[i + 2] = __float2half(vals_half_f[1].x * scale * m[2]); - out[i + 3] = __float2half(vals_half_f[1].y * scale * m[3]); - - mask[i] = 
m[0]; - mask[i + 1] = m[1]; - mask[i + 2] = m[2]; - mask[i + 3] = m[3]; - } - -#endif - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - float4 rand = hiprand_uniform4(&state); - float* rand_data = &(rand.x); - int k = 0; - for (int i = high_index; i < N; i++) { - uint8_t m = (uint8_t)(rand_data[k++] > ratio); - out[i] = __float2half((float)Xdata[i] * scale * m); - mask[i] = m; - } - } -} - -__global__ void dropout_kernel_bwd(const int N, - const float ratio, - const float* Xdata, - float* out, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. - ratio); - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - int i = j * unroll_factor; - - out[i] = mask[i] ? Xdata[i] * scale : 0.0; - out[i + 1] = mask[i + 1] ? Xdata[i + 1] * scale : 0.0; - out[i + 2] = mask[i + 2] ? Xdata[i + 2] * scale : 0.0; - out[i + 3] = mask[i + 3] ? Xdata[i + 3] * scale : 0.0; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - for (int i = high_index; i < N; i++) { out[i] = mask[i] ? Xdata[i] * scale : 0.0; } - } -} - -__global__ void dropout_kernel_bwd(const int N, - const float ratio, - const __half* Xdata, - __half* out, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. 
- ratio); - -#ifdef __STOCHASTIC_MODE__ - - const __half2 h_scale = __float2half2_rn(scale); - - const float2* x_cast = reinterpret_cast(Xdata); - float2* out_cast = reinterpret_cast(out); - uint32_t* mask_cast = reinterpret_cast(mask); - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - float2 x_f = x_cast[j]; - __half2* x_h = reinterpret_cast<__half2*>(&x_f); - - uint32_t m_32 = mask_cast[j]; - uint8_t* m = (uint8_t*)&m_32; - - __half2 mask_h[2]; - float2 mask_f[2]; - - float* mask_f_data = &mask_f[0].x; -#pragma unroll - for (int i = 0; i < unroll_factor; i++) mask_f_data[i] = (float)(m[i]); - -#pragma unroll - for (int i = 0; i < 2; i++) mask_h[i] = __float22half2_rn(mask_f[i]); - - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - - result_h[0] = x_h[0] * h_scale * mask_h[0]; - result_h[1] = x_h[1] * h_scale * mask_h[1]; - - out_cast[j] = result_f; - } - -#else - - const __half h_scale = __float2half(scale); - const __half h_zero = __float2half(0.0); - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - int i = j * unroll_factor; - - const __half2* vals_half = reinterpret_cast(Xdata + i); - - uint8_t* m = mask + i; - - float2 vals_half_f[2]; - - vals_half_f[0] = __half22float2(vals_half[0]); - vals_half_f[1] = __half22float2(vals_half[1]); - - out[i] = __float2half(vals_half_f[0].x * scale * m[0]); - out[i + 1] = __float2half(vals_half_f[0].y * scale * m[1]); - out[i + 2] = __float2half(vals_half_f[1].x * scale * m[2]); - out[i + 3] = __float2half(vals_half_f[1].y * scale * m[3]); - } - -#endif - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - for (int i = high_index; i < N; i++) { - out[i] = __float2half((float)Xdata[i] * scale * mask[i]); - } - } -} - -template -void launch_dropout(T* out, - const T* vals, - uint8_t* mask, - int total_count, - int dim, - float ratio, - hipStream_t stream, - bool bwd) -{ - assert(unroll_factor == 4); - - dim3 
grid_dim = DS_GET_BLOCKS(total_count / unroll_factor); - dim3 block_dim = DS_CUDA_NUM_THREADS; - - if (dim > 512) { - block_dim.x >>= 1; - grid_dim.x <<= 1; - } - uint64_t inc = total_count / grid_dim.x / block_dim.x; - std::pair seed = Context::Instance().IncrementOffset(inc); - if (bwd) - hipLaunchKernelGGL(( dropout_kernel_bwd), dim3(grid_dim), dim3(block_dim), 0, stream, - total_count, ratio, vals, out, mask, seed); - else - hipLaunchKernelGGL(( dropout_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, - total_count, ratio, out, vals, mask, seed); -} - -template void launch_dropout(float* out, - const float* vals, - uint8_t* mask, - int total_count, - int dim, - float ratio, - hipStream_t stream, - bool); -template void launch_dropout(__half* out, - const __half* vals, - uint8_t* mask, - int total_count, - int dim, - float ratio, - hipStream_t stream, - bool); - -__global__ void dropout_grad_kernel(const int N, const float scale, float* Xdata, uint8_t* mask) -{ - CUDA_1D_KERNEL_LOOP(i, N) { Xdata[i] *= scale * mask[i]; } -} - -__global__ void dropout_grad_kernel(const int N, const float scale, __half* Xdata, uint8_t* mask) -{ - const __half2 h_scale = __float2half2_rn(scale); - float2* x_cast = reinterpret_cast(Xdata); - uint32_t* mask_cast = reinterpret_cast(mask); - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - float2 x_data = x_cast[j]; - uint32_t m_32 = mask_cast[j]; - uint8_t* m = (uint8_t*)&m_32; - - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - -#ifdef __STOCHASTIC_MODE__ - - __half2* x_data_h = reinterpret_cast<__half2*>(&x_data); - __half2 mask_h[2]; - float2 mask_f[2]; - - float* mask_f_data = &mask_f[0].x; -#pragma unroll - for (int i = 0; i < unroll_factor; i++) *(mask_f_data++) = (float)(m[i]); - - mask_h[0] = __float22half2_rn(mask_f[0]); - mask_h[1] = __float22half2_rn(mask_f[1]); - - result_h[0] = x_data_h[0] * h_scale * mask_h[0]; - result_h[1] = x_data_h[1] * h_scale * mask_h[1]; - -#else - - __half* 
x_data_h = reinterpret_cast<__half*>(&x_data); - float2 result[2]; - - result[0].x = (float)x_data_h[0] * scale * m[0]; - result[0].y = (float)x_data_h[1] * scale * m[1]; - result[1].x = (float)x_data_h[2] * scale * m[2]; - result[1].y = (float)x_data_h[3] * scale * m[3]; - - result_h[0] = __float22half2_rn(result[0]); - result_h[1] = __float22half2_rn(result[1]); - -#endif - x_cast[j] = result_f; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - for (int i = high_index; i < N; i++) { - Xdata[i] = __float2half((float)Xdata[i] * scale * mask[i]); - } - } -} - -template -void launch_dropout_grad(T* vals, uint8_t* mask, int total_count, float ratio, hipStream_t stream) -{ - assert(unroll_factor == 4); - - const float scale = 1. / (1. - ratio); - hipLaunchKernelGGL(( dropout_grad_kernel), dim3(DS_GET_BLOCKS(total_count / unroll_factor)), - dim3(DS_CUDA_NUM_THREADS), - 0, - stream, total_count, scale, vals, mask); -} - -template void launch_dropout_grad(float* vals, - uint8_t* mask, - int total_count, - float ratio, - hipStream_t stream); -template void launch_dropout_grad(__half* vals, - uint8_t* mask, - int total_count, - float ratio, - hipStream_t stream); - -__global__ void dropout_grad_kernel(const int N, - const float scale, - const float* Xdata, - float* out, - uint8_t* mask) -{ - CUDA_1D_KERNEL_LOOP(i, N) { out[i] = Xdata[i] * scale * mask[i]; } -} - -__global__ void dropout_grad_kernel(const int N, - const float scale, - const __half* Xdata, - __half* out, - uint8_t* mask) -{ - const float2* x_cast = reinterpret_cast(Xdata); - float2* out_cast = reinterpret_cast(out); - const uint32_t* mask_cast = reinterpret_cast(mask); - - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - - CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) - { - float2 x_data = x_cast[j]; - uint32_t m_32 = mask_cast[j]; - uint8_t* m = (uint8_t*)&m_32; - - __half* x_data_h = 
reinterpret_cast<__half*>(&x_data); - float2 result[2]; - - result[0].x = (float)x_data_h[0] * scale * m[0]; - result[0].y = (float)x_data_h[1] * scale * m[1]; - result[1].x = (float)x_data_h[2] * scale * m[2]; - result[1].y = (float)x_data_h[3] * scale * m[3]; - - result_h[0] = __float22half2_rn(result[0]); - result_h[1] = __float22half2_rn(result[1]); - - out_cast[j] = result_f; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - for (int i = high_index; i < N; i++) { - out[i] = __float2half((float)Xdata[i] * scale * mask[i]); - } - } -} - -template -void launch_dropout_grad(T* vals_out, - const T* vals, - uint8_t* mask, - int total_count, - float ratio, - hipStream_t stream) -{ - assert(unroll_factor == 4); - - const float scale = 1. / (1. - ratio); - hipLaunchKernelGGL(( dropout_grad_kernel), dim3(DS_GET_BLOCKS(total_count / unroll_factor)), - dim3(DS_CUDA_NUM_THREADS), - 0, - stream, total_count, scale, vals, vals_out, mask); -} -template void launch_dropout_grad(float*, - const float* vals, - uint8_t* mask, - int total_count, - float ratio, - hipStream_t stream); -template void launch_dropout_grad(__half*, - const __half* vals, - uint8_t* mask, - int total_count, - float ratio, - hipStream_t stream); - -__global__ void dropout_kernel(const int N, - const int dim, - const float ratio, - const float* bias, - float* Xdata, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. 
- ratio); - int idx = blockIdx.x * blockDim.x + threadIdx.x; - int tid = threadIdx.x % (dim / unroll_factor); - - hiprandStatePhilox4_32_10_t state; - hiprand_init(seed.first, idx, seed.second, &state); - - float4* Xdata_cast = reinterpret_cast(Xdata); - uint32_t* mask_32 = reinterpret_cast(mask); - const float4* bias_cast = reinterpret_cast(bias); - - CUDA_1D_KERNEL_LOOP(j, N) - { - float4 rand = hiprand_uniform4(&state); - uint32_t m_32; - uint8_t* m = (uint8_t*)&m_32; - - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - float4 x_data = Xdata_cast[j]; - float4 b_data = bias_cast[j % (dim / unroll_factor)]; - - x_data.x += b_data.x; - x_data.y += b_data.y; - x_data.z += b_data.z; - x_data.w += b_data.w; - - x_data.x = x_data.x * scale * m[0]; - x_data.y = x_data.y * scale * m[1]; - x_data.z = x_data.z * scale * m[2]; - x_data.w = x_data.w * scale * m[3]; - - mask_32[j] = m_32; - Xdata_cast[j] = x_data; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - float4 rand = hiprand_uniform4(&state); - float* rand_data = &(rand.x); - int k = 0; - for (int i = high_index; i < N; i++) { - float x_data = Xdata[i] + bias[i % dim]; - uint8_t m = (uint8_t)(rand_data[k++] > ratio); - Xdata[i] = x_data * scale * m; - mask[i] = m; - } - } -} - -__global__ void dropout_kernel(const int N, - const int dim, - const float ratio, - const __half* bias, - __half* Xdata, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. 
- ratio); - int idx = blockIdx.x * blockDim.x + threadIdx.x; - int tid = threadIdx.x % (dim / unroll_factor); - - hiprandStatePhilox4_32_10_t state; - hiprand_init(seed.first, idx, seed.second, &state); - - float2* Xdata_cast = reinterpret_cast(Xdata); - uint32_t* mask_32 = reinterpret_cast(mask); - const float2* bias_cast = reinterpret_cast(bias); - - CUDA_1D_KERNEL_LOOP(j, N) - { - float4 rand = hiprand_uniform4(&state); - - float2 data_f; - __half2* data_h = reinterpret_cast<__half2*>(&data_f); - - float2 bias_f; - __half2* bias_h = reinterpret_cast<__half2*>(&bias_f); - - data_f = Xdata_cast[j]; - bias_f = bias_cast[j % (dim / unroll_factor)]; - - float2 data_h_0 = __half22float2(data_h[0]); - float2 data_h_1 = __half22float2(data_h[1]); - - float2 bias_h_0 = __half22float2(bias_h[0]); - float2 bias_h_1 = __half22float2(bias_h[1]); - - data_h_0.x += bias_h_0.x; - data_h_0.y += bias_h_0.y; - data_h_1.x += bias_h_1.x; - data_h_1.y += bias_h_1.y; - - uint32_t m_32; - uint8_t* m = (uint8_t*)&m_32; - - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - data_h_0.x = __float2half(data_h_0.x * scale * m[0]); - data_h_0.y = __float2half(data_h_0.y * scale * m[1]); - data_h_1.x = __float2half(data_h_1.x * scale * m[2]); - data_h_1.y = __float2half(data_h_1.y * scale * m[3]); - - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - - result_h[0] = __float22half2_rn(data_h_0); - result_h[1] = __float22half2_rn(data_h_1); - - Xdata_cast[j] = result_f; - mask_32[j] = m_32; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - float4 rand = hiprand_uniform4(&state); - float* rand_data = &(rand.x); - int k = 0; - for (int i = high_index; i < N; i++) { - float x_data = (float)Xdata[i] + (float)bias[i % dim]; - uint8_t m = (uint8_t)(rand_data[k++] > ratio); - Xdata[i] = 
__float2half(x_data * scale * m); - mask[i] = m; - } - } -} - -template -void launch_dropout(T* out, - const T* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - hipStream_t stream) -{ - assert(unroll_factor == 4); - - int total_count = batch * dim / unroll_factor; - - dim3 grid_dim = DS_GET_BLOCKS(total_count); - dim3 block_dim = DS_CUDA_NUM_THREADS; - - uint64_t inc = (batch * dim) / grid_dim.x / block_dim.x; - std::pair seed = Context::Instance().IncrementOffset(inc); - - hipLaunchKernelGGL(( dropout_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, - total_count, dim, ratio, bias, out, mask, seed); -} - -template void launch_dropout(float*, - const float* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - hipStream_t stream); -template void launch_dropout(__half*, - const __half* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - hipStream_t stream); - -__global__ void dropout_kernel(const int N, - const int dim, - const float ratio, - const float* input, - const float* residual, - const float* bias, - float* out, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. 
- ratio); - int idx = blockIdx.x * blockDim.x + threadIdx.x; - int tid = threadIdx.x % (dim / unroll_factor); - - hiprandStatePhilox4_32_10_t state; - hiprand_init(seed.first, idx, seed.second, &state); - - float4* out_cast = reinterpret_cast(out); - uint32_t* mask_32 = reinterpret_cast(mask); - - const float4* bias_cast = reinterpret_cast(bias); - const float4* residual_cast = reinterpret_cast(residual); - const float4* input_cast = reinterpret_cast(input); - - CUDA_1D_KERNEL_LOOP(j, N) - { - float4 rand = hiprand_uniform4(&state); - - uint32_t m_32; - uint8_t* m = (uint8_t*)&m_32; - - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - float4 out_data; - float4 b_data = bias_cast[j % (dim / unroll_factor)]; - float4 res_data = residual_cast[j]; - float4 inp_data = input_cast[j]; - - out_data.x = (b_data.x + inp_data.x); - out_data.y = (b_data.y + inp_data.y); - out_data.z = (b_data.z + inp_data.z); - out_data.w = (b_data.w + inp_data.w); - - out_data.x = out_data.x * scale * m[0]; - out_data.y = out_data.y * scale * m[1]; - out_data.z = out_data.z * scale * m[2]; - out_data.w = out_data.w * scale * m[3]; - - out_data.x += res_data.x; - out_data.y += res_data.y; - out_data.z += res_data.z; - out_data.w += res_data.w; - - mask_32[j] = m_32; - out_cast[j] = out_data; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - float4 rand = hiprand_uniform4(&state); - float* rand_data = &(rand.x); - int k = 0; - for (int i = high_index; i < N; i++) { - float x_data = input[i] + bias[i % dim]; - uint8_t m = (uint8_t)(rand_data[k++] > ratio); - x_data = x_data * scale * m; - x_data += residual[i]; - - out[i] = x_data; - mask[i] = m; - } - } -} - -__global__ void dropout_kernel(const int N, - const int dim, - const float ratio, - const __half* input, - const __half* residual, - const __half* 
bias, - __half* out, - uint8_t* mask, - std::pair seed) -{ - const float scale = 1. / (1. - ratio); - int idx = blockIdx.x * blockDim.x + threadIdx.x; - int tid = threadIdx.x % (dim / unroll_factor); - - hiprandStatePhilox4_32_10_t state; - hiprand_init(seed.first, idx, seed.second, &state); - - float2* out_cast = reinterpret_cast(out); - uint32_t* mask_32 = reinterpret_cast(mask); - - const float2* bias_cast = reinterpret_cast(bias); - const float2* residual_cast = reinterpret_cast(residual); - const float2* input_cast = reinterpret_cast(input); - - CUDA_1D_KERNEL_LOOP(j, N) - { - float4 rand = hiprand_uniform4(&state); - - float2 data_f; - __half2* data_h = reinterpret_cast<__half2*>(&data_f); - - float2 bias_f; - __half2* bias_h = reinterpret_cast<__half2*>(&bias_f); - - float2 residual_f; - __half2* residual_h = reinterpret_cast<__half2*>(&residual_f); - - float2 input_f; - __half2* input_h = reinterpret_cast<__half2*>(&input_f); - - bias_f = bias_cast[j % (dim / unroll_factor)]; - residual_f = residual_cast[j]; - input_f = input_cast[j]; - - float2 data_h_0 = __half22float2(data_h[0]); - float2 data_h_1 = __half22float2(data_h[1]); - - float2 bias_h_0 = __half22float2(bias_h[0]); - float2 bias_h_1 = __half22float2(bias_h[1]); - - float2 residual_h_0 = __half22float2(residual_h[0]); - float2 residual_h_1 = __half22float2(residual_h[1]); - - float2 input_h_0 = __half22float2(input_h[0]); - float2 input_h_1 = __half22float2(input_h[1]); - - data_h_0.x = (bias_h_0.x + input_h_0.x); - data_h_0.y = (bias_h_0.y + input_h_0.y); - data_h_1.x = (bias_h_1.x + input_h_1.x); - data_h_1.y = (bias_h_1.y + input_h_1.y); - - uint32_t m_32; - uint8_t* m = (uint8_t*)&m_32; - - m[0] = (uint8_t)(rand.x > ratio); - m[1] = (uint8_t)(rand.y > ratio); - m[2] = (uint8_t)(rand.z > ratio); - m[3] = (uint8_t)(rand.w > ratio); - - data_h_0.x = __float2half(data_h_0.x * scale * m[0]); - data_h_0.y = __float2half(data_h_0.y * scale * m[1]); - data_h_1.x = __float2half(data_h_1.x * scale * 
m[2]); - data_h_1.y = __float2half(data_h_1.y * scale * m[3]); - - data_h_0.x += residual_h_0.x; - data_h_0.y += residual_h_0.y; - data_h_1.x += residual_h_1.x; - data_h_1.y += residual_h_1.y; - - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - - result_h[0] = __float22half2_rn(data_h_0); - result_h[1] = __float22half2_rn(data_h_1); - - out_cast[j] = result_f; - mask_32[j] = m_32; - } - int high_index = - ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; - if (N > high_index) { - float4 rand = hiprand_uniform4(&state); - float* rand_data = &(rand.x); - int k = 0; - for (int i = high_index; i < N; i++) { - float x_data = (float)input[i] + (float)bias[i % dim]; - uint8_t m = (uint8_t)(rand_data[k++] > ratio); - x_data = x_data * scale * m; - x_data += (float)residual[i]; - - out[i] = __float2half(x_data); - mask[i] = m; - } - } -} - -template -void launch_dropout(T* out, - const T* input, - const T* residual, - const T* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - hipStream_t stream) -{ - assert(unroll_factor == 4); - - int total_count = batch * dim / unroll_factor; - dim3 grid_dim = DS_GET_BLOCKS(total_count); - dim3 block_dim = DS_CUDA_NUM_THREADS; - - uint64_t inc = (batch * dim) / grid_dim.x / block_dim.x; - std::pair seed = Context::Instance().IncrementOffset(inc); - - hipLaunchKernelGGL(( dropout_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, - total_count, dim, ratio, input, residual, bias, out, mask, seed); -} - -template void launch_dropout(float*, - const float*, - const float* residual, - const float* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - hipStream_t stream); -template void launch_dropout(__half*, - const __half*, - const __half* residual, - const __half* bias, - uint8_t* mask, - int batch, - int dim, - float ratio, - hipStream_t stream); diff --git a/deepspeed/ops/csrc/transformer_bak/ds_transformer_cuda.cpp 
b/deepspeed/ops/csrc/transformer_bak/ds_transformer_cuda.cpp deleted file mode 100644 index 18e7fff..0000000 --- a/deepspeed/ops/csrc/transformer_bak/ds_transformer_cuda.cpp +++ /dev/null @@ -1,1051 +0,0 @@ -#include - -#include -#include -#include -#include -#include -#include -#include "Timer.h" -#include "context.h" -#include "cublas_wrappers.h" -#include "custom_cuda_layers.h" -#include "ds_transformer_cuda.h" - -static std::unordered_map> s_transformer_layers; - -const int init_seq_length = 128; - -// C++ interface - -template -unsigned get_workspace_size(unsigned maxBatchSize, - unsigned seq_len, - unsigned hidden_size, - unsigned intermediate_size, - unsigned heads, - bool training, - bool gelu_checkpoint) -{ - unsigned workSpacesize = 4 * (size_t(maxBatchSize) * seq_len * hidden_size); - if (training) { - workSpacesize += 2 * (size_t(maxBatchSize) * seq_len * hidden_size); - workSpacesize += ((std::max)((size_t(maxBatchSize) * seq_len * intermediate_size), - 2 * (size_t(maxBatchSize) * heads * seq_len * seq_len))); - if (gelu_checkpoint) - workSpacesize += 2 * (size_t(maxBatchSize) * seq_len * intermediate_size); - } - return workSpacesize; // * sizeof(T); -} - -// NOTE: AT_ASSERT has become AT_CHECK on master after 0.4. 
-#define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor") -#define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous") -#define CHECK_INPUT(x) \ - CHECK_CUDA(x); \ - CHECK_CONTIGUOUS(x) - -template -BertTransformerLayer::BertTransformerLayer(unsigned layer_id, - unsigned batch_size, - unsigned hidden_size, - unsigned num_heads, - unsigned intermediate_size, - unsigned seq_length, - float attn_prob_dropout_ratio, - float hidden_output_dropout_ratio, - float layer_norm_eps, - bool pre_or_postLayerNorm, - const std::vector>& gemm_algos, - bool attn_dropout_checkpoint, - bool normalize_invertible, - bool gelu_checkpoint, - bool stochastic_mode) - : _layer_id(layer_id), - _batch_size(batch_size), - _hidden_size(hidden_size), - _heads(num_heads), - _intermediate_size(intermediate_size), - _seq_length(seq_length), - _training(true), - _pre_or_postLayerNorm(pre_or_postLayerNorm), - _attn_dropout_checkpoint(attn_dropout_checkpoint), - _normalize_invertible(normalize_invertible), - _gelu_checkpoint(gelu_checkpoint), - _stochastic_mode(stochastic_mode), - _stream(Context::Instance().GetCurrentStream()), - _cublasHandle(Context::Instance().GetCublasHandle()), - _qkv_linear(typename FeedForward::Config(batch_size * seq_length, - 3 * hidden_size, - hidden_size, - gemm_algos[0])), - _attn_out_linear(typename FeedForward::Config(batch_size * seq_length, - hidden_size, - hidden_size, - gemm_algos[0])), - _attn_layer_norm(typename Normalize_Layer::Config(batch_size, - seq_length, - hidden_size, - layer_norm_eps, - true, - !normalize_invertible)), - _layer_norm(typename Normalize_Layer::Config(batch_size, - seq_length, - hidden_size, - layer_norm_eps, - true, - !normalize_invertible)), - _ff1(typename FeedForward::Config(batch_size * seq_length, - _intermediate_size, - hidden_size, - gemm_algos[1])), - _ff2(typename FeedForward::Config(batch_size * seq_length, - hidden_size, - _intermediate_size, - gemm_algos[2])), - _softmax(typename 
Softmax::Config(batch_size, num_heads, seq_length)), - _gelu(typename Gelu::Config(_intermediate_size)), - _attn_prob_dropout(typename Dropout::Config(attn_prob_dropout_ratio, _seq_length)), - _attn_output_dropout(typename Dropout::Config(hidden_output_dropout_ratio, _hidden_size)), - _layer_output_dropout(typename Dropout::Config(hidden_output_dropout_ratio, _hidden_size)), - _attn_scores(typename StridedBatchGemm::Config(_batch_size * _heads, - _seq_length, - _seq_length, - _hidden_size / _heads, - //aiss debug 0506 - //(T(1.0) / T(sqrt(_hidden_size / _heads))), - (T(1.0 / (sqrt(_hidden_size / _heads)))), - T(0.0), - CUBLAS_OP_T, - CUBLAS_OP_N, - gemm_algos[3])), - _attn_context(typename StridedBatchGemm::Config(_batch_size * _heads, - _hidden_size / _heads, - _seq_length, - _seq_length, - T(1.0), - T(0.0), - CUBLAS_OP_N, - CUBLAS_OP_N, - gemm_algos[4])) -{ - assert(_hidden_size % _heads == 0); - - Initialize(); -} - -template -BertTransformerLayer::~BertTransformerLayer() -{ -} - -template -void BertTransformerLayer::Initialize() -{ -#ifndef __HIP_PLATFORM_HCC__ - if (std::is_same::value) cublasSetMathMode(_cublasHandle, CUBLAS_TENSOR_OP_MATH); -#endif -} - -template -void BertTransformerLayer::Forward(unsigned bsz, - const T* input_ptr, - const T* input_mask_ptr, - const T* attn_qkvw_ptr, - const T* attn_qkvb_ptr, - const T* attn_ow_ptr, - const T* attn_ob_ptr, - const T* attn_nw_ptr, - const T* attn_nb_ptr, - const T* inter_w_ptr, - const T* inter_b_ptr, - const T* output_w_ptr, - const T* output_b_ptr, - const T* norm_w_ptr, - const T* norm_b_ptr, - T* out_ptr, - T* inp_norm_ptr, - T* q_tf_ptr, - T* k_tf_ptr, - T* v_tf_ptr, - T* soft_out_ptr, - T* ctx_bufB_ptr, - T* attn_o_inp_ptr, - T* add_res_ptr, - T* ff1_inp_ptr, - T* gelu_inp_ptr, - T* ff2_inp_ptr) -{ - cublasSetStream(_cublasHandle, _stream); - - if (!_stochastic_mode) cudaStreamSynchronize(_stream); - - T* workspace = static_cast(Context::Instance().GetWorkSpace()); - size_t small_buf_size = bsz * 
_seq_length * _hidden_size; - T* buf_0 = workspace; - T* buf_1 = buf_0 + small_buf_size; - T* buf_2 = buf_1; - - if (_normalize_invertible) { - add_res_ptr = buf_1 + 3 * small_buf_size; - buf_2 = add_res_ptr; - } - if (_gelu_checkpoint) buf_2 += small_buf_size; - if (_attn_dropout_checkpoint) - ctx_bufB_ptr = - (_gelu_checkpoint ? (buf_2 + (_intermediate_size / _hidden_size) * small_buf_size) - : (buf_1 + 4 * small_buf_size)); - - int bsz_seq = bsz * _seq_length; - - if (_pre_or_postLayerNorm) { - if (_layer_norm.UseMean()) - _layer_norm.ForwardCheckpoint( - bsz_seq, inp_norm_ptr, input_ptr, norm_w_ptr, norm_b_ptr, _stream, true); - - else - _layer_norm.Forward( - bsz_seq, inp_norm_ptr, input_ptr, norm_w_ptr, norm_b_ptr, _stream, true); - } - - if (_pre_or_postLayerNorm) - _qkv_linear.Forward(bsz_seq, inp_norm_ptr, attn_qkvw_ptr, buf_0, _cublasHandle); - else - _qkv_linear.Forward(bsz_seq, input_ptr, attn_qkvw_ptr, buf_0, _cublasHandle); - - launch_bias_add_transform_0213( - q_tf_ptr, buf_0, attn_qkvb_ptr, bsz, _seq_length, _hidden_size, _heads, _stream, 3); - - int bsz_heads = bsz * _heads; - - // attention scores - _attn_scores.Forward(bsz_heads, soft_out_ptr, k_tf_ptr, q_tf_ptr, _cublasHandle); - - // Softmax + Mask - _softmax.Forward(bsz, soft_out_ptr, input_mask_ptr, _stream); - - // attn prob dropout. - _attn_prob_dropout.Forward(bsz_heads * _seq_length, ctx_bufB_ptr, soft_out_ptr, _stream); - - // attention context - _attn_context.Forward(bsz_heads, buf_1, v_tf_ptr, ctx_bufB_ptr, _cublasHandle); - - launch_transform4d_0213( - attn_o_inp_ptr, buf_1, bsz, _heads, _seq_length, _hidden_size, _stream, 1); - - if (_pre_or_postLayerNorm) - _attn_out_linear.Forward(bsz_seq, attn_o_inp_ptr, attn_ow_ptr, buf_1, _cublasHandle); - else - _attn_out_linear.Forward(bsz_seq, attn_o_inp_ptr, attn_ow_ptr, ff1_inp_ptr, _cublasHandle); - - // attn output dropout. 
- if (_pre_or_postLayerNorm) - _attn_output_dropout.ForwardWithBias( - bsz_seq, add_res_ptr, buf_1, input_ptr, attn_ob_ptr, _stream); - else - _attn_output_dropout.ForwardWithBias( - bsz_seq, add_res_ptr, ff1_inp_ptr, input_ptr, attn_ob_ptr, _stream); - - if (_pre_or_postLayerNorm) { - if (_attn_layer_norm.UseMean()) - _attn_layer_norm.ForwardCheckpoint( - bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true); - else - _attn_layer_norm.Forward( - bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true); - } else { - if (_attn_layer_norm.UseMean()) - _attn_layer_norm.ForwardCheckpoint( - bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true); - else - _attn_layer_norm.Forward( - bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true); - } - - _ff1.Forward(bsz_seq, - ff1_inp_ptr, - inter_w_ptr, - (_gelu_checkpoint ? ff2_inp_ptr : gelu_inp_ptr), - _cublasHandle); - - _gelu.ForwardWithBiasAdd(bsz_seq, - (_gelu_checkpoint ? ff2_inp_ptr : gelu_inp_ptr), - inter_b_ptr, - (_gelu_checkpoint ? buf_2 : ff2_inp_ptr), - _stream); - - _ff2.Forward( - bsz_seq, (_gelu_checkpoint ? buf_2 : ff2_inp_ptr), output_w_ptr, out_ptr, _cublasHandle); - - // layer output dropout. 
- if (_pre_or_postLayerNorm) - _layer_output_dropout.ForwardWithBias( - bsz_seq, out_ptr, out_ptr, add_res_ptr, output_b_ptr, _stream); - else - _layer_output_dropout.ForwardWithBias( - bsz_seq, inp_norm_ptr, out_ptr, ff1_inp_ptr, output_b_ptr, _stream); - - if (!_pre_or_postLayerNorm) { - if (_layer_norm.UseMean()) - _layer_norm.ForwardCheckpoint( - bsz_seq, out_ptr, inp_norm_ptr, norm_w_ptr, norm_b_ptr, _stream, true); - else - _layer_norm.Forward( - bsz_seq, out_ptr, inp_norm_ptr, norm_w_ptr, norm_b_ptr, _stream, true); - } -} - -template -void BertTransformerLayer::Backward(unsigned bsz, - const T* grad_output_ptr, - const T* input_ptr, - const T* output_ptr, - const T* inp_norm_ptr, - const T* q_tf_ptr, - const T* k_tf_ptr, - const T* v_tf_ptr, - const T* soft_out_ptr, - const T* ctx_bufB_ptr, - const T* attn_o_inp_ptr, - const T* add_res_ptr, - const T* ff1_inp_ptr, - const T* gelu_inp_ptr, - const T* ff2_inp_ptr, - const T* input_mask_ptr, - const T* attn_qkvw_ptr, - const T* attn_ow_ptr, - const T* attn_nw_ptr, - const T* attn_nb_ptr, - const T* inter_w_ptr, - const T* inter_b_ptr, - const T* output_w_ptr, - const T* norm_w_ptr, - const T* norm_b_ptr, - - T* grad_input_ptr, - T* grad_attn_qkvw_ptr, - T* grad_attn_qkvb_ptr, - T* grad_attn_ow_ptr, - T* grad_attn_ob_ptr, - T* grad_attn_nw_ptr, - T* grad_attn_nb_ptr, - T* grad_inter_w_ptr, - T* grad_inter_b_ptr, - T* grad_output_w_ptr, - T* grad_output_b_ptr, - T* grad_norm_w_ptr, - T* grad_norm_b_ptr) -{ - cublasSetStream(_cublasHandle, _stream); - - if (!_stochastic_mode) cudaStreamSynchronize(_stream); - - T* workspace = static_cast(Context::Instance().GetWorkSpace()); - size_t small_buf_size = bsz * _seq_length * _hidden_size; - T* buf_0 = workspace; - T* buf_1 = buf_0 + small_buf_size; - T* buf_2 = buf_1 + small_buf_size; - T* buf_3 = buf_2 + small_buf_size; - - T* ff2_buf = (_gelu_checkpoint ? 
buf_3 + (bsz * _seq_length * _intermediate_size) - : buf_3 + small_buf_size); - T* ctx_bufB_ptr_recomp = ff2_buf + (_seq_length * _seq_length * bsz * _heads); - - cudaStream_t streams[2] = {_stream, _stream}; - - int bsz_seq = bsz * _seq_length; - int bsz_heads = bsz * _heads; - - if (!_pre_or_postLayerNorm) { - if (_layer_norm.UseMean()) - _layer_norm.Backward(bsz_seq, - grad_output_ptr, - norm_w_ptr, - grad_norm_w_ptr, - grad_norm_b_ptr, - streams, - buf_1, - inp_norm_ptr); - - else - _layer_norm.Backward(bsz_seq, - grad_output_ptr, - norm_w_ptr, - norm_b_ptr, - grad_norm_w_ptr, - grad_norm_b_ptr, - streams, - buf_1, - output_ptr); - } - - if (_pre_or_postLayerNorm) - _layer_output_dropout.Backward(bsz_seq, buf_0, grad_output_ptr, _stream); - else - _layer_output_dropout.Backward(bsz_seq, buf_0, buf_1, _stream); - - const T* layer_dropout_buf = _layer_output_dropout.HasDropout() - ? buf_0 - : (_pre_or_postLayerNorm ? grad_output_ptr : buf_1); - - if (_gelu_checkpoint) - _gelu.ForwardWithBiasAdd(bsz_seq, ff2_inp_ptr, inter_b_ptr, buf_2, _stream); - _ff2.Backward(bsz_seq, - layer_dropout_buf, - (_gelu_checkpoint ? buf_2 : ff2_inp_ptr), - output_w_ptr, - grad_output_w_ptr, - grad_output_b_ptr, - _cublasHandle, - _stream, - ff2_buf); - - _gelu.Backward( - bsz_seq, ff2_buf, (_gelu_checkpoint ? 
ff2_inp_ptr : gelu_inp_ptr), inter_b_ptr, _stream); - - _ff1.Backward(bsz_seq, - ff2_buf, - ff1_inp_ptr, - inter_w_ptr, - grad_inter_w_ptr, - grad_inter_b_ptr, - _cublasHandle, - _stream, - buf_3); - - if (!_pre_or_postLayerNorm) - launch_fused_add2(buf_2, buf_3, buf_1, bsz, _seq_length, _hidden_size, _stream); - - if (_pre_or_postLayerNorm) { - if (_attn_layer_norm.UseMean()) - _attn_layer_norm.BackwardFusedAdd(bsz_seq, - buf_3, - grad_output_ptr, - attn_nw_ptr, - grad_attn_nw_ptr, - grad_attn_nb_ptr, - streams, - buf_0, - add_res_ptr); - - else - _attn_layer_norm.BackwardFusedAdd(bsz_seq, - buf_3, - grad_output_ptr, - attn_nw_ptr, - attn_nb_ptr, - grad_attn_nw_ptr, - grad_attn_nb_ptr, - streams, - buf_0, - ff1_inp_ptr); - } else { - if (_attn_layer_norm.UseMean()) - _attn_layer_norm.Backward(bsz_seq, - buf_2, - attn_nw_ptr, - grad_attn_nw_ptr, - grad_attn_nb_ptr, - streams, - buf_0, - add_res_ptr); - - else - _attn_layer_norm.Backward(bsz_seq, - buf_2, - attn_nw_ptr, - attn_nb_ptr, - grad_attn_nw_ptr, - grad_attn_nb_ptr, - streams, - buf_0, - ff1_inp_ptr); - } - - _attn_output_dropout.Backward(bsz_seq, buf_2, buf_0, _stream); - - T* attn_output_dropout_buf = _attn_output_dropout.HasDropout() ? buf_2 : buf_0; - - _attn_out_linear.Backward(bsz_seq, - attn_output_dropout_buf, - attn_o_inp_ptr, - attn_ow_ptr, - grad_attn_ow_ptr, - grad_attn_ob_ptr, - _cublasHandle, - _stream, - buf_1); - - launch_transform_0213(buf_2, buf_1, bsz, _seq_length, _hidden_size, _heads, _stream); - - if (_attn_prob_dropout.HasDropout()) { - if (_attn_dropout_checkpoint) - _attn_prob_dropout.Forward( - bsz_heads * _seq_length, ctx_bufB_ptr_recomp, soft_out_ptr, _stream, true); - - _attn_context.Backward(bsz_heads, - buf_2, - v_tf_ptr, - (_attn_dropout_checkpoint ? 
ctx_bufB_ptr_recomp : ctx_bufB_ptr), - _cublasHandle, - buf_3, - ff2_buf); - } else - _attn_context.Backward( - bsz_heads, buf_2, v_tf_ptr, soft_out_ptr, _cublasHandle, buf_3, ff2_buf); - - _attn_prob_dropout.Backward(bsz_heads * _seq_length, ff2_buf, _stream); - - _softmax.Backward(bsz, ff2_buf, soft_out_ptr, _stream); - - _attn_scores.Backward(bsz_heads, ff2_buf, k_tf_ptr, q_tf_ptr, _cublasHandle, buf_2, buf_1); - - launch_transform4d_0213(ff2_buf, buf_1, bsz, _heads, _seq_length, _hidden_size, _stream, 3); - - if (_pre_or_postLayerNorm) - _qkv_linear.Backward(bsz_seq, - ff2_buf, - inp_norm_ptr, - attn_qkvw_ptr, - grad_attn_qkvw_ptr, - grad_attn_qkvb_ptr, - _cublasHandle, - _stream, - buf_2); - else - _qkv_linear.Backward(bsz_seq, - ff2_buf, - input_ptr, - attn_qkvw_ptr, - grad_attn_qkvw_ptr, - grad_attn_qkvb_ptr, - _cublasHandle, - _stream, - buf_2); - - if (_pre_or_postLayerNorm) { - if (_layer_norm.UseMean()) - _layer_norm.BackwardFusedAdd(bsz_seq, - buf_2, - buf_0, - norm_w_ptr, - grad_norm_w_ptr, - grad_norm_b_ptr, - streams, - grad_input_ptr, - input_ptr); - - else - _layer_norm.BackwardFusedAdd(bsz_seq, - buf_2, - buf_0, - norm_w_ptr, - norm_b_ptr, - grad_norm_w_ptr, - grad_norm_b_ptr, - streams, - grad_input_ptr, - inp_norm_ptr); - } else - launch_fused_add2(grad_input_ptr, buf_2, buf_0, bsz, _seq_length, _hidden_size, _stream); -} - -template -void BertTransformerLayer::SetTrainingMode(bool training) -{ - // Dropout will be skipped when not in training model. 
- _attn_prob_dropout.SetTrainingMode(training); - _attn_output_dropout.SetTrainingMode(training); - _layer_output_dropout.SetTrainingMode(training); -} - -template -void BertTransformerLayer::SetIntermediateBuffers(uint8_t* attn_prob_dropout_mask_ptr, - uint8_t* attn_output_dropout_mask_ptr, - uint8_t* layer_output_dropout_mask_ptr, - T* attn_layer_norm_var, - T* attn_layer_norm_mean, - T* layer_norm_var, - T* layer_norm_mean) -{ - _attn_prob_dropout.SetMask(attn_prob_dropout_mask_ptr); - _attn_output_dropout.SetMask(attn_output_dropout_mask_ptr); - _layer_output_dropout.SetMask(layer_output_dropout_mask_ptr); - - _attn_layer_norm.SetVar(attn_layer_norm_var); - _attn_layer_norm.SetMean(attn_layer_norm_mean); - _layer_norm.SetVar(layer_norm_var); - _layer_norm.SetMean(layer_norm_mean); -} - -template -void BertTransformerLayer::SetSeqLength(unsigned seq_len) -{ - _seq_length = seq_len; - - _softmax.SetSeqLength(_seq_length); - _attn_prob_dropout.SetDimension(_seq_length); - _attn_scores.SetConfig(_seq_length, _seq_length, _hidden_size / _heads); - _attn_context.SetConfig(_hidden_size / _heads, _seq_length, _seq_length); -} - -template -int create_transformer_layer(unsigned layer_id, - unsigned batch_size, - unsigned hidden_dim, - unsigned num_heads, - unsigned intermediate_size, - float attn_dropout_ratio, - float hidden_dropout_ratio, - float layer_norm_eps, - int seed, - bool pre_or_postLayerNorm, - bool test_gemm, - bool attn_dropout_checkpoint, - bool normalize_invertible, - bool gelu_checkpoint, - bool stochastic_mode) -{ - Context::Instance().SetSeed(seed); - Context::Instance().TestGemmFP16( - test_gemm, batch_size, init_seq_length, num_heads, hidden_dim / num_heads); - - auto layer = std::make_shared>(layer_id, - batch_size, - hidden_dim, - num_heads, - intermediate_size, - init_seq_length, - attn_dropout_ratio, - hidden_dropout_ratio, - layer_norm_eps, - pre_or_postLayerNorm, - Context::Instance().GetGemmAlgos(), - attn_dropout_checkpoint, - 
normalize_invertible, - gelu_checkpoint, - stochastic_mode); - - s_transformer_layers[layer_id] = layer; - - std::string dtype = (std::is_same::value) ? "half" : "float"; - - std::cout << "layer #" << layer_id << " is created with date type [" << dtype << "]." - << std::endl; - - return 0; -} - -template -std::vector ds_transformer_forward(unsigned layer_id, - const torch::Tensor& input, - const torch::Tensor& input_mask, - const torch::Tensor& attn_qkvw, - const torch::Tensor& attn_qkvb, - const torch::Tensor& attn_ow, - const torch::Tensor& attn_ob, - const torch::Tensor& attn_nw, - const torch::Tensor& attn_nb, - const torch::Tensor& inter_w, - const torch::Tensor& inter_b, - const torch::Tensor& output_w, - const torch::Tensor& output_b, - const torch::Tensor& norm_w, - const torch::Tensor& norm_b, - bool training_mode, - bool prelayernorm, - bool attn_dropout_checkpoint, - bool normalize_invertible, - bool gelu_checkpoint) -{ - CHECK_INPUT(input); - CHECK_INPUT(input_mask); - CHECK_INPUT(attn_qkvw); - CHECK_INPUT(attn_qkvb); - CHECK_INPUT(attn_ow); - CHECK_INPUT(attn_ob); - CHECK_INPUT(attn_nw); - CHECK_INPUT(attn_nb); - CHECK_INPUT(inter_w); - CHECK_INPUT(inter_b); - CHECK_INPUT(output_w); - CHECK_INPUT(output_b); - CHECK_INPUT(norm_w); - CHECK_INPUT(norm_b); - - unsigned bsz = input.size(0); - - const T* input_ptr = (const T*)input.data_ptr(); - const T* input_mask_ptr = (const T*)input_mask.data_ptr(); - const T* attn_qkvw_ptr = (const T*)attn_qkvw.data_ptr(); - const T* attn_qkvb_ptr = (const T*)attn_qkvb.data_ptr(); - const T* attn_ow_ptr = (const T*)attn_ow.data_ptr(); - const T* attn_ob_ptr = (const T*)attn_ob.data_ptr(); - const T* attn_nw_ptr = (const T*)attn_nw.data_ptr(); - const T* attn_nb_ptr = (const T*)attn_nb.data_ptr(); - const T* inter_w_ptr = (const T*)inter_w.data_ptr(); - const T* inter_b_ptr = (const T*)inter_b.data_ptr(); - const T* output_w_ptr = (const T*)output_w.data_ptr(); - const T* output_b_ptr = (const T*)output_b.data_ptr(); - 
const T* norm_w_ptr = (const T*)norm_w.data_ptr(); - const T* norm_b_ptr = (const T*)norm_b.data_ptr(); - - auto output = torch::empty_like(input); - T* out_ptr = (T*)output.data_ptr(); - - auto options = torch::TensorOptions() - .dtype(input.options().dtype()) - .layout(torch::kStrided) - .device(torch::kCUDA) - .requires_grad(true); - - auto uint8_options = torch::TensorOptions() - .dtype(torch::kInt8) - .layout(torch::kStrided) - .device(torch::kCUDA) - .requires_grad(false); - - std::shared_ptr> layer = - std::static_pointer_cast>(s_transformer_layers[layer_id]); - - unsigned seq_len = layer->GetSeqLength(); - if (input.size(1) != seq_len) { - seq_len = input.size(1); - layer->SetSeqLength(seq_len); - } - - auto workspace = torch::empty({get_workspace_size(bsz, - seq_len, - layer->GetHiddenSize(), - layer->GetIntermediateSize(), - layer->GetNumHeads(), - layer->IsTrainingMode(), - layer->GeluCheckpoint())}, - options); - Context::Instance().SetWorkSpace((T*)workspace.data_ptr()); - - auto inp_norm = ((prelayernorm || !normalize_invertible) ? torch::empty_like(input) : output); - auto add_res = (normalize_invertible ? 
inp_norm : torch::empty_like(input)); - auto attn_o_inp = torch::empty_like(input); - auto qkv_tf = torch::empty({(bsz * seq_len), output_w.size(0) * 3}, options); - - auto attn_prob_dropout_mask = - torch::empty({(bsz * layer->GetNumHeads() * seq_len), seq_len}, uint8_options); - auto attn_output_dropout_mask = - torch::empty({(bsz * seq_len), layer->GetHiddenSize()}, uint8_options); - auto layer_output_dropout_mask = - torch::empty({(bsz * seq_len), layer->GetHiddenSize()}, uint8_options); - - auto attn_layer_norm_var = torch::empty({(bsz * seq_len)}, options); - auto attn_layer_norm_mean = torch::empty({(bsz * seq_len)}, options); - auto layer_norm_var = torch::empty({(bsz * seq_len)}, options); - auto layer_norm_mean = torch::empty({(bsz * seq_len)}, options); - - T* inp_norm_ptr = (T*)inp_norm.data_ptr(); - T* add_res_ptr = (T*)add_res.data_ptr(); - T* q_tf_ptr = (T*)qkv_tf.data_ptr(); - T* k_tf_ptr = q_tf_ptr + (bsz * seq_len * output_w.size(0)); //(T*)k_tf.data_ptr(); - T* v_tf_ptr = k_tf_ptr + (bsz * seq_len * output_w.size(0)); //(T*)v_tf.data_ptr(); - T* attn_o_inp_ptr = (T*)attn_o_inp.data_ptr(); - - torch::Tensor ff2_inp = torch::empty({(bsz * seq_len), output_w.size(1)}, options); - torch::Tensor gelu_inp = - (gelu_checkpoint ? ff2_inp : torch::empty({(bsz * seq_len), output_w.size(1)}, options)); - auto ff1_inp = torch::empty_like(input); - T* ff2_inp_ptr = (T*)ff2_inp.data_ptr(); - T* gelu_inp_ptr = (T*)gelu_inp.data_ptr(); - T* ff1_inp_ptr = (T*)ff1_inp.data_ptr(); - - torch::Tensor soft_out = - torch::empty({(bsz * layer->GetNumHeads() * seq_len), seq_len}, options); - torch::Tensor ctx_bufB = - (attn_dropout_checkpoint - ? 
soft_out - : torch::empty({(bsz * layer->GetNumHeads() * seq_len), seq_len}, options)); - T* soft_out_ptr = (T*)soft_out.data_ptr(); - T* ctx_bufB_ptr = (T*)ctx_bufB.data_ptr(); - - layer->SetTrainingMode(training_mode); - layer->SetIntermediateBuffers((uint8_t*)attn_prob_dropout_mask.data_ptr(), - (uint8_t*)attn_output_dropout_mask.data_ptr(), - (uint8_t*)layer_output_dropout_mask.data_ptr(), - (T*)attn_layer_norm_var.data_ptr(), - (T*)attn_layer_norm_mean.data_ptr(), - (T*)layer_norm_var.data_ptr(), - (T*)layer_norm_mean.data_ptr()); - - layer->Forward(bsz, - input_ptr, - input_mask_ptr, - attn_qkvw_ptr, - attn_qkvb_ptr, - attn_ow_ptr, - attn_ob_ptr, - attn_nw_ptr, - attn_nb_ptr, - inter_w_ptr, - inter_b_ptr, - output_w_ptr, - output_b_ptr, - norm_w_ptr, - norm_b_ptr, - out_ptr, - inp_norm_ptr, - q_tf_ptr, - k_tf_ptr, - v_tf_ptr, - soft_out_ptr, - ctx_bufB_ptr, - attn_o_inp_ptr, - add_res_ptr, - ff1_inp_ptr, - gelu_inp_ptr, - ff2_inp_ptr); - - return {output, - inp_norm, - qkv_tf, - soft_out, - ctx_bufB, - attn_o_inp, - add_res, - ff1_inp, - gelu_inp, - ff2_inp, - attn_prob_dropout_mask, - attn_output_dropout_mask, - layer_output_dropout_mask, - attn_layer_norm_var, - attn_layer_norm_mean, - layer_norm_var, - layer_norm_mean}; -} - -template -std::vector ds_transformer_backward(unsigned layer_id, - const torch::Tensor& grad_output, - const torch::Tensor& output, - const torch::Tensor& inp_norm, - const torch::Tensor& qkv_tf, - const torch::Tensor& soft_out, - const torch::Tensor& ctx_bufB, - const torch::Tensor& attn_o_inp, - const torch::Tensor& add_res, - const torch::Tensor& ff1_inp, - const torch::Tensor& gelu_inp, - const torch::Tensor& ff2_inp, - const torch::Tensor& attn_prob_dropout_mask, - const torch::Tensor& attn_output_dropout_mask, - const torch::Tensor& layer_output_dropout_mask, - const torch::Tensor& attn_layer_norm_var, - const torch::Tensor& attn_layer_norm_mean, - const torch::Tensor& layer_norm_var, - const torch::Tensor& layer_norm_mean, - 
const torch::Tensor& input, - const torch::Tensor& input_mask, - const torch::Tensor& attn_qkvw, - const torch::Tensor& attn_qkvb, - const torch::Tensor& attn_ow, - const torch::Tensor& attn_ob, - const torch::Tensor& attn_nw, - const torch::Tensor& attn_nb, - const torch::Tensor& inter_w, - const torch::Tensor& inter_b, - const torch::Tensor& output_w, - const torch::Tensor& output_b, - const torch::Tensor& norm_w, - const torch::Tensor& norm_b) -{ - auto g_output = grad_output.contiguous(); - CHECK_INPUT(g_output); - CHECK_INPUT(output); - CHECK_INPUT(inp_norm); - CHECK_INPUT(qkv_tf); - CHECK_INPUT(add_res); - CHECK_INPUT(soft_out); - CHECK_INPUT(ctx_bufB); - CHECK_INPUT(attn_o_inp); - CHECK_INPUT(ff1_inp); - CHECK_INPUT(gelu_inp); - CHECK_INPUT(ff2_inp); - CHECK_INPUT(input); - CHECK_INPUT(input_mask); - CHECK_INPUT(attn_qkvw); - CHECK_INPUT(attn_qkvb); - CHECK_INPUT(attn_ow); - CHECK_INPUT(attn_ob); - CHECK_INPUT(attn_nw); - CHECK_INPUT(attn_nb); - CHECK_INPUT(inter_w); - CHECK_INPUT(inter_b); - CHECK_INPUT(output_w); - CHECK_INPUT(output_b); - CHECK_INPUT(norm_w); - CHECK_INPUT(norm_b); - - unsigned bsz = g_output.size(0); - - std::shared_ptr> layer = - std::static_pointer_cast>(s_transformer_layers[layer_id]); - - unsigned seq_len = layer->GetSeqLength(); - if (g_output.size(1) != seq_len) { - seq_len = g_output.size(1); - layer->SetSeqLength(seq_len); - } - auto options = torch::TensorOptions() - .dtype(g_output.options().dtype()) - .layout(torch::kStrided) - .device(torch::kCUDA) - .requires_grad(true); - auto workspace = torch::empty({get_workspace_size(bsz, - seq_len, - layer->GetHiddenSize(), - layer->GetIntermediateSize(), - layer->GetNumHeads(), - layer->IsTrainingMode(), - layer->GeluCheckpoint())}, - options); - Context::Instance().SetWorkSpace((T*)workspace.data_ptr()); - - auto grad_input = torch::empty_like(input); - auto grad_attn_qkvw = torch::empty_like(attn_qkvw); - auto grad_attn_qkvb = torch::empty_like(attn_qkvb); - auto grad_attn_ow = 
torch::empty_like(attn_ow); - auto grad_attn_ob = torch::empty_like(attn_ob); - auto grad_attn_nw = torch::empty_like(attn_nw); - auto grad_attn_nb = torch::empty_like(attn_nb); - auto grad_inter_w = torch::empty_like(inter_w); - auto grad_inter_b = torch::empty_like(inter_b); - auto grad_output_w = torch::empty_like(output_w); - auto grad_output_b = torch::empty_like(output_b); - auto grad_norm_w = torch::empty_like(norm_w); - auto grad_norm_b = torch::empty_like(norm_b); - - // inputs. - const T* grad_output_ptr = (const T*)g_output.data_ptr(); - const T* input_ptr = (const T*)input.data_ptr(); - const T* output_ptr = (const T*)output.data_ptr(); - const T* inp_norm_ptr = (const T*)inp_norm.data_ptr(); - const T* q_tf_ptr = (const T*)qkv_tf.data_ptr(); - const T* add_res_ptr = (const T*)add_res.data_ptr(); - const T* k_tf_ptr = - q_tf_ptr + (bsz * layer->GetSeqLength() * output_w.size(0)); //(const T*)k_tf.data_ptr(); - const T* v_tf_ptr = - k_tf_ptr + (bsz * layer->GetSeqLength() * output_w.size(0)); //(const T*)v_tf.data_ptr(); - const T* ff1_inp_ptr = (const T*)ff1_inp.data_ptr(); - const T* gelu_inp_ptr = (const T*)gelu_inp.data_ptr(); - const T* ff2_inp_ptr = (const T*)ff2_inp.data_ptr(); - const T* ctx_bufB_ptr = (const T*)ctx_bufB.data_ptr(); - const T* soft_out_ptr = (const T*)soft_out.data_ptr(); - const T* attn_o_inp_ptr = (const T*)attn_o_inp.data_ptr(); - const T* input_mask_ptr = (const T*)input_mask.data_ptr(); - const T* attn_qkvw_ptr = (const T*)attn_qkvw.data_ptr(); - const T* attn_ow_ptr = (const T*)attn_ow.data_ptr(); - const T* attn_nw_ptr = (const T*)attn_nw.data_ptr(); - const T* attn_nb_ptr = (const T*)attn_nb.data_ptr(); - const T* inter_w_ptr = (const T*)inter_w.data_ptr(); - const T* inter_b_ptr = (const T*)inter_b.data_ptr(); - const T* output_w_ptr = (const T*)output_w.data_ptr(); - const T* norm_w_ptr = (const T*)norm_w.data_ptr(); - const T* norm_b_ptr = (const T*)norm_b.data_ptr(); - - // outputs. 
- T* grad_input_ptr = (T*)grad_input.data_ptr(); - T* grad_attn_qkvw_ptr = (T*)grad_attn_qkvw.data_ptr(); - T* grad_attn_qkvb_ptr = (T*)grad_attn_qkvb.data_ptr(); - T* grad_attn_ow_ptr = (T*)grad_attn_ow.data_ptr(); - T* grad_attn_ob_ptr = (T*)grad_attn_ob.data_ptr(); - T* grad_attn_nw_ptr = (T*)grad_attn_nw.data_ptr(); - T* grad_attn_nb_ptr = (T*)grad_attn_nb.data_ptr(); - T* grad_inter_w_ptr = (T*)grad_inter_w.data_ptr(); - T* grad_inter_b_ptr = (T*)grad_inter_b.data_ptr(); - T* grad_output_w_ptr = (T*)grad_output_w.data_ptr(); - T* grad_output_b_ptr = (T*)grad_output_b.data_ptr(); - T* grad_norm_w_ptr = (T*)grad_norm_w.data_ptr(); - T* grad_norm_b_ptr = (T*)grad_norm_b.data_ptr(); - - layer->SetIntermediateBuffers((uint8_t*)attn_prob_dropout_mask.data_ptr(), - (uint8_t*)attn_output_dropout_mask.data_ptr(), - (uint8_t*)layer_output_dropout_mask.data_ptr(), - (T*)attn_layer_norm_var.data_ptr(), - (T*)attn_layer_norm_mean.data_ptr(), - (T*)layer_norm_var.data_ptr(), - (T*)layer_norm_mean.data_ptr()); - - layer->Backward(bsz, - grad_output_ptr, - input_ptr, - output_ptr, - inp_norm_ptr, - q_tf_ptr, - k_tf_ptr, - v_tf_ptr, - soft_out_ptr, - ctx_bufB_ptr, - attn_o_inp_ptr, - add_res_ptr, - ff1_inp_ptr, - gelu_inp_ptr, - ff2_inp_ptr, - input_mask_ptr, - attn_qkvw_ptr, - attn_ow_ptr, - attn_nw_ptr, - attn_nb_ptr, - inter_w_ptr, - inter_b_ptr, - output_w_ptr, - norm_w_ptr, - norm_b_ptr, - - grad_input_ptr, - grad_attn_qkvw_ptr, - grad_attn_qkvb_ptr, - grad_attn_ow_ptr, - grad_attn_ob_ptr, - grad_attn_nw_ptr, - grad_attn_nb_ptr, - grad_inter_w_ptr, - grad_inter_b_ptr, - grad_output_w_ptr, - grad_output_b_ptr, - grad_norm_w_ptr, - grad_norm_b_ptr); - - return {grad_input, - grad_attn_qkvw, - grad_attn_qkvb, - grad_attn_ow, - grad_attn_ob, - grad_attn_nw, - grad_attn_nb, - grad_inter_w, - grad_inter_b, - grad_output_w, - grad_output_b, - grad_norm_w, - grad_norm_b}; -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("forward_fp32", - &ds_transformer_forward, - 
"DeepSpeed Transformer forward with fp32 (CUDA)"); - m.def("forward_fp16", - &ds_transformer_forward<__half>, - "DeepSpeed Transformer forward with fp16 (CUDA)"); - m.def("backward_fp32", - &ds_transformer_backward, - "DeepSpeed Transformer backward with fp32 (CUDA)"); - m.def("backward_fp16", - &ds_transformer_backward<__half>, - "DeepSpeed Transformer backward with fp16 (CUDA)"); - m.def("create_transformer_layer_fp32", - &create_transformer_layer, - "Create DeepSpeed Transformer Transformer Layer with fp32 (CUDA)"); - m.def("create_transformer_layer_fp16", - &create_transformer_layer<__half>, - "Create DeepSpeed Transformer Transformer Layer with fp16 (CUDA)"); -} diff --git a/deepspeed/ops/csrc/transformer_bak/ds_transformer_hip.cpp b/deepspeed/ops/csrc/transformer_bak/ds_transformer_hip.cpp deleted file mode 100644 index 7b47686..0000000 --- a/deepspeed/ops/csrc/transformer_bak/ds_transformer_hip.cpp +++ /dev/null @@ -1,1052 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
-#include - -#include -#include -#include -#include -#include -#include -#include "Timer_hip.h" -#include "context_hip.h" -#include "cublas_wrappers_hip.h" -#include "custom_hip_layers.h" -#include "ds_transformer_hip.h" - -static std::unordered_map> s_transformer_layers; - -const int init_seq_length = 128; - -// C++ interface - -template -unsigned get_workspace_size(unsigned maxBatchSize, - unsigned seq_len, - unsigned hidden_size, - unsigned intermediate_size, - unsigned heads, - bool training, - bool gelu_checkpoint) -{ - unsigned workSpacesize = 4 * (size_t(maxBatchSize) * seq_len * hidden_size); - if (training) { - workSpacesize += 2 * (size_t(maxBatchSize) * seq_len * hidden_size); - workSpacesize += ((std::max)((size_t(maxBatchSize) * seq_len * intermediate_size), - 2 * (size_t(maxBatchSize) * heads * seq_len * seq_len))); - if (gelu_checkpoint) - workSpacesize += 2 * (size_t(maxBatchSize) * seq_len * intermediate_size); - } - return workSpacesize; // * sizeof(T); -} - -// NOTE: AT_ASSERT has become AT_CHECK on master after 0.4. 
-#define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor") -#define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous") -#define CHECK_INPUT(x) \ - CHECK_CUDA(x); \ - CHECK_CONTIGUOUS(x) - -template -BertTransformerLayer::BertTransformerLayer(unsigned layer_id, - unsigned batch_size, - unsigned hidden_size, - unsigned num_heads, - unsigned intermediate_size, - unsigned seq_length, - float attn_prob_dropout_ratio, - float hidden_output_dropout_ratio, - float layer_norm_eps, - bool pre_or_postLayerNorm, - const std::vector>& gemm_algos, - bool attn_dropout_checkpoint, - bool normalize_invertible, - bool gelu_checkpoint, - bool stochastic_mode) - : _layer_id(layer_id), - _batch_size(batch_size), - _hidden_size(hidden_size), - _heads(num_heads), - _intermediate_size(intermediate_size), - _seq_length(seq_length), - _training(true), - _pre_or_postLayerNorm(pre_or_postLayerNorm), - _attn_dropout_checkpoint(attn_dropout_checkpoint), - _normalize_invertible(normalize_invertible), - _gelu_checkpoint(gelu_checkpoint), - _stochastic_mode(stochastic_mode), - _stream(Context::Instance().GetCurrentStream()), - _cublasHandle(Context::Instance().GetCublasHandle()), - _qkv_linear(typename FeedForward::Config(batch_size * seq_length, - 3 * hidden_size, - hidden_size, - gemm_algos[0])), - _attn_out_linear(typename FeedForward::Config(batch_size * seq_length, - hidden_size, - hidden_size, - gemm_algos[0])), - _attn_layer_norm(typename Normalize_Layer::Config(batch_size, - seq_length, - hidden_size, - layer_norm_eps, - true, - !normalize_invertible)), - _layer_norm(typename Normalize_Layer::Config(batch_size, - seq_length, - hidden_size, - layer_norm_eps, - true, - !normalize_invertible)), - _ff1(typename FeedForward::Config(batch_size * seq_length, - _intermediate_size, - hidden_size, - gemm_algos[1])), - _ff2(typename FeedForward::Config(batch_size * seq_length, - hidden_size, - _intermediate_size, - gemm_algos[2])), - _softmax(typename 
Softmax::Config(batch_size, num_heads, seq_length)), - _gelu(typename Gelu::Config(_intermediate_size)), - _attn_prob_dropout(typename Dropout::Config(attn_prob_dropout_ratio, _seq_length)), - _attn_output_dropout(typename Dropout::Config(hidden_output_dropout_ratio, _hidden_size)), - _layer_output_dropout(typename Dropout::Config(hidden_output_dropout_ratio, _hidden_size)), - _attn_scores(typename StridedBatchGemm::Config(_batch_size * _heads, - _seq_length, - _seq_length, - _hidden_size / _heads, - //aiss debug 0506 - //(T(1.0) / T(sqrt(_hidden_size / _heads))), - (T(1.0 / (sqrt(_hidden_size / _heads)))), - T(0.0), - rocblas_operation_transpose, - rocblas_operation_none, - gemm_algos[3])), - _attn_context(typename StridedBatchGemm::Config(_batch_size * _heads, - _hidden_size / _heads, - _seq_length, - _seq_length, - T(1.0), - T(0.0), - rocblas_operation_none, - rocblas_operation_none, - gemm_algos[4])) -{ - assert(_hidden_size % _heads == 0); - - Initialize(); -} - -template -BertTransformerLayer::~BertTransformerLayer() -{ -} - -template -void BertTransformerLayer::Initialize() -{ -#ifndef __HIP_PLATFORM_HCC__ - if (std::is_same::value) rocblas_set_math_mode(_cublasHandle, CUBLAS_TENSOR_OP_MATH); -#endif -} - -template -void BertTransformerLayer::Forward(unsigned bsz, - const T* input_ptr, - const T* input_mask_ptr, - const T* attn_qkvw_ptr, - const T* attn_qkvb_ptr, - const T* attn_ow_ptr, - const T* attn_ob_ptr, - const T* attn_nw_ptr, - const T* attn_nb_ptr, - const T* inter_w_ptr, - const T* inter_b_ptr, - const T* output_w_ptr, - const T* output_b_ptr, - const T* norm_w_ptr, - const T* norm_b_ptr, - T* out_ptr, - T* inp_norm_ptr, - T* q_tf_ptr, - T* k_tf_ptr, - T* v_tf_ptr, - T* soft_out_ptr, - T* ctx_bufB_ptr, - T* attn_o_inp_ptr, - T* add_res_ptr, - T* ff1_inp_ptr, - T* gelu_inp_ptr, - T* ff2_inp_ptr) -{ - rocblas_set_stream(_cublasHandle, _stream); - - if (!_stochastic_mode) hipStreamSynchronize(_stream); - - T* workspace = 
static_cast(Context::Instance().GetWorkSpace()); - size_t small_buf_size = bsz * _seq_length * _hidden_size; - T* buf_0 = workspace; - T* buf_1 = buf_0 + small_buf_size; - T* buf_2 = buf_1; - - if (_normalize_invertible) { - add_res_ptr = buf_1 + 3 * small_buf_size; - buf_2 = add_res_ptr; - } - if (_gelu_checkpoint) buf_2 += small_buf_size; - if (_attn_dropout_checkpoint) - ctx_bufB_ptr = - (_gelu_checkpoint ? (buf_2 + (_intermediate_size / _hidden_size) * small_buf_size) - : (buf_1 + 4 * small_buf_size)); - - int bsz_seq = bsz * _seq_length; - - if (_pre_or_postLayerNorm) { - if (_layer_norm.UseMean()) - _layer_norm.ForwardCheckpoint( - bsz_seq, inp_norm_ptr, input_ptr, norm_w_ptr, norm_b_ptr, _stream, true); - - else - _layer_norm.Forward( - bsz_seq, inp_norm_ptr, input_ptr, norm_w_ptr, norm_b_ptr, _stream, true); - } - - if (_pre_or_postLayerNorm) - _qkv_linear.Forward(bsz_seq, inp_norm_ptr, attn_qkvw_ptr, buf_0, _cublasHandle); - else - _qkv_linear.Forward(bsz_seq, input_ptr, attn_qkvw_ptr, buf_0, _cublasHandle); - - launch_bias_add_transform_0213( - q_tf_ptr, buf_0, attn_qkvb_ptr, bsz, _seq_length, _hidden_size, _heads, _stream, 3); - - int bsz_heads = bsz * _heads; - - // attention scores - _attn_scores.Forward(bsz_heads, soft_out_ptr, k_tf_ptr, q_tf_ptr, _cublasHandle); - - // Softmax + Mask - _softmax.Forward(bsz, soft_out_ptr, input_mask_ptr, _stream); - - // attn prob dropout. - _attn_prob_dropout.Forward(bsz_heads * _seq_length, ctx_bufB_ptr, soft_out_ptr, _stream); - - // attention context - _attn_context.Forward(bsz_heads, buf_1, v_tf_ptr, ctx_bufB_ptr, _cublasHandle); - - launch_transform4d_0213( - attn_o_inp_ptr, buf_1, bsz, _heads, _seq_length, _hidden_size, _stream, 1); - - if (_pre_or_postLayerNorm) - _attn_out_linear.Forward(bsz_seq, attn_o_inp_ptr, attn_ow_ptr, buf_1, _cublasHandle); - else - _attn_out_linear.Forward(bsz_seq, attn_o_inp_ptr, attn_ow_ptr, ff1_inp_ptr, _cublasHandle); - - // attn output dropout. 
- if (_pre_or_postLayerNorm) - _attn_output_dropout.ForwardWithBias( - bsz_seq, add_res_ptr, buf_1, input_ptr, attn_ob_ptr, _stream); - else - _attn_output_dropout.ForwardWithBias( - bsz_seq, add_res_ptr, ff1_inp_ptr, input_ptr, attn_ob_ptr, _stream); - - if (_pre_or_postLayerNorm) { - if (_attn_layer_norm.UseMean()) - _attn_layer_norm.ForwardCheckpoint( - bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true); - else - _attn_layer_norm.Forward( - bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true); - } else { - if (_attn_layer_norm.UseMean()) - _attn_layer_norm.ForwardCheckpoint( - bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true); - else - _attn_layer_norm.Forward( - bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true); - } - - _ff1.Forward(bsz_seq, - ff1_inp_ptr, - inter_w_ptr, - (_gelu_checkpoint ? ff2_inp_ptr : gelu_inp_ptr), - _cublasHandle); - - _gelu.ForwardWithBiasAdd(bsz_seq, - (_gelu_checkpoint ? ff2_inp_ptr : gelu_inp_ptr), - inter_b_ptr, - (_gelu_checkpoint ? buf_2 : ff2_inp_ptr), - _stream); - - _ff2.Forward( - bsz_seq, (_gelu_checkpoint ? buf_2 : ff2_inp_ptr), output_w_ptr, out_ptr, _cublasHandle); - - // layer output dropout. 
- if (_pre_or_postLayerNorm) - _layer_output_dropout.ForwardWithBias( - bsz_seq, out_ptr, out_ptr, add_res_ptr, output_b_ptr, _stream); - else - _layer_output_dropout.ForwardWithBias( - bsz_seq, inp_norm_ptr, out_ptr, ff1_inp_ptr, output_b_ptr, _stream); - - if (!_pre_or_postLayerNorm) { - if (_layer_norm.UseMean()) - _layer_norm.ForwardCheckpoint( - bsz_seq, out_ptr, inp_norm_ptr, norm_w_ptr, norm_b_ptr, _stream, true); - else - _layer_norm.Forward( - bsz_seq, out_ptr, inp_norm_ptr, norm_w_ptr, norm_b_ptr, _stream, true); - } -} - -template -void BertTransformerLayer::Backward(unsigned bsz, - const T* grad_output_ptr, - const T* input_ptr, - const T* output_ptr, - const T* inp_norm_ptr, - const T* q_tf_ptr, - const T* k_tf_ptr, - const T* v_tf_ptr, - const T* soft_out_ptr, - const T* ctx_bufB_ptr, - const T* attn_o_inp_ptr, - const T* add_res_ptr, - const T* ff1_inp_ptr, - const T* gelu_inp_ptr, - const T* ff2_inp_ptr, - const T* input_mask_ptr, - const T* attn_qkvw_ptr, - const T* attn_ow_ptr, - const T* attn_nw_ptr, - const T* attn_nb_ptr, - const T* inter_w_ptr, - const T* inter_b_ptr, - const T* output_w_ptr, - const T* norm_w_ptr, - const T* norm_b_ptr, - - T* grad_input_ptr, - T* grad_attn_qkvw_ptr, - T* grad_attn_qkvb_ptr, - T* grad_attn_ow_ptr, - T* grad_attn_ob_ptr, - T* grad_attn_nw_ptr, - T* grad_attn_nb_ptr, - T* grad_inter_w_ptr, - T* grad_inter_b_ptr, - T* grad_output_w_ptr, - T* grad_output_b_ptr, - T* grad_norm_w_ptr, - T* grad_norm_b_ptr) -{ - rocblas_set_stream(_cublasHandle, _stream); - - if (!_stochastic_mode) hipStreamSynchronize(_stream); - - T* workspace = static_cast(Context::Instance().GetWorkSpace()); - size_t small_buf_size = bsz * _seq_length * _hidden_size; - T* buf_0 = workspace; - T* buf_1 = buf_0 + small_buf_size; - T* buf_2 = buf_1 + small_buf_size; - T* buf_3 = buf_2 + small_buf_size; - - T* ff2_buf = (_gelu_checkpoint ? 
buf_3 + (bsz * _seq_length * _intermediate_size) - : buf_3 + small_buf_size); - T* ctx_bufB_ptr_recomp = ff2_buf + (_seq_length * _seq_length * bsz * _heads); - - hipStream_t streams[2] = {_stream, _stream}; - - int bsz_seq = bsz * _seq_length; - int bsz_heads = bsz * _heads; - - if (!_pre_or_postLayerNorm) { - if (_layer_norm.UseMean()) - _layer_norm.Backward(bsz_seq, - grad_output_ptr, - norm_w_ptr, - grad_norm_w_ptr, - grad_norm_b_ptr, - streams, - buf_1, - inp_norm_ptr); - - else - _layer_norm.Backward(bsz_seq, - grad_output_ptr, - norm_w_ptr, - norm_b_ptr, - grad_norm_w_ptr, - grad_norm_b_ptr, - streams, - buf_1, - output_ptr); - } - - if (_pre_or_postLayerNorm) - _layer_output_dropout.Backward(bsz_seq, buf_0, grad_output_ptr, _stream); - else - _layer_output_dropout.Backward(bsz_seq, buf_0, buf_1, _stream); - - const T* layer_dropout_buf = _layer_output_dropout.HasDropout() - ? buf_0 - : (_pre_or_postLayerNorm ? grad_output_ptr : buf_1); - - if (_gelu_checkpoint) - _gelu.ForwardWithBiasAdd(bsz_seq, ff2_inp_ptr, inter_b_ptr, buf_2, _stream); - _ff2.Backward(bsz_seq, - layer_dropout_buf, - (_gelu_checkpoint ? buf_2 : ff2_inp_ptr), - output_w_ptr, - grad_output_w_ptr, - grad_output_b_ptr, - _cublasHandle, - _stream, - ff2_buf); - - _gelu.Backward( - bsz_seq, ff2_buf, (_gelu_checkpoint ? 
ff2_inp_ptr : gelu_inp_ptr), inter_b_ptr, _stream); - - _ff1.Backward(bsz_seq, - ff2_buf, - ff1_inp_ptr, - inter_w_ptr, - grad_inter_w_ptr, - grad_inter_b_ptr, - _cublasHandle, - _stream, - buf_3); - - if (!_pre_or_postLayerNorm) - launch_fused_add2(buf_2, buf_3, buf_1, bsz, _seq_length, _hidden_size, _stream); - - if (_pre_or_postLayerNorm) { - if (_attn_layer_norm.UseMean()) - _attn_layer_norm.BackwardFusedAdd(bsz_seq, - buf_3, - grad_output_ptr, - attn_nw_ptr, - grad_attn_nw_ptr, - grad_attn_nb_ptr, - streams, - buf_0, - add_res_ptr); - - else - _attn_layer_norm.BackwardFusedAdd(bsz_seq, - buf_3, - grad_output_ptr, - attn_nw_ptr, - attn_nb_ptr, - grad_attn_nw_ptr, - grad_attn_nb_ptr, - streams, - buf_0, - ff1_inp_ptr); - } else { - if (_attn_layer_norm.UseMean()) - _attn_layer_norm.Backward(bsz_seq, - buf_2, - attn_nw_ptr, - grad_attn_nw_ptr, - grad_attn_nb_ptr, - streams, - buf_0, - add_res_ptr); - - else - _attn_layer_norm.Backward(bsz_seq, - buf_2, - attn_nw_ptr, - attn_nb_ptr, - grad_attn_nw_ptr, - grad_attn_nb_ptr, - streams, - buf_0, - ff1_inp_ptr); - } - - _attn_output_dropout.Backward(bsz_seq, buf_2, buf_0, _stream); - - T* attn_output_dropout_buf = _attn_output_dropout.HasDropout() ? buf_2 : buf_0; - - _attn_out_linear.Backward(bsz_seq, - attn_output_dropout_buf, - attn_o_inp_ptr, - attn_ow_ptr, - grad_attn_ow_ptr, - grad_attn_ob_ptr, - _cublasHandle, - _stream, - buf_1); - - launch_transform_0213(buf_2, buf_1, bsz, _seq_length, _hidden_size, _heads, _stream); - - if (_attn_prob_dropout.HasDropout()) { - if (_attn_dropout_checkpoint) - _attn_prob_dropout.Forward( - bsz_heads * _seq_length, ctx_bufB_ptr_recomp, soft_out_ptr, _stream, true); - - _attn_context.Backward(bsz_heads, - buf_2, - v_tf_ptr, - (_attn_dropout_checkpoint ? 
ctx_bufB_ptr_recomp : ctx_bufB_ptr), - _cublasHandle, - buf_3, - ff2_buf); - } else - _attn_context.Backward( - bsz_heads, buf_2, v_tf_ptr, soft_out_ptr, _cublasHandle, buf_3, ff2_buf); - - _attn_prob_dropout.Backward(bsz_heads * _seq_length, ff2_buf, _stream); - - _softmax.Backward(bsz, ff2_buf, soft_out_ptr, _stream); - - _attn_scores.Backward(bsz_heads, ff2_buf, k_tf_ptr, q_tf_ptr, _cublasHandle, buf_2, buf_1); - - launch_transform4d_0213(ff2_buf, buf_1, bsz, _heads, _seq_length, _hidden_size, _stream, 3); - - if (_pre_or_postLayerNorm) - _qkv_linear.Backward(bsz_seq, - ff2_buf, - inp_norm_ptr, - attn_qkvw_ptr, - grad_attn_qkvw_ptr, - grad_attn_qkvb_ptr, - _cublasHandle, - _stream, - buf_2); - else - _qkv_linear.Backward(bsz_seq, - ff2_buf, - input_ptr, - attn_qkvw_ptr, - grad_attn_qkvw_ptr, - grad_attn_qkvb_ptr, - _cublasHandle, - _stream, - buf_2); - - if (_pre_or_postLayerNorm) { - if (_layer_norm.UseMean()) - _layer_norm.BackwardFusedAdd(bsz_seq, - buf_2, - buf_0, - norm_w_ptr, - grad_norm_w_ptr, - grad_norm_b_ptr, - streams, - grad_input_ptr, - input_ptr); - - else - _layer_norm.BackwardFusedAdd(bsz_seq, - buf_2, - buf_0, - norm_w_ptr, - norm_b_ptr, - grad_norm_w_ptr, - grad_norm_b_ptr, - streams, - grad_input_ptr, - inp_norm_ptr); - } else - launch_fused_add2(grad_input_ptr, buf_2, buf_0, bsz, _seq_length, _hidden_size, _stream); -} - -template -void BertTransformerLayer::SetTrainingMode(bool training) -{ - // Dropout will be skipped when not in training model. 
- _attn_prob_dropout.SetTrainingMode(training); - _attn_output_dropout.SetTrainingMode(training); - _layer_output_dropout.SetTrainingMode(training); -} - -template -void BertTransformerLayer::SetIntermediateBuffers(uint8_t* attn_prob_dropout_mask_ptr, - uint8_t* attn_output_dropout_mask_ptr, - uint8_t* layer_output_dropout_mask_ptr, - T* attn_layer_norm_var, - T* attn_layer_norm_mean, - T* layer_norm_var, - T* layer_norm_mean) -{ - _attn_prob_dropout.SetMask(attn_prob_dropout_mask_ptr); - _attn_output_dropout.SetMask(attn_output_dropout_mask_ptr); - _layer_output_dropout.SetMask(layer_output_dropout_mask_ptr); - - _attn_layer_norm.SetVar(attn_layer_norm_var); - _attn_layer_norm.SetMean(attn_layer_norm_mean); - _layer_norm.SetVar(layer_norm_var); - _layer_norm.SetMean(layer_norm_mean); -} - -template -void BertTransformerLayer::SetSeqLength(unsigned seq_len) -{ - _seq_length = seq_len; - - _softmax.SetSeqLength(_seq_length); - _attn_prob_dropout.SetDimension(_seq_length); - _attn_scores.SetConfig(_seq_length, _seq_length, _hidden_size / _heads); - _attn_context.SetConfig(_hidden_size / _heads, _seq_length, _seq_length); -} - -template -int create_transformer_layer(unsigned layer_id, - unsigned batch_size, - unsigned hidden_dim, - unsigned num_heads, - unsigned intermediate_size, - float attn_dropout_ratio, - float hidden_dropout_ratio, - float layer_norm_eps, - int seed, - bool pre_or_postLayerNorm, - bool test_gemm, - bool attn_dropout_checkpoint, - bool normalize_invertible, - bool gelu_checkpoint, - bool stochastic_mode) -{ - Context::Instance().SetSeed(seed); - Context::Instance().TestGemmFP16( - test_gemm, batch_size, init_seq_length, num_heads, hidden_dim / num_heads); - - auto layer = std::make_shared>(layer_id, - batch_size, - hidden_dim, - num_heads, - intermediate_size, - init_seq_length, - attn_dropout_ratio, - hidden_dropout_ratio, - layer_norm_eps, - pre_or_postLayerNorm, - Context::Instance().GetGemmAlgos(), - attn_dropout_checkpoint, - 
normalize_invertible, - gelu_checkpoint, - stochastic_mode); - - s_transformer_layers[layer_id] = layer; - - std::string dtype = (std::is_same::value) ? "half" : "float"; - - std::cout << "layer #" << layer_id << " is created with date type [" << dtype << "]." - << std::endl; - - return 0; -} - -template -std::vector ds_transformer_forward(unsigned layer_id, - const torch::Tensor& input, - const torch::Tensor& input_mask, - const torch::Tensor& attn_qkvw, - const torch::Tensor& attn_qkvb, - const torch::Tensor& attn_ow, - const torch::Tensor& attn_ob, - const torch::Tensor& attn_nw, - const torch::Tensor& attn_nb, - const torch::Tensor& inter_w, - const torch::Tensor& inter_b, - const torch::Tensor& output_w, - const torch::Tensor& output_b, - const torch::Tensor& norm_w, - const torch::Tensor& norm_b, - bool training_mode, - bool prelayernorm, - bool attn_dropout_checkpoint, - bool normalize_invertible, - bool gelu_checkpoint) -{ - CHECK_INPUT(input); - CHECK_INPUT(input_mask); - CHECK_INPUT(attn_qkvw); - CHECK_INPUT(attn_qkvb); - CHECK_INPUT(attn_ow); - CHECK_INPUT(attn_ob); - CHECK_INPUT(attn_nw); - CHECK_INPUT(attn_nb); - CHECK_INPUT(inter_w); - CHECK_INPUT(inter_b); - CHECK_INPUT(output_w); - CHECK_INPUT(output_b); - CHECK_INPUT(norm_w); - CHECK_INPUT(norm_b); - - unsigned bsz = input.size(0); - - const T* input_ptr = (const T*)input.data_ptr(); - const T* input_mask_ptr = (const T*)input_mask.data_ptr(); - const T* attn_qkvw_ptr = (const T*)attn_qkvw.data_ptr(); - const T* attn_qkvb_ptr = (const T*)attn_qkvb.data_ptr(); - const T* attn_ow_ptr = (const T*)attn_ow.data_ptr(); - const T* attn_ob_ptr = (const T*)attn_ob.data_ptr(); - const T* attn_nw_ptr = (const T*)attn_nw.data_ptr(); - const T* attn_nb_ptr = (const T*)attn_nb.data_ptr(); - const T* inter_w_ptr = (const T*)inter_w.data_ptr(); - const T* inter_b_ptr = (const T*)inter_b.data_ptr(); - const T* output_w_ptr = (const T*)output_w.data_ptr(); - const T* output_b_ptr = (const T*)output_b.data_ptr(); - 
const T* norm_w_ptr = (const T*)norm_w.data_ptr(); - const T* norm_b_ptr = (const T*)norm_b.data_ptr(); - - auto output = torch::empty_like(input); - T* out_ptr = (T*)output.data_ptr(); - - auto options = torch::TensorOptions() - .dtype(input.options().dtype()) - .layout(torch::kStrided) - .device(torch::kCUDA) - .requires_grad(true); - - auto uint8_options = torch::TensorOptions() - .dtype(torch::kInt8) - .layout(torch::kStrided) - .device(torch::kCUDA) - .requires_grad(false); - - std::shared_ptr> layer = - std::static_pointer_cast>(s_transformer_layers[layer_id]); - - unsigned seq_len = layer->GetSeqLength(); - if (input.size(1) != seq_len) { - seq_len = input.size(1); - layer->SetSeqLength(seq_len); - } - - auto workspace = torch::empty({get_workspace_size(bsz, - seq_len, - layer->GetHiddenSize(), - layer->GetIntermediateSize(), - layer->GetNumHeads(), - layer->IsTrainingMode(), - layer->GeluCheckpoint())}, - options); - Context::Instance().SetWorkSpace((T*)workspace.data_ptr()); - - auto inp_norm = ((prelayernorm || !normalize_invertible) ? torch::empty_like(input) : output); - auto add_res = (normalize_invertible ? 
inp_norm : torch::empty_like(input)); - auto attn_o_inp = torch::empty_like(input); - auto qkv_tf = torch::empty({(bsz * seq_len), output_w.size(0) * 3}, options); - - auto attn_prob_dropout_mask = - torch::empty({(bsz * layer->GetNumHeads() * seq_len), seq_len}, uint8_options); - auto attn_output_dropout_mask = - torch::empty({(bsz * seq_len), layer->GetHiddenSize()}, uint8_options); - auto layer_output_dropout_mask = - torch::empty({(bsz * seq_len), layer->GetHiddenSize()}, uint8_options); - - auto attn_layer_norm_var = torch::empty({(bsz * seq_len)}, options); - auto attn_layer_norm_mean = torch::empty({(bsz * seq_len)}, options); - auto layer_norm_var = torch::empty({(bsz * seq_len)}, options); - auto layer_norm_mean = torch::empty({(bsz * seq_len)}, options); - - T* inp_norm_ptr = (T*)inp_norm.data_ptr(); - T* add_res_ptr = (T*)add_res.data_ptr(); - T* q_tf_ptr = (T*)qkv_tf.data_ptr(); - T* k_tf_ptr = q_tf_ptr + (bsz * seq_len * output_w.size(0)); //(T*)k_tf.data_ptr(); - T* v_tf_ptr = k_tf_ptr + (bsz * seq_len * output_w.size(0)); //(T*)v_tf.data_ptr(); - T* attn_o_inp_ptr = (T*)attn_o_inp.data_ptr(); - - torch::Tensor ff2_inp = torch::empty({(bsz * seq_len), output_w.size(1)}, options); - torch::Tensor gelu_inp = - (gelu_checkpoint ? ff2_inp : torch::empty({(bsz * seq_len), output_w.size(1)}, options)); - auto ff1_inp = torch::empty_like(input); - T* ff2_inp_ptr = (T*)ff2_inp.data_ptr(); - T* gelu_inp_ptr = (T*)gelu_inp.data_ptr(); - T* ff1_inp_ptr = (T*)ff1_inp.data_ptr(); - - torch::Tensor soft_out = - torch::empty({(bsz * layer->GetNumHeads() * seq_len), seq_len}, options); - torch::Tensor ctx_bufB = - (attn_dropout_checkpoint - ? 
soft_out - : torch::empty({(bsz * layer->GetNumHeads() * seq_len), seq_len}, options)); - T* soft_out_ptr = (T*)soft_out.data_ptr(); - T* ctx_bufB_ptr = (T*)ctx_bufB.data_ptr(); - - layer->SetTrainingMode(training_mode); - layer->SetIntermediateBuffers((uint8_t*)attn_prob_dropout_mask.data_ptr(), - (uint8_t*)attn_output_dropout_mask.data_ptr(), - (uint8_t*)layer_output_dropout_mask.data_ptr(), - (T*)attn_layer_norm_var.data_ptr(), - (T*)attn_layer_norm_mean.data_ptr(), - (T*)layer_norm_var.data_ptr(), - (T*)layer_norm_mean.data_ptr()); - - layer->Forward(bsz, - input_ptr, - input_mask_ptr, - attn_qkvw_ptr, - attn_qkvb_ptr, - attn_ow_ptr, - attn_ob_ptr, - attn_nw_ptr, - attn_nb_ptr, - inter_w_ptr, - inter_b_ptr, - output_w_ptr, - output_b_ptr, - norm_w_ptr, - norm_b_ptr, - out_ptr, - inp_norm_ptr, - q_tf_ptr, - k_tf_ptr, - v_tf_ptr, - soft_out_ptr, - ctx_bufB_ptr, - attn_o_inp_ptr, - add_res_ptr, - ff1_inp_ptr, - gelu_inp_ptr, - ff2_inp_ptr); - - return {output, - inp_norm, - qkv_tf, - soft_out, - ctx_bufB, - attn_o_inp, - add_res, - ff1_inp, - gelu_inp, - ff2_inp, - attn_prob_dropout_mask, - attn_output_dropout_mask, - layer_output_dropout_mask, - attn_layer_norm_var, - attn_layer_norm_mean, - layer_norm_var, - layer_norm_mean}; -} - -template -std::vector ds_transformer_backward(unsigned layer_id, - const torch::Tensor& grad_output, - const torch::Tensor& output, - const torch::Tensor& inp_norm, - const torch::Tensor& qkv_tf, - const torch::Tensor& soft_out, - const torch::Tensor& ctx_bufB, - const torch::Tensor& attn_o_inp, - const torch::Tensor& add_res, - const torch::Tensor& ff1_inp, - const torch::Tensor& gelu_inp, - const torch::Tensor& ff2_inp, - const torch::Tensor& attn_prob_dropout_mask, - const torch::Tensor& attn_output_dropout_mask, - const torch::Tensor& layer_output_dropout_mask, - const torch::Tensor& attn_layer_norm_var, - const torch::Tensor& attn_layer_norm_mean, - const torch::Tensor& layer_norm_var, - const torch::Tensor& layer_norm_mean, - 
const torch::Tensor& input, - const torch::Tensor& input_mask, - const torch::Tensor& attn_qkvw, - const torch::Tensor& attn_qkvb, - const torch::Tensor& attn_ow, - const torch::Tensor& attn_ob, - const torch::Tensor& attn_nw, - const torch::Tensor& attn_nb, - const torch::Tensor& inter_w, - const torch::Tensor& inter_b, - const torch::Tensor& output_w, - const torch::Tensor& output_b, - const torch::Tensor& norm_w, - const torch::Tensor& norm_b) -{ - auto g_output = grad_output.contiguous(); - CHECK_INPUT(g_output); - CHECK_INPUT(output); - CHECK_INPUT(inp_norm); - CHECK_INPUT(qkv_tf); - CHECK_INPUT(add_res); - CHECK_INPUT(soft_out); - CHECK_INPUT(ctx_bufB); - CHECK_INPUT(attn_o_inp); - CHECK_INPUT(ff1_inp); - CHECK_INPUT(gelu_inp); - CHECK_INPUT(ff2_inp); - CHECK_INPUT(input); - CHECK_INPUT(input_mask); - CHECK_INPUT(attn_qkvw); - CHECK_INPUT(attn_qkvb); - CHECK_INPUT(attn_ow); - CHECK_INPUT(attn_ob); - CHECK_INPUT(attn_nw); - CHECK_INPUT(attn_nb); - CHECK_INPUT(inter_w); - CHECK_INPUT(inter_b); - CHECK_INPUT(output_w); - CHECK_INPUT(output_b); - CHECK_INPUT(norm_w); - CHECK_INPUT(norm_b); - - unsigned bsz = g_output.size(0); - - std::shared_ptr> layer = - std::static_pointer_cast>(s_transformer_layers[layer_id]); - - unsigned seq_len = layer->GetSeqLength(); - if (g_output.size(1) != seq_len) { - seq_len = g_output.size(1); - layer->SetSeqLength(seq_len); - } - auto options = torch::TensorOptions() - .dtype(g_output.options().dtype()) - .layout(torch::kStrided) - .device(torch::kCUDA) - .requires_grad(true); - auto workspace = torch::empty({get_workspace_size(bsz, - seq_len, - layer->GetHiddenSize(), - layer->GetIntermediateSize(), - layer->GetNumHeads(), - layer->IsTrainingMode(), - layer->GeluCheckpoint())}, - options); - Context::Instance().SetWorkSpace((T*)workspace.data_ptr()); - - auto grad_input = torch::empty_like(input); - auto grad_attn_qkvw = torch::empty_like(attn_qkvw); - auto grad_attn_qkvb = torch::empty_like(attn_qkvb); - auto grad_attn_ow = 
torch::empty_like(attn_ow); - auto grad_attn_ob = torch::empty_like(attn_ob); - auto grad_attn_nw = torch::empty_like(attn_nw); - auto grad_attn_nb = torch::empty_like(attn_nb); - auto grad_inter_w = torch::empty_like(inter_w); - auto grad_inter_b = torch::empty_like(inter_b); - auto grad_output_w = torch::empty_like(output_w); - auto grad_output_b = torch::empty_like(output_b); - auto grad_norm_w = torch::empty_like(norm_w); - auto grad_norm_b = torch::empty_like(norm_b); - - // inputs. - const T* grad_output_ptr = (const T*)g_output.data_ptr(); - const T* input_ptr = (const T*)input.data_ptr(); - const T* output_ptr = (const T*)output.data_ptr(); - const T* inp_norm_ptr = (const T*)inp_norm.data_ptr(); - const T* q_tf_ptr = (const T*)qkv_tf.data_ptr(); - const T* add_res_ptr = (const T*)add_res.data_ptr(); - const T* k_tf_ptr = - q_tf_ptr + (bsz * layer->GetSeqLength() * output_w.size(0)); //(const T*)k_tf.data_ptr(); - const T* v_tf_ptr = - k_tf_ptr + (bsz * layer->GetSeqLength() * output_w.size(0)); //(const T*)v_tf.data_ptr(); - const T* ff1_inp_ptr = (const T*)ff1_inp.data_ptr(); - const T* gelu_inp_ptr = (const T*)gelu_inp.data_ptr(); - const T* ff2_inp_ptr = (const T*)ff2_inp.data_ptr(); - const T* ctx_bufB_ptr = (const T*)ctx_bufB.data_ptr(); - const T* soft_out_ptr = (const T*)soft_out.data_ptr(); - const T* attn_o_inp_ptr = (const T*)attn_o_inp.data_ptr(); - const T* input_mask_ptr = (const T*)input_mask.data_ptr(); - const T* attn_qkvw_ptr = (const T*)attn_qkvw.data_ptr(); - const T* attn_ow_ptr = (const T*)attn_ow.data_ptr(); - const T* attn_nw_ptr = (const T*)attn_nw.data_ptr(); - const T* attn_nb_ptr = (const T*)attn_nb.data_ptr(); - const T* inter_w_ptr = (const T*)inter_w.data_ptr(); - const T* inter_b_ptr = (const T*)inter_b.data_ptr(); - const T* output_w_ptr = (const T*)output_w.data_ptr(); - const T* norm_w_ptr = (const T*)norm_w.data_ptr(); - const T* norm_b_ptr = (const T*)norm_b.data_ptr(); - - // outputs. 
- T* grad_input_ptr = (T*)grad_input.data_ptr(); - T* grad_attn_qkvw_ptr = (T*)grad_attn_qkvw.data_ptr(); - T* grad_attn_qkvb_ptr = (T*)grad_attn_qkvb.data_ptr(); - T* grad_attn_ow_ptr = (T*)grad_attn_ow.data_ptr(); - T* grad_attn_ob_ptr = (T*)grad_attn_ob.data_ptr(); - T* grad_attn_nw_ptr = (T*)grad_attn_nw.data_ptr(); - T* grad_attn_nb_ptr = (T*)grad_attn_nb.data_ptr(); - T* grad_inter_w_ptr = (T*)grad_inter_w.data_ptr(); - T* grad_inter_b_ptr = (T*)grad_inter_b.data_ptr(); - T* grad_output_w_ptr = (T*)grad_output_w.data_ptr(); - T* grad_output_b_ptr = (T*)grad_output_b.data_ptr(); - T* grad_norm_w_ptr = (T*)grad_norm_w.data_ptr(); - T* grad_norm_b_ptr = (T*)grad_norm_b.data_ptr(); - - layer->SetIntermediateBuffers((uint8_t*)attn_prob_dropout_mask.data_ptr(), - (uint8_t*)attn_output_dropout_mask.data_ptr(), - (uint8_t*)layer_output_dropout_mask.data_ptr(), - (T*)attn_layer_norm_var.data_ptr(), - (T*)attn_layer_norm_mean.data_ptr(), - (T*)layer_norm_var.data_ptr(), - (T*)layer_norm_mean.data_ptr()); - - layer->Backward(bsz, - grad_output_ptr, - input_ptr, - output_ptr, - inp_norm_ptr, - q_tf_ptr, - k_tf_ptr, - v_tf_ptr, - soft_out_ptr, - ctx_bufB_ptr, - attn_o_inp_ptr, - add_res_ptr, - ff1_inp_ptr, - gelu_inp_ptr, - ff2_inp_ptr, - input_mask_ptr, - attn_qkvw_ptr, - attn_ow_ptr, - attn_nw_ptr, - attn_nb_ptr, - inter_w_ptr, - inter_b_ptr, - output_w_ptr, - norm_w_ptr, - norm_b_ptr, - - grad_input_ptr, - grad_attn_qkvw_ptr, - grad_attn_qkvb_ptr, - grad_attn_ow_ptr, - grad_attn_ob_ptr, - grad_attn_nw_ptr, - grad_attn_nb_ptr, - grad_inter_w_ptr, - grad_inter_b_ptr, - grad_output_w_ptr, - grad_output_b_ptr, - grad_norm_w_ptr, - grad_norm_b_ptr); - - return {grad_input, - grad_attn_qkvw, - grad_attn_qkvb, - grad_attn_ow, - grad_attn_ob, - grad_attn_nw, - grad_attn_nb, - grad_inter_w, - grad_inter_b, - grad_output_w, - grad_output_b, - grad_norm_w, - grad_norm_b}; -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("forward_fp32", - &ds_transformer_forward, - 
"DeepSpeed Transformer forward with fp32 (CUDA)"); - m.def("forward_fp16", - &ds_transformer_forward<__half>, - "DeepSpeed Transformer forward with fp16 (CUDA)"); - m.def("backward_fp32", - &ds_transformer_backward, - "DeepSpeed Transformer backward with fp32 (CUDA)"); - m.def("backward_fp16", - &ds_transformer_backward<__half>, - "DeepSpeed Transformer backward with fp16 (CUDA)"); - m.def("create_transformer_layer_fp32", - &create_transformer_layer, - "Create DeepSpeed Transformer Transformer Layer with fp32 (CUDA)"); - m.def("create_transformer_layer_fp16", - &create_transformer_layer<__half>, - "Create DeepSpeed Transformer Transformer Layer with fp16 (CUDA)"); -} diff --git a/deepspeed/ops/csrc/transformer_bak/gelu_kernels.cu b/deepspeed/ops/csrc/transformer_bak/gelu_kernels.cu deleted file mode 100644 index d683cf0..0000000 --- a/deepspeed/ops/csrc/transformer_bak/gelu_kernels.cu +++ /dev/null @@ -1,330 +0,0 @@ -#include "custom_cuda_layers.h" - -inline __device__ float gelu(const float x) -{ - const float sqrt_param = 0.79788456080286535587989211986876f; - const float mul_param = 0.044715; - return x * 0.5f * (1.0f + tanhf(sqrt_param * (x + mul_param * x * x * x))); -} - -inline __device__ float d_gelu(const float x) -{ - const float sqrt_param = 0.79788456080286535587989211986876f; - const float mul_param = 0.044715; - - float x2mul = x * x * mul_param; - float tan_h = tanhf(sqrt_param * (x + x * x2mul)); - float dg1 = 0.5f * (1.0f + tan_h); - float dg2 = x * 0.5f * sqrt_param * (1 - tan_h * tan_h); - float dg3 = dg2 * 3 * x2mul; - return (dg1 + dg2 + dg3); -} - -/* -Fused bias add with GELU - -Loads a vector of 4 elements each iteration, for stride -iterations. It was written with the intention to launch 256 thread -threadblocks, so to launch for bert-large, we would set ITERATIONS -to 4. This is currently done automatically as a heuristic, setting -the number of iterations as blocks of 1024. 
- -For FP16, the values are loaded from memory as __half, but converted -to FP32 for the arithmetic itself, to prevent numerous overflow on -the intermediate hyperbolic tangent, since there's no intrinsic -that computes it directly. -*/ - -__global__ void gelu_kernel(const float* input, float* vals, int row_stride, int iterations) -{ - int row = blockIdx.x; - int id = threadIdx.x; - int loop_stride = blockDim.x; - - const float4* input_cast = reinterpret_cast(input); - float4* vals_cast = reinterpret_cast(vals); - - for (int i = 0; i < iterations; i++) { - if (i * loop_stride + id < row_stride) { - float4 data = input_cast[row * row_stride + i * loop_stride + id]; - - data.x = gelu(data.x); - data.y = gelu(data.y); - data.z = gelu(data.z); - data.w = gelu(data.w); - - vals_cast[row * row_stride + i * loop_stride + id] = data; - } - } -} - -__global__ void gelu_kernel(const __half* input, __half* vals, int row_stride, int iterations) -{ -#ifdef HALF_PRECISION_AVAILABLE - int row = blockIdx.x; - int id = threadIdx.x; - int loop_stride = blockDim.x; - - const float2* input_cast = reinterpret_cast(input); - float2* vals_cast = reinterpret_cast(vals); - - for (int i = 0; i < iterations; i++) { - if (i * loop_stride + id < row_stride) { - float2 vals_vec = input_cast[row * row_stride + i * loop_stride + id]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - low_data.x = gelu(low_data.x); - low_data.y = gelu(low_data.y); - high_data.x = gelu(high_data.x); - high_data.y = gelu(high_data.y); - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - vals_cast[row * row_stride + i * loop_stride + id] = vals_vec; - } - } -#endif -} - -__global__ void fused_bias_gelu(const float* input, - const float* bias, - float* vals, - int row_stride, - int iterations) -{ - int row = blockIdx.x; - int id = threadIdx.x; - int 
loop_stride = blockDim.x; - - const float4* input_cast = reinterpret_cast(input); - float4* vals_cast = reinterpret_cast(vals); - const float4* bias_cast = reinterpret_cast(bias); - - for (int i = 0; i < iterations; i++) { - if (i * loop_stride + id < row_stride) { - float4 data = input_cast[row * row_stride + i * loop_stride + id]; - float4 bias_data = bias_cast[i * loop_stride + id]; - - data.x += bias_data.x; - data.y += bias_data.y; - data.z += bias_data.z; - data.w += bias_data.w; - - data.x = gelu(data.x); - data.y = gelu(data.y); - data.z = gelu(data.z); - data.w = gelu(data.w); - - vals_cast[row * row_stride + i * loop_stride + id] = data; - } - } -} - -__global__ void fused_bias_gelu(const __half* input, - const __half* bias, - __half* vals, - int row_stride, - int iterations) -{ -#ifdef HALF_PRECISION_AVAILABLE - int row = blockIdx.x; - int id = threadIdx.x; - int loop_stride = blockDim.x; - - const float2* input_cast = reinterpret_cast(input); - float2* vals_cast = reinterpret_cast(vals); - const float2* bias_cast = reinterpret_cast(bias); - - for (int i = 0; i < iterations; i++) { - if (i * loop_stride + id < row_stride) { - float2 vals_vec = input_cast[row * row_stride + i * loop_stride + id]; - float2 bias_vec = bias_cast[i * loop_stride + id]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - low_data.x += low_bias.x; - low_data.y += low_bias.y; - high_data.x += high_bias.x; - high_data.y += high_bias.y; - - low_data.x = gelu(low_data.x); - low_data.y = gelu(low_data.y); - high_data.x = gelu(high_data.x); - high_data.y = gelu(high_data.y); - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - vals_cast[row * row_stride + i * 
loop_stride + id] = vals_vec; - } - } -#endif -} - -__global__ void d_gelu_func(float* d_output, - const float* gelu_input, - const float* bias, - int row_stride, - int iterations) -{ - int row = blockIdx.x; - int id = threadIdx.x; - int loop_stride = blockDim.x; - - float4* d_output_cast = reinterpret_cast(d_output); - const float4* gelu_input_cast = reinterpret_cast(gelu_input); - const float4* bias_cast = reinterpret_cast(bias); - - for (int i = 0; i < iterations; i++) { - if (i * loop_stride + id < row_stride) { - float4 output_data = d_output_cast[row * row_stride + i * loop_stride + id]; - float4 gelu_input_data = gelu_input_cast[row * row_stride + i * loop_stride + id]; - float4 bias_data = bias_cast[i * loop_stride + id]; - - gelu_input_data.x += bias_data.x; - gelu_input_data.y += bias_data.y; - gelu_input_data.z += bias_data.z; - gelu_input_data.w += bias_data.w; - - output_data.x *= d_gelu(gelu_input_data.x); - output_data.y *= d_gelu(gelu_input_data.y); - output_data.z *= d_gelu(gelu_input_data.z); - output_data.w *= d_gelu(gelu_input_data.w); - - d_output_cast[row * row_stride + i * loop_stride + id] = output_data; - } - } -} - -__global__ void d_gelu_func(__half* d_output, - const __half* gelu_input, - const __half* bias, - int row_stride, - int iterations) -{ -#ifdef HALF_PRECISION_AVAILABLE - int row = blockIdx.x; - int id = threadIdx.x; - int loop_stride = blockDim.x; - - float2* d_output_cast = reinterpret_cast(d_output); - const float2* gelu_input_cast = reinterpret_cast(gelu_input); - const float2* bias_cast = reinterpret_cast(bias); - -#pragma unroll - for (int i = 0; i < iterations; i++) { - if (i * loop_stride + id < row_stride) { - float2 output_data = d_output_cast[row * row_stride + i * loop_stride + id]; - float2 gelu_input_data = gelu_input_cast[row * row_stride + i * loop_stride + id]; - float2 bias_vec = bias_cast[i * loop_stride + id]; - - __half2* output_data_half = reinterpret_cast<__half2*>(&output_data); - __half2* 
gelu_input_data_half = reinterpret_cast<__half2*>(&gelu_input_data); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - - float2 output_half_0 = __half22float2(output_data_half[0]); - float2 output_half_1 = __half22float2(output_data_half[1]); - - float2 gelu_input_half_0 = __half22float2(gelu_input_data_half[0]); - float2 gelu_input_half_1 = __half22float2(gelu_input_data_half[1]); - - float2 bias_half_0 = __half22float2(bias_half[0]); - float2 bias_half_1 = __half22float2(bias_half[1]); - - gelu_input_half_0.x += bias_half_0.x; - gelu_input_half_0.y += bias_half_0.y; - gelu_input_half_1.x += bias_half_1.x; - gelu_input_half_1.y += bias_half_1.y; - - output_half_0.x *= d_gelu(gelu_input_half_0.x); - output_half_0.y *= d_gelu(gelu_input_half_0.y); - output_half_1.x *= d_gelu(gelu_input_half_1.x); - output_half_1.y *= d_gelu(gelu_input_half_1.y); - - float2 result; - __half2* result_half2 = reinterpret_cast<__half2*>(&result); - - result_half2[0] = __float22half2_rn(output_half_0); - result_half2[1] = __float22half2_rn(output_half_1); - - d_output_cast[row * row_stride + i * loop_stride + id] = result; - } - } -#endif -} - -template -void launch_bias_gelu(const T* input, - const T* bias, - T* output, - int intermediate_size, - int batch_size, - cudaStream_t stream) -{ - int iterations = (intermediate_size + 1023) / 1024; - int threads = (intermediate_size - 1) / (iterations * 4) + 1; - dim3 block_dims(threads); - dim3 grid_dims(batch_size); - - fused_bias_gelu<<>>( - input, bias, output, intermediate_size / 4, iterations); -} - -template -void launch_gelu(const T* input, - T* output, - int intermediate_size, - int batch_size, - cudaStream_t stream) -{ - int iterations = (intermediate_size + 1023) / 1024; - int threads = (intermediate_size - 1) / (iterations * 4) + 1; - dim3 block_dims(threads); - dim3 grid_dims(batch_size); - - gelu_kernel<<>>( - input, output, intermediate_size / 4, iterations); -} - -template void launch_bias_gelu(const float*, const 
float*, float*, int, int, cudaStream_t); -template void launch_bias_gelu<__half>(const __half*, - const __half*, - __half*, - int, - int, - cudaStream_t); - -template void launch_gelu(const float*, float*, int, int, cudaStream_t); -template void launch_gelu<__half>(const __half*, __half*, int, int, cudaStream_t); - -template -void launch_d_gelu(T* d_output, - const T* input, - const T* bias, - int intermediate_size, - int batch_size, - cudaStream_t stream) -{ - int iterations = (intermediate_size + 1023) / 1024; - int threads = (intermediate_size - 1) / (iterations * 4) + 1; - dim3 block_dims(threads); - dim3 grid_dims(batch_size); - - d_gelu_func<<>>( - d_output, input, bias, intermediate_size / 4, iterations); -} - -template void launch_d_gelu(float*, const float*, const float*, int, int, cudaStream_t); -template void launch_d_gelu<__half>(__half*, const __half*, const __half*, int, int, cudaStream_t); diff --git a/deepspeed/ops/csrc/transformer_bak/gelu_kernels.hip b/deepspeed/ops/csrc/transformer_bak/gelu_kernels.hip deleted file mode 100644 index f7e7a7f..0000000 --- a/deepspeed/ops/csrc/transformer_bak/gelu_kernels.hip +++ /dev/null @@ -1,332 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
-#include "hip/hip_runtime.h" -#include "custom_hip_layers.h" - -inline __device__ float gelu(const float x) -{ - const float sqrt_param = 0.79788456080286535587989211986876f; - const float mul_param = 0.044715; - return x * 0.5f * (1.0f + tanhf(sqrt_param * (x + mul_param * x * x * x))); -} - -inline __device__ float d_gelu(const float x) -{ - const float sqrt_param = 0.79788456080286535587989211986876f; - const float mul_param = 0.044715; - - float x2mul = x * x * mul_param; - float tan_h = tanhf(sqrt_param * (x + x * x2mul)); - float dg1 = 0.5f * (1.0f + tan_h); - float dg2 = x * 0.5f * sqrt_param * (1 - tan_h * tan_h); - float dg3 = dg2 * 3 * x2mul; - return (dg1 + dg2 + dg3); -} - -/* -Fused bias add with GELU - -Loads a vector of 4 elements each iteration, for stride -iterations. It was written with the intention to launch 256 thread -threadblocks, so to launch for bert-large, we would set ITERATIONS -to 4. This is currently done automatically as a heuristic, setting -the number of iterations as blocks of 1024. - -For FP16, the values are loaded from memory as __half, but converted -to FP32 for the arithmetic itself, to prevent numerous overflow on -the intermediate hyperbolic tangent, since there's no intrinsic -that computes it directly. 
-*/ - -__global__ void gelu_kernel(const float* input, float* vals, int row_stride, int iterations) -{ - int row = blockIdx.x; - int id = threadIdx.x; - int loop_stride = blockDim.x; - - const float4* input_cast = reinterpret_cast(input); - float4* vals_cast = reinterpret_cast(vals); - - for (int i = 0; i < iterations; i++) { - if (i * loop_stride + id < row_stride) { - float4 data = input_cast[row * row_stride + i * loop_stride + id]; - - data.x = gelu(data.x); - data.y = gelu(data.y); - data.z = gelu(data.z); - data.w = gelu(data.w); - - vals_cast[row * row_stride + i * loop_stride + id] = data; - } - } -} - -__global__ void gelu_kernel(const __half* input, __half* vals, int row_stride, int iterations) -{ -#ifdef HALF_PRECISION_AVAILABLE - int row = blockIdx.x; - int id = threadIdx.x; - int loop_stride = blockDim.x; - - const float2* input_cast = reinterpret_cast(input); - float2* vals_cast = reinterpret_cast(vals); - - for (int i = 0; i < iterations; i++) { - if (i * loop_stride + id < row_stride) { - float2 vals_vec = input_cast[row * row_stride + i * loop_stride + id]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - low_data.x = gelu(low_data.x); - low_data.y = gelu(low_data.y); - high_data.x = gelu(high_data.x); - high_data.y = gelu(high_data.y); - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - vals_cast[row * row_stride + i * loop_stride + id] = vals_vec; - } - } -#endif -} - -__global__ void fused_bias_gelu(const float* input, - const float* bias, - float* vals, - int row_stride, - int iterations) -{ - int row = blockIdx.x; - int id = threadIdx.x; - int loop_stride = blockDim.x; - - const float4* input_cast = reinterpret_cast(input); - float4* vals_cast = reinterpret_cast(vals); - const float4* bias_cast = reinterpret_cast(bias); - - for (int i = 0; i < iterations; i++) { - if (i * 
loop_stride + id < row_stride) { - float4 data = input_cast[row * row_stride + i * loop_stride + id]; - float4 bias_data = bias_cast[i * loop_stride + id]; - - data.x += bias_data.x; - data.y += bias_data.y; - data.z += bias_data.z; - data.w += bias_data.w; - - data.x = gelu(data.x); - data.y = gelu(data.y); - data.z = gelu(data.z); - data.w = gelu(data.w); - - vals_cast[row * row_stride + i * loop_stride + id] = data; - } - } -} - -__global__ void fused_bias_gelu(const __half* input, - const __half* bias, - __half* vals, - int row_stride, - int iterations) -{ -#ifdef HALF_PRECISION_AVAILABLE - int row = blockIdx.x; - int id = threadIdx.x; - int loop_stride = blockDim.x; - - const float2* input_cast = reinterpret_cast(input); - float2* vals_cast = reinterpret_cast(vals); - const float2* bias_cast = reinterpret_cast(bias); - - for (int i = 0; i < iterations; i++) { - if (i * loop_stride + id < row_stride) { - float2 vals_vec = input_cast[row * row_stride + i * loop_stride + id]; - float2 bias_vec = bias_cast[i * loop_stride + id]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - low_data.x += low_bias.x; - low_data.y += low_bias.y; - high_data.x += high_bias.x; - high_data.y += high_bias.y; - - low_data.x = gelu(low_data.x); - low_data.y = gelu(low_data.y); - high_data.x = gelu(high_data.x); - high_data.y = gelu(high_data.y); - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - vals_cast[row * row_stride + i * loop_stride + id] = vals_vec; - } - } -#endif -} - -__global__ void d_gelu_func(float* d_output, - const float* gelu_input, - const float* bias, - int row_stride, - int iterations) -{ - int row = blockIdx.x; - int id = threadIdx.x; - 
int loop_stride = blockDim.x; - - float4* d_output_cast = reinterpret_cast(d_output); - const float4* gelu_input_cast = reinterpret_cast(gelu_input); - const float4* bias_cast = reinterpret_cast(bias); - - for (int i = 0; i < iterations; i++) { - if (i * loop_stride + id < row_stride) { - float4 output_data = d_output_cast[row * row_stride + i * loop_stride + id]; - float4 gelu_input_data = gelu_input_cast[row * row_stride + i * loop_stride + id]; - float4 bias_data = bias_cast[i * loop_stride + id]; - - gelu_input_data.x += bias_data.x; - gelu_input_data.y += bias_data.y; - gelu_input_data.z += bias_data.z; - gelu_input_data.w += bias_data.w; - - output_data.x *= d_gelu(gelu_input_data.x); - output_data.y *= d_gelu(gelu_input_data.y); - output_data.z *= d_gelu(gelu_input_data.z); - output_data.w *= d_gelu(gelu_input_data.w); - - d_output_cast[row * row_stride + i * loop_stride + id] = output_data; - } - } -} - -__global__ void d_gelu_func(__half* d_output, - const __half* gelu_input, - const __half* bias, - int row_stride, - int iterations) -{ -#ifdef HALF_PRECISION_AVAILABLE - int row = blockIdx.x; - int id = threadIdx.x; - int loop_stride = blockDim.x; - - float2* d_output_cast = reinterpret_cast(d_output); - const float2* gelu_input_cast = reinterpret_cast(gelu_input); - const float2* bias_cast = reinterpret_cast(bias); - -#pragma unroll - for (int i = 0; i < iterations; i++) { - if (i * loop_stride + id < row_stride) { - float2 output_data = d_output_cast[row * row_stride + i * loop_stride + id]; - float2 gelu_input_data = gelu_input_cast[row * row_stride + i * loop_stride + id]; - float2 bias_vec = bias_cast[i * loop_stride + id]; - - __half2* output_data_half = reinterpret_cast<__half2*>(&output_data); - __half2* gelu_input_data_half = reinterpret_cast<__half2*>(&gelu_input_data); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - - float2 output_half_0 = __half22float2(output_data_half[0]); - float2 output_half_1 = 
__half22float2(output_data_half[1]); - - float2 gelu_input_half_0 = __half22float2(gelu_input_data_half[0]); - float2 gelu_input_half_1 = __half22float2(gelu_input_data_half[1]); - - float2 bias_half_0 = __half22float2(bias_half[0]); - float2 bias_half_1 = __half22float2(bias_half[1]); - - gelu_input_half_0.x += bias_half_0.x; - gelu_input_half_0.y += bias_half_0.y; - gelu_input_half_1.x += bias_half_1.x; - gelu_input_half_1.y += bias_half_1.y; - - output_half_0.x *= d_gelu(gelu_input_half_0.x); - output_half_0.y *= d_gelu(gelu_input_half_0.y); - output_half_1.x *= d_gelu(gelu_input_half_1.x); - output_half_1.y *= d_gelu(gelu_input_half_1.y); - - float2 result; - __half2* result_half2 = reinterpret_cast<__half2*>(&result); - - result_half2[0] = __float22half2_rn(output_half_0); - result_half2[1] = __float22half2_rn(output_half_1); - - d_output_cast[row * row_stride + i * loop_stride + id] = result; - } - } -#endif -} - -template -void launch_bias_gelu(const T* input, - const T* bias, - T* output, - int intermediate_size, - int batch_size, - hipStream_t stream) -{ - int iterations = (intermediate_size + 1023) / 1024; - int threads = (intermediate_size - 1) / (iterations * 4) + 1; - dim3 block_dims(threads); - dim3 grid_dims(batch_size); - - hipLaunchKernelGGL(( fused_bias_gelu), dim3(grid_dims), dim3(block_dims), 0, stream, - input, bias, output, intermediate_size / 4, iterations); -} - -template -void launch_gelu(const T* input, - T* output, - int intermediate_size, - int batch_size, - hipStream_t stream) -{ - int iterations = (intermediate_size + 1023) / 1024; - int threads = (intermediate_size - 1) / (iterations * 4) + 1; - dim3 block_dims(threads); - dim3 grid_dims(batch_size); - - hipLaunchKernelGGL(( gelu_kernel), dim3(grid_dims), dim3(block_dims), 0, stream, - input, output, intermediate_size / 4, iterations); -} - -template void launch_bias_gelu(const float*, const float*, float*, int, int, hipStream_t); -template void launch_bias_gelu<__half>(const __half*, 
- const __half*, - __half*, - int, - int, - hipStream_t); - -template void launch_gelu(const float*, float*, int, int, hipStream_t); -template void launch_gelu<__half>(const __half*, __half*, int, int, hipStream_t); - -template -void launch_d_gelu(T* d_output, - const T* input, - const T* bias, - int intermediate_size, - int batch_size, - hipStream_t stream) -{ - int iterations = (intermediate_size + 1023) / 1024; - int threads = (intermediate_size - 1) / (iterations * 4) + 1; - dim3 block_dims(threads); - dim3 grid_dims(batch_size); - - hipLaunchKernelGGL(( d_gelu_func), dim3(grid_dims), dim3(block_dims), 0, stream, - d_output, input, bias, intermediate_size / 4, iterations); -} - -template void launch_d_gelu(float*, const float*, const float*, int, int, hipStream_t); -template void launch_d_gelu<__half>(__half*, const __half*, const __half*, int, int, hipStream_t); diff --git a/deepspeed/ops/csrc/transformer_bak/general_kernels.cu b/deepspeed/ops/csrc/transformer_bak/general_kernels.cu deleted file mode 100644 index 1eaa94e..0000000 --- a/deepspeed/ops/csrc/transformer_bak/general_kernels.cu +++ /dev/null @@ -1,411 +0,0 @@ -#include "general_kernels.h" - -namespace cg = cooperative_groups; - -template -__global__ void column_sum_reduce(const T* __restrict__ inp, - T* __restrict__ out, - int rows, - int width) -{ - __shared__ float tile[TILE_DIM][TILE_DIM + 1]; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int idx = blockDim.x * blockIdx.x + threadIdx.x; - - int y_stride = width * TILE_DIM; - - float localSum = 0; - - // Loop across matrix height - if (idx < width) { - int offset = threadIdx.y * width + idx; - for (int r = threadIdx.y; r < rows; r += TILE_DIM) { - localSum += (float)inp[offset]; - offset += y_stride; - } - } - - tile[threadIdx.x][threadIdx.y] = localSum; - - __syncthreads(); - - // Sum the shared buffer. 
- float sum = tile[threadIdx.y][threadIdx.x]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < TILE_DIM; i <<= 1) sum += g.shfl_down(sum, i); - - if (threadIdx.x == 0) { - int pos = blockIdx.x * TILE_DIM + threadIdx.y; - if (pos < width) out[pos] = sum; - } -} - -template -void launch_fuse_transpose_bias_kernel(const T* inp, - T* out, - int rows, - int cols, - cudaStream_t stream); - -template <> -void launch_fuse_transpose_bias_kernel(const float* inp, - float* out, - int rows, - int cols, - cudaStream_t stream) -{ - // assert(rows % TILE_DIM == 0); - // assert(cols % TILE_DIM == 0); - - dim3 grid_dim((cols - 1) / TILE_DIM + 1); - dim3 block_dim(TILE_DIM, TILE_DIM); - - column_sum_reduce<<>>(inp, out, rows, cols); -} - -template <> -void launch_fuse_transpose_bias_kernel<__half>(const __half* inp, - __half* out, - int rows, - int cols, - cudaStream_t stream) -{ - // assert(rows % TILE_DIM == 0); - // assert(cols % TILE_DIM == 0); - - dim3 grid_dim((cols - 1) / TILE_DIM + 1); - dim3 block_dim(TILE_DIM, TILE_DIM); - - column_sum_reduce<__half><<>>(inp, out, rows, cols); -} - -__global__ void fused_add2_kernel(const int N, float* out, const float* inp1, const float* inp2) -{ - const float4* inp1_4 = reinterpret_cast(inp1); - const float4* inp2_4 = reinterpret_cast(inp2); - float4* out_4 = reinterpret_cast(out); - - CUDA_1D_KERNEL_LOOP(j, N) - { - float4 val; - float4 inp1_reg = inp1_4[j]; - float4 inp2_reg = inp2_4[j]; - - val.x = inp1_reg.x + inp2_reg.x; - val.y = inp1_reg.y + inp2_reg.y; - val.z = inp1_reg.z + inp2_reg.z; - val.w = inp1_reg.w + inp2_reg.w; - - out_4[j] = val; - } -} - -__global__ void fused_add2_kernel(const int N, __half* out, const __half* inp1, const __half* inp2) -{ - float2 inp1_4; - float2 inp2_4; - - __half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4); - __half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4); - - const float2* inp1_arr = reinterpret_cast(inp1); - const float2* inp2_arr = 
reinterpret_cast(inp2); - - CUDA_1D_KERNEL_LOOP(j, N) - { - inp1_4 = inp1_arr[j]; - inp2_4 = inp2_arr[j]; - - float2 inp1_h_f_0 = __half22float2(inp1_h[0]); - float2 inp1_h_f_1 = __half22float2(inp1_h[1]); - - float2 inp2_h_f_0 = __half22float2(inp2_h[0]); - float2 inp2_h_f_1 = __half22float2(inp2_h[1]); - - inp1_h_f_0.x += inp2_h_f_0.x; - inp1_h_f_0.y += inp2_h_f_0.y; - inp1_h_f_1.x += inp2_h_f_1.x; - inp1_h_f_1.y += inp2_h_f_1.y; - - float2 val_f; - __half2* val_h = reinterpret_cast<__half2*>(&val_f); - - val_h[0] = __float22half2_rn(inp1_h_f_0); - val_h[1] = __float22half2_rn(inp1_h_f_1); - - float2* out_4 = reinterpret_cast(out); - out_4[j] = val_f; - } -} - -template <> -void launch_fused_add2(float* out, - const float* inp1, - const float* inp2, - int batch_size, - int seq_length, - int hidden_dim, - cudaStream_t& stream) -{ - int total_count = batch_size * seq_length * hidden_dim / 4; - dim3 grid_dim = DS_GET_BLOCKS(total_count); //(batch_size * seq_length); - - dim3 block_dim = DS_CUDA_NUM_THREADS; //(hidden_dim / 4); - - fused_add2_kernel<<>>(total_count, out, inp1, inp2); -} - -template <> -void launch_fused_add2<__half>(__half* out, - const __half* inp1, - const __half* inp2, - int batch_size, - int seq_length, - int hidden_dim, - cudaStream_t& stream) -{ - int total_count = batch_size * seq_length * hidden_dim / 4; - dim3 grid_dim = DS_GET_BLOCKS(total_count); //(batch_size * seq_length); - - dim3 block_dim = DS_CUDA_NUM_THREADS; //(hidden_dim / 4); - - fused_add2_kernel<<>>(total_count, out, inp1, inp2); -} - -__global__ void fused_add3_kernel(float* out, - const float* inp1, - const float* inp2, - const float* inp3, - int size, - int row_stride) -{ - int row = blockIdx.x; - int id = threadIdx.x; - - const float4* inp1_4 = reinterpret_cast(inp1); - const float4* inp2_4 = reinterpret_cast(inp2); - const float4* inp3_4 = reinterpret_cast(inp3); - - float4* out_4 = reinterpret_cast(out); - - float4 val; - float4 inp1_reg = inp1_4[row * row_stride + id]; - 
float4 inp2_reg = inp2_4[row * row_stride + id]; - float4 inp3_reg = inp3_4[row * row_stride + id]; - - val.x = inp1_reg.x + inp2_reg.x + inp3_reg.x; - val.y = inp1_reg.y + inp2_reg.y + inp3_reg.y; - val.z = inp1_reg.z + inp2_reg.z + inp3_reg.z; - val.w = inp1_reg.w + inp2_reg.w + inp3_reg.w; - - out_4[row * row_stride + id] = val; -} - -__global__ void fused_add3_kernel(__half* out, - const __half* inp1, - const __half* inp2, - const __half* inp3, - int size, - int row_stride) -{ - int row = blockIdx.x; - int id = threadIdx.x; - const float2* inp1_arr = reinterpret_cast(inp1); - const float2* inp2_arr = reinterpret_cast(inp2); - const float2* inp3_arr = reinterpret_cast(inp3); - - float2 inp1_4 = inp1_arr[row * row_stride + id]; - float2 inp2_4 = inp2_arr[row * row_stride + id]; - float2 inp3_4 = inp3_arr[row * row_stride + id]; - - __half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4); - __half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4); - __half2* inp3_h = reinterpret_cast<__half2*>(&inp3_4); - - float2 inp1_h_f_0 = __half22float2(inp1_h[0]); - float2 inp1_h_f_1 = __half22float2(inp1_h[1]); - - float2 inp2_h_f_0 = __half22float2(inp2_h[0]); - float2 inp2_h_f_1 = __half22float2(inp2_h[1]); - - float2 inp3_h_f_0 = __half22float2(inp3_h[0]); - float2 inp3_h_f_1 = __half22float2(inp3_h[1]); - - inp1_h_f_0.x += (inp2_h_f_0.x + inp3_h_f_0.x); - inp1_h_f_0.y += (inp2_h_f_0.y + inp3_h_f_0.y); - inp1_h_f_1.x += (inp2_h_f_1.x + inp3_h_f_1.x); - inp1_h_f_1.y += (inp2_h_f_1.y + inp3_h_f_1.y); - - float2 val_f; - __half2* val_h = reinterpret_cast<__half2*>(&val_f); - - val_h[0] = __float22half2_rn(inp1_h_f_0); - val_h[1] = __float22half2_rn(inp1_h_f_1); - - float2* out_4 = reinterpret_cast(out); - out_4[row * row_stride + id] = val_f; -} - -template <> -void launch_fused_add3(float* out, - const float* inp1, - const float* inp2, - const float* inp3, - int batch_size, - int seq_length, - int hidden_size, - cudaStream_t& stream) -{ - dim3 grid_dim(batch_size * seq_length); 
- - dim3 block_dim(hidden_size / 4); - - fused_add3_kernel<<>>( - out, inp1, inp2, inp3, (batch_size * seq_length * hidden_size), hidden_size / 4); -} - -template <> -void launch_fused_add3<__half>(__half* out, - const __half* inp1, - const __half* inp2, - const __half* inp3, - int batch_size, - int seq_length, - int hidden_size, - cudaStream_t& stream) -{ - dim3 grid_dim(batch_size * seq_length); - - dim3 block_dim(hidden_size / 4); - - fused_add3_kernel<<>>( - out, inp1, inp2, inp3, (batch_size * seq_length * hidden_size), hidden_size / 4); -} - -__global__ void fused_add4_kernel(float* out, - const float* inp1, - const float* inp2, - const float* inp3, - const float* inp4, - int size, - int row_stride) -{ - int row = blockIdx.x; - int id = threadIdx.x; - - const float4* inp1_4 = reinterpret_cast(inp1); - const float4* inp2_4 = reinterpret_cast(inp2); - const float4* inp3_4 = reinterpret_cast(inp3); - const float4* inp4_4 = reinterpret_cast(inp4); - float4* out_4 = reinterpret_cast(out); - - float4 val; - float4 inp1_reg = inp1_4[row * row_stride + id]; - float4 inp2_reg = inp2_4[row * row_stride + id]; - float4 inp3_reg = inp3_4[row * row_stride + id]; - float4 inp4_reg = inp4_4[row * row_stride + id]; - - val.x = inp1_reg.x + inp2_reg.x + inp3_reg.x + inp4_reg.x; - val.y = inp1_reg.y + inp2_reg.y + inp3_reg.y + inp4_reg.y; - val.z = inp1_reg.z + inp2_reg.z + inp3_reg.z + inp4_reg.z; - val.w = inp1_reg.w + inp2_reg.w + inp3_reg.w + inp4_reg.w; - - out_4[row * row_stride + id] = val; -} - -__global__ void fused_add4_kernel(__half* out, - const __half* inp1, - const __half* inp2, - const __half* inp3, - const __half* inp4, - int size, - int row_stride) -{ - int row = blockIdx.x; - int id = threadIdx.x; - const float2* inp1_arr = reinterpret_cast(inp1); - const float2* inp2_arr = reinterpret_cast(inp2); - const float2* inp3_arr = reinterpret_cast(inp3); - const float2* inp4_arr = reinterpret_cast(inp4); - - float2 inp1_4 = inp1_arr[row * row_stride + id]; - float2 
inp2_4 = inp2_arr[row * row_stride + id]; - float2 inp3_4 = inp3_arr[row * row_stride + id]; - float2 inp4_4 = inp4_arr[row * row_stride + id]; - - __half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4); - __half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4); - __half2* inp3_h = reinterpret_cast<__half2*>(&inp3_4); - __half2* inp4_h = reinterpret_cast<__half2*>(&inp4_4); - - float2 inp1_h_f_0 = __half22float2(inp1_h[0]); - float2 inp1_h_f_1 = __half22float2(inp1_h[1]); - - float2 inp2_h_f_0 = __half22float2(inp2_h[0]); - float2 inp2_h_f_1 = __half22float2(inp2_h[1]); - - float2 inp3_h_f_0 = __half22float2(inp3_h[0]); - float2 inp3_h_f_1 = __half22float2(inp3_h[1]); - - float2 inp4_h_f_0 = __half22float2(inp4_h[0]); - float2 inp4_h_f_1 = __half22float2(inp4_h[1]); - - inp1_h_f_0.x += (inp2_h_f_0.x + inp3_h_f_0.x + inp4_h_f_0.x); - inp1_h_f_0.y += (inp2_h_f_0.y + inp3_h_f_0.y + inp4_h_f_0.y); - inp1_h_f_1.x += (inp2_h_f_1.x + inp3_h_f_1.x + inp4_h_f_1.x); - inp1_h_f_1.y += (inp2_h_f_1.y + inp3_h_f_1.y + inp4_h_f_1.y); - - float2 val_f; - __half2* val_h = reinterpret_cast<__half2*>(&val_f); - - val_h[0] = __float22half2_rn(inp1_h_f_0); - val_h[1] = __float22half2_rn(inp1_h_f_1); - - float2* out_4 = reinterpret_cast(out); - out_4[row * row_stride + id] = val_f; -} - -template <> -void launch_fused_add4(float* out, - const float* inp1, - const float* inp2, - const float* inp3, - const float* inp4, - int batch_size, - int seq_length, - int hidden_size, - cudaStream_t& stream) -{ - dim3 grid_dim(batch_size * seq_length); - - dim3 block_dim(hidden_size / 4); - - fused_add4_kernel<<>>( - out, inp1, inp2, inp3, inp4, (batch_size * seq_length * hidden_size), hidden_size / 4); -} - -template <> -void launch_fused_add4<__half>(__half* out, - const __half* inp1, - const __half* inp2, - const __half* inp3, - const __half* inp4, - int batch_size, - int seq_length, - int hidden_size, - cudaStream_t& stream) -{ - dim3 grid_dim(batch_size * seq_length); - - dim3 block_dim(hidden_size 
/ 4); - - fused_add4_kernel<<>>( - out, inp1, inp2, inp3, inp4, (batch_size * seq_length * hidden_size), hidden_size / 4); -} diff --git a/deepspeed/ops/csrc/transformer_bak/general_kernels.hip b/deepspeed/ops/csrc/transformer_bak/general_kernels.hip deleted file mode 100644 index 5be2fc2..0000000 --- a/deepspeed/ops/csrc/transformer_bak/general_kernels.hip +++ /dev/null @@ -1,413 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! -#include "hip/hip_runtime.h" -#include "general_kernels_hip.h" - -namespace cg = cooperative_groups; - -template -__global__ void column_sum_reduce(const T* __restrict__ inp, - T* __restrict__ out, - int rows, - int width) -{ - __shared__ float tile[TILE_DIM][TILE_DIM + 1]; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int idx = blockDim.x * blockIdx.x + threadIdx.x; - - int y_stride = width * TILE_DIM; - - float localSum = 0; - - // Loop across matrix height - if (idx < width) { - int offset = threadIdx.y * width + idx; - for (int r = threadIdx.y; r < rows; r += TILE_DIM) { - localSum += (float)inp[offset]; - offset += y_stride; - } - } - - tile[threadIdx.x][threadIdx.y] = localSum; - - __syncthreads(); - - // Sum the shared buffer. 
- float sum = tile[threadIdx.y][threadIdx.x]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < TILE_DIM; i <<= 1) sum += g.shfl_down(sum, i); - - if (threadIdx.x == 0) { - int pos = blockIdx.x * TILE_DIM + threadIdx.y; - if (pos < width) out[pos] = sum; - } -} - -template -void launch_fuse_transpose_bias_kernel(const T* inp, - T* out, - int rows, - int cols, - hipStream_t stream); - -template <> -void launch_fuse_transpose_bias_kernel(const float* inp, - float* out, - int rows, - int cols, - hipStream_t stream) -{ - // assert(rows % TILE_DIM == 0); - // assert(cols % TILE_DIM == 0); - - dim3 grid_dim((cols - 1) / TILE_DIM + 1); - dim3 block_dim(TILE_DIM, TILE_DIM); - - hipLaunchKernelGGL(( column_sum_reduce), dim3(grid_dim), dim3(block_dim), 0, stream, inp, out, rows, cols); -} - -template <> -void launch_fuse_transpose_bias_kernel<__half>(const __half* inp, - __half* out, - int rows, - int cols, - hipStream_t stream) -{ - // assert(rows % TILE_DIM == 0); - // assert(cols % TILE_DIM == 0); - - dim3 grid_dim((cols - 1) / TILE_DIM + 1); - dim3 block_dim(TILE_DIM, TILE_DIM); - - hipLaunchKernelGGL(( column_sum_reduce<__half>), dim3(grid_dim), dim3(block_dim), 0, stream, inp, out, rows, cols); -} - -__global__ void fused_add2_kernel(const int N, float* out, const float* inp1, const float* inp2) -{ - const float4* inp1_4 = reinterpret_cast(inp1); - const float4* inp2_4 = reinterpret_cast(inp2); - float4* out_4 = reinterpret_cast(out); - - CUDA_1D_KERNEL_LOOP(j, N) - { - float4 val; - float4 inp1_reg = inp1_4[j]; - float4 inp2_reg = inp2_4[j]; - - val.x = inp1_reg.x + inp2_reg.x; - val.y = inp1_reg.y + inp2_reg.y; - val.z = inp1_reg.z + inp2_reg.z; - val.w = inp1_reg.w + inp2_reg.w; - - out_4[j] = val; - } -} - -__global__ void fused_add2_kernel(const int N, __half* out, const __half* inp1, const __half* inp2) -{ - float2 inp1_4; - float2 inp2_4; - - __half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4); - __half2* inp2_h = 
reinterpret_cast<__half2*>(&inp2_4); - - const float2* inp1_arr = reinterpret_cast(inp1); - const float2* inp2_arr = reinterpret_cast(inp2); - - CUDA_1D_KERNEL_LOOP(j, N) - { - inp1_4 = inp1_arr[j]; - inp2_4 = inp2_arr[j]; - - float2 inp1_h_f_0 = __half22float2(inp1_h[0]); - float2 inp1_h_f_1 = __half22float2(inp1_h[1]); - - float2 inp2_h_f_0 = __half22float2(inp2_h[0]); - float2 inp2_h_f_1 = __half22float2(inp2_h[1]); - - inp1_h_f_0.x += inp2_h_f_0.x; - inp1_h_f_0.y += inp2_h_f_0.y; - inp1_h_f_1.x += inp2_h_f_1.x; - inp1_h_f_1.y += inp2_h_f_1.y; - - float2 val_f; - __half2* val_h = reinterpret_cast<__half2*>(&val_f); - - val_h[0] = __float22half2_rn(inp1_h_f_0); - val_h[1] = __float22half2_rn(inp1_h_f_1); - - float2* out_4 = reinterpret_cast(out); - out_4[j] = val_f; - } -} - -template <> -void launch_fused_add2(float* out, - const float* inp1, - const float* inp2, - int batch_size, - int seq_length, - int hidden_dim, - hipStream_t& stream) -{ - int total_count = batch_size * seq_length * hidden_dim / 4; - dim3 grid_dim = DS_GET_BLOCKS(total_count); //(batch_size * seq_length); - - dim3 block_dim = DS_CUDA_NUM_THREADS; //(hidden_dim / 4); - - hipLaunchKernelGGL(( fused_add2_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, total_count, out, inp1, inp2); -} - -template <> -void launch_fused_add2<__half>(__half* out, - const __half* inp1, - const __half* inp2, - int batch_size, - int seq_length, - int hidden_dim, - hipStream_t& stream) -{ - int total_count = batch_size * seq_length * hidden_dim / 4; - dim3 grid_dim = DS_GET_BLOCKS(total_count); //(batch_size * seq_length); - - dim3 block_dim = DS_CUDA_NUM_THREADS; //(hidden_dim / 4); - - hipLaunchKernelGGL(( fused_add2_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, total_count, out, inp1, inp2); -} - -__global__ void fused_add3_kernel(float* out, - const float* inp1, - const float* inp2, - const float* inp3, - int size, - int row_stride) -{ - int row = blockIdx.x; - int id = threadIdx.x; - - const float4* 
inp1_4 = reinterpret_cast(inp1); - const float4* inp2_4 = reinterpret_cast(inp2); - const float4* inp3_4 = reinterpret_cast(inp3); - - float4* out_4 = reinterpret_cast(out); - - float4 val; - float4 inp1_reg = inp1_4[row * row_stride + id]; - float4 inp2_reg = inp2_4[row * row_stride + id]; - float4 inp3_reg = inp3_4[row * row_stride + id]; - - val.x = inp1_reg.x + inp2_reg.x + inp3_reg.x; - val.y = inp1_reg.y + inp2_reg.y + inp3_reg.y; - val.z = inp1_reg.z + inp2_reg.z + inp3_reg.z; - val.w = inp1_reg.w + inp2_reg.w + inp3_reg.w; - - out_4[row * row_stride + id] = val; -} - -__global__ void fused_add3_kernel(__half* out, - const __half* inp1, - const __half* inp2, - const __half* inp3, - int size, - int row_stride) -{ - int row = blockIdx.x; - int id = threadIdx.x; - const float2* inp1_arr = reinterpret_cast(inp1); - const float2* inp2_arr = reinterpret_cast(inp2); - const float2* inp3_arr = reinterpret_cast(inp3); - - float2 inp1_4 = inp1_arr[row * row_stride + id]; - float2 inp2_4 = inp2_arr[row * row_stride + id]; - float2 inp3_4 = inp3_arr[row * row_stride + id]; - - __half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4); - __half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4); - __half2* inp3_h = reinterpret_cast<__half2*>(&inp3_4); - - float2 inp1_h_f_0 = __half22float2(inp1_h[0]); - float2 inp1_h_f_1 = __half22float2(inp1_h[1]); - - float2 inp2_h_f_0 = __half22float2(inp2_h[0]); - float2 inp2_h_f_1 = __half22float2(inp2_h[1]); - - float2 inp3_h_f_0 = __half22float2(inp3_h[0]); - float2 inp3_h_f_1 = __half22float2(inp3_h[1]); - - inp1_h_f_0.x += (inp2_h_f_0.x + inp3_h_f_0.x); - inp1_h_f_0.y += (inp2_h_f_0.y + inp3_h_f_0.y); - inp1_h_f_1.x += (inp2_h_f_1.x + inp3_h_f_1.x); - inp1_h_f_1.y += (inp2_h_f_1.y + inp3_h_f_1.y); - - float2 val_f; - __half2* val_h = reinterpret_cast<__half2*>(&val_f); - - val_h[0] = __float22half2_rn(inp1_h_f_0); - val_h[1] = __float22half2_rn(inp1_h_f_1); - - float2* out_4 = reinterpret_cast(out); - out_4[row * row_stride + id] = 
val_f; -} - -template <> -void launch_fused_add3(float* out, - const float* inp1, - const float* inp2, - const float* inp3, - int batch_size, - int seq_length, - int hidden_size, - hipStream_t& stream) -{ - dim3 grid_dim(batch_size * seq_length); - - dim3 block_dim(hidden_size / 4); - - hipLaunchKernelGGL(( fused_add3_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, - out, inp1, inp2, inp3, (batch_size * seq_length * hidden_size), hidden_size / 4); -} - -template <> -void launch_fused_add3<__half>(__half* out, - const __half* inp1, - const __half* inp2, - const __half* inp3, - int batch_size, - int seq_length, - int hidden_size, - hipStream_t& stream) -{ - dim3 grid_dim(batch_size * seq_length); - - dim3 block_dim(hidden_size / 4); - - hipLaunchKernelGGL(( fused_add3_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, - out, inp1, inp2, inp3, (batch_size * seq_length * hidden_size), hidden_size / 4); -} - -__global__ void fused_add4_kernel(float* out, - const float* inp1, - const float* inp2, - const float* inp3, - const float* inp4, - int size, - int row_stride) -{ - int row = blockIdx.x; - int id = threadIdx.x; - - const float4* inp1_4 = reinterpret_cast(inp1); - const float4* inp2_4 = reinterpret_cast(inp2); - const float4* inp3_4 = reinterpret_cast(inp3); - const float4* inp4_4 = reinterpret_cast(inp4); - float4* out_4 = reinterpret_cast(out); - - float4 val; - float4 inp1_reg = inp1_4[row * row_stride + id]; - float4 inp2_reg = inp2_4[row * row_stride + id]; - float4 inp3_reg = inp3_4[row * row_stride + id]; - float4 inp4_reg = inp4_4[row * row_stride + id]; - - val.x = inp1_reg.x + inp2_reg.x + inp3_reg.x + inp4_reg.x; - val.y = inp1_reg.y + inp2_reg.y + inp3_reg.y + inp4_reg.y; - val.z = inp1_reg.z + inp2_reg.z + inp3_reg.z + inp4_reg.z; - val.w = inp1_reg.w + inp2_reg.w + inp3_reg.w + inp4_reg.w; - - out_4[row * row_stride + id] = val; -} - -__global__ void fused_add4_kernel(__half* out, - const __half* inp1, - const __half* inp2, - const __half* inp3, 
- const __half* inp4, - int size, - int row_stride) -{ - int row = blockIdx.x; - int id = threadIdx.x; - const float2* inp1_arr = reinterpret_cast(inp1); - const float2* inp2_arr = reinterpret_cast(inp2); - const float2* inp3_arr = reinterpret_cast(inp3); - const float2* inp4_arr = reinterpret_cast(inp4); - - float2 inp1_4 = inp1_arr[row * row_stride + id]; - float2 inp2_4 = inp2_arr[row * row_stride + id]; - float2 inp3_4 = inp3_arr[row * row_stride + id]; - float2 inp4_4 = inp4_arr[row * row_stride + id]; - - __half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4); - __half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4); - __half2* inp3_h = reinterpret_cast<__half2*>(&inp3_4); - __half2* inp4_h = reinterpret_cast<__half2*>(&inp4_4); - - float2 inp1_h_f_0 = __half22float2(inp1_h[0]); - float2 inp1_h_f_1 = __half22float2(inp1_h[1]); - - float2 inp2_h_f_0 = __half22float2(inp2_h[0]); - float2 inp2_h_f_1 = __half22float2(inp2_h[1]); - - float2 inp3_h_f_0 = __half22float2(inp3_h[0]); - float2 inp3_h_f_1 = __half22float2(inp3_h[1]); - - float2 inp4_h_f_0 = __half22float2(inp4_h[0]); - float2 inp4_h_f_1 = __half22float2(inp4_h[1]); - - inp1_h_f_0.x += (inp2_h_f_0.x + inp3_h_f_0.x + inp4_h_f_0.x); - inp1_h_f_0.y += (inp2_h_f_0.y + inp3_h_f_0.y + inp4_h_f_0.y); - inp1_h_f_1.x += (inp2_h_f_1.x + inp3_h_f_1.x + inp4_h_f_1.x); - inp1_h_f_1.y += (inp2_h_f_1.y + inp3_h_f_1.y + inp4_h_f_1.y); - - float2 val_f; - __half2* val_h = reinterpret_cast<__half2*>(&val_f); - - val_h[0] = __float22half2_rn(inp1_h_f_0); - val_h[1] = __float22half2_rn(inp1_h_f_1); - - float2* out_4 = reinterpret_cast(out); - out_4[row * row_stride + id] = val_f; -} - -template <> -void launch_fused_add4(float* out, - const float* inp1, - const float* inp2, - const float* inp3, - const float* inp4, - int batch_size, - int seq_length, - int hidden_size, - hipStream_t& stream) -{ - dim3 grid_dim(batch_size * seq_length); - - dim3 block_dim(hidden_size / 4); - - hipLaunchKernelGGL(( fused_add4_kernel), 
dim3(grid_dim), dim3(block_dim), 0, stream, - out, inp1, inp2, inp3, inp4, (batch_size * seq_length * hidden_size), hidden_size / 4); -} - -template <> -void launch_fused_add4<__half>(__half* out, - const __half* inp1, - const __half* inp2, - const __half* inp3, - const __half* inp4, - int batch_size, - int seq_length, - int hidden_size, - hipStream_t& stream) -{ - dim3 grid_dim(batch_size * seq_length); - - dim3 block_dim(hidden_size / 4); - - hipLaunchKernelGGL(( fused_add4_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, - out, inp1, inp2, inp3, inp4, (batch_size * seq_length * hidden_size), hidden_size / 4); -} diff --git a/deepspeed/ops/csrc/transformer_bak/inference/csrc/apply_rotary_pos_emb.cu b/deepspeed/ops/csrc/transformer_bak/inference/csrc/apply_rotary_pos_emb.cu deleted file mode 100644 index 524a63a..0000000 --- a/deepspeed/ops/csrc/transformer_bak/inference/csrc/apply_rotary_pos_emb.cu +++ /dev/null @@ -1,372 +0,0 @@ -#include "custom_cuda_layers.h" - -//#include - -namespace cg = cooperative_groups; - -__global__ void apply_rotary_pos_emb(float* mixed_query, - float* key_layer, - unsigned rotary_dim, - unsigned seq_len, - unsigned seq_offset, - unsigned num_heads, - unsigned head_size, - unsigned total_count) -{ - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int id = threadIdx.x; - int gid = id >> 5; - int lane = id & 0x1f; - - unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid; - unsigned offset = head_id * head_size; - - unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; - - if (head_id < total_count) { - while (lane < rotary_dim) { - float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim; - inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; - float q = mixed_query[offset + lane]; - float k = key_layer[offset + lane]; - float rotary_sign = (lane % 2 == 1 ? 
-1.0 : 1.0); - float q_rot = (q * rotary_sign); - float k_rot = (k * rotary_sign); - q_rot = g.shfl_xor(q_rot, 1); - k_rot = g.shfl_xor(k_rot, 1); - q = q * cosf(inv_freq) + q_rot * sinf(inv_freq); - k = k * cosf(inv_freq) + k_rot * sinf(inv_freq); - - mixed_query[offset + lane] = q; - key_layer[offset + lane] = k; - - lane += WARP_SIZE; - } - } -} - -__global__ void apply_rotary_pos_emb(__half* mixed_query, - __half* key_layer, - unsigned rotary_dim, - unsigned seq_len, - unsigned seq_offset, - unsigned num_heads, - unsigned head_size, - unsigned total_count) -{ -#if __CUDA_ARCH__ >= 700 - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int id = threadIdx.x; - int gid = id >> 5; - int lane = id & 0x1f; - - unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid; - unsigned offset = head_id * head_size; - - unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; - - if (head_id < total_count) { - while (lane < rotary_dim) { - float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim; - inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; - float q = (float)mixed_query[offset + lane]; - float k = (float)key_layer[offset + lane]; - float rotary_sign = (lane % 2 == 1 ? 
-1.0 : 1.0); - float q_rot = (q * rotary_sign); - float k_rot = (k * rotary_sign); - q_rot = g.shfl_xor(q_rot, 1); - k_rot = g.shfl_xor(k_rot, 1); - q = q * cosf(inv_freq) + q_rot * sinf(inv_freq); - k = k * cosf(inv_freq) + k_rot * sinf(inv_freq); - - mixed_query[offset + lane] = (__half)q; - key_layer[offset + lane] = (__half)k; - - lane += WARP_SIZE; - } - } -#endif -} -__global__ void apply_rotary_pos_emb1(float* mixed_query, - float* key_layer, - unsigned rotary_dim, - unsigned seq_len, - unsigned seq_offset, - unsigned num_heads, - unsigned head_size, - unsigned total_count) -{ - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int id = threadIdx.x; - int gid = id >> 5; - int lane = id & 0x1f; - - unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid; - unsigned offset = head_id * head_size; - - unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; - - if (head_id < total_count) { - while (lane < rotary_dim) { - float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim; - inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; - float q = mixed_query[offset + lane]; - float k = key_layer[offset + lane]; - float rotary_sign = (lane % 2 == 1 ? 
-1.0 : 1.0); - float q_rot = (q * rotary_sign); - float k_rot = (k * rotary_sign); - q_rot = g.shfl_xor(q_rot, 1); - k_rot = g.shfl_xor(k_rot, 1); - q = q * cosf(inv_freq) + q_rot * sinf(inv_freq); - k = k * cosf(inv_freq) + k_rot * sinf(inv_freq); - - mixed_query[offset + lane] = q; - key_layer[offset + lane] = k; - - lane += WARP_SIZE; - } - } -} -__global__ void apply_rotary_pos_emb1(__half* mixed_query, - __half* key_layer, - unsigned rotary_dim, - unsigned seq_len, - unsigned seq_offset, - unsigned num_heads, - unsigned head_size, - unsigned total_count) -{ -#if __CUDA_ARCH__ >= 700 - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int id = threadIdx.x; - int gid = id >> 5; - int lane = id & 0x1f; - - unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid; - unsigned offset = head_id * head_size; - - constexpr unsigned mask[32] = { - 0x1 | 0x1000, 0x2 | 0x2000, 0x4 | 0x4000, 0x8 | 0x8000, 0x10 | 0x10000, - 0x20 | 0x20000, 0x40 | 0x40000, 0x80 | 0x80000, 0x100 | 0x100000, 0x200 | 0x200000, - 0x400 | 0x400000, 0x800 | 0x800000, 0x1000 | 0x1, 0x2000 | 0x2, 0x4000 | 0x4, - 0x8000 | 0x8, 0x10000 | 0x10, 0x20000 | 0x20, 0x40000 | 0x40, 0x80000 | 0x80, - 0x100000 | 0x100, 0x200000 | 0x200, 0x400000 | 0x400, 0x800000 | 0x800, 0x1000000, - 0x2000000, 0x4000000, 0x8000000, 0x10000000, 0x20000000, - 0x40000000, 0x80000000}; - - unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; - unsigned half_dim = rotary_dim >> 1; - if (head_id < total_count) { - while (lane < rotary_dim) { - float inv_freq = (float)((lane % half_dim) * 2) / (float)rotary_dim; - inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; - float q = (float)mixed_query[offset + lane]; - float k = (float)key_layer[offset + lane]; - float rotary_sign = (lane > (half_dim - 1) ? -1.0 : 1.0); - float q_rot = (q * rotary_sign); - float k_rot = (k * rotary_sign); - auto q_rot_tmp = lane < half_dim ? 
__shfl_sync(mask[lane], q_rot, lane + half_dim) - : __shfl_sync(mask[lane], q_rot, lane - half_dim); - auto k_rot_tmp = lane < half_dim ? __shfl_sync(mask[lane], k_rot, lane + half_dim) - : __shfl_sync(mask[lane], k_rot, lane - half_dim); - q = q * cosf(inv_freq) + q_rot_tmp * sinf(inv_freq); - k = k * cosf(inv_freq) + k_rot_tmp * sinf(inv_freq); - - mixed_query[offset + lane] = (__half)q; - key_layer[offset + lane] = (__half)k; - - lane += WARP_SIZE; - } - } -#endif -} - -template -void launch_apply_rotary_pos_emb(T* mixed_query, - T* key_layer, - unsigned head_size, - unsigned seq_len, - unsigned rotary_dim, - unsigned offset, - unsigned num_heads, - unsigned batch, - bool rotate_half, - bool rotate_every_two, - cudaStream_t stream) -{ - int total_count = batch * num_heads * seq_len; - dim3 block_dims(1024); - dim3 grid_dims((total_count - 1) / MAX_WARP_NUM + 1); // (batch_size); - if (rotate_every_two) - apply_rotary_pos_emb<<>>( - mixed_query, key_layer, rotary_dim, seq_len, offset, num_heads, head_size, total_count); - else if (rotate_half) - apply_rotary_pos_emb1<<>>( - mixed_query, key_layer, rotary_dim, seq_len, offset, num_heads, head_size, total_count); -} - -template void launch_apply_rotary_pos_emb(float*, - float*, - unsigned, - unsigned, - unsigned, - unsigned, - unsigned, - unsigned, - bool, - bool, - cudaStream_t); -template void launch_apply_rotary_pos_emb<__half>(__half*, - __half*, - unsigned, - unsigned, - unsigned, - unsigned, - unsigned, - unsigned, - bool, - bool, - cudaStream_t); -/* -__global__ void apply_rotary_pos_emb(float* mixed_query, -float* key_layer, -unsigned rotary_dim, -unsigned seq_len, -unsigned seq_offset, -unsigned num_heads, -unsigned head_size, -unsigned total_count) -{ -cg::thread_block b = cg::this_thread_block(); -cg::thread_block_tile g = cg::tiled_partition(b); - -int id = threadIdx.x; -int gid = id >> 5; -int lane = id & 0x1f; - -unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid; -unsigned offset = head_id * 
head_size; - -unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; - -if (head_id < total_count) { -while (lane < rotary_dim) { -float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim; -inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; -float q = mixed_query[offset + lane]; -float k = key_layer[offset + lane]; -float rotary_sign = (lane % 2 == 1 ? -1.0 : 1.0); -float q_rot = (q * rotary_sign); -float k_rot = (k * rotary_sign); -q_rot = g.shfl_xor(q_rot, 1); -k_rot = g.shfl_xor(k_rot, 1); -q = q * cosf(inv_freq) + q_rot * sinf(inv_freq); -k = k * cosf(inv_freq) + k_rot * sinf(inv_freq); - -mixed_query[offset + lane] = q; -key_layer[offset + lane] = k; - -lane += WARP_SIZE; -} -} -} - -__global__ void apply_rotary_pos_emb(__half* mixed_query, -__half* key_layer, -unsigned rotary_dim, -unsigned seq_len, -unsigned seq_offset, -unsigned num_heads, -unsigned head_size, -unsigned total_count) -{ -#if __CUDA_ARCH__ >= 700 -cg::thread_block b = cg::this_thread_block(); -cg::thread_block_tile g = cg::tiled_partition(b); - -int id = threadIdx.x; -int gid = id >> 5; -int lane = id & 0x1f; - -unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid; -unsigned offset = head_id * head_size; -constexpr unsigned mask[32] = {0x1 | 0x1000, 0x2 | 0x2000, 0x4 | 0x4000, 0x8 | 0x8000, -0x10 | 0x10000, 0x20 | 0x20000, 0x40 | 0x40000, 0x80 | 0x80000, -0x100 | 0x100000, 0x200 | 0x200000, 0x400 | 0x400000, 0x800 | 0x800000, -0x1000 | 0x1, 0x2000 | 0x2, 0x4000 | 0x4, 0x8000 | 0x8, -0x10000 | 0x10, 0x20000 | 0x20, 0x40000 | 0x40, 0x80000 | 0x80, -0x100000 | 0x100, 0x200000 | 0x200, 0x400000 | 0x400, 0x800000 | 0x800, -0x1000000, 0x2000000, 0x4000000, 0x8000000, -0x10000000, 0x20000000, 0x40000000, 0x80000000}; -unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; - -if (head_id < total_count) { -while (lane < rotary_dim) { -//float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim; -float inv_freq = (float)((lane % (rotary_dim >> 1)) * 2) / (float)rotary_dim; 
-inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; -float q = (float)mixed_query[offset + lane]; -float k = (float)key_layer[offset + lane]; -float rotary_sign = (lane > 11 ? -1.0 : 1.0); -float q_rot = (q * rotary_sign); -float k_rot = (k * rotary_sign); -auto q_rot_tmp = lane < 12 ? __shfl_sync(mask[lane], q_rot, lane + 12) : __shfl_sync(mask[lane], -q_rot, lane - 12);//g.shfl_xor(q_rot, 12); auto k_rot_tmp = lane < 12 ? __shfl_sync(mask[lane], -k_rot, lane + 12) : __shfl_sync(mask[lane], k_rot, lane - 12);//g.shfl_xor(k_rot, 12); q = q * -cosf(inv_freq) + q_rot_tmp * sinf(inv_freq); k = k * cosf(inv_freq) + k_rot_tmp * sinf(inv_freq); - -mixed_query[offset + lane] = (__half)q; -key_layer[offset + lane] = (__half)k; - -lane += WARP_SIZE; -} -} -#endif -} - -template -void launch_apply_rotary_pos_emb(T* mixed_query, -T* key_layer, -unsigned head_size, -unsigned seq_len, -unsigned rotary_dim, -unsigned offset, -unsigned num_heads, -unsigned batch, -cudaStream_t stream) -{ -int total_count = batch * num_heads * seq_len; -dim3 block_dims(1024); -dim3 grid_dims((total_count - 1) / MAX_WARP_NUM + 1); // (batch_size); - -apply_rotary_pos_emb<<>>( -mixed_query, key_layer, rotary_dim, seq_len, offset, num_heads, head_size, total_count); -} - -template void launch_apply_rotary_pos_emb(float*, -float*, -unsigned, -unsigned, -unsigned, -unsigned, -unsigned, -unsigned, -cudaStream_t); -template void launch_apply_rotary_pos_emb<__half>(__half*, -__half*, -unsigned, -unsigned, -unsigned, -unsigned, -unsigned, -unsigned, -cudaStream_t); -*/ diff --git a/deepspeed/ops/csrc/transformer_bak/inference/csrc/apply_rotary_pos_emb.hip b/deepspeed/ops/csrc/transformer_bak/inference/csrc/apply_rotary_pos_emb.hip deleted file mode 100644 index 4d70a0a..0000000 --- a/deepspeed/ops/csrc/transformer_bak/inference/csrc/apply_rotary_pos_emb.hip +++ /dev/null @@ -1,374 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
-#include "hip/hip_runtime.h" -#include "custom_hip_layers.h" - -//#include - -namespace cg = cooperative_groups; - -__global__ void apply_rotary_pos_emb(float* mixed_query, - float* key_layer, - unsigned rotary_dim, - unsigned seq_len, - unsigned seq_offset, - unsigned num_heads, - unsigned head_size, - unsigned total_count) -{ - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int id = threadIdx.x; - int gid = id >> 5; - int lane = id & 0x1f; - - unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid; - unsigned offset = head_id * head_size; - - unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; - - if (head_id < total_count) { - while (lane < rotary_dim) { - float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim; - inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; - float q = mixed_query[offset + lane]; - float k = key_layer[offset + lane]; - float rotary_sign = (lane % 2 == 1 ? -1.0 : 1.0); - float q_rot = (q * rotary_sign); - float k_rot = (k * rotary_sign); - q_rot = g.shfl_xor(q_rot, 1); - k_rot = g.shfl_xor(k_rot, 1); - q = q * cosf(inv_freq) + q_rot * sinf(inv_freq); - k = k * cosf(inv_freq) + k_rot * sinf(inv_freq); - - mixed_query[offset + lane] = q; - key_layer[offset + lane] = k; - - lane += WARP_SIZE; - } - } -} - -__global__ void apply_rotary_pos_emb(__half* mixed_query, - __half* key_layer, - unsigned rotary_dim, - unsigned seq_len, - unsigned seq_offset, - unsigned num_heads, - unsigned head_size, - unsigned total_count) -{ -#if __CUDA_ARCH__ >= 700 - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int id = threadIdx.x; - int gid = id >> 5; - int lane = id & 0x1f; - - unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid; - unsigned offset = head_id * head_size; - - unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; - - if (head_id < total_count) { - while (lane < rotary_dim) { - float inv_freq = (float)((lane 
/ 2) * 2) / (float)rotary_dim; - inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; - float q = (float)mixed_query[offset + lane]; - float k = (float)key_layer[offset + lane]; - float rotary_sign = (lane % 2 == 1 ? -1.0 : 1.0); - float q_rot = (q * rotary_sign); - float k_rot = (k * rotary_sign); - q_rot = g.shfl_xor(q_rot, 1); - k_rot = g.shfl_xor(k_rot, 1); - q = q * cosf(inv_freq) + q_rot * sinf(inv_freq); - k = k * cosf(inv_freq) + k_rot * sinf(inv_freq); - - mixed_query[offset + lane] = (__half)q; - key_layer[offset + lane] = (__half)k; - - lane += WARP_SIZE; - } - } -#endif -} -__global__ void apply_rotary_pos_emb1(float* mixed_query, - float* key_layer, - unsigned rotary_dim, - unsigned seq_len, - unsigned seq_offset, - unsigned num_heads, - unsigned head_size, - unsigned total_count) -{ - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int id = threadIdx.x; - int gid = id >> 5; - int lane = id & 0x1f; - - unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid; - unsigned offset = head_id * head_size; - - unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; - - if (head_id < total_count) { - while (lane < rotary_dim) { - float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim; - inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; - float q = mixed_query[offset + lane]; - float k = key_layer[offset + lane]; - float rotary_sign = (lane % 2 == 1 ? 
-1.0 : 1.0); - float q_rot = (q * rotary_sign); - float k_rot = (k * rotary_sign); - q_rot = g.shfl_xor(q_rot, 1); - k_rot = g.shfl_xor(k_rot, 1); - q = q * cosf(inv_freq) + q_rot * sinf(inv_freq); - k = k * cosf(inv_freq) + k_rot * sinf(inv_freq); - - mixed_query[offset + lane] = q; - key_layer[offset + lane] = k; - - lane += WARP_SIZE; - } - } -} -__global__ void apply_rotary_pos_emb1(__half* mixed_query, - __half* key_layer, - unsigned rotary_dim, - unsigned seq_len, - unsigned seq_offset, - unsigned num_heads, - unsigned head_size, - unsigned total_count) -{ -#if __CUDA_ARCH__ >= 700 - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int id = threadIdx.x; - int gid = id >> 5; - int lane = id & 0x1f; - - unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid; - unsigned offset = head_id * head_size; - - constexpr unsigned mask[32] = { - 0x1 | 0x1000, 0x2 | 0x2000, 0x4 | 0x4000, 0x8 | 0x8000, 0x10 | 0x10000, - 0x20 | 0x20000, 0x40 | 0x40000, 0x80 | 0x80000, 0x100 | 0x100000, 0x200 | 0x200000, - 0x400 | 0x400000, 0x800 | 0x800000, 0x1000 | 0x1, 0x2000 | 0x2, 0x4000 | 0x4, - 0x8000 | 0x8, 0x10000 | 0x10, 0x20000 | 0x20, 0x40000 | 0x40, 0x80000 | 0x80, - 0x100000 | 0x100, 0x200000 | 0x200, 0x400000 | 0x400, 0x800000 | 0x800, 0x1000000, - 0x2000000, 0x4000000, 0x8000000, 0x10000000, 0x20000000, - 0x40000000, 0x80000000}; - - unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; - unsigned half_dim = rotary_dim >> 1; - if (head_id < total_count) { - while (lane < rotary_dim) { - float inv_freq = (float)((lane % half_dim) * 2) / (float)rotary_dim; - inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; - float q = (float)mixed_query[offset + lane]; - float k = (float)key_layer[offset + lane]; - float rotary_sign = (lane > (half_dim - 1) ? -1.0 : 1.0); - float q_rot = (q * rotary_sign); - float k_rot = (k * rotary_sign); - auto q_rot_tmp = lane < half_dim ? 
__shfl_sync(mask[lane], q_rot, lane + half_dim) - : __shfl_sync(mask[lane], q_rot, lane - half_dim); - auto k_rot_tmp = lane < half_dim ? __shfl_sync(mask[lane], k_rot, lane + half_dim) - : __shfl_sync(mask[lane], k_rot, lane - half_dim); - q = q * cosf(inv_freq) + q_rot_tmp * sinf(inv_freq); - k = k * cosf(inv_freq) + k_rot_tmp * sinf(inv_freq); - - mixed_query[offset + lane] = (__half)q; - key_layer[offset + lane] = (__half)k; - - lane += WARP_SIZE; - } - } -#endif -} - -template -void launch_apply_rotary_pos_emb(T* mixed_query, - T* key_layer, - unsigned head_size, - unsigned seq_len, - unsigned rotary_dim, - unsigned offset, - unsigned num_heads, - unsigned batch, - bool rotate_half, - bool rotate_every_two, - hipStream_t stream) -{ - int total_count = batch * num_heads * seq_len; - dim3 block_dims(1024); - dim3 grid_dims((total_count - 1) / MAX_WARP_NUM + 1); // (batch_size); - if (rotate_every_two) - hipLaunchKernelGGL(( apply_rotary_pos_emb), dim3(grid_dims), dim3(block_dims), 0, stream, - mixed_query, key_layer, rotary_dim, seq_len, offset, num_heads, head_size, total_count); - else if (rotate_half) - hipLaunchKernelGGL(( apply_rotary_pos_emb1), dim3(grid_dims), dim3(block_dims), 0, stream, - mixed_query, key_layer, rotary_dim, seq_len, offset, num_heads, head_size, total_count); -} - -template void launch_apply_rotary_pos_emb(float*, - float*, - unsigned, - unsigned, - unsigned, - unsigned, - unsigned, - unsigned, - bool, - bool, - hipStream_t); -template void launch_apply_rotary_pos_emb<__half>(__half*, - __half*, - unsigned, - unsigned, - unsigned, - unsigned, - unsigned, - unsigned, - bool, - bool, - hipStream_t); -/* -__global__ void apply_rotary_pos_emb(float* mixed_query, -float* key_layer, -unsigned rotary_dim, -unsigned seq_len, -unsigned seq_offset, -unsigned num_heads, -unsigned head_size, -unsigned total_count) -{ -cg::thread_block b = cg::this_thread_block(); -cg::thread_block_tile g = cg::tiled_partition(b); - -int id = threadIdx.x; -int gid = 
id >> 5; -int lane = id & 0x1f; - -unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid; -unsigned offset = head_id * head_size; - -unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; - -if (head_id < total_count) { -while (lane < rotary_dim) { -float inv_freq = (float)((lane / 2) * 2) / (float)rotary_dim; -inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; -float q = mixed_query[offset + lane]; -float k = key_layer[offset + lane]; -float rotary_sign = (lane % 2 == 1 ? -1.0 : 1.0); -float q_rot = (q * rotary_sign); -float k_rot = (k * rotary_sign); -q_rot = g.shfl_xor(q_rot, 1); -k_rot = g.shfl_xor(k_rot, 1); -q = q * cosf(inv_freq) + q_rot * sinf(inv_freq); -k = k * cosf(inv_freq) + k_rot * sinf(inv_freq); - -mixed_query[offset + lane] = q; -key_layer[offset + lane] = k; - -lane += WARP_SIZE; -} -} -} - -__global__ void apply_rotary_pos_emb(__half* mixed_query, -__half* key_layer, -unsigned rotary_dim, -unsigned seq_len, -unsigned seq_offset, -unsigned num_heads, -unsigned head_size, -unsigned total_count) -{ -#if __CUDA_ARCH__ >= 700 -cg::thread_block b = cg::this_thread_block(); -cg::thread_block_tile g = cg::tiled_partition(b); - -int id = threadIdx.x; -int gid = id >> 5; -int lane = id & 0x1f; - -unsigned head_id = blockIdx.x * MAX_WARP_NUM + gid; -unsigned offset = head_id * head_size; -constexpr unsigned mask[32] = {0x1 | 0x1000, 0x2 | 0x2000, 0x4 | 0x4000, 0x8 | 0x8000, -0x10 | 0x10000, 0x20 | 0x20000, 0x40 | 0x40000, 0x80 | 0x80000, -0x100 | 0x100000, 0x200 | 0x200000, 0x400 | 0x400000, 0x800 | 0x800000, -0x1000 | 0x1, 0x2000 | 0x2, 0x4000 | 0x4, 0x8000 | 0x8, -0x10000 | 0x10, 0x20000 | 0x20, 0x40000 | 0x40, 0x80000 | 0x80, -0x100000 | 0x100, 0x200000 | 0x200, 0x400000 | 0x400, 0x800000 | 0x800, -0x1000000, 0x2000000, 0x4000000, 0x8000000, -0x10000000, 0x20000000, 0x40000000, 0x80000000}; -unsigned seq_id = (head_id / num_heads) % seq_len + seq_offset; - -if (head_id < total_count) { -while (lane < rotary_dim) { -//float inv_freq = 
(float)((lane / 2) * 2) / (float)rotary_dim; -float inv_freq = (float)((lane % (rotary_dim >> 1)) * 2) / (float)rotary_dim; -inv_freq = 1.0 / powf(10000.0, inv_freq) * (float)seq_id; -float q = (float)mixed_query[offset + lane]; -float k = (float)key_layer[offset + lane]; -float rotary_sign = (lane > 11 ? -1.0 : 1.0); -float q_rot = (q * rotary_sign); -float k_rot = (k * rotary_sign); -auto q_rot_tmp = lane < 12 ? __shfl_sync(mask[lane], q_rot, lane + 12) : __shfl_sync(mask[lane], -q_rot, lane - 12);//g.shfl_xor(q_rot, 12); auto k_rot_tmp = lane < 12 ? __shfl_sync(mask[lane], -k_rot, lane + 12) : __shfl_sync(mask[lane], k_rot, lane - 12);//g.shfl_xor(k_rot, 12); q = q * -cosf(inv_freq) + q_rot_tmp * sinf(inv_freq); k = k * cosf(inv_freq) + k_rot_tmp * sinf(inv_freq); - -mixed_query[offset + lane] = (__half)q; -key_layer[offset + lane] = (__half)k; - -lane += WARP_SIZE; -} -} -#endif -} - -template -void launch_apply_rotary_pos_emb(T* mixed_query, -T* key_layer, -unsigned head_size, -unsigned seq_len, -unsigned rotary_dim, -unsigned offset, -unsigned num_heads, -unsigned batch, -hipStream_t stream) -{ -int total_count = batch * num_heads * seq_len; -dim3 block_dims(1024); -dim3 grid_dims((total_count - 1) / MAX_WARP_NUM + 1); // (batch_size); -hipLaunchKernelGGL(( -apply_rotary_pos_emb), dim3(grid_dims), dim3(block_dims), 0, stream, -mixed_query, key_layer, rotary_dim, seq_len, offset, num_heads, head_size, total_count); -} - -template void launch_apply_rotary_pos_emb(float*, -float*, -unsigned, -unsigned, -unsigned, -unsigned, -unsigned, -unsigned, -hipStream_t); -template void launch_apply_rotary_pos_emb<__half>(__half*, -__half*, -unsigned, -unsigned, -unsigned, -unsigned, -unsigned, -unsigned, -hipStream_t); -*/ diff --git a/deepspeed/ops/csrc/transformer_bak/inference/csrc/dequantize.cu b/deepspeed/ops/csrc/transformer_bak/inference/csrc/dequantize.cu deleted file mode 100644 index 4ddaabd..0000000 --- 
a/deepspeed/ops/csrc/transformer_bak/inference/csrc/dequantize.cu +++ /dev/null @@ -1,110 +0,0 @@ -#include "custom_cuda_layers.h" - -#define MAX_QUANTIZE_GROUPING 1024 - -#define loop_unroll 1 -#define loop_unroll_bits 1 - -__global__ void dequantize_kernel(float* output, - const int8_t* input, - const float* qscale, - int output_size, - int hidden_dim, - int groups, - int merge_count) -{ - unsigned merge_hidden = hidden_dim >> merge_count; - unsigned quantization_stride = (merge_hidden * output_size) / groups; - - unsigned bid = blockIdx.x; - unsigned tid = threadIdx.x; - - while (tid < output_size) { - unsigned w_index = bid / merge_hidden; - unsigned q_index = tid + bid * output_size; - - auto q = input[q_index]; - - unsigned merge_hidden_total = w_index * merge_hidden; - unsigned scale_index = - ((((bid - merge_hidden_total) + tid * merge_hidden) / quantization_stride) - << merge_count) + - w_index; - - float scale_data = qscale[scale_index]; - - output[q_index] = (scale_data * (float)q); - tid += blockDim.x; - } -} - -__global__ void dequantize_kernel(__half* output, - const int8_t* input, - const float* qscale, - unsigned output_size, - unsigned hidden_dim, - unsigned groups, - unsigned merge_count) -{ -#ifdef HALF_PRECISION_AVAILABLE - - unsigned merge_hidden = hidden_dim >> merge_count; - unsigned quantization_stride = (merge_hidden * output_size) / groups; - - unsigned bid = blockIdx.x; - unsigned tid = threadIdx.x; - - while (tid < output_size) { - unsigned w_index = bid / merge_hidden; - unsigned q_index = tid + bid * output_size; - - auto q = input[q_index]; - - unsigned merge_hidden_total = w_index * merge_hidden; - unsigned scale_index = - ((((bid - merge_hidden_total) + tid * merge_hidden) / quantization_stride) - << merge_count) + - w_index; - - float scale_data = qscale[scale_index]; - - output[q_index] = __float2half(scale_data * (float)q); - tid += blockDim.x; - } -#endif -} - -template -void launch_dequantize(T* output, - const int8_t* input, - 
const float* qscale, - unsigned output_size, - unsigned hidden_dim, - unsigned groups, - unsigned merge_count, - cudaStream_t stream) -{ - unsigned threads = 1024; - dim3 block_dims(threads); - dim3 grid_dims(hidden_dim); - - dequantize_kernel<<>>( - output, input, qscale, output_size, hidden_dim, groups, merge_count); -} - -template void launch_dequantize(float*, - const int8_t*, - const float*, - unsigned, - unsigned, - unsigned, - unsigned, - cudaStream_t); -template void launch_dequantize<__half>(__half*, - const int8_t*, - const float*, - unsigned, - unsigned, - unsigned, - unsigned, - cudaStream_t); diff --git a/deepspeed/ops/csrc/transformer_bak/inference/csrc/dequantize.hip b/deepspeed/ops/csrc/transformer_bak/inference/csrc/dequantize.hip deleted file mode 100644 index 7c22e30..0000000 --- a/deepspeed/ops/csrc/transformer_bak/inference/csrc/dequantize.hip +++ /dev/null @@ -1,112 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! -#include "hip/hip_runtime.h" -#include "custom_hip_layers.h" - -#define MAX_QUANTIZE_GROUPING 1024 - -#define loop_unroll 1 -#define loop_unroll_bits 1 - -__global__ void dequantize_kernel(float* output, - const int8_t* input, - const float* qscale, - int output_size, - int hidden_dim, - int groups, - int merge_count) -{ - unsigned merge_hidden = hidden_dim >> merge_count; - unsigned quantization_stride = (merge_hidden * output_size) / groups; - - unsigned bid = blockIdx.x; - unsigned tid = threadIdx.x; - - while (tid < output_size) { - unsigned w_index = bid / merge_hidden; - unsigned q_index = tid + bid * output_size; - - auto q = input[q_index]; - - unsigned merge_hidden_total = w_index * merge_hidden; - unsigned scale_index = - ((((bid - merge_hidden_total) + tid * merge_hidden) / quantization_stride) - << merge_count) + - w_index; - - float scale_data = qscale[scale_index]; - - output[q_index] = (scale_data * (float)q); - tid += blockDim.x; - } -} - -__global__ void dequantize_kernel(__half* output, - const 
int8_t* input, - const float* qscale, - unsigned output_size, - unsigned hidden_dim, - unsigned groups, - unsigned merge_count) -{ -#ifdef HALF_PRECISION_AVAILABLE - - unsigned merge_hidden = hidden_dim >> merge_count; - unsigned quantization_stride = (merge_hidden * output_size) / groups; - - unsigned bid = blockIdx.x; - unsigned tid = threadIdx.x; - - while (tid < output_size) { - unsigned w_index = bid / merge_hidden; - unsigned q_index = tid + bid * output_size; - - auto q = input[q_index]; - - unsigned merge_hidden_total = w_index * merge_hidden; - unsigned scale_index = - ((((bid - merge_hidden_total) + tid * merge_hidden) / quantization_stride) - << merge_count) + - w_index; - - float scale_data = qscale[scale_index]; - - output[q_index] = __float2half(scale_data * (float)q); - tid += blockDim.x; - } -#endif -} - -template -void launch_dequantize(T* output, - const int8_t* input, - const float* qscale, - unsigned output_size, - unsigned hidden_dim, - unsigned groups, - unsigned merge_count, - hipStream_t stream) -{ - unsigned threads = 1024; - dim3 block_dims(threads); - dim3 grid_dims(hidden_dim); - - hipLaunchKernelGGL(( dequantize_kernel), dim3(grid_dims), dim3(block_dims), 0, stream, - output, input, qscale, output_size, hidden_dim, groups, merge_count); -} - -template void launch_dequantize(float*, - const int8_t*, - const float*, - unsigned, - unsigned, - unsigned, - unsigned, - hipStream_t); -template void launch_dequantize<__half>(__half*, - const int8_t*, - const float*, - unsigned, - unsigned, - unsigned, - unsigned, - hipStream_t); diff --git a/deepspeed/ops/csrc/transformer_bak/inference/csrc/gelu.cu b/deepspeed/ops/csrc/transformer_bak/inference/csrc/gelu.cu deleted file mode 100644 index 70bbf42..0000000 --- a/deepspeed/ops/csrc/transformer_bak/inference/csrc/gelu.cu +++ /dev/null @@ -1,525 +0,0 @@ -#include "custom_cuda_layers.h" - -#define MAX_CAP 4 -#define MAX_SEQ 2048 - -inline __device__ float gelu(const float x) -{ - const float 
sqrt_param = 0.79788456080286535587989211986876f; - const float mul_param = 0.044715; - return x * 0.5f * (1.0f + tanhf(sqrt_param * (x + mul_param * x * x * x))); -} - -__global__ void fused_bias_gelu(float* input, - const float* bias, - int total_count, - int intermediate_size) -{ - float4* input_cast = reinterpret_cast(input); - const float4* bias_cast = reinterpret_cast(bias); - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float4 data = input_cast[offset]; - float4 bias_data = bias_cast[offset % intermediate_size]; - - data.x += bias_data.x; - data.y += bias_data.y; - data.z += bias_data.z; - data.w += bias_data.w; - - data.x = gelu(data.x); - data.y = gelu(data.y); - data.z = gelu(data.z); - data.w = gelu(data.w); - - input_cast[offset] = data; - } -} - -__global__ void fused_bias_gelu(__half* input, - const __half* bias, - int total_count, - int intermediate_size) -{ -#ifdef HALF_PRECISION_AVAILABLE - - float2* input_cast = reinterpret_cast(input); - const float2* bias_cast = reinterpret_cast(bias); - - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float2 vals_vec = input_cast[offset]; - float2 bias_vec = bias_cast[offset % intermediate_size]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - low_data.x += low_bias.x; - low_data.y += low_bias.y; - high_data.x += high_bias.x; - high_data.y += high_bias.y; - - low_data.x = gelu(low_data.x); - low_data.y = gelu(low_data.y); - high_data.x = gelu(high_data.x); - high_data.y = gelu(high_data.y); - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - input_cast[offset] = vals_vec; - } -#endif -} - -template -void 
launch_bias_gelu(T* input, - const T* bias, - int intermediate_size, - int batch_size, - cudaStream_t stream) -{ - int total_count = batch_size * (intermediate_size / 4); - int threads = 1024; // intermediate_size / iterations / 4; - dim3 block_dims(threads); - dim3 grid_dims(((total_count - 1) / 1024 + 1)); // (batch_size); - - fused_bias_gelu<<>>( - input, bias, total_count, intermediate_size / 4); -} - -template void launch_bias_gelu(float*, const float*, int, int, cudaStream_t); -template void launch_bias_gelu<__half>(__half*, const __half*, int, int, cudaStream_t); - -__global__ void fused_bias_add(float* input, const float* bias, int total_count, int hidden_size) -{ - float4* input_cast = reinterpret_cast(input); - const float4* bias_cast = reinterpret_cast(bias); - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float4 data = input_cast[offset]; - float4 bias_data = bias_cast[offset % hidden_size]; - - data.x += bias_data.x; - data.y += bias_data.y; - data.z += bias_data.z; - data.w += bias_data.w; - - input_cast[offset] = data; - } -} - -__global__ void fused_bias_add(__half* input, const __half* bias, int total_count, int hidden_size) -{ -#ifdef HALF_PRECISION_AVAILABLE - - float2* input_cast = reinterpret_cast(input); - const float2* bias_cast = reinterpret_cast(bias); - - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float2 vals_vec = input_cast[offset]; - float2 bias_vec = bias_cast[offset % hidden_size]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - low_data.x += low_bias.x; - low_data.y += low_bias.y; - high_data.x += high_bias.x; - high_data.y += high_bias.y; - - vals_half[0] = 
__float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - input_cast[offset] = vals_vec; - } -#endif -} - -template -void launch_bias_add(T* input, const T* bias, int hidden_size, int batch_size, cudaStream_t stream) -{ - int total_count = batch_size * (hidden_size / 4); - int threads = 1024; // hidden_size / iterations / 4; - dim3 block_dims(threads); - dim3 grid_dims(((total_count - 1) / threads + 1)); // (batch_size); - - fused_bias_add<<>>(input, bias, total_count, hidden_size / 4); -} - -template void launch_bias_add(float*, const float*, int, int, cudaStream_t); -template void launch_bias_add<__half>(__half*, const __half*, int, int, cudaStream_t); - -__global__ void fused_bias_residual(float* input, - float* output, - float* attn, - float* bias, - float* attnbias, - int total_count, - int intermediate_size, - int mp_size) -{ - float4* input_cast = reinterpret_cast(input); - float4* output_cast = reinterpret_cast(output); - float4* attn_cast = reinterpret_cast(attn); - float4* bias_cast = reinterpret_cast(bias); - float4* attnbias_cast = reinterpret_cast(attnbias); - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float4 data = input_cast[offset]; - float4 out = output_cast[offset]; - float4 res_vec = attn_cast[offset]; - float4 bias_data = bias_cast[offset % intermediate_size]; - float4 attn_bias = attnbias_cast[offset % intermediate_size]; - - data.x = (data.x + res_vec.x) * mp_size + (out.x + bias_data.x + attn_bias.x); - data.y = (data.y + res_vec.y) * mp_size + (out.y + bias_data.y + attn_bias.y); - data.z = (data.z + res_vec.z) * mp_size + (out.z + bias_data.z + attn_bias.z); - data.w = (data.w + res_vec.w) * mp_size + (out.w + bias_data.w + attn_bias.w); - - output_cast[offset] = data; - } -} - -__global__ void fused_bias_residual(__half* input, - __half* output, - __half* attn, - __half* bias, - __half* attn_bias, - int total_count, - int intermediate_size, - int mp_size) -{ -#ifdef 
HALF_PRECISION_AVAILABLE - - float2* input_cast = reinterpret_cast(input); - float2* output_cast = reinterpret_cast(output); - float2* attn_cast = reinterpret_cast(attn); - - float2* bias_cast = reinterpret_cast(bias); - float2* attnbias_cast = reinterpret_cast(attn_bias); - - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float2 vals_vec = input_cast[offset]; - float2 out_vec = output_cast[offset]; - float2 res_vec = attn_cast[offset]; - - float2 bias_vec = bias_cast[offset % intermediate_size]; - float2 attn_bias_vec = attnbias_cast[offset % intermediate_size]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* out_half = reinterpret_cast<__half2*>(&out_vec); - __half2* res_half = reinterpret_cast<__half2*>(&res_vec); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - __half2* attnbias_half = reinterpret_cast<__half2*>(&attn_bias_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - float2 low_out = __half22float2(out_half[0]); - float2 high_out = __half22float2(out_half[1]); - - float2 low_res = __half22float2(res_half[0]); - float2 high_res = __half22float2(res_half[1]); - - float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - float2 attn_low_bias = __half22float2(attnbias_half[0]); - float2 attn_high_bias = __half22float2(attnbias_half[1]); - - low_data.x = - (low_data.x + low_res.x) * mp_size + (low_out.x + (low_bias.x + attn_low_bias.x)); - low_data.y = - (low_data.y + low_res.y) * mp_size + (low_out.y + (low_bias.y + attn_low_bias.y)); - high_data.x = - (high_data.x + high_res.x) * mp_size + (high_out.x + (high_bias.x + attn_high_bias.x)); - high_data.y = - (high_data.y + high_res.y) * mp_size + (high_out.y + (high_bias.y + attn_high_bias.y)); - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - output_cast[offset] = vals_vec; - } 
-#endif -} - -template -void launch_bias_residual(T* input, - T* output, - T* attn, - T* bias, - T* attn_bias, - int batch, - int hidden_dim, - int mp_size, - cudaStream_t stream) -{ - int total_count = batch * hidden_dim / 4; - dim3 block_dims(1024); - dim3 grid_dims((total_count - 1) / 1024 + 1); // (batch_size); - - fused_bias_residual<<>>( - input, output, attn, bias, attn_bias, total_count, hidden_dim / 4, 1.0 / mp_size); -} - -template void -launch_bias_residual(float*, float*, float*, float*, float*, int, int, int, cudaStream_t); -template void launch_bias_residual<__half>(__half*, - __half*, - __half*, - __half*, - __half*, - int, - int, - int, - cudaStream_t); - -__global__ void gptj_residual_add(float* input, - float* output, - float* attn, - float* bias, - float* attnbias, - int total_count, - int intermediate_size, - float mp_size) -{ - float4* input_cast = reinterpret_cast(input); - float4* output_cast = reinterpret_cast(output); - float4* attn_cast = reinterpret_cast(attn); - float4* bias_cast = reinterpret_cast(bias); - float4* attnbias_cast = reinterpret_cast(attnbias); - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float4 data = input_cast[offset]; - float4 out = output_cast[offset]; - float4 res_vec = attn_cast[offset]; - float4 bias_data = bias_cast[offset % intermediate_size]; - float4 attn_bias = attnbias_cast[offset % intermediate_size]; - - data.x = data.x * mp_size + (out.x + res_vec.x + bias_data.x + attn_bias.x); - data.y = data.y * mp_size + (out.y + res_vec.y + bias_data.y + attn_bias.y); - data.z = data.z * mp_size + (out.z + res_vec.z + bias_data.z + attn_bias.z); - data.w = data.w * mp_size + (out.w + res_vec.w + bias_data.w + attn_bias.w); - - output_cast[offset] = data; - } -} - -__global__ void gptj_residual_add(__half* input, - __half* output, - __half* attn, - __half* bias, - __half* attn_bias, - int total_count, - int intermediate_size, - float mp_size) -{ -#if __CUDA_ARCH__ >= 700 || 
defined(__HIP_PLATFORM_HCC__) - - float2* input_cast = reinterpret_cast(input); - float2* output_cast = reinterpret_cast(output); - float2* attn_cast = reinterpret_cast(attn); - - float2* bias_cast = reinterpret_cast(bias); - float2* attnbias_cast = reinterpret_cast(attn_bias); - - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float2 vals_vec = input_cast[offset]; - float2 out_vec = output_cast[offset]; - float2 res_vec = attn_cast[offset]; - - float2 bias_vec = bias_cast[offset % intermediate_size]; - float2 attn_bias_vec = attnbias_cast[offset % intermediate_size]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* out_half = reinterpret_cast<__half2*>(&out_vec); - __half2* res_half = reinterpret_cast<__half2*>(&res_vec); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - __half2* attnbias_half = reinterpret_cast<__half2*>(&attn_bias_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - float2 low_out = __half22float2(out_half[0]); - float2 high_out = __half22float2(out_half[1]); - - float2 low_res = __half22float2(res_half[0]); - float2 high_res = __half22float2(res_half[1]); - - float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - float2 attn_low_bias = __half22float2(attnbias_half[0]); - float2 attn_high_bias = __half22float2(attnbias_half[1]); - - low_data.x = - low_data.x * mp_size + (low_out.x + low_res.x + (low_bias.x + attn_low_bias.x)); - low_data.y = - low_data.y * mp_size + (low_out.y + low_res.y + (low_bias.y + attn_low_bias.y)); - high_data.x = - high_data.x * mp_size + (high_out.x + high_res.x + (high_bias.x + attn_high_bias.x)); - high_data.y = - high_data.y * mp_size + (high_out.y + high_res.y + (high_bias.y + attn_high_bias.y)); - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - output_cast[offset] = vals_vec; - } 
-#endif -} - -template -void launch_gptj_residual_add(T* input, - T* output, - T* attn, - T* bias, - T* attn_bias, - int hidden_dim, - int batch, - int mp_size, - cudaStream_t stream) -{ - int total_count = batch * hidden_dim / 4; - dim3 block_dims(1024); - dim3 grid_dims((total_count - 1) / 1024 + 1); // (batch_size); - - gptj_residual_add<<>>( - input, output, attn, bias, attn_bias, total_count, hidden_dim / 4, 1.0 / mp_size); -} - -template void launch_gptj_residual_add(float*, - float*, - float*, - float*, - float*, - int, - int, - int, - cudaStream_t); -template void launch_gptj_residual_add<__half>(__half*, - __half*, - __half*, - __half*, - __half*, - int, - int, - int, - cudaStream_t); - -__global__ void moe_res_matmul(float* residual, - float* coef, - float* mlp_out, - int seq_len, - int hidden_dim) -{ - unsigned tid = threadIdx.x; - float4* residual_cast = reinterpret_cast(residual); - float4* coef_cast = reinterpret_cast(coef); - float4* mlp_out_cast = reinterpret_cast(mlp_out); - - residual_cast += blockIdx.x * hidden_dim; - mlp_out_cast += blockIdx.x * hidden_dim; - - float4* coef_cast2 = coef_cast + hidden_dim; - - while (tid < hidden_dim) { - float4 res = residual_cast[tid]; - float4 mlp = mlp_out_cast[tid]; - float4 coef1 = coef_cast[tid]; - float4 coef2 = coef_cast2[tid]; - mlp.x = mlp.x * coef2.x + res.x * coef1.x; - mlp.y = mlp.y * coef2.y + res.y * coef1.y; - mlp.z = mlp.z * coef2.z + res.z * coef1.z; - mlp.w = mlp.w * coef2.w + res.w * coef1.w; - mlp_out_cast[tid] = mlp; - tid += blockDim.x; - } -} - -__global__ void moe_res_matmul(__half* residual, - __half* coef, - __half* mlp_out, - int seq_len, - int hidden_dim) -{ - unsigned tid = threadIdx.x; - - float2* residual_cast = reinterpret_cast(residual); - float2* mlp_out_cast = reinterpret_cast(mlp_out); - float2* coef_cast = reinterpret_cast(coef); - float2* coef_cast2 = coef_cast + hidden_dim; - - residual_cast += blockIdx.x * hidden_dim; - mlp_out_cast += blockIdx.x * hidden_dim; - - while 
(tid < hidden_dim) { - float2 res = residual_cast[tid]; - float2 coef1 = coef_cast[tid]; - float2 coef2 = coef_cast[tid]; - float2 data = mlp_out_cast[tid]; - __half* data_h = reinterpret_cast<__half*>(&data); - __half* coef1_h = reinterpret_cast<__half*>(&coef1); - __half* coef2_h = reinterpret_cast<__half*>(&coef2); - __half* res_h = reinterpret_cast<__half*>(&res); - data_h[0] = res_h[0] * coef1_h[0] + data_h[0] * coef2_h[0]; - data_h[1] = res_h[1] * coef1_h[1] + data_h[1] * coef2_h[1]; - data_h[2] = res_h[2] * coef1_h[2] + data_h[2] * coef2_h[2]; - data_h[3] = res_h[3] * coef1_h[3] + data_h[3] * coef2_h[3]; - - mlp_out_cast[tid] = data; - tid += blockDim.x; - } -} - -template -void launch_moe_res_matmul(T* residual, - T* coef, - T* mlp_out, - int seq_len, - int hidden_dim, - cudaStream_t stream) -{ - dim3 grid_dim(seq_len); - dim3 block_dim(1024); - moe_res_matmul<<>>( - residual, coef, mlp_out, seq_len, hidden_dim / 4); -} - -template void launch_moe_res_matmul(float* residual, - float* coef, - float* mlp_out, - int seq_len, - int hidden_dim, - cudaStream_t stream); -template void launch_moe_res_matmul(__half* residual, - __half* coef, - __half* mlp_out, - int seq_len, - int hidden_dim, - cudaStream_t stream); diff --git a/deepspeed/ops/csrc/transformer_bak/inference/csrc/gelu.hip b/deepspeed/ops/csrc/transformer_bak/inference/csrc/gelu.hip deleted file mode 100644 index 00c03ef..0000000 --- a/deepspeed/ops/csrc/transformer_bak/inference/csrc/gelu.hip +++ /dev/null @@ -1,527 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
-#include "hip/hip_runtime.h" -#include "custom_hip_layers.h" - -#define MAX_CAP 4 -#define MAX_SEQ 2048 - -inline __device__ float gelu(const float x) -{ - const float sqrt_param = 0.79788456080286535587989211986876f; - const float mul_param = 0.044715; - return x * 0.5f * (1.0f + tanhf(sqrt_param * (x + mul_param * x * x * x))); -} - -__global__ void fused_bias_gelu(float* input, - const float* bias, - int total_count, - int intermediate_size) -{ - float4* input_cast = reinterpret_cast(input); - const float4* bias_cast = reinterpret_cast(bias); - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float4 data = input_cast[offset]; - float4 bias_data = bias_cast[offset % intermediate_size]; - - data.x += bias_data.x; - data.y += bias_data.y; - data.z += bias_data.z; - data.w += bias_data.w; - - data.x = gelu(data.x); - data.y = gelu(data.y); - data.z = gelu(data.z); - data.w = gelu(data.w); - - input_cast[offset] = data; - } -} - -__global__ void fused_bias_gelu(__half* input, - const __half* bias, - int total_count, - int intermediate_size) -{ -#ifdef HALF_PRECISION_AVAILABLE - - float2* input_cast = reinterpret_cast(input); - const float2* bias_cast = reinterpret_cast(bias); - - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float2 vals_vec = input_cast[offset]; - float2 bias_vec = bias_cast[offset % intermediate_size]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - low_data.x += low_bias.x; - low_data.y += low_bias.y; - high_data.x += high_bias.x; - high_data.y += high_bias.y; - - low_data.x = gelu(low_data.x); - low_data.y = gelu(low_data.y); - high_data.x = gelu(high_data.x); - high_data.y = 
gelu(high_data.y); - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - input_cast[offset] = vals_vec; - } -#endif -} - -template -void launch_bias_gelu(T* input, - const T* bias, - int intermediate_size, - int batch_size, - hipStream_t stream) -{ - int total_count = batch_size * (intermediate_size / 4); - int threads = 1024; // intermediate_size / iterations / 4; - dim3 block_dims(threads); - dim3 grid_dims(((total_count - 1) / 1024 + 1)); // (batch_size); - - hipLaunchKernelGGL(( fused_bias_gelu), dim3(grid_dims), dim3(block_dims), 0, stream, - input, bias, total_count, intermediate_size / 4); -} - -template void launch_bias_gelu(float*, const float*, int, int, hipStream_t); -template void launch_bias_gelu<__half>(__half*, const __half*, int, int, hipStream_t); - -__global__ void fused_bias_add(float* input, const float* bias, int total_count, int hidden_size) -{ - float4* input_cast = reinterpret_cast(input); - const float4* bias_cast = reinterpret_cast(bias); - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float4 data = input_cast[offset]; - float4 bias_data = bias_cast[offset % hidden_size]; - - data.x += bias_data.x; - data.y += bias_data.y; - data.z += bias_data.z; - data.w += bias_data.w; - - input_cast[offset] = data; - } -} - -__global__ void fused_bias_add(__half* input, const __half* bias, int total_count, int hidden_size) -{ -#ifdef HALF_PRECISION_AVAILABLE - - float2* input_cast = reinterpret_cast(input); - const float2* bias_cast = reinterpret_cast(bias); - - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float2 vals_vec = input_cast[offset]; - float2 bias_vec = bias_cast[offset % hidden_size]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - 
float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - low_data.x += low_bias.x; - low_data.y += low_bias.y; - high_data.x += high_bias.x; - high_data.y += high_bias.y; - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - input_cast[offset] = vals_vec; - } -#endif -} - -template -void launch_bias_add(T* input, const T* bias, int hidden_size, int batch_size, hipStream_t stream) -{ - int total_count = batch_size * (hidden_size / 4); - int threads = 1024; // hidden_size / iterations / 4; - dim3 block_dims(threads); - dim3 grid_dims(((total_count - 1) / threads + 1)); // (batch_size); - - hipLaunchKernelGGL(( fused_bias_add), dim3(grid_dims), dim3(block_dims), 0, stream, input, bias, total_count, hidden_size / 4); -} - -template void launch_bias_add(float*, const float*, int, int, hipStream_t); -template void launch_bias_add<__half>(__half*, const __half*, int, int, hipStream_t); - -__global__ void fused_bias_residual(float* input, - float* output, - float* attn, - float* bias, - float* attnbias, - int total_count, - int intermediate_size, - int mp_size) -{ - float4* input_cast = reinterpret_cast(input); - float4* output_cast = reinterpret_cast(output); - float4* attn_cast = reinterpret_cast(attn); - float4* bias_cast = reinterpret_cast(bias); - float4* attnbias_cast = reinterpret_cast(attnbias); - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float4 data = input_cast[offset]; - float4 out = output_cast[offset]; - float4 res_vec = attn_cast[offset]; - float4 bias_data = bias_cast[offset % intermediate_size]; - float4 attn_bias = attnbias_cast[offset % intermediate_size]; - - data.x = (data.x + res_vec.x) * mp_size + (out.x + bias_data.x + attn_bias.x); - data.y = (data.y + res_vec.y) * mp_size + (out.y + bias_data.y + attn_bias.y); - data.z = (data.z + res_vec.z) * mp_size + (out.z + bias_data.z + attn_bias.z); - data.w = (data.w + 
res_vec.w) * mp_size + (out.w + bias_data.w + attn_bias.w); - - output_cast[offset] = data; - } -} - -__global__ void fused_bias_residual(__half* input, - __half* output, - __half* attn, - __half* bias, - __half* attn_bias, - int total_count, - int intermediate_size, - int mp_size) -{ -#ifdef HALF_PRECISION_AVAILABLE - - float2* input_cast = reinterpret_cast(input); - float2* output_cast = reinterpret_cast(output); - float2* attn_cast = reinterpret_cast(attn); - - float2* bias_cast = reinterpret_cast(bias); - float2* attnbias_cast = reinterpret_cast(attn_bias); - - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float2 vals_vec = input_cast[offset]; - float2 out_vec = output_cast[offset]; - float2 res_vec = attn_cast[offset]; - - float2 bias_vec = bias_cast[offset % intermediate_size]; - float2 attn_bias_vec = attnbias_cast[offset % intermediate_size]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* out_half = reinterpret_cast<__half2*>(&out_vec); - __half2* res_half = reinterpret_cast<__half2*>(&res_vec); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - __half2* attnbias_half = reinterpret_cast<__half2*>(&attn_bias_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - float2 low_out = __half22float2(out_half[0]); - float2 high_out = __half22float2(out_half[1]); - - float2 low_res = __half22float2(res_half[0]); - float2 high_res = __half22float2(res_half[1]); - - float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - float2 attn_low_bias = __half22float2(attnbias_half[0]); - float2 attn_high_bias = __half22float2(attnbias_half[1]); - - low_data.x = - (low_data.x + low_res.x) * mp_size + (low_out.x + (low_bias.x + attn_low_bias.x)); - low_data.y = - (low_data.y + low_res.y) * mp_size + (low_out.y + (low_bias.y + attn_low_bias.y)); - high_data.x = - (high_data.x + high_res.x) * 
mp_size + (high_out.x + (high_bias.x + attn_high_bias.x)); - high_data.y = - (high_data.y + high_res.y) * mp_size + (high_out.y + (high_bias.y + attn_high_bias.y)); - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - output_cast[offset] = vals_vec; - } -#endif -} - -template -void launch_bias_residual(T* input, - T* output, - T* attn, - T* bias, - T* attn_bias, - int batch, - int hidden_dim, - int mp_size, - hipStream_t stream) -{ - int total_count = batch * hidden_dim / 4; - dim3 block_dims(1024); - dim3 grid_dims((total_count - 1) / 1024 + 1); // (batch_size); - - hipLaunchKernelGGL(( fused_bias_residual), dim3(grid_dims), dim3(block_dims), 0, stream, - input, output, attn, bias, attn_bias, total_count, hidden_dim / 4, 1.0 / mp_size); -} - -template void -launch_bias_residual(float*, float*, float*, float*, float*, int, int, int, hipStream_t); -template void launch_bias_residual<__half>(__half*, - __half*, - __half*, - __half*, - __half*, - int, - int, - int, - hipStream_t); - -__global__ void gptj_residual_add(float* input, - float* output, - float* attn, - float* bias, - float* attnbias, - int total_count, - int intermediate_size, - float mp_size) -{ - float4* input_cast = reinterpret_cast(input); - float4* output_cast = reinterpret_cast(output); - float4* attn_cast = reinterpret_cast(attn); - float4* bias_cast = reinterpret_cast(bias); - float4* attnbias_cast = reinterpret_cast(attnbias); - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float4 data = input_cast[offset]; - float4 out = output_cast[offset]; - float4 res_vec = attn_cast[offset]; - float4 bias_data = bias_cast[offset % intermediate_size]; - float4 attn_bias = attnbias_cast[offset % intermediate_size]; - - data.x = data.x * mp_size + (out.x + res_vec.x + bias_data.x + attn_bias.x); - data.y = data.y * mp_size + (out.y + res_vec.y + bias_data.y + attn_bias.y); - data.z = data.z * mp_size + (out.z + res_vec.z + 
bias_data.z + attn_bias.z); - data.w = data.w * mp_size + (out.w + res_vec.w + bias_data.w + attn_bias.w); - - output_cast[offset] = data; - } -} - -__global__ void gptj_residual_add(__half* input, - __half* output, - __half* attn, - __half* bias, - __half* attn_bias, - int total_count, - int intermediate_size, - float mp_size) -{ -#if __CUDA_ARCH__ >= 700 || defined(__HIP_PLATFORM_HCC__) - - float2* input_cast = reinterpret_cast(input); - float2* output_cast = reinterpret_cast(output); - float2* attn_cast = reinterpret_cast(attn); - - float2* bias_cast = reinterpret_cast(bias); - float2* attnbias_cast = reinterpret_cast(attn_bias); - - int offset = blockIdx.x * blockDim.x + threadIdx.x; - - if (offset < total_count) { - float2 vals_vec = input_cast[offset]; - float2 out_vec = output_cast[offset]; - float2 res_vec = attn_cast[offset]; - - float2 bias_vec = bias_cast[offset % intermediate_size]; - float2 attn_bias_vec = attnbias_cast[offset % intermediate_size]; - - __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); - __half2* out_half = reinterpret_cast<__half2*>(&out_vec); - __half2* res_half = reinterpret_cast<__half2*>(&res_vec); - __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); - __half2* attnbias_half = reinterpret_cast<__half2*>(&attn_bias_vec); - - float2 low_data = __half22float2(vals_half[0]); - float2 high_data = __half22float2(vals_half[1]); - - float2 low_out = __half22float2(out_half[0]); - float2 high_out = __half22float2(out_half[1]); - - float2 low_res = __half22float2(res_half[0]); - float2 high_res = __half22float2(res_half[1]); - - float2 low_bias = __half22float2(bias_half[0]); - float2 high_bias = __half22float2(bias_half[1]); - - float2 attn_low_bias = __half22float2(attnbias_half[0]); - float2 attn_high_bias = __half22float2(attnbias_half[1]); - - low_data.x = - low_data.x * mp_size + (low_out.x + low_res.x + (low_bias.x + attn_low_bias.x)); - low_data.y = - low_data.y * mp_size + (low_out.y + low_res.y + (low_bias.y + 
attn_low_bias.y)); - high_data.x = - high_data.x * mp_size + (high_out.x + high_res.x + (high_bias.x + attn_high_bias.x)); - high_data.y = - high_data.y * mp_size + (high_out.y + high_res.y + (high_bias.y + attn_high_bias.y)); - - vals_half[0] = __float22half2_rn(low_data); - vals_half[1] = __float22half2_rn(high_data); - - output_cast[offset] = vals_vec; - } -#endif -} - -template -void launch_gptj_residual_add(T* input, - T* output, - T* attn, - T* bias, - T* attn_bias, - int hidden_dim, - int batch, - int mp_size, - hipStream_t stream) -{ - int total_count = batch * hidden_dim / 4; - dim3 block_dims(1024); - dim3 grid_dims((total_count - 1) / 1024 + 1); // (batch_size); - - hipLaunchKernelGGL(( gptj_residual_add), dim3(grid_dims), dim3(block_dims), 0, stream, - input, output, attn, bias, attn_bias, total_count, hidden_dim / 4, 1.0 / mp_size); -} - -template void launch_gptj_residual_add(float*, - float*, - float*, - float*, - float*, - int, - int, - int, - hipStream_t); -template void launch_gptj_residual_add<__half>(__half*, - __half*, - __half*, - __half*, - __half*, - int, - int, - int, - hipStream_t); - -__global__ void moe_res_matmul(float* residual, - float* coef, - float* mlp_out, - int seq_len, - int hidden_dim) -{ - unsigned tid = threadIdx.x; - float4* residual_cast = reinterpret_cast(residual); - float4* coef_cast = reinterpret_cast(coef); - float4* mlp_out_cast = reinterpret_cast(mlp_out); - - residual_cast += blockIdx.x * hidden_dim; - mlp_out_cast += blockIdx.x * hidden_dim; - - float4* coef_cast2 = coef_cast + hidden_dim; - - while (tid < hidden_dim) { - float4 res = residual_cast[tid]; - float4 mlp = mlp_out_cast[tid]; - float4 coef1 = coef_cast[tid]; - float4 coef2 = coef_cast2[tid]; - mlp.x = mlp.x * coef2.x + res.x * coef1.x; - mlp.y = mlp.y * coef2.y + res.y * coef1.y; - mlp.z = mlp.z * coef2.z + res.z * coef1.z; - mlp.w = mlp.w * coef2.w + res.w * coef1.w; - mlp_out_cast[tid] = mlp; - tid += blockDim.x; - } -} - -__global__ void 
moe_res_matmul(__half* residual, - __half* coef, - __half* mlp_out, - int seq_len, - int hidden_dim) -{ - unsigned tid = threadIdx.x; - - float2* residual_cast = reinterpret_cast(residual); - float2* mlp_out_cast = reinterpret_cast(mlp_out); - float2* coef_cast = reinterpret_cast(coef); - float2* coef_cast2 = coef_cast + hidden_dim; - - residual_cast += blockIdx.x * hidden_dim; - mlp_out_cast += blockIdx.x * hidden_dim; - - while (tid < hidden_dim) { - float2 res = residual_cast[tid]; - float2 coef1 = coef_cast[tid]; - float2 coef2 = coef_cast[tid]; - float2 data = mlp_out_cast[tid]; - __half* data_h = reinterpret_cast<__half*>(&data); - __half* coef1_h = reinterpret_cast<__half*>(&coef1); - __half* coef2_h = reinterpret_cast<__half*>(&coef2); - __half* res_h = reinterpret_cast<__half*>(&res); - data_h[0] = res_h[0] * coef1_h[0] + data_h[0] * coef2_h[0]; - data_h[1] = res_h[1] * coef1_h[1] + data_h[1] * coef2_h[1]; - data_h[2] = res_h[2] * coef1_h[2] + data_h[2] * coef2_h[2]; - data_h[3] = res_h[3] * coef1_h[3] + data_h[3] * coef2_h[3]; - - mlp_out_cast[tid] = data; - tid += blockDim.x; - } -} - -template -void launch_moe_res_matmul(T* residual, - T* coef, - T* mlp_out, - int seq_len, - int hidden_dim, - hipStream_t stream) -{ - dim3 grid_dim(seq_len); - dim3 block_dim(1024); - hipLaunchKernelGGL(( moe_res_matmul), dim3(grid_dim), dim3(block_dim), 0, stream, - residual, coef, mlp_out, seq_len, hidden_dim / 4); -} - -template void launch_moe_res_matmul(float* residual, - float* coef, - float* mlp_out, - int seq_len, - int hidden_dim, - hipStream_t stream); -template void launch_moe_res_matmul(__half* residual, - __half* coef, - __half* mlp_out, - int seq_len, - int hidden_dim, - hipStream_t stream); diff --git a/deepspeed/ops/csrc/transformer_bak/inference/csrc/normalize.cu b/deepspeed/ops/csrc/transformer_bak/inference/csrc/normalize.cu deleted file mode 100644 index 1d69139..0000000 --- a/deepspeed/ops/csrc/transformer_bak/inference/csrc/normalize.cu +++ /dev/null 
@@ -1,451 +0,0 @@ -#include -#include "custom_cuda_layers.h" - -//#include -#include -#include -#include - -#define NORM_REG (MAX_REGISTERS) - -namespace cg = cooperative_groups; - -__global__ void fused_bias_residual_layer_norm(float* output, - const float* vals, - const float* gamma, - const float* beta, - float epsilon, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id >> 5; - int warp_num = iteration_stride >> 5; - - float inp_reg[NORM_REG]; - - int k = 0; - float sum = 0; - int input_id = id; - while (input_id < row_stride) { - inp_reg[k] = vals[input_id + row * row_stride]; - sum += inp_reg[k++]; - input_id += iteration_stride; - } - - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - - __shared__ float shr[MAX_WARP_NUM]; - - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - - float mean = sum / (row_stride); - sum = 0.f; - for (int f = 0; f < k; f++) { - inp_reg[f] -= mean; - sum += inp_reg[f] * inp_reg[f]; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= (row_stride); - sum += epsilon; - sum = __frsqrt_rn(sum); - for (int f = 0; f < k; f++) { - int out_id = f * iteration_stride + id; - inp_reg[f] = inp_reg[f] * sum; - inp_reg[f] = inp_reg[f] * gamma[out_id] + beta[out_id]; - output[out_id + row * row_stride] = inp_reg[f]; - } -} - -__global__ void fused_bias_residual_layer_norm(__half* 
output, - const __half* vals, - const __half* gamma, - const __half* beta, - float epsilon, - int row_stride) -{ -#ifdef HALF_PRECISION_AVAILABLE - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id >> 5; - int warp_num = iteration_stride >> 5; - - __half2 inp_reg[NORM_REG]; - - const __half2* vals_cast = reinterpret_cast(vals); - __half2* out_cast = reinterpret_cast<__half2*>(output); - - int k = 0; - int input_id = id; - while (input_id < row_stride) { - inp_reg[k++] = vals_cast[input_id + row * row_stride]; - input_id += iteration_stride; - } - float sum = 0; - for (int f = k - 1; f >= 0; f--) { - float2 inp_f = __half22float2(inp_reg[f]); - sum += inp_f.x + inp_f.y; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - __shared__ float shr[MAX_WARP_NUM]; - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - float mean = sum / (row_stride << 1); - sum = 0.f; - for (int f = 0; f < k; f++) { - float2 inp_f = __half22float2(inp_reg[f]); - inp_f.x -= mean; - inp_f.y -= mean; - inp_reg[f] = __float22half2_rn(inp_f); - sum += inp_f.x * inp_f.x; - sum += inp_f.y * inp_f.y; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= (row_stride << 1); - sum += epsilon; - sum = __frsqrt_rn(sum); - __half2 variance_h = __float2half2_rn(sum); - const __half2* gamma_cast = reinterpret_cast(gamma); - const __half2* beta_cast = 
reinterpret_cast(beta); - for (int f = 0; f < k; f++) { - int out_id = f * iteration_stride + id; - inp_reg[f] = inp_reg[f] * variance_h; - inp_reg[f] = inp_reg[f] * gamma_cast[out_id] + beta_cast[out_id]; - out_cast[out_id + row * row_stride] = inp_reg[f]; - } -#endif -} - -template -void launch_layer_norm(T* out, - T* vals, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - int hidden_dim, - cudaStream_t stream); - -template <> -void launch_layer_norm(float* out, - float* vals, - const float* gamma, - const float* beta, - float epsilon, - int batch_size, - int hidden_dim, - cudaStream_t stream) -{ - constexpr int threads = 1024; - - dim3 grid_dim(batch_size); - - dim3 block_dim(threads); - - fused_bias_residual_layer_norm<<>>( - out, vals, gamma, beta, epsilon, hidden_dim); -} - -template <> -void launch_layer_norm<__half>(__half* out, - __half* vals, - const __half* gamma, - const __half* beta, - float epsilon, - int batch_size, - int hidden_dim, - cudaStream_t stream) -{ - constexpr int threads = 1024; - - dim3 grid_dim(batch_size); - dim3 block_dim(threads); - - fused_bias_residual_layer_norm<<>>( - out, vals, gamma, beta, epsilon, hidden_dim / 2); -} - -__global__ void fused_residual_layer_norm(float* norm, - float* res_add, - float* vals, - float* residual, - const float* bias, - const float* gamma, - const float* beta, - float epsilon, - int row_stride, - bool preLN, - bool mlp_after_attn) -{ - int iteration_stride = blockDim.x; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id >> 5; - int warp_num = iteration_stride >> 5; - - float inp_reg[NORM_REG]; - - int k = 0; - int input_id = id; - - float sum = 0; - while (input_id < row_stride) { - inp_reg[k] = vals[input_id + row * row_stride]; - float res_f = (residual[input_id + row * row_stride]); - float bias_f = (bias[input_id]); - if (mlp_after_attn) inp_reg[k] += 
res_f + bias_f; - // if (preLN) res_add[input_id + row * row_stride] = inp_reg[k]; - sum += inp_reg[k++]; - input_id += iteration_stride; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - - __shared__ float shr[MAX_WARP_NUM]; - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - float mean = sum / (row_stride); - sum = 0.f; - for (int f = 0; f < k; f++) { - inp_reg[f] -= mean; - sum += inp_reg[f] * inp_reg[f]; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= (row_stride); - sum += epsilon; - sum = __frsqrt_rn(sum); - - for (int f = 0; f < k; f++) { - int out_id = f * iteration_stride + id; - inp_reg[f] = inp_reg[f] * sum; - inp_reg[f] = inp_reg[f] * gamma[out_id] + beta[out_id]; - norm[out_id + row * row_stride] = inp_reg[f]; - } -} - -__global__ void fused_residual_layer_norm(__half* norm, - __half* res_add, - __half* vals, - __half* residual, - const __half* bias, - const __half* gamma, - const __half* beta, - float epsilon, - int row_stride, - bool preLN, - bool mlp_after_attn) -{ -#ifdef HALF_PRECISION_AVAILABLE - int iteration_stride = blockDim.x; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id >> 5; - int warp_num = iteration_stride >> 5; - - __half2 inp_reg[NORM_REG]; - - __half2* vals_cast = reinterpret_cast<__half2*>(vals); - __half2* norm_cast = reinterpret_cast<__half2*>(norm); - __half2* res_add_cast = reinterpret_cast<__half2*>(res_add); - __half2* residual_cast = 
reinterpret_cast<__half2*>(residual); - const __half2* bias_cast = reinterpret_cast(bias); - - int k = 0; - int input_id = id; - - float sum = 0; - while (input_id < row_stride) { - inp_reg[k] = vals_cast[input_id + row * row_stride]; - float2 inp_f = __half22float2(inp_reg[k]); - float2 res_f = __half22float2(residual_cast[input_id + row * row_stride]); - float2 bias_f = __half22float2(bias_cast[input_id]); - if (mlp_after_attn) { - inp_f.x += res_f.x + bias_f.x; - inp_f.y += res_f.y + bias_f.y; - } - inp_reg[k] = __float22half2_rn(inp_f); - // if (preLN) res_add_cast[input_id + row * row_stride] = __float22half2_rn(res_f); - // //inp_reg[k]; - sum += inp_f.x + inp_f.y; - input_id += iteration_stride; - k++; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - __shared__ float shr[MAX_WARP_NUM]; - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - float mean = sum / (row_stride << 1); - sum = 0.f; - for (int f = 0; f < k; f++) { - float2 inp_f = __half22float2(inp_reg[f]); - inp_f.x -= mean; - inp_f.y -= mean; - inp_reg[f] = __float22half2_rn(inp_f); - sum += inp_f.x * inp_f.x; - sum += inp_f.y * inp_f.y; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= (row_stride << 1); - sum += epsilon; - sum = __frsqrt_rn(sum); - __half2 variance_h = __float2half2_rn(sum); - const __half2* gamma_cast = reinterpret_cast(gamma); - const __half2* beta_cast = reinterpret_cast(beta); - for (int f = 0; f < k; f++) { - int out_id = f * iteration_stride + id; - inp_reg[f] = inp_reg[f] * variance_h; - inp_reg[f] = inp_reg[f] * gamma_cast[out_id] + 
beta_cast[out_id]; - norm_cast[out_id + row * row_stride] = inp_reg[f]; - } -#endif -} - -template -void launch_residual_layer_norm(T* norm, - T* res_add, - T* vals, - T* residual, - const T* bias, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - int hidden_dim, - bool preLN, - bool mlp_after_attn, - cudaStream_t stream); - -template <> -void launch_residual_layer_norm(float* norm, - float* res_add, - float* vals, - float* residual, - const float* bias, - const float* gamma, - const float* beta, - float epsilon, - int batch_size, - int hidden_dim, - bool preLN, - bool mlp_after_attn, - cudaStream_t stream) -{ - constexpr int threads = 1024; - - dim3 grid_dim(batch_size); - - dim3 block_dim(threads); - - fused_residual_layer_norm<<>>(norm, - res_add, - vals, - residual, - bias, - gamma, - beta, - epsilon, - hidden_dim, - preLN, - mlp_after_attn); -} - -template <> -void launch_residual_layer_norm<__half>(__half* norm, - __half* res_add, - __half* vals, - __half* residual, - const __half* bias, - const __half* gamma, - const __half* beta, - float epsilon, - int batch_size, - int hidden_dim, - bool preLN, - bool mlp_after_attn, - cudaStream_t stream) -{ - constexpr int threads = 1024; - - dim3 grid_dim(batch_size); - dim3 block_dim(threads); - - fused_residual_layer_norm<<>>(norm, - res_add, - vals, - residual, - bias, - gamma, - beta, - epsilon, - hidden_dim / 2, - preLN, - mlp_after_attn); -} diff --git a/deepspeed/ops/csrc/transformer_bak/inference/csrc/normalize.hip b/deepspeed/ops/csrc/transformer_bak/inference/csrc/normalize.hip deleted file mode 100644 index dc7fa7a..0000000 --- a/deepspeed/ops/csrc/transformer_bak/inference/csrc/normalize.hip +++ /dev/null @@ -1,453 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
-#include "hip/hip_runtime.h" -#include -#include "custom_hip_layers.h" - -//#include -#include -#include -#include - -#define NORM_REG (MAX_REGISTERS) - -namespace cg = cooperative_groups; - -__global__ void fused_bias_residual_layer_norm(float* output, - const float* vals, - const float* gamma, - const float* beta, - float epsilon, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id >> 5; - int warp_num = iteration_stride >> 5; - - float inp_reg[NORM_REG]; - - int k = 0; - float sum = 0; - int input_id = id; - while (input_id < row_stride) { - inp_reg[k] = vals[input_id + row * row_stride]; - sum += inp_reg[k++]; - input_id += iteration_stride; - } - - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - - __shared__ float shr[MAX_WARP_NUM]; - - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - - float mean = sum / (row_stride); - sum = 0.f; - for (int f = 0; f < k; f++) { - inp_reg[f] -= mean; - sum += inp_reg[f] * inp_reg[f]; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= (row_stride); - sum += epsilon; - sum = __frsqrt_rn(sum); - for (int f = 0; f < k; f++) { - int out_id = f * iteration_stride + id; - inp_reg[f] = inp_reg[f] * sum; - inp_reg[f] = inp_reg[f] * gamma[out_id] + beta[out_id]; - output[out_id + row * row_stride] = inp_reg[f]; - } -} - -__global__ void 
fused_bias_residual_layer_norm(__half* output, - const __half* vals, - const __half* gamma, - const __half* beta, - float epsilon, - int row_stride) -{ -#ifdef HALF_PRECISION_AVAILABLE - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id >> 5; - int warp_num = iteration_stride >> 5; - - __half2 inp_reg[NORM_REG]; - - const __half2* vals_cast = reinterpret_cast(vals); - __half2* out_cast = reinterpret_cast<__half2*>(output); - - int k = 0; - int input_id = id; - while (input_id < row_stride) { - inp_reg[k++] = vals_cast[input_id + row * row_stride]; - input_id += iteration_stride; - } - float sum = 0; - for (int f = k - 1; f >= 0; f--) { - float2 inp_f = __half22float2(inp_reg[f]); - sum += inp_f.x + inp_f.y; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - __shared__ float shr[MAX_WARP_NUM]; - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - float mean = sum / (row_stride << 1); - sum = 0.f; - for (int f = 0; f < k; f++) { - float2 inp_f = __half22float2(inp_reg[f]); - inp_f.x -= mean; - inp_f.y -= mean; - inp_reg[f] = __float22half2_rn(inp_f); - sum += inp_f.x * inp_f.x; - sum += inp_f.y * inp_f.y; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= (row_stride << 1); - sum += epsilon; - sum = __frsqrt_rn(sum); - __half2 variance_h = __float2half2_rn(sum); - const __half2* gamma_cast = reinterpret_cast(gamma); - const 
__half2* beta_cast = reinterpret_cast(beta); - for (int f = 0; f < k; f++) { - int out_id = f * iteration_stride + id; - inp_reg[f] = inp_reg[f] * variance_h; - inp_reg[f] = inp_reg[f] * gamma_cast[out_id] + beta_cast[out_id]; - out_cast[out_id + row * row_stride] = inp_reg[f]; - } -#endif -} - -template -void launch_layer_norm(T* out, - T* vals, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - int hidden_dim, - hipStream_t stream); - -template <> -void launch_layer_norm(float* out, - float* vals, - const float* gamma, - const float* beta, - float epsilon, - int batch_size, - int hidden_dim, - hipStream_t stream) -{ - constexpr int threads = 1024; - - dim3 grid_dim(batch_size); - - dim3 block_dim(threads); - - hipLaunchKernelGGL(( fused_bias_residual_layer_norm), dim3(grid_dim), dim3(block_dim), 0, stream, - out, vals, gamma, beta, epsilon, hidden_dim); -} - -template <> -void launch_layer_norm<__half>(__half* out, - __half* vals, - const __half* gamma, - const __half* beta, - float epsilon, - int batch_size, - int hidden_dim, - hipStream_t stream) -{ - constexpr int threads = 1024; - - dim3 grid_dim(batch_size); - dim3 block_dim(threads); - - hipLaunchKernelGGL(( fused_bias_residual_layer_norm), dim3(grid_dim), dim3(block_dim), 0, stream, - out, vals, gamma, beta, epsilon, hidden_dim / 2); -} - -__global__ void fused_residual_layer_norm(float* norm, - float* res_add, - float* vals, - float* residual, - const float* bias, - const float* gamma, - const float* beta, - float epsilon, - int row_stride, - bool preLN, - bool mlp_after_attn) -{ - int iteration_stride = blockDim.x; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id >> 5; - int warp_num = iteration_stride >> 5; - - float inp_reg[NORM_REG]; - - int k = 0; - int input_id = id; - - float sum = 0; - while (input_id < row_stride) { - inp_reg[k] = vals[input_id + row * 
row_stride]; - float res_f = (residual[input_id + row * row_stride]); - float bias_f = (bias[input_id]); - if (mlp_after_attn) inp_reg[k] += res_f + bias_f; - // if (preLN) res_add[input_id + row * row_stride] = inp_reg[k]; - sum += inp_reg[k++]; - input_id += iteration_stride; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - - __shared__ float shr[MAX_WARP_NUM]; - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - float mean = sum / (row_stride); - sum = 0.f; - for (int f = 0; f < k; f++) { - inp_reg[f] -= mean; - sum += inp_reg[f] * inp_reg[f]; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= (row_stride); - sum += epsilon; - sum = __frsqrt_rn(sum); - - for (int f = 0; f < k; f++) { - int out_id = f * iteration_stride + id; - inp_reg[f] = inp_reg[f] * sum; - inp_reg[f] = inp_reg[f] * gamma[out_id] + beta[out_id]; - norm[out_id + row * row_stride] = inp_reg[f]; - } -} - -__global__ void fused_residual_layer_norm(__half* norm, - __half* res_add, - __half* vals, - __half* residual, - const __half* bias, - const __half* gamma, - const __half* beta, - float epsilon, - int row_stride, - bool preLN, - bool mlp_after_attn) -{ -#ifdef HALF_PRECISION_AVAILABLE - int iteration_stride = blockDim.x; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id >> 5; - int warp_num = iteration_stride >> 5; - - __half2 inp_reg[NORM_REG]; - - __half2* vals_cast = reinterpret_cast<__half2*>(vals); - __half2* norm_cast = 
reinterpret_cast<__half2*>(norm); - __half2* res_add_cast = reinterpret_cast<__half2*>(res_add); - __half2* residual_cast = reinterpret_cast<__half2*>(residual); - const __half2* bias_cast = reinterpret_cast(bias); - - int k = 0; - int input_id = id; - - float sum = 0; - while (input_id < row_stride) { - inp_reg[k] = vals_cast[input_id + row * row_stride]; - float2 inp_f = __half22float2(inp_reg[k]); - float2 res_f = __half22float2(residual_cast[input_id + row * row_stride]); - float2 bias_f = __half22float2(bias_cast[input_id]); - if (mlp_after_attn) { - inp_f.x += res_f.x + bias_f.x; - inp_f.y += res_f.y + bias_f.y; - } - inp_reg[k] = __float22half2_rn(inp_f); - // if (preLN) res_add_cast[input_id + row * row_stride] = __float22half2_rn(res_f); - // //inp_reg[k]; - sum += inp_f.x + inp_f.y; - input_id += iteration_stride; - k++; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - __shared__ float shr[MAX_WARP_NUM]; - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - float mean = sum / (row_stride << 1); - sum = 0.f; - for (int f = 0; f < k; f++) { - float2 inp_f = __half22float2(inp_reg[f]); - inp_f.x -= mean; - inp_f.y -= mean; - inp_reg[f] = __float22half2_rn(inp_f); - sum += inp_f.x * inp_f.x; - sum += inp_f.y * inp_f.y; - } - for (int i = 1; i < 32; i *= 2) sum += g.shfl_down(sum, i); - if (g.thread_rank() == 0) shr[gid] = sum; - b.sync(); - if (g.thread_rank() < (warp_num)) sum = shr[g.thread_rank()]; - b.sync(); - for (int i = 1; i < (warp_num); i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= (row_stride << 1); - sum += epsilon; - sum = __frsqrt_rn(sum); - __half2 variance_h = __float2half2_rn(sum); - const __half2* gamma_cast = reinterpret_cast(gamma); - const __half2* beta_cast = reinterpret_cast(beta); - for (int f = 0; f < k; f++) { - int 
out_id = f * iteration_stride + id; - inp_reg[f] = inp_reg[f] * variance_h; - inp_reg[f] = inp_reg[f] * gamma_cast[out_id] + beta_cast[out_id]; - norm_cast[out_id + row * row_stride] = inp_reg[f]; - } -#endif -} - -template -void launch_residual_layer_norm(T* norm, - T* res_add, - T* vals, - T* residual, - const T* bias, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - int hidden_dim, - bool preLN, - bool mlp_after_attn, - hipStream_t stream); - -template <> -void launch_residual_layer_norm(float* norm, - float* res_add, - float* vals, - float* residual, - const float* bias, - const float* gamma, - const float* beta, - float epsilon, - int batch_size, - int hidden_dim, - bool preLN, - bool mlp_after_attn, - hipStream_t stream) -{ - constexpr int threads = 1024; - - dim3 grid_dim(batch_size); - - dim3 block_dim(threads); - - hipLaunchKernelGGL(( fused_residual_layer_norm), dim3(grid_dim), dim3(block_dim), 0, stream, norm, - res_add, - vals, - residual, - bias, - gamma, - beta, - epsilon, - hidden_dim, - preLN, - mlp_after_attn); -} - -template <> -void launch_residual_layer_norm<__half>(__half* norm, - __half* res_add, - __half* vals, - __half* residual, - const __half* bias, - const __half* gamma, - const __half* beta, - float epsilon, - int batch_size, - int hidden_dim, - bool preLN, - bool mlp_after_attn, - hipStream_t stream) -{ - constexpr int threads = 1024; - - dim3 grid_dim(batch_size); - dim3 block_dim(threads); - - hipLaunchKernelGGL(( fused_residual_layer_norm), dim3(grid_dim), dim3(block_dim), 0, stream, norm, - res_add, - vals, - residual, - bias, - gamma, - beta, - epsilon, - hidden_dim / 2, - preLN, - mlp_after_attn); -} diff --git a/deepspeed/ops/csrc/transformer_bak/inference/csrc/pt_binding.cpp b/deepspeed/ops/csrc/transformer_bak/inference/csrc/pt_binding.cpp deleted file mode 100644 index 5432314..0000000 --- a/deepspeed/ops/csrc/transformer_bak/inference/csrc/pt_binding.cpp +++ /dev/null @@ -1,911 +0,0 @@ - -#include 
-#include -#include -#include "context.h" -#include "cublas_wrappers.h" -#include "custom_cuda_layers.h" - -std::array gemm_algos = std::array({99, 99, 99}); - -#define MAX_OUT_TOKES 10 - -template -at::Tensor ds_softmax(at::Tensor& attn_scores, - at::Tensor& attn_mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - bool async_op) -{ - auto attn_scores_c = attn_scores.contiguous(); - int bsz = attn_scores_c.size(0); - - int seq_len = attn_scores_c.size(1); - int len = attn_scores_c.sizes().size(); - if (len > 3) seq_len = attn_scores_c.size(2); - - int soft_len = attn_scores_c.size(2); - if (len > 3) soft_len = attn_scores_c.size(3); - - int heads = 1; - if (len > 3) heads = attn_scores_c.size(1); - - launch_attn_softmax_v2((T*)attn_scores_c.data_ptr(), - (attn_mask.sizes().size() > 1 ? (T*)attn_mask.data_ptr() : nullptr), - triangular, - recompute, - local_attention, - window_size, - bsz, - heads, - seq_len, - soft_len, - 1.0, - Context::Instance().GetCurrentStream(async_op)); - - return attn_scores_c; -} - -template -void allocate_workspace(size_t hidden_dim, - size_t max_seq_len, - size_t batch_size, - size_t head_size = 128) -{ - size_t _workSpaceSize = (hidden_dim * batch_size * max_seq_len); - Context::Instance().GenWorkSpace(_workSpaceSize * sizeof(T)); -} - -template -at::Tensor einsum_sec_sm_ecm(at::Tensor& Q, at::Tensor& W) -{ - auto options = at::TensorOptions() - .dtype(Q.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - T* workspace = (T*)Context::Instance().GetWorkSpace(); - float alpha = 1; - float gemm_beta = 0.0; - - if (!workspace) { - allocate_workspace(W.size(1), MAX_OUT_TOKES, Q.size(0)); - workspace = (T*)Context::Instance().GetWorkSpace(); - } - - auto O = at::from_blob(workspace, {Q.size(1), Q.size(2), W.size(1)}, options); - unsigned m = W.size(1); - unsigned n = Q.size(1) * Q.size(2); - unsigned k = Q.size(0); - cublas_gemm_ex(Context::Instance().GetCublasHandle(), 
- CUBLAS_OP_N, - CUBLAS_OP_T, - m, - n, - k, - &alpha, - &gemm_beta, - (T*)W.data_ptr(), - (T*)Q.data_ptr(), - (T*)O.data_ptr(), - CUBLAS_GEMM_DEFAULT_TENSOR_OP); - return O; -} - -template -void attention_unfused(at::Tensor& prev_key_cont, - at::Tensor& query_cont, - at::Tensor& attn_mask, - at::Tensor& prev_value_cont, - at::Tensor& output, - int& bsz, - int& seq_len, - int& soft_len, - int& heads, - float& norm_factor, - bool triangular, - bool recompute, - bool local_attention, - int window_size) -{ - auto options = at::TensorOptions() - .dtype(query_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - float alpha = norm_factor; - float gemm_beta = 0.0; - auto attn_score = at::empty({bsz, heads, seq_len, soft_len}, options); - int k = prev_value_cont.size(2) / heads; - cublasSetStream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - cublas_strided_batched_gemm(Context::Instance().GetCublasHandle(), - soft_len, - seq_len, - k, - &alpha, - &gemm_beta, - (T*)prev_key_cont.data_ptr(), - (T*)query_cont.data_ptr(), - (T*)attn_score.data_ptr(), - CUBLAS_OP_N, - CUBLAS_OP_N, - soft_len * k, - seq_len * k, - seq_len * soft_len, - bsz * heads, - CUBLAS_GEMM_DEFAULT_TENSOR_OP); - attn_score = ds_softmax( - attn_score, attn_mask, triangular, recompute, local_attention, window_size, false); - alpha = 1.0; - cublas_strided_batched_gemm(Context::Instance().GetCublasHandle(), - k, - seq_len, - soft_len, - &alpha, - &gemm_beta, - (T*)prev_value_cont.data_ptr(), - (T*)attn_score.data_ptr(), - (T*)output.data_ptr(), - CUBLAS_OP_N, - CUBLAS_OP_N, - soft_len * k, - seq_len * soft_len, - seq_len * k, - bsz * heads, - CUBLAS_GEMM_DEFAULT_TENSOR_OP); -} - -template -std::vector ds_softmax_context(at::Tensor& query, - at::Tensor& prev_key, - at::Tensor& new_key, - at::Tensor& attn_mask, - at::Tensor& prev_value, - at::Tensor& new_value, - int heads, - float norm_factor, - bool merging, - bool triangular, - bool 
local_attention, - int window_size, - bool no_masking) -{ - auto query_cont = query.contiguous(); - auto prev_key_cont = prev_key.contiguous(); - auto prev_value_cont = prev_value.contiguous(); - - int new_size = (new_value.sizes().size() > 1 ? new_value.size(1) : 0); - - // Attn_Score [ batch Head Sequence-length Softmax-length] - - int bsz = query_cont.size(0); - int seq_len = query_cont.size(1); - int soft_len = prev_value.size(1); - - auto options = at::TensorOptions() - .dtype(query_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = - at::empty({prev_value.size(0), heads, seq_len, prev_value.size(2) / heads}, options); - attention_unfused(prev_key_cont, - query_cont, - attn_mask, //(no_masking ? nullptr : (T*)attn_mask.data_ptr()), - prev_value_cont, - output, - bsz, - seq_len, - soft_len, - heads, - norm_factor, - (triangular && (new_size == 0)), - (new_size == 0), - local_attention, - window_size); - - return {output, prev_key, prev_value}; -} - -template -at::Tensor ds_bias_gelu(at::Tensor& input, at::Tensor& bias) -{ - auto input_cont = input.contiguous(); - - int bsz = input_cont.size(0) * input_cont.size(1); - int intermediate_size = input_cont.size(2); - - launch_bias_gelu((T*)input_cont.data_ptr(), - (T*)bias.data_ptr(), - intermediate_size, - bsz, - Context::Instance().GetCurrentStream()); - return input_cont; -} - -template -at::Tensor ds_bias_residual(at::Tensor& input, at::Tensor& residual, at::Tensor& bias) -{ - auto input_cont = input.contiguous(); - auto residual_cont = residual.contiguous(); - - int bsz = input_cont.size(0) * input_cont.size(1); - // launch_bias_residual((T*)input_cont.data_ptr(), - // (T*)residual_cont.data_ptr(), - // (T*)bias.data_ptr(), - // bsz, - // input_cont.size(2), - // (bias.size(0) > 1), - // Context::Instance().GetCurrentStream()); - return input_cont; -} - -template -at::Tensor ds_layernorm(at::Tensor& input_cont, at::Tensor& gamma, at::Tensor& betta, 
float epsilon) -{ - int bsz = input_cont.size(0) * input_cont.size(1); - auto inp_norm = at::empty_like(input_cont); - launch_layer_norm((T*)inp_norm.data_ptr(), - (T*)input_cont.data_ptr(), - (T*)gamma.data_ptr(), - (T*)betta.data_ptr(), - epsilon, - bsz, - input_cont.size(2), - Context::Instance().GetCurrentStream()); - return inp_norm; -} - -template -at::Tensor qkv_unfused_cublas(at::Tensor& output, - at::Tensor& input, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - bool add_bias) -{ - auto inp_norm = ds_layernorm(input, gamma, beta, epsilon); - - // cudaEventRecord(Context::Instance().GetCompEvent(1), Context::Instance().GetCurrentStream()); - - float alpha = (T)1.0; - float gemm_beta = (T)0.0; - int bsz = input.size(0) * input.size(1); - cublasSetStream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - CUBLAS_OP_N, - CUBLAS_OP_N, - weight.size(1), - bsz, - input.size(2), - &alpha, - &gemm_beta, - (T*)weight.data_ptr(), - (T*)inp_norm.data_ptr(), - (T*)output.data_ptr(), - CUBLAS_GEMM_DEFAULT_TENSOR_OP); - if (add_bias) - launch_bias_add((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - return inp_norm; -} - -template -std::vector ds_qkv_gemm(at::Tensor& input, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - bool add_bias) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); - auto inp_norm = - qkv_unfused_cublas(output, input_cont, weight, bias, gamma, beta, epsilon, add_bias); - - return {output, inp_norm}; 
-} - -template -void quantized_gemm(at::Tensor& output, - at::Tensor& input, - at::Tensor& weight, - at::Tensor& qscale, - int groups, - int merge_count) -{ - int bsz = input.size(0) * input.size(1); - auto options = at::TensorOptions() - .dtype(input.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - auto weight16 = at::empty({weight.size(0), weight.size(1)}, options); - - launch_dequantize((T*)weight16.data_ptr(), - (int8_t*)weight.data_ptr(), - (float*)qscale.data_ptr(), - weight.size(1), - weight.size(0), - groups, - merge_count, - Context::Instance().GetCurrentStream()); - - cublasSetStream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - - float alpha = (T)1.0; - float gemm_beta = (T)0.0; - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - CUBLAS_OP_N, - CUBLAS_OP_N, - weight.size(1), - bsz, - input.size(2), - &alpha, - &gemm_beta, - (T*)weight16.data_ptr(), - (T*)input.data_ptr(), - (T*)output.data_ptr(), - CUBLAS_GEMM_DEFAULT_TENSOR_OP); -} - -template -at::Tensor ds_qkv_gemm_int8(at::Tensor& input, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - at::Tensor& q_scale, - int groups, - bool add_bias) -{ - int bsz = input.size(0) * input.size(1); - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - - auto inp_norm = ds_layernorm(input_cont, gamma, beta, epsilon); - - quantized_gemm(output, inp_norm, weight, q_scale, groups, 0); - if (add_bias) - launch_bias_add((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - - return output; -} - -template -at::Tensor ds_linear_layer(at::Tensor& input, at::Tensor& weight, at::Tensor& bias) -{ - 
auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); - - float alpha = (T)1.0; - float gemm_beta = (T)0.0; - cublasSetStream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - CUBLAS_OP_N, - CUBLAS_OP_N, - weight.size(1), - bsz, - input_cont.size(2), - &alpha, - &gemm_beta, - (T*)weight.data_ptr(), - (T*)input_cont.data_ptr(), - (T*)output.data_ptr(), - CUBLAS_GEMM_DEFAULT_TENSOR_OP); - - launch_bias_add((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - - return output; -} - -template -at::Tensor ds_linear_layer_int8(at::Tensor& input, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& q_scale, - int groups) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - int bsz = input_cont.size(0) * input_cont.size(1); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - - quantized_gemm(output, input_cont, weight, q_scale, groups, 0); - launch_bias_add((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - return output; -} - -template -at::Tensor ds_vector_matmul(at::Tensor& input, at::Tensor& weight, bool async_op) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - int 
bsz = input_cont.size(0) * input_cont.size(1); - float alpha = (T)1.0; - float gemm_beta = (T)0.0; - cublasSetStream(Context::Instance().GetCublasHandle(), - Context::Instance().GetCurrentStream(async_op)); - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - CUBLAS_OP_N, - CUBLAS_OP_N, - weight.size(1), - bsz, - input_cont.size(2), - &alpha, - &gemm_beta, - (T*)weight.data_ptr(), - (T*)input_cont.data_ptr(), - (T*)output.data_ptr(), - CUBLAS_GEMM_DEFAULT_TENSOR_OP); - return output; -} - -template -at::Tensor ds_vector_matmul_int8(at::Tensor& input, - at::Tensor& weight, - at::Tensor& q_scale, - int groups, - int merge_count) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - - quantized_gemm(output, input_cont, weight, q_scale, groups, merge_count); - return output; -} - -template -void mlp_unfused_cublas(at::Tensor& output, - at::Tensor& input, - at::Tensor& residual, - at::Tensor& input_bias, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - bool preLayerNorm, - bool mlp_after_attn) -{ - int bsz = input.size(0) * input.size(1); - auto inp_norm = at::empty_like(input); - - launch_residual_layer_norm((T*)inp_norm.data_ptr(), - (T*)nullptr, - (T*)input.data_ptr(), - (T*)residual.data_ptr(), - (T*)input_bias.data_ptr(), - (T*)gamma.data_ptr(), - (T*)beta.data_ptr(), - epsilon, - bsz, - input.size(2), - preLayerNorm, - mlp_after_attn, - Context::Instance().GetCurrentStream()); - - float alpha = (T)1.0; - float gemm_beta = (T)0.0; - cublasSetStream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - CUBLAS_OP_N, - CUBLAS_OP_N, - weight.size(1), - bsz, - input.size(2), - &alpha, - 
&gemm_beta, - (T*)weight.data_ptr(), - (T*)inp_norm.data_ptr(), - (T*)output.data_ptr(), - CUBLAS_GEMM_DEFAULT_TENSOR_OP); - launch_bias_gelu((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); -} -template -at::Tensor ds_mlp_gemm(at::Tensor& input, - at::Tensor& residual, - at::Tensor& input_bias, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - bool preLayerNorm, - bool mlp_after_attn) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); - - mlp_unfused_cublas(output, - mlp_after_attn ? input : residual, - residual, - input_bias, - weight, - bias, - gamma, - beta, - epsilon, - preLayerNorm, - mlp_after_attn); - - return output; -} - -template -std::vector ds_mlp_gemm_int8(at::Tensor& input, - at::Tensor& residual, - at::Tensor& input_bias, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - at::Tensor& q_scale, - int groups, - bool preLayerNorm) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - - int bsz = input_cont.size(0) * input_cont.size(1); - auto inp_norm = at::empty_like(input_cont); - - auto residual_add = (preLayerNorm ? 
at::empty_like(input_cont) : inp_norm); - // computing the blocking across K dimension - // launch_residual_layer_norm((T*)inp_norm.data_ptr(), - // (T*)residual_add.data_ptr(), - // (T*)input_cont.data_ptr(), - // (T*)residual.data_ptr(), - // (T*)input_bias.data_ptr(), - // (T*)gamma.data_ptr(), - // (T*)beta.data_ptr(), - // epsilon, - // bsz, - // input_cont.size(2), - // preLayerNorm, - // Context::Instance().GetCurrentStream()); - - quantized_gemm(output, inp_norm, weight, q_scale, groups, 0); - launch_bias_gelu((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - - return {output, residual_add}; -} - -template -at::Tensor fused_gemm_gelu(at::Tensor& input, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& weight_out, - const float epsilon, - bool preLayerNorm, - bool async_op) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto intermediate = - at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight_out.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); - float alpha = (T)1.0; - float gemm_beta = (T)0.0; - cublasSetStream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - CUBLAS_OP_N, - CUBLAS_OP_N, - weight.size(1), - bsz, - input.size(2), - &alpha, - &gemm_beta, - (T*)weight.data_ptr(), - (T*)input_cont.data_ptr(), - (T*)intermediate.data_ptr(), - CUBLAS_GEMM_DEFAULT_TENSOR_OP); - launch_bias_gelu((T*)intermediate.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - CUBLAS_OP_N, - CUBLAS_OP_N, - weight_out.size(1), - bsz, - 
intermediate.size(2), - &alpha, - &gemm_beta, - (T*)weight_out.data_ptr(), - (T*)intermediate.data_ptr(), - (T*)output.data_ptr(), - CUBLAS_GEMM_DEFAULT_TENSOR_OP); - // cudaEventRecord(Context::Instance().GetCompEvent(2), - // Context::Instance().GetCurrentStream(true)); - return output; -} - -void residual_add_bias(at::Tensor& output, - at::Tensor& input, - at::Tensor& attention_output, - at::Tensor& output_b, - at::Tensor& attention_b, - int mp_size, - bool mlp_after_attn) -{ - int bsz = input.size(0) * input.size(1); - int hidden_size = input.size(2); - // cudaStreamWaitEvent( - // Context::Instance().GetCurrentStream(), Context::Instance().GetCompEvent(2), 0); - if (input.scalar_type() == at::kFloat) - if (mlp_after_attn) - launch_bias_residual((float*)input.data_ptr(), - (float*)output.data_ptr(), - (float*)attention_output.data_ptr(), - (float*)output_b.data_ptr(), - (float*)attention_b.data_ptr(), - bsz, - hidden_size, - mp_size, - Context::Instance().GetCurrentStream()); - else - launch_gptj_residual_add((float*)input.data_ptr(), - (float*)output.data_ptr(), - (float*)attention_output.data_ptr(), - (float*)output_b.data_ptr(), - (float*)attention_b.data_ptr(), - hidden_size, - bsz, - mp_size, - Context::Instance().GetCurrentStream()); - else if (mlp_after_attn) - launch_bias_residual((__half*)input.data_ptr(), - (__half*)output.data_ptr(), - (__half*)attention_output.data_ptr(), - (__half*)output_b.data_ptr(), - (__half*)attention_b.data_ptr(), - bsz, - hidden_size, - mp_size, - Context::Instance().GetCurrentStream()); - else - launch_gptj_residual_add<__half>((__half*)input.data_ptr(), - (__half*)output.data_ptr(), - (__half*)attention_output.data_ptr(), - (__half*)output_b.data_ptr(), - (__half*)attention_b.data_ptr(), - hidden_size, - bsz, - mp_size, - Context::Instance().GetCurrentStream()); -} - -std::vector apply_rotary_pos_emb(at::Tensor& mixed_query, - at::Tensor& key_layer, - unsigned rotary_dim, - unsigned offset, - unsigned num_heads, - bool 
rotate_half, - bool rotate_every_two) -{ - auto query_cont = mixed_query.contiguous(); - auto key_cont = key_layer.contiguous(); - - unsigned bsz = mixed_query.size(0); - unsigned head_size = mixed_query.size(2) / num_heads; - unsigned seq_len = mixed_query.size(1); - - if (mixed_query.scalar_type() == at::kFloat) - launch_apply_rotary_pos_emb((float*)query_cont.data_ptr(), - (float*)key_cont.data_ptr(), - head_size, - seq_len, - rotary_dim, - offset, - num_heads, - bsz, - rotate_half, - rotate_every_two, - Context::Instance().GetCurrentStream()); - else - launch_apply_rotary_pos_emb<__half>((__half*)query_cont.data_ptr(), - (__half*)key_cont.data_ptr(), - head_size, - seq_len, - rotary_dim, - offset, - num_heads, - bsz, - rotate_half, - rotate_every_two, - Context::Instance().GetCurrentStream()); - return {query_cont, key_cont}; -} - -template -at::Tensor fused_gemm_gelu_int8(at::Tensor& input, - at::Tensor& weight, - at::Tensor& bias, - const float epsilon, - at::Tensor& q_scale, - int groups, - bool preLayerNorm) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - - int bsz = input_cont.size(0) * input_cont.size(1); - - quantized_gemm(output, input_cont, weight, q_scale, groups, 0); - launch_bias_gelu((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - - return output; -} - -at::Tensor moe_res_matmul(at::Tensor& moe_res, at::Tensor& coef, at::Tensor& output) -{ - int M = moe_res.size(0) * moe_res.size(1); - int N = moe_res.size(2); - Context::Instance().SynchComm(); - if (moe_res.scalar_type() == at::kFloat) { - launch_moe_res_matmul((float*)moe_res.data_ptr(), - (float*)coef.data_ptr(), - (float*)output.data_ptr(), - M, - N, - at::cuda::getCurrentCUDAStream()); - } 
else { - launch_moe_res_matmul<__half>((__half*)moe_res.data_ptr(), - (__half*)coef.data_ptr(), - (__half*)output.data_ptr(), - M, - N, - at::cuda::getCurrentCUDAStream()); - } - return output; -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("softmax_fp32", &ds_softmax, "DeepSpeed SoftMax with fp32 (CUDA)"); - m.def("softmax_fp16", &ds_softmax<__half>, "DeepSpeed SoftMax with fp32 (CUDA)"); - m.def( - "softmax_context_fp32", &ds_softmax_context, "DeepSpeed attention with fp32 (CUDA)"); - m.def("softmax_context_fp16", - &ds_softmax_context<__half>, - "DeepSpeed attention with fp32 (CUDA)"); - m.def("bias_gelu_fp32", &ds_bias_gelu, "DeepSpeed Gelu with fp32 (CUDA)"); - m.def("bias_gelu_fp16", &ds_bias_gelu<__half>, "DeepSpeed Gelu with fp32 (CUDA)"); - m.def("bias_residual_fp32", - &ds_bias_residual, - "DeepSpeed residual-bias add with fp32 (CUDA)"); - m.def("bias_residual_fp16", - &ds_bias_residual<__half>, - "DeepSpeed residual-bias add with fp32 (CUDA)"); - m.def("layer_norm_fp32", &ds_layernorm, "DeepSpeed layer-norm with fp32 (CUDA)"); - m.def("layer_norm_fp16", &ds_layernorm<__half>, "DeepSpeed layer-norm with fp16 (CUDA)"); - m.def("qkv_gemm_fp32", &ds_qkv_gemm, "DeepSpeed qkv gemm with fp32 (CUDA)"); - m.def("qkv_gemm_fp16", &ds_qkv_gemm<__half>, "DeepSpeed qkv gemm with fp16 (CUDA)"); - m.def("qkv_gemm_int8", &ds_qkv_gemm_int8<__half>, "DeepSpeed qkv gemm with int8 (CUDA)"); - m.def("mlp_gemm_fp32", &ds_mlp_gemm, "DeepSpeed mlp with fp32 (CUDA)"); - m.def("mlp_gemm_fp16", &ds_mlp_gemm<__half>, "DeepSpeed mlp with fp16 (CUDA)"); - m.def("mlp_gemm_int8", &ds_mlp_gemm_int8<__half>, "DeepSpeed mlp with int8 (CUDA)"); - m.def("vector_matmul_fp32", &ds_vector_matmul, "DeepSpeed vector-MM with fp32 (CUDA)"); - m.def("vector_matmul_fp16", &ds_vector_matmul<__half>, "DeepSpeed vector-MM with fp16 (CUDA)"); - m.def("vector_matmul_int8", - &ds_vector_matmul_int8<__half>, - "DeepSpeed vector-MM with int8 (CUDA)"); - m.def("linear_layer_fp32", &ds_linear_layer, 
"DeepSpeed linear_layer with fp32 (CUDA)"); - m.def("linear_layer_fp16", &ds_linear_layer<__half>, "DeepSpeed linear_layer with fp16 (CUDA)"); - m.def("linear_layer_int8", - &ds_linear_layer_int8<__half>, - "DeepSpeed linear_layer with int8 (CUDA)"); - m.def("fused_gemm_gelu_fp32", &fused_gemm_gelu, "DeepSpeed mlp with fp32 (CUDA)"); - m.def("fused_gemm_gelu_fp16", &fused_gemm_gelu<__half>, "DeepSpeed mlp with fp16 (CUDA)"); - m.def("residual_add", &residual_add_bias, "DeepSpeed mlp with fp16 (CUDA)"); - m.def("apply_rotary_pos_emb", &apply_rotary_pos_emb, "DeepSpeed mlp with fp16 (CUDA)"); - m.def("einsum_sec_sm_ecm_fp32", - &einsum_sec_sm_ecm, - "DeepSpeed vector-MM with fp32 (CUDA)"); - - m.def("einsum_sec_sm_ecm_fp16", - &einsum_sec_sm_ecm<__half>, - "DeepSpeed vector-MM with fp16 (CUDA)"); - m.def("moe_res_matmul", &moe_res_matmul, "DeepSpeed moe residual matmul (CUDA)"); -} diff --git a/deepspeed/ops/csrc/transformer_bak/inference/csrc/pt_binding_hip.cpp b/deepspeed/ops/csrc/transformer_bak/inference/csrc/pt_binding_hip.cpp deleted file mode 100644 index 009951d..0000000 --- a/deepspeed/ops/csrc/transformer_bak/inference/csrc/pt_binding_hip.cpp +++ /dev/null @@ -1,912 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
- -#include -#include -#include -#include "context_hip.h" -#include "cublas_wrappers_hip.h" -#include "custom_hip_layers.h" - -std::array gemm_algos = std::array({99, 99, 99}); - -#define MAX_OUT_TOKES 10 - -template -at::Tensor ds_softmax(at::Tensor& attn_scores, - at::Tensor& attn_mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - bool async_op) -{ - auto attn_scores_c = attn_scores.contiguous(); - int bsz = attn_scores_c.size(0); - - int seq_len = attn_scores_c.size(1); - int len = attn_scores_c.sizes().size(); - if (len > 3) seq_len = attn_scores_c.size(2); - - int soft_len = attn_scores_c.size(2); - if (len > 3) soft_len = attn_scores_c.size(3); - - int heads = 1; - if (len > 3) heads = attn_scores_c.size(1); - - launch_attn_softmax_v2((T*)attn_scores_c.data_ptr(), - (attn_mask.sizes().size() > 1 ? (T*)attn_mask.data_ptr() : nullptr), - triangular, - recompute, - local_attention, - window_size, - bsz, - heads, - seq_len, - soft_len, - 1.0, - Context::Instance().GetCurrentStream(async_op)); - - return attn_scores_c; -} - -template -void allocate_workspace(size_t hidden_dim, - size_t max_seq_len, - size_t batch_size, - size_t head_size = 128) -{ - size_t _workSpaceSize = (hidden_dim * batch_size * max_seq_len); - Context::Instance().GenWorkSpace(_workSpaceSize * sizeof(T)); -} - -template -at::Tensor einsum_sec_sm_ecm(at::Tensor& Q, at::Tensor& W) -{ - auto options = at::TensorOptions() - .dtype(Q.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - T* workspace = (T*)Context::Instance().GetWorkSpace(); - float alpha = 1; - float gemm_beta = 0.0; - - if (!workspace) { - allocate_workspace(W.size(1), MAX_OUT_TOKES, Q.size(0)); - workspace = (T*)Context::Instance().GetWorkSpace(); - } - - auto O = at::from_blob(workspace, {Q.size(1), Q.size(2), W.size(1)}, options); - unsigned m = W.size(1); - unsigned n = Q.size(1) * Q.size(2); - unsigned k = Q.size(0); - 
cublas_gemm_ex(Context::Instance().GetCublasHandle(), - rocblas_operation_none, - rocblas_operation_transpose, - m, - n, - k, - &alpha, - &gemm_beta, - (T*)W.data_ptr(), - (T*)Q.data_ptr(), - (T*)O.data_ptr(), - CUBLAS_GEMM_DEFAULT_TENSOR_OP); - return O; -} - -template -void attention_unfused(at::Tensor& prev_key_cont, - at::Tensor& query_cont, - at::Tensor& attn_mask, - at::Tensor& prev_value_cont, - at::Tensor& output, - int& bsz, - int& seq_len, - int& soft_len, - int& heads, - float& norm_factor, - bool triangular, - bool recompute, - bool local_attention, - int window_size) -{ - auto options = at::TensorOptions() - .dtype(query_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - float alpha = norm_factor; - float gemm_beta = 0.0; - auto attn_score = at::empty({bsz, heads, seq_len, soft_len}, options); - int k = prev_value_cont.size(2) / heads; - rocblas_set_stream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - cublas_strided_batched_gemm(Context::Instance().GetCublasHandle(), - soft_len, - seq_len, - k, - &alpha, - &gemm_beta, - (T*)prev_key_cont.data_ptr(), - (T*)query_cont.data_ptr(), - (T*)attn_score.data_ptr(), - rocblas_operation_none, - rocblas_operation_none, - soft_len * k, - seq_len * k, - seq_len * soft_len, - bsz * heads, - CUBLAS_GEMM_DEFAULT_TENSOR_OP); - attn_score = ds_softmax( - attn_score, attn_mask, triangular, recompute, local_attention, window_size, false); - alpha = 1.0; - cublas_strided_batched_gemm(Context::Instance().GetCublasHandle(), - k, - seq_len, - soft_len, - &alpha, - &gemm_beta, - (T*)prev_value_cont.data_ptr(), - (T*)attn_score.data_ptr(), - (T*)output.data_ptr(), - rocblas_operation_none, - rocblas_operation_none, - soft_len * k, - seq_len * soft_len, - seq_len * k, - bsz * heads, - CUBLAS_GEMM_DEFAULT_TENSOR_OP); -} - -template -std::vector ds_softmax_context(at::Tensor& query, - at::Tensor& prev_key, - at::Tensor& new_key, - at::Tensor& 
attn_mask, - at::Tensor& prev_value, - at::Tensor& new_value, - int heads, - float norm_factor, - bool merging, - bool triangular, - bool local_attention, - int window_size, - bool no_masking) -{ - auto query_cont = query.contiguous(); - auto prev_key_cont = prev_key.contiguous(); - auto prev_value_cont = prev_value.contiguous(); - - int new_size = (new_value.sizes().size() > 1 ? new_value.size(1) : 0); - - // Attn_Score [ batch Head Sequence-length Softmax-length] - - int bsz = query_cont.size(0); - int seq_len = query_cont.size(1); - int soft_len = prev_value.size(1); - - auto options = at::TensorOptions() - .dtype(query_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = - at::empty({prev_value.size(0), heads, seq_len, prev_value.size(2) / heads}, options); - attention_unfused(prev_key_cont, - query_cont, - attn_mask, //(no_masking ? nullptr : (T*)attn_mask.data_ptr()), - prev_value_cont, - output, - bsz, - seq_len, - soft_len, - heads, - norm_factor, - (triangular && (new_size == 0)), - (new_size == 0), - local_attention, - window_size); - - return {output, prev_key, prev_value}; -} - -template -at::Tensor ds_bias_gelu(at::Tensor& input, at::Tensor& bias) -{ - auto input_cont = input.contiguous(); - - int bsz = input_cont.size(0) * input_cont.size(1); - int intermediate_size = input_cont.size(2); - - launch_bias_gelu((T*)input_cont.data_ptr(), - (T*)bias.data_ptr(), - intermediate_size, - bsz, - Context::Instance().GetCurrentStream()); - return input_cont; -} - -template -at::Tensor ds_bias_residual(at::Tensor& input, at::Tensor& residual, at::Tensor& bias) -{ - auto input_cont = input.contiguous(); - auto residual_cont = residual.contiguous(); - - int bsz = input_cont.size(0) * input_cont.size(1); - // launch_bias_residual((T*)input_cont.data_ptr(), - // (T*)residual_cont.data_ptr(), - // (T*)bias.data_ptr(), - // bsz, - // input_cont.size(2), - // (bias.size(0) > 1), - // 
Context::Instance().GetCurrentStream()); - return input_cont; -} - -template -at::Tensor ds_layernorm(at::Tensor& input_cont, at::Tensor& gamma, at::Tensor& betta, float epsilon) -{ - int bsz = input_cont.size(0) * input_cont.size(1); - auto inp_norm = at::empty_like(input_cont); - launch_layer_norm((T*)inp_norm.data_ptr(), - (T*)input_cont.data_ptr(), - (T*)gamma.data_ptr(), - (T*)betta.data_ptr(), - epsilon, - bsz, - input_cont.size(2), - Context::Instance().GetCurrentStream()); - return inp_norm; -} - -template -at::Tensor qkv_unfused_cublas(at::Tensor& output, - at::Tensor& input, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - bool add_bias) -{ - auto inp_norm = ds_layernorm(input, gamma, beta, epsilon); - - // hipEventRecord(Context::Instance().GetCompEvent(1), Context::Instance().GetCurrentStream()); - - float alpha = (T)1.0; - float gemm_beta = (T)0.0; - int bsz = input.size(0) * input.size(1); - rocblas_set_stream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - rocblas_operation_none, - rocblas_operation_none, - weight.size(1), - bsz, - input.size(2), - &alpha, - &gemm_beta, - (T*)weight.data_ptr(), - (T*)inp_norm.data_ptr(), - (T*)output.data_ptr(), - CUBLAS_GEMM_DEFAULT_TENSOR_OP); - if (add_bias) - launch_bias_add((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - return inp_norm; -} - -template -std::vector ds_qkv_gemm(at::Tensor& input, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - bool add_bias) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - 
int bsz = input_cont.size(0) * input_cont.size(1); - auto inp_norm = - qkv_unfused_cublas(output, input_cont, weight, bias, gamma, beta, epsilon, add_bias); - - return {output, inp_norm}; -} - -template -void quantized_gemm(at::Tensor& output, - at::Tensor& input, - at::Tensor& weight, - at::Tensor& qscale, - int groups, - int merge_count) -{ - int bsz = input.size(0) * input.size(1); - auto options = at::TensorOptions() - .dtype(input.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - auto weight16 = at::empty({weight.size(0), weight.size(1)}, options); - - launch_dequantize((T*)weight16.data_ptr(), - (int8_t*)weight.data_ptr(), - (float*)qscale.data_ptr(), - weight.size(1), - weight.size(0), - groups, - merge_count, - Context::Instance().GetCurrentStream()); - - rocblas_set_stream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - - float alpha = (T)1.0; - float gemm_beta = (T)0.0; - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - rocblas_operation_none, - rocblas_operation_none, - weight.size(1), - bsz, - input.size(2), - &alpha, - &gemm_beta, - (T*)weight16.data_ptr(), - (T*)input.data_ptr(), - (T*)output.data_ptr(), - CUBLAS_GEMM_DEFAULT_TENSOR_OP); -} - -template -at::Tensor ds_qkv_gemm_int8(at::Tensor& input, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - at::Tensor& q_scale, - int groups, - bool add_bias) -{ - int bsz = input.size(0) * input.size(1); - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - - auto inp_norm = ds_layernorm(input_cont, gamma, beta, epsilon); - - quantized_gemm(output, inp_norm, weight, q_scale, groups, 0); - if (add_bias) - launch_bias_add((T*)output.data_ptr(), - 
(T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - - return output; -} - -template -at::Tensor ds_linear_layer(at::Tensor& input, at::Tensor& weight, at::Tensor& bias) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); - - float alpha = (T)1.0; - float gemm_beta = (T)0.0; - rocblas_set_stream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - rocblas_operation_none, - rocblas_operation_none, - weight.size(1), - bsz, - input_cont.size(2), - &alpha, - &gemm_beta, - (T*)weight.data_ptr(), - (T*)input_cont.data_ptr(), - (T*)output.data_ptr(), - CUBLAS_GEMM_DEFAULT_TENSOR_OP); - - launch_bias_add((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - - return output; -} - -template -at::Tensor ds_linear_layer_int8(at::Tensor& input, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& q_scale, - int groups) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - int bsz = input_cont.size(0) * input_cont.size(1); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - - quantized_gemm(output, input_cont, weight, q_scale, groups, 0); - launch_bias_add((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - return output; -} - -template -at::Tensor ds_vector_matmul(at::Tensor& input, at::Tensor& weight, bool async_op) -{ - auto input_cont = input.contiguous(); - auto 
options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); - float alpha = (T)1.0; - float gemm_beta = (T)0.0; - rocblas_set_stream(Context::Instance().GetCublasHandle(), - Context::Instance().GetCurrentStream(async_op)); - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - rocblas_operation_none, - rocblas_operation_none, - weight.size(1), - bsz, - input_cont.size(2), - &alpha, - &gemm_beta, - (T*)weight.data_ptr(), - (T*)input_cont.data_ptr(), - (T*)output.data_ptr(), - CUBLAS_GEMM_DEFAULT_TENSOR_OP); - return output; -} - -template -at::Tensor ds_vector_matmul_int8(at::Tensor& input, - at::Tensor& weight, - at::Tensor& q_scale, - int groups, - int merge_count) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - - quantized_gemm(output, input_cont, weight, q_scale, groups, merge_count); - return output; -} - -template -void mlp_unfused_cublas(at::Tensor& output, - at::Tensor& input, - at::Tensor& residual, - at::Tensor& input_bias, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - bool preLayerNorm, - bool mlp_after_attn) -{ - int bsz = input.size(0) * input.size(1); - auto inp_norm = at::empty_like(input); - - launch_residual_layer_norm((T*)inp_norm.data_ptr(), - (T*)nullptr, - (T*)input.data_ptr(), - (T*)residual.data_ptr(), - (T*)input_bias.data_ptr(), - (T*)gamma.data_ptr(), - (T*)beta.data_ptr(), - epsilon, - bsz, - input.size(2), - preLayerNorm, - mlp_after_attn, - Context::Instance().GetCurrentStream()); - - float alpha = (T)1.0; - float 
gemm_beta = (T)0.0; - rocblas_set_stream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - rocblas_operation_none, - rocblas_operation_none, - weight.size(1), - bsz, - input.size(2), - &alpha, - &gemm_beta, - (T*)weight.data_ptr(), - (T*)inp_norm.data_ptr(), - (T*)output.data_ptr(), - CUBLAS_GEMM_DEFAULT_TENSOR_OP); - launch_bias_gelu((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); -} -template -at::Tensor ds_mlp_gemm(at::Tensor& input, - at::Tensor& residual, - at::Tensor& input_bias, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - bool preLayerNorm, - bool mlp_after_attn) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); - - mlp_unfused_cublas(output, - mlp_after_attn ? 
input : residual, - residual, - input_bias, - weight, - bias, - gamma, - beta, - epsilon, - preLayerNorm, - mlp_after_attn); - - return output; -} - -template -std::vector ds_mlp_gemm_int8(at::Tensor& input, - at::Tensor& residual, - at::Tensor& input_bias, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& gamma, - at::Tensor& beta, - const float epsilon, - at::Tensor& q_scale, - int groups, - bool preLayerNorm) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - - int bsz = input_cont.size(0) * input_cont.size(1); - auto inp_norm = at::empty_like(input_cont); - - auto residual_add = (preLayerNorm ? at::empty_like(input_cont) : inp_norm); - // computing the blocking across K dimension - // launch_residual_layer_norm((T*)inp_norm.data_ptr(), - // (T*)residual_add.data_ptr(), - // (T*)input_cont.data_ptr(), - // (T*)residual.data_ptr(), - // (T*)input_bias.data_ptr(), - // (T*)gamma.data_ptr(), - // (T*)beta.data_ptr(), - // epsilon, - // bsz, - // input_cont.size(2), - // preLayerNorm, - // Context::Instance().GetCurrentStream()); - - quantized_gemm(output, inp_norm, weight, q_scale, groups, 0); - launch_bias_gelu((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - - return {output, residual_add}; -} - -template -at::Tensor fused_gemm_gelu(at::Tensor& input, - at::Tensor& weight, - at::Tensor& bias, - at::Tensor& weight_out, - const float epsilon, - bool preLayerNorm, - bool async_op) -{ - auto input_cont = input.contiguous(); - auto options = at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto intermediate = - at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, 
options); - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight_out.size(1)}, options); - int bsz = input_cont.size(0) * input_cont.size(1); - float alpha = (T)1.0; - float gemm_beta = (T)0.0; - rocblas_set_stream(Context::Instance().GetCublasHandle(), Context::Instance().GetCurrentStream()); - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - rocblas_operation_none, - rocblas_operation_none, - weight.size(1), - bsz, - input.size(2), - &alpha, - &gemm_beta, - (T*)weight.data_ptr(), - (T*)input_cont.data_ptr(), - (T*)intermediate.data_ptr(), - CUBLAS_GEMM_DEFAULT_TENSOR_OP); - launch_bias_gelu((T*)intermediate.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - - cublas_gemm_ex(Context::Instance().GetCublasHandle(), - rocblas_operation_none, - rocblas_operation_none, - weight_out.size(1), - bsz, - intermediate.size(2), - &alpha, - &gemm_beta, - (T*)weight_out.data_ptr(), - (T*)intermediate.data_ptr(), - (T*)output.data_ptr(), - CUBLAS_GEMM_DEFAULT_TENSOR_OP); - // hipEventRecord(Context::Instance().GetCompEvent(2), - // Context::Instance().GetCurrentStream(true)); - return output; -} - -void residual_add_bias(at::Tensor& output, - at::Tensor& input, - at::Tensor& attention_output, - at::Tensor& output_b, - at::Tensor& attention_b, - int mp_size, - bool mlp_after_attn) -{ - int bsz = input.size(0) * input.size(1); - int hidden_size = input.size(2); - // hipStreamWaitEvent( - // Context::Instance().GetCurrentStream(), Context::Instance().GetCompEvent(2), 0); - if (input.scalar_type() == at::kFloat) - if (mlp_after_attn) - launch_bias_residual((float*)input.data_ptr(), - (float*)output.data_ptr(), - (float*)attention_output.data_ptr(), - (float*)output_b.data_ptr(), - (float*)attention_b.data_ptr(), - bsz, - hidden_size, - mp_size, - Context::Instance().GetCurrentStream()); - else - launch_gptj_residual_add((float*)input.data_ptr(), - (float*)output.data_ptr(), - 
(float*)attention_output.data_ptr(), - (float*)output_b.data_ptr(), - (float*)attention_b.data_ptr(), - hidden_size, - bsz, - mp_size, - Context::Instance().GetCurrentStream()); - else if (mlp_after_attn) - launch_bias_residual((__half*)input.data_ptr(), - (__half*)output.data_ptr(), - (__half*)attention_output.data_ptr(), - (__half*)output_b.data_ptr(), - (__half*)attention_b.data_ptr(), - bsz, - hidden_size, - mp_size, - Context::Instance().GetCurrentStream()); - else - launch_gptj_residual_add<__half>((__half*)input.data_ptr(), - (__half*)output.data_ptr(), - (__half*)attention_output.data_ptr(), - (__half*)output_b.data_ptr(), - (__half*)attention_b.data_ptr(), - hidden_size, - bsz, - mp_size, - Context::Instance().GetCurrentStream()); -} - -std::vector apply_rotary_pos_emb(at::Tensor& mixed_query, - at::Tensor& key_layer, - unsigned rotary_dim, - unsigned offset, - unsigned num_heads, - bool rotate_half, - bool rotate_every_two) -{ - auto query_cont = mixed_query.contiguous(); - auto key_cont = key_layer.contiguous(); - - unsigned bsz = mixed_query.size(0); - unsigned head_size = mixed_query.size(2) / num_heads; - unsigned seq_len = mixed_query.size(1); - - if (mixed_query.scalar_type() == at::kFloat) - launch_apply_rotary_pos_emb((float*)query_cont.data_ptr(), - (float*)key_cont.data_ptr(), - head_size, - seq_len, - rotary_dim, - offset, - num_heads, - bsz, - rotate_half, - rotate_every_two, - Context::Instance().GetCurrentStream()); - else - launch_apply_rotary_pos_emb<__half>((__half*)query_cont.data_ptr(), - (__half*)key_cont.data_ptr(), - head_size, - seq_len, - rotary_dim, - offset, - num_heads, - bsz, - rotate_half, - rotate_every_two, - Context::Instance().GetCurrentStream()); - return {query_cont, key_cont}; -} - -template -at::Tensor fused_gemm_gelu_int8(at::Tensor& input, - at::Tensor& weight, - at::Tensor& bias, - const float epsilon, - at::Tensor& q_scale, - int groups, - bool preLayerNorm) -{ - auto input_cont = input.contiguous(); - auto options 
= at::TensorOptions() - .dtype(input_cont.options().dtype()) - .layout(at::kStrided) - .device(at::kCUDA) - .requires_grad(false); - - auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); - - int bsz = input_cont.size(0) * input_cont.size(1); - - quantized_gemm(output, input_cont, weight, q_scale, groups, 0); - launch_bias_gelu((T*)output.data_ptr(), - (T*)bias.data_ptr(), - weight.size(1), - bsz, - Context::Instance().GetCurrentStream()); - - return output; -} - -at::Tensor moe_res_matmul(at::Tensor& moe_res, at::Tensor& coef, at::Tensor& output) -{ - int M = moe_res.size(0) * moe_res.size(1); - int N = moe_res.size(2); - Context::Instance().SynchComm(); - if (moe_res.scalar_type() == at::kFloat) { - launch_moe_res_matmul((float*)moe_res.data_ptr(), - (float*)coef.data_ptr(), - (float*)output.data_ptr(), - M, - N, - at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); - } else { - launch_moe_res_matmul<__half>((__half*)moe_res.data_ptr(), - (__half*)coef.data_ptr(), - (__half*)output.data_ptr(), - M, - N, - at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); - } - return output; -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("softmax_fp32", &ds_softmax, "DeepSpeed SoftMax with fp32 (CUDA)"); - m.def("softmax_fp16", &ds_softmax<__half>, "DeepSpeed SoftMax with fp32 (CUDA)"); - m.def( - "softmax_context_fp32", &ds_softmax_context, "DeepSpeed attention with fp32 (CUDA)"); - m.def("softmax_context_fp16", - &ds_softmax_context<__half>, - "DeepSpeed attention with fp32 (CUDA)"); - m.def("bias_gelu_fp32", &ds_bias_gelu, "DeepSpeed Gelu with fp32 (CUDA)"); - m.def("bias_gelu_fp16", &ds_bias_gelu<__half>, "DeepSpeed Gelu with fp32 (CUDA)"); - m.def("bias_residual_fp32", - &ds_bias_residual, - "DeepSpeed residual-bias add with fp32 (CUDA)"); - m.def("bias_residual_fp16", - &ds_bias_residual<__half>, - "DeepSpeed residual-bias add with fp32 (CUDA)"); - m.def("layer_norm_fp32", &ds_layernorm, "DeepSpeed layer-norm with fp32 
(CUDA)"); - m.def("layer_norm_fp16", &ds_layernorm<__half>, "DeepSpeed layer-norm with fp16 (CUDA)"); - m.def("qkv_gemm_fp32", &ds_qkv_gemm, "DeepSpeed qkv gemm with fp32 (CUDA)"); - m.def("qkv_gemm_fp16", &ds_qkv_gemm<__half>, "DeepSpeed qkv gemm with fp16 (CUDA)"); - m.def("qkv_gemm_int8", &ds_qkv_gemm_int8<__half>, "DeepSpeed qkv gemm with int8 (CUDA)"); - m.def("mlp_gemm_fp32", &ds_mlp_gemm, "DeepSpeed mlp with fp32 (CUDA)"); - m.def("mlp_gemm_fp16", &ds_mlp_gemm<__half>, "DeepSpeed mlp with fp16 (CUDA)"); - m.def("mlp_gemm_int8", &ds_mlp_gemm_int8<__half>, "DeepSpeed mlp with int8 (CUDA)"); - m.def("vector_matmul_fp32", &ds_vector_matmul, "DeepSpeed vector-MM with fp32 (CUDA)"); - m.def("vector_matmul_fp16", &ds_vector_matmul<__half>, "DeepSpeed vector-MM with fp16 (CUDA)"); - m.def("vector_matmul_int8", - &ds_vector_matmul_int8<__half>, - "DeepSpeed vector-MM with int8 (CUDA)"); - m.def("linear_layer_fp32", &ds_linear_layer, "DeepSpeed linear_layer with fp32 (CUDA)"); - m.def("linear_layer_fp16", &ds_linear_layer<__half>, "DeepSpeed linear_layer with fp16 (CUDA)"); - m.def("linear_layer_int8", - &ds_linear_layer_int8<__half>, - "DeepSpeed linear_layer with int8 (CUDA)"); - m.def("fused_gemm_gelu_fp32", &fused_gemm_gelu, "DeepSpeed mlp with fp32 (CUDA)"); - m.def("fused_gemm_gelu_fp16", &fused_gemm_gelu<__half>, "DeepSpeed mlp with fp16 (CUDA)"); - m.def("residual_add", &residual_add_bias, "DeepSpeed mlp with fp16 (CUDA)"); - m.def("apply_rotary_pos_emb", &apply_rotary_pos_emb, "DeepSpeed mlp with fp16 (CUDA)"); - m.def("einsum_sec_sm_ecm_fp32", - &einsum_sec_sm_ecm, - "DeepSpeed vector-MM with fp32 (CUDA)"); - - m.def("einsum_sec_sm_ecm_fp16", - &einsum_sec_sm_ecm<__half>, - "DeepSpeed vector-MM with fp16 (CUDA)"); - m.def("moe_res_matmul", &moe_res_matmul, "DeepSpeed moe residual matmul (CUDA)"); -} diff --git a/deepspeed/ops/csrc/transformer_bak/inference/csrc/softmax.cu b/deepspeed/ops/csrc/transformer_bak/inference/csrc/softmax.cu deleted file mode 100644 
index 788de78..0000000 --- a/deepspeed/ops/csrc/transformer_bak/inference/csrc/softmax.cu +++ /dev/null @@ -1,432 +0,0 @@ -#include -#include "custom_cuda_layers.h" - -//#include -#include -#include -#include - -#define ATTN_THREADS 1024 -#define MAX_REG_SIZE 8 - -#define minus_infinity -10000.0 - -void CheckCudaErrorAux(const char* file, unsigned line) -{ - cudaError_t err = cudaGetLastError(); - if (err == cudaSuccess) return; - std::cerr << cudaGetErrorString(err) << "(" << err << ") at " << file << ":" << line - << std::endl; - throw std::runtime_error("CUDA ERROR!!!\n"); -} - -#define CUDA_CHECK_ERROR() CheckCudaErrorAux(__FILE__, __LINE__) - -namespace cg = cooperative_groups; - -__global__ void attn_softmax_v2(__half* vals, - __half* mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - int total_count, - int heads, - int sequence_length, - int num_seq, - float scale, - int iterations, - int reduceWidth) -{ -#ifdef HALF_PRECISION_AVAILABLE - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - float2 low_data[MAX_REG_SIZE]; - float2 high_data[MAX_REG_SIZE]; - - __half2 h_scale = __float2half2_rn(scale); - - int wid = threadIdx.x >> 5; - int lane = threadIdx.x & 0x1f; - int warp_num = blockDim.x >> 5; - - int reduce_blocks = reduceWidth >> 5; - int seq_lane = threadIdx.x % reduceWidth; - - __shared__ float partialSum[MAX_WARP_NUM]; - - int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks); - - if (iter_offset < total_count) { - vals += (iter_offset * sequence_length); - - int mask_offset = (iter_offset / (heads * num_seq)) * (sequence_length); - int seq_id = iter_offset % num_seq; - int seq_id4 = seq_id >> 2; - - int real_seq_id = seq_id + (num_seq == sequence_length ? 0 : sequence_length); - int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2)) - ? 
(real_seq_id >> 2) - (window_size >> 2) - : 0; - int window_stride = - (local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1; - - float max_val = minus_infinity; - - for (int i = 0; i < iterations; i++) { - int data_id = i * (reduceWidth << 2) + (seq_lane << 2); - if ((!triangular || ((data_id >> 2) <= seq_id4)) && (data_id >> 2) >= window_stride4 && - data_id < sequence_length) { - if ((sequence_length - data_id) >= 4) { - low_data[i].x = data_id > window_stride ? __half2float(vals[data_id]) - : minus_infinity; - low_data[i].y = ((!triangular || ((data_id + 1) <= seq_id)) && - (data_id + 1) > window_stride) - ? __half2float(vals[data_id + 1]) - : minus_infinity; - high_data[i].x = ((!triangular || ((data_id + 2) <= seq_id)) && - (data_id + 2) > window_stride) - ? __half2float(vals[data_id + 2]) - : minus_infinity; - high_data[i].y = ((!triangular || ((data_id + 3) <= seq_id)) && - (data_id + 3) > window_stride) - ? __half2float(vals[data_id + 3]) - : minus_infinity; - if (mask && recompute) { - low_data[i].x += __half2float(mask[data_id + mask_offset]); - low_data[i].y += __half2float(mask[data_id + mask_offset + 1]); - high_data[i].x += __half2float(mask[data_id + mask_offset + 2]); - high_data[i].y += __half2float(mask[data_id + mask_offset + 3]); - } - } else { - low_data[i].x = data_id > window_stride ? __half2float(vals[data_id]) - : minus_infinity; - low_data[i].y = (((!triangular || (data_id + 1) <= seq_id) && - (data_id + 1) > window_stride) && - (data_id + 1) < sequence_length) - ? __half2float(vals[data_id + 1]) - : minus_infinity; - high_data[i].x = (((!triangular || (data_id + 2) <= seq_id) && - (data_id + 2) > window_stride) && - (data_id + 2) < sequence_length) - ? 
__half2float(vals[data_id + 2]) - : minus_infinity; - high_data[i].y = minus_infinity; - if (mask && recompute) { - low_data[i].x += __half2float(mask[data_id + mask_offset]); - if ((data_id + 1) < sequence_length) - low_data[i].y += __half2float(mask[data_id + mask_offset + 1]); - if ((data_id + 2) < sequence_length) - high_data[i].x += __half2float(mask[data_id + mask_offset + 2]); - } - } - // if(lane == 0) printf("%f , %d, %d \n", low_data[i].x, data_id, seq_id); - max_val = (low_data[i].x > max_val ? low_data[i].x : max_val); - max_val = (low_data[i].y > max_val ? low_data[i].y : max_val); - max_val = (high_data[i].x > max_val ? high_data[i].x : max_val); - max_val = (high_data[i].y > max_val ? high_data[i].y : max_val); - } else { - low_data[i].x = minus_infinity; - low_data[i].y = minus_infinity; - high_data[i].x = minus_infinity; - high_data[i].y = minus_infinity; - } - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? temp : max_val); - } - - if (reduceWidth > WARP_SIZE) { - if (lane == 0) partialSum[wid] = max_val; - b.sync(); - - if (lane < warp_num) max_val = partialSum[lane]; - - b.sync(); - - for (int i = 1; i < reduce_blocks; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? 
temp : max_val); - } - - max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE); - } - float sum = 0; - for (int i = 0; i < iterations; i++) { - low_data[i].x = __expf(low_data[i].x - max_val); - low_data[i].y = __expf(low_data[i].y - max_val); - high_data[i].x = __expf(high_data[i].x - max_val); - high_data[i].y = __expf(high_data[i].y - max_val); - - sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i); - - if (reduceWidth > WARP_SIZE) { - if (lane == 0) partialSum[wid] = sum; - b.sync(); - - if (lane < warp_num) sum = partialSum[lane]; - - b.sync(); - - for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); } - - sum = g.shfl(sum, threadIdx.x / WARP_SIZE); - } - sum += 1e-6; - for (int i = 0; i < iterations; i++) { - int data_id = i * (reduceWidth << 2) + (seq_lane << 2); - - if (data_id < sequence_length) { - if ((sequence_length - data_id) >= 4) { - vals[data_id] = low_data[i].x / sum; - vals[data_id + 1] = low_data[i].y / sum; - vals[data_id + 2] = high_data[i].x / sum; - vals[data_id + 3] = high_data[i].y / sum; - } else { - vals[data_id] = low_data[i].x / sum; - if ((data_id + 1) < sequence_length) vals[data_id + 1] = low_data[i].y / sum; - if ((data_id + 2) < sequence_length) vals[data_id + 2] = high_data[i].x / sum; - } - } - } - } -#endif -} - -__global__ void attn_softmax_v2(float* vals, - float* attn_mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - int total_count, - int heads, - int sequence_length, - int num_seq, - float scale, - int iterations, - int reduceWidth) -{ - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - float4 data[MAX_REG_SIZE]; - - int wid = threadIdx.x >> 5; - int lane = threadIdx.x & 0x1f; - int warp_num = blockDim.x >> 5; - - int reduce_blocks = reduceWidth >> 5; - int seq_lane = threadIdx.x % reduceWidth; - - __shared__ float 
partialSum[MAX_WARP_NUM]; - - int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks); - if (iter_offset < total_count) { - vals += (iter_offset * sequence_length); - - int mask_offset = (iter_offset / (heads * num_seq)) * (sequence_length); - int seq_id = iter_offset % num_seq; - int seq_id4 = seq_id >> 2; - - int real_seq_id = seq_id + (num_seq == sequence_length ? 0 : sequence_length); - int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2)) - ? (real_seq_id >> 2) - (window_size >> 2) - : 0; - int window_stride = - (local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1; - - float max_val = minus_infinity; - - for (int i = 0; i < iterations; i++) { - int data_id = i * (reduceWidth << 2) + (seq_lane << 2); - if ((!triangular || ((data_id >> 2) <= seq_id4)) && (data_id >> 2) >= window_stride4 && - data_id < sequence_length) { - if ((sequence_length - data_id) >= 4) { - data[i].x = (data_id > window_stride ? vals[data_id] : minus_infinity); - data[i].y = ((!triangular || ((data_id + 1) <= seq_id)) && - (data_id + 1) > window_stride) - ? vals[data_id + 1] - : minus_infinity; - data[i].z = ((!triangular || ((data_id + 2) <= seq_id)) && - (data_id + 2) > window_stride) - ? vals[data_id + 2] - : minus_infinity; - data[i].w = ((!triangular || ((data_id + 3) <= seq_id)) && - (data_id + 3) > window_stride) - ? vals[data_id + 3] - : minus_infinity; - if (attn_mask && recompute) { - data[i].x += attn_mask[data_id + mask_offset]; - data[i].y += attn_mask[data_id + mask_offset + 1]; - data[i].z += attn_mask[data_id + mask_offset + 2]; - data[i].w += attn_mask[data_id + mask_offset + 3]; - } - } else { - data[i].x = data_id > window_stride ? vals[data_id] : minus_infinity; - data[i].y = (((!triangular || (data_id + 1) <= seq_id)) && - (data_id + 1) > window_stride && (data_id + 1) < sequence_length) - ? 
(vals[data_id + 1]) - : minus_infinity; - data[i].z = (((!triangular || (data_id + 2) <= seq_id)) && - (data_id + 2) > window_stride && (data_id + 2) < sequence_length) - ? (vals[data_id + 2]) - : minus_infinity; - data[i].w = minus_infinity; - if (attn_mask && recompute) { - data[i].x += attn_mask[data_id + mask_offset]; - if ((data_id + 1) < sequence_length) - data[i].y += attn_mask[data_id + mask_offset + 1]; - if ((data_id + 2) < sequence_length) - data[i].z += attn_mask[data_id + mask_offset + 2]; - } - } - max_val = (data[i].x > max_val ? data[i].x : max_val); - max_val = (data[i].y > max_val ? data[i].y : max_val); - max_val = (data[i].z > max_val ? data[i].z : max_val); - max_val = (data[i].w > max_val ? data[i].w : max_val); - } else { - data[i].x = minus_infinity; - data[i].y = minus_infinity; - data[i].z = minus_infinity; - data[i].w = minus_infinity; - } - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? temp : max_val); - } - - if (reduceWidth > WARP_SIZE) { - if (lane == 0) partialSum[wid] = max_val; - b.sync(); - - if (lane < warp_num) max_val = partialSum[lane]; - - b.sync(); - - for (int i = 1; i < reduce_blocks; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? 
temp : max_val); - } - - max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE); - } - - float sum = 0; - for (int i = 0; i < iterations; i++) { - data[i].x = __expf(data[i].x - max_val); - data[i].y = __expf(data[i].y - max_val); - data[i].z = __expf(data[i].z - max_val); - data[i].w = __expf(data[i].w - max_val); - - sum += (data[i].x + data[i].y + data[i].z + data[i].w); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i); - - if (reduceWidth > WARP_SIZE) { - if (lane == 0) partialSum[wid] = sum; - b.sync(); - - if (lane < warp_num) sum = partialSum[lane]; - - b.sync(); - - for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); } - - sum = g.shfl(sum, threadIdx.x / WARP_SIZE); - } - sum += 1e-6; - - for (int i = 0; i < iterations; i++) { - int data_id = i * (reduceWidth << 2) + (seq_lane << 2); - - if (data_id < sequence_length) { - if ((sequence_length - data_id) >= 4) { - vals[data_id] = data[i].x / sum; - vals[data_id + 1] = data[i].y / sum; - vals[data_id + 2] = data[i].z / sum; - vals[data_id + 3] = data[i].w / sum; - } else { - vals[data_id] = data[i].x / sum; - if ((data_id + 1) < sequence_length) vals[data_id + 1] = data[i].y / sum; - if ((data_id + 2) < sequence_length) vals[data_id + 2] = data[i].z / sum; - } - } - } - } -} - -template -void launch_attn_softmax_v2(T* vals, - T* mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - int batch_size, - int heads, - int num_seq, - int sequence_length, - float scale, - cudaStream_t stream) -{ - int total_count = batch_size * heads * num_seq; - dim3 grid_dim((total_count - 1) / (WARP_SIZE / ((sequence_length - 1) / ATTN_THREADS + 1)) + 1); - dim3 block_dim(ATTN_THREADS); - - const int reduce_width = ((sequence_length - 1) / ATTN_THREADS + 1) * WARP_SIZE; - const int iterations = (sequence_length - 1) / (reduce_width << 2) + 1; - - if (sequence_length <= 32768) - attn_softmax_v2<<>>( - vals, - mask, - triangular, - recompute, - 
local_attention, - window_size, - total_count, - (triangular ? (heads * batch_size) : heads), - sequence_length, - num_seq, - scale, - iterations, - reduce_width); - else - throw std::runtime_error("Unsupport Seq_Length!"); -} - -template void launch_attn_softmax_v2(float* vals, - float* mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - int batch_size, - int heads, - int num_seq, - int sequence_length, - float scale, - cudaStream_t stream); -template void launch_attn_softmax_v2(__half* vals, - __half* mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - int batch_size, - int heads, - int num_seq, - int sequence_length, - float scale, - cudaStream_t stream); diff --git a/deepspeed/ops/csrc/transformer_bak/inference/csrc/softmax.hip b/deepspeed/ops/csrc/transformer_bak/inference/csrc/softmax.hip deleted file mode 100644 index a933d51..0000000 --- a/deepspeed/ops/csrc/transformer_bak/inference/csrc/softmax.hip +++ /dev/null @@ -1,434 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
-#include "hip/hip_runtime.h" -#include -#include "custom_hip_layers.h" - -//#include -#include -#include -#include - -#define ATTN_THREADS 1024 -#define MAX_REG_SIZE 8 - -#define minus_infinity -10000.0 - -void CheckCudaErrorAux(const char* file, unsigned line) -{ - hipError_t err = hipGetLastError(); - if (err == hipSuccess) return; - std::cerr << hipGetErrorString(err) << "(" << err << ") at " << file << ":" << line - << std::endl; - throw std::runtime_error("CUDA ERROR!!!\n"); -} - -#define CUDA_CHECK_ERROR() CheckCudaErrorAux(__FILE__, __LINE__) - -namespace cg = cooperative_groups; - -__global__ void attn_softmax_v2(__half* vals, - __half* mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - int total_count, - int heads, - int sequence_length, - int num_seq, - float scale, - int iterations, - int reduceWidth) -{ -#ifdef HALF_PRECISION_AVAILABLE - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - float2 low_data[MAX_REG_SIZE]; - float2 high_data[MAX_REG_SIZE]; - - __half2 h_scale = __float2half2_rn(scale); - - int wid = threadIdx.x >> 5; - int lane = threadIdx.x & 0x1f; - int warp_num = blockDim.x >> 5; - - int reduce_blocks = reduceWidth >> 5; - int seq_lane = threadIdx.x % reduceWidth; - - __shared__ float partialSum[MAX_WARP_NUM]; - - int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks); - - if (iter_offset < total_count) { - vals += (iter_offset * sequence_length); - - int mask_offset = (iter_offset / (heads * num_seq)) * (sequence_length); - int seq_id = iter_offset % num_seq; - int seq_id4 = seq_id >> 2; - - int real_seq_id = seq_id + (num_seq == sequence_length ? 0 : sequence_length); - int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2)) - ? (real_seq_id >> 2) - (window_size >> 2) - : 0; - int window_stride = - (local_attention && real_seq_id >= window_size) ? 
real_seq_id - window_size : -1; - - float max_val = minus_infinity; - - for (int i = 0; i < iterations; i++) { - int data_id = i * (reduceWidth << 2) + (seq_lane << 2); - if ((!triangular || ((data_id >> 2) <= seq_id4)) && (data_id >> 2) >= window_stride4 && - data_id < sequence_length) { - if ((sequence_length - data_id) >= 4) { - low_data[i].x = data_id > window_stride ? __half2float(vals[data_id]) - : minus_infinity; - low_data[i].y = ((!triangular || ((data_id + 1) <= seq_id)) && - (data_id + 1) > window_stride) - ? __half2float(vals[data_id + 1]) - : minus_infinity; - high_data[i].x = ((!triangular || ((data_id + 2) <= seq_id)) && - (data_id + 2) > window_stride) - ? __half2float(vals[data_id + 2]) - : minus_infinity; - high_data[i].y = ((!triangular || ((data_id + 3) <= seq_id)) && - (data_id + 3) > window_stride) - ? __half2float(vals[data_id + 3]) - : minus_infinity; - if (mask && recompute) { - low_data[i].x += __half2float(mask[data_id + mask_offset]); - low_data[i].y += __half2float(mask[data_id + mask_offset + 1]); - high_data[i].x += __half2float(mask[data_id + mask_offset + 2]); - high_data[i].y += __half2float(mask[data_id + mask_offset + 3]); - } - } else { - low_data[i].x = data_id > window_stride ? __half2float(vals[data_id]) - : minus_infinity; - low_data[i].y = (((!triangular || (data_id + 1) <= seq_id) && - (data_id + 1) > window_stride) && - (data_id + 1) < sequence_length) - ? __half2float(vals[data_id + 1]) - : minus_infinity; - high_data[i].x = (((!triangular || (data_id + 2) <= seq_id) && - (data_id + 2) > window_stride) && - (data_id + 2) < sequence_length) - ? 
__half2float(vals[data_id + 2]) - : minus_infinity; - high_data[i].y = minus_infinity; - if (mask && recompute) { - low_data[i].x += __half2float(mask[data_id + mask_offset]); - if ((data_id + 1) < sequence_length) - low_data[i].y += __half2float(mask[data_id + mask_offset + 1]); - if ((data_id + 2) < sequence_length) - high_data[i].x += __half2float(mask[data_id + mask_offset + 2]); - } - } - // if(lane == 0) printf("%f , %d, %d \n", low_data[i].x, data_id, seq_id); - max_val = (low_data[i].x > max_val ? low_data[i].x : max_val); - max_val = (low_data[i].y > max_val ? low_data[i].y : max_val); - max_val = (high_data[i].x > max_val ? high_data[i].x : max_val); - max_val = (high_data[i].y > max_val ? high_data[i].y : max_val); - } else { - low_data[i].x = minus_infinity; - low_data[i].y = minus_infinity; - high_data[i].x = minus_infinity; - high_data[i].y = minus_infinity; - } - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? temp : max_val); - } - - if (reduceWidth > WARP_SIZE) { - if (lane == 0) partialSum[wid] = max_val; - b.sync(); - - if (lane < warp_num) max_val = partialSum[lane]; - - b.sync(); - - for (int i = 1; i < reduce_blocks; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? 
temp : max_val); - } - - max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE); - } - float sum = 0; - for (int i = 0; i < iterations; i++) { - low_data[i].x = __expf(low_data[i].x - max_val); - low_data[i].y = __expf(low_data[i].y - max_val); - high_data[i].x = __expf(high_data[i].x - max_val); - high_data[i].y = __expf(high_data[i].y - max_val); - - sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i); - - if (reduceWidth > WARP_SIZE) { - if (lane == 0) partialSum[wid] = sum; - b.sync(); - - if (lane < warp_num) sum = partialSum[lane]; - - b.sync(); - - for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); } - - sum = g.shfl(sum, threadIdx.x / WARP_SIZE); - } - sum += 1e-6; - for (int i = 0; i < iterations; i++) { - int data_id = i * (reduceWidth << 2) + (seq_lane << 2); - - if (data_id < sequence_length) { - if ((sequence_length - data_id) >= 4) { - vals[data_id] = low_data[i].x / sum; - vals[data_id + 1] = low_data[i].y / sum; - vals[data_id + 2] = high_data[i].x / sum; - vals[data_id + 3] = high_data[i].y / sum; - } else { - vals[data_id] = low_data[i].x / sum; - if ((data_id + 1) < sequence_length) vals[data_id + 1] = low_data[i].y / sum; - if ((data_id + 2) < sequence_length) vals[data_id + 2] = high_data[i].x / sum; - } - } - } - } -#endif -} - -__global__ void attn_softmax_v2(float* vals, - float* attn_mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - int total_count, - int heads, - int sequence_length, - int num_seq, - float scale, - int iterations, - int reduceWidth) -{ - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - float4 data[MAX_REG_SIZE]; - - int wid = threadIdx.x >> 5; - int lane = threadIdx.x & 0x1f; - int warp_num = blockDim.x >> 5; - - int reduce_blocks = reduceWidth >> 5; - int seq_lane = threadIdx.x % reduceWidth; - - __shared__ float 
partialSum[MAX_WARP_NUM]; - - int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks); - if (iter_offset < total_count) { - vals += (iter_offset * sequence_length); - - int mask_offset = (iter_offset / (heads * num_seq)) * (sequence_length); - int seq_id = iter_offset % num_seq; - int seq_id4 = seq_id >> 2; - - int real_seq_id = seq_id + (num_seq == sequence_length ? 0 : sequence_length); - int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2)) - ? (real_seq_id >> 2) - (window_size >> 2) - : 0; - int window_stride = - (local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1; - - float max_val = minus_infinity; - - for (int i = 0; i < iterations; i++) { - int data_id = i * (reduceWidth << 2) + (seq_lane << 2); - if ((!triangular || ((data_id >> 2) <= seq_id4)) && (data_id >> 2) >= window_stride4 && - data_id < sequence_length) { - if ((sequence_length - data_id) >= 4) { - data[i].x = (data_id > window_stride ? vals[data_id] : minus_infinity); - data[i].y = ((!triangular || ((data_id + 1) <= seq_id)) && - (data_id + 1) > window_stride) - ? vals[data_id + 1] - : minus_infinity; - data[i].z = ((!triangular || ((data_id + 2) <= seq_id)) && - (data_id + 2) > window_stride) - ? vals[data_id + 2] - : minus_infinity; - data[i].w = ((!triangular || ((data_id + 3) <= seq_id)) && - (data_id + 3) > window_stride) - ? vals[data_id + 3] - : minus_infinity; - if (attn_mask && recompute) { - data[i].x += attn_mask[data_id + mask_offset]; - data[i].y += attn_mask[data_id + mask_offset + 1]; - data[i].z += attn_mask[data_id + mask_offset + 2]; - data[i].w += attn_mask[data_id + mask_offset + 3]; - } - } else { - data[i].x = data_id > window_stride ? vals[data_id] : minus_infinity; - data[i].y = (((!triangular || (data_id + 1) <= seq_id)) && - (data_id + 1) > window_stride && (data_id + 1) < sequence_length) - ? 
(vals[data_id + 1]) - : minus_infinity; - data[i].z = (((!triangular || (data_id + 2) <= seq_id)) && - (data_id + 2) > window_stride && (data_id + 2) < sequence_length) - ? (vals[data_id + 2]) - : minus_infinity; - data[i].w = minus_infinity; - if (attn_mask && recompute) { - data[i].x += attn_mask[data_id + mask_offset]; - if ((data_id + 1) < sequence_length) - data[i].y += attn_mask[data_id + mask_offset + 1]; - if ((data_id + 2) < sequence_length) - data[i].z += attn_mask[data_id + mask_offset + 2]; - } - } - max_val = (data[i].x > max_val ? data[i].x : max_val); - max_val = (data[i].y > max_val ? data[i].y : max_val); - max_val = (data[i].z > max_val ? data[i].z : max_val); - max_val = (data[i].w > max_val ? data[i].w : max_val); - } else { - data[i].x = minus_infinity; - data[i].y = minus_infinity; - data[i].z = minus_infinity; - data[i].w = minus_infinity; - } - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? temp : max_val); - } - - if (reduceWidth > WARP_SIZE) { - if (lane == 0) partialSum[wid] = max_val; - b.sync(); - - if (lane < warp_num) max_val = partialSum[lane]; - - b.sync(); - - for (int i = 1; i < reduce_blocks; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? 
temp : max_val); - } - - max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE); - } - - float sum = 0; - for (int i = 0; i < iterations; i++) { - data[i].x = __expf(data[i].x - max_val); - data[i].y = __expf(data[i].y - max_val); - data[i].z = __expf(data[i].z - max_val); - data[i].w = __expf(data[i].w - max_val); - - sum += (data[i].x + data[i].y + data[i].z + data[i].w); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i); - - if (reduceWidth > WARP_SIZE) { - if (lane == 0) partialSum[wid] = sum; - b.sync(); - - if (lane < warp_num) sum = partialSum[lane]; - - b.sync(); - - for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); } - - sum = g.shfl(sum, threadIdx.x / WARP_SIZE); - } - sum += 1e-6; - - for (int i = 0; i < iterations; i++) { - int data_id = i * (reduceWidth << 2) + (seq_lane << 2); - - if (data_id < sequence_length) { - if ((sequence_length - data_id) >= 4) { - vals[data_id] = data[i].x / sum; - vals[data_id + 1] = data[i].y / sum; - vals[data_id + 2] = data[i].z / sum; - vals[data_id + 3] = data[i].w / sum; - } else { - vals[data_id] = data[i].x / sum; - if ((data_id + 1) < sequence_length) vals[data_id + 1] = data[i].y / sum; - if ((data_id + 2) < sequence_length) vals[data_id + 2] = data[i].z / sum; - } - } - } - } -} - -template -void launch_attn_softmax_v2(T* vals, - T* mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - int batch_size, - int heads, - int num_seq, - int sequence_length, - float scale, - hipStream_t stream) -{ - int total_count = batch_size * heads * num_seq; - dim3 grid_dim((total_count - 1) / (WARP_SIZE / ((sequence_length - 1) / ATTN_THREADS + 1)) + 1); - dim3 block_dim(ATTN_THREADS); - - const int reduce_width = ((sequence_length - 1) / ATTN_THREADS + 1) * WARP_SIZE; - const int iterations = (sequence_length - 1) / (reduce_width << 2) + 1; - - if (sequence_length <= 32768) - hipLaunchKernelGGL(( attn_softmax_v2), dim3(grid_dim), dim3(block_dim), 0, stream, 
- vals, - mask, - triangular, - recompute, - local_attention, - window_size, - total_count, - (triangular ? (heads * batch_size) : heads), - sequence_length, - num_seq, - scale, - iterations, - reduce_width); - else - throw std::runtime_error("Unsupport Seq_Length!"); -} - -template void launch_attn_softmax_v2(float* vals, - float* mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - int batch_size, - int heads, - int num_seq, - int sequence_length, - float scale, - hipStream_t stream); -template void launch_attn_softmax_v2(__half* vals, - __half* mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - int batch_size, - int heads, - int num_seq, - int sequence_length, - float scale, - hipStream_t stream); diff --git a/deepspeed/ops/csrc/transformer_bak/inference/includes/context.h b/deepspeed/ops/csrc/transformer_bak/inference/includes/context.h deleted file mode 100644 index 79008d4..0000000 --- a/deepspeed/ops/csrc/transformer_bak/inference/includes/context.h +++ /dev/null @@ -1,177 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include "cublas_v2.h" -#include "cuda.h" -#include "curand.h" - -#define WARP_SIZE 32 - -#define CUDA_CHECK(callstr) \ - { \ - cudaError_t error_code = callstr; \ - if (error_code != cudaSuccess) { \ - std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__; \ - assert(0); \ - } \ - } - -#define CUDA_1D_KERNEL_LOOP(i, n) \ - for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) - -#define CUDA_2D_KERNEL_LOOP(i, n, j, m) \ - for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) \ - for (size_t j = blockIdx.y * blockDim.y + threadIdx.y; j < (m); j += blockDim.y * gridDim.y) - -#define DS_CUDA_NUM_THREADS 512 -#define DS_MAXIMUM_NUM_BLOCKS 262144 - -inline int DS_GET_BLOCKS(const int N) -{ - return std::max( - std::min((N + DS_CUDA_NUM_THREADS - 
1) / DS_CUDA_NUM_THREADS, DS_MAXIMUM_NUM_BLOCKS), - // Use at least 1 block, since CUDA does not allow empty block - 1); -} - -class Context { -public: - Context() : _workspace(nullptr), _seed(42), _curr_offset(0), _stream(0) - { - curandCreateGenerator(&_gen, CURAND_RNG_PSEUDO_DEFAULT); - curandSetPseudoRandomGeneratorSeed(_gen, 123); - if (cublasCreate(&_cublasHandle) != CUBLAS_STATUS_SUCCESS) { - auto message = std::string("Fail to create cublas handle."); - std::cerr << message << std::endl; - throw std::runtime_error(message); - } - cublasSetMathMode(_cublasHandle, CUBLAS_TENSOR_OP_MATH); - cudaEventCreate(&_comp1_event, (cudaEventDisableTiming | cudaEventBlockingSync)); - cudaEventCreate(&_comp2_event, (cudaEventDisableTiming | cudaEventBlockingSync)); - cudaEventCreate(&_comp_event, (cudaEventDisableTiming | cudaEventBlockingSync)); - cudaEventCreate(&_comm_event, (cudaEventDisableTiming | cudaEventBlockingSync)); - } - - virtual ~Context() - { - cublasDestroy(_cublasHandle); - cudaFree(_workspace); - cudaEventDestroy(_comp1_event); - cudaEventDestroy(_comp2_event); - cudaEventDestroy(_comp_event); - cudaEventDestroy(_comm_event); - } - - static Context& Instance() - { - static Context _ctx; - return _ctx; - } - - void GenWorkSpace(size_t size) - { - if (!_workspace) { - assert(_workspace == nullptr); - cudaMalloc(&_workspace, size); - } else if (_workSpaceSize < size) { - cudaFree(_workspace); - cudaMalloc(&_workspace, size); - } - - _workSpaceSize = size; - } - - cudaEvent_t GetCompEvent(int id) { return id == 1 ? 
_comp1_event : _comp2_event; } - - size_t get_workspace_size() const { return _workSpaceSize; } - void* GetWorkSpace() { return _workspace; } - - inline unsigned new_token(unsigned layer_id) - { - if (layer_id == 0) _token_length++; - return _token_length; - } - - inline void reset_tokens(unsigned initial_tokens = 0) - { - _num_tokens = initial_tokens; - } //_token_length = 0; } - - inline unsigned current_tokens() const { return _num_tokens; } - - inline void advance_tokens() { _num_tokens++; } - - curandGenerator_t& GetRandGenerator() { return _gen; } - - cudaStream_t GetCommStream(bool async_op = false) - { - if (!_comm_stream) - _comm_stream = async_op ? at::cuda::getStreamFromPool(true) - : at::cuda::getCurrentCUDAStream(); - return _comm_stream; - } - cudaStream_t GetCurrentStream(bool other_stream = false) - { - // get current pytorch stream. - if (other_stream) { - if (!_stream) _stream = at::cuda::getStreamFromPool(true); - return _stream; - } - cudaStream_t stream = at::cuda::getCurrentCUDAStream(); - return stream; - } - - cublasHandle_t GetCublasHandle() { return _cublasHandle; } - - std::pair IncrementOffset(uint64_t offset_inc) - { - uint64_t offset = _curr_offset; - _curr_offset += offset_inc; - return std::pair(_seed, offset); - } - - void SetSeed(uint64_t new_seed) { _seed = new_seed; } - - const std::vector>& GetGemmAlgos() const { return _gemm_algos; } - - inline void SynchComp() - { - cudaEventRecord(_comp_event, _comp_stream); - cudaStreamWaitEvent(_comm_stream, _comp_event, 0); - } - inline void SynchComm() - { - cudaEventRecord(_comm_event, _comm_stream); - cudaStreamWaitEvent(_comp_stream, _comm_event, 0); - } - -private: - curandGenerator_t _gen; - cublasHandle_t _cublasHandle; - - cudaEvent_t _comp_event; - cudaEvent_t _comm_event; - - void* _workspace; - uint64_t _seed; - uint64_t _curr_offset; - size_t _workSpaceSize; - - cudaEvent_t _comp1_event; - cudaEvent_t _comp2_event; - - cudaStream_t _stream; - - unsigned _token_length; - 
unsigned _num_tokens; - std::vector> _gemm_algos; - - cudaStream_t _comp_stream; - cudaStream_t _comm_stream; - - std::unordered_map _world_sizes; -}; diff --git a/deepspeed/ops/csrc/transformer_bak/inference/includes/context_hip.h b/deepspeed/ops/csrc/transformer_bak/inference/includes/context_hip.h deleted file mode 100644 index 89c6299..0000000 --- a/deepspeed/ops/csrc/transformer_bak/inference/includes/context_hip.h +++ /dev/null @@ -1,178 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! -#pragma once - -#include -#include -#include -#include -#include -#include "rocblas.h" -#include "hip/hip_runtime.h" -#include "hiprand/hiprand.h" - -#define WARP_SIZE 32 - -#define CUDA_CHECK(callstr) \ - { \ - hipError_t error_code = callstr; \ - if (error_code != hipSuccess) { \ - std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__; \ - assert(0); \ - } \ - } - -#define CUDA_1D_KERNEL_LOOP(i, n) \ - for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) - -#define CUDA_2D_KERNEL_LOOP(i, n, j, m) \ - for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) \ - for (size_t j = blockIdx.y * blockDim.y + threadIdx.y; j < (m); j += blockDim.y * gridDim.y) - -#define DS_CUDA_NUM_THREADS 512 -#define DS_MAXIMUM_NUM_BLOCKS 262144 - -inline int DS_GET_BLOCKS(const int N) -{ - return std::max( - std::min((N + DS_CUDA_NUM_THREADS - 1) / DS_CUDA_NUM_THREADS, DS_MAXIMUM_NUM_BLOCKS), - // Use at least 1 block, since CUDA does not allow empty block - 1); -} - -class Context { -public: - Context() : _workspace(nullptr), _seed(42), _curr_offset(0), _stream(0) - { - hiprandCreateGenerator(&_gen, HIPRAND_RNG_PSEUDO_DEFAULT); - hiprandSetPseudoRandomGeneratorSeed(_gen, 123); - if (rocblas_create_handle(&_cublasHandle) != rocblas_status_success) { - auto message = std::string("Fail to create cublas handle."); - std::cerr << message << std::endl; - throw 
std::runtime_error(message); - } - rocblas_set_math_mode(_cublasHandle, CUBLAS_TENSOR_OP_MATH); - hipEventCreate(&_comp1_event, (hipEventDisableTiming | hipEventBlockingSync)); - hipEventCreate(&_comp2_event, (hipEventDisableTiming | hipEventBlockingSync)); - hipEventCreate(&_comp_event, (hipEventDisableTiming | hipEventBlockingSync)); - hipEventCreate(&_comm_event, (hipEventDisableTiming | hipEventBlockingSync)); - } - - virtual ~Context() - { - rocblas_destroy_handle(_cublasHandle); - hipFree(_workspace); - hipEventDestroy(_comp1_event); - hipEventDestroy(_comp2_event); - hipEventDestroy(_comp_event); - hipEventDestroy(_comm_event); - } - - static Context& Instance() - { - static Context _ctx; - return _ctx; - } - - void GenWorkSpace(size_t size) - { - if (!_workspace) { - assert(_workspace == nullptr); - hipMalloc(&_workspace, size); - } else if (_workSpaceSize < size) { - hipFree(_workspace); - hipMalloc(&_workspace, size); - } - - _workSpaceSize = size; - } - - hipEvent_t GetCompEvent(int id) { return id == 1 ? _comp1_event : _comp2_event; } - - size_t get_workspace_size() const { return _workSpaceSize; } - void* GetWorkSpace() { return _workspace; } - - inline unsigned new_token(unsigned layer_id) - { - if (layer_id == 0) _token_length++; - return _token_length; - } - - inline void reset_tokens(unsigned initial_tokens = 0) - { - _num_tokens = initial_tokens; - } //_token_length = 0; } - - inline unsigned current_tokens() const { return _num_tokens; } - - inline void advance_tokens() { _num_tokens++; } - - hiprandGenerator_t& GetRandGenerator() { return _gen; } - - hipStream_t GetCommStream(bool async_op = false) - { - if (!_comm_stream) - _comm_stream = async_op ? at::hip::getStreamFromPoolMasqueradingAsCUDA(true) - : at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); - return _comm_stream; - } - hipStream_t GetCurrentStream(bool other_stream = false) - { - // get current pytorch stream. 
- if (other_stream) { - if (!_stream) _stream = at::hip::getStreamFromPoolMasqueradingAsCUDA(true); - return _stream; - } - hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); - return stream; - } - - rocblas_handle GetCublasHandle() { return _cublasHandle; } - - std::pair IncrementOffset(uint64_t offset_inc) - { - uint64_t offset = _curr_offset; - _curr_offset += offset_inc; - return std::pair(_seed, offset); - } - - void SetSeed(uint64_t new_seed) { _seed = new_seed; } - - const std::vector>& GetGemmAlgos() const { return _gemm_algos; } - - inline void SynchComp() - { - hipEventRecord(_comp_event, _comp_stream); - hipStreamWaitEvent(_comm_stream, _comp_event, 0); - } - inline void SynchComm() - { - hipEventRecord(_comm_event, _comm_stream); - hipStreamWaitEvent(_comp_stream, _comm_event, 0); - } - -private: - hiprandGenerator_t _gen; - rocblas_handle _cublasHandle; - - hipEvent_t _comp_event; - hipEvent_t _comm_event; - - void* _workspace; - uint64_t _seed; - uint64_t _curr_offset; - size_t _workSpaceSize; - - hipEvent_t _comp1_event; - hipEvent_t _comp2_event; - - hipStream_t _stream; - - unsigned _token_length; - unsigned _num_tokens; - std::vector> _gemm_algos; - - hipStream_t _comp_stream; - hipStream_t _comm_stream; - - std::unordered_map _world_sizes; -}; diff --git a/deepspeed/ops/csrc/transformer_bak/inference/includes/cublas_wrappers.h b/deepspeed/ops/csrc/transformer_bak/inference/includes/cublas_wrappers.h deleted file mode 100644 index 3addd02..0000000 --- a/deepspeed/ops/csrc/transformer_bak/inference/includes/cublas_wrappers.h +++ /dev/null @@ -1,207 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include -#include - -int cublas_gemm_ex(cublasHandle_t handle, - cublasOperation_t transa, - cublasOperation_t transb, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const float* A, - const float* B, - float* C, - cublasGemmAlgo_t algo) -{ - cublasStatus_t status = cublasGemmEx(handle, 
- transa, - transb, - m, - n, - k, - (const void*)alpha, - (const void*)A, - CUDA_R_32F, - (transa == CUBLAS_OP_N) ? m : k, - (const void*)B, - CUDA_R_32F, - (transb == CUBLAS_OP_N) ? k : n, - (const void*)beta, - C, - CUDA_R_32F, - m, - CUDA_R_32F, - algo); - - if (status != CUBLAS_STATUS_SUCCESS) { - fprintf(stderr, - "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", - m, - n, - k, - (int)status); - return EXIT_FAILURE; - } - return 0; -} - -int cublas_gemm_ex(cublasHandle_t handle, - cublasOperation_t transa, - cublasOperation_t transb, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const __half* A, - const __half* B, - __half* C, - cublasGemmAlgo_t algo) -{ - cublasStatus_t status = cublasGemmEx(handle, - transa, - transb, - m, - n, - k, - (const void*)alpha, - (const void*)A, - CUDA_R_16F, - (transa == CUBLAS_OP_N) ? m : k, - (const void*)B, - CUDA_R_16F, - (transb == CUBLAS_OP_N) ? k : n, - (const void*)beta, - (void*)C, - CUDA_R_16F, - m, - CUDA_R_32F, - algo); - - if (status != CUBLAS_STATUS_SUCCESS) { - fprintf(stderr, - "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", - m, - n, - k, - (int)status); - return EXIT_FAILURE; - } - return 0; -} - -int cublas_strided_batched_gemm(cublasHandle_t handle, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const float* A, - const float* B, - float* C, - cublasOperation_t op_A, - cublasOperation_t op_B, - int stride_A, - int stride_B, - int stride_C, - int batch, - cublasGemmAlgo_t algo) -{ - cublasStatus_t status = cublasGemmStridedBatchedEx(handle, - op_A, - op_B, - m, - n, - k, - alpha, - A, - CUDA_R_32F, - (op_A == CUBLAS_OP_N) ? m : k, - stride_A, - B, - CUDA_R_32F, - (op_B == CUBLAS_OP_N) ? k : n, - stride_B, - beta, - C, - CUDA_R_32F, - m, - stride_C, - batch, - CUDA_R_32F, - algo); - - if (status != CUBLAS_STATUS_SUCCESS) { - fprintf(stderr, - "!!!! kernel execution error. 
(batch: %d, m: %d, n: %d, k: %d, error: %d) \n", - batch, - m, - n, - k, - (int)status); - return EXIT_FAILURE; - } - return 0; -} - -int cublas_strided_batched_gemm(cublasHandle_t handle, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const __half* A, - const __half* B, - __half* C, - cublasOperation_t op_A, - cublasOperation_t op_B, - int stride_A, - int stride_B, - int stride_C, - int batch, - cublasGemmAlgo_t algo) -{ - cublasStatus_t status = cublasGemmStridedBatchedEx(handle, - op_A, - op_B, - m, - n, - k, - alpha, - A, - CUDA_R_16F, - (op_A == CUBLAS_OP_N) ? m : k, - stride_A, - B, - CUDA_R_16F, - (op_B == CUBLAS_OP_N) ? k : n, - stride_B, - beta, - C, - CUDA_R_16F, - m, - stride_C, - batch, - CUDA_R_32F, - algo); - - if (status != CUBLAS_STATUS_SUCCESS) { - fprintf(stderr, - "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", - m, - n, - k, - (int)status); - return EXIT_FAILURE; - } - - return 0; -} diff --git a/deepspeed/ops/csrc/transformer_bak/inference/includes/cublas_wrappers_hip.h b/deepspeed/ops/csrc/transformer_bak/inference/includes/cublas_wrappers_hip.h deleted file mode 100644 index 285e5be..0000000 --- a/deepspeed/ops/csrc/transformer_bak/inference/includes/cublas_wrappers_hip.h +++ /dev/null @@ -1,208 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! -#pragma once - -#include -#include -#include -#include -#include -#include -#include - -int cublas_gemm_ex(rocblas_handle handle, - rocblas_operation transa, - rocblas_operation transb, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const float* A, - const float* B, - float* C, - cublasGemmAlgo_t algo) -{ - rocblas_status status = rocblas_gemmex(handle, - transa, - transb, - m, - n, - k, - (const void*)alpha, - (const void*)A, - hipR32F, - (transa == rocblas_operation_none) ? m : k, - (const void*)B, - hipR32F, - (transb == rocblas_operation_none) ? 
k : n, - (const void*)beta, - C, - hipR32F, - m, - hipR32F, - algo); - - if (status != rocblas_status_success) { - fprintf(stderr, - "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", - m, - n, - k, - (int)status); - return EXIT_FAILURE; - } - return 0; -} - -int cublas_gemm_ex(rocblas_handle handle, - rocblas_operation transa, - rocblas_operation transb, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const __half* A, - const __half* B, - __half* C, - cublasGemmAlgo_t algo) -{ - rocblas_status status = rocblas_gemmex(handle, - transa, - transb, - m, - n, - k, - (const void*)alpha, - (const void*)A, - hipR16F, - (transa == rocblas_operation_none) ? m : k, - (const void*)B, - hipR16F, - (transb == rocblas_operation_none) ? k : n, - (const void*)beta, - (void*)C, - hipR16F, - m, - hipR32F, - algo); - - if (status != rocblas_status_success) { - fprintf(stderr, - "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", - m, - n, - k, - (int)status); - return EXIT_FAILURE; - } - return 0; -} - -int cublas_strided_batched_gemm(rocblas_handle handle, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const float* A, - const float* B, - float* C, - rocblas_operation op_A, - rocblas_operation op_B, - int stride_A, - int stride_B, - int stride_C, - int batch, - cublasGemmAlgo_t algo) -{ - rocblas_status status = cublasGemmStridedBatchedEx(handle, - op_A, - op_B, - m, - n, - k, - alpha, - A, - hipR32F, - (op_A == rocblas_operation_none) ? m : k, - stride_A, - B, - hipR32F, - (op_B == rocblas_operation_none) ? k : n, - stride_B, - beta, - C, - hipR32F, - m, - stride_C, - batch, - hipR32F, - algo); - - if (status != rocblas_status_success) { - fprintf(stderr, - "!!!! kernel execution error. 
(batch: %d, m: %d, n: %d, k: %d, error: %d) \n", - batch, - m, - n, - k, - (int)status); - return EXIT_FAILURE; - } - return 0; -} - -int cublas_strided_batched_gemm(rocblas_handle handle, - int m, - int n, - int k, - const float* alpha, - const float* beta, - const __half* A, - const __half* B, - __half* C, - rocblas_operation op_A, - rocblas_operation op_B, - int stride_A, - int stride_B, - int stride_C, - int batch, - cublasGemmAlgo_t algo) -{ - rocblas_status status = cublasGemmStridedBatchedEx(handle, - op_A, - op_B, - m, - n, - k, - alpha, - A, - hipR16F, - (op_A == rocblas_operation_none) ? m : k, - stride_A, - B, - hipR16F, - (op_B == rocblas_operation_none) ? k : n, - stride_B, - beta, - C, - hipR16F, - m, - stride_C, - batch, - hipR32F, - algo); - - if (status != rocblas_status_success) { - fprintf(stderr, - "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", - m, - n, - k, - (int)status); - return EXIT_FAILURE; - } - - return 0; -} diff --git a/deepspeed/ops/csrc/transformer_bak/inference/includes/custom_cuda_layers.h b/deepspeed/ops/csrc/transformer_bak/inference/includes/custom_cuda_layers.h deleted file mode 100644 index 06b4340..0000000 --- a/deepspeed/ops/csrc/transformer_bak/inference/includes/custom_cuda_layers.h +++ /dev/null @@ -1,124 +0,0 @@ -#pragma once - -#ifdef __HIP_PLATFORM_HCC__ -#define HALF_PRECISION_AVAILABLE = 1 -#include -#else -#if __CUDA_ARCH__ >= 700 -#define HALF_PRECISION_AVAILABLE = 1 -#endif -#include -#endif - -#include -#include -#include -#include -#include -#include - -#define MAX_WARP_NUM 32 -#define WARP_SIZE 32 -#define SMs 80 - -#define MAX_REGISTERS 256 -template -void launch_attn_softmax_v2(T* vals, - T* mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - int batch_size, - int heads, - int num_seq, - int sequence_length, - float scale, - cudaStream_t stream); - -// Fused bias add with gelu activation -template -void launch_bias_gelu(T* input, - const T* bias, - int 
intermediate_size, - int batch_size, - cudaStream_t stream); -template -void launch_bias_add(T* input, const T* bias, int hidden_size, int batch_size, cudaStream_t stream); - -template -void launch_bias_residual(T* input, - T* output, - T* attn, - T* bias, - T* attn_bias, - int batch, - int hidden_dim, - int mp_size, - cudaStream_t stream); - -template -void launch_layer_norm(T* out, - T* vals, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - int hidden_dim, - cudaStream_t stream); - -template -void launch_residual_layer_norm(T* norm, - T* res_add, - T* vals, - T* residual, - const T* bias, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - int hidden_dim, - bool preLN, - bool mlp_after_attn, - cudaStream_t stream); -template -void launch_dequantize(T* output, - const int8_t* input, - const float* qscale, - unsigned output_size, - unsigned hidden_dim, - unsigned groups, - unsigned merge_count, - cudaStream_t stream); - -template -void launch_gptj_residual_add(T* input, - T* output, - T* attn, - T* bias, - T* attn_bias, - int batch, - int head_size, - int mp_size, - cudaStream_t stream); - -template -void launch_apply_rotary_pos_emb(T* mixed_query, - T* key_layer, - unsigned head_size, - unsigned seq_len, - unsigned rotary_dim, - unsigned offset, - unsigned num_heads, - unsigned batch, - bool rotate_half, - bool rotate_every_two, - cudaStream_t stream); - -template -void launch_moe_res_matmul(T* residual, - T* coef, - T* mlp_out, - int seq_len, - int hidden_dim, - cudaStream_t stream); diff --git a/deepspeed/ops/csrc/transformer_bak/inference/includes/custom_hip_layers.h b/deepspeed/ops/csrc/transformer_bak/inference/includes/custom_hip_layers.h deleted file mode 100644 index 36cab34..0000000 --- a/deepspeed/ops/csrc/transformer_bak/inference/includes/custom_hip_layers.h +++ /dev/null @@ -1,125 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
-#pragma once - -#ifdef __HIP_PLATFORM_HCC__ -#define HALF_PRECISION_AVAILABLE = 1 -#include -#else -#if __CUDA_ARCH__ >= 700 -#define HALF_PRECISION_AVAILABLE = 1 -#endif -#include -#endif - -#include -#include -#include -#include -#include -#include - -#define MAX_WARP_NUM 32 -#define WARP_SIZE 32 -#define SMs 80 - -#define MAX_REGISTERS 256 -template -void launch_attn_softmax_v2(T* vals, - T* mask, - bool triangular, - bool recompute, - bool local_attention, - int window_size, - int batch_size, - int heads, - int num_seq, - int sequence_length, - float scale, - hipStream_t stream); - -// Fused bias add with gelu activation -template -void launch_bias_gelu(T* input, - const T* bias, - int intermediate_size, - int batch_size, - hipStream_t stream); -template -void launch_bias_add(T* input, const T* bias, int hidden_size, int batch_size, hipStream_t stream); - -template -void launch_bias_residual(T* input, - T* output, - T* attn, - T* bias, - T* attn_bias, - int batch, - int hidden_dim, - int mp_size, - hipStream_t stream); - -template -void launch_layer_norm(T* out, - T* vals, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - int hidden_dim, - hipStream_t stream); - -template -void launch_residual_layer_norm(T* norm, - T* res_add, - T* vals, - T* residual, - const T* bias, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - int hidden_dim, - bool preLN, - bool mlp_after_attn, - hipStream_t stream); -template -void launch_dequantize(T* output, - const int8_t* input, - const float* qscale, - unsigned output_size, - unsigned hidden_dim, - unsigned groups, - unsigned merge_count, - hipStream_t stream); - -template -void launch_gptj_residual_add(T* input, - T* output, - T* attn, - T* bias, - T* attn_bias, - int batch, - int head_size, - int mp_size, - hipStream_t stream); - -template -void launch_apply_rotary_pos_emb(T* mixed_query, - T* key_layer, - unsigned head_size, - unsigned seq_len, - unsigned rotary_dim, - unsigned 
offset, - unsigned num_heads, - unsigned batch, - bool rotate_half, - bool rotate_every_two, - hipStream_t stream); - -template -void launch_moe_res_matmul(T* residual, - T* coef, - T* mlp_out, - int seq_len, - int hidden_dim, - hipStream_t stream); diff --git a/deepspeed/ops/csrc/transformer_bak/normalize_kernels.cu b/deepspeed/ops/csrc/transformer_bak/normalize_kernels.cu deleted file mode 100644 index d634c7f..0000000 --- a/deepspeed/ops/csrc/transformer_bak/normalize_kernels.cu +++ /dev/null @@ -1,2121 +0,0 @@ -#include "custom_cuda_layers.h" - -namespace cg = cooperative_groups; - -/* -Fused bias add, residual (elementwise) add, and normalization layer. - -For FP16, this kernel does not promote to FP32 in order to utilize the 2x throughput for -__half2 instructions, and avoid the conversion overhead (1/8 of __hal2 arithmetic). - -For specific launch constraints, see the launch functions. -*/ - -#define NORM_REG (MAX_REGISTERS / 4) - -__global__ void fused_bias_residual_layer_norm(float* vals, - const float* residual, - const float* gamma, - const float* beta, - float epsilon, - bool preLayerNorm, - bool training, - float* vars, - float* means, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id / WARP_SIZE; - - float vals_arr[NORM_REG]; - __shared__ float shr[MAX_WARP_NUM]; - - residual += (row * row_stride); - vals += (row * row_stride); - - float sum = 0.f; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - vals_arr[i] = residual[i * iteration_stride + id]; - sum += vals_arr[i]; - } - if (high_index < row_stride) { - vals_arr[iterations] = residual[high_index]; - sum += vals_arr[iterations]; - iterations++; - } - - for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); } 
- - if (g.thread_rank() == 0) shr[gid] = sum; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()]; - -#if !defined(__STOCHASTIC_MODE__) || __CUDA_ARCH__ < 700 - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - sum += g.shfl_down(sum, i); - } - - sum = g.shfl(sum, 0); - float mean = sum / row_stride; - if (training) - if (threadIdx.x == 0) means[row] = mean; - float variance = 0.f; - for (int i = 0; i < iterations; i++) { - vals_arr[i] -= mean; - variance += vals_arr[i] * vals_arr[i]; - } - - for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); } - - if (g.thread_rank() == 0) shr[gid] = variance; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - variance += g.shfl_down(variance, i); - } - variance = g.shfl(variance, 0); - variance /= row_stride; - variance += epsilon; - if (training) - if (threadIdx.x == 0) vars[row] = variance; - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - vals_arr[i] = vals_arr[i] * rsqrtf(variance); - vals_arr[i] = - vals_arr[i] * gamma[i * iteration_stride + id] + beta[i * iteration_stride + id]; - vals[i * iteration_stride + id] = vals_arr[i]; - } - if ((high_index) < row_stride) { - vals_arr[iterations] = vals_arr[iterations] * rsqrtf(variance); - vals_arr[iterations] = vals_arr[iterations] * gamma[high_index] + beta[high_index]; - vals[high_index] = vals_arr[iterations]; - } -} - -__global__ void fused_bias_residual_layer_norm(__half* vals, - const __half* residual, - const __half* gamma, - const __half* beta, - float epsilon, - bool preLayerNorm, - bool training, - __half* vars, - __half* means, - int row_stride) -{ -#ifdef HALF_PRECISION_AVAILABLE - int iteration_stride = blockDim.x; - int iterations 
= row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id >> WARP_SIZE_BITS; - - float2 vals_f[NORM_REG]; - __shared__ float shr[MAX_WARP_NUM]; - - __half2* vals_cast = reinterpret_cast<__half2*>(vals); - const __half2* residual_cast = reinterpret_cast(residual); - - residual_cast += (row * row_stride); - vals_cast += (row * row_stride); - - float sum = 0.f; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - vals_f[i] = __half22float2(residual_cast[i * iteration_stride + id]); - sum += vals_f[i].x; - sum += vals_f[i].y; - } - if ((high_index) < row_stride) { - vals_f[iterations] = __half22float2(residual_cast[high_index]); - sum += vals_f[iterations].x; - sum += vals_f[iterations].y; - iterations++; - } - - for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) shr[gid] = sum; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - sum += g.shfl_down(sum, i); - } - sum = g.shfl(sum, 0); - float mean = sum / (row_stride * 2); - - float variance = 0.f; - for (int i = 0; i < iterations; i++) { - vals_f[i].x -= mean; - vals_f[i].y -= mean; - variance += vals_f[i].x * vals_f[i].x; - variance += vals_f[i].y * vals_f[i].y; - } - - for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); } - - if (g.thread_rank() == 0) shr[gid] = variance; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - variance += g.shfl_down(variance, i); - } - variance = 
g.shfl(variance, 0); - variance /= (row_stride * 2); - variance += epsilon; - - __half2 variance_h = __float2half2_rn(variance); - const __half2* gamma_cast = reinterpret_cast(gamma); - const __half2* beta_cast = reinterpret_cast(beta); - - if (training && threadIdx.x == 0) { - vars[row] = __float2half(variance); - means[row] = __float2half(mean); - } - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - __half2 vals_arr = __float22half2_rn(vals_f[i]); - vals_arr = vals_arr * h2rsqrt(variance_h); - vals_arr = - vals_arr * gamma_cast[i * iteration_stride + id] + beta_cast[i * iteration_stride + id]; - vals_cast[i * iteration_stride + id] = vals_arr; - } - if ((high_index) < row_stride) { - __half2 vals_arr = __float22half2_rn(vals_f[iterations]); - vals_arr = vals_arr * h2rsqrt(variance_h); - vals_arr = vals_arr * gamma_cast[high_index] + beta_cast[high_index]; - vals_cast[high_index] = vals_arr; - } -#endif -} - -template -void launch_bias_residual_layer_norm(T* vals, - const T* residual, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - int hidden_dim, - cudaStream_t stream, - bool preLayerNorm, - bool training, - T* vars, - T* means); - -template <> -void launch_bias_residual_layer_norm(float* vals, - const float* residual, - const float* gamma, - const float* beta, - float epsilon, - int batch_size, - int hidden_dim, - cudaStream_t stream, - bool preLayerNorm, - bool training, - float* vars, - float* means) -{ - int threads = THREADS; - - dim3 grid_dim(batch_size); - - if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 1; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 2; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim(threads); - - fused_bias_residual_layer_norm<<>>( - vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, means, hidden_dim); -} - -template <> -void 
launch_bias_residual_layer_norm<__half>(__half* vals, - const __half* residual, - const __half* gamma, - const __half* beta, - float epsilon, - int batch_size, - int hidden_dim, - cudaStream_t stream, - bool preLayerNorm, - bool training, - __half* vars, - __half* means) -{ - int threads = 128; - - dim3 grid_dim(batch_size); - - if (hidden_dim > 8192 && hidden_dim <= 16384) - threads <<= 1; - else if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 2; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 3; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim(threads); - - fused_bias_residual_layer_norm<<>>( - vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, means, hidden_dim / 2); -} - -__global__ void fused_bias_residual_layer_norm(float* vals, - const float* residual, - const float* gamma, - const float* beta, - float epsilon, - bool preLayerNorm, - bool training, - float* vars, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id / 32; - - float vals_arr[NORM_REG]; - __shared__ float shr[MAX_WARP_NUM]; - - residual += (row * row_stride); - vals += (row * row_stride); - - float sum = 0.f; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - vals_arr[i] = residual[i * iteration_stride + id]; - sum += vals_arr[i]; - } - if ((high_index) < row_stride) { - vals_arr[iterations] = residual[high_index]; - sum += vals_arr[iterations]; - iterations++; - } - - for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) shr[gid] = sum; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()]; - -#if 
!defined(__STOCHASTIC_MODE__) || __CUDA_ARCH__ < 700 - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - sum += g.shfl_down(sum, i); - } - - sum = g.shfl(sum, 0); - float mean = sum / row_stride; - float variance = 0.f; - for (int i = 0; i < iterations; i++) { - vals_arr[i] -= mean; - variance += vals_arr[i] * vals_arr[i]; - } - - for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); } - - if (g.thread_rank() == 0) shr[gid] = variance; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - variance += g.shfl_down(variance, i); - } - variance = g.shfl(variance, 0); - variance /= row_stride; - variance += epsilon; - if (training) - if (threadIdx.x == 0) vars[row] = variance; - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - vals_arr[i] = vals_arr[i] * rsqrtf(variance); - vals_arr[i] = - vals_arr[i] * gamma[i * iteration_stride + id] + beta[i * iteration_stride + id]; - vals[i * iteration_stride + id] = vals_arr[i]; - } - if ((high_index) < row_stride) { - vals_arr[iterations] = vals_arr[iterations] * rsqrtf(variance); - vals_arr[iterations] = vals_arr[iterations] * gamma[high_index] + beta[high_index]; - vals[high_index] = vals_arr[iterations]; - } -} - -__global__ void fused_bias_residual_layer_norm(__half* vals, - const __half* residual, - const __half* gamma, - const __half* beta, - float epsilon, - bool preLayerNorm, - bool training, - __half* vars, - int row_stride) -{ -#ifdef HALF_PRECISION_AVAILABLE - - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id >> WARP_SIZE_BITS; - - 
float2 vals_f[NORM_REG]; - __shared__ float shr[MAX_WARP_NUM]; - - __half2* vals_cast = reinterpret_cast<__half2*>(vals); - const __half2* residual_cast = reinterpret_cast(residual); - - residual_cast += (row * row_stride); - vals_cast += (row * row_stride); - - float sum = 0.f; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - vals_f[i] = __half22float2(residual_cast[i * iteration_stride + id]); - sum += vals_f[i].x; - sum += vals_f[i].y; - } - if ((high_index) < row_stride) { - vals_f[iterations] = __half22float2(residual_cast[high_index]); - sum += vals_f[iterations].x; - sum += vals_f[iterations].y; - iterations++; - } - - for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) shr[gid] = sum; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - sum += g.shfl_down(sum, i); - } - sum = g.shfl(sum, 0); - float mean = sum / (row_stride * 2); - - float variance = 0.f; - for (int i = 0; i < iterations; i++) { - vals_f[i].x -= mean; - vals_f[i].y -= mean; - variance += vals_f[i].x * vals_f[i].x; - variance += vals_f[i].y * vals_f[i].y; - } - - for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); } - - if (g.thread_rank() == 0) shr[gid] = variance; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - variance += g.shfl_down(variance, i); - } - variance = g.shfl(variance, 0); - variance /= (row_stride * 2); - variance += epsilon; - - __half2 variance_h = __float2half2_rn(variance); - const __half2* gamma_cast = reinterpret_cast(gamma); - const __half2* beta_cast = reinterpret_cast(beta); - - if 
(training && threadIdx.x == 0) vars[row] = __float2half(variance); - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - __half2 vals_arr = __float22half2_rn(vals_f[i]); - vals_arr = vals_arr * h2rsqrt(variance_h); - vals_arr = - vals_arr * gamma_cast[i * iteration_stride + id] + beta_cast[i * iteration_stride + id]; - vals_cast[i * iteration_stride + id] = vals_arr; - } - if ((high_index) < row_stride) { - __half2 vals_arr = __float22half2_rn(vals_f[iterations]); - vals_arr = vals_arr * h2rsqrt(variance_h); - vals_arr = vals_arr * gamma_cast[high_index] + beta_cast[high_index]; - vals_cast[high_index] = vals_arr; - } -#endif -} - -template -void launch_bias_residual_layer_norm(T* vals, - const T* residual, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - int hidden_dim, - cudaStream_t stream, - bool preLayerNorm, - bool training, - T* vars); - -/* -To tune this launch the following restrictions must be met: - -For float: -row_stride == hidden_size -threads * iterations == row_stride -threads is in [32, 64, 128, 256, 512, 1024] - -For half: -row_stride == hidden_size / 2 -threads * iterations == row_stride -threads is in [32, 64, 128, 256, 512, 1024] - -*/ - -template <> -void launch_bias_residual_layer_norm(float* vals, - const float* residual, - const float* gamma, - const float* beta, - float epsilon, - int batch_size, - int hidden_dim, - cudaStream_t stream, - bool preLayerNorm, - bool training, - float* vars) -{ - int threads = THREADS; - - dim3 grid_dim(batch_size); - - // There are some limitations to call below functions, now just enumerate the situations. 
- - if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 1; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 2; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim(threads); - - fused_bias_residual_layer_norm<<>>( - vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, hidden_dim); -} - -template <> -void launch_bias_residual_layer_norm<__half>(__half* vals, - const __half* residual, - const __half* gamma, - const __half* beta, - float epsilon, - int batch_size, - int hidden_dim, - cudaStream_t stream, - bool preLayerNorm, - bool training, - __half* vars) -{ - int threads = 128; - - dim3 grid_dim(batch_size); - - // There are some limitations to call below functions, now just enumerate the situations. - - if (hidden_dim > 8192 && hidden_dim <= 16384) - threads <<= 1; - else if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 2; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 3; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim(threads); - fused_bias_residual_layer_norm<<>>( - vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, hidden_dim / 2); -} - -/* Normalize Gamma & Betta gradients - * Compute gradients using either X_hat or - * normalize input (invertible). - * Combine transpose with gradients computation. 
- */ - -template -__global__ void LayerNormBackward1(const T* __restrict__ out_grad, - const T* __restrict__ vals_hat, - const T* __restrict__ gamma, - const T* __restrict__ betta, - T* __restrict__ gamma_grad, - T* __restrict__ betta_grad, - int rows, - int width, - bool invertible) -{ - __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1]; - __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1]; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int idx = blockDim.x * blockIdx.x + threadIdx.x; - int offset = threadIdx.y * width + idx; - int y_stride = width * TILE_DIM; - - float betta_reg = (invertible ? (float)betta[idx] : 0.0f); - float gamma_reg = (float)gamma[idx]; - - // Loop across matrix height - float betta_tmp = 0; - float gamma_tmp = 0; - for (int r = threadIdx.y; r < rows; r += TILE_DIM) { - float grad = (float)out_grad[offset]; - float val = (invertible ? ((float)vals_hat[offset] - betta_reg) / gamma_reg - : (float)vals_hat[offset]); - betta_tmp += grad; - gamma_tmp += (val * grad); - - offset += y_stride; - } - - betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp; - gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp; - - __syncthreads(); - - // Sum the shared buffer. - float s1 = betta_buffer[threadIdx.y][threadIdx.x]; - float s2 = gamma_buffer[threadIdx.y][threadIdx.x]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < TILE_DIM; i <<= 1) { - s1 += g.shfl_down(s1, i); - s2 += g.shfl_down(s2, i); - } - - if (threadIdx.x == 0) { - int pos = blockIdx.x * TILE_DIM + threadIdx.y; - betta_grad[pos] = s1; - gamma_grad[pos] = s2; - } -} - -/* Normalize Gamma & Betta gradients - * Compute gradients using the input to - * the normalize. - * Combine transpose with gradients computation. 
- */ - -template -__global__ void LayerNormBackward1(const T* __restrict__ out_grad, - const T* __restrict__ X_data, - const T* __restrict__ vars, - const T* __restrict__ means, - T* __restrict__ gamma_grad, - T* __restrict__ betta_grad, - int rows, - int width) -{ - __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1]; - __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1]; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int idx = blockDim.x * blockIdx.x + threadIdx.x; - int offset = threadIdx.y * width + idx; - int y_stride = width * TILE_DIM; - - int pos = blockIdx.x * TILE_DIM + threadIdx.y; - // Loop across matrix height - - float betta_tmp = 0; - float gamma_tmp = 0; - for (int r = threadIdx.y; r < rows; r += TILE_DIM) { - float grad = (float)out_grad[offset]; - float val = (float)X_data[offset]; - val = (val - (float)means[r]) * rsqrtf((float)vars[r]); - betta_tmp += grad; - gamma_tmp += (val * grad); - - offset += y_stride; - } - - betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp; - gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp; - - __syncthreads(); - - // Sum the shared buffer. - float s1 = betta_buffer[threadIdx.y][threadIdx.x]; - float s2 = gamma_buffer[threadIdx.y][threadIdx.x]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < TILE_DIM; i <<= 1) { - s1 += g.shfl_down(s1, i); - s2 += g.shfl_down(s2, i); - } - - if (threadIdx.x == 0) { - betta_grad[pos] = s1; - gamma_grad[pos] = s2; - } -} -/* - -/* Backward Normalize (Input-Gradient) - * Using the means and variances from the input - * This type of backward is invertible! - * We do the backward using the X_hat (X - u) / sqrt(variance) or the output of Normalization. 
- */ - -__global__ void LayerNormBackward2(const float* out_grad, - const float* vals_hat, - const float* gamma, - const float* betta, - const float* vars, - float* inp_grad, - bool invertible, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int wid = id / WARP_SIZE; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - __shared__ float partialSum[MAX_WARP_NUM]; - - out_grad += (row * row_stride); - vals_hat += (row * row_stride); - inp_grad += (row * row_stride); - - float vals_arr[NORM_REG]; - float vals_hat_arr[NORM_REG]; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - float gamma_reg = gamma[i * iteration_stride + id]; - vals_arr[i] = out_grad[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; - vals_hat_arr[i] = - (invertible ? (vals_hat[i * iteration_stride + id] - betta[i * iteration_stride + id]) / - gamma_reg - : vals_hat[i * iteration_stride + id]); - } - if ((high_index) < row_stride) { - float gamma_reg = gamma[high_index]; - vals_arr[iterations] = out_grad[high_index]; - vals_arr[iterations] *= gamma_reg; - vals_hat_arr[iterations] = - (invertible ? 
(vals_hat[high_index] - betta[high_index]) / gamma_reg - : vals_hat[high_index]); - iterations++; - } - - float var_reg = vars[row]; - - float sum = 0; - for (int i = 0; i < iterations; i++) { - sum += vals_hat_arr[i] * vals_arr[i] * - sqrtf(var_reg); // dval_hat = gamma * (x - u) * out_grad - vals_arr[i] *= rsqrtf(var_reg); // dvar_inv = gamma * out_grad / sqrt(var) - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= row_stride; - - for (int i = 0; i < iterations; i++) { vals_arr[i] += ((-sum * vals_hat_arr[i]) / var_reg); } - - sum = 0; - for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= row_stride; - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) inp_grad[i * iteration_stride + id] = (vals_arr[i] - sum); - if ((high_index) < row_stride) inp_grad[high_index] = (vals_arr[iterations] - sum); -} - -__global__ void LayerNormBackward2(const __half* out_grad, - const __half* vals_hat, - const __half* gamma, - const __half* betta, - const __half* vars, - __half* inp_grad, - bool invertible, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = 
blockIdx.x; - int id = threadIdx.x; - int wid = id / WARP_SIZE; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - __shared__ float partialSum[MAX_WARP_NUM]; - - __half2 vals_arr[NORM_REG]; - float2 vals_arr_f[NORM_REG]; - __half2 vals_hat_arr[NORM_REG]; - - __half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad); - const __half2* out_grad_h = reinterpret_cast(out_grad); - const __half2* vals_hat_h = reinterpret_cast(vals_hat); - - inp_grad_h += (row * row_stride); - out_grad_h += (row * row_stride); - vals_hat_h += (row * row_stride); - - const __half2* gamma_h = reinterpret_cast(gamma); - const __half2* betta_h = (invertible ? reinterpret_cast(betta) : nullptr); - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - __half2 gamma_reg = gamma_h[i * iteration_stride + id]; - vals_arr[i] = out_grad_h[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; - vals_hat_arr[i] = - (invertible - ? (vals_hat_h[i * iteration_stride + id] - betta_h[i * iteration_stride + id]) / - gamma_reg - : vals_hat_h[i * iteration_stride + id]); - } - if ((high_index) < row_stride) { - __half2 gamma_reg = gamma_h[high_index]; - vals_arr[iterations] = out_grad_h[high_index]; - vals_arr[iterations] *= gamma_reg; - vals_hat_arr[iterations] = - (invertible ? 
(vals_hat_h[high_index] - betta_h[high_index]) / gamma_reg - : vals_hat_h[high_index]); - iterations++; - } - __half var_h = vars[row]; - __half2 var_reg = __halves2half2(var_h, var_h); - - float sum = 0.f; - for (int i = 0; i < iterations; i++) { - __half2 result_h = (vals_hat_arr[i] * vals_arr[i] * h2sqrt(var_reg)); - float2 result_f = __half22float2(result_h); - sum += result_f.x; - sum += result_f.y; - vals_arr[i] *= h2rsqrt(var_reg); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - __half2 sum_h = __float2half2_rn(sum); - - for (int i = 0; i < iterations; i++) { - __half2 temp = ((-sum_h * vals_hat_arr[i]) / (var_reg)); - vals_arr_f[i] = __half22float2(vals_arr[i]); - float2 temp_f = __half22float2(temp); - vals_arr_f[i].x += temp_f.x; - vals_arr_f[i].y += temp_f.y; - } - sum = 0.f; - - for (int i = 0; i < iterations; i++) { - sum += (vals_arr_f[i].x); - sum += (vals_arr_f[i].y); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - vals_arr_f[i].x -= sum; - vals_arr_f[i].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[i]); - - inp_grad_h[i * iteration_stride + id] = temp; - } - if ((high_index) < row_stride) { - vals_arr_f[iterations].x -= sum; - 
vals_arr_f[iterations].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[iterations]); - - inp_grad_h[high_index] = temp; - } -} - -template <> -void launch_layerNorm_backward(const float* out_grad, - const float* vals_hat, - const float* vars, - const float* gamma, - float* gamma_grad, - float* betta_grad, - float* inp_grad, - int batch, - int hidden_dim, - cudaStream_t stream[2], - bool invertible, - const float* betta) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - LayerNormBackward1<<>>( - out_grad, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible); - - dim3 grid_dim2(batch); - - if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 1; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 2; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads); - - LayerNormBackward2<<>>( - out_grad, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim); -} - -template <> -void launch_layerNorm_backward<__half>(const __half* out_grad, - const __half* vals_hat, - const __half* vars, - const __half* gamma, - __half* gamma_grad, - __half* betta_grad, - __half* inp_grad, - int batch, - int hidden_dim, - cudaStream_t stream[2], - bool invertible, - const __half* betta) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - // LayerNormBackward1<__half><<>>( - // out_grad, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible); - - dim3 grid_dim2(batch); - - if (hidden_dim > 8192 && hidden_dim <= 16384) - threads <<= 1; - else if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 2; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 3; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads / 2); - - LayerNormBackward2<<>>( - out_grad, 
vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim / 2); -} - -/* Backward Normalize (Input-Gradient) - * Using the means and variances from the input - * This type of backward is not invertible! - * We do the backward using the input (X) - */ - -__global__ void LayerNormBackward2(const float* out_grad, - const float* X_vals, - const float* gamma, - const float* vars, - const float* means, - float* inp_grad, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int wid = id >> WARP_SIZE_BITS; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - __shared__ float partialSum[MAX_WARP_NUM]; - - out_grad += (row * row_stride); - X_vals += (row * row_stride); - inp_grad += (row * row_stride); - - float vals_arr[NORM_REG]; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - float gamma_reg = gamma[i * iteration_stride + id]; - vals_arr[i] = out_grad[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; - } - if ((high_index) < row_stride) { - float gamma_reg = gamma[high_index]; - vals_arr[iterations] = out_grad[high_index]; - vals_arr[iterations] *= gamma_reg; - iterations++; - } - - float var_reg = vars[row]; - float mean_reg = means[row]; - - float sum = 0; - float xu[NORM_REG]; - for (int i = 0; i < iterations; i++) { - xu[i] = (X_vals[i * iteration_stride + id] - mean_reg); - sum += vals_arr[i] * xu[i]; - vals_arr[i] *= rsqrtf(var_reg); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - 
sum = g.shfl(sum, 0); - sum /= row_stride; - - for (int i = 0; i < iterations; i++) { - vals_arr[i] += (-sum * xu[i] * rsqrtf(var_reg) / (var_reg)); - } - - sum = 0; - for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= row_stride; - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) inp_grad[i * iteration_stride + id] = (vals_arr[i] - sum); - if ((high_index) < row_stride) inp_grad[high_index] = (vals_arr[iterations] - sum); -} - -__global__ void LayerNormBackward2(const __half* out_grad, - const __half* X_vals, - const __half* gamma, - const __half* vars, - const __half* means, - __half* inp_grad, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int wid = id >> WARP_SIZE_BITS; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - - __shared__ float partialSum[MAX_WARP_NUM]; - - __half2 vals_arr[NORM_REG]; - float2 vals_arr_f[NORM_REG]; - __half2 xu[NORM_REG]; - - __half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad); - const __half2* out_grad_h = reinterpret_cast(out_grad); - const __half2* vals_hat_h = reinterpret_cast(X_vals); - - inp_grad_h += (row * row_stride); - out_grad_h += (row * row_stride); - vals_hat_h += (row * row_stride); - - const __half2* gamma_h = reinterpret_cast(gamma); - int high_index = iterations * iteration_stride + id; - - __half mean_h = means[row]; - __half2 mean_reg = __halves2half2(mean_h, mean_h); 
-#pragma unroll - for (int i = 0; i < iterations; i++) { - __half2 gamma_reg = gamma_h[i * iteration_stride + id]; - vals_arr[i] = out_grad_h[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; // out_grad * gamma - xu[i] = (vals_hat_h[i * iteration_stride + id] - mean_reg); - } - if ((high_index) < row_stride) { - __half2 gamma_reg = gamma_h[high_index]; - vals_arr[iterations] = out_grad_h[high_index]; - vals_arr[iterations] *= gamma_reg; // out_grad * gamma - xu[iterations] = (vals_hat_h[high_index] - mean_reg); - iterations++; - } - __half var_h = vars[row]; - __half2 var_reg = __halves2half2(var_h, var_h); - - float sum = 0.f; - for (int i = 0; i < iterations; i++) { - __half2 result_h = (xu[i] * vals_arr[i]); - float2 result_f = __half22float2(result_h); - sum += result_f.x; - sum += result_f.y; - vals_arr[i] *= h2rsqrt(var_reg); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - __half2 sum_h = __float2half2_rn(sum); - - for (int i = 0; i < iterations; i++) { - __half2 xu_grad = ((-sum_h * xu[i] * h2rsqrt(var_reg)) / (var_reg)); - vals_arr_f[i] = __half22float2(vals_arr[i]); - float2 xu_grad_f = __half22float2(xu_grad); - vals_arr_f[i].x += xu_grad_f.x; - vals_arr_f[i].y += xu_grad_f.y; - } - - sum = 0.f; - for (int i = 0; i < iterations; i++) { - sum += (vals_arr_f[i].x); - sum += (vals_arr_f[i].y); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < 
warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - vals_arr_f[i].x -= sum; - vals_arr_f[i].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[i]); - inp_grad_h[i * iteration_stride + id] = temp; - } - if ((high_index) < row_stride) { - vals_arr_f[iterations].x -= sum; - vals_arr_f[iterations].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[iterations]); - inp_grad_h[high_index] = temp; - } -} - -template <> -void launch_layerNorm_backward(const float* out_grad, - const float* X_data, - const float* vars, - const float* means, - const float* gamma, - float* gamma_grad, - float* betta_grad, - float* inp_grad, - int batch, - int hidden_dim, - cudaStream_t stream[2]) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - LayerNormBackward1<<>>( - out_grad, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim); - - dim3 grid_dim2(batch); - - if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 1; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 2; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads); - LayerNormBackward2<<>>( - out_grad, X_data, gamma, vars, means, inp_grad, hidden_dim); -} - -template <> -void launch_layerNorm_backward<__half>(const __half* out_grad, - const __half* X_data, - const __half* vars, - const __half* means, - const __half* gamma, - __half* gamma_grad, - __half* betta_grad, - __half* inp_grad, - int batch, - int hidden_dim, - cudaStream_t stream[2]) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - LayerNormBackward1<__half><<>>( - out_grad, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim); - - dim3 grid_dim2(batch); - - if (hidden_dim > 8192 && hidden_dim <= 16384) 
- threads <<= 1; - else if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 2; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 3; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads / 2); - LayerNormBackward2<<>>( - out_grad, X_data, gamma, vars, means, inp_grad, hidden_dim / 2); -} - -template -__global__ void LayerNormBackward1_fused_add(const T* __restrict__ out_grad1, - const T* __restrict__ out_grad2, - const T* __restrict__ vals_hat, - const T* __restrict__ gamma, - const T* __restrict__ betta, - T* __restrict__ gamma_grad, - T* __restrict__ betta_grad, - int rows, - int width, - bool invertible) -{ - __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1]; - __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1]; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int idx = blockDim.x * blockIdx.x + threadIdx.x; - int offset = threadIdx.y * width + idx; - int y_stride = width * TILE_DIM; - - float betta_reg = (invertible ? (float)betta[idx] : 0.0f); - float gamma_reg = (float)gamma[idx]; - - // Loop across matrix height - float betta_tmp = 0; - float gamma_tmp = 0; - for (int r = threadIdx.y; r < rows; r += TILE_DIM) { - float grad = (float)out_grad1[offset] + (float)out_grad2[offset]; - float val = (invertible ? ((float)vals_hat[offset] - betta_reg) / gamma_reg - : (float)vals_hat[offset]); - betta_tmp += grad; - gamma_tmp += (val * grad); - - offset += y_stride; - } - - betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp; - gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp; - - __syncthreads(); - - // Sum the shared buffer. 
- float s1 = betta_buffer[threadIdx.y][threadIdx.x]; - float s2 = gamma_buffer[threadIdx.y][threadIdx.x]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < TILE_DIM; i <<= 1) { - s1 += g.shfl_down(s1, i); - s2 += g.shfl_down(s2, i); - } - - if (threadIdx.x == 0) { - int pos = blockIdx.x * TILE_DIM + threadIdx.y; - betta_grad[pos] = s1; - gamma_grad[pos] = s2; - } -} - -template -__global__ void LayerNormBackward1_fused_add(const T* __restrict__ out_grad1, - const T* __restrict__ out_grad2, - const T* __restrict__ X_data, - const T* __restrict__ vars, - const T* __restrict__ means, - T* __restrict__ gamma_grad, - T* __restrict__ betta_grad, - int rows, - int width) -{ - __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1]; - __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1]; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int idx = blockDim.x * blockIdx.x + threadIdx.x; - int offset = threadIdx.y * width + idx; - int y_stride = width * TILE_DIM; - - int pos = blockIdx.x * TILE_DIM + threadIdx.y; - // Loop across matrix height - - float betta_tmp = 0; - float gamma_tmp = 0; - for (int r = threadIdx.y; r < rows; r += TILE_DIM) { - float grad = (float)out_grad1[offset] + (float)out_grad2[offset]; - float val = (float)X_data[offset]; - val = (val - (float)means[r]) * rsqrtf((float)vars[r]); - betta_tmp += grad; - gamma_tmp += (val * grad); - - offset += y_stride; - } - - betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp; - gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp; - - __syncthreads(); - - // Sum the shared buffer. 
- float s1 = betta_buffer[threadIdx.y][threadIdx.x]; - float s2 = gamma_buffer[threadIdx.y][threadIdx.x]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < TILE_DIM; i <<= 1) { - s1 += g.shfl_down(s1, i); - s2 += g.shfl_down(s2, i); - } - - if (threadIdx.x == 0) { - betta_grad[pos] = s1; - gamma_grad[pos] = s2; - } -} - -__global__ void LayerNormBackward2_fused_add(const float* out_grad1, - const float* out_grad2, - const float* vals_hat, - const float* gamma, - const float* betta, - const float* vars, - float* inp_grad, - bool invertible, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int wid = id / WARP_SIZE; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - __shared__ float partialSum[MAX_WARP_NUM]; - - out_grad1 += (row * row_stride); - out_grad2 += (row * row_stride); - vals_hat += (row * row_stride); - inp_grad += (row * row_stride); - - float vals_arr[NORM_REG]; - float vals_hat_arr[NORM_REG]; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - float gamma_reg = gamma[i * iteration_stride + id]; - vals_arr[i] = out_grad1[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; - vals_hat_arr[i] = - (invertible ? (vals_hat[i * iteration_stride + id] - betta[i * iteration_stride + id]) / - gamma_reg - : vals_hat[i * iteration_stride + id]); - } - if ((high_index) < row_stride) { - float gamma_reg = gamma[high_index]; - vals_arr[iterations] = out_grad1[high_index]; - vals_arr[iterations] *= gamma_reg; - vals_hat_arr[iterations] = - (invertible ? 
(vals_hat[high_index] - betta[high_index]) / gamma_reg - : vals_hat[high_index]); - iterations++; - } - - float var_reg = vars[row]; - - float sum = 0; - for (int i = 0; i < iterations; i++) { - sum += vals_hat_arr[i] * vals_arr[i] * sqrtf(var_reg); - vals_arr[i] *= rsqrtf(var_reg); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= row_stride; - - for (int i = 0; i < iterations; i++) { vals_arr[i] += ((-sum * vals_hat_arr[i]) / var_reg); } - - sum = 0; - for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= row_stride; - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) - inp_grad[i * iteration_stride + id] = - (vals_arr[i] - sum) + out_grad2[i * iteration_stride + id]; - if ((high_index) < row_stride) - inp_grad[high_index] = (vals_arr[iterations] - sum) + out_grad2[high_index]; -} - -__global__ void LayerNormBackward2_fused_add(const __half* out_grad1, - const __half* out_grad2, - const __half* vals_hat, - const __half* gamma, - const __half* betta, - const __half* vars, - __half* inp_grad, - bool invertible, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - 
- int row = blockIdx.x; - int id = threadIdx.x; - int wid = id / WARP_SIZE; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - __shared__ float partialSum[MAX_WARP_NUM]; - - __half2 vals_arr[NORM_REG]; - float2 vals_arr_f[NORM_REG]; - __half2 vals_hat_arr[NORM_REG]; - - // float2 result[iterations]; - - __half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad); - const __half2* out_grad_h1 = reinterpret_cast(out_grad1); - const __half2* out_grad_h2 = reinterpret_cast(out_grad2); - const __half2* vals_hat_h = reinterpret_cast(vals_hat); - - inp_grad_h += (row * row_stride); - out_grad_h1 += (row * row_stride); - out_grad_h2 += (row * row_stride); - vals_hat_h += (row * row_stride); - - const __half2* gamma_h = reinterpret_cast(gamma); - const __half2* betta_h = (invertible ? reinterpret_cast(betta) : nullptr); - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - __half2 gamma_reg = gamma_h[i * iteration_stride + id]; - vals_arr[i] = out_grad_h1[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; // out_grad * gamma - vals_hat_arr[i] = - (invertible - ? (vals_hat_h[i * iteration_stride + id] - betta_h[i * iteration_stride + id]) / - gamma_reg - : vals_hat_h[i * iteration_stride + id]); - } - if ((high_index) < row_stride) { - __half2 gamma_reg = gamma_h[high_index]; - vals_arr[iterations] = out_grad_h1[high_index]; - vals_arr[iterations] *= gamma_reg; // out_grad * gamma - vals_hat_arr[iterations] = - (invertible ? 
(vals_hat_h[high_index] - betta_h[high_index]) / gamma_reg - : vals_hat_h[high_index]); - iterations++; - } - __half var_h = vars[row]; - __half2 var_reg = __halves2half2(var_h, var_h); - - float sum = 0.f; - for (int i = 0; i < iterations; i++) { - __half2 result_h = (vals_hat_arr[i] * vals_arr[i] * h2sqrt(var_reg)); - float2 result_f = __half22float2(result_h); - sum += result_f.x; - sum += result_f.y; - vals_arr[i] *= h2rsqrt(var_reg); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - __half2 sum_h = __float2half2_rn(sum); - - for (int i = 0; i < iterations; i++) { - __half2 temp = ((-sum_h * vals_hat_arr[i]) / (var_reg)); - vals_arr_f[i] = __half22float2(vals_arr[i]); - float2 temp_f = __half22float2(temp); - vals_arr_f[i].x += temp_f.x; - vals_arr_f[i].y += temp_f.y; - } - sum = 0.f; - for (int i = 0; i < iterations; i++) { - sum += (vals_arr_f[i].x); - sum += (vals_arr_f[i].y); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - vals_arr_f[i].x -= sum; - vals_arr_f[i].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[i]); - - inp_grad_h[i * iteration_stride + id] = temp + out_grad_h2[i * iteration_stride + id]; - } - if ((high_index) < row_stride) { - 
vals_arr_f[iterations].x -= sum; - vals_arr_f[iterations].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[iterations]); - - inp_grad_h[high_index] = temp + out_grad_h2[high_index]; - } -} - -template <> -void launch_layerNorm_backward_fused_add(const float* out_grad1, - const float* out_grad2, - const float* vals_hat, - const float* vars, - const float* gamma, - float* gamma_grad, - float* betta_grad, - float* inp_grad, - int batch, - int hidden_dim, - cudaStream_t stream[2], - bool invertible, - const float* betta) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - LayerNormBackward1<<>>( - out_grad1, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible); - - dim3 grid_dim2(batch); - - if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 1; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 2; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads); - LayerNormBackward2_fused_add<<>>( - out_grad1, out_grad2, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim); -} - -template <> -void launch_layerNorm_backward_fused_add<__half>(const __half* out_grad1, - const __half* out_grad2, - const __half* vals_hat, - const __half* vars, - const __half* gamma, - __half* gamma_grad, - __half* betta_grad, - __half* inp_grad, - int batch, - int hidden_dim, - cudaStream_t stream[2], - bool invertible, - const __half* betta) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - LayerNormBackward1<__half><<>>( - out_grad1, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible); - - dim3 grid_dim2(batch); - - if (hidden_dim > 8192 && hidden_dim <= 16384) - threads <<= 1; - else if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 2; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 3; - else if 
(hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads / 2); - LayerNormBackward2_fused_add<<>>( - out_grad1, out_grad2, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim / 2); -} - -/* Backward Normalize (Input-Gradient) - * Using the means and variances from the input - * This type of backward is not invertible! - * We do the backward using the input (X) - */ - -__global__ void LayerNormBackward2_fused_add(const float* out_grad1, - const float* out_grad2, - const float* X_vals, - const float* gamma, - const float* vars, - const float* means, - float* inp_grad, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int wid = id / WARP_SIZE; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - __shared__ float partialSum[MAX_WARP_NUM]; - - float vals_arr[NORM_REG]; - float vals_hat_arr[NORM_REG]; - - out_grad1 += (row * row_stride); - out_grad2 += (row * row_stride); - X_vals += (row * row_stride); - inp_grad += (row * row_stride); - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - float gamma_reg = gamma[i * iteration_stride + id]; - vals_arr[i] = out_grad1[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; - vals_hat_arr[i] = X_vals[i * iteration_stride + id]; - } - if ((high_index) < row_stride) { - float gamma_reg = gamma[high_index]; - vals_arr[iterations] = out_grad1[high_index]; - vals_arr[iterations] *= gamma_reg; - vals_hat_arr[iterations] = X_vals[high_index]; - iterations++; - } - - float var_reg = vars[row]; - float mean_reg = means[row]; - - float sum = 0; - float xu[NORM_REG]; - for (int i = 0; i < iterations; i++) { - xu[i] = (vals_hat_arr[i] - mean_reg); - sum += vals_arr[i] * xu[i]; - vals_arr[i] *= rsqrtf(var_reg); - 
} - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= row_stride; - - for (int i = 0; i < iterations; i++) { - vals_arr[i] += (-sum * xu[i] * rsqrtf(var_reg) / (var_reg)); - } - - sum = 0; - for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= row_stride; - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) - inp_grad[i * iteration_stride + id] = - (vals_arr[i] - sum) + out_grad2[i * iteration_stride + id]; - if ((high_index) < row_stride) - inp_grad[high_index] = (vals_arr[iterations] - sum) + out_grad2[high_index]; -} - -__global__ void LayerNormBackward2_fused_add(const __half* out_grad1, - const __half* out_grad2, - const __half* X_vals, - const __half* gamma, - const __half* vars, - const __half* means, - __half* inp_grad, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int wid = id / WARP_SIZE; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - - __shared__ float partialSum[MAX_WARP_NUM]; - - __half2 vals_arr[NORM_REG]; - float2 vals_arr_f[NORM_REG]; - __half2 vals_hat_arr[NORM_REG]; - - __half2* 
inp_grad_h = reinterpret_cast<__half2*>(inp_grad); - const __half2* out_grad_h1 = reinterpret_cast(out_grad1); - const __half2* out_grad_h2 = reinterpret_cast(out_grad2); - const __half2* vals_hat_h = reinterpret_cast(X_vals); - - out_grad_h1 += (row * row_stride); - out_grad_h2 += (row * row_stride); - inp_grad_h += (row * row_stride); - vals_hat_h += (row * row_stride); - - const __half2* gamma_h = reinterpret_cast(gamma); - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - __half2 gamma_reg = gamma_h[i * iteration_stride + id]; - vals_arr[i] = out_grad_h1[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; // out_grad * gamma - vals_hat_arr[i] = vals_hat_h[i * iteration_stride + id]; - } - if ((high_index) < row_stride) { - __half2 gamma_reg = gamma_h[high_index]; - vals_arr[iterations] = out_grad_h1[high_index]; - vals_arr[iterations] *= gamma_reg; // out_grad * gamma - vals_hat_arr[iterations] = vals_hat_h[high_index]; - iterations++; - } - - __half mean_h = means[row]; - __half var_h = vars[row]; - __half2 var_reg = __halves2half2(var_h, var_h); - __half2 mean_reg = __halves2half2(mean_h, mean_h); - __half2 xu[NORM_REG]; - - float sum = 0.f; - for (int i = 0; i < iterations; i++) { - xu[i] = (vals_hat_arr[i] - mean_reg); - __half2 result_h = (xu[i] * vals_arr[i]); - float2 result_f = __half22float2(result_h); - sum += result_f.x; - sum += result_f.y; - vals_arr[i] *= h2rsqrt(var_reg); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - __half2 sum_h = __float2half2_rn(sum); - - for (int i = 0; i < iterations; i++) { - __half2 xu_grad = ((-sum_h * 
xu[i] * h2rsqrt(var_reg)) / (var_reg)); - vals_arr_f[i] = __half22float2(vals_arr[i]); - float2 xu_grad_f = __half22float2(xu_grad); - vals_arr_f[i].x += xu_grad_f.x; - vals_arr_f[i].y += xu_grad_f.y; - } - - sum = 0.f; - for (int i = 0; i < iterations; i++) { - sum += (vals_arr_f[i].x); - sum += (vals_arr_f[i].y); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - vals_arr_f[i].x -= sum; - vals_arr_f[i].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[i]); - inp_grad_h[i * iteration_stride + id] = temp + out_grad_h2[i * iteration_stride + id]; - } - if ((high_index) < row_stride) { - vals_arr_f[iterations].x -= sum; - vals_arr_f[iterations].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[iterations]); - inp_grad_h[high_index] = temp + out_grad_h2[high_index]; - } -} - -template <> -void launch_layerNorm_backward_fused_add(const float* out_grad1, - const float* out_grad2, - const float* X_data, - const float* vars, - const float* means, - const float* gamma, - float* gamma_grad, - float* betta_grad, - float* inp_grad, - int batch, - int hidden_dim, - cudaStream_t stream[2]) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - LayerNormBackward1<<>>( - out_grad1, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim); - - dim3 grid_dim2(batch); - - if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 1; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 2; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport 
hidden_dim."); - - dim3 block_dim2(threads); - LayerNormBackward2_fused_add<<>>( - out_grad1, out_grad2, X_data, gamma, vars, means, inp_grad, hidden_dim); -} - -template <> -void launch_layerNorm_backward_fused_add<__half>(const __half* out_grad1, - const __half* out_grad2, - const __half* X_data, - const __half* vars, - const __half* means, - const __half* gamma, - __half* gamma_grad, - __half* betta_grad, - __half* inp_grad, - int batch, - int hidden_dim, - cudaStream_t stream[2]) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - LayerNormBackward1<__half><<>>( - out_grad1, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim); - - dim3 grid_dim2(batch); - - if (hidden_dim > 8192 && hidden_dim <= 16384) - threads <<= 1; - else if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 2; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 3; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads / 2); - LayerNormBackward2_fused_add<<>>( - out_grad1, out_grad2, X_data, gamma, vars, means, inp_grad, hidden_dim / 2); -} diff --git a/deepspeed/ops/csrc/transformer_bak/normalize_kernels.hip b/deepspeed/ops/csrc/transformer_bak/normalize_kernels.hip deleted file mode 100644 index 3d1b17c..0000000 --- a/deepspeed/ops/csrc/transformer_bak/normalize_kernels.hip +++ /dev/null @@ -1,2123 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! -#include "hip/hip_runtime.h" -#include "custom_hip_layers.h" - -namespace cg = cooperative_groups; - -/* -Fused bias add, residual (elementwise) add, and normalization layer. - -For FP16, this kernel does not promote to FP32 in order to utilize the 2x throughput for -__half2 instructions, and avoid the conversion overhead (1/8 of __hal2 arithmetic). - -For specific launch constraints, see the launch functions. 
-*/ - -#define NORM_REG (MAX_REGISTERS / 4) - -__global__ void fused_bias_residual_layer_norm(float* vals, - const float* residual, - const float* gamma, - const float* beta, - float epsilon, - bool preLayerNorm, - bool training, - float* vars, - float* means, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id / WARP_SIZE; - - float vals_arr[NORM_REG]; - __shared__ float shr[MAX_WARP_NUM]; - - residual += (row * row_stride); - vals += (row * row_stride); - - float sum = 0.f; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - vals_arr[i] = residual[i * iteration_stride + id]; - sum += vals_arr[i]; - } - if (high_index < row_stride) { - vals_arr[iterations] = residual[high_index]; - sum += vals_arr[iterations]; - iterations++; - } - - for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) shr[gid] = sum; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()]; - -#if !defined(__STOCHASTIC_MODE__) || __CUDA_ARCH__ < 700 - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - sum += g.shfl_down(sum, i); - } - - sum = g.shfl(sum, 0); - float mean = sum / row_stride; - if (training) - if (threadIdx.x == 0) means[row] = mean; - float variance = 0.f; - for (int i = 0; i < iterations; i++) { - vals_arr[i] -= mean; - variance += vals_arr[i] * vals_arr[i]; - } - - for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); } - - if (g.thread_rank() == 0) shr[gid] = variance; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - for (int i = 1; i < 
(iteration_stride >> WARP_SIZE_BITS); i *= 2) { - variance += g.shfl_down(variance, i); - } - variance = g.shfl(variance, 0); - variance /= row_stride; - variance += epsilon; - if (training) - if (threadIdx.x == 0) vars[row] = variance; - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - vals_arr[i] = vals_arr[i] * rsqrtf(variance); - vals_arr[i] = - vals_arr[i] * gamma[i * iteration_stride + id] + beta[i * iteration_stride + id]; - vals[i * iteration_stride + id] = vals_arr[i]; - } - if ((high_index) < row_stride) { - vals_arr[iterations] = vals_arr[iterations] * rsqrtf(variance); - vals_arr[iterations] = vals_arr[iterations] * gamma[high_index] + beta[high_index]; - vals[high_index] = vals_arr[iterations]; - } -} - -__global__ void fused_bias_residual_layer_norm(__half* vals, - const __half* residual, - const __half* gamma, - const __half* beta, - float epsilon, - bool preLayerNorm, - bool training, - __half* vars, - __half* means, - int row_stride) -{ -#ifdef HALF_PRECISION_AVAILABLE - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id >> WARP_SIZE_BITS; - - float2 vals_f[NORM_REG]; - __shared__ float shr[MAX_WARP_NUM]; - - __half2* vals_cast = reinterpret_cast<__half2*>(vals); - const __half2* residual_cast = reinterpret_cast(residual); - - residual_cast += (row * row_stride); - vals_cast += (row * row_stride); - - float sum = 0.f; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - vals_f[i] = __half22float2(residual_cast[i * iteration_stride + id]); - sum += vals_f[i].x; - sum += vals_f[i].y; - } - if ((high_index) < row_stride) { - vals_f[iterations] = __half22float2(residual_cast[high_index]); - sum += vals_f[iterations].x; - sum += 
vals_f[iterations].y; - iterations++; - } - - for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) shr[gid] = sum; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - sum += g.shfl_down(sum, i); - } - sum = g.shfl(sum, 0); - float mean = sum / (row_stride * 2); - - float variance = 0.f; - for (int i = 0; i < iterations; i++) { - vals_f[i].x -= mean; - vals_f[i].y -= mean; - variance += vals_f[i].x * vals_f[i].x; - variance += vals_f[i].y * vals_f[i].y; - } - - for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); } - - if (g.thread_rank() == 0) shr[gid] = variance; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - variance += g.shfl_down(variance, i); - } - variance = g.shfl(variance, 0); - variance /= (row_stride * 2); - variance += epsilon; - - __half2 variance_h = __float2half2_rn(variance); - const __half2* gamma_cast = reinterpret_cast(gamma); - const __half2* beta_cast = reinterpret_cast(beta); - - if (training && threadIdx.x == 0) { - vars[row] = __float2half(variance); - means[row] = __float2half(mean); - } - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - __half2 vals_arr = __float22half2_rn(vals_f[i]); - vals_arr = vals_arr * h2rsqrt(variance_h); - vals_arr = - vals_arr * gamma_cast[i * iteration_stride + id] + beta_cast[i * iteration_stride + id]; - vals_cast[i * iteration_stride + id] = vals_arr; - } - if ((high_index) < row_stride) { - __half2 vals_arr = __float22half2_rn(vals_f[iterations]); - vals_arr = vals_arr * h2rsqrt(variance_h); - vals_arr = vals_arr * gamma_cast[high_index] + 
beta_cast[high_index]; - vals_cast[high_index] = vals_arr; - } -#endif -} - -template -void launch_bias_residual_layer_norm(T* vals, - const T* residual, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - int hidden_dim, - hipStream_t stream, - bool preLayerNorm, - bool training, - T* vars, - T* means); - -template <> -void launch_bias_residual_layer_norm(float* vals, - const float* residual, - const float* gamma, - const float* beta, - float epsilon, - int batch_size, - int hidden_dim, - hipStream_t stream, - bool preLayerNorm, - bool training, - float* vars, - float* means) -{ - int threads = THREADS; - - dim3 grid_dim(batch_size); - - if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 1; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 2; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim(threads); - - hipLaunchKernelGGL(( fused_bias_residual_layer_norm), dim3(grid_dim), dim3(block_dim), 0, stream, - vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, means, hidden_dim); -} - -template <> -void launch_bias_residual_layer_norm<__half>(__half* vals, - const __half* residual, - const __half* gamma, - const __half* beta, - float epsilon, - int batch_size, - int hidden_dim, - hipStream_t stream, - bool preLayerNorm, - bool training, - __half* vars, - __half* means) -{ - int threads = 128; - - dim3 grid_dim(batch_size); - - if (hidden_dim > 8192 && hidden_dim <= 16384) - threads <<= 1; - else if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 2; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 3; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim(threads); - - hipLaunchKernelGGL(( fused_bias_residual_layer_norm), dim3(grid_dim), dim3(block_dim), 0, stream, - vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, means, hidden_dim / 2); -} - -__global__ void 
fused_bias_residual_layer_norm(float* vals, - const float* residual, - const float* gamma, - const float* beta, - float epsilon, - bool preLayerNorm, - bool training, - float* vars, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id / 32; - - float vals_arr[NORM_REG]; - __shared__ float shr[MAX_WARP_NUM]; - - residual += (row * row_stride); - vals += (row * row_stride); - - float sum = 0.f; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - vals_arr[i] = residual[i * iteration_stride + id]; - sum += vals_arr[i]; - } - if ((high_index) < row_stride) { - vals_arr[iterations] = residual[high_index]; - sum += vals_arr[iterations]; - iterations++; - } - - for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) shr[gid] = sum; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()]; - -#if !defined(__STOCHASTIC_MODE__) || __CUDA_ARCH__ < 700 - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - sum += g.shfl_down(sum, i); - } - - sum = g.shfl(sum, 0); - float mean = sum / row_stride; - float variance = 0.f; - for (int i = 0; i < iterations; i++) { - vals_arr[i] -= mean; - variance += vals_arr[i] * vals_arr[i]; - } - - for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); } - - if (g.thread_rank() == 0) shr[gid] = variance; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - variance += g.shfl_down(variance, i); - } - variance = g.shfl(variance, 0); - variance 
/= row_stride; - variance += epsilon; - if (training) - if (threadIdx.x == 0) vars[row] = variance; - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - vals_arr[i] = vals_arr[i] * rsqrtf(variance); - vals_arr[i] = - vals_arr[i] * gamma[i * iteration_stride + id] + beta[i * iteration_stride + id]; - vals[i * iteration_stride + id] = vals_arr[i]; - } - if ((high_index) < row_stride) { - vals_arr[iterations] = vals_arr[iterations] * rsqrtf(variance); - vals_arr[iterations] = vals_arr[iterations] * gamma[high_index] + beta[high_index]; - vals[high_index] = vals_arr[iterations]; - } -} - -__global__ void fused_bias_residual_layer_norm(__half* vals, - const __half* residual, - const __half* gamma, - const __half* beta, - float epsilon, - bool preLayerNorm, - bool training, - __half* vars, - int row_stride) -{ -#ifdef HALF_PRECISION_AVAILABLE - - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int gid = id >> WARP_SIZE_BITS; - - float2 vals_f[NORM_REG]; - __shared__ float shr[MAX_WARP_NUM]; - - __half2* vals_cast = reinterpret_cast<__half2*>(vals); - const __half2* residual_cast = reinterpret_cast(residual); - - residual_cast += (row * row_stride); - vals_cast += (row * row_stride); - - float sum = 0.f; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - vals_f[i] = __half22float2(residual_cast[i * iteration_stride + id]); - sum += vals_f[i].x; - sum += vals_f[i].y; - } - if ((high_index) < row_stride) { - vals_f[iterations] = __half22float2(residual_cast[high_index]); - sum += vals_f[iterations].x; - sum += vals_f[iterations].y; - iterations++; - } - - for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) shr[gid] = sum; - - b.sync(); - 
- if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - sum += g.shfl_down(sum, i); - } - sum = g.shfl(sum, 0); - float mean = sum / (row_stride * 2); - - float variance = 0.f; - for (int i = 0; i < iterations; i++) { - vals_f[i].x -= mean; - vals_f[i].y -= mean; - variance += vals_f[i].x * vals_f[i].x; - variance += vals_f[i].y * vals_f[i].y; - } - - for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); } - - if (g.thread_rank() == 0) shr[gid] = variance; - - b.sync(); - - if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { - variance += g.shfl_down(variance, i); - } - variance = g.shfl(variance, 0); - variance /= (row_stride * 2); - variance += epsilon; - - __half2 variance_h = __float2half2_rn(variance); - const __half2* gamma_cast = reinterpret_cast(gamma); - const __half2* beta_cast = reinterpret_cast(beta); - - if (training && threadIdx.x == 0) vars[row] = __float2half(variance); - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - __half2 vals_arr = __float22half2_rn(vals_f[i]); - vals_arr = vals_arr * h2rsqrt(variance_h); - vals_arr = - vals_arr * gamma_cast[i * iteration_stride + id] + beta_cast[i * iteration_stride + id]; - vals_cast[i * iteration_stride + id] = vals_arr; - } - if ((high_index) < row_stride) { - __half2 vals_arr = __float22half2_rn(vals_f[iterations]); - vals_arr = vals_arr * h2rsqrt(variance_h); - vals_arr = vals_arr * gamma_cast[high_index] + beta_cast[high_index]; - vals_cast[high_index] = vals_arr; - } -#endif -} - -template -void launch_bias_residual_layer_norm(T* vals, - const T* residual, - const T* gamma, - const T* beta, - float epsilon, - int batch_size, - 
int hidden_dim, - hipStream_t stream, - bool preLayerNorm, - bool training, - T* vars); - -/* -To tune this launch the following restrictions must be met: - -For float: -row_stride == hidden_size -threads * iterations == row_stride -threads is in [32, 64, 128, 256, 512, 1024] - -For half: -row_stride == hidden_size / 2 -threads * iterations == row_stride -threads is in [32, 64, 128, 256, 512, 1024] - -*/ - -template <> -void launch_bias_residual_layer_norm(float* vals, - const float* residual, - const float* gamma, - const float* beta, - float epsilon, - int batch_size, - int hidden_dim, - hipStream_t stream, - bool preLayerNorm, - bool training, - float* vars) -{ - int threads = THREADS; - - dim3 grid_dim(batch_size); - - // There are some limitations to call below functions, now just enumerate the situations. - - if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 1; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 2; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim(threads); - - hipLaunchKernelGGL(( fused_bias_residual_layer_norm), dim3(grid_dim), dim3(block_dim), 0, stream, - vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, hidden_dim); -} - -template <> -void launch_bias_residual_layer_norm<__half>(__half* vals, - const __half* residual, - const __half* gamma, - const __half* beta, - float epsilon, - int batch_size, - int hidden_dim, - hipStream_t stream, - bool preLayerNorm, - bool training, - __half* vars) -{ - int threads = 128; - - dim3 grid_dim(batch_size); - - // There are some limitations to call below functions, now just enumerate the situations. 
- - if (hidden_dim > 8192 && hidden_dim <= 16384) - threads <<= 1; - else if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 2; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 3; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim(threads); - hipLaunchKernelGGL(( fused_bias_residual_layer_norm), dim3(grid_dim), dim3(block_dim), 0, stream, - vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, hidden_dim / 2); -} - -/* Normalize Gamma & Betta gradients - * Compute gradients using either X_hat or - * normalize input (invertible). - * Combine transpose with gradients computation. - */ - -template -__global__ void LayerNormBackward1(const T* __restrict__ out_grad, - const T* __restrict__ vals_hat, - const T* __restrict__ gamma, - const T* __restrict__ betta, - T* __restrict__ gamma_grad, - T* __restrict__ betta_grad, - int rows, - int width, - bool invertible) -{ - __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1]; - __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1]; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int idx = blockDim.x * blockIdx.x + threadIdx.x; - int offset = threadIdx.y * width + idx; - int y_stride = width * TILE_DIM; - - float betta_reg = (invertible ? (float)betta[idx] : 0.0f); - float gamma_reg = (float)gamma[idx]; - - // Loop across matrix height - float betta_tmp = 0; - float gamma_tmp = 0; - for (int r = threadIdx.y; r < rows; r += TILE_DIM) { - float grad = (float)out_grad[offset]; - float val = (invertible ? ((float)vals_hat[offset] - betta_reg) / gamma_reg - : (float)vals_hat[offset]); - betta_tmp += grad; - gamma_tmp += (val * grad); - - offset += y_stride; - } - - betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp; - gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp; - - __syncthreads(); - - // Sum the shared buffer. 
- float s1 = betta_buffer[threadIdx.y][threadIdx.x]; - float s2 = gamma_buffer[threadIdx.y][threadIdx.x]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < TILE_DIM; i <<= 1) { - s1 += g.shfl_down(s1, i); - s2 += g.shfl_down(s2, i); - } - - if (threadIdx.x == 0) { - int pos = blockIdx.x * TILE_DIM + threadIdx.y; - betta_grad[pos] = s1; - gamma_grad[pos] = s2; - } -} - -/* Normalize Gamma & Betta gradients - * Compute gradients using the input to - * the normalize. - * Combine transpose with gradients computation. - */ - -template -__global__ void LayerNormBackward1(const T* __restrict__ out_grad, - const T* __restrict__ X_data, - const T* __restrict__ vars, - const T* __restrict__ means, - T* __restrict__ gamma_grad, - T* __restrict__ betta_grad, - int rows, - int width) -{ - __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1]; - __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1]; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int idx = blockDim.x * blockIdx.x + threadIdx.x; - int offset = threadIdx.y * width + idx; - int y_stride = width * TILE_DIM; - - int pos = blockIdx.x * TILE_DIM + threadIdx.y; - // Loop across matrix height - - float betta_tmp = 0; - float gamma_tmp = 0; - for (int r = threadIdx.y; r < rows; r += TILE_DIM) { - float grad = (float)out_grad[offset]; - float val = (float)X_data[offset]; - val = (val - (float)means[r]) * rsqrtf((float)vars[r]); - betta_tmp += grad; - gamma_tmp += (val * grad); - - offset += y_stride; - } - - betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp; - gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp; - - __syncthreads(); - - // Sum the shared buffer. 
- float s1 = betta_buffer[threadIdx.y][threadIdx.x]; - float s2 = gamma_buffer[threadIdx.y][threadIdx.x]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < TILE_DIM; i <<= 1) { - s1 += g.shfl_down(s1, i); - s2 += g.shfl_down(s2, i); - } - - if (threadIdx.x == 0) { - betta_grad[pos] = s1; - gamma_grad[pos] = s2; - } -} -/* - -/* Backward Normalize (Input-Gradient) - * Using the means and variances from the input - * This type of backward is invertible! - * We do the backward using the X_hat (X - u) / sqrt(variance) or the output of Normalization. - */ - -__global__ void LayerNormBackward2(const float* out_grad, - const float* vals_hat, - const float* gamma, - const float* betta, - const float* vars, - float* inp_grad, - bool invertible, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int wid = id / WARP_SIZE; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - __shared__ float partialSum[MAX_WARP_NUM]; - - out_grad += (row * row_stride); - vals_hat += (row * row_stride); - inp_grad += (row * row_stride); - - float vals_arr[NORM_REG]; - float vals_hat_arr[NORM_REG]; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - float gamma_reg = gamma[i * iteration_stride + id]; - vals_arr[i] = out_grad[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; - vals_hat_arr[i] = - (invertible ? (vals_hat[i * iteration_stride + id] - betta[i * iteration_stride + id]) / - gamma_reg - : vals_hat[i * iteration_stride + id]); - } - if ((high_index) < row_stride) { - float gamma_reg = gamma[high_index]; - vals_arr[iterations] = out_grad[high_index]; - vals_arr[iterations] *= gamma_reg; - vals_hat_arr[iterations] = - (invertible ? 
(vals_hat[high_index] - betta[high_index]) / gamma_reg - : vals_hat[high_index]); - iterations++; - } - - float var_reg = vars[row]; - - float sum = 0; - for (int i = 0; i < iterations; i++) { - sum += vals_hat_arr[i] * vals_arr[i] * - sqrtf(var_reg); // dval_hat = gamma * (x - u) * out_grad - vals_arr[i] *= rsqrtf(var_reg); // dvar_inv = gamma * out_grad / sqrt(var) - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= row_stride; - - for (int i = 0; i < iterations; i++) { vals_arr[i] += ((-sum * vals_hat_arr[i]) / var_reg); } - - sum = 0; - for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= row_stride; - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) inp_grad[i * iteration_stride + id] = (vals_arr[i] - sum); - if ((high_index) < row_stride) inp_grad[high_index] = (vals_arr[iterations] - sum); -} - -__global__ void LayerNormBackward2(const __half* out_grad, - const __half* vals_hat, - const __half* gamma, - const __half* betta, - const __half* vars, - __half* inp_grad, - bool invertible, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = 
blockIdx.x; - int id = threadIdx.x; - int wid = id / WARP_SIZE; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - __shared__ float partialSum[MAX_WARP_NUM]; - - __half2 vals_arr[NORM_REG]; - float2 vals_arr_f[NORM_REG]; - __half2 vals_hat_arr[NORM_REG]; - - __half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad); - const __half2* out_grad_h = reinterpret_cast(out_grad); - const __half2* vals_hat_h = reinterpret_cast(vals_hat); - - inp_grad_h += (row * row_stride); - out_grad_h += (row * row_stride); - vals_hat_h += (row * row_stride); - - const __half2* gamma_h = reinterpret_cast(gamma); - const __half2* betta_h = (invertible ? reinterpret_cast(betta) : nullptr); - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - __half2 gamma_reg = gamma_h[i * iteration_stride + id]; - vals_arr[i] = out_grad_h[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; - vals_hat_arr[i] = - (invertible - ? (vals_hat_h[i * iteration_stride + id] - betta_h[i * iteration_stride + id]) / - gamma_reg - : vals_hat_h[i * iteration_stride + id]); - } - if ((high_index) < row_stride) { - __half2 gamma_reg = gamma_h[high_index]; - vals_arr[iterations] = out_grad_h[high_index]; - vals_arr[iterations] *= gamma_reg; - vals_hat_arr[iterations] = - (invertible ? 
(vals_hat_h[high_index] - betta_h[high_index]) / gamma_reg - : vals_hat_h[high_index]); - iterations++; - } - __half var_h = vars[row]; - __half2 var_reg = __halves2half2(var_h, var_h); - - float sum = 0.f; - for (int i = 0; i < iterations; i++) { - __half2 result_h = (vals_hat_arr[i] * vals_arr[i] * h2sqrt(var_reg)); - float2 result_f = __half22float2(result_h); - sum += result_f.x; - sum += result_f.y; - vals_arr[i] *= h2rsqrt(var_reg); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - __half2 sum_h = __float2half2_rn(sum); - - for (int i = 0; i < iterations; i++) { - __half2 temp = ((-sum_h * vals_hat_arr[i]) / (var_reg)); - vals_arr_f[i] = __half22float2(vals_arr[i]); - float2 temp_f = __half22float2(temp); - vals_arr_f[i].x += temp_f.x; - vals_arr_f[i].y += temp_f.y; - } - sum = 0.f; - - for (int i = 0; i < iterations; i++) { - sum += (vals_arr_f[i].x); - sum += (vals_arr_f[i].y); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - vals_arr_f[i].x -= sum; - vals_arr_f[i].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[i]); - - inp_grad_h[i * iteration_stride + id] = temp; - } - if ((high_index) < row_stride) { - vals_arr_f[iterations].x -= sum; - 
vals_arr_f[iterations].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[iterations]); - - inp_grad_h[high_index] = temp; - } -} - -template <> -void launch_layerNorm_backward(const float* out_grad, - const float* vals_hat, - const float* vars, - const float* gamma, - float* gamma_grad, - float* betta_grad, - float* inp_grad, - int batch, - int hidden_dim, - hipStream_t stream[2], - bool invertible, - const float* betta) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - hipLaunchKernelGGL(( LayerNormBackward1), dim3(grid_dim), dim3(block_dim), 0, stream[0], - out_grad, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible); - - dim3 grid_dim2(batch); - - if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 1; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 2; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads); - - hipLaunchKernelGGL(( LayerNormBackward2), dim3(grid_dim2), dim3(block_dim2), 0, stream[1], - out_grad, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim); -} - -template <> -void launch_layerNorm_backward<__half>(const __half* out_grad, - const __half* vals_hat, - const __half* vars, - const __half* gamma, - __half* gamma_grad, - __half* betta_grad, - __half* inp_grad, - int batch, - int hidden_dim, - hipStream_t stream[2], - bool invertible, - const __half* betta) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - //hipLaunchKernelGGL(( LayerNormBackward1<__half>), dim3(grid_dim), dim3(block_dim), 0, stream[0], - // out_grad, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible); - - dim3 grid_dim2(batch); - - if (hidden_dim > 8192 && hidden_dim <= 16384) - threads <<= 1; - else if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 2; - else if (hidden_dim > 32768 && 
hidden_dim <= 65536) - threads <<= 3; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads / 2); - - hipLaunchKernelGGL(( LayerNormBackward2), dim3(grid_dim2), dim3(block_dim2), 0, stream[1], - out_grad, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim / 2); -} - -/* Backward Normalize (Input-Gradient) - * Using the means and variances from the input - * This type of backward is not invertible! - * We do the backward using the input (X) - */ - -__global__ void LayerNormBackward2(const float* out_grad, - const float* X_vals, - const float* gamma, - const float* vars, - const float* means, - float* inp_grad, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int wid = id >> WARP_SIZE_BITS; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - __shared__ float partialSum[MAX_WARP_NUM]; - - out_grad += (row * row_stride); - X_vals += (row * row_stride); - inp_grad += (row * row_stride); - - float vals_arr[NORM_REG]; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - float gamma_reg = gamma[i * iteration_stride + id]; - vals_arr[i] = out_grad[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; - } - if ((high_index) < row_stride) { - float gamma_reg = gamma[high_index]; - vals_arr[iterations] = out_grad[high_index]; - vals_arr[iterations] *= gamma_reg; - iterations++; - } - - float var_reg = vars[row]; - float mean_reg = means[row]; - - float sum = 0; - float xu[NORM_REG]; - for (int i = 0; i < iterations; i++) { - xu[i] = (X_vals[i * iteration_stride + id] - mean_reg); - sum += vals_arr[i] * xu[i]; - vals_arr[i] *= rsqrtf(var_reg); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if 
(g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= row_stride; - - for (int i = 0; i < iterations; i++) { - vals_arr[i] += (-sum * xu[i] * rsqrtf(var_reg) / (var_reg)); - } - - sum = 0; - for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= row_stride; - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) inp_grad[i * iteration_stride + id] = (vals_arr[i] - sum); - if ((high_index) < row_stride) inp_grad[high_index] = (vals_arr[iterations] - sum); -} - -__global__ void LayerNormBackward2(const __half* out_grad, - const __half* X_vals, - const __half* gamma, - const __half* vars, - const __half* means, - __half* inp_grad, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int wid = id >> WARP_SIZE_BITS; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - - __shared__ float partialSum[MAX_WARP_NUM]; - - __half2 vals_arr[NORM_REG]; - float2 vals_arr_f[NORM_REG]; - __half2 xu[NORM_REG]; - - __half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad); - const __half2* out_grad_h = reinterpret_cast(out_grad); - const __half2* vals_hat_h = reinterpret_cast(X_vals); - - inp_grad_h += (row * row_stride); - 
out_grad_h += (row * row_stride); - vals_hat_h += (row * row_stride); - - const __half2* gamma_h = reinterpret_cast(gamma); - int high_index = iterations * iteration_stride + id; - - __half mean_h = means[row]; - __half2 mean_reg = __halves2half2(mean_h, mean_h); -#pragma unroll - for (int i = 0; i < iterations; i++) { - __half2 gamma_reg = gamma_h[i * iteration_stride + id]; - vals_arr[i] = out_grad_h[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; // out_grad * gamma - xu[i] = (vals_hat_h[i * iteration_stride + id] - mean_reg); - } - if ((high_index) < row_stride) { - __half2 gamma_reg = gamma_h[high_index]; - vals_arr[iterations] = out_grad_h[high_index]; - vals_arr[iterations] *= gamma_reg; // out_grad * gamma - xu[iterations] = (vals_hat_h[high_index] - mean_reg); - iterations++; - } - __half var_h = vars[row]; - __half2 var_reg = __halves2half2(var_h, var_h); - - float sum = 0.f; - for (int i = 0; i < iterations; i++) { - __half2 result_h = (xu[i] * vals_arr[i]); - float2 result_f = __half22float2(result_h); - sum += result_f.x; - sum += result_f.y; - vals_arr[i] *= h2rsqrt(var_reg); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - __half2 sum_h = __float2half2_rn(sum); - - for (int i = 0; i < iterations; i++) { - __half2 xu_grad = ((-sum_h * xu[i] * h2rsqrt(var_reg)) / (var_reg)); - vals_arr_f[i] = __half22float2(vals_arr[i]); - float2 xu_grad_f = __half22float2(xu_grad); - vals_arr_f[i].x += xu_grad_f.x; - vals_arr_f[i].y += xu_grad_f.y; - } - - sum = 0.f; - for (int i = 0; i < iterations; i++) { - sum += (vals_arr_f[i].x); - sum += (vals_arr_f[i].y); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) 
{ sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - vals_arr_f[i].x -= sum; - vals_arr_f[i].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[i]); - inp_grad_h[i * iteration_stride + id] = temp; - } - if ((high_index) < row_stride) { - vals_arr_f[iterations].x -= sum; - vals_arr_f[iterations].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[iterations]); - inp_grad_h[high_index] = temp; - } -} - -template <> -void launch_layerNorm_backward(const float* out_grad, - const float* X_data, - const float* vars, - const float* means, - const float* gamma, - float* gamma_grad, - float* betta_grad, - float* inp_grad, - int batch, - int hidden_dim, - hipStream_t stream[2]) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - hipLaunchKernelGGL(( LayerNormBackward1), dim3(grid_dim), dim3(block_dim), 0, stream[0], - out_grad, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim); - - dim3 grid_dim2(batch); - - if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 1; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 2; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads); - hipLaunchKernelGGL(( LayerNormBackward2), dim3(grid_dim2), dim3(block_dim2), 0, stream[1], - out_grad, X_data, gamma, vars, means, inp_grad, hidden_dim); -} - -template <> -void launch_layerNorm_backward<__half>(const __half* out_grad, - const __half* X_data, - const __half* vars, - const __half* means, - const __half* gamma, - __half* gamma_grad, - 
__half* betta_grad, - __half* inp_grad, - int batch, - int hidden_dim, - hipStream_t stream[2]) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - hipLaunchKernelGGL(( LayerNormBackward1<__half>), dim3(grid_dim), dim3(block_dim), 0, stream[0], - out_grad, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim); - - dim3 grid_dim2(batch); - - if (hidden_dim > 8192 && hidden_dim <= 16384) - threads <<= 1; - else if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 2; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 3; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads / 2); - hipLaunchKernelGGL(( LayerNormBackward2), dim3(grid_dim2), dim3(block_dim2), 0, stream[1], - out_grad, X_data, gamma, vars, means, inp_grad, hidden_dim / 2); -} - -template -__global__ void LayerNormBackward1_fused_add(const T* __restrict__ out_grad1, - const T* __restrict__ out_grad2, - const T* __restrict__ vals_hat, - const T* __restrict__ gamma, - const T* __restrict__ betta, - T* __restrict__ gamma_grad, - T* __restrict__ betta_grad, - int rows, - int width, - bool invertible) -{ - __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1]; - __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1]; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int idx = blockDim.x * blockIdx.x + threadIdx.x; - int offset = threadIdx.y * width + idx; - int y_stride = width * TILE_DIM; - - float betta_reg = (invertible ? (float)betta[idx] : 0.0f); - float gamma_reg = (float)gamma[idx]; - - // Loop across matrix height - float betta_tmp = 0; - float gamma_tmp = 0; - for (int r = threadIdx.y; r < rows; r += TILE_DIM) { - float grad = (float)out_grad1[offset] + (float)out_grad2[offset]; - float val = (invertible ? 
((float)vals_hat[offset] - betta_reg) / gamma_reg - : (float)vals_hat[offset]); - betta_tmp += grad; - gamma_tmp += (val * grad); - - offset += y_stride; - } - - betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp; - gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp; - - __syncthreads(); - - // Sum the shared buffer. - float s1 = betta_buffer[threadIdx.y][threadIdx.x]; - float s2 = gamma_buffer[threadIdx.y][threadIdx.x]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < TILE_DIM; i <<= 1) { - s1 += g.shfl_down(s1, i); - s2 += g.shfl_down(s2, i); - } - - if (threadIdx.x == 0) { - int pos = blockIdx.x * TILE_DIM + threadIdx.y; - betta_grad[pos] = s1; - gamma_grad[pos] = s2; - } -} - -template -__global__ void LayerNormBackward1_fused_add(const T* __restrict__ out_grad1, - const T* __restrict__ out_grad2, - const T* __restrict__ X_data, - const T* __restrict__ vars, - const T* __restrict__ means, - T* __restrict__ gamma_grad, - T* __restrict__ betta_grad, - int rows, - int width) -{ - __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1]; - __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1]; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int idx = blockDim.x * blockIdx.x + threadIdx.x; - int offset = threadIdx.y * width + idx; - int y_stride = width * TILE_DIM; - - int pos = blockIdx.x * TILE_DIM + threadIdx.y; - // Loop across matrix height - - float betta_tmp = 0; - float gamma_tmp = 0; - for (int r = threadIdx.y; r < rows; r += TILE_DIM) { - float grad = (float)out_grad1[offset] + (float)out_grad2[offset]; - float val = (float)X_data[offset]; - val = (val - (float)means[r]) * rsqrtf((float)vars[r]); - betta_tmp += grad; - gamma_tmp += (val * grad); - - offset += y_stride; - } - - betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp; - gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp; - - __syncthreads(); - - // Sum the shared buffer. 
- float s1 = betta_buffer[threadIdx.y][threadIdx.x]; - float s2 = gamma_buffer[threadIdx.y][threadIdx.x]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < TILE_DIM; i <<= 1) { - s1 += g.shfl_down(s1, i); - s2 += g.shfl_down(s2, i); - } - - if (threadIdx.x == 0) { - betta_grad[pos] = s1; - gamma_grad[pos] = s2; - } -} - -__global__ void LayerNormBackward2_fused_add(const float* out_grad1, - const float* out_grad2, - const float* vals_hat, - const float* gamma, - const float* betta, - const float* vars, - float* inp_grad, - bool invertible, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int wid = id / WARP_SIZE; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - __shared__ float partialSum[MAX_WARP_NUM]; - - out_grad1 += (row * row_stride); - out_grad2 += (row * row_stride); - vals_hat += (row * row_stride); - inp_grad += (row * row_stride); - - float vals_arr[NORM_REG]; - float vals_hat_arr[NORM_REG]; - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - float gamma_reg = gamma[i * iteration_stride + id]; - vals_arr[i] = out_grad1[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; - vals_hat_arr[i] = - (invertible ? (vals_hat[i * iteration_stride + id] - betta[i * iteration_stride + id]) / - gamma_reg - : vals_hat[i * iteration_stride + id]); - } - if ((high_index) < row_stride) { - float gamma_reg = gamma[high_index]; - vals_arr[iterations] = out_grad1[high_index]; - vals_arr[iterations] *= gamma_reg; - vals_hat_arr[iterations] = - (invertible ? 
(vals_hat[high_index] - betta[high_index]) / gamma_reg - : vals_hat[high_index]); - iterations++; - } - - float var_reg = vars[row]; - - float sum = 0; - for (int i = 0; i < iterations; i++) { - sum += vals_hat_arr[i] * vals_arr[i] * sqrtf(var_reg); - vals_arr[i] *= rsqrtf(var_reg); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= row_stride; - - for (int i = 0; i < iterations; i++) { vals_arr[i] += ((-sum * vals_hat_arr[i]) / var_reg); } - - sum = 0; - for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= row_stride; - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) - inp_grad[i * iteration_stride + id] = - (vals_arr[i] - sum) + out_grad2[i * iteration_stride + id]; - if ((high_index) < row_stride) - inp_grad[high_index] = (vals_arr[iterations] - sum) + out_grad2[high_index]; -} - -__global__ void LayerNormBackward2_fused_add(const __half* out_grad1, - const __half* out_grad2, - const __half* vals_hat, - const __half* gamma, - const __half* betta, - const __half* vars, - __half* inp_grad, - bool invertible, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - 
- int row = blockIdx.x; - int id = threadIdx.x; - int wid = id / WARP_SIZE; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - __shared__ float partialSum[MAX_WARP_NUM]; - - __half2 vals_arr[NORM_REG]; - float2 vals_arr_f[NORM_REG]; - __half2 vals_hat_arr[NORM_REG]; - - // float2 result[iterations]; - - __half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad); - const __half2* out_grad_h1 = reinterpret_cast(out_grad1); - const __half2* out_grad_h2 = reinterpret_cast(out_grad2); - const __half2* vals_hat_h = reinterpret_cast(vals_hat); - - inp_grad_h += (row * row_stride); - out_grad_h1 += (row * row_stride); - out_grad_h2 += (row * row_stride); - vals_hat_h += (row * row_stride); - - const __half2* gamma_h = reinterpret_cast(gamma); - const __half2* betta_h = (invertible ? reinterpret_cast(betta) : nullptr); - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - __half2 gamma_reg = gamma_h[i * iteration_stride + id]; - vals_arr[i] = out_grad_h1[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; // out_grad * gamma - vals_hat_arr[i] = - (invertible - ? (vals_hat_h[i * iteration_stride + id] - betta_h[i * iteration_stride + id]) / - gamma_reg - : vals_hat_h[i * iteration_stride + id]); - } - if ((high_index) < row_stride) { - __half2 gamma_reg = gamma_h[high_index]; - vals_arr[iterations] = out_grad_h1[high_index]; - vals_arr[iterations] *= gamma_reg; // out_grad * gamma - vals_hat_arr[iterations] = - (invertible ? 
(vals_hat_h[high_index] - betta_h[high_index]) / gamma_reg - : vals_hat_h[high_index]); - iterations++; - } - __half var_h = vars[row]; - __half2 var_reg = __halves2half2(var_h, var_h); - - float sum = 0.f; - for (int i = 0; i < iterations; i++) { - __half2 result_h = (vals_hat_arr[i] * vals_arr[i] * h2sqrt(var_reg)); - float2 result_f = __half22float2(result_h); - sum += result_f.x; - sum += result_f.y; - vals_arr[i] *= h2rsqrt(var_reg); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - __half2 sum_h = __float2half2_rn(sum); - - for (int i = 0; i < iterations; i++) { - __half2 temp = ((-sum_h * vals_hat_arr[i]) / (var_reg)); - vals_arr_f[i] = __half22float2(vals_arr[i]); - float2 temp_f = __half22float2(temp); - vals_arr_f[i].x += temp_f.x; - vals_arr_f[i].y += temp_f.y; - } - sum = 0.f; - for (int i = 0; i < iterations; i++) { - sum += (vals_arr_f[i].x); - sum += (vals_arr_f[i].y); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - vals_arr_f[i].x -= sum; - vals_arr_f[i].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[i]); - - inp_grad_h[i * iteration_stride + id] = temp + out_grad_h2[i * iteration_stride + id]; - } - if ((high_index) < row_stride) { - 
vals_arr_f[iterations].x -= sum; - vals_arr_f[iterations].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[iterations]); - - inp_grad_h[high_index] = temp + out_grad_h2[high_index]; - } -} - -template <> -void launch_layerNorm_backward_fused_add(const float* out_grad1, - const float* out_grad2, - const float* vals_hat, - const float* vars, - const float* gamma, - float* gamma_grad, - float* betta_grad, - float* inp_grad, - int batch, - int hidden_dim, - hipStream_t stream[2], - bool invertible, - const float* betta) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - hipLaunchKernelGGL(( LayerNormBackward1), dim3(grid_dim), dim3(block_dim), 0, stream[0], - out_grad1, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible); - - dim3 grid_dim2(batch); - - if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 1; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 2; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads); - hipLaunchKernelGGL(( LayerNormBackward2_fused_add), dim3(grid_dim2), dim3(block_dim2), 0, stream[1], - out_grad1, out_grad2, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim); -} - -template <> -void launch_layerNorm_backward_fused_add<__half>(const __half* out_grad1, - const __half* out_grad2, - const __half* vals_hat, - const __half* vars, - const __half* gamma, - __half* gamma_grad, - __half* betta_grad, - __half* inp_grad, - int batch, - int hidden_dim, - hipStream_t stream[2], - bool invertible, - const __half* betta) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - hipLaunchKernelGGL(( LayerNormBackward1<__half>), dim3(grid_dim), dim3(block_dim), 0, stream[0], - out_grad1, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible); - - dim3 grid_dim2(batch); - - if (hidden_dim > 
8192 && hidden_dim <= 16384) - threads <<= 1; - else if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 2; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 3; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads / 2); - hipLaunchKernelGGL(( LayerNormBackward2_fused_add), dim3(grid_dim2), dim3(block_dim2), 0, stream[1], - out_grad1, out_grad2, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim / 2); -} - -/* Backward Normalize (Input-Gradient) - * Using the means and variances from the input - * This type of backward is not invertible! - * We do the backward using the input (X) - */ - -__global__ void LayerNormBackward2_fused_add(const float* out_grad1, - const float* out_grad2, - const float* X_vals, - const float* gamma, - const float* vars, - const float* means, - float* inp_grad, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - int wid = id / WARP_SIZE; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - __shared__ float partialSum[MAX_WARP_NUM]; - - float vals_arr[NORM_REG]; - float vals_hat_arr[NORM_REG]; - - out_grad1 += (row * row_stride); - out_grad2 += (row * row_stride); - X_vals += (row * row_stride); - inp_grad += (row * row_stride); - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - float gamma_reg = gamma[i * iteration_stride + id]; - vals_arr[i] = out_grad1[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; - vals_hat_arr[i] = X_vals[i * iteration_stride + id]; - } - if ((high_index) < row_stride) { - float gamma_reg = gamma[high_index]; - vals_arr[iterations] = out_grad1[high_index]; - vals_arr[iterations] *= gamma_reg; - vals_hat_arr[iterations] = X_vals[high_index]; - 
iterations++; - } - - float var_reg = vars[row]; - float mean_reg = means[row]; - - float sum = 0; - float xu[NORM_REG]; - for (int i = 0; i < iterations; i++) { - xu[i] = (vals_hat_arr[i] - mean_reg); - sum += vals_arr[i] * xu[i]; - vals_arr[i] *= rsqrtf(var_reg); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= row_stride; - - for (int i = 0; i < iterations; i++) { - vals_arr[i] += (-sum * xu[i] * rsqrtf(var_reg) / (var_reg)); - } - - sum = 0; - for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - sum = g.shfl(sum, 0); - sum /= row_stride; - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) - inp_grad[i * iteration_stride + id] = - (vals_arr[i] - sum) + out_grad2[i * iteration_stride + id]; - if ((high_index) < row_stride) - inp_grad[high_index] = (vals_arr[iterations] - sum) + out_grad2[high_index]; -} - -__global__ void LayerNormBackward2_fused_add(const __half* out_grad1, - const __half* out_grad2, - const __half* X_vals, - const __half* gamma, - const __half* vars, - const __half* means, - __half* inp_grad, - int row_stride) -{ - int iteration_stride = blockDim.x; - int iterations = row_stride / iteration_stride; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - 
int id = threadIdx.x; - int wid = id / WARP_SIZE; - int warp_num = iteration_stride >> WARP_SIZE_BITS; - - __shared__ float partialSum[MAX_WARP_NUM]; - - __half2 vals_arr[NORM_REG]; - float2 vals_arr_f[NORM_REG]; - __half2 vals_hat_arr[NORM_REG]; - - __half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad); - const __half2* out_grad_h1 = reinterpret_cast(out_grad1); - const __half2* out_grad_h2 = reinterpret_cast(out_grad2); - const __half2* vals_hat_h = reinterpret_cast(X_vals); - - out_grad_h1 += (row * row_stride); - out_grad_h2 += (row * row_stride); - inp_grad_h += (row * row_stride); - vals_hat_h += (row * row_stride); - - const __half2* gamma_h = reinterpret_cast(gamma); - int high_index = iterations * iteration_stride + id; -#pragma unroll - for (int i = 0; i < iterations; i++) { - __half2 gamma_reg = gamma_h[i * iteration_stride + id]; - vals_arr[i] = out_grad_h1[i * iteration_stride + id]; - vals_arr[i] *= gamma_reg; // out_grad * gamma - vals_hat_arr[i] = vals_hat_h[i * iteration_stride + id]; - } - if ((high_index) < row_stride) { - __half2 gamma_reg = gamma_h[high_index]; - vals_arr[iterations] = out_grad_h1[high_index]; - vals_arr[iterations] *= gamma_reg; // out_grad * gamma - vals_hat_arr[iterations] = vals_hat_h[high_index]; - iterations++; - } - - __half mean_h = means[row]; - __half var_h = vars[row]; - __half2 var_reg = __halves2half2(var_h, var_h); - __half2 mean_reg = __halves2half2(mean_h, mean_h); - __half2 xu[NORM_REG]; - - float sum = 0.f; - for (int i = 0; i < iterations; i++) { - xu[i] = (vals_hat_arr[i] - mean_reg); - __half2 result_h = (xu[i] * vals_arr[i]); - float2 result_f = __half22float2(result_h); - sum += result_f.x; - sum += result_f.y; - vals_arr[i] *= h2rsqrt(var_reg); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - 
__syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - __half2 sum_h = __float2half2_rn(sum); - - for (int i = 0; i < iterations; i++) { - __half2 xu_grad = ((-sum_h * xu[i] * h2rsqrt(var_reg)) / (var_reg)); - vals_arr_f[i] = __half22float2(vals_arr[i]); - float2 xu_grad_f = __half22float2(xu_grad); - vals_arr_f[i].x += xu_grad_f.x; - vals_arr_f[i].y += xu_grad_f.y; - } - - sum = 0.f; - for (int i = 0; i < iterations; i++) { - sum += (vals_arr_f[i].x); - sum += (vals_arr_f[i].y); - } - - for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } - - if (g.thread_rank() == 0) partialSum[wid] = sum; - - __syncthreads(); - - if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; - -#ifndef __STOCHASTIC_MODE__ - __syncthreads(); -#endif - - for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); - - sum = g.shfl(sum, 0); - sum /= (2 * row_stride); - - iterations = row_stride / iteration_stride; - for (int i = 0; i < iterations; i++) { - vals_arr_f[i].x -= sum; - vals_arr_f[i].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[i]); - inp_grad_h[i * iteration_stride + id] = temp + out_grad_h2[i * iteration_stride + id]; - } - if ((high_index) < row_stride) { - vals_arr_f[iterations].x -= sum; - vals_arr_f[iterations].y -= sum; - __half2 temp = __float22half2_rn(vals_arr_f[iterations]); - inp_grad_h[high_index] = temp + out_grad_h2[high_index]; - } -} - -template <> -void launch_layerNorm_backward_fused_add(const float* out_grad1, - const float* out_grad2, - const float* X_data, - const float* vars, - const float* means, - const float* gamma, - float* gamma_grad, - float* betta_grad, - float* inp_grad, - int batch, - int hidden_dim, - hipStream_t stream[2]) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - hipLaunchKernelGGL(( LayerNormBackward1), dim3(grid_dim), dim3(block_dim), 
0, stream[0], - out_grad1, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim); - - dim3 grid_dim2(batch); - - if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 1; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 2; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads); - hipLaunchKernelGGL(( LayerNormBackward2_fused_add), dim3(grid_dim2), dim3(block_dim2), 0, stream[1], - out_grad1, out_grad2, X_data, gamma, vars, means, inp_grad, hidden_dim); -} - -template <> -void launch_layerNorm_backward_fused_add<__half>(const __half* out_grad1, - const __half* out_grad2, - const __half* X_data, - const __half* vars, - const __half* means, - const __half* gamma, - __half* gamma_grad, - __half* betta_grad, - __half* inp_grad, - int batch, - int hidden_dim, - hipStream_t stream[2]) -{ - int threads = THREADS; - - dim3 grid_dim(hidden_dim / TILE_DIM); - dim3 block_dim(TILE_DIM, TILE_DIM); - - hipLaunchKernelGGL(( LayerNormBackward1<__half>), dim3(grid_dim), dim3(block_dim), 0, stream[0], - out_grad1, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim); - - dim3 grid_dim2(batch); - - if (hidden_dim > 8192 && hidden_dim <= 16384) - threads <<= 1; - else if (hidden_dim > 16384 && hidden_dim <= 32768) - threads <<= 2; - else if (hidden_dim > 32768 && hidden_dim <= 65536) - threads <<= 3; - else if (hidden_dim > 65536) - throw std::runtime_error("Unsupport hidden_dim."); - - dim3 block_dim2(threads / 2); - hipLaunchKernelGGL(( LayerNormBackward2_fused_add), dim3(grid_dim2), dim3(block_dim2), 0, stream[1], - out_grad1, out_grad2, X_data, gamma, vars, means, inp_grad, hidden_dim / 2); -} diff --git a/deepspeed/ops/csrc/transformer_bak/softmax_kernels.cu b/deepspeed/ops/csrc/transformer_bak/softmax_kernels.cu deleted file mode 100644 index 34487cb..0000000 --- a/deepspeed/ops/csrc/transformer_bak/softmax_kernels.cu +++ /dev/null @@ -1,595 +0,0 @@ -#include -#include 
"custom_cuda_layers.h" -#include "general_kernels.h" - -namespace cg = cooperative_groups; - -dim3 get_attn_softmax_grid(int batch_size, int heads, int sequence_length, int threads) -{ - int seq_length4 = sequence_length / 4; - int block_compute_size = - (seq_length4 < threads ? (int)pow(2.0, floor(log2((float)(threads / seq_length4)))) : 1); - // Note that the Y and Z dimensions are limited to 65535, while X is basically unlimited: - // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications - // The batch size is typically relatively small, while the sequence length could potentially be - // arbitrarily large. We therefore place the batch size second to avoid hitting the Y limit. - unsigned x = heads * sequence_length / block_compute_size; - unsigned y = batch_size; - return {x, y}; -} - -// Fused attention + softmax -template -__global__ void attn_softmax(float* vals, - const float* attn_mask, - int heads, - int seq_length, - int iterations) -{ - __shared__ float partialSum[MAX_WARP_NUM]; - - int warp_num = blockDim.x >> WARP_SIZE_BITS; - - int iteration_stride = blockDim.x; - int block_width = blockStride * seq_length; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int batch = blockIdx.y; - int row = blockIdx.x; - int max_threads_in_sequence = std::max(seq_length, tbSeq); - int seq_lane = threadIdx.x % max_threads_in_sequence; - - int data_offset = batch * (gridDim.x * block_width) + row * block_width + - (threadIdx.x / max_threads_in_sequence) * seq_length; - int mask_offset = batch * seq_length; - - int wid = threadIdx.x >> WARP_SIZE_BITS; - int lane = threadIdx.x & 0x1f; - - float4* val_cast = reinterpret_cast(vals); - const float4* attn_mask_cast = reinterpret_cast(attn_mask); - - float4 data[MAX_THREAD_ITERATIONS]; - - float max_val = minus_infinity; - - for (int i = 0; i < iterations; i++) { - int data_id = i * iteration_stride + seq_lane; - if 
(data_id < seq_length) { - float4 mask = attn_mask_cast[mask_offset + data_id]; - data[i] = val_cast[data_offset + data_id]; - - data[i].x += mask.x; - data[i].y += mask.y; - data[i].z += mask.z; - data[i].w += mask.w; - - max_val = (data[i].x > max_val ? data[i].x : max_val); - max_val = (data[i].y > max_val ? data[i].y : max_val); - max_val = (data[i].z > max_val ? data[i].z : max_val); - max_val = (data[i].w > max_val ? data[i].w : max_val); - } else { - data[i].x = minus_infinity; - data[i].y = minus_infinity; - data[i].z = minus_infinity; - data[i].w = minus_infinity; - } - } - - for (int i = 1; i < tbSize; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? temp : max_val); - } - - if (seq_length > tbSize) { - if (lane == 0) partialSum[wid] = max_val; - b.sync(); - - if (lane < warp_num) max_val = partialSum[lane]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - int iters = warp_num; - if (seq_length < iteration_stride) - iters = warp_num / (iteration_stride / max_threads_in_sequence); - - for (int i = 1; i < iters; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? 
temp : max_val); - } - - max_val = g.shfl(max_val, threadIdx.x / tbSize); - } - - float sum = 0; - for (int i = 0; i < iterations; i++) { - data[i].x = __expf(data[i].x - max_val); - data[i].y = __expf(data[i].y - max_val); - data[i].z = __expf(data[i].z - max_val); - data[i].w = __expf(data[i].w - max_val); - - sum += (data[i].x + data[i].y + data[i].z + data[i].w); - } - - for (int i = 1; i < tbSize; i *= 2) { sum += g.shfl_xor(sum, i); } - - if (seq_length > tbSize) { - if (lane == 0) partialSum[wid] = sum; - b.sync(); - - if (lane < warp_num) sum = partialSum[lane]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - int iters = warp_num; - if (seq_length < iteration_stride) - iters = warp_num / (iteration_stride / max_threads_in_sequence); - - for (int i = 1; i < iters; i *= 2) { sum += g.shfl_xor(sum, i); } - - sum = g.shfl(sum, threadIdx.x / tbSize); - } - - sum += 1e-6; - - for (int i = 0; i < iterations; i++) { - data[i].x /= sum; - data[i].y /= sum; - data[i].z /= sum; - data[i].w /= sum; - - int data_id = i * iteration_stride + seq_lane; - if (data_id < seq_length) val_cast[data_offset + data_id] = data[i]; - } -} - -template -__global__ void attn_softmax(__half* vals, - const __half* attn_mask, - int heads, - int seq_length, - int iterations) -{ -#ifdef HALF_PRECISION_AVAILABLE - __shared__ float partialSum[MAX_WARP_NUM]; - - int warp_num = blockDim.x >> WARP_SIZE_BITS; - - int iteration_stride = blockDim.x; - int block_width = blockStride * seq_length; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int batch = blockIdx.y; - int row = blockIdx.x; - int max_threads_in_sequence = std::max(seq_length, tbSeq); - int seq_lane = threadIdx.x % max_threads_in_sequence; - - int data_offset = batch * (gridDim.x * block_width) + row * block_width + - (threadIdx.x / max_threads_in_sequence) * seq_length; - int mask_offset = batch * seq_length; - - int wid = threadIdx.x >> WARP_SIZE_BITS; - int lane = 
threadIdx.x & 0x1f; - - float2* val_cast = reinterpret_cast(vals); - const float2* attn_mask_cast = reinterpret_cast(attn_mask); - - val_cast += data_offset; - attn_mask_cast += mask_offset; - - float2 low_data[MAX_THREAD_ITERATIONS]; - float2 high_data[MAX_THREAD_ITERATIONS]; - - float max_val = minus_infinity; - - for (int i = 0; i < iterations; i++) { - int data_id = i * iteration_stride + seq_lane; - if (data_id < seq_length) { - float2 data = val_cast[data_id]; - float2 mask = attn_mask_cast[data_id]; - - __half2* data_arr = reinterpret_cast<__half2*>(&data); - __half2* mask_arr = reinterpret_cast<__half2*>(&mask); - - low_data[i] = __half22float2(data_arr[0]); - high_data[i] = __half22float2(data_arr[1]); - float2 low_mask = __half22float2(mask_arr[0]); - float2 high_mask = __half22float2(mask_arr[1]); - - low_data[i].x += low_mask.x; - low_data[i].y += low_mask.y; - high_data[i].x += high_mask.x; - high_data[i].y += high_mask.y; - - max_val = (low_data[i].x > max_val ? low_data[i].x : max_val); - max_val = (low_data[i].y > max_val ? low_data[i].y : max_val); - max_val = (high_data[i].x > max_val ? high_data[i].x : max_val); - max_val = (high_data[i].y > max_val ? high_data[i].y : max_val); - } - } - - for (int i = 1; i < tbSize; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? temp : max_val); - } - - if (seq_length > tbSize) { - if (lane == 0) partialSum[wid] = max_val; - b.sync(); - - if (lane < warp_num) max_val = partialSum[lane]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - int iters = warp_num; - if (seq_length < iteration_stride) - iters = warp_num / (iteration_stride / max_threads_in_sequence); - - for (int i = 1; i < iters; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? 
temp : max_val); - } - - max_val = g.shfl(max_val, threadIdx.x / tbSize); - } - - float sum = 0; - for (int i = 0; i < iterations; i++) { - int data_id = i * iteration_stride + seq_lane; - if (data_id < seq_length) { - low_data[i].x = __expf(low_data[i].x - max_val); - low_data[i].y = __expf(low_data[i].y - max_val); - high_data[i].x = __expf(high_data[i].x - max_val); - high_data[i].y = __expf(high_data[i].y - max_val); - - sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y); - } - } - - for (int i = 1; i < tbSize; i *= 2) { sum += g.shfl_xor(sum, i); } - - if (seq_length > tbSize) { - if (lane == 0) partialSum[wid] = sum; - b.sync(); - - if (lane < warp_num) sum = partialSum[lane]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - int iters = warp_num; - if (seq_length < iteration_stride) - iters = warp_num / (iteration_stride / max_threads_in_sequence); - - for (int i = 1; i < iters; i *= 2) { sum += g.shfl_xor(sum, i); } - - sum = g.shfl(sum, threadIdx.x / tbSize); - } - - sum += 1e-6; - - for (int i = 0; i < iterations; i++) { - int data_id = i * iteration_stride + seq_lane; - if (data_id < seq_length) { - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - - low_data[i].x /= sum; - low_data[i].y /= sum; - high_data[i].x /= sum; - high_data[i].y /= sum; - - result_h[0] = __float22half2_rn(low_data[i]); - result_h[1] = __float22half2_rn(high_data[i]); - - val_cast[data_id] = result_f; - } - } - -#endif -} - -template -void launch_attn_softmax(T*, const T*, int, int, int, cudaStream_t); - -template <> -void launch_attn_softmax(float* vals, - const float* attn_mask, - int batch_size, - int heads, - int sequence_length, - cudaStream_t stream) -{ - const int threads = 128; - int seq_length4 = sequence_length / 4; - - dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads); - - int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads; - - dim3 block_dim(seq_length4 > threads ? 
((sequence_length + subblock_max_workload - 1) / - subblock_max_workload * threads) - : threads); - int iterations = - (sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads - : MAX_THREAD_ITERATIONS); - - if (sequence_length <= 8) - attn_softmax<2, (threads / 2), 2> - <<>>(vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 16) - attn_softmax<4, (threads / 4), 4> - <<>>(vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 32) - attn_softmax<8, (threads / 8), 8> - <<>>(vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 64) - attn_softmax<16, (threads / 16), 16> - <<>>(vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 128) - attn_softmax<32, (threads / 32), 32> - <<>>(vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 256) - attn_softmax<32, (threads / 64), 64> - <<>>(vals, attn_mask, heads, seq_length4, iterations); - else { - const int threads = 256; - dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads); - - int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads; - - dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) / - subblock_max_workload * threads) - : threads); - iterations = - (sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads - : MAX_THREAD_ITERATIONS); - if (sequence_length <= 512) - attn_softmax<32, (threads / 128), 128><<>>( - vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length < (MAX_THREADS * MAX_THREAD_ITERATIONS * 4)) - attn_softmax<32, 1, 128><<>>( - vals, attn_mask, heads, seq_length4, iterations); - else - throw std::runtime_error( - "Unsupport Seq_Length! 
Check the restriction of the max_threads and " - "max_thread_iterations!"); - } -} - -template <> -void launch_attn_softmax<__half>(__half* vals, - const __half* attn_mask, - int batch_size, - int heads, - int sequence_length, - cudaStream_t stream) -{ - const int threads = 128; - int seq_length4 = sequence_length / 4; - - dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads); - - int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads; - - dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) / - subblock_max_workload * threads) - : threads); - - int iterations = - (sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads - : MAX_THREAD_ITERATIONS); - - if (sequence_length <= 8) - attn_softmax<2, (threads / 2), 2> - <<>>(vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 16) - attn_softmax<4, (threads / 4), 4> - <<>>(vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 32) - attn_softmax<8, (threads / 8), 8> - <<>>(vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 64) - attn_softmax<16, (threads / 16), 16> - <<>>(vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 128) - attn_softmax<32, (threads / 32), 32> - <<>>(vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 256) - attn_softmax<32, (threads / 64), 64> - <<>>(vals, attn_mask, heads, seq_length4, iterations); - else { - const int threads = 256; - dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads); - - int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads; - - dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) / - subblock_max_workload * threads) - : threads); - iterations = - (sequence_length < subblock_max_workload ? 
(seq_length4 + threads - 1) / threads - : MAX_THREAD_ITERATIONS); - if (sequence_length <= 512) - attn_softmax<32, (threads / 128), 128><<>>( - vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length < (MAX_THREADS * MAX_THREAD_ITERATIONS * 4)) - attn_softmax<32, 1, 128><<>>( - vals, attn_mask, heads, seq_length4, iterations); - else - throw std::runtime_error( - "Unsupport Seq_Length! Check the restriction of the max_threads and " - "max_thread_iterations!"); - } -} - -template -__global__ void softmax_backward_kernel(T* out_grad, const T* soft_inp, int seq_length) -{ - __shared__ float partialSum[MAX_WARP_NUM]; - - int warp_num = blockDim.x >> WARP_SIZE_BITS; // warp-count = num_threads / WARP_SIZE (32) - - int iteration_stride = blockDim.x; - int block_width = blockStride * seq_length; - - int iterations = (seq_length < (MAX_THREAD_ITERATIONS * iteration_stride) - ? (seq_length + iteration_stride - 1) / iteration_stride - : MAX_THREAD_ITERATIONS); - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - - int wid = id >> WARP_SIZE_BITS; - int lane = id & 0x1f; - - T val_reg[MAX_THREAD_ITERATIONS]; - T soft_reg[MAX_THREAD_ITERATIONS]; - float grad_reg = 0.0f; - -#pragma unroll - for (int i = 0; i < iterations; i++) { - int data_id = i * iteration_stride + id; - if (data_id < block_width) { - val_reg[i] = out_grad[row * block_width + data_id]; - soft_reg[i] = soft_inp[row * block_width + data_id]; - - grad_reg += ((float)val_reg[i] * - (float)soft_reg[i]); // if done in half, the multiplication, we may lose - // 2% of accuracy in computation!! 
- } - } - for (int i = 1; i < tbSize; i *= 2) grad_reg += g.shfl_xor(grad_reg, i); - - if (seq_length > tbSize) { - if (lane == 0) partialSum[wid] = grad_reg; - b.sync(); - - if (lane < warp_num) grad_reg = partialSum[lane]; - - int iters = warp_num; - if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length); - - for (int i = 1; i < iters; i *= 2) grad_reg += g.shfl_xor(grad_reg, i); - - grad_reg = g.shfl(grad_reg, id / tbSize); - } - - for (int i = 0; i < iterations; i++) { - int data_id = i * iteration_stride + id; - if (data_id < block_width) { - float temp = (float)soft_reg[i] * ((float)val_reg[i] - grad_reg); - out_grad[row * block_width + data_id] = (T)temp; - } - } -} - -template -__global__ void softmax_backward_kernel_v2(T* grad /* input & output*/, - const T* output, - int softmax_length) -{ - int batch_idx = blockIdx.x * blockDim.y + threadIdx.y; - int offset = batch_idx * softmax_length + threadIdx.x; - - grad += offset; - output += offset; - - T grad_reg[ITERATIONS]; - T output_reg[ITERATIONS]; - float sum = 0.0; - -#pragma unroll - for (int i = 0; i < ITERATIONS; ++i) { - int curr_idx = threadIdx.x + i * WARP_SIZE; - if (curr_idx < softmax_length) { - grad_reg[i] = grad[i * WARP_SIZE]; - output_reg[i] = output[i * WARP_SIZE]; - sum += (float)grad_reg[i] * (float)output_reg[i]; - } - } - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - for (int i = 1; i < WARP_SIZE; i <<= 1) sum += g.shfl_xor(sum, i); - -#pragma unroll - for (int i = 0; i < ITERATIONS; ++i) { - int curr_idx = threadIdx.x + i * WARP_SIZE; - if (curr_idx < softmax_length) - grad[i * WARP_SIZE] = (float)output_reg[i] * ((float)grad_reg[i] - sum); - } -} - -template -void launch_attn_softmax_backward_v2(T* out_grad, - const T* soft_inp, - int batch_size, - int heads, - int seq_length, - cudaStream_t stream) -{ - const int warps_per_block = 4; - dim3 grid_dim(batch_size * heads * seq_length / 
warps_per_block); - dim3 block_dim(WARP_SIZE, warps_per_block); - - if (seq_length <= 32) - softmax_backward_kernel_v2 - <<>>(out_grad, soft_inp, seq_length); - else if (seq_length <= 64) - softmax_backward_kernel_v2 - <<>>(out_grad, soft_inp, seq_length); - else if (seq_length <= 128) - softmax_backward_kernel_v2 - <<>>(out_grad, soft_inp, seq_length); - else if (seq_length <= 256) - softmax_backward_kernel_v2 - <<>>(out_grad, soft_inp, seq_length); - else if (seq_length <= 384) - softmax_backward_kernel_v2 - <<>>(out_grad, soft_inp, seq_length); - else if (seq_length <= 512) - softmax_backward_kernel_v2 - <<>>(out_grad, soft_inp, seq_length); - else if (seq_length <= 768) - softmax_backward_kernel_v2 - <<>>(out_grad, soft_inp, seq_length); - else if (seq_length <= 1024) - softmax_backward_kernel_v2 - <<>>(out_grad, soft_inp, seq_length); - else if (seq_length <= 2048) - softmax_backward_kernel_v2 - <<>>(out_grad, soft_inp, seq_length); - else - throw std::runtime_error( - std::string("Special sequence length found in softmax backward, seq_length: ") + - std::to_string(seq_length)); -} - -template void launch_attn_softmax_backward_v2<__half>(__half* out_grad, - const __half* soft_inp, - int batch_size, - int heads, - int seq_length, - cudaStream_t stream); -template void launch_attn_softmax_backward_v2(float* out_grad, - const float* soft_inp, - int batch_size, - int heads, - int seq_length, - cudaStream_t stream); diff --git a/deepspeed/ops/csrc/transformer_bak/softmax_kernels.hip b/deepspeed/ops/csrc/transformer_bak/softmax_kernels.hip deleted file mode 100644 index afe65b0..0000000 --- a/deepspeed/ops/csrc/transformer_bak/softmax_kernels.hip +++ /dev/null @@ -1,597 +0,0 @@ -// !!! This is a file automatically generated by hipify!!! 
-#include "hip/hip_runtime.h" -#include -#include "custom_hip_layers.h" -#include "general_kernels_hip.h" - -namespace cg = cooperative_groups; - -dim3 get_attn_softmax_grid(int batch_size, int heads, int sequence_length, int threads) -{ - int seq_length4 = sequence_length / 4; - int block_compute_size = - (seq_length4 < threads ? (int)pow(2.0, floor(log2((float)(threads / seq_length4)))) : 1); - // Note that the Y and Z dimensions are limited to 65535, while X is basically unlimited: - // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications - // The batch size is typically relatively small, while the sequence length could potentially be - // arbitrarily large. We therefore place the batch size second to avoid hitting the Y limit. - unsigned x = heads * sequence_length / block_compute_size; - unsigned y = batch_size; - return {x, y}; -} - -// Fused attention + softmax -template -__global__ void attn_softmax(float* vals, - const float* attn_mask, - int heads, - int seq_length, - int iterations) -{ - __shared__ float partialSum[MAX_WARP_NUM]; - - int warp_num = blockDim.x >> WARP_SIZE_BITS; - - int iteration_stride = blockDim.x; - int block_width = blockStride * seq_length; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int batch = blockIdx.y; - int row = blockIdx.x; - int max_threads_in_sequence = ::max(seq_length, tbSeq); - int seq_lane = threadIdx.x % max_threads_in_sequence; - - int data_offset = batch * (gridDim.x * block_width) + row * block_width + - (threadIdx.x / max_threads_in_sequence) * seq_length; - int mask_offset = batch * seq_length; - - int wid = threadIdx.x >> WARP_SIZE_BITS; - int lane = threadIdx.x & 0x1f; - - float4* val_cast = reinterpret_cast(vals); - const float4* attn_mask_cast = reinterpret_cast(attn_mask); - - float4 data[MAX_THREAD_ITERATIONS]; - - float max_val = minus_infinity; - - for (int i = 0; i < iterations; i++) { - int 
data_id = i * iteration_stride + seq_lane; - if (data_id < seq_length) { - float4 mask = attn_mask_cast[mask_offset + data_id]; - data[i] = val_cast[data_offset + data_id]; - - data[i].x += mask.x; - data[i].y += mask.y; - data[i].z += mask.z; - data[i].w += mask.w; - - max_val = (data[i].x > max_val ? data[i].x : max_val); - max_val = (data[i].y > max_val ? data[i].y : max_val); - max_val = (data[i].z > max_val ? data[i].z : max_val); - max_val = (data[i].w > max_val ? data[i].w : max_val); - } else { - data[i].x = minus_infinity; - data[i].y = minus_infinity; - data[i].z = minus_infinity; - data[i].w = minus_infinity; - } - } - - for (int i = 1; i < tbSize; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? temp : max_val); - } - - if (seq_length > tbSize) { - if (lane == 0) partialSum[wid] = max_val; - b.sync(); - - if (lane < warp_num) max_val = partialSum[lane]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - int iters = warp_num; - if (seq_length < iteration_stride) - iters = warp_num / (iteration_stride / max_threads_in_sequence); - - for (int i = 1; i < iters; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? 
temp : max_val); - } - - max_val = g.shfl(max_val, threadIdx.x / tbSize); - } - - float sum = 0; - for (int i = 0; i < iterations; i++) { - data[i].x = __expf(data[i].x - max_val); - data[i].y = __expf(data[i].y - max_val); - data[i].z = __expf(data[i].z - max_val); - data[i].w = __expf(data[i].w - max_val); - - sum += (data[i].x + data[i].y + data[i].z + data[i].w); - } - - for (int i = 1; i < tbSize; i *= 2) { sum += g.shfl_xor(sum, i); } - - if (seq_length > tbSize) { - if (lane == 0) partialSum[wid] = sum; - b.sync(); - - if (lane < warp_num) sum = partialSum[lane]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - int iters = warp_num; - if (seq_length < iteration_stride) - iters = warp_num / (iteration_stride / max_threads_in_sequence); - - for (int i = 1; i < iters; i *= 2) { sum += g.shfl_xor(sum, i); } - - sum = g.shfl(sum, threadIdx.x / tbSize); - } - - sum += 1e-6; - - for (int i = 0; i < iterations; i++) { - data[i].x /= sum; - data[i].y /= sum; - data[i].z /= sum; - data[i].w /= sum; - - int data_id = i * iteration_stride + seq_lane; - if (data_id < seq_length) val_cast[data_offset + data_id] = data[i]; - } -} - -template -__global__ void attn_softmax(__half* vals, - const __half* attn_mask, - int heads, - int seq_length, - int iterations) -{ -#ifdef HALF_PRECISION_AVAILABLE - __shared__ float partialSum[MAX_WARP_NUM]; - - int warp_num = blockDim.x >> WARP_SIZE_BITS; - - int iteration_stride = blockDim.x; - int block_width = blockStride * seq_length; - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int batch = blockIdx.y; - int row = blockIdx.x; - int max_threads_in_sequence = ::max(seq_length, tbSeq); - int seq_lane = threadIdx.x % max_threads_in_sequence; - - int data_offset = batch * (gridDim.x * block_width) + row * block_width + - (threadIdx.x / max_threads_in_sequence) * seq_length; - int mask_offset = batch * seq_length; - - int wid = threadIdx.x >> WARP_SIZE_BITS; - int lane = 
threadIdx.x & 0x1f; - - float2* val_cast = reinterpret_cast(vals); - const float2* attn_mask_cast = reinterpret_cast(attn_mask); - - val_cast += data_offset; - attn_mask_cast += mask_offset; - - float2 low_data[MAX_THREAD_ITERATIONS]; - float2 high_data[MAX_THREAD_ITERATIONS]; - - float max_val = minus_infinity; - - for (int i = 0; i < iterations; i++) { - int data_id = i * iteration_stride + seq_lane; - if (data_id < seq_length) { - float2 data = val_cast[data_id]; - float2 mask = attn_mask_cast[data_id]; - - __half2* data_arr = reinterpret_cast<__half2*>(&data); - __half2* mask_arr = reinterpret_cast<__half2*>(&mask); - - low_data[i] = __half22float2(data_arr[0]); - high_data[i] = __half22float2(data_arr[1]); - float2 low_mask = __half22float2(mask_arr[0]); - float2 high_mask = __half22float2(mask_arr[1]); - - low_data[i].x += low_mask.x; - low_data[i].y += low_mask.y; - high_data[i].x += high_mask.x; - high_data[i].y += high_mask.y; - - max_val = (low_data[i].x > max_val ? low_data[i].x : max_val); - max_val = (low_data[i].y > max_val ? low_data[i].y : max_val); - max_val = (high_data[i].x > max_val ? high_data[i].x : max_val); - max_val = (high_data[i].y > max_val ? high_data[i].y : max_val); - } - } - - for (int i = 1; i < tbSize; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? temp : max_val); - } - - if (seq_length > tbSize) { - if (lane == 0) partialSum[wid] = max_val; - b.sync(); - - if (lane < warp_num) max_val = partialSum[lane]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - int iters = warp_num; - if (seq_length < iteration_stride) - iters = warp_num / (iteration_stride / max_threads_in_sequence); - - for (int i = 1; i < iters; i *= 2) { - auto temp = g.shfl_xor(max_val, i); - max_val = (temp > max_val ? 
temp : max_val); - } - - max_val = g.shfl(max_val, threadIdx.x / tbSize); - } - - float sum = 0; - for (int i = 0; i < iterations; i++) { - int data_id = i * iteration_stride + seq_lane; - if (data_id < seq_length) { - low_data[i].x = __expf(low_data[i].x - max_val); - low_data[i].y = __expf(low_data[i].y - max_val); - high_data[i].x = __expf(high_data[i].x - max_val); - high_data[i].y = __expf(high_data[i].y - max_val); - - sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y); - } - } - - for (int i = 1; i < tbSize; i *= 2) { sum += g.shfl_xor(sum, i); } - - if (seq_length > tbSize) { - if (lane == 0) partialSum[wid] = sum; - b.sync(); - - if (lane < warp_num) sum = partialSum[lane]; - -#ifndef __STOCHASTIC_MODE__ - b.sync(); -#endif - - int iters = warp_num; - if (seq_length < iteration_stride) - iters = warp_num / (iteration_stride / max_threads_in_sequence); - - for (int i = 1; i < iters; i *= 2) { sum += g.shfl_xor(sum, i); } - - sum = g.shfl(sum, threadIdx.x / tbSize); - } - - sum += 1e-6; - - for (int i = 0; i < iterations; i++) { - int data_id = i * iteration_stride + seq_lane; - if (data_id < seq_length) { - float2 result_f; - __half2* result_h = reinterpret_cast<__half2*>(&result_f); - - low_data[i].x /= sum; - low_data[i].y /= sum; - high_data[i].x /= sum; - high_data[i].y /= sum; - - result_h[0] = __float22half2_rn(low_data[i]); - result_h[1] = __float22half2_rn(high_data[i]); - - val_cast[data_id] = result_f; - } - } - -#endif -} - -template -void launch_attn_softmax(T*, const T*, int, int, int, hipStream_t); - -template <> -void launch_attn_softmax(float* vals, - const float* attn_mask, - int batch_size, - int heads, - int sequence_length, - hipStream_t stream) -{ - const int threads = 128; - int seq_length4 = sequence_length / 4; - - dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads); - - int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads; - - dim3 block_dim(seq_length4 > threads ? 
((sequence_length + subblock_max_workload - 1) / - subblock_max_workload * threads) - : threads); - int iterations = - (sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads - : MAX_THREAD_ITERATIONS); - - if (sequence_length <= 8) - hipLaunchKernelGGL(( attn_softmax<2, (threads / 2), 2>) - , dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 16) - hipLaunchKernelGGL(( attn_softmax<4, (threads / 4), 4>) - , dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 32) - hipLaunchKernelGGL(( attn_softmax<8, (threads / 8), 8>) - , dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 64) - hipLaunchKernelGGL(( attn_softmax<16, (threads / 16), 16>) - , dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 128) - hipLaunchKernelGGL(( attn_softmax<32, (threads / 32), 32>) - , dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 256) - hipLaunchKernelGGL(( attn_softmax<32, (threads / 64), 64>) - , dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations); - else { - const int threads = 256; - dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads); - - int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads; - - dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) / - subblock_max_workload * threads) - : threads); - iterations = - (sequence_length < subblock_max_workload ? 
(seq_length4 + threads - 1) / threads - : MAX_THREAD_ITERATIONS); - if (sequence_length <= 512) - hipLaunchKernelGGL(( attn_softmax<32, (threads / 128), 128>), dim3(grid_dim), dim3(block_dim), 0, stream, - vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length < (MAX_THREADS * MAX_THREAD_ITERATIONS * 4)) - hipLaunchKernelGGL(( attn_softmax<32, 1, 128>), dim3(grid_dim), dim3(block_dim), 0, stream, - vals, attn_mask, heads, seq_length4, iterations); - else - throw std::runtime_error( - "Unsupport Seq_Length! Check the restriction of the max_threads and " - "max_thread_iterations!"); - } -} - -template <> -void launch_attn_softmax<__half>(__half* vals, - const __half* attn_mask, - int batch_size, - int heads, - int sequence_length, - hipStream_t stream) -{ - const int threads = 128; - int seq_length4 = sequence_length / 4; - - dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads); - - int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads; - - dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) / - subblock_max_workload * threads) - : threads); - - int iterations = - (sequence_length < subblock_max_workload ? 
(seq_length4 + threads - 1) / threads - : MAX_THREAD_ITERATIONS); - - if (sequence_length <= 8) - hipLaunchKernelGGL(( attn_softmax<2, (threads / 2), 2>) - , dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 16) - hipLaunchKernelGGL(( attn_softmax<4, (threads / 4), 4>) - , dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 32) - hipLaunchKernelGGL(( attn_softmax<8, (threads / 8), 8>) - , dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 64) - hipLaunchKernelGGL(( attn_softmax<16, (threads / 16), 16>) - , dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 128) - hipLaunchKernelGGL(( attn_softmax<32, (threads / 32), 32>) - , dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length <= 256) - hipLaunchKernelGGL(( attn_softmax<32, (threads / 64), 64>) - , dim3(grid_dim), dim3(block_dim), 0, stream, vals, attn_mask, heads, seq_length4, iterations); - else { - const int threads = 256; - dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads); - - int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads; - - dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) / - subblock_max_workload * threads) - : threads); - iterations = - (sequence_length < subblock_max_workload ? 
(seq_length4 + threads - 1) / threads - : MAX_THREAD_ITERATIONS); - if (sequence_length <= 512) - hipLaunchKernelGGL(( attn_softmax<32, (threads / 128), 128>), dim3(grid_dim), dim3(block_dim), 0, stream, - vals, attn_mask, heads, seq_length4, iterations); - else if (sequence_length < (MAX_THREADS * MAX_THREAD_ITERATIONS * 4)) - hipLaunchKernelGGL(( attn_softmax<32, 1, 128>), dim3(grid_dim), dim3(block_dim), 0, stream, - vals, attn_mask, heads, seq_length4, iterations); - else - throw std::runtime_error( - "Unsupport Seq_Length! Check the restriction of the max_threads and " - "max_thread_iterations!"); - } -} - -template -__global__ void softmax_backward_kernel(T* out_grad, const T* soft_inp, int seq_length) -{ - __shared__ float partialSum[MAX_WARP_NUM]; - - int warp_num = blockDim.x >> WARP_SIZE_BITS; // warp-count = num_threads / WARP_SIZE (32) - - int iteration_stride = blockDim.x; - int block_width = blockStride * seq_length; - - int iterations = (seq_length < (MAX_THREAD_ITERATIONS * iteration_stride) - ? (seq_length + iteration_stride - 1) / iteration_stride - : MAX_THREAD_ITERATIONS); - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - int row = blockIdx.x; - int id = threadIdx.x; - - int wid = id >> WARP_SIZE_BITS; - int lane = id & 0x1f; - - T val_reg[MAX_THREAD_ITERATIONS]; - T soft_reg[MAX_THREAD_ITERATIONS]; - float grad_reg = 0.0f; - -#pragma unroll - for (int i = 0; i < iterations; i++) { - int data_id = i * iteration_stride + id; - if (data_id < block_width) { - val_reg[i] = out_grad[row * block_width + data_id]; - soft_reg[i] = soft_inp[row * block_width + data_id]; - - grad_reg += ((float)val_reg[i] * - (float)soft_reg[i]); // if done in half, the multiplication, we may lose - // 2% of accuracy in computation!! 
- } - } - for (int i = 1; i < tbSize; i *= 2) grad_reg += g.shfl_xor(grad_reg, i); - - if (seq_length > tbSize) { - if (lane == 0) partialSum[wid] = grad_reg; - b.sync(); - - if (lane < warp_num) grad_reg = partialSum[lane]; - - int iters = warp_num; - if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length); - - for (int i = 1; i < iters; i *= 2) grad_reg += g.shfl_xor(grad_reg, i); - - grad_reg = g.shfl(grad_reg, id / tbSize); - } - - for (int i = 0; i < iterations; i++) { - int data_id = i * iteration_stride + id; - if (data_id < block_width) { - float temp = (float)soft_reg[i] * ((float)val_reg[i] - grad_reg); - out_grad[row * block_width + data_id] = (T)temp; - } - } -} - -template -__global__ void softmax_backward_kernel_v2(T* grad /* input & output*/, - const T* output, - int softmax_length) -{ - int batch_idx = blockIdx.x * blockDim.y + threadIdx.y; - int offset = batch_idx * softmax_length + threadIdx.x; - - grad += offset; - output += offset; - - T grad_reg[ITERATIONS]; - T output_reg[ITERATIONS]; - float sum = 0.0; - -#pragma unroll - for (int i = 0; i < ITERATIONS; ++i) { - int curr_idx = threadIdx.x + i * WARP_SIZE; - if (curr_idx < softmax_length) { - grad_reg[i] = grad[i * WARP_SIZE]; - output_reg[i] = output[i * WARP_SIZE]; - sum += (float)grad_reg[i] * (float)output_reg[i]; - } - } - - cg::thread_block b = cg::this_thread_block(); - cg::thread_block_tile g = cg::tiled_partition(b); - - for (int i = 1; i < WARP_SIZE; i <<= 1) sum += g.shfl_xor(sum, i); - -#pragma unroll - for (int i = 0; i < ITERATIONS; ++i) { - int curr_idx = threadIdx.x + i * WARP_SIZE; - if (curr_idx < softmax_length) - grad[i * WARP_SIZE] = (float)output_reg[i] * ((float)grad_reg[i] - sum); - } -} - -template -void launch_attn_softmax_backward_v2(T* out_grad, - const T* soft_inp, - int batch_size, - int heads, - int seq_length, - hipStream_t stream) -{ - const int warps_per_block = 4; - dim3 grid_dim(batch_size * heads * seq_length / 
warps_per_block); - dim3 block_dim(WARP_SIZE, warps_per_block); - - if (seq_length <= 32) - hipLaunchKernelGGL(( softmax_backward_kernel_v2) - , dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length); - else if (seq_length <= 64) - hipLaunchKernelGGL(( softmax_backward_kernel_v2) - , dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length); - else if (seq_length <= 128) - hipLaunchKernelGGL(( softmax_backward_kernel_v2) - , dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length); - else if (seq_length <= 256) - hipLaunchKernelGGL(( softmax_backward_kernel_v2) - , dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length); - else if (seq_length <= 384) - hipLaunchKernelGGL(( softmax_backward_kernel_v2) - , dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length); - else if (seq_length <= 512) - hipLaunchKernelGGL(( softmax_backward_kernel_v2) - , dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length); - else if (seq_length <= 768) - hipLaunchKernelGGL(( softmax_backward_kernel_v2) - , dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length); - else if (seq_length <= 1024) - hipLaunchKernelGGL(( softmax_backward_kernel_v2) - , dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length); - else if (seq_length <= 2048) - hipLaunchKernelGGL(( softmax_backward_kernel_v2) - , dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, seq_length); - else - throw std::runtime_error( - std::string("Special sequence length found in softmax backward, seq_length: ") + - std::to_string(seq_length)); -} - -template void launch_attn_softmax_backward_v2<__half>(__half* out_grad, - const __half* soft_inp, - int batch_size, - int heads, - int seq_length, - hipStream_t stream); -template void launch_attn_softmax_backward_v2(float* out_grad, - const float* soft_inp, - int batch_size, - int heads, - int seq_length, - hipStream_t 
stream); diff --git a/deepspeed/ops/csrc/utils/flatten_unflatten.cpp b/deepspeed/ops/csrc/utils/flatten_unflatten.cpp deleted file mode 100644 index 7d16c5c..0000000 --- a/deepspeed/ops/csrc/utils/flatten_unflatten.cpp +++ /dev/null @@ -1,25 +0,0 @@ -/* - Copyright 2020 The Microsoft DeepSpeed Team - Copyright NVIDIA/apex - This file is adapted from fused adam in NVIDIA/apex, commit a109f85 -*/ - -#include -#include -// https://github.com/pytorch/pytorch/blob/master/torch/csrc/utils/tensor_flatten.h - -at::Tensor flatten(std::vector tensors) -{ - return torch::utils::flatten_dense_tensors(tensors); -} - -std::vector unflatten(at::Tensor flat, std::vector tensors) -{ - return torch::utils::unflatten_dense_tensors(flat, tensors); -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("flatten", &flatten, "Flatten dense tensors"); - m.def("unflatten", &unflatten, "Unflatten dense tensors"); -} diff --git a/deepspeed/ops/lamb/__init__.py b/deepspeed/ops/lamb/__init__.py index 9d3448d..942d766 100644 --- a/deepspeed/ops/lamb/__init__.py +++ b/deepspeed/ops/lamb/__init__.py @@ -1 +1,3 @@ +'''Copyright The Microsoft DeepSpeed Team''' + from .fused_lamb import FusedLamb diff --git a/deepspeed/ops/lamb/fused_lamb.py b/deepspeed/ops/lamb/fused_lamb.py index e9210cd..33a1461 100644 --- a/deepspeed/ops/lamb/fused_lamb.py +++ b/deepspeed/ops/lamb/fused_lamb.py @@ -6,7 +6,7 @@ This file is adapted from NVIDIA/apex/optimizer/fused_adam and implements the LA ''' import types import torch -from ..op_builder import FusedLambBuilder +from deepspeed.ops.op_builder import FusedLambBuilder class FusedLamb(torch.optim.Optimizer): diff --git a/deepspeed/ops/op_builder b/deepspeed/ops/op_builder new file mode 120000 index 0000000..db4f9c3 --- /dev/null +++ b/deepspeed/ops/op_builder @@ -0,0 +1 @@ +../../op_builder \ No newline at end of file diff --git a/deepspeed/ops/op_builder/__init__.py b/deepspeed/ops/op_builder/__init__.py deleted file mode 100644 index dcac710..0000000 --- 
a/deepspeed/ops/op_builder/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -""" -Copyright 2020 The Microsoft DeepSpeed Team -""" -from .cpu_adam import CPUAdamBuilder -from .cpu_adagrad import CPUAdagradBuilder -from .fused_adam import FusedAdamBuilder -from .fused_lamb import FusedLambBuilder -from .sparse_attn import SparseAttnBuilder -from .transformer import TransformerBuilder -from .stochastic_transformer import StochasticTransformerBuilder -from .utils import UtilsBuilder -from .async_io import AsyncIOBuilder -from .transformer_inference import InferenceBuilder -from .quantizer import QuantizerBuilder -from .builder import get_default_compute_capabilities, OpBuilder - -# TODO: infer this list instead of hard coded -# List of all available ops -__op_builders__ = [ - CPUAdamBuilder(), - CPUAdagradBuilder(), - FusedAdamBuilder(), - FusedLambBuilder(), - SparseAttnBuilder(), - TransformerBuilder(), - StochasticTransformerBuilder(), - AsyncIOBuilder(), - UtilsBuilder(), - QuantizerBuilder(), - InferenceBuilder() -] -ALL_OPS = {op.name: op for op in __op_builders__} diff --git a/deepspeed/ops/op_builder/async_io.py b/deepspeed/ops/op_builder/async_io.py deleted file mode 100644 index aec7911..0000000 --- a/deepspeed/ops/op_builder/async_io.py +++ /dev/null @@ -1,106 +0,0 @@ -""" -Copyright 2020 The Microsoft DeepSpeed Team -""" -import distutils.spawn -import subprocess - -from .builder import OpBuilder - - -class AsyncIOBuilder(OpBuilder): - BUILD_VAR = "DS_BUILD_AIO" - NAME = "async_io" - - def __init__(self): - super().__init__(name=self.NAME) - - def absolute_name(self): - return f'deepspeed.ops.aio.{self.NAME}_op' - - def sources(self): - return [ - 'csrc/aio/py_lib/deepspeed_py_copy.cpp', - 'csrc/aio/py_lib/py_ds_aio.cpp', - 'csrc/aio/py_lib/deepspeed_py_aio.cpp', - 'csrc/aio/py_lib/deepspeed_py_aio_handle.cpp', - 'csrc/aio/py_lib/deepspeed_aio_thread.cpp', - 'csrc/aio/common/deepspeed_aio_utils.cpp', - 'csrc/aio/common/deepspeed_aio_common.cpp', - 
'csrc/aio/common/deepspeed_aio_types.cpp' - ] - - def include_paths(self): - return ['csrc/aio/py_lib', 'csrc/aio/common'] - - def cxx_args(self): - # -O0 for improved debugging, since performance is bound by I/O - CPU_ARCH = self.cpu_arch() - SIMD_WIDTH = self.simd_width() - return [ - '-g', - '-Wall', - '-O0', - '-std=c++14', - '-shared', - '-fPIC', - '-Wno-reorder', - CPU_ARCH, - '-fopenmp', - SIMD_WIDTH, - '-laio', - ] - - def extra_ldflags(self): - return ['-laio'] - - def check_for_libaio_pkg(self): - libs = dict( - dpkg=["-l", - "libaio-dev", - "apt"], - pacman=["-Q", - "libaio", - "pacman"], - rpm=["-q", - "libaio-devel", - "yum"], - ) - - found = False - for pkgmgr, data in libs.items(): - flag, lib, tool = data - path = distutils.spawn.find_executable(pkgmgr) - if path is not None: - cmd = f"{pkgmgr} {flag} {lib}" - result = subprocess.Popen(cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - shell=True) - if result.wait() == 0: - found = True - else: - self.warning( - f"{self.NAME}: please install the {lib} package with {tool}") - break - return found - - def is_compatible(self, verbose=True): - # Check for the existence of libaio by using distutils - # to compile and link a test program that calls io_submit, - # which is a function provided by libaio that is used in the async_io op. - # If needed, one can define -I and -L entries in CFLAGS and LDFLAGS - # respectively to specify the directories for libaio.h and libaio.so. - aio_compatible = self.has_function('io_submit', ('aio', )) - if verbose and not aio_compatible: - self.warning( - f"{self.NAME} requires the dev libaio .so object and headers but these were not found." - ) - - # Check for the libaio package via known package managers - # to print suggestions on which package to install. - self.check_for_libaio_pkg() - - self.warning( - "If libaio is already installed (perhaps from source), try setting the CFLAGS and LDFLAGS environment variables to where it can be found." 
- ) - return super().is_compatible(verbose) and aio_compatible diff --git a/deepspeed/ops/op_builder/builder.py b/deepspeed/ops/op_builder/builder.py deleted file mode 100644 index 8c5396f..0000000 --- a/deepspeed/ops/op_builder/builder.py +++ /dev/null @@ -1,699 +0,0 @@ -""" -Copyright 2020 The Microsoft DeepSpeed Team -""" -import os -import sys -import time -import json -import importlib -from pathlib import Path -import subprocess -import shlex -import shutil -import tempfile -import distutils.ccompiler -import distutils.log -import distutils.sysconfig -from distutils.errors import CompileError, LinkError -from abc import ABC, abstractmethod - -YELLOW = '\033[93m' -END = '\033[0m' -WARNING = f"{YELLOW} [WARNING] {END}" - -DEFAULT_TORCH_EXTENSION_PATH = "/tmp/torch_extensions" -DEFAULT_COMPUTE_CAPABILITIES = "6.0;6.1;7.0" - -try: - import torch -except ImportError: - print( - f"{WARNING} unable to import torch, please install it if you want to pre-compile any deepspeed ops." - ) -else: - TORCH_MAJOR = int(torch.__version__.split('.')[0]) - TORCH_MINOR = int(torch.__version__.split('.')[1]) - - -def installed_cuda_version(): - import torch.utils.cpp_extension - cuda_home = torch.utils.cpp_extension.CUDA_HOME - assert cuda_home is not None, "CUDA_HOME does not exist, unable to compile CUDA op(s)" - # Ensure there is not a cuda version mismatch between torch and nvcc compiler - output = subprocess.check_output([cuda_home + "/bin/nvcc", - "-V"], - universal_newlines=True) - output_split = output.split() - release_idx = output_split.index("release") - release = output_split[release_idx + 1].replace(',', '').split(".") - # Ignore patch versions, only look at major + minor - cuda_major, cuda_minor = release[:2] - installed_cuda_version = ".".join(release[:2]) - return int(cuda_major), int(cuda_minor) - - -def get_default_compute_capabilities(): - compute_caps = DEFAULT_COMPUTE_CAPABILITIES - import torch.utils.cpp_extension - if torch.utils.cpp_extension.CUDA_HOME is 
not None and installed_cuda_version( - )[0] >= 11: - if installed_cuda_version()[0] == 11 and installed_cuda_version()[1] == 0: - # Special treatment of CUDA 11.0 because compute_86 is not supported. - compute_caps += ";8.0" - else: - compute_caps += ";8.0;8.6" - return compute_caps - - -# list compatible minor CUDA versions - so that for example pytorch built with cuda-11.0 can be used -# to build deepspeed and system-wide installed cuda 11.2 -cuda_minor_mismatch_ok = { - 10: [ - "10.0", - "10.1", - "10.2", - ], - 11: [ - "11.0", - "11.1", - "11.2", - "11.3", - "11.4", - "11.5", - "11.6", - ], -} - - -def assert_no_cuda_mismatch(): - cuda_major, cuda_minor = installed_cuda_version() - sys_cuda_version = f'{cuda_major}.{cuda_minor}' - torch_cuda_version = ".".join(torch.version.cuda.split('.')[:2]) - # This is a show-stopping error, should probably not proceed past this - if sys_cuda_version != torch_cuda_version: - if (cuda_major in cuda_minor_mismatch_ok - and sys_cuda_version in cuda_minor_mismatch_ok[cuda_major] - and torch_cuda_version in cuda_minor_mismatch_ok[cuda_major]): - print(f"Installed CUDA version {sys_cuda_version} does not match the " - f"version torch was compiled with {torch.version.cuda} " - "but since the APIs are compatible, accepting this combination") - return - raise Exception( - f"Installed CUDA version {sys_cuda_version} does not match the " - f"version torch was compiled with {torch.version.cuda}, unable to compile " - "cuda/cpp extensions without a matching cuda version.") - - -class OpBuilder(ABC): - _rocm_version = None - _is_rocm_pytorch = None - - def __init__(self, name): - self.name = name - self.jit_mode = False - - @abstractmethod - def absolute_name(self): - ''' - Returns absolute build path for cases where the op is pre-installed, e.g., deepspeed.ops.adam.cpu_adam - will be installed as something like: deepspeed/ops/adam/cpu_adam.so - ''' - pass - - @abstractmethod - def sources(self): - ''' - Returns list of source files for 
your op, relative to root of deepspeed package (i.e., DeepSpeed/deepspeed) - ''' - pass - - def hipify_extension(self): - pass - - @staticmethod - def assert_torch_info(torch_info): - install_torch_version = torch_info['version'] - install_cuda_version = torch_info['cuda_version'] - install_hip_version = torch_info['hip_version'] - - if not OpBuilder.is_rocm_pytorch(): - current_cuda_version = ".".join(torch.version.cuda.split('.')[:2]) - else: - current_hip_version = ".".join(torch.version.hip.split('.')[:2]) - - current_torch_version = ".".join(torch.__version__.split('.')[:2]) - - if not OpBuilder.is_rocm_pytorch(): - if install_cuda_version != current_cuda_version or install_torch_version != current_torch_version: - raise RuntimeError( - "PyTorch and CUDA version mismatch! DeepSpeed ops were compiled and installed " - "with a different version than what is being used at runtime. Please re-install " - f"DeepSpeed or switch torch versions. DeepSpeed install versions: " - f"torch={install_torch_version}, cuda={install_cuda_version}, runtime versions:" - f"torch={current_torch_version}, cuda={current_cuda_version}") - else: - if install_hip_version != current_hip_version or install_torch_version != current_torch_version: - raise RuntimeError( - "PyTorch and HIP version mismatch! DeepSpeed ops were compiled and installed " - "with a different version than what is being used at runtime. Please re-install " - f"DeepSpeed or switch torch versions. 
DeepSpeed install versions: " - f"torch={install_torch_version}, hip={install_hip_version}, runtime versions:" - f"torch={current_torch_version}, hip={current_hip_version}") - - @staticmethod - def is_rocm_pytorch(): - if OpBuilder._is_rocm_pytorch is not None: - return OpBuilder._is_rocm_pytorch - - _is_rocm_pytorch = False - try: - import torch - except ImportError: - pass - else: - if TORCH_MAJOR > 1 or (TORCH_MAJOR == 1 and TORCH_MINOR >= 5): - _is_rocm_pytorch = hasattr(torch.version, - 'hip') and torch.version.hip is not None - if _is_rocm_pytorch: - from torch.utils.cpp_extension import ROCM_HOME - _is_rocm_pytorch = ROCM_HOME is not None - OpBuilder._is_rocm_pytorch = _is_rocm_pytorch - return OpBuilder._is_rocm_pytorch - - @staticmethod - def installed_rocm_version(): - if OpBuilder._rocm_version: - return OpBuilder._rocm_version - - ROCM_MAJOR = '0' - ROCM_MINOR = '0' - if OpBuilder.is_rocm_pytorch(): - from torch.utils.cpp_extension import ROCM_HOME - #with open('/opt/rocm/.info/version-dev', 'r') as file: - with open('/opt/dtk-22.04/.info/version-dev', 'r') as file: - ROCM_VERSION_DEV_RAW = file.read() - ROCM_MAJOR = ROCM_VERSION_DEV_RAW.split('.')[0] - ROCM_MINOR = ROCM_VERSION_DEV_RAW.split('.')[1] - OpBuilder._rocm_version = (int(ROCM_MAJOR), int(ROCM_MINOR)) - return OpBuilder._rocm_version - - def include_paths(self): - ''' - Returns list of include paths, relative to root of deepspeed package (i.e., DeepSpeed/deepspeed) - ''' - return [] - - def nvcc_args(self): - ''' - Returns optional list of compiler flags to forward to nvcc when building CUDA sources - ''' - return [] - - def cxx_args(self): - ''' - Returns optional list of compiler flags to forward to the build - ''' - return [] - - def is_compatible(self, verbose=True): - ''' - Check if all non-python dependencies are satisfied to build this op - ''' - return True - - def extra_ldflags(self): - return [] - - def libraries_installed(self, libraries): - valid = False - check_cmd = 'dpkg -l' - 
for lib in libraries: - result = subprocess.Popen(f'dpkg -l {lib}', - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - shell=True) - valid = valid or result.wait() == 0 - return valid - - def has_function(self, funcname, libraries, verbose=False): - ''' - Test for existence of a function within a tuple of libraries. - - This is used as a smoke test to check whether a certain library is available. - As a test, this creates a simple C program that calls the specified function, - and then distutils is used to compile that program and link it with the specified libraries. - Returns True if both the compile and link are successful, False otherwise. - ''' - tempdir = None # we create a temporary directory to hold various files - filestderr = None # handle to open file to which we redirect stderr - oldstderr = None # file descriptor for stderr - try: - # Echo compile and link commands that are used. - if verbose: - distutils.log.set_verbosity(1) - - # Create a compiler object. - compiler = distutils.ccompiler.new_compiler(verbose=verbose) - - # Configure compiler and linker to build according to Python install. - distutils.sysconfig.customize_compiler(compiler) - - # Create a temporary directory to hold test files. - tempdir = tempfile.mkdtemp() - - # Define a simple C program that calls the function in question - prog = "void %s(void); int main(int argc, char** argv) { %s(); return 0; }" % ( - funcname, - funcname) - - # Write the test program to a file. - filename = os.path.join(tempdir, 'test.c') - with open(filename, 'w') as f: - f.write(prog) - - # Redirect stderr file descriptor to a file to silence compile/link warnings. 
- if not verbose: - filestderr = open(os.path.join(tempdir, 'stderr.txt'), 'w') - oldstderr = os.dup(sys.stderr.fileno()) - os.dup2(filestderr.fileno(), sys.stderr.fileno()) - - # Workaround for behavior in distutils.ccompiler.CCompiler.object_filenames() - # Otherwise, a local directory will be used instead of tempdir - drive, driveless_filename = os.path.splitdrive(filename) - root_dir = driveless_filename[0] if os.path.isabs(driveless_filename) else '' - output_dir = os.path.join(drive, root_dir) - - # Attempt to compile the C program into an object file. - cflags = shlex.split(os.environ.get('CFLAGS', "")) - objs = compiler.compile([filename], - output_dir=output_dir, - extra_preargs=self.strip_empty_entries(cflags)) - - # Attempt to link the object file into an executable. - # Be sure to tack on any libraries that have been specified. - ldflags = shlex.split(os.environ.get('LDFLAGS', "")) - compiler.link_executable(objs, - os.path.join(tempdir, - 'a.out'), - extra_preargs=self.strip_empty_entries(ldflags), - libraries=libraries) - - # Compile and link succeeded - return True - - except CompileError: - return False - - except LinkError: - return False - - except: - return False - - finally: - # Restore stderr file descriptor and close the stderr redirect file. - if oldstderr is not None: - os.dup2(oldstderr, sys.stderr.fileno()) - if filestderr is not None: - filestderr.close() - - # Delete the temporary directory holding the test program and stderr files. 
- if tempdir is not None: - shutil.rmtree(tempdir) - - def strip_empty_entries(self, args): - ''' - Drop any empty strings from the list of compile and link flags - ''' - return [x for x in args if len(x) > 0] - - def cpu_arch(self): - try: - from cpuinfo import get_cpu_info - except ImportError as e: - cpu_info = self._backup_cpuinfo() - if cpu_info is None: - return "-march=native" - - try: - cpu_info = get_cpu_info() - except Exception as e: - self.warning( - f"{self.name} attempted to use `py-cpuinfo` but failed (exception type: {type(e)}, {e}), " - "falling back to `lscpu` to get this information.") - cpu_info = self._backup_cpuinfo() - if cpu_info is None: - return "-march=native" - - if cpu_info['arch'].startswith('PPC_'): - # gcc does not provide -march on PowerPC, use -mcpu instead - return '-mcpu=native' - return '-march=native' - - def _backup_cpuinfo(self): - # Construct cpu_info dict from lscpu that is similar to what py-cpuinfo provides - if not self.command_exists('lscpu'): - self.warning( - f"{self.name} attempted to query 'lscpu' after failing to use py-cpuinfo " - "to detect the CPU architecture. 'lscpu' does not appear to exist on " - "your system, will fall back to use -march=native and non-vectorized execution." 
- ) - return None - result = subprocess.check_output('lscpu', shell=True) - result = result.decode('utf-8').strip().lower() - - cpu_info = {} - cpu_info['arch'] = None - cpu_info['flags'] = "" - if 'genuineintel' in result or 'authenticamd' in result: - cpu_info['arch'] = 'X86_64' - if 'avx512' in result: - cpu_info['flags'] += 'avx512,' - if 'avx2' in result: - cpu_info['flags'] += 'avx2' - elif 'ppc64le' in result: - cpu_info['arch'] = "PPC_" - - return cpu_info - - def simd_width(self): - try: - from cpuinfo import get_cpu_info - except ImportError as e: - cpu_info = self._backup_cpuinfo() - if cpu_info is None: - return '-D__SCALAR__' - - try: - cpu_info = get_cpu_info() - except Exception as e: - self.warning( - f"{self.name} attempted to use `py-cpuinfo` but failed (exception type: {type(e)}, {e}), " - "falling back to `lscpu` to get this information.") - cpu_info = self._backup_cpuinfo() - if cpu_info is None: - return '-D__SCALAR__' - - if cpu_info['arch'] == 'X86_64': - if 'avx512' in cpu_info['flags']: - return '-D__AVX512__' - elif 'avx2' in cpu_info['flags']: - return '-D__AVX256__' - return '-D__SCALAR__' - - def python_requirements(self): - ''' - Override if op wants to define special dependencies, otherwise will - take self.name and load requirements-.txt if it exists. - ''' - path = f'requirements/requirements-{self.name}.txt' - requirements = [] - if os.path.isfile(path): - with open(path, 'r') as fd: - requirements = [r.strip() for r in fd.readlines()] - return requirements - - def command_exists(self, cmd): - if '|' in cmd: - cmds = cmd.split("|") - else: - cmds = [cmd] - valid = False - for cmd in cmds: - result = subprocess.Popen(f'type {cmd}', stdout=subprocess.PIPE, shell=True) - valid = valid or result.wait() == 0 - - if not valid and len(cmds) > 1: - print( - f"{WARNING} {self.name} requires one of the following commands '{cmds}', but it does not exist!" 
- ) - elif not valid and len(cmds) == 1: - print( - f"{WARNING} {self.name} requires the '{cmd}' command, but it does not exist!" - ) - return valid - - def warning(self, msg): - print(f"{WARNING} {msg}") - - def deepspeed_src_path(self, code_path): - if os.path.isabs(code_path): - return code_path - else: - return os.path.join(Path(__file__).parent.parent.absolute(), code_path) - - def builder(self): - from torch.utils.cpp_extension import CppExtension - return CppExtension( - name=self.absolute_name(), - sources=self.strip_empty_entries(self.sources()), - include_dirs=self.strip_empty_entries(self.include_paths()), - extra_compile_args={'cxx': self.strip_empty_entries(self.cxx_args())}, - extra_link_args=self.strip_empty_entries(self.extra_ldflags())) - - def load(self, verbose=True): - from ...git_version_info import installed_ops, torch_info - if installed_ops[self.name]: - # Ensure the op we're about to load was compiled with the same - # torch/cuda versions we are currently using at runtime. - if isinstance(self, CUDAOpBuilder): - self.assert_torch_info(torch_info) - - return importlib.import_module(self.absolute_name()) - else: - return self.jit_load(verbose) - - def jit_load(self, verbose=True): - if not self.is_compatible(verbose): - raise RuntimeError( - f"Unable to JIT load the {self.name} op due to it not being compatible due to hardware/software issue." - ) - try: - import ninja - except ImportError: - raise RuntimeError( - f"Unable to JIT load the {self.name} op due to ninja not being installed." 
- ) - - if isinstance(self, CUDAOpBuilder) and not self.is_rocm_pytorch(): - assert_no_cuda_mismatch() - - self.jit_mode = True - from torch.utils.cpp_extension import load - - # Ensure directory exists to prevent race condition in some cases - ext_path = os.path.join( - os.environ.get('TORCH_EXTENSIONS_DIR', - DEFAULT_TORCH_EXTENSION_PATH), - self.name) - os.makedirs(ext_path, exist_ok=True) - - start_build = time.time() - sources = [self.deepspeed_src_path(path) for path in self.sources()] - extra_include_paths = [ - self.deepspeed_src_path(path) for path in self.include_paths() - ] - - # Torch will try and apply whatever CCs are in the arch list at compile time, - # we have already set the intended targets ourselves we know that will be - # needed at runtime. This prevents CC collisions such as multiple __half - # implementations. Stash arch list to reset after build. - torch_arch_list = None - if "TORCH_CUDA_ARCH_LIST" in os.environ: - torch_arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST") - os.environ["TORCH_CUDA_ARCH_LIST"] = "" - - op_module = load( - name=self.name, - sources=self.strip_empty_entries(sources), - extra_include_paths=self.strip_empty_entries(extra_include_paths), - extra_cflags=self.strip_empty_entries(self.cxx_args()), - extra_cuda_cflags=self.strip_empty_entries(self.nvcc_args()), - extra_ldflags=self.strip_empty_entries(self.extra_ldflags()), - verbose=verbose) - build_duration = time.time() - start_build - if verbose: - print(f"Time to load {self.name} op: {build_duration} seconds") - - # Reset arch list so we are not silently removing it for other possible use cases - if torch_arch_list: - os.environ["TORCH_CUDA_ARCH_LIST"] = torch_arch_list - - return op_module - - -class CUDAOpBuilder(OpBuilder): - def compute_capability_args(self, cross_compile_archs=None): - """ - Returns nvcc compute capability compile flags. - - 1. `TORCH_CUDA_ARCH_LIST` takes priority over `cross_compile_archs`. - 2. 
If neither is set default compute capabilities will be used - 3. Under `jit_mode` compute capabilities of all visible cards will be used plus PTX - - Format: - - - `TORCH_CUDA_ARCH_LIST` may use ; or whitespace separators. Examples: - - TORCH_CUDA_ARCH_LIST="6.1;7.5;8.6" pip install ... - TORCH_CUDA_ARCH_LIST="5.2 6.0 6.1 7.0 7.5 8.0 8.6+PTX" pip install ... - - - `cross_compile_archs` uses ; separator. - - """ - ccs = [] - if self.jit_mode: - # Compile for underlying architectures since we know those at runtime - for i in range(torch.cuda.device_count()): - CC_MAJOR, CC_MINOR = torch.cuda.get_device_capability(i) - cc = f"{CC_MAJOR}.{CC_MINOR}" - if cc not in ccs: - ccs.append(cc) - ccs = sorted(ccs) - ccs[-1] += '+PTX' - else: - # Cross-compile mode, compile for various architectures - # env override takes priority - cross_compile_archs_env = os.environ.get('TORCH_CUDA_ARCH_LIST', None) - if cross_compile_archs_env is not None: - if cross_compile_archs is not None: - print( - f"{WARNING} env var `TORCH_CUDA_ARCH_LIST={cross_compile_archs_env}` overrides `cross_compile_archs={cross_compile_archs}`" - ) - cross_compile_archs = cross_compile_archs_env.replace(' ', ';') - else: - if cross_compile_archs is None: - cross_compile_archs = get_default_compute_capabilities() - ccs = cross_compile_archs.split(';') - - args = [] - for cc in ccs: - num = cc[0] + cc[2] - args.append(f'-gencode=arch=compute_{num},code=sm_{num}') - if cc.endswith('+PTX'): - args.append(f'-gencode=arch=compute_{num},code=compute_{num}') - - return args - - def version_dependent_macros(self): - # Fix from apex that might be relevant for us as well, related to https://github.com/NVIDIA/apex/issues/456 - version_ge_1_1 = [] - if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0): - version_ge_1_1 = ['-DVERSION_GE_1_1'] - version_ge_1_3 = [] - if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2): - version_ge_1_3 = ['-DVERSION_GE_1_3'] - version_ge_1_5 = [] - if (TORCH_MAJOR > 1) 
or (TORCH_MAJOR == 1 and TORCH_MINOR > 4): - version_ge_1_5 = ['-DVERSION_GE_1_5'] - return version_ge_1_1 + version_ge_1_3 + version_ge_1_5 - - def is_compatible(self, verbose=True): - return super().is_compatible(verbose) - - def builder(self): - from torch.utils.cpp_extension import CUDAExtension - if not self.is_rocm_pytorch(): - assert_no_cuda_mismatch() - cuda_ext = CUDAExtension( - name=self.absolute_name(), - sources=self.strip_empty_entries(self.sources()), - include_dirs=self.strip_empty_entries(self.include_paths()), - libraries=self.strip_empty_entries(self.libraries_args()), - extra_compile_args={ - 'cxx': self.strip_empty_entries(self.cxx_args()), - 'nvcc': self.strip_empty_entries(self.nvcc_args()) - }) - if self.is_rocm_pytorch(): - # hip converts paths to absolute, this converts back to relative - sources = cuda_ext.sources - curr_file = Path(__file__).parent.parent # ds root - for i in range(len(sources)): - src = Path(sources[i]) - sources[i] = str(src.relative_to(curr_file)) - cuda_ext.sources = sources - return cuda_ext - - def hipify_extension(self): - if self.is_rocm_pytorch(): - from torch.utils.hipify import hipify_python - hipify_python.hipify( - project_directory=os.getcwd(), - output_directory=os.getcwd(), - header_include_dirs=self.include_paths(), - includes=[os.path.join(os.getcwd(), - '*')], - extra_files=[os.path.abspath(s) for s in self.sources()], - show_detailed=True, - is_pytorch_extension=True, - hipify_extra_files_only=True, - ) - - def cxx_args(self): - if sys.platform == "win32": - return ['-O2'] - else: - return ['-O3', '-std=c++14', '-g', '-Wno-reorder'] - - def nvcc_args(self): - args = ['-O3'] - if self.is_rocm_pytorch(): - ROCM_MAJOR, ROCM_MINOR = self.installed_rocm_version() - args += [ - '-std=c++14', - '-U__HIP_NO_HALF_OPERATORS__', - '-U__HIP_NO_HALF_CONVERSIONS__', - '-U__HIP_NO_HALF2_OPERATORS__', - '-DROCM_VERSION_MAJOR=%s' % ROCM_MAJOR, - '-DROCM_VERSION_MINOR=%s' % ROCM_MINOR - ] - else: - cuda_major, _ = 
installed_cuda_version() - args += [ - '--use_fast_math', - '-std=c++17' - if sys.platform == "win32" and cuda_major > 10 else '-std=c++14', - '-U__CUDA_NO_HALF_OPERATORS__', - '-U__CUDA_NO_HALF_CONVERSIONS__', - '-U__CUDA_NO_HALF2_OPERATORS__' - ] - args += self.compute_capability_args() - return args - - def libraries_args(self): - if sys.platform == "win32": - return ['cublas', 'curand'] - else: - return [] - - -class TorchCPUOpBuilder(CUDAOpBuilder): - def extra_ldflags(self): - if not self.is_rocm_pytorch(): - return ['-lcurand'] - else: - return [] - - def cxx_args(self): - import torch - if not self.is_rocm_pytorch(): - CUDA_LIB64 = os.path.join(torch.utils.cpp_extension.CUDA_HOME, "lib64") - else: - CUDA_LIB64 = os.path.join(torch.utils.cpp_extension.ROCM_HOME, "lib") - CPU_ARCH = self.cpu_arch() - SIMD_WIDTH = self.simd_width() - - args = super().cxx_args() - ###aiss debug0506########### - args += [ - f'-L{CUDA_LIB64}', - #'-lcudart', - #'-lcublas', - '-lrocblas', - '-lhipblas', - '-g', - CPU_ARCH, - '-fopenmp', - SIMD_WIDTH, - ] - return args diff --git a/deepspeed/ops/op_builder/cpu_adagrad.py b/deepspeed/ops/op_builder/cpu_adagrad.py deleted file mode 100644 index 24f0ff3..0000000 --- a/deepspeed/ops/op_builder/cpu_adagrad.py +++ /dev/null @@ -1,44 +0,0 @@ -""" -Copyright 2020 The Microsoft DeepSpeed Team -""" -import os -from .builder import TorchCPUOpBuilder - - -class CPUAdagradBuilder(TorchCPUOpBuilder): - BUILD_VAR = "DS_BUILD_CPU_ADAGRAD" - NAME = "cpu_adagrad" - - def __init__(self): - super().__init__(name=self.NAME) - - def absolute_name(self): - return f'deepspeed.ops.adagrad.{self.NAME}_op' - - def sources(self): - return ['csrc/adagrad/cpu_adagrad.cpp', 'csrc/common/custom_cuda_kernel.cu'] - - def include_paths(self): - import torch - if not self.is_rocm_pytorch(): - CUDA_INCLUDE = [os.path.join(torch.utils.cpp_extension.CUDA_HOME, "include")] - else: - CUDA_INCLUDE = [ - os.path.join(torch.utils.cpp_extension.ROCM_HOME, - "include"), - 
os.path.join(torch.utils.cpp_extension.ROCM_HOME, - "include", - "rocrand"), - os.path.join(torch.utils.cpp_extension.ROCM_HOME, - "include", - "hiprand"), - ] - return ['csrc/includes'] + CUDA_INCLUDE - - ##aiss add - def cxx_args(self): - - return [ - '-lrocblas', - '-lhipblas', - '-D__HIP_DEVICE_COMPILE__',] diff --git a/deepspeed/ops/op_builder/cpu_adam.py b/deepspeed/ops/op_builder/cpu_adam.py deleted file mode 100644 index 35a2a8a..0000000 --- a/deepspeed/ops/op_builder/cpu_adam.py +++ /dev/null @@ -1,53 +0,0 @@ -""" -Copyright 2020 The Microsoft DeepSpeed Team -""" -import os -from .builder import TorchCPUOpBuilder - - -class CPUAdamBuilder(TorchCPUOpBuilder): - BUILD_VAR = "DS_BUILD_CPU_ADAM" - NAME = "cpu_adam" - - def __init__(self): - super().__init__(name=self.NAME) - - def absolute_name(self): - return f'deepspeed.ops.adam.{self.NAME}_op' - - def sources(self): - return ['csrc/adam/cpu_adam.cpp', 'csrc/common/custom_cuda_kernel.cu'] - - def libraries_args(self): - args = super().libraries_args() - #args += ['curand'] - #aiss debug 0506######## - args += ['hiprand'] - args += ['rocrand'] - return args - - def include_paths(self): - import torch - if not self.is_rocm_pytorch(): - CUDA_INCLUDE = [os.path.join(torch.utils.cpp_extension.CUDA_HOME, "include")] - else: - CUDA_INCLUDE = [ - os.path.join(torch.utils.cpp_extension.ROCM_HOME, - "include"), - os.path.join(torch.utils.cpp_extension.ROCM_HOME, - "include", - "rocrand"), - os.path.join(torch.utils.cpp_extension.ROCM_HOME, - "include", - "hiprand"), - ] - return ['csrc/includes'] + CUDA_INCLUDE - - ##aiss add - def cxx_args(self): - - return [ - '-lrocblas', - '-lhipblas', - '-D__HIP_DEVICE_COMPILE__', - ] diff --git a/deepspeed/ops/op_builder/fused_adam.py b/deepspeed/ops/op_builder/fused_adam.py deleted file mode 100644 index 6ff264f..0000000 --- a/deepspeed/ops/op_builder/fused_adam.py +++ /dev/null @@ -1,32 +0,0 @@ -""" -Copyright 2020 The Microsoft DeepSpeed Team -""" -from .builder import 
CUDAOpBuilder - - -class FusedAdamBuilder(CUDAOpBuilder): - BUILD_VAR = "DS_BUILD_FUSED_ADAM" - NAME = "fused_adam" - - def __init__(self): - super().__init__(name=self.NAME) - - def absolute_name(self): - return f'deepspeed.ops.adam.{self.NAME}_op' - - def sources(self): - return ['csrc/adam/fused_adam_frontend.cpp', 'csrc/adam/multi_tensor_adam.cu'] - - def include_paths(self): - return ['csrc/includes', 'csrc/adam'] - - def cxx_args(self): - args = super().cxx_args() - return args + self.version_dependent_macros() - - def nvcc_args(self): - nvcc_flags = ['-O3'] + self.version_dependent_macros() - if not self.is_rocm_pytorch(): - nvcc_flags.extend(['-lineinfo', - '--use_fast_math'] + self.compute_capability_args()) - return nvcc_flags diff --git a/deepspeed/ops/op_builder/fused_lamb.py b/deepspeed/ops/op_builder/fused_lamb.py deleted file mode 100644 index 106728f..0000000 --- a/deepspeed/ops/op_builder/fused_lamb.py +++ /dev/null @@ -1,38 +0,0 @@ -""" -Copyright 2020 The Microsoft DeepSpeed Team -""" -from .builder import CUDAOpBuilder - - -class FusedLambBuilder(CUDAOpBuilder): - BUILD_VAR = 'DS_BUILD_FUSED_LAMB' - NAME = "fused_lamb" - - def __init__(self): - super().__init__(name=self.NAME) - - def absolute_name(self): - return f'deepspeed.ops.lamb.{self.NAME}_op' - - def sources(self): - return ['csrc/lamb/fused_lamb_cuda.cpp', 'csrc/lamb/fused_lamb_cuda_kernel.cu'] - - def include_paths(self): - return ['csrc/includes'] - - def cxx_args(self): - args = super().cxx_args() - return args + self.version_dependent_macros() - - def nvcc_args(self): - nvcc_flags = ['-O3'] + self.version_dependent_macros() - if self.is_rocm_pytorch(): - ROCM_MAJOR, ROCM_MINOR = self.installed_rocm_version() - nvcc_flags += [ - '-DROCM_VERSION_MAJOR=%s' % ROCM_MAJOR, - '-DROCM_VERSION_MINOR=%s' % ROCM_MINOR - ] - else: - nvcc_flags.extend(['-lineinfo', - '--use_fast_math'] + self.compute_capability_args()) - return nvcc_flags diff --git a/deepspeed/ops/op_builder/quantizer.py 
b/deepspeed/ops/op_builder/quantizer.py deleted file mode 100644 index 43bc577..0000000 --- a/deepspeed/ops/op_builder/quantizer.py +++ /dev/null @@ -1,22 +0,0 @@ -from .builder import CUDAOpBuilder - - -class QuantizerBuilder(CUDAOpBuilder): - BUILD_VAR = "DS_BUILD_QUANTIZER" - NAME = "quantizer" - - def __init__(self, name=None): - name = self.NAME if name is None else name - super().__init__(name=name) - - def absolute_name(self): - return f'deepspeed.ops.quantizer.{self.NAME}_op' - - def sources(self): - return [ - 'csrc/quantization/pt_binding.cpp', - 'csrc/quantization/quantizer.cu', - ] - - def include_paths(self): - return ['csrc/includes'] diff --git a/deepspeed/ops/op_builder/sparse_attn.py b/deepspeed/ops/op_builder/sparse_attn.py deleted file mode 100644 index 004fdd6..0000000 --- a/deepspeed/ops/op_builder/sparse_attn.py +++ /dev/null @@ -1,87 +0,0 @@ -""" -Copyright 2020 The Microsoft DeepSpeed Team -""" -import warnings -from .builder import OpBuilder - -try: - from packaging import version as pkg_version -except ImportError: - pkg_version = None - - -class SparseAttnBuilder(OpBuilder): - BUILD_VAR = "DS_BUILD_SPARSE_ATTN" - NAME = "sparse_attn" - - def __init__(self): - super().__init__(name=self.NAME) - - def absolute_name(self): - return f'deepspeed.ops.sparse_attention.{self.NAME}_op' - - def sources(self): - return ['csrc/sparse_attention/utils.cpp'] - - def cxx_args(self): - return ['-O2', '-fopenmp'] - - def is_compatible(self, verbose=True): - # Check to see if llvm and cmake are installed since they are dependencies - #required_commands = ['llvm-config|llvm-config-9', 'cmake'] - #command_status = list(map(self.command_exists, required_commands)) - #deps_compatible = all(command_status) - -#####aiss debug 0506############## - if self.is_rocm_pytorch(): - # self.warning(f'{self.NAME} is not compatible with ROCM') - # return False - return True - try: - import torch - except ImportError: - self.warning(f"unable to import torch, please install 
it first") - return False - - # torch-cpu will not have a cuda version - if torch.version.cuda is None: - cuda_compatible = False - self.warning(f"{self.NAME} cuda is not available from torch") - else: - major, minor = torch.version.cuda.split('.')[:2] - cuda_compatible = (int(major) == 10 - and int(minor) >= 1) or (int(major) >= 11) - if not cuda_compatible: - self.warning(f"{self.NAME} requires CUDA version 10.1+") - - TORCH_MAJOR = int(torch.__version__.split('.')[0]) - TORCH_MINOR = int(torch.__version__.split('.')[1]) - torch_compatible = TORCH_MAJOR == 1 and TORCH_MINOR >= 5 - if not torch_compatible: - self.warning( - f'{self.NAME} requires a torch version >= 1.5 but detected {TORCH_MAJOR}.{TORCH_MINOR}' - ) - - try: - import triton - except ImportError: - # auto-install of triton is broken on some systems, reverting to manual install for now - # see this issue: https://github.com/microsoft/DeepSpeed/issues/1710 - self.warning( - f"please install triton==1.0.0 if you want to use sparse attention") - return False - - if pkg_version: - installed_triton = pkg_version.parse(triton.__version__) - triton_mismatch = installed_triton != pkg_version.parse("1.0.0") - else: - installed_triton = triton.__version__ - triton_mismatch = installed_triton != "1.0.0" - - if triton_mismatch: - self.warning( - f"using untested triton version ({installed_triton}), only 1.0.0 is known to be compatible" - ) - return False - - return super().is_compatible(verbose) and torch_compatible and cuda_compatible diff --git a/deepspeed/ops/op_builder/stochastic_transformer.py b/deepspeed/ops/op_builder/stochastic_transformer.py deleted file mode 100644 index aa47c13..0000000 --- a/deepspeed/ops/op_builder/stochastic_transformer.py +++ /dev/null @@ -1,20 +0,0 @@ -""" -Copyright 2020 The Microsoft DeepSpeed Team -""" -from .transformer import TransformerBuilder - - -class StochasticTransformerBuilder(TransformerBuilder): - BUILD_VAR = "DS_BUILD_STOCHASTIC_TRANSFORMER" - NAME = 
"stochastic_transformer" - - def __init__(self): - super().__init__(name=self.NAME) - - def absolute_name(self): - return f'deepspeed.ops.transformer.{self.NAME}_op' - - def nvcc_args(self): - args = super().nvcc_args() - args.append('-D__STOCHASTIC_MODE__') - return args diff --git a/deepspeed/ops/op_builder/transformer_inference.py b/deepspeed/ops/op_builder/transformer_inference.py deleted file mode 100644 index 23eab48..0000000 --- a/deepspeed/ops/op_builder/transformer_inference.py +++ /dev/null @@ -1,32 +0,0 @@ -from .builder import CUDAOpBuilder - - -class InferenceBuilder(CUDAOpBuilder): - BUILD_VAR = "DS_BUILD_TRANSFORMER_INFERENCE" - NAME = "transformer_inference" - - def __init__(self, name=None): - name = self.NAME if name is None else name - super().__init__(name=name) - - def absolute_name(self): - return f'deepspeed.ops.transformer.inference.{self.NAME}_op' - - def sources(self): - return [ - 'csrc/transformer/inference/csrc/pt_binding.cpp', - 'csrc/transformer/inference/csrc/gelu.cu', - 'csrc/transformer/inference/csrc/normalize.cu', - 'csrc/transformer/inference/csrc/softmax.cu', - 'csrc/transformer/inference/csrc/dequantize.cu', - 'csrc/transformer/inference/csrc/apply_rotary_pos_emb.cu', - ] - - def extra_ldflags(self): - if not self.is_rocm_pytorch(): - return ['-lcurand'] - else: - return [] - - def include_paths(self): - return ['csrc/transformer/inference/includes'] diff --git a/deepspeed/ops/op_builder/utils.py b/deepspeed/ops/op_builder/utils.py deleted file mode 100644 index 02d4daa..0000000 --- a/deepspeed/ops/op_builder/utils.py +++ /dev/null @@ -1,18 +0,0 @@ -""" -Copyright 2020 The Microsoft DeepSpeed Team -""" -from .builder import OpBuilder - - -class UtilsBuilder(OpBuilder): - BUILD_VAR = "DS_BUILD_UTILS" - NAME = "utils" - - def __init__(self): - super().__init__(name=self.NAME) - - def absolute_name(self): - return f'deepspeed.ops.{self.NAME}_op' - - def sources(self): - return ['csrc/utils/flatten_unflatten.cpp'] diff --git 
a/deepspeed/ops/quantizer/__init__.py b/deepspeed/ops/quantizer/__init__.py index 0bf4045..9222565 100644 --- a/deepspeed/ops/quantizer/__init__.py +++ b/deepspeed/ops/quantizer/__init__.py @@ -1 +1,3 @@ +'''Copyright The Microsoft DeepSpeed Team''' + from .quantizer import ds_quantizer diff --git a/deepspeed/ops/quantizer/quantizer.py b/deepspeed/ops/quantizer/quantizer.py old mode 100644 new mode 100755 index cea9434..31cfa96 --- a/deepspeed/ops/quantizer/quantizer.py +++ b/deepspeed/ops/quantizer/quantizer.py @@ -1,14 +1,9 @@ ''' Copyright 2020 The Microsoft DeepSpeed Team ''' -import json -import math -import importlib import torch -from torch import nn -from torch.autograd import Function -from ..op_builder import QuantizerBuilder +from deepspeed.ops.op_builder import QuantizerBuilder # Cuda modules will be imported if needed quantizer_cuda_module = None diff --git a/deepspeed/ops/random_ltd/__init__.py b/deepspeed/ops/random_ltd/__init__.py new file mode 100644 index 0000000..34b0dd3 --- /dev/null +++ b/deepspeed/ops/random_ltd/__init__.py @@ -0,0 +1,3 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +from .dropping_utils import gpt_sample_tokens, bert_sample_tokens, GatherTokens, ScatterTokens diff --git a/deepspeed/ops/random_ltd/dropping_utils.py b/deepspeed/ops/random_ltd/dropping_utils.py new file mode 100644 index 0000000..102ffe1 --- /dev/null +++ b/deepspeed/ops/random_ltd/dropping_utils.py @@ -0,0 +1,145 @@ +""" +Copyright 2022 The Microsoft DeepSpeed Team +""" +import torch + +from deepspeed.ops.op_builder import RandomLTDBuilder +""" +Returns: + sampled_indices: [layers, batch_size, reserved_length] + new_mask: [batch_size, 1, reserved_length, reserved_length] +""" + +random_ltd_module = None + + +def gpt_sample_tokens(reserved_length: int, + seq_length: int, + batch_size: int, + layers: int = 1, + device: str = 'cpu', + attn_mask: torch.Tensor = None): + + prob_dist = torch.ones((layers * batch_size, seq_length), device=device) + sampled_indices 
= torch.multinomial(prob_dist, reserved_length) + + sampled_indices = sampled_indices.reshape(layers, + batch_size, + reserved_length).to(torch.int32) + global random_ltd_module + if random_ltd_module is None: + random_ltd_module = RandomLTDBuilder().load() + sampled_indices = random_ltd_module.token_sort_(sampled_indices, seq_length) + + # Not certain the optimized kernel is actually better here, cause it kind of screws + # with alignment right if the sequence length is not divisble by like 16 + # new_mask = random_ltd_module.mask_gather_gpt(attn_mask, reserved_length) + if attn_mask is not None: + new_mask = attn_mask[:, :, :reserved_length, :reserved_length] + else: + new_mask = None + + return sampled_indices, new_mask + + +""" +Returns: + sampled_indices: [layers, batch_size, reserved_length] + new_mask: [layers, batch_size, 1, reserved_length, reserved_length] +""" + + +def bert_sample_tokens(reserved_length: int, + seq_length: int, + batch_size: int, + layers: int = 1, + device: str = 'cpu', + attn_mask: torch.Tensor = None): + assert attn_mask is not None + prob_dist = torch.ones((layers * batch_size, seq_length), device=device) + sampled_indices = torch.multinomial(prob_dist, reserved_length) + + sampled_indices = sampled_indices.reshape(layers, + batch_size, + reserved_length).to(torch.int32) + global random_ltd_module + if random_ltd_module is None: + random_ltd_module = RandomLTDBuilder().load() + + sampled_indices = random_ltd_module.token_sort_(sampled_indices, seq_length) + dtype = sampled_indices.dtype + + sampled_indices = sampled_indices.to(torch.long) + new_mask = [] + for l in range(layers): + tmp_mask_list = [] + for i in range(batch_size): + mask_tmp = attn_mask[i:i + 1, :, sampled_indices[l][i], :] + tmp_mask_list.append(mask_tmp[:, :, :, sampled_indices[l][i]]) + new_mask.append(torch.cat(tmp_mask_list, dim=0)) + + return sampled_indices.to(dtype), new_mask + + +class GatherTokens(torch.autograd.Function): + @staticmethod + def forward(ctx, 
+ activations: torch.Tensor, + sorted_indices: torch.Tensor, + batch_first: bool): + global random_ltd_module + if random_ltd_module is None: + random_ltd_module = RandomLTDBuilder().load() + ctx.save_for_backward(activations, sorted_indices) + ctx.batch_first = batch_first + return activations, random_ltd_module.token_gather(activations, sorted_indices, batch_first) + + @staticmethod + def backward(ctx, a_gradients: torch.Tensor, g_gradients: torch.Tensor): + + g_gradients = g_gradients.contiguous() + global random_ltd_module + if random_ltd_module is None: + random_ltd_module = RandomLTDBuilder().load() + activations, sorted_indices = ctx.saved_tensors + batch_first = ctx.batch_first + + return random_ltd_module.token_scatter_(a_gradients, + g_gradients, + sorted_indices, + batch_first), None, None + + +class ScatterTokens(torch.autograd.Function): + @staticmethod + def forward(ctx, + all_activations: torch.Tensor, + layer_activations: torch.Tensor, + sorted_indices: torch.Tensor, + batch_first: bool): + global random_ltd_module + if random_ltd_module is None: + random_ltd_module = RandomLTDBuilder().load() + scatter_results = random_ltd_module.token_scatter_(all_activations.clone(), + layer_activations, + sorted_indices, + batch_first) + + ctx.save_for_backward(sorted_indices) + ctx.batch_first = batch_first + return scatter_results + + @staticmethod + def backward(ctx, out_gradients: torch.Tensor): + + out_gradients = out_gradients.contiguous() + global random_ltd_module + if random_ltd_module is None: + random_ltd_module = RandomLTDBuilder().load() + sorted_indices, = ctx.saved_tensors + batch_first = ctx.batch_first + + ret_val = random_ltd_module.token_gather(out_gradients, + sorted_indices, + batch_first) + return out_gradients, ret_val, None, None diff --git a/deepspeed/ops/sparse_attention/__init__.py b/deepspeed/ops/sparse_attention/__init__.py index b7e1db3..870d737 100644 --- a/deepspeed/ops/sparse_attention/__init__.py +++ 
b/deepspeed/ops/sparse_attention/__init__.py @@ -1,4 +1,6 @@ -from .sparsity_config import SparsityConfig, DenseSparsityConfig, FixedSparsityConfig, VariableSparsityConfig, BigBirdSparsityConfig, BSLongformerSparsityConfig +'''Copyright The Microsoft DeepSpeed Team''' + +from .sparsity_config import SparsityConfig, DenseSparsityConfig, FixedSparsityConfig, VariableSparsityConfig, BigBirdSparsityConfig, BSLongformerSparsityConfig, LocalSlidingWindowSparsityConfig from .sparse_self_attention import SparseSelfAttention from .bert_sparse_self_attention import BertSparseSelfAttention from .sparse_attention_utils import SparseAttentionUtils diff --git a/deepspeed/ops/sparse_attention/bert_sparse_self_attention.py b/deepspeed/ops/sparse_attention/bert_sparse_self_attention.py old mode 100644 new mode 100755 diff --git a/deepspeed/ops/sparse_attention/matmul.py b/deepspeed/ops/sparse_attention/matmul.py old mode 100644 new mode 100755 index ea83f09..17b0898 --- a/deepspeed/ops/sparse_attention/matmul.py +++ b/deepspeed/ops/sparse_attention/matmul.py @@ -1,13 +1,14 @@ +'''Copyright The Microsoft DeepSpeed Team''' + # DeepSpeed note, code taken & adapted from commit 9aa94789f13ada713af36cfd8cca2fc9a7f6b79a # https://github.com/ptillet/torch-blocksparse/blob/master/torch_blocksparse/matmul.py import importlib -import warnings import torch -import math import triton import triton.language as tl import triton._C.libtriton as libtriton +from deepspeed.accelerator import get_accelerator @triton.jit @@ -339,8 +340,8 @@ class _sparse_matmul(torch.autograd.Function): a_inner, b_inner = a.shape[a_dim], b.shape[b_dim] if a_inner != b_inner: raise ValueError( - f"Size of tensor A along the {_dim_to_name(a_dim)} dim ({a_inner}) must match size " - f"of tensor B along the {_dim_to_name(b_dim)} dim ({b_inner})") + f"Size of tensor A along the {a_dim} dim ({a_inner}) must match size " + f"of tensor B along the {b_dim} dim ({b_inner})") if a_inner % 16 != 0: raise ValueError('Reduction size 
for SDD must be a multiple of 16') @@ -950,7 +951,7 @@ class MatMul: raise ValueError( f"Inputs must be on the same device; got {a.device} for tensor A " f"and {b.device} for tensor B") - if not a.is_cuda: + if not get_accelerator().on_accelerator(a): raise ValueError("Only GPU devices are supported for now") # When autocast is enabled, torch.matmul autocasts to float16, so we do the same here diff --git a/deepspeed/ops/sparse_attention/softmax.py b/deepspeed/ops/sparse_attention/softmax.py old mode 100644 new mode 100755 index 11d4583..09560e1 --- a/deepspeed/ops/sparse_attention/softmax.py +++ b/deepspeed/ops/sparse_attention/softmax.py @@ -1,14 +1,12 @@ +'''Copyright The Microsoft DeepSpeed Team''' + # DeepSpeed note, code taken & adapted from commit 9aa94789f13ada713af36cfd8cca2fc9a7f6b79a # https://github.com/ptillet/torch-blocksparse/blob/master/torch_blocksparse/matmul.py -import warnings -import importlib import torch -import math import triton import triton.language as tl -import triton._C.libtriton as libtriton def next_power_of_2(n): diff --git a/deepspeed/ops/sparse_attention/sparse_attention_utils.py b/deepspeed/ops/sparse_attention/sparse_attention_utils.py index 700363b..90edb10 100644 --- a/deepspeed/ops/sparse_attention/sparse_attention_utils.py +++ b/deepspeed/ops/sparse_attention/sparse_attention_utils.py @@ -2,7 +2,7 @@ Copyright 2020 The Microsoft DeepSpeed Team """ -from torch import nn +import torch from torch.nn import functional as F from deepspeed.ops.sparse_attention import BertSparseSelfAttention, SparsityConfig ''' @@ -102,13 +102,13 @@ class SparseAttentionUtils: if hasattr(model, 'bert'): model.config.max_position_embeddings = max_position - replace_self_attention_layer_with_sparse_self_attention_layer( + model.replace_self_attention_layer_with_sparse_self_attention_layer( model.config, model.bert.encoder.layer, sparsity_config) elif hasattr(model, 'roberta'): model.config.max_position_embeddings = max_position + 2 - 
replace_self_attention_layer_with_sparse_self_attention_layer( + model.replace_self_attention_layer_with_sparse_self_attention_layer( model.config, model.roberta.encoder.layer, sparsity_config) @@ -155,7 +155,7 @@ class SparseAttentionUtils: position_ids, inputs_embeds, pad_token_id, - model_mbeddings): + model_embeddings): """This function pads input tokens and attention mask on sequence length dimension to be multiple of block size. This is a requirement for Sparse Transformer in which the self attention layer works on sequences of length multiple of block size. It needs to be called in your model, such as BertModel, right before you calculate the embedding outputs. diff --git a/deepspeed/ops/sparse_attention/sparse_self_attention.py b/deepspeed/ops/sparse_attention/sparse_self_attention.py index 5ccfa13..46dedd0 100644 --- a/deepspeed/ops/sparse_attention/sparse_self_attention.py +++ b/deepspeed/ops/sparse_attention/sparse_self_attention.py @@ -3,10 +3,8 @@ Copyright 2020 The Microsoft DeepSpeed Team """ import torch.nn as nn -from torch.nn.functional import * import torch from torch import distributed as dist -from collections import namedtuple from deepspeed.ops.sparse_attention import SparsityConfig diff --git a/deepspeed/ops/sparse_attention/sparsity_config.py b/deepspeed/ops/sparse_attention/sparsity_config.py index 3b9224f..efdad2e 100644 --- a/deepspeed/ops/sparse_attention/sparsity_config.py +++ b/deepspeed/ops/sparse_attention/sparsity_config.py @@ -429,7 +429,8 @@ class BigBirdSparsityConfig(SparsityConfig): different_layout_per_head=False, num_random_blocks=1, num_sliding_window_blocks=3, - num_global_blocks=1): + num_global_blocks=1, + attention='bidirectional'): """Initialize the BigBird Sparsity Pattern Config. For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial @@ -441,6 +442,7 @@ class BigBirdSparsityConfig(SparsityConfig): num_random_blocks: optional: an integer determining the number of random blocks in each block row. 
num_sliding_window_blocks: optional: an integer determining the number of blocks in sliding local attention window. num_global_blocks: optional: an integer determining how many consecutive blocks, starting from index 0, are considered as global attention. Global block tokens will be attended by all other block tokens and will attend to all other block tokens as well. + attention: optional: a string determining attention type. Attention can be `unidirectional`, such as autoregressive models, in which tokens attend only to tokens appear before them in the context. Considering that, the upper triangular of attention matrix is empty as above figure. Or it can be `bidirectional`, such as BERT, in which tokens can attend to any other tokens before or after them. Then, the upper triangular part of the attention matrix is mirror of the lower triangular in the above figure. """ super().__init__(num_heads, block, different_layout_per_head) @@ -449,6 +451,11 @@ class BigBirdSparsityConfig(SparsityConfig): self.num_sliding_window_blocks = num_sliding_window_blocks self.num_global_blocks = num_global_blocks + if (attention != 'unidirectional' and attention != 'bidirectional'): + raise NotImplementedError( + 'only \"uni/bi-directional\" attentions are supported for now!') + self.attention = attention + def set_random_layout(self, h, layout): """Sets random attention layout used by the given head in the sparse attention. Note) By default, it assumes there will be a unique random block layout for all heads; unless `different_layout_per_head` parameter is set in which each head can have a different random layout. 
@@ -468,7 +475,11 @@ class BigBirdSparsityConfig(SparsityConfig): ) for row in range(0, num_blocks): - rnd_cols = random.sample(range(0, num_blocks), self.num_random_blocks) + sample_range = range( + 0, + num_blocks) if self.attention == 'bidirectional' else range(0, + row + 1) + rnd_cols = random.sample(sample_range, self.num_random_blocks) layout[h, row, rnd_cols] = 1 return layout @@ -519,6 +530,10 @@ class BigBirdSparsityConfig(SparsityConfig): #global columns layout[h, :, 0:self.num_global_blocks] = 1 + if self.attention == 'unidirectional': + # zero out anything attending to the future + layout = torch.tril(layout) + return layout def make_layout(self, seq_len): @@ -555,7 +570,8 @@ class BSLongformerSparsityConfig(SparsityConfig): different_layout_per_head=False, num_sliding_window_blocks=3, global_block_indices=[0], - global_block_end_indices=None): + global_block_end_indices=None, + attention='bidirectional'): """Initialize the edited `Longformer` Sparsity Pattern Config. For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial @@ -568,12 +584,14 @@ class BSLongformerSparsityConfig(SparsityConfig): num_sliding_window_blocks: optional: an integer determining the number of blocks in sliding local attention window. global_block_indices: optional: a list of integers determining which blocks are considered as global attention. Given indices, determine the blocks that all other token blocks attend to and they attend to all other token blocks. Default value is only index 0. Notice that if global_block_end_indices parameter is set, this parameter is used as starting index of each global window. global_block_end_indices: optional: a list of integers determining end indices of global window blocks. By default this is not used. 
But if it is set, it must have the same size of global_block_indices parameter, and combining this two parameters, for each index i, blocks from global_block_indices[i] to global_block_end_indices[i] (exclusive) are considered as global attention. + attention: optional: a string determining attention type. Attention can be `unidirectional`, such as autoregressive models, in which tokens attend only to tokens appear before them in the context. Considering that, the upper triangular of attention matrix is empty as above figure. Or it can be `bidirectional`, such as BERT, in which tokens can attend to any other tokens before or after them. Then, the upper triangular part of the attention matrix is mirror of the lower triangular in the above figure. """ super().__init__(num_heads, block, different_layout_per_head) self.num_sliding_window_blocks = num_sliding_window_blocks self.global_block_indices = global_block_indices + self.attention = attention if (global_block_end_indices is not None): if (len(global_block_indices) != len(global_block_end_indices)): @@ -642,6 +660,8 @@ class BSLongformerSparsityConfig(SparsityConfig): #global columns layout[h, :, start_idx:end_idx] = 1 + if self.attention == 'unidirectional': + layout = torch.tril(layout) return layout def make_layout(self, seq_len): @@ -661,3 +681,63 @@ class BSLongformerSparsityConfig(SparsityConfig): layout = self.check_and_propagate_first_head_layout(layout) return layout + + +class LocalSlidingWindowSparsityConfig(SparsityConfig): + """Configuration class to store `Local Sliding Window` sparsity configuration - a purely-local sliding window attention. + This class extends parent class of `SparsityConfig` and customizes it for `Local` sparsity. + """ + def __init__(self, + num_heads, + block=16, + num_sliding_window_blocks=3, + attention='unidirectional'): + """Initialize the Local Sliding Window Sparsity Pattern Config. 
+ For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial + Arguments: + num_heads: required: an integer determining number of attention heads of the layer. + block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`. + num_sliding_window_blocks: optional: an integer determining the number of blocks in sliding local attention window. + attention: optional: a string determining attention type. Attention can be `unidirectional`, such as autoregressive models, in which tokens attend only to tokens appear before them in the context. Considering that, the upper triangular of attention matrix is empty as above figure. Or it can be `bidirectional`, such as BERT, in which tokens can attend to any other tokens before or after them. Then, the upper triangular part of the attention matrix is mirror of the lower triangular in the above figure. + """ + + super().__init__(num_heads, block) + self.num_sliding_window_blocks = num_sliding_window_blocks + self.attention = attention + + def set_sliding_window_layout(self, h, layout): + """Sets sliding local attention layout used by the given head in the sparse attention. + Arguments: + h: required: an integer determining head index + layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step + Return: + layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which local sliding window layout is set + """ + + num_blocks = layout.shape[1] + if (num_blocks < self.num_sliding_window_blocks): + raise ValueError( + f'Number of sliding window blocks, {self.num_sliding_window_blocks}, must be smaller than overal number of blocks in a row, {num_blocks}!' 
+ ) + + w = self.num_sliding_window_blocks // 2 + for row in range(0, num_blocks): + start = max(0, row - w) + end = min(row + w + 1, + num_blocks) if self.attention == "bidirectional" else row + 1 + layout[h, row, start:end] = 1 + return layout + + def make_layout(self, seq_len): + """Generates `Local Sliding Window` sparsity layout used by each head in the sparse attention. + Arguments: + seq_len: required: an integer determining number of attention heads of the layer. + Return: + layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing `BigBird` sparsity layout of all head + """ + + layout = self.setup_layout(seq_len) + for h in range(0, self.num_layout_heads): + layout = self.set_sliding_window_layout(h, layout) + layout = self.check_and_propagate_first_head_layout(layout) + return layout diff --git a/deepspeed/ops/sparse_attention/trsrc/__init__.py b/deepspeed/ops/sparse_attention/trsrc/__init__.py index 765d34f..b21068e 100644 --- a/deepspeed/ops/sparse_attention/trsrc/__init__.py +++ b/deepspeed/ops/sparse_attention/trsrc/__init__.py @@ -1,3 +1,5 @@ +'''Copyright The Microsoft DeepSpeed Team''' + import sys import os diff --git a/deepspeed/ops/transformer/__init__.py b/deepspeed/ops/transformer/__init__.py old mode 100644 new mode 100755 index 28c8de6..77d666c --- a/deepspeed/ops/transformer/__init__.py +++ b/deepspeed/ops/transformer/__init__.py @@ -1,3 +1,6 @@ +'''Copyright The Microsoft DeepSpeed Team''' + from .transformer import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig -from .inference.transformer_inference import DeepSpeedTransformerInference, DeepSpeedInferenceConfig +from .inference.config import DeepSpeedInferenceConfig +from ...model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference from .inference.moe_inference import DeepSpeedMoEInferenceConfig, DeepSpeedMoEInference diff --git a/deepspeed/ops/transformer/inference/__init__.py b/deepspeed/ops/transformer/inference/__init__.py index 
66f8124..6e9ca00 100644 --- a/deepspeed/ops/transformer/inference/__init__.py +++ b/deepspeed/ops/transformer/inference/__init__.py @@ -1,2 +1,5 @@ -from .transformer_inference import DeepSpeedTransformerInference, DeepSpeedInferenceConfig +'''Copyright The Microsoft DeepSpeed Team''' + +from .config import DeepSpeedInferenceConfig +from ....model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference from .moe_inference import DeepSpeedMoEInferenceConfig, DeepSpeedMoEInference diff --git a/deepspeed/ops/transformer/inference/bias_add.py b/deepspeed/ops/transformer/inference/bias_add.py new file mode 100644 index 0000000..b7d2944 --- /dev/null +++ b/deepspeed/ops/transformer/inference/bias_add.py @@ -0,0 +1,28 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +''' + +from typing import Optional +import torch +from deepspeed.ops.op_builder import SpatialInferenceBuilder + +spatial_cuda_module = None + + +def nhwc_bias_add(activation: torch.Tensor, + bias: torch.Tensor, + other: Optional[torch.Tensor] = None, + other_bias: Optional[torch.Tensor] = None) -> torch.Tensor: + global spatial_cuda_module + if spatial_cuda_module is None: + spatial_cuda_module = SpatialInferenceBuilder().load() + + if other is None: + return spatial_cuda_module.nhwc_bias_add(activation, bias) + elif other_bias is None: + return spatial_cuda_module.nhwc_bias_add_add(activation, bias, other) + else: + return spatial_cuda_module.nhwc_bias_add_bias_add(activation, + bias, + other, + other_bias) diff --git a/deepspeed/ops/transformer/inference/config.py b/deepspeed/ops/transformer/inference/config.py new file mode 100644 index 0000000..f0c87ba --- /dev/null +++ b/deepspeed/ops/transformer/inference/config.py @@ -0,0 +1,115 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +''' +import json +from deepspeed.utils.types import ActivationFuncType + + +class TransformerConfig(): + def __init__(self, hidden_size, intermediate_size, heads, num_hidden_layers): + 
self.layer_id = -1 + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.heads = heads + self.num_hidden_layers = num_hidden_layers + + +class DeepSpeedInferenceConfig(TransformerConfig): + """Initialize the DeepSpeed Transformer Config. + Arguments: + hidden_size: The hidden size of the transformer layer + intermediate_size: The intermediate size of the feed-forward part of transformer layer + heads: The number of heads in the self-attention of the transformer layer + num_hidden_layers: The number of transformer layers + layer_norm_eps: The epsilon value for the layer norm + local_rank: Optional: The rank of GPU running the transformer kernel, it is not required + to use if the model already set the current device, otherwise need to set it + so that the transformer kernel can work on the right device + mp_size (optional): This argument is mainly used to create the parameters on the kernel side + using model-parallel architecture. If the client model already takes care of this, there is no + need to pass this argument. + fp16: Enable half-precision computation + pre_layer_norm: Select between Pre-LN or Post-LN transformer architecture + stochastic_mode: Enable for high performance, please note that this flag has some level of + non-determinism and can produce different results on different runs. However, we have seen + that by enabling it, the pretraining tasks such as BERT are not affected and can obtain + a high accuracy level. On the other hand, for the downstream tasks, such as fine-tuning, we recommend + to turn it off in order to be able to reproduce the same result through the regular kernel execution. + + scale_attention: If true, both q and k are scaled by 1/sqrt(attention_heads) before attention computation. + return_tuple: if True, returns the transformer output as a tuple, otherwise returns as a tensor + bigscience_bloom: This flag is added temporarily for supporting the BLOOM-176B model architecture. 
+ """ + def __init__(self, + hidden_size=-1, + intermediate_size=-1, + heads=-1, + num_hidden_layers=-1, + layer_norm_eps=1e-12, + local_rank=-1, + mp_size=1, + fp16=False, + q_int8=False, + pre_layer_norm=True, + stochastic_mode=False, + scale_attention=True, + triangular_masking=True, + local_attention=False, + window_size=256, + rotary_dim=-1, + rotate_half=False, + rotate_every_two=True, + return_tuple=True, + mlp_after_attn=True, + mlp_act_func_type=ActivationFuncType.GELU, + training_mp_size=1, + bigscience_bloom=False, + max_out_tokens=1024, + enable_qkv_quantization=False, + use_mup=False, + scale_attn_by_inverse_layer_idx=False, + return_single_tuple=False): + super(DeepSpeedInferenceConfig, + self).__init__( + hidden_size, + (intermediate_size if intermediate_size > 0 else 4 * hidden_size), + heads, + num_hidden_layers) + self.fp16 = fp16 + self.pre_layer_norm = pre_layer_norm + self.local_rank = local_rank + self.stochastic_mode = stochastic_mode + self.epsilon = layer_norm_eps + self.mp_size = mp_size + self.q_int8 = q_int8 + self.scale_attention = scale_attention + self.triangular_masking = triangular_masking + self.local_attention = local_attention + self.window_size = window_size + self.rotary_dim = rotary_dim + self.rotate_half = rotate_half + self.rotate_every_two = rotate_every_two + self.return_tuple = return_tuple + self.mlp_after_attn = mlp_after_attn + self.mlp_act_func_type = mlp_act_func_type + self.specialized_mode = False + self.training_mp_size = training_mp_size + self.bigscience_bloom = bigscience_bloom + self.max_out_tokens = max_out_tokens + self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx + self.enable_qkv_quantization = enable_qkv_quantization + self.use_mup = use_mup + self.return_single_tuple = return_single_tuple + + @classmethod + def from_dict(cls, json_object): + config = DeepSpeedInferenceConfig() + for key, value in json_object.items(): + config.__dict__[key] = value + return config + + @classmethod + 
def from_json_file(cls, json_file): + with open(json_file, "r", encoding='utf-8') as reader: + text = reader.read() + return cls.from_dict(json.loads(text)) diff --git a/deepspeed/ops/transformer/inference/diffusers_2d_transformer.py b/deepspeed/ops/transformer/inference/diffusers_2d_transformer.py new file mode 100644 index 0000000..246eab8 --- /dev/null +++ b/deepspeed/ops/transformer/inference/diffusers_2d_transformer.py @@ -0,0 +1,8 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +''' + + +class Diffusers2DTransformerConfig(): + def __init__(self, int8_quantization=False): + self.int8_quantization = int8_quantization diff --git a/deepspeed/ops/transformer/inference/diffusers_attention.py b/deepspeed/ops/transformer/inference/diffusers_attention.py new file mode 100644 index 0000000..9d829ce --- /dev/null +++ b/deepspeed/ops/transformer/inference/diffusers_attention.py @@ -0,0 +1,248 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +''' +import math +import torch +from torch.autograd import Function +import torch.nn as nn +from packaging import version as pkg_version +from deepspeed.utils.logging import log_dist +from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import InferenceBuilder + +# Cuda modules will be imported if needed +inference_cuda_module = None +minus_inf = -10000.0 +triton_flash_attn = None + + +def load_triton_flash_attn(): + global triton_flash_attn + try: + import triton + except ImportError: + raise ImportError("Please install triton 2.0+ or `pip install deepspeed[sd]`") + + if pkg_version.parse(triton.__version__) < pkg_version.parse("2.0"): + raise ImportError("Please install triton 2.0+ or `pip install deepspeed[sd]`") + + from .triton_ops import triton_flash_attn + + +class DeepSpeedDiffusersAttentionFunction(Function): + @staticmethod + def forward(ctx, + input, + context, + input_mask, + config, + attn_qkvw, + attn_qw, + attn_kw, + attn_vw, + attn_qkvb, + num_attention_heads_per_partition, + 
norm_factor, + hidden_size_per_partition, + attn_ow, + attn_ob, + do_out_bias, + score_context_func, + linear_func, + triton_flash_attn_kernel): + def _transpose_for_context(x): + x = x.permute(0, 2, 1, 3) + new_x_layer_shape = x.size()[:-2] + \ + (hidden_size_per_partition,) + return x.reshape(*new_x_layer_shape) + + def _transpose_for_scores(x): + attention_head_size = x.shape[-1] // num_attention_heads_per_partition + new_x_shape = x.size()[:-1] + (num_attention_heads_per_partition, + attention_head_size) + x = x.reshape(*new_x_shape) + x = x.permute(0, 2, 1, 3) + return x.contiguous() + + def selfAttention_fp(input, context, input_mask): + if config.fp16 and input.dtype == torch.float32: + input = input.half() + head_size = input.shape[-1] // config.heads + do_flash_attn = (head_size <= 128) + scale = (1 / norm_factor) * (1 / norm_factor) + if do_flash_attn and context == None: + qkv_out = linear_func(input, + attn_qkvw, + attn_qkvb if attn_qkvb is not None else attn_qkvw, + attn_qkvb is not None, + do_flash_attn, + config.heads) + + context_layer = triton_flash_attn_kernel(qkv_out[0], + qkv_out[1], + qkv_out[2], + scale, + input.shape[-2] % 128 == 0) + context_layer = _transpose_for_context(context_layer[:,:,:,:head_size]) + + else: + do_flash_attn = False + if context is not None: + query = torch.matmul(input, attn_qw) + key = torch.matmul(context, attn_kw) + value = torch.matmul(context, attn_vw) + else: + qkv = torch.matmul(input, attn_qkvw) + query, key, value = qkv.chunk(3, dim=-1) + query = query.contiguous() + key = key.contiguous() + value = value.contiguous() + query, key, value = inference_cuda_module.pad_transform_fp16(query, key, value, config.heads, do_flash_attn) + attention_scores = (torch.matmul(query, + key.transpose(-1, + -2)) * + scale).softmax(dim=-1) + context_layer = _transpose_for_context( + torch.matmul(attention_scores, + value)) + + output = linear_func(context_layer, + attn_ow, + attn_ob, + do_out_bias, + False, + config.heads) + 
return output + + output = selfAttention_fp(input, context, input_mask) + + return output + + @staticmethod + def backward(ctx, grad_output, grad_output1, grad_output2, grad_output3): + raise RuntimeError('You are running with DeepSpeed Inference mode. \ + Please switch to Training mode for running backward!') + + +class DeepSpeedDiffusersAttention(nn.Module): + """Initialize the DeepSpeed Transformer Layer. + Arguments: + layer_id: The layer index starting from 0, e.g. if model has 24 transformer layers, + layer_id will be 0,1,2...23 when each layer object is instantiated + config: An object of DeepSpeedInferenceConfig + """ + layer_id = 0 + + def __init__( + self, + config, + ): + super(DeepSpeedDiffusersAttention, self).__init__() + + self.config = config + self.config.layer_id = DeepSpeedDiffusersAttention.layer_id + DeepSpeedDiffusersAttention.layer_id += 1 + device = get_accelerator().current_device_name( + ) if config.bigscience_bloom else 'cpu' + qkv_size_per_partition = (self.config.hidden_size // self.config.mp_size) * 3 + + data_type = torch.int8 if config.q_int8 else torch.half if config.fp16 else torch.float + data_type_fp = torch.half if config.fp16 else torch.float + global inference_cuda_module + if inference_cuda_module is None: + builder = InferenceBuilder() + inference_cuda_module = builder.load() + + if DeepSpeedDiffusersAttention.layer_id == 1: + log_dist(f"DeepSpeed-Attention config: {self.config.__dict__}", [0]) + + self.attn_qkvw = nn.Parameter(torch.empty(self.config.hidden_size, + qkv_size_per_partition, + dtype=data_type, + device=device), + requires_grad=False) + self.attn_kw = nn.Parameter(torch.empty(self.config.hidden_size, + self.config.hidden_size, + dtype=data_type, + device=device), + requires_grad=False) + self.attn_vw = nn.Parameter(torch.empty(self.config.hidden_size, + self.config.hidden_size, + dtype=data_type, + device=device), + requires_grad=False) + self.attn_qw = nn.Parameter(torch.empty(self.config.hidden_size, + 
self.config.hidden_size, + dtype=data_type, + device=device), + requires_grad=False) + self.attn_qkvb = nn.Parameter(torch.empty(qkv_size_per_partition, + dtype=data_type_fp, + device=device), + requires_grad=False) + out_size_per_partition = self.config.hidden_size // self.config.mp_size + self.attn_ow = nn.Parameter(torch.empty(out_size_per_partition, + self.config.hidden_size, + dtype=data_type, + device=device), + requires_grad=False) + + self.attn_ob = nn.Parameter(torch.empty(self.config.hidden_size, + dtype=data_type_fp, + device=device), + requires_grad=False) + self.do_out_bias = True + + if triton_flash_attn is None: + load_triton_flash_attn() + self.triton_flash_attn_kernel = triton_flash_attn() + self.num_attention_heads_per_partition = self.config.heads // self.config.mp_size + self.hidden_size_per_partition = self.config.hidden_size // self.config.mp_size + self.hidden_size_per_attention_head = self.config.hidden_size // self.config.heads + + self.norm_factor = math.sqrt( + math.sqrt(self.config.hidden_size // self.config.heads)) + + if self.config.scale_attn_by_inverse_layer_idx is True: + self.norm_factor *= math.sqrt(self.config.layer_id + 1) + # https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/gpt2/modeling_gpt2.py#L191 + + self.score_context_func = inference_cuda_module.softmax_context_fp32 if (not config.fp16) else \ + inference_cuda_module.softmax_context_fp16 + self.linear_func = inference_cuda_module.linear_layer_fp16 if config.fp16 else \ + inference_cuda_module.linear_layer_fp32 + self.allocate_workspace = inference_cuda_module.allocate_workspace_fp32 if not (config.fp16) else \ + inference_cuda_module.allocate_workspace_fp16 + + def forward(self, input, context=None, input_mask=None): + if self.config.layer_id == 0: + self.allocate_workspace(self.config.hidden_size, + self.config.heads, + input.size()[1], + input.size()[0], + DeepSpeedDiffusersAttention.layer_id, + self.config.mp_size, + False, + 0, + 
self.config.max_out_tokens) + output = DeepSpeedDiffusersAttentionFunction.apply( + input, + context, + input_mask, + self.config, + self.attn_qkvw, + self.attn_qw, + self.attn_kw, + self.attn_vw, + self.attn_qkvb, + self.num_attention_heads_per_partition, + self.norm_factor, + self.hidden_size_per_partition, + self.attn_ow, + self.attn_ob, + self.do_out_bias, + self.score_context_func, + self.linear_func, + self.triton_flash_attn_kernel) + + return output diff --git a/deepspeed/ops/transformer/inference/diffusers_transformer_block.py b/deepspeed/ops/transformer/inference/diffusers_transformer_block.py new file mode 100644 index 0000000..e453c34 --- /dev/null +++ b/deepspeed/ops/transformer/inference/diffusers_transformer_block.py @@ -0,0 +1,126 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +''' + +import torch +import torch.nn as nn + +from deepspeed import module_inject +from .diffusers_attention import DeepSpeedDiffusersAttention +from .bias_add import nhwc_bias_add +from .diffusers_2d_transformer import Diffusers2DTransformerConfig +from deepspeed.ops.op_builder import InferenceBuilder, SpatialInferenceBuilder + +# Ops will be loaded on demand +transformer_cuda_module = None +spatial_cuda_module = None + + +def load_transformer_module(): + global transformer_cuda_module + if transformer_cuda_module is None: + transformer_cuda_module = InferenceBuilder().load() + return transformer_cuda_module + + +def load_spatial_module(): + global spatial_cuda_module + if spatial_cuda_module is None: + spatial_cuda_module = SpatialInferenceBuilder().load() + return spatial_cuda_module + + +class DeepSpeedDiffusersTransformerBlock(nn.Module): + def __init__(self, + equivalent_module: nn.Module, + config: Diffusers2DTransformerConfig): + super(DeepSpeedDiffusersTransformerBlock, self).__init__() + self.quantizer = module_inject.GroupQuantizer(q_int8=config.int8_quantization) + # Ensure ops are built by the time we start running + self.config = config + + self.ff1_w = 
self.quantizer.quantize( + nn.Parameter(equivalent_module.ff.net[0].proj.weight.data, + requires_grad=False)) + self.ff1_b = nn.Parameter(equivalent_module.ff.net[0].proj.bias.data, + requires_grad=False) + self.ff2_w = self.quantizer.quantize( + nn.Parameter(equivalent_module.ff.net[2].weight.data, + requires_grad=False)) + self.ff2_b = nn.Parameter(equivalent_module.ff.net[2].bias.data, + requires_grad=False) + + self.norm1_g = nn.Parameter(equivalent_module.norm1.weight.data, + requires_grad=False) + self.norm1_b = nn.Parameter(equivalent_module.norm1.bias.data, + requires_grad=False) + self.norm1_eps = equivalent_module.norm1.eps + + self.norm2_g = nn.Parameter(equivalent_module.norm2.weight.data, + requires_grad=False) + self.norm2_b = nn.Parameter(equivalent_module.norm2.bias.data, + requires_grad=False) + self.norm2_eps = equivalent_module.norm2.eps + + self.norm3_g = nn.Parameter(equivalent_module.norm3.weight.data, + requires_grad=False) + self.norm3_b = nn.Parameter(equivalent_module.norm3.bias.data, + requires_grad=False) + self.norm3_eps = equivalent_module.norm3.eps + + self.attn_1 = equivalent_module.attn1 + self.attn_2 = equivalent_module.attn2 + + # Pull the bias in if we can + if isinstance(self.attn_1, DeepSpeedDiffusersAttention): + self.attn_1.do_out_bias = False + self.attn_1_bias = self.attn_1.attn_ob + else: + self.attn_1_bias = nn.Parameter(torch.zeros_like(self.norm2_g), + requires_grad=False) + + # Pull the bias in if we can + if isinstance(self.attn_2, DeepSpeedDiffusersAttention): + self.attn_2.do_out_bias = False + self.attn_2_bias = self.attn_2.attn_ob + else: + self.attn_2_bias = nn.Parameter(torch.zeros_like(self.norm3_g), + requires_grad=False) + + self.transformer_cuda_module = load_transformer_module() + load_spatial_module() + + def forward(self, hidden_states, context=None, timestep=None, **kwargs): + # In v0.12.0 of diffusers, several new kwargs were added.
Capturing + # those with kwargs to maintain backward compatibility + + # In v0.11.0 of diffusers, the kwarg was changed from 'context' to 'encoder_hidden_states' + # This is so we can support older and newer versions of diffusers + if "encoder_hidden_states" in kwargs and kwargs["encoder_hidden_states"] is not None: + context = kwargs["encoder_hidden_states"] + + out_norm_1 = self.transformer_cuda_module.layer_norm(hidden_states, + self.norm1_g, + self.norm1_b, + self.norm1_eps) + out_attn_1 = self.attn_1(out_norm_1) + + out_norm_2, out_attn_1 = self.transformer_cuda_module.layer_norm_residual_store_pre_ln_res(out_attn_1, + self.attn_1_bias, + hidden_states, + self.norm2_g, + self.norm2_b, + self.norm2_eps) + out_attn_2 = self.attn_2(out_norm_2, context=context) + out_norm_3, out_attn_2 = self.transformer_cuda_module.layer_norm_residual_store_pre_ln_res(out_attn_2, + self.attn_2_bias, + out_attn_1, + self.norm3_g, + self.norm3_b, + self.norm3_eps) + + out_ff1 = nn.functional.linear(out_norm_3, self.ff1_w) + out_geglu = self.transformer_cuda_module.bias_geglu(out_ff1, self.ff1_b) + + out_ff2 = nn.functional.linear(out_geglu, self.ff2_w) + return nhwc_bias_add(out_ff2, self.ff2_b, other=out_attn_2) diff --git a/deepspeed/ops/transformer/inference/ds_attention.py b/deepspeed/ops/transformer/inference/ds_attention.py new file mode 100644 index 0000000..f4ec14b --- /dev/null +++ b/deepspeed/ops/transformer/inference/ds_attention.py @@ -0,0 +1,277 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +''' + +import math +import torch +import torch.nn as nn +from deepspeed import comm as dist +from deepspeed.accelerator import get_accelerator +from .op_binding import LinearOp, VectorMatMulOp, SoftmaxContextOp, QKVGemmOp, SoftmaxOp + +minus_inf = -10000.0 + + +class DeepSpeedSelfAttention(nn.Module): + num_layers = 0 + + def __init__(self, config, mp_group=None, q_scales=None, q_groups=1, merge_count=1): + super(DeepSpeedSelfAttention, self).__init__() + self.config = config + 
data_type = torch.int8 if config.q_int8 else torch.half if config.fp16 else torch.float + data_type_fp = torch.half if config.fp16 else torch.float + self.config.layer_id = DeepSpeedSelfAttention.num_layers + DeepSpeedSelfAttention.num_layers = DeepSpeedSelfAttention.num_layers + 1 + device = get_accelerator().current_device_name( + ) #if config.bigscience_bloom else 'cpu' + qkv_size_per_partition = (self.config.hidden_size // self.config.mp_size) * 3 + self.attn_qkvw = nn.Parameter(torch.empty(self.config.hidden_size, + qkv_size_per_partition, + dtype=data_type, + device=device), + requires_grad=False) + self.attn_qkvb = nn.Parameter(torch.empty(qkv_size_per_partition, + dtype=data_type_fp, + device=device), + requires_grad=False) + out_size_per_partition = self.config.hidden_size // self.config.mp_size + self.attn_ow = nn.Parameter(torch.empty(out_size_per_partition, + self.config.hidden_size, + dtype=data_type, + device=device), + requires_grad=False) + + self.attn_ob = nn.Parameter(torch.empty(self.config.hidden_size, + dtype=data_type_fp, + device=device), + requires_grad=False) + + self.num_attention_heads_per_partition = self.config.heads // self.config.mp_size + self.hidden_size_per_partition = self.config.hidden_size // self.config.mp_size + self.hidden_size_per_attention_head = self.config.hidden_size // self.config.heads + + self.mp_group = mp_group + + # used for quantization + self.q_scales = q_scales + self.q_groups = q_groups + self.merge_count = int(math.log2(merge_count)) + + self.norm_factor = math.sqrt(self.config.hidden_size // self.config.heads) + if not config.use_mup: + self.norm_factor = math.sqrt(self.norm_factor) + + if self.config.scale_attn_by_inverse_layer_idx is True: + self.norm_factor *= math.sqrt(self.config.layer_id + 1) + # https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/gpt2/modeling_gpt2.py#L191 + + self.qkv_func = QKVGemmOp(config) + self.score_context_func = SoftmaxContextOp(config) + 
self.linear_func = LinearOp(config) + self.vector_matmul_func = VectorMatMulOp(config) + + def compute_attention(self, qkv_out, input_mask, layer_past, alibi): + if isinstance(qkv_out, list): + qkv_out = qkv_out[0] + + no_masking = input_mask is None + + if no_masking: + input_mask = torch.empty(1) + + attn_key_value = self.score_context_func( + query_key_value=qkv_out, + attn_mask=((1 - input_mask).to(qkv_out.dtype) * + minus_inf) if input_mask.dtype == torch.int64 else input_mask, + heads=self.num_attention_heads_per_partition, + norm_factor=(1 / self.norm_factor if self.config.scale_attention else 1.0), + no_masking=no_masking, + layer_id=self.config.layer_id, + num_layers=DeepSpeedSelfAttention.num_layers, + alibi=alibi) + + context_layer, key_layer, value_layer = attn_key_value + return context_layer, key_layer, value_layer + + def forward(self, + input, + input_mask, + head_mask=None, + layer_past=None, + get_present=False, + encoder_hidden_states=None, + encoder_attention_mask=None, + output_attentions=False, + norm_w=None, + norm_b=None, + alibi=None): + + if not self.config.pre_layer_norm: + qkv_out = self.linear_func(input=input, + weight=self.attn_qkvw, + bias=self.attn_qkvb, + add_bias=self.attn_qkvb is not None, + do_flash_attn=False, + num_heads=self.num_attention_heads_per_partition, + num_layers=DeepSpeedSelfAttention.num_layers) + else: + qkv_out = self.qkv_func( + input=input, + weight=self.attn_qkvw, + bias=(self.attn_qkvb if self.attn_qkvb is not None else norm_b), + gamma=norm_w, + beta=norm_b, + add_bias=(self.attn_qkvb is not None), + num_layers=DeepSpeedSelfAttention.num_layers, + num_heads=self.num_attention_heads_per_partition) + + context_layer, key_layer, value_layer = self.compute_attention( + qkv_out=qkv_out, + input_mask=input_mask, + layer_past=layer_past, + alibi=alibi) + + output = self.vector_matmul_func(input=context_layer, weight=self.attn_ow) + + inp_norm = qkv_out[-1] + + if self.config.mlp_after_attn and self.mp_group is not 
None and dist.get_world_size( + group=self.mp_group) > 1: + dist.all_reduce(output, group=self.mp_group) + + return (output, key_layer, value_layer, context_layer, inp_norm) + + +class BloomSelfAttention(DeepSpeedSelfAttention): + def __init__(self, *args, **kwargs): + super(BloomSelfAttention, self).__init__(*args, **kwargs) + self.softmax_func = SoftmaxOp(self.config) + + ########### This part is taken/modified from the HF modeling_bloom.py ################ + # Reference: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py + + def _transpose_for_context(self, x): + x = x.permute(0, 2, 1, 3).contiguous() + new_x_layer_shape = x.size()[:-2] + \ + (self.hidden_size_per_partition,) + return x.view(*new_x_layer_shape).contiguous() + + def _split_tensor_along_last_dim(self, + tensor, + num_partitions, + contiguous_split_chunks=True): + """Split a tensor along its last dimension. + + Args: + tensor: ([`torch.tensor`], *required*): + input tensor to split + num_partitions ([`int`], *required*): + number of partitions to split the tensor + contiguous_split_chunks ([`bool`], *optional*, default=`True`): + If True, make each chunk contiguous in memory. + """ + # Get the size and dimension. + last_dim = tensor.dim() - 1 + numerator, denominator = tensor.size()[last_dim], num_partitions + if not (numerator % denominator == 0): + raise ValueError(f"{numerator} is not divisible by {denominator}") + last_dim_size = numerator // denominator + # Split. + tensor_list = torch.split(tensor, last_dim_size, dim=last_dim) + # Note: torch.split does not create contiguous tensors by default. 
+ if contiguous_split_chunks: + return tuple(chunk.contiguous() for chunk in tensor_list) + + return tensor_list + + def compute_attention(self, qkv_out, input_mask, layer_past, alibi): + if isinstance(qkv_out, list): + qkv_out = qkv_out[0] + + no_masking = input_mask is None + + if no_masking: + input_mask = torch.empty(1) + + mixed_x_layer = qkv_out + alibi = alibi.to(get_accelerator().current_device_name()) + head_dim = self.hidden_size_per_partition // self.num_attention_heads_per_partition + new_tensor_shape = mixed_x_layer.size()[:-1] + ( + self.num_attention_heads_per_partition, + 3 * head_dim) + mixed_x_layer = mixed_x_layer.view(*new_tensor_shape) + + query_layer, key_layer, value_layer = self._split_tensor_along_last_dim(mixed_x_layer, 3) + + # [batch_size, head_dim, q_length, k_length] + output_size = (query_layer.size(0), + query_layer.size(2), + query_layer.size(1), + key_layer.size(1)) + # [batch_size, q_length, num_heads, head_dim] -> [q_length, batch_size * num_heads, head_dim] + query_layer = query_layer.transpose(1, + 2).reshape(output_size[0] * output_size[1], + output_size[2], + -1) + # [batch_size, k_length, num_heads, head_dim] -> [k_length, batch_size * num_heads, head_dim] + key_layer = key_layer.transpose(1, + 2).reshape(output_size[0] * output_size[1], + output_size[3], + -1).transpose(-1, + -2) + value_layer = value_layer.transpose(1, + 2).reshape(output_size[0] * output_size[1], + output_size[3], + -1) + if layer_past is not None: + past_key, past_value = layer_past + # concatenate along seq_length dimension -> [batch_size, qk_length, num_heads, head_dim] + key_layer = torch.cat((past_key.type_as(key_layer), key_layer), dim=-1) + value_layer = torch.cat((past_value.type_as(value_layer), + value_layer), + dim=-2) + + presents = (key_layer, value_layer) + # Raw attention scores. 
[batch_size * num_heads, q_length, k_length] + matmul_result = torch.matmul(query_layer, key_layer) + # change view to [batch_size, num_heads, q_length, k_length] + attention_scores = matmul_result.view(output_size[0], + output_size[1], + output_size[2], + -1) + + offset = dist.get_rank( + ) * self.num_attention_heads_per_partition if dist.is_initialized() else 0 + attention_probs = self.softmax_func( + attn_scores=attention_scores, + attn_mask=((1 - input_mask).half() * minus_inf), + alibi=alibi, + triangular=(self.config.triangular_masking + and (attention_scores.shape[-2] > 1)), + recompute=False, + local_attention=False, + window_size=1, + async_op=False, + layer_scale=1 / (self.norm_factor * self.norm_factor), + head_offset=offset) + + # change view [batch_size x num_heads, q_length, k_length] + attention_probs_reshaped = attention_probs.view(*matmul_result.shape) + + # matmul: [batch_size * num_heads, q_length, head_dim] + context_layer = torch.bmm(attention_probs_reshaped, value_layer) + + # change view [batch_size, num_heads, q_length, head_dim] + context_layer = context_layer.view( + context_layer.size(0) // self.num_attention_heads_per_partition, + self.num_attention_heads_per_partition, + context_layer.size(1), + context_layer.shape[-1]) + + context_layer = self._transpose_for_context(context_layer) + key_layer = presents[0] + value_layer = presents[1] + + return context_layer, key_layer, value_layer + + ###################### End of HF modeling_bloom addition ######################## diff --git a/deepspeed/ops/transformer/inference/ds_mlp.py b/deepspeed/ops/transformer/inference/ds_mlp.py new file mode 100644 index 0000000..02d492d --- /dev/null +++ b/deepspeed/ops/transformer/inference/ds_mlp.py @@ -0,0 +1,94 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +''' + +import math +import torch +import torch.nn as nn +from deepspeed import comm as dist +from deepspeed.accelerator import get_accelerator +from .op_binding import MLPGemmOp, 
VectorMatMulOp, GELUGemmOp, ResidualAddOp + + +class DeepSpeedMLP(nn.Module): + def __init__(self, + config, + mp_group=None, + q_scales=None, + q_groups=1, + merge_count=1, + mlp_extra_grouping=False): + super(DeepSpeedMLP, self).__init__() + + self.config = config + data_type = torch.int8 if config.q_int8 else torch.half if config.fp16 else torch.float + data_type_fp = torch.half if config.fp16 else torch.float + device = get_accelerator().current_device_name() + self.attn_nw = nn.Parameter(torch.empty(self.config.hidden_size, + dtype=data_type_fp, + device=device), + requires_grad=False) + self.attn_nb = nn.Parameter(torch.empty(self.config.hidden_size, + dtype=data_type_fp, + device=device), + requires_grad=False) + intm_size_per_partition = self.config.intermediate_size // self.config.mp_size + self.inter_w = nn.Parameter(torch.empty(self.config.hidden_size, + intm_size_per_partition, + dtype=data_type, + device=device), + requires_grad=False) + self.inter_b = nn.Parameter(torch.empty(intm_size_per_partition, + dtype=data_type_fp, + device=device), + requires_grad=False) + self.output_w = nn.Parameter(torch.empty(intm_size_per_partition, + self.config.hidden_size, + dtype=data_type, + device=device), + requires_grad=False) + self.output_b = nn.Parameter(torch.empty(self.config.hidden_size, + dtype=data_type_fp, + device=device), + requires_grad=False) + + # used for quantization + self.q_scales = q_scales + self.q_groups = q_groups * 2 if mlp_extra_grouping else q_groups + self.merge_count = int(math.log2(merge_count)) + self.mp_group = mp_group + + self.mlp_gemm_func = MLPGemmOp(config) + self.vector_matmul_func = VectorMatMulOp(config) + self.fused_gemm_gelu = GELUGemmOp(config) + self.residual_add_func = ResidualAddOp(config) + + def forward(self, input, residual, residual_norm, bias): + residual_add = None + if self.attn_nw is None: + output = self.fused_gemm_gelu(input=residual_norm, + weight=self.inter_w, + bias=self.inter_b, + weight_out=self.output_w) 
+ else: + output, residual_add = self.mlp_gemm_func(input=input, + residual=residual, + input_bias=bias, + weight_interm=self.inter_w, + weight_out=self.output_w, + bias=self.inter_b, + gamma=self.attn_nw, + beta=self.attn_nb) + residual = self.residual_add_func( + hidden_state=output, + residual=residual, + attention_output=input, + attention_bias=bias if bias is not None else self.output_b, + final_bias=self.output_b, + add_bias=bias is not None, + residual_add=residual_add) + + if self.mp_group is not None and dist.get_world_size(group=self.mp_group) > 1: + dist.all_reduce(residual, group=self.mp_group) + + return residual diff --git a/deepspeed/ops/transformer/inference/moe_inference.py b/deepspeed/ops/transformer/inference/moe_inference.py index 855211b..d5e45c7 100644 --- a/deepspeed/ops/transformer/inference/moe_inference.py +++ b/deepspeed/ops/transformer/inference/moe_inference.py @@ -3,22 +3,19 @@ Copyright 2020 The Microsoft DeepSpeed Team ''' import json import math -import importlib import torch -from torch import nn from torch.autograd import Function -import time -from ... 
import op_builder #from ...inference.engine import inference_cuda_module, specialized_mode # Cuda modules will be imported if needed inference_cuda_module = None specialized_mode = None import torch.nn as nn -from .transformer_inference import DeepSpeedSelfAttention, DeepSpeedInferenceConfig +from .ds_attention import DeepSpeedSelfAttention +from .config import DeepSpeedInferenceConfig from ....moe.sharded_moe import TopKGate -import torch.distributed as dist - -import torch.nn.functional as F +from deepspeed import comm as dist +from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import InferenceBuilder class DeepSpeedMoEInferenceConfig(DeepSpeedInferenceConfig): @@ -72,7 +69,8 @@ class DeepSpeedMoEInferenceConfig(DeepSpeedInferenceConfig): noisy_gate_policy=None, drop_tokens=True, use_rts=False, - mlp_type='standard'): + mlp_type='standard', + scale_attn_by_inverse_layer_idx=False): super(DeepSpeedMoEInferenceConfig, self).__init__( hidden_size, @@ -101,6 +99,7 @@ class DeepSpeedMoEInferenceConfig(DeepSpeedInferenceConfig): self.use_rts = use_rts self.global_experts = global_experts self.mlp_type = mlp_type + self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx @classmethod def from_dict(cls, json_object): @@ -233,8 +232,7 @@ class DeepSpeedMoEInference(nn.Module): quantize_scales=None, quantize_groups=1, merge_count=1, - mlp_extra_grouping=False, - qkv_merging=False): + mlp_extra_grouping=False): super(DeepSpeedMoEInference, self).__init__() self.config = config @@ -243,15 +241,13 @@ class DeepSpeedMoEInference(nn.Module): global specialized_mode if inference_cuda_module is None: specialized_mode = False - if hasattr(op_builder, 'InferenceSpecializedBuilder'): - builder = op_builder.InferenceSpecializedBuilder() - if builder.is_compatible(): - inference_cuda_module = builder.load() - specialized_mode = True - else: - inference_cuda_module = op_builder.InferenceBuilder().load() + # InferenceSpecializedBuilder is not 
among DeepSpeed provided builder yet, so we infer by builder name string + builder = get_accelerator().create_op_builder("InferenceSpecializedBuilder") + if builder != None and builder.is_compatible(): + inference_cuda_module = builder.load() + specialized_mode = True else: - inference_cuda_module = op_builder.InferenceBuilder().load() + inference_cuda_module = InferenceBuilder().load() self.config.specialized_mode = specialized_mode DeepSpeedMoEInference.layer_id += 1 @@ -259,8 +255,7 @@ class DeepSpeedMoEInference(nn.Module): mp_group, quantize_scales, quantize_groups, - merge_count, - qkv_merging) + merge_count) self.attn_nw = nn.Parameter(torch.Tensor(self.config.hidden_size)) self.attn_nb = nn.Parameter(torch.Tensor(self.config.hidden_size)) diff --git a/deepspeed/ops/transformer/inference/op_binding/__init__.py b/deepspeed/ops/transformer/inference/op_binding/__init__.py new file mode 100644 index 0000000..8902906 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/__init__.py @@ -0,0 +1,10 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +from .linear import LinearOp +from .vector_matmul import VectorMatMulOp +from .softmax_context import SoftmaxContextOp +from .qkv_gemm import QKVGemmOp +from .softmax import SoftmaxOp +from .mlp_gemm import MLPGemmOp +from .gelu_gemm import GELUGemmOp +from .residual_add import ResidualAddOp diff --git a/deepspeed/ops/transformer/inference/op_binding/base.py b/deepspeed/ops/transformer/inference/op_binding/base.py new file mode 100644 index 0000000..ad29e0f --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/base.py @@ -0,0 +1,17 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import torch +from ..config import DeepSpeedInferenceConfig + +from deepspeed.ops.op_builder import InferenceBuilder + + +class BaseOp(torch.nn.Module): + inference_cuda_module = None + + def __init__(self, config: DeepSpeedInferenceConfig): + super(BaseOp, self).__init__() + self.config = config + if 
BaseOp.inference_cuda_module is None: + builder = InferenceBuilder() + BaseOp.inference_cuda_module = builder.load() diff --git a/deepspeed/ops/transformer/inference/op_binding/gelu_gemm.py b/deepspeed/ops/transformer/inference/op_binding/gelu_gemm.py new file mode 100644 index 0000000..9ab4ef9 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/gelu_gemm.py @@ -0,0 +1,32 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class GELUGemmOp(BaseOp): + def __init__(self, config: DeepSpeedInferenceConfig): + super(GELUGemmOp, self).__init__(config) + if self.config.fp16: + self.fused_gemm_gelu = self.inference_cuda_module.fused_gemm_gelu_fp16 + else: + self.fused_gemm_gelu = self.inference_cuda_module.fused_gemm_gelu_fp32 + + def forward(self, + input: torch.Tensor, + weight: torch.Tensor, + bias: torch.Tensor, + weight_out: torch.Tensor, + async_op: bool = False): + output = self.fused_gemm_gelu(input, + weight, + weight.scale, + bias, + weight_out, + weight_out.scale, + self.config.epsilon, + self.config.pre_layer_norm, + self.config.q_int8, + async_op) + return output diff --git a/deepspeed/ops/transformer/inference/op_binding/linear.py b/deepspeed/ops/transformer/inference/op_binding/linear.py new file mode 100644 index 0000000..6d83ffc --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/linear.py @@ -0,0 +1,31 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class LinearOp(BaseOp): + def __init__(self, config: DeepSpeedInferenceConfig): + super(LinearOp, self).__init__(config) + if self.config.fp16: + self.linear_func = self.inference_cuda_module.linear_layer_fp16 + else: + self.linear_func = self.inference_cuda_module.linear_layer_fp32 + + def forward(self, + input: torch.Tensor, + weight: torch.Tensor, + bias: torch.Tensor, + add_bias: bool, + 
do_flash_attn: bool, + num_heads: int, + external_cache: bool = None, + num_layers: int = None): + qkv_out = self.linear_func(input, + weight, + bias, + add_bias, + do_flash_attn, + num_heads) + return qkv_out diff --git a/deepspeed/ops/transformer/inference/op_binding/mlp_gemm.py b/deepspeed/ops/transformer/inference/op_binding/mlp_gemm.py new file mode 100644 index 0000000..4df8ef5 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/mlp_gemm.py @@ -0,0 +1,41 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class MLPGemmOp(BaseOp): + def __init__(self, config: DeepSpeedInferenceConfig): + super(MLPGemmOp, self).__init__(config) + if self.config.fp16: + self.mlp_gemm_func = self.inference_cuda_module.mlp_gemm_fp16 + else: + self.mlp_gemm_func = self.inference_cuda_module.mlp_gemm_fp32 + + def forward(self, + input: torch.Tensor, + residual: torch.Tensor, + input_bias: torch.Tensor, + weight_interm: torch.Tensor, + weight_out: torch.Tensor, + bias: torch.Tensor, + gamma: torch.Tensor, + beta: torch.Tensor): + output, residual_add = self.mlp_gemm_func( + input, + residual, + input_bias, + weight_interm, + weight_out, + bias, + gamma, + beta, + self.config.epsilon, + self.config.pre_layer_norm, + self.config.mlp_after_attn, + weight_interm.scale, + weight_out.scale, + self.config.q_int8, + self.config.mlp_act_func_type) + return output, residual_add diff --git a/deepspeed/ops/transformer/inference/op_binding/qkv_gemm.py b/deepspeed/ops/transformer/inference/op_binding/qkv_gemm.py new file mode 100644 index 0000000..d50dbfd --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/qkv_gemm.py @@ -0,0 +1,44 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp +from deepspeed import comm as dist + + +class QKVGemmOp(BaseOp): + def __init__(self, config: 
DeepSpeedInferenceConfig): + super(QKVGemmOp, self).__init__(config) + if self.config.fp16: + self.qkv_gemm_func = self.inference_cuda_module.qkv_gemm_fp16 + else: + self.qkv_gemm_func = self.inference_cuda_module.qkv_gemm_fp32 + + def forward(self, + input: torch.Tensor, + weight: torch.Tensor, + bias: torch.Tensor, + gamma: torch.Tensor, + beta: torch.Tensor, + add_bias: bool, + num_layers: int, + num_heads: int = None, + max_out_tokens: int = None): + q_scale = weight.scale + external_cache = self.config.bigscience_bloom + rank = dist.get_rank() if dist.is_initialized() else 0 + q_int8 = self.config.q_int8 + output = self.qkv_gemm_func(input, + weight, + q_scale, + bias, + gamma, + beta, + self.config.epsilon, + add_bias, + num_layers, + external_cache, + self.config.mp_size, + rank, + q_int8) + return output diff --git a/deepspeed/ops/transformer/inference/op_binding/residual_add.py b/deepspeed/ops/transformer/inference/op_binding/residual_add.py new file mode 100644 index 0000000..0fb1741 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/residual_add.py @@ -0,0 +1,38 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class ResidualAddOp(BaseOp): + def __init__(self, config: DeepSpeedInferenceConfig): + super(ResidualAddOp, self).__init__(config) + if self.config.fp16 or self.config.q_int8: + self.residual_add_func = self.inference_cuda_module.residual_add_bias_fp16 + else: + self.residual_add_func = self.inference_cuda_module.residual_add_bias_fp32 + + def forward(self, + hidden_state: torch.Tensor, + residual: torch.Tensor, + attention_output: torch.Tensor, + attention_bias: torch.Tensor, + final_bias: torch.Tensor, + add_bias: bool, + residual_add: torch.Tensor): + + if not self.config.pre_layer_norm and residual_add is not None: + # only use residual add if its set and we are not pre layer norm + residual = residual_add + + 
self.residual_add_func(hidden_state, + residual, + attention_output, + attention_bias, + final_bias, + self.config.mp_size, + self.config.mlp_after_attn, + add_bias, + self.config.pre_layer_norm) + return residual diff --git a/deepspeed/ops/transformer/inference/op_binding/softmax.py b/deepspeed/ops/transformer/inference/op_binding/softmax.py new file mode 100644 index 0000000..4d58ba4 --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/softmax.py @@ -0,0 +1,41 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class SoftmaxOp(BaseOp): + def __init__(self, config: DeepSpeedInferenceConfig): + super(SoftmaxOp, self).__init__(config) + if self.config.fp16: + self.softmax_func = self.inference_cuda_module.softmax_fp16 + else: + self.softmax_func = self._not_implemented + + def _not_implemented(self, *args, **kwargs): + raise NotImplementedError + + def forward(self, + attn_scores: torch.Tensor, + attn_mask: torch.Tensor, + alibi: torch.Tensor, + triangular: bool, + recompute: bool, + local_attention: bool, + window_size: int, + async_op: bool, + layer_scale: float, + head_offset: int): + output = self.softmax_func(attn_scores, + attn_mask, + alibi, + triangular, + recompute, + local_attention, + window_size, + async_op, + layer_scale, + head_offset, + self.config.mp_size) + return output diff --git a/deepspeed/ops/transformer/inference/op_binding/softmax_context.py b/deepspeed/ops/transformer/inference/op_binding/softmax_context.py new file mode 100644 index 0000000..818af5f --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/softmax_context.py @@ -0,0 +1,48 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import torch +from deepspeed import comm as dist +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class SoftmaxContextOp(BaseOp): + def __init__(self, config: DeepSpeedInferenceConfig): + super(SoftmaxContextOp, 
self).__init__(config) + if self.config.fp16: + self.softmax_context_func = self.inference_cuda_module.softmax_context_fp16 + else: + self.softmax_context_func = self.inference_cuda_module.softmax_context_fp32 + + def forward(self, + query_key_value: torch.Tensor, + attn_mask: torch.Tensor, + heads: int, + norm_factor: float, + no_masking: bool, + layer_id: int, + num_layers: int, + alibi: torch.Tensor): + + if alibi is not None: + batch_heads = query_key_value.shape[0] * heads + offset = dist.get_rank() * batch_heads if dist.is_initialized() else 0 + alibi = alibi[offset:batch_heads + offset, :, :] + else: + alibi = torch.empty(1) + + output = self.softmax_context_func(query_key_value, + attn_mask, + self.config.rotary_dim, + self.config.rotate_half, + self.config.rotate_every_two, + heads, + norm_factor, + self.config.triangular_masking, + self.config.local_attention, + self.config.window_size, + no_masking, + layer_id, + num_layers, + alibi) + return output diff --git a/deepspeed/ops/transformer/inference/op_binding/vector_matmul.py b/deepspeed/ops/transformer/inference/op_binding/vector_matmul.py new file mode 100644 index 0000000..c22b61c --- /dev/null +++ b/deepspeed/ops/transformer/inference/op_binding/vector_matmul.py @@ -0,0 +1,20 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import torch +from ..config import DeepSpeedInferenceConfig +from .base import BaseOp + + +class VectorMatMulOp(BaseOp): + def __init__(self, config: DeepSpeedInferenceConfig): + super(VectorMatMulOp, self).__init__(config) + if self.config.fp16: + self.vector_matmul_func = self.inference_cuda_module.vector_matmul_fp16 + else: + self.vector_matmul_func = self.inference_cuda_module.vector_matmul_fp32 + + def forward(self, input: torch.Tensor, weight: torch.Tensor, async_op: bool = False): + q_scale = weight.scale + q_int8 = self.config.q_int8 + output = self.vector_matmul_func(input, weight, async_op, q_scale, q_int8) + return output diff --git 
a/deepspeed/ops/transformer/inference/triton_ops.py b/deepspeed/ops/transformer/inference/triton_ops.py new file mode 100644 index 0000000..45c6db6 --- /dev/null +++ b/deepspeed/ops/transformer/inference/triton_ops.py @@ -0,0 +1,152 @@ +'''Copyright The Microsoft DeepSpeed Team''' +""" +Inspired by original Triton implementation: +https://github.com/openai/triton/blob/b244db06da24a87453a40ad35b085ee37dac3705/python/tutorials/06-fused-attention.py +""" + +import torch +import triton +import triton.language as tl + + +@triton.jit +def _fwd_kernel( + Q, + K, + V, + sm_scale, + TMP, + Out, + stride_qz, + stride_qh, + stride_qm, + stride_qk, + stride_kz, + stride_kh, + stride_kn, + stride_kk, + stride_vz, + stride_vh, + stride_vk, + stride_vn, + stride_oz, + stride_oh, + stride_om, + stride_on, + Z, + H, + N_CTX, + BLOCK_M: tl.constexpr, + BLOCK_DMODEL: tl.constexpr, + BLOCK_N: tl.constexpr, +): + start_m = tl.program_id(0) + off_hz = tl.program_id(1) + # initialize offsets + offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) + offs_n = tl.arange(0, BLOCK_N) + offs_d = tl.arange(0, BLOCK_DMODEL) + off_q = off_hz * stride_qh + offs_m[:, None] * stride_qm + offs_d[None, :] * stride_qk + off_k = off_hz * stride_kh + offs_n[:, None] * stride_kn + offs_d[None, :] * stride_kk + off_v = off_hz * stride_vh + offs_n[:, None] * stride_qm + offs_d[None, :] * stride_qk + # Initialize pointers to Q, K, V + q_ptrs = Q + off_q + k_ptrs = K + off_k + v_ptrs = V + off_v + # initialize pointer to m and l + t_ptrs = TMP + off_hz * N_CTX + offs_m + m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf") + l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32) + # load q: it will stay in SRAM throughout + q = tl.load(q_ptrs) + # loop over k, v and update accumulator + for start_n in range(0, N_CTX, BLOCK_N): + start_n = tl.multiple_of(start_n, BLOCK_N) + # -- compute qk ---- + k = tl.load(k_ptrs + start_n * stride_kn) + + qk = 
tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) + qk += tl.dot(q, k, trans_b=True) + qk *= sm_scale + # -- compute m_ij, p, l_ij + m_ij = tl.max(qk, 1) + p = tl.exp(qk - m_ij[:, None]) + l_ij = tl.sum(p, 1) + # -- update m_i and l_i + m_i_new = tl.maximum(m_i, m_ij) + alpha = tl.exp(m_i - m_i_new) + beta = tl.exp(m_ij - m_i_new) + l_i_new = alpha * l_i + beta * l_ij + # -- update output accumulator -- + # scale p + p_scale = beta / l_i_new + p = p * p_scale[:, None] + # scale acc + acc_scale = l_i / l_i_new * alpha + tl.store(t_ptrs, acc_scale) + acc_scale = tl.load(t_ptrs) # BUG: have to store and immediately load + acc = acc * acc_scale[:, None] + # update acc + v = tl.load(v_ptrs + start_n * stride_vk) + p = p.to(tl.float16) + acc += tl.dot(p, v) + # update m_i and l_i + l_i = l_i_new + m_i = m_i_new + # initialize pointers to output + offs_n = tl.arange(0, BLOCK_DMODEL) + off_o = off_hz * stride_oh + offs_m[:, None] * stride_om + offs_n[None, :] * stride_on + out_ptrs = Out + off_o + tl.store(out_ptrs, acc) + + +class triton_flash_attn(torch.nn.Module): + def __init__(self, ): + super(triton_flash_attn, self).__init__() + + def forward(self, q, k, v, sm_scale, block_128=True): + BLOCK = 128 if block_128 else 64 + # shape constraints + Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1] + o = torch.empty_like(q) + grid = (triton.cdiv(q.shape[2], BLOCK), q.shape[0] * q.shape[1]) + tmp = torch.empty((q.shape[0] * q.shape[1], + q.shape[2]), + device=q.device, + dtype=torch.float32) + num_warps = 4 if Lk <= 64 else 8 + + _fwd_kernel[grid]( + q, + k, + v, + sm_scale, + tmp, + o, + q.stride(0), + q.stride(1), + q.stride(2), + q.stride(3), + k.stride(0), + k.stride(1), + k.stride(2), + k.stride(3), + v.stride(0), + v.stride(1), + v.stride(2), + v.stride(3), + o.stride(0), + o.stride(1), + o.stride(2), + o.stride(3), + k.shape[0], + k.shape[1], + k.shape[2], + BLOCK_M=BLOCK, + BLOCK_N=BLOCK, + BLOCK_DMODEL=Lk, + num_warps=num_warps, + num_stages=1, + ) + return o diff 
--git a/deepspeed/ops/transformer/transformer.py b/deepspeed/ops/transformer/transformer.py old mode 100644 new mode 100755 index 4b3104b..ae62730 --- a/deepspeed/ops/transformer/transformer.py +++ b/deepspeed/ops/transformer/transformer.py @@ -3,12 +3,11 @@ Copyright 2020 The Microsoft DeepSpeed Team ''' import json import math -import importlib import torch from torch import nn from torch.autograd import Function - -from ..op_builder import TransformerBuilder, StochasticTransformerBuilder +from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import TransformerBuilder, StochasticTransformerBuilder # Cuda modules will be imported if needed transformer_cuda_module = None @@ -218,7 +217,7 @@ class DeepSpeedTransformerFunction(Function): output_b, norm_w, norm_b, - config.training, + config.training and config.is_grad_enabled, config.pre_layer_norm, config.attn_dropout_checkpoint, config.normalize_invertible, @@ -482,7 +481,7 @@ class DeepSpeedTransformerLayer(nn.Module): print("DeepSpeed Transformer config is ", self.config.__dict__) if self.config.local_rank >= 0: - torch.cuda.set_device(self.config.local_rank) + get_accelerator().set_device(self.config.local_rank) if initial_weights is None and initial_biases is None: self.attn_qkvw = nn.Parameter( @@ -587,6 +586,7 @@ class DeepSpeedTransformerLayer(nn.Module): output_attentions=False, grads=None): self.config.is_grad_enabled = torch.is_grad_enabled() + self.config.training = self.training return DeepSpeedTransformerFunction.apply(hidden_states, attention_mask, self, diff --git a/deepspeed/pipe/__init__.py b/deepspeed/pipe/__init__.py index db13081..bbabf4f 100644 --- a/deepspeed/pipe/__init__.py +++ b/deepspeed/pipe/__init__.py @@ -1 +1,3 @@ +'''Copyright The Microsoft DeepSpeed Team''' + from ..runtime.pipe import PipelineModule, LayerSpec, TiedLayerSpec diff --git a/deepspeed/profiling/__init__.py b/deepspeed/profiling/__init__.py index e69de29..fcb45ab 100644 --- 
a/deepspeed/profiling/__init__.py +++ b/deepspeed/profiling/__init__.py @@ -0,0 +1 @@ +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/deepspeed/profiling/config.py b/deepspeed/profiling/config.py index 0671211..c22cd45 100644 --- a/deepspeed/profiling/config.py +++ b/deepspeed/profiling/config.py @@ -1,3 +1,4 @@ +'''Copyright The Microsoft DeepSpeed Team''' """ Copyright (c) Microsoft Corporation Licensed under the MIT license. diff --git a/deepspeed/profiling/constants.py b/deepspeed/profiling/constants.py index d999dc6..162f1d3 100644 --- a/deepspeed/profiling/constants.py +++ b/deepspeed/profiling/constants.py @@ -1,3 +1,4 @@ +'''Copyright The Microsoft DeepSpeed Team''' """ Copyright (c) Microsoft Corporation Licensed under the MIT license. diff --git a/deepspeed/profiling/flops_profiler/__init__.py b/deepspeed/profiling/flops_profiler/__init__.py index 2f033c8..7454821 100644 --- a/deepspeed/profiling/flops_profiler/__init__.py +++ b/deepspeed/profiling/flops_profiler/__init__.py @@ -1 +1,3 @@ +'''Copyright The Microsoft DeepSpeed Team''' + from .profiler import * diff --git a/deepspeed/profiling/flops_profiler/profiler.py b/deepspeed/profiling/flops_profiler/profiler.py index 92c25ec..b6684f6 100644 --- a/deepspeed/profiling/flops_profiler/profiler.py +++ b/deepspeed/profiling/flops_profiler/profiler.py @@ -1,11 +1,14 @@ +'''Copyright The Microsoft DeepSpeed Team''' + import time import torch import torch.nn as nn import torch.nn.functional as F from functools import partial -from typing import Callable, List, Optional, Tuple +from typing import List, Optional from collections import OrderedDict import numpy as np +from deepspeed.accelerator import get_accelerator Tensor = torch.Tensor @@ -74,8 +77,9 @@ class FlopsProfiler(object): # if computing the flops of a module directly if type(module) in MODULE_HOOK_MAPPING: - module.__flops_handle__ = module.register_forward_hook( - MODULE_HOOK_MAPPING[type(module)]) + if not hasattr(module, 
"__flops_handle__"): + module.__flops_handle__ = module.register_forward_hook( + MODULE_HOOK_MAPPING[type(module)]) return # if computing the flops of the functionals in a module @@ -83,7 +87,8 @@ class FlopsProfiler(object): module_flop_count.append([]) module_mac_count.append([]) - module.__pre_hook_handle__ = module.register_forward_pre_hook(pre_hook) + if not hasattr(module, "__pre_hook_handle__"): + module.__pre_hook_handle__ = module.register_forward_pre_hook(pre_hook) def post_hook(module, input, output): if module_flop_count: @@ -92,20 +97,24 @@ class FlopsProfiler(object): module.__macs__ += sum([elem[1] for elem in module_mac_count[-1]]) module_mac_count.pop() - module.__post_hook_handle__ = module.register_forward_hook(post_hook) + if not hasattr(module, "__post_hook_handle__"): + module.__post_hook_handle__ = module.register_forward_hook(post_hook) def start_time_hook(module, input): - torch.cuda.synchronize() + get_accelerator().synchronize() module.__start_time__ = time.time() - module.__start_time_hook_handle__ = module.register_forward_pre_hook( - start_time_hook) + if not hasattr(module, "__start_time_hook_handle"): + module.__start_time_hook_handle__ = module.register_forward_pre_hook( + start_time_hook) def end_time_hook(module, input, output): - torch.cuda.synchronize() + get_accelerator().synchronize() module.__duration__ += time.time() - module.__start_time__ - module.__end_time_hook_handle__ = module.register_forward_hook(end_time_hook) + if not hasattr(module, "__end_time_hook_handle__"): + module.__end_time_hook_handle__ = module.register_forward_hook( + end_time_hook) self.model.apply(partial(register_module_hooks, ignore_list=ignore_list)) self.started = True @@ -148,8 +157,7 @@ class FlopsProfiler(object): def add_or_reset_attrs(module): module.__flops__ = 0 module.__macs__ = 0 - module.__params__ = sum(p.numel() for p in module.parameters() - if p.requires_grad) + module.__params__ = sum(p.numel() for p in module.parameters()) 
module.__start_time__ = 0 module.__duration__ = 0 @@ -246,11 +254,10 @@ class FlopsProfiler(object): return import sys import os.path - from os import path original_stdout = None f = None if output_file and output_file != "": - dir_path = os.path.dirname(output_file) + dir_path = os.path.dirname(os.path.abspath(output_file)) if not os.path.exists(dir_path): os.makedirs(dir_path) original_stdout = sys.stdout @@ -287,7 +294,7 @@ class FlopsProfiler(object): print('{:<60} {:<8}'.format( 'params of model = params per GPU * mp_size: ', params_to_string(total_params * - (self.ds_engine.mp_world_size) if self.ds_engine else 1))) + ((self.ds_engine.mp_world_size) if self.ds_engine else 1)))) print('{:<60} {:<8}'.format('fwd MACs per GPU: ', macs_to_string(total_macs))) @@ -296,19 +303,19 @@ class FlopsProfiler(object): print('{:<60} {:<8}'.format( 'fwd flops of model = fwd flops per GPU * mp_size: ', num_to_string(total_flops * - (self.ds_engine.mp_world_size) if self.ds_engine else 1))) + ((self.ds_engine.mp_world_size) if self.ds_engine else 1)))) fwd_latency = self.get_total_duration() if self.ds_engine and self.ds_engine.wall_clock_breakdown(): - fwd_latency = self.ds_engine.timers('forward').elapsed(False) + fwd_latency = self.ds_engine.timers('forward').elapsed(False) / 1000.0 print('{:<60} {:<8}'.format('fwd latency: ', duration_to_string(fwd_latency))) print('{:<60} {:<8}'.format( 'fwd FLOPS per GPU = fwd flops per GPU / fwd latency: ', flops_to_string(total_flops / fwd_latency))) if self.ds_engine and self.ds_engine.wall_clock_breakdown(): - bwd_latency = self.ds_engine.timers('backward').elapsed(False) - step_latency = self.ds_engine.timers('step').elapsed(False) + bwd_latency = self.ds_engine.timers('backward').elapsed(False) / 1000.0 + step_latency = self.ds_engine.timers('step').elapsed(False) / 1000.0 print('{:<60} {:<8}'.format('bwd latency: ', duration_to_string(bwd_latency))) print('{:<60} {:<8}'.format( @@ -339,7 +346,7 @@ class FlopsProfiler(object): 
macs = get_module_macs(module) items = [ params_to_string(params), - "{:.2%} Params".format(params / total_params), + "{:.2%} Params".format(params / total_params if total_params else 0), macs_to_string(macs), "{:.2%} MACs".format(0.0 if total_macs == 0 else macs / total_macs), ] @@ -476,50 +483,50 @@ def _prod(dims): def _linear_flops_compute(input, weight, bias=None): out_features = weight.shape[0] - macs = torch.numel(input) * out_features + macs = input.numel() * out_features return 2 * macs, macs def _relu_flops_compute(input, inplace=False): - return torch.numel(input), 0 + return input.numel(), 0 def _prelu_flops_compute(input: Tensor, weight: Tensor): - return torch.numel(input), 0 + return input.numel(), 0 def _elu_flops_compute(input: Tensor, alpha: float = 1.0, inplace: bool = False): - return torch.numel(input), 0 + return input.numel(), 0 def _leaky_relu_flops_compute(input: Tensor, negative_slope: float = 0.01, inplace: bool = False): - return torch.numel(input), 0 + return input.numel(), 0 def _relu6_flops_compute(input: Tensor, inplace: bool = False): - return torch.numel(input), 0 + return input.numel(), 0 def _silu_flops_compute(input: Tensor, inplace: bool = False): - return torch.numel(input), 0 + return input.numel(), 0 -def _gelu_flops_compute(input): - return torch.numel(input), 0 +def _gelu_flops_compute(input, **kwargs): + return input.numel(), 0 -def _pool_flops_compute( - input, - kernel_size, - stride=None, - padding=0, - ceil_mode=False, - count_include_pad=True, - divisor_override=None, -): - return torch.numel(input), 0 +def _pool_flops_compute(input, + kernel_size, + stride=None, + padding=0, + dilation=None, + ceil_mode=False, + count_include_pad=True, + divisor_override=None, + return_indices=None): + return input.numel(), 0 def _conv_flops_compute(input, @@ -534,7 +541,7 @@ def _conv_flops_compute(input, batch_size = input.shape[0] in_channels = input.shape[1] out_channels = weight.shape[0] - kernel_dims = list(weight.shape[-2:]) 
+ kernel_dims = list(weight.shape[2:]) input_dims = list(input.shape[2:]) length = len(input_dims) @@ -575,7 +582,7 @@ def _conv_trans_flops_compute( batch_size = input.shape[0] in_channels = input.shape[1] out_channels = weight.shape[0] - kernel_dims = list(weight.shape[-2:]) + kernel_dims = list(weight.shape[2:]) input_dims = list(input.shape[2:]) length = len(input_dims) @@ -621,8 +628,8 @@ def _batch_norm_flops_compute( has_affine = weight is not None if training: # estimation - return torch.numel(input) * (5 if has_affine else 4), 0 - flops = torch.numel(input) * (2 if has_affine else 1) + return input.numel() * (5 if has_affine else 4), 0 + flops = input.numel() * (2 if has_affine else 1) return flops, 0 @@ -635,7 +642,7 @@ def _layer_norm_flops_compute( ): has_affine = weight is not None # estimation - return torch.numel(input) * (5 if has_affine else 4), 0 + return input.numel() * (5 if has_affine else 4), 0 def _group_norm_flops_compute(input: Tensor, @@ -645,7 +652,7 @@ def _group_norm_flops_compute(input: Tensor, eps: float = 1e-5): has_affine = weight is not None # estimation - return torch.numel(input) * (5 if has_affine else 4), 0 + return input.numel() * (5 if has_affine else 4), 0 def _instance_norm_flops_compute( @@ -660,21 +667,19 @@ def _instance_norm_flops_compute( ): has_affine = weight is not None # estimation - return torch.numel(input) * (5 if has_affine else 4), 0 + return input.numel() * (5 if has_affine else 4), 0 -def _upsample_flops_compute(input, - size=None, - scale_factor=None, - mode="nearest", - align_corners=None): +def _upsample_flops_compute(input, **kwargs): + size = kwargs.get('size', None) if size is not None: - if isinstance(size, tuple): + if isinstance(size, tuple) or isinstance(size, list): return int(_prod(size)), 0 else: return int(size), 0 + scale_factor = kwargs.get('scale_factor', None) assert scale_factor is not None, "either size or scale_factor should be defined" - flops = torch.numel(input) + flops = 
input.numel() if isinstance(scale_factor, tuple) and len(scale_factor) == len(input): flops * int(_prod(scale_factor)) else: @@ -683,7 +688,7 @@ def _upsample_flops_compute(input, def _softmax_flops_compute(input, dim=None, _stacklevel=3, dtype=None): - return torch.numel(input), 0 + return input.numel(), 0 def _embedding_flops_compute( @@ -783,7 +788,7 @@ def _elementwise_flops_compute(input, other): def wrapFunc(func, funcFlopCompute): oldFunc = func - name = func.__name__ + name = func.__str__ old_functions[name] = oldFunc def newFunc(*args, **kwds): @@ -794,7 +799,7 @@ def wrapFunc(func, funcFlopCompute): module_mac_count[-1].append((name, macs)) return oldFunc(*args, **kwds) - newFunc.__name__ = func.__name__ + newFunc.__str__ = func.__str__ return newFunc @@ -860,7 +865,7 @@ def _patch_tensor_methods(): torch.mm = wrapFunc(torch.mm, _matmul_flops_compute) torch.Tensor.mm = wrapFunc(torch.Tensor.mm, _matmul_flops_compute) torch.bmm = wrapFunc(torch.bmm, _matmul_flops_compute) - torch.Tensor.bmm = wrapFunc(torch.bmm, _matmul_flops_compute) + torch.Tensor.bmm = wrapFunc(torch.Tensor.bmm, _matmul_flops_compute) torch.addmm = wrapFunc(torch.addmm, _addmm_flops_compute) torch.Tensor.addmm = wrapFunc(torch.Tensor.addmm, _tensor_addmm_flops_compute) @@ -873,42 +878,65 @@ def _patch_tensor_methods(): torch.einsum = wrapFunc(torch.einsum, _einsum_flops_compute) + torch.baddbmm = wrapFunc(torch.baddbmm, _tensor_addmm_flops_compute) + def _reload_functionals(): # torch.nn.functional does not support importlib.reload() - F.linear = old_functions[F.linear.__name__] - F.conv1d = old_functions[F.conv1d.__name__] - F.conv2d = old_functions[F.conv2d.__name__] - F.conv3d = old_functions[F.conv3d.__name__] - F.conv_transpose1d = old_functions[F.conv_transpose1d.__name__] - F.conv_transpose2d = old_functions[F.conv_transpose2d.__name__] - F.conv_transpose3d = old_functions[F.conv_transpose3d.__name__] - F.relu = old_functions[F.relu.__name__] - F.prelu = 
old_functions[F.prelu.__name__] - F.elu = old_functions[F.elu.__name__] - F.leaky_relu = old_functions[F.leaky_relu.__name__] - F.relu6 = old_functions[F.relu6.__name__] - F.batch_norm = old_functions[F.batch_norm.__name__] - F.avg_pool1d = old_functions[F.avg_pool1d.__name__] - F.avg_pool2d = old_functions[F.avg_pool2d.__name__] - F.avg_pool3d = old_functions[F.avg_pool3d.__name__] - F.max_pool1d = old_functions[F.max_pool1d.__name__] - F.max_pool2d = old_functions[F.max_pool2d.__name__] - F.max_pool3d = old_functions[F.max_pool3d.__name__] - F.adaptive_avg_pool1d = old_functions[F.adaptive_avg_pool1d.__name__] - F.adaptive_avg_pool2d = old_functions[F.adaptive_avg_pool2d.__name__] - F.adaptive_avg_pool3d = old_functions[F.adaptive_avg_pool3d.__name__] - F.adaptive_max_pool1d = old_functions[F.adaptive_max_pool1d.__name__] - F.adaptive_max_pool2d = old_functions[F.adaptive_max_pool2d.__name__] - F.adaptive_max_pool3d = old_functions[F.adaptive_max_pool3d.__name__] - F.upsample = old_functions[F.upsample.__name__] - F.interpolate = old_functions[F.interpolate.__name__] - F.softmax = old_functions[F.softmax.__name__] - F.embedding = old_functions[F.embedding.__name__] + F.linear = old_functions[F.linear.__str__] + F.conv1d = old_functions[F.conv1d.__str__] + F.conv2d = old_functions[F.conv2d.__str__] + F.conv3d = old_functions[F.conv3d.__str__] + F.conv_transpose1d = old_functions[F.conv_transpose1d.__str__] + F.conv_transpose2d = old_functions[F.conv_transpose2d.__str__] + F.conv_transpose3d = old_functions[F.conv_transpose3d.__str__] + F.relu = old_functions[F.relu.__str__] + F.prelu = old_functions[F.prelu.__str__] + F.elu = old_functions[F.elu.__str__] + F.leaky_relu = old_functions[F.leaky_relu.__str__] + F.relu6 = old_functions[F.relu6.__str__] + if hasattr(F, "silu"): + F.silu = old_functions[F.silu.__str__] + F.gelu = old_functions[F.gelu.__str__] + F.batch_norm = old_functions[F.batch_norm.__str__] + F.layer_norm = old_functions[F.layer_norm.__str__] + 
F.instance_norm = old_functions[F.instance_norm.__str__] + F.group_norm = old_functions[F.group_norm.__str__] + F.avg_pool1d = old_functions[F.avg_pool1d.__str__] + F.avg_pool2d = old_functions[F.avg_pool2d.__str__] + F.avg_pool3d = old_functions[F.avg_pool3d.__str__] + F.max_pool1d = old_functions[F.max_pool1d.__str__] + F.max_pool2d = old_functions[F.max_pool2d.__str__] + F.max_pool3d = old_functions[F.max_pool3d.__str__] + F.adaptive_avg_pool1d = old_functions[F.adaptive_avg_pool1d.__str__] + F.adaptive_avg_pool2d = old_functions[F.adaptive_avg_pool2d.__str__] + F.adaptive_avg_pool3d = old_functions[F.adaptive_avg_pool3d.__str__] + F.adaptive_max_pool1d = old_functions[F.adaptive_max_pool1d.__str__] + F.adaptive_max_pool2d = old_functions[F.adaptive_max_pool2d.__str__] + F.adaptive_max_pool3d = old_functions[F.adaptive_max_pool3d.__str__] + F.upsample = old_functions[F.upsample.__str__] + F.interpolate = old_functions[F.interpolate.__str__] + F.softmax = old_functions[F.softmax.__str__] + F.embedding = old_functions[F.embedding.__str__] def _reload_tensor_methods(): - torch.matmul = old_functions[torch.matmul.__name__] + torch.matmul = old_functions[torch.matmul.__str__] + torch.Tensor.matmul = old_functions[torch.Tensor.matmul.__str__] + torch.mm = old_functions[torch.mm.__str__] + torch.Tensor.mm = old_functions[torch.Tensor.mm.__str__] + torch.bmm = old_functions[torch.bmm.__str__] + torch.Tensor.bmm = old_functions[torch.Tensor.bmm.__str__] + torch.addmm = old_functions[torch.addmm.__str__] + torch.Tensor.addmm = old_functions[torch.Tensor.addmm.__str__] + torch.mul = old_functions[torch.mul.__str__] + torch.Tensor.mul = old_functions[torch.Tensor.mul.__str__] + torch.add = old_functions[torch.add.__str__] + torch.Tensor.add = old_functions[torch.Tensor.add.__str__] + + torch.einsum = old_functions[torch.einsum.__str__] + + torch.baddbmm = old_functions[torch.baddbmm.__str__] def _rnn_flops(flops, rnn_module, w_ih, w_hh, input_size): @@ -1191,15 +1219,19 
@@ def get_model_profile( input = torch.ones(()).new_empty((*input_shape, )) args = [input] - assert (len(args) > 0) or (len(kwargs) > 0), "args and/or kwargs must be specified if input_shape is None" for _ in range(warm_up): - _ = model(*args, **kwargs) - + if kwargs: + _ = model(*args, **kwargs) + else: + _ = model(*args) prof.start_profile(ignore_list=ignore_modules) - _ = model(*args, **kwargs) + if kwargs: + _ = model(*args, **kwargs) + else: + _ = model(*args) flops = prof.get_total_flops() macs = prof.get_total_macs() diff --git a/deepspeed/runtime/__init__.py b/deepspeed/runtime/__init__.py index e69de29..aa1f39f 100644 --- a/deepspeed/runtime/__init__.py +++ b/deepspeed/runtime/__init__.py @@ -0,0 +1,9 @@ +'''Copyright The Microsoft DeepSpeed Team''' + + +class DeepSpeedOptimizer(object): + pass + + +class ZeROOptimizer(DeepSpeedOptimizer): + pass diff --git a/deepspeed/runtime/activation_checkpointing/__init__.py b/deepspeed/runtime/activation_checkpointing/__init__.py index e69de29..fcb45ab 100644 --- a/deepspeed/runtime/activation_checkpointing/__init__.py +++ b/deepspeed/runtime/activation_checkpointing/__init__.py @@ -0,0 +1 @@ +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/deepspeed/runtime/activation_checkpointing/checkpointing.py b/deepspeed/runtime/activation_checkpointing/checkpointing.py index 06d1a2a..7437254 100644 --- a/deepspeed/runtime/activation_checkpointing/checkpointing.py +++ b/deepspeed/runtime/activation_checkpointing/checkpointing.py @@ -1,3 +1,4 @@ +'''Copyright The Microsoft DeepSpeed Team''' ''' Copyright (c) Microsoft Corporation Licensed under the MIT license. 
@@ -16,16 +17,16 @@ b886b7bb972afe72bac0f5de4f42a4a7bae8ebef import copy import torch import contextlib -import torch.distributed as dist +from deepspeed import comm as dist import mmap from torch import _C -from torch.cuda import _lazy_call, device as device_ctx_manager from deepspeed.runtime.config import DeepSpeedConfig from deepspeed.utils import logger from deepspeed.runtime.utils import copy_to_device, move_to_device, see_memory_usage, bwc_tensor_model_parallel_rank from deepspeed.utils.timer import SynchronizedWallClockTimer as Timers +from deepspeed.accelerator import get_accelerator # DeepSpeed Checkpointing Enabled or Disabled deepspeed_checkpointing_enabled = False @@ -98,25 +99,25 @@ def _set_cuda_rng_state(new_state, device=-1): if hasattr(_C, '_cuda_setRNGState') and callable(_C._cuda_setRNGState): # older PyTorch def cb(): - with device_ctx_manager(device): + with get_accelerator().device(device): _C._cuda_setRNGState(new_state) else: # newer PyTorch if device == -1: - device = torch.device('cuda') + device = torch.device(get_accelerator().device_name()) elif isinstance(device, str): device = torch.device(device) elif isinstance(device, int): - device = torch.device('cuda', device) + device = torch.device(get_accelerator().device_name(), device) def cb(): idx = device.index if idx is None: - idx = torch.cuda.current_device() - default_generator = torch.cuda.default_generators[idx] + idx = get_accelerator().current_device() + default_generator = get_accelerator().default_generator(idx) default_generator.set_state(new_state) - _lazy_call(cb) + get_accelerator().lazy_call(cb) class CudaRNGStatesTracker: @@ -158,10 +159,10 @@ class CudaRNGStatesTracker: if name in self.states_: raise Exception('cuda rng state {} already exists'.format(name)) # Get the current rng state. - orig_rng_state = torch.cuda.get_rng_state() + orig_rng_state = get_accelerator().get_rng_state() # Set the new state and store it. 
- torch.cuda.manual_seed(seed) - self.states_[name] = torch.cuda.get_rng_state() + get_accelerator().manual_seed(seed) + self.states_[name] = get_accelerator().get_rng_state() # Reset rng state to what it was. _set_cuda_rng_state(orig_rng_state) @@ -173,7 +174,7 @@ class CudaRNGStatesTracker: if name not in self.states_: raise Exception('cuda rng state {} is not added'.format(name)) # Store current rng state. - orig_cuda_rng_state = torch.cuda.get_rng_state() + orig_cuda_rng_state = get_accelerator().get_rng_state() # Set rng state to the desired one _set_cuda_rng_state(self.states_[name]) # Do the stuff we wanted to do. @@ -181,7 +182,7 @@ class CudaRNGStatesTracker: yield finally: # Update the current rng state for later use. - self.states_[name] = torch.cuda.get_rng_state() + self.states_[name] = get_accelerator().get_rng_state() # And set the state to the original state we started with. _set_cuda_rng_state(orig_cuda_rng_state) @@ -199,7 +200,7 @@ def model_parallel_cuda_manual_seed(seed): """Initialize model parallel cuda seed. This function should be called after the model parallel is - initialized. Also, no torch.cuda.manual_seed should be called + initialized. Also, no get_accelerator().manual_seed should be called after this function. Basically, this is replacement for that function. Two set of RNG states are tracked: @@ -222,12 +223,12 @@ def model_parallel_cuda_manual_seed(seed): # Data parallel gets the original seed. data_parallel_seed = seed - if torch.distributed.get_rank() == 0: + if dist.get_rank() == 0: logger.info( '> initializing model parallel cuda seeds on global rank {}, ' 'model parallel rank {}, and data parallel rank {} with ' 'model parallel seed: {} and data parallel seed: {}'.format( - torch.distributed.get_rank(), + dist.get_rank(), tp_rank, mpu.get_data_parallel_rank(), model_parallel_seed, @@ -235,7 +236,7 @@ def model_parallel_cuda_manual_seed(seed): ) _CUDA_RNG_STATE_TRACKER.reset() # Set the default state. 
- torch.cuda.manual_seed(data_parallel_seed) + get_accelerator().manual_seed(data_parallel_seed) # and model parallel state. _CUDA_RNG_STATE_TRACKER.add(_MODEL_PARALLEL_RNG_TRACKER_NAME, model_parallel_seed) @@ -270,6 +271,12 @@ def gather_partitioned_activations(tensors, device=None): inputs.append(item) continue + # don't need to do all_gather if model parallel is not enabled + if mp_group is None or mp_size == 1: + item = item.view(list(size.numpy())) + inputs.append(item) + continue + partition_size = item.numel() tensor_size = partition_size * mp_size if device is not None: @@ -284,8 +291,7 @@ def gather_partitioned_activations(tensors, device=None): if i == mp_rank: part_i.copy_(item) partitions.append(part_i) - if mp_group is not None: - dist.all_gather(partitions, partitions[mp_rank], group=mp_group) + dist.all_gather(partitions, partitions[mp_rank], group=mp_group) input_tensor = flat_tensor.view(list(size.numpy())) item.data = input_tensor.data @@ -511,7 +517,7 @@ class CheckpointFunction(torch.autograd.Function): ctx.tensor_flags = tensor_flags if SYNCHRONIZE: - torch.cuda.synchronize() + get_accelerator().synchronize() if timers is None and PROFILE_TIME: timers = Timers() @@ -554,8 +560,8 @@ class CheckpointFunction(torch.autograd.Function): logger.info(f"----Synchronization {SYNCHRONIZE}") logger.info(f"----Profiling time in checkpointing {PROFILE_TIME}") - cuda_device = torch.cuda.current_device() - transport_stream = torch.cuda.Stream(device=cuda_device) + cuda_device = get_accelerator().current_device_name() + transport_stream = get_accelerator().Stream(device=cuda_device) if PARTITION_ACTIVATIONS: inputs = partition_activations(args, @@ -573,7 +579,7 @@ class CheckpointFunction(torch.autograd.Function): # Copy the rng states. 
ctx.fwd_cpu_rng_state = torch.get_rng_state() - ctx.fwd_cuda_rng_state = torch.cuda.get_rng_state() + ctx.fwd_cuda_rng_state = get_accelerator().get_rng_state() ctx.fwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states() see_memory_usage("Before running forward on the layer", force=False) @@ -601,7 +607,7 @@ class CheckpointFunction(torch.autograd.Function): timers('forward').stop() timers.log(['forward']) if SYNCHRONIZE: - torch.cuda.synchronize() + get_accelerator().synchronize() # Tensors returned from forward() may not be differentiable. if torch.is_tensor(outputs): @@ -628,7 +634,7 @@ class CheckpointFunction(torch.autograd.Function): # so that they can be garbage collected once the checkpoints # have been used if SYNCHRONIZE: - torch.cuda.synchronize() + get_accelerator().synchronize() if PROFILE_TIME: timers('backward').start() @@ -654,7 +660,7 @@ class CheckpointFunction(torch.autograd.Function): global cuda_device, transport_stream, PARTITION_ACTIVATIONS if PARTITION_ACTIVATIONS: - # with torch.cuda.stream(transport_stream): + # with get_accelerator().stream(transport_stream): inputs = gather_partitioned_activations( ctx.deepspeed_saved_tensors, device=cuda_device if CPU_CHECKPOINT else None) @@ -675,7 +681,7 @@ class CheckpointFunction(torch.autograd.Function): # Store the current states. bwd_cpu_rng_state = torch.get_rng_state() - bwd_cuda_rng_state = torch.cuda.get_rng_state() + bwd_cuda_rng_state = get_accelerator().get_rng_state() bwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states() # Set the states to what it used to be before the forward pass. 
@@ -684,7 +690,7 @@ class CheckpointFunction(torch.autograd.Function): get_cuda_rng_tracker().set_states(ctx.fwd_cuda_rng_state_tracker) # if PARTITION_ACTIVATIONS: - # current_stream=torch.cuda.current_stream() + # current_stream=get_accelerator().current_stream() # current_stream.wait_stream(transport_stream) see_memory_usage("In backward checkpointing code before forward", force=False) @@ -729,7 +735,7 @@ class CheckpointFunction(torch.autograd.Function): timers('backward').stop() timers.log(['backward']) if SYNCHRONIZE: - torch.cuda.synchronize() + get_accelerator().synchronize() ret_list = [None, None] # first None for ctx for inp in detached_inputs: if torch.is_tensor(inp): @@ -856,7 +862,7 @@ def configure( checkpoint_in_cpu: Optional: Moves the activation checkpoint to CPU. Only works with partition_activation. Default is false. Will overwrite deepspeed_config if provided - synchronize: Optional: Performs torch.cuda.synchronize() at the beginning and end of + synchronize: Optional: Performs get_accelerator().synchronize() at the beginning and end of each call to deepspeed.checkpointing.checkpoint for both forward and backward pass. By default false. Will overwrite deepspeed_config if provided diff --git a/deepspeed/runtime/activation_checkpointing/config.py b/deepspeed/runtime/activation_checkpointing/config.py old mode 100644 new mode 100755 index 0ab59ac..0e79579 --- a/deepspeed/runtime/activation_checkpointing/config.py +++ b/deepspeed/runtime/activation_checkpointing/config.py @@ -1,3 +1,4 @@ +'''Copyright The Microsoft DeepSpeed Team''' """ Copyright (c) Microsoft Corporation Licensed under the MIT license. 
diff --git a/deepspeed/runtime/bf16_optimizer.py b/deepspeed/runtime/bf16_optimizer.py index 43c23f0..f667d27 100644 --- a/deepspeed/runtime/bf16_optimizer.py +++ b/deepspeed/runtime/bf16_optimizer.py @@ -1,80 +1,44 @@ +""" +Copyright 2022 The Microsoft DeepSpeed Team +""" + +from collections import OrderedDict import torch -import torch.distributed as dist +import sys +import os +from deepspeed import comm as dist from deepspeed.runtime.constants import PIPE_REPLICATED from deepspeed.ops.op_builder import UtilsBuilder +from deepspeed.runtime import ZeROOptimizer from packaging import version as pkg_version from deepspeed.git_version_info import version from deepspeed.runtime.utils import (get_global_norm_of_tensors, clip_tensors_by_global_norm, - get_grad_norm, - clip_gradients, + DummyOptim, align_dense_tensors, all_gather_dp_groups, bwc_tensor_model_parallel_rank, is_model_parallel_parameter, see_memory_usage) +from deepspeed.utils import link_hp_params, fragment_address +from deepspeed.checkpoint import enable_universal_checkpoint from deepspeed.checkpoint.constants import (DS_VERSION, PARTITION_COUNT, BASE_OPTIMIZER_STATE, SINGLE_PARTITION_OF_FP32_GROUPS, CLIP_GRAD, - GROUPS_PADDING) - -import types - -from dataclasses import dataclass - - -@dataclass -class fragment_address: - numel: int - start: int - - -@dataclass -class tensor_fragment: - lp_fragment: torch.Tensor - lp_fragment_address: fragment_address - hp_fragment: torch.Tensor - hp_fragment_address: fragment_address - optim_fragment: {} - - def update_hp(self): - self.hp_fragment.data.copy_(self.lp_fragment.data) - - def update_lp(self): - self.lp_fragment.data.copy_(self.hp_fragment.data) + GROUP_PADDINGS, + PARAM_SLICE_MAPPINGS) - def get_optim_state_fragment(self, key): - if key in self.optim_fragment: - return self.optim_fragment[key] - else: - raise ValueError(f'{key} not found in optimizer state fragment') - - -def get_full_hp_param(self, optim_state_key=None): - reduce_buffer = 
torch.zeros_like(self, dtype=torch.float32).flatten() - if self._hp_mapping is not None: - lp_frag_address = self._hp_mapping.lp_fragment_address - reduce_fragment = torch.narrow(reduce_buffer, - 0, - lp_frag_address.start, - lp_frag_address.numel) - if optim_state_key is None: - hp_fragment = self._hp_mapping.hp_fragment - else: - hp_fragment = self._hp_mapping.get_optim_state_fragment(optim_state_key) - - reduce_fragment.data.copy_(hp_fragment.data) - torch.distributed.all_reduce(reduce_buffer, group=self._dp_group) - return reduce_buffer.reshape_as(self) +setattr(sys.modules[__name__], 'fragment_address', fragment_address) -class BF16_Optimizer: +class BF16_Optimizer(ZeROOptimizer): def __init__(self, init_optimizer, + param_names, mpu=None, clip_grad=0.0, norm_type=2, @@ -85,6 +49,9 @@ class BF16_Optimizer: see_memory_usage('begin bf16_optimizer', force=True) self.timers = timers self.optimizer = init_optimizer + self.param_names = param_names + self.using_real_optimizer = not isinstance(self.optimizer, DummyOptim) + self.clip_grad = clip_grad self.norm_type = norm_type self.mpu = mpu @@ -94,10 +61,6 @@ class BF16_Optimizer: self.real_dp_process_group = [ dp_process_group for i in range(len(self.optimizer.param_groups)) ] - dp_world_size = dist.get_world_size(group=self.dp_process_group) - self.partition_count = [ - dp_world_size for i in range(len(self.optimizer.param_groups)) - ] # Load pre-built or JIT compile (un)flatten ops util_ops = UtilsBuilder().load() @@ -116,13 +79,25 @@ class BF16_Optimizer: # Maintain different fp32 gradients views for convenience self.fp32_groups_gradients = [] + self.fp32_groups_gradient_dict = {} self.fp32_groups_gradients_flat = [] self.fp32_groups_actual_gradients_flat = [] self.fp32_groups_gradient_flat_partition = [] self.fp32_groups_has_gradients = [] self.step_count = 0 - self.groups_padding = [] + self.group_paddings = [] + + if self.using_real_optimizer: + self._setup_for_real_optimizer() + + see_memory_usage('end 
bf16_optimizer', force=True) + + def _setup_for_real_optimizer(self): + dp_world_size = dist.get_world_size(group=self.dp_process_group) + self.partition_count = [ + dp_world_size for i in range(len(self.optimizer.param_groups)) + ] for i, param_group in enumerate(self.optimizer.param_groups): see_memory_usage(f'before initializing group {i}', force=True) @@ -170,6 +145,7 @@ class BF16_Optimizer: flat_tensor=self.fp32_groups_gradients_flat[i], num_elem_list=num_elem_list) self.fp32_groups_gradients.append(fp32_gradients) + self.fp32_groups_gradient_dict[i] = fp32_gradients # flat tensor corresponding to actual fp32 gradients (i.e., minus alignment padding) length_without_padding = sum(num_elem_list) @@ -196,7 +172,7 @@ class BF16_Optimizer: else: padding = 0 - self.groups_padding.append(padding) + self.group_paddings.append(padding) # update optimizer param groups to reference fp32 params partition param_group['params'] = [self.fp32_groups_flat_partition[i]] @@ -209,96 +185,44 @@ class BF16_Optimizer: # Need optimizer states initialized before linking lp to optimizer state self._link_all_hp_params() - - see_memory_usage('end bf16_optimizer', force=True) + self._enable_universal_checkpoint() + self._param_slice_mappings = self._create_param_mapping() + + def _enable_universal_checkpoint(self): + for lp_param_group in self.bf16_groups: + enable_universal_checkpoint(param_list=lp_param_group) + + def _create_param_mapping(self): + param_mapping = [] + for i, _ in enumerate(self.optimizer.param_groups): + param_mapping_per_group = OrderedDict() + for lp in self.bf16_groups[i]: + if lp._hp_mapping is not None: + lp_name = self.param_names[lp] + param_mapping_per_group[ + lp_name] = lp._hp_mapping.get_hp_fragment_address() + param_mapping.append(param_mapping_per_group) + + return param_mapping def _link_all_hp_params(self): dp_world_size = dist.get_world_size(group=self.dp_process_group) - for i, param_group in enumerate(self.optimizer.param_groups): + for i, _ in 
enumerate(self.optimizer.param_groups): # Link bf16 and fp32 params in partition - # TODO: Make this configurable partition_id = dist.get_rank(group=self.real_dp_process_group[i]) partition_size = self.bf16_groups_flat[i].numel() // dp_world_size - self._link_hp_params(self.bf16_groups[i], - self.fp32_groups_flat_partition[i], - partition_id * partition_size, - partition_size, - self.real_dp_process_group[i]) - - def _init_lp_to_hp_mapping(self, - lp_param_list, - partition_start, - partition_size, - dp_group): - current_offset = 0 - param_and_offset_list = [] - partition_end = partition_start + partition_size - for lp_param in lp_param_list: - lp_param._hp_mapping = None - lp_param._dp_group = dp_group - lp_param.get_full_hp_param = types.MethodType(get_full_hp_param, lp_param) - # lp_param overlaps with partition if both are true - # 1) current_offset < partition_end, - # 2) current_offset + lp_param.numel() >= partition_start - lp_param_end = current_offset + lp_param.numel() - if current_offset < partition_end and lp_param_end > partition_start: - param_and_offset_list.append((lp_param, current_offset)) - current_offset += lp_param.numel() - - return param_and_offset_list - - def _link_hp_params(self, - lp_param_list, - flat_hp_partition, - partition_start, - partition_size, - dp_group): - local_lp_param_and_offset = self._init_lp_to_hp_mapping( - lp_param_list, - partition_start, - partition_size, - dp_group) - - hp_end = partition_start + partition_size - for lp_param, lp_start in local_lp_param_and_offset: - lp_end = lp_param.numel() + lp_start - hp_start = partition_start - - fragment_start = max(lp_start, hp_start) - fragment_end = min(lp_end, hp_end) - # print( - # f'{self.dp_rank=} {lp_start=} {lp_end-lp_start=} {hp_start=} {hp_end-hp_start=} {fragment_start=} {fragment_end-fragment_start=}' - # ) - assert fragment_start < fragment_end, \ - f'fragment start {fragment_start} should be < fragment_end {fragment_end}' - - fragment_numel = fragment_end - 
fragment_start - hp_frag_address = fragment_address(start=fragment_start - hp_start, - numel=fragment_numel) - hp_fragment_tensor = flat_hp_partition.narrow(0, - hp_frag_address.start, - hp_frag_address.numel) - - optim_fragment = { - key: value.narrow(0, - hp_frag_address.start, - hp_frag_address.numel) - for key, - value in self.optimizer.state[flat_hp_partition].items() - if torch.is_tensor(value) - } - - lp_frag_address = fragment_address(start=fragment_start - lp_start, - numel=fragment_numel) - lp_fragment_tensor = lp_param.flatten().narrow(0, - lp_frag_address.start, - lp_frag_address.numel) - - lp_param._hp_mapping = tensor_fragment(lp_fragment=lp_fragment_tensor, - lp_fragment_address=lp_frag_address, - hp_fragment=hp_fragment_tensor, - hp_fragment_address=hp_frag_address, - optim_fragment=optim_fragment) + flat_hp_partition = self.fp32_groups_flat_partition[i] + link_hp_params( + lp_param_list=self.bf16_groups[i], + flat_hp_partition=flat_hp_partition, + gradient_dict=self.fp32_groups_gradient_dict, + offload_gradient_dict=None, + use_offload=False, + param_group_index=i, + partition_start=partition_id * partition_size, + partition_size=partition_size, + partition_optimizer_state=self.optimizer.state[flat_hp_partition], + dp_group=self.real_dp_process_group[i]) def initialize_optimizer_states(self): """Take an optimizer step with zero-valued gradients to allocate internal @@ -356,11 +280,6 @@ class BF16_Optimizer: self.update_lp_params() - all_gather_dp_groups(partitioned_param_groups=self.bf16_partitioned_groups, - dp_process_group=self.real_dp_process_group, - start_alignment_factor=self.nccl_start_alignment_factor, - allgather_bucket_size=self.allgather_bucket_size) - self.clear_hp_grads() self.step_count += 1 @@ -427,6 +346,14 @@ class BF16_Optimizer: for i, (bf16_partitions, fp32_partition) in enumerate(zip(self.bf16_partitioned_groups, self.fp32_groups_flat_partition)): partition_id = dist.get_rank(group=self.real_dp_process_group[i]) 
bf16_partitions[partition_id].data.copy_(fp32_partition.data) + # print_rank_0(f'update_lp_params {i=} {partition_id=}', force=True) + # if i == 0: + # print_rank_0(f'{fp32_partition[:10]=}', force=True) + + all_gather_dp_groups(partitioned_param_groups=self.bf16_partitioned_groups, + dp_process_group=self.real_dp_process_group, + start_alignment_factor=self.nccl_start_alignment_factor, + allgather_bucket_size=self.allgather_bucket_size) def clear_hp_grads(self): for flat_gradients in self.fp32_groups_gradients_flat: @@ -445,9 +372,10 @@ class BF16_Optimizer: state_dict[CLIP_GRAD] = self.clip_grad state_dict[BASE_OPTIMIZER_STATE] = self.optimizer.state_dict() state_dict[SINGLE_PARTITION_OF_FP32_GROUPS] = self.fp32_groups_flat_partition - state_dict[GROUPS_PADDING] = self.groups_padding + state_dict[GROUP_PADDINGS] = self.group_paddings state_dict[PARTITION_COUNT] = self.partition_count state_dict[DS_VERSION] = version + state_dict[PARAM_SLICE_MAPPINGS] = self._param_slice_mappings return state_dict @@ -463,8 +391,23 @@ class BF16_Optimizer: def load_state_dict(self, state_dict_list, + checkpoint_folder, load_optimizer_states=True, load_from_fp32_weights=False): + if checkpoint_folder: + self._load_universal_checkpoint(checkpoint_folder, + load_optimizer_states, + load_from_fp32_weights) + else: + self._load_legacy_checkpoint(state_dict_list, + load_optimizer_states, + load_from_fp32_weights) + + def _load_legacy_checkpoint(self, + state_dict_list, + load_optimizer_states=True, + load_from_fp32_weights=False): + dp_rank = dist.get_rank(group=self.dp_process_group) current_rank_sd = state_dict_list[dp_rank] @@ -482,13 +425,35 @@ class BF16_Optimizer: src_tensor = _get_padded_tensor(saved, current.numel()) current.data.copy_(src_tensor.data) - self._link_all_hp_params() + if load_optimizer_states: + self._link_all_hp_params() + + def _load_universal_checkpoint(self, + checkpoint_folder, + load_optimizer_states, + load_from_fp32_weights): + 
self._load_hp_checkpoint_state(checkpoint_folder) @property def param_groups(self): """Forward the wrapped optimizer's parameters.""" return self.optimizer.param_groups + def _load_hp_checkpoint_state(self, checkpoint_dir): + checkpoint_dir = os.path.join(checkpoint_dir, "zero") + tp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu) + tp_world_size = self.mpu.get_slice_parallel_world_size() + + for i, _ in enumerate(self.optimizer.param_groups): + for lp in self.bf16_groups[i]: + if lp._hp_mapping is not None: + #print(f"Loading {self.param_names[lp]} {tp_rank=} {tp_world_size=}") + lp.load_hp_checkpoint_state( + os.path.join(checkpoint_dir, + self.param_names[lp]), + tp_rank, + tp_world_size) + def _get_padded_tensor(src_tensor, size): if src_tensor.numel() >= size: @@ -497,86 +462,3 @@ def _get_padded_tensor(src_tensor, size): slice_tensor = torch.narrow(padded_tensor, 0, 0, src_tensor.numel()) slice_tensor.data.copy_(src_tensor.data) return padded_tensor - - -''' -Logic for lp_param to hp_param mapping - -lp lp0 lp1 lp2 lp3 lp4 <------- indices/names -lp [ ][ ][ ][ ][ ] <-------- tensors -flat_lp [ ] <-------- flat lp params -flat_hp [ ] <------------------ flat hp partition on current rank -full_hp [ ] <------- full flat hp params - - -lp2 - full numel = 16 - lp_frag - numel = 12 - frag_start = 3 - frag_end = 15 - hp_frag - numel = 12 - frag_start = 0 - frag_end = 11 - - hp_frag.copy_(lp_frag) - - -lp3: - full numel = 4 - lp_frag - numel = 4 - start = 0 - end = 3 - hp_frag - numel = 4 - start = 12 - end = 15 - - -lp4: - full numel = 12 - lp_frag - numel = 4 - start = 0 - end = 3 - hp_frag - numel = 4 - start = 16 - end = 19 - - - -Visual depiction of above -lp { } -flat_lp [ ] -flat_hp ( ) - - -flat_lp [ { ( } ) ] - lx hx ly hy - ly-hx - - -lp { } -flat_lp [ ] -flat_hp ( ) - - -flat_lp [ ( { ) } ] - hx lx hy ly - hy-lx - -lp { } -flat_lp [ ] -flat_hp ( ) - - -flat_lp [ ( { } ) ] - hx lx ly hy - ly-lx - -lp -> (lx, hy) -flat_hp -> (hx, hy) -''' diff --git 
a/deepspeed/runtime/checkpoint_engine/README.md b/deepspeed/runtime/checkpoint_engine/README.md new file mode 100644 index 0000000..a19f548 --- /dev/null +++ b/deepspeed/runtime/checkpoint_engine/README.md @@ -0,0 +1,37 @@ +# Checkpoint Engine + + +The `CheckpointEngine` was designed to modularized the checkpoint serialization. In this way, we can simply replace/refine the checkpoint serialization methods. + +### Interface for `CheckpointEngine` + +Basically, for checkpoint management(save/load by deepspeed with the given tag), the `CheckpointEngine` will: + + 1. To make preliminaries ready by call `create(tag)`. For `torch`, we can just log some extra info as `torch` can directly call `save/load` without other preparation. + + 2. After the `create(tag)`, deepspeed can call `save/load` to persist files into disk/memory/etc. + + 3. When all the files for a tag are ready, deepspeed engine will call `commit()` to tell the checkpoint engine current checkpoint is complete. For original torch, it also plays the role of logger. + + +```python +class CheckpointEngine(object): + # init checkpoint engine for save/load + def __init__(self, config_params=None): + pass + + def create(self, tag): + # create checkpoint on give tag for save/load. + pass + + def save(self, state_dict, path: str): + pass + + def load(self, path: str, map_location=None): + pass + + def commit(self, tag): + # to tell checkpoint services if all files are readys. 
+ pass + +``` diff --git a/deepspeed/runtime/checkpoint_engine/__init__.py b/deepspeed/runtime/checkpoint_engine/__init__.py new file mode 100644 index 0000000..fcb45ab --- /dev/null +++ b/deepspeed/runtime/checkpoint_engine/__init__.py @@ -0,0 +1 @@ +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/deepspeed/runtime/checkpoint_engine/checkpoint_engine.py b/deepspeed/runtime/checkpoint_engine/checkpoint_engine.py new file mode 100644 index 0000000..cb6decb --- /dev/null +++ b/deepspeed/runtime/checkpoint_engine/checkpoint_engine.py @@ -0,0 +1,22 @@ +'''Copyright The Microsoft DeepSpeed Team''' + + +class CheckpointEngine(object): + + # init checkpoint engine for save/load + def __init__(self, config_params=None): + pass + + def create(self, tag): + # create checkpoint on give tag for save/load. + pass + + def save(self, state_dict, path: str): + pass + + def load(self, path: str, map_location=None): + pass + + def commit(self, tag): + # to tell checkpoint services if all files are readys. 
+ pass diff --git a/deepspeed/runtime/checkpoint_engine/nebula_checkpoint_engine.py b/deepspeed/runtime/checkpoint_engine/nebula_checkpoint_engine.py new file mode 100644 index 0000000..42bb067 --- /dev/null +++ b/deepspeed/runtime/checkpoint_engine/nebula_checkpoint_engine.py @@ -0,0 +1,115 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import os +import torch +import torch_nebula + +from deepspeed.runtime.checkpoint_engine.checkpoint_engine import \ + CheckpointEngine +from deepspeed.utils import logger, log_dist +from deepspeed.nebula.constants import * + + +def _get_tag_from_path(path): + return os.path.basename(os.path.dirname(path)) + + +class NebulaCheckpointEngine(CheckpointEngine): + def __init__(self, config_params=None): + super().__init__(config_params) + self.checkpoint = None + self.tag_flag = None + self.enable_nebula_load = config_params.enable_nebula_load + self.nebula_load_path = config_params.load_path + if self.nebula_load_path is None: + self.nebula_load_path = config_params.persistent_storage_path + + nebula_config_params = { + NEBULA_PERSISTENT_STORAGE_PATH: config_params.persistent_storage_path, + NEBULA_PERSISTENT_TIME_INTERVAL: config_params.persistent_time_interval, + NEBULA_NUM_OF_VERSION_IN_RETENTION: + config_params.num_of_version_in_retention, + } + torch_nebula.init(**nebula_config_params) + + def create(self, tag): + log_dist(f"[Nebula] Start Checkpoint for tag:{tag}", ranks=[0]) + # -2 means: customer needs to explicitly tell nebula + # current checkpoint is complete by commit methond. 
+ self.checkpoint = torch_nebula.Checkpoint(tag, -2) + + def save(self, state_dict, path: str): + log_dist(f"[Nebula] Create dummy files for loading.") + torch.save("", path) + + tag = _get_tag_from_path(path) + partititon_name = os.path.basename(path) + logger.info(f"[Nebula] Saving {partititon_name} under tag {tag}...") + self.checkpoint.save(partititon_name, state_dict) + logger.info(f"[Nebula] Saved {partititon_name} under tag {tag}.") + return None + + def load(self, path: str, map_location=None): + tag = _get_tag_from_path(path) + first_load_flag = self.tag_flag is None or self.tag_flag == tag + if not self.enable_nebula_load and first_load_flag: + self.tag_flag = tag + logger.info( + f"[Nebula] Disable nebula load. Loading checkpoint from {path} ...") + partition = torch.load(path, map_location=map_location) + logger.info(f"[Nebula] Disable nebula load. Loaded checkpoint from {path} .") + return partition + + partititon_name = os.path.basename(path) + logger.info( + f"[Nebula] Loading {path} under tag {tag} from nebula path {self.nebula_load_path}..." + ) + + checkpoint = None + if tag in (None, 'latest', 'latest_universal'): + # In some cases, there is the inconsistent tag between deepspeed metadata (latest file) + # and nebula metadata, will lead to the failure on loading with deepspeed tag. Then we + # will try to load the valid latest checkpoint from nebula(tier3 > tier1). So, in summary + # when met failure loading for given tag, the loading priority would be like: + # nebula tier3 latest > nebula tier1 latest. + checkpoint = torch_nebula.get_latest_checkpoint( + persist_path=self.nebula_load_path) + else: + checkpoint = torch_nebula.get_checkpoint(tag=tag, + persist_path=self.nebula_load_path) + + if checkpoint is None or (checkpoint is not None and checkpoint.tag == ''): + logger.info( + f"Unable to find valid checkpoint tag:{tag} from Nebula, try to get latest checkpoint again from nebula {self.nebula_load_path} path!" 
+ ) + # nebula tier3 latest + checkpoint = torch_nebula.get_latest_checkpoint( + persist_path=self.nebula_load_path) + if checkpoint is None or (checkpoint is not None and checkpoint.tag == ''): + logger.info( + f"Unable to find latest checkpoint from Nebula tier3, try to get latest checkpoint again from nebula tier1 path!" + ) + # nebula tier1 latest + checkpoint = torch_nebula.get_latest_checkpoint() + logger.warning( + f"Unable to find valid checkpoint from Nebula under tag:{tag}.") + return None + + tag = checkpoint.tag + self.tag_flag = -1 + partition = checkpoint.load(partititon_name, map_location=map_location) + logger.info( + f"[Nebula] Loaded {path} under tag {tag} from {self.nebula_load_path}.") + return partition + + def commit(self, tag): + # nebula commit will be call when all files under give tag are ready to be persisted in the async way. + logger.info( + f"[Nebula] all files for {tag} are saved in tier1. It is ready to start persisting" + ) + commit_rls = self.checkpoint.commit() + if not commit_rls: + logger.error( + f"[Nebula] failed to commit the checkpoint, please check the log.") + return False + return commit_rls diff --git a/deepspeed/runtime/checkpoint_engine/torch_checkpoint_engine.py b/deepspeed/runtime/checkpoint_engine/torch_checkpoint_engine.py new file mode 100644 index 0000000..5ee1318 --- /dev/null +++ b/deepspeed/runtime/checkpoint_engine/torch_checkpoint_engine.py @@ -0,0 +1,30 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import torch +from deepspeed.utils import logger, log_dist +from deepspeed.runtime.checkpoint_engine.checkpoint_engine import \ + CheckpointEngine + + +class TorchCheckpointEngine(CheckpointEngine): + def __init__(self, config_params=None): + super().__init__(config_params) + + def create(self, tag): + log_dist(f"[Torch] Checkpoint {tag} is begin to save!", ranks=[0]) + + def save(self, state_dict, path: str): + logger.info(f"[Torch] Saving {path}...") + torch.save(state_dict, path) + logger.info(f"[Torch] 
Saved {path}.") + return None + + def load(self, path: str, map_location=None): + logger.info(f"[Torch] Loading checkpoint from {path}...") + partition = torch.load(path, map_location=map_location) + logger.info(f"[Torch] Loaded checkpoint from {path}.") + return partition + + def commit(self, tag): + logger.info(f"[Torch] Checkpoint {tag} is ready now!") + return True diff --git a/deepspeed/runtime/comm/__init__.py b/deepspeed/runtime/comm/__init__.py index e69de29..fcb45ab 100644 --- a/deepspeed/runtime/comm/__init__.py +++ b/deepspeed/runtime/comm/__init__.py @@ -0,0 +1 @@ +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/deepspeed/runtime/comm/coalesced_collectives.py b/deepspeed/runtime/comm/coalesced_collectives.py index 3b42d22..07302b1 100644 --- a/deepspeed/runtime/comm/coalesced_collectives.py +++ b/deepspeed/runtime/comm/coalesced_collectives.py @@ -1,3 +1,4 @@ +'''Copyright The Microsoft DeepSpeed Team''' """batched collective operations for overhead amortization and better bandwidth utilization""" @@ -6,36 +7,23 @@ from typing import List import torch from torch import Tensor -import torch.distributed +from deepspeed import comm as dist +# NOTE: Use torch.distributed's ProcessGroup class until we have our own. from torch.distributed import ProcessGroup import torch.nn.functional from deepspeed.utils import instrument_w_nvtx -from deepspeed.utils.logging import logger - -if hasattr(torch.distributed, "_reduce_scatter_base"): - - def torch_reduce_scatter_fn(input_tensor: Tensor, output_tensor: Tensor, group): - instrument_w_nvtx(torch.distributed._reduce_scatter_base)( - output_tensor, - input_tensor, - group=group, - ) -else: - logger.warning( - "unable to find torch.distributed._reduce_scatter_base. will fall back to " - "torch.distributed.reduce_scatter which will result in suboptimal performance. 
" - "please consider upgrading your pytorch installation.") - - def torch_reduce_scatter_fn(input_tensor: Tensor, output_tensor: Tensor, group): - input_tensor_lst = list( - torch.chunk(input_tensor, - torch.distributed.get_world_size(group))) - instrument_w_nvtx(torch.distributed.reduce_scatter)( - output_tensor, - input_tensor_lst, - group=group, - ) + + +def _torch_reduce_scatter_fn(input_tensor: Tensor, + output_tensor: Tensor, + group=None, + async_op=False, + prof=False): + return instrument_w_nvtx(dist.reduce_scatter_fn)(output_tensor, + input_tensor, + group=group, + async_op=async_op) @instrument_w_nvtx @@ -49,8 +37,8 @@ def reduce_scatter_coalesced( TODO. see if PyTorch team wants a c++ version of this for ProcessGroupNCCL """ - this_rank = torch.distributed.get_rank(group) - world_sz = torch.distributed.get_world_size(group) + this_rank = dist.get_rank(group) + world_sz = dist.get_world_size(group) partition_lst_for_each_tensor = [None] * len(tensors) for tensor_idx, tensor in enumerate(tensors): @@ -97,9 +85,9 @@ def reduce_scatter_coalesced( world_sz) # batched reduce-scatter call - torch_reduce_scatter_fn(tensor_partition_flat_buffer, - tensor_partition_buffer_for_each_rank[this_rank], - group) + _torch_reduce_scatter_fn(tensor_partition_flat_buffer, + tensor_partition_buffer_for_each_rank[this_rank], + group=group) # reverse procedure of the interleaving done previously, done on the # result of the batched reduce-scatter diff --git a/deepspeed/runtime/comm/nccl.py b/deepspeed/runtime/comm/nccl.py index fcab478..fd99068 100644 --- a/deepspeed/runtime/comm/nccl.py +++ b/deepspeed/runtime/comm/nccl.py @@ -3,12 +3,12 @@ Copyright 2020 The Microsoft DeepSpeed Team ''' import torch -import torch.distributed as dist -import time +from deepspeed import comm as dist import cupy import numpy as np from deepspeed.runtime.compression.cupy import CupyBackend +from deepspeed.accelerator import get_accelerator class NcclBackend(object): @@ -69,7 +69,7 @@ class 
NcclBackend(object): buffer_m = torch.cat([buffer_m, empty_tensor]) buffer_m.add_(worker_error) - worker_scale = torch.norm(buffer_m) / np.sqrt(torch.numel(buffer_m)) + worker_scale = torch.norm(buffer_m) / np.sqrt(buffer_m.numel()) worker_error.set_(buffer_m - worker_scale * buffer_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0)) @@ -101,7 +101,8 @@ class NcclBackend(object): recvbuf_scale = [ torch.zeros(1, dtype=worker_scale.dtype, - device=torch.device(local_rank)) for i in range(self.size) + device=torch.device(get_accelerator().device_name(local_rank))) + for i in range(self.size) ] # communication phase 1 diff --git a/deepspeed/runtime/compression/__init__.py b/deepspeed/runtime/compression/__init__.py index e69de29..fcb45ab 100644 --- a/deepspeed/runtime/compression/__init__.py +++ b/deepspeed/runtime/compression/__init__.py @@ -0,0 +1 @@ +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/deepspeed/runtime/config.py b/deepspeed/runtime/config.py old mode 100644 new mode 100755 index 1df5912..9da1058 --- a/deepspeed/runtime/config.py +++ b/deepspeed/runtime/config.py @@ -1,3 +1,4 @@ +'''Copyright The Microsoft DeepSpeed Team''' """ Copyright (c) Microsoft Corporation Licensed under the MIT license. 
@@ -7,7 +8,9 @@ from typing import Union import torch import json +import hjson import copy +import base64 from .constants import * from .fp16.loss_scaler import ( @@ -21,9 +24,12 @@ from .config_utils import ( dict_raise_error_on_duplicate_keys, ScientificNotationEncoder, ) -from .zero.config import DeepSpeedZeroConfig -from .zero.constants import * +from .zero.config import get_zero_config, ZeroStageEnum from .activation_checkpointing.config import DeepSpeedActivationCheckpointingConfig +from ..comm.config import DeepSpeedCommsConfig +from ..monitor.config import get_monitor_config + +from deepspeed import comm as dist from ..git_version_info import version as __version__ from ..utils import logger @@ -38,13 +44,23 @@ from ..elasticity.constants import ( ELASTICITY, IGNORE_NON_ELASTIC_BATCH_INFO, IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT, + MODEL_PARLLEL_SIZE, + MODEL_PARLLEL_SIZE_DEFAULT, + NUM_GPUS_PER_NODE, + NUM_GPUS_PER_NODE_DEFAULT, ) from ..profiling.config import DeepSpeedFlopsProfilerConfig from ..autotuning.config import DeepSpeedAutotuningConfig +from ..nebula.config import DeepSpeedNebulaConfig +from ..compression.config import get_compression_config, get_quantize_enabled +from ..compression.constants import * from .swap_tensor.aio_config import get_aio_config +from .data_pipeline.config import get_data_efficiency_enabled, get_data_efficiency_config, get_curriculum_enabled_legacy, get_curriculum_params_legacy +from .data_pipeline.constants import * + TENSOR_CORE_ALIGN_SIZE = 8 ADAGRAD_OPTIMIZER = 'adagrad' @@ -76,24 +92,6 @@ class DeepSpeedConfigError(Exception): pass -def get_curriculum_enabled(param_dict): - if CURRICULUM_LEARNING in param_dict.keys(): - return get_scalar_param(param_dict[CURRICULUM_LEARNING], - CURRICULUM_ENABLED, - CURRICULUM_ENABLED_DEFAULT) - else: - return False - - -def get_curriculum_params(param_dict): - if CURRICULUM_LEARNING in param_dict.keys(): - curriculum_params = copy.copy(param_dict[CURRICULUM_LEARNING]) - 
curriculum_params.pop(CURRICULUM_ENABLED) - return curriculum_params - else: - return False - - def get_pld_enabled(param_dict): if PROGRESSIVE_LAYER_DROP in param_dict.keys(): return get_scalar_param(param_dict[PROGRESSIVE_LAYER_DROP], @@ -153,6 +151,11 @@ def get_fp16_master_weights_and_grads_enabled(param_dict): return False +def get_fp16_auto_cast(param_dict): + if get_fp16_enabled(param_dict): + return get_scalar_param(param_dict[FP16], FP16_AUTO_CAST, FP16_AUTO_CAST_DEFAULT) + + def get_loss_scale(param_dict): if get_fp16_enabled(param_dict): return get_scalar_param(param_dict[FP16], @@ -220,18 +223,6 @@ def get_sparse_gradients_enabled(param_dict): return get_scalar_param(param_dict, SPARSE_GRADIENTS, SPARSE_GRADIENTS_DEFAULT) -def get_zero_optimization(param_dict): - return get_scalar_param(param_dict, ZERO_OPTIMIZATION, ZERO_OPTIMIZATION_DEFAULT) - - -def get_zero_reduce_scatter(param_dict): - return get_scalar_param( - param_dict, - ZERO_OPTIMIZATION_REDUCE_SCATTER, - ZERO_OPTIMIZATION_REDUCE_SCATTER_DEFAULT, - ) - - def get_communication_data_type(param_dict): val = get_scalar_param(param_dict, COMMUNICATION_DATA_TYPE, @@ -261,73 +252,6 @@ def get_gradient_predivide_factor(param_dict): GRADIENT_PREDIVIDE_FACTOR_DEFAULT) -def get_quantize_enabled(param_dict): - if QUANTIZE_TRAINING in param_dict.keys(): - return get_scalar_param( - param_dict[QUANTIZE_TRAINING], - QUANTIZE_TRAINING_ENABLED, - QUANTIZE_TRAINING_ENABLED_DEFAULT, - ) - else: - return False - - -def get_quantize_training(param_dict): - if QUANTIZE_TRAINING in param_dict.keys(): - return ( - (param_dict[QUANTIZE_TRAINING][QUANTIZE_BITS][TARGET_BITS]), - (param_dict[QUANTIZE_TRAINING][QUANTIZE_BITS][START_BITS] - if START_BITS in param_dict[QUANTIZE_TRAINING][QUANTIZE_BITS].keys() else - QUANTIZE_START_BITS_DEFAULT), - (param_dict[QUANTIZE_TRAINING][QUANTIZE_SCHEDULE][QUANTIZE_PERIOD] - if QUANTIZE_SCHEDULE in param_dict[QUANTIZE_TRAINING].keys() else - QUANTIZE_PERIOD_DEFAULT), - 
(param_dict[QUANTIZE_TRAINING][QUANTIZE_SCHEDULE][SCHEDULE_OFFSET] - if QUANTIZE_SCHEDULE in param_dict[QUANTIZE_TRAINING].keys() and - SCHEDULE_OFFSET in param_dict[QUANTIZE_TRAINING][QUANTIZE_SCHEDULE].keys() - else QUANTIZE_OFFSET_DEFAULT), - (param_dict[QUANTIZE_TRAINING][QUANTIZE_GROUPS] if QUANTIZE_GROUPS - in param_dict[QUANTIZE_TRAINING].keys() else QUANTIZE_GROUPS_DEFAULT), - (param_dict[QUANTIZE_TRAINING][FP16_MIXED_QUANTIZE] - [FP16_MIXED_QUANTIZE_ENABLED] - if FP16_MIXED_QUANTIZE in param_dict[QUANTIZE_TRAINING].keys() - and FP16_MIXED_QUANTIZE_ENABLED - in param_dict[QUANTIZE_TRAINING][FP16_MIXED_QUANTIZE].keys() else - FP16_MIXED_QUANTIZE_ENABLED_DEFAULT), - (param_dict[QUANTIZE_TRAINING][FP16_MIXED_QUANTIZE][QUANTIZE_CHANGE_RATIO] - if FP16_MIXED_QUANTIZE in param_dict[QUANTIZE_TRAINING].keys() - and QUANTIZE_CHANGE_RATIO - in param_dict[QUANTIZE_TRAINING][FP16_MIXED_QUANTIZE].keys() else - QUANTIZE_CHANGE_RATIO_DEFAULT), - (1 if QUANTIZE_ALGO in param_dict[QUANTIZE_TRAINING] - and QUANTIZE_TYPE in param_dict[QUANTIZE_TRAINING][QUANTIZE_ALGO].keys() - and param_dict[QUANTIZE_TRAINING][QUANTIZE_ALGO][QUANTIZE_TYPE] - == QUANTIZE_ASYMMETRIC else QUANTIZE_TYPE_DEFAULT), - (1 if QUANTIZE_ALGO in param_dict[QUANTIZE_TRAINING] and QUANTIZE_ROUNDING - in param_dict[QUANTIZE_TRAINING][QUANTIZE_ALGO].keys() - and param_dict[QUANTIZE_TRAINING][QUANTIZE_ALGO][QUANTIZE_ROUNDING] - == STOCHASTIC_ROUNDING else QUANTIZE_ROUNDING_DEFAULT), - (param_dict[QUANTIZE_TRAINING][QUANTIZE_VERBOSE] if QUANTIZE_VERBOSE - in param_dict[QUANTIZE_TRAINING].keys() else QUANTIZE_VERBOSE_DEFAULT), - (param_dict[QUANTIZE_TRAINING][QUANTIZER_KERNEL] if QUANTIZER_KERNEL - in param_dict[QUANTIZE_TRAINING].keys() else QUANTIZER_KERNEL_DEFAULT), - ) - else: - return ( - QUANTIZE_TARGET_BITS_DEFAULT, - QUANTIZE_START_BITS_DEFAULT, - QUANTIZE_PERIOD_DEFAULT, - QUANTIZE_OFFSET_DEFAULT, - QUANTIZE_GROUPS_DEFAULT, - FP16_MIXED_QUANTIZE_ENABLED_DEFAULT, - QUANTIZE_CHANGE_RATIO_DEFAULT, - 
QUANTIZE_TYPE_DEFAULT, - QUANTIZE_ROUNDING_DEFAULT, - QUANTIZE_VERBOSE_DEFAULT, - QUANTIZER_KERNEL_DEFAULT, - ) - - def get_steps_per_print(param_dict): return get_scalar_param(param_dict, STEPS_PER_PRINT, STEPS_PER_PRINT_DEFAULT) @@ -615,18 +539,10 @@ def get_memory_breakdown(param_dict): return get_scalar_param(param_dict, MEMORY_BREAKDOWN, MEMORY_BREAKDOWN_DEFAULT) -def get_tensorboard_enabled(param_dict): - if TENSORBOARD in param_dict.keys(): - return get_scalar_param(param_dict[TENSORBOARD], - TENSORBOARD_ENABLED, - TENSORBOARD_ENABLED_DEFAULT) - else: - return False - - def get_eigenvalue_config(param_dict): if get_quantize_enabled(param_dict): param_dict = param_dict[QUANTIZE_TRAINING] + assert not get_eigenvalue_enabled(param_dict), "Eigenvalue based MoQ is temporarily disabled" return ( get_eigenvalue_enabled(param_dict), get_eigenvalue_verbose(param_dict), @@ -724,30 +640,14 @@ def get_eigenvalue_layer_num(param_dict): return EIGENVALUE_LAYER_NUM_DEFAULT -def get_tensorboard_output_path(param_dict): - if get_tensorboard_enabled(param_dict): - return get_scalar_param( - param_dict[TENSORBOARD], - TENSORBOARD_OUTPUT_PATH, - TENSORBOARD_OUTPUT_PATH_DEFAULT, - ) - else: - return TENSORBOARD_OUTPUT_PATH_DEFAULT - - -def get_tensorboard_job_name(param_dict): - if get_tensorboard_enabled(param_dict): - return get_scalar_param(param_dict[TENSORBOARD], - TENSORBOARD_JOB_NAME, - TENSORBOARD_JOB_NAME_DEFAULT) - else: - return TENSORBOARD_JOB_NAME_DEFAULT - - def get_checkpoint_params(param_dict): return param_dict.get(CHECKPOINT, {}) +def get_data_types_params(param_dict): + return param_dict.get(DATA_TYPES, {}) + + def get_checkpoint_tag_validation_mode(checkpoint_params): tag_validation_mode = checkpoint_params.get(CHECKPOINT_TAG_VALIDATION, CHECKPOINT_TAG_VALIDATION_DEFAULT) @@ -761,6 +661,19 @@ def get_checkpoint_tag_validation_mode(checkpoint_params): ) +def get_checkpoint_parallel_write_pipeline(checkpoint_params): + par_write_params = 
checkpoint_params.get(CHECKPOINT_PARALLEL_WRITE, {}) + par_write_pipeline = par_write_params.get( + CHECKPOINT_PARALLEL_WRITE_PIPELINE_STAGE, + CHECKPOINT_PARALLEL_WRITE_PIPELINE_STAGE_DEFAULT) + if par_write_pipeline in [True, False]: + return par_write_pipeline + else: + raise DeepSpeedConfigError( + "checkpoint::parallel_write::pipeline_stage " + f"value of '{par_write_pipeline}' is invalid, expecting: true or false") + + def get_dataloader_drop_last(param_dict): return get_scalar_param(param_dict, DATALOADER_DROP_LAST, @@ -794,18 +707,22 @@ class DeepSpeedConfig(object): if isinstance(config, dict): self._param_dict = config elif os.path.exists(config): - self._param_dict = json.load( + self._param_dict = hjson.load( open(config, "r"), object_pairs_hook=dict_raise_error_on_duplicate_keys) else: - raise ValueError( - f"Expected a string path to an existing deepspeed config, or a dictionary. Received: {config}" - ) + try: + config_decoded = base64.urlsafe_b64decode(config).decode('utf-8') + self._param_dict = hjson.loads(config_decoded) + except (UnicodeDecodeError, AttributeError): + raise ValueError( + f"Expected a string path to an existing deepspeed config, or a dictionary or a valid base64. 
Received: {config}" + ) try: - self.global_rank = torch.distributed.get_rank() + self.global_rank = dist.get_rank() if mpu is None: - self.world_size = torch.distributed.get_world_size() + self.world_size = dist.get_world_size() else: self.world_size = mpu.get_data_parallel_world_size() except: @@ -827,6 +744,21 @@ class DeepSpeedConfig(object): # Ensure the resource scheduler saw the same elastic config we are using at runtime ensure_immutable_elastic_config(runtime_elastic_config_dict=elastic_dict) + self.elastic_model_parallel_size = elastic_dict.get( + MODEL_PARLLEL_SIZE, + MODEL_PARLLEL_SIZE_DEFAULT) + if self.elastic_model_parallel_size < 1: + raise ElasticityConfigError( + "Model-Parallel size cannot be less than 1, " + f"given model-parallel size: {self.elastic_model_parallel_size}") + + self.num_gpus_per_node = elastic_dict.get(NUM_GPUS_PER_NODE, + NUM_GPUS_PER_NODE_DEFAULT) + if self.num_gpus_per_node < 1: + raise ElasticityConfigError( + "NUmber of GPUs per node cannot be less than 1, " + f"given number of GPUs per node: {self.num_gpus_per_node}") + ignore_non_elastic_batch_info = elastic_dict.get( IGNORE_NON_ELASTIC_BATCH_INFO, IGNORE_NON_ELASTIC_BATCH_INFO_DEFAULT) @@ -871,7 +803,8 @@ class DeepSpeedConfig(object): self._param_dict[TRAIN_MICRO_BATCH_SIZE_PER_GPU] = micro_batch_size self._param_dict[GRADIENT_ACCUMULATION_STEPS] = gradient_accu_steps - self._initialize_params(self._param_dict) + # Pass a copy so that user json is unmodified, e.g. 
for logging + self._initialize_params(copy.copy(self._param_dict)) self._configure_train_batch_size() self._do_sanity_check() @@ -890,15 +823,19 @@ class DeepSpeedConfig(object): self.gradient_predivide_factor = get_gradient_predivide_factor(param_dict) self.sparse_gradients_enabled = get_sparse_gradients_enabled(param_dict) - self.zero_config = DeepSpeedZeroConfig(param_dict) + self.zero_config = get_zero_config(param_dict) self.zero_optimization_stage = self.zero_config.stage self.zero_enabled = self.zero_optimization_stage > 0 self.activation_checkpointing_config = DeepSpeedActivationCheckpointingConfig( param_dict) + self.comms_config = DeepSpeedCommsConfig(param_dict) + self.monitor_config = get_monitor_config(param_dict) + self.gradient_clipping = get_gradient_clipping(param_dict) self.fp16_enabled = get_fp16_enabled(param_dict) + self.fp16_auto_cast = get_fp16_auto_cast(param_dict) self.bfloat16_enabled = get_bfloat16_enabled(param_dict) assert not (self.fp16_enabled and self.bfloat16_enabled), 'bfloat16 and fp16 modes cannot be simultaneously enabled' self.fp16_master_weights_and_gradients = get_fp16_master_weights_and_grads_enabled( @@ -909,20 +846,7 @@ class DeepSpeedConfig(object): self.initial_dynamic_scale = get_initial_dynamic_scale(param_dict) self.dynamic_loss_scale_args = get_dynamic_loss_scale_args(param_dict) - self.quantize_training_enabled = get_quantize_enabled(param_dict) - ( - self.quantize_target_bits, - self.quantize_start_bits, - self.quantize_period, - self.quantize_offset, - self.quantize_groups, - self.fp16_mixed_quantize, - self.quantize_change_rate, - self.quantize_type, - self.quantize_rounding, - self.quantize_verbose, - self.use_quantizer_kernel, - ) = get_quantize_training(param_dict) + self.compression_config = get_compression_config(param_dict) self.optimizer_name = get_optimizer_name(param_dict) if (self.optimizer_name is not None @@ -943,9 +867,6 @@ class DeepSpeedConfig(object): | self.flops_profiler_config.enabled) 
self.memory_breakdown = get_memory_breakdown(param_dict) self.autotuning_config = DeepSpeedAutotuningConfig(param_dict) - self.tensorboard_enabled = get_tensorboard_enabled(param_dict) - self.tensorboard_output_path = get_tensorboard_output_path(param_dict) - self.tensorboard_job_name = get_tensorboard_job_name(param_dict) ( self.eigenvalue_enabled, @@ -964,19 +885,38 @@ class DeepSpeedConfig(object): self.pld_enabled = get_pld_enabled(param_dict) self.pld_params = get_pld_params(param_dict) - self.curriculum_enabled = get_curriculum_enabled(param_dict) - self.curriculum_params = get_curriculum_params(param_dict) + self.curriculum_enabled_legacy = get_curriculum_enabled_legacy(param_dict) + self.curriculum_params_legacy = get_curriculum_params_legacy(param_dict) + + self.data_efficiency_enabled = get_data_efficiency_enabled(param_dict) + self.data_efficiency_config = get_data_efficiency_config(param_dict) checkpoint_params = get_checkpoint_params(param_dict) validation_mode = get_checkpoint_tag_validation_mode(checkpoint_params) self.checkpoint_tag_validation_enabled = (validation_mode != ValidationMode.IGNORE) self.checkpoint_tag_validation_fail = validation_mode == ValidationMode.FAIL + self.load_universal_checkpoint = checkpoint_params.get( + LOAD_UNIVERSAL_CHECKPOINT, + LOAD_UNIVERSAL_CHECKPOINT_DEFAULT) + + self.use_node_local_storage = checkpoint_params.get( + USE_NODE_LOCAL_STORAGE_CHECKPOINT, + USE_NODE_LOCAL_STORAGE_CHECKPOINT_DEFAULT) + + data_types_params = get_data_types_params(param_dict) + self.grad_accum_dtype = data_types_params.get(GRAD_ACCUM_DTYPE, + GRAD_ACCUM_DTYPE_DEFAULT) + + par_write_pipe = get_checkpoint_parallel_write_pipeline(checkpoint_params) + self.checkpoint_parallel_write_pipeline = par_write_pipe self.aio_config = get_aio_config(param_dict) self.dataloader_drop_last = get_dataloader_drop_last(param_dict) + self.nebula_config = DeepSpeedNebulaConfig(param_dict) + def _batch_assertion(self): train_batch = self.train_batch_size @@ 
-1055,13 +995,7 @@ class DeepSpeedConfig(object): self._do_warning_check() - def print(self, name): - logger.info("{}:".format(name)) - for arg in sorted(vars(self)): - if arg != "_param_dict": - dots = "." * (29 - len(arg)) - logger.info(" {} {} {}".format(arg, dots, getattr(self, arg))) - + def print_user_config(self): logger.info(" json = {}".format( json.dumps( self._param_dict, @@ -1072,6 +1006,15 @@ class DeepSpeedConfig(object): ":"), ))) + def print(self, name): + logger.info("{}:".format(name)) + for arg in sorted(vars(self)): + if arg != "_param_dict": + dots = "." * (29 - len(arg)) + logger.info(" {} {} {}".format(arg, dots, getattr(self, arg))) + + self.print_user_config() + def _do_error_check(self): assert ( self.train_micro_batch_size_per_gpu @@ -1083,13 +1026,13 @@ class DeepSpeedConfig(object): if self.zero_enabled: assert ( - self.zero_optimization_stage <= MAX_STAGE_ZERO_OPTIMIZATION + self.zero_optimization_stage <= ZeroStageEnum.max_stage ), "DeepSpeedConfig: Maximum supported ZeRO stage is {}".format( - MAX_STAGE_ZERO_OPTIMIZATION + ZeroStageEnum.max_stage ) if self.fp16_master_weights_and_gradients: - assert self.zero_enabled and self.zero_optimization_stage == ZERO_OPTIMIZATION_GRADIENTS, "Fp16_master_weights_and_grads is only supported with ZeRO Stage 2 for now." + assert self.zero_enabled and self.zero_optimization_stage == ZeroStageEnum.gradients, "Fp16_master_weights_and_grads is only supported with ZeRO Stage 2 for now." def _do_warning_check(self): fp16_enabled = self.fp16_enabled diff --git a/deepspeed/runtime/config_utils.py b/deepspeed/runtime/config_utils.py old mode 100644 new mode 100755 index 83c48bb..cd262f1 --- a/deepspeed/runtime/config_utils.py +++ b/deepspeed/runtime/config_utils.py @@ -1,3 +1,4 @@ +'''Copyright The Microsoft DeepSpeed Team''' """ Copyright (c) Microsoft Corporation Licensed under the MIT license. 
@@ -8,6 +9,133 @@ Collection of DeepSpeed configuration utilities import json import collections import collections.abc +from functools import reduce +from pydantic import BaseModel +from deepspeed.utils import logger + + +class DeepSpeedConfigModel(BaseModel): + """ + This class should be used as a base for all DeepSpeed configs. It extends + pydantic.BaseModel to allow for deprecated fields. To enable this feature, + add deprecated=True to pydantic.Field: + + my_dep_field: int = Field(0, deprecated=True) + + Deprecated Field kwargs: + - deprecated: [True|False], default False + Enables / Disables deprecated fields + - deprecated_msg: str, default "" + Message to include with deprecation warning + - new_param: str, default "" + Name of the field replacing the deprecated field + - set_new_param: [True|False], default True + If new_param is provided, enables setting the value of that param with + deprecated field value + - new_param_fn: callable, default (lambda x: x) + If new_param is provided and set_new_param is True, this function will + modify the value of the deprecated field before placing that value in + the new_param field + + Example: + my_new_field is replacing a deprecated my_old_field. The expected type + for my_new_field is int while the expected type for my_old_field is + str. 
We want to maintain backward compatibility with our configs, so we + define the fields with: + + class MyExampleConfig(DeepSpeedConfigModel): + my_new_field: int = 0 + my_old_field: str = Field('0', + deprecated=True, + new_param='my_new_field', + new_param_fn=(lambda x: int(x))) + """ + def __init__(self, strict=False, **data): + if ( + not strict + ): # This is temporary until we refactor all DS configs, allows HF to load models + data = { + k: v + for k, + v in data.items() if (v != "auto" or k == "replace_method") + } + super().__init__(**data) + self._deprecated_fields_check(self) + + def _process_deprecated_field(self, pydantic_config, field): + # Get information about the deprecated field + fields_set = pydantic_config.__fields_set__ + dep_param = field.name + kwargs = field.field_info.extra + new_param_fn = kwargs.get("new_param_fn", lambda x: x) + param_value = new_param_fn(getattr(pydantic_config, dep_param)) + new_param = kwargs.get("new_param", "") + dep_msg = kwargs.get("deprecated_msg", "") + if dep_param in fields_set: + logger.warning(f"Config parameter {dep_param} is deprecated" + + (f" use {new_param} instead" if new_param else "") + + (f". 
{dep_msg}" if dep_msg else "")) + # Check if there is a new param and if it should be set with a value + if new_param and kwargs.get("set_new_param", True): + # Remove the deprecate field if there is a replacing field + try: + delattr(pydantic_config, dep_param) + except Exception as e: + logger.error(f"Tried removing deprecated '{dep_param}' from config") + raise e + + # Set new param value + new_param_nested = new_param.split(".") + if len(new_param_nested) > 1: + # If the new param exists in a subconfig, we need to get + # the fields set for that subconfig + pydantic_config = reduce(getattr, + new_param_nested[:-1], + pydantic_config) + fields_set = pydantic_config.__fields_set__ + new_param_name = new_param_nested[-1] + assert ( + new_param_name not in fields_set + ), f"Cannot provide deprecated parameter '{dep_param}' and replacing parameter '{new_param}' together" + # A custom function for converting the old param value to new param value can be provided + try: + setattr(pydantic_config, new_param_name, param_value) + except Exception as e: + logger.error( + f"Tried setting value for '{new_param}' with value from deprecated '{dep_param}'" + ) + raise e + + def _deprecated_fields_check(self, pydantic_config): + fields = pydantic_config.__fields__ + for field in fields.values(): + if field.field_info.extra.get("deprecated", False): + self._process_deprecated_field(pydantic_config, field) + + class Config: + validate_all = True + validate_assignment = True + use_enum_values = True + allow_population_by_field_name = True + extra = "forbid" + arbitrary_types_allowed = True + + +class pp_int(int): + """ + A wrapper for integers that will return a custom string or comma-formatted + string of the integer. For example, print(pp_int(1e5)) will return + "10,000". This is useful mainly for auto-generated documentation purposes. 
+ """ + def __new__(cls, val, custom_print_str=None): + inst = super().__new__(cls, val) + inst.custom_print_str = custom_print_str + return inst + + def __repr__(self): + if self.custom_print_str: + return self.custom_print_str + return f"{self.real:,}" # adapted from https://stackoverflow.com/a/50701137/9201239 @@ -37,7 +165,7 @@ class ScientificNotationEncoder(json.JSONEncoder): f'\n{prefix}"{k}": {self.iterencode(v, level=level)}' for k, v in o.items() ] - return "{" + ', '.join(x) + f"\n{prefix_close}" + "}" + return "{" + ", ".join(x) + f"\n{prefix_close}" + "}" elif isinstance(o, collections.abc.Sequence) and not isinstance(o, str): return f"[{ f', '.join(map(self.iterencode, o)) }]" return "\n, ".join(super().iterencode(o, _one_shot)) diff --git a/deepspeed/runtime/constants.py b/deepspeed/runtime/constants.py old mode 100644 new mode 100755 index ee2e51c..6925745 --- a/deepspeed/runtime/constants.py +++ b/deepspeed/runtime/constants.py @@ -1,3 +1,4 @@ +'''Copyright The Microsoft DeepSpeed Team''' """ Copyright (c) Microsoft Corporation Licensed under the MIT license. 
@@ -133,8 +134,9 @@ FP16_FORMAT = ''' FP16 parameters should be of the format: "fp16": { "enabled": true, + "auto_cast": false, "loss_scale": 0, - "initial_scale_power": 32, + "initial_scale_power": 16, "loss_scale_window": 1000, "hysteresis": 2, "min_loss_scale": 1 @@ -149,9 +151,12 @@ FP16_ENABLED_DEFAULT = False FP16_LOSS_SCALE = "loss_scale" FP16_LOSS_SCALE_DEFAULT = 0 +FP16_AUTO_CAST = "auto_cast" +FP16_AUTO_CAST_DEFAULT = False + # FP16 initial dynamic scale loss power FP16_INITIAL_SCALE_POWER = "initial_scale_power" -FP16_INITIAL_SCALE_POWER_DEFAULT = 32 +FP16_INITIAL_SCALE_POWER_DEFAULT = 16 # FP16 loss scale window FP16_LOSS_SCALE_WINDOW = "loss_scale_window" @@ -282,33 +287,6 @@ WALL_CLOCK_BREAKDOWN_DEFAULT = False MEMORY_BREAKDOWN = 'memory_breakdown' MEMORY_BREAKDOWN_DEFAULT = False -######################################### -# Tensorboard -######################################### -# Tensorboard. By default, this feature is not enabled. -# Users can configure in ds_config.json as below example: -TENSORBOARD_FORMAT = ''' -Tensorboard can be specified as: -"tensorboard": { - "enabled": true, - "output_path": "/home/myname/foo", - "job_name": "model_lr2e-5_epoch3_seed2_seq64" -} -''' -TENSORBOARD = "tensorboard" - -# Tensorboard enable signal -TENSORBOARD_ENABLED = "enabled" -TENSORBOARD_ENABLED_DEFAULT = False - -# Tensorboard output path -TENSORBOARD_OUTPUT_PATH = "output_path" -TENSORBOARD_OUTPUT_PATH_DEFAULT = "" - -# Tensorboard job name -TENSORBOARD_JOB_NAME = "job_name" -TENSORBOARD_JOB_NAME_DEFAULT = "DeepSpeedJobName" - ######################################### # Eigenvalue ######################################### @@ -366,14 +344,6 @@ PLD_THETA_DEFAULT = 1.0 PLD_GAMMA = "gamma" PLD_GAMMA_DEFAULT = 0.001 -######################################### -# Curriculum Learning -######################################### -CURRICULUM_LEARNING = "curriculum_learning" - -CURRICULUM_ENABLED = "enabled" -CURRICULUM_ENABLED_DEFAULT = False - 
######################################### # Validation modes @@ -387,7 +357,14 @@ class ValidationMode: ######################################### # Checkpoint config params ######################################### -# "checkpoint": {tag_validation=["Ignore"|"Warn"|"Fail"]} +# "checkpoint": { +# tag_validation=["Ignore"|"Warn"|"Fail"] +# load_universal=false +# use_node_local_storage=false +# parallel_write: { +# pipeline_stage: [True|False] +# } +# } CHECKPOINT = "checkpoint" CHECKPOINT_TAG_VALIDATION = "tag_validation" CHECKPOINT_TAG_VALIDATION_DEFAULT = ValidationMode.WARN @@ -397,43 +374,27 @@ CHECKPOINT_TAG_VALIDATION_MODES = [ ValidationMode.FAIL ] +LOAD_UNIVERSAL_CHECKPOINT = "load_universal" +LOAD_UNIVERSAL_CHECKPOINT_DEFAULT = False + +USE_NODE_LOCAL_STORAGE_CHECKPOINT = "use_node_local_storage" +USE_NODE_LOCAL_STORAGE_CHECKPOINT_DEFAULT = False + +CHECKPOINT_PARALLEL_WRITE = "parallel_write" +CHECKPOINT_PARALLEL_WRITE_PIPELINE_STAGE = "pipeline_stage" +CHECKPOINT_PARALLEL_WRITE_PIPELINE_STAGE_DEFAULT = False + +######################################### +# Data types config params ######################################### -# Quantization -######################################### -QUANTIZE_TRAINING = "quantize_training" -QUANTIZE_BITS = "quantize_bits" -START_BITS = "start_bits" -TARGET_BITS = "target_bits" -QUANTIZER_KERNEL = "quantizer_kernel" -QUANTIZE_SCHEDULE = "quantize_schedule" -QUANTIZE_PERIOD = "quantize_period" -SCHEDULE_OFFSET = "schedule_offset" -QUANTIZE_GROUPS = "quantize_groups" -FP16_MIXED_QUANTIZE = "fp16_mixed_quantize" -QUANTIZE_CHANGE_RATIO = "quantize_change_ratio" -FP16_MIXED_QUANTIZE_ENABLED = "enabled" -QUANTIZE_VERBOSE = "quantize_verbose" -QUANTIZE_ALGO = "quantize_algo" -QUANTIZE_TYPE = "q_type" -QUANTIZE_SYMMETRIC = "symmetric" -QUANTIZE_ASYMMETRIC = "asymmetric" -STOCHASTIC_ROUNDING = "stochastic" -NEAREST_ROUNDING = "nearest" -QUANTIZE_ROUNDING = "rounding" -QUANTIZE_TRAINING_ENABLED = "enabled" 
-QUANTIZE_TRAINING_ENABLED_DEFAULT = False -QUANTIZE_TRAINING_DEFAULT = False -QUANTIZE_START_BITS_DEFAULT = 16 -QUANTIZE_TARGET_BITS_DEFAULT = 8 -QUANTIZER_KERNEL_DEFAULT = False -QUANTIZE_PERIOD_DEFAULT = 1000 -QUANTIZE_OFFSET_DEFAULT = 1000 -QUANTIZE_GROUPS_DEFAULT = 1 -QUANTIZE_TYPE_DEFAULT = 0 #symmetric -QUANTIZE_ROUNDING_DEFAULT = 0 #nearest -FP16_MIXED_QUANTIZE_ENABLED_DEFAULT = False -QUANTIZE_CHANGE_RATIO_DEFAULT = 0.001 -QUANTIZE_VERBOSE_DEFAULT = False +# "data_types": { +# grad_accum_dtype=["bf16"|"fp16"|"fp32"] +# } +# } + +DATA_TYPES = "data_types" +GRAD_ACCUM_DTYPE = "grad_accum_dtype" +GRAD_ACCUM_DTYPE_DEFAULT = None ######################################### # Drop the last incomplete Batch @@ -451,3 +412,9 @@ DATALOADER_DROP_LAST_DEFAULT = False # PIPELINE PARALLELISM ######################################### PIPE_REPLICATED = 'ds_pipe_replicated' + +######################################### +# DATA PARALLELISM +######################################### +DATA_PARALLEL_GROUP = "data_parallel_group" +GLOBAL_RANK = "global_rank" diff --git a/deepspeed/runtime/data_pipeline/__init__.py b/deepspeed/runtime/data_pipeline/__init__.py index e69de29..fcb45ab 100644 --- a/deepspeed/runtime/data_pipeline/__init__.py +++ b/deepspeed/runtime/data_pipeline/__init__.py @@ -0,0 +1 @@ +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/deepspeed/runtime/data_pipeline/config.py b/deepspeed/runtime/data_pipeline/config.py new file mode 100644 index 0000000..eefa140 --- /dev/null +++ b/deepspeed/runtime/data_pipeline/config.py @@ -0,0 +1,180 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +''' +from .constants import * +import copy +from ..config_utils import get_scalar_param + + +# TODO: Reducing config verbosity by returning None or {} when disabled. +# One challenge is that we still need to somehow include the default values, +# for example the *_ENABLED has default of false. 
+def get_data_efficiency_config(param_dict): + output = {} + output[DATA_EFFICIENCY_ENABLED] = get_data_efficiency_enabled(param_dict) + output[DATA_EFFICIENCY_SEED] = get_data_efficiency_seed(param_dict) + if DATA_EFFICIENCY not in param_dict.keys(): + param_dict[DATA_EFFICIENCY] = {} + sub_param_dict = param_dict[DATA_EFFICIENCY] + output[DATA_SAMPLING] = get_data_sampling(sub_param_dict) + output[DATA_ROUTING] = get_data_routing(sub_param_dict) + + return output + + +def get_data_efficiency_enabled(param_dict): + if DATA_EFFICIENCY in param_dict.keys(): + return get_scalar_param(param_dict[DATA_EFFICIENCY], + DATA_EFFICIENCY_ENABLED, + DATA_EFFICIENCY_ENABLED_DEFAULT) + else: + return False + + +def get_data_efficiency_seed(param_dict): + if DATA_EFFICIENCY in param_dict.keys(): + return get_scalar_param(param_dict[DATA_EFFICIENCY], + DATA_EFFICIENCY_SEED, + DATA_EFFICIENCY_SEED_DEFAULT) + else: + return DATA_EFFICIENCY_SEED_DEFAULT + + +def get_data_sampling(param_dict): + output = {} + output[DATA_SAMPLING_ENABLED] = get_data_sampling_enabled(param_dict) + output[DATA_SAMPLING_NUM_EPOCHS] = get_data_sampling_num_epochs(param_dict) + output[DATA_SAMPLING_NUM_WORKERS] = get_data_sampling_num_workers(param_dict) + if DATA_SAMPLING not in param_dict.keys(): + param_dict[DATA_SAMPLING] = {} + sub_param_dict = param_dict[DATA_SAMPLING] + output[CURRICULUM_LEARNING] = get_curriculum_learning(sub_param_dict) + + return output + + +def get_data_sampling_enabled(param_dict): + if DATA_SAMPLING in param_dict.keys(): + return get_scalar_param(param_dict[DATA_SAMPLING], + DATA_SAMPLING_ENABLED, + DATA_SAMPLING_ENABLED_DEFAULT) + else: + return False + + +def get_data_sampling_num_epochs(param_dict): + if DATA_SAMPLING in param_dict.keys(): + return get_scalar_param(param_dict[DATA_SAMPLING], + DATA_SAMPLING_NUM_EPOCHS, + DATA_SAMPLING_NUM_EPOCHS_DEFAULT) + else: + return DATA_SAMPLING_NUM_EPOCHS_DEFAULT + + +def get_data_sampling_num_workers(param_dict): + if DATA_SAMPLING 
in param_dict.keys(): + return get_scalar_param(param_dict[DATA_SAMPLING], + DATA_SAMPLING_NUM_WORKERS, + DATA_SAMPLING_NUM_WORKERS_DEFAULT) + else: + return DATA_SAMPLING_NUM_WORKERS_DEFAULT + + +def get_curriculum_learning(param_dict): + output = {} + output[CURRICULUM_LEARNING_ENABLED] = get_curriculum_learning_enabled(param_dict) + if CURRICULUM_LEARNING not in param_dict.keys(): + param_dict[CURRICULUM_LEARNING] = {} + sub_param_dict = param_dict[CURRICULUM_LEARNING] + if output[CURRICULUM_LEARNING_ENABLED]: + assert CURRICULUM_LEARNING_METRICS in sub_param_dict.keys(), f"Curriculum learning is enabled, {CURRICULUM_LEARNING_METRICS} must be specified" + for key, val in get_curriculum_learning_params(param_dict).items(): + output[key] = val + return output + + +def get_curriculum_learning_enabled(param_dict): + if CURRICULUM_LEARNING in param_dict.keys(): + return get_scalar_param(param_dict[CURRICULUM_LEARNING], + CURRICULUM_LEARNING_ENABLED, + CURRICULUM_LEARNING_ENABLED_DEFAULT) + else: + return False + + +def get_curriculum_learning_params(param_dict): + if CURRICULUM_LEARNING in param_dict.keys(): + curriculum_learning_params = copy.copy(param_dict[CURRICULUM_LEARNING]) + curriculum_learning_params.pop(CURRICULUM_LEARNING_ENABLED) + return curriculum_learning_params + else: + return {} + + +def get_curriculum_enabled_legacy(param_dict): + if CURRICULUM_LEARNING_LEGACY in param_dict.keys(): + return get_scalar_param(param_dict[CURRICULUM_LEARNING_LEGACY], + CURRICULUM_ENABLED_LEGACY, + CURRICULUM_ENABLED_DEFAULT_LEGACY) + else: + return False + + +def get_curriculum_params_legacy(param_dict): + if CURRICULUM_LEARNING_LEGACY in param_dict.keys(): + curriculum_params = copy.copy(param_dict[CURRICULUM_LEARNING_LEGACY]) + curriculum_params.pop(CURRICULUM_ENABLED_LEGACY) + return curriculum_params + else: + return False + + +def get_data_routing(param_dict): + output = {} + output[DATA_ROUTING_ENABLED] = get_data_routing_enabled(param_dict) + if DATA_ROUTING not 
in param_dict.keys(): + param_dict[DATA_ROUTING] = {} + sub_param_dict = param_dict[DATA_ROUTING] + output[RANDOM_LTD] = get_random_ltd(sub_param_dict) + + return output + + +def get_data_routing_enabled(param_dict): + if DATA_ROUTING in param_dict.keys(): + return get_scalar_param(param_dict[DATA_ROUTING], + DATA_ROUTING_ENABLED, + DATA_ROUTING_ENABLED_DEFAULT) + else: + return False + + +def get_random_ltd(param_dict): + output = {} + output[RANDOM_LTD_ENABLED] = RANDOM_LTD_ENABLED_DEFAULT + output[RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE] = {} + output[RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE][ + RANDOM_LTD_LAYER_TOKEN_LR_ENABLED] = RANDOM_LTD_LAYER_TOKEN_LR_ENABLED_DEFAULT + if get_random_ltd_enabled(param_dict): + output[RANDOM_LTD_ENABLED] = get_random_ltd_enabled(param_dict) + for key, val in get_random_ltd_params(param_dict).items(): + output[key] = val + return output + + +def get_random_ltd_enabled(param_dict): + if RANDOM_LTD in param_dict.keys(): + return get_scalar_param(param_dict[RANDOM_LTD], + RANDOM_LTD_ENABLED, + RANDOM_LTD_ENABLED_DEFAULT) + else: + return False + + +def get_random_ltd_params(param_dict): + if RANDOM_LTD in param_dict.keys(): + random_ltd_params = copy.copy(param_dict[RANDOM_LTD]) + random_ltd_params.pop(RANDOM_LTD_ENABLED) + return random_ltd_params + else: + return {} diff --git a/deepspeed/runtime/data_pipeline/constants.py b/deepspeed/runtime/data_pipeline/constants.py new file mode 100644 index 0000000..b801d2e --- /dev/null +++ b/deepspeed/runtime/data_pipeline/constants.py @@ -0,0 +1,115 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +''' +######################################### +# Data efficiency library +# See sample config at https://www.deepspeed.ai/docs/config-json/#data-efficiency +######################################### +DATA_EFFICIENCY = "data_efficiency" +DATA_EFFICIENCY_ENABLED = "enabled" +DATA_EFFICIENCY_ENABLED_DEFAULT = False +DATA_EFFICIENCY_SEED = "seed" +DATA_EFFICIENCY_SEED_DEFAULT = 1234 + 
+######################################### +# Data efficiency - Data Sampling +######################################### +DATA_SAMPLING = "data_sampling" +DATA_SAMPLING_ENABLED = "enabled" +DATA_SAMPLING_ENABLED_DEFAULT = False +DATA_SAMPLING_NUM_EPOCHS = "num_epochs" +DATA_SAMPLING_NUM_EPOCHS_DEFAULT = 1000 +DATA_SAMPLING_NUM_WORKERS = "num_workers" +DATA_SAMPLING_NUM_WORKERS_DEFAULT = 0 + +######################################### +# Data efficiency - Data Sampling - Curriculum Learning +######################################### +CURRICULUM_LEARNING = "curriculum_learning" +CURRICULUM_LEARNING_ENABLED = "enabled" +CURRICULUM_LEARNING_ENABLED_DEFAULT = False +CURRICULUM_LEARNING_CLUSTER_PATH = "data_cluster_path" +CURRICULUM_LEARNING_METRICS = "curriculum_metrics" +CURRICULUM_LEARNING_SAMPLE_PATH = "index_to_sample_path" +CURRICULUM_LEARNING_METRIC_PATH = "index_to_metric_path" +CURRICULUM_LEARNING_CLUSTERING_TYPE = "clustering_type" +CURRICULUM_LEARNING_SINGLE_CLUSTER = "single_cluster" +CURRICULUM_LEARNING_CLUSTER_PREFIX = "cluster" +CURRICULUM_LEARNING_DIFFICULTY_TYPE = "difficulty_type" +CURRICULUM_LEARNING_VALUE_BASED = "value" +CURRICULUM_LEARNING_PERCENTILE_BASED = "percentile" +CURRICULUM_LEARNING_MIN_DIFFICULTY = "min_difficulty" +CURRICULUM_LEARNING_MAX_DIFFICULTY = "max_difficulty" +CURRICULUM_LEARNING_SCHEDULE_TYPE = "schedule_type" +CURRICULUM_LEARNING_SCHEDULE_CONFIG = "schedule_config" +CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY = "difficulty" +CURRICULUM_LEARNING_SCHEDULE_MAX_STEP = "max_step" +CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP = "total_curriculum_step" +CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP = "difficulty_step" +CURRICULUM_LEARNING_SCHEDULE_ROOT_DEGREE = "root_degree" +CURRICULUM_LEARNING_SCHEDULE_FIXED_DISCRETE = "fixed_discrete" +CURRICULUM_LEARNING_SCHEDULE_FIXED_ROOT = "fixed_root" +CURRICULUM_LEARNING_SCHEDULE_FIXED_LINEAR = "fixed_linear" +CURRICULUM_LEARNING_SCHEDULE_CUSTOM = "custom" +CURRICULUM_LEARNING_CURRENT_DIFFICULTY = 
"current_difficulty" + +CURRICULUM_LEARNING_BATCH = "batch" +CURRICULUM_LEARNING_CONSUMED_SAMPLES = "consumed_samples" +CURRICULUM_LEARNING_STEP = "curriculum_step" +CURRICULUM_LEARNING_CURRENT_DIFFICULTIES = "current_difficulties" +CURRICULUM_LEARNING_DATA_CLUSTER_PATHS = "data_cluster_paths" +CURRICULUM_LEARNING_DATA_CLUSTER_CURRENT_POSITION = "data_cluster_current_position" +CURRICULUM_LEARNING_NP_RNG_STATE = "np_rng_state" + +######################################### +# Curriculum Learning legacy implementation +######################################### +CURRICULUM_LEARNING_LEGACY = "curriculum_learning" + +CURRICULUM_ENABLED_LEGACY = "enabled" +CURRICULUM_ENABLED_DEFAULT_LEGACY = False + +######################################### +# Data efficiency - Data Routing +######################################### +DATA_ROUTING = "data_routing" +DATA_ROUTING_ENABLED = "enabled" +DATA_ROUTING_ENABLED_DEFAULT = False + +######################################### +# Data efficiency - Data Routing - Random LTD +######################################### +RANDOM_LTD = "random_ltd" +RANDOM_LTD_ENABLED = "enabled" +RANDOM_LTD_ENABLED_DEFAULT = False + +RANDOM_LTD_MODEL_MASK_NAME = "model_mask_name" +RANDOM_LTD_MODEL_TYPE = "model_type" +RANDOM_LTD_MICRO_BATCH_SIZE = "micro_batch_size" +RANDOM_LTD_GLOBAL_BATCH_SIZE = "global_batch_size" +RANDOM_LTD_SAMPLE_INDEX = "sample_idx" +RANDOM_LTD_ATTENTION_MASK = "attention_mask" +RANDOM_LTD_HIDDEN_STATE_ORDER = "hidden_state_order" +RANDOM_LTD_LAYER_NUM = "random_ltd_layer_num" +RANDOM_LTD_LAYER_ID = "random_ltd_layer_id" +RANDOM_LTD_TOTAL_LAYER_NUM = "total_layer_num" +RANDOM_LTD_CONSUMED_LAYER_TOKENS = "consumed_layer_tokens" + +# scheduler +RANDOM_LTD_SCHEDULER = "random_ltd_schedule" +RANDOM_LTD_MAX_VALUE = "max_value" +RANDOM_LTD_MIN_VALUE = "min_value" +RANDOM_LTD_CURRENT_VALUE = "current_value" +RANDOM_LTD_SCHEDULE_CONFIG = "schedule_config" +RANDOM_LTD_INCREASE_STEP = "seq_per_step" +RANDOM_LTD_REQUIRE_STEP = "require_steps" 
+RANDOM_LTD_SCHEDULER_TYPE = "schedule_type" +RANDOM_LTD_CURR_STEP = "current_steps" + +# learning rate schedulers +RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE = "layer_token_lr_schedule" +RANDOM_LTD_LAYER_TOKEN_LR_ENABLED = "enabled" +RANDOM_LTD_LAYER_TOKEN_LR_ENABLED_DEFAULT = False +RANDOM_LTD_TOTAL_LAYER_TOKENS = "total_layer_tokens" +RANDOM_LTD_WARMUP_TYPE = "warmup_type" +RANDOM_LTD_WARMUP_LAYER_TOKENS = "warmup_layer_tokens" diff --git a/deepspeed/runtime/data_pipeline/curriculum_scheduler.py b/deepspeed/runtime/data_pipeline/curriculum_scheduler.py index 24ff1f8..b4cb18c 100644 --- a/deepspeed/runtime/data_pipeline/curriculum_scheduler.py +++ b/deepspeed/runtime/data_pipeline/curriculum_scheduler.py @@ -3,22 +3,30 @@ Copyright 2021 The Microsoft DeepSpeed Team ''' import math from deepspeed.utils import logger +from .constants import * class CurriculumScheduler(object): def __init__(self, config): super().__init__() self.state = {} - assert "curriculum_type" in config, "Curriculum learning requires the config 'curriculum_type'" - assert "min_difficulty" in config, "Curriculum learning requires the config 'min_difficulty'" - assert "max_difficulty" in config, "Curriculum learning requires the config 'max_difficulty'" - assert "schedule_type" in config, "Curriculum learning requires the config 'schedule_type'" - self.state['min_difficulty'] = config['min_difficulty'] - self.state['max_difficulty'] = config['max_difficulty'] - self.state['current_difficulty'] = config['min_difficulty'] - self.state['schedule_type'] = config['schedule_type'] + assert CURRICULUM_LEARNING_MIN_DIFFICULTY in config, \ + f"Curriculum learning requires the config '{CURRICULUM_LEARNING_MIN_DIFFICULTY}'" + assert CURRICULUM_LEARNING_MAX_DIFFICULTY in config, \ + f"Curriculum learning requires the config '{CURRICULUM_LEARNING_MAX_DIFFICULTY}'" + assert CURRICULUM_LEARNING_SCHEDULE_TYPE in config, \ + f"Curriculum learning requires the config '{CURRICULUM_LEARNING_SCHEDULE_TYPE}'" + 
self.state[CURRICULUM_LEARNING_MIN_DIFFICULTY] = config[ + CURRICULUM_LEARNING_MIN_DIFFICULTY] + self.state[CURRICULUM_LEARNING_MAX_DIFFICULTY] = config[ + CURRICULUM_LEARNING_MAX_DIFFICULTY] + self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] = config[ + CURRICULUM_LEARNING_MIN_DIFFICULTY] + self.state[CURRICULUM_LEARNING_SCHEDULE_TYPE] = config[ + CURRICULUM_LEARNING_SCHEDULE_TYPE] self.first_step = True - if config['schedule_type'] == 'fixed_discrete': + if config[ + CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_DISCRETE: """ The schedule_config is a list of difficulty and a list of max step belonging to each difficulty. Example json config: @@ -28,17 +36,25 @@ class CurriculumScheduler(object): } The "max_step" has one less element than "difficulty", because the last difficulty will be used for all following steps. - The self.state['schedule'] is a dictionary of + The self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] is a dictionary of difficulty : [max step for this difficulty, next difficulty]. 
""" - assert "difficulty" in config['schedule_config'], "Curriculum learning with fixed_discrete schedule requires the schedule_config 'difficulty'" - assert "max_step" in config['schedule_config'], "Curriculum learning with fixed_discrete schedule requires the schedule_config 'max_step'" - assert len(config['schedule_config']['max_step']) > 0 - assert len(config['schedule_config']['difficulty']) > 0 - assert len(config['schedule_config']['difficulty']) == len( - config['schedule_config']['max_step']) + 1 - self.state['schedule'] = config['schedule_config'] - elif config['schedule_type'] == 'fixed_root': + assert CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \ + f"Curriculum learning with fixed_discrete schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY}'" + assert CURRICULUM_LEARNING_SCHEDULE_MAX_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \ + f"Curriculum learning with fixed_discrete schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_MAX_STEP}'" + assert len(config[CURRICULUM_LEARNING_SCHEDULE_CONFIG] + [CURRICULUM_LEARNING_SCHEDULE_MAX_STEP]) > 0 + assert len(config[CURRICULUM_LEARNING_SCHEDULE_CONFIG] + [CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY]) > 0 + assert len(config[CURRICULUM_LEARNING_SCHEDULE_CONFIG] + [CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY]) == len( + config[CURRICULUM_LEARNING_SCHEDULE_CONFIG] + [CURRICULUM_LEARNING_SCHEDULE_MAX_STEP]) + 1 + self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] = config[ + CURRICULUM_LEARNING_SCHEDULE_CONFIG] + elif config[ + CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_ROOT: """ The schedule_config includes: total_curriculum_step: how many steps the curriculum learning takes to go @@ -57,38 +73,59 @@ class CurriculumScheduler(object): "root_degree": 2 } """ - assert "total_curriculum_step" in config['schedule_config'], "Curriculum learning with fixed_root schedule requires the schedule_config 
'total_curriculum_step'" - assert "difficulty_step" in config['schedule_config'], "Curriculum learning with fixed_root schedule requires the schedule_config 'difficulty_step'" - assert "root_degree" in config['schedule_config'], "Curriculum learning with fixed_root schedule requires the schedule_config 'root_degree'" - if config['schedule_config']['difficulty_step'] % 8 != 0: + assert CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \ + f"Curriculum learning with fixed_root schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP}'" + assert CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \ + f"Curriculum learning with fixed_root schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP}'" + assert CURRICULUM_LEARNING_SCHEDULE_ROOT_DEGREE in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \ + f"Curriculum learning with fixed_root schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_ROOT_DEGREE}'" + if config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][ + CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP] % 8 != 0: logger.warning( - f'The difficulty_step for curriculum learning has to be multiple of 8 (for FP16 data) or 16 (for INT8 data) to enable NVIDIA Tensor Core acceleration. Disregard this warning if this is unrelated to your hardware.' + f'When using seqlen metric, the difficulty_step for curriculum learning has to be multiple of 8 (for FP16 data) or 16 (for INT8 data) to enable NVIDIA Tensor Core acceleration. Disregard this warning if this is unrelated to your metric/hardware.' 
) - self.state['schedule'] = config['schedule_config'] - elif config['schedule_type'] == 'fixed_linear': + self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] = config[ + CURRICULUM_LEARNING_SCHEDULE_CONFIG] + elif config[ + CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_LINEAR: """ - The schedule_config is the same as 'fixed_root' but without the + The schedule_config is the same as CURRICULUM_LEARNING_SCHEDULE_FIXED_ROOT but without the root_degree. "schedule_config": { "total_curriculum_step": 30000, "difficulty_step": 8 } """ - assert "total_curriculum_step" in config['schedule_config'], "Curriculum learning with fixed_linear schedule requires the schedule_config 'total_curriculum_step'" - assert "difficulty_step" in config['schedule_config'], "Curriculum learning with fixed_linear schedule requires the schedule_config 'difficulty_step'" - if config['schedule_config']['difficulty_step'] % 8 != 0: + assert CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \ + f"Curriculum learning with fixed_linear schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP}'" + assert CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \ + f"Curriculum learning with fixed_linear schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP}'" + if config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][ + CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP] % 8 != 0: logger.warning( - f'The difficulty_step for curriculum learning has to be multiple of 8 (for FP16 data) or 16 (for INT8 data) to enable NVIDIA Tensor Core acceleration. Disregard this warning if this is unrelated to your hardware.' + f'When using seqlen metric, the difficulty_step for curriculum learning has to be multiple of 8 (for FP16 data) or 16 (for INT8 data) to enable NVIDIA Tensor Core acceleration. Disregard this warning if this is unrelated to your metric/hardware.' 
) - self.state['schedule'] = config['schedule_config'] + self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] = config[ + CURRICULUM_LEARNING_SCHEDULE_CONFIG] + elif config[ + CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_CUSTOM: + """ + Fully customized schedule. User need to provide a custom schedule + function by using the set_custom_curriculum_learning_schedule API + in deepspeed/runtime/engine.py + """ + self.custom_get_difficulty = None else: raise RuntimeError('Unsupported curriculum schedule type') def get_current_difficulty(self): - return self.state['current_difficulty'] + return self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] def set_current_difficulty(self, difficulty): - self.state['current_difficulty'] = difficulty + self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] = difficulty + + def set_custom_get_difficulty(self, schedule_function): + self.custom_get_difficulty = schedule_function def get_state(self): return self.state @@ -97,38 +134,49 @@ class CurriculumScheduler(object): self.state = state def __fixed_discrete_get_difficulty(self, global_steps): - s_state = self.state['schedule'] - if global_steps > s_state['max_step'][-1]: - return s_state['difficulty'][-1] - for i in range(len(s_state['max_step'])): - if global_steps <= s_state['max_step'][i]: - return s_state['difficulty'][i] + s_state = self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] + if global_steps > s_state[CURRICULUM_LEARNING_SCHEDULE_MAX_STEP][-1]: + return s_state[CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY][-1] + for i in range(len(s_state[CURRICULUM_LEARNING_SCHEDULE_MAX_STEP])): + if global_steps <= s_state[CURRICULUM_LEARNING_SCHEDULE_MAX_STEP][i]: + return s_state[CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY][i] def __fixed_root_get_difficulty(self, global_steps, root_degree=None): - s_state = self.state['schedule'] + s_state = self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] if root_degree is None: - root_degree = s_state['root_degree'] + root_degree = 
s_state[CURRICULUM_LEARNING_SCHEDULE_ROOT_DEGREE] next_difficulty = (float(global_steps) / - s_state['total_curriculum_step'])**(1.0 / root_degree) - next_difficulty = math.floor( - next_difficulty * - (self.state['max_difficulty'] - self.state['min_difficulty']) + - self.state['min_difficulty']) - next_difficulty -= (next_difficulty % s_state['difficulty_step']) - next_difficulty = min(next_difficulty, self.state['max_difficulty']) + s_state[CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP])**( + 1.0 / root_degree) + next_difficulty = math.floor(next_difficulty * + (self.state[CURRICULUM_LEARNING_MAX_DIFFICULTY] - + self.state[CURRICULUM_LEARNING_MIN_DIFFICULTY]) + + self.state[CURRICULUM_LEARNING_MIN_DIFFICULTY]) + next_difficulty -= (next_difficulty % + s_state[CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP]) + next_difficulty = min(next_difficulty, + self.state[CURRICULUM_LEARNING_MAX_DIFFICULTY]) return next_difficulty def get_difficulty(self, global_steps): - if self.state['schedule_type'] == 'fixed_discrete': + if self.state[ + CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_DISCRETE: return self.__fixed_discrete_get_difficulty(global_steps) - elif self.state['schedule_type'] == 'fixed_linear': + elif self.state[ + CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_LINEAR: return self.__fixed_root_get_difficulty(global_steps, 1) - elif self.state['schedule_type'] == 'fixed_root': + elif self.state[ + CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_ROOT: return self.__fixed_root_get_difficulty(global_steps) + elif self.state[ + CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_CUSTOM: + return self.custom_get_difficulty(global_steps) else: raise RuntimeError('Unsupported curriculum schedule type') def update_difficulty(self, global_steps): - if self.state['current_difficulty'] < self.state['max_difficulty']: - self.state['current_difficulty'] = self.get_difficulty(global_steps) - 
return self.state['current_difficulty'] + if self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] < self.state[ + CURRICULUM_LEARNING_MAX_DIFFICULTY]: + self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] = self.get_difficulty( + global_steps) + return self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] diff --git a/deepspeed/runtime/data_pipeline/data_routing/__init__.py b/deepspeed/runtime/data_pipeline/data_routing/__init__.py new file mode 100644 index 0000000..fcb45ab --- /dev/null +++ b/deepspeed/runtime/data_pipeline/data_routing/__init__.py @@ -0,0 +1 @@ +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/deepspeed/runtime/data_pipeline/data_routing/basic_layer.py b/deepspeed/runtime/data_pipeline/data_routing/basic_layer.py new file mode 100644 index 0000000..436da95 --- /dev/null +++ b/deepspeed/runtime/data_pipeline/data_routing/basic_layer.py @@ -0,0 +1,117 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +''' + +from deepspeed.utils import logger +from torch import Tensor +from torch.nn import Module +from ..constants import * +from deepspeed.ops.random_ltd.dropping_utils import gpt_sample_tokens, bert_sample_tokens, GatherTokens, ScatterTokens + + +#####based on the paper random-ltd: https://arxiv.org/abs/2211.11586 +class RandomLayerTokenDrop(Module): + """ + A layer wrapper for random LTD + """ + def __init__(self, layer: Module): + super(RandomLayerTokenDrop, self).__init__() + self.random_ltd_layer = layer + self.reserved_length = None #config['max_value'] + self.random_ltd_scheduler = None + self.max_length = None + self.reserved_length = -1 + self.curr_seq = -1 + self.batch_first = False + + def init_config(self, config, scheduler, random_ltd_layer_id): + self.random_ltd_scheduler = scheduler + self.random_ltd_layer_id = random_ltd_layer_id + self.max_length = self.random_ltd_scheduler.state[RANDOM_LTD_MAX_VALUE] + + self.mask_name = config[RANDOM_LTD_MODEL_MASK_NAME] + self.micro_bs = config[RANDOM_LTD_MICRO_BATCH_SIZE] + 
self.random_ltd_num_layer = self.random_ltd_scheduler.random_ltd_layer_num + hs_order = config[RANDOM_LTD_HIDDEN_STATE_ORDER] + self.model_type = config[RANDOM_LTD_MODEL_TYPE] + + if hs_order == 'batch_seq_dim': + self.get_hidden_tensor_shape = self.get_bsh + self.batch_first = True + elif hs_order == 'seq_batch_dim': + self.get_hidden_tensor_shape = self.get_sbh + self.batch_first = False + else: + logger.warning( + "************For now, we only support batch_seq_dim or seq_batch_dim inputs. You can easily \ + your own input dimension orders************") + raise NotImplementedError + + if self.model_type == 'encoder': + self.index_generator = bert_sample_tokens + elif self.model_type == 'decoder': + self.index_generator = gpt_sample_tokens + else: + logger.warning( + "************For now, we only support encoder-only or decoder-only models************" + ) + raise NotImplementedError + + def get_bsh(self, hidden_stats): + self.curr_seq, self.curr_micro_batch = hidden_stats.size()[1], hidden_stats.size()[0] + + def get_sbh(self, hidden_stats): + self.curr_seq, self.curr_micro_batch = hidden_stats.size()[0], hidden_stats.size()[1] + + def forward(self, hidden_states, **kwargs) -> Tensor: + if self.random_ltd_scheduler is not None: + self.reserved_length = self.random_ltd_scheduler.get_current_seq() + self.get_hidden_tensor_shape(hidden_states) + if self.training and self.random_ltd_scheduler is not None and self.reserved_length < self.curr_seq: + if self.mask_name is not None: + mask = kwargs[self.mask_name] + else: + mask = None + if self.random_ltd_layer_id == 0: + sampled_indices, part_attention_mask = self.index_generator(self.reserved_length,\ + self.curr_seq, \ + self.curr_micro_batch, \ + self.random_ltd_num_layer, \ + hidden_states.device, mask) + self.random_ltd_scheduler.state[ + RANDOM_LTD_SAMPLE_INDEX] = sampled_indices + self.random_ltd_scheduler.state[ + RANDOM_LTD_ATTENTION_MASK] = part_attention_mask + else: + sampled_indices = 
self.random_ltd_scheduler.state[ + RANDOM_LTD_SAMPLE_INDEX] + part_attention_mask = self.random_ltd_scheduler.state[ + RANDOM_LTD_ATTENTION_MASK] + + + hidden_states, part_hidden_states = GatherTokens.apply(hidden_states, sampled_indices[self.random_ltd_layer_id,:,:], self.batch_first) + if self.mask_name is not None: + if self.model_type == 'encoder': + kwargs[self.mask_name] = part_attention_mask[ + self.random_ltd_layer_id] + else: + kwargs[self.mask_name] = part_attention_mask + + outputs = self.random_ltd_layer(part_hidden_states, **kwargs) + + if isinstance(outputs, tuple): + hidden_states = ScatterTokens.apply(hidden_states, outputs[0], sampled_indices[self.random_ltd_layer_id,:,:], self.batch_first) + my_list = list(outputs) + my_list[0] = hidden_states + return tuple(my_list) + elif isinstance(outputs, Tensor): + hidden_states = ScatterTokens.apply(hidden_states, outputs, sampled_indices[self.random_ltd_layer_id,:,:], self.batch_first) + return hidden_states + else: + logger.warning( + "************For now, we only support tuple and tensor output. 
\ + You need to adjust the output according to the layer in your model************" + ) + raise NotImplementedError + else: + return self.random_ltd_layer(hidden_states, **kwargs) diff --git a/deepspeed/runtime/data_pipeline/data_routing/helper.py b/deepspeed/runtime/data_pipeline/data_routing/helper.py new file mode 100644 index 0000000..0f3791c --- /dev/null +++ b/deepspeed/runtime/data_pipeline/data_routing/helper.py @@ -0,0 +1,45 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +''' + +from .basic_layer import RandomLayerTokenDrop +from collections import OrderedDict +from deepspeed.compression.helper import recursive_getattr, recursive_setattr + + +def convert_to_random_ltd(model, convert_type): + if hasattr(model, 'module'): + c_model = model.module + else: + c_model = model + + for name, module in c_model.named_modules(): + + if isinstance(module, convert_type): + old_module = recursive_getattr(c_model, name) + new_module = RandomLayerTokenDrop(old_module) + recursive_setattr(c_model, name, new_module) + + model.random_ltd_initialize() + return model + + +def save_without_random_ltd(model): + if hasattr(model, 'module'): + c_model = model.module + else: + c_model = model + + model_dic = c_model.state_dict() + return remove_random_ltd_state_dict(model_dic) + + +def remove_random_ltd_state_dict(state_dict): + new_state_dict = OrderedDict() + for key, value in state_dict.items(): + if '.random_ltd_layer' in key: + new_key = ''.join(key.split('.random_ltd_layer')) + else: + new_key = key + new_state_dict[new_key] = value + return new_state_dict diff --git a/deepspeed/runtime/data_pipeline/data_routing/scheduler.py b/deepspeed/runtime/data_pipeline/data_routing/scheduler.py new file mode 100644 index 0000000..db0a7d4 --- /dev/null +++ b/deepspeed/runtime/data_pipeline/data_routing/scheduler.py @@ -0,0 +1,112 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +''' + +import math + +from deepspeed.utils import logger +# from deepspeed.runtime.lr_schedules 
import WarmupLR +from ..constants import * + +#####based on the paper random-ltd: https://arxiv.org/abs/2211.11586 + + +class BaseScheduler(object): + def __init__(self): + self.state = {} + + def __fixed_root_get_value(self, global_steps, root_degree=None): + s_state = self.state[RANDOM_LTD_SCHEDULE_CONFIG] + if root_degree is None: + root_degree = s_state['root_degree'] + next_seq = (float(global_steps) / + s_state[RANDOM_LTD_REQUIRE_STEP])**(1.0 / root_degree) + next_seq = math.floor( + next_seq * + (self.state[RANDOM_LTD_MAX_VALUE] - self.state[RANDOM_LTD_MIN_VALUE]) + + self.state[RANDOM_LTD_MIN_VALUE]) + next_seq -= (next_seq % s_state[RANDOM_LTD_INCREASE_STEP]) + next_seq = min(next_seq, self.state[RANDOM_LTD_MAX_VALUE]) + return next_seq + + def get_value(self, global_steps): + if self.state[RANDOM_LTD_SCHEDULER_TYPE] == 'fixed_linear': + return self.__fixed_root_get_value(global_steps, 1) + else: + raise RuntimeError('Unsupported random LTD schedule type') + + +class RandomLTDScheduler(BaseScheduler): + def __init__(self, config): + super().__init__() + self.model_layer_num = config[RANDOM_LTD_TOTAL_LAYER_NUM] + self.random_ltd_layer_num = config[RANDOM_LTD_LAYER_NUM] + self.config_schedule = config[RANDOM_LTD_SCHEDULER] + self.global_batch_size = config[RANDOM_LTD_GLOBAL_BATCH_SIZE] + self.reset_to_init() + + if config[RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE][RANDOM_LTD_LAYER_TOKEN_LR_ENABLED]: + logger.warning("**********Work In Progress************") + raise NotImplementedError + + self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS] = 0 + + # self.first_step = True + def get_total_layer_tokens(self, train_iters): + for step in range(train_iters): + self.update_seq(step) + return self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS] + + def reset_to_init(self): + if self.config_schedule is not None: + self.state[RANDOM_LTD_MIN_VALUE] = self.config_schedule[RANDOM_LTD_MIN_VALUE] + self.state[RANDOM_LTD_MAX_VALUE] = self.config_schedule[RANDOM_LTD_MAX_VALUE] + 
self.state[RANDOM_LTD_CURRENT_VALUE] = self.config_schedule[ + RANDOM_LTD_MIN_VALUE] + self.state[RANDOM_LTD_SCHEDULE_CONFIG] = self.config_schedule[ + RANDOM_LTD_SCHEDULE_CONFIG] + self.state[RANDOM_LTD_SCHEDULER_TYPE] = self.config_schedule[ + RANDOM_LTD_SCHEDULER_TYPE] + self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS] = 0 + self.state[RANDOM_LTD_CURR_STEP] = -1 + + def get_current_seq(self): + return self.state[RANDOM_LTD_CURRENT_VALUE] + + def set_current_seq(self, seq_length): + self.state[RANDOM_LTD_CURRENT_VALUE] = seq_length + + def get_random_ltd_layer_num(self): + return self.random_ltd_layer_num + + def get_state(self): + return self.state + + def set_state(self, state): + self.state = state + + def update_seq(self, global_steps): + if self.state[RANDOM_LTD_CURRENT_VALUE] < self.state[RANDOM_LTD_MAX_VALUE]: + self.state[RANDOM_LTD_CURRENT_VALUE] = self.get_value(global_steps) + if global_steps != self.state[RANDOM_LTD_CURR_STEP]: + self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS] += self.global_batch_size*(self.state[RANDOM_LTD_CURRENT_VALUE] * self.random_ltd_layer_num \ + + self.state[RANDOM_LTD_MAX_VALUE] * (self.model_layer_num - self.random_ltd_layer_num)) + self.state[RANDOM_LTD_CURR_STEP] = global_steps + + def state_dict(self): + return { + RANDOM_LTD_CONSUMED_LAYER_TOKENS: + self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS], + RANDOM_LTD_CURR_STEP: self.state[RANDOM_LTD_CURR_STEP], + RANDOM_LTD_CURRENT_VALUE: self.state[RANDOM_LTD_CURRENT_VALUE], + RANDOM_LTD_MIN_VALUE: self.state[RANDOM_LTD_MIN_VALUE], + RANDOM_LTD_MAX_VALUE: self.state[RANDOM_LTD_MAX_VALUE], + } + + def load_state_dict(self, state_dict): + self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS] = state_dict[ + RANDOM_LTD_CONSUMED_LAYER_TOKENS] + self.state[RANDOM_LTD_CURR_STEP] = state_dict[RANDOM_LTD_CURR_STEP] + self.state[RANDOM_LTD_CURRENT_VALUE] = state_dict[RANDOM_LTD_CURRENT_VALUE] + self.state[RANDOM_LTD_MIN_VALUE] = state_dict[RANDOM_LTD_MIN_VALUE] + self.state[RANDOM_LTD_MAX_VALUE] = 
state_dict[RANDOM_LTD_MAX_VALUE] diff --git a/deepspeed/runtime/data_pipeline/data_routing/utils.py b/deepspeed/runtime/data_pipeline/data_routing/utils.py new file mode 100644 index 0000000..8b7bd50 --- /dev/null +++ b/deepspeed/runtime/data_pipeline/data_routing/utils.py @@ -0,0 +1,27 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +''' + +import torch + + +def bsh_decoder_gather(reserved_length, hidden_states, mask): + # random-layer-token-drop + rand_list = [] + part_hidden_states = [] # batch, seq, hidden ## different from megatron + for k in range(hidden_states.size(0)): + B_tmp = torch.randperm(hidden_states.size(1), + device=hidden_states.device)[:reserved_length] + B = B_tmp.sort()[0] + rand_list.append(B) + part_hidden_states.append(hidden_states[k:k + 1, B, :]) + + part_hidden_states = torch.cat(part_hidden_states, dim=0) + part_mask = mask[:, :, :reserved_length, :reserved_length] + return part_hidden_states, rand_list, part_mask + + +def bsh_decoder_scatter(hidden_states, part_hidden_states, rand_list): + for k in range(hidden_states.size(0)): + hidden_states[k, rand_list[k], :] = part_hidden_states[k, :, :] + return hidden_states diff --git a/deepspeed/runtime/data_pipeline/data_sampling/__init__.py b/deepspeed/runtime/data_pipeline/data_sampling/__init__.py new file mode 100644 index 0000000..fcb45ab --- /dev/null +++ b/deepspeed/runtime/data_pipeline/data_sampling/__init__.py @@ -0,0 +1 @@ +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/deepspeed/runtime/data_pipeline/data_sampling/data_analyzer.py b/deepspeed/runtime/data_pipeline/data_sampling/data_analyzer.py new file mode 100644 index 0000000..efd198f --- /dev/null +++ b/deepspeed/runtime/data_pipeline/data_sampling/data_analyzer.py @@ -0,0 +1,537 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +''' +import os +from collections import defaultdict +import csv +import time +from multiprocessing import Process, Manager +import numpy as np +import torch +from torch.utils.data 
class DataAnalyzer(object):
    """Map-reduce style analyzer that computes per-sample metrics over a
    dataset, sharded across workers and threads, and merges the results into
    mmap index files used by DeepSpeed curriculum learning.
    """
    def __init__(self,
                 dataset,
                 num_workers=1,
                 worker_id=0,
                 num_threads=1,
                 num_threads_reduce=1,
                 specific_threads=None,
                 batch_size=1,
                 metric_names=None,
                 metric_functions=None,
                 metric_types=None,
                 metric_dtypes=None,
                 save_path="./",
                 collate_fn=None,
                 custom_map_init=None,
                 custom_map_update=None,
                 custom_map_finalize=None,
                 custom_reduce=None):
        """Store analysis configuration.

        Args:
            dataset: indexable dataset to analyze.
            num_workers/worker_id: total map workers and this worker's id.
            num_threads/num_threads_reduce: threads per worker for the map
                and reduce phases.
            specific_threads: optional subset of thread ids to run in the
                map phase (default: all threads).
            metric_names/metric_functions/metric_types/metric_dtypes:
                parallel lists describing each metric; types are
                'single_value_per_sample' or 'accumulate_value_over_samples'.
            save_path: root directory for metric output files.
            collate_fn: optional DataLoader collate function.
            custom_map_init/custom_map_update/custom_map_finalize/
            custom_reduce: optional overrides for the default phases.
        """
        super().__init__()
        self.dataset = dataset
        self.num_workers = num_workers
        self.worker_id = worker_id
        self.num_threads = num_threads
        self.num_threads_reduce = num_threads_reduce
        # Fix: the original signature used mutable list defaults ([]), which
        # are shared across every instance; use None sentinels instead.
        self.specific_threads = [] if specific_threads is None else specific_threads
        self.batch_size = batch_size
        self.metric_names = [] if metric_names is None else metric_names
        self.metric_functions = [] if metric_functions is None else metric_functions
        self.metric_types = [] if metric_types is None else metric_types
        self.metric_dtypes = [] if metric_dtypes is None else metric_dtypes
        self.save_path = save_path
        self.collate_fn = collate_fn
        self.custom_map_init = custom_map_init
        self.custom_map_update = custom_map_update
        self.custom_map_finalize = custom_map_finalize
        self.custom_reduce = custom_reduce
def init_metric_results(self, thread_id, metric_names, metric_types,
                        metric_dtypes, save_path, worker_id):
    """Create per-thread output state for every metric.

    For 'single_value_per_sample' metrics, opens an mmap builder for the
    sample->metric mapping and an in-memory dict buffering metric->sample
    lists (spilled to csv by `update_metric_results`). For
    'accumulate_value_over_samples' metrics, only records the output file
    name; the accumulator tensor starts as None.

    Returns a list of per-metric result dicts, parallel to `metric_names`.
    """
    import glob  # local import: only needed to clear stale spill files
    metric_results = []
    for m_idx in range(len(metric_names)):
        metric_name, metric_type, metric_dtype = metric_names[m_idx], \
            metric_types[m_idx], metric_dtypes[m_idx]
        assert metric_dtype not in [np.float64, np.double], "Currently floating point metric values are not supported. Please change your metric into integer values (and potentially multiply a larger coefficient to keep the precision)."
        metric_save_path = f"{save_path}/{metric_name}/worker{worker_id}_thread{thread_id}/"
        os.makedirs(metric_save_path, exist_ok=True)
        if metric_type == 'single_value_per_sample':
            sample_to_metric_fname = f"{metric_save_path}/{metric_name}_sample_to_metric"
            sample_to_metric_builder = create_mmap_dataset_builder(
                sample_to_metric_fname,
                metric_dtype)
            metric_to_sample_fname = f"{metric_save_path}/{metric_name}_metric_to_sample"
            # Fix: was os.system(f"rm -rf {metric_to_sample_fname}*") -- a
            # shell escape; remove leftover spill files from a previous run
            # directly instead.
            for stale in glob.glob(f"{metric_to_sample_fname}*"):
                os.remove(stale)
            metric_to_sample_dict = defaultdict(list)
            metric_results.append({
                "sample_to_metric_fname": sample_to_metric_fname,
                "sample_to_metric_builder": sample_to_metric_builder,
                "metric_to_sample_fname": metric_to_sample_fname,
                "metric_to_sample_dict": metric_to_sample_dict
            })
        elif metric_type == 'accumulate_value_over_samples':
            metric_value = None
            metric_value_fname = f"{metric_save_path}/{metric_name}_metric_value"
            metric_results.append({
                "metric_value": metric_value,
                "metric_value_fname": metric_value_fname
            })
    return metric_results


def update_metric_results(self, data, metric_types, metric_functions,
                          metric_results):
    """Feed one batch through every metric function and record the outputs.

    For 'single_value_per_sample' metrics each sample's value is appended to
    the sample->metric builder, and the sample index (taken from
    data['index']) is buffered per metric value; buffers longer than 100 are
    spilled to a per-value csv file. For 'accumulate_value_over_samples'
    metrics the batch result is added into a running tensor.
    """
    for m_idx in range(len(metric_types)):
        metric_type, metric_function, metric_result = metric_types[m_idx], \
            metric_functions[m_idx], metric_results[m_idx]
        if metric_type == 'single_value_per_sample':
            metric_values = metric_function(data)
            for row in range(metric_values.size()[0]):
                metric_result["sample_to_metric_builder"].add_item(
                    metric_values[row].reshape(-1))
                # NOTE(review): assumes data['index'][row][0] is the global
                # sample index -- confirm against the dataset wrapper.
                metric_result["metric_to_sample_dict"][
                    metric_values[row].item()].append(data['index'][row][0].item())
            for m_value in metric_result["metric_to_sample_dict"]:
                if len(metric_result["metric_to_sample_dict"][m_value]) > 100:
                    metric_fname = metric_result["metric_to_sample_fname"]
                    with open(f"{metric_fname}_{m_value}.csv", 'a') as f:
                        writer = csv.writer(f)
                        writer.writerows(
                            [metric_result["metric_to_sample_dict"][m_value]])
                    metric_result["metric_to_sample_dict"][m_value] = []
        elif metric_type == 'accumulate_value_over_samples':
            metric_values = metric_function(data)
            if metric_result["metric_value"] is None:
                metric_result["metric_value"] = metric_values
            else:
                metric_result["metric_value"].add_(metric_values)
def finalize_metric_results(self, metric_types, metric_dtypes, metric_results):
    """Flush remaining buffered metric data and close all mmap builders."""
    for m_idx, metric_type in enumerate(metric_types):
        metric_dtype = metric_dtypes[m_idx]
        metric_result = metric_results[m_idx]
        if metric_type == 'single_value_per_sample':
            close_mmap_dataset_builder(metric_result["sample_to_metric_builder"],
                                       metric_result["sample_to_metric_fname"])
            # Spill whatever is still buffered in memory to the per-value
            # csv files, mirroring the overflow path in update_metric_results.
            buffered = metric_result["metric_to_sample_dict"]
            for m_value in buffered:
                if len(buffered[m_value]) == 0:
                    continue
                csv_fname = f'{metric_result["metric_to_sample_fname"]}_{m_value}.csv'
                with open(csv_fname, 'a') as f:
                    csv.writer(f).writerows([buffered[m_value]])
                buffered[m_value] = []
        elif metric_type == 'accumulate_value_over_samples':
            if metric_result["metric_value"] is not None:
                builder = create_mmap_dataset_builder(
                    metric_result["metric_value_fname"], metric_dtype)
                builder.add_item(metric_result["metric_value"].reshape(-1))
                close_mmap_dataset_builder(builder,
                                           metric_result["metric_value_fname"])
def run_map_helper(self, thread_id):
    """Map-phase worker for one thread: iterate this thread's dataset slice
    batch by batch and run every metric's update function over it."""
    start_idx, end_idx = self.thread_splits[thread_id][0], \
        self.thread_splits[thread_id][1]
    logger.info(f"worker {self.worker_id} thread {thread_id}: start working " \
        f"on data subset {start_idx} to {end_idx}")
    thread_dataset = Subset(self.dataset, list(range(start_idx, end_idx)))
    sampler = BatchSampler(SequentialSampler(thread_dataset),
                           batch_size=self.batch_size,
                           drop_last=False)
    loader_kwargs = {"batch_sampler": sampler, "num_workers": 0, "pin_memory": False}
    if self.collate_fn is not None:
        loader_kwargs["collate_fn"] = self.collate_fn
    iterator = iter(DataLoader(thread_dataset, **loader_kwargs))
    # Resolve the map callbacks once: custom overrides win over defaults.
    init_fn = self.init_metric_results if self.custom_map_init is None \
        else self.custom_map_init
    update_fn = self.update_metric_results if self.custom_map_update is None \
        else self.custom_map_update
    finalize_fn = self.finalize_metric_results if self.custom_map_finalize is None \
        else self.custom_map_finalize
    metric_results = init_fn(thread_id,
                             self.metric_names,
                             self.metric_types,
                             self.metric_dtypes,
                             self.save_path,
                             self.worker_id)
    total_sample = len(thread_dataset)
    processed_sample = 0
    start = time.time()
    while True:
        try:
            data = next(iterator)
        except StopIteration:
            logger.info(
                f"worker {self.worker_id} thread {thread_id}: reach end of file")
            break
        update_fn(data, self.metric_types, self.metric_functions, metric_results)
        processed_sample += self.batch_size
        duration = (time.time() - start) / 3600.0
        remain_duration = duration * total_sample / processed_sample - duration
        logger.info(
            f"worker {self.worker_id} thread {thread_id}: {processed_sample} " \
            f"out of {total_sample} processed in {duration:.2f} hr, " \
            f"estimated to finish in {remain_duration:.2f} hr")
    finalize_fn(self.metric_types, self.metric_dtypes, metric_results)
    logger.info(f"worker {self.worker_id} thread {thread_id}: finished")
def run_map(self):
    """Map-phase entry point: split the dataset across workers/threads and
    run `run_map_helper` for each thread of this worker (as a Process when
    more than one thread is configured).
    """
    self.worker_splits, self.thread_splits = split_dataset(self.dataset,
        self.num_workers, self.worker_id, self.num_threads)
    if len(self.specific_threads) > 0:
        threads_to_run = self.specific_threads
    else:
        threads_to_run = list(range(self.num_threads))
    if self.num_threads > 1:
        # Fix: the original indexed the process list with the thread id
        # (p[thread]), which raises IndexError whenever specific_threads is
        # not a 0-based prefix (e.g. [2, 3]). Index by position instead.
        processes = []
        for thread in threads_to_run:
            proc = Process(target=self.run_map_helper, args=(thread, ))
            proc.start()
            processes.append(proc)
        for proc in processes:
            proc.join()
    else:
        assert self.num_threads == 1
        self.run_map_helper(0)


def get_metric_value_percentiles(self, metric_name, num_sample_per_value,
                                 total_num_samples):
    """Log every 5th percentile of a metric's value distribution.

    `num_sample_per_value` maps metric value -> number of samples with that
    value; values are scanned in ascending order.
    """
    logger.info(f"Checking the value percentiles of metric {metric_name}...")
    processed_samples = 0
    current_percentile = 5
    for key in sorted(num_sample_per_value.keys()):
        processed_samples += num_sample_per_value[key]
        # Fix: was a single `if`, which advanced only one percentile per
        # value and silently skipped percentiles when one value covers more
        # than 5% of the samples.
        while current_percentile <= 100 and \
                processed_samples >= total_num_samples * current_percentile / 100.0:
            logger.info(
                f"Metric {metric_name} {current_percentile}th percentile: {key}")
            current_percentile += 5
def merge_gather_map_stats(self, num_workers, num_threads, num_threads_reduce,
                           t_idx_reduce, metric_save_path, metric_name,
                           return_dict):
    """Reduce helper: for the (worker, thread) shards assigned to this
    reduce thread, collect each shard's unique metric values and its sample
    count into `return_dict[t_idx_reduce]`."""
    results = []
    for w_idx in range(num_workers):
        for t_idx in range(num_threads):
            # Shards are assigned round-robin across reduce threads.
            if (w_idx * num_threads + t_idx) % num_threads_reduce != t_idx_reduce:
                continue
            w_metric_save_path = f"{metric_save_path}/worker{w_idx}_thread{t_idx}/"
            w_sample_to_metric_fname = f"{w_metric_save_path}/{metric_name}_sample_to_metric"
            w_sample_to_metric = MMapIndexedDataset(w_sample_to_metric_fname,
                                                    skip_warmup=True)
            unique_v = list(np.unique(w_sample_to_metric))
            sample_to_metric_count = len(w_sample_to_metric)
            logger.info(
                f"Finished gathering map stats from worker {w_idx} thread {t_idx}.")
            results.append([unique_v, sample_to_metric_count])
    return_dict[t_idx_reduce] = results


def merge_sample_to_metric(self, t_idx_reduce, metric_save_path, metric_name,
                           metric_value_dtype, map_worker_thread):
    """Reduce helper: concatenate the sample->metric shards of the given
    (worker, thread) pairs into one mmap chunk for this reduce thread."""
    sample_to_metric_fname = f"{metric_save_path}/{metric_name}_sample_to_metric_thread{t_idx_reduce}"
    merged_builder = create_mmap_dataset_builder(sample_to_metric_fname,
                                                 metric_value_dtype)
    for w_idx, t_idx in map_worker_thread:
        shard_dir = f"{metric_save_path}/worker{w_idx}_thread{t_idx}/"
        shard_fname = f"{shard_dir}/{metric_name}_sample_to_metric"
        shard = MMapIndexedDataset(shard_fname, skip_warmup=True)
        for row in range(len(shard)):
            merged_builder.add_item(
                torch.tensor(shard[row].astype(np.int64), dtype=torch.long))
        logger.info(
            f"Finished merge_sample_to_metric from worker {w_idx} thread {t_idx}.")
    close_mmap_dataset_builder(merged_builder, sample_to_metric_fname)


def merge_metric_to_sample(self, t_idx_reduce, metric_save_path, metric_name,
                           sample_idx_dtype, metric_value_dtype,
                           unique_metric_values, num_workers, num_threads):
    """Reduce helper: for each metric value assigned to this reduce thread,
    gather every sample index with that value from all map workers' csv
    spill files into index_to_sample/index_to_metric mmap chunks."""
    index_to_sample_fname = f"{metric_save_path}/{metric_name}_index_to_sample_thread{t_idx_reduce}"
    index_to_sample_builder = create_mmap_dataset_builder(index_to_sample_fname,
                                                          sample_idx_dtype)
    index_to_metric_fname = f"{metric_save_path}/{metric_name}_index_to_metric_thread{t_idx_reduce}"
    index_to_metric_builder = create_mmap_dataset_builder(index_to_metric_fname,
                                                          metric_value_dtype)
    for unique_v in unique_metric_values:
        samples = []
        for w_idx in range(num_workers):
            for t_idx in range(num_threads):
                shard_dir = f"{metric_save_path}/worker{w_idx}_thread{t_idx}/"
                spill_fname = f"{shard_dir}/{metric_name}_metric_to_sample_{unique_v}.csv"
                if not os.path.isfile(spill_fname):
                    continue
                with open(spill_fname, 'r') as f:
                    for row in csv.reader(f):
                        samples += [int(x) for x in row]
        index_to_sample_builder.add_item(torch.tensor(samples, dtype=torch.long))
        index_to_metric_builder.add_item(torch.tensor([unique_v], dtype=torch.long))
        logger.info(f"Finished reducing metric {metric_name} value {unique_v}.")
    close_mmap_dataset_builder(index_to_sample_builder, index_to_sample_fname)
    close_mmap_dataset_builder(index_to_metric_builder, index_to_metric_fname)
def merge_map_results(self, dataset, metric_names, metric_types, save_path,
                      num_workers, num_threads, num_threads_reduce):
    """Reduce phase: merge every worker/thread's map output into the final
    per-metric index files.

    For 'single_value_per_sample' metrics this produces sample_to_metric,
    index_to_sample, index_to_metric and a percentile-merged
    index_to_sample file; for 'accumulate_value_over_samples' metrics the
    per-shard accumulators are summed into a single metric_value file.
    """
    total_num_samples = len(dataset)
    sample_idx_dtype = find_fit_int_dtype(0, total_num_samples - 1)
    logger.info(
        f"Total number of data samples: {total_num_samples}. Will use {sample_idx_dtype} to store the sample indexes."
    )
    for m_idx in range(len(metric_names)):
        metric_name, metric_type = metric_names[m_idx], metric_types[m_idx]
        if metric_type == 'single_value_per_sample':
            metric_save_path = f"{save_path}/{metric_name}/"
            sample_to_metric_count = 0
            unique_metric_values = set([])
            # Gather unique metric values + sample counts in parallel.
            manager = Manager()
            return_dict = manager.dict()
            p = []
            for t_idx_reduce in range(num_threads_reduce):
                p.append(
                    Process(target=self.merge_gather_map_stats,
                            args=(
                                num_workers,
                                num_threads,
                                num_threads_reduce,
                                t_idx_reduce,
                                metric_save_path,
                                metric_name,
                                return_dict,
                            )))
                p[t_idx_reduce].start()
            for t_idx_reduce in range(num_threads_reduce):
                p[t_idx_reduce].join()
            for t_idx_reduce in range(num_threads_reduce):
                results = return_dict[t_idx_reduce]
                for res in results:
                    unique_metric_values = unique_metric_values.union(set(res[0]))
                    sample_to_metric_count += res[1]
            value_max = max(unique_metric_values)
            value_min = min(unique_metric_values)
            assert sample_to_metric_count == total_num_samples, "The number of samples in map result files are not correct. It's possible that some map worker didn't finish successfully."
            metric_value_dtype = find_fit_int_dtype(value_min, value_max)
            logger.info(
                f"Metric {metric_name} has values between {value_min} and {value_max}. Will use {metric_value_dtype} to store the metric values."
            )

            # sample_to_metric: merge shards in parallel, then concatenate
            # the per-reduce-thread chunks in order.
            map_worker_thread = []
            for w_idx in range(num_workers):
                for t_idx in range(num_threads):
                    map_worker_thread.append([w_idx, t_idx])
            thread_splits = split_index(0, len(map_worker_thread),
                                        num_threads_reduce)
            p = []
            for t_idx_reduce in range(num_threads_reduce):
                start_idx, end_idx = thread_splits[t_idx_reduce][0], \
                    thread_splits[t_idx_reduce][1]
                p.append(
                    Process(target=self.merge_sample_to_metric,
                            args=(
                                t_idx_reduce,
                                metric_save_path,
                                metric_name,
                                metric_value_dtype,
                                map_worker_thread[start_idx:end_idx],
                            )))
                p[t_idx_reduce].start()
            for t_idx_reduce in range(num_threads_reduce):
                p[t_idx_reduce].join()

            sample_to_metric_fname = f"{metric_save_path}/{metric_name}_sample_to_metric"
            sample_to_metric_builder = create_mmap_dataset_builder(
                sample_to_metric_fname, metric_value_dtype)
            for t_idx_reduce in range(num_threads_reduce):
                chunk_fname = f"{metric_save_path}/{metric_name}_sample_to_metric_thread{t_idx_reduce}"
                logger.info(f"Merging file {chunk_fname}")
                sample_to_metric_builder.merge_file_(chunk_fname)
            close_mmap_dataset_builder(sample_to_metric_builder,
                                       sample_to_metric_fname)
            sample_to_metric = MMapIndexedDataset(sample_to_metric_fname,
                                                  skip_warmup=True)
            assert len(sample_to_metric) == total_num_samples

            # metric_to_sample: one row of sample indexes per unique metric
            # value, produced in parallel over value ranges.
            unique_metric_values = list(sorted(unique_metric_values))
            thread_splits = split_index(0, len(unique_metric_values),
                                        num_threads_reduce)
            p = []
            for t_idx_reduce in range(num_threads_reduce):
                start_idx, end_idx = thread_splits[t_idx_reduce][0], \
                    thread_splits[t_idx_reduce][1]
                p.append(
                    Process(target=self.merge_metric_to_sample,
                            args=(
                                t_idx_reduce,
                                metric_save_path,
                                metric_name,
                                sample_idx_dtype,
                                metric_value_dtype,
                                unique_metric_values[start_idx:end_idx],
                                num_workers,
                                num_threads,
                            )))
                p[t_idx_reduce].start()
            for t_idx_reduce in range(num_threads_reduce):
                p[t_idx_reduce].join()
            index_to_sample_fname = f"{metric_save_path}/{metric_name}_index_to_sample"
            index_to_sample_builder = create_mmap_dataset_builder(
                index_to_sample_fname, sample_idx_dtype)
            index_to_metric_fname = f"{metric_save_path}/{metric_name}_index_to_metric"
            index_to_metric_builder = create_mmap_dataset_builder(
                index_to_metric_fname, metric_value_dtype)
            for t_idx_reduce in range(num_threads_reduce):
                chunk_is_fname = f"{metric_save_path}/{metric_name}_index_to_sample_thread{t_idx_reduce}"
                logger.info(f"Merging file {chunk_is_fname}")
                index_to_sample_builder.merge_file_(chunk_is_fname)
                chunk_im_fname = f"{metric_save_path}/{metric_name}_index_to_metric_thread{t_idx_reduce}"
                logger.info(f"Merging file {chunk_im_fname}")
                index_to_metric_builder.merge_file_(chunk_im_fname)
            close_mmap_dataset_builder(index_to_sample_builder,
                                       index_to_sample_fname)
            close_mmap_dataset_builder(index_to_metric_builder,
                                       index_to_metric_fname)
            num_sample_per_value = {}
            index_to_sample = MMapIndexedDataset(index_to_sample_fname,
                                                 skip_warmup=True)
            index_to_metric = MMapIndexedDataset(index_to_metric_fname,
                                                 skip_warmup=True)
            index_to_sample_merged_fname = f"{metric_save_path}/{metric_name}_index_to_sample_percentile_merged"
            index_to_sample_merged_builder = create_mmap_dataset_builder(
                index_to_sample_merged_fname, sample_idx_dtype)
            for v_idx in range(len(index_to_sample)):
                if v_idx > 0:
                    # Metric values must be strictly increasing row by row.
                    assert index_to_metric[v_idx] > index_to_metric[v_idx - 1]
                num_sample_per_value[index_to_metric[v_idx][0]] = len(
                    index_to_sample[v_idx])
            assert sum(num_sample_per_value.values()) == total_num_samples
            # Fix: with fewer than 100 unique values the original computed
            # merge_step == 0, and range(0, n, 0) raises ValueError.
            merge_step = max(1, len(index_to_sample) // 100)
            for v_idx in range(0, len(index_to_sample), merge_step):
                merged_samples = np.copy(
                    np.concatenate(
                        index_to_sample[v_idx:min(len(index_to_sample),
                                                  (v_idx + merge_step))],
                        axis=None))
                index_to_sample_merged_builder.add_item(
                    torch.tensor(merged_samples.astype(np.int64),
                                 dtype=torch.long))
                logger.info(
                    f"Finished merging index_to_sample {v_idx} to {v_idx+merge_step}."
                )
            close_mmap_dataset_builder(index_to_sample_merged_builder,
                                       index_to_sample_merged_fname)
            self.get_metric_value_percentiles(metric_name,
                                              num_sample_per_value,
                                              total_num_samples)
        elif metric_type == 'accumulate_value_over_samples':
            metric_save_path = f"{save_path}/{metric_name}/"
            metric_value = None
            for w_idx in range(num_workers):
                for t_idx in range(num_threads):
                    w_metric_save_path = f"{metric_save_path}/worker{w_idx}_thread{t_idx}/"
                    w_metric_value_fname = f"{w_metric_save_path}/{metric_name}_metric_value"
                    w_metric_value = MMapIndexedDataset(w_metric_value_fname,
                                                        skip_warmup=True)
                    if metric_value is None:
                        metric_value = np.copy(w_metric_value[0])
                    else:
                        metric_value += np.copy(w_metric_value[0])
            value_max = int(max(metric_value))
            value_min = int(min(metric_value))
            metric_value_dtype = find_fit_int_dtype(value_min, value_max)
            metric_value_fname = f"{metric_save_path}/{metric_name}_metric_value"
            metric_value_builder = create_mmap_dataset_builder(
                metric_value_fname, metric_value_dtype)
            metric_value_builder.add_item(
                torch.tensor(metric_value.astype(np.int64), dtype=torch.long))
            close_mmap_dataset_builder(metric_value_builder, metric_value_fname)


def run_reduce(self):
    """Reduce-phase entry point: run the default merge, or a user-provided
    custom reducer with the same arguments."""
    if self.custom_reduce is None:
        self.merge_map_results(self.dataset, self.metric_names,
                               self.metric_types, self.save_path,
                               self.num_workers, self.num_threads,
                               self.num_threads_reduce)
    else:
        self.custom_reduce(self.dataset, self.metric_names, self.metric_types,
                           self.save_path, self.num_workers, self.num_threads,
                           self.num_threads_reduce)
class DeepSpeedDataSampler(object):
    """Batch sampler for DeepSpeed data-efficiency curriculum learning.

    Sample indexes are drawn from difficulty-based data clusters (built on
    rank 0 and broadcast) and sliced per data-parallel rank.
    """
    def __init__(self,
                 data_efficiency_config,
                 one_epoch_total_samples,
                 micro_batch_size,
                 data_parallel_rank,
                 data_parallel_size,
                 data_parallel_group,
                 gradient_accumulation_steps,
                 global_rank,
                 drop_last=True):
        # Keep a copy of input params for later use.
        self.data_efficiency_config = data_efficiency_config
        self.one_epoch_total_samples = one_epoch_total_samples
        # Smallest integer dtype that can hold any sample index.
        self.index_dtype = find_fit_int_dtype(0, one_epoch_total_samples)
        self.total_samples = one_epoch_total_samples * self.data_efficiency_config[
            DATA_SAMPLING][DATA_SAMPLING_NUM_EPOCHS]
        self.micro_batch_size = micro_batch_size
        self.data_parallel_rank = data_parallel_rank
        self.data_parallel_group = data_parallel_group
        self.micro_batch_times_data_parallel_size = \
            self.micro_batch_size * data_parallel_size
        self.gradient_accumulation_steps = gradient_accumulation_steps
        self.global_batch_size = self.micro_batch_times_data_parallel_size * \
            self.gradient_accumulation_steps
        self.global_rank = global_rank
        self.drop_last = drop_last
        # Dedicated RNG so sampling is reproducible from the configured seed.
        self.np_rng = np.random.default_rng(
            self.data_efficiency_config[DATA_EFFICIENCY_SEED])
        self.state = {}
        self.batch = []
        self.consumed_samples = 0
        if self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
                CURRICULUM_LEARNING_ENABLED]:
            self.curriculum_step = 0
            self.current_difficulties = {}
            self.data_cluster_paths = []
            self.data_cluster_current_position = []
            self.curriculum_schedulers = {}
            self.curriculum_index_to_sample = {}
            self.curriculum_index_to_metric = {}
            self.difficulty_type = {}
            self.clustering_type = {}
            self.data_1epoch_size = None
            # Only rank 0 builds/owns the cluster files; other ranks receive
            # batches via broadcast.
            if self.global_rank == 0:
                self.data_clusters = []
                self.data_cluster_sizes = []
                cluster_path = self.data_efficiency_config[DATA_SAMPLING][
                    CURRICULUM_LEARNING][CURRICULUM_LEARNING_CLUSTER_PATH]
                if not os.path.exists(cluster_path):
                    os.makedirs(cluster_path)
            for metric in self.data_efficiency_config[DATA_SAMPLING][
                    CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS]:
                self.curriculum_schedulers[metric] = CurriculumScheduler(
                    data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING]
                    [CURRICULUM_LEARNING_METRICS][metric])
                self.difficulty_type[metric] = data_efficiency_config[DATA_SAMPLING][
                    CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS][metric][
                        CURRICULUM_LEARNING_DIFFICULTY_TYPE]
                self.clustering_type[metric] = data_efficiency_config[DATA_SAMPLING][
                    CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS][metric][
                        CURRICULUM_LEARNING_CLUSTERING_TYPE]
                if self.global_rank == 0:
                    # Single-cluster metrics need no per-value index files.
                    if self.clustering_type[metric] != CURRICULUM_LEARNING_SINGLE_CLUSTER:
                        self.curriculum_index_to_sample[metric] = MMapIndexedDataset(
                            data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING]
                            [CURRICULUM_LEARNING_METRICS][metric]
                            [CURRICULUM_LEARNING_SAMPLE_PATH],
                            skip_warmup=True)
                        if self.difficulty_type[
                                metric] == CURRICULUM_LEARNING_VALUE_BASED:
                            self.curriculum_index_to_metric[metric] = MMapIndexedDataset(
                                data_efficiency_config[DATA_SAMPLING]
                                [CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS]
                                [metric][CURRICULUM_LEARNING_METRIC_PATH],
                                skip_warmup=True)

        # Sanity checks.
        assert self.total_samples > 0, \
            'no sample to consume: {}'.format(self.total_samples)
        assert self.micro_batch_size > 0
        assert data_parallel_size > 0
        assert self.data_parallel_rank < data_parallel_size, \
            'data_parallel_rank should be smaller than data size: {}, ' \
            '{}'.format(self.data_parallel_rank, data_parallel_size)

    def __len__(self):
        """Total number of samples emitted over all configured epochs."""
        return self.total_samples

    def set_custom_curriculum_learning_schedule(self, schedule_func_dict):
        """Install user-provided difficulty schedules, keyed by metric name."""
        for metric in self.curriculum_schedulers:
            if metric in schedule_func_dict:
                self.curriculum_schedulers[metric].set_custom_get_difficulty(
                    schedule_func_dict[metric])

    def get_start_end_idx(self):
        """Return this data-parallel rank's slice bounds within a batch."""
        start_idx = self.data_parallel_rank * self.micro_batch_size
        end_idx = start_idx + self.micro_batch_size
        return start_idx, end_idx
> value_start: + row_samples = np.copy(self.curriculum_index_to_sample[metric][row]) + new_samples = row_samples if new_samples is None else np.concatenate( + (new_samples, + row_samples), + axis=None) + return new_samples + + def get_sample_based_on_metric_percentile(self, + metric, + percentile_start, + percentile_end): + new_samples = None + if self.data_1epoch_size is None: + self.data_1epoch_size = sum( + len(x) for x in self.curriculum_index_to_sample[metric]) + max_percentile = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][ + CURRICULUM_LEARNING_METRICS][metric][CURRICULUM_LEARNING_MAX_DIFFICULTY] + sample_per_percentile = self.data_1epoch_size // max_percentile + start_count = sample_per_percentile * percentile_start + end_count = sample_per_percentile * percentile_end + if percentile_end == max_percentile: + end_count = self.data_1epoch_size + current_count = 0 + for row in range(len(self.curriculum_index_to_sample[metric])): + row_size = len(self.curriculum_index_to_sample[metric][row]) + if current_count + row_size > start_count: + row_start = max(0, start_count - current_count) + if current_count + row_size <= end_count: + row_end = row_size + else: + row_end = end_count - current_count + row_samples = np.copy( + self.curriculum_index_to_sample[metric][row][row_start:row_end]) + new_samples = row_samples if new_samples is None else np.concatenate( + (new_samples, + row_samples), + axis=None) + current_count += row_size + if current_count >= end_count: + break + return new_samples + + def get_new_cluster(self, previous_difficulties): + cluster_fname = CURRICULUM_LEARNING_CLUSTER_PREFIX + for metric in self.curriculum_schedulers: + cluster_fname = f"{cluster_fname}_{metric}{self.current_difficulties[metric]}" + cluster_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][ + CURRICULUM_LEARNING_CLUSTER_PATH] + cluster_path = f"{cluster_path}/{cluster_fname}" + if self.global_rank == 0: + new_cluster = None + 
def get_new_cluster(self, previous_difficulties):
    """Build (on rank 0) and register the data cluster for the current
    difficulty combination, then synchronize all ranks.

    A cluster file is created only when at least one sample matches; every
    rank appends the cluster path only if the file exists after the barrier.
    """
    cluster_fname = CURRICULUM_LEARNING_CLUSTER_PREFIX
    # Cluster file name encodes the difficulty of every metric.
    for metric in self.curriculum_schedulers:
        cluster_fname = f"{cluster_fname}_{metric}{self.current_difficulties[metric]}"
    cluster_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
        CURRICULUM_LEARNING_CLUSTER_PATH]
    cluster_path = f"{cluster_path}/{cluster_fname}"
    if self.global_rank == 0:
        new_cluster = None
        need_clustering = 0
        for metric in self.clustering_type:
            if self.clustering_type[metric] != CURRICULUM_LEARNING_SINGLE_CLUSTER:
                need_clustering += 1
        if need_clustering > 1:
            # Multiple clustered metrics: intersect the per-metric clusters
            # (each from easiest up to the current difficulty), then drop
            # samples already assigned to earlier clusters.
            for metric in self.curriculum_schedulers:
                if self.clustering_type[
                        metric] == CURRICULUM_LEARNING_SINGLE_CLUSTER:
                    metric_cluster = np.arange(start=0,
                                               stop=self.one_epoch_total_samples,
                                               step=1,
                                               dtype=self.index_dtype)
                else:
                    if self.difficulty_type[
                            metric] == CURRICULUM_LEARNING_VALUE_BASED:
                        metric_cluster = self.get_sample_based_on_metric_value(
                            metric,
                            float('-inf'),
                            self.current_difficulties[metric])
                    elif self.difficulty_type[
                            metric] == CURRICULUM_LEARNING_PERCENTILE_BASED:
                        metric_cluster = self.get_sample_based_on_metric_percentile(
                            metric,
                            0,
                            self.current_difficulties[metric])
                new_cluster = metric_cluster if new_cluster is None else \
                    np.intersect1d(new_cluster, metric_cluster, assume_unique=True)
            for cluster in self.data_clusters:
                new_cluster = np.setdiff1d(new_cluster,
                                           cluster[0],
                                           assume_unique=True)
        else:
            # At most one clustered metric: the new cluster is just the
            # samples between the previous and current difficulty.
            if len(self.data_clusters) == 0:
                new_cluster = np.arange(start=0,
                                        stop=self.one_epoch_total_samples,
                                        step=1,
                                        dtype=self.index_dtype)
            for metric in self.curriculum_schedulers:
                if self.clustering_type[metric] != CURRICULUM_LEARNING_SINGLE_CLUSTER:
                    if self.difficulty_type[
                            metric] == CURRICULUM_LEARNING_VALUE_BASED:
                        new_cluster = self.get_sample_based_on_metric_value(
                            metric,
                            previous_difficulties[metric],
                            self.current_difficulties[metric])
                    elif self.difficulty_type[
                            metric] == CURRICULUM_LEARNING_PERCENTILE_BASED:
                        new_cluster = self.get_sample_based_on_metric_percentile(
                            metric,
                            previous_difficulties[metric],
                            self.current_difficulties[metric])
        if new_cluster is not None and len(new_cluster) > 0:
            logger.info(
                f"new data cluster (previous_difficulties {previous_difficulties}, current_difficulties {self.current_difficulties}) with size {len(new_cluster)} generated."
            )
            # Shuffle once at creation; later wrap-arounds reshuffle again.
            self.np_rng.shuffle(new_cluster)
            cluster_builder = create_mmap_dataset_builder(cluster_path,
                                                          self.index_dtype)
            cluster_builder.add_item_numpy(new_cluster)
            close_mmap_dataset_builder(cluster_builder, cluster_path)
            self.data_clusters.append(
                MMapIndexedDataset(cluster_path,
                                   skip_warmup=True))
            self.data_cluster_sizes.append(len(self.data_clusters[-1][0]))
        else:
            logger.info(
                f"new data cluster (previous_difficulties {previous_difficulties}, current_difficulties {self.current_difficulties}) has no matched data thus skipped."
            )
    # All ranks wait for rank 0 to finish writing before checking the file.
    dist.barrier(group=self.data_parallel_group)
    # NOTE(review): assumes the mmap builder writes a "<path>.bin" file --
    # confirm against the indexed_dataset builder implementation.
    if os.path.isfile(f"{cluster_path}.bin"):
        self.data_cluster_paths.append(cluster_fname)
        self.data_cluster_current_position.append(0)
def sample_from_clusters(self):
    """Decide how many samples of the next global batch come from each data
    cluster, drawing cluster ids proportionally to cluster size. Returns a
    per-cluster count array (bincount over the draws)."""
    num_clusters = len(self.data_clusters)
    total_size = sum(self.data_cluster_sizes)
    weights = [size / total_size for size in self.data_cluster_sizes]
    drawn = self.np_rng.choice(num_clusters,
                               self.global_batch_size,
                               replace=True,
                               p=weights)
    return np.bincount(drawn, minlength=num_clusters)


def reshuffle_clusters(self, cidx):
    """Reshuffle cluster `cidx` on disk with the sampler RNG and reload it."""
    cluster_fname = self.data_cluster_paths[cidx]
    root = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
        CURRICULUM_LEARNING_CLUSTER_PATH]
    cluster_path = f"{root}/{cluster_fname}"
    shuffled = np.copy(self.data_clusters[cidx][0])
    self.np_rng.shuffle(shuffled)
    builder = create_mmap_dataset_builder(cluster_path, self.index_dtype)
    builder.add_item_numpy(shuffled)
    close_mmap_dataset_builder(builder, cluster_path)
    self.data_clusters[cidx] = MMapIndexedDataset(cluster_path, skip_warmup=True)
def get_sample_from_cluster(self, cidx, num_samples):
    """Draw `num_samples` sample indexes from cluster `cidx`, reshuffling
    the cluster and wrapping to its start when it runs out."""
    read_pos = self.data_cluster_current_position[cidx]
    samples = list(
        np.copy(self.data_clusters[cidx][0][read_pos:(read_pos + num_samples)]))
    self.data_cluster_current_position[cidx] += num_samples
    if len(samples) < num_samples:
        num_samples_remained = num_samples - len(samples)
        logger.info(f"reshuffling cluster {cidx}.")
        self.reshuffle_clusters(cidx)
        samples += list(np.copy(self.data_clusters[cidx][0][:num_samples_remained]))
        self.data_cluster_current_position[cidx] = num_samples_remained
    return samples


def get_next_global_batch(self):
    """Assemble the next global batch of sample indexes on rank 0 and
    broadcast it to the whole data-parallel group."""
    if self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][
            CURRICULUM_LEARNING_ENABLED]:
        self.curriculum_step += 1
        need_new_cluster = False
        previous_difficulties = {}
        for metric, scheduler in self.curriculum_schedulers.items():
            next_difficulty = scheduler.update_difficulty(self.curriculum_step)
            if metric not in self.current_difficulties or \
                    next_difficulty != self.current_difficulties[metric]:
                need_new_cluster = True
            if metric in self.current_difficulties:
                previous_difficulties[metric] = self.current_difficulties[metric]
            elif self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED:
                previous_difficulties[metric] = float('-inf')
            elif self.difficulty_type[metric] == CURRICULUM_LEARNING_PERCENTILE_BASED:
                previous_difficulties[metric] = 0
            self.current_difficulties[metric] = next_difficulty
        if need_new_cluster:
            self.get_new_cluster(previous_difficulties)
        if self.global_rank == 0:
            samples_per_cluster = self.sample_from_clusters()
            batch = []
            for cidx, count in enumerate(samples_per_cluster):
                batch += self.get_sample_from_cluster(cidx, count)
            self.np_rng.shuffle(batch)
            batch = torch.tensor(batch,
                                 device=get_accelerator().current_device_name(),
                                 dtype=torch.long).view(-1)
        else:
            batch = torch.empty(self.global_batch_size,
                                device=get_accelerator().current_device_name(),
                                dtype=torch.long)
        dist.broadcast(batch, 0, group=self.data_parallel_group)
        self.batch = batch.tolist()
self.batch[:self.micro_batch_times_data_parallel_size] + self.batch = self.batch[self.micro_batch_times_data_parallel_size:] + if len(current_batch) == self.micro_batch_times_data_parallel_size or \ + (len(current_batch) > 0 and not self.drop_last): + start_idx, end_idx = self.get_start_end_idx() + yield current_batch[start_idx:end_idx] + self.consumed_samples += len(current_batch) + current_batch = [] + + def state_dict(self): + return { + CURRICULUM_LEARNING_BATCH: self.batch, + CURRICULUM_LEARNING_CONSUMED_SAMPLES: self.consumed_samples, + CURRICULUM_LEARNING_STEP: self.curriculum_step, + CURRICULUM_LEARNING_CURRENT_DIFFICULTIES: self.current_difficulties, + CURRICULUM_LEARNING_DATA_CLUSTER_PATHS: self.data_cluster_paths, + CURRICULUM_LEARNING_DATA_CLUSTER_CURRENT_POSITION: + self.data_cluster_current_position, + CURRICULUM_LEARNING_NP_RNG_STATE: np.random.get_state() + } + + def load_state_dict(self, state_dict): + self.batch = state_dict[CURRICULUM_LEARNING_BATCH] + self.consumed_samples = state_dict[CURRICULUM_LEARNING_CONSUMED_SAMPLES] + self.curriculum_step = state_dict[CURRICULUM_LEARNING_STEP] + self.current_difficulties = state_dict[CURRICULUM_LEARNING_CURRENT_DIFFICULTIES] + self.data_cluster_paths = state_dict[CURRICULUM_LEARNING_DATA_CLUSTER_PATHS] + self.data_cluster_current_position = state_dict[ + CURRICULUM_LEARNING_DATA_CLUSTER_CURRENT_POSITION] + np.random.set_state(state_dict[CURRICULUM_LEARNING_NP_RNG_STATE]) + cluster_root_path = self.data_efficiency_config[DATA_SAMPLING][ + CURRICULUM_LEARNING][CURRICULUM_LEARNING_CLUSTER_PATH] + # Backward compatibility: previously data_cluster_paths were stored as + # absolute paths. Now we changed it to just the file name so that even + # if user moved the cluster files, the checkpoint loading still works + # as long as user set the correct new CURRICULUM_LEARNING_CLUSTER_PATH + # in deepspeed json config. 
+ for idx in range(len(self.data_cluster_paths)): + if '/' in self.data_cluster_paths[idx]: + self.data_cluster_paths[idx] = self.data_cluster_paths[idx].split( + '/')[-1] + if self.global_rank == 0: + for cluster_fname in self.data_cluster_paths: + cluster_path = f"{cluster_root_path}/{cluster_fname}" + self.data_clusters.append( + MMapIndexedDataset(cluster_path, + skip_warmup=True)) + self.data_cluster_sizes.append(len(self.data_clusters[-1][0])) diff --git a/deepspeed/runtime/data_pipeline/data_sampling/indexed_dataset.py b/deepspeed/runtime/data_pipeline/data_sampling/indexed_dataset.py new file mode 100644 index 0000000..b2a3166 --- /dev/null +++ b/deepspeed/runtime/data_pipeline/data_sampling/indexed_dataset.py @@ -0,0 +1,645 @@ +''' +Copyright 2022 The Microsoft DeepSpeed Team +Part of this code was adopted from https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/data/indexed_dataset.py +''' + +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +# copied from fairseq/fairseq/data/indexed_dataset.py +# Removed IndexedRawTextDataset since it relied on Fairseq dictionary +# other slight modifications to remove fairseq dependencies +# Added document index to index file and made it accessible. +# An empty sentence no longer separates documents. 
# Some of the fixes/improvements are adopted from
# https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/main/megatron/data/indexed_dataset.py

from functools import lru_cache
import os
import shutil
import struct
from itertools import accumulate

import numpy as np
import torch


def __best_fitting_dtype(vocab_size=None):
    # uint16 suffices as long as the vocabulary stays below 65500 entries.
    if vocab_size is not None and vocab_size < 65500:
        return np.uint16
    return np.int32


def get_available_dataset_impl():
    """Names of the supported on-disk dataset implementations."""
    return ['lazy', 'cached', 'mmap']


def infer_dataset_impl(path):
    """Peek at the index-file magic bytes to decide which impl wrote it."""
    if not IndexedDataset.exists(path):
        print(f"Dataset does not exist: {path}")
        print(
            "Path should be a basename that both .idx and .bin can be appended to get full filenames."
        )
        return None
    with open(index_file_path(path), 'rb') as f:
        magic = f.read(8)
    if magic == IndexedDataset._HDR_MAGIC:
        return 'cached'
    if magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
        return 'mmap'
    return None


def make_builder(out_file, impl, vocab_size=None):
    """Builder factory: mmap builder (dtype sized to the vocab) or plain."""
    if impl == 'mmap':
        return MMapIndexedDatasetBuilder(out_file,
                                         dtype=__best_fitting_dtype(vocab_size))
    return IndexedDatasetBuilder(out_file)


def make_dataset(path, impl, skip_warmup=False):
    """Dataset factory for 'lazy'/'cached'/'mmap' (or 'infer' from the files)."""
    if not IndexedDataset.exists(path):
        print(f"Dataset does not exist: {path}")
        print(
            "Path should be a basename that both .idx and .bin can be appended to get full filenames."
        )
        return None
    if impl == 'infer':
        impl = infer_dataset_impl(path)
    if impl == 'lazy' and IndexedDataset.exists(path):
        return IndexedDataset(path)
    if impl == 'cached' and IndexedDataset.exists(path):
        return IndexedCachedDataset(path)
    if impl == 'mmap' and MMapIndexedDataset.exists(path):
        return MMapIndexedDataset(path, skip_warmup)
    print(f"Unknown dataset implementation: {impl}")
    return None


def dataset_exists(path, impl):
    if impl == 'mmap':
        return MMapIndexedDataset.exists(path)
    return IndexedDataset.exists(path)


def read_longs(f, n):
    """Read n little-endian int64 values from file f into a numpy array."""
    out = np.empty(n, dtype=np.int64)
    f.readinto(out)
    return out


def write_longs(f, a):
    """Write sequence a to file f as int64 values."""
    f.write(np.array(a, dtype=np.int64))


# On-disk integer code <-> numpy dtype table used by the index header.
dtypes = {
    1: np.uint8,
    2: np.int8,
    3: np.int16,
    4: np.int32,
    5: np.int64,
    6: np.float64,
    7: np.double,
    8: np.uint16,
    9: np.uint32,
    10: np.uint64
}


def code(dtype):
    """Map a numpy dtype to its on-disk integer code (inverse of `dtypes`)."""
    for k, v in dtypes.items():
        if v == dtype:
            return k
    raise ValueError(dtype)


def index_file_path(prefix_path):
    return prefix_path + '.idx'


def data_file_path(prefix_path):
    return prefix_path + '.bin'


def create_doc_idx(sizes):
    """A zero-length sentence marks a document boundary; collect boundaries."""
    return [0] + [i + 1 for i, s in enumerate(sizes) if s == 0]
class IndexedCachedDataset(IndexedDataset):
    """IndexedDataset variant that prefetches requested items into one
    contiguous in-memory buffer and then serves reads from that cache."""

    def __init__(self, path):
        super().__init__(path)
        self.cache = None
        self.cache_index = {}  # item index -> element offset into self.cache

    @property
    def supports_prefetch(self):
        return True

    def prefetch(self, indices):
        """Load the given item indices into the cache (replaces prior cache)."""
        if all(i in self.cache_index for i in indices):
            return
        if not self.data_file:
            self.read_data(self.path)
        indices = sorted(set(indices))
        total_size = 0
        for i in indices:
            total_size += self.data_offsets[i + 1] - self.data_offsets[i]
        self.cache = np.empty(total_size, dtype=self.dtype)
        ptx = 0
        self.cache_index.clear()
        for i in indices:
            self.cache_index[i] = ptx
            size = self.data_offsets[i + 1] - self.data_offsets[i]
            a = self.cache[ptx:ptx + size]
            self.data_file.seek(self.data_offsets[i] * self.element_size)
            self.data_file.readinto(a)
            ptx += size
        if self.data_file:
            # close and delete data file after prefetch so we can pickle
            self.data_file.close()
            self.data_file = None

    # @lru_cache(maxsize=8)
    def __getitem__(self, idx):
        if isinstance(idx, int):
            i = idx
            self.check_index(i)
            tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
            a = np.empty(tensor_size, dtype=self.dtype)
            ptx = self.cache_index[i]
            np.copyto(a, self.cache[ptx:ptx + a.size])
            return a
        elif isinstance(idx, slice):
            # Hack just to make this work, can optimizer later if necessary
            sents = []
            for i in range(*idx.indices(len(self))):
                sents.append(self[i])
            return sents


class IndexedDatasetBuilder(object):
    """Streams tensors into a .bin data file while accumulating the
    bookkeeping (offsets, sizes, document index) needed to emit the
    matching .idx index in finalize()."""

    # Bytes per element for each supported dtype.
    # FIX: the original literal listed both np.float64: 4 and np.double: 8,
    # but np.float64 IS np.double, so the dict silently collapsed to 8 and
    # no 4-byte float entry survived; np.float32 is the intended 4-byte one.
    element_sizes = {
        np.uint8: 1,
        np.int8: 1,
        np.int16: 2,
        np.int32: 4,
        np.int64: 8,
        np.float32: 4,
        np.double: 8
    }

    def __init__(self, out_file, dtype=np.int32):
        self.out_file = open(out_file, 'wb')
        self.dtype = dtype
        self.data_offsets = [0]
        self.dim_offsets = [0]
        self.sizes = []
        self.element_size = self.element_sizes[self.dtype]
        self.doc_idx = [0]

    def add_item(self, tensor):
        """Append one torch tensor to the data file and record its layout."""
        # FIX: integer division — the write is always a whole number of
        # elements, and float offsets would break file.seek() when reading.
        # (Also renamed the local so it no longer shadows builtin `bytes`.)
        nbytes = self.out_file.write(np.array(tensor.numpy(), dtype=self.dtype))
        self.data_offsets.append(self.data_offsets[-1] + nbytes // self.element_size)
        for s in tensor.size():
            self.sizes.append(s)
        self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))

    def end_document(self):
        self.doc_idx.append(len(self.sizes))

    def merge_file_(self, another_file):
        """Append another on-disk IndexedDataset (same dtype) to this builder."""
        index = IndexedDataset(another_file)
        assert index.dtype == self.dtype

        doc_offset = len(self.sizes)

        begin = self.data_offsets[-1]
        for data_offset in index.data_offsets[1:]:
            self.data_offsets.append(begin + data_offset)
        self.sizes.extend(index.sizes)

        begin = self.dim_offsets[-1]
        for dim_offset in index.dim_offsets[1:]:
            self.dim_offsets.append(begin + dim_offset)
        self.doc_idx.extend((doc_offset + index.doc_idx)[1:])

        # Bulk-copy the raw data payload (replaces the manual 1KB read loop).
        with open(data_file_path(another_file), 'rb') as f:
            shutil.copyfileobj(f, self.out_file)

    def finalize(self, index_file):
        """Write the .idx companion: fixed header fields, then the four
        int64 arrays (dim_offsets, data_offsets, sizes, doc_idx).

        NOTE(review): the struct formats below were garbled in extraction and
        are reconstructed from the Megatron/fairseq upstream — verify against
        IndexedDataset.read_index before shipping.
        """
        self.out_file.close()
        index = open(index_file, 'wb')
        index.write(b'TNTIDX\x00\x00')                    # magic
        index.write(struct.pack('<Q', 1))                 # version
        index.write(struct.pack('<QQ', code(self.dtype), self.element_size))
        index.write(struct.pack('<QQ', len(self.data_offsets) - 1, len(self.sizes)))
        index.write(struct.pack('<Q', len(self.doc_idx)))
        write_longs(index, self.dim_offsets)
        write_longs(index, self.data_offsets)
        write_longs(index, self.sizes)
        write_longs(index, self.doc_idx)
        index.close()


def exscan_from_cumsum_(arr):
    """In-place exclusive scan from an inclusive cumsum (shift right):
    e.g. [10, 30, 35, 50] -> [0, 10, 30, 35]."""
    if arr.size > 1:
        arr[1:] = arr[:-1]
    if arr.size > 0:
        arr[0] = 0


def get_pointers_with_total(sizes, elemsize, dtype):
    """Return a numpy array of type np.dtype giving the byte offsets.

    Multiplies values in the sizes array by elemsize (bytes),
    and then computes an exclusive scan to get byte offsets.
    Returns the total number of bytes as second item in a tuple.
    """
    # scale values in sizes array by elemsize to get sizes in bytes
    pointers = np.array(sizes, dtype=dtype)
    pointers *= elemsize
    np.cumsum(pointers, axis=0, out=pointers)

    # get total number of bytes from all sizes (last element)
    bytes_last = pointers[-1] if len(sizes) > 0 else 0

    # convert to byte offsets
    exscan_from_cumsum_(pointers)

    return pointers, bytes_last


def find_fit_int_dtype(min_value, max_value):
    """Smallest numpy integer dtype that represents [min_value, max_value].

    NOTE(review): the def line was garbled in extraction; the name/signature
    are reconstructed from the parameters used in the (intact) body.
    """
    if min_value >= 0:
        if max_value <= 255:
            return np.uint8
        elif max_value <= 65535:
            return np.uint16
        elif max_value <= 4294967295:
            return np.uint32
        else:
            return np.uint64
    else:
        if max_value <= 127 and min_value >= -128:
            return np.int8
        elif max_value <= 32767 and min_value >= -32768:
            return np.int16
        elif max_value <= 2147483647 and min_value >= -2147483648:
            return np.int32
        else:
            return np.int64
split_index(start_idx, end_idx, num_partitions): + partition_size = math.ceil((end_idx - start_idx) / num_partitions) + partitions = [[ + start_idx + x * partition_size, + min(end_idx, + start_idx + (x + 1) * partition_size) + ] for x in range(num_partitions)] + return partitions + + +def split_dataset(dataset, num_workers, worker_id, num_threads): + worker_splits = split_index(0, len(dataset), num_workers) + thread_splits = split_index(worker_splits[worker_id][0], + worker_splits[worker_id][1], + num_threads) + return worker_splits, thread_splits + + +def create_mmap_dataset_builder(fname, dtype): + logger.info(f"Creating mmap dataset builder at {fname}.") + return MMapIndexedDatasetBuilder(f"{fname}.bin", dtype=dtype) + + +def close_mmap_dataset_builder(builder, fname): + builder.end_document() + builder.finalize(f"{fname}.idx") + logger.info(f"Finalized mmap dataset builder at {fname}.") diff --git a/deepspeed/runtime/dataloader.py b/deepspeed/runtime/dataloader.py index acd21d9..3734eed 100644 --- a/deepspeed/runtime/dataloader.py +++ b/deepspeed/runtime/dataloader.py @@ -2,9 +2,15 @@ Copyright 2019 The Microsoft DeepSpeed Team ''' -import torch from torch.utils.data import DataLoader, RandomSampler from torch.utils.data.distributed import DistributedSampler +from deepspeed.accelerator import get_accelerator + +from deepspeed.runtime.data_pipeline.data_sampling.data_sampler import DeepSpeedDataSampler +from deepspeed.runtime.data_pipeline.constants import CURRICULUM_LEARNING, \ + DATA_EFFICIENCY, DATA_SAMPLING_NUM_WORKERS +from deepspeed.runtime.constants import GRADIENT_ACCUMULATION_STEPS, \ + DATA_PARALLEL_GROUP, GLOBAL_RANK class RepeatingLoader: @@ -42,24 +48,46 @@ class DeepSpeedDataLoader(object): data_sampler=None, data_parallel_world_size=None, data_parallel_rank=None, - dataloader_drop_last=False): + dataloader_drop_last=False, + deepspeed_dataloader_config={}): + self.deepspeed_dataloader_config = deepspeed_dataloader_config self.tput_timer = 
tput_timer self.batch_size = batch_size - - if local_rank >= 0: - if data_sampler is None: - data_sampler = DistributedSampler(dataset=dataset, - num_replicas=data_parallel_world_size, - rank=data_parallel_rank) - device_count = 1 + self.curriculum_learning_enabled = False + if CURRICULUM_LEARNING in deepspeed_dataloader_config: + self.curriculum_learning_enabled = deepspeed_dataloader_config[ + CURRICULUM_LEARNING] + + if self.curriculum_learning_enabled: + data_sampler = DeepSpeedDataSampler( + self.deepspeed_dataloader_config[DATA_EFFICIENCY], + len(dataset), + self.batch_size, + data_parallel_rank, + data_parallel_world_size, + self.deepspeed_dataloader_config[DATA_PARALLEL_GROUP], + self.deepspeed_dataloader_config[GRADIENT_ACCUMULATION_STEPS], + self.deepspeed_dataloader_config[GLOBAL_RANK], + drop_last=dataloader_drop_last) + device_count = get_accelerator().device_count() + num_local_io_workers = self.deepspeed_dataloader_config[ + DATA_SAMPLING_NUM_WORKERS] else: - if data_sampler is None: - data_sampler = RandomSampler(dataset) - device_count = torch.cuda.device_count() - batch_size *= device_count - - if num_local_io_workers is None: - num_local_io_workers = 2 * device_count + if local_rank >= 0: + if data_sampler is None: + data_sampler = DistributedSampler( + dataset=dataset, + num_replicas=data_parallel_world_size, + rank=data_parallel_rank) + device_count = 1 + else: + if data_sampler is None: + data_sampler = RandomSampler(dataset) + device_count = get_accelerator().device_count() + batch_size *= device_count + + if num_local_io_workers is None: + num_local_io_workers = 2 * device_count self.num_local_io_workers = num_local_io_workers self.data_sampler = data_sampler @@ -68,9 +96,15 @@ class DeepSpeedDataLoader(object): self.device_count = device_count self.batch_size = batch_size self.pin_memory = pin_memory - self.len = len(self.data_sampler) self.data = None self.dataloader_drop_last = dataloader_drop_last + self.post_process_func = None + + if 
def __next__(self):
    """Return the next micro-batch; in curriculum mode, apply the registered
    post-process hook together with the sampler's current state."""
    if self.tput_timer:
        self.tput_timer.start()
    if not self.curriculum_learning_enabled:
        return next(self.data)
    data = next(self.data_iterator)
    if self.post_process_func is not None:
        data = self.post_process_func(data, self.data_sampler.state_dict())
    return data


def _create_dataloader(self):
    """Build the underlying torch DataLoader.

    Curriculum mode drives batching through the DeepSpeed data sampler
    (as a batch_sampler); otherwise a regular batch_size/sampler loader
    is built and wrapped in a generator.
    """
    if self.curriculum_learning_enabled:
        shared = dict(pin_memory=self.pin_memory,
                      batch_sampler=self.data_sampler,
                      num_workers=self.num_local_io_workers)
        if self.collate_fn is None:
            self.dataloader = DataLoader(self.dataset, **shared)
        else:
            self.dataloader = DataLoader(self.dataset,
                                         collate_fn=self.collate_fn,
                                         **shared)
        self.data_iterator = iter(self.dataloader)
        return self.dataloader
    shared = dict(batch_size=self.batch_size,
                  pin_memory=self.pin_memory,
                  sampler=self.data_sampler,
                  num_workers=self.num_local_io_workers,
                  drop_last=self.dataloader_drop_last)
    if self.collate_fn is None:
        self.dataloader = DataLoader(self.dataset, **shared)
    else:
        self.dataloader = DataLoader(self.dataset,
                                     collate_fn=self.collate_fn,
                                     **shared)
    self.data = (x for x in self.dataloader)
    return self.dataloader
activation_checkpointing, -) +from deepspeed.runtime.zero.parameter_offload import DeepSpeedZeRoOffload +from deepspeed.runtime.zero.config import ZERO_OPTIMIZATION + from deepspeed.runtime.fp16.fused_optimizer import FP16_Optimizer from deepspeed.runtime.fp16.unfused_optimizer import FP16_UnfusedOptimizer from deepspeed.runtime.bf16_optimizer import BF16_Optimizer @@ -40,27 +38,48 @@ from deepspeed.runtime.config import DeepSpeedConfig, DEEPSPEED_OPTIMIZERS, \ from deepspeed.runtime.dataloader import DeepSpeedDataLoader from deepspeed.runtime.constants import \ ROUTE_TRAIN, ROUTE_PREDICT, ROUTE_EVAL, \ - PLD_THETA, PLD_GAMMA, BFLOAT16, FP16 -from deepspeed.runtime.zero.constants import \ - ZERO_OPTIMIZATION_OPTIMIZER_STATES, ZERO_OPTIMIZATION_GRADIENTS, ZERO_OPTIMIZATION_WEIGHTS + PLD_THETA, PLD_GAMMA, BFLOAT16, FP16, AMP, GRADIENT_ACCUMULATION_STEPS, \ + DATA_PARALLEL_GROUP, GLOBAL_RANK +from deepspeed.runtime.zero.config import ZeroStageEnum +from deepspeed.compression import compression_scheduler +from deepspeed.compression.constants import \ + WEIGHT_QUANTIZE_IN_FORWARD_ENABLED, \ + WEIGHT_QUANTIZATION, SHARED_PARAMETERS, \ + WEIGHT_QUANTIZE_ENABLED, \ + WEIGHT_QUANTIZE_GROUPS, \ + WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE, \ + WEIGHT_QUANTIZE_CHANGE_RATIO, \ + WEIGHT_QUANTIZE_TYPE, \ + WEIGHT_QUANTIZE_ROUNDING, \ + WEIGHT_QUANTIZE_VERBOSE, \ + WEIGHT_QUANTIZE_KERNEL from deepspeed.checkpoint.constants import OPTIMIZER_STATE_DICT from deepspeed.runtime.sparse_tensor import SparseTensor -import deepspeed.runtime.lr_schedules as lr_schedules -import deepspeed.utils.groups as groups -from deepspeed.runtime.utils import get_grad_norm -from deepspeed.utils import logger, log_dist, init_distributed, instrument_w_nvtx +from deepspeed.runtime import lr_schedules +from deepspeed.utils import groups +from deepspeed.utils import logger, log_dist, instrument_w_nvtx from deepspeed.utils.timer import ThroughputTimer, SynchronizedWallClockTimer from deepspeed.utils.debug import 
debug_extract_module_and_param_names +from deepspeed.monitor.monitor import MonitorMaster from deepspeed.runtime.progressive_layer_drop import ProgressiveLayerDrop from deepspeed.runtime.utils import clip_grad_norm_ from deepspeed.runtime.eigenvalue import Eigenvalue +from deepspeed.runtime.data_pipeline.constants import DATA_SAMPLING, \ + DATA_ROUTING, DATA_SAMPLING_ENABLED, CURRICULUM_LEARNING, \ + CURRICULUM_LEARNING_ENABLED, DATA_SAMPLING_NUM_WORKERS, RANDOM_LTD, \ + RANDOM_LTD_ENABLED, RANDOM_LTD_LAYER_ID, RANDOM_LTD_LAYER_NUM, \ + RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE, RANDOM_LTD_LAYER_TOKEN_LR_ENABLED, \ + RANDOM_LTD_GLOBAL_BATCH_SIZE, RANDOM_LTD_MICRO_BATCH_SIZE, DATA_EFFICIENCY from deepspeed.runtime.data_pipeline.curriculum_scheduler import CurriculumScheduler +from deepspeed.runtime.data_pipeline.data_routing.scheduler import RandomLTDScheduler +from deepspeed.runtime.data_pipeline.data_routing.helper import remove_random_ltd_state_dict +from deepspeed.runtime.data_pipeline.data_routing.basic_layer import RandomLayerTokenDrop + +from deepspeed.runtime.checkpoint_engine.torch_checkpoint_engine import TorchCheckpointEngine from .pipe.module import PipelineModule from .utils import ensure_directory_exists, get_ma_status -from ..ops.op_builder import UtilsBuilder -from ..ops.adam import DeepSpeedCPUAdam from ..ops.adam import FusedAdam from ..moe.sharded_moe import TopKGate, MOELayer from ..moe.layer import MoE @@ -68,7 +87,15 @@ from ..moe.utils import is_moe_param from ..git_version_info import version from deepspeed.profiling.flops_profiler.profiler import FlopsProfiler -from deepspeed.utils.logging import print_json_dist +from deepspeed.utils.logging import print_json_dist, print_configuration + +from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import UtilsBuilder + +from deepspeed.inference.config import DtypeEnum + +# Set to torch's distributed package or deepspeed.comm based inside DeepSpeedEngine init +dist = None 
MEMORY_OPT_ALLREDUCE_SIZE = 500000000 @@ -87,11 +114,12 @@ except ImportError: def split_half_float_double_sparse(tensors): + device_type = get_accelerator().device_name() supported_types = [ - "torch.cuda.HalfTensor", - "torch.cuda.FloatTensor", - "torch.cuda.DoubleTensor", - "torch.cuda.BFloat16Tensor", + "torch.{}.HalfTensor".format(device_type), + "torch.{}.FloatTensor".format(device_type), + "torch.{}.DoubleTensor".format(device_type), + "torch.{}.BFloat16Tensor".format(device_type), SparseTensor.type() ] @@ -106,13 +134,6 @@ def split_half_float_double_sparse(tensors): return buckets -def print_configuration(args, name): - logger.info("{}:".format(name)) - for arg in sorted(vars(args)): - dots = "." * (29 - len(arg)) - logger.info(" {} {} {}".format(arg, dots, getattr(args, arg))) - - FORWARD_MICRO_TIMER = 'forward_microstep' FORWARD_GLOBAL_TIMER = 'forward' BACKWARD_MICRO_TIMER = 'backward_microstep' @@ -185,7 +206,6 @@ class DeepSpeedEngine(Module): super(DeepSpeedEngine, self).__init__() self.dont_change_device = dont_change_device self.client_optimizer = optimizer - self.client_model_parameters = model_parameters self.client_lr_scheduler = lr_scheduler self.training_data = training_data self.collate_fn = collate_fn @@ -205,14 +225,21 @@ class DeepSpeedEngine(Module): self.eigenvalue = None self.block_eigenvalue = None self.gas_boundary_ctr = 0 - self.dist_backend = "nccl" + self.dist_backend = get_accelerator().communication_backend_name() self.has_moe_layers = False self.num_experts = [] self.gate_modules = [] self.moe_layers = [] self._step_applied = False self._global_grad_norm = None + self.use_ds_comm = False # False --> Use torch.dist, True --> Use ds.comm backend. 
+ + self.checkpoint_engine = None + + global dist + from deepspeed import comm as dist self._is_gradient_accumulation_boundary = None + self.scale_wrt_gas = None # for debug purposes - can then debug print: debug_get_module_name(module) debug_extract_module_and_param_names(model) @@ -224,16 +251,22 @@ class DeepSpeedEngine(Module): if self.config is None and config_params is not None: self.config = config_params - if dist_init_required is None: - dist_init_required = not dist.is_initialized() - - if dist_init_required is False: - assert ( - dist.is_initialized() is True - ), "Torch distributed not initialized. Please set dist_init_required to True or initialize before calling deepspeed.initialize()" + from deepspeed.comm import supported_torch_version + # This supported_torch_version check is for torch1.2 compatibility only + if supported_torch_version: + dist.init_distributed(dist_backend=self.dist_backend, + dist_init_required=dist_init_required) else: - # Initialize torch distributed if needed - init_distributed(dist_backend=self.dist_backend) + if dist_init_required is None: + dist_init_required = not dist.is_initialized() + + if dist_init_required is False: + assert ( + dist.is_initialized() is True + ), "Torch distributed not initialized. Please set dist_init_required to True or initialize before calling deepspeed.initialize()" + else: + if not dist.is_initialized(): + dist.init_process_group(backend=self.dist_backend) self._do_args_sanity_check(args) self._configure_with_arguments(args, mpu) @@ -241,14 +274,17 @@ class DeepSpeedEngine(Module): see_memory_usage(f"DeepSpeed Engine: After args sanity test", force=self.memory_breakdown()) if mpu is not None: - assert not self.elasticity_enabled(), ( - "Elasticity is not currently supported" " with model parallelism." 
- ) + if self.elasticity_enabled(): + if not self.is_elastic_model_parallel_supported(): + assert not self.elasticity_enabled(), ( + "Elasticity is not currently supported" " with model parallelism." + ) self._set_distributed_vars(args) - if self.tensorboard_enabled() and self.global_rank == 0: - self.summary_writer = self.get_summary_writer() + dist.configure(self._config) + + self.monitor = MonitorMaster(self._config.monitor_config) see_memory_usage( f"DeepSpeed Engine: Before configure distributed model", @@ -268,15 +304,13 @@ class DeepSpeedEngine(Module): self.timers = SynchronizedWallClockTimer() # Throughput timer self.tput_timer = ThroughputTimer( - batch_size=self.train_micro_batch_size_per_gpu(), - num_workers=self.dp_world_size, + batch_size=self.train_batch_size(), steps_per_output=self.steps_per_print(), monitor_memory=False, ) - if dist.get_rank() == 0: - logger.info( - f"DeepSpeed Flops Profiler Enabled: {self.flops_profiler_enabled()}") + log_dist(f"DeepSpeed Flops Profiler Enabled: {self.flops_profiler_enabled()}", + ranks=[0]) if self.flops_profiler_enabled(): self.flops_profiler = FlopsProfiler(self.module, self) @@ -290,15 +324,23 @@ class DeepSpeedEngine(Module): self.optimizer = None self.basic_optimizer = None self.lr_scheduler = None - if model_parameters or optimizer: + has_optimizer = False + + if optimizer or self.optimizer_name(): + has_optimizer = True + # If no parameters given by init default to module parameters + if model_parameters is None: + model_parameters = self.module.parameters() + + if has_optimizer: self._configure_optimizer(optimizer, model_parameters) self._configure_lr_scheduler(lr_scheduler) self._report_progress(0) elif self.zero_optimization(): # no optim selected but zero is enabled self.optimizer = self._configure_zero_optimizer(optimizer=None) - - self._get_model_parameters() + elif self.bfloat16_enabled(): + self.optimizer = self._configure_bf16_optimizer(optimizer=None) # Bookkeeping for sparse support 
self.sparse_tensor_module_names = set() @@ -313,7 +355,8 @@ class DeepSpeedEngine(Module): self.save_non_zero_checkpoint = False self.save_zero_checkpoint = False - self._configure_checkpointing(dist_init_required) + if not isinstance(self.optimizer, DeepSpeedZeRoOffload): + self._configure_checkpointing(dist_init_required) if self.eigenvalue_enabled(): self.eigenvalue = self._configure_eigenvalue() @@ -321,8 +364,17 @@ class DeepSpeedEngine(Module): if self.pld_enabled(): self.progressive_layer_drop = self._configure_progressive_layer_drop() - if self.curriculum_enabled(): - self.curriculum_scheduler = self._configure_curriculum_scheduler() + if self.curriculum_enabled_legacy(): + self.curriculum_scheduler_legacy = self._configure_curriculum_scheduler_legacy( + ) + + if self.random_ltd_enabled(): + random_ltd_config = self.random_ltd_config() + random_ltd_config[RANDOM_LTD_GLOBAL_BATCH_SIZE] = self.train_batch_size() + random_ltd_config[ + RANDOM_LTD_MICRO_BATCH_SIZE] = self.train_micro_batch_size_per_gpu() + self.random_ltd_scheduler = self._configure_random_ltd_scheduler( + random_ltd_config) # Engine timers @@ -341,6 +393,10 @@ class DeepSpeedEngine(Module): self.flatten = util_ops.flatten self.unflatten = util_ops.unflatten + def destroy(self): + if self.optimizer is not None and hasattr(self.optimizer, 'destroy'): + self.optimizer.destroy() + def _get_model_parameters(self): if self.autotuning_profile_model_info(): self.autotuning_model_info = {} @@ -367,7 +423,6 @@ class DeepSpeedEngine(Module): def get_batch_info(self): """Get all training batch related settings. - Returns: train_batch_size (int): The effective training batch size. This is the amount of data samples that leads to one step of model update. 
@@ -403,10 +458,18 @@ class DeepSpeedEngine(Module): self._config.train_batch_size = train_batch_size self._config.gradient_accumulation_steps = new_gas + def set_data_post_process_func(self, post_process_func): + if self.training_dataloader is not None: + self.training_dataloader.post_process_func = post_process_func + + def set_custom_curriculum_learning_schedule(self, schedule_func_dict): + if self.training_dataloader is not None and self.curriculum_learning_enabled(): + self.training_dataloader.data_sampler.set_custom_curriculum_learning_schedule( + schedule_func_dict) + def get_global_grad_norm(self) -> float: """Return the 2-norm of all gradients. If there is model parallelism, the norm will be global. - The computed norm will be cached and reused until the next step() pass. .. note:: In the presence of model parallelism, this is a collective call @@ -416,6 +479,22 @@ class DeepSpeedEngine(Module): """ return self._global_grad_norm + def __getattr__(self, name): + """ + Pass through attributes defined in the model if they are not overridden by ds-engine. 
+ """ + + _module = {} + if "module" in self.__dict__: + _module = self.__dict__['module'] + if name in dir(self): + return getattr(self, name) + elif name in dir(_module): + return getattr(_module, name) + else: + raise AttributeError( + f"'{type(self).__name__}' object has no attribute '{name}'") + def checkpoint_tag_validation_enabled(self): return self._config.checkpoint_tag_validation_enabled @@ -425,6 +504,14 @@ class DeepSpeedEngine(Module): def elasticity_enabled(self): return self._config.elasticity_enabled + def is_elastic_model_parallel_supported(self): + if self.elasticity_enabled(): + # Add code for finding number of GPUs per node automatically + if self._config.num_gpus_per_node % self._config.elastic_model_parallel_size == 0: + return True + else: + return False + def pld_enabled(self): return self._config.pld_enabled @@ -461,59 +548,64 @@ class DeepSpeedEngine(Module): def eigenvalue_layer_num(self): return self._config.eigenvalue_layer_num - def curriculum_enabled(self): - return self._config.curriculum_enabled + def curriculum_enabled_legacy(self): + return self._config.curriculum_enabled_legacy - def curriculum_params(self): - return self._config.curriculum_params + def curriculum_params_legacy(self): + return self._config.curriculum_params_legacy - def tensorboard_enabled(self): - return self._config.tensorboard_enabled + def data_efficiency_enabled(self): + return self._config.data_efficiency_enabled - def tensorboard_output_path(self): - return self._config.tensorboard_output_path + def data_efficiency_config(self): + return self._config.data_efficiency_config - def tensorboard_job_name(self): - return self._config.tensorboard_job_name + def data_sampling_enabled(self): + return self._config.data_efficiency_config[DATA_SAMPLING][DATA_SAMPLING_ENABLED] - def get_summary_writer( - self, - name="DeepSpeedJobName", - base=os.path.join(os.path.expanduser("~"), - "tensorboard"), - ): - if self.tensorboard_output_path(): - base_dir = 
self.tensorboard_output_path() - job_name = self.tensorboard_job_name() - log_dir = os.path.join(base_dir, job_name) - else: - if self.tensorboard_job_name(): - name = self.tensorboard_job_name() - - # Infrastructure-specific job-id - if "DLWS_JOB_ID" in os.environ: - infra_job_id = os.environ["DLWS_JOB_ID"] - elif "DLTS_JOB_ID" in os.environ: - infra_job_id = os.environ["DLTS_JOB_ID"] - else: - infra_job_id = "unknown-job-id" + def data_sampling_config(self): + return self._config.data_efficiency_config[DATA_SAMPLING] - summary_writer_dir_name = os.path.join(infra_job_id, "logs") - log_dir = os.path.join(base, summary_writer_dir_name, name) + def curriculum_learning_enabled(self): + return self._config.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][ + CURRICULUM_LEARNING_ENABLED] - os.makedirs(log_dir, exist_ok=True) - try: - # torch.utils.tensorboard will fail if `tensorboard` is not available, - # see their docs for more details: https://pytorch.org/docs/1.8.0/tensorboard.html - import tensorboard - except ImportError: - print( - 'If you want to use tensorboard logging please `pip install tensorboard`' - ) - raise - from torch.utils.tensorboard import SummaryWriter + def curriculum_learning_config(self): + return self._config.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING] + + def random_ltd_enabled(self): + return self._config.data_efficiency_config[DATA_ROUTING][RANDOM_LTD][ + RANDOM_LTD_ENABLED] - return SummaryWriter(log_dir=log_dir) + def random_ltd_config(self): + return self._config.data_efficiency_config[DATA_ROUTING][RANDOM_LTD] + + def random_ltd_initialize(self): + assert self.random_ltd_enabled() + random_ltd_config = self.random_ltd_config() + random_ltd_queue = deque( + [x for x in sorted(random_ltd_config[RANDOM_LTD_LAYER_ID])]) + count = 0 + for name, layer in self.module.named_modules(): + if isinstance(layer, RandomLayerTokenDrop): + if len(random_ltd_queue) != 0 and str( + random_ltd_queue[0]) in name: ###[1,2,3] + 
layer.init_config(random_ltd_config, + self.random_ltd_scheduler, + count) + random_ltd_queue.popleft() + count += 1 + + if random_ltd_config[RANDOM_LTD_LAYER_NUM] != count: + raise ValueError( + f'random_ltd_layer_num {random_ltd_config[RANDOM_LTD_LAYER_NUM]} must be \ + equivalent to the len of random_ltd_layer_id {count}') + + if random_ltd_config[RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE][ + RANDOM_LTD_LAYER_TOKEN_LR_ENABLED]: + assert self.client_lr_scheduler is None + raise ValueError(f'not yet support') + #self.lr_scheduler = lr_schedules.WarmupLayerTokenDecayLR(self.optimizer, self.random_ltd_scheduler) def wall_clock_breakdown(self): return self._config.wall_clock_breakdown @@ -601,18 +693,24 @@ class DeepSpeedEngine(Module): def quantize_training(self): return ( - self._config.quantize_training_enabled, - self._config.quantize_target_bits, - self._config.quantize_start_bits, - self._config.quantize_period, - self._config.quantize_offset, - self._config.quantize_groups, - self._config.fp16_mixed_quantize, - self._config.quantize_change_rate, - self._config.quantize_type, - self._config.quantize_rounding, - self._config.quantize_verbose, - self._config.use_quantizer_kernel, + self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS] + [WEIGHT_QUANTIZE_IN_FORWARD_ENABLED], + self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS] + [WEIGHT_QUANTIZE_ENABLED], + self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS] + [WEIGHT_QUANTIZE_GROUPS], + self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS] + [WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE], + self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS] + [WEIGHT_QUANTIZE_CHANGE_RATIO], + self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS] + [WEIGHT_QUANTIZE_TYPE], + self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS] + [WEIGHT_QUANTIZE_ROUNDING], + 
self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS] + [WEIGHT_QUANTIZE_VERBOSE], + self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS] + [WEIGHT_QUANTIZE_KERNEL], ) def zero_optimization(self): @@ -633,8 +731,18 @@ class DeepSpeedEngine(Module): def zero_offload_param(self): return self._config.zero_config.offload_param + def zero_use_cpu_optimizer(self): + if self._config.zero_config.offload_optimizer is not None: + return self._config.zero_config.offload_optimizer.device in [ + OffloadDeviceEnum.cpu, + OffloadDeviceEnum.nvme + ] + return False + def zero_cpu_offload(self): - return self._config.zero_config.offload_optimizer is not None + if self._config.zero_config.offload_optimizer is not None: + return self._config.zero_config.offload_optimizer.device == OffloadDeviceEnum.cpu + return False def zero_sub_group_size(self): return self._config.zero_config.sub_group_size @@ -649,10 +757,10 @@ class DeepSpeedEngine(Module): return self._config.zero_config.allgather_bucket_size def zero_optimization_partition_gradients(self): - return self.zero_optimization_stage() >= ZERO_OPTIMIZATION_GRADIENTS + return self.zero_optimization_stage() >= ZeroStageEnum.gradients def zero_optimization_partition_weights(self): - return self.zero_optimization_stage() >= ZERO_OPTIMIZATION_WEIGHTS + return self.zero_optimization_stage() >= ZeroStageEnum.weights def zero_contiguous_gradients(self): return self._config.zero_config.contiguous_gradients @@ -675,6 +783,9 @@ class DeepSpeedEngine(Module): def zero_param_persistence_threshold(self): return self._config.zero_config.param_persistence_threshold + def zero_model_persistence_threshold(self): + return self._config.zero_config.model_persistence_threshold + def zero_gather_16bit_weights_on_model_save(self): return self._config.zero_config.gather_16bit_weights_on_model_save @@ -702,12 +813,21 @@ class DeepSpeedEngine(Module): def amp_params(self): return self._config.amp_params + def 
fp16_auto_cast(self): + return self._config.fp16_auto_cast + def loss_scale(self): return self._config.loss_scale def gradient_accumulation_steps(self): return self._config.gradient_accumulation_steps + def use_node_local_storage(self): + return self._config.use_node_local_storage + + def load_universal_checkpoint(self): + return self._config.load_universal_checkpoint + @property def communication_data_type(self): res = self._config.communication_data_type @@ -756,39 +876,71 @@ class DeepSpeedEngine(Module): def aio_config(self): return self._config.aio_config + def get_data_types(self): + model_dtype = torch.float32 + if self.fp16_enabled(): + model_dtype = torch.float16 + elif self.bfloat16_enabled(): + model_dtype = torch.bfloat16 + + if self._config.grad_accum_dtype == None: + if model_dtype == torch.bfloat16 and not self.zero_optimization(): + grad_accum_dtype = torch.float32 + else: + grad_accum_dtype = model_dtype + else: + grad_accum_dtype = DtypeEnum(self._config.grad_accum_dtype).value + + return (model_dtype, grad_accum_dtype) + def _configure_lr_scheduler(self, client_lr_scheduler): # First check for scheduler in json configuration lr_scheduler = self._scheduler_from_config(self.optimizer) if lr_scheduler: - if self.global_rank == 0: - logger.info( - f"DeepSpeed using configured LR scheduler = {self.scheduler_name()}") + log_dist( + f"DeepSpeed using configured LR scheduler = {self.scheduler_name()}", + ranks=[0]) self.lr_scheduler = lr_scheduler else: if isinstance(client_lr_scheduler, Callable): - if self.global_rank == 0: - logger.info('DeepSpeed using client callable to create LR scheduler') + log_dist('DeepSpeed using client callable to create LR scheduler', + ranks=[0]) self.lr_scheduler = client_lr_scheduler(self.basic_optimizer) else: - if self.global_rank == 0: - logger.info('DeepSpeed using client LR scheduler') + log_dist('DeepSpeed using client LR scheduler', ranks=[0]) self.lr_scheduler = client_lr_scheduler log_dist(f'DeepSpeed LR 
Scheduler = {self.lr_scheduler}', ranks=[0]) def _configure_checkpointing(self, dist_init_required): + self.checkpoint_engine = TorchCheckpointEngine() + + if self._config is not None and self._config.nebula_config.enabled: + try: + from deepspeed.runtime.checkpoint_engine.nebula_checkpoint_engine import \ + NebulaCheckpointEngine + self.checkpoint_engine = NebulaCheckpointEngine( + config_params=self._config.nebula_config) + except ImportError as err: + logger.error( + f"No torch_nebula was found! Will fall back to torch.save. Details: {err}" + ) + self.checkpoint_engine = TorchCheckpointEngine() dp_rank = self.global_rank if self.mpu: dp_rank = self.mpu.get_data_parallel_rank() + rank = self.local_rank if self.use_node_local_storage() else dp_rank + # only the first data parallel process needs to store the model checkpoint + # if you want to use node local storage this must be done by rank 0 on each + # node self.save_non_zero_checkpoint = ( - dp_rank == 0) or self.zero_optimization_partition_weights() + rank == 0) or self.zero_optimization_partition_weights() if self.zero_optimization() or self.bfloat16_enabled(): - param_rank = torch.distributed.get_rank( - group=self.optimizer.dp_process_group) + param_rank = dist.get_rank(group=self.optimizer.dp_process_group) # Only the first parameter parallel process needs to store the # optimizer state checkpoints for zero @@ -817,14 +969,14 @@ class DeepSpeedEngine(Module): args, 'device_rank') else self.local_rank if device_rank >= 0: - torch.cuda.set_device(device_rank) - self.device = torch.device("cuda", device_rank) + get_accelerator().set_device(device_rank) + self.device = torch.device(get_accelerator().device_name(), device_rank) self.world_size = dist.get_world_size() self.global_rank = dist.get_rank() else: self.world_size = 1 self.global_rank = 0 - self.device = torch.device("cuda") + self.device = torch.device(get_accelerator().device_name()) # Configure based on command line arguments def 
_configure_with_arguments(self, args, mpu): @@ -863,7 +1015,7 @@ class DeepSpeedEngine(Module): args.deepspeed_config = args.deepscale_config assert "LOCAL_RANK" in os.environ or "OMPI_COMM_WORLD_LOCAL_RANK" in os.environ, "DeepSpeed requires the LOCAL_RANK environment " \ - "variable, it is set by the deepspeed launcher, deepspeed.init_distributed, or the torch.distributed launcher. If using a " \ + "variable, it is set by the deepspeed launcher, deepspeed.init_distributed, or the torch's launcher. If using a " \ "different launcher please ensure LOCAL_RANK is set prior to initializing deepspeed." if hasattr(args, 'local_rank') and args.local_rank != None: @@ -881,21 +1033,30 @@ class DeepSpeedEngine(Module): args, "deepspeed_config") and args.deepspeed_config is not None ), "DeepSpeed requires --deepspeed_config to specify configuration file" - assert os.path.isfile( - args.deepspeed_config - ), "DeepSpeed configuration file: {} is not an existing file".format( - args.deepspeed_config - ) - def _is_supported_optimizer(self, optimizer_name): return (optimizer_name in DEEPSPEED_OPTIMIZERS or getattr(torch.optim, optimizer_name, None) is not None) + def _supported_optims(self): + FairseqOptimizer = None + try: + from fairseq.optim.fairseq_optimizer import FairseqOptimizer + except ImportError: + pass + + expected_optim_types = [Optimizer] + if FairseqOptimizer: + # fairseq optims are not torch.optim objects + expected_optim_types.append(FairseqOptimizer) + return expected_optim_types + # Validate configuration based on command line arguments def _do_sanity_check(self): - assert isinstance(self.client_optimizer, (type(None), Optimizer, Callable)), \ + expected_optim_types = self._supported_optims() + expected_optim_types += [type(None), Callable] + assert isinstance(self.client_optimizer, tuple(expected_optim_types)), \ f'Client Optimizer is of unexpected type {type(self.client_optimizer)}' if not self.client_optimizer: @@ -940,6 +1101,7 @@ class 
DeepSpeedEngine(Module): @staticmethod def __check_params(model: Module, dtype: torch.dtype) -> None: + return if not all(param.dtype == dtype for param in model.parameters()) and dist.get_rank() == 0: raise ValueError( @@ -948,31 +1110,27 @@ class DeepSpeedEngine(Module): f"{[(n, p.dtype) for n, p in model.named_parameters() if p.dtype != dtype]}" ) + def _set_client_model(self, model): + # register client model in _modules so that nn.module methods work correctly + modules = self.__dict__.get('_modules') + modules['module'] = model + # register module attribute in engine but avoid getattr + self.__dict__['module'] = model + def _configure_distributed_model(self, model): - self.module = model + self._set_client_model(model) + if self.fp16_enabled(): if self.zero_optimization_partition_weights() and any( [hasattr(param, "ds_id") for param in self.module.parameters()]): - if not all( - [param.dtype == torch.half for param in self.module.parameters()]): - names = [ - n for n, - p in self.module.named_parameters() if p.dtype != torch.half - ] - raise ValueError( - f"fp16 is enabled but the following parameters have dtype that is not fp16: {', '.join(names)}" - ) + self.__check_params(self.module, torch.half) self.module.half() elif self.bfloat16_enabled(): if self.zero_optimization_partition_weights() and any( hasattr(param, 'ds_id') for param in self.module.parameters()): self.__check_params(self.module, torch.bfloat16) - if self.zero_optimization_stage() == 0 and not self.pipeline_parallelism: - raise NotImplementedError( - "When not running ZeRO, BF16 training support is only supported for Pipeline parallelism" - ) self.module.bfloat16() else: self.__check_params(self.module, torch.float) @@ -1031,78 +1189,119 @@ class DeepSpeedEngine(Module): ]) assert occurrence <= 1, f"Parameter with name: {name} occurs multiple times in optimizer.param_groups. Make sure it only appears once to prevent undefined behaviour." 
+ def _do_optimizer_sanity_check(self, basic_optimizer): + model_dtype, grad_accum_dtype = self.get_data_types() + zero_enabled = self.zero_optimization() + amp_enabled = self.amp_enabled() + # config based assertions + assert ( + not (amp_enabled and zero_enabled) + ), "Amp and ZeRO are not currently compatible, please use (legacy) fp16 mode which performs similar to amp opt_mode=O2" + if zero_enabled: + if not is_zero_supported_optimizer(basic_optimizer): + assert ( + self.zero_allow_untested_optimizer() + ), 'You are using an untested ZeRO Optimizer. Please add <"zero_allow_untested_optimizer": true> in the configuration file to use it.' + + if self.global_rank == 0: + logger.warning( + "**** You are using ZeRO with an untested optimizer, proceed with caution *****" + ) + + if model_dtype == torch.bfloat16 and grad_accum_dtype == torch.float32 and self.zero_optimization_stage( + ) == 1: + return BFLOAT16 + + if model_dtype != grad_accum_dtype: + raise NotImplementedError( + "Model data type and gradient accumulation data type must be equal to use ZeRO" + ) + return ZERO_OPTIMIZATION + elif amp_enabled: + if model_dtype != grad_accum_dtype: + raise NotImplementedError( + "Model data type and gradient accumulation data type must be equal to use Amp" + ) + if model_dtype == torch.bfloat16 or model_dtype == torch.float16: + raise NotImplementedError( + "Cannot enable both amp with (legacy) fp16 or bfloat16 mode") + try: + logger.info("Initializing Apex amp from: {}".format(amp.__path__)) + except NameError: + # If apex/amp is available it will be imported above + raise RuntimeError( + "Unable to import apex/amp, please make sure it is installed") + return AMP + # data type checks + elif model_dtype == grad_accum_dtype: + if model_dtype == torch.bfloat16: + raise NotImplementedError( + "Bfloat16 wrapper must use a gradient accumulation type of fp32, enable ZeRO to use Bfloat16 gradient accumulation" + ) + if model_dtype == torch.float16: + return FP16 + # else 
optimizer_wrapper = None + elif model_dtype == torch.bfloat16 and grad_accum_dtype == torch.float32: + return BFLOAT16 + else: + raise NotImplementedError( + "unsupported mix of model dtype and gradient accummulation type") + + return None + # Configure optimizer def _configure_optimizer(self, client_optimizer, model_parameters): if client_optimizer is not None: - if isinstance(client_optimizer, Optimizer): + if isinstance(client_optimizer, tuple(self._supported_optims())): client_optimizer.param_groups[:] = [ pg for pg in client_optimizer.param_groups if len(pg["params"]) != 0 ] - if self.global_rank == 0: - logger.info( - "Removing param_group that has no 'params' in the client Optimizer" - ) + log_dist( + "Removing param_group that has no 'params' in the client Optimizer", + ranks=[0]) basic_optimizer = client_optimizer - if self.global_rank == 0: - logger.info('Using client Optimizer as basic optimizer') + log_dist('Using client Optimizer as basic optimizer', ranks=[0]) else: basic_optimizer = client_optimizer(model_parameters) - if self.global_rank == 0: - logger.info('Using client callable to create basic optimizer') + log_dist('Using client callable to create basic optimizer', ranks=[0]) else: basic_optimizer = self._configure_basic_optimizer(model_parameters) - if self.global_rank == 0: - logger.info( - "Using DeepSpeed Optimizer param name {} as basic optimizer".format( - self.optimizer_name())) + log_dist( + f"Using DeepSpeed Optimizer param name {self.optimizer_name()} as basic optimizer", + ranks=[0]) self._check_for_duplicates(basic_optimizer) self.basic_optimizer = basic_optimizer - if self.global_rank == 0: - logger.info("DeepSpeed Basic Optimizer = {}".format( - basic_optimizer.__class__.__name__)) + log_dist("DeepSpeed Basic Optimizer = {}".format( + basic_optimizer.__class__.__name__), + ranks=[0]) - if self.zero_optimization(): - assert ( - not self.amp_enabled() - ), "Amp and ZeRO are not currently compatible, please use (legacy) fp16 mode which 
performs similar to amp opt_mode=O2" - if not is_zero_supported_optimizer(basic_optimizer): - assert ( - self.zero_allow_untested_optimizer() - ), 'You are using an untested ZeRO Optimizer. Please add <"zero_allow_untested_optimizer": true> in the configuration file to use it.' + optimizer_wrapper = self._do_optimizer_sanity_check(basic_optimizer) - if self.global_rank == 0: - logger.warning( - "**** You are using ZeRO with an untested optimizer, proceed with caution *****" - ) + if optimizer_wrapper == ZERO_OPTIMIZATION: self.optimizer = self._configure_zero_optimizer(basic_optimizer) - elif self.amp_enabled(): - assert not (self.fp16_enabled() or self.bfloat16_enabled()), "Cannot enable both amp with (legacy) fp16 or bfloat16 mode" + elif optimizer_wrapper == AMP: amp_params = self.amp_params() - if self.global_rank == 0: - logger.info(f"Initializing AMP with these params: {amp_params}") - try: - logger.info("Initializing Apex amp from: {}".format(amp.__path__)) - except NameError: - # If apex/amp is available it will be imported above - raise RuntimeError( - "Unable to import apex/amp, please make sure it is installed") - self.module, self.optimizer = amp.initialize( + log_dist(f"Initializing AMP with these params: {amp_params}", ranks=[0]) + model, self.optimizer = amp.initialize( self.module, basic_optimizer, **amp_params ) + self._set_client_model(model) self._broadcast_model() # TODO: maybe need to broadcast experts differently? 
- elif self.fp16_enabled(): + elif optimizer_wrapper == FP16: self.optimizer = self._configure_fp16_optimizer(basic_optimizer) - elif self.bfloat16_enabled(): + elif optimizer_wrapper == BFLOAT16: self.optimizer = self._configure_bf16_optimizer(basic_optimizer) else: self.optimizer = basic_optimizer + log_dist("DeepSpeed Final Optimizer = {}".format(self.optimizer_name()), ranks=[0]) + self.compression_scheduler = self._configure_compression_scheduler() self.quantizer = self._configure_quantization() def _configure_basic_optimizer(self, model_parameters): @@ -1131,7 +1330,7 @@ class DeepSpeedEngine(Module): optimizer = torch.optim.AdamW(model_parameters, **optimizer_parameters) else: - if self.zero_cpu_offload(): + if self.zero_use_cpu_optimizer(): if self.optimizer_name() == ADAGRAD_OPTIMIZER: from deepspeed.ops.adagrad import DeepSpeedCPUAdagrad optimizer = DeepSpeedCPUAdagrad(model_parameters, @@ -1185,13 +1384,16 @@ class DeepSpeedEngine(Module): optimizer = torch_optimizer(model_parameters, **optimizer_parameters) return optimizer + def _configure_compression_scheduler(self): + return compression_scheduler(self.module, self._config.compression_config) + + def _configure_random_ltd_scheduler(self, configs): + return RandomLTDScheduler(configs) + def _configure_quantization(self): ( + quantize_weight_in_forward, quantize_enabled, - q_target_bits, - q_start_bits, - q_period, - q_offset, q_groups, q_mixed_fp16, q_change_ratio, @@ -1200,15 +1402,13 @@ class DeepSpeedEngine(Module): q_verbose, use_quantizer_kernel, ) = self.quantize_training() + if quantize_enabled and not quantize_weight_in_forward: + assert self.fp16_enabled(), "MoQ (quantize in optimization step) weight quantization is only supported for FP16" quantizer = None - if quantize_enabled: + if quantize_enabled and not quantize_weight_in_forward: from deepspeed.runtime.quantize import Quantizer quantizer = Quantizer( - q_target_bits, - q_start_bits, - q_period, - q_offset, q_groups, q_mixed_fp16, 
q_change_ratio, @@ -1232,7 +1432,7 @@ class DeepSpeedEngine(Module): if isinstance(optimizer, fused_opts) \ or self.optimizer_name() in [ONEBIT_ADAM_OPTIMIZER, ZERO_ONE_ADAM_OPTIMIZER]: if self.dynamic_loss_scale(): - log_dist("Creating fp16 optimizer with dynamic loss scale", ranks=[0]) + log_dist(f'Creating fp16 optimizer with dynamic loss scale', ranks=[0]) timers = self.timers if self.wall_clock_breakdown() else None optimizer = FP16_Optimizer( optimizer, @@ -1248,10 +1448,8 @@ class DeepSpeedEngine(Module): ) else: log_dist( - "Creating fp16 optimizer with static loss scale: {}".format( - self.loss_scale()), - ranks=[0], - ) + f'Creating fp16 optimizer with static loss scale: {self.loss_scale()}', + ranks=[0]) optimizer = FP16_Optimizer( optimizer, deepspeed=self, @@ -1262,7 +1460,7 @@ class DeepSpeedEngine(Module): has_moe_layers=self.has_moe_layers, ) else: - log_dist("Creating fp16 unfused optimizer with dynamic loss scale", + log_dist(f'Creating fp16 unfused optimizer with dynamic loss scale', ranks=[0]) optimizer = FP16_UnfusedOptimizer( optimizer, @@ -1280,11 +1478,15 @@ class DeepSpeedEngine(Module): def _configure_bf16_optimizer(self, optimizer): clip_grad = self.gradient_clipping() - if self.global_rank == 0: - logger.info('Creating unfused BF16 optimizer') + if optimizer is None: + optimizer = DummyOptim(list(self.module.parameters())) + + log_dist('Creating BF16 optimizer', ranks=[0]) + timers = self.timers if self.wall_clock_breakdown() else None optimizer = BF16_Optimizer( optimizer, + self.param_names, mpu=self.mpu, clip_grad=clip_grad, allgather_bucket_size=self.zero_allgather_bucket_size(), @@ -1295,8 +1497,7 @@ class DeepSpeedEngine(Module): def _configure_zero_optimizer(self, optimizer): zero_stage = self.zero_optimization_stage() - log_dist('Creating fp16 ZeRO stage {} optimizer'.format(zero_stage), ranks=[0]) - assert self.communication_data_type in (torch.float16, torch.bfloat16), "ZeRO supports only 'communication_data_type': ['fp16', 
'bfp16']" + model_dtype, grad_accum_dtype = self.get_data_types() timers = self.timers if self.wall_clock_breakdown() else None if optimizer is None: @@ -1307,17 +1508,21 @@ class DeepSpeedEngine(Module): "The deprecated version of ZeRO Stage 1 is not supported in deepspeed >= 0.5.9. Please downgrade to a version less than 0.5.9 if you need to use this deprecated version of ZeRO." ) - if zero_stage <= ZERO_OPTIMIZATION_GRADIENTS: + if zero_stage <= ZeroStageEnum.gradients: overlap_comm = self.zero_overlap_comm() contiguous_gradients = self.zero_contiguous_gradients() round_robin_gradients = self.zero_round_robin_gradients() - assert not isinstance(optimizer, DummyOptim), "zero stage 2 requires an optimizer" + assert not isinstance(optimizer, DummyOptim), "zero stage {} requires an optimizer".format(zero_stage) + log_dist(f'Creating {model_dtype} ZeRO stage {zero_stage} optimizer', + ranks=[0]) # Overlap and contiguous grads are meaningless in stage 1 and are ignored - if zero_stage == ZERO_OPTIMIZATION_OPTIMIZER_STATES: + if zero_stage == ZeroStageEnum.optimizer_states: overlap_comm = False - contiguous_gradients = False round_robin_gradients = False + # Non-MoE requires contiguous grads to be disabled w. stage 1 + if not self.has_moe_layers: + contiguous_gradients = False if isinstance(self.module, PipelineModule): if overlap_comm: @@ -1325,9 +1530,9 @@ class DeepSpeedEngine(Module): "Pipeline parallelism does not support overlapped communication, will be disabled." 
) overlap_comm = False - optimizer = DeepSpeedZeroOptimizer( optimizer, + self.param_names, timers=timers, static_loss_scale=self.loss_scale(), dynamic_loss_scale=self.dynamic_loss_scale(), @@ -1349,7 +1554,7 @@ class DeepSpeedEngine(Module): gradient_predivide_factor=self.gradient_predivide_factor(), gradient_accumulation_steps=self.gradient_accumulation_steps(), ignore_unused_parameters=self.zero_ignore_unused_parameters(), - partition_grads=zero_stage == ZERO_OPTIMIZATION_GRADIENTS, + partition_grads=zero_stage == ZeroStageEnum.gradients, round_robin_gradients=round_robin_gradients, has_moe_layers=self.has_moe_layers, fp16_master_weights_and_gradients=self.fp16_master_weights_and_gradients( @@ -1357,38 +1562,54 @@ class DeepSpeedEngine(Module): communication_data_type=self.communication_data_type, elastic_checkpoint=self.zero_elastic_checkpoint()) - elif zero_stage == ZERO_OPTIMIZATION_WEIGHTS: + elif zero_stage == ZeroStageEnum.weights: assert not self.has_moe_layers, "MoE not supported with Stage 3" - logger.info("Initializing ZeRO Stage 3") if dist.get_rank() == 0 else None - from deepspeed.runtime.zero.stage3 import DeepSpeedZeroOptimizer_Stage3 - - optimizer = DeepSpeedZeroOptimizer_Stage3( - self.module, - optimizer, - timers=timers, - ds_config=self.config, - static_loss_scale=self.loss_scale(), - dynamic_loss_scale=self.dynamic_loss_scale(), - dynamic_loss_args=self.dynamic_loss_scale_args(), - clip_grad=self.gradient_clipping(), - contiguous_gradients=self.zero_contiguous_gradients(), - reduce_bucket_size=self.zero_reduce_bucket_size(), - prefetch_bucket_size=self.zero_prefetch_bucket_size(), - max_reuse_distance=self.zero_max_reuse_distance(), - max_live_parameters=self.zero_max_live_parameters(), - param_persistence_threshold=self.zero_param_persistence_threshold(), - dp_process_group=self.data_parallel_group, - reduce_scatter=self.zero_reduce_scatter(), - overlap_comm=self.zero_overlap_comm(), - offload_optimizer_config=self.zero_offload_optimizer(), 
- offload_param_config=self.zero_offload_param(), - sub_group_size=self.zero_sub_group_size(), - mpu=self.mpu, - postscale_gradients=self.postscale_gradients(), - gradient_predivide_factor=self.gradient_predivide_factor(), - gradient_accumulation_steps=self.gradient_accumulation_steps(), - aio_config=self.aio_config(), - communication_data_type=self.communication_data_type) + if isinstance(optimizer, DummyOptim): + log_dist("Creating ZeRO Offload", ranks=[0]) + optimizer = DeepSpeedZeRoOffload( + self.module, + timers=timers, + ds_config=self.config, + overlap_comm=self.zero_overlap_comm(), + prefetch_bucket_size=self.zero_prefetch_bucket_size(), + max_reuse_distance=self.zero_max_reuse_distance(), + max_live_parameters=self.zero_max_live_parameters(), + param_persistence_threshold=self.zero_param_persistence_threshold(), + model_persistence_threshold=self.zero_model_persistence_threshold(), + offload_param_config=self.zero_offload_param(), + mpu=self.mpu) + else: + log_dist(f'Creating {model_dtype} ZeRO stage {zero_stage} optimizer', + ranks=[0]) + from deepspeed.runtime.zero.stage3 import DeepSpeedZeroOptimizer_Stage3 + optimizer = DeepSpeedZeroOptimizer_Stage3( + self.module, + optimizer, + timers=timers, + ds_config=self.config, + static_loss_scale=self.loss_scale(), + dynamic_loss_scale=self.dynamic_loss_scale(), + dynamic_loss_args=self.dynamic_loss_scale_args(), + clip_grad=self.gradient_clipping(), + contiguous_gradients=self.zero_contiguous_gradients(), + reduce_bucket_size=self.zero_reduce_bucket_size(), + prefetch_bucket_size=self.zero_prefetch_bucket_size(), + max_reuse_distance=self.zero_max_reuse_distance(), + max_live_parameters=self.zero_max_live_parameters(), + param_persistence_threshold=self.zero_param_persistence_threshold(), + model_persistence_threshold=self.zero_model_persistence_threshold(), + dp_process_group=self.data_parallel_group, + reduce_scatter=self.zero_reduce_scatter(), + overlap_comm=self.zero_overlap_comm(), + 
offload_optimizer_config=self.zero_offload_optimizer(), + offload_param_config=self.zero_offload_param(), + sub_group_size=self.zero_sub_group_size(), + mpu=self.mpu, + postscale_gradients=self.postscale_gradients(), + gradient_predivide_factor=self.gradient_predivide_factor(), + gradient_accumulation_steps=self.gradient_accumulation_steps(), + aio_config=self.aio_config(), + communication_data_type=self.communication_data_type) else: raise NotImplementedError("ZeRO stage {} not implemented".format(zero_stage)) @@ -1413,8 +1634,8 @@ class DeepSpeedEngine(Module): return pld - def _configure_curriculum_scheduler(self): - scheduler = CurriculumScheduler(self.curriculum_params()) + def _configure_curriculum_scheduler_legacy(self): + scheduler = CurriculumScheduler(self.curriculum_params_legacy()) return scheduler @staticmethod @@ -1432,11 +1653,9 @@ class DeepSpeedEngine(Module): def was_step_applied(self) -> bool: """Returns True if the latest ``step()`` produced in parameter updates. - Note that a ``False`` return is not an error condition. Steps are frequently no-ops, such as between gradient accumulation boundaries or when overflows occur. - Returns: bool: Whether the latest ``step()`` modified model parameters. """ @@ -1454,9 +1673,6 @@ class DeepSpeedEngine(Module): or self.is_iterable_style_dataset(dataset)): raise ValueError("Training data must be a torch Dataset") - if data_sampler is None and (route == ROUTE_PREDICT or route == ROUTE_EVAL): - data_sampler = torch.utils.data.SequentialSampler(dataset) - if batch_size is None: batch_size = self.train_micro_batch_size_per_gpu() @@ -1469,23 +1685,50 @@ class DeepSpeedEngine(Module): deepspeed_io_timer = self.tput_timer # If mpu is provided, forward world size and parallel rank to sampler. 
- data_parallel_world_size = None - data_parallel_rank = None + data_parallel_world_size = self.dp_world_size + data_parallel_rank = self.global_rank if self.mpu is not None: data_parallel_world_size = self.mpu.get_data_parallel_world_size() data_parallel_rank = self.mpu.get_data_parallel_rank() - return DeepSpeedDataLoader(dataset=dataset, - batch_size=batch_size, - pin_memory=pin_memory, - collate_fn=collate_fn, - local_rank=self.local_rank, - tput_timer=deepspeed_io_timer, - num_local_io_workers=num_local_io_workers, - data_sampler=data_sampler, - data_parallel_world_size=data_parallel_world_size, - data_parallel_rank=data_parallel_rank, - dataloader_drop_last=self.dataloader_drop_last()) + if data_sampler is None and (route == ROUTE_PREDICT or route == ROUTE_EVAL): + data_sampler = torch.utils.data.DistributedSampler( + dataset, + num_replicas=data_parallel_world_size, + rank=data_parallel_rank, + shuffle=False, + ) + + deepspeed_dataloader_config = {} + if self.curriculum_learning_enabled(): + deepspeed_dataloader_config = { + CURRICULUM_LEARNING: + self.curriculum_learning_enabled(), + DATA_EFFICIENCY: + self.data_efficiency_config(), + DATA_PARALLEL_GROUP: + self.data_parallel_group, + GRADIENT_ACCUMULATION_STEPS: + self.gradient_accumulation_steps(), + GLOBAL_RANK: + self.global_rank, + DATA_SAMPLING_NUM_WORKERS: + self.data_sampling_config()[DATA_SAMPLING_NUM_WORKERS] + } + + return DeepSpeedDataLoader( + dataset=dataset, + batch_size=batch_size, + pin_memory=pin_memory, + collate_fn=collate_fn, + local_rank=self.local_rank, + tput_timer=deepspeed_io_timer, + num_local_io_workers=num_local_io_workers, + data_sampler=data_sampler, + data_parallel_world_size=data_parallel_world_size, + data_parallel_rank=data_parallel_rank, + dataloader_drop_last=self.dataloader_drop_last(), + deepspeed_dataloader_config=deepspeed_dataloader_config) def train(self, mode=True): r"""""" @@ -1536,23 +1779,41 @@ class DeepSpeedEngine(Module): == 
self.flops_profiler_profile_step() and self.global_rank == 0) + # used to check quantization happens at step 0! + if self.global_steps == 0 and hasattr(self, "compression_scheduler"): + self.compression_scheduler.step(step_zero_check=True) + if self.quantizer: + tensor_to_quantize = self.optimizer.bit16_groups if self.zero_optimization_stage( + ) == 2 else self.optimizer.fp16_groups + if self.compression_scheduler.weight_quantization_enabled: + self.quantizer.quantize( + tensor_to_quantize, + (self.optimizer.overflow if self.fp16_enabled() else False), + self.eigenvalue_enabled(), + None, + ) + if flops_profiler_active: self.flops_profiler.start_profile(ignore_list=None) - if self.module.training and self.progressive_layer_drop: - kwargs.update(self.progressive_layer_drop.get_state()) + if self.module.training: + if self.progressive_layer_drop: + kwargs.update(self.progressive_layer_drop.get_state()) if self.__class__.__name__ != "PipelineEngine": # TODO: The above if condition is a HACK since for PipelineEngine # it's difficult to inject argument in forward pass. - if self.module.training and self.curriculum_enabled(): - self.curriculum_scheduler.update_difficulty(self.global_steps + 1) - if self.curriculum_params()["curriculum_type"] == "seqlen": + if self.module.training and self.curriculum_enabled_legacy(): + self.curriculum_scheduler_legacy.update_difficulty(self.global_steps + 1) + if self.curriculum_params_legacy()["curriculum_type"] == "seqlen": kwargs.update({ "curriculum_seqlen": - self.curriculum_scheduler.get_current_difficulty() + self.curriculum_scheduler_legacy.get_current_difficulty() }) + if self.module.training and self.random_ltd_enabled(): + self.random_ltd_scheduler.update_seq(self.global_steps) + if self.zero_optimization_partition_weights(): # Enable automated discovery of external parameters by indicating that # we are in a forward pass. 
@@ -1565,6 +1826,9 @@ class DeepSpeedEngine(Module): if self.training_dataloader is None: self.tput_timer.start() + if self.fp16_auto_cast(): + inputs = self._cast_inputs_half(inputs) + loss = self.module(*inputs, **kwargs) if self.zero_optimization_partition_weights(): @@ -1588,6 +1852,22 @@ class DeepSpeedEngine(Module): see_memory_usage("Engine after forward", force=self.memory_breakdown()) return loss + def _cast_inputs_half(self, inputs): + if isinstance(inputs, (list, tuple)): + new_inputs = [] + for v in inputs: + new_inputs.append(self._cast_inputs_half(v)) + return inputs.__class__(new_inputs) + elif isinstance(inputs, dict): + new_inputs = {} + for k, v in inputs.items(): + new_inputs[k] = self._cast_inputs_half(v) + return new_inputs + elif hasattr(inputs, 'half'): + return inputs.half() + else: + return inputs + def print_forward_breakdown(self, fwd_time): gate_time = 0.0 moe_time = 0.0 @@ -1606,9 +1886,9 @@ class DeepSpeedEngine(Module): # TODO: Allreduce/average them across ranks for more accurate timing. 
- # if torch.distributed.get_rank() == 0: + # if deepspeed.comm.get_rank() == 0: log_dist( - f"rank={torch.distributed.get_rank()} time (ms) | forward: {fwd_time:.2f} (forward_moe: {moe_time:.2f}, 1st alltoall: {falltoall:.2f}, 2nd alltoall: {salltoall:.2f}, top-k: {gate_time:.2f})", + f"rank={dist.get_rank()} time (ms) | forward: {fwd_time:.2f} (forward_moe: {moe_time:.2f}, 1st alltoall: {falltoall:.2f}, 2nd alltoall: {salltoall:.2f}, top-k: {gate_time:.2f})", ranks=[0]) @instrument_w_nvtx @@ -1619,41 +1899,49 @@ class DeepSpeedEngine(Module): # Pass (PP) gas boundary flag to optimizer (required for zero) self.optimizer.is_gradient_accumulation_boundary = self.is_gradient_accumulation_boundary( ) - - # ZeRO stage 2 communicates during non gradient accumulation boundaries as well + # ZeRO stage >= 2 communicates during non gradient accumulation boundaries as well if self.zero_optimization_partition_gradients(): self.optimizer.overlapping_partition_gradients_reduce_epilogue() # Communicate only at gradient accumulation boundaries elif self.is_gradient_accumulation_boundary(): - if self.zero_optimization_stage() == ZERO_OPTIMIZATION_OPTIMIZER_STATES: + if self.zero_optimization_stage() == ZeroStageEnum.optimizer_states: self.optimizer.reduce_gradients( pipeline_parallel=self.pipeline_parallelism) else: self.buffered_allreduce_fallback(elements_per_buffer=bucket_size) @instrument_w_nvtx - def backward(self, loss, allreduce_gradients=True, release_loss=False): + def backward(self, + loss, + allreduce_gradients=True, + release_loss=False, + retain_graph=False, + scale_wrt_gas=True): r"""Execute backward pass on the loss - Arguments: loss: Torch tensor on which to execute backward propagation allreduce_gradients: is deprecated, ignored, and will soon be removed' + retain_graph: bool, default: false + forward on user defined choice of retain_graph """ see_memory_usage("Engine before backward", force=self.memory_breakdown()) + if self.scale_wrt_gas is not None: + 
scale_wrt_gas = self.scale_wrt_gas + if not allreduce_gradients: logger.warning( f"Argument `allreduce_gradients` is deprecated, ignored, and will soon be removed" ) # scale loss w.r.t. gradient accumulation if needed - if self.gradient_accumulation_steps() > 1: + if self.gradient_accumulation_steps() > 1 and scale_wrt_gas: loss = self._scale_loss_by_gas(loss.float()) # Log training Loss - if self.tensorboard_enabled(): + if self.monitor.enabled: if self.is_gradient_accumulation_boundary(): if self.global_rank == 0: self.summary_events = [( @@ -1661,9 +1949,7 @@ class DeepSpeedEngine(Module): loss.mean().item() * self.gradient_accumulation_steps(), self.global_samples, )] - for event in self.summary_events: # write_summary_events - self.summary_writer.add_scalar(event[0], event[1], event[2]) - self.summary_writer.flush() + self.monitor.write_events(self.summary_events) self._start_timers(self.engine_timers.backward_timers) @@ -1673,9 +1959,9 @@ class DeepSpeedEngine(Module): self._start_timers(self.engine_timers.backward_inner_timers) if self.zero_optimization(): - self.optimizer.is_gradient_accumulation_boundary = ( - self.is_gradient_accumulation_boundary()) - self.optimizer.backward(loss) + self.optimizer.is_gradient_accumulation_boundary = self.is_gradient_accumulation_boundary( + ) + self.optimizer.backward(loss, retain_graph=retain_graph) elif self.amp_enabled(): # AMP requires delaying unscale when inside gradient accumulation boundaries # https://nvidia.github.io/apex/advanced.html#gradient-accumulation-across-iterations @@ -1683,19 +1969,19 @@ class DeepSpeedEngine(Module): with amp.scale_loss(loss, self.optimizer, delay_unscale=delay_unscale) as scaled_loss: - scaled_loss.backward() + scaled_loss.backward(retain_graph=retain_graph) elif self.fp16_enabled(): if self.eigenvalue_enabled(): self.optimizer.backward(loss, create_graph=True, retain_graph=True) else: - self.optimizer.backward(loss) + self.optimizer.backward(loss, retain_graph=retain_graph) elif 
self.bfloat16_enabled(): self.optimizer.backward(loss) else: if self.eigenvalue_enabled(): loss.backward(create_graph=True, retain_graph=True) else: - loss.backward() + loss.backward(retain_graph=retain_graph) self._stop_timers(self.engine_timers.backward_inner_timers) @@ -1718,11 +2004,14 @@ class DeepSpeedEngine(Module): return loss def is_gradient_accumulation_boundary(self): - """Query whether the current micro-batch is at the boundary of + """ + Query whether the current micro-batch is at the boundary of gradient accumulation, and thus will trigger gradient reductions and an optimizer step. + Returns: bool: if the current step is a gradient accumulation boundary. + """ if self._is_gradient_accumulation_boundary is None: return (self.micro_steps + 1) % \ @@ -1731,14 +2020,13 @@ class DeepSpeedEngine(Module): return self._is_gradient_accumulation_boundary def set_gradient_accumulation_boundary(self, is_boundary): - """Manually overrides the DeepSpeed engine's gradient accumulation boundary state, this is an optional + """ + Manually overrides the DeepSpeed engine's gradient accumulation boundary state, this is an optional feature and should be used with care. The state should be set before to the intended value before each forward/backward. The final fordward/backward should have the boundary state set to True. This style allows client code to only call engine.step() once after all the gradient accumulation passes are complete. See example below: - .. code-block:: python - engine.set_gradient_accumulation_boundary(False) for _ in range(gradient_accumulation_steps - 1): micro_batch = next(data_loader) @@ -1749,7 +2037,6 @@ class DeepSpeedEngine(Module): loss = engine(micro_batch) engine.backward(loss) engine.step() - Arguments: is_boundary (bool): are we at a gradient accumulation boundary or not? 
""" @@ -1787,17 +2074,15 @@ class DeepSpeedEngine(Module): # Quantize the updated parameter if there is no overflow if self.quantizer: - if self.fp16_enabled(): - tensor_to_quantize = self.optimizer.bit16_groups if self.zero_optimization_stage( - ) == 2 else self.optimizer.fp16_groups - else: - tensor_to_quantize = self.optimizer.param_groups - self.quantizer.quantize( - tensor_to_quantize, - (self.optimizer.overflow if self.fp16_enabled() else False), - self.eigenvalue_enabled(), - block_eigenvalue, - ) + tensor_to_quantize = self.optimizer.bit16_groups if self.zero_optimization_stage( + ) == 2 else self.optimizer.fp16_groups + if self.compression_scheduler.weight_quantization_enabled: + self.quantizer.quantize( + tensor_to_quantize, + (self.optimizer.overflow if self.fp16_enabled() else False), + self.eigenvalue_enabled(), + block_eigenvalue, + ) # zero grad in basic optimizer could be unreliable and may not exhibit # the behaviour that we want if self.bfloat16_enabled(): @@ -1822,6 +2107,7 @@ class DeepSpeedEngine(Module): if overflow: self.skipped_steps += 1 else: + self.compression_scheduler.step() if self.lr_scheduler is not None: try: self.lr_scheduler.step(**(lr_kwargs or {})) @@ -1854,7 +2140,7 @@ class DeepSpeedEngine(Module): assert self.optimizer is not None and not isinstance(self.optimizer, DummyOptim), \ "must provide optimizer during init in order to use step" - report_progress = self.global_rank == 0 if self.global_rank else True + report_progress = False self._step_applied = False # assume False, will flip to True @@ -1881,19 +2167,21 @@ class DeepSpeedEngine(Module): else: self._take_model_step(lr_kwargs) - self.tput_timer.stop(report_progress) + report_progress = self.global_rank == 0 if self.global_rank else True + + self.tput_timer.stop(global_step=self.is_gradient_accumulation_boundary(), + report_speed=report_progress) self._stop_timers(self.engine_timers.step_timers) # Log learning rate - if self.tensorboard_enabled(): + if 
self.monitor.enabled: if self.is_gradient_accumulation_boundary(): if self.global_rank == 0: self.summary_events = [(f"Train/Samples/lr", self.get_lr()[0], self.global_samples)] - for event in self.summary_events: # write_summary_events - self.summary_writer.add_scalar(event[0], event[1], event[2]) + if self.fp16_enabled() and hasattr(self.optimizer, "cur_scale"): self.summary_events.append(( f"Train/Samples/loss_scale", @@ -1905,16 +2193,12 @@ class DeepSpeedEngine(Module): self.eigenvalue_gas_boundary_resolution()): ev_values = self.block_eigenvalue.values() for i in range(len(ev_values)): - self.summary_writer.add_scalar( + self.summary_events.append(( f"Train/Eigenvalues/ModelBlockParam_{i}", self.ev_values[i][0], self.global_samples, - ) - self.summary_writer.flush() - - for event in self.summary_events: # write_summary_events - self.summary_writer.add_scalar(event[0], event[1], event[2]) - self.summary_writer.flush() + )) + self.monitor.write_events(self.summary_events) # Check flops profiling if flops_profiler_active: @@ -1942,12 +2226,11 @@ class DeepSpeedEngine(Module): if self.wall_clock_breakdown() or self.flops_profiler_enabled(): # Log global timing and reset if self.is_gradient_accumulation_boundary(): - if self.tensorboard_enabled(): - self._write_tensorboard() + if self.monitor.enabled: + self._write_monitor() if self.has_moe_layers: - fwd_time = self.timers(FORWARD_GLOBAL_TIMER).elapsed( - reset=False) * 1000 + fwd_time = self.timers(FORWARD_GLOBAL_TIMER).elapsed(reset=False) self.print_forward_breakdown(fwd_time=fwd_time) self.timers.log(self.engine_timers.global_timers) @@ -1977,49 +2260,48 @@ class DeepSpeedEngine(Module): titer = msg[FORWARD_GLOBAL_TIMER] + msg[BACKWARD_GLOBAL_TIMER] + msg[ STEP_GLOBAL_TIMER] msg["latency"] = titer - msg["FLOPS_per_gpu"] = self.flops * self.gradient_accumulation_steps( + msg["FLOPS_per_gpu"] = self.flops * 1_000_000 * self.gradient_accumulation_steps( ) / titer - msg["throughput"] = self.train_batch_size() * 
1000 / \ + msg["throughput"] = self.train_batch_size() * 1_000_000 / \ msg["latency"] print_json_dist(msg, [0], path=self.autotuning_metric_path()) + log_dist( + f"Wrote metrics to {self.autotuning_metric_path()}, {os.path.abspath(self.autotuning_metric_path())}", + ranks=[0]) import atexit atexit.register(print, "Autotuning: done with running current ds config.") exit() - def _write_tensorboard(self): + def _write_monitor(self): if self.global_rank == 0: self.summary_events = [ ( f"Train/Samples/elapsed_time_ms_forward", - self.timers(FORWARD_GLOBAL_TIMER).elapsed(reset=False) * 1000.0, + self.timers(FORWARD_GLOBAL_TIMER).elapsed(reset=False), self.global_samples, ), ( f"Train/Samples/elapsed_time_ms_backward", - self.timers(BACKWARD_GLOBAL_TIMER).elapsed(reset=False) * 1000.0, + self.timers(BACKWARD_GLOBAL_TIMER).elapsed(reset=False), self.global_samples, ), ( f"Train/Samples/elapsed_time_ms_backward_inner", - self.timers(BACKWARD_INNER_GLOBAL_TIMER).elapsed(reset=False) * - 1000.0, + self.timers(BACKWARD_INNER_GLOBAL_TIMER).elapsed(reset=False), self.global_samples, ), ( f"Train/Samples/elapsed_time_ms_backward_allreduce", - self.timers(BACKWARD_REDUCE_GLOBAL_TIMER).elapsed(reset=False) * - 1000.0, + self.timers(BACKWARD_REDUCE_GLOBAL_TIMER).elapsed(reset=False), self.global_samples, ), ( f"Train/Samples/elapsed_time_ms_step", - self.timers(STEP_GLOBAL_TIMER).elapsed(reset=False) * 1000.0, + self.timers(STEP_GLOBAL_TIMER).elapsed(reset=False), self.global_samples, ), ] - for event in self.summary_events: # write_summary_events - self.summary_writer.add_scalar(event[0], event[1], event[2]) - self.summary_writer.flush() + self.monitor.write_events(self.summary_events) def _get_optimizer_param(self, param_name): result = [] @@ -2121,7 +2403,8 @@ class DeepSpeedEngine(Module): grad_data = param.grad.data if param_name in self.sparse_tensor_module_names or grad_data.is_sparse: - grad_data = SparseTensor(grad_data) + # Call param.grad without data to avoid problem 
with setting of updated grads + grad_data = SparseTensor(param.grad) if is_moe_param(param): expert_grads[param.group_name].append(grad_data) @@ -2191,9 +2474,6 @@ class DeepSpeedEngine(Module): return sparse_list def sparse_allreduce(self, sparse, dp_group): - # Pre-divide for fp16 stability - sparse.values.mul_(1.0 / dist.get_world_size(group=dp_group)) - original_data_type = sparse.values.dtype if self.communication_data_type != sparse.values.dtype: if self.communication_data_type in (torch.float16, torch.bfloat16): @@ -2205,6 +2485,13 @@ class DeepSpeedEngine(Module): indices = sparse.indices values = sparse.values + if self.postscale_gradients(): + if self.gradient_average: + values.mul_(self.gradient_predivide_factor() / + dist.get_world_size(group=dp_group)) + else: + values.mul_(1. / dist.get_world_size(group=dp_group)) + indices_device_list = self.sparse_all_gather(indices, dp_group) values_device_list = self.sparse_all_gather(values, dp_group) @@ -2257,6 +2544,8 @@ class DeepSpeedEngine(Module): def module_state_dict(self, destination=None, prefix="", keep_vars=False): sd = self.module.state_dict(destination, prefix, keep_vars) + if self.random_ltd_enabled(): + sd = remove_random_ltd_state_dict(sd) return sd @staticmethod @@ -2266,7 +2555,8 @@ class DeepSpeedEngine(Module): old_moe_load, model=None, mpu=None, - num_experts=1): + num_experts=1, + checkpoint_engine=TorchCheckpointEngine()): if old_moe_load: expp_rank = groups._get_expert_data_parallel_rank( groups._get_max_expert_size_name()) @@ -2276,7 +2566,7 @@ class DeepSpeedEngine(Module): groups._get_max_expert_size_name()) for local_expert_id in range(num_local_experts): global_expert_id = expp_rank * num_local_experts + local_expert_id - expert_state_dict = torch.load(DeepSpeedEngine._get_expert_ckpt_name( + expert_state_dict = checkpoint_engine.load(DeepSpeedEngine._get_expert_ckpt_name( checkpoint_path, -1, # -1 means ignore layer_id global_expert_id, @@ -2295,14 +2585,14 @@ class 
DeepSpeedEngine(Module): else: moe_layer_id = 0 for n_module, module in model.named_modules(): - if isinstance(module, MoE): # and torch.distributed.get_rank() == 0: + if isinstance(module, MoE): # and deepspeed.comm.get_rank() == 0: group_name = module.expert_group_name num_local_experts = module.num_local_experts expp_rank = groups._get_expert_parallel_rank(group_name) # loop all local_experts for local_expert_id in range(num_local_experts): global_expert_id = expp_rank * num_local_experts + local_expert_id - expert_state_dict = torch.load( + expert_state_dict = checkpoint_engine.load( DeepSpeedEngine._get_expert_ckpt_name( checkpoint_path, moe_layer_id, @@ -2321,8 +2611,12 @@ class DeepSpeedEngine(Module): state_dict.update(expert_state_dict) moe_layer_id += 1 - def load_module_state_dict(self, state_dict, strict=True): - self.module.load_state_dict(state_dict, strict=strict) + def load_module_state_dict(self, state_dict, strict=True, custom_load_fn=None): + if custom_load_fn: + custom_load_fn(src=state_dict, dst=self.module) + else: + self.module.load_state_dict(state_dict, # TODO + strict=strict) def _get_zero_ckpt_prefix(self, dp_rank, bf16_mode): return f'{"bf16_" if bf16_mode else ""}zero_pp_rank_{dp_rank}' @@ -2343,7 +2637,7 @@ class DeepSpeedEngine(Module): def _get_zero_ckpt_name(self, checkpoints_path, tag): mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank() - pp_rank = torch.distributed.get_rank(group=self.optimizer.dp_process_group) + pp_rank = dist.get_rank(group=self.optimizer.dp_process_group) bf16_mode = self.bfloat16_enabled() return self._get_rank_zero_ckpt_name(checkpoints_path, tag, @@ -2360,7 +2654,7 @@ class DeepSpeedEngine(Module): if self.zero_optimization_partition_weights(): filename = "zero_pp_rank_{}".format( - torch.distributed.get_rank(group=self.optimizer.dp_process_group)) + dist.get_rank(group=self.optimizer.dp_process_group)) ckpt_name = os.path.join( checkpoints_path, str(tag), @@ -2417,8 +2711,10 @@ class 
DeepSpeedEngine(Module): load_module_strict=True, load_optimizer_states=True, load_lr_scheduler_states=True, - load_module_only=False): - """Load training checkpoint + load_module_only=False, + custom_load_fn=None): + """ + Load training checkpoint Arguments: load_dir: Required. Directory to load the checkpoint from @@ -2427,30 +2723,38 @@ class DeepSpeedEngine(Module): load_optimizer_states: Optional. Boolean to load the training optimizer states from Checkpoint. Ex. ADAM's momentum and variance load_lr_scheduler_states: Optional. Boolean to add the learning rate scheduler states from Checkpoint. load_module_only: Optional. Boolean to load only the model weights from the checkpoint. Ex. warmstarting. + custom_load_fn: Optional. Custom model load function. + Returns: A tuple of ``load_path`` and ``client_state``. - *``load_path``: Path of the loaded checkpoint. ``None`` if loading the checkpoint failed. - *``client_state``: State dictionary used for loading required training states in the client code. Important: under ZeRO3, one cannot load checkpoint with ``engine.load_checkpoint()`` right after ``engine.save_checkpoint()``. It is because ``engine.module`` is partitioned, and ``load_checkpoint()`` wants a pristine model. If insisting to do so, please reinitialize engine before ``load_checkpoint()``. + """ if tag is None: - latest_path = os.path.join(load_dir, "latest") + latest_tag = "latest_universal" if self.load_universal_checkpoint( + ) else "latest" + latest_path = os.path.join(load_dir, latest_tag) if os.path.isfile(latest_path): with open(latest_path, "r") as fd: tag = fd.read().strip() else: - logger.warning( - f"Unable to find latest file at {latest_path}, if trying to load latest " - "checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint." 
- ) - return None, None + if self.load_universal_checkpoint(): + raise ValueError( + f'Invalid for universal checkpoint: {latest_path} does not exist' + ) + else: + logger.warning( + f"Unable to find latest file at {latest_path}, if trying to load latest " + "checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint." + ) + return None, None if self.zero_optimization_partition_weights(): # Prepare for checkpoint load by ensuring all parameters are partitioned @@ -2461,7 +2765,8 @@ class DeepSpeedEngine(Module): load_module_strict=load_module_strict, load_optimizer_states=load_optimizer_states, load_lr_scheduler_states=load_lr_scheduler_states, - load_module_only=load_module_only) + load_module_only=load_module_only, + custom_load_fn=custom_load_fn) load_zero_checkpoint = self.zero_optimization() or self.bfloat16_enabled() if load_zero_checkpoint and load_path is not None: @@ -2483,12 +2788,15 @@ class DeepSpeedEngine(Module): load_module_strict=True, load_optimizer_states=True, load_lr_scheduler_states=True, - load_module_only=False): + load_module_only=False, + custom_load_fn=None): from deepspeed.runtime.state_dict_factory import SDLoaderFactory ckpt_list = self._get_all_ckpt_names(load_dir, tag) - sd_loader = SDLoaderFactory.get_sd_loader(ckpt_list) + sd_loader = SDLoaderFactory.get_sd_loader( + ckpt_list, + checkpoint_engine=self.checkpoint_engine) is_pipe_parallel = isinstance(self.module, PipelineModule) @@ -2515,10 +2823,12 @@ class DeepSpeedEngine(Module): old_moe_load=old_moe_load, model=self.module, mpu=self.mpu, - num_experts=self.num_experts) - - self.load_module_state_dict(state_dict=checkpoint['module'], - strict=load_module_strict) + num_experts=self.num_experts, + checkpoint_engine=self.checkpoint_engine) + if not self.load_universal_checkpoint(): + self.load_module_state_dict(state_dict=checkpoint['module'], + strict=load_module_strict, + custom_load_fn=custom_load_fn) 
self.loaded_checkpoint_dp_world_size = checkpoint['dp_world_size'] @@ -2531,8 +2841,9 @@ class DeepSpeedEngine(Module): largest_group_name = groups._get_max_expert_size_name() expp_rank = groups._get_expert_parallel_rank(largest_group_name) optim_load_path = self._get_optimizer_ckpt_name(load_dir, tag, expp_rank) - optim_checkpoint = torch.load(optim_load_path, - map_location=torch.device('cpu')) + optim_checkpoint = self.checkpoint_engine.load( + optim_load_path, + map_location=torch.device('cpu')) else: optim_checkpoint = checkpoint @@ -2549,6 +2860,15 @@ class DeepSpeedEngine(Module): if load_lr_scheduler_states and self.lr_scheduler is not None: self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler']) + if self.random_ltd_enabled( + ) and self.random_ltd_scheduler is not None and 'random_ltd' in checkpoint: + self.random_ltd_scheduler.load_state_dict(checkpoint['random_ltd']) + + if self.training_dataloader is not None and self.curriculum_learning_enabled( + ) and 'data_sampler' in checkpoint: + self.training_dataloader.data_sampler.load_state_dict( + checkpoint['data_sampler']) + def get_sparse_tensor_module_names(original_set, loaded_set, original_parameters, @@ -2595,7 +2915,9 @@ class DeepSpeedEngine(Module): 'skipped_steps', 'global_steps', 'dp_world_size', - 'mp_world_size' + 'mp_world_size', + 'data_sampler', + 'random_ltd' ] client_state = {} @@ -2616,25 +2938,35 @@ class DeepSpeedEngine(Module): return load_path, client_state def _load_zero_checkpoint(self, load_dir, tag, load_optimizer_states=True): - zero_sd_list = self._get_all_zero_checkpoints(load_dir, tag) - if zero_sd_list is None: - return False - - if load_optimizer_states and self.dp_world_size != self.loaded_checkpoint_dp_world_size: - raise ZeRORuntimeException("The checkpoint being loaded used a DP " \ - f"world size of {self.loaded_checkpoint_dp_world_size} but the " \ - f"current world size is {self.dp_world_size}. 
Automatic adjustment " \ - "of ZeRO's optimizer state partitioning with a new world size is not " \ - "currently supported.") + if self.load_universal_checkpoint(): + zero_sd_list = None + checkpoint_folder = f'{os.path.join(load_dir, tag)}' + else: + if load_optimizer_states and self.dp_world_size != self.loaded_checkpoint_dp_world_size: + raise ZeRORuntimeException("The checkpoint being loaded used a DP " \ + f"world size of {self.loaded_checkpoint_dp_world_size} but the " \ + f"current world size is {self.dp_world_size}. Automatic adjustment " \ + "of ZeRO's optimizer state partitioning with a new world size is not " \ + "currently supported.") + checkpoint_folder = None + zero_sd_list = self._get_all_zero_checkpoints(load_dir, tag) + if zero_sd_list is None: + return False self.optimizer.load_state_dict( state_dict_list=zero_sd_list, load_optimizer_states=load_optimizer_states, load_from_fp32_weights=self.zero_load_from_fp32_weights(), - ) - logger.info( - f"loading {len(zero_sd_list)} zero partition checkpoints for rank {self.global_rank}" - ) + checkpoint_folder=checkpoint_folder) + + if self.load_universal_checkpoint(): + logger.info( + f'loaded universal zero checkpoints from {checkpoint_folder} for rank {self.global_rank}' + ) + else: + logger.info( + f"loading {len(zero_sd_list)} zero partition checkpoints for rank {self.global_rank}" + ) return True def _get_mp_rank_zero_checkpoint_names(self, @@ -2662,7 +2994,6 @@ class DeepSpeedEngine(Module): mp_rank=mp_rank, dp_world_size=self.loaded_checkpoint_dp_world_size, bf16_mode=bf16_mode) - invalid_zero_ckpt_paths = [] for i, ckpt_name in enumerate(zero_ckpt_names): if not os.path.exists(ckpt_name): # transparently handle the old file pattern for optim_states @@ -2672,13 +3003,6 @@ class DeepSpeedEngine(Module): if os.path.exists(ckpt_name_try): zero_ckpt_names[i] = ckpt_name_try continue - invalid_zero_ckpt_paths.append(ckpt_name) - - if len(invalid_zero_ckpt_paths) > 0: - logger.warn( - f"The following zero 
checkpoints paths are missing: {invalid_zero_ckpt_paths}" - ) - return None return zero_ckpt_names @@ -2686,10 +3010,15 @@ class DeepSpeedEngine(Module): zero_sd_list = [] for i, ckpt_name in enumerate(zero_ckpt_names): _state = None + if ckpt_name is None: + _state = {OPTIMIZER_STATE_DICT: None} # Fully load state for current rank - if self.zero_elastic_checkpoint() or dist.get_rank( + elif self.zero_elastic_checkpoint() or dist.get_rank( group=self.optimizer.dp_process_group) == i: - _state = torch.load(ckpt_name, map_location='cpu') + _state = self.checkpoint_engine.load( + ckpt_name, + map_location='cpu', + ) else: _state = {OPTIMIZER_STATE_DICT: None} zero_sd_list.append(_state) @@ -2724,8 +3053,8 @@ class DeepSpeedEngine(Module): bhash = torch.ByteTensor([s_hash.digest()]).flatten().to(self.device) max_bhash = bhash.clone() min_bhash = bhash.clone() - dist.all_reduce(max_bhash, op=torch.distributed.ReduceOp.MAX) - dist.all_reduce(min_bhash, op=torch.distributed.ReduceOp.MIN) + dist.all_reduce(max_bhash, op=dist.ReduceOp.MAX) + dist.all_reduce(min_bhash, op=dist.ReduceOp.MIN) valid = all(min_bhash == bhash) and all(max_bhash == bhash) msg = ( f"[rank={dist.get_rank()}] The checkpoint tag name '{tag}' is not consistent across " @@ -2737,7 +3066,7 @@ class DeepSpeedEngine(Module): logger.warning(msg) def save_checkpoint(self, save_dir, tag=None, client_state={}, save_latest=True): - r"""Save training checkpoint + """Save training checkpoint Arguments: save_dir: Required. Directory for saving the checkpoint @@ -2745,28 +3074,31 @@ class DeepSpeedEngine(Module): used if not provided. Tag name must be the same across all ranks. client_state: Optional. State dictionary used for saving required training states in the client code. save_latest: Optional. Save a file 'latest' pointing to the latest saved checkpoint. - Important: all processes must call this method and not just the process with rank 0. 
It is because each process needs to save its master weights and scheduler+optimizer states. This method will hang waiting to synchronize with other processes if it's called just for the process with rank 0. + """ if self.zero_optimization_partition_weights(): # Prepare for checkpoint save by ensuring all parameters are partitioned self.optimizer.checkpoint_event_prologue() + rank = self.local_rank if self.use_node_local_storage() else self.global_rank + # This is to make sure the checkpoint names are created without collision # There seems to be issue creating them in parallel # Ensure save_dir directory exists os.makedirs(save_dir, exist_ok=True) - torch.distributed.barrier() + dist.barrier() if tag is None: tag = f"global_step{self.global_steps}" # Ensure tag is a string tag = str(tag) + self.checkpoint_engine.create(tag) # Ensure checkpoint tag is consistent across ranks self._checkpoint_tag_validation(tag) @@ -2776,7 +3108,11 @@ class DeepSpeedEngine(Module): self._create_checkpoint_file(save_dir, tag, False) self._save_moe_checkpoint(save_dir, tag, client_state=client_state) - if self.save_non_zero_checkpoint: + # We distribute the task of saving layer checkpoint files among + # data parallel instances, so all procs should call _save_checkpoint. + # All procs then call module_state_dict(), but only procs of data + # parallel rank 0 save the general model params. 
+ if not self.has_moe_layers: self._create_checkpoint_file(save_dir, tag, False) self._save_checkpoint(save_dir, tag, client_state=client_state) @@ -2788,11 +3124,13 @@ class DeepSpeedEngine(Module): self.optimizer.checkpoint_event_epilogue() # Save latest checkpoint tag - torch.distributed.barrier() - if save_latest and self.global_rank == 0: + self.checkpoint_engine.commit(tag) + if save_latest and rank == 0: with open(os.path.join(save_dir, 'latest'), 'w') as fd: fd.write(tag) + dist.barrier() + return True def _get_non_moe_state_dict(self, full_state_dict): @@ -2814,7 +3152,7 @@ class DeepSpeedEngine(Module): # Using layer_#_export_# to save the model's expert state_dict moe_layer_id = 0 for n_module, module in self.module.named_modules(): - if isinstance(module, MoE): # and torch.distributed.get_rank() == 0: + if isinstance(module, MoE): # and deepspeed.comm.get_rank() == 0: group_name = module.expert_group_name num_local_experts = module.num_local_experts expp_rank = groups._get_expert_parallel_rank(group_name) @@ -2846,8 +3184,9 @@ class DeepSpeedEngine(Module): num_local_experts + int(local_expert_id) expert_key = key.replace(f'{moe_str_prefix}{local_expert_id}', f'{moe_str_prefix}{global_expert_id}') - experts_state_dict[str( - global_expert_id)][expert_key] = moe_state_dict.pop(key) + # truncating extra tensor (shared) storage + truncated = moe_state_dict.pop(key).clone().detach() + experts_state_dict[str(global_expert_id)][expert_key] = truncated # let save the moe parameters for global_expert_id, expert_state_dict in experts_state_dict.items(): @@ -2858,7 +3197,10 @@ class DeepSpeedEngine(Module): global_expert_id, tag, self.mpu) - torch.save(expert_state_dict, moe_save_path) + if self.random_ltd_enabled(): + expert_state_dict = remove_random_ltd_state_dict( + expert_state_dict) + self.checkpoint_engine.save(expert_state_dict, moe_save_path) moe_layer_id += 1 self._curr_ckpt_path = os.path.join(save_dir, tag) @@ -2879,9 +3221,9 @@ class 
DeepSpeedEngine(Module): self.optimizer.state_dict() if self.optimizer and not self.zero_optimization() else None } - with open(self._get_optimizer_ckpt_name(save_dir, tag, expp_rank), 'wb') as fd: - torch.save(optimizer_state, fd) - fd.flush() + # TODO: why use BufferedWriter not the path + file_path = self._get_optimizer_ckpt_name(save_dir, tag, expp_rank) + self.checkpoint_engine.save(optimizer_state, file_path) # get non-moe parameters model_state_dict = self._get_non_moe_state_dict(self.module_state_dict()) @@ -2894,6 +3236,13 @@ class DeepSpeedEngine(Module): 'lr_scheduler': self.lr_scheduler.state_dict() if self.lr_scheduler is not None else None, + 'data_sampler': + self.training_dataloader.data_sampler.state_dict() if + (self.training_dataloader is not None + and self.curriculum_learning_enabled()) else None, + 'random_ltd': + self.random_ltd_scheduler.state_dict() + if self.random_ltd_enabled() else None, 'sparse_tensor_module_names': self.sparse_tensor_module_names, 'skipped_steps': @@ -2911,9 +3260,7 @@ class DeepSpeedEngine(Module): } state.update(client_state) logger.info(f'Saving model checkpoint: {save_path}') - with open(save_path, 'wb') as fd: - torch.save(state, fd) - fd.flush() + self.checkpoint_engine.save(state, save_path) self._curr_save_path = None def _create_checkpoint_file(self, save_dir, tag, zero_checkpoint): @@ -2942,12 +3289,18 @@ class DeepSpeedEngine(Module): def _save_checkpoint(self, save_dir, tag, client_state={}): save_path = self._get_ckpt_name(save_dir, tag) + + zero_optimizer_state = self.zero_optimization() or self.bfloat16_enabled() + # A hack to save the checkpointing directory. Pipeline parallelism overrides # module_state_dict() and uses this path to save the model. module_state_dict() - # then instead just returns None. + # then instead just returns None. The module_state_dict() implementation in + # PipelineEngine expects the save path to be set in self._curr_ckpt_path. 
self._curr_ckpt_path = os.path.join(save_dir, tag) - zero_optimizer_state = self.zero_optimization() or self.bfloat16_enabled() - state = dict(module=self.module_state_dict(), + module = self.module_state_dict() + self._curr_ckpt_path = None + + state = dict(module=module, buffer_names=self._get_buffer_names(), optimizer=self.optimizer.state_dict() if self.optimizer and not zero_optimizer_state else None, @@ -2955,6 +3308,11 @@ class DeepSpeedEngine(Module): if self.optimizer and zero_optimizer_state else None, lr_scheduler=self.lr_scheduler.state_dict() if self.lr_scheduler is not None else None, + data_sampler=self.training_dataloader.data_sampler.state_dict() if + (self.training_dataloader is not None + and self.curriculum_learning_enabled()) else None, + random_ltd=self.random_ltd_scheduler.state_dict() + if self.random_ltd_enabled() else None, sparse_tensor_module_names=self.sparse_tensor_module_names, skipped_steps=self.skipped_steps, global_steps=self.global_steps, @@ -2965,9 +3323,9 @@ class DeepSpeedEngine(Module): ds_version=version) state.update(client_state) - log_dist(message=f'Saving model checkpoint: {save_path}', ranks=[0, 1]) - torch.save(state, save_path) - self._curr_save_path = None + if self.save_non_zero_checkpoint: + log_dist(message=f'Saving model checkpoint: {save_path}', ranks=[0, 1]) + self.checkpoint_engine.save(state, save_path) def _get_buffer_names(self): buffer_names = [] @@ -2995,11 +3353,9 @@ class DeepSpeedEngine(Module): optimizer. the names are exactly as in state_dict. The order is absolutely important, since the saved data is just flattened data with no identifiers and requires reconstruction in the same order it was saved. - We can't rely on self.module.named_parameters() to get the saved tensors, as some params will be missing and others unsaved and then it'd be impossible to reconstruct state_dict from the flattened weights. - optimizer.bit16_groups seems to be the easiest to use as it's in all zeroX versions. 
""" param_group_shapes = [] @@ -3049,9 +3405,8 @@ class DeepSpeedEngine(Module): zero_sd = dict(optimizer_state_dict=self.optimizer.state_dict(), ds_config=self.config, ds_version=version) - with open(zero_checkpoint_name, 'wb') as fd: - torch.save(zero_sd, fd) - fd.flush() + self.checkpoint_engine.save(zero_sd, zero_checkpoint_name) + if self.global_rank == 0: self._copy_recovery_script(save_path) ckpt_type = 'zero' if self.zero_optimization() else 'bf16_zero' @@ -3059,26 +3414,20 @@ class DeepSpeedEngine(Module): def _zero3_consolidated_16bit_state_dict(self): """ - Get a full non-partitioned state_dict with fp16 weights on cpu. - Important: this function must be called on all ranks and not just rank 0. - This is similar to nn.Module.state_dict (modelled after _save_to_state_dict), but: - 1. consolidates the weights from different partitions on gpu0 2. works on one layer at a time to require as little gpu0 memory as possible, by moving the already consolidated weights to cpu 3. takes care to keep the shared params shared when gradually copying the params to cpu - Returns: a consolidated fp16 ``state_dict`` on cpu on rank 0, ``None`` on other ranks - """ if not self.zero_optimization_partition_weights(): raise ValueError("this function requires ZeRO-3 mode") - state_dict = OrderedDict() if torch.distributed.get_rank() == 0 else None + state_dict = OrderedDict() if dist.get_rank() == 0 else None shared_params = {} def get_layer_state_dict(module, prefix=""): @@ -3088,7 +3437,7 @@ class DeepSpeedEngine(Module): with deepspeed.zero.GatheredParameters(list( module.parameters(recurse=False)), modifier_rank=0): - if torch.distributed.get_rank() == 0: + if dist.get_rank() == 0: # handle params for name, param in module.named_parameters(recurse=False): if param is None: @@ -3134,7 +3483,8 @@ class DeepSpeedEngine(Module): return self.save_16bit_model(save_dir, save_filename) def save_16bit_model(self, save_dir, save_filename="pytorch_model.bin"): - r"""Save 16bit model 
weights + """ + Save 16bit model weights This method saves the 16bit model weights at the desired destination. @@ -3167,9 +3517,9 @@ class DeepSpeedEngine(Module): else: state_dict = self.module.state_dict() - if torch.distributed.get_rank() == 0: + if dist.get_rank() == 0: os.makedirs(save_dir, exist_ok=True) logger.info(f"Saving model weights to {path}") - torch.save(state_dict, path) + self.checkpoint_engine.save(state_dict, path) return True diff --git a/deepspeed/runtime/fp16/__init__.py b/deepspeed/runtime/fp16/__init__.py index e69de29..fcb45ab 100644 --- a/deepspeed/runtime/fp16/__init__.py +++ b/deepspeed/runtime/fp16/__init__.py @@ -0,0 +1 @@ +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/deepspeed/runtime/fp16/fused_optimizer.py b/deepspeed/runtime/fp16/fused_optimizer.py old mode 100644 new mode 100755 index dc52552..4f4b5cf --- a/deepspeed/runtime/fp16/fused_optimizer.py +++ b/deepspeed/runtime/fp16/fused_optimizer.py @@ -8,14 +8,16 @@ This file is adapted from FP16_Optimizer in NVIDIA/apex import torch from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors +from deepspeed.runtime import DeepSpeedOptimizer from deepspeed.runtime.utils import get_global_norm, get_grad_norm, CheckOverflow, get_weight_norm from deepspeed.runtime.fp16.loss_scaler import INITIAL_LOSS_SCALE, SCALE_WINDOW, MIN_LOSS_SCALE from deepspeed.utils import groups, logger, log_dist +from deepspeed import comm as dist from deepspeed.checkpoint.constants import OPTIMIZER_STATE_DICT, CLIP_GRAD -import torch.distributed as dist +from deepspeed.accelerator import get_accelerator -class FP16_Optimizer(object): +class FP16_Optimizer(DeepSpeedOptimizer): """ FP16 Optimizer for training fp16 models. Handles loss scaling. 
@@ -40,8 +42,8 @@ class FP16_Optimizer(object): self.deepspeed = deepspeed self.has_moe_layers = has_moe_layers self.using_pipeline = self.deepspeed.pipeline_parallelism - if not torch.cuda.is_available: - raise SystemError("Cannot use fp16 without CUDA.") + if not get_accelerator().is_available(): + raise SystemError("Cannot use fp16 without accelerator.") self.optimizer = init_optimizer # param flattened by groups @@ -93,6 +95,9 @@ class FP16_Optimizer(object): self.cur_scale = static_loss_scale self.verbose = verbose + self.custom_loss_scaler = False + self.external_loss_scale = None + self.clip_grad = clip_grad self.norm_type = 2 self.step_count = 0 @@ -126,14 +131,14 @@ class FP16_Optimizer(object): return - def zero_grad(self, set_grads_to_None=True): + def zero_grad(self, set_to_none=False): """ Zero FP16 parameter grads. """ # For speed, set model fp16 grad to None by default for group in self.fp16_groups: for p in group: - if set_grads_to_None: + if set_to_none: p.grad = None else: if p.grad is not None: @@ -177,7 +182,7 @@ class FP16_Optimizer(object): apply_scale=False) # Stash unscaled gradient norm - self._global_grad_norm = scaled_global_grad_norm / self.cur_scale + self._global_grad_norm = scaled_grad_norm / self.cur_scale # norm is in fact norm*cur_scale self.optimizer.step(grads=[[g] for g in grads_groups_flat], @@ -206,6 +211,23 @@ class FP16_Optimizer(object): if self.timers is not None: self.timers.log(name_list) + def set_lr(self, lr): + """Set the learning rate.""" + for param_group in self.optimizer.param_groups: + param_group["lr"] = lr + + def get_lr(self): + """Return the current learning rate.""" + return self.optimizer.param_groups[0]["lr"] + + def override_loss_scale(self, loss_scale): + if loss_scale != self.external_loss_scale: + logger.info( + f'[deepspeed] setting loss scale from {self.external_loss_scale} -> {loss_scale}' + ) + self.custom_loss_scaler = True + self.external_loss_scale = loss_scale + def step(self, closure=None): 
""" Not supporting closure. @@ -317,7 +339,7 @@ class FP16_Optimizer(object): dtype=torch.float) dist.all_reduce(scaled_norm_tensor, group=pg) all_groups_norm = scaled_norm_tensor.item() - #print(f"old = {all_groups_norm_old} and new = {all_groups_norm} at rank: {torch.distributed.get_rank()}") + #print(f"old = {all_groups_norm_old} and new = {all_groups_norm} at rank: {deepspeed.comm.get_rank()}") return all_groups_norm def unscale_and_clip_grads(self, grad_groups_flat, total_norm, apply_scale=True): @@ -343,9 +365,12 @@ class FP16_Optimizer(object): 2. scaled_loss = fp32_loss*loss_scale 3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves """ - - scaled_loss = (loss.float()) * self.cur_scale - scaled_loss.backward(create_graph=create_graph, retain_graph=retain_graph) + if self.custom_loss_scaler: + scaled_loss = self.external_loss_scale * loss + scaled_loss.backward() + else: + scaled_loss = (loss.float()) * self.cur_scale + scaled_loss.backward(create_graph=create_graph, retain_graph=retain_graph) def _update_scale(self, skip): if self.dynamic_loss_scale: @@ -433,7 +458,7 @@ class FP16_Optimizer(object): will call ``model.load_state_dict()`` before ``fp16_optimizer_instance.load_state_dict()`` is called. Example:: - model = torch.nn.Linear(D_in, D_out).cuda().half() + model = torch.nn.Linear(D_in, D_out).to(get_accelerator().device_name()).half() optimizer = torch.optim.SGD(model.parameters(), lr=1e-3) optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0) ... 
@@ -472,6 +497,14 @@ class FP16_Optimizer(object): def __repr__(self): return repr(self.optimizer) - @property - def loss_scale(self): - return self.cur_scale + # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale" + def _get_loss_scale(self): + if self.custom_loss_scaler: + return self.external_loss_scale + else: + return self.cur_scale + + def _set_loss_scale(self, value): + self.loss_scaler.cur_scale = value + + loss_scale = property(_get_loss_scale, _set_loss_scale) diff --git a/deepspeed/runtime/fp16/loss_scaler.py b/deepspeed/runtime/fp16/loss_scaler.py old mode 100644 new mode 100755 index 954d0ea..58ab2ae --- a/deepspeed/runtime/fp16/loss_scaler.py +++ b/deepspeed/runtime/fp16/loss_scaler.py @@ -37,6 +37,7 @@ class LossScalerBase: """ def __init__(self, cur_scale): self.cur_scale = cur_scale + self.dynamic = False @property def loss_scale(self): @@ -107,7 +108,8 @@ class DynamicLossScaler(LossScalerBase): scale_window=1000, min_scale=1, delayed_shift=1, - consecutive_hysteresis=False): + consecutive_hysteresis=False, + raise_error_at_min_scale=True): super(DynamicLossScaler, self).__init__(init_scale) self.cur_iter = 0 self.last_overflow_iter = -1 @@ -117,6 +119,8 @@ class DynamicLossScaler(LossScalerBase): self.delayed_shift = delayed_shift self.cur_hysteresis = delayed_shift self.consecutive_hysteresis = consecutive_hysteresis + self.raise_error_at_min_scale = raise_error_at_min_scale + self.dynamic = True # `params` is a list / generator of torch.Variable def has_overflow_serial(self, params): @@ -152,6 +156,10 @@ class DynamicLossScaler(LossScalerBase): if overflow: # self.cur_scale /= self.scale_factor if self.delayed_shift == 1 or self.cur_hysteresis == 1: + if (self.cur_scale == self.min_scale) and self.raise_error_at_min_scale: + raise Exception( + "Current loss scale already at minimum - cannot decrease scale anymore. Exiting run." 
+ ) self.cur_scale = max(self.cur_scale / self.scale_factor, self.min_scale) else: self.cur_hysteresis -= 1 @@ -166,6 +174,18 @@ class DynamicLossScaler(LossScalerBase): self.cur_iter += 1 +# Although loss scaling is only defined for fp16, yet for backwards compatibility +# we still create a scaler for other dtypes (fp32, bf16) which does not perform any scaling. +def CreateLossScaler(dtype, static_loss_scale, dynamic_scaling, dynamic_loss_args): + if dtype == torch.half and dynamic_scaling: + if dynamic_loss_args is None: + return DynamicLossScaler() + return DynamicLossScaler(**dynamic_loss_args) + + loss_scale_value = static_loss_scale if dtype == torch.half else 1.0 + return LossScaler(scale=loss_scale_value) + + ############################################################## # Example usage below here -- assuming it's in a separate file ############################################################## diff --git a/deepspeed/runtime/fp16/onebit/__init__.py b/deepspeed/runtime/fp16/onebit/__init__.py index e69de29..289769b 100644 --- a/deepspeed/runtime/fp16/onebit/__init__.py +++ b/deepspeed/runtime/fp16/onebit/__init__.py @@ -0,0 +1,5 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +from .adam import OnebitAdam +from .lamb import OnebitLamb +from .zoadam import ZeroOneAdam diff --git a/deepspeed/runtime/fp16/onebit/adam.py b/deepspeed/runtime/fp16/onebit/adam.py index 5ce451d..5eb22fb 100644 --- a/deepspeed/runtime/fp16/onebit/adam.py +++ b/deepspeed/runtime/fp16/onebit/adam.py @@ -3,12 +3,9 @@ Copyright 2020 The Microsoft DeepSpeed Team ''' import types import torch -import importlib import numpy as np -import time -import torch.distributed as dist - -from deepspeed.utils.logging import logger +from deepspeed.accelerator import get_accelerator +from deepspeed import comm as dist class OnebitAdam(torch.optim.Optimizer): @@ -37,7 +34,7 @@ class OnebitAdam(torch.optim.Optimizer): cuda_aware (boolean, required): Set True if the underlying MPI implementation 
supports CUDA-Aware communication. (default: False) comm_backend_name (string, optional): Set to 'mpi' if needed. (default: 'nccl') - .. _Adam\: A Method for Stochastic Optimization: + .. _Adam\\: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 .. _On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ @@ -178,14 +175,14 @@ class OnebitAdam(torch.optim.Optimizer): (self.size * self.divider))) state['server_chunk_size'] = state[ 'corrected_tensor_size'] // self.size - torch.cuda.empty_cache() + get_accelerator().empty_cache() state['worker_error'] = torch.zeros(state['corrected_tensor_size'], device=p.device) state['server_error'] = torch.zeros(state['server_chunk_size'], device=p.device) - torch.cuda.empty_cache() + get_accelerator().empty_cache() self.adam_freeze_key = True - if not self.initialize and torch.distributed.get_rank() == 0: + if not self.initialize and dist.get_rank() == 0: print("Cupy Buffers Initialized Successfully.") exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] @@ -249,9 +246,7 @@ class OnebitAdam(torch.optim.Optimizer): if not self.initialize: self.adam_freeze_key = False self.initialize = True - print( - f"Finished the initialization step at rank {torch.distributed.get_rank()}" - ) + print(f"Finished the initialization step at rank {dist.get_rank()}") return loss if self.adam_freeze_key is False: @@ -282,7 +277,7 @@ class OnebitAdam(torch.optim.Optimizer): state_dict['param_groups'][i].pop('exp_avg_mask') super().load_state_dict(state_dict) if self.state[self.param_groups[0]['params'][0]]['step'] < self.freeze_step: - if torch.distributed.get_rank() == 0: + if dist.get_rank() == 0: print("Checkpoint loaded and OnebitAdam warmup stage starts/continues.") if self.adam_freeze_key is True: self.adam_freeze_key = False @@ -291,7 +286,7 @@ class OnebitAdam(torch.optim.Optimizer): else: self.deepspeed.enable_backward_allreduce = True else: - if torch.distributed.get_rank() == 0: + if 
dist.get_rank() == 0: print( "Checkpoint loaded and OnebitAdam compression stage starts/continues." ) diff --git a/deepspeed/runtime/fp16/onebit/lamb.py b/deepspeed/runtime/fp16/onebit/lamb.py index 01c6cd8..87c2469 100644 --- a/deepspeed/runtime/fp16/onebit/lamb.py +++ b/deepspeed/runtime/fp16/onebit/lamb.py @@ -4,8 +4,9 @@ Copyright 2021 The Microsoft DeepSpeed Team import types import torch import numpy as np -import torch.distributed as dist +from deepspeed import comm as dist from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors +from deepspeed.accelerator import get_accelerator class OnebitLamb(torch.optim.Optimizer): @@ -46,9 +47,9 @@ class OnebitLamb(torch.optim.Optimizer): coefficient during compression stage (default: 0.5) factor_threshold (float, optional): threshold of how much the scaling factor can fluctuate between steps (default: 0.1) - .. _Large Batch Optimization for Deep Learning\: Training BERT in 76 minutes: + .. _Large Batch Optimization for Deep Learning\\: Training BERT in 76 minutes: https://arxiv.org/abs/1904.00962 - .. _Adam\: A Method for Stochastic Optimization: + .. _Adam\\: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 .. 
_On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ @@ -283,7 +284,7 @@ class OnebitLamb(torch.optim.Optimizer): p.data = q.data if self.initialize and len(self.worker_errors) == 0: - torch.cuda.empty_cache() + get_accelerator().empty_cache() for i in range(len(self.exp_avg_flat)): self.worker_errors.append( torch.zeros(self.corrected_tensor_sizes[i], @@ -291,21 +292,21 @@ class OnebitLamb(torch.optim.Optimizer): self.server_errors.append( torch.zeros(self.server_chunk_sizes[i], device=self.exp_avg_flat[i].device)) - torch.cuda.empty_cache() + get_accelerator().empty_cache() if self.lamb_freeze_key: if self.size > 1: for i in range(len(self.exp_avg_flat)): if not self.initialize: - torch.cuda.empty_cache() + get_accelerator().empty_cache() self.worker_errors.append( torch.zeros(self.corrected_tensor_sizes[i], device=self.exp_avg_flat[i].device)) self.server_errors.append( torch.zeros(self.server_chunk_sizes[i], device=self.exp_avg_flat[i].device)) - torch.cuda.empty_cache() - if torch.distributed.get_rank() == 0: + get_accelerator().empty_cache() + if dist.get_rank() == 0: print("Cupy Buffers Initialized Successfully.") self.comm_backend_handle.compressed_allreduce( @@ -314,7 +315,7 @@ class OnebitLamb(torch.optim.Optimizer): self.server_errors[0], self.deepspeed.local_rank) - if torch.distributed.get_rank() == 0: + if dist.get_rank() == 0: print('Pop out errors', flush=True) del self.worker_errors[:] del self.server_errors[:] @@ -389,9 +390,7 @@ class OnebitLamb(torch.optim.Optimizer): if not self.initialize: self.lamb_freeze_key = False self.initialize = True - print( - f"Finished the initialization step at rank {torch.distributed.get_rank()}" - ) + print(f"Finished the initialization step at rank {dist.get_rank()}") return loss if self.lamb_freeze_key is False: @@ -427,7 +426,7 @@ class OnebitLamb(torch.optim.Optimizer): del self.corrected_tensor_sizes[:] del self.server_chunk_sizes[:] if 
self.state[self.param_groups[0]['params'][0]]['step'] < self.freeze_step: - if torch.distributed.get_rank() == 0: + if dist.get_rank() == 0: print("Checkpoint loaded and OnebitLamb warmup stage starts/continues.") if self.lamb_freeze_key is True: self.lamb_freeze_key = False @@ -442,7 +441,7 @@ class OnebitLamb(torch.optim.Optimizer): if 'scaling_coeff' in self.state[p]: self.state[p].pop('scaling_coeff') else: - if torch.distributed.get_rank() == 0: + if dist.get_rank() == 0: print( "Checkpoint loaded and OnebitLamb compression stage starts/continues." ) diff --git a/deepspeed/runtime/fp16/onebit/zoadam.py b/deepspeed/runtime/fp16/onebit/zoadam.py index b0238b1..f86ae86 100644 --- a/deepspeed/runtime/fp16/onebit/zoadam.py +++ b/deepspeed/runtime/fp16/onebit/zoadam.py @@ -3,12 +3,9 @@ Copyright 2020 The Microsoft DeepSpeed Team ''' import types import torch -import importlib import numpy as np -import time -import torch.distributed as dist - -from deepspeed.utils.logging import logger +from deepspeed.accelerator import get_accelerator +from deepspeed import comm as dist class ZeroOneAdam(torch.optim.Optimizer): @@ -47,7 +44,7 @@ class ZeroOneAdam(torch.optim.Optimizer): cuda_aware (boolean, required): Set True if the underlying MPI implementation supports CUDA-Aware communication. (default: False) comm_backend_name (string, optional): Set to 'mpi' if needed. (default: 'nccl') - .. _Adam\: A Method for Stochastic Optimization: + .. _Adam\\: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 .. 
_On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ @@ -189,16 +186,16 @@ class ZeroOneAdam(torch.optim.Optimizer): (self.size * self.divider))) state['server_chunk_size'] = state[ 'corrected_tensor_size'] // self.size - torch.cuda.empty_cache() + get_accelerator().empty_cache() state['worker_error'] = torch.zeros(state['corrected_tensor_size'], device=p.device) state['server_error'] = torch.zeros(state['server_chunk_size'], device=p.device) # Accumulation of momentum, i.e., the u variable in the 0/1 Adam paper state['momentum_accumulator'] = torch.zeros_like(p.data) - torch.cuda.empty_cache() + get_accelerator().empty_cache() # self.freeze_key = True - if not self.initialize and torch.distributed.get_rank() == 0: + if not self.initialize and dist.get_rank() == 0: print("Cupy Buffers Initialized Successfully.") exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] @@ -313,9 +310,7 @@ class ZeroOneAdam(torch.optim.Optimizer): if not self.initialize: self.initialize = True - print( - f"Finished the initialization step at rank {torch.distributed.get_rank()}" - ) + print(f"Finished the initialization step at rank {dist.get_rank()}") return loss if self.state[self.param_groups[0]['params'][0]]['step'] > self.var_freeze_step: diff --git a/deepspeed/runtime/fp16/unfused_optimizer.py b/deepspeed/runtime/fp16/unfused_optimizer.py old mode 100644 new mode 100755 index 56b880f..e0249f1 --- a/deepspeed/runtime/fp16/unfused_optimizer.py +++ b/deepspeed/runtime/fp16/unfused_optimizer.py @@ -9,13 +9,16 @@ from deepspeed.moe.utils import split_params_grads_into_shared_and_expert_params import torch from torch._utils import _flatten_dense_tensors +from deepspeed.runtime import DeepSpeedOptimizer from deepspeed.runtime.utils import get_global_norm, CheckOverflow, get_weight_norm from deepspeed.runtime.fp16.loss_scaler import INITIAL_LOSS_SCALE, SCALE_WINDOW, MIN_LOSS_SCALE from deepspeed.utils import logger from deepspeed.checkpoint.constants 
import OPTIMIZER_STATE_DICT +from deepspeed.accelerator import get_accelerator +from deepspeed import comm as dist -class FP16_UnfusedOptimizer(object): +class FP16_UnfusedOptimizer(DeepSpeedOptimizer): """ FP16 Optimizer without weight fusion to support LAMB optimizer @@ -35,11 +38,11 @@ class FP16_UnfusedOptimizer(object): self.fused_lamb_legacy = fused_lamb_legacy self._global_grad_norm = 0. - if torch.distributed.get_rank() == 0: + if dist.get_rank() == 0: logger.info(f'Fused Lamb Legacy : {self.fused_lamb_legacy} ') - if not torch.cuda.is_available: - raise SystemError("Cannot use fp16 without CUDA.") + if not get_accelerator().is_available(): + raise SystemError("Cannot use fp16 without accelerator.") self.optimizer = init_optimizer # param groups @@ -84,6 +87,9 @@ class FP16_UnfusedOptimizer(object): self.cur_iter = 0 self.cur_scale = static_loss_scale + self.custom_loss_scaler = False + self.external_loss_scale = None + self.verbose = verbose self.clip_grad = clip_grad @@ -105,7 +111,7 @@ class FP16_UnfusedOptimizer(object): self.initialize_optimizer_states() - def zero_grad(self, set_grads_to_None=True): + def zero_grad(self, set_to_none=False): """ Zero FP16 parameter grads. 
""" @@ -113,7 +119,7 @@ class FP16_UnfusedOptimizer(object): # For speed, set model fp16 grad to None by default for group in self.fp16_groups: for p in group: - if set_grads_to_None: + if set_to_none: p.grad = None else: if p.grad is not None: @@ -182,6 +188,23 @@ class FP16_UnfusedOptimizer(object): return self.overflow + def set_lr(self, lr): + """Set the learning rate.""" + for param_group in self.optimizer.param_groups: + param_group["lr"] = lr + + def get_lr(self): + """Return the current learning rate.""" + return self.optimizer.param_groups[0]["lr"] + + def override_loss_scale(self, loss_scale): + if loss_scale != self.external_loss_scale: + logger.info( + f'[deepspeed] setting loss scale from {self.external_loss_scale} -> {loss_scale}' + ) + self.custom_loss_scaler = True + self.external_loss_scale = loss_scale + def step(self, closure=None): """ Not supporting closure. @@ -260,9 +283,12 @@ class FP16_UnfusedOptimizer(object): 2. scaled_loss = fp32_loss*loss_scale 3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves """ - scaled_loss = (loss.float()) * self.cur_scale - - scaled_loss.backward(create_graph=create_graph, retain_graph=retain_graph) + if self.custom_loss_scaler: + scaled_loss = self.external_loss_scale * loss + scaled_loss.backward() + else: + scaled_loss = (loss.float()) * self.cur_scale + scaled_loss.backward(create_graph=create_graph, retain_graph=retain_graph) def _update_scale(self, skip): if self.dynamic_loss_scale: @@ -313,6 +339,18 @@ class FP16_UnfusedOptimizer(object): param_groups = property(_get_param_groups, _set_param_groups) + # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale" + def _get_loss_scale(self): + if self.custom_loss_scaler: + return self.external_loss_scale + else: + return self.cur_scale + + def _set_loss_scale(self, value): + self.loss_scaler.cur_scale = value + + loss_scale = property(_get_loss_scale, 
_set_loss_scale) + def state_dict(self): """ Returns a dict containing the current state of this :class:`FP16_Optimizer` instance. @@ -350,7 +388,7 @@ class FP16_UnfusedOptimizer(object): will call ``model.load_state_dict()`` before ``fp16_optimizer_instance.load_state_dict()`` is called. Example:: - model = torch.nn.Linear(D_in, D_out).cuda().half() + model = torch.nn.Linear(D_in, D_out).to(get_accelerator().device_name()).half() optimizer = torch.optim.SGD(model.parameters(), lr=1e-3) optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0) ... @@ -395,13 +433,13 @@ class FP16_UnfusedOptimizer(object): for param in group: param.grad = torch.zeros(param.size(), dtype=param.dtype, - device=torch.cuda.current_device()) + device=get_accelerator().current_device_name()) for i, group in enumerate(self.fp32_groups): for param in group: param.grad = torch.zeros(param.size(), dtype=param.dtype, - device=torch.cuda.current_device()) + device=get_accelerator().current_device_name()) self.optimizer.step() diff --git a/deepspeed/runtime/lr_schedules.py b/deepspeed/runtime/lr_schedules.py old mode 100644 new mode 100755 index b4dc749..faf5e6f --- a/deepspeed/runtime/lr_schedules.py +++ b/deepspeed/runtime/lr_schedules.py @@ -10,9 +10,7 @@ https://github.com/pytorch/pytorch/blob/v1.1.0/torch/optim/lr_scheduler.py import argparse from torch.optim import Optimizer -from typing import Union, List import math -from deepspeed.runtime.constants import * from deepspeed.utils import logger LR_SCHEDULE = 'lr_schedule' diff --git a/deepspeed/runtime/pipe/__init__.py b/deepspeed/runtime/pipe/__init__.py index 55e4c2c..dcffaa6 100644 --- a/deepspeed/runtime/pipe/__init__.py +++ b/deepspeed/runtime/pipe/__init__.py @@ -1,2 +1,4 @@ +'''Copyright The Microsoft DeepSpeed Team''' + from .module import PipelineModule, LayerSpec, TiedLayerSpec from .topology import ProcessTopology diff --git a/deepspeed/runtime/pipe/engine.py b/deepspeed/runtime/pipe/engine.py index 5807fb9..a080559 
100644 --- a/deepspeed/runtime/pipe/engine.py +++ b/deepspeed/runtime/pipe/engine.py @@ -1,28 +1,19 @@ # Copyright 2019 The Microsoft DeepSpeed Team -import time -import logging -import copy -import os - from types import MethodType -from numpy import prod - import torch -import torch.nn as nn -import torch.optim as optim -import torch.distributed as dist +from deepspeed import comm as dist -from deepspeed.utils.logging import logger -from deepspeed.utils.timer import SynchronizedWallClockTimer, ThroughputTimer +from deepspeed.utils import logger +from deepspeed.utils.timer import ThroughputTimer +from deepspeed.accelerator import get_accelerator -from deepspeed.inference.engine import InferenceEngine from ..engine import DeepSpeedEngine, MEMORY_OPT_ALLREDUCE_SIZE -from ..utils import PartitionedTensor, ensure_directory_exists +from ..utils import PartitionedTensor from ..dataloader import RepeatingLoader -from .module import PipelineModule, PipelineError, TiedLayerSpec +from .module import PipelineModule, PipelineError from . import p2p from . import schedule @@ -80,8 +71,10 @@ class PipelineEngine(DeepSpeedEngine): # used to disable the pipeline all-reduce when used with 1-bit Adam/1-bit LAMB self.pipeline_enable_backward_allreduce = True - assert not self.elasticity_enabled(), "Elasticity is not currently supported" \ - " with pipeline parallelism." + if self.elasticity_enabled(): + if not self.is_elastic_model_parallel_supported(): + assert not self.elasticity_enabled(), "Elasticity is not currently supported" \ + " with pipeline parallelism." 
# pipeline step for logging self.log_batch_step_id = -1 @@ -112,9 +105,7 @@ class PipelineEngine(DeepSpeedEngine): self._force_grad_boundary = False - self.batch_timer = ThroughputTimer(batch_size=self.micro_batch_size * - self.micro_batches, - num_workers=self.dp_world_size, + self.batch_timer = ThroughputTimer(batch_size=self.train_batch_size(), logging_fn=self.tput_log, monitor_memory=False, steps_per_output=self.steps_per_print()) @@ -190,6 +181,8 @@ class PipelineEngine(DeepSpeedEngine): self.module.activation_checkpoint_interval = self._config.pipeline[ 'activation_checkpoint_interval'] + self.module.checkpoint_parallel_write_pipeline = self._config.checkpoint_parallel_write_pipeline + if self.is_last_stage(): self.loss_model = self.module.loss_fn @@ -328,13 +321,13 @@ class PipelineEngine(DeepSpeedEngine): f'train_batch() requires gradients enabled. Use eval_batch() instead.') # Curriculum learning could change activation shape - if self.curriculum_enabled(): - new_difficulty = self.curriculum_scheduler.update_difficulty( \ + if self.curriculum_enabled_legacy(): + new_difficulty = self.curriculum_scheduler_legacy.update_difficulty( \ self.global_steps + 1) - if self.global_steps == 0 or self.curriculum_scheduler.first_step: + if self.global_steps == 0 or self.curriculum_scheduler_legacy.first_step: self.reset_activation_shape() - self.curriculum_scheduler.first_step = False - elif new_difficulty != self.curriculum_scheduler.get_difficulty( \ + self.curriculum_scheduler_legacy.first_step = False + elif new_difficulty != self.curriculum_scheduler_legacy.get_difficulty( \ self.global_steps): self.reset_activation_shape() @@ -357,7 +350,7 @@ class PipelineEngine(DeepSpeedEngine): if self.global_steps % self.steps_per_print() == 0: if self.global_rank == 0: - elapsed = self.timers('train_batch').elapsed(reset=True) + elapsed = self.timers('train_batch').elapsed(reset=True) / 1000.0 iter_time = elapsed / self.steps_per_print() tput = self.train_batch_size() / 
iter_time print(f'steps: {self.global_steps} ' @@ -365,16 +358,12 @@ class PipelineEngine(DeepSpeedEngine): f'iter time (s): {iter_time:0.3f} ' f'samples/sec: {tput:0.3f}') - # Tensorboard - if self.tensorboard_enabled(): - if self.global_rank == 0: - self.summary_events = [(f'Train/Samples/train_loss', - self.agg_train_loss.mean().item(), - self.global_samples)] - for event in self.summary_events: # write_summary_events - self.summary_writer.add_scalar(event[0], event[1], event[2]) - if self.global_steps % self.steps_per_print() == 0: - self.summary_writer.flush() + # Monitoring + if self.global_rank == 0 and self.monitor.enabled: + self.summary_events = [(f'Train/Samples/train_loss', + self.agg_train_loss.mean().item(), + self.global_samples)] + self.monitor.write_events(self.summary_events) if self.wall_clock_breakdown( ) and self.global_steps % self.steps_per_print() == 0: @@ -423,13 +412,13 @@ class PipelineEngine(DeepSpeedEngine): self.module.eval() # Curriculum learning could change activation shape - if self.curriculum_enabled(): - new_difficulty = self.curriculum_scheduler.update_difficulty( \ + if self.curriculum_enabled_legacy(): + new_difficulty = self.curriculum_scheduler_legacy.update_difficulty( \ self.global_steps + 1) - if self.global_steps == 0 or self.curriculum_scheduler.first_step: + if self.global_steps == 0 or self.curriculum_scheduler_legacy.first_step: self.reset_activation_shape() - self.curriculum_scheduler.first_step = False - elif new_difficulty != self.curriculum_scheduler.get_difficulty( \ + self.curriculum_scheduler_legacy.first_step = False + elif new_difficulty != self.curriculum_scheduler_legacy.get_difficulty( \ self.global_steps): self.reset_activation_shape() @@ -445,6 +434,10 @@ class PipelineEngine(DeepSpeedEngine): sched = schedule.InferenceSchedule(micro_batches=self.micro_batches, stages=self.num_stages, stage_id=self.stage_id) + + # prevent dead-lock with multiple evals sequence + dist.barrier() + with torch.no_grad(): 
self._exec_schedule(sched) @@ -454,14 +447,11 @@ class PipelineEngine(DeepSpeedEngine): if compute_loss: eval_output = self._bcast_pipe_scalar(eval_output) - if self.tensorboard_enabled(): - if self.global_rank == 0: - self.summary_events = [(f'Train/Samples/eval_loss', - eval_output.mean().item(), - self.global_samples)] - for event in self.summary_events: # write_summary_events - self.summary_writer.add_scalar(event[0], event[1], event[2]) - self.summary_writer.flush() + if self.global_rank == 0 and self.monitor.enabled: + self.summary_events = [(f'Train/Samples/eval_loss', + eval_output.mean().item(), + self.global_samples)] + self.monitor.write_events(self.summary_events) # Restore the training iterator self.set_dataiterator(train_iterator) @@ -590,6 +580,11 @@ class PipelineEngine(DeepSpeedEngine): self.data_iterator = iterator def set_batch_fn(self, fn): + """Execute a post-processing function on input data. + + Args: + fn (function): The function to run. + """ self.batch_fn = fn def is_gradient_accumulation_boundary(self): @@ -690,9 +685,9 @@ class PipelineEngine(DeepSpeedEngine): # Optionally compute loss on the last device if self.is_last_stage(): - if self._compute_loss and self.loss_model is not None: + if self._compute_loss and self.module.loss_fn is not None: labels = self.pipe_buffers['labels'][buffer_id] - self.loss = self.loss_model(outputs, labels) + self.loss = self.module.loss_fn(outputs, labels) else: # Some models just return loss from forward() self.loss = outputs @@ -1167,17 +1162,15 @@ class PipelineEngine(DeepSpeedEngine): self.mem_status('AFTER STEP') - if self.tensorboard_enabled(): - if self.global_rank == 0: - self.summary_events = [(f'Train/Samples/lr', - self.get_lr()[0], - self.global_samples)] - if self.fp16_enabled() and hasattr(self.optimizer, 'cur_scale'): - self.summary_events.append((f'Train/Samples/loss_scale', - self.optimizer.cur_scale, - self.global_samples)) - for event in self.summary_events: # write_summary_events - 
self.summary_writer.add_scalar(event[0], event[1], event[2]) + if self.global_rank == 0 and self.monitor.enabled: + self.summary_events = [(f'Train/Samples/lr', + self.get_lr()[0], + self.global_samples)] + if self.fp16_enabled() and hasattr(self.optimizer, 'cur_scale'): + self.summary_events.append((f'Train/Samples/loss_scale', + self.optimizer.cur_scale, + self.global_samples)) + self.monitor.write_events(self.summary_events) if self.wall_clock_breakdown(): self.timers('step_microstep').stop() @@ -1278,14 +1271,14 @@ class PipelineEngine(DeepSpeedEngine): if print_rank != -1 and rank != print_rank: return - torch.cuda.synchronize() + get_accelerator().synchronize() if reset_max: - torch.cuda.reset_max_memory_cached() - torch.cuda.reset_max_memory_allocated() + get_accelerator().reset_max_memory_cached() + get_accelerator().reset_max_memory_allocated() - new_alloced = torch.cuda.memory_allocated() - new_cached = torch.cuda.memory_cached() + new_alloced = get_accelerator().memory_allocated() + new_cached = get_accelerator().memory_cached() delta_alloced = new_alloced - mem_alloced delta_cached = new_cached - mem_cached @@ -1293,8 +1286,8 @@ class PipelineEngine(DeepSpeedEngine): mem_cached = new_cached mem_alloced = new_alloced - max_alloced = torch.cuda.max_memory_allocated() - max_cached = torch.cuda.max_memory_cached() + max_alloced = get_accelerator().max_memory_allocated() + max_cached = get_accelerator().max_memory_cached() # convert to GB for printing new_alloced /= 1024**3 @@ -1325,10 +1318,11 @@ class PipelineEngine(DeepSpeedEngine): assert self._curr_ckpt_path is not None, \ "PipelineEngine expects module_state_dict() to be called from save_checkpoint()" - self.module.save_state_dict(self._curr_ckpt_path) + self.module.save_state_dict(self._curr_ckpt_path, + checkpoint_engine=self.checkpoint_engine) return None - def load_module_state_dict(self, state_dict, strict=True): + def load_module_state_dict(self, state_dict, strict=True, custom_load_fn=None): 
"""Override hack to instead use a directory path. This is important because pipeline models checkpoint by layer instead of rank. @@ -1339,12 +1333,14 @@ class PipelineEngine(DeepSpeedEngine): state_dict (str, None): unused strict (bool, optional): Strict state loading. Defaults to True. """ - + assert custom_load_fn is None, "custom_load_fn not supported w. pipeline parallelism" if (state_dict is not None) and (not isinstance(state_dict, str)): super().load_module_state_dict(state_dict, strict) return - self.module.load_state_dir(load_dir=self._curr_ckpt_path, strict=strict) + self.module.load_state_dir(load_dir=self._curr_ckpt_path, + strict=strict, + checkpoint_engine=self.checkpoint_engine) # A map of PipeInstruction types to methods. Each method will be executed with the # kwargs provided to the PipeInstruction from the scheduler. @@ -1378,11 +1374,3 @@ class PipelineEngine(DeepSpeedEngine): # Equivalent to: self._exec_forward_pass(buffer_id=0) self._exec_instr = MethodType(self._INSTRUCTION_MAP[type(cmd)], self) self._exec_instr(**cmd.kwargs) - - def set_batch_fn(self, fn): - """Execute a post-processing function on input data. - - Args: - fn (function): The function to run. - """ - self.batch_fn = fn diff --git a/deepspeed/runtime/pipe/module.py b/deepspeed/runtime/pipe/module.py index 4121a0f..acf066b 100644 --- a/deepspeed/runtime/pipe/module.py +++ b/deepspeed/runtime/pipe/module.py @@ -1,21 +1,22 @@ +'''Copyright The Microsoft DeepSpeed Team''' + import os import glob -import enum import re as regex -from collections import defaultdict from functools import partial import torch import torch.nn as nn -import torch.distributed as dist +from deepspeed import comm as dist from deepspeed.utils import logger from .. 
import utils as ds_utils from ..activation_checkpointing import checkpointing from .topology import PipeDataParallelTopology, PipelineParallelGrid from deepspeed.runtime.state_dict_factory import SDLoaderFactory +from deepspeed.accelerator import get_accelerator class PipelineError(Exception): @@ -85,6 +86,40 @@ class TiedLayerSpec(LayerSpec): class PipelineModule(nn.Module): + """Modules to be parallelized with pipeline parallelism. + + The key constraint that enables pipeline parallelism is the + representation of the forward pass as a sequence of layers + and the enforcement of a simple interface between them. The + forward pass is implicitly defined by the module ``layers``. The key + assumption is that the output of each layer can be directly fed as + input to the next, like a ``torch.nn.Sequence``. The forward pass is + implicitly: + + .. code-block:: python + + def forward(self, inputs): + x = inputs + for layer in self.layers: + x = layer(x) + return x + + .. note:: + Pipeline parallelism is not compatible with ZeRO-2 and ZeRO-3. + + Args: + layers (Iterable): A sequence of layers defining pipeline structure. Can be a ``torch.nn.Sequential`` module. + num_stages (int, optional): The degree of pipeline parallelism. If not specified, ``topology`` must be provided. + topology (``deepspeed.runtime.pipe.ProcessTopology``, optional): Defines the axes of parallelism axes for training. Must be provided if ``num_stages`` is ``None``. + loss_fn (callable, optional): Loss is computed ``loss = loss_fn(outputs, label)`` + seed_layers(bool, optional): Use a different seed for each layer. Defaults to False. + seed_fn(type, optional): The custom seed generating function. Defaults to random seed generator. + base_seed (int, optional): The starting seed. Defaults to 1234. + partition_method (str, optional): The method upon which the layers are partitioned. Defaults to 'parameters'. 
+ activation_checkpoint_interval (int, optional): The granularity activation checkpointing in terms of number of layers. 0 disables activation checkpointing. + activation_checkpoint_func (callable, optional): The function to use for activation checkpointing. Defaults to ``deepspeed.checkpointing.checkpoint``. + checkpointable_layers(list, optional): Checkpointable layers may not be checkpointed. Defaults to None which does not additional filtering. + """ def __init__(self, layers, num_stages=None, @@ -97,37 +132,6 @@ class PipelineModule(nn.Module): activation_checkpoint_interval=0, activation_checkpoint_func=checkpointing.checkpoint, checkpointable_layers=None): - """Modules to be parallelized with pipeline parallelism. - - The key constraint that enables pipeline parallelism is the - representation of the forward pass as a sequence of layers - and the enforcement of a simple interface between them. The - forward pass is implicitly defined by the module ``layers``. The key - assumption is that the output of each layer can be directly fed as - input to the next, like a ``torch.nn.Sequence``. The forward pass is - implicitly: - - .. code-block:: python - - def forward(self, inputs): - x = inputs - for layer in self.layers: - x = layer(x) - return x - - .. note:: - Pipeline parallelism is not compatible with ZeRO-2 and ZeRO-3. - - Args: - layers (Iterable): A sequence of layers defining pipeline structure. Can be a ``torch.nn.Sequential`` module. - num_stages (int, optional): The degree of pipeline parallelism. If not specified, ``topology`` must be provided. - topology (``deepseed.runtime.pipe.ProcessTopology``, optional): Defines the axes of parallelism axes for training. Must be provided if ``num_stages`` is ``None``. - loss_fn (callable, optional): Loss is computed ``loss = loss_fn(outputs, label)`` - base_seed (int, optional): [description]. Defaults to 1234. - partition_method (str, optional): [description]. Defaults to 'parameters'. 
- activation_checkpoint_interval (int, optional): The granularity activation checkpointing in terms of number of layers. 0 disables activation checkpointing. - activation_checkpoint_func (callable, optional): The function to use for activation checkpointing. Defaults to ``deepspeed.checkpointing.checkpoint``. - """ super().__init__() @@ -194,12 +198,12 @@ class PipelineModule(nn.Module): self.tied_weight_attrs = {} # Offset the random seed by the stage ID. - #newseed = torch.cuda.initial_seed() + self._grid.get_stage_id() + #newseed = get_accelerator().initial_seed() + self._grid.get_stage_id() #ds_utils.set_random_seed(newseed) - #with torch.random.fork_rng(devices=[torch.cuda.current_device()]): + #with torch.random.fork_rng(devices=[get_accelerator().current_device_name()]): self._build() - self.to(f'cuda:{self.local_rank}') + self.to(get_accelerator().device_name(self.local_rank)) self.tied_comms = self._index_tied_modules() self._synchronize_tied_weights() @@ -563,14 +567,29 @@ class PipelineModule(nn.Module): ckpt_files.sort() return ckpt_files - def save_state_dict(self, save_dir): - if self._grid.data_parallel_id != 0: - return + def save_state_dict(self, save_dir, checkpoint_engine): + # Processes having the same model parallel rank on different data parallel instances + # have identical layer weights. We can distribute the task of saving the layer weights + # among the data parallel ranks. For example, if a pipeline stage has 9 layers and + # if there are 2 data parallel instances, rank 0 will save the first 5 layers and + # rank 1 will save the last 4. 
+ dp_rank = self._grid.data_parallel_id + dp_size = self._grid.data_parallel_size + num_layers = len(self.forward_funcs) + if self.checkpoint_parallel_write_pipeline: + # spread layers evenly across data parallel ranks + offsets = ds_utils.partition_uniform(num_layers, dp_size) + start, end = offsets[dp_rank], offsets[dp_rank + 1] + else: + # data parallel rank 0 writes all layers + if dp_rank != 0: + return + start, end = 0, num_layers + layer_list = self.forward_funcs[start:end] os.makedirs(save_dir, exist_ok=True) - layer_offset = self._local_start - for idx, layer in enumerate(self.forward_funcs): - model_ckpt_path = self.ckpt_layer_path(save_dir, idx) + for idx, layer in enumerate(layer_list): + model_ckpt_path = self.ckpt_layer_path(save_dir, start + idx) if not hasattr(layer, 'state_dict'): continue # We pass cloned tensors to torch.save() to avoid checkpoint bloat which occurs because torch.save() @@ -584,9 +603,9 @@ class PipelineModule(nn.Module): {k: v.clone() for k, v in orig_state_dict.items()}) - torch.save(final_state_dict, model_ckpt_path) + checkpoint_engine.save(final_state_dict, model_ckpt_path) - def load_state_dir(self, load_dir, strict=True): + def load_state_dir(self, load_dir, checkpoint_engine, strict=True): for idx, layer in enumerate(self.forward_funcs): # Functions, etc. 
will not have state_dicts if not hasattr(layer, 'load_state_dict'): @@ -597,7 +616,10 @@ class PipelineModule(nn.Module): mp_rank = self._grid.get_slice_parallel_rank() mp_world_size = self._grid.get_slice_parallel_world_size() - sd_loader = SDLoaderFactory.get_sd_loader(model_ckpt_list, version=2.0) + sd_loader = SDLoaderFactory.get_sd_loader( + model_ckpt_list, + version=2.0, + checkpoint_engine=checkpoint_engine) load_path, checkpoint, _ = sd_loader.load(mp_world_size, mp_rank, module_key=None, is_pipe_parallel=True) layer.load_state_dict(checkpoint) diff --git a/deepspeed/runtime/pipe/p2p.py b/deepspeed/runtime/pipe/p2p.py index 8a1b719..93c6111 100644 --- a/deepspeed/runtime/pipe/p2p.py +++ b/deepspeed/runtime/pipe/p2p.py @@ -6,11 +6,12 @@ import pickle import typing import torch -import torch.distributed as dist +from deepspeed import comm as dist # To query whether we have send/recv support from packaging.version import Version from deepspeed.git_version_info import torch_info +from deepspeed.accelerator import get_accelerator _groups = None _grid = None @@ -25,7 +26,7 @@ def can_send_recv() -> bool: #initializes adjacent process groups -#run this only after torch.distributed.init_process_group() has been called +#run this only after deepspeed.init_distributed() has been called def init_process_groups(grid): global _groups, _grid _grid = grid @@ -92,7 +93,7 @@ def wait(): op.wait() _async = [] - torch.cuda.synchronize() + get_accelerator().synchronize() def send_obj(msg: typing.Any, dest: int): @@ -110,10 +111,12 @@ def send_obj(msg: typing.Any, dest: int): # serialize the message msg = pickle.dumps(msg) # construct a tensor to send - msg = torch.ByteTensor(torch.ByteStorage.from_buffer(msg)).cuda() + msg = torch.ByteTensor(torch.ByteStorage.from_buffer(msg)).to( + get_accelerator().device_name()) # Send meta and message - length_tensor = torch.tensor([len(msg)], dtype=torch.long).cuda() + length_tensor = torch.tensor([len(msg)], + 
dtype=torch.long).to(get_accelerator().device_name()) dist.send(length_tensor, dst=dest) dist.send(msg, dst=dest) @@ -128,11 +131,12 @@ def recv_obj(sender: int) -> typing.Any: sender (int): The rank sending the message. """ # Get message meta - length = torch.tensor([0], dtype=torch.long).cuda() + length = torch.tensor([0], dtype=torch.long).to(get_accelerator().device_name()) dist.recv(length, src=sender) # Receive and deserialize - msg = torch.empty(length.item(), dtype=torch.uint8).cuda() + msg = torch.empty(length.item(), + dtype=torch.uint8).to(get_accelerator().device_name()) dist.recv(msg, src=sender) msg = pickle.loads(msg.cpu().numpy().tobytes()) @@ -140,7 +144,7 @@ def recv_obj(sender: int) -> typing.Any: def _to(x): """Recursively move to the current device.""" if torch.is_tensor(x): - return x.cuda() + return x.to(get_accelerator().device_name()) if isinstance(x, (tuple, list)): ret = [_to(x_) for x_ in x] if isinstance(x, tuple): diff --git a/deepspeed/runtime/pipe/schedule.py b/deepspeed/runtime/pipe/schedule.py index 181d43e..19e73da 100644 --- a/deepspeed/runtime/pipe/schedule.py +++ b/deepspeed/runtime/pipe/schedule.py @@ -1,3 +1,5 @@ +'''Copyright The Microsoft DeepSpeed Team''' + from ..utils import call_to_str from abc import ABC, abstractmethod diff --git a/deepspeed/runtime/pipe/topology.py b/deepspeed/runtime/pipe/topology.py index 240c973..6c0cd96 100644 --- a/deepspeed/runtime/pipe/topology.py +++ b/deepspeed/runtime/pipe/topology.py @@ -1,9 +1,6 @@ # Copyright 2019 The Microsoft DeepSpeed Team -from deepspeed.utils import logger - -import torch.distributed as dist -import sys +from deepspeed import comm as dist from collections import namedtuple from itertools import product as cartesian_product @@ -58,7 +55,7 @@ class ProcessTopology: raise ValueError('get_rank() does not support slices. 
Use filter_match())') key = self.ProcessCoord(**coord_kwargs) - assert key in self.mapping, f'key {kwargs} invalid' + assert key in self.mapping, f'key {coord_kwargs} invalid' return self.mapping[key] def get_axis_names(self): diff --git a/deepspeed/runtime/progressive_layer_drop.py b/deepspeed/runtime/progressive_layer_drop.py old mode 100644 new mode 100755 index 41c08cf..65e1a56 --- a/deepspeed/runtime/progressive_layer_drop.py +++ b/deepspeed/runtime/progressive_layer_drop.py @@ -1,3 +1,5 @@ +'''Copyright The Microsoft DeepSpeed Team''' + import numpy as np from deepspeed.utils import log_dist diff --git a/deepspeed/runtime/quantize.py b/deepspeed/runtime/quantize.py old mode 100644 new mode 100755 index 05fc502..81a7bd5 --- a/deepspeed/runtime/quantize.py +++ b/deepspeed/runtime/quantize.py @@ -1,20 +1,15 @@ +'''Copyright The Microsoft DeepSpeed Team''' + import torch import math -from deepspeed.utils import log_dist from deepspeed.utils import logger from deepspeed.ops.quantizer import ds_quantizer -# number of 2-dimensional parameters in a layer -# this is set for transformer-based models TWO_D_PARAMS = 6 class Quantizer(object): def __init__(self, - q_target_bits=8, - q_start_bits=16, - q_period=100, - q_offset=100, q_groups=1, q_mixed_fp16=False, q_change_ratio=0.01, @@ -25,17 +20,11 @@ class Quantizer(object): use_quantizer_kernel=False, layer_num=0): - self.q_target_bits = q_target_bits - - self.q_start_bits = [q_start_bits] * (layer_num if layer_num != 0 else 1) - self.q_period = [q_period] * (layer_num if layer_num != 0 else 1) - self.q_offset = q_offset self.q_groups = q_groups self.q_mixed_fp16 = q_mixed_fp16 self.q_change_ratio = q_change_ratio self.q_type = q_type self.qsteps = 0 - self.q_init_period = q_period self.quantize_real_ratio = 1.000 self.q_verbose = q_verbose self.q_eigenvalue = q_eigenvalue @@ -44,6 +33,7 @@ class Quantizer(object): self.layer_num = layer_num def any_precision_switch(self): + # Temporary disabled functionality if 
self.layer_num == 0: return True result = False @@ -70,54 +60,69 @@ class Quantizer(object): for i in range(len(parameter_group)): for p in parameter_group[i]: - if len(p.size()) > 1: + if len(p.size()) > 1 and hasattr(p, "start_bits") and p.start_bits: param_id = id(p) - eigenvalue, layer_id = block_eigenvalue[param_id] if param_id in block_eigenvalue else (None, 0) + if block_eigenvalue is None: + eigenvalue, layer_id = None, 0 + else: + eigenvalue, layer_id = block_eigenvalue[param_id] if param_id in block_eigenvalue else (None, 0) if eigenvalue is not None: factor = 1 + math.floor(eigenvalue * 4) p.data = self.compute_quantization(p.data, layer_id, factor) else: - p.data = self.compute_quantization(p.data, layer_id) + p.data = self.compute_quantization(p, layer_id) def step(self): - self.qsteps += (TWO_D_PARAMS * (self.layer_num if self.layer_num != 0 else 1)) + self.qsteps += 1 + + def quantize_highbit(self, inputs, num_bits): + + q_range = 2**num_bits + input_flat = inputs.reshape(self.q_groups, -1) + g_min = input_flat.amin(dim=-1, keepdim=True) + g_max = input_flat.amax(dim=-1, keepdim=True) - def sr_quantize(self, input_flat, input_g, scale): # Random number generator (Uniform) - p = torch.cuda.FloatTensor(input_flat.size(), - device=input_flat.device).uniform_() - p = torch.split(p, p.size(0) // self.q_groups) - add_s = torch.zeros_like(input_flat) - add_s = torch.split(add_s, add_s.size(0) // self.q_groups) - - scale = [q_range / (2 * max(g.max(), g.min().abs())) for g in input_g] - # Quantize with INT rounding - input_flat = [(g * s).int().float() / s for (g, s) in zip(input_g, scale)] - # Compute the error - error = [((g - q).abs() / s) for (g, s, q) in zip(input_g, scale, input_flat)] - # Stochastic Rounding - add_s = [ - a_s.masked_fill_(pg < err_g, - 1 / s) for (a_s, - pg, - err_g, - s) in zip(add_s, - p, - error, - scale) - ] - add_s = [ - a_s * (g > 0).float() - a_s * (g < 0).float() for a_s, - g in zip(add_s, - input_flat) - ] - input_flat = [((q 
+ a_s) * s).clamp(-(q_range >> 1), - (q_range >> 1) - 1) / s for q, - a_s, - s in zip(input_flat, - add_s, - scale)] - return input_flat + if self.q_rounding == 'nearest': + p = 0. + else: + p = input_flat.new(input_flat.shape).uniform_(-0.5, 0.5) + + if self.q_type == 'symmetric': + scale = 2 * torch.max(torch.abs(g_min), torch.abs(g_max)) / q_range + zero_point = 0. + input_flat = (input_flat / scale + p).round().clamp( + -(q_range >> 1), + (q_range >> 1) - 1) * scale + elif self.q_type == 'asymmetric': + scale = (g_max - g_min) / q_range + zero_point = (g_min / scale).round() * scale + input_flat = ((input_flat - zero_point) / scale + p).round().clamp( + 0, + (q_range - 1)) * scale + zero_point + output = input_flat.reshape(inputs.shape).contiguous() + return output + + def quantize_tenary(self, inputs): + input_flat = inputs.reshape(self.q_groups, -1) + n = input_flat.shape[1] + m = input_flat.norm(p=1, dim=1).div(n) + thres = (0.7 * m).view(-1, 1) #.expand_as(input_flat) + pos = (input_flat > thres).type(inputs.type()) + neg = (input_flat < -thres).type(inputs.type()) + mask = (input_flat.abs() > thres).type(inputs.type()) + alpha = ((mask * input_flat).abs().sum(dim=1) / mask.sum(dim=1)).view(-1, 1) + output = alpha * pos - alpha * neg + output = output.reshape(inputs.shape).contiguous() + return output + + def quantize_binary(self, inputs): + input_flat = inputs.reshape(self.q_groups, -1) + n = input_flat.shape[1] + m = input_flat.norm(p=1, dim=1, keepdim=True).div(n) + output = input_flat.sign().mul(m) + output = output.reshape(inputs.shape).contiguous() + return output def mixed_fp16_quantize(self, input, input_q, index): if self.q_mixed_fp16 and self.q_start_bits[index] >= (self.q_target_bits - 1): @@ -131,90 +136,49 @@ class Quantizer(object): # when reducing 1 bit at each period, we increase the period # to go slowly toward the target quantization bits # the period and starting bit can be configured - if self.q_offset > 0: - if self.qsteps >= 
self.q_offset: - self.q_offset = 0 - self.qsteps = 0 - else: - return input - if self.q_start_bits[index] != self.q_target_bits: - if self.qsteps >= self.q_period[index]: + if input.start_bits != input.target_bits: + if self.qsteps >= input.q_period: self.quantize_real_ratio = 1.0 - if self.q_eigenvalue: - self.q_period[index] <<= 1 - self.q_period[index] *= factor - self.q_start_bits[index] -= 1 - else: - for i in range(len(self.q_start_bits)): - self.q_start_bits[i] -= 1 - self.q_period[i] <<= 1 + input.q_period <<= 1 + input.q_period *= factor + input.start_bits -= 1 if self.q_verbose: logger.info( - f'Quantization settings: current bit-precision = {self.q_start_bits[index]}, step = {self.qsteps}, quantization period = {self.q_period[index]}, index = {index}' + f'Quantization settings: current bit-precision = {input.start_bits}, step = {self.qsteps}, quantization period = {input.q_period}, index = {index}' ) - assert (self.q_start_bits[index] >= self.q_target_bits), \ + assert (input.start_bits >= input.target_bits), \ 'Quantization bit is lower than target precision bits!' 
- # quantize the weights base on the selected bits and the value-range - if not self.use_quantizer_kernel: - q_range = 2**self.q_start_bits[index] - input_flat = input.view(-1) - input_g = torch.split(input_flat, input_flat.size(0) // self.q_groups) - if self.q_type == 0: #symmetric - if self.use_quantizer_kernel: - input_q = ds_quantizer(input.clone(), - self.q_groups, - self.q_start_bits[index]) - else: - scale = [q_range / (2 * max(g.max(), g.min().abs())) for g in input_g] - if self.q_rounding == 0: # Nearest value rounding - input_flat = [(g * s).round().clamp(-(q_range >> 1), - (q_range >> 1) - 1) / s for g, - s in zip(input_g, - scale)] - else: # Stochastic Rounding - if self.use_quantizer_kernel: - input_q = ds_quantizer(input.clone(), - self.q_groups, - self.q_start_bits[index], - sr=True) - else: - input_flat = self.sr_quantize(input_flat, input_g) - else: #asymmetric - if self.q_rounding == 0: - if self.use_quantizer_kernel: - input_q = ds_quantizer(input.clone(), - self.q_groups, - self.q_start_bits[index], - asym=True) - else: - scale = [(g.max() - g.min()) / q_range for g in input_g] - input_flat = [ - ((g - g.min()) / s).round().clamp(0, - (q_range - 1)) * s + g.min() - for g, - s in zip(input_g, - scale) - ] - else: - input_q = ds_quantizer(input.clone(), - self.q_groups, - self.q_start_bits[index], - asym=True) - - if self.use_quantizer_kernel or (self.q_type and self.q_rounding): - return self.mixed_fp16_quantize(input, input_q, index) + if self.use_quantizer_kernel: + if input.start_bits <= 2: + raise ValueError( + 'Quantization bit is too low, please do it without quantization kernel!' 
+ ) + input_q = ds_quantizer( + input.data.clone(), + self.q_groups, + input.start_bits, + asym=False if self.q_type == 'symmetric' else True, + sr=False if self.q_rounding == 'nearest_neighbor' else True) else: - if self.q_mixed_fp16 and self.q_start_bits[index] >= (self.q_target_bits - - 1): - input_flat = [(self.quantize_real_ratio * g) + - ((1 - self.quantize_real_ratio) * g_q) for g, - g_q in zip(input_g, - input_flat)] - input_q = torch.cat(input_flat) - input_q = input_q.reshape(input.size()) - return input_q + if input.start_bits >= 3: + input_flat = self.quantize_highbit(input.data, input.start_bits) + elif input.start_bits == 2: + assert self.q_type == 'symmetric', 'Quantization type is not symmetric!' + assert self.q_rounding == 'nearest', 'Quantization rounding is not nearest_neighbor!' + input_flat = self.quantize_tenary(input.data) + elif input.start_bits == 1: + assert self.q_type == 'symmetric', 'Quantization type is not symmetric!' + assert self.q_rounding == 'nearest', 'Quantization rounding is not nearest_neighbor!' 
+ input_flat = self.quantize_binary(input.data) + if self.use_quantizer_kernel: + return self.mixed_fp16_quantize(input.data, input_q, index) + else: + if self.q_mixed_fp16 and input.start_bits >= input.target_bits - 1: + input_flat = self.quantize_real_ratio * input.data + \ + (1 - self.quantize_real_ratio) * input_flat + return input_flat def update_fp16_ratio(self): if self.q_mixed_fp16: diff --git a/deepspeed/runtime/state_dict_factory.py b/deepspeed/runtime/state_dict_factory.py old mode 100644 new mode 100755 index 09887aa..1f5c97f --- a/deepspeed/runtime/state_dict_factory.py +++ b/deepspeed/runtime/state_dict_factory.py @@ -8,7 +8,10 @@ import copy import collections import json from abc import ABC, abstractmethod + from deepspeed.utils import logger +from deepspeed.runtime.checkpoint_engine.torch_checkpoint_engine import TorchCheckpointEngine + from .weight_quantizer import WeightQuantization AUTO_MODULE_KEY = 'auto' @@ -16,28 +19,41 @@ AUTO_MODULE_KEY = 'auto' class SDLoaderFactory: @staticmethod - def get_sd_loader_json(json_file): - with open(json_file) as f: - data = json.load(f) - sd_type = data['type'] - ckpt_list = data['checkpoints'] - version = data['version'] - return SDLoaderFactory.get_sd_loader(ckpt_list, sd_type, version) + def get_sd_loader_json(json_file, checkpoint_engine): + if isinstance(json_file, str): + with open(json_file) as f: + data = json.load(f) + else: + assert isinstance(json_file, dict) + data = json_file + sd_type = data['type'] + ckpt_list = data['checkpoints'] + version = data['version'] + ckpt_type = data.get('parallelization', 'pp') + mp_size = data.get('mp_size', 0) + if sd_type.lower() in ['bloom', 'ds_model']: + return data + return SDLoaderFactory.get_sd_loader(ckpt_list, + checkpoint_engine, + sd_type, + version) @staticmethod - def get_sd_loader(ckpt_list, sd_type='Megatron', version=None): + def get_sd_loader(ckpt_list, checkpoint_engine, sd_type='Megatron', version=None): if sd_type == 'Megatron': - return 
MegatronSDLoader(ckpt_list, version) + return MegatronSDLoader(ckpt_list, version, checkpoint_engine) else: assert False, '{} checkpoint type is not supported'.format(sd_type) class SDLoaderBase(ABC): - def __init__(self, ckpt_list, version): + def __init__(self, ckpt_list, version, checkpoint_engine): self.module_key = None self.ckpt_list = ckpt_list - self.check_ckpt_list() self.version = version + self.checkpoint_engine = TorchCheckpointEngine( + ) if checkpoint_engine is None else checkpoint_engine + self.check_ckpt_list() def load(self, mp_world_size, @@ -79,7 +95,8 @@ class SDLoaderBase(ABC): if num_ckpt == mp_world_size: assert os.path.exists(load_path) #logger.info(f'rank: {mp_rank} loading checkpoint: {load_path}') - sd = torch.load(load_path, map_location=lambda storage, loc: storage) + sd = self.checkpoint_engine.load(load_path, map_location=lambda storage, \ + loc: storage) if quantize: quantizer = WeightQuantization(mlp_extra_grouping=mlp_extra_grouping, @@ -108,9 +125,9 @@ class SDLoaderBase(ABC): logger.info(f"mp_rank: {mp_rank}, ckpt_list: {ckpt_list}") sd_list = [ - torch.load(ckpt, - map_location=lambda storage, - loc: storage) for ckpt in ckpt_list + self.checkpoint_engine.load(ckpt, + map_location=lambda storage, + loc: storage) for ckpt in ckpt_list ] return sd_list @@ -126,9 +143,9 @@ class SDLoaderBase(ABC): f"mp_rank: {mp_rank}, ckpt_list: {self.ckpt_list[ckpt_index]}, offset: {ckpt_offset}" ) - sd = torch.load(self.ckpt_list[ckpt_index], - map_location=lambda storage, - loc: storage) + sd = self.checkpoint_engine.load(self.ckpt_list[ckpt_index], + map_location=lambda storage, + loc: storage) return sd, num_to_split, ckpt_offset @@ -161,7 +178,9 @@ class SDLoaderBase(ABC): #logger.info(f'checkpoint file list: {self.ckpt_list}') assert len(self.ckpt_list) > 0 - sd = torch.load(self.ckpt_list[0], map_location=lambda storage, loc: storage) + sd = self.checkpoint_engine.load(self.ckpt_list[0], + map_location=lambda storage, + loc: storage) # 
check checkpoint count is same with saved mp_world_size if 'mp_world_size' in sd.keys(): @@ -193,8 +212,8 @@ class SDLoaderBase(ABC): class MegatronSDLoader(SDLoaderBase): - def __init__(self, ckpt_list, version): - super().__init__(ckpt_list, version) + def __init__(self, ckpt_list, version, checkpoint_engine): + super().__init__(ckpt_list, version, checkpoint_engine) """ ## Q/K/V data need special processing key: transformer.layers.0.attention.query_key_value.weight, shape: torch.Size([3192, 4256]) @@ -431,7 +450,9 @@ class MegatronSDLoader(SDLoaderBase): "mlp.dense_h_to_4h.bias" ] - sd = torch.load(ckpt_file_name, map_location=lambda storage, loc: storage) + sd = self.checkpoint_engine.load(ckpt_file_name, + map_location=lambda storage, + loc: storage) # partial_key is a sub-string of one key in the sd def check_key_exist(partial_key, sd): diff --git a/deepspeed/runtime/swap_tensor/async_swapper.py b/deepspeed/runtime/swap_tensor/async_swapper.py index e6e19a4..45614ab 100644 --- a/deepspeed/runtime/swap_tensor/async_swapper.py +++ b/deepspeed/runtime/swap_tensor/async_swapper.py @@ -6,6 +6,7 @@ Functionality of swapping tensors to/from (NVMe) storage devices. 
""" import torch +from deepspeed import comm as dist from deepspeed.utils.logging import logger from deepspeed.runtime.swap_tensor.utils import swap_out_tensors, SwapBuffer @@ -66,10 +67,10 @@ class AsyncTensorSwapper(object): self._swap_out_tensor(tensor, swap_path) def _report_statistics(self, message): - if torch.distributed.get_rank() == 0: + if dist.get_rank() == 0: element_size = torch.tensor([], dtype=self.dtype).element_size() swapped_GB = (self.num_elements_swapped * element_size) / (1024**3) - logger.info( + logger.debug( f'{message} num_elems = {self.num_elements_swapped}, {swapped_GB:5.2f} GB' ) diff --git a/deepspeed/runtime/swap_tensor/optimizer_utils.py b/deepspeed/runtime/swap_tensor/optimizer_utils.py index a08af96..70b806c 100644 --- a/deepspeed/runtime/swap_tensor/optimizer_utils.py +++ b/deepspeed/runtime/swap_tensor/optimizer_utils.py @@ -8,11 +8,11 @@ Functionality of swapping tensors to/from (NVMe) storage devices. import os import torch +from deepspeed import comm as dist from deepspeed.utils.logging import logger -from deepspeed.runtime.zero.offload_constants import * from deepspeed.runtime.swap_tensor.constants import * from deepspeed.runtime.swap_tensor.utils import swap_in_tensors, swap_out_tensors, \ - MIN_AIO_BYTES, AIO_ALIGNED_BYTES, get_sized_buffers, get_sized_buffer + MIN_AIO_BYTES, AIO_ALIGNED_BYTES, get_sized_buffers from deepspeed.runtime.swap_tensor.utils import SwapBufferManager, SwapBufferPool @@ -133,7 +133,7 @@ class OptimizerSwapper(object): self.swap_element_size = torch.tensor([], dtype=dtype).element_size() self.swap_folder = os.path.join(base_folder, 'optimizer', - f'rank{torch.distributed.get_rank()}') + f'rank{dist.get_rank()}') os.makedirs(self.swap_folder, exist_ok=True) self.optimizer = optimizer @@ -146,10 +146,9 @@ class OptimizerSwapper(object): # Swap buffer management self.largest_numel = self._io_aligned_numel(largest_numel) self.dtype = dtype - self.swap_buffer_manager = SwapBufferManager( - 
num_elems=self.largest_numel, - count=swap_config[OFFLOAD_OPTIMIZER_BUFFER_COUNT], - dtype=dtype) + self.swap_buffer_manager = SwapBufferManager(num_elems=self.largest_numel, + count=swap_config.buffer_count, + dtype=dtype) # Timers self.timers = timers @@ -271,7 +270,7 @@ class OptimizerSwapper(object): fp16_partitions_info=fp16_partitions_info[curr_index:], fp16_swap_buffers=fp16_swap_buffers) - if torch.distributed.get_rank() == 0 and SWAPPER_DEBUG_MODE: + if dist.get_rank() == 0 and SWAPPER_DEBUG_MODE: for i, tensor in enumerate(fp16_pinned_tensors): true_index = curr_index + i logger.info( @@ -376,7 +375,7 @@ class OptimizerSwapper(object): dest_paths=swap_paths, pinned_buffers=pinned_buffers) - if torch.distributed.get_rank() == 0 and SWAPPER_DEBUG_MODE: + if dist.get_rank() == 0 and SWAPPER_DEBUG_MODE: for i, tensor in enumerate(src_tensors): logger.info( f'copy_in_fp16_param: fp32_id = {id(parameters[i])} index = {i}, swap_num_elem = {src_tensors[i].numel()}' diff --git a/deepspeed/runtime/swap_tensor/partitioned_optimizer_swapper.py b/deepspeed/runtime/swap_tensor/partitioned_optimizer_swapper.py index cbcb136..5158535 100644 --- a/deepspeed/runtime/swap_tensor/partitioned_optimizer_swapper.py +++ b/deepspeed/runtime/swap_tensor/partitioned_optimizer_swapper.py @@ -5,15 +5,15 @@ Licensed under the MIT license. Functionality of swapping optimizer tensors to/from (NVMe) storage devices. 
""" -import os import torch from deepspeed.utils.logging import logger -from deepspeed.ops.aio import AsyncIOBuilder +from deepspeed.ops.op_builder import AsyncIOBuilder +from deepspeed import comm as dist from deepspeed.runtime.swap_tensor.constants import * from deepspeed.runtime.swap_tensor.utils import swap_in_tensors, swap_out_tensors, print_object, \ - MIN_AIO_BYTES, AIO_ALIGNED_BYTES, get_sized_buffers, get_sized_buffer + get_sized_buffers from deepspeed.runtime.swap_tensor.async_swapper import AsyncTensorSwapper from deepspeed.runtime.swap_tensor.optimizer_utils import OptimizerSwapper @@ -62,7 +62,7 @@ class PartitionedOptimizerSwapper(OptimizerSwapper): 'print_exclude_list' ] - if torch.distributed.get_rank() == 0: + if dist.get_rank() == 0: print_object(obj=self, name='PartitionedOptimizerSwapper', exclude_list=self.print_exclude_list) @@ -160,7 +160,7 @@ class PartitionedOptimizerSwapper(OptimizerSwapper): self._log_timers([WRITE_TIMER]) - if DEBUG_MODE and torch.distributed.get_rank() == 0: + if DEBUG_MODE and dist.get_rank() == 0: logger.info(f'optimizer_param_swap_out: {(swap_bytes/(1024**3)):5.2f} GB') def swap_out_gradients(self, parameter, gradient_offsets, gradient_tensors): @@ -200,7 +200,7 @@ class PartitionedOptimizerSwapper(OptimizerSwapper): t.data = buffer.data self._log_timers([READ_TIMER, WAIT_TIMER]) - if DEBUG_MODE and torch.distributed.get_rank() == 0: + if DEBUG_MODE and dist.get_rank() == 0: logger.info(f'optimizer_param_swap_in: {(swap_bytes/(1024**3)):5.2f} GB') def _separate_pinned_tensors(self, swap_info): diff --git a/deepspeed/runtime/swap_tensor/partitioned_param_swapper.py b/deepspeed/runtime/swap_tensor/partitioned_param_swapper.py index 5128bbf..e9e134e 100644 --- a/deepspeed/runtime/swap_tensor/partitioned_param_swapper.py +++ b/deepspeed/runtime/swap_tensor/partitioned_param_swapper.py @@ -9,17 +9,15 @@ import os import shutil from enum import Enum import torch -import torch.distributed as dist - -from 
deepspeed.utils.logging import logger -from deepspeed.ops.aio import AsyncIOBuilder +from deepspeed import comm as dist +from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import AsyncIOBuilder from .constants import * from .utils import swap_in_tensors, swap_out_tensors, MIN_AIO_BYTES, AIO_ALIGNED_BYTES, print_object, SwapBufferPool -from ..zero.offload_constants import * def print_rank_0(message, debug=False, force=False): - if torch.distributed.get_rank() == 0 and (debug or force): + if dist.get_rank() == 0 and (debug or force): print(message) @@ -86,7 +84,7 @@ class AsyncPartitionedParameterSwapper(object): def _configure_aio(self, ds_config): self.swap_config = ds_config.zero_config.offload_param torch_dtype_string = str(self.dtype).split(".")[1] - self.swap_folder = os.path.join(self.swap_config[OFFLOAD_PARAM_NVME_PATH], + self.swap_folder = os.path.join(self.swap_config.nvme_path, 'zero_stage_3', f'{torch_dtype_string}params', f'rank{dist.get_rank()}') @@ -102,18 +100,17 @@ class AsyncPartitionedParameterSwapper(object): self.aligned_bytes = AIO_ALIGNED_BYTES * self.aio_config[AIO_THREAD_COUNT] self.numel_alignment = self.aligned_bytes // self.swap_element_size - self.elements_per_buffer = self.swap_config[OFFLOAD_PARAM_BUFFER_SIZE] + self.elements_per_buffer = self.swap_config.buffer_size self.aligned_elements_per_buffer = self._io_aligned_numel( self.elements_per_buffer) - self.param_buffer_count = self.swap_config[OFFLOAD_PARAM_BUFFER_COUNT] + self.param_buffer_count = self.swap_config.buffer_count self.available_buffer_ids = [i for i in range(self.param_buffer_count)] self.reserved_buffer_ids = [] - self.buffers = torch.empty(int(self.aligned_elements_per_buffer * - self.param_buffer_count), - dtype=self.dtype, - pin_memory=True, - requires_grad=False) + self.buffers = get_accelerator().pin_memory( + torch.empty(int(self.aligned_elements_per_buffer * self.param_buffer_count), + dtype=self.dtype, + requires_grad=False)) 
self.aio_read_handle = self.aio_handle(self.aio_config[AIO_BLOCK_SIZE], self.aio_config[AIO_QUEUE_DEPTH], @@ -305,7 +302,7 @@ class AsyncPartitionedParameterSwapper(object): f'Num inflight: params {len(self.inflight_params)}, buffers {len(self.inflight_swap_in_buffers)}, numel = {self.inflight_numel}', force=True) print_rank_0( - f'Num available: param {len(self.available_params)}, numel = {self.available_numel}', + f'Num available params: count = {len(self.available_params)}, ids = {self.available_params}, numel = {self.available_numel}', force=True) assert len(swap_in_paths) <= len(self.available_buffer_ids), f"Not enough buffers {len(self.available_buffer_ids)} for swapping {len(swap_in_paths)}" @@ -395,9 +392,10 @@ class AsyncPartitionedParameterSwapper(object): def reserve_partitioned_swap_space(self, partition_num_elems): aligned_numel = sum( [self._io_aligned_numel(numel) for numel in partition_num_elems]) - self.partitioned_swap_buffer = torch.zeros(aligned_numel, - device='cpu', - dtype=self.dtype).pin_memory() + self.partitioned_swap_buffer = get_accelerator().pin_memory( + torch.zeros(aligned_numel, + device='cpu', + dtype=self.dtype)) self.partitioned_swap_pool = SwapBufferPool([self.partitioned_swap_buffer]) def swap_out_partitioned_params(self, dst_fp16_params, src_fp32_params): diff --git a/deepspeed/runtime/swap_tensor/pipelined_optimizer_swapper.py b/deepspeed/runtime/swap_tensor/pipelined_optimizer_swapper.py index 7d0116f..4e10152 100644 --- a/deepspeed/runtime/swap_tensor/pipelined_optimizer_swapper.py +++ b/deepspeed/runtime/swap_tensor/pipelined_optimizer_swapper.py @@ -5,18 +5,13 @@ Licensed under the MIT license. Functionality of swapping optimizer tensors to/from (NVMe) storage devices. 
""" -import os -import torch +from deepspeed.ops.op_builder import AsyncIOBuilder +from deepspeed import comm as dist -from deepspeed.utils.logging import logger -from deepspeed.ops.aio import AsyncIOBuilder - -from deepspeed.runtime.zero.offload_constants import * from deepspeed.runtime.swap_tensor.constants import * -from deepspeed.runtime.swap_tensor.utils import swap_in_tensors, swap_out_tensors, print_object, \ - MIN_AIO_BYTES, AIO_ALIGNED_BYTES +from deepspeed.runtime.swap_tensor.utils import swap_in_tensors, swap_out_tensors, print_object from deepspeed.runtime.swap_tensor.async_swapper import AsyncTensorSwapper -from deepspeed.runtime.swap_tensor.optimizer_utils import SwapBufferManager, get_sized_buffer +from deepspeed.runtime.swap_tensor.utils import get_sized_buffer from deepspeed.runtime.swap_tensor.optimizer_utils import OptimizerSwapper @@ -95,8 +90,8 @@ class PipelinedOptimizerSwapper(OptimizerSwapper): numel_alignment=self.numel_alignment, timers=self.timers) - self.async_swap_in = swap_config[OFFLOAD_OPTIMIZER_PIPELINE_READ] - self.async_swap_out = swap_config[OFFLOAD_OPTIMIZER_PIPELINE_WRITE] + self.async_swap_in = swap_config.pipeline_read + self.async_swap_out = swap_config.pipeline_write self.swap_ops = { SYNC_SWAP_IN: None, @@ -113,7 +108,7 @@ class PipelinedOptimizerSwapper(OptimizerSwapper): 'print_exclude_list' ] - if torch.distributed.get_rank() == 0: + if dist.get_rank() == 0: print_object(obj=self, name='PipelinedOptimizerSwapper', exclude_list=self.print_exclude_list) @@ -254,7 +249,7 @@ class PipelinedOptimizerSwapper(OptimizerSwapper): count=required_buffer_count, dtype=parameter.dtype) assert allocated_buffers is not None, \ - f"PipelinedOptimizerSwapper ran out of swap buffers, try increasing {OFFLOAD_OPTIMIZER_BUFFER_COUNT}" + f"PipelinedOptimizerSwapper ran out of swap buffers, try increasing 'buffer_count'" state_buffers = allocated_buffers[:len(param_info.tensors)] param_info.set_swap_buffers(state_buffers) diff --git 
a/deepspeed/runtime/swap_tensor/utils.py b/deepspeed/runtime/swap_tensor/utils.py index 2a751e3..1d236bd 100644 --- a/deepspeed/runtime/swap_tensor/utils.py +++ b/deepspeed/runtime/swap_tensor/utils.py @@ -5,12 +5,11 @@ Licensed under the MIT license. Functionality of swapping tensors to/from (NVMe) storage devices. """ -import os import torch from deepspeed.utils.logging import logger +from deepspeed.accelerator import get_accelerator -from deepspeed.runtime.swap_tensor.constants import AIO_BLOCK_SIZE, AIO_QUEUE_DEPTH, \ - AIO_THREAD_COUNT, AIO_SINGLE_SUBMIT, AIO_OVERLAP_EVENTS +from deepspeed import comm as dist MIN_AIO_BYTES = 1024**2 AIO_ALIGNED_BYTES = 1024 @@ -181,16 +180,17 @@ class SwapBufferManager(object): self.count = count self.dtype = dtype self.all_buffers = [ - torch.zeros(num_elems, - device='cpu', - dtype=dtype).pin_memory() for _ in range(count) + get_accelerator().pin_memory( + torch.zeros(num_elems, + device='cpu', + dtype=dtype)) for _ in range(count) ] self.free_buffer_index = [i for i in range(count)] self.used_buffer_index = {} self.gigabytes = (self.all_buffers[0].element_size() * num_elems * count) / (1024 **3) - if torch.distributed.get_rank() == 0: + if dist.get_rank() == 0: exclude_list = ['all_buffers'] print_object(obj=self, name='SwapBufferManager', exclude_list=exclude_list) diff --git a/deepspeed/runtime/utils.py b/deepspeed/runtime/utils.py old mode 100644 new mode 100755 index ff6daba..30dad84 --- a/deepspeed/runtime/utils.py +++ b/deepspeed/runtime/utils.py @@ -7,31 +7,29 @@ Helper functions and classes from multiple sources. 
''' from collections.abc import Iterable -from deepspeed.moe.utils import is_moe_param, split_params_into_shared_and_expert_params +from deepspeed.moe.utils import is_moe_param import os import psutil import gc -from math import ceil, sqrt +from math import sqrt from math import floor -from bisect import bisect_left, bisect_right +from bisect import bisect_left import torch -from torch._six import inf -import torch.distributed as dist +from deepspeed import comm as dist + +try: + from torch._six import inf as inf +except ModuleNotFoundError: + from torch import inf as inf from deepspeed.utils import groups, logger from deepspeed.runtime.constants import PIPE_REPLICATED from numpy import prod +from deepspeed.accelerator import get_accelerator -# pt-1.9 deprecations -if hasattr(torch.cuda, "memory_reserved"): - torch_memory_reserved = torch.cuda.memory_reserved -else: - torch_memory_reserved = torch.cuda.memory_allocated -if hasattr(torch.cuda, "max_memory_reserved"): - torch_max_memory_reserved = torch.cuda.max_memory_reserved -else: - torch_max_memory_reserved = torch.cuda.memory_cached +torch_memory_reserved = get_accelerator().memory_reserved +torch_max_memory_reserved = get_accelerator().max_memory_reserved class DummyOptim(): @@ -191,7 +189,7 @@ class CheckOverflow(object): def check_using_norm(self, norm_group, reduce_overflow=True): # TODO: I don't think reduce_overflow is needed if mpu is None overflow = -1 in norm_group - overflow_gpu = torch.cuda.FloatTensor([overflow]) + overflow_gpu = get_accelerator().FloatTensor([overflow]) if self.has_moe_params: # In this case, we need to do an all_reduce across # the expert_parallel_group, so that if there was @@ -202,11 +200,11 @@ class CheckOverflow(object): op=dist.ReduceOp.MAX, group=groups._get_max_expert_parallel_group()) if self.mpu is not None: - torch.distributed.all_reduce(overflow_gpu, - op=torch.distributed.ReduceOp.MAX, - group=self.mpu.get_model_parallel_group()) + dist.all_reduce(overflow_gpu, + 
op=dist.ReduceOp.MAX, + group=self.mpu.get_model_parallel_group()) elif reduce_overflow: - dist.all_reduce(overflow_gpu, op=torch.distributed.ReduceOp.MAX) + dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX) dist.barrier() overflow = overflow_gpu[0].item() return bool(overflow) @@ -242,9 +240,9 @@ class CheckOverflow(object): overflow = self.has_overflow_serial(params) # Since each model parallel GPU carries only part of the model, # make sure overflow flag is synced across all the model parallel GPUs - overflow_gpu = torch.cuda.ByteTensor([overflow]) - # torch.distributed.all_reduce(overflow_gpu, - # op=torch.distributed.ReduceOp.MAX, + overflow_gpu = get_accelerator().ByteTensor([overflow]) + # deepspeeed.comm.all_reduce(overflow_gpu, + # op=deepspeed.comm.ReduceOp.MAX, # group=mpu.get_model_parallel_group()) if has_moe_params: # All reduce this across expert_parallel_group, so that if an expert @@ -253,9 +251,9 @@ class CheckOverflow(object): op=dist.ReduceOp.MAX, group=groups._get_max_expert_parallel_group()) if self.zero_reduce_scatter: - torch.distributed.all_reduce(overflow_gpu, - op=torch.distributed.ReduceOp.MAX, - group=torch.distributed.group.WORLD) + dist.all_reduce(overflow_gpu, + op=dist.ReduceOp.MAX, + group=dist.get_world_group()) elif self.mpu is not None: if self.deepspeed is not None: using_pipeline = hasattr(self.deepspeed, @@ -264,17 +262,16 @@ class CheckOverflow(object): and self.deepspeed.pipeline_enable_backward_allreduce is False ) or (not using_pipeline and self.deepspeed.enable_backward_allreduce is False): - torch.distributed.all_reduce( - overflow_gpu, - op=torch.distributed.ReduceOp.MAX, - group=self.mpu.get_data_parallel_group()) - torch.distributed.all_reduce(overflow_gpu, - op=torch.distributed.ReduceOp.MAX, - group=self.mpu.get_model_parallel_group()) + dist.all_reduce(overflow_gpu, + op=dist.ReduceOp.MAX, + group=self.mpu.get_data_parallel_group()) + dist.all_reduce(overflow_gpu, + op=dist.ReduceOp.MAX, + 
group=self.mpu.get_model_parallel_group()) elif self.deepspeed is not None and self.deepspeed.enable_backward_allreduce is False: - torch.distributed.all_reduce(overflow_gpu, - op=torch.distributed.ReduceOp.MAX, - group=torch.distributed.group.WORLD) + dist.all_reduce(overflow_gpu, + op=dist.ReduceOp.MAX, + group=dist.get_world_group()) overflow = overflow_gpu[0].item() return bool(overflow) @@ -304,7 +301,7 @@ class CheckOverflow(object): def _handle_overflow(cpu_sum, x, i): import math - rank = torch.distributed.get_rank() + rank = dist.get_rank() if rank == 0: t_i = -1 for v_i, v in enumerate(x.data.contiguous().view(-1)): @@ -353,12 +350,12 @@ def clip_grad_norm_(parameters, max_norm, norm_type=2, mpu=None): norm_type = float(norm_type) if norm_type == inf: total_norm = max(p.grad.data.abs().max() for p in parameters) - total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)]) + total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) # Take max across all GPUs. if mpu is not None: - torch.distributed.all_reduce(total_norm_cuda, - op=torch.distributed.ReduceOp.MAX, - group=mpu.get_model_parallel_group()) + dist.all_reduce(total_norm_cuda, + op=dist.ReduceOp.MAX, + group=mpu.get_model_parallel_group()) total_norm = total_norm_cuda[0].item() else: total_norm = 0 @@ -373,18 +370,18 @@ def clip_grad_norm_(parameters, max_norm, norm_type=2, mpu=None): total_norm += param_norm.item()**norm_type # Sum across all model parallel GPUs. - total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)]) + total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) if mpu is not None: - torch.distributed.all_reduce(total_norm_cuda, - op=torch.distributed.ReduceOp.SUM, - group=mpu.get_model_parallel_group()) + dist.all_reduce(total_norm_cuda, + op=dist.ReduceOp.SUM, + group=mpu.get_model_parallel_group()) total_norm = total_norm_cuda[0].item()**(1. 
/ norm_type) # Need to average total_norm across different GPUs due to the presence of moe params pg = groups._get_data_parallel_group() scaled_norm = total_norm * 1.0 / float(dist.get_world_size(group=pg)) - scaled_norm_tensor = torch.cuda.FloatTensor([float(scaled_norm)]) + scaled_norm_tensor = get_accelerator().FloatTensor([float(scaled_norm)]) dist.all_reduce(scaled_norm_tensor, group=pg) total_norm = scaled_norm_tensor.item() @@ -419,12 +416,12 @@ def get_grad_norm(parameters, norm_type=2, mpu=None): norm_type = float(norm_type) if norm_type == inf: total_norm = max(p.grad.data.abs().max() for p in parameters) - total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)]) + total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) # Take max across all GPUs. if mpu is not None: - torch.distributed.all_reduce(total_norm_cuda, - op=torch.distributed.ReduceOp.MAX, - group=mpu.get_model_parallel_group()) + dist.all_reduce(total_norm_cuda, + op=dist.ReduceOp.MAX, + group=mpu.get_model_parallel_group()) total_norm = total_norm_cuda[0].item() else: total_norm = 0. @@ -443,11 +440,11 @@ def get_grad_norm(parameters, norm_type=2, mpu=None): total_norm += param_norm.item()**norm_type # Sum across all model parallel GPUs. - total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)]) + total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) if mpu is not None: - torch.distributed.all_reduce(total_norm_cuda, - op=torch.distributed.ReduceOp.SUM, - group=mpu.get_model_parallel_group()) + dist.all_reduce(total_norm_cuda, + op=dist.ReduceOp.SUM, + group=mpu.get_model_parallel_group()) total_norm = total_norm_cuda[0].item()**(1. / norm_type) if total_norm == float( @@ -489,11 +486,11 @@ def get_grad_zeros(parameters, mpu=None): total_zeros += count_zeros.item() # Sum across all model parallel GPUs. 
- total_zeros_cuda = torch.cuda.FloatTensor([float(total_zeros)]) + total_zeros_cuda = get_accelerator().FloatTensor([float(total_zeros)]) if mpu is not None: - torch.distributed.all_reduce(total_zeros_cuda, - op=torch.distributed.ReduceOp.SUM, - group=mpu.get_model_parallel_group()) + dist.all_reduce(total_zeros_cuda, + op=dist.ReduceOp.SUM, + group=mpu.get_model_parallel_group()) total_zeros = total_zeros_cuda[0].item() return total_zeros @@ -522,12 +519,12 @@ def get_weight_norm(parameters, norm_type=2, mpu=None): norm_type = float(norm_type) if norm_type == inf: total_norm = max(p.data.abs().max() for p in parameters) - total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)]) + total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) # Take max across all GPUs. if mpu is not None: - torch.distributed.all_reduce(total_norm_cuda, - op=torch.distributed.ReduceOp.MAX, - group=mpu.get_model_parallel_group()) + dist.all_reduce(total_norm_cuda, + op=dist.ReduceOp.MAX, + group=mpu.get_model_parallel_group()) total_norm = total_norm_cuda[0].item() else: total_norm = 0. @@ -546,11 +543,11 @@ def get_weight_norm(parameters, norm_type=2, mpu=None): total_norm += param_norm**norm_type # Sum across all model parallel GPUs. - total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)]) + total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) if mpu is not None: - torch.distributed.all_reduce(total_norm_cuda, - op=torch.distributed.ReduceOp.SUM, - group=mpu.get_model_parallel_group()) + dist.all_reduce(total_norm_cuda, + op=dist.ReduceOp.SUM, + group=mpu.get_model_parallel_group()) total_norm = total_norm_cuda[0].item()**(1. 
/ norm_type) if total_norm == float( @@ -670,7 +667,7 @@ class PartitionedTensor: self.local_data, self.partition = self._partition_tensor(tensor) @classmethod - def from_meta(cls, meta, local_part, group, device='cuda'): + def from_meta(cls, meta, local_part, group, device=get_accelerator().device_name()): assert meta.dtype == torch.long dummy = torch.ones(dist.get_world_size(group=group)) part_obj = cls(tensor=dummy, group=group) @@ -774,14 +771,14 @@ def memory_status(msg, print_rank=-1, reset_max=False): if print_rank != -1 and rank != print_rank: return - torch.cuda.synchronize() + get_accelerator().synchronize() if reset_max: - torch.cuda.reset_max_memory_cached() - torch.cuda.reset_max_memory_allocated() + get_accelerator().reset_max_memory_cached() + get_accelerator().reset_max_memory_allocated() - new_alloced = torch.cuda.memory_allocated() - new_cached = torch.cuda.memory_cached() + new_alloced = get_accelerator().memory_allocated() + new_cached = get_accelerator().memory_cached() delta_alloced = new_alloced - mem_alloced delta_cached = new_cached - mem_cached @@ -789,8 +786,8 @@ def memory_status(msg, print_rank=-1, reset_max=False): mem_cached = new_cached mem_alloced = new_alloced - max_alloced = torch.cuda.max_memory_allocated() - max_cached = torch.cuda.max_memory_cached() + max_alloced = get_accelerator().max_memory_allocated() + max_cached = get_accelerator().max_memory_cached() # convert to GB for printing new_alloced /= 1024**3 @@ -803,22 +800,26 @@ def memory_status(msg, print_rank=-1, reset_max=False): print( f'RANK={rank} MEMSTATS', msg, - f'device={torch.cuda.current_device()} ' + f'device={get_accelerator().current_device_name()} ' f'current alloc={new_alloced:0.4f}GB (delta={delta_alloced:0.4f}GB max={max_alloced:0.4f}GB) ' f'current cache={new_cached:0.4f}GB (delta={delta_cached:0.4f}GB max={max_cached:0.4f}GB)' ) def get_ma_status(): - if torch.distributed.is_initialized() and not torch.distributed.get_rank() == 0: + if 
dist.is_initialized() and not dist.get_rank() == 0: return 0 - return torch.cuda.memory_allocated() + return get_accelerator().memory_allocated() + + +def empty_cache(): + get_accelerator().empty_cache() def see_memory_usage(message, force=False): if not force: return - if torch.distributed.is_initialized() and not torch.distributed.get_rank() == 0: + if dist.is_initialized() and not dist.get_rank() == 0: return # python doesn't do real-time garbage collection so do it explicitly to get the correct RAM reports @@ -827,8 +828,8 @@ def see_memory_usage(message, force=False): # Print message except when distributed but not rank 0 logger.info(message) logger.info( - f"MA {round(torch.cuda.memory_allocated() / (1024 * 1024 * 1024),2 )} GB \ - Max_MA {round(torch.cuda.max_memory_allocated() / (1024 * 1024 * 1024),2)} GB \ + f"MA {round(get_accelerator().memory_allocated() / (1024 * 1024 * 1024),2 )} GB \ + Max_MA {round(get_accelerator().max_memory_allocated() / (1024 * 1024 * 1024),2)} GB \ CA {round(torch_memory_reserved() / (1024 * 1024 * 1024),2)} GB \ Max_CA {round(torch_max_memory_reserved() / (1024 * 1024 * 1024))} GB ") @@ -838,8 +839,7 @@ def see_memory_usage(message, force=False): f'CPU Virtual Memory: used = {used_GB} GB, percent = {vm_stats.percent}%') # get the peak memory to report correct data, so reset the counter for the next call - if hasattr(torch.cuda, "reset_peak_memory_stats"): # pytorch 1.4+ - torch.cuda.reset_peak_memory_stats() + get_accelerator().reset_peak_memory_stats() def call_to_str(base, *args, **kwargs): @@ -913,20 +913,20 @@ def get_global_norm_of_tensors(input_tensors, norm_type=2, mpu=None): norm_type = float(norm_type) if norm_type == inf: total_norm = max(t.data.abs().max() for t in input_tensors) - total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)]) + total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) if mpu is not None: - torch.distributed.all_reduce(total_norm_cuda, - op=torch.distributed.ReduceOp.MAX, 
- group=mpu.get_model_parallel_group()) + dist.all_reduce(total_norm_cuda, + op=dist.ReduceOp.MAX, + group=mpu.get_model_parallel_group()) total_norm = total_norm_cuda[0].item() else: total_norm = sum( [t.data.float().norm(norm_type).item()**norm_type for t in input_tensors]) - total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)]) + total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) if mpu is not None: - torch.distributed.all_reduce(total_norm_cuda, - op=torch.distributed.ReduceOp.SUM, - group=mpu.get_model_parallel_group()) + dist.all_reduce(total_norm_cuda, + op=dist.ReduceOp.SUM, + group=mpu.get_model_parallel_group()) total_norm = total_norm_cuda[0].item()**(1. / norm_type) if total_norm == float( diff --git a/deepspeed/runtime/weight_quantizer.py b/deepspeed/runtime/weight_quantizer.py index a6c241d..f88a931 100644 --- a/deepspeed/runtime/weight_quantizer.py +++ b/deepspeed/runtime/weight_quantizer.py @@ -1,5 +1,8 @@ +'''Copyright The Microsoft DeepSpeed Team''' + import torch from ..module_inject.replace_policy import HFBertLayerPolicy, replace_policies +from deepspeed.accelerator import get_accelerator class WeightQuantization(object): @@ -44,9 +47,11 @@ class WeightQuantization(object): q_scale.append(data_scale) value_list[index] = data_int index += 1 - q_scale = (1 / torch.cat(q_scale, - dim=merge_dim).to( - torch.cuda.current_device()).view(-1).unsqueeze(0)) + q_scale = ( + 1 / + torch.cat(q_scale, + dim=merge_dim).to( + get_accelerator().current_device_name()).view(-1).unsqueeze(0)) if "mlp.dense_4h_to_h.weight" in key: self.mlp4hh_scales.append(q_scale) elif "mlp.dense_h_to_4h.weight" in key: @@ -63,7 +68,7 @@ class WeightQuantization(object): torch.cat((s, torch.zeros((1, max_dim - s.shape[-1]), - device=torch.cuda.current_device())), + device=get_accelerator().current_device_name())), dim=-1) if s.shape[-1] < max_dim else s for s in layer_scales ] return torch.cat(layer_scales).unsqueeze(0) @@ -127,16 +132,15 @@ class 
WeightQuantization(object): layer_scales = [] for key in range(len(keys)): - if self.mlp_extra_grouping and is_mlp(keys[key]): + if self.mlp_extra_grouping and self.is_mlp(keys[key]): data_quantized, data_scale = self.quantize_data(keys[key], quantize_bits, groups * 2) elif policy_cls is HFBertLayerPolicy and self.is_qkv(keys[key]): data_quantized, data_scale = self.quantize_data(keys[key], quantize_bits, groups * 3) else: data_quantized, data_scale = self.quantize_data(keys[key], quantize_bits, groups) keys[key].copy_(data_quantized) - layer_scales.append( - (1 / - data_scale.to(torch.cuda.current_device()).view(-1).unsqueeze(0))) + layer_scales.append((1 / data_scale.to( + get_accelerator().current_device_name()).view(-1).unsqueeze(0))) all_scales.append(self.merge_layer_scales(layer_scales)) return layer diff --git a/deepspeed/runtime/zero/config.py b/deepspeed/runtime/zero/config.py index 3804fb5..d7ab552 100644 --- a/deepspeed/runtime/zero/config.py +++ b/deepspeed/runtime/zero/config.py @@ -1,197 +1,277 @@ +'''Copyright The Microsoft DeepSpeed Team''' """ Copyright (c) Microsoft Corporation Licensed under the MIT license. 
""" -from deepspeed.runtime.config_utils import get_scalar_param, DeepSpeedConfigObject +from pydantic import Field, validator +import sys +from typing import Optional +from enum import Enum +from deepspeed.runtime.config_utils import get_scalar_param, pp_int, DeepSpeedConfigModel from deepspeed.utils import logger -from .constants import * -from .offload_constants import * -from .offload_config import get_offload_param_config, get_default_offload_param_config, \ - get_offload_optimizer_config, get_default_offload_optimizer_config - - -class DeepSpeedZeroConfig(DeepSpeedConfigObject): - def __init__(self, param_dict): - super(DeepSpeedZeroConfig, self).__init__() - - self.stage = None - self.contiguous_gradients = None - self.reduce_scatter = None - self.reduce_bucket_size = None - self.allgather_partitions = None - self.allgather_bucket_size = None - self.overlap_comm = None - self.load_from_fp32_weights = None - - self.elastic_checkpoint = None - - #Offload Specific Parameters - self.offload_param = None - self.offload_optimizer = None - self.sub_group_size = None - - #Stage3 Specific Parameters - self.prefetch_bucket_size = None - self.param_persistence_threshold = None - self.max_live_parameters = None - self.max_reuse_distance = None - self.gather_16bit_weights_on_model_save = None - - self.ignore_unused_parameters = None - self.round_robin_gradients = None - - if ZERO_OPTIMIZATION in param_dict.keys(): - zero_config_dict = param_dict[ZERO_OPTIMIZATION] - if type(zero_config_dict) is bool: - zero_config_dict = self.read_zero_config_deprecated(param_dict) - else: - zero_config_dict = ZERO_OPTIMIZATION_DEFAULT - - self._initialize(zero_config_dict) - - def read_zero_config_deprecated(self, param_dict): +from .offload_config import DeepSpeedZeroOffloadParamConfig, DeepSpeedZeroOffloadOptimizerConfig, OffloadDeviceEnum + +# ZeRO optimization. By default, this optimization is not enabled. 
+# Users have to configure the desired optimization (0 means disabled) in params.json as below example: +ZERO_FORMAT = """ +ZeRO optimization should be enabled as: +"session_params": { + "zero_optimization": { + "stage": [0|1|2], + "stage3_max_live_parameters" : 1000000000, + "stage3_max_reuse_distance" : 1000000000, + "allgather_partitions": [true|false], + "allgather_bucket_size": 500000000, + "reduce_scatter": [true|false], + "contiguous_gradients" : [true|false] + "overlap_comm": [true|false], + "reduce_bucket_size": 500000000, + "load_from_fp32_weights": [true|false], + "cpu_offload": [true|false] (deprecated), + "cpu_offload_params" : [true|false] (deprecated), + "cpu_offload_use_pin_memory": [true|false] (deprecated), + "sub_group_size" : 1000000000000, + "offload_param": {...}, + "offload_optimizer": {...}, + "ignore_unused_parameters": [true|false], + "round_robin_gradients": [true|false] + } +} +""" + +ZERO_OPTIMIZATION = "zero_optimization" + + +def read_zero_config_deprecated(param_dict): + zero_config_dict = {} + zero_config_dict["stage"] = 1 if param_dict[ZERO_OPTIMIZATION] else 0 + if zero_config_dict["stage"] > 0: + zero_config_dict["allgather_bucket_size"] = get_scalar_param( + param_dict, + "allgather_size", + 5e8) + logger.warning( + "DeepSpeedConfig: this format of ZeRO optimization setup is deprecated. 
Please use the following format: {}" + .format(ZERO_FORMAT)) + return zero_config_dict + + +def get_zero_config(param_dict): + if ZERO_OPTIMIZATION in param_dict: + zero_config_dict = param_dict[ZERO_OPTIMIZATION] + if isinstance(zero_config_dict, bool): + zero_config_dict = read_zero_config_deprecated(param_dict) + else: zero_config_dict = {} - zero_config_dict[ - ZERO_OPTIMIZATION_STAGE] = 1 if param_dict[ZERO_OPTIMIZATION] else 0 - if zero_config_dict[ZERO_OPTIMIZATION_STAGE] > 0: - zero_config_dict[ZERO_OPTIMIZATION_ALLGATHER_BUCKET_SIZE] = get_scalar_param( - param_dict, - ZERO_OPTIMIZATION_ALLGATHER_BUCKET_SIZE_DEPRECATED, - ZERO_OPTIMIZATION_ALLGATHER_BUCKET_SIZE_DEFAULT) - - logger.warning( - 'DeepSpeedConfig: this format of ZeRO optimization setup is deprecated. Please use the following format: {}' - .format(ZERO_FORMAT)) - return zero_config_dict - - def _sanity_check(self, zero_config_dict): - deprecated_dict = dict( - ZERO_OPTIMIZATION_CPU_OFFLOAD=ZERO_OPTIMIZATION_OFFLOAD_OPTIMIZER, - ZERO_OPTIMIZATION_CPU_OFFLOAD_PARAMS=ZERO_OPTIMIZATION_OFFLOAD_PARAM, - ZERO_OPTIMIZATION_CPU_OFFLOAD_USE_PIN_MEMORY= - f'{ZERO_OPTIMIZATION_OFFLOAD_PARAM} or {ZERO_OPTIMIZATION_OFFLOAD_OPTIMIZER}' - ) - - for old_key, new_key in deprecated_dict.items(): - if old_key in zero_config_dict: - logger.warning( - f'DeepSpeedConfig: {old_key} is deprecated. 
Please use {new_key}.') - - def _initialize(self, zero_config_dict): - self._sanity_check(zero_config_dict) - - self.stage = get_scalar_param(zero_config_dict, - ZERO_OPTIMIZATION_STAGE, - ZERO_OPTIMIZATION_STAGE_DEFAULT) - - self.contiguous_gradients = get_scalar_param( - zero_config_dict, - ZERO_OPTIMIZATION_CONTIGUOUS_GRADIENTS, - ZERO3_OPTIMIZATION_CONTIGUOUS_GRADIENTS_DEFAULT - if self.stage == ZERO_OPTIMIZATION_WEIGHTS else - ZERO_OPTIMIZATION_CONTIGUOUS_GRADIENTS_DEFAULT) - - self.reduce_bucket_size = get_scalar_param( - zero_config_dict, - ZERO_OPTIMIZATION_REDUCE_BUCKET_SIZE, - ZERO_OPTIMIZATION_REDUCE_BUCKET_SIZE_DEFAULT) - - self.reduce_scatter = get_scalar_param(zero_config_dict, - ZERO_OPTIMIZATION_REDUCE_SCATTER, - ZERO_OPTIMIZATION_REDUCE_SCATTER_DEFAULT) - - self.overlap_comm = get_scalar_param( - zero_config_dict, - ZERO_OPTIMIZATION_OVERLAP_COMM, - ZERO3_OPTIMIZATION_OVERLAP_COMM_DEFAULT if self.stage - == ZERO_OPTIMIZATION_WEIGHTS else ZERO_OPTIMIZATION_OVERLAP_COMM_DEFAULT) - - self.allgather_partitions = get_scalar_param( - zero_config_dict, - ZERO_OPTIMIZATION_ALLGATHER_PARTITIONS, - ZERO_OPTIMIZATION_ALLGATHER_PARTITIONS_DEFAULT) - - self.allgather_bucket_size = get_scalar_param( - zero_config_dict, - ZERO_OPTIMIZATION_ALLGATHER_BUCKET_SIZE, - ZERO_OPTIMIZATION_ALLGATHER_BUCKET_SIZE_DEFAULT) - - self.load_from_fp32_weights = get_scalar_param( - zero_config_dict, - ZERO_OPTIMIZATION_LOAD_FROM_FP32_WEIGHTS, - ZERO_OPTIMIZATION_LOAD_FROM_FP32_WEIGHTS_DEFAULT) - - self.elastic_checkpoint = get_scalar_param( - zero_config_dict, - ZERO_OPTIMIZATION_ELASTIC_CHECKPOINT, - ZERO_OPTIMIZATION_ELASTIC_CHECKPOINT_DEFAULT) - - if ZERO_OPTIMIZATION_CPU_OFFLOAD in zero_config_dict: - cpu_offload_optimizer = get_scalar_param( - zero_config_dict, - ZERO_OPTIMIZATION_CPU_OFFLOAD, - ZERO_OPTIMIZATION_CPU_OFFLOAD_DEFAULT) - if cpu_offload_optimizer: - self.offload_optimizer = get_default_offload_optimizer_config() - else: - self.offload_optimizer = 
get_offload_optimizer_config(zero_config_dict) - - if ZERO_OPTIMIZATION_CPU_OFFLOAD_PARAMS in zero_config_dict: - cpu_offload_params = get_scalar_param( - zero_config_dict, - ZERO_OPTIMIZATION_CPU_OFFLOAD_PARAMS, - ZERO_OPTIMIZATION_CPU_OFFLOAD_PARAMS_DEFAULT) - if cpu_offload_params: - self.offload_param = get_default_offload_param_config() - else: - self.offload_param = get_offload_param_config(zero_config_dict) - - self.sub_group_size = get_scalar_param(zero_config_dict, - ZERO_OPTIMIZATION_SUB_GROUP_SIZE, - ZERO_OPTIMIZATION_SUB_GROUP_SIZE_DEFAULT) - - self.max_live_parameters = get_scalar_param( - zero_config_dict, - ZERO_OPTIMIZATION_MAX_LIVE_PARAMETERS, - ZERO_OPTIMIZATION_MAX_LIVE_PARAMETERS_DEFAULT) - - self.max_reuse_distance = get_scalar_param( - zero_config_dict, - ZERO_OPTIMIZATION_MAX_REUSE_DISTANCE, - ZERO_OPTIMIZATION_MAX_REUSE_DISTANCE_DEFAULT) - - self.prefetch_bucket_size = get_scalar_param( - zero_config_dict, - ZERO_OPTIMIZATION_PREFETCH_BUCKET_SIZE, - ZERO_OPTIMIZATION_PREFETCH_BUCKET_SIZE_DEFAULT) - - self.param_persistence_threshold = get_scalar_param( - zero_config_dict, - ZERO_OPTIMIZATION_PARAM_PERSISTENCE_THRESHOLD, - ZERO_OPTIMIZATION_PARAM_PERSISTENCE_THRESHOLD_DEFAULT) - - # config key has been renamed to use "16bit" instead of "fp16." 
falling back - # to old config name in order to preserve backwards compatibility - self.gather_16bit_weights_on_model_save = ZERO_OPTIMIZATION_GATHER_16BIT_WEIGHTS_ON_MODEL_SAVE_DEFAULT - for key in [ - ZERO_OPTIMIZATION_GATHER_16BIT_WEIGHTS_ON_MODEL_SAVE, - ZERO_OPTIMIZATION_GATHER_FP16_WEIGHTS_ON_MODEL_SAVE - ]: - if key in zero_config_dict: - self.gather_16bit_weights_on_model_save = zero_config_dict[key] - break - - self.ignore_unused_parameters = get_scalar_param( - zero_config_dict, - ZERO_OPTIMIZATION_IGNORE_UNUSED_PARAMETERS, - ZERO_OPTIMIZATION_IGNORE_UNUSED_PARAMETERS_DEFAULT) - - self.legacy_stage1 = get_scalar_param(zero_config_dict, - ZERO_OPTIMIZATION_LEGACY_STAGE1, - ZERO_OPTIMIZATION_LEGACY_STAGE1_DEFAULT) - - self.round_robin_gradients = get_scalar_param( - zero_config_dict, - ZERO_OPTIMIZATION_ROUND_ROBIN_GRADIENTS, - ZERO_OPTIMIZATION_ROUND_ROBIN_GRADIENTS_DEFAULT) + return DeepSpeedZeroConfig(**zero_config_dict) + + +class ZeroStageEnum(int, Enum): + """ Enum class for possible zero stages """ + disabled = 0 + optimizer_states = 1 + gradients = 2 + weights = 3 + max_stage = 3 + + +class DeepSpeedZeroConfig(DeepSpeedConfigModel): + """ + Sets parameters for ZeRO optimizations. + """ + + stage: ZeroStageEnum = 0 + """ + Chooses different stages of ZeRO Optimizer. Stage 0, 1, 2, and 3 refer + to disabled, optimizer state partitioning, and optimizer+gradient state + partitioning, and optimizer+gradient+parameter partitioning, respectively. + """ + + contiguous_gradients: bool = True + """ + Copies the gradients to a contiguous buffer as they are produced. Avoids + memory fragmentation during backward pass. + """ + + reduce_scatter: bool = True + """ + Uses reduce or reduce scatter instead of allreduce to average gradients + """ + + reduce_bucket_size: int = Field(pp_int(5e8), ge=0) + """ + Number of elements reduced/allreduced at a time. 
Limits the memory required + for the allgather for large model sizes + """ + + allgather_partitions: bool = True + """ + Chooses between allgather collective or a series of broadcast collectives + to gather updated parameters from all the GPUs at the end of each step + """ + + allgather_bucket_size: int = Field(pp_int(5e8), ge=0) + """ + Number of elements allgathered at a time. Limits the memory required for + the allgather for large model sizes + """ + + overlap_comm: bool = None # None for dynamic default value (see validator `overlap_comm_valid` below) + """ + Attempts to overlap the reduction of the gradients with backward computation + """ + + load_from_fp32_weights: bool = True + """ + Boolean indicating whether to initialize fp32 master weights from fp32 + copies in checkpoint (no precision loss) or from model's fp16 copies (with + precision loss). This can be used to initialize optimizer state even when + checkpoint is missing optimizer state. + """ + + elastic_checkpoint: bool = False + """ + Enable loading checkpoint that was saved by job with different GPU count. + No longer supported. + """ + + offload_param: Optional[DeepSpeedZeroOffloadParamConfig] = None + """ + Enable offloading of model parameters to CPU or NVMe. This frees up GPU + memory for larger models or batch sizes. Valid only with stage 3. Expects a + dictionary containing values for :any:`DeepSpeedZeroOffloadParamConfig`. + """ + + offload_optimizer: Optional[DeepSpeedZeroOffloadOptimizerConfig] = None + """ + Enable offloading of optimizer state to CPU or NVMe, and optimizer + computation to CPU. This frees up GPU memory for larger models or batch + sizes. Valid for ZeRO stage 1, 2, 3. Expects a dictionary containing values + for :any:`DeepSpeedZeroOffloadOptimizerConfig`. + """ + + sub_group_size: int = Field(pp_int(1e9), ge=0) + """ + Tile size for parameter processing to fit massive models (with trillions of + parameters). 
Used by ZeRO3-Offload and ZeRO-Infinity + """ + + cpu_offload_param: bool = Field( + None, + deprecated=True, + new_param="offload_param", + new_param_fn=( + lambda val: DeepSpeedZeroOffloadParamConfig(device=OffloadDeviceEnum.cpu) + if val else None), + ) + """ Deprecated, please use ``offload_param`` """ + + cpu_offload_use_pin_memory: bool = Field( + None, + deprecated=True, + new_param="offload_param or offload_optimizer", + set_new_param=False, + ) + """ Deprecated, please use ``offload_param`` or ``offload_optimizer`` """ + + cpu_offload: bool = Field( + None, + deprecated=True, + new_param="offload_optimizer", + new_param_fn=( + lambda val: DeepSpeedZeroOffloadOptimizerConfig(device=OffloadDeviceEnum.cpu) + if val else None), + ) + """ Deprecated, please use ``offload_optimizer`` """ + + prefetch_bucket_size: int = Field(pp_int(5e7), + ge=0, + alias="stage3_prefetch_bucket_size") + """ + Maximum number of parameter elements to fetch ahead of use. Used by ZeRO3, + ZeRO3-Offload, ZeRO-Infinity, and ZeRO-Inference. + """ + + param_persistence_threshold: int = Field(pp_int(1e5), + ge=0, + alias="stage3_param_persistence_threshold") + """ + Do not partition parameters smaller than this threshold. Smaller values use + less memory, but can greatly increase communication (especially + latency-bound messages). + """ + + model_persistence_threshold: int = Field(pp_int(sys.maxsize, + "sys.maxsize"), + ge=0, + alias="stage3_model_persistence_threshold") + """ + Maximum number of parameter elements that can be persisted in GPU and not + partitioned. This imposes an upper bound on the number of unpartitioned + parameters resulting from param_persistence_threshold setting. Used by + ZeRO3-Offload, ZeRO-Infinity and ZeRO-Inference. + """ + + max_live_parameters: int = Field(pp_int(1e9), + ge=0, + alias="stage3_max_live_parameters") + """ + The maximum number of parameters resident per GPU before releasing. Smaller + values use less memory, but perform more communication. 
+ """ + + max_reuse_distance: int = Field(pp_int(1e9), ge=0, alias="stage3_max_reuse_distance") + """ + Do not release a parameter if it will be reused within this threshold of + parameters. Smaller values use less memory, but perform more communication. + """ + + gather_16bit_weights_on_model_save: bool = Field( + False, + alias="stage3_gather_16bit_weights_on_model_save") + """ + Consolidate the weights before saving the model by ``save_16bit_model()``. + Since the weights are partitioned across GPUs, they aren’t part of + ``state_dict``, so this function automatically gathers the weights when + this option is enabled and then saves the fp16 model weights. + """ + + stage3_gather_fp16_weights_on_model_save: bool = Field( + False, + deprecated=True, + new_param="gather_16bit_weights_on_model_save") + """ Deprecated, please use ``gather_16bit_weights_on_model_save`` """ + + ignore_unused_parameters: bool = True + """ + Unused parameters in modules may be unexpected in static networks, but + could be normal in dynamic networks. This controls whether or not training + should terminate with an error message when unused parameters are detected. + This is set to ``False`` by default, which means unused parameters are + ignored and training continues. Now is just used in stage 2. + """ + + legacy_stage1: bool = False + """ + For backward-compatibility enable old ZeRO stage 1 implementation. Use at + your own risk, will be deprecated soon. + """ + + round_robin_gradients: bool = False + """ + Stage 1 and 2 optimization for CPU offloading that parallelizes gradient + copying to CPU memory among ranks by fine-grained gradient partitioning. + Performance benefit grows with gradient accumulation steps (more copying + between optimizer steps) or GPU count (increased parallelism). 
+ """ + + # Validators + @validator("overlap_comm") + def overlap_comm_valid(cls, field_value, values): + if field_value is None: + assert ( + "stage" in values + ), "DeepSpeedZeroConfig: 'stage' must be defined before 'overlap_comm'" + field_value = values["stage"] == ZeroStageEnum.weights + return field_value diff --git a/deepspeed/runtime/zero/contiguous_memory_allocator.py b/deepspeed/runtime/zero/contiguous_memory_allocator.py index a5b14ae..46e5578 100644 --- a/deepspeed/runtime/zero/contiguous_memory_allocator.py +++ b/deepspeed/runtime/zero/contiguous_memory_allocator.py @@ -1,8 +1,12 @@ +'''Copyright The Microsoft DeepSpeed Team''' + import torch +from deepspeed import comm as dist + def print_rank_0(message): - if torch.distributed.get_rank() == 0: + if dist.get_rank() == 0: print(message) diff --git a/deepspeed/runtime/zero/linear.py b/deepspeed/runtime/zero/linear.py index 458f4ff..6fbcabb 100644 --- a/deepspeed/runtime/zero/linear.py +++ b/deepspeed/runtime/zero/linear.py @@ -1,3 +1,5 @@ +'''Copyright The Microsoft DeepSpeed Team''' + #Linear Module to use with ZeRO Stage 3 to allow for parameter memory release #after the module execution during forward #Instead of saving variables using save_for_backward, we save variable ids @@ -18,19 +20,26 @@ from torch.nn.parameter import Parameter from torch.nn import init from torch.nn.modules.module import Module from deepspeed.runtime.utils import noop_decorator +from deepspeed import comm as dist +from deepspeed.accelerator import get_accelerator tensor_map = {} def print_rank_0(message, debug=False, force=False): - if torch.distributed.get_rank() == 0 and (debug or force): + if dist.get_rank() == 0 and (debug or force): print(message) -try: - autocast_custom_fwd = torch.cuda.amp.custom_fwd - autocast_custom_bwd = torch.cuda.amp.custom_bwd -except (ImportError, AttributeError) as exp: +device = get_accelerator().device_name() +if device == 'cuda': + try: + autocast_custom_fwd = torch.cuda.amp.custom_fwd + 
autocast_custom_bwd = torch.cuda.amp.custom_bwd + except (ImportError, AttributeError) as exp: + autocast_custom_fwd = noop_decorator + autocast_custom_bwd = noop_decorator +else: autocast_custom_fwd = noop_decorator autocast_custom_bwd = noop_decorator @@ -110,6 +119,13 @@ class LinearFunctionForZeroStage3(torch.autograd.Function): return grad_input, grad_weight, grad_bias +def zero3_linear_wrap(input, weight, bias=None): + if bias is None: + return LinearFunctionForZeroStage3.apply(input, weight) + else: + return LinearFunctionForZeroStage3.apply(input, weight, bias) + + class LinearModuleForZeroStage3(Module): r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`. The weights are pre-transposed and stored as A^T instead of transposing during each diff --git a/deepspeed/runtime/zero/offload_config.py b/deepspeed/runtime/zero/offload_config.py index c438a76..7ea76c6 100644 --- a/deepspeed/runtime/zero/offload_config.py +++ b/deepspeed/runtime/zero/offload_config.py @@ -1,75 +1,94 @@ -''' -Copyright 2020 The Microsoft DeepSpeed Team. +'''Copyright The Microsoft DeepSpeed Team''' +""" +Copyright (c) Microsoft Corporation Licensed under the MIT license. 
-''' - -from deepspeed.runtime.config_utils import get_scalar_param -from .offload_constants import * -from .utils import logger - -OFFLOAD_PARAM_KEY_DEFAULT_DICT = { - OFFLOAD_PARAM_DEVICE: OFFLOAD_PARAM_DEVICE_DEFAULT, - OFFLOAD_PARAM_NVME_PATH: OFFLOAD_PARAM_NVME_PATH_DEFAULT, - OFFLOAD_PARAM_BUFFER_COUNT: OFFLOAD_PARAM_BUFFER_COUNT_DEFAULT, - OFFLOAD_PARAM_BUFFER_SIZE: OFFLOAD_PARAM_BUFFER_SIZE_DEFAULT, - OFFLOAD_PARAM_MAX_IN_CPU: OFFLOAD_PARAM_MAX_IN_CPU_DEFAULT, - OFFLOAD_PARAM_PIN_MEMORY: OFFLOAD_PARAM_PIN_MEMORY_DEFAULT -} - -OFFLOAD_OPTIMIZER_KEY_DEFAULT_DICT = { - OFFLOAD_OPTIMIZER_DEVICE: OFFLOAD_OPTIMIZER_DEVICE_DEFAULT, - OFFLOAD_OPTIMIZER_NVME_PATH: OFFLOAD_OPTIMIZER_NVME_PATH_DEFAULT, - OFFLOAD_OPTIMIZER_BUFFER_COUNT: OFFLOAD_OPTIMIZER_BUFFER_COUNT_DEFAULT, - OFFLOAD_OPTIMIZER_PIN_MEMORY: OFFLOAD_OPTIMIZER_PIN_MEMORY_DEFAULT, - OFFLOAD_OPTIMIZER_PIPELINE_READ: OFFLOAD_OPTIMIZER_PIPELINE_READ_DEFAULT, - OFFLOAD_OPTIMIZER_PIPELINE_WRITE: OFFLOAD_OPTIMIZER_PIPELINE_WRITE_DEFAULT, - OFFLOAD_OPTIMIZER_FAST_INIT: OFFLOAD_OPTIMIZER_FAST_INIT_DEFAULT -} - - -def _get_offload_config(param_dict, key_default_dict): - offload_config = {} - for key, default_value in key_default_dict.items(): - offload_config[key] = get_scalar_param(param_dict, key, default_value) - - return offload_config - - -def get_offload_param_config(param_dict): - if OFFLOAD_PARAM in param_dict and param_dict[OFFLOAD_PARAM] is not None: - offload_config = _get_offload_config( - param_dict=param_dict[OFFLOAD_PARAM], - key_default_dict=OFFLOAD_PARAM_KEY_DEFAULT_DICT) - device = offload_config.get("device", OFFLOAD_PARAM_DEVICE_DEFAULT) - assert device in VALID_OFFLOAD_DEVICES, f'Invalid parameter offloading device specified: {device}.' 
- if device == OFFLOAD_NONE_DEVICE: - return None - return offload_config - return None - - -def get_default_offload_param_config(): - return OFFLOAD_PARAM_KEY_DEFAULT_DICT - - -def get_offload_optimizer_config(param_dict): - if OFFLOAD_OPTIMIZER in param_dict and param_dict[OFFLOAD_OPTIMIZER] is not None: - offload_config = _get_offload_config( - param_dict=param_dict[OFFLOAD_OPTIMIZER], - key_default_dict=OFFLOAD_OPTIMIZER_KEY_DEFAULT_DICT) - - device = offload_config.get("device", OFFLOAD_OPTIMIZER_DEVICE_DEFAULT) - assert device in VALID_OFFLOAD_DEVICES, f'Invalid optimizer offloading device specified: {device}.' - if device == OFFLOAD_NONE_DEVICE: - return None - - offload_config[OFFLOAD_OPTIMIZER_PIPELINE] = offload_config[ - OFFLOAD_OPTIMIZER_PIPELINE_READ] or offload_config[ - OFFLOAD_OPTIMIZER_PIPELINE_WRITE] - return offload_config - - return None - - -def get_default_offload_optimizer_config(): - return OFFLOAD_OPTIMIZER_KEY_DEFAULT_DICT +""" + +from pydantic import Field, validator +from enum import Enum +from pathlib import Path +from deepspeed.runtime.config_utils import DeepSpeedConfigModel, pp_int + + +class OffloadDeviceEnum(str, Enum): + """ Enum for valid offload devices """ + none = "none" + cpu = "cpu" + nvme = "nvme" + + +class DeepSpeedZeroOffloadParamConfig(DeepSpeedConfigModel): + """ Set options for parameter offload. Valid only with stage 3. """ + + device: OffloadDeviceEnum = "none" + """ + Device memory to offload model parameters. Supported options are `cpu` and + `nvme`. + """ + + nvme_path: Path = None + """ Filesystem path for NVMe device for parameter offloading. """ + + buffer_count: int = Field(5, ge=0) + """ Number of buffers in buffer pool for parameter offloading to NVMe. """ + + buffer_size: int = Field(pp_int(1e8), ge=0) + """ Size of buffers in buffer pool for parameter offloading to NVMe. 
""" + + max_in_cpu: int = Field(pp_int(1e9), ge=0) + """ + Number of parameter elements to maintain in CPU memory when offloading to + NVMe is enabled. + """ + + pin_memory: bool = False + """ + Offload to page-locked CPU memory. This could boost throughput at the cost + of extra memory overhead. + """ + + +class DeepSpeedZeroOffloadOptimizerConfig(DeepSpeedConfigModel): + """ Set options for optimizer offload. Valid with stage 1, 2, and 3. """ + + device: OffloadDeviceEnum = "none" + """ + Device memory to offload optimizer state. Supported options are `cpu` and + `nvme`. Optimizer computation is offload to CPU regardless of device option. + """ + + nvme_path: Path = None + """ Filesystem path for NVMe device for optimizer state offloading. """ + + buffer_count: int = Field(4, ge=0) + """ + Number of buffers in buffer pool for optimizer state offloading to NVMe. + This should be at least the number of states maintained per parameter by + the optimizer. For example, Adam optimizer has 4 states (parameter, + gradient, momentum, and variance). + """ + + pin_memory: bool = False + """ + Offload to page-locked CPU memory. This could boost throughput at the cost + of extra memory overhead. + """ + + pipeline_read: bool = False + """ + For tile-based optimizer step processing, overlap read of next tile with + computation of current tile. Used in ZeRO-Infinity. + """ + + pipeline_write: bool = False + """ + For tile-based optimizer step processing, overlap write of previous tile + with computation of current tile. + """ + + fast_init: bool = False + """ Enable fast optimizer initialization when offloading to NVMe. 
""" + @validator("pipeline_read", "pipeline_write", always=True) + def set_pipeline(cls, field_value, values): + values["pipeline"] = field_value or values.get("pipeline", False) + return field_value diff --git a/deepspeed/runtime/zero/parameter_offload.py b/deepspeed/runtime/zero/parameter_offload.py new file mode 100644 index 0000000..336c63e --- /dev/null +++ b/deepspeed/runtime/zero/parameter_offload.py @@ -0,0 +1,516 @@ +""" +"Copyright 2022 The Microsoft DeepSpeed Team. +Licensed under the MIT license. +""" + +import sys +import torch +from collections import OrderedDict +from deepspeed.runtime.utils import see_memory_usage +from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum +from deepspeed.runtime.zero.partition_parameters import _init_external_params +from deepspeed.runtime.zero.partition_parameters import * +from deepspeed.runtime.zero.partitioned_param_coordinator import PartitionedParameterCoordinator, iter_params +from deepspeed import comm as dist +from deepspeed.accelerator import get_accelerator + +FWD_MODULE_STACK = list() + + +def is_builtin_type(obj): + # https://stackoverflow.com/a/17795199 + return obj.__class__.__module__ == '__builtin__' or obj.__class__.__module__ == "builtins" + + +# ensure we only warn once, otherwise every iteration will trigger a warning +warned = False + + +#apply torch.autograd.Function that calls a backward_function to tensors in output +def _apply_to_tensors_only(module, functional, backward_function, outputs): + if isinstance(outputs, (tuple, list)): + touched_outputs = [] + for output in outputs: + touched_output = _apply_to_tensors_only(module, + functional, + backward_function, + output) + touched_outputs.append(touched_output) + return outputs.__class__(touched_outputs) + elif isinstance(outputs, dict): + # apply inplace to avoid recreating dict inherited objects + for key in outputs.keys(): + outputs[key] = _apply_to_tensors_only(module, + functional, + backward_function, + outputs[key]) + 
return outputs + + elif isinstance(outputs, torch.Tensor): + # this also applies to torch.Tensor's subclasses like torch.nn.parameter.Parameter + touched_outputs = functional.apply(module, backward_function, outputs) + + # restore zero param attributes if those get stripped by `backward_function` + if not is_zero_param(touched_outputs) and is_zero_param(outputs): + touched_outputs.ds_param_alias = outputs + return touched_outputs + else: + if not is_builtin_type(outputs): + global warned + if not warned and dist.get_rank() == 0: + logger.warning( + f"A module has unknown inputs or outputs type ({type(outputs)}) and the tensors embedded in it cannot be detected. " + "The ZeRO-3 hooks designed to trigger before or after backward pass of the module relies on knowing the input and " + "output tensors and therefore may not get triggered properly.") + warned = True + return outputs + + +#for each tensor in outputs run the forward_function and register backward_function as hook +def _apply_forward_and_backward_to_tensors_only(module, + forward_function, + backward_function, + outputs): + if type(outputs) is tuple: + touched_outputs = [] + for output in outputs: + touched_output = _apply_forward_and_backward_to_tensors_only( + module, + forward_function, + backward_function, + output) + touched_outputs.append(touched_output) + return tuple(touched_outputs) + elif type(outputs) is torch.Tensor: + forward_function(outputs) + if outputs.requires_grad: + outputs.register_hook(backward_function) + return outputs + else: + return outputs + + +class ZeROOrderedDict(OrderedDict): + def __init__(self, parent_module, *args, **kwargs): + """A replacement for ``collections.OrderedDict`` to detect external ZeRO params. 
+ + Args: + parent_module (``collections.OrderedDict``): the collection to replace + """ + + super().__init__(*args, **kwargs) + self._parent_module = parent_module + self._in_forward = False + + def __getitem__(self, key): + param = super().__getitem__(key) + + # Params can be registered as None (e.g., bias) + if param is None: + return param + + if param.ds_status == ZeroParamStatus.NOT_AVAILABLE: + if self._parent_module._parameters._in_forward: + register_external_parameter(FWD_MODULE_STACK[-1], param) + param.all_gather() + print_rank_0( + f'Registering external parameter from getter {key} ds_id = {param.ds_id}', + force=False) + + return param + + +def _inject_parameters(module, cls): + for module in module.modules(): + if cls == ZeROOrderedDict: + new_param = cls(parent_module=module) + else: + new_param = cls() + + for key, param in module._parameters.items(): + new_param[key] = param + module._parameters = new_param + + +class PreBackwardFunction(torch.autograd.Function): + @staticmethod + def forward(ctx, module, pre_backward_function, outputs): + ctx.module = module + ctx.pre_backward_function = pre_backward_function + if not hasattr(module, "applied_pre_backward_ref_cnt"): + module.applied_pre_backward_ref_cnt = 0 + module.applied_pre_backward_ref_cnt += 1 + #print(f"After Forward: {ctx.module.__class__.__name__}") + outputs = outputs.detach() + return outputs + + @staticmethod + def backward(ctx, *args): + #print(f"Before Backward: {ctx.module.__class__.__name__}") + ctx.pre_backward_function(ctx.module) + return (None, None) + args + + +class PostBackwardFunction(torch.autograd.Function): + @staticmethod + def forward(ctx, module, pre_backward_function, output): + ctx.module = module + if output.requires_grad: + #TODO SOME TIMES post backward does not seem to be triggered debug in detail + #Should only cause increase in memory not correctness issue + #if output.grad_fn.__class__.__name__ == 'ViewBackward': + # ctx.view=True + # print(f"Warning view 
tensor for input to module : {module.__class__.__name__}. Backward hooks may not trigger properly") + #assert len(module.parameters(recurse=False)), "The input tensor to the module is a view, and autograd Function or register_hook is not triggered with view tensors." + #if module.ds_grads_remaining == 0: + # print(f"Before Forward: {ctx.module.__class__.__name__}") + module.ds_grads_remaining += 1 + ctx.pre_backward_function = pre_backward_function + output = output.detach() + return output + + @staticmethod + def backward(ctx, *args): + ctx.module.ds_grads_remaining = ctx.module.ds_grads_remaining - 1 + if ctx.module.ds_grads_remaining == 0: + ctx.pre_backward_function(ctx.module) + #print(f"After Backward: {ctx.module.__class__.__name__}") + return (None, None) + args + + +class DeepSpeedZeRoOffload(object): + def __init__(self, + module, + timers, + ds_config, + overlap_comm=True, + prefetch_bucket_size=50000000, + max_reuse_distance=1000000000, + max_live_parameters=1000000000, + param_persistence_threshold=100000, + model_persistence_threshold=sys.maxsize, + offload_param_config=None, + mpu=None): + + see_memory_usage("DeepSpeedZeRoOffload initialize [begin]", force=True) + + print_rank_0(f"initialized {__class__.__name__} with args: {locals()}", + force=False) + + self.module = module + self.dtype = list(module.parameters())[0].dtype + self.offload_device = None + self.offload_param_pin_memory = False + + if offload_param_config is not None and offload_param_config.device != OffloadDeviceEnum.none: + self.offload_device = offload_param_config.device + self.offload_param_pin_memory = offload_param_config.pin_memory + + self._convert_to_zero_parameters(ds_config, module, mpu) + + for m in module.modules(): + _init_external_params(m) + + _inject_parameters(module, ZeROOrderedDict) + + self.param_numel_persistence_threshold = int(param_persistence_threshold) + self.model_persistence_threshold = int(model_persistence_threshold) + self.persistent_parameters = 
self.mark_persistent_parameters( + self.param_numel_persistence_threshold, + self.model_persistence_threshold) + + self.param_coordinators = {} + self._prefetch_bucket_sz = int(prefetch_bucket_size) + self._max_reuse_distance_in_numel = int(max_reuse_distance) + self._max_available_parameters_in_numel = int(max_live_parameters) + self.__allgather_stream = get_accelerator().Stream( + ) if overlap_comm else get_accelerator().default_stream() + + self.forward_hooks = [] + self.backward_hooks = [] + self.setup_zero_stage3_hooks() + print_rank_0( + f'Created module hooks: forward = {len(self.forward_hooks)}, backward = {len(self.backward_hooks)}', + force=False) + + see_memory_usage("DeepSpeedZeRoOffload initialize [end]", force=True) + + @instrument_w_nvtx + def partition_all_parameters(self): + """Partitioning Parameters that were not partitioned usually if parameters + of modules whose input parameters do not require grad computation do not + trigger post call and will therefore will remain unpartitioned""" + self.get_param_coordinator(training=self.module.training).release_and_reset_all( + self.module) + for param in iter_params(self.module, recurse=True): + if param.ds_status != ZeroParamStatus.NOT_AVAILABLE: + raise RuntimeError(f"{param.ds_summary()} expected to be released") + + def get_param_coordinator(self, training): + if not training in self.param_coordinators: + self.param_coordinators[training] = PartitionedParameterCoordinator( + prefetch_bucket_sz=self._prefetch_bucket_sz, + max_reuse_distance_in_numel=self._max_reuse_distance_in_numel, + max_available_parameters_in_numel=self. 
+ _max_available_parameters_in_numel, + allgather_stream=self.__allgather_stream, + prefetch_nvme=self.offload_device == OffloadDeviceEnum.nvme, + ) + + return self.param_coordinators[training] + + def _convert_to_zero_parameters(self, ds_config, module, mpu): + non_zero_params = [p for p in module.parameters() if not is_zero_param(p)] + if non_zero_params: + zero_params = [p for p in module.parameters() if is_zero_param(p)] + if zero_params: + zero_params[0].convert_to_zero_parameters(param_list=non_zero_params) + else: + group = None + if mpu: + group = mpu.get_data_parallel_group() + + Init(module=module, + data_parallel_group=group, + dtype=self.dtype, + config_dict_or_path=ds_config, + remote_device=self.offload_device, + pin_memory=self.offload_param_pin_memory, + mpu=mpu) + + def destroy(self): + self._remove_module_hooks() + + def _remove_module_hooks(self): + num_forward_hooks = len(self.forward_hooks) + num_backward_hooks = len(self.backward_hooks) + + for hook in self.forward_hooks: + hook.remove() + + for hook in self.backward_hooks: + hook.remove() + + print_rank_0( + f'Deleted module hooks: forward = {num_forward_hooks}, backward = {num_backward_hooks}', + force=False) + + def setup_zero_stage3_hooks(self): + self.hierarchy = 0 + + #reset step if in inference mode + @instrument_w_nvtx + def _end_of_forward_hook(module, *args): + + if not torch._C.is_grad_enabled(): + self.get_param_coordinator(training=False).reset_step() + + #likely one of them should be enough but just to be safe + self._register_hooks_recursively(self.module) + self.module.register_forward_hook(_end_of_forward_hook) + + # Add top module to stack trace + global FWD_MODULE_STACK + FWD_MODULE_STACK.append(self.module) + + def mark_persistent_parameters(self, param_threshold, model_threshold): + persistent_params = [] + total_persistent_parameters = 0 + params_count = 0 + for _, param in self.module.named_parameters(recurse=True): + if param.ds_numel + total_persistent_parameters > 
model_threshold: + continue + + if param.ds_numel < param_threshold: + params_count += 1 + param.ds_persist = True + persistent_params.append(param) + total_persistent_parameters += param.ds_numel + + print_rank_0( + f"Parameter Offload: Total persistent parameters: {total_persistent_parameters} in {params_count} params", + force=True) + + return persistent_params + + def _register_hooks_recursively(self, module, count=[0]): + my_count = count[0] + module.id = my_count + + #print(f"{module.__class__} : {module.id}") + + for child in module.children(): + count[0] = count[0] + 1 + self._register_hooks_recursively(child, count=count) + + @instrument_w_nvtx + def _pre_forward_module_hook(module, *args): + self.pre_sub_module_forward_function(module) + + @instrument_w_nvtx + def _post_forward_module_hook(module, input, output): + global FWD_MODULE_STACK + FWD_MODULE_STACK.pop() + if output is None: + output = [] + elif not isinstance(output, (list, tuple)): + if torch.is_tensor(output): + output = [output] + else: + #print(f'got UNKNOWN type {type(output)}') + outputs = [] + output = output if isinstance(output, dict) else vars(output) + for name, val in output.items(): + if not name.startswith('__') and torch.is_tensor(val): + outputs.append(val) + output = outputs + + for item in filter( + lambda item: is_zero_param(item) or hasattr(item, + 'ds_param_alias'), + output): + key = id(item) if hasattr(item, 'ds_id') else id(item.ds_param_alias) + actual_external_param = item if hasattr(item, + 'ds_id') else item.ds_param_alias + + if not any(key in m._external_params for m in FWD_MODULE_STACK): + actual_external_param.is_external_param = True + module_to_register = FWD_MODULE_STACK[-1] + register_external_parameter(module_to_register, + actual_external_param) + print_rank_0( + f'Registering dangling parameter for module {module_to_register.__class__.__name__}, ds_id = {actual_external_param.ds_id}.', + force=False) + + # It's possible that the parameter was already 
external to the completed module. If so, remove it the + # registration as it will be covered by the outer module instead. + if key in module._external_params: + print_rank_0( + f' Unregistering nested dangling parameter from module {module.__class__.__name__}, ds_id = {actual_external_param.ds_id}', + force=False) + unregister_external_parameter(module, actual_external_param) + + actual_external_param.all_gather() + + self.post_sub_module_forward_function(module) + + def _pre_backward_module_hook(module, inputs, output): + @instrument_w_nvtx + def _run_before_backward_function(sub_module): + # some models (e.g. Albert) may run multiple forwards on the same layer in a loop + # before doing backwards, so each backward will need a pre-fetch - using reference + # counting to support this scenario + #print(f"COUNTER before: {sub_module.applied_pre_backward_ref_cnt}") + if sub_module.applied_pre_backward_ref_cnt > 0: + self.pre_sub_module_backward_function(sub_module) + sub_module.applied_pre_backward_ref_cnt -= 1 + #print(f"COUNTER after: {sub_module.applied_pre_backward_ref_cnt}") + + return _apply_to_tensors_only(module, + PreBackwardFunction, + _run_before_backward_function, + output) + + #This is an alternate to doing _post_backward_module_hook + #it uses tensor.register_hook instead of using torch.autograd.Function + def _alternate_post_backward_module_hook(module, inputs): + module.ds_grads_remaining = 0 + + #print(f"Before Forward {module.__class__.__name__}") + + def _run_after_backward_hook(*unused): + module.ds_grads_remaining = module.ds_grads_remaining - 1 + if module.ds_grads_remaining == 0: + #print(f"After backward {module.__class__.__name__}") + self.post_sub_module_backward_function(module) + + def _run_before_forward_function(input): + if input.requires_grad: + module.ds_grads_remaining += 1 + + return _apply_forward_and_backward_to_tensors_only( + module, + _run_before_forward_function, + _run_after_backward_hook, + inputs) + + def 
_post_backward_module_hook(module, inputs): + module.ds_grads_remaining = 0 + + @instrument_w_nvtx + def _run_after_backward_function(sub_module): + if sub_module.ds_grads_remaining == 0: + self.post_sub_module_backward_function(sub_module) + + return _apply_to_tensors_only(module, + PostBackwardFunction, + _run_after_backward_function, + inputs) + + # Pre forward hook + self.forward_hooks.append( + module.register_forward_pre_hook(_pre_forward_module_hook)) + + # Post forward hook + self.forward_hooks.append( + module.register_forward_hook(_post_forward_module_hook)) + + # Pre backward hook + self.backward_hooks.append( + module.register_forward_hook(_pre_backward_module_hook)) + + # post backward hook + self.backward_hooks.append( + module.register_forward_pre_hook(_post_backward_module_hook)) + + @torch.no_grad() + def pre_sub_module_forward_function(self, sub_module): + see_memory_usage(f"Before sub module function {sub_module.__class__.__name__}", + force=False) + + global FWD_MODULE_STACK + FWD_MODULE_STACK.append(sub_module) + + param_coordinator = self.get_param_coordinator(training=sub_module.training) + param_coordinator.trace_prologue(sub_module) + if param_coordinator.is_record_trace(): + param_coordinator.record_module(sub_module) + param_coordinator.fetch_sub_module(sub_module) + + see_memory_usage( + f"Before sub module function {sub_module.__class__.__name__} after fetch", + force=False) + + @torch.no_grad() + def post_sub_module_forward_function(self, sub_module): + see_memory_usage( + f"After sub module function {sub_module.__class__.__name__} {sub_module.id} before release", + force=False) + + param_coordinator = self.get_param_coordinator(training=sub_module.training) + param_coordinator.release_sub_module(sub_module) + + see_memory_usage( + f"After sub module function {sub_module.__class__.__name__} {sub_module.id} after release", + force=False) + + @torch.no_grad() + def pre_sub_module_backward_function(self, sub_module): + param_coordinator = 
self.get_param_coordinator(training=sub_module.training) + param_coordinator.trace_prologue(sub_module) + if param_coordinator.is_record_trace(): + param_coordinator.record_module(sub_module) + param_coordinator.fetch_sub_module(sub_module) + + @torch.no_grad() + def post_sub_module_backward_function(self, sub_module): + see_memory_usage( + f"After sub module backward function {sub_module.__class__.__name__} {sub_module.id} before release", + force=False) + + self.get_param_coordinator( + training=sub_module.training).release_sub_module(sub_module) + + see_memory_usage( + f"After sub module backward function {sub_module.__class__.__name__} {sub_module.id} after release", + force=False) diff --git a/deepspeed/runtime/zero/partition_parameters.py b/deepspeed/runtime/zero/partition_parameters.py old mode 100644 new mode 100755 index 80b1ee3..39cfb7f --- a/deepspeed/runtime/zero/partition_parameters.py +++ b/deepspeed/runtime/zero/partition_parameters.py @@ -5,7 +5,6 @@ Licensed under the MIT license. 
import math import os -import time import types from typing import Callable, Iterable from enum import Enum @@ -15,60 +14,40 @@ from typing import List import torch from torch import Tensor -import torch.distributed as dist -from torch.distributed.distributed_c10d import _get_global_rank, group +from deepspeed import comm as dist from torch.nn import Module from torch.nn import Parameter -from .linear import LinearModuleForZeroStage3, LinearFunctionForZeroStage3 -from .offload_constants import * +from .linear import zero3_linear_wrap import deepspeed from ..utils import get_only_unique_item, see_memory_usage from deepspeed.runtime.zero.utils import assert_ints_same_as_other_ranks -from deepspeed.utils import init_distributed, instrument_w_nvtx, logger -from deepspeed.utils.debug import debug_param2name_id_shape, debug_param2name_id_shape_device, debug_module2name, debug_param2name, debug_param2name_id_shape_status, printflock, log_rank_file -from deepspeed.utils.logging import logger - +from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum +from deepspeed.utils import instrument_w_nvtx, logger +from deepspeed.comm.comm import init_distributed +from deepspeed.utils.debug import (debug_param2name_id_shape, + debug_param2name_id_shape_device, + debug_module2name, + debug_param2name_id, + debug_param2name_id_shape_status) +from deepspeed.accelerator import get_accelerator from ..swap_tensor.partitioned_param_swapper import AsyncPartitionedParameterSwapper, PartitionedParamStatus param_count = 0 partitioned_param_data_shape = [0] +zero_init_enabled = False -if hasattr(torch.distributed, "_all_gather_base"): - def torch_allgather_fn(input_tensor: Tensor, output_tensor: Tensor, group): - try: - return instrument_w_nvtx(torch.distributed._all_gather_base)( - output_tensor, - input_tensor, - group=group, - async_op=True, - ) - except RuntimeError as e: - raise RuntimeError( - f"output_tensor: {output_tensor.device}, input_tensor: {input_tensor.device}" - ) 
from e -else: - logger.warning( - "unable to find torch.distributed._all_gather_base. will fall back to " - "torch.distributed.all_gather which will result in suboptimal performance. " - "please consider upgrading your pytorch installation.") - - def torch_allgather_fn(input_tensor: Tensor, output_tensor: Tensor, group): - output_tensors = list( - torch.chunk(output_tensor, - torch.distributed.get_world_size(group))) - return instrument_w_nvtx(torch.distributed.all_gather)( - output_tensors, - input_tensor, - group=group, - async_op=True, - ) +def _dist_allgather_fn(input_tensor: Tensor, output_tensor: Tensor, group=None): + return instrument_w_nvtx(dist.allgather_fn)(output_tensor, + input_tensor, + group=group, + async_op=True) def print_rank_0(message, debug=False, force=False): - rank = torch.distributed.get_rank() + rank = dist.get_rank() if rank == 0 and (debug or force): print(message) # other variations @@ -79,7 +58,7 @@ def print_rank_0(message, debug=False, force=False): def debug_rank0(msg: str) -> None: - if torch.distributed.get_rank() == 0: + if dist.get_rank() == 0: logger.debug(msg) @@ -213,7 +192,8 @@ def zero_wrapper_for_fp_tensor_constructor(fn: Callable, target_fp_dtype: torch.dtype) -> Callable: def wrapped_fn(*args, **kwargs) -> Tensor: if kwargs.get("device", None) is None: - kwargs['device'] = torch.device('cuda:{}'.format(os.environ["LOCAL_RANK"])) + kwargs['device'] = torch.device(get_accelerator().device_name( + os.environ["LOCAL_RANK"])) tensor: Tensor = fn(*args, **kwargs) if tensor.is_floating_point(): tensor = tensor.to(target_fp_dtype) @@ -225,7 +205,7 @@ def zero_wrapper_for_fp_tensor_constructor(fn: Callable, def get_new_tensor_fn_for_dtype(dtype: torch.dtype) -> Callable: def new_tensor(cls, *args) -> Tensor: - device = torch.device('cuda:{}'.format(os.environ["LOCAL_RANK"])) + device = torch.device(get_accelerator().device_name(os.environ["LOCAL_RANK"])) tensor = _orig_torch_empty(0, device=device).new_empty(*args) if 
tensor.is_floating_point(): tensor = tensor.to(dtype) @@ -253,10 +233,10 @@ def get_all_subclasses(cls): def free_param(param: Parameter) -> None: """Free underlying storage of a parameter.""" assert not param.ds_active_sub_modules, param.ds_summary() - if param.data.is_cuda: + if get_accelerator().on_accelerator(param.data): # need to make sure that we don't free the parameter while it is still # being used for computation - param.data.record_stream(torch.cuda.current_stream()) + param.data.record_stream(get_accelerator().current_stream()) # param.data doesn't store anything meaningful in partitioned state param.data = torch.empty(0, dtype=param.dtype, device=param.device) param.ds_status = ZeroParamStatus.NOT_AVAILABLE @@ -281,8 +261,10 @@ class InsertPostInitMethodToModuleSubClasses(object): assert self.dtype in [torch.half, torch.bfloat16, torch.float], f"Invalid data type {self.dtype}, allowed values are [torch.half, torch.bfloat16, torch.float]" def __enter__(self): + global zero_init_enabled if not self.enabled: return + zero_init_enabled = True def apply_with_gather(orig_module_apply_fn: Callable) -> Callable: """many models make use of child modules like Linear or Embedding which @@ -334,9 +316,7 @@ class InsertPostInitMethodToModuleSubClasses(object): fn_to_apply(module_to_apply_fn_to) for param in params_to_apply_fn_to: - torch.distributed.broadcast(param.data, - 0, - group=param.ds_process_group) + dist.broadcast(param.data, 0, group=param.ds_process_group) for param in params_to_apply_fn_to: param.partition(has_been_updated=True) @@ -423,36 +403,15 @@ class InsertPostInitMethodToModuleSubClasses(object): "nn.functional.linear has been overridden with a more memory efficient version. 
This will persist unless manually reset.", force=False) self.linear_bk = torch.nn.functional.linear - torch.nn.functional.linear = LinearFunctionForZeroStage3.apply + torch.nn.functional.linear = zero3_linear_wrap def __exit__(self, exc_type, exc_value, traceback): if not self.enabled: return - def _disable_class(cls): - cls.__init__ = cls._old_init - - # Replace .__init__() for all existing subclasses of torch.nn.Module - for subclass in get_all_subclasses(torch.nn.modules.module.Module): - _disable_class(subclass) - - # putting methods back the way we found them - torch.nn.modules.module.Module.__init_subclass__ = torch.nn.modules.module.Module._old_init_subclass - torch.nn.modules.module.Module.apply = torch.nn.modules.module.Module._old_apply - - torch.Tensor.__new__ = torch.Tensor.__old_new__ - torch.empty = _orig_torch_empty - torch.zeros = _orig_torch_zeros - torch.ones = _orig_torch_ones - torch.full = _orig_torch_full + shutdown_init_context() - # un doing it here will undo it during training - # if self.mem_efficient_linear: - # torch.nn.functional.linear = self.linear_bk - # if self.mem_efficient_linear: - # torch.nn.functional.linear = self.linear_bk - - if torch.distributed.get_rank() == 0: + if dist.get_rank() == 0: logger.info("finished initializing model with %.2fB parameters", param_count / 1e9) @@ -479,6 +438,38 @@ class InsertPostInitMethodToModuleSubClasses(object): self.dtype = dtype or torch.half +def shutdown_init_context(): + global zero_init_enabled + + if not zero_init_enabled: + return + + def _disable_class(cls): + cls.__init__ = cls._old_init + + # Replace .__init__() for all existing subclasses of torch.nn.Module + for subclass in get_all_subclasses(torch.nn.modules.module.Module): + _disable_class(subclass) + + # putting methods back the way we found them + torch.nn.modules.module.Module.__init_subclass__ = torch.nn.modules.module.Module._old_init_subclass + torch.nn.modules.module.Module.apply = 
torch.nn.modules.module.Module._old_apply + + torch.Tensor.__new__ = torch.Tensor.__old_new__ + torch.empty = _orig_torch_empty + torch.zeros = _orig_torch_zeros + torch.ones = _orig_torch_ones + torch.full = _orig_torch_full + + # un doing it here will undo it during training + # if self.mem_efficient_linear: + # torch.nn.functional.linear = self.linear_bk + # if self.mem_efficient_linear: + # torch.nn.functional.linear = self.linear_bk + + zero_init_enabled = False + + class AllGatherHandle: def __init__(self, handle, param: Parameter) -> None: if param.ds_status != ZeroParamStatus.INFLIGHT: @@ -537,7 +528,7 @@ class AllGatherCoalescedHandle: param.ds_status = ZeroParamStatus.AVAILABLE for part_to_copy in partitions: - part_to_copy.record_stream(torch.cuda.current_stream()) + part_to_copy.record_stream(get_accelerator().current_stream()) param_offset += param.ds_tensor.ds_numel @@ -566,7 +557,7 @@ class Init(InsertPostInitMethodToModuleSubClasses): Args: module (``torch.nn.Module``, optional): If provided, partition the model as if it was constructed in the context. - data_parallel_group (``torch.distributed`` process group, optional): + data_parallel_group (``deepspeed.comm`` process group, optional): The group of processes to partition among. Defaults to all processes. mem_efficient_linear (bool, optional): Replace torch.nn.functional.linear with an implementation that allows @@ -615,8 +606,8 @@ class Init(InsertPostInitMethodToModuleSubClasses): this feature must be used. .. note:: - Initializes ``torch.distributed`` if it has not already been done so. - See :meth:`deepseed.init_distributed` for more information. + Initializes ``deepspeed.comm`` if it has not already been done so. + See :meth:`deepspeed.init_distributed` for more information. .. 
note:: Can also be used as a decorator: @@ -670,36 +661,41 @@ class Init(InsertPostInitMethodToModuleSubClasses): mem_efficient_linear=mem_efficient_linear, ds_config=_ds_config, dtype=dtype) - if not torch.distributed.is_initialized(): + if not dist.is_initialized(): init_distributed() - assert torch.distributed.is_initialized(), "Parameters cannot be scattered without initializing torch.distributed" + assert dist.is_initialized(), "Parameters cannot be scattered without initializing deepspeed.comm" if data_parallel_group is None: - self.ds_process_group = torch.distributed.group.WORLD + self.ds_process_group = dist.get_world_group() else: self.ds_process_group = data_parallel_group - self.rank = torch.distributed.get_rank(group=self.ds_process_group) - self.world_size = torch.distributed.get_world_size(group=self.ds_process_group) + self.rank = dist.get_rank(group=self.ds_process_group) + self.world_size = dist.get_world_size(group=self.ds_process_group) # Local device is the device where the parameters are consumed, must be default device. # It is the device where parameters are fully instantiated using allgather - self.local_device = torch.device('cuda:{}'.format(os.environ["LOCAL_RANK"])) - torch.cuda.set_device(self.local_device) + self.local_device = torch.device(get_accelerator().device_name( + os.environ["LOCAL_RANK"])) + get_accelerator().set_device(self.local_device) if _ds_config is not None and _ds_config.zero_config.offload_param is not None: - remote_device = _ds_config.zero_config.offload_param[OFFLOAD_PARAM_DEVICE] - pin_memory = _ds_config.zero_config.offload_param[OFFLOAD_PARAM_PIN_MEMORY] + remote_device = _ds_config.zero_config.offload_param.device + pin_memory = _ds_config.zero_config.offload_param.pin_memory self._validate_remote_device(remote_device, _ds_config) # Remote device is the device where parameter partitions are stored # It can be same as local_device or it could be CPU or NVMe. 
- self.remote_device = self.local_device if remote_device is None else remote_device - self.pin_memory = pin_memory if (self.remote_device - == OFFLOAD_CPU_DEVICE) else False + self.remote_device = self.local_device if remote_device in [ + None, + OffloadDeviceEnum.none + ] else remote_device + self.pin_memory = pin_memory if ( + self.remote_device in [OffloadDeviceEnum.cpu, + OffloadDeviceEnum.nvme]) else False # Enable fp16 param swapping to NVMe - if self.remote_device == OFFLOAD_NVME_DEVICE: + if self.remote_device == OffloadDeviceEnum.nvme: self.param_swapper = AsyncPartitionedParameterSwapper(_ds_config, self.dtype) else: self.param_swapper = None @@ -710,10 +706,9 @@ class Init(InsertPostInitMethodToModuleSubClasses): self._convert_to_zero_parameters(module.parameters(recurse=True)) self.use_all_gather_base = False - try: - from torch.distributed.distributed_c10d import _all_gather_base as all_gather + if dist.has_allgather_base(): self.use_all_gather_base = True - except: + else: logger.info( f"_all_gather_base API is not available in torch {torch.__version__}") @@ -726,19 +721,18 @@ class Init(InsertPostInitMethodToModuleSubClasses): def _validate_remote_device(self, remote_device, ds_config): if ds_config is not None: - if remote_device in [None, OFFLOAD_CPU_DEVICE]: + if remote_device in [None, OffloadDeviceEnum.cpu]: if ds_config.zero_config.offload_param is not None: - offload_param_device = ds_config.zero_config.offload_param[ - OFFLOAD_PARAM_DEVICE] - assert offload_param_device != OFFLOAD_NVME_DEVICE, \ - f"{OFFLOAD_PARAM_DEVICE} in DeepSpeed Config cannot be {offload_param_device} if remote device is {remote_device}." + offload_param_device = ds_config.zero_config.offload_param.device + assert offload_param_device != OffloadDeviceEnum.nvme, \ + f"'device' in DeepSpeed Config cannot be {offload_param_device} if remote device is {remote_device}." 
- if remote_device == OFFLOAD_NVME_DEVICE: + if remote_device == OffloadDeviceEnum.nvme: assert ds_config.zero_config.offload_param is not None, \ - f'{OFFLOAD_PARAM} must be defined in DeepSpeed Config if remote device is {OFFLOAD_NVME_DEVICE}.' + f'"offload_param" must be defined in DeepSpeed Config if remote device is {OffloadDeviceEnum.nvme}.' - assert ds_config.zero_config.offload_param[OFFLOAD_PARAM_NVME_PATH] is not None, \ - f'{OFFLOAD_PARAM_NVME_PATH} in DeepSpeed Config cannot be None if remote device is {OFFLOAD_NVME_DEVICE}' + assert ds_config.zero_config.offload_param.nvme_path is not None, \ + f'"nvme_path" in DeepSpeed Config cannot be None if remote device is {OffloadDeviceEnum.nvme}' def _post_init_method(self, module): #see_memory_usage(f"Before converting parmas in {module.__class__.__name__}", force=False) @@ -756,10 +750,10 @@ class Init(InsertPostInitMethodToModuleSubClasses): f"Partitioning param {debug_param2name_id_shape(param)} module={debug_module2name(module)}" ) - if param.is_cuda: - torch.distributed.broadcast(param, 0, self.ds_process_group) + if get_accelerator().on_accelerator(param): + dist.broadcast(param, 0, self.ds_process_group) else: - if torch.distributed.get_rank() == 0: + if dist.get_rank() == 0: logger.warn(f"param `{name}` in {module.__class__.__name__} " f"not on GPU so was not broadcasted from rank 0") @@ -801,7 +795,7 @@ class Init(InsertPostInitMethodToModuleSubClasses): # else this is set to None param.nvme_swapper = self.param_swapper - # DeepSped Param ID + # DeepSpeed Param ID param.ds_id = Init.param_id Init.param_id += 1 @@ -848,14 +842,13 @@ class Init(InsertPostInitMethodToModuleSubClasses): param_buffer = torch.empty( math.ceil(param.ds_numel / self.world_size) * self.world_size, dtype=param.dtype, - device=torch.cuda.current_device(), + device=get_accelerator().current_device_name(), requires_grad=False, ) - handle = torch_allgather_fn( - param.ds_tensor.to(torch.cuda.current_device()), + handle = 
_dist_allgather_fn( + param.ds_tensor.to(get_accelerator().current_device_name()), param_buffer, - self.ds_process_group, - ) + self.ds_process_group) param.data = param_buffer.narrow(0, 0, param.ds_numel).view(param.ds_shape).to( @@ -866,7 +859,7 @@ class Init(InsertPostInitMethodToModuleSubClasses): flat_tensor = torch.empty(partition_sz * self.world_size, dtype=get_only_unique_item(p.dtype for p in params), - device=torch.cuda.current_device(), + device=get_accelerator().current_device_name(), requires_grad=False) partitions: List[Parameter] = [] for i in range(self.world_size): @@ -875,10 +868,12 @@ class Init(InsertPostInitMethodToModuleSubClasses): partition_sz * i, partition_sz)) - instrument_w_nvtx(torch.cat)( - [p.ds_tensor.to(torch.cuda.current_device()) for p in params], - out=partitions[self.rank]) - handle = torch_allgather_fn(partitions[self.rank], + instrument_w_nvtx(torch.cat)([ + p.ds_tensor.to(get_accelerator().current_device_name()) + for p in params + ], + out=partitions[self.rank]) + handle = _dist_allgather_fn(partitions[self.rank], flat_tensor, self.ds_process_group) @@ -930,16 +925,16 @@ class Init(InsertPostInitMethodToModuleSubClasses): def padding_size(): return self._padding_size(param) - def partitioned_size(): - return self._partitioned_size(param) + def partition_numel(): + return self._partition_numel(param) def item_override(): param.all_gather() return param._orig_item() - def ds_summary(slf: torch.Tensor) -> dict: + def ds_summary(slf: torch.Tensor, use_debug_name: bool = False) -> dict: return { - "id": slf.ds_id, + "id": debug_param2name_id(slf) if use_debug_name else slf.ds_id, "status": slf.ds_status.name, "numel": slf.numel(), "ds_numel": slf.ds_numel, @@ -973,7 +968,7 @@ class Init(InsertPostInitMethodToModuleSubClasses): # Partitioning size utilities param.aligned_size = aligned_size param.padding_size = padding_size - param.partitioned_size = partitioned_size + param.partition_numel = partition_numel param.ds_summary = 
types.MethodType(ds_summary, param) param.item = allgather_before(param.item) @@ -987,7 +982,7 @@ class Init(InsertPostInitMethodToModuleSubClasses): remainder = param.ds_numel % self.world_size return (self.world_size - remainder) if remainder else 0 - def _partitioned_size(self, param): + def _partition_numel(self, param): return param.ds_tensor.ds_numel def _ensure_availability_of_partitioned_params(self, params): @@ -995,10 +990,10 @@ class Init(InsertPostInitMethodToModuleSubClasses): swap_in_flight = [] for param in params: if param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE: - assert param.ds_tensor.final_location == OFFLOAD_NVME_DEVICE and param.ds_status == ZeroParamStatus.NOT_AVAILABLE + assert param.ds_tensor.final_location == OffloadDeviceEnum.nvme and param.ds_status == ZeroParamStatus.NOT_AVAILABLE swap_in_list.append(param) if param.ds_tensor.status == PartitionedParamStatus.INFLIGHT: - assert param.ds_tensor.final_location == OFFLOAD_NVME_DEVICE and param.ds_status == ZeroParamStatus.NOT_AVAILABLE + assert param.ds_tensor.final_location == OffloadDeviceEnum.nvme and param.ds_status == ZeroParamStatus.NOT_AVAILABLE swap_in_flight.append(param) if len(swap_in_list) > 0: swap_in_list[0].nvme_swapper.swap_in(swap_in_list, async_op=False) @@ -1067,7 +1062,7 @@ class Init(InsertPostInitMethodToModuleSubClasses): # if numel in empty_buffers: # empty_buffers[numel].append(buffer) - # if torch.distributed.get_rank(): + # if deepspeed.comm.get_rank(): # print(f"Releasing {param.data.numel()}") if param.ds_tensor is not None and not has_been_updated: @@ -1081,7 +1076,7 @@ class Init(InsertPostInitMethodToModuleSubClasses): see_memory_usage(f'After partitioning param {param.ds_id} {param.shape}', force=False) - if param.ds_tensor.final_location == OFFLOAD_NVME_DEVICE: + if param.ds_tensor.final_location == OffloadDeviceEnum.nvme: print_rank_0( f"Param {param.ds_id} partition released since it exists in nvme", force=False) @@ -1094,9 +1089,9 @@ 
class Init(InsertPostInitMethodToModuleSubClasses): if param.ds_tensor is None: final_location = None - if self.remote_device == OFFLOAD_NVME_DEVICE and self.param_swapper.swappable_tensor( + if self.remote_device == OffloadDeviceEnum.nvme and self.param_swapper.swappable_tensor( numel=partition_size): - final_location = OFFLOAD_NVME_DEVICE + final_location = OffloadDeviceEnum.nvme buffer = self.param_swapper.get_buffer(param, partition_size) partitioned_tensor = torch.empty(0, dtype=param.dtype, @@ -1110,10 +1105,11 @@ class Init(InsertPostInitMethodToModuleSubClasses): partitioned_tensor = torch.empty( partition_size, dtype=param.dtype, - device=OFFLOAD_CPU_DEVICE if self.remote_device - == OFFLOAD_NVME_DEVICE else self.remote_device) + device=OffloadDeviceEnum.cpu if self.remote_device + == OffloadDeviceEnum.nvme else self.remote_device) if self.pin_memory: - partitioned_tensor = partitioned_tensor.pin_memory() + partitioned_tensor = get_accelerator().pin_memory( + partitioned_tensor) partitioned_tensor.requires_grad = False param.ds_tensor = partitioned_tensor @@ -1161,7 +1157,7 @@ class Init(InsertPostInitMethodToModuleSubClasses): see_memory_usage(f'After partitioning param {param.ds_id} {param.shape}', force=False) - if param.ds_tensor.final_location == OFFLOAD_NVME_DEVICE: + if param.ds_tensor.final_location == OffloadDeviceEnum.nvme: self.param_swapper.swap_out_and_release([param]) print_rank_0( f"ID {param.ds_id} Offloaded to nvme offload and buffers released.") @@ -1205,7 +1201,7 @@ class Init(InsertPostInitMethodToModuleSubClasses): f'After allocate allgather param {debug_param2name_id_shape_status(param)} {aligned_param_size} {partition_size} ', force=False) - torch.cuda.synchronize() + get_accelerator().synchronize() print_rank_0( f"{'--'* hierarchy}----allgather param with {debug_param2name_id_shape_status(param)} partition size={partition_size}" @@ -1218,10 +1214,11 @@ class Init(InsertPostInitMethodToModuleSubClasses): # return None if 
self.use_all_gather_base: # try the _all_gather_base on PyTorch master branch - handle = dist._all_gather_base(flat_tensor, - param.ds_tensor.cuda(), - group=self.ds_process_group, - async_op=async_op) + handle = dist.all_gather_base(flat_tensor, + param.ds_tensor.to( + get_accelerator().device_name()), + group=self.ds_process_group, + async_op=async_op) else: partitions = [] for i in range(self.world_size): @@ -1253,7 +1250,7 @@ class Init(InsertPostInitMethodToModuleSubClasses): local_tensors = [] for param in param_list: partition_sizes.append(param.ds_tensor.ds_numel) - local_tensors.append(param.ds_tensor.cuda()) + local_tensors.append(param.ds_tensor.to(get_accelerator().device_name())) # allocate memory for allgather params allgather_params = [] @@ -1274,17 +1271,17 @@ class Init(InsertPostInitMethodToModuleSubClasses): if self.use_all_gather_base: # try the _all_gather_base from Pytorch master - h = dist._all_gather_base(allgather_params[param_idx], - input_tensor, - group=self.ds_process_group, - async_op=True) + h = dist.all_gather_base(allgather_params[param_idx], + input_tensor, + group=self.ds_process_group, + async_op=True) else: output_list = [] for i in range(self.world_size): psize = partition_sizes[param_idx] partition = allgather_params[param_idx].narrow(0, i * psize, psize) output_list.append(partition) - if not partition.is_cuda: + if not get_accelerator().on_accelerator(partition): logger.warning( f'param {param_idx}, partition {i} is not on CUDA, partition shape {partition.size()}' ) @@ -1307,7 +1304,7 @@ class Init(InsertPostInitMethodToModuleSubClasses): param.ds_numel).view(param.ds_shape).data # guarantee the communication to be completed - torch.cuda.synchronize() + get_accelerator().synchronize() return None @@ -1321,7 +1318,7 @@ class Init(InsertPostInitMethodToModuleSubClasses): flat_tensor = torch.empty(tensor_size, dtype=param_list[0].dtype, device=self.local_device) - flat_tensor.requres_grad = False + flat_tensor.requires_grad = 
False partitions = [] for i in range(self.world_size): start = partition_size * i @@ -1339,10 +1336,10 @@ class Init(InsertPostInitMethodToModuleSubClasses): offset += param_numel - torch.distributed.all_gather(partitions, - partitions[self.rank], - group=self.ds_process_group, - async_op=False) + dist.all_gather(partitions, + partitions[self.rank], + group=self.ds_process_group, + async_op=False) param_offset = 0 for param in param_list: @@ -1436,11 +1433,11 @@ class Init(InsertPostInitMethodToModuleSubClasses): #print("after reduce scatter gradients") input_list.append(input) - rank = torch.distributed.get_rank(group=self.ds_process_group) - handle = torch.distributed.reduce_scatter(input_list[rank], - input_list, - group=self.ds_process_group, - async_op=True) + rank = dist.get_rank(group=self.ds_process_group) + handle = dist.reduce_scatter(input_list[rank], + input_list, + group=self.ds_process_group, + async_op=True) return handle, input_list[rank] @@ -1472,7 +1469,7 @@ class Init(InsertPostInitMethodToModuleSubClasses): assert partition_buffer.numel( ) >= partition_size, f"The partition buffer size {partition_buffer.numel()} should match the size of param.ds_tensor {partition_size}" - rank = torch.distributed.get_rank(group=self.ds_process_group) + rank = dist.get_rank(group=self.ds_process_group) start = partition_size * rank end = start + partition_size @@ -1526,7 +1523,7 @@ class GatheredParameters: again upon exit. Args: - params (``torch.nn.Parameter``): A single parameter or a list of parameters to collect. + params (``torch.nn.Parameter``): A single parameter, or an iterable of parameters (list, tuple, generator) of parameters to collect. It's assumed that all parameters are zero params. modifier_rank (int, optional): If specified, this rank's parameter will be broadcasted on exit from the context. This argument is required if ``params`` are @@ -1536,9 +1533,11 @@ class GatheredParameters: registered as external parameters of ``fwd_module``. 
See :meth:`deepspeed.zero.register_external_parameter`. enabled (bool, optional): If ``False``, this context is a no-op. Defaults to ``True``. - Important: Make sure to use ``modifier_rank`` that is not ``None`` (e.g. ``modifier_rank=0``) + Important: Make sure to use ``modifier_rank`` that is not ``None`` (e.g., ``modifier_rank=0``) if you need the GPU memory allocated by gather to be released upon exit from the context manager. + Important: if ``params`` isn't an iterable of parameters or a single parameter it'll be silently ignored! + Examples ======== @@ -1552,12 +1551,12 @@ class GatheredParameters: with deepspeed.zero.GatheredParameters(linear.weight, modifier_rank=0): - if torch.distributed.get_rank() == 0: + if deepspeed.comm.get_rank() == 0: linear.weight.zero_() with deepspeed.zero.GatheredParameters(linear.weight, modifier_rank=0): - if torch.distributed.get_rank() == 0: + if deepspeed.comm.get_rank() == 0: linear.weight.zero_() #. Collect a partitioned weight to pass to another module during @@ -1591,7 +1590,7 @@ class GatheredParameters: # manager gathers (unpartitions) the params of the current layer, then loads from # the state dict and then re-partitions them again with deepspeed.zero.GatheredParameters(list(module.parameters(recurse=False)), modifier_rank=0): - if torch.distributed.get_rank() == 0: + if deepspeed.comm.get_rank() == 0: module._load_from_state_dict(state_dict, prefix) for name, child in module._modules.items(): @@ -1600,15 +1599,20 @@ class GatheredParameters: load(model, prefix="") - If this approach is not used, then the full model will first get copied to each GPU. For models - bigger than the memory of a single gpu this method is required. + If this approach is not used, then the full model will first be copied to each GPU. For models + bigger than the memory of a single GPU, this method is required. 
""" self.enabled = enabled if not enabled: return - if not isinstance(params, list): + if isinstance(params, Iterable) and not isinstance(params, torch.Tensor): + # deal with generators like model.parameters() + # must convert to list to be able to iterate more than once if we get a generator + params = list(params) + else: + # single param params = [params] # enable if at least one is zero-param, otherwise a noop @@ -1619,12 +1623,12 @@ class GatheredParameters: self.params = [p for p in params if hasattr(p, "ds_id")] self.src_rank = None if modifier_rank is not None: - if self.params[0].ds_process_group == torch.distributed.group.WORLD: + if self.params[0].ds_process_group == dist.get_world_group(): self.src_rank = modifier_rank else: # A group was specified; convert DP rank to global rank - self.src_rank = _get_global_rank(self.params[0].ds_process_group, - modifier_rank) + self.src_rank = dist.get_global_rank(self.params[0].ds_process_group, + modifier_rank) self.fwd_module = fwd_module if self.fwd_module is not None: # is a no-op if already registered @@ -1640,13 +1644,14 @@ class GatheredParameters: if not self.enabled: return if self.src_rank is None: + self.params[0].partition(param_list=self.params, has_been_updated=False) return handles = [ - torch.distributed.broadcast(p, - self.src_rank, - group=p.ds_process_group, - async_op=True) for p in self.params + dist.broadcast(p, + self.src_rank, + group=p.ds_process_group, + async_op=True) for p in self.params ] for h in handles: h.wait() diff --git a/deepspeed/runtime/zero/partitioned_param_coordinator.py b/deepspeed/runtime/zero/partitioned_param_coordinator.py index 4958dcf..f6edfe6 100644 --- a/deepspeed/runtime/zero/partitioned_param_coordinator.py +++ b/deepspeed/runtime/zero/partitioned_param_coordinator.py @@ -4,18 +4,17 @@ Licensed under the MIT license. 
""" from dataclasses import dataclass -import functools import collections -from collections import OrderedDict, UserDict -from typing import Deque, Dict, Iterable, Set, Tuple -import torch -from torch.cuda import Event, Stream -from torch.nn import Module, Parameter +from collections import UserDict +from typing import Deque, Set +from deepspeed import comm as dist from deepspeed.utils.logging import logger +from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum from deepspeed.runtime.zero.partition_parameters import * -from deepspeed.runtime.zero.offload_constants import * from deepspeed.runtime.swap_tensor.partitioned_param_swapper import PartitionedParamStatus +from deepspeed.utils.debug import debug_module2name_id, debug_param2name_id +from deepspeed.accelerator import get_accelerator def debug_rank0(message: str) -> None: @@ -33,7 +32,7 @@ def iter_params(module: Module, recurse=False) -> Iterable[Parameter]: return map(lambda pair: pair[1], get_all_parameters(module, recurse)) -class TraceMode(Enum): +class ZeRoTraceMode(Enum): # Record trace of the network during a single forward+backward (for training) or forward (for inference) RECORD = 1 # Use recorded network trace to optimize current forward+backward or forward @@ -67,7 +66,7 @@ class PartitionedParameterCoordinator: prefetch_bucket_sz: int, max_reuse_distance_in_numel: int, max_available_parameters_in_numel: int, - allgather_stream: Stream, + allgather_stream: get_accelerator().Stream, prefetch_nvme: bool = False, ) -> None: # mapping of param -> handle for each param that is currently in flight @@ -75,12 +74,14 @@ class PartitionedParameterCoordinator: # keeps track of the number of submodules invoked so far. 
self.__step_id: int = 0 # network tracing mode - self.__trace_mode: TraceMode = TraceMode.RECORD + self.__trace_mode: ZeRoTraceMode = ZeRoTraceMode.RECORD # sequence of submodules/parameters in forward pass + backward pass self.__submodule_order: Iterable[Module] = [] self.__param_order: Iterable[__class__.__ParamInTrace] = [] self.__most_recent_step_id_param_fetched_for = collections.defaultdict( lambda: int(-1e10)) + self.__step_id_module_fetched_for = collections.defaultdict( + lambda: collections.deque()) # number of available params, and max number of available params self.__n_available_params: int = 0 self.__max_n_available_params: int = max_available_parameters_in_numel @@ -94,7 +95,7 @@ class PartitionedParameterCoordinator: self.hierarchy: int = 0 # stream that will be used for allgather operations - self.__allgather_stream: Stream = allgather_stream + self.__allgather_stream: get_accelerator().Stream = allgather_stream # limit the number of fetch events that can be queued at once # otherwise, what happens is memory is allocated by the host thread at the @@ -105,7 +106,7 @@ class PartitionedParameterCoordinator: # cudaMallocAsync/cudaFreeAsync. Choosing to not expose this to the user now # because ideally in the future its replaced by an async allocation # mechanism which doesn't require any configuration by the user. - self.__ongoing_fetch_events: Deque[Event] = collections.deque() + self.__ongoing_fetch_events: Deque[get_accelerator().Event] = collections.deque() # TODO. 
make this configurable via JSON self.__max_ongoing_fetch_events: int = 2 @@ -126,24 +127,29 @@ class PartitionedParameterCoordinator: self.__param_queue = None def is_complete_trace(self) -> bool: - return self.__trace_mode == TraceMode.COMPLETE + return self.__trace_mode == ZeRoTraceMode.COMPLETE def is_invalid_trace(self) -> bool: - return self.__trace_mode == TraceMode.INVALID + return self.__trace_mode == ZeRoTraceMode.INVALID def is_record_trace(self) -> bool: - return self.__trace_mode == TraceMode.RECORD + return self.__trace_mode == ZeRoTraceMode.RECORD def _invalidate_trace(self) -> None: if self.is_invalid_trace(): raise RuntimeError("attempted to invalidate already invalid trace") - self.__trace_mode = TraceMode.INVALID + self.__trace_mode = ZeRoTraceMode.INVALID self._clear_trace_structures() def trace_prologue(self, sub_module: Module) -> None: if self.is_complete_trace(): # sub_module must match expectation else invalidate trace cache if sub_module != self.__submodule_order[self.__step_id]: + expected_module_id = self.__submodule_order[self.__step_id].id + debug_rank0( + f"Invalidate trace cache @ step {self.__step_id}: " + f"expected module {expected_module_id}, but got module {sub_module.id}" + ) self._invalidate_trace() def record_module(self, sub_module: Module) -> None: @@ -151,17 +157,27 @@ class PartitionedParameterCoordinator: if not self.is_record_trace(): raise RuntimeError( f"attempted to record trace when status = {self.__trace_mode}") + self.__submodule_order.append(sub_module) + self.__step_id_module_fetched_for[sub_module.id].append(self.__step_id) def record_parameters(self, sub_module: Module) -> None: """adds sub module to trace""" if not self.is_record_trace(): raise RuntimeError( f"attempted to record trace when status = {self.__trace_mode}") + + step_id = self.__step_id_module_fetched_for[sub_module.id].popleft() for param in sorted(set(iter_params(sub_module)), key=lambda p: p.ds_id): self.__param_order.append( 
__class__.__ParamInTrace(param=param, - step_id_last_used_at=self.__step_id)) + step_id_last_used_at=step_id)) + + def construct_parameter_trace_from_module_trace(self): + """use module trace to construct parameter trace""" + self.__param_order = [] + for sub_module in self.__submodule_order: + self.record_parameters(sub_module) def reset_step(self) -> None: """indicate that we have completed one fwd+bwd for the model""" @@ -171,31 +187,49 @@ class PartitionedParameterCoordinator: f"{[p.ds_summary for p in self.__inflight_param_registry.keys()]}") if not self.is_complete_trace(): # not self.trace_complete: - # Make sure that recorded parameter and submodule orders are - # identical across ranks + # Make sure that recorded submodule orders are identical across ranks assert_ints_same_as_other_ranks([m.id for m in self.__submodule_order]) - assert_ints_same_as_other_ranks([p.param.ds_id for p in self.__param_order]) - assert_ints_same_as_other_ranks( - [p.step_id_last_used_at for p in self.__param_order]) if self.is_record_trace(): # Successfully recorded a trace + self.construct_parameter_trace_from_module_trace() + # Make sure that recorded parameter orders are identical across ranks + assert_ints_same_as_other_ranks( + [p.param.ds_id for p in self.__param_order]) + assert_ints_same_as_other_ranks( + [p.step_id_last_used_at for p in self.__param_order]) + self.__submodule_order = tuple(self.__submodule_order) # freeze self.__param_order = tuple(self.__param_order) # freeze - self.__trace_mode = TraceMode.COMPLETE # self.trace_complete = True + self.__trace_mode = ZeRoTraceMode.COMPLETE print_rank_0( - f"completed trace: {[m.id for m in self.__submodule_order]}", + f"completed record trace: {[m.id for m in self.__submodule_order]}", force=False) else: # Enable trace recording for next forward/backward pass - self.__trace_mode = TraceMode.RECORD + self.__trace_mode = ZeRoTraceMode.RECORD self.__param_queue = collections.deque(self.__param_order) # reset fetch queue 
self.__most_recent_step_id_param_fetched_for = collections.defaultdict( lambda: int(-1e10)) + self.__step_id_module_fetched_for = collections.defaultdict( + lambda: collections.deque()) self.__step_id = 0 self.__n_available_params = 0 + def _dump_params(self, tag, sub_module, params, step_id=None): + if step_id is None: + step_id = self.__step_id + param_names = [debug_param2name_id(p) for p in params] + print( + f'{tag} step = {step_id} mod = {debug_module2name_id(sub_module)} p_names = {param_names}' + ) + + def _dump_param_ids(self, tag, mod_id, p_ids, step_id=None): + if step_id is None: + step_id = self.__step_id + print(f'{tag} mod = {mod_id}, step = {step_id}, p_ids = {p_ids}') + """Fetch and Release Fetching, prefetching, and releasing parameters """ @@ -228,7 +262,7 @@ class PartitionedParameterCoordinator: param.ds_active_sub_modules.add(current_submodule.id) debug_rank0(f"-wait: {param.ds_summary()}") if param in self.__inflight_param_registry: - with torch.cuda.stream(self.__allgather_stream): + with get_accelerator().stream(self.__allgather_stream): while self.__ongoing_fetch_events and self.__ongoing_fetch_events[ 0].query(): self.__ongoing_fetch_events.popleft() @@ -238,12 +272,12 @@ class PartitionedParameterCoordinator: self.__inflight_param_registry.pop(param).wait() - event = Event() + event = get_accelerator().Event() event.record() self.__ongoing_fetch_events.append(event) assert param.ds_status == ZeroParamStatus.AVAILABLE, param.ds_summary() - torch.cuda.current_stream().wait_stream(self.__allgather_stream) + get_accelerator().current_stream().wait_stream(self.__allgather_stream) # kick off parameter prefetches for upcoming modules # don't prefetch if we dont have a completed model trace @@ -264,15 +298,23 @@ class PartitionedParameterCoordinator: self.__most_recent_step_id_param_fetched_for[ param_in_trace.param] = param_in_trace.step_id_last_used_at discarded_from_prefetch_queue.add(param_in_trace.param) + if discarded_from_prefetch_queue 
!= params_not_already_fetched: raise RuntimeError( f"tracing error at step {self.__step_id}: \n" f"module id: {current_submodule.id}, training: {current_submodule.training}\n" f"expected the next {len(params_not_already_fetched)} parameters in the " - f"parameter fetch queue to be {tuple(p.ds_summary() for p in params_not_already_fetched)} \n" - f"but got \n {tuple(p.ds_summary() for p in discarded_from_prefetch_queue)}." + f"parameter fetch queue to be {tuple(p.ds_summary(use_debug_name=True) for p in params_not_already_fetched)} \n" + f"but got \n {tuple(p.ds_summary(use_debug_name=True) for p in discarded_from_prefetch_queue)}." ) + def _is_currently_on_nvme(param): + if param.nvme_swapper is None: + return False + + return param.ds_tensor.final_location == OffloadDeviceEnum.nvme \ + and param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE + # kick off all gather for params in the next few submodules (prefetch) if self.__prefetch_bucket_sz > 0: max_params_to_prefetch = min( @@ -283,11 +325,25 @@ class PartitionedParameterCoordinator: while self.__param_queue and numel_prefetching < max_params_to_prefetch: param_in_trace: __class__.__ParamInTrace = self.__param_queue.popleft( ) - self.__most_recent_step_id_param_fetched_for[ - param_in_trace.param] = param_in_trace.step_id_last_used_at - if param_in_trace.param not in params_to_prefetch: + + if _is_currently_on_nvme(param_in_trace.param): + # nvme prefetch is handled elsewhere. 
Need to break here to preserve fetch order + self.__param_queue.appendleft(param_in_trace) + break + + do_prefetch = param_in_trace.param.ds_status == ZeroParamStatus.NOT_AVAILABLE + if param_in_trace.param in params_to_prefetch: + # Avoid duplicates + do_prefetch = False + + self.__most_recent_step_id_param_fetched_for[param_in_trace.param] = \ + max(self.__most_recent_step_id_param_fetched_for[param_in_trace.param], + param_in_trace.step_id_last_used_at) + + if do_prefetch: params_to_prefetch.add(param_in_trace.param) numel_prefetching += param_in_trace.param.ds_numel + for param in params_to_prefetch: debug_rank0(f"-prefetch: {param.ds_summary()}") self.__all_gather_params(params_to_prefetch) @@ -339,13 +395,23 @@ class PartitionedParameterCoordinator: self.__n_available_params += param.ds_numel if partitioned_params: - with torch.cuda.stream(self.__allgather_stream): + with get_accelerator().stream(self.__allgather_stream): handle = partitioned_params[0].all_gather_coalesced(partitioned_params) for param in partitioned_params: assert param.ds_status == ZeroParamStatus.INFLIGHT, param.ds_summary() self.__inflight_param_registry[param] = handle + # Release swap buffers for persisted params on nvme since they will never be partitioned or evicted from GPU + swap_persisted_params = [ + p for p in partitioned_params + if p.ds_persist and p.ds_tensor.final_location == OffloadDeviceEnum.nvme + ] + if swap_persisted_params: + swap_persisted_params[ + 0].nvme_swapper.remove_partition_and_release_buffers( + swap_persisted_params) + @instrument_w_nvtx def __release_param(self, param: Parameter) -> None: if param.ds_status == ZeroParamStatus.AVAILABLE and not param.ds_active_sub_modules: @@ -364,12 +430,23 @@ class PartitionedParameterCoordinator: params_to_release = set(p.ds_id for p in iter_params(submodule_to_release) if not p.ds_persist) + # Problem: When prefetcher scans the param trace, it skips AVAILABLE params. 
+ # This creates issues if those params are released before the skipped uses: + # 1) It hurts performance as the skipped uses are never prefetched. + # 2) For nvme params, we run out of swap buffers because the prefetch order + # diverges from the trace. + # Solution: Don't release params whose reuse was skipped by prefetch. This is + # possible because we detect such skips during prefetch and mark those params. + for param in iter_params(submodule_to_release): + if self.__most_recent_step_id_param_fetched_for[param] > step_id: + params_to_release.discard(param.ds_id) + # examine all modules within `max_reuse_dist_in_numel` of the current step, # if we see any of the candidate parameters to be released reoccur while # doing this, remove them from the set of parameters to release. params_traversed = 0 for module in self.__submodule_order[step_id:]: - if params_traversed > self.__max_reuse_dist_in_numel: + if params_traversed >= self.__max_reuse_dist_in_numel: break for param in iter_params(module): params_to_release.discard(param.ds_id) diff --git a/deepspeed/runtime/zero/stage3.py b/deepspeed/runtime/zero/stage3.py index 836b1ee..9f7cb38 100644 --- a/deepspeed/runtime/zero/stage3.py +++ b/deepspeed/runtime/zero/stage3.py @@ -3,49 +3,35 @@ Licensed under the MIT license. 
""" +import sys import gc -from dataclasses import dataclass -import functools -import os import collections -from collections import OrderedDict, UserDict -import itertools -from typing import Deque, Dict, Iterable, Set, Tuple -import torch -from torch.cuda import Event, Stream -from torch.nn import Module, Parameter -import torch.distributed as dist -import math -from torch._six import inf -from torch.nn import Module -from torch.nn.parameter import Parameter - -from deepspeed.utils.logging import logger -from deepspeed.runtime.fp16.loss_scaler import LossScaler, DynamicLossScaler +from typing import Deque, Dict, Tuple + +from deepspeed.runtime import ZeROOptimizer +from deepspeed.utils import logger +from deepspeed.runtime.fp16.loss_scaler import CreateLossScaler from deepspeed.runtime.comm.coalesced_collectives import reduce_scatter_coalesced -from deepspeed.runtime.utils import get_global_norm, see_memory_usage, is_model_parallel_parameter, DummyOptim +from deepspeed.runtime.utils import inf, get_global_norm, is_model_parallel_parameter from deepspeed.runtime.zero.partition_parameters import * -from deepspeed.runtime.zero.partition_parameters import _init_external_params -from deepspeed.runtime.zero.constants import ZERO_OPTIMIZATION_WEIGHTS +from deepspeed.runtime.zero.config import ZeroStageEnum +from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum +from deepspeed.runtime.zero.parameter_offload import DeepSpeedZeRoOffload from deepspeed.ops.adam import DeepSpeedCPUAdam -from deepspeed.ops.op_builder import UtilsBuilder -from deepspeed.runtime.zero.offload_constants import * -from deepspeed.runtime.zero.partitioned_param_coordinator import PartitionedParameterCoordinator, iter_params from deepspeed.runtime.swap_tensor.partitioned_param_swapper import PartitionedParamStatus from deepspeed.runtime.swap_tensor.partitioned_optimizer_swapper import PartitionedOptimizerSwapper from deepspeed.runtime.swap_tensor.pipelined_optimizer_swapper import 
PipelinedOptimizerSwapper from deepspeed.checkpoint.constants import OPTIMIZER_STATE_DICT, FP32_FLAT_GROUPS, PARTITION_COUNT, ZERO_STAGE +from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import UtilsBuilder # Toggle this to true to enable correctness test # with gradient partitioning and without pg_correctness_test = False -FWD_MODULE_STACK = list() -from deepspeed.utils.debug import debug_module2name_id, debug_param2name_id, debug_param2name_id_numel, debug_param2name_id_shape_device, debug_module2name_class, printflock, log_rank_file - def print_rank_0(message, debug=False, force=False): - rank = torch.distributed.get_rank() + rank = dist.get_rank() if rank == 0 and (debug or force): print(message) # other variations @@ -73,157 +59,10 @@ def move_to_cpu(tensor_list): tensor.data = tensor.data.cpu() -def is_builtin_type(obj): - # https://stackoverflow.com/a/17795199 - return obj.__class__.__module__ == '__builtin__' or obj.__class__.__module__ == "builtins" - - -#apply torch.autograd.Function that calls a backward_function to tensors in output -def _apply_to_tensors_only(module, functional, backward_function, outputs): - if isinstance(outputs, (tuple, list)): - touched_outputs = [] - for output in outputs: - touched_output = _apply_to_tensors_only(module, - functional, - backward_function, - output) - touched_outputs.append(touched_output) - return outputs.__class__(touched_outputs) - elif isinstance(outputs, dict): - # apply inplace to avoid recreating dict inherited objects - for key in outputs.keys(): - outputs[key] = _apply_to_tensors_only(module, - functional, - backward_function, - outputs[key]) - return outputs - elif type(outputs) is torch.Tensor: - return functional.apply(module, backward_function, outputs) - else: - if not is_builtin_type(outputs): - logger.warning( - f"A module has unknown inputs or outputs type ({type(outputs)}) and the tensors embedded in it cannot be detected. 
" - "The ZeRO-3 hooks designed to trigger before or after backward pass of the module relies on knowing the input and " - "output tensors and therefore may not get triggered properly.") - return outputs - - -#for each tensor in outputs run the forward_function and register backward_function as hook -def _apply_forward_and_backward_to_tensors_only(module, - forward_function, - backward_function, - outputs): - if type(outputs) is tuple: - touched_outputs = [] - for output in outputs: - touched_output = _apply_forward_and_backward_to_tensors_only( - module, - forward_function, - backward_function, - output) - touched_outputs.append(touched_output) - return tuple(touched_outputs) - elif type(outputs) is torch.Tensor: - forward_function(outputs) - if outputs.requires_grad: - outputs.register_hook(backward_function) - return outputs - else: - return outputs - - -class ZeROOrderedDict(OrderedDict): - def __init__(self, parent_module, *args, **kwargs): - """A replacement for ``collections.OrderedDict`` to detect external ZeRO params. 
- - Args: - parent_module (``collections.OrderedDict``): the collection to replace - """ - - super().__init__(*args, **kwargs) - self._parent_module = parent_module - self._in_forward = False - - def __getitem__(self, key): - param = super().__getitem__(key) - - # Params can be registered as None (e.g., bias) - if param is None: - return param - - if param.ds_status == ZeroParamStatus.NOT_AVAILABLE: - if self._parent_module._parameters._in_forward: - register_external_parameter(FWD_MODULE_STACK[-1], param) - param.all_gather() - print_rank_0( - f'Registering external parameter from getter {key} ds_id = {param.ds_id}', - force=False) - - return param - - -def _inject_parameters(module, cls): - for module in module.modules(): - if cls == ZeROOrderedDict: - new_param = cls(parent_module=module) - else: - new_param = cls() - - for key, param in module._parameters.items(): - new_param[key] = param - module._parameters = new_param - - -class PreBackwardFunction(torch.autograd.Function): - @staticmethod - def forward(ctx, module, pre_backward_function, outputs): - ctx.module = module - ctx.pre_backward_function = pre_backward_function - if not hasattr(module, "applied_pre_backward_ref_cnt"): - module.applied_pre_backward_ref_cnt = 0 - module.applied_pre_backward_ref_cnt += 1 - #print(f"After Forward: {ctx.module.__class__.__name__}") - outputs = outputs.detach() - return outputs - - @staticmethod - def backward(ctx, *args): - #print(f"Before Backward: {ctx.module.__class__.__name__}") - ctx.pre_backward_function(ctx.module) - return (None, None) + args - - -class PostBackwardFunction(torch.autograd.Function): - @staticmethod - def forward(ctx, module, pre_backward_function, output): - ctx.module = module - if output.requires_grad: - #TODO SOME TIMES post backward does not seem to be triggered debug in detail - #Should only cause increase in memory not correctness issue - #if output.grad_fn.__class__.__name__ == 'ViewBackward': - # ctx.view=True - # print(f"Warning view 
tensor for input to module : {module.__class__.__name__}. Backward hooks may not trigger properly") - #assert len(module.parameters(recurse=False)), "The input tensor to the module is a view, and autograd Function or register_hook is not triggered with view tensors." - #if module.ds_grads_remaining == 0: - # print(f"Before Forward: {ctx.module.__class__.__name__}") - module.ds_grads_remaining += 1 - ctx.pre_backward_function = pre_backward_function - output = output.detach() - return output - - @staticmethod - def backward(ctx, *args): - ctx.module.ds_grads_remaining = ctx.module.ds_grads_remaining - 1 - if ctx.module.ds_grads_remaining == 0: - ctx.pre_backward_function(ctx.module) - #print(f"After Backward: {ctx.module.__class__.__name__}") - return (None, None) + args - - INITIAL_MICRO_STEP_ID = -1 -class DeepSpeedZeroOptimizer_Stage3(object): +class DeepSpeedZeroOptimizer_Stage3(ZeROOptimizer): """ DeepSpeedZeroOptimizer designed to reduce the memory footprint required for training large deep learning models. @@ -249,6 +88,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): max_reuse_distance=1000000000, max_live_parameters=1000000000, param_persistence_threshold=100000, + model_persistence_threshold=sys.maxsize, dp_process_group=None, reduce_scatter=True, overlap_comm=False, @@ -264,14 +104,14 @@ class DeepSpeedZeroOptimizer_Stage3(object): elastic_checkpoint=False, aio_config=None): - see_memory_usage("Stage 3 initialize beginning", force=False) + see_memory_usage("Stage 3 initialize beginning", force=True) print_rank_0(f"initialized {__class__.__name__} with args: {locals()}", force=False) if dist.get_rank() == 0: logger.info(f"Reduce bucket size {reduce_bucket_size}") - logger.info(f"Allgather bucket size {prefetch_bucket_size}") + logger.info(f"Prefetch bucket size {prefetch_bucket_size}") # The fused optimizer does all the work. We need this layer for two reason: # 1. maintain same user API from apex.fp16_utils # 2. 
keep common stuff here in case we need to add ne552w fused optimizer later @@ -281,10 +121,10 @@ class DeepSpeedZeroOptimizer_Stage3(object): # - assume all params requires grad # - flat by groups, not keeping state. TODO: remove state explicitly? # - master grad and unflat master weight never exist. TODO: a way to save out unflat master? - if not torch.cuda.is_available: - raise SystemError("Cannot use fp16 without CUDA.") + if not get_accelerator().is_available(): + raise SystemError("Cannot use fp16 without accelerator.") + self.optimizer = init_optimizer - self.using_real_optimizer = not isinstance(self.optimizer, DummyOptim) # Load pre-built or JIT compile (un)flatten ops util_ops = UtilsBuilder().load() @@ -293,6 +133,9 @@ class DeepSpeedZeroOptimizer_Stage3(object): self.dtype = self.optimizer.param_groups[0]['params'][0].dtype self._global_grad_norm = 0. + self.custom_loss_scaler = False + self.external_loss_scale = None + self.optimizer_swapper = None self.swap_optimizer = False @@ -304,62 +147,45 @@ class DeepSpeedZeroOptimizer_Stage3(object): self.params_in_nvme_and_cpu = False self.max_params_in_cpu = 0 + self.parameter_offload = DeepSpeedZeRoOffload( + module=module, + timers=timers, + ds_config=ds_config, + overlap_comm=overlap_comm, + prefetch_bucket_size=prefetch_bucket_size, + max_reuse_distance=max_reuse_distance, + max_live_parameters=max_live_parameters, + param_persistence_threshold=param_persistence_threshold, + model_persistence_threshold=model_persistence_threshold, + offload_param_config=offload_optimizer_config, + mpu=mpu) + + self.persistent_parameters = self.parameter_offload.persistent_parameters self._configure_offloading(offload_optimizer_config, offload_param_config) - self._convert_to_zero_parameters(ds_config, module, mpu) - - for m in module.modules(): - _init_external_params(m) - self.module = module self.elastic_checkpoint = elastic_checkpoint - # Replace ._parameters with a new class to enable auto-registration of - # external 
parameters - _inject_parameters(module, ZeROOrderedDict) - self.__inf_or_nan_tracker: Tensor = torch.zeros( 1, dtype=torch.bool, - device=torch.cuda.current_device(), + device=get_accelerator().current_device_name(), requires_grad=False) self.deepspeed_adam_offload = (self.offload_optimizer and type(init_optimizer) == DeepSpeedCPUAdam) - self.device = torch.cuda.current_device( - ) if not self.offload_optimizer else OFFLOAD_CPU_DEVICE + self.device = get_accelerator().current_device_name( + ) if not self.offload_optimizer else OffloadDeviceEnum.cpu ### streams used for overlapping computation with communication - self.__allgather_stream = Stream( - ) if overlap_comm else torch.cuda.default_stream() - self.__reduce_and_partition_stream = Stream( - ) if overlap_comm else torch.cuda.default_stream() + self.__reduce_and_partition_stream = get_accelerator().Stream( + ) if overlap_comm else get_accelerator().default_stream() ############################################################################ - see_memory_usage("Before Partitioned Parameter Coordinator", force=False) - self.param_coordinators = {} - self._prefetch_bucket_sz = int(prefetch_bucket_size) - self._max_reuse_distance_in_numel = int(max_reuse_distance) - self._max_available_parameters_in_numel = int(max_live_parameters) - see_memory_usage("After Partitioned Parameter Coordinator", force=False) - self.__n_caching_allocator_flushes = 0 #-------------Stage 3 Setup-------------------# - # parameters smaller than the threshold will be collectively gathered at the - # end of the optimizer step and will be kept till the end of the backward pass - # TODO maybe worth just replicating these parameters and doing all reduce for them - self.persistence_threshold = int(param_persistence_threshold) - - self.persistent_parameters = self.persistent_parameters() - - self.setup_zero_stage3_hooks() - - #resetting ds_tensor just in case parameters have been changed after initialization - #example .half() or .to() - 
#self.reset_ds_tensor() - #---------------------------------------------# self.timers = timers @@ -386,9 +212,9 @@ class DeepSpeedZeroOptimizer_Stage3(object): self.reduce_bucket_size = int(reduce_bucket_size) if self.reduce_scatter: - assert self.communication_data_type in (torch.float16, torch.bfloat16), f"ZeRO-3 supports only float16 or bfloat16 communication_data_type with reduce scatter enabled. Got: '{self.communication_data_type}'" - assert self.gradient_predivide_factor == 1.0, "gradient_predivide_factor != 1.0 is not yet supported with ZeRO-2 with reduce scatter enabled" - assert self.postscale_gradients, "pre-scale gradients is not yet supported with ZeRO-2 with reduce scatter enabled" + assert self.communication_data_type in (torch.float16, torch.bfloat16, torch.float32), f"ZeRO-3 supports only float16 or bfloat16 communication_data_type with reduce scatter enabled. Got: '{self.communication_data_type}'" + assert self.gradient_predivide_factor == 1.0, "gradient_predivide_factor != 1.0 is not yet supported with ZeRO-3 with reduce scatter enabled" + assert self.postscale_gradients, "pre-scale gradients is not yet supported with ZeRO-3 with reduce scatter enabled" # Holds the mode parameter # The param.data may not hold any meaningful data @@ -416,6 +242,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): self.all_reduce_print = False self.prefetch_elements = int(prefetch_bucket_size) + self.contiguous_gradients = contiguous_gradients # padding on each partition for alignment purposes @@ -424,11 +251,15 @@ class DeepSpeedZeroOptimizer_Stage3(object): self.sub_group_size = sub_group_size self.sub_group_to_group_id = {} - see_memory_usage("Before creating fp16 partitions", force=False) - self._create_fp16_partitions_with_defragmentation() + + # Trainable parameters + self.trainable_param_groups = self._get_trainable_parameter_groups() + + see_memory_usage("Before creating fp16 partitions", force=True) + 
self._create_fp16_partitions_with_defragmentation(self.trainable_param_groups) num_fp16_subgroups = len(self.fp16_partitioned_groups_flat) see_memory_usage(f"After creating fp16 partitions: {num_fp16_subgroups}", - force=False) + force=True) # Optimizer tensor swapping if self.swap_optimizer: @@ -437,7 +268,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): self.__params_in_ipg_bucket: List[Parameter] = [] self.is_gradient_accumulation_boundary: bool = True - self.__param_reduce_events: Deque[Event] = collections.deque() + self.__param_reduce_events: Deque[get_accelerator().Event] = collections.deque() # TODO. make this configurable via JSON self.__max_param_reduce_events: int = 2 @@ -478,10 +309,9 @@ class DeepSpeedZeroOptimizer_Stage3(object): f'Largest partitioned param numel = {largest_partitioned_param_numel}', force=False) + self._setup_for_real_optimizer() self.grad_position = {} - if self.using_real_optimizer: - self._setup_for_real_optimizer() - self.set_grad_positions() + self.set_grad_positions() if self.offload_optimizer: self.norm_for_param_grads = {} @@ -502,38 +332,44 @@ class DeepSpeedZeroOptimizer_Stage3(object): #exit(0) # we may have a way of fusing dynamic scale. 
Do not support for now - if self.dtype == torch.float or not dynamic_loss_scale: - loss_scale_value = 1.0 if self.dtype == torch.float else static_loss_scale - - self.dynamic_loss_scale = False - self.loss_scaler = LossScaler(scale=loss_scale_value) - cur_iter = 0 - else: - if dynamic_loss_args is None: - self.loss_scaler = DynamicLossScaler() - else: - self.loss_scaler = DynamicLossScaler(**dynamic_loss_args) - - self.dynamic_loss_scale = True + self.loss_scaler = CreateLossScaler(dtype=self.dtype, + static_loss_scale=static_loss_scale, + dynamic_scaling=dynamic_loss_scale, + dynamic_loss_args=dynamic_loss_args) + self.dynamic_loss_scale = self.loss_scaler.dynamic self.debug_fp16_grads = [{} for _ in self.fp16_groups] + self._link_all_hp_params() + if dist.get_rank(group=self.dp_process_group) == 0: see_memory_usage(f"After initializing ZeRO optimizer", force=True) + def destroy(self): + self.parameter_offload.destroy() + + def _get_trainable_parameter_groups(self): + param_groups = [] + for param_group in self.optimizer.param_groups: + trainable_params = { + "params": [p for p in param_group["params"] if p.requires_grad] + } + param_groups.append(trainable_params) + return param_groups + def _setup_for_real_optimizer(self): - see_memory_usage("Before creating fp32 partitions", force=False) + see_memory_usage("Before creating fp32 partitions", force=True) self._create_fp32_partitions() - see_memory_usage("After creating fp32 partitions", force=False) + see_memory_usage("After creating fp32 partitions", force=True) dist.barrier() # To support pipelined optimizer swapping self._create_next_swappable_fp32_groups() - see_memory_usage("Before initializing optimizer states", force=False) + see_memory_usage("Before initializing optimizer states", force=True) self.initialize_optimizer_states() - see_memory_usage("After initializing optimizer states", force=False) + see_memory_usage("After initializing optimizer states", force=True) dist.barrier() if dist.get_rank() == 0: 
@@ -544,18 +380,20 @@ class DeepSpeedZeroOptimizer_Stage3(object): self.__ipg_bucket_flat_buffer: Tensor = torch.empty( self.reduce_bucket_size, dtype=self.dtype, - device=torch.cuda.current_device()) + device=get_accelerator().current_device_name()) grad_partitions_flat_buffer = None self.__param_id_to_grad_partition: Dict[int, Tensor] = {} all_params = list(itertools.chain.from_iterable(self.fp16_groups)) - grad_partitions_flat_buffer: Tensor = torch.zeros( - sum(p.ds_tensor.ds_numel for p in all_params), - dtype=self.dtype, - device=self.device, - pin_memory=self.offload_optimizer_pin_memory) + grad_partitions_flat_buffer: Tensor = torch.zeros(sum(p.partition_numel() + for p in all_params), + dtype=self.dtype, + device=self.device) + if self.offload_optimizer_pin_memory: + grad_partitions_flat_buffer = get_accelerator().pin_memory( + grad_partitions_flat_buffer) offset = 0 for param in all_params: @@ -563,8 +401,21 @@ class DeepSpeedZeroOptimizer_Stage3(object): param.ds_id] = grad_partitions_flat_buffer.narrow( 0, offset, - param.ds_tensor.numel()) - offset += param.ds_tensor.numel() + param.partition_numel()) + offset += param.partition_numel() + + def _link_all_hp_params(self): + for p in self.module.parameters(): + p._z3_optimizer = self + + def set_lr(self, lr): + """Set the learning rate.""" + for param_group in self.optimizer.param_groups: + param_group["lr"] = lr + + def get_lr(self): + """Return the current learning rate.""" + return self.optimizer.param_groups[0]["lr"] # TODO. 
factor out to a utility outside of stage3 @staticmethod @@ -593,7 +444,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): offset += tensor_numel gc.collect() - torch.cuda.empty_cache() + get_accelerator().empty_cache() # copy tensors (now flattened and contiguous) back to GPU device_buffer = cpu_buffer.to(orig_device) @@ -605,79 +456,34 @@ class DeepSpeedZeroOptimizer_Stage3(object): return device_buffer def _get_param_coordinator(self, training): - if not training in self.param_coordinators: - self.param_coordinators[training] = PartitionedParameterCoordinator( - prefetch_bucket_sz=self._prefetch_bucket_sz, - max_reuse_distance_in_numel=self._max_reuse_distance_in_numel, - max_available_parameters_in_numel=self. - _max_available_parameters_in_numel, - allgather_stream=self.__allgather_stream, - prefetch_nvme=self.params_in_nvme_and_cpu, - ) - - return self.param_coordinators[training] + return self.parameter_offload.get_param_coordinator(training) def _configure_offloading(self, offload_optimizer_config, offload_param_config): ###################### offload optimizer setup ################################## - if offload_optimizer_config is not None: + if offload_optimizer_config is not None and offload_optimizer_config.device != OffloadDeviceEnum.none: self.offload_optimizer = True - self.offload_optimizer_pin_memory = offload_optimizer_config[ - OFFLOAD_OPTIMIZER_PIN_MEMORY] - self.swap_optimizer = offload_optimizer_config[ - OFFLOAD_OPTIMIZER_DEVICE] == OFFLOAD_NVME_DEVICE - self.offload_optimizer_fast_init = offload_optimizer_config[ - OFFLOAD_OPTIMIZER_FAST_INIT] + self.offload_optimizer_pin_memory = offload_optimizer_config.pin_memory + self.swap_optimizer = offload_optimizer_config.device == OffloadDeviceEnum.nvme + self.offload_optimizer_fast_init = offload_optimizer_config.fast_init ###################### offload param setup ################################## - if offload_param_config is not None: - if self.using_real_optimizer: - assert 
self.offload_optimizer, "parameter offload is only available with optimizer state offload" + if offload_param_config is not None and offload_param_config.device != OffloadDeviceEnum.none: self.offload_param = True - self.offload_param_pin_memory = offload_param_config[ - OFFLOAD_PARAM_PIN_MEMORY] - self.params_in_nvme_and_cpu = offload_param_config[ - OFFLOAD_PARAM_DEVICE] == OFFLOAD_NVME_DEVICE - self.max_params_in_cpu = offload_param_config[OFFLOAD_PARAM_MAX_IN_CPU] + self.offload_param_pin_memory = offload_param_config.pin_memory + self.params_in_nvme_and_cpu = offload_param_config.device == OffloadDeviceEnum.nvme + self.max_params_in_cpu = offload_param_config.max_in_cpu print_rank_0( f"FP16 params swapping is {self.params_in_nvme_and_cpu}, Max params in CPU is {self.max_params_in_cpu}", force=False) - def _convert_to_zero_parameters(self, ds_config, module, mpu): - non_zero_params = [p for p in module.parameters() if not is_zero_param(p)] - if non_zero_params: - zero_params = [p for p in module.parameters() if is_zero_param(p)] - if zero_params: - zero_params[0].convert_to_zero_parameters(param_list=non_zero_params) - else: - group = None - if mpu: - group = mpu.get_data_parallel_group() - - if self.params_in_nvme_and_cpu: - remote_device = OFFLOAD_NVME_DEVICE - elif self.offload_param: - remote_device = OFFLOAD_CPU_DEVICE - else: - remote_device = None - - Init(module=module, - data_parallel_group=group, - dtype=self.dtype, - config_dict_or_path=ds_config, - remote_device=remote_device, - pin_memory=self.offload_param_pin_memory, - mpu=mpu) - def _configure_tensor_swapping(self, offload_optimizer_config, aio_config): - nvme_swap_folder = os.path.join( - offload_optimizer_config[OFFLOAD_OPTIMIZER_NVME_PATH], - 'zero_stage_3') + nvme_swap_folder = os.path.join(offload_optimizer_config.nvme_path, + 'zero_stage_3') os.makedirs(nvme_swap_folder, exist_ok=True) - if torch.distributed.get_rank() == 0: + if dist.get_rank() == 0: logger.info(f'Tensor Swapping: Adding 
optimizer tensors') - swapper_type = PipelinedOptimizerSwapper if offload_optimizer_config[ - OFFLOAD_OPTIMIZER_PIPELINE] else PartitionedOptimizerSwapper + swapper_type = PipelinedOptimizerSwapper if offload_optimizer_config.pipeline else PartitionedOptimizerSwapper self.optimizer_swapper = swapper_type( swap_config=offload_optimizer_config, @@ -712,7 +518,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): '''if the parameter was initialized in nvme then bring it to the destination buffer directly''' if src.status == PartitionedParamStatus.NOT_AVAILABLE: print_rank_0( - f"Swapping in {param.ds_id} with partition size {param.ds_tensor.ds_numel} permanently to CPU" + f"Swapping in {param.ds_id} with partition size {param.partition_numel()} permanently to CPU" ) param.nvme_swapper.swap_into_buffer(param, dest) src.data = dest.data @@ -730,8 +536,8 @@ class DeepSpeedZeroOptimizer_Stage3(object): aggregate_params_count = 0 - for j, param_group in enumerate(self.optimizer.param_groups): - params_in_group = sum([p.ds_tensor.ds_numel for p in param_group['params']]) + for j, param_group in enumerate(self.trainable_param_groups): + params_in_group = sum([p.partition_numel() for p in param_group['params']]) flat_buffer_size = params_in_group @@ -747,9 +553,9 @@ class DeepSpeedZeroOptimizer_Stage3(object): print_rank_0(f"group {j} flat buffer size {flat_buffer_size}", force=False) self.param_groups_fp16_flat_cpu_memory.append( - torch.empty(int(flat_buffer_size), - dtype=self.dtype, - pin_memory=True)) + get_accelerator().pin_memory( + torch.empty(int(flat_buffer_size), + dtype=self.dtype))) else: print_rank_0( f"No flat buffer size. 
Param group size was {params_in_group}", @@ -759,11 +565,12 @@ class DeepSpeedZeroOptimizer_Stage3(object): torch.empty(1, dtype=self.dtype)) - def _create_fp16_partitions_with_defragmentation(self): + def _create_fp16_partitions_with_defragmentation(self, fp16_param_groups): dist.barrier() + param_groups: List[List[Parameter]] = tuple( self._create_fp16_sub_groups(param_group["params"]) - for param_group in self.optimizer.param_groups) + for param_group in fp16_param_groups) # bookkeeping related to param groups for param_group_idx, param_group in enumerate(param_groups): @@ -780,7 +587,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): # record total elements of parameter partitions in sub group self.fp16_partitioned_groups_flat_numel.append( - sum(p.ds_tensor.ds_numel for p in sub_group)) + sum(p.partition_numel() for p in sub_group)) # record padding required to align group to world size (only applies to last rank) rank_requires_padding = dist.get_rank( @@ -803,7 +610,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): # contiguous flat buffer for all parameters that we created earlier offset = 0 for sub_group in self.fp16_groups: - sub_group_numel = sum(param.ds_tensor.ds_numel for param in sub_group) + sub_group_numel = sum(param.partition_numel() for param in sub_group) self.fp16_partitioned_groups_flat.append( device_buffer.narrow(0, offset, @@ -815,7 +622,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): for param_group_idx, param_group in enumerate(param_groups): flat_offset = 0 for i, sub_group in enumerate(param_group): - total_elements = sum(p.ds_tensor.ds_numel for p in sub_group) + total_elements = sum(p.partition_numel() for p in sub_group) print_rank_0(f"Params in nvme and cpu {self.params_in_nvme_and_cpu}") #Flat buffer may not be available for parameters that reside in NVME if not self.params_in_nvme_and_cpu or flat_offset + total_elements <= self.param_groups_fp16_flat_cpu_memory[ @@ -851,7 +658,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): if 
should_create_fp16_flat_reuse_buffer: max_partition_numel, largest_partition_numel = 0, None for sub_group in self.fp16_groups: - total_elements = sum(t.ds_tensor.ds_numel for t in sub_group) + total_elements = sum(t.partition_numel() for t in sub_group) if total_elements > max_partition_numel: largest_partition_numel = [t.ds_numel for t in sub_group] max_partition_numel = total_elements @@ -869,7 +676,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): dest = flat_buffer.narrow(0, offset, partitioned_param.ds_numel) if partitioned_param.status == PartitionedParamStatus.NOT_AVAILABLE: print_rank_0( - f"Swapping in {param.ds_id} with elements {param.ds_numel} and partition {param.ds_tensor.ds_numel}" + f"Swapping in {param.ds_id} with elements {param.ds_numel} and partition {param.partition_numel()}" ) param.nvme_swapper.swap_in([param], async_op=False) dest.data.copy_(partitioned_param.data) @@ -899,7 +706,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): if partitioned_param.status == PartitionedParamStatus.NOT_AVAILABLE: swap_path = param.nvme_swapper.get_path(param, True) sub_group_partitions.append((partitioned_param, - param.ds_tensor.ds_numel, + param.partition_numel(), swap_path)) else: sub_group_partitions.append((partitioned_param, @@ -1015,7 +822,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): def _create_fp16_sub_groups(self, params_group): - params_group_numel = sum([param.partitioned_size() for param in params_group]) + params_group_numel = sum([param.partition_numel() for param in params_group]) sub_group_size = self.sub_group_size if sub_group_size is None or sub_group_size >= params_group_numel: @@ -1027,7 +834,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): for param in params_group: sub_group.append(param) - local_sub_group_size += param.partitioned_size() + local_sub_group_size += param.partition_numel() if local_sub_group_size >= sub_group_size or id(param) == id( params_group[-1]): @@ -1039,219 +846,6 @@ class 
DeepSpeedZeroOptimizer_Stage3(object): return sub_groups - # def reset_ds_tensor(self): - # for name, param in self.module.named_parameters(recurse=True): - # assert hasattr(param,'ds_id'), "Parameters have not been converted to be Zero 3 compatible" - # assert (param.ds_status == ZeroParamStatus.NOT_AVAILABLE), "All the parameters must have been partitioned by now" - # param.ds_tensor.data = param.data - - def setup_zero_stage3_hooks(self): - self.hierarchy = 0 - - #reset step if in inference mode - @instrument_w_nvtx - def _end_of_forward_hook(module, *args): - - if not torch._C.is_grad_enabled(): - self._get_param_coordinator(training=False).reset_step() - - #likely one of them should be enough but just to be safe - self._register_hooks_recursively(self.module) - self.module.register_forward_hook(_end_of_forward_hook) - - # Add top module to stack trace - global FWD_MODULE_STACK - FWD_MODULE_STACK.append(self.module) - - def persistent_parameters(self): - persistent_params = [] - total_persistent_parameters = 0 - params_count = 0 - for _, param in self.module.named_parameters(recurse=True): - if param.ds_numel < self.persistence_threshold: - params_count += 1 - param.ds_persist = True - persistent_params.append(param) - total_persistent_parameters += param.ds_numel - - print_rank_0( - f"ZeRO 3: Total persistent parameters: {total_persistent_parameters} in {params_count} params", - force=False) - return persistent_params - - def _register_hooks_recursively(self, module, count=[0]): - my_count = count[0] - module.id = my_count - - #print(f"{module.__class__} : {module.id}") - - for child in module.children(): - count[0] = count[0] + 1 - self._register_hooks_recursively(child, count=count) - - @instrument_w_nvtx - def _pre_forward_module_hook(module, *args): - self.pre_sub_module_forward_function(module) - - @instrument_w_nvtx - def _post_forward_module_hook(module, input, output): - global FWD_MODULE_STACK - FWD_MODULE_STACK.pop() - if output is None: - output = 
[] - elif not isinstance(output, (list, tuple)): - if torch.is_tensor(output): - output = [output] - else: - #print(f'got UNKNOWN type {type(output)}') - outputs = [] - output = output if isinstance(output, dict) else vars(output) - for name, val in output.items(): - if not name.startswith('__') and torch.is_tensor(val): - outputs.append(val) - output = outputs - #print(f'convert output to {output}') - - for item in filter(lambda item: is_zero_param(item), output): - if not any(id(item) in m._external_params for m in FWD_MODULE_STACK): - item.is_external_param = True - module_to_register = FWD_MODULE_STACK[-1] - register_external_parameter(module_to_register, item) - print_rank_0( - f'Registering dangling parameter for module {module_to_register.__class__.__name__}, ds_id = {item.ds_id}.', - force=False) - - # It's possible that the parameter was already external to the completed module. If so, remove it the - # registration as it will be covered by the outer module instead. - if id(item) in module._external_params: - print_rank_0( - f' Unregistering nested dangling parameter from module {module.__class__.__name__}, ds_id = {item.ds_id}', - force=False) - unregister_external_parameter(module, item) - - item.all_gather() - - self.post_sub_module_forward_function(module) - - def _pre_backward_module_hook(module, inputs, output): - @instrument_w_nvtx - def _run_before_backward_function(sub_module): - # some models (e.g. 
Albert) may run multiple forwards on the same layer in a loop - # before doing backwards, so each backward will need a pre-fetch - using reference - # counting to support this scenario - #print(f"COUNTER before: {sub_module.applied_pre_backward_ref_cnt}") - if sub_module.applied_pre_backward_ref_cnt > 0: - self.pre_sub_module_backward_function(sub_module) - sub_module.applied_pre_backward_ref_cnt -= 1 - #print(f"COUNTER after: {sub_module.applied_pre_backward_ref_cnt}") - - return _apply_to_tensors_only(module, - PreBackwardFunction, - _run_before_backward_function, - output) - - #This is an alternate to doing _post_backward_module_hook - #it uses tensor.register_hook instead of using torch.autograd.Function - def _alternate_post_backward_module_hook(module, inputs): - module.ds_grads_remaining = 0 - - #print(f"Before Forward {module.__class__.__name__}") - - def _run_after_backward_hook(*unused): - module.ds_grads_remaining = module.ds_grads_remaining - 1 - if module.ds_grads_remaining == 0: - #print(f"After backward {module.__class__.__name__}") - self.post_sub_module_backward_function(module) - - def _run_before_forward_function(input): - if input.requires_grad: - module.ds_grads_remaining += 1 - - return _apply_forward_and_backward_to_tensors_only( - module, - _run_before_forward_function, - _run_after_backward_hook, - inputs) - - def _post_backward_module_hook(module, inputs): - module.ds_grads_remaining = 0 - - @instrument_w_nvtx - def _run_after_backward_function(sub_module): - if sub_module.ds_grads_remaining == 0: - self.post_sub_module_backward_function(sub_module) - - return _apply_to_tensors_only(module, - PostBackwardFunction, - _run_after_backward_function, - inputs) - - # Pre forward hook - module.register_forward_pre_hook(_pre_forward_module_hook) - # Post forward hook - module.register_forward_hook(_post_forward_module_hook) - - # Pre backward hook - module.register_forward_hook(_pre_backward_module_hook) - - # post backward hook - 
module.register_forward_pre_hook(_post_backward_module_hook) - - @torch.no_grad() - def pre_sub_module_forward_function(self, sub_module): - see_memory_usage(f"Before sub module function {sub_module.__class__.__name__}", - force=False) - - global FWD_MODULE_STACK - FWD_MODULE_STACK.append(sub_module) - - param_coordinator = self._get_param_coordinator(training=sub_module.training) - param_coordinator.trace_prologue(sub_module) - if param_coordinator.is_record_trace(): - param_coordinator.record_module(sub_module) - param_coordinator.fetch_sub_module(sub_module) - - see_memory_usage( - f"Before sub module function {sub_module.__class__.__name__} after fetch", - force=False) - - @torch.no_grad() - def post_sub_module_forward_function(self, sub_module): - see_memory_usage( - f"After sub module function {sub_module.__class__.__name__} {sub_module.id} before release", - force=False) - - param_coordinator = self._get_param_coordinator(training=sub_module.training) - if param_coordinator.is_record_trace(): - param_coordinator.record_parameters(sub_module) - param_coordinator.release_sub_module(sub_module) - - see_memory_usage( - f"After sub module function {sub_module.__class__.__name__} {sub_module.id} after release", - force=False) - - @torch.no_grad() - def pre_sub_module_backward_function(self, sub_module): - param_coordinator = self._get_param_coordinator(training=sub_module.training) - param_coordinator.trace_prologue(sub_module) - if param_coordinator.is_record_trace(): - param_coordinator.record_module(sub_module) - param_coordinator.record_parameters(sub_module) - param_coordinator.fetch_sub_module(sub_module) - - @torch.no_grad() - def post_sub_module_backward_function(self, sub_module): - see_memory_usage( - f"After sub module backward function {sub_module.__class__.__name__} {sub_module.id} before release", - force=False) - - self._get_param_coordinator( - training=sub_module.training).release_sub_module(sub_module) - - see_memory_usage( - f"After sub module 
backward function {sub_module.__class__.__name__} {sub_module.id} after release", - force=False) - def _release_ipg_buffers(self): if self.contiguous_gradients: self.ipg_buffer = None @@ -1304,7 +898,6 @@ class DeepSpeedZeroOptimizer_Stage3(object): dtype=gradient_dtype, device=self.device) - timers = self.timers timer_names = set() if self.swap_optimizer: @@ -1332,7 +925,8 @@ class DeepSpeedZeroOptimizer_Stage3(object): dtype=gradient_dtype, device=self.device) if self.offload_optimizer_pin_memory: - subgroup_gradient_buffer = subgroup_gradient_buffer.pin_memory() + subgroup_gradient_buffer = get_accelerator().pin_memory( + subgroup_gradient_buffer) self.fp32_partitioned_groups_flat[i].grad = subgroup_gradient_buffer else: @@ -1509,19 +1103,20 @@ class DeepSpeedZeroOptimizer_Stage3(object): @instrument_w_nvtx @torch.no_grad() def __add_grad_to_ipg_bucket(self, param: Parameter) -> None: - self.__reduce_and_partition_stream.wait_stream(torch.cuda.default_stream()) + self.__reduce_and_partition_stream.wait_stream( + get_accelerator().default_stream()) if self.contiguous_gradients and self.elements_in_ipg_bucket + param.grad.numel( ) < self.reduce_bucket_size: # move the gradient to a contiguous buffer - with torch.cuda.stream(self.__reduce_and_partition_stream): + with get_accelerator().stream(self.__reduce_and_partition_stream): # move the parameter's gradient to the contiguous flat buffer new_grad_tensor = self.__ipg_bucket_flat_buffer.narrow( 0, self.elements_in_ipg_bucket, param.grad.numel()).view_as(param.grad) new_grad_tensor.copy_(param.grad, non_blocking=True) - param.grad.record_stream(torch.cuda.current_stream()) + param.grad.record_stream(get_accelerator().current_stream()) param.grad.data = new_grad_tensor self.__params_in_ipg_bucket.append(param) @@ -1548,7 +1143,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): if len(self.__param_reduce_events) > self.__max_param_reduce_events: self.__param_reduce_events.popleft().synchronize() - with 
torch.cuda.stream(self.__reduce_and_partition_stream): + with get_accelerator().stream(self.__reduce_and_partition_stream): if safe_mode: assert_ints_same_as_other_ranks( [p.ds_id for p in self.__params_in_ipg_bucket]) @@ -1558,18 +1153,19 @@ class DeepSpeedZeroOptimizer_Stage3(object): self.__params_in_ipg_bucket.clear() - event = Event() + event = get_accelerator().Event() event.record() self.__param_reduce_events.append(event) @instrument_w_nvtx def __avg_scatter_grads(self, params_to_reduce: List[Parameter]) -> List[Tensor]: """average gradients and scatter partitions across ranks""" - dtype = get_only_unique_item(p.grad.dtype for p in params_to_reduce) full_grads_for_rank = [p.grad for p in params_to_reduce] - if self.communication_data_type == torch.float32: - full_grads_for_rank = [g.float() for g in full_grads_for_rank] + if self.communication_data_type != self.dtype: + full_grads_for_rank = [ + g.to(self.communication_data_type) for g in full_grads_for_rank + ] if self.postscale_gradients and self.gradient_predivide_factor != 1.0: full_grads_for_rank = [ @@ -1585,8 +1181,10 @@ class DeepSpeedZeroOptimizer_Stage3(object): g.mul(self.gradient_predivide_factor) for g in grad_partitions_for_rank ] - if self.communication_data_type == torch.float32: - grad_partitions_for_rank = [g.to(dtype) for g in grad_partitions_for_rank] + if self.communication_data_type != self.dtype: + grad_partitions_for_rank = [ + g.to(self.dtype) for g in grad_partitions_for_rank + ] return grad_partitions_for_rank @@ -1595,7 +1193,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): current_offset = 0 for param in group: param_id = self.get_param_id(param) - num_elements = param.ds_tensor.ds_numel + num_elements = param.partition_numel() self.grad_position[param_id] = [ int(i), @@ -1622,7 +1220,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): self.norm_for_param_grads[param_id] = self._constant_buffered_norm2(param.grad) def async_inplace_copy_grad_to_fp32_buffer_from_gpu(self, param, 
fp32_grad_tensor): - with torch.cuda.stream(self.copy_grad_stream): + with get_accelerator().stream(self.copy_grad_stream): param_id = self.get_param_id(param) src_tensor = param.grad.view(-1).float() #print(f"src_tensor {src_tensor.size()} and fp32 grad {fp32_grad_tensor.size()}") @@ -1640,14 +1238,13 @@ class DeepSpeedZeroOptimizer_Stage3(object): total_norm += param_norm.item()**2 # Sum across all model parallel GPUs. - total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)]) + total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) - torch.distributed.all_reduce(total_norm_cuda, - op=torch.distributed.ReduceOp.SUM, - group=self.dp_process_group) + dist.all_reduce(total_norm_cuda, + op=dist.ReduceOp.SUM, + group=self.dp_process_group) - self._model_parallel_all_reduce(tensor=total_norm_cuda, - op=torch.distributed.ReduceOp.SUM) + self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.SUM) total_norm = total_norm_cuda[0].item()**(1. / norm_type) @@ -1661,10 +1258,15 @@ class DeepSpeedZeroOptimizer_Stage3(object): def __partition_grads(self, params_to_release: List[Parameter], grad_partitions: List[Tensor]) -> None: + offload_fp32_gradients = {} + offload_fp32_offsets = {} for param, grad_partition in zip(params_to_release, grad_partitions): - if param.ds_tensor.ds_numel * dist.get_rank( - self.dp_process_group) > param.ds_numel: + + contains_real_data = param.partition_numel() * dist.get_rank( + self.dp_process_group) < param.ds_numel + if not contains_real_data: # this grad partition is empty - don't need to do anything + param.grad = None continue # move or accumulate gradient partition to target buffer @@ -1677,7 +1279,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): # ensure grad buffer is a CUDA buffer to speed up the next few # operations and so it can be used asynchronously grad_buffer = grad_buffer.to(grad_partition.device, non_blocking=True) - elif grad_buffer.is_cuda: + elif 
get_accelerator().on_accelerator(grad_buffer): grad_buffer.add_(grad_partition) else: # if dst is CPU, copy first to src device, do the addition @@ -1702,8 +1304,6 @@ class DeepSpeedZeroOptimizer_Stage3(object): # offload the gradient partition if applicable if self.offload_optimizer: i, dest_offset, _ = self.grad_position[self.get_param_id(param)] - offload_fp32_gradients = {} - offload_fp32_offsets = {} if self.is_gradient_accumulation_boundary: self.norm_for_param_grads[self.get_param_id( @@ -1724,7 +1324,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): fp32_grad_tensor.copy_(grad_buffer) # free the gradient - param.grad.record_stream(torch.cuda.current_stream()) + param.grad.record_stream(get_accelerator().current_stream()) param.grad = None if self.offload_optimizer and self.swap_optimizer: @@ -1806,11 +1406,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): ######################Reduction Related Methods############################## - def allreduce_bucket(self, - bucket, - communication_data_type=torch.float16, - rank=None, - log=None): + def allreduce_bucket(self, bucket, rank=None, log=None): rank = None tensor = self.flatten(bucket) @@ -1818,6 +1414,8 @@ class DeepSpeedZeroOptimizer_Stage3(object): if pg_correctness_test: communication_data_type = torch.float32 + else: + communication_data_type = self.communication_data_type if communication_data_type != tensor.dtype: tensor_to_allreduce = tensor.to(communication_data_type) @@ -1828,7 +1426,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): # "All Reducing" dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group) else: - global_rank = _get_global_rank(self.dp_process_group, rank) + global_rank = dist.get_global_rank(self.dp_process_group, rank) dist.reduce(tensor_to_allreduce, global_rank, group=self.dp_process_group) if communication_data_type != tensor.dtype and tensor is not tensor_to_allreduce: @@ -1839,7 +1437,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): # if rank is specified do a 
reduction instead of an allreduce def allreduce_and_copy(self, small_bucket, rank=None, log=None): - with torch.cuda.stream(self.reduction_stream): + with get_accelerator().stream(self.reduction_stream): allreduced = self.allreduce_bucket(small_bucket, rank=rank, log=log) if rank is None or rank == dist.get_rank(group=self.dp_process_group): for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)): @@ -1919,7 +1517,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): return params_in_partition, params_not_in_partition, first_offset @instrument_w_nvtx - def zero_grad(self, set_grads_to_None=True): + def zero_grad(self, set_to_none=False): """ Zero FP16 parameter grads. """ @@ -1929,9 +1527,9 @@ class DeepSpeedZeroOptimizer_Stage3(object): # For speed, set model fp16 grad to None by default for group in self.fp16_groups: for p in group: - if set_grads_to_None: - if p.grad is not None and p.grad.is_cuda: - p.grad.record_stream(torch.cuda.current_stream()) + if set_to_none: + if p.grad is not None and get_accelerator().on_accelerator(p.grad): + p.grad.record_stream(get_accelerator().current_stream()) p.grad = None else: if p.grad is not None: @@ -1944,9 +1542,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): if self.model_parallel_group is None: pass else: - torch.distributed.all_reduce(tensor=tensor, - op=op, - group=self.model_parallel_group) + dist.all_reduce(tensor=tensor, op=op, group=self.model_parallel_group) @instrument_w_nvtx def get_grad_norm_direct(self, gradients, params, norm_type=2): @@ -1969,14 +1565,13 @@ class DeepSpeedZeroOptimizer_Stage3(object): norm_type = float(norm_type) if norm_type == inf: total_norm = max(g.data.abs().max() for g in gradients) - total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)]) - torch.distributed.all_reduce(total_norm_cuda, - op=torch.distributed.ReduceOp.MAX, - group=self.dp_process_group) + total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) + dist.all_reduce(total_norm_cuda, 
+ op=dist.ReduceOp.MAX, + group=self.dp_process_group) # Take max across all GPUs. - self._model_parallel_all_reduce(tensor=total_norm_cuda, - op=torch.distributed.ReduceOp.MAX) + self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.MAX) total_norm = total_norm_cuda[0].item() else: # if dist.get_rank() == 0: @@ -1984,17 +1579,18 @@ class DeepSpeedZeroOptimizer_Stage3(object): grad_norms = [] for g, p in zip(gradients, params): if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0): - grad_norms.append(g.cuda(non_blocking=True).double().norm(2)) + grad_norms.append( + g.to(get_accelerator().device_name(), + non_blocking=True).double().norm(2)) # Sum across all model parallel GPUs. total_norm_cuda = torch.sum(torch.pow(torch.stack(grad_norms), 2)) - torch.distributed.all_reduce(total_norm_cuda, - op=torch.distributed.ReduceOp.SUM, - group=self.dp_process_group) + dist.all_reduce(total_norm_cuda, + op=dist.ReduceOp.SUM, + group=self.dp_process_group) - self._model_parallel_all_reduce(tensor=total_norm_cuda, - op=torch.distributed.ReduceOp.SUM) + self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.SUM) total_norm = total_norm_cuda.item()**(1. 
/ norm_type) @@ -2122,10 +1718,11 @@ class DeepSpeedZeroOptimizer_Stage3(object): self.fp32_partitioned_groups_flat[sub_group_id].grad = single_grad_partition # release all the gradient since we have already created a necessary copy in dp_grad_partition - self.zero_grad() + self.zero_grad(set_to_none=True) - for grad in filter(lambda g: g.is_cuda, self.averaged_gradients[sub_group_id]): - grad.record_stream(torch.cuda.current_stream()) + for grad in filter(lambda g: get_accelerator().on_accelerator(g), + self.averaged_gradients[sub_group_id]): + grad.record_stream(get_accelerator().current_stream()) self.averaged_gradients[sub_group_id] = None @@ -2230,7 +1827,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): def _overflow_clean_up(self, prev_scale): see_memory_usage('After overflow before clearing gradients', force=False) - self.zero_grad() + self.zero_grad(set_to_none=True) if self.offload_optimizer: self.reset_cpu_buffers() @@ -2239,12 +1836,11 @@ class DeepSpeedZeroOptimizer_Stage3(object): see_memory_usage('After overflow after clearing gradients', force=False) - if torch.distributed.get_rank() == 0: - logger.info( - "[deepspeed] OVERFLOW! Rank {} Skipping step. Attempted loss scale: {}, " - "reducing to {}".format(dist.get_rank(), - prev_scale, - self.loss_scale)) + if dist.get_rank() == 0: + overflow_msg = f"[deepspeed] OVERFLOW! Rank {dist.get_rank()} Skipping step." 
+ if self.dtype == torch.half: + overflow_msg += f" Attempted loss scale: {prev_scale}, reducing to {self.loss_scale}" + logger.info(overflow_msg) @instrument_w_nvtx def _overflow_check_and_loss_scale_update(self): @@ -2289,6 +1885,14 @@ class DeepSpeedZeroOptimizer_Stage3(object): else: self._partitioned_params_swap_out(sub_group_id) + def override_loss_scale(self, loss_scale): + if loss_scale != self.external_loss_scale: + logger.info( + f'[deepspeed] setting loss scale from {self.external_loss_scale} -> {loss_scale}' + ) + self.custom_loss_scaler = True + self.external_loss_scale = loss_scale + @instrument_w_nvtx def step(self, closure=None): """ @@ -2337,9 +1941,8 @@ class DeepSpeedZeroOptimizer_Stage3(object): self._post_step(timer_names) # warn user about caching allocator flushes - alloc_retries = torch.cuda.memory_stats()["num_alloc_retries"] if hasattr( - torch.cuda, - "memory_stats") else 0 + memory_stats = get_accelerator().memory_stats() + alloc_retries = memory_stats["num_alloc_retries"] if memory_stats != None else 0 if alloc_retries > self.__n_caching_allocator_flushes: if dist.get_rank() == 0: logger.warning( @@ -2348,7 +1951,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): "performance. if this is happening frequently consider adjusting " "settings to reduce memory consumption. 
If you are unable to " "make the cache flushes go away consider adding " - "torch.cuda.empty_cache() calls in your training loop to ensure " + "get_accelerator().empty_cache() calls in your training loop to ensure " "that all ranks flush their caches at the same time", alloc_retries - self.__n_caching_allocator_flushes) self.__n_caching_allocator_flushes = alloc_retries @@ -2419,16 +2022,16 @@ class DeepSpeedZeroOptimizer_Stage3(object): @instrument_w_nvtx def has_overflow(self, partition_gradients=True): if partition_gradients: - with torch.cuda.stream(self.__reduce_and_partition_stream): + with get_accelerator().stream(self.__reduce_and_partition_stream): self.local_overflow = bool(self.__inf_or_nan_tracker.item()) self.__inf_or_nan_tracker.zero_() overflow = self.local_overflow #overflow = self.has_overflow_partitioned_grads_serial() - overflow_gpu = torch.cuda.ByteTensor([overflow]) - torch.distributed.all_reduce(overflow_gpu, - op=torch.distributed.ReduceOp.MAX, - group=self.dp_process_group) + overflow_gpu = get_accelerator().ByteTensor([overflow]) + dist.all_reduce(overflow_gpu, + op=dist.ReduceOp.MAX, + group=self.dp_process_group) else: params = [] @@ -2437,12 +2040,11 @@ class DeepSpeedZeroOptimizer_Stage3(object): params.append(param) overflow = self.has_overflow_serial(params, is_grad_list=partition_gradients) - overflow_gpu = torch.cuda.ByteTensor([overflow]) + overflow_gpu = get_accelerator().ByteTensor([overflow]) # Since each model parallel GPU carries only part of the model, # make sure overflow flag is synced across all the model parallel GPUs - self._model_parallel_all_reduce(tensor=overflow_gpu, - op=torch.distributed.ReduceOp.MAX) + self._model_parallel_all_reduce(tensor=overflow_gpu, op=dist.ReduceOp.MAX) overflow = overflow_gpu[0].item() return bool(overflow) @@ -2483,7 +2085,11 @@ class DeepSpeedZeroOptimizer_Stage3(object): see_memory_usage(f"Before backward", force=False) - self.loss_scaler.backward(loss.float(), retain_graph=retain_graph) 
+ if self.custom_loss_scaler: + scaled_loss = self.external_loss_scale * loss + scaled_loss.backward() + else: + self.loss_scaler.backward(loss.float(), retain_graph=retain_graph) self._get_param_coordinator(training=True).reset_step() @@ -2512,16 +2118,68 @@ class DeepSpeedZeroOptimizer_Stage3(object): return grad_dict + def _fp32_state_allgather(self, param, fp32_state): + reduce_buffer = torch.zeros(self.partition_count * fp32_state.numel(), + dtype=torch.float32, + device=param.device).flatten() + my_rank = dist.get_rank(group=self.dp_process_group) + partitions = [ + reduce_buffer.narrow(0, + fp32_state.numel() * i, + fp32_state.numel()) for i in range(self.partition_count) + ] + partitions[my_rank].data.copy_(fp32_state.data, non_blocking=False) + + dist.all_gather(partitions, partitions[my_rank], group=self.dp_process_group) + + return reduce_buffer.narrow(0, 0, param.ds_numel).view(param.ds_shape) + + def get_fp32_grad_for_param(self, param) -> Tensor: + if not param.requires_grad: + return None + + self.__reduce_and_partition_stream.synchronize() + + if self.offload_optimizer: + group_idx, dest_offset, num_elements = self.grad_position[self.get_param_id(param)] + fp32_grad = self.fp32_partitioned_groups_flat[group_idx].grad.narrow( + 0, + dest_offset, + num_elements).to(device=param.device) + else: + fp32_grad = self.__param_id_to_grad_partition[param.ds_id].float() + + return self._fp32_state_allgather(param, fp32_grad) + + def get_full_hp_param(self, param, optim_state_key=None) -> Tensor: + if not param.requires_grad: + return None + + self.__reduce_and_partition_stream.synchronize() + group_idx, dest_offset, num_elements = self.grad_position[self.get_param_id(param)] + + if self._swappable_optimizer_subgroup(group_idx): + self._optimizer_states_and_gradient_swap_in(group_idx) + + fp32_param = self.fp32_partitioned_groups_flat[group_idx] + if optim_state_key is None: + fp32_opt_state = fp32_param.narrow(0, + dest_offset, + 
num_elements).to(device=param.device) + else: + fp32_opt_state = self.optimizer.state[fp32_param][optim_state_key].narrow( + 0, + dest_offset, + num_elements).to(device=param.device) + + hp_param = self._fp32_state_allgather(param, fp32_opt_state) + if self._swappable_optimizer_subgroup(group_idx): + self._optimizer_states_and_gradient_swap_out(group_idx) + return hp_param + @instrument_w_nvtx def _partition_all_parameters(self): - """Partitioning Parameters that were not partitioned usually if parameters - of modules whose input parameters do not require grad computation do not - trigger post call and will therefore will remain unpartitioned""" - self._get_param_coordinator(training=self.module.training).release_and_reset_all( - self.module) - for param in iter_params(self.module, recurse=True): - if param.ds_status != ZeroParamStatus.NOT_AVAILABLE: - raise RuntimeError(f"{param.ds_summary()} expected to be released") + self.parameter_offload.partition_all_parameters() def check_overflow(self, partition_gradients=True): self._check_overflow(partition_gradients) @@ -2545,12 +2203,16 @@ class DeepSpeedZeroOptimizer_Stage3(object): def _set_param_groups(self, value): self.optimizer.param_groups = value + self.trainable_param_groups = self._get_trainable_parameter_groups() param_groups = property(_get_param_groups, _set_param_groups) # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale" def _get_loss_scale(self): - return self.loss_scaler.loss_scale + if self.custom_loss_scaler: + return self.external_loss_scale + else: + return self.loss_scaler.cur_scale def _set_loss_scale(self, value): self.loss_scaler.cur_scale = value @@ -2613,7 +2275,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): def _rigid_state_dict(self): state_dict = {} - state_dict[ZERO_STAGE] = ZERO_OPTIMIZATION_WEIGHTS + state_dict[ZERO_STAGE] = ZeroStageEnum.weights state_dict['loss_scaler'] = self.loss_scaler state_dict['dynamic_loss_scale'] = 
self.dynamic_loss_scale state_dict['overflow'] = self.overflow @@ -2759,7 +2421,8 @@ class DeepSpeedZeroOptimizer_Stage3(object): def load_state_dict(self, state_dict_list, load_optimizer_states=True, - load_from_fp32_weights=False): + load_from_fp32_weights=False, + checkpoint_folder=None): r"""Loading a ZeRO checkpoint Arguments: state_dict_list: List of all saved ZeRO checkpoints, one for each saved partition. @@ -2776,7 +2439,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): will call ``model.load_state_dict()`` before ``fp16_optimizer_instance.load_state_dict()`` is called. Example:: - model = torch.nn.Linear(D_in, D_out).cuda().half() + model = torch.nn.Linear(D_in, D_out).to(get_accelerator().device_name()).half() optimizer = torch.optim.SGD(model.parameters(), lr=1e-3) optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0) ... @@ -2813,7 +2476,7 @@ class DeepSpeedZeroOptimizer_Stage3(object): def _handle_overflow(cpu_sum, x, i): import math - rank = torch.distributed.get_rank() + rank = dist.get_rank() if rank == 0: t_i = -1 for v_i, v in enumerate(x.data.contiguous().view(-1)): @@ -2881,9 +2544,6 @@ def model_to_params(model): return total_params, largest_layer_params -import math - - def estimate_zero3_model_states_mem_needs_all_live(model, num_gpus_per_node=1, num_nodes=1, @@ -2941,11 +2601,11 @@ def estimate_zero3_model_states_mem_needs_all_cold(total_params, """ def format_options(cpu_offload, cpu_offload_params, zero_init): enabled = [] - padded_cpu_str = f'{OFFLOAD_CPU_DEVICE:4}' + padded_cpu_str = f'{OffloadDeviceEnum.cpu:4}' param_device = padded_cpu_str if cpu_offload_params else "none" - enabled.append(f"{OFFLOAD_PARAM}={param_device}") + enabled.append(f"offload_param={param_device}") optimizer_device = padded_cpu_str if cpu_offload else "none" - enabled.append(f"{OFFLOAD_OPTIMIZER}={optimizer_device}") + enabled.append(f"offload_optimizer={optimizer_device}") enabled.append(f"zero_init={1 if zero_init else 0}") return ", 
".join(enabled) diff --git a/deepspeed/runtime/zero/stage_1_and_2.py b/deepspeed/runtime/zero/stage_1_and_2.py old mode 100644 new mode 100755 index ab3eec3..aef2513 --- a/deepspeed/runtime/zero/stage_1_and_2.py +++ b/deepspeed/runtime/zero/stage_1_and_2.py @@ -3,33 +3,43 @@ Copyright 2019 The Microsoft DeepSpeed Team ''' import torch -from torch.distributed.distributed_c10d import _get_global_rank -import torch.distributed as dist -from torch._six import inf +import os +from deepspeed import comm as dist from packaging import version as pkg_version +from collections import OrderedDict -from deepspeed.runtime.fp16.loss_scaler import LossScaler, DynamicLossScaler +from deepspeed.runtime import ZeROOptimizer +from deepspeed.runtime.fp16.loss_scaler import CreateLossScaler from deepspeed.runtime.utils import (bwc_tensor_model_parallel_rank, get_global_norm, + empty_cache, see_memory_usage, + inf, is_model_parallel_parameter, align_dense_tensors, all_gather_dp_groups) -from deepspeed.runtime.zero.config import ZERO_OPTIMIZATION_GRADIENTS -from deepspeed.runtime.zero.offload_constants import OFFLOAD_CPU_DEVICE, OFFLOAD_OPTIMIZER +from deepspeed.runtime.zero.config import ZeroStageEnum +from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum from deepspeed.ops.adam import DeepSpeedCPUAdam -from deepspeed.ops.op_builder import UtilsBuilder from deepspeed.utils import logger from deepspeed.moe.utils import is_moe_param from deepspeed.git_version_info import version + from deepspeed.runtime.constants import PIPE_REPLICATED +from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import UtilsBuilder + from deepspeed.checkpoint.constants import (DS_VERSION, + GROUP_PADDINGS, PARTITION_COUNT, SINGLE_PARTITION_OF_FP32_GROUPS, BASE_OPTIMIZER_STATE, CLIP_GRAD, - ZERO_STAGE) + ZERO_STAGE, + PARAM_SLICE_MAPPINGS) +from deepspeed.utils import link_hp_params +from deepspeed.checkpoint import enable_universal_checkpoint # Toggle this to true to 
enable correctness test # with gradient partitioning and without @@ -41,11 +51,12 @@ def input(msg): def split_half_float_double(tensors): + device_type = get_accelerator().device_name() dtypes = [ - "torch.cuda.HalfTensor", - "torch.cuda.FloatTensor", - "torch.cuda.DoubleTensor", - "torch.cuda.BFloat16Tensor" + "torch.{}.HalfTensor".format(device_type), + "torch.{}.FloatTensor".format(device_type), + "torch.{}.DoubleTensor".format(device_type), + "torch.{}.BFloat16Tensor".format(device_type) ] buckets = [] for i, dtype in enumerate(dtypes): @@ -88,7 +99,7 @@ def _get_padded_tensor(src_tensor, size): return padded_tensor -class DeepSpeedZeroOptimizer(object): +class DeepSpeedZeroOptimizer(ZeROOptimizer): """ DeepSpeedZeroOptimizer designed to reduce the memory footprint required for training large deep learning models. @@ -101,6 +112,7 @@ class DeepSpeedZeroOptimizer(object): """ def __init__(self, init_optimizer, + param_names, timers, static_loss_scale=1.0, dynamic_loss_scale=False, @@ -138,14 +150,15 @@ class DeepSpeedZeroOptimizer(object): # 2. keep common stuff here in case we need to add ne552w fused optimizer later self.elastic_checkpoint = elastic_checkpoint - + self.param_names = param_names + self.mpu = mpu # differences from apex.fp16_utils: # - assume all model params in fp16 # - assume all params requires grad # - flat by groups, not keeping state. TODO: remove state explicitly? # - master grad and unflat master weight never exist. TODO: a way to save out unflat master? 
- if not torch.cuda.is_available: - raise SystemError("Cannot use fp16 without CUDA.") + if not get_accelerator().is_available(): + raise SystemError("Cannot use fp16 without accelerator.") self.optimizer = init_optimizer # Load pre-built or JIT compile (un)flatten ops @@ -166,7 +179,8 @@ class DeepSpeedZeroOptimizer(object): self.deepspeed_adam_offload = cpu_offload - self.device = torch.cuda.current_device() if not self.cpu_offload else 'cpu' + self.device = get_accelerator().current_device_name( + ) if not self.cpu_offload else 'cpu' self.dp_process_group = dp_process_group @@ -198,9 +212,11 @@ class DeepSpeedZeroOptimizer(object): if mpu is None: self.model_parallel_group = None + self.model_parallel_world_size = 1 self.model_parallel_rank = 0 else: self.model_parallel_group = mpu.get_model_parallel_group() + self.model_parallel_world_size = mpu.get_model_parallel_world_size() self.model_parallel_rank = bwc_tensor_model_parallel_rank(mpu) self.overflow = False @@ -253,7 +269,7 @@ class DeepSpeedZeroOptimizer(object): # number of elements per partition in each group self.partition_size = [] - #align nccl all-gather send buffers to 4-bye boundary + # align nccl all-gather send buffers to 4-byte boundary self.nccl_start_alignment_factor = 2 # 4-byte alignment/sizeof(fp16) = 2 assert (allgather_bucket_size % self.nccl_start_alignment_factor == 0), f"allgather_bucket_size must be a multiple of nccl_start_alignment_factor, {self.nccl_start_alignment_factor} " @@ -278,21 +294,13 @@ class DeepSpeedZeroOptimizer(object): ] self.bit16_groups.append(trainable_parameters) - # Record padding required to align group to world size - if partition_id == dist.get_world_size( - group=self.real_dp_process_group[i]) - 1: - padding = get_alignment_padding(self.bit16_groups[i], - self.partition_count[i]) - else: - padding = 0 - self.groups_padding.append(padding) - # not sure why apex was cloning the weights before flattening # removing cloning here see_memory_usage(f"Before moving 
param group {i} to CPU") # move all the parameters to cpu to free up GPU space for creating flat buffer move_to_cpu(self.bit16_groups[i]) + empty_cache() see_memory_usage(f"After moving param group {i} to CPU", force=False) # Reorder group parameters for load balancing of gradient partitioning during backward among ranks. @@ -316,11 +324,20 @@ class DeepSpeedZeroOptimizer(object): self.flatten_dense_tensors_aligned( self.round_robin_bit16_groups[i], self.nccl_start_alignment_factor * - dist.get_world_size(group=self.real_dp_process_group[i])).cuda( - torch.cuda.current_device())) + dist.get_world_size(group=self.real_dp_process_group[i])).to( + get_accelerator().current_device_name())) see_memory_usage(f"After flattening and moving param group {i} to GPU", force=False) + # Record padding required for alignment + if partition_id == dist.get_world_size( + group=self.real_dp_process_group[i]) - 1: + padding = self.bit16_groups_flat[i].numel() - sum( + [t.numel() for t in self.round_robin_bit16_groups[i]]) + else: + padding = 0 + self.groups_padding.append(padding) + if dist.get_rank(group=self.real_dp_process_group[i]) == 0: see_memory_usage( f"After Flattening and after emptying param group {i} cache", @@ -341,12 +358,9 @@ class DeepSpeedZeroOptimizer(object): assert (partitioned_data.data_ptr() % (2 * self.nccl_start_alignment_factor) == 0) - # verify that data partition start locations are 4-byte aligned - for partitioned_data in data_parallel_partitions: - assert (partitioned_data.data_ptr() % - (2 * self.nccl_start_alignment_factor) == 0) - - # a partition of the fp32 master weights that will be updated by this process + # A partition of the fp32 master weights that will be updated by this process. + # Note that the params in single_partition_of_fp32_groups is cloned and detached + # from the origin params of the model. 
if not fp16_master_weights_and_gradients: self.single_partition_of_fp32_groups.append( self.parallel_partitioned_bit16_groups[i][partition_id].to( @@ -356,7 +370,9 @@ class DeepSpeedZeroOptimizer(object): self.parallel_partitioned_bit16_groups[i][partition_id].to( self.device).clone().half().detach()) - # modify optimizer of have flat master weight + # Set local optimizer to have flat params of its own partition. + # After this, the local optimizer will only contain its own partition of params. + # In that case, the local optimizer only saves the states(momentum, variance, etc.) related to its partition's params(zero stage1). self.single_partition_of_fp32_groups[ i].requires_grad = True # keep this in case internal optimizer uses it param_group['params'] = [self.single_partition_of_fp32_groups[i]] @@ -379,14 +395,15 @@ class DeepSpeedZeroOptimizer(object): f"Rank: {rank} partition count {self.partition_count} and sizes{[(p.numel(), self.is_moe_param_group[i] if hasattr(self, 'is_moe_param_group') else False) for i,p in enumerate(self.single_partition_of_fp32_groups)]} " ) dist.barrier() - #exit(0) + self.reduce_bucket_size = int(reduce_bucket_size) self.allgather_bucket_size = int(allgather_bucket_size) - self.reduction_event = torch.cuda.Event(enable_timing=False, blocking=False) - self.reduction_stream = torch.cuda.Stream() - self.cpu_computation_stream = torch.cuda.Stream() - self.copy_grad_stream = torch.cuda.Stream() + self.reduction_event = get_accelerator().Event(enable_timing=False, + blocking=False) + self.reduction_stream = get_accelerator().Stream() + self.cpu_computation_stream = get_accelerator().Stream() + self.copy_grad_stream = get_accelerator().Stream() self.callback_queued = False self.param_dict = {} @@ -431,13 +448,13 @@ class DeepSpeedZeroOptimizer(object): self.norm_for_param_grads = {} self.local_overflow = False self.grad_position = {} - self.temp_grad_buffer_for_cpu_offload = torch.zeros( - largest_param_numel, - device=self.device, - 
dtype=self.dtype).pin_memory() + self.temp_grad_buffer_for_cpu_offload = get_accelerator().pin_memory( + torch.zeros(largest_param_numel, + device=self.device, + dtype=self.dtype)) self.temp_grad_buffer_for_gpu_offload = torch.zeros( largest_param_numel, - device=torch.cuda.current_device(), + device=get_accelerator().current_device_name(), dtype=self.dtype) for i, params_group in enumerate(self.bit16_groups): self.get_grad_position(i, @@ -469,6 +486,9 @@ class DeepSpeedZeroOptimizer(object): # will store the averaged gradients required by this partition self.averaged_gradients = {} + # For cpu_offload, will store the averaged gradients required by this partition + self.offload_gradient_dict = {} + # store index of first parameter in each partition self.first_param_index_in_partition = {} @@ -482,22 +502,15 @@ class DeepSpeedZeroOptimizer(object): if self.partition_gradients or self.overlap_comm: self.create_reduce_and_remove_grad_hooks() - # we may have a way of fusing dynamic scale. Do not support for now - if self.dtype == torch.float or self.dtype == torch.bfloat16 or not dynamic_loss_scale: - loss_scale_value = 1.0 if ( - (self.dtype == torch.float) or - (self.dtype == torch.bfloat16)) else static_loss_scale - - self.dynamic_loss_scale = False - self.loss_scaler = LossScaler(scale=loss_scale_value) - cur_iter = 0 - else: - if dynamic_loss_args is None: - self.loss_scaler = DynamicLossScaler() - else: - self.loss_scaler = DynamicLossScaler(**dynamic_loss_args) + self.custom_loss_scaler = False + self.external_loss_scale = None - self.dynamic_loss_scale = True + # we may have a way of fusing dynamic scale. 
Do not support for now + self.loss_scaler = CreateLossScaler(dtype=self.dtype, + static_loss_scale=static_loss_scale, + dynamic_scaling=dynamic_loss_scale, + dynamic_loss_args=dynamic_loss_args) + self.dynamic_loss_scale = self.loss_scaler.dynamic see_memory_usage("Before initializing optimizer states", force=True) self.initialize_optimizer_states() @@ -509,11 +522,61 @@ class DeepSpeedZeroOptimizer(object): if dist.get_rank(group=self.dp_process_group) == 0: see_memory_usage(f"After initializing ZeRO optimizer", force=True) + self._link_all_hp_params() + self._enable_universal_checkpoint() + self._param_slice_mappings = self._create_param_mapping() + + def _enable_universal_checkpoint(self): + for lp_param_group in self.bit16_groups: + enable_universal_checkpoint(param_list=lp_param_group) + + def _create_param_mapping(self): + param_mapping = [] + for i, _ in enumerate(self.optimizer.param_groups): + param_mapping_per_group = OrderedDict() + for lp in self.bit16_groups[i]: + if lp._hp_mapping is not None: + lp_name = self.param_names[lp] + param_mapping_per_group[ + lp_name] = lp._hp_mapping.get_hp_fragment_address() + param_mapping.append(param_mapping_per_group) + + return param_mapping + + def _link_all_hp_params(self): + dp_world_size = dist.get_world_size(group=self.dp_process_group) + if self.cpu_offload: + self._get_offload_gradient_dict() + + for i, _ in enumerate(self.optimizer.param_groups): + # Link bit16 and fp32 params in partition + partition_id = dist.get_rank(group=self.real_dp_process_group[i]) + partition_size = self.bit16_groups_flat[i].numel() // dp_world_size + flat_hp_partition = self.single_partition_of_fp32_groups[i] + link_hp_params( + lp_param_list=self.bit16_groups[i], + flat_hp_partition=flat_hp_partition, + gradient_dict=self.averaged_gradients, + offload_gradient_dict=self.offload_gradient_dict, + use_offload=self.cpu_offload, + param_group_index=i, + partition_start=partition_id * partition_size, + partition_size=partition_size, + 
partition_optimizer_state=self.optimizer.state[flat_hp_partition], + dp_group=self.real_dp_process_group[i]) + def is_moe_group(self, group): return 'moe' in group and group['moe'] def _configure_moe_settings(self): - assert self.contiguous_gradients, "Contiguous Gradients in ZeRO Stage 2 must be set to True for MoE. Other code paths are not tested with MoE" + # if we're using ZeRO stage 2, ensure contiguous gradients are used + if self.partition_gradients: + assert self.contiguous_gradients, "Contiguous Gradients in ZeRO Stage 2 must be set to True for MoE. Other code paths are not tested with MoE" + # NOTE: To run ZeRO stage 1 with MoE, we need to set self.contiguous_gradients to True or ignore the assertion + if not self.partition_gradients and not self.contiguous_gradients: + logger.warn( + "ZeRO Stage 1 has not been thoroughly tested with MoE. This configuration is still experimental." + ) assert self.reduce_scatter, "Reduce Scatter in ZeRO Stage 2 must be set to True for MoE. Other code paths are not tested with MoE" assert any([self.is_moe_group(group) for group in self.optimizer.param_groups]), "The model has moe layers, but None of the param groups are marked as MoE. 
Create a param group with 'moe' key set to True before creating optimizer" @@ -579,9 +642,8 @@ class DeepSpeedZeroOptimizer(object): int(self.partition_size[i]), dtype=self.single_partition_of_fp32_groups[i].dtype, device=self.device) - self.single_partition_of_fp32_groups[ - i].grad = single_grad_partition.pin_memory( - ) if self.cpu_offload else single_grad_partition + self.single_partition_of_fp32_groups[i].grad = get_accelerator().pin_memory( + single_grad_partition) if self.cpu_offload else single_grad_partition self.optimizer.step() @@ -603,7 +665,7 @@ class DeepSpeedZeroOptimizer(object): self.ipg_buffer = [] buf_0 = torch.empty(int(self.reduce_bucket_size), dtype=self.dtype, - device=torch.cuda.current_device()) + device=get_accelerator().current_device_name()) self.ipg_buffer.append(buf_0) self.ipg_index = 0 @@ -664,7 +726,7 @@ class DeepSpeedZeroOptimizer(object): self.params_already_reduced[i] = False if self.overlap_comm: - torch.cuda.synchronize() + get_accelerator().synchronize() # It is safe to clear previously reduced grads of other partitions self._clear_previous_reduced_grads() @@ -677,15 +739,16 @@ class DeepSpeedZeroOptimizer(object): self.first_offset[i], self.partition_size[i], dtype=self.dtype, - device=torch.cuda.current_device(), + device=get_accelerator().current_device_name(), return_tensor_list=True) else: - avg_new = self.get_flat_partition(self.params_in_partition[i], - self.first_offset[i], - self.partition_size[i], - dtype=self.dtype, - device=torch.cuda.current_device(), - return_tensor_list=True) + avg_new = self.get_flat_partition( + self.params_in_partition[i], + self.first_offset[i], + self.partition_size[i], + dtype=self.dtype, + device=get_accelerator().current_device_name(), + return_tensor_list=True) for accumulated_grad, new_avg_grad in zip(self.averaged_gradients[i], avg_new): accumulated_grad.add_(new_avg_grad) @@ -695,7 +758,7 @@ class DeepSpeedZeroOptimizer(object): # No need to keep the gradients anymore. 
# All gradients required by the step # are in self.averaged_gradients - self.zero_grad() + self.zero_grad(set_to_none=True) see_memory_usage(f"End ipg_epilogue") # resets all partition to no reduced @@ -878,12 +941,12 @@ class DeepSpeedZeroOptimizer(object): def average_tensor(self, tensor): if self.overlap_comm: - torch.cuda.synchronize() stream = self.reduction_stream + stream.wait_stream(get_accelerator().current_stream()) else: - stream = torch.cuda.current_stream() + stream = get_accelerator().current_stream() - with torch.cuda.stream(stream): + with get_accelerator().stream(stream): if not self.reduce_scatter: self.gradient_reduction_w_predivide(tensor) return @@ -950,14 +1013,18 @@ class DeepSpeedZeroOptimizer(object): if not self.ipg_bucket_has_moe_params: tensor.div_(dist.get_world_size(group=self.dp_process_group)) + tensor_to_reduce = tensor + if self.communication_data_type != tensor.dtype: + tensor_to_reduce = tensor.to(self.communication_data_type) + async_handles = [] for i, (dst, bucket_offset, numel) in enumerate(rank_and_offsets): - grad_slice = tensor.narrow(0, int(bucket_offset), int(numel)) + grad_slice = tensor_to_reduce.narrow(0, int(bucket_offset), int(numel)) # if dist.get_rank() == 0: # print(f"Rank {dist.get_rank()} rank offset id {i} real dp size {dist.get_world_size(group=real_dp_process_group[i])} and dst: {dst}") # dist.barrier() #dist.barrier() - dst_rank = _get_global_rank(real_dp_process_group[i], dst) + dst_rank = dist.get_global_rank(real_dp_process_group[i], dst) async_handle = dist.reduce(grad_slice, dst=dst_rank, group=real_dp_process_group[i], @@ -967,6 +1034,9 @@ class DeepSpeedZeroOptimizer(object): for handle in async_handles: handle.wait() + if self.communication_data_type != tensor.dtype: + tensor.copy_(tensor_to_reduce) + ############################################################################## ############################# CPU Offload Methods############################# 
############################################################################## @@ -978,7 +1048,6 @@ class DeepSpeedZeroOptimizer(object): param_start_offset = 0 num_elements = tensor.numel() - tensor_offset = 0 # we need to offset to get to the right element if i == 0 and first_offset > 0: @@ -1002,6 +1071,18 @@ class DeepSpeedZeroOptimizer(object): if param.grad is not None and self._has_inf_or_nan(param.grad.data): self.local_overflow = True + def _get_offload_gradient_dict(self): + for param_group_index, _ in enumerate(self.optimizer.param_groups): + self.offload_gradient_dict[param_group_index] = [] + for lp_param in self.params_in_partition[param_group_index]: + param_id = self.get_param_id(lp_param) + [_, _, dest_offset, num_elements] = self.grad_position[param_id] + dest_tensor = self.single_partition_of_fp32_groups[ + param_group_index].grad.view(-1).narrow(0, + dest_offset, + num_elements) + self.offload_gradient_dict[param_group_index].append(dest_tensor) + def async_accumulate_grad_in_cpu_via_gpu(self, param): param_id = self.get_param_id(param) @@ -1016,9 +1097,10 @@ class DeepSpeedZeroOptimizer(object): #buffer for storing gradients for this parameter in CPU def buffer_to_accumulate_to_in_cpu(): if not self.fp16_master_weights_and_gradients: - return torch.zeros(param.numel(), - dtype=param.dtype, - device=self.device).pin_memory() + return get_accelerator().pin_memory( + torch.zeros(param.numel(), + dtype=param.dtype, + device=self.device)) else: return self.single_partition_of_fp32_groups[i].grad.view(-1).narrow( 0, @@ -1137,13 +1219,12 @@ class DeepSpeedZeroOptimizer(object): """ # Sum across all model parallel GPUs. 
- total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)]) - torch.distributed.all_reduce(total_norm_cuda, - op=torch.distributed.ReduceOp.SUM, - group=self.dp_process_group) + total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) + dist.all_reduce(total_norm_cuda, + op=dist.ReduceOp.SUM, + group=self.dp_process_group) - self._model_parallel_all_reduce(tensor=total_norm_cuda, - op=torch.distributed.ReduceOp.SUM) + self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.SUM) total_norm = total_norm_cuda[0].item()**(1. / norm_type) @@ -1177,9 +1258,10 @@ class DeepSpeedZeroOptimizer(object): total_size += param_in_partition.numel() see_memory_usage(f"before copying {total_size} gradients into partition") - self.grads_in_partition = torch.empty(int(total_size), - dtype=self.dtype, - device=torch.cuda.current_device()) + self.grads_in_partition = torch.empty( + int(total_size), + dtype=self.dtype, + device=get_accelerator().current_device_name()) see_memory_usage(f"after copying {total_size} gradients into partition") # The allreduce buffer will be rewritten. Copy the gradients in partition to a new buffer @@ -1213,13 +1295,13 @@ class DeepSpeedZeroOptimizer(object): stream = self.reduction_stream elif self.cpu_offload: # TODO: copy_grad_stream is disabled because of race with reduce. This hurts perf and should be fixed. 
- # torch.cuda.synchronize() + # get_accelerator().synchronize() # stream = self.copy_grad_stream - stream = torch.cuda.current_stream() + stream = get_accelerator().current_stream() else: - stream = torch.cuda.current_stream() + stream = get_accelerator().current_stream() - with torch.cuda.stream(stream): + with get_accelerator().stream(stream): for _, param, param_id in self.params_in_ipg_bucket: assert self.params_already_reduced[param_id] == False, \ @@ -1323,11 +1405,7 @@ class DeepSpeedZeroOptimizer(object): param.grad = torch.zero_like(param) ######################Reduction Related Methods############################## - def allreduce_bucket(self, - bucket, - communication_data_type=torch.float16, - rank=None, - log=None): + def allreduce_bucket(self, bucket, rank=None, log=None): rank = None tensor = self.flatten(bucket) @@ -1335,6 +1413,8 @@ class DeepSpeedZeroOptimizer(object): if pg_correctness_test: communication_data_type = torch.float32 + else: + communication_data_type = self.communication_data_type if communication_data_type != tensor.dtype: tensor_to_allreduce = tensor.to(communication_data_type) @@ -1345,7 +1425,7 @@ class DeepSpeedZeroOptimizer(object): # "All Reducing" dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group) else: - global_rank = _get_global_rank(self.dp_process_group, rank) + global_rank = dist.get_global_rank(self.dp_process_group, rank) dist.reduce(tensor_to_allreduce, global_rank, group=self.dp_process_group) if communication_data_type != tensor.dtype and tensor is not tensor_to_allreduce: @@ -1363,14 +1443,14 @@ class DeepSpeedZeroOptimizer(object): # if rank is specified do a reduction instead of an allreduce def allreduce_and_copy(self, small_bucket, rank=None, log=None): if self.overlap_comm: - torch.cuda.synchronize() + get_accelerator().synchronize() # It is safe to clear the previously reduced grads of other partitions self._clear_previous_reduced_grads() stream = self.reduction_stream else: - stream = 
torch.cuda.current_stream() + stream = get_accelerator().current_stream() - with torch.cuda.stream(stream): + with get_accelerator().stream(stream): allreduced = self.allreduce_bucket(small_bucket, rank=rank, log=log) if rank is None or rank == dist.get_rank(group=self.dp_process_group): for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)): @@ -1418,7 +1498,7 @@ class DeepSpeedZeroOptimizer(object): partitions = [] dp = dist.get_world_size(group=self.real_dp_process_group[group_id]) - dp_id = dist.get_rank(group=self.real_dp_process_group[group_id]) + # dp_id = dist.get_rank(group=self.real_dp_process_group[group_id]) total_num_elements = tensor.numel() @@ -1465,7 +1545,7 @@ class DeepSpeedZeroOptimizer(object): return params_in_partition, params_not_in_partition, first_offset - def zero_grad(self, set_grads_to_None=True): + def zero_grad(self, set_to_none=False): """ Zero FP16 parameter grads. """ @@ -1473,7 +1553,7 @@ class DeepSpeedZeroOptimizer(object): # For speed, set model fp16 grad to None by default for group in self.bit16_groups: for p in group: - if set_grads_to_None: + if set_to_none: p.grad = None # epilogue and in step else: if p.grad is not None: @@ -1483,12 +1563,10 @@ class DeepSpeedZeroOptimizer(object): def _model_parallel_all_reduce(self, tensor, op): """ Perform all reduce within model parallel group, if any. """ - if self.model_parallel_group is None: + if self.model_parallel_group is None or self.model_parallel_world_size == 1: pass else: - torch.distributed.all_reduce(tensor=tensor, - op=op, - group=self.model_parallel_group) + dist.all_reduce(tensor=tensor, op=op, group=self.model_parallel_group) def get_grad_norm_direct(self, gradients, params, norm_type=2): """Clips gradient norm of an iterable of parameters. 
@@ -1510,14 +1588,13 @@ class DeepSpeedZeroOptimizer(object): norm_type = float(norm_type) if norm_type == inf: total_norm = max(g.data.abs().max() for g in gradients) - total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)]) - torch.distributed.all_reduce(total_norm_cuda, - op=torch.distributed.ReduceOp.MAX, - group=self.dp_process_group) + total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) + dist.all_reduce(total_norm_cuda, + op=dist.ReduceOp.MAX, + group=self.dp_process_group) # Take max across all GPUs. - self._model_parallel_all_reduce(tensor=total_norm_cuda, - op=torch.distributed.ReduceOp.MAX) + self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.MAX) total_norm = total_norm_cuda[0].item() else: total_norm = 0.0 @@ -1531,13 +1608,12 @@ class DeepSpeedZeroOptimizer(object): param_norm = g.data.double().norm(2) total_norm += param_norm.item()**2 # Sum across all model parallel GPUs. - total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)]) - torch.distributed.all_reduce(total_norm_cuda, - op=torch.distributed.ReduceOp.SUM, - group=self.dp_process_group) + total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) + dist.all_reduce(total_norm_cuda, + op=dist.ReduceOp.SUM, + group=self.dp_process_group) - self._model_parallel_all_reduce(tensor=total_norm_cuda, - op=torch.distributed.ReduceOp.SUM) + self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.SUM) total_norm = total_norm_cuda[0].item()**(1. 
/ norm_type) @@ -1628,6 +1704,61 @@ class DeepSpeedZeroOptimizer(object): for name in timer_names: self.timers(name).stop() + def set_lr(self, lr): + """Set the learning rate.""" + for param_group in self.optimizer.param_groups: + param_group["lr"] = lr + + def get_lr(self): + """Return the current learning rate.""" + return self.optimizer.param_groups[0]["lr"] + + def override_loss_scale(self, loss_scale): + if loss_scale != self.external_loss_scale: + logger.info( + f'[deepspeed] setting loss scale from {self.external_loss_scale} -> {loss_scale}' + ) + self.custom_loss_scaler = True + self.external_loss_scale = loss_scale + + def scaled_global_norm(self, norm_type=2): + assert norm_type == 2, "only L2 norm supported" + norm_groups = [] + for i, group in enumerate(self.bit16_groups): + partition_id = dist.get_rank(group=self.real_dp_process_group[i]) + if self.cpu_offload: + norm_groups.append( + self.complete_grad_norm_calculation_for_cpu_offload( + self.params_in_partition[i])) + single_grad_partition = self.single_partition_of_fp32_groups[i].grad + else: + norm_groups.append( + self.get_grad_norm_direct(self.averaged_gradients[i], + self.params_in_partition[i])) + + if self.has_moe_layers: + self._average_expert_grad_norms(norm_groups) + + # note that the get_global_norm function only supports l2 norm + return get_global_norm(norm_list=norm_groups) + + def get_bit16_param_group(self, group_no): + bit16_partitions = self.parallel_partitioned_bit16_groups[group_no] + partition_id = dist.get_rank(group=self.real_dp_process_group[group_no]) + return [ + bit16_partitions[dist.get_rank(group=self.real_dp_process_group[group_no])] + ] + + def _optimizer_step(self, group_no): + original_param_groups = self.optimizer.param_groups + self.optimizer.param_groups = [original_param_groups[group_no]] + from deepspeed.ops.adam import DeepSpeedCPUAdam + if type(self.optimizer) == DeepSpeedCPUAdam and self.dtype == torch.half: + 
self.optimizer.step(fp16_param_groups=[self.get_bit16_param_group(group_no)]) + else: + self.optimizer.step() + self.optimizer.param_groups = original_param_groups + def step(self, closure=None): """ Not supporting closure. @@ -1646,16 +1777,14 @@ class DeepSpeedZeroOptimizer(object): prev_scale = self.loss_scale self._update_scale(self.overflow) if self.overflow: - if dist.get_rank() == 0: - logger.info( - "[deepspeed] OVERFLOW! Rank {} Skipping step. Attempted loss scale: {}, " - "reducing to {}".format(dist.get_rank(), - prev_scale, - self.loss_scale)) + overflow_msg = f"[deepspeed] OVERFLOW! Rank {dist.get_rank()} Skipping step." + if self.dtype == torch.half: + overflow_msg += f" Attempted loss scale: {prev_scale}, reducing to {self.loss_scale}" + logger.info(overflow_msg) see_memory_usage('After overflow before clearing gradients') - self.zero_grad() + self.zero_grad(set_to_none=True) if self.cpu_offload: self.reset_cpu_buffers() else: @@ -1667,23 +1796,34 @@ class DeepSpeedZeroOptimizer(object): self.stop_timers(timer_names) return - self.start_timers([OPTIMIZER_GRADIENTS]) - norm_groups = [] - single_partition_grad_groups = [] - skip = False + # Step 1:- Calculate gradient norm using fp-16 grads + see_memory_usage('Before norm calculation') + scaled_global_grad_norm = self.scaled_global_norm() + self._global_grad_norm = scaled_global_grad_norm / prev_scale + + see_memory_usage('After norm before optimizer') + # Step 2:- run optimizer and upscaling simultaneously for i, group in enumerate(self.bit16_groups): + self.start_timers([OPTIMIZER_GRADIENTS]) partition_id = dist.get_rank(group=self.real_dp_process_group[i]) if self.cpu_offload: - norm_groups.append( - self.complete_grad_norm_calculation_for_cpu_offload( - self.params_in_partition[i])) single_grad_partition = self.single_partition_of_fp32_groups[i].grad - else: - norm_groups.append( - self.get_grad_norm_direct(self.averaged_gradients[i], - self.params_in_partition[i])) + 
self.unscale_and_clip_grads([single_grad_partition], + scaled_global_grad_norm) + self.stop_timers([OPTIMIZER_GRADIENTS]) + self.start_timers([OPTIMIZER_STEP]) + self._optimizer_step(i) + + from deepspeed.ops.adam import DeepSpeedCPUAdam + if not (type(self.optimizer) == DeepSpeedCPUAdam + and self.dtype == torch.half): + bit16_partitions = self.parallel_partitioned_bit16_groups[i] + fp32_partition = self.single_partition_of_fp32_groups[i] + bit16_partitions[partition_id].data.copy_(fp32_partition.data) - # free gradients for all the parameters that are not updated by this process + self.stop_timers([OPTIMIZER_STEP]) + else: + # free gradients for all the parameters that are not updated by this process(ZeRO stage2) self.free_grad_in_param_list(self.params_not_in_partition[i]) # create a flat gradients for parameters updated by this process @@ -1702,55 +1842,33 @@ class DeepSpeedZeroOptimizer(object): single_grad_partition.numel(), self.partition_size[i], i, partition_id) self.single_partition_of_fp32_groups[i].grad = single_grad_partition - # release all the gradient since we have already created a necessary copy in dp_grad_partition + # release all the gradient since we have already created a necessary copy in dp_grad_partition(ZeRO stage2) self.free_grad_in_param_list(self.params_in_partition[i]) self.averaged_gradients[i] = None - single_partition_grad_groups.append(single_grad_partition) - - if self.has_moe_layers: - self._average_expert_grad_norms(norm_groups) - - scaled_global_grad_norm = get_global_norm(norm_list=norm_groups) - self.unscale_and_clip_grads(single_partition_grad_groups, - scaled_global_grad_norm) - - # Stash unscaled gradient norm - self._global_grad_norm = scaled_global_grad_norm / self.loss_scale - - self.stop_timers([OPTIMIZER_GRADIENTS]) - - self.start_timers([OPTIMIZER_STEP]) - if self.deepspeed_adam_offload: - from deepspeed.ops.adam import DeepSpeedCPUAdam - if type(self.optimizer) == DeepSpeedCPUAdam and self.dtype == torch.half: - 
bit16_param_groups = [[ - bit16_partitions[partition_id] - ] for bit16_partitions in self.parallel_partitioned_bit16_groups] - self.optimizer.step(fp16_param_groups=bit16_param_groups) - else: - self.optimizer.step() - for bit16_partitions, fp32_partition in zip(self.parallel_partitioned_bit16_groups, self.single_partition_of_fp32_groups): - bit16_partitions[partition_id].data.copy_(fp32_partition.data) - else: - self.optimizer.step() - - # get rid of the fp32 gradients. Not needed anymore - if not self.cpu_offload: - for group in self.single_partition_of_fp32_groups: - group.grad = None # in step - - for bit16_partitions, fp32_partition in zip(self.parallel_partitioned_bit16_groups, self.single_partition_of_fp32_groups): + self.unscale_and_clip_grads([single_grad_partition], + scaled_global_grad_norm) + self.stop_timers([OPTIMIZER_GRADIENTS]) + + # Step 3:- run the optimizer if no offloading + self.start_timers([OPTIMIZER_STEP]) + self._optimizer_step(i) + # Step 4:- get rid of the fp32 gradients. Not needed anymore + self.single_partition_of_fp32_groups[i].grad = None + del single_grad_partition + bit16_partitions = self.parallel_partitioned_bit16_groups[i] + fp32_partition = self.single_partition_of_fp32_groups[i] bit16_partitions[partition_id].data.copy_(fp32_partition.data) + self.stop_timers([OPTIMIZER_STEP]) - self.stop_timers([OPTIMIZER_STEP]) - + see_memory_usage('After optimizer before all-gather') if self.cpu_offload: self.reset_cpu_buffers() self.start_timers([OPTIMIZER_ALLGATHER]) - # gather the updated weights from everyone + # Gather the updated weights from everyone. + # Then all partitions of the model parameters are updated and ready for next round forward. all_gather_dp_groups( partitioned_param_groups=self.parallel_partitioned_bit16_groups, dp_process_group=self.real_dp_process_group, @@ -1760,7 +1878,7 @@ class DeepSpeedZeroOptimizer(object): self.stop_timers([OPTIMIZER_ALLGATHER]) # TODO: we probably don't need this? 
just to be safe - for i in range(len(norm_groups)): + for i in range(len(self.bit16_groups)): self._update_model_bit16_weights(i) self.log_timers(timer_names) @@ -1768,13 +1886,28 @@ class DeepSpeedZeroOptimizer(object): return + @torch.no_grad() + def update_lp_params(self): + for i, (bit16_partitions, fp32_partition) in enumerate(zip(self.parallel_partitioned_bit16_groups, self.single_partition_of_fp32_groups)): + partition_id = dist.get_rank(group=self.real_dp_process_group[i]) + bit16_partitions[partition_id].data.copy_(fp32_partition.data) + # print_rank_0(f'update_lp_params {i=} {partition_id=}', force=True) + # if i == 0: + # print_rank_0(f'{fp32_partition[:10]=}', force=True) + + all_gather_dp_groups( + partitioned_param_groups=self.parallel_partitioned_bit16_groups, + dp_process_group=self.real_dp_process_group, + start_alignment_factor=self.nccl_start_alignment_factor, + allgather_bucket_size=self.allgather_bucket_size) + def _average_expert_grad_norms(self, norm_groups): for i, norm in enumerate(norm_groups): if self.is_moe_param_group[i]: scaled_norm = norm * 1.0 / float( dist.get_world_size(group=self.real_dp_process_group[i])) scaled_norm_tensor = torch.tensor(scaled_norm, - device='cuda', + device=get_accelerator().device_name(), dtype=torch.float) dist.all_reduce(scaled_norm_tensor, group=self.real_dp_process_group[i]) norm_groups[i] = scaled_norm_tensor.item() @@ -1818,12 +1951,12 @@ class DeepSpeedZeroOptimizer(object): if partition_gradients: overflow = self.local_overflow if self.cpu_offload else self.has_overflow_partitioned_grads_serial( ) - overflow_gpu = torch.cuda.ByteTensor([overflow]) + overflow_gpu = get_accelerator().ByteTensor([overflow]) '''This will capture overflow across all data parallel and expert parallel process Since expert parallel process are a subset of data parallel process''' - torch.distributed.all_reduce(overflow_gpu, - op=torch.distributed.ReduceOp.MAX, - group=self.dp_process_group) + dist.all_reduce(overflow_gpu, + 
op=dist.ReduceOp.MAX, + group=self.dp_process_group) else: params = [] @@ -1832,12 +1965,11 @@ class DeepSpeedZeroOptimizer(object): params.append(param) overflow = self.has_overflow_serial(params, is_grad_list=partition_gradients) - overflow_gpu = torch.cuda.ByteTensor([overflow]) + overflow_gpu = get_accelerator().ByteTensor([overflow]) # Since each model parallel GPU carries only part of the model, # make sure overflow flag is synced across all the model parallel GPUs - self._model_parallel_all_reduce(tensor=overflow_gpu, - op=torch.distributed.ReduceOp.MAX) + self._model_parallel_all_reduce(tensor=overflow_gpu, op=dist.ReduceOp.MAX) overflow = overflow_gpu[0].item() return bool(overflow) @@ -1878,18 +2010,22 @@ class DeepSpeedZeroOptimizer(object): self.ipg_buffer = [] buf_0 = torch.empty(int(self.reduce_bucket_size), dtype=self.dtype, - device=torch.cuda.current_device()) + device=get_accelerator().current_device_name()) self.ipg_buffer.append(buf_0) # Use double buffers to avoid data access conflict when overlap_comm is enabled. 
if self.overlap_comm: buf_1 = torch.empty(int(self.reduce_bucket_size), dtype=self.dtype, - device=torch.cuda.current_device()) + device=get_accelerator().current_device_name()) self.ipg_buffer.append(buf_1) self.ipg_index = 0 - self.loss_scaler.backward(loss.float(), retain_graph=retain_graph) + if self.custom_loss_scaler: + scaled_loss = self.external_loss_scale * loss + scaled_loss.backward() + else: + self.loss_scaler.backward(loss.float(), retain_graph=retain_graph) def check_overflow(self, partition_gradients=True): self._check_overflow(partition_gradients) @@ -1918,7 +2054,10 @@ class DeepSpeedZeroOptimizer(object): # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale" def _get_loss_scale(self): - return self.loss_scaler.loss_scale + if self.custom_loss_scaler: + return self.external_loss_scale + else: + return self.loss_scaler.cur_scale def _set_loss_scale(self, value): self.loss_scaler.cur_scale = value @@ -1988,10 +2127,13 @@ class DeepSpeedZeroOptimizer(object): self.single_partition_of_fp32_groups) state_dict[SINGLE_PARTITION_OF_FP32_GROUPS] = fp32_groups_without_padding - state_dict[ZERO_STAGE] = ZERO_OPTIMIZATION_GRADIENTS + state_dict[ + ZERO_STAGE] = ZeroStageEnum.gradients if self.partition_gradients else ZeroStageEnum.optimizer_states + state_dict[GROUP_PADDINGS] = self.groups_padding state_dict[PARTITION_COUNT] = self.partition_count state_dict[DS_VERSION] = version + state_dict[PARAM_SLICE_MAPPINGS] = self._param_slice_mappings return state_dict @@ -2101,7 +2243,47 @@ class DeepSpeedZeroOptimizer(object): def load_state_dict(self, state_dict_list, load_optimizer_states=True, - load_from_fp32_weights=False): + load_from_fp32_weights=False, + checkpoint_folder=None): + if checkpoint_folder: + self._load_universal_checkpoint(checkpoint_folder, + load_optimizer_states, + load_from_fp32_weights) + else: + self._load_legacy_checkpoint(state_dict_list, + load_optimizer_states, + load_from_fp32_weights) + + def 
_load_universal_checkpoint(self, + checkpoint_folder, + load_optimizer_states, + load_from_fp32_weights): + self._load_hp_checkpoint_state(checkpoint_folder) + + @property + def param_groups(self): + """Forward the wrapped optimizer's parameters.""" + return self.optimizer.param_groups + + def _load_hp_checkpoint_state(self, checkpoint_dir): + checkpoint_dir = os.path.join(checkpoint_dir, "zero") + tp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu) + tp_world_size = self.mpu.get_slice_parallel_world_size() + + for i, _ in enumerate(self.optimizer.param_groups): + for lp in self.bit16_groups[i]: + if lp._hp_mapping is not None: + #print(f"Loading {self.param_names[lp]} {tp_rank=} {tp_world_size=}") + lp.load_hp_checkpoint_state( + os.path.join(checkpoint_dir, + self.param_names[lp]), + tp_rank, + tp_world_size) + + def _load_legacy_checkpoint(self, + state_dict_list, + load_optimizer_states=True, + load_from_fp32_weights=False): r"""Loading ZeRO checkpoint Arguments: @@ -2119,7 +2301,7 @@ class DeepSpeedZeroOptimizer(object): will call ``model.load_state_dict()`` before ``fp16_optimizer_instance.load_state_dict()`` is called. Example:: - model = torch.nn.Linear(D_in, D_out).cuda().half() + model = torch.nn.Linear(D_in, D_out).to(get_accelerator().device_name()).half() optimizer = torch.optim.SGD(model.parameters(), lr=1e-3) optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0) ... @@ -2151,6 +2333,16 @@ class DeepSpeedZeroOptimizer(object): ckpt_is_rigid = isinstance(current_rank_sd[BASE_OPTIMIZER_STATE], dict) + # padding is always at the last rank/partition + # if DP=1024 and param-group elems=16 -> padding will be 1024-16 across all but one rank + # scenario-1 (shrink): saving w. 4 gpus -> loading w. 2 gpus + # scenario-2 (expand): saving w. 2 gpus -> loading w. 
4 gpus + # if load_optimizer_states: + # if new_dp_size: + # self.strip_padding() + # self.add_padding_w_new_dp_size() + # self.optimizer.load_state_dict(current_rank_sd[BASE_OPTIMIZER_STATE]) + if load_optimizer_states: if ckpt_is_rigid: # loading rigid ckpt into either rigid or elastic exec @@ -2192,10 +2384,13 @@ class DeepSpeedZeroOptimizer(object): # option 1 from above self._restore_from_bit16_weights() + if load_optimizer_states: + self._link_all_hp_params() + def _handle_overflow(cpu_sum, x, i): import math - rank = torch.distributed.get_rank() + rank = dist.get_rank() if rank == 0: t_i = -1 for v_i, v in enumerate(x.data.contiguous().view(-1)): @@ -2287,8 +2482,8 @@ def estimate_zero2_model_states_mem_needs_all_cold(total_params, """ def format_options(cpu_offload): enabled = [] - device = f'{OFFLOAD_CPU_DEVICE:4}' if cpu_offload else "none" - enabled.append(f"{OFFLOAD_OPTIMIZER}={device}") + device = f'{OffloadDeviceEnum.cpu:4}' if cpu_offload else "none" + enabled.append(f"offload_optimizer={device}") return ", ".join(enabled) nodes_str = "nodes" if num_nodes > 1 else "node" diff --git a/deepspeed/runtime/zero/test.py b/deepspeed/runtime/zero/test.py index 29213d6..d13c247 100644 --- a/deepspeed/runtime/zero/test.py +++ b/deepspeed/runtime/zero/test.py @@ -1,3 +1,5 @@ +'''Copyright The Microsoft DeepSpeed Team''' + import torch from deepspeed.runtime.zero.contiguous_memory_allocator import ContiguousMemoryAllocator diff --git a/deepspeed/runtime/zero/tiling.py b/deepspeed/runtime/zero/tiling.py index 3a78253..d66caea 100644 --- a/deepspeed/runtime/zero/tiling.py +++ b/deepspeed/runtime/zero/tiling.py @@ -1,3 +1,5 @@ +'''Copyright The Microsoft DeepSpeed Team''' + import torch import deepspeed from deepspeed.runtime.utils import partition_uniform as partition diff --git a/deepspeed/runtime/zero/utils.py b/deepspeed/runtime/zero/utils.py old mode 100644 new mode 100755 index fff8ebd..eafddad --- a/deepspeed/runtime/zero/utils.py +++ 
b/deepspeed/runtime/zero/utils.py @@ -1,12 +1,15 @@ +'''Copyright The Microsoft DeepSpeed Team''' + import os from typing import List import torch -import torch.distributed as dist +from deepspeed import comm as dist from deepspeed.utils import logger from deepspeed.ops.adam import DeepSpeedCPUAdam from deepspeed.ops.adam import FusedAdam from deepspeed.utils.nvtx import instrument_w_nvtx +from deepspeed.accelerator import get_accelerator def _initialize_parameter_parallel_groups(parameter_parallel_size=None): @@ -21,7 +24,7 @@ def _initialize_parameter_parallel_groups(parameter_parallel_size=None): my_group = None for i in range(data_parallel_size // parameter_parallel_size): ranks = range(i * parameter_parallel_size, (i + 1) * parameter_parallel_size) - group = torch.distributed.new_group(ranks) + group = dist.new_group(ranks) if rank in ranks: my_group = group return my_group @@ -63,8 +66,8 @@ def get_lst_from_rank0(lst: List[int]) -> None: lst_tensor = torch.tensor( lst if dist.get_rank() == 0 else [-1] * len(lst), dtype=int, - # device=torch.cuda.current_device(), - device=torch.device('cuda:{}'.format(os.environ["LOCAL_RANK"])), + # device=get_accelerator().current_device_name(), + device=torch.device(get_accelerator().device_name(os.environ["LOCAL_RANK"])), requires_grad=False, ) dist.broadcast(lst_tensor, src=0, async_op=False) diff --git a/deepspeed/utils/__init__.py b/deepspeed/utils/__init__.py index efa4585..c48383c 100644 --- a/deepspeed/utils/__init__.py +++ b/deepspeed/utils/__init__.py @@ -1,5 +1,13 @@ +'''Copyright The Microsoft DeepSpeed Team''' + from .logging import logger, log_dist -from .distributed import init_distributed +from .comms_logging import get_caller_func +#from .distributed import init_distributed +from .init_on_device import OnDevice from .groups import * from .nvtx import instrument_w_nvtx +# TODO: Move tensor fragment and mixed precision to zero utils +from .tensor_fragment import tensor_fragment, get_full_hp_param, 
get_hp_fragment_mapping, fragment_address, get_full_hp_grad +from .tensor_fragment import safe_get_full_fp32_param, safe_get_full_grad, safe_get_full_optimizer_state +from .mixed_precision_linkage import link_hp_params from deepspeed.runtime.dataloader import RepeatingLoader diff --git a/deepspeed/utils/comms_logging.py b/deepspeed/utils/comms_logging.py new file mode 100644 index 0000000..d6b0410 --- /dev/null +++ b/deepspeed/utils/comms_logging.py @@ -0,0 +1,141 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +import math +from deepspeed.utils import log_dist + + +def get_caller_func(frame=3): + import sys + return sys._getframe(frame).f_code.co_name + + +# Helper function to pretty-print message sizes +def convert_size(size_bytes): + if size_bytes == 0: + return "0B" + size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") + i = int(math.floor(math.log(size_bytes, 1024))) + p = math.pow(1024, i) + s = round(size_bytes / p, 2) + return "%s %s" % (s, size_name[i]) + + +# Helper function to calculate algbw and busbw. 
+# See https://gist.github.com/jeffra/b5e80466b4c86be00ea3b6f130fb7a36 and https://github.com/NVIDIA/nccl-tests/blob/master/doc/PERFORMANCE.md +def calc_bw_log(comm_op, size, duration): + import deepspeed.comm as dist + + n = dist.get_world_size() + tput = 0 + busbw = 0 + if comm_op == "all_to_all_single": + tput = (size / duration) + busbw = (size / duration) * ((n - 1) / n) + elif comm_op == "all_gather" or comm_op == "all_gather_base" or comm_op == "reduce_scatter" or comm_op == "reduce_scatter_base": + size *= n + tput = (size / duration) + busbw = (size / duration) * ((n - 1) / n) + elif comm_op == "all_reduce": + tput = (size * 2 / duration) + busbw = (size / duration) * (2 * (n - 1) / n) + elif comm_op == "send" or comm_op == "recv" or comm_op == "isend" or comm_op == "irecv" or comm_op == "broadcast" or comm_op == "reduce" or comm_op == "gather" or comm_op == "scatter" or comm_op == "barrier": + tput = (size / duration) + busbw = tput + else: + print_rank_0("wrong comm_op specified") # noqa: F821 + exit(0) + + # convert to Gbps + tput *= 8 + busbw *= 8 + + tput /= 1e6 + busbw /= 1e6 + + return tput, busbw + + +class CommsLogger: + def __init__(self): + from deepspeed.comm.constants import COMMS_LOGGER_VERBOSE_DEFAULT, COMMS_LOGGER_DEBUG_DEFAULT, COMMS_LOGGER_PROF_OPS_DEFAULT, COMMS_LOGGER_PROF_ALL_DEFAULT, COMMS_LOGGER_ENABLED_DEFAULT + self.comms_dict = {} + self.verbose = COMMS_LOGGER_VERBOSE_DEFAULT + self.debug = COMMS_LOGGER_DEBUG_DEFAULT + self.prof_ops = COMMS_LOGGER_PROF_OPS_DEFAULT + self.prof_all = COMMS_LOGGER_PROF_ALL_DEFAULT + self.enabled = COMMS_LOGGER_ENABLED_DEFAULT + + def configure(self, comms_config): + self.enabled = comms_config.comms_logger_enabled + if self.enabled: + self.verbose = comms_config.comms_logger.verbose + self.debug = comms_config.comms_logger.debug + self.prof_ops = comms_config.comms_logger.prof_ops + self.prof_all = comms_config.comms_logger.prof_all + + # There are three settings for the op profiler: + # - Global 
profiling (profile all comms) + # - Op-type profiling (e.g. profile all all_reduce comms) + # - Op profiling (e.g. profile a specific all_reduce op) + def start_profiling_comms(self): + self.prof_all = True + + def stop_profiling_comms(self): + self.prof_all = False + + # E.g. start_profiling_op('all_reduce') + def start_profiling_op(self, op_name_list): + self.prof_ops = list(set(self.prof_ops) | set(op_name_list)) + + def stop_profiling_op(self, op_name_list): + self.prof_ops = [op for op in self.prof_ops if op not in op_name_list] + + # Add log entry + def append(self, raw_name, record_name, latency, msg_size): + import deepspeed.comm as dist + algbw, busbw = calc_bw_log(raw_name, msg_size, latency) + if record_name in self.comms_dict.keys(): + # If this comm_op has already been logged with this message size, just add to existing record + if msg_size in self.comms_dict[record_name].keys(): + self.comms_dict[record_name][msg_size][0] += 1 + self.comms_dict[record_name][msg_size][1].append(latency) + self.comms_dict[record_name][msg_size][2].append(algbw) + self.comms_dict[record_name][msg_size][3].append(busbw) + # If this is a new message size for this comm_op, add new record under existing comm_op + else: + self.comms_dict[record_name][msg_size] = [1, [latency], [algbw], [busbw]] + else: + # Create entirely new record + self.comms_dict[record_name] = {msg_size: [1, [latency], [algbw], [busbw]]} + # If verbose, print every comm op + # TODO: Add to tensorboard + if self.verbose: + n = dist.get_world_size() + log_str = f"rank={dist.get_rank()} | comm op: " + record_name + " | time (ms): {:.2f}".format( + latency) + log_str += " | msg size: " + convert_size(msg_size) + log_str += " | algbw (Gbps): {:.2f} ".format(algbw) + log_str += " | busbw (Gbps): {:.2f} ".format(busbw) + log_dist(log_str, [0]) + + # Print summary at end of iteration, epoch, or training + def log_all(self): + from deepspeed.utils.timer import trim_mean + print( + f"{'Comm. 
Op': <20}{'Message Size': <20}{'Count': <20}{'Total Latency(ms)': <20}{'Avg Latency(ms)': <20}{'tput_avg (Gbps)': <20}{'busbw_avg (Gbps)': <20}" + ) + for record_name in self.comms_dict.keys(): + print(record_name) + for msg_size, vals in sorted(self.comms_dict[record_name].items()): + # vals[0] is the count for each msg size + count = vals[0] + # vals[1] is a list of latency records for each msg size + total_lat = sum(vals[1]) + # vals[2] and vals[3] are the lists of algbw and busbw, respectively + # Get rid of outliers when we print + avg_lat = trim_mean(vals[1], 0.1) + avg_algbw = trim_mean(vals[2], 0.1) + avg_busbw = trim_mean(vals[3], 0.1) + print( + f"{' ': <20}{convert_size(msg_size): <20}{count: <20}{total_lat: <20.2f}{avg_lat: <20.2f}{avg_algbw: <20.2f}{avg_busbw: <20.2f}" + ) diff --git a/deepspeed/utils/debug.py b/deepspeed/utils/debug.py index 358e997..f3f3ea5 100644 --- a/deepspeed/utils/debug.py +++ b/deepspeed/utils/debug.py @@ -1,6 +1,8 @@ +'''Copyright The Microsoft DeepSpeed Team''' """ debug utils """ -import fcntl +# For lazy import with printflock() +fcntl = None # for debug purposes map module and param objects to their fully qualified names module_names = {} @@ -69,7 +71,7 @@ def printflock(*msgs): 2. Override the usual versions with :: def print_rank_0(message, debug=False, force=False): - rank = torch.distributed.get_rank() + rank = deepspeed.comm.get_rank() printflock(f"[{rank}] {message}") 3. run the program and you get both logs non-interleaved @@ -78,6 +80,9 @@ def printflock(*msgs): then compare those. """ + global fcntl + if fcntl == None: + import fcntl with open(__file__, "r") as fh: fcntl.flock(fh, fcntl.LOCK_EX) @@ -100,7 +105,7 @@ def log_rank_file(rank, *msgs): 2. Override the usual versions of print_rank_0 in those files with :: def print_rank_0(message, debug=False, force=False): - rank = torch.distributed.get_rank() + rank = deepspeed.comm.get_rank() log_rank_file(rank, message) 3. 
run the program diff --git a/deepspeed/utils/groups.py b/deepspeed/utils/groups.py index 34fd96f..a4d403a 100644 --- a/deepspeed/utils/groups.py +++ b/deepspeed/utils/groups.py @@ -18,25 +18,27 @@ Copyright 2021 The Microsoft DeepSpeed Team # See the License for the specific language governing permissions and # limitations under the License. """ - Support different forms of parallelism in DeepSped using multiple process groups. + Support different forms of parallelism in DeepSpeed using multiple process groups. Given that there are multiple scenarios and use-cases, this file is going to be updated frequently. For now, the group creation needed for the training scenario is being implemented. For inference and other new scenarios, the code will be either reused or added to this file. """ -import torch -from torch.distributed.distributed_c10d import _get_global_rank -from deepspeed.utils import logger, log_dist +from deepspeed import comm as dist + +from deepspeed.utils import log_dist from deepspeed.utils.exceptions import DeprecatedException # Expert parallel group that the current rank belongs to. _EXPERT_PARALLEL_GROUP = {} # Expert data parallel group that the current rank belongs to. _EXPERT_DATA_PARALLEL_GROUP = {} -# torch.distributed world group needs to be cloned for some cases +# dist world group needs to be cloned for some cases _WORLD_GROUP = None # global object to maintain mpu object if passed by a Megatron client mpu = None +# global object that stores tensor parallel world size for experts +expert_tensor_parallel_world_size = 1 # Deprecated groups initialize function. @@ -79,32 +81,32 @@ def _create_model_parallel(model_parallel_size_): log_dist(f'Creating model parallel group with size {model_parallel_size_}', ranks=[0]) # Get world size and rank. Ensure some consistencies. 
- assert torch.distributed.is_initialized() - world_size = torch.distributed.get_world_size() + assert dist.is_initialized() + world_size = dist.get_world_size() model_parallel_size = min(model_parallel_size_, world_size) _ensure_divisibility(world_size, model_parallel_size) - rank = torch.distributed.get_rank() + rank = dist.get_rank() _DATA_PARALLEL_GROUP = None _MODEL_PARALLEL_GROUP = None # Build the data parallel groups. for i in range(model_parallel_size): ranks = range(i, world_size, model_parallel_size) - group = torch.distributed.new_group(ranks) + group = dist.new_group(ranks) if i == (rank % model_parallel_size): _DATA_PARALLEL_GROUP = group # Build the model parallel groups. for i in range(world_size // model_parallel_size): ranks = range(i * model_parallel_size, (i + 1) * model_parallel_size) - group = torch.distributed.new_group(ranks) + group = dist.new_group(ranks) if i == (rank // model_parallel_size): _MODEL_PARALLEL_GROUP = group return _DATA_PARALLEL_GROUP, _MODEL_PARALLEL_GROUP -def _create_expert_and_data_parallel(ep_size): +def _create_expert_and_data_parallel(expert_parallel_size_): """ Create expert and data parallel groups. 
@@ -117,13 +119,14 @@ def _create_expert_and_data_parallel(ep_size): expert_parallel_group = [0, 1], [2,3], [4,5], [6,7], [8,9] - no all reduce, but all to all data_parallel_group = [0,1,...,15] - all reduce is only on non-MoE """ - assert torch.distributed.is_initialized() + assert dist.is_initialized() - log_dist(f'Creating expert and data parallel groups with size {ep_size}', ranks=[0]) - world_size = torch.distributed.get_world_size() - rank = torch.distributed.get_rank() + log_dist( + f'Creating expert and data parallel groups with size {expert_parallel_size_}', + ranks=[0]) + world_size = dist.get_world_size() + rank = dist.get_rank() - expert_parallel_size_ = min(ep_size, world_size) _ensure_divisibility(world_size, expert_parallel_size_) group_name = f"ep_size_{expert_parallel_size_}" @@ -135,7 +138,7 @@ def _create_expert_and_data_parallel(ep_size): if group_name not in _EXPERT_DATA_PARALLEL_GROUP: for i in range(expert_parallel_size_): ranks = range(i, world_size, expert_parallel_size_) - group = torch.distributed.new_group(ranks) + group = dist.new_group(ranks) log_dist( f'Creating expert data parallel process group named {group_name} with ranks: {list(ranks)}', [0]) @@ -149,7 +152,7 @@ def _create_expert_and_data_parallel(ep_size): if group_name not in _EXPERT_PARALLEL_GROUP: for i in range(world_size // expert_parallel_size_): ranks = range(i * expert_parallel_size_, (i + 1) * expert_parallel_size_) - group = torch.distributed.new_group(ranks) + group = dist.new_group(ranks) log_dist( f'creating expert parallel process group named {group_name} with ranks: {list(ranks)}', [0]) @@ -218,14 +221,20 @@ def _create_expert_data_and_model_parallel(expert_parallel_size_, mpu): expert_parallel_group = [0,2,4,6], [8,10,12,14] [1,3,5,7], [9,11,13,15] expert_data_parallel_group = [0,8],[2,10],[4,12],[6,14], [1,9],[3,11],[5,13],[7,15] """ - assert torch.distributed.is_initialized(), "torch distributed is not initialized" + assert dist.is_initialized(), "dist is not 
initialized" model_parallel_size_ = mpu.get_model_parallel_world_size() - world_size = torch.distributed.get_world_size() - rank = torch.distributed.get_rank() + global expert_tensor_parallel_world_size + expert_tensor_parallel_world_size = model_parallel_size_ + + world_size = dist.get_world_size() + rank = dist.get_rank() dp_world_size = mpu.get_data_parallel_world_size() dp_rank = mpu.get_data_parallel_rank() + _ensure_divisibility(world_size, model_parallel_size_) + _ensure_divisibility(dp_world_size, expert_parallel_size_) + log_dist( f"Creating deepspeed groups with model parallel size {model_parallel_size_}, expert parallel size {expert_parallel_size_}, world size {world_size}, dp world size {dp_world_size}", [0]) @@ -236,9 +245,6 @@ def _create_expert_data_and_model_parallel(expert_parallel_size_, mpu): _DATA_PARALLEL_GROUP = mpu.get_data_parallel_group() _MODEL_PARALLEL_GROUP = mpu.get_model_parallel_group() - expert_parallel_size_ = min(expert_parallel_size_, dp_world_size) - _ensure_divisibility(world_size, expert_parallel_size_) - group_name = f"ep_size_{expert_parallel_size_}" # Only create groups if they don't already exist @@ -247,12 +253,12 @@ def _create_expert_data_and_model_parallel(expert_parallel_size_, mpu): expert_parallel_groups, expert_data_parallel_groups = _get_expert_parallel_ranks( world_size, model_parallel_size_, expert_parallel_size_) for ranks in expert_parallel_groups: - group = torch.distributed.new_group(ranks) + group = dist.new_group(ranks) if rank in list(ranks): _EXPERT_PARALLEL_GROUP[group_name] = group for ranks in expert_data_parallel_groups: - group = torch.distributed.new_group(ranks) + group = dist.new_group(ranks) if rank in list(ranks): _EXPERT_DATA_PARALLEL_GROUP[group_name] = group @@ -304,66 +310,64 @@ def _get_expert_data_parallel_group_dict(): def _clone_world_group(): """Create a clone of the world group - Note: We need to clone the torch.distributed world group because we - use _get_global_rank() utility 
function in DeepSpeed at many places. - As that function does not work on torch.distributed.group.WORLD, we + Note: We need to clone the dist world group because we + use dist.get_global_rank() utility function in DeepSpeed at many places. + As that function does not work on dist.group.WORLD, we need to keep a clone of it. """ - assert torch.distributed.is_initialized(), "torch.distributed is not initialized" + assert dist.is_initialized(), "dist is not initialized" global _WORLD_GROUP if _WORLD_GROUP is None: # If not cloned already, clone the world group - _WORLD_GROUP = torch.distributed.new_group( - ranks=range(torch.distributed.get_world_size())) + _WORLD_GROUP = dist.new_group(ranks=range(dist.get_world_size())) return _WORLD_GROUP def _get_data_parallel_group(): """Get the data parallel group the caller rank belongs to.""" - assert torch.distributed.is_initialized(), \ - 'torch.distributed is not initialized' + assert dist.is_initialized(), \ + 'dist is not initialized' global mpu if mpu is not None: return mpu.get_data_parallel_group() - # Return the clone of torch.distributed world group + # Return the clone of dist world group return _clone_world_group() def _get_broadcast_src_rank(): - return _get_global_rank(_get_data_parallel_group(), 0) + return dist.get_global_rank(_get_data_parallel_group(), 0) def _get_expert_broadcast_src_rank(group_name): - return _get_global_rank(_get_expert_data_parallel_group(group_name), 0) + return dist.get_global_rank(_get_expert_data_parallel_group(group_name), 0) def _get_expert_parallel_world_size(group_name): """Return world size for the expert parallel group.""" - return torch.distributed.get_world_size(group=_get_expert_parallel_group(group_name)) + return dist.get_world_size(group=_get_expert_parallel_group(group_name)) def _get_expert_data_parallel_world_size(group_name): """Return world size for the expert data parallel group.""" - return torch.distributed.get_world_size( - 
group=_get_expert_data_parallel_group(group_name)) + return dist.get_world_size(group=_get_expert_data_parallel_group(group_name)) def _get_expert_parallel_rank(group_name): """Return my rank for the expert parallel group.""" - return torch.distributed.get_rank(group=_get_expert_parallel_group(group_name)) + return dist.get_rank(group=_get_expert_parallel_group(group_name)) def _get_expert_parallel_src_rank(group_name): """Calculate the global rank corresponding to a local rank zero in the expert parallel group.""" - global_rank = torch.distributed.get_rank() + global_rank = dist.get_rank() local_world_size = _get_expert_parallel_world_size(group_name) return (global_rank // local_world_size) * local_world_size def _get_expert_data_parallel_rank(group_name): """Return my rank for the expert data parallel group.""" - return torch.distributed.get_rank(group=_get_expert_data_parallel_group(group_name)) + return dist.get_rank(group=_get_expert_data_parallel_group(group_name)) def _get_data_parallel_world_size(): @@ -371,7 +375,7 @@ def _get_data_parallel_world_size(): global mpu if mpu is not None: return mpu.get_data_parallel_world_size() - return torch.distributed.get_world_size(group=_get_data_parallel_group()) + return dist.get_world_size(group=_get_data_parallel_group()) def _get_model_parallel_world_size(): @@ -387,4 +391,9 @@ def _get_data_parallel_rank(): global mpu if mpu is not None: return mpu.get_data_parallel_rank() - return torch.distributed.get_rank(group=_get_data_parallel_group()) + return dist.get_rank(group=_get_data_parallel_group()) + + +def _get_expert_model_parallel_world_size(): + global expert_tensor_parallel_world_size + return expert_tensor_parallel_world_size diff --git a/deepspeed/utils/init_on_device.py b/deepspeed/utils/init_on_device.py new file mode 100644 index 0000000..1b51efe --- /dev/null +++ b/deepspeed/utils/init_on_device.py @@ -0,0 +1,81 @@ +''' +Copyright 2020 The Microsoft DeepSpeed Team +''' +import torch +from typing import 
Callable +from torch import Tensor +from packaging import version as pkg_version + + +class OnDevice(object): + """ + Create modules/tensors w. specific devices and dtypes. Examples: + + Create MyModule which consists of many different sub-modules and parameters. In this case we can create + MyModule as a collection of 'meta' tensors by passing `device='meta'` or we can create the module _directly_ + on a CUDA device by passing `device=f'cuda:{local_rank}'` (where `local_rank` is the local GPU id. + + with OnDevice(dtype=torch.float16, device='meta'): + model = MyModel() + + with OnDevice(dtype=torch.float16, device=f'cuda:{local_rank}'): + model = MyModel() + + """ + + _orig_torch_empty = torch.empty + _orig_torch_zeros = torch.zeros + _orig_torch_ones = torch.ones + _orig_torch_full = torch.full + + def __init__(self, dtype, device="meta", enabled=True): + self.dtype = dtype + self.enabled = enabled + self.device = device + + if device == "meta": + if pkg_version.parse('1.10') > pkg_version.parse(torch.__version__): + raise NotImplementedError( + "Meta tensor support is not available, please upgrade to torch 1.10+" + ) + + def fp_tensor_constructor(self, + fn: Callable, + target_fp_dtype: torch.dtype) -> Callable: + def wrapped_fn(*args, **kwargs) -> Tensor: + if kwargs.get("device", None) is None: + kwargs['device'] = self.device + tensor: Tensor = fn(*args, **kwargs) + if tensor.is_floating_point(): + tensor = tensor.to(target_fp_dtype) + return tensor + + return wrapped_fn + + def get_new_tensor_fn_for_dtype(self, dtype: torch.dtype) -> Callable: + def new_tensor(cls, *args) -> Tensor: + tensor = OnDevice._orig_torch_empty(0, device=self.device).new_empty(*args) + if tensor.is_floating_point(): + tensor = tensor.to(dtype) + return tensor + + return new_tensor + + def __enter__(self): + if not self.enabled: + return + torch.Tensor.__old_new__ = torch.Tensor.__new__ + torch.Tensor.__new__ = self.get_new_tensor_fn_for_dtype(self.dtype) + torch.empty = 
self.fp_tensor_constructor(self._orig_torch_empty, self.dtype) + torch.zeros = self.fp_tensor_constructor(self._orig_torch_zeros, self.dtype) + torch.ones = self.fp_tensor_constructor(self._orig_torch_ones, self.dtype) + torch.full = self.fp_tensor_constructor(self._orig_torch_full, self.dtype) + + def __exit__(self, exc_type, exc_value, traceback): + if not self.enabled: + return + torch.Tensor.__new__ = torch.Tensor.__old_new__ + torch.empty = self._orig_torch_empty + torch.zeros = self._orig_torch_zeros + torch.ones = self._orig_torch_ones + torch.full = self._orig_torch_full diff --git a/deepspeed/utils/logging.py b/deepspeed/utils/logging.py index 3a089b3..631ee21 100644 --- a/deepspeed/utils/logging.py +++ b/deepspeed/utils/logging.py @@ -1,9 +1,9 @@ +'''Copyright The Microsoft DeepSpeed Team''' + import logging import sys import os -import torch.distributed as dist - log_levels = { "debug": logging.DEBUG, "info": logging.INFO, @@ -46,7 +46,15 @@ class LoggerFactory: logger = LoggerFactory.create_logger(name="DeepSpeed", level=logging.INFO) +def print_configuration(args, name): + logger.info("{}:".format(name)) + for arg in sorted(vars(args)): + dots = "." 
* (29 - len(arg)) + logger.info(" {} {} {}".format(arg, dots, getattr(args, arg))) + + def log_dist(message, ranks=None, level=logging.INFO): + from deepspeed import comm as dist """Log message when one of following condition meets + not dist.is_initialized() @@ -70,6 +78,7 @@ def log_dist(message, ranks=None, level=logging.INFO): def print_json_dist(message, ranks=None, path=None): + from deepspeed import comm as dist """Print message when one of following condition meets + not dist.is_initialized() diff --git a/deepspeed/utils/mixed_precision_linkage.py b/deepspeed/utils/mixed_precision_linkage.py new file mode 100644 index 0000000..ac785ed --- /dev/null +++ b/deepspeed/utils/mixed_precision_linkage.py @@ -0,0 +1,58 @@ +""" +Copyright 2022 The Microsoft DeepSpeed Team +""" +import types +from deepspeed.utils import get_full_hp_param, get_full_hp_grad, get_hp_fragment_mapping + + +def link_hp_params(lp_param_list, + flat_hp_partition, + gradient_dict, + offload_gradient_dict, + use_offload, + param_group_index, + partition_start, + partition_size, + partition_optimizer_state, + dp_group): + local_lp_param_and_offset = _init_lp_to_hp_mapping(lp_param_list, + partition_start, + partition_size, + dp_group) + + for lp_param, lp_start in local_lp_param_and_offset: + lp_param._hp_mapping = get_hp_fragment_mapping(lp_param, + lp_start, + flat_hp_partition, + gradient_dict, + offload_gradient_dict, + use_offload, + param_group_index, + partition_start, + partition_size, + partition_optimizer_state) + + +def _init_lp_to_hp_mapping(lp_param_list, partition_start, partition_size, dp_group): + current_offset = 0 + param_and_offset_list = [] + partition_end = partition_start + partition_size + index_in_param_group = 0 + for i, lp_param in enumerate(lp_param_list): + lp_param._hp_mapping = None + lp_param._dp_group = dp_group + lp_param.get_full_hp_param = types.MethodType(get_full_hp_param, lp_param) + lp_param.get_full_hp_grad = types.MethodType(get_full_hp_grad, lp_param) + 
+ # lp_param overlaps with partition if both are true + # 1) current_offset < partition_end, + # 2) current_offset + lp_param.numel() >= partition_start + lp_param_end = current_offset + lp_param.numel() + if current_offset < partition_end and lp_param_end > partition_start: + param_and_offset_list.append((lp_param, current_offset)) + lp_param._index_in_param_group = index_in_param_group + # Indices for params in this partition/GPU + index_in_param_group += 1 + current_offset += lp_param.numel() + + return param_and_offset_list diff --git a/deepspeed/utils/nvtx.py b/deepspeed/utils/nvtx.py index 11579ff..8810003 100644 --- a/deepspeed/utils/nvtx.py +++ b/deepspeed/utils/nvtx.py @@ -1,15 +1,15 @@ -import torch +'''Copyright The Microsoft DeepSpeed Team''' + +from deepspeed.accelerator import get_accelerator def instrument_w_nvtx(func): """decorator that causes an NVTX range to be recorded for the duration of the function call.""" - if hasattr(torch.cuda.nvtx, "range"): - - def wrapped_fn(*args, **kwargs): - with torch.cuda.nvtx.range(func.__qualname__): - return func(*args, **kwargs) + def wrapped_fn(*args, **kwargs): + get_accelerator().range_push(func.__qualname__) + ret_val = func(*args, **kwargs) + get_accelerator().range_pop() + return ret_val - return wrapped_fn - else: - return func + return wrapped_fn diff --git a/deepspeed/utils/tensor_fragment.py b/deepspeed/utils/tensor_fragment.py new file mode 100644 index 0000000..8204735 --- /dev/null +++ b/deepspeed/utils/tensor_fragment.py @@ -0,0 +1,284 @@ +""" +Copyright 2022 The Microsoft DeepSpeed Team +""" + +import torch +from dataclasses import dataclass +from deepspeed import comm as dist + + +@dataclass +class fragment_address: + numel: int + start: int + + +@dataclass +class tensor_fragment: + lp_fragment: torch.Tensor + lp_fragment_address: fragment_address + hp_fragment: torch.Tensor + hp_fragment_address: fragment_address + optim_fragment: {} + gradient_dict: {} + offload_gradient_dict: {} + 
use_offload: bool + param_group_index: int + + def update_hp(self): + self.hp_fragment.data.copy_(self.lp_fragment.data) + + def update_lp(self): + self.lp_fragment.data.copy_(self.hp_fragment.data) + + def get_optim_state_fragment(self, key): + if key in self.optim_fragment: + return self.optim_fragment[key] + else: + raise ValueError(f'{key} not found in optimizer state fragment') + + def get_hp_fragment_address(self): + return self.hp_fragment_address + + def get_optim_state_keys(self): + return list(self.optim_fragment.keys()) + + +def get_full_hp_param(self, optim_state_key=None): + reduce_buffer = torch.zeros_like(self, dtype=torch.float32).flatten() + if self._hp_mapping is not None: + lp_frag_address = self._hp_mapping.lp_fragment_address + reduce_fragment = torch.narrow(reduce_buffer, + 0, + lp_frag_address.start, + lp_frag_address.numel) + if optim_state_key is None: + hp_fragment = self._hp_mapping.hp_fragment + else: + hp_fragment = self._hp_mapping.get_optim_state_fragment(optim_state_key) + + reduce_fragment.data.copy_(hp_fragment.data) + dist.all_reduce(reduce_buffer, group=self._dp_group) + return reduce_buffer.reshape_as(self) + + +def get_full_hp_grad(self): + reduce_buffer = torch.zeros_like(self, dtype=torch.float32).flatten() + if self._hp_mapping is not None: + hp_mapping = self._hp_mapping + + if hp_mapping.use_offload: + gradient_dict = hp_mapping.offload_gradient_dict + else: + gradient_dict = hp_mapping.gradient_dict + + if hp_mapping.param_group_index not in gradient_dict or gradient_dict[ + hp_mapping.param_group_index] is None: + raise ValueError( + "Gradients are only available immediately after backward and before engine step" + ) + + lp_grad_fragment = gradient_dict[hp_mapping.param_group_index][ + self._index_in_param_group] + hp_grad_fragment = lp_grad_fragment.to(torch.float32).flatten() + + lp_frag_address = self._hp_mapping.lp_fragment_address + reduce_fragment = torch.narrow(reduce_buffer, + 0, + lp_frag_address.start, + 
lp_frag_address.numel) + + if self.view(-1).shape == hp_grad_fragment.shape: + reduce_buffer.data.copy_(hp_grad_fragment.data) + else: + reduce_fragment.data.copy_(hp_grad_fragment.data) + + dist.all_reduce(reduce_buffer, group=self._dp_group) + return reduce_buffer.reshape_as(self) + + +def safe_get_full_fp32_param(param): + """Assemble and return the fp32 parameter of a low-precision (e.g., fp16) parameter. + + Args: + param (``torch.nn.Parameter``): A model parameter + """ + # ZeRO stage 3 param + if hasattr(param, 'ds_id'): + return param._z3_optimizer.get_full_hp_param(param) + + # ZeRO stage 1, 2, and bf16_optimizer params + if hasattr(param, '_hp_mapping'): + return param.get_full_hp_param() + return None + + +def safe_get_full_optimizer_state(param, optim_state_key): + """Assemble and return the fp32 optimizer state of a low-precision (e.g., fp16) parameter. + + Args: + param (``torch.nn.Parameter``): A model parameter + """ + # ZeRO stage 3 param + if hasattr(param, 'ds_id'): + return param._z3_optimizer.get_full_hp_param(param, optim_state_key) + + # ZeRO stage 1, 2, and bf16_optimizer params + if hasattr(param, '_hp_mapping'): + return param.get_full_hp_param(optim_state_key) + return None + + +# TODO: Figure out the correct return dtype +def safe_get_full_grad(param): + """Assemble and return the fp32 gradient of a low-precision (e.g., fp16) parameter. 
+ + Args: + param (``torch.nn.Parameter``): A model parameter + """ + if param.grad is not None: + return param.grad + + # ZeRO stage 3 param + if hasattr(param, 'ds_id'): + return param._z3_optimizer.get_fp32_grad_for_param(param) + + # ZeRO stage 1, 2, and bf16_optimizer params + if hasattr(param, '_hp_mapping'): + return param.get_full_hp_grad() + + return None + + +def get_hp_fragment_mapping(lp_param, + lp_start, + flat_hp_partition, + gradient_dict, + offload_gradient_dict, + use_offload, + param_group_index, + partition_start, + partition_size, + optimizer_state_dict): + lp_end = lp_param.numel() + lp_start + hp_start = partition_start + hp_end = partition_start + partition_size + + fragment_start = max(lp_start, hp_start) + fragment_end = min(lp_end, hp_end) + assert fragment_start < fragment_end, \ + f'fragment start {fragment_start} should be < fragment_end {fragment_end}' + + fragment_numel = fragment_end - fragment_start + hp_frag_address = fragment_address(start=fragment_start - hp_start, + numel=fragment_numel) + hp_fragment_tensor = flat_hp_partition.narrow(0, + hp_frag_address.start, + hp_frag_address.numel) + optim_fragment = { + key: value.narrow(0, + hp_frag_address.start, + hp_frag_address.numel) + for key, + value in optimizer_state_dict.items() + if torch.is_tensor(value) and value.shape == flat_hp_partition.shape + } + + lp_frag_address = fragment_address(start=fragment_start - lp_start, + numel=fragment_numel) + lp_fragment_tensor = lp_param.flatten().narrow(0, + lp_frag_address.start, + lp_frag_address.numel) + + return tensor_fragment(lp_fragment=lp_fragment_tensor, + lp_fragment_address=lp_frag_address, + hp_fragment=hp_fragment_tensor, + hp_fragment_address=hp_frag_address, + optim_fragment=optim_fragment, + gradient_dict=gradient_dict, + offload_gradient_dict=offload_gradient_dict, + use_offload=use_offload, + param_group_index=param_group_index) + + +''' +Logic for lp_param to hp_param mapping + +lp lp0 lp1 lp2 lp3 lp4 <------- 
indices/names +lp [ ][ ][ ][ ][ ] <-------- tensors +flat_lp [ ] <-------- flat lp params +flat_hp [ ] <------------------ flat hp partition on current rank +full_hp [ ] <------- full flat hp params + + +lp2 + full numel = 16 + lp_frag + numel = 12 + frag_start = 3 + frag_end = 15 + hp_frag + numel = 12 + frag_start = 0 + frag_end = 11 + + hp_frag.copy_(lp_frag) + + +lp3: + full numel = 4 + lp_frag + numel = 4 + start = 0 + end = 3 + hp_frag + numel = 4 + start = 12 + end = 15 + + +lp4: + full numel = 12 + lp_frag + numel = 4 + start = 0 + end = 3 + hp_frag + numel = 4 + start = 16 + end = 19 + + + +Visual depiction of above +lp { } +flat_lp [ ] +flat_hp ( ) + + +flat_lp [ { ( } ) ] + lx hx ly hy + ly-hx + + +lp { } +flat_lp [ ] +flat_hp ( ) + + +flat_lp [ ( { ) } ] + hx lx hy ly + hy-lx + +lp { } +flat_lp [ ] +flat_hp ( ) + + +flat_lp [ ( { } ) ] + hx lx ly hy + ly-lx + +lp -> (lx, hy) +flat_hp -> (hx, hy) +''' diff --git a/deepspeed/utils/timer.py b/deepspeed/utils/timer.py old mode 100644 new mode 100755 index 75b9bcc..16865c5 --- a/deepspeed/utils/timer.py +++ b/deepspeed/utils/timer.py @@ -2,14 +2,11 @@ Copyright 2019 The Microsoft DeepSpeed Team """ -from numpy.core.numeric import count_nonzero -from deepspeed.elasticity.elasticity import compute_elastic_config import time -import torch from numpy import mean from deepspeed.utils.logging import log_dist - -from deepspeed.utils import logger +from deepspeed.accelerator import get_accelerator +from deepspeed import comm as dist try: import psutil @@ -21,12 +18,14 @@ except ImportError: class CudaEventTimer(object): - def __init__(self, start_event: torch.cuda.Event, end_event: torch.cuda.Event): + def __init__(self, + start_event: get_accelerator().Event, + end_event: get_accelerator().Event): self.start_event = start_event self.end_event = end_event def get_elapsed_msec(self): - torch.cuda.current_stream().wait_event(self.end_event) + get_accelerator().current_stream().wait_event(self.end_event) 
self.end_event.synchronize() return self.start_event.elapsed_time(self.end_event) @@ -44,15 +43,15 @@ class SynchronizedWallClockTimer: def start(self): """Start the timer.""" - assert not self.started_, f"{self.name} timer has already been started" - self.start_event = torch.cuda.Event(enable_timing=True) + assert not self.started_, f"{self.name_} timer has already been started" + self.start_event = get_accelerator().Event(enable_timing=True) self.start_event.record() self.started_ = True def stop(self, reset=False, record=False): """Stop the timer.""" assert self.started_, "timer is not started" - end_event = torch.cuda.Event(enable_timing=True) + end_event = get_accelerator().Event(enable_timing=True) end_event.record() self.event_timers.append(CudaEventTimer(self.start_event, end_event)) self.start_event = None @@ -87,11 +86,15 @@ class SynchronizedWallClockTimer: return elapsed_ def mean(self): + self.elapsed(reset=False) return trim_mean(self.elapsed_records, 0.1) def __init__(self): self.timers = {} + def get_timers(self): + return self.timers + def __call__(self, name): if name not in self.timers: self.timers[name] = self.Timer(name) @@ -99,20 +102,20 @@ class SynchronizedWallClockTimer: @staticmethod def memory_usage(): - alloc = "mem_allocated: {:.4f} GB".format(torch.cuda.memory_allocated() / + alloc = "mem_allocated: {:.4f} GB".format(get_accelerator().memory_allocated() / (1024 * 1024 * 1024)) max_alloc = "max_mem_allocated: {:.4f} GB".format( - torch.cuda.max_memory_allocated() / (1024 * 1024 * 1024)) - cache = "cache_allocated: {:.4f} GB".format(torch.cuda.memory_cached() / + get_accelerator().max_memory_allocated() / (1024 * 1024 * 1024)) + cache = "cache_allocated: {:.4f} GB".format(get_accelerator().memory_cached() / (1024 * 1024 * 1024)) max_cache = "max_cache_allocated: {:.4f} GB".format( - torch.cuda.max_memory_cached() / (1024 * 1024 * 1024)) + get_accelerator().max_memory_cached() / (1024 * 1024 * 1024)) return " | {} | {} | {} | 
{}".format(alloc, max_alloc, cache, max_cache) def log(self, names, normalizer=1.0, reset=True, memory_breakdown=False, ranks=None): """Log a group of timers.""" assert normalizer > 0.0 - string = f"rank={torch.distributed.get_rank()} time (ms)" + string = f"rank={dist.get_rank()} time (ms)" for name in names: if name in self.timers: elapsed_time = (self.timers[name].elapsed(reset=reset) / normalizer) @@ -135,24 +138,22 @@ class ThroughputTimer: def __init__( self, batch_size, - num_workers, start_step=2, steps_per_output=50, monitor_memory=False, logging_fn=None, ): + from deepspeed.utils import logger self.start_time = 0 self.end_time = 0 self.started = False - self.batch_size = batch_size - if batch_size is None: - self.batch_size = 1 - self.num_workers = num_workers + self.batch_size = 1 if batch_size is None else batch_size self.start_step = start_step self.epoch_count = 0 - self.local_step_count = 0 - self.total_step_count = 0 + self.micro_step_count = 0 + self.global_step_count = 0 self.total_elapsed_time = 0 + self.step_elapsed_time = 0 self.steps_per_output = steps_per_output self.monitor_memory = monitor_memory self.logging = logging_fn @@ -165,7 +166,7 @@ class ThroughputTimer: def update_epoch_count(self): self.epoch_count += 1 - self.local_step_count = 0 + self.micro_step_count = 0 def _init_timer(self): self.initialized = True @@ -173,49 +174,60 @@ class ThroughputTimer: def start(self): self._init_timer() self.started = True - if self.total_step_count >= self.start_step: - torch.cuda.synchronize() + if self.global_step_count >= self.start_step: + get_accelerator().synchronize() self.start_time = time.time() - def stop(self, report_speed=True): + def stop(self, global_step=False, report_speed=True): if not self.started: return self.started = False - self.total_step_count += 1 - self.local_step_count += 1 - if self.total_step_count > self.start_step: - torch.cuda.synchronize() + self.micro_step_count += 1 + if global_step: + self.global_step_count += 1 
+ + if self.start_time > 0: + get_accelerator().synchronize() self.end_time = time.time() duration = self.end_time - self.start_time self.total_elapsed_time += duration - if self.local_step_count % self.steps_per_output == 0: - if report_speed: + self.step_elapsed_time += duration + + if global_step: + if report_speed and self.global_step_count % self.steps_per_output == 0: self.logging( - "{}/{}, SamplesPerSec={}, MemAllocated={}GB, MaxMemAllocated={}GB" - .format(self.epoch_count, - self.local_step_count, - self.avg_samples_per_sec(), - round(torch.cuda.memory_allocated() / 1024**3, - 2), - round(torch.cuda.max_memory_allocated() / 1024**3, - 2))) - if self.monitor_memory: - virt_mem = psutil.virtual_memory() - swap = psutil.swap_memory() - self.logging("{}/{}, vm percent: {}, swap percent: {}".format( - self.epoch_count, - self.local_step_count, - virt_mem.percent, - swap.percent, - )) + "epoch={}/micro_step={}/global_step={}, RunningAvgSamplesPerSec={}, CurrSamplesPerSec={}, " + "MemAllocated={}GB, MaxMemAllocated={}GB".format( + self.epoch_count, + self.micro_step_count, + self.global_step_count, + self.avg_samples_per_sec(), + self.batch_size / self.step_elapsed_time, + round(get_accelerator().memory_allocated() / 1024**3, + 2), + round(get_accelerator().max_memory_allocated() / 1024**3, + 2), + )) + if self.monitor_memory: + virt_mem = psutil.virtual_memory() + swap = psutil.swap_memory() + self.logging( + "epoch={}/micro_step={}/global_step={}, vm %: {}, swap %: {}" + .format( + self.epoch_count, + self.micro_step_count, + self.global_step_count, + virt_mem.percent, + swap.percent, + )) + self.step_elapsed_time = 0 def avg_samples_per_sec(self): - if self.total_step_count > 0: - samples_per_step = self.batch_size * self.num_workers - total_step_offset = self.total_step_count - self.start_step + if self.global_step_count > 0: + total_step_offset = self.global_step_count - self.start_step avg_time_per_step = self.total_elapsed_time / total_step_offset # 
training samples per second - return samples_per_step / avg_time_per_step + return self.batch_size / avg_time_per_step return float("-inf") @@ -231,6 +243,9 @@ def trim_mean(data, trim_percent): """ assert trim_percent >= 0.0 and trim_percent <= 1.0 n = len(data) + # Account for edge case of empty list + if len(data) == 0: + return 0 data.sort() k = int(round(n * (trim_percent))) return mean(data[k:n - k]) diff --git a/deepspeed/utils/types.py b/deepspeed/utils/types.py new file mode 100644 index 0000000..a6d5ffd --- /dev/null +++ b/deepspeed/utils/types.py @@ -0,0 +1,9 @@ +'''Copyright The Microsoft DeepSpeed Team''' + +from enum import IntEnum + + +class ActivationFuncType(IntEnum): + UNKNOWN = 0 + GELU = 1 + ReLU = 2 diff --git a/deepspeed/utils/zero_to_fp32.py b/deepspeed/utils/zero_to_fp32.py old mode 100644 new mode 100755 index 8e8b0fd..f00e256 --- a/deepspeed/utils/zero_to_fp32.py +++ b/deepspeed/utils/zero_to_fp32.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +'''Copyright The Microsoft DeepSpeed Team''' # This script extracts fp32 consolidated weights from a zero 2 and 3 DeepSpeed checkpoints. It gets # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in @@ -17,11 +18,9 @@ from collections import OrderedDict # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with # DeepSpeed data structures it has to be available in the current python environment. 
-import deepspeed from deepspeed.utils import logger from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, - PARAM_SHAPES, SINGLE_PARTITION_OF_FP32_GROUPS, FP32_FLAT_GROUPS, ZERO_STAGE, diff --git a/docker/Dockerfile b/docker/Dockerfile old mode 100644 new mode 100755 diff --git a/docker/Dockerfile.rocm b/docker/Dockerfile.rocm index 5e3f756..cdfe5e8 100644 --- a/docker/Dockerfile.rocm +++ b/docker/Dockerfile.rocm @@ -1,4 +1,4 @@ -FROM rocm/pytorch:rocm5.0.1_ubuntu18.04_py3.7_pytorch_1.10.0 +FROM rocm/pytorch:latest # install latest released version of deepspeed RUN pip install deepspeed && \ diff --git a/docs/README.md b/docs/README.md index 3af6830..fbd9b68 100644 --- a/docs/README.md +++ b/docs/README.md @@ -47,3 +47,13 @@ You can now start a local webserver via: bundle exec jekyll serve ``` The website should now be accessible at [http://localhost:4000](http://localhost:4000) + + +## Update the Readthedocs.io API documentation +Use the following steps to update the public API documentation. + +1. Make your documentation changes and push them to the rtd-staging branch. This will rebuild the docs in the staging branch. +**NOTE**: It is acceptable to force push to this branch to overwrite previous changes. +2. View the result of the build [here](https://readthedocs.org/projects/deepspeed/builds/) +3. Once the build is complete view the newly modified API documentation [here](https://deepspeed.readthedocs.io/en/rtd-staging/) +4. Once you are satisfied with the changes create a new branch off of rtd-staging to push into master. 
diff --git a/docs/_config.yml b/docs/_config.yml index dc79fc0..7127b84 100644 --- a/docs/_config.yml +++ b/docs/_config.yml @@ -35,10 +35,12 @@ collections: - advanced-install.md - getting-started.md - azure.md + - automatic-tensor-parallelism.md - bert-finetuning.md - bert-pretraining.md - cifar-10.md - curriculum-learning.md + - data-efficiency.md - flops-profiler.md - pytorch-profiler.md - autotuning.md @@ -48,6 +50,9 @@ collections: - mixture-of-experts.md - mixture-of-experts-nlg.md - mixture-of-experts-inference.md + - model-compression.md + - monitor.md + - comms-logging.md - one-cycle.md - onebit-adam.md - zero-one-adam.md @@ -78,6 +83,8 @@ defaults: path: "_pages" values: permalink: /docs/:basename/ + toc: true + toc_label: "Contents" - scope: path: "" type: posts diff --git a/docs/_data/navigation.yml b/docs/_data/navigation.yml old mode 100644 new mode 100755 index 20f00b6..6f7c443 --- a/docs/_data/navigation.yml +++ b/docs/_data/navigation.yml @@ -11,20 +11,15 @@ main: url: https://github.com/microsoft/DeepSpeed lnav: - - title: 'Feature Overview' - url: /features/ + - title: 'Training' + url: /training/ + - title: 'Inference' + url: /inference/ + - title: 'Compression' + url: /compression/ - title: 'Getting Started' url: /getting-started/ - children: - - title: 'Installation' - url: /getting-started/#installation - - title: 'Writing models' - url: /getting-started/#writing-deepspeed-models - - title: 'Training' - url: /getting-started/#training - - title: 'Launching' - url: /getting-started/#launching-deepspeed-training - - title: 'Configuration' + - title: 'ds_config' url: /docs/config-json/ children: - title: 'Autotuning' @@ -33,36 +28,24 @@ lnav: url: /docs/config-json/#batch-size-related-parameters - title: 'Optimizer' url: /docs/config-json/#optimizer-parameters - - title: 'Scheduler' - url: /docs/config-json/#scheduler-parameters - - title: 'Communication' - url: /docs/config-json/#communication-options - title: 'FP16' url: 
/docs/config-json/#fp16-training-options - title: 'BFLOAT16' url: /docs/config-json/#bfloat16-training-options - - title: 'Gradient Clipping' - url: /docs/config-json/#gradient-clipping - title: 'ZeRO optimizations' url: /docs/config-json/#zero-optimizations-for-fp16-training - - title: 'Parameter Offloading' - url: /docs/config-json/#parameter-offloading - - title: 'Optimizer Offloading' - url: /docs/config-json/#optimizer-offloading - - title: 'Asynchronous I/O' - url: /docs/config-json/#asynchronous-io - title: 'Logging' url: /docs/config-json/#logging - title: 'Flops Profiler' url: /docs/config-json/#flops-profiler - - title: 'PyTorch Profiler' - url: /docs/config-json/#pytorch-profiler - - title: 'Activation checkpointing' - url: /docs/config-json/#activation-checkpointing - - title: 'Sparse Attention' - url: /docs/config-json/#sparse-attention - - title: 'Logging to TensorBoard' - url: /docs/config-json/#tensorboard-options + - title: 'Monitoring' + url: /docs/config-json/#monitoring-module-tensorboard-wandb-csv + - title: 'Communication Logging' + url: /docs/config-json/#communication-logging + - title: 'Model Compression' + url: /docs/config-json/#compression + - title: 'Data Efficiency' + url: /docs/config-json/#data-efficiency - title: 'Tutorials' url: /tutorials/ children: @@ -70,6 +53,8 @@ lnav: url: /getting-started/ - title: 'Getting started on Azure' url: /tutorials/azure/ + - title: 'Automatic Tensor Parallelism' + url: /tutorials/automatic-tensor-parallelism/ - title: 'Autotuning' url: /tutorials/autotuning/ - title: 'BingBertSQuAD Fine-tuning' @@ -80,6 +65,8 @@ lnav: url: /tutorials/cifar-10/ - title: 'Curriculum Learning' url: /tutorials/curriculum-learning/ + - title: 'Data Efficiency' + url: /tutorials/data-efficiency/ - title: 'Flops Profiler' url: /tutorials/flops-profiler/ - title: 'PyTorch Profiler' @@ -98,8 +85,14 @@ lnav: url: /tutorials/mixture-of-experts-nlg/ - title: 'MoE Inference' url: /tutorials/mixture-of-experts-inference/ + - 
title: 'Model Compression' + url: /tutorials/model-compression/ - title: 'Mixture-of-Quantization' url: /tutorials/MoQ-tutorial/ + - title: 'Monitoring' + url: /tutorials/monitor + - title: 'Communication Logging' + url: /tutorials/comms-logging - title: 'One-Cycle Schedule' url: /tutorials/one-cycle/ - title: 'One-Bit Adam' diff --git a/docs/_pages/compression.md b/docs/_pages/compression.md new file mode 100644 index 0000000..1a7b40d --- /dev/null +++ b/docs/_pages/compression.md @@ -0,0 +1,12 @@ +--- +title: "Compression Overview and Features" +layout: single +permalink: /compression/ +toc: true +toc_label: "Contents" +--- + + +DeepSpeed Compression is a library purposely built to make it easy to compress models for researchers and practitioners while delivering faster speed, smaller model size, and significantly reduced compression cost. Please refer to our [blog](https://www.microsoft.com/en-us/research/blog/deepspeed-compression-a-composable-library-for-extreme-compression-and-zero-cost-quantization/) for more details. + +DeepSpeed Compression offers novel state-of-the-art compression techniques to achieve faster model compression with better model quality and lower compression cost. DeepSpeed Compression also takes an end-to-end approach to improve the computation efficiency of compressed models via a highly optimized inference engine. Furthermore, our library has multiple built-in state-of-the-art compression methods. It supports the synergistic composition of these methods and the system optimizations, offering the best of both worlds while allowing a seamless and easy-to-use pipeline for efficient DL model inference. We highly recommend you also to read our blog to learn more about (at a high level) why we build DeepSpeed Compression and what benefits it provides to users. To try compress your model using DeepSpeed compression library, please checkout our [tutorial](https://www.deepspeed.ai/tutorials/model-compression/). 
diff --git a/docs/_pages/config-json.md b/docs/_pages/config-json.md old mode 100644 new mode 100755 index 53df586..2d497bb --- a/docs/_pages/config-json.md +++ b/docs/_pages/config-json.md @@ -1,5 +1,7 @@ --- title: "DeepSpeed Configuration JSON" +toc: true +toc_label: "Contents" --- ### Batch Size Related Parameters @@ -106,6 +108,7 @@ A variant ***optimizer*** for 1-bit Adam is 0/1 Adam, which further optimizes 1- } ``` 0/1 Adam supports the following params key/values in addition to standard Adam (learn more in our [tutorial](/tutorial/zero-one-adam/).) + | "params" key | Description | Default | | ------------------- | ---------------------------------------------------------------------------------- | ------- | | var\_freeze\_step | The latest step to update the variance | 100000 | @@ -216,8 +219,9 @@ Example of **scheduler** ```json "fp16": { "enabled": true, + "auto_cast": false, "loss_scale": 0, - "initial_scale_power": 32, + "initial_scale_power": 16, "loss_scale_window": 1000, "hysteresis": 2, "min_loss_scale": 1 @@ -230,6 +234,12 @@ Example of **scheduler** | ------------------------------------------------------------------------------------------- | ------- | | **enabled** is a **fp16** parameter indicating whether or not FP16 training enabled. | `false` | +**fp16:auto_cast**: [boolean] + +| Description | Default | +| -------------------------------------------------------------| ------- | +| **auto_cast** automatically casts inputs to **fp16** | `false` | + **fp16:loss_scale**: [float] | Description | Default | @@ -258,7 +268,7 @@ Example of **scheduler** | Description | Default | | ----------------------------------------------------------------------------------------------------- | ------- | -| **min_loss_scale** is a **fp16** parameter representing the minimum dynamic loss scale value. | `1000` | +| **min_loss_scale** is a **fp16** parameter representing the minimum dynamic loss scale value. 
| `1` | ### BFLOAT16 training options @@ -363,7 +373,7 @@ Enabling and configuring ZeRO memory optimizations | Description | Default | | --------------------------------------------------------------------------------------------------------- | ------- | -| Enable ZeRO memory optimization wrapper for FP16 Training. Currently compatible only with Adam optimizer. | `false` | +| Enable ZeRO memory optimizations, compatible with FP16/BF16/FP32 and the Adam optimizer. | `false` | **stage**: [integer] @@ -417,7 +427,7 @@ Enabling and configuring ZeRO memory optimizations | Description | Default | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | -| Stage 2 optimization for CPU offloading that parallelizes gradient copying to CPU memory among ranks by fine-grained gradient partitioning. Performance benefit grows with gradient accumulation steps (more copying between optimizer steps) or GPU count (increased parallelism). | `False` | +| Stage 1 and 2 optimization for CPU offloading that parallelizes gradient copying to CPU memory among ranks by fine-grained gradient partitioning. Performance benefit grows with gradient accumulation steps (more copying between optimizer steps) or GPU count (increased parallelism). | `False` | ***offload_param***: [dictionary] @@ -429,7 +439,7 @@ Enabling and configuring ZeRO memory optimizations | Description | Default | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------- | -| Enable offloading of optimizer state to CPU or NVMe, and optimizer computation to CPU. This frees up GPU memory for larger models or batch sizes. 
Valid only with stage 2 and 3. See [here](#optimizer-offloading) for more details. | `False` | +| Enable offloading of optimizer state to CPU or NVMe, and optimizer computation to CPU. This frees up GPU memory for larger models or batch sizes. Valid for ZeRO stage 1, 2, 3. See [here](#optimizer-offloading) for more details. | `False` | ***stage3_max_live_parameters***: [integer] @@ -471,7 +481,7 @@ Enabling and configuring ZeRO memory optimizations | Description | Default | | ------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | -| Enable offloading of optimizer memory and computation to CPU. This frees up GPU memory for larger models or batch sizes. Valid only with stage 2. | `False` | +| Enable offloading of optimizer memory and computation to CPU. This frees up GPU memory for larger models or batch sizes. Valid with stage 1 and 2. | `False` | ### Parameter offloading @@ -526,7 +536,7 @@ Note that if the value of "device" is not specified or not supported, an asserti | Number of parameter elements to maintain in CPU memory when offloading to NVMe is enabled. | 1e9 | ### Optimizer offloading -Enabling and configuring ZeRO optimization of offloading optimizer computation to CPU and state to CPU/NVMe. CPU offloading is available with ZeRO stage 2 or 3. NVMe offloading is available only with ZeRO stage 3. +Enabling and configuring ZeRO optimization of offloading optimizer computation to CPU and state to CPU/NVMe. CPU offloading is available with ZeRO stage 1, 2, 3. NVMe offloading is available only with ZeRO stage 3. Note that if the value of "device" is not specified or not supported, an assertion will be triggered. 
```json "offload_optimizer": { @@ -642,8 +652,8 @@ Configuring the asynchronous I/O module for offloading parameter and optimizer s { "autotuning": { "enabled": false, - "results_dir": null, - "exps_dir": null, + "results_dir": "autotuning_results", + "exps_dir": "autotuning_exps", "overwrite": false, "metric": "throughput", "start_profile_step": 3, @@ -668,15 +678,15 @@ Configuring the asynchronous I/O module for offloading parameter and optimizer s **results_dir**: [string] -| Description | Default | -| -------------------------------------------------------------------------------------------------------------------------------- | ------- | -| Path to the autotuning experiment results directory. If None, "autotuning_results" under the training script launching path is used. | `null` | +| Description | Default | +| ------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | +| Path to the autotuning experiment results directory. The default appears in the working directory from which Deepspeed was launched. | "autotuning_results" | **exps_dir**: [string] -| Description | Default | -| ---------------------------------------------------------------------------------------------------------------------------------- | ------- | -| Path to the auotuning experiment descriptions directory. If None, "autotuning_exps" under the train script launching path is used. | `null` | +| Description | Default | +| ---------------------------------------------------------------------------------------------------------------------------------------- | ------------------ | +| Path to the auotuning experiment descriptions directory. The default appears in the working directory from which Deepspeed was launched. 
| "autotuning_exps" | **overwrite**: [boolean] @@ -888,7 +898,132 @@ Configuring the asynchronous I/O module for offloading parameter and optimizer s } ``` +### Data Efficiency +DeepSpeed Data Efficiency Library includes two techniques: curriculum learning and random layerwise token dropping (random-LTD). Read more about how to use the DeepSpeed Data Efficiency Library in our [tutorial](/tutorials/data-efficiency/). + +```json +"data_efficiency": { + "enabled": true, + "seed": 1234, + "data_routing": { + "enabled": true, + "random_ltd":{ + "enabled": true, + "total_layer_num": 24, + "random_ltd_layer_num": 22, + "random_ltd_layer_id": [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22], + "model_mask_name": "attention_mask", + "model_type": "decoder", + "hidden_state_order": "seq_batch_dim", + "random_ltd_schedule": { + "min_value": 128, + "max_value": 2048, + "schedule_type":"fixed_linear", + "schedule_config": { + "require_steps": 200000, + "seq_per_step": 16 + } + } + } + }, + "data_sampling": { + "enabled": true, + "num_epochs": 1, + "num_workers": 0, + "curriculum_learning": { + "enabled": true, + "data_cluster_path": "/path/to/data_clusters", + "curriculum_metrics": { + "vocabularyrarity": { + "index_to_sample_path": "/path/to/index_to_sample", + "index_to_metric_path": "/path/to/index_to_metric", + "difficulty_type": "percentile", + "clustering_type": "schedule_based", + "min_difficulty": 1, + "max_difficulty": 100, + "schedule_type": "fixed_root", + "schedule_config": { + "total_curriculum_step": 110000, + "difficulty_step": 1, + "root_degree": 2 + } + } + } + } + } +} +``` + +**data_efficiency**: [dictionary] + +| Fields | Value | Default | +| ----- | ----- | ----- | +| **enabled**: [boolean] | Enable data efficiency or not. | `false` | +| **seed**: [integer] | Random seed for data sampling. | 1234 | +| **data_routing**: [dictionary] | Configs for data routing techniques. 
| N/A | +| **data_sampling**: [dictionary] | Configs for data sampling techniques. | N/A | + +**data_routing**: [dictionary] + +| Fields | Value | Default | +| ----- | ----- | ----- | +| **enabled**: [boolean] | Enable data routing techniques or not. | `false` | +| **random_ltd**: [dictionary] | Configs for random-LTD technique. | N/A | + +**data_sampling**: [dictionary] + +| Fields | Value | Default | +| ----- | ----- | ----- | +| **enabled**: [boolean] | Enable data sampling techniques or not. | `false` | +| **num_epochs**: [integer] | At most how many epochs of the original dataset will be iterated. | 1000 | +| **num_workers**: [integer] | Data loader number of workers. | 0 | +| **curriculum_learning**: [dictionary] | Configs for curriculum learning technique. | N/A | + +**random_ltd**: [dictionary] + +| Fields | Value | Default | +| ----- | ----- | ----- | +| **enabled**: [boolean] | Enable random-LTD technique or not. | `false` | +| **total_layer_num**: [integer] | The number of layers (or the depth) for the pretraining/fine-tuning model. | N/A | +| **random_ltd_layer_num**: [integer] | The number of layers that will be applied with random-LTD. | N/A | +| **random_ltd_layer_id**: [list] | The exact layer_id that will be applied with random-LTD. The length of this list must be the same as `random_ltd_layer_num`. | N/A | +| **model_mask_name**: [str] | The variable name of the attention_mask. Different libraries have different names, such as att_mask. For huggingface model, it’s named “attention_mask”. Users need to check the forward function in the original model files. If the attention mask input in the original model's forward function is not a keyword/named argument (e.g., attention_mask=None), user would need to change it to a keyword/named argument and provide that keyword as `model_mask_name`. | N/A | +| **model_type**: [str] | Users need to identify whether the model is `decoder` or `encoder`. Currently we only support these two. 
| N/A | +| **hidden_state_order**: [str] | Users need to know the input order of the hidden state tensor. Normally, it’s batch, sequence and then the hidden dimension, which is `batch_seq_dim`. Sometimes, the order between batch and sequence will be switched like `seq_batch_dim`. Currently, we support these two. | N/A | +| **random_ltd_schedule**: [dictionary] | The schedule of the effective sequence length after token dropping. It's a linear function where random-LTD gradually drops fewer tokens and increases effective sequence length. | N/A | +|   **min_value**: [integer] | The initial effective sequence length (after token dropping) at step/iteration 0. | N/A | +|   **max_value**: [integer] | The max effective sequence length (usually the case without any token dropping). Usually this is set as baseline's seqlen. | N/A | +|   **schedule_type**: [str] | The sequence length follows a linear increasing function starting from `min_value` and reaching `max_value`. We currently only support this type. | N/A | +|   **schedule_config**: [dictionary] | Configs for the linear increasing function. | N/A | +|     **require_steps**: [integer] | How many iterations will be needed to reach max_value from min_value. | N/A | +|     **seq_per_step**: [integer] | At any time, the effective sequence length must be a multiple of this `seq_per_step`. Set this to a multiple of 8 (for FP16 data) or 16 (for INT8 data) to enable NVIDIA Tensor Core acceleration. | N/A | + +**curriculum_learning**: [dictionary] + +| Fields | Value | Default | +| ----- | ----- | ----- | +| **enabled**: [boolean] | Enable curriculum learning technique or not. | `false` | +| **data_cluster_path**: [str] | Path to directory where curriculum learning will store the indexes of data samples within the same difficulty ranges. | N/A | +| **curriculum_metrics**: [dictionary] | This dictionary includes all desired curriculum metrics and their configs. 
Each metric will be a separate sub-dictionary, where the key is the metric name and the values are configs below. | N/A | +|   **index_to_sample_path**: [str] | Path to the index_to_sample file generated during offline data analysis. Note that data analysis will generate two kinds of index_to_sample files: The metric_name_index_to_sample_percentile_merged file is a concatenated index for perf improvement, but it only works when you set difficulty_type=`percentile`. If you use difficulty_type=`value`, you need to change this to use the metric_name_index_to_sample file. | N/A | +|   **index_to_metric_path**: [str] | Path to the index_to_metric_path file generated during offline data analysis. | N/A | +|   **difficulty_type**: [str] | During training, how to increase the max accepted difficulty. Currently support `value` (increase by absolute value) and `percentile` (increase by difficulty percentile). | N/A | +|   **clustering_type**: [str] | Currently support `schedule_based` (cluster data based on the difficulty schedule (pacing function) below) and `single_cluster` (no clustering required and probably CL is achieved by data postprocessing, such as sequence length truncation). | N/A | +|   **min_difficulty**: [integer] | Starting difficulty at first step. When difficulty_type=`value` the `min_difficulty` is an absolute difficulty value. When difficulty_type=`percentile` the `min_difficulty` is a difficulty percentile value. | N/A | +|   **max_difficulty**: [integer] | Final max difficulty. When difficulty_type=`value` the `max_difficulty` is an absolute difficulty value. When difficulty_type=`percentile` the `max_difficulty` is a difficulty percentile value. | N/A | +|   **schedule_type**: [str] | The difficulty schedule (pacing function) that defines how the max accepted difficulty increases from `min_difficulty` to `max_difficulty` during training. Currently support `fixed_linear`, `fixed_root`, `fixed_discrete`, and `custom`. 
| N/A | +|   **schedule_config**: [dictionary] | Configs for the pacing function. When schedule_type=`custom` this dictionary is not necessary. Instead user needs to provide a callback function (via the `set_custom_curriculum_learning_schedule` API in deepspeed/runtime/engine.py) which will update the max accepted difficulty during training. Configs below are all belongs to `schedule_config`. | N/A | +|     **total_curriculum_step**: [integer] | How many steps the curriculum learning takes to go from min difficulty to max difficulty. Used by `fixed_linear` and `fixed_root` schedule. | N/A | +|     **difficulty_step**: [integer] | The max accepted difficulty level determined every step must be a multiple of this `difficulty_step`. This is used to ensure the use of NVIDIA Tensor Core acceleration (requires multiple of 8 (FP16) or 16 (INT8)). Used by `fixed_linear` and `fixed_root` schedule. | N/A | +|     **root_degree**: [integer] | The degree of the root function. Degree of 2 means square root and degree of 3 means cube root. Degree of 1 is equivalent to linear. Used by `fixed_root` schedule. | N/A | +|     **difficulty**: [list] | List of max accepted difficulty levels to be used during schedule. Used by `fixed_discrete` schedule. | N/A | +|     **max_step**: [list] | List of which step to change max accepted difficulty level. Used by `fixed_discrete` schedule. | N/A | + + ### Curriculum Learning + +**Note:** On 12/12/2022, we released [DeepSpeed Data Efficiency Library](/tutorials/data-efficiency/) which provides a more general curriculum learning support. This legacy curriculum learning feature below is still supported but we recommend to use the Data Efficiency Library. 
+ ```json "curriculum_learning": { "enabled": true, @@ -964,13 +1099,15 @@ Configuring the asynchronous I/O module for offloading parameter and optimizer s | ---------------------------------------------------------------------------------------------------------------------------- | ------- | | List of which step to change difficulty level. One of the `schedule_config` when the `fixed_discrete` schedule_type is used. | N/A | -### Logging to Tensorboard +### Monitoring Module (TensorBoard, WandB, CSV) **Note:** Deepspeed logs to TensorBoard through PyTorch. Logging to TensorBoard requires that the `tensorboard` package is installed (read more in the [PyTorch documentation](https://pytorch.org/docs/1.8.0/tensorboard.html)). {: .notice--warning} +**Note:** Logging to WandB requires that the `wandb` package is installed (read more in the [WandB documentation](https://docs.wandb.ai/quickstart)). +{: .notice--warning} -Deepspeed can log training details into a [Tensorboard](https://www.tensorflow.org/tensorboard)-compatible file. Below is an overview of what deepspeed will log. +Deepspeed's Monitor module can log training details into a [Tensorboard](https://www.tensorflow.org/tensorboard)-compatible file, to [WandB](https://wandb.ai/site), or to simple CSV files. Below is an overview of what DeepSpeed will log automatically. 
| Field | Description |Conditions | | ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----- | @@ -989,11 +1126,11 @@ Deepspeed can log training details into a [Tensorboard](https://www.tensorflow.o | Fields | Value |Default | | ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----- | | enabled | Whether logging to [Tensorboard](https://www.tensorflow.org/tensorboard) is enabled. | `false` | -| job_name | Name for the current job. This will become a new directory inside `output_path` | `"DeepSpeedJobName"` | -| output_path | Path to where the Tensorboard logs will be written. | `~/tensorboard/` | +| output_path | Path to where the Tensorboard logs will be written. If None, the output path is set under the training script's launching path. | `null` | +| job_name | Name for the current job. This will become a new directory inside `output_path`. 
| `"DeepSpeedJobName"` | -Example of ** tensorboard** configuration: +Example of **tensorboard** configuration: ```json "tensorboard": { @@ -1002,3 +1139,517 @@ Example of ** tensorboard** configuration: "job_name": "train_bert" } ``` + +**wandb**: [dictionary] + +| Fields | Value |Default | +| ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----- | +| enabled | Whether logging to [WandB](https://wandb.ai/site) is enabled. | `false` | +| group | Name for the WandB group. This can be used to group together runs. | `None` | +| team | Name for the WandB team. | `None` | +| project | Name for the WandB project. | `deepspeed` | + + +Example of **wandb** configuration: + +```json +"wandb": { + "enabled": true, + "group": "my_group", + "team": "my_team", + "project": "my_project" +} +``` + +**csv_monitor**: [dictionary] + +| Fields | Value |Default | +| ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----- | +| enabled | Whether logging to local CSV files is enabled. | `false` | +| output_path | Path to where the csv files will be written. If None, the output path is set under the training script's launching path. | `null` | +| job_name | Name for the current job. 
This will become a new directory inside `output_path` | `"DeepSpeedJobName"` | + + +Example of **csv_monitor** configuration: + +```json +"csv_monitor": { + "enabled": true, + "output_path": "output/ds_logs/", + "job_name": "train_bert" +} +``` + +### Elastic Training Config (V0.1 and V0.2) + +```json + "elasticity": { + "enabled": true, + "max_train_batch_size": 2000, + "micro_batch_sizes": [2,4,6], + "min_gpus": 1, + "max_gpus": 10000, + "min_time": 20, + "version": 0.2, + "ignore_non_elastic_batch_info": false, + "num_gpus_per_node": 1, + "model_parallel_size": 1 + } +``` + +| Field | Description |Default| +| ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----- | +| `enabled` | Enables computation of global batch size in elastic training. | false | +| `max_train_batch_size` | Max acceptable batch size that can be used in training. | 2000 | +| `micro_batch_sizes` | Acceptable micro batch sizes, same as train_micro_batch_size_per_gpu | [2,4,6] | +| `min_gpus` | Min number of GPUs to search over when computing highly composite batch size in v0.1 and v0.2. | 1 | +| `max_gpus` | Max number of GPUs to search over when computing highly composite batch size in v0.1 and v0.2. | 10000 | +| `min_time` |Minimum running time (minutes) before the scheduler will scale again (only used in v0.1). 0 implies it's unknown | 0 | +| `prefer_large_batch` | When finding a suitable batch size, attempt to find one that is closest to the max train batch size given. | true | +| `version` | Version of elastic logic to use. | 0.2 | +| `ignore_non_elastic_batch_info` | Ignore all batch info provided outside the elastic config. 
To reduce confusion, we require all batch related info to be given in elastic config only. | false | +| `num_gpus_per_node` | Number of GPUs per node. This information is used by v0.2 to support model-parallel training (only used by v0.2) | 1 | +| `model_parallel_size` | Tensor or model parallel size (only used by v0.2) | 1 | + + +### Communication Logging + + +DeepSpeed provides a flexible communication logging tool which can automatically detect and record communication operations launched via `deepspeed.comm`. NOTE: All logging communication calls are synchronized in order to provide accurate timing information. This may hamper performance if your model heavily uses asynchronous communication operations. + +Once the logs are populated, they can be summarized with `deepspeed.comm.log_summary()`. For more detail and example usage, see the [tutorial](/tutorials/comms-logging/) + + + + +**comms_logger**: [dictionary] + +| Fields | Value |Default | +| ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----- | +| enabled | Whether communication logging is enabled. | `false` | +| verbose | Whether to immediately print every communication operation | `false` | +| prof_all | Whether to profile all operations. | `true` | +| debug | Appends the caller function to each communication operation's `log_name`. | `false` | +| prof_ops | A list of communication operations to log (only the specified ops will be profiled). 
| `[]` | + + +Example of recommended **comms_logger** configuration: + +```json +"comms_logger": { + "enabled": true, + "verbose": false, + "prof_all": true, + "debug": false +} +``` + +Example of **comms_logger** configuration for logging specific operations only: + +```json +"comms_logger": { + "enabled": true, + "verbose": false, + "prof_all": false, + "debug": false, + "prof_ops": ["all_reduce", "all_gather"] +} +``` +### Compression +**Note:** **Compression** has seven different components, including layer reduction, weight quantization, activation quantization, sparse pruning, row pruning, head pruning, and channel pruning. We explain them one by one with simple json examples. Read more about how to use the DeepSpeed Compression library in our [tutorial](/tutorials/model-compression/). + +#### Layer Reduction +**Note:** Layer reduction works much better when using knowledge distillation (learn more in our [tutorial](/tutorials/model-compression/)): + +```json +"compression_training": { + "layer_reduction": { + "enabled": true, + "keep_number_layer": 5, + "module_name_prefix": "bert.encoder.layer", + "teacher_layer": [ + 2, + 4, + 6, + 8, + 10 + ], + "other_module_name": [ + "bert.pooler", + "bert.embeddings", + "classifier" + ] + } + } +``` + +**layer_reduction**: [dictionary] + +| Fields | Value | Default | +| ----- | ----- | ----- | +| **enabled**: [boolean] | Enable layer reduction or not. | `false` | +| **keep_number_layer**: [list] | The number of layers in the model to be kept. | N/A | +| **module_name_prefix**: [str] | The (uniform) name prefix of the model's modules of which the associated weight parameters are to be reinitialized. | N/A | +| **teacher_layer**: [list] | The layers whose weight parameters are to be reinitialized. The length of the list equals 'keep_number_layer'. | N/A | +| **other_module_name**: [list] | The name of modules of which the associated weight parameters are to be reinitialized. 
It is a complementary or alternative to module_name_prefix. For instance, "other_module_name": ["bert.encoder.layer.2","bert.encoder.layer.4"] is equivalent to "module_name_prefix":"bert.encoder.layer" and "teacher_layer": [2,4]. | N/A | + +#### Weight Quantization +```json + "compression_training": { + "weight_quantization": { + "shared_parameters":{ + "enabled": true, + "quantizer_kernel": false, + "schedule_offset": 0, + "quantize_groups": 1, + "quantize_verbose": false, + "quantization_type": "symmetric", + "rounding": "nearest", + "quantize_weight_in_forward": false, + "fp16_mixed_quantize":{ + "enabled": false, + "quantize_change_ratio": 0.001 + } + }, + "different_groups":{ + "wq1": { + "params": { + "start_bits": 8, + "target_bits": 8, + "quantization_period": 50 + }, + "modules": [ + "attention.self", + "intermediate" + ] + }, + "wq2": { + "params": { + "start_bits": 4, + "target_bits": 4, + "quantization_period": 50 + }, + "modules": [ + "attention.output" + ] + } + } + } + } +``` + +**shared_parameters**: [dictionary] + +Shared parameters for all weight quantization groups. + +| Fields | Value | Default | +| ----- | ----- | ----- | +| **enabled**: [boolean] | Enable weight quantization or not. | `false` | +| **quantizer_kernel**: [boolean] | Use DeepSpeed quantization kernel for >=4 bit quantization. This can only be enabled when using DeepSpeed FP16 optimizer. | `false` | +| **schedule_offset**: [integer] | Enable weight quantization after scheduled steps (can be treated as warmup steps). | `0` | +| **quantize_groups**: [integer] | Split the weight matrix into different number of groups, and each of them has its own scaling factor. | `1` | +| **quantize_verbose**: [boolean] | Print the quantization related logs. | `false` | +| **quantization_type**: [string] | Choose the quantization algorithm, symmetric or asymmetric. | `"symmetric"` | +| **rounding**: [string] | Rounding algorithm associated with quantization, nearest or stochastic. 
| `"nearest"` | +| **quantize_weight_in_forward**: [boolean] | Quantize weight in optimizer or forward step, must be set to true for FP32 optimizer training. | `false` | +| **fp16_mixed_quantize**: [dictionary] | Using the value mixed by FP16 value and the quantized value. | N/A | +|   **enabled**: [boolean] | Whether fp16 mixed quantization is enabled. | `false` | +|   **quantize_change_ratio**: [float] | Initial quantize value ratio, will gradually increase to 1. | `0.001` | + +**different_groups**: [dictionary] + +Different quantization sets, this is used for different quantization parameters. In this example, we give two different sets. In practice, you can choose the number of sets based on your requirements. + +| Fields | Value | Default | +| ----- | ----- | ----- | +| **params**: [dictionary] | | | +|   **start_bits**: [integer] | Quantization starting bits, will gradually reduce to target bits. | `8` | +|   **target_bits**: [integer] | Quantization target bits, need to be <= start_bits. | `8` | +|   **quantization_period**: [integer] | For every n steps, the quantization bits will be reduced by 1. | `1` | +| **modules**: [list] | Scope of weight parameters associated to the params setting. | `"All Linear and CONV2D layers"` | + +#### Activation Quantization +```json +"compression_training": { + "activation_quantization": { + "shared_parameters":{ + "enabled": true, + "quantization_type": "asymmetric", + "range_calibration": "dynamic", + "schedule_offset": 50 + }, + "different_groups":{ + "aq1": { + "params": { + "bits": 8 + }, + "modules": [ + "attention.output" + ] + } + } + } +} +``` + +**shared_parameters**: [dictionary] + +Shared parameters for all activation quantization groups. + +| Fields | Value | Default | +| ----- | ----- | ----- | +| **enabled**: [boolean] | Enable activation quantization or not. | `false` | +| **quantization_type**: [string] | Choose the quantization algorithm, symmetric or asymmetric. 
| `"symmetric"` | +| **range_calibration**: [string] | Using dynamic (per token or per image) or static (fixed min/max using momentum) for inference. | `"static"` | +| **schedule_offset**: [integer] | Enable activation quantization after scheduled steps (can be treated as warmup steps). | `0` | + +**different_groups**: [dictionary] + +Different quantization sets, this is used for different quantization parameters. In this example, we give one set. In practice, you can choose the number of sets based on your requirements. + +| Fields | Value | Default | +| ----- | ----- | ----- | +| **params**: [dictionary] | | | +|   **bits**: [integer] | Number of bits used for activation target bits, need to be >= 4. | `8` | +| **modules**: [list] | Scope of weight parameters associated to the params setting. | `"All Linear and CONV2D layers"` | + +#### Sparse Pruning +```json +"compression_training": { + "sparse_pruning":{ + "shared_parameters":{ + "enabled": true, + "schedule_offset": 30, + "method": "l1" + }, + "different_groups":{ + "sp1": { + "params": { + "dense_ratio": 0.5 + }, + "modules": [ + "attention.self" + ] + } + } + } +} +``` + +**shared_parameters**: [dictionary] + +Shared parameters for all sparse pruning groups. + +| Fields | Value | Default | +| ----- | ----- | ----- | +| **enabled**: [boolean] | Enable sparse pruning or not. | `false` | +| **schedule_offset**: [integer] | Enable sparse pruning after scheduled steps (can be treated as warmup steps). | `0` | +| **method**: [string] | Choose different pruning methods, l1 (static, magnitude based) or topk (dynamic, learnable). | `"l1"` | + +**different_groups**: [dictionary] + +Different pruning sets, this is used for different pruning parameters. In this example, we give one set. In practice, you can choose the number of sets based on your requirements. 
+ +| Fields | Value | Default | +| ----- | ----- | ----- | +| **params**: [dictionary] | | | +|   **dense_ratio**: [float] | The percentage of weights to keep after pruning. | `0.5` | +| **modules**: [list] | Scope of weight parameters associated to the params setting. | `"All Linear and CONV2D layers"` | + +#### Row Pruning +**Note:** **Row Pruning** is a feature designed for two back-to-back linear layers (e.g., Feed Forward Network in Transformers). As such, we suggested use row pruning for the first linear layer (i.e., the `intermediate.dense` layer for BERT). Reducing the row dimension of this matrix can help reducing the column of the follow-up matrix (i.e., `layer.\\w+.output.dense` layer for BERT). It should also work for other linear layers as well. +```json +"compression_training": { + "row_pruning":{ + "shared_parameters":{ + "enabled": true, + "schedule_offset": 20, + "method": "topk" + }, + "different_groups":{ + "rp1": { + "params": { + "dense_ratio": 0.5 + }, + "modules": [ + "intermediate.dense" + ], + "related_modules":[ + ["layer.\\w+.output.dense"] + ] + } + } + } +} +``` + +**shared_parameters**: [dictionary] + +Shared parameters for all row pruning groups. + +| Fields | Value | Default | +| ----- | ----- | ----- | +| **enabled**: [boolean] | Enable row pruning or not. | `false` | +| **schedule_offset**: [integer] | Enable row pruning after scheduled steps (can be treated as warmup steps). | `0` | +| **method**: [string] | Choose different pruning methods, l1 (static, magnitude based) or topk (dynamic, learnable). | `"l1"` | + +**different_groups**: [dictionary] + +Different pruning sets, this is used for different pruning parameters. In this example, we give one set. In practice, you can choose the number of sets based on your requirements. + +| Fields | Value | Default | +| ----- | ----- | ----- | +| **params**: [dictionary] | | | +|   **dense_ratio**: [float] | The percentage of weights to keep after pruning. 
| `0.5` | +| **modules**: [list] | Scope of weight parameters associated to the params setting. | `"All Linear and CONV2D layers"` | +| **related_modules**: [list[list]] | Related module to the row pruned module, which can be performed column pruning. | `None` | + +#### Head Pruning +**Note:** **Head Pruning** is a feature designed for two attention layers (e.g., Multi Head Attention in Transformers). For now, it can only be applied to output matrix of the Transformer (i.e., `attention.output.dense` in BERT). Pruning the output matrix can lead to the pruning of Query/Key/Value matrix as well. +```json +"compression_training": { + "head_pruning":{ + "shared_parameters":{ + "enabled": true, + "schedule_offset": 10, + "method": "topk", + "num_heads": 12 + }, + "different_groups":{ + "rp1": { + "params": { + "dense_ratio": 0.5 + }, + "modules": [ + "attention.output.dense" + ], + "related_modules":[ + ["self.query", "self.key", "self.value"] + ] + } + } + } +} + +``` + +**shared_parameters**: [dictionary] + +Shared parameters for all head pruning groups. + +| Fields | Value | Default | +| ----- | ----- | ----- | +| **enabled**: [boolean] | Enable head pruning or not. | `false` | +| **schedule_offset**: [integer] | Enable head pruning after scheduled steps (can be treated as warmup steps). | `0` | +| **method**: [string] | Choose different pruning methods. For now, we only support topk (dynamic, learnable). | `"topk"` | +| **num_heads**: [int] | Number of heads (must be provided by user). | N/A | + +**different_groups**: [dictionary] + +Different pruning sets, this is used for different pruning parameters. In this example, we give one set. In practice, you can choose the number of sets based on your requirements. + +| Fields | Value | Default | +| ----- | ----- | ----- | +| **params**: [dictionary] | | | +|   **dense_ratio**: [float] | The percentage of weights to keep after pruning. 
| `0.5` | +| **modules**: [list] | Scope of weight parameters associated to the params setting. | `"All Linear and CONV2D layers"` | +| **related_modules**: [list[list]] | Related module (Usually Q/K/V) to the head pruned module (i.e., the output matrix). For now, this feature only works for BERT. | `None` | + +#### Channel Pruning +**Note:** **Channel Pruning** is a feature designed for two back-to-back CONV2d layers (e.g., residual connection in ResNet). As such, we suggested use channel pruning for the first CONV2d layer. Reducing the number of output channels of this layer can help reducing the number of input channels the follow-up layer. It should also work for other CONV2d layers as well. +```json +"compression_training": { +"channel_pruning":{ + "shared_parameters":{ + "enabled": true, + "schedule_offset": 0, + "method": "topk" + }, + "different_groups":{ + "cp1": { + "params": { + "dense_ratio": 0.5 + }, + "modules": [ + "layer....conv1" + ], + "related_modules": [ + ["layer....conv2", "layer....bn1"] + ] + } + } + } +} +``` + +**shared_parameters**: [dictionary] + +Shared parameters for all channel pruning groups. + +| Fields | Value | Default | +| ----- | ----- | ----- | +| **enabled**: [boolean] | Enable channel pruning or not. | `false` | +| **schedule_offset**: [integer] | Enable channel pruning after scheduled steps (can be treated as warmup steps). | `0` | +| **method**: [string] | Choose different pruning methods, l1 (static, magnitude based) or topk (dynamic, learnable). | `"l1"` | + +**different_groups**: [dictionary] + +Different pruning sets, this is used for different pruning parameters. In this example, we give one set. In practice, you can choose the number of sets based on your requirements. + +| Fields | Value | Default | +| ----- | ----- | ----- | +| **params**: [dictionary] | | | +|   **dense_ratio**: [float] | The percentage of weights to keep after pruning. 
| `0.5` | +| **modules**: [list] | Scope of weight parameters associated to the params setting. | `"All CONV2D layers"` | +| **related_modules**: [list[list]] | Related module to the channel pruned module. | `None` | + +### Checkpoint options + +```json +"checkpoint": { + "tag_validation": "Warn", + "load_universal": false, + "use_node_local_storage": false, + "parallel_write":{ + "pipeline_stage": false + } +} +``` + +**tag_validation**: ["Ignore"|"Warn"|"Fail"] + +| Description | Default | +| -------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| Enables level of checking to ensure checkpoint tags are consistent across all ranks. Useful when restoring with different world sizes. | "Warn" | + +**load_universal**: [boolean] + +| Description | Default | +| -------------------------------------- | ------- | +| Load the latest checkpoint for all. | `false` | + +**use_node_local_storage**: [boolean] + +| Description | Default | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| If `true` DeepSpeed will store model parameter states and checkpoint states based on local rank allowing checkpoints to be loaded without access to a shared filesystem. | `false` | + +**pipeline_stage**: [boolean] + +| Description | Default | +| ------------------------------------------------------------- | ------- | +| Use pipeline stages to parallelize the writing of checkpoints.| `false` | + +### Data Type options + +```json +"data_types": { + "grad_accum_dtype": "fp32" +} +``` + +**grad_accum_dtype**: ["fp32"|"fp16"|"bf16"] + +| Description | Default | +| --------------------------------------------------------------------------------------------------------------| ------- | +| Specifies the data type in which to do gradient accumulation. 
If None the default is to match the model type. | None | diff --git a/docs/_pages/inference.md b/docs/_pages/inference.md new file mode 100755 index 0000000..d63604e --- /dev/null +++ b/docs/_pages/inference.md @@ -0,0 +1,13 @@ +--- +title: "Inference Overview and Features" +layout: single +permalink: /inference/ +toc: true +toc_label: "Contents" +--- + +DeepSpeed-Inference introduces several features to efficiently serve transformer-based PyTorch models. It supports model parallelism (MP) to fit large models that would otherwise not fit in GPU memory. Even for smaller models, MP can be used to reduce latency for inference. To further reduce latency and cost, we introduce inference-customized kernels. Finally, we propose a novel approach to quantize models, called MoQ, to both shrink the model and reduce the inference cost at production. For more details on the inference related optimizations in DeepSpeed, please refer to our [blog post](https://www.microsoft.com/en-us/research/blog/deepspeed-accelerating-large-scale-model-inference-and-training-via-system-optimizations-and-compression/). + +DeepSpeed provides a seamless inference mode for compatible transformer based models trained using DeepSpeed, Megatron, and HuggingFace, meaning that we don’t require any change on the modeling side such as exporting the model or creating a different checkpoint from your trained checkpoints. To run inference on multi-GPU for compatible models, provide the model parallelism degree and the checkpoint information or the model which is already loaded from a checkpoint, and DeepSpeed will do the rest. It will automatically partition the model as necessary, inject compatible high performance kernels into your model and manage the inter-gpu communication. For list of compatible models please see [here](https://github.com/microsoft/DeepSpeed/blob/master/deepspeed/module_inject/replace_policy.py). 
+ +To get started with DeepSpeed-Inference, please checkout our [tutorial](https://www.deepspeed.ai/tutorials/inference-tutorial/). diff --git a/docs/_pages/training.md b/docs/_pages/training.md new file mode 100644 index 0000000..466800a --- /dev/null +++ b/docs/_pages/training.md @@ -0,0 +1,580 @@ +--- +title: "Training Overview and Features" +layout: single +permalink: /training/ +toc: true +toc_label: "Contents" +--- + +# Overview +Training advanced deep learning models is challenging. Beyond model design, +model scientists also need to set up the state-of-the-art training techniques +such as distributed training, mixed precision, gradient accumulation, and +checkpointing. Yet still, scientists may not achieve the desired system +performance and convergence rate. Large model sizes are even more challenging: +a large model easily runs out of memory with pure data parallelism and it is +difficult to use model parallelism. DeepSpeed addresses these challenges to +accelerate model development *and* training. + +## Distributed, Effective, and Efficient Training with Ease +The DeepSpeed API is a lightweight wrapper on [PyTorch](https://pytorch.org/). This +means that you can use everything you love in PyTorch and without learning a new +platform. In addition, DeepSpeed manages all of the boilerplate state-of-the-art +training techniques, such as distributed training, mixed precision, gradient +accumulation, and checkpoints so that you can focus on your model development. Most +importantly, you can leverage the distinctive efficiency and effectiveness benefit of +DeepSpeed to boost speed and scale with just a few lines of code changes to your PyTorch +models. + +## Speed +DeepSpeed achieves high performance and fast convergence through a combination of +efficiency optimizations on compute/communication/memory/IO and effectiveness +optimizations on advanced hyperparameter tuning and optimizers. 
For example: + +* DeepSpeed trains BERT-large to parity in 44 + mins using 1024 V100 GPUs (64 DGX-2 boxes) and in 2.4 hours using 256 GPUs + (16 DGX-2 boxes). + + **BERT-large Training Times** + + | Devices | Source | Training Time | + | -------------- | --------- | ---------------------:| + | 1024 V100 GPUs | DeepSpeed | **44** min| + | 256 V100 GPUs | DeepSpeed | **2.4** hr| + | 64 V100 GPUs | DeepSpeed | **8.68** hr| + | 16 V100 GPUs | DeepSpeed | **33.22** hr| + + *BERT codes and tutorials will be available soon.* + +* DeepSpeed trains GPT2 (1.5 billion parameters) 3.75x faster than state-of-art, NVIDIA + Megatron on Azure GPUs. + + *Read more*: [GPT tutorial](/tutorials/megatron/) + + + +## Memory efficiency +DeepSpeed provides memory-efficient data parallelism and enables training models without +model parallelism. For example, DeepSpeed can train models with up to 13 billion parameters on +a single GPU. In comparison, existing frameworks (e.g., +PyTorch's Distributed Data Parallel) run out of memory with 1.4 billion parameter models. + +DeepSpeed reduces the training memory footprint through a novel solution called Zero +Redundancy Optimizer (ZeRO). Unlike basic data parallelism where memory states are +replicated across data-parallel processes, ZeRO partitions model states and gradients to save +significant memory. Furthermore, it also reduces activation memory and fragmented memory. +The current implementation (ZeRO-2) reduces memory by up to +8x relative to the state-of-art. You can read more about ZeRO in our [paper](https://arxiv.org/abs/1910.02054), and +in our blog posts related to +[ZeRO-1](https://www.microsoft.com/en-us/research/blog/zero-deepspeed-new-system-optimizations-enable-training-models-with-over-100-billion-parameters/) and [ZeRO-2](https://www.microsoft.com/en-us/research/blog/zero-2-deepspeed-shattering-barriers-of-deep-learning-speed-scale/). 
+ +With this impressive memory reduction, early adopters of DeepSpeed have already +produced a language model (LM) with over 17B parameters called + +Turing-NLG, +establishing a new SOTA in the LM category. + +For model scientists with limited GPU resources, ZeRO-Offload leverages both CPU and GPU memory for training large models. Using a machine with **a single GPU**, our users can run **models of up to 13 billion parameters** without running out of memory, 10x bigger than the existing approaches, while obtaining competitive throughput. This feature democratizes multi-billion-parameter model training and opens the window for many deep learning practitioners to explore bigger and better models. + +## Scalability +DeepSpeed supports efficient data parallelism, model parallelism, pipeline parallelism and their +combinations, which we call 3D parallelism. +* 3D parallelism of DeepSpeed provides system support to run models with trillions of parameters, read more in our [press-release]({{ site.press_release_v3 }}) and [tutorial](/tutorials/pipeline). +* DeepSpeed can run large models more efficiently, up to 10x + faster for models with + various sizes spanning 1.5B to hundred billion. More specifically, the data parallelism powered by ZeRO + is complementary and can be combined with different types of model parallelism. It allows + DeepSpeed to fit models using lower degree of model parallelism and higher batch size, offering + significant performance gains compared to using model parallelism alone. + + *Read more*: [ZeRO paper](https://arxiv.org/abs/1910.02054), + and [GPT tutorial](/tutorials/megatron). + +![DeepSpeed Speedup](/assets/images/deepspeed-speedup.png) +

+The figure depicts system throughput improvements of DeepSpeed (combining ZeRO-powered data parallelism with model parallelism of NVIDIA Megatron-LM) over using Megatron-LM alone. +

+ +## Communication efficiency +Pipeline parallelism of DeepSpeed reduce communication volume during distributed training, which allows users to train multi-billion-parameter models 2–7x faster on clusters with limited network bandwidth. +![Low-bandwidth GPT-2 Performance](/assets/images/pp-lowbw-gpt2.png) + +1-bit Adam, 0/1 Adam and 1-bit LAMB reduce communication volume by up to 26x while achieving similar convergence efficiency to Adam, allowing for scaling to different types of GPU clusters and networks. [1-bit Adam blog post](https://www.deepspeed.ai/2020/09/08/onebit-adam-blog-post.html), [1-bit Adam tutorial](https://www.deepspeed.ai/tutorials/onebit-adam/), [0/1 Adam tutorial](https://www.deepspeed.ai/tutorials/zero-one-adam/), [1-bit LAMB tutorial](https://www.deepspeed.ai/tutorials/onebit-lamb/). + +## Data efficiency +DeepSpeed Data Efficiency Library provides efficient data sampling via curriculum learning and efficient data routing via random layerwise token dropping. The composed solution enables up to 2x data and 2x time saving during GPT-3/BERT pretraining and GPT/ViT finetuning, or further improve model quality under the same data/time. See more in [the tutorial](/tutorials/data-efficiency). + +## Supporting long sequence length +DeepSpeed offers sparse attention kernels—an instrumental technology to support long sequences of model inputs, whether for text, image, or sound. Compared with the classic dense Transformers, it powers **an order-of-magnitude longer input sequence** and obtains up to 6x faster execution with comparable accuracy. It also outperforms state-of-the-art sparse implementations with 1.5–3x faster execution. Furthermore, our sparse kernels support efficient execution of flexible sparse format and empower users to innovate on their custom sparse structures. [Read more here](https://www.deepspeed.ai/2020/09/08/sparse-attention.html). 
+ + +## Fast convergence for effectiveness +DeepSpeed supports advanced hyperparameter tuning and large batch size +optimizers such as [LAMB](https://arxiv.org/abs/1904.00962). These improve the +effectiveness of model training and reduce the number of samples required to +convergence to desired accuracy. + +*Read more*: [Tuning tutorial](/tutorials/one-cycle). + + +## Good Usability +Only a few lines of code changes are needed to enable a PyTorch model to use DeepSpeed and ZeRO. Compared to current model parallelism libraries, DeepSpeed does not require a code redesign or model refactoring. It also does not put limitations on model dimensions (such as number of attention heads, hidden sizes, and others), batch size, or any other training parameters. For models of up to 13 billion parameters, you can use ZeRO-powered data parallelism conveniently without requiring model parallelism, while in contrast, standard data parallelism will run out of memory for models with more than 1.4 billion parameters. In addition, DeepSpeed conveniently supports flexible combination of ZeRO-powered data parallelism with custom model parallelisms, such as tensor slicing of NVIDIA's Megatron-LM. + + +## Features + +Below we provide a brief feature list, see our detailed [feature overview](https://www.deepspeed.ai/features/) for descriptions and usage. 
+ +* [Distributed Training with Mixed Precision](https://www.deepspeed.ai/features/#distributed-training-with-mixed-precision) + * 16-bit mixed precision + * Single-GPU/Multi-GPU/Multi-Node +* [Model Parallelism](https://www.deepspeed.ai/features/#model-parallelism) + * Support for Custom Model Parallelism + * Integration with Megatron-LM +* [Pipeline Parallelism](https://www.deepspeed.ai/tutorials/pipeline/) + * 3D Parallelism +* [The Zero Redundancy Optimizer](https://www.deepspeed.ai/tutorials/zero/) + * Optimizer State and Gradient Partitioning + * Activation Partitioning + * Constant Buffer Optimization + * Contiguous Memory Optimization +* [ZeRO-Offload](https://www.deepspeed.ai/tutorials/zero-offload/) + * Leverage both CPU/GPU memory for model training + * Support 10B model training on a single GPU +* [Ultra-fast dense transformer kernels](https://www.deepspeed.ai/2020/05/18/bert-record.html) +* [Sparse attention](https://www.deepspeed.ai/2020/09/08/sparse-attention-news.html) + * Memory- and compute-efficient sparse kernels + * Support 10x long sequences than dense + * Flexible support to different sparse structures +* [1-bit Adam](https://www.deepspeed.ai/2020/09/08/onebit-adam-blog-post.html), [0/1 Adam](https://www.deepspeed.ai/tutorials/zero-one-adam/) and [1-bit LAMB](https://www.deepspeed.ai/tutorials/onebit-lamb/) + * Custom communication collective + * Up to 26x communication volume saving +* [Additional Memory and Bandwidth Optimizations](https://www.deepspeed.ai/features/#additional-memory-and-bandwidth-optimizations) + * Smart Gradient Accumulation + * Communication/Computation Overlap +* [Training Features](https://www.deepspeed.ai/features/#training-features) + * Simplified training API + * Gradient Clipping + * Automatic loss scaling with mixed precision +* [Training Optimizers](https://www.deepspeed.ai/features/#training-optimizers) + * Fused Adam optimizer and arbitrary `torch.optim.Optimizer` + * Memory bandwidth optimized FP16 Optimizer + 
* Large Batch Training with LAMB Optimizer + * Memory efficient Training with ZeRO Optimizer + * CPU-Adam +* [Training Agnostic Checkpointing](https://www.deepspeed.ai/features/#training-agnostic-checkpointing) +* [Advanced Parameter Search](https://www.deepspeed.ai/features/#advanced-parameter-search) + * Learning Rate Range Test + * 1Cycle Learning Rate Schedule +* [Simplified Data Loader](https://www.deepspeed.ai/features/#simplified-data-loader) +* [Data Efficiency](https://www.deepspeed.ai/tutorials/data-efficiency/) + * Efficient data sampling via curriculum learning and efficient data routing via random layerwise token dropping + * Up to 2x data and 2x time saving during GPT-3/BERT pretraining and GPT/ViT finetuning + * Or further improve model quality under the same data/time +* [Curriculum Learning](https://www.deepspeed.ai/tutorials/curriculum-learning/) + * A curriculum learning-based data pipeline that presents easier or simpler examples earlier during training + * Stable and 3.3x faster GPT-2 pre-training with 8x/4x larger batch size/learning rate while maintaining token-wise convergence speed + * Complementary to many other DeepSpeed features + * Note that the Data Efficiency Library above provides more general curriculum learning support. This legacy curriculum learning feature is still supported but we recommend to use the Data Efficiency Library. 
+* [Progressive Layer Dropping](https://www.deepspeed.ai/2020/10/28/progressive-layer-dropping-news.html) + * Efficient and robust compressed training + * Up to 2.5x convergence speedup for pre-training +* [Performance Analysis and Debugging](https://www.deepspeed.ai/features/#performance-analysis-and-debugging) +* [Mixture of Experts (MoE)](https://www.deepspeed.ai/tutorials/mixture-of-experts/) + + +--- +title: "Feature Overview" +layout: single +permalink: /features/ +toc: true +toc_label: "Contents" +--- + +## Distributed Training with Mixed Precision + +### Mixed Precision Training +Enable 16-bit (FP16) training by adding the following in the `deepspeed_config` JSON. +```json +"fp16": { + "enabled": true, + "loss_scale": 0, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 +} +``` + +### Single-GPU, Multi-GPU, and Multi-Node Training +Easily switch between single-GPU, single-node multi-GPU, or multi-node multi-GPU +execution by specifying resources with a hostfile. +```bash +deepspeed --hostfile= \ + \ + --deepspeed --deepspeed_config ds_config.json +``` +The script `` will execute on the resources specified in +[``](/getting-started/#resource-configuration-multi-node). + +## Pipeline Parallelism +DeepSpeed provides [pipeline parallelism](/tutorials/pipeline/) for memory- +and communication- efficient training. DeepSpeed supports a hybrid +combination of data, model, and pipeline parallelism and has scaled to over +[one trillion parameters using 3D parallelism]({{ site.press_release_v3 }}). +Pipeline parallelism can also improve communication efficiency and has +accelerated training by up to 7x on low-bandwidth clusters. + + +## Model Parallelism +### Support for Custom Model Parallelism +DeepSpeed supports all forms of model parallelism including tensor slicing +based approaches such as the +[Megatron-LM](https://github.com/NVIDIA/Megatron-LM). 
It does so by only +requiring the model parallelism framework to provide a *model parallelism +unit* (`mpu`) that implements a few bookkeeping functionalities: + +```python +mpu.get_model_parallel_rank() +mpu.get_model_parallel_group() +mpu.get_model_parallel_world_size() + +mpu.get_data_parallel_rank() +mpu.get_data_parallel_group() +mpu.get_data_parallel_world_size() +``` + +### Integration with Megatron-LM +DeepSpeed is fully compatible with [Megatron](https://github.com/NVIDIA/Megatron-LM). +Please see the [Megatron-LM tutorial](/tutorials/megatron/) for details. + + + + +## The Zero Redundancy Optimizer +The Zero Redundancy Optimizer ([ZeRO](https://arxiv.org/abs/1910.02054)) is at +the heart of DeepSpeed and enables large model training at a scale that is +simply not possible with model parallelism alone. When enabled, ZeRO allows +training models with over 13 billion parameters without any model parallelism, +and up to 200 billion parameter models with model parallelism on current +generation hardware. + +For more details see the [ZeRO paper](https://arxiv.org/abs/1910.02054), [GPT +tutorial](/tutorials/megatron/) on integration with +DeepSpeed. + +### Optimizer State and Gradient Partitioning +Optimizer State and Gradient Partitioning in ZeRO reduces the memory consumption of the +model states (optimizer states, gradients and parameters) by 8x compared to standard +data parallelism by partitioning these states across data parallel processes instead of +replicating them. + +### Activation Partitioning +Activation Partitioning is a memory optimization in ZeRO that can reduce the memory +consumed by activations during model parallel training (MP). In MP certain +activations may be required by all MP processes, resulting in a replication of +activations across MP GPUs. Activation Partitioning stores these activations in a +partitioned state once they are used for computation in the forward propagation. 
These +activations are allgathered right before they are needed again during the backward propagation. +By storing activations in a partitioned state, ZeRO in DeepSpeed can reduce the activation +memory footprint proportional to the MP degree. + +### Constant Buffer Optimization (CBO) +CBO enables high network and memory throughput while restricting memory usage to a +constant size. For memory- and network-bound operations such as normalization or +allreduce collectives, the performance depends on the size of the operand. Simply fusing +all operands into a single large operand can enable great throughput at the expense of +unnecessary memory overhead. CBO in DeepSpeed fuses smaller operands into approximately a +pre-defined sized buffer large enough to achieve great performance without the +unnecessary memory overhead. + +### Contiguous Memory Optimization (CMO) +CMO reduces memory fragmentation during training, preventing out of memory errors +due to lack of contiguous memory. Memory fragmentation is a result of interleaving between +short lived and long lived memory objects. During the forward propagation activation +checkpoints are long lived but the activations that are recomputed are short lived. Similarly, +during the backward computation, the activation gradients are short lived while the parameter +gradients are long lived. CMO transfers activation checkpoints and parameter gradients +to contiguous buffers preventing memory fragmentation. + +## ZeRO-Offload + +ZeRO-Offload pushes the boundary of the maximum model size that can be trained efficiently using minimal GPU resources, by exploiting computational and memory resources on both GPUs and their host CPUs. It allows training up to 13-billion-parameter models on a single NVIDIA V100 GPU, 10x larger than the state-of-the-art, while retaining high training throughput of over 30 teraflops per GPU. 
+ +For more details see the [ZeRO-Offload release blog](https://www.microsoft.com/en-us/research/?p=689370&secret=iSlooB), and [tutorial](/tutorials/zero-offload/) on integration with DeepSpeed. + +## Additional Memory and Bandwidth Optimizations + +### Smart Gradient Accumulation +Gradient accumulation allows running larger batch size with limited memory by breaking an +effective batch into several sequential micro-batches, and averaging the parameter +gradients across these micro-batches. Furthermore, instead of averaging the gradients of +each micro-batch across all GPUs, the gradients are averaged locally during each step of +the sequence, and a single `allreduce` is done at the end of the sequence to produce the +averaged gradients for the effective batch across all GPUs. This strategy significantly +reduces the communication involved over the approach of averaging globally for each +micro-batch, especially when the number of micro-batches per effective batch is large. + +### Communication Overlapping +During back propagation, DeepSpeed can overlap the communication required for averaging +parameter gradients that have already been computed with the ongoing gradient computation. +This computation-communication overlap allows DeepSpeed to achieve higher throughput even +at modest batch sizes. + +## Training Features + +### Simplified training API +The DeepSpeed core API consists of just a handful of methods: +* initialization: `initialize` +* training: `backward` and `step` +* argument parsing: `add_config_arguments` +* checkpointing: `load_checkpoint` and `store_checkpoint` + +DeepSpeed supports most of the features described in this document, via the use of these APIs, +along with a `deepspeed_config` JSON file for enabling and disabling the features. +Please see the [core API doc](https://deepspeed.readthedocs.io/) for more details. 
+ +### Activation Checkpointing API + +DeepSpeed's Activation Checkpointing API supports activation checkpoint partitioning, +cpu checkpointing, and contiguous memory optimizations, while also allowing layerwise +profiling. Please see the [core API doc](https://deepspeed.readthedocs.io/) for more details. + + +### Gradient Clipping +```json +{ + "gradient_clipping": 1.0 +} +``` +DeepSpeed handles gradient clipping under the hood based on the max gradient norm +specified by the user. +Please see the [core API doc](https://deepspeed.readthedocs.io/) for more details. + +### Automatic loss scaling with mixed precision +DeepSpeed internally handles loss scaling for mixed precision training. The parameters +for loss scaling can be specified in the `deepspeed_config` JSON file. +Please see the [core API doc](https://deepspeed.readthedocs.io/) for more details. + +## Training Optimizers + +### 1-bit Adam, 0/1 Adam and 1-bit LAMB optimizers with up to 26x less communication + +DeepSpeed has three communication-efficient optimizers called 1-bit Adam, 0/1 Adam and 1-bit LAMB. +They offer the same convergence as Adam/LAMB, incur up to 26x less communication that enables +up to 6.6x higher throughput for BERT-Large pretraining and up to 2.7x higher throughput +for SQuAD fine-tuning on bandwidth-limited clusters. For more details on usage and performance, +please refer to the [1-bit Adam tutorial](https://www.deepspeed.ai/tutorials/onebit-adam), +[1-bit Adam blog post](https://www.deepspeed.ai/news/2020/09/09/onebit-adam-blog-post.md), +[0/1 Adam tutorial](https://www.deepspeed.ai/tutorials/zero-one-adam) +and [1-bit LAMB tutorial](https://www.deepspeed.ai/tutorials/onebit-lamb/). For technical details, +please refer to the [1-bit Adam paper](https://arxiv.org/abs/2102.02888), [0/1 Adam paper](https://arxiv.org/abs/2202.06009) and +[1-bit LAMB paper](https://arxiv.org/abs/2104.06069). 
+ +### Fused Adam optimizer and arbitrary torch.optim.Optimizer +With DeepSpeed, the user can choose to use a high performance implementation of ADAM from +NVIDIA, or any training optimizer that extends torch's `torch.optim.Optimizer` class. + +### CPU-Adam: High-Performance vectorized implementation of Adam +We introduce an efficient implementation of Adam optimizer on CPU that improves the parameter-update +performance by nearly an order of magnitude. We use the AVX SIMD instructions on Intel-x86 architecture +for the CPU-Adam implementation. We support both AVX-512 and AVX-2 instruction sets. DeepSpeed uses +AVX-2 by default which can be switched to AVX-512 by setting the build flag, `DS_BUILD_AVX512` to 1 when +installing DeepSpeed. Using AVX-512, we observe 5.1x to 6.5x speedups considering the model-size between +1 to 10 billion parameters with respect to torch-adam. + +### Memory bandwidth optimized FP16 Optimizer +Mixed precision training is handled by the DeepSpeed FP16 Optimizer. This optimizer not +only handles FP16 training but is also highly efficient. The performance of weight update +is primarily dominated by the memory bandwidth, and the achieved memory bandwidth is +dependent on the size of the input operands. The FP16 Optimizer is designed to maximize +the achievable memory bandwidth by merging all the parameters of the model into a single +large buffer, and applying the weight updates in a single kernel, allowing it to achieve +high memory bandwidth. + +### Large Batch Training with LAMB Optimizer + +DeepSpeed makes it easy to train with large batch sizes by enabling the LAMB Optimizer. +For more details on LAMB, see the [LAMB paper](https://arxiv.org/pdf/1904.00962.pdf). + +### Memory-Efficient Training with ZeRO Optimizer +DeepSpeed can train models with up to 13 billion parameters without model parallelism, and +models with up to 200 billion parameters with 16-way model parallelism. 
This leap in +model size is possible through the memory efficiency achieved via the ZeRO Optimizer. For +more details see [ZeRO paper](https://arxiv.org/abs/1910.02054) . + + + +## Training Agnostic Checkpointing +DeepSpeed can simplify checkpointing for you regardless of whether you are using data +parallel training, model parallel training, mixed-precision training, a mix of these +three, or using the zero optimizer to enable larger model sizes. +Please see the [Getting Started](/getting-started/) guide +and the [core API doc](https://deepspeed.readthedocs.io/) for more details. + +## Advanced parameter search +DeepSpeed supports multiple Learning Rate Schedules to enable faster convergence for +large batch scaling. + +### Learning Rate Range Test +Please refer to the [Learning Rate Range Test](/tutorials/lrrt/) tutorial. + +### 1Cycle Learning Rate Schedule +Please refer to the [1Cycle Learning Rate Schedule](/tutorials/1Cycle/) tutorial. + + +## Simplified Data Loader +DeepSpeed abstracts away data parallelism and model parallelism from the user when it +comes to data loading. Users simply provide a PyTorch dataset, and DeepSpeed data loader +can automatically handle batch creation appropriately. + +## Data Efficiency +Please refer to the [Data Efficiency](/tutorials/data-efficiency/) tutorial. + +## Curriculum Learning +Please refer to the [Curriculum Learning](/tutorials/curriculum-learning/) tutorial. Note that the Data Efficiency Library above provides more general curriculum learning support. This legacy curriculum learning feature is still supported but we recommend to use the Data Efficiency Library. + +## Performance Analysis and Debugging + +DeepSpeed provides a set of tools for performance analysis and debugging. + +### Wall Clock Breakdown + +DeepSpeed provides a detailed breakdown of the time spent +in different parts of the training. +This can be enabled by setting the following in the `deepspeed_config` file. 
+ +```json +{ + "wall_clock_breakdown": true +} + +``` + +### Timing Activation Checkpoint Functions + +When activation checkpointing is enabled, profiling the forward and backward time of each checkpoint function can be enabled in the `deepspeed_config` file. + +```json +{ + "activation_checkpointing": { + "profile": true + } +} + +``` + +### Flops Profiler + +The DeepSpeed flops profiler measures the time, flops and parameters of a PyTorch model and shows which modules or layers are the bottleneck. When used with the DeepSpeed runtime, the flops profiler can be configured in the `deepspeed_config` file as follows: + +```json +{ + "flops_profiler": { + "enabled": true, + "profile_step": 1, + "module_depth": -1, + "top_modules": 3, + "detailed": true + } +} + +``` +The flops profiler can also be used as a standalone package. Please refer to the [Flops Profiler](/tutorials/flops-profiler) tutorial for more details. + + +### Autotuning + +The DeepSpeed Autotuner uses model information, system information, and heuristics to efficiently tune Zero stage, micro batch size, and other Zero configurations. Using the autotuning feature requires no code change from DeepSpeed users. While `"autotuning": {"enabled": true}` is the minimum required to enable autotuning, there are other parameters users can define to configure the autotuning process. Below shows major parameters and their default values in the autotuning configuration. Please refer to the [Autotuning](/tutorials/autotuning) tutorial for more details. 
+ +```json +{ + "autotuning": { + "enabled": true, + "results_dir": null, + "exps_dir": null, + "overwrite": false, + "metric": "throughput", + "num_nodes": null, + "num_gpus": null, + "start_profile_step": 3, + "end_profile_step": 5, + "fast": true, + "num_tuning_micro_batch_sizes": 3, + "tuner_type": "model_based", + "tuner_early_stopping": 5, + "tuner_num_trials": 50, + "arg_mappings": null + } +} + +``` + +### Monitor + +The DeepSpeed Monitor logs live training metrics to one or more monitoring backends, including PyTorch's [TensorBoard](https://pytorch.org/docs/1.8.0/tensorboard.html), [WandB](https://docs.wandb.ai/quickstart), or simply to CSV files. The Monitor can be configured with one or more backends in the `deepspeed_config` file as follows: + +```json +{ + "tensorboard": { + "enabled": true, + "output_path": "output/ds_logs/", + "job_name": "train_bert" + }, + "wandb": { + "enabled": true, + "team": "my_team", + "group": "my_group", + "project": "my_project" + }, + "csv_monitor": { + "enabled": true, + "output_path": "output/ds_logs/", + "job_name": "train_bert" + } +} + +``` + +The Monitor can also be added to log custom metrics and client codes. Please refer to the [Monitor](/tutorials/monitor) tutorial for more details. + +### Communication Logging + +DeepSpeed provides logging of all communication operations launched within `deepspeed.comm`. The communication logger can be configured in the `deepspeed_config` file as follows: + +```json +{ + "comms_logger": { + "enabled": true, + "verbose": false, + "prof_all": true, + "debug": false + } +} + +``` + +Client codes can then print a summary with a call to `deepspeed.comm.log_summary()`. For more details and example usage, see the [Communication Logging](/tutorials/comms-logging) tutorial. 
+ +## Sparse Attention +DeepSpeed offers sparse attention to support long sequences. Please refer to the [Sparse Attention](/tutorials/sparse-attention/) tutorial. + +```bash +--deepspeed_sparse_attention +``` + +```json +"sparse_attention": { + "mode": "fixed", + "block": 16, + "different_layout_per_head": true, + "num_local_blocks": 4, + "num_global_blocks": 1, + "attention": "bidirectional", + "horizontal_global_attention": false, + "num_different_global_patterns": 4 +} +``` + +## Mixture of Experts (MoE) +To learn more about training Mixture of Experts (MoE) models with DeepSpeed, see our [tutorial](https://www.deepspeed.ai/tutorials/mixture-of-experts/) for more details. diff --git a/docs/_posts/2020-09-09-ZeRO-Offload.md b/docs/_posts/2020-09-09-ZeRO-Offload.md old mode 100644 new mode 100755 diff --git a/docs/_posts/2020-10-28-progressive-layer-dropping-news.md b/docs/_posts/2020-10-28-progressive-layer-dropping-news.md old mode 100644 new mode 100755 diff --git a/docs/_posts/2022-03-21-amd-support.md b/docs/_posts/2022-03-21-amd-support.md index 0c9ca3f..ba8917b 100644 --- a/docs/_posts/2022-03-21-amd-support.md +++ b/docs/_posts/2022-03-21-amd-support.md @@ -3,5 +3,5 @@ title: "Supporting efficient large model training on AMD Instinct GPUs with Deep excerpt: "" link: https://cloudblogs.microsoft.com/opensource/2022/03/21/supporting-efficient-large-model-training-on-amd-instinct-gpus-with-deepspeed/ date: 2022-03-21 00:00:00 -tags: training inference ZeRO +tags: training ZeRO --- diff --git a/docs/_posts/2022-07-26-deepspeed-azure.md b/docs/_posts/2022-07-26-deepspeed-azure.md new file mode 100644 index 0000000..128cbf4 --- /dev/null +++ b/docs/_posts/2022-07-26-deepspeed-azure.md @@ -0,0 +1,135 @@ +--- +title: "Azure empowers easy-to-use, high-performance, and hyperscale model training using DeepSpeed" +excerpt: "" +date: 2022-07-26 00:09:00 +tags: training azure +--- + +## Introduction + +Large-scale transformer-based deep learning models trained on large 
amounts of data have shown great results in recent years in several cognitive tasks and are behind new products and features that augment human capabilities. These models have grown several orders of magnitude in size during the last five years. Starting from a few million parameters of the original transformer model all the way to the latest 530 billion-parameter Megatron-Turing model as shown in *Figure 1*. There is a growing need for customers to train and fine tune large models at an unprecedented scale. + +![Large Models](/assets/images/large-model-graph.png){: .align-center} + +*Figure 1: Landscape of large models and hardware capabilities* + +To train these models, users needed to set up and maintain a complex distributed training infrastructure that usually required several manual and error-prone steps. These lead to a subpar experience both in terms of usability and performance. We recently [announced](https://azure.microsoft.com/en-us/blog/azure-empowers-easytouse-highperformance-and-hyperscale-model-training-using-deepspeed/) how we are making great strides to simplify this and enable easy-to-use and high-performance training at 1K+ GPU scale on Azure. + +In this extended post, we share the details of how DeepSpeed users can train trillion-parameter models with a new easy-to-use, streamlined, scalable, and high-performance distributed training experience on Azure. We also share details of the experimental setup, model configurations, additional performance trends, and guide our users on how to run these experiments in their own environments. + +## Making distributed training faster and easier on Azure using DeepSpeed + +We compare the existing manual and error-prone workflow with our proposed easy-to-use workflow for DeepSpeed on Azure in *Figure 2*. Customers can now use easy-to-use [training pipelines](https://github.com/microsoft/Megatron-DeepSpeed/tree/main/examples) to launch training jobs at scale. 
The new workflow reduces the number of steps from 11 to just 1 if users rely on the recommended [AzureML](https://azure.microsoft.com/en-us/services/machine-learning/) [recipes](https://github.com/microsoft/Megatron-DeepSpeed/tree/main/examples/azureml). + + +![Workflow](/assets/images/old-vs-new-azure.png){: .align-center} + +*Figure 2: An easy-to-use and streamlined distributed training experience with DeepSpeed on Azure* + +For users who have custom environments built using Azure VMs or [Azure VMSS](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/overview), only two steps are needed: + +- 1) Run the cluster setup script (to be released in the next few weeks) +- 2) Use the Azure VMSS [recipes](https://github.com/microsoft/Megatron-DeepSpeed/tree/main/examples/azure) to launch training. + +## Key Performance Benefits +We already shared a summary of our key performance results in the Azure [announcement](https://azure.microsoft.com/en-us/blog/azure-empowers-easytouse-highperformance-and-hyperscale-model-training-using-deepspeed/). We enable the capability to train 2x larger model sizes (2 trillion vs. 1 trillion parameters), scale to 2x more GPUs (1024 vs. 512), and offer up to 1.8x higher compute throughput/GPU (150 TFLOPs vs. 81 TFLOPs) compared to other [cloud providers](https://medium.com/pytorch/training-a-1-trillion-parameter-model-with-pytorch-fully-sharded-data-parallel-on-aws-3ac13aa96cff). + +DeepSpeed on Azure offers near-linear scalability both in terms of **increase in model size** as well as **increase in number of GPUs**. 
As shown in *Figure 3a*, together with the DeepSpeed [ZeRO-3](https://www.microsoft.com/en-us/research/blog/zero-infinity-and-deepspeed-unlocking-unprecedented-model-scale-for-deep-learning-training/), its novel CPU offloading capabilities, and a high-performance Azure stack powered by InfiniBand interconnects and A100 GPUs, we were able to maintain an efficient throughput/GPU (>157 TFLOPs) in a near-linear fashion as the model size increases from 175 billion parameters to 2 trillion parameters. On the other hand, for a given model size, e.g., 175B, we achieve near-linear scaling as we increase the number of GPUs from 128 all the way to 1024 as shown in *Figure 3b*. The key takeaway is that Azure and DeepSpeed together are breaking the GPU memory wall and enabling our customers to easily and efficiently train trillion-parameter models at scale. + +![Perf-overview](/assets/images/perf-overview.png){: .align-center} + +*Figure 3: (a) Near-perfect throughput/GPU as we increase the model size from 175 billion to 2 trillion parameters (BS/GPU=8). (b) Near-perfect performance scaling with the increase in number of GPU devices for the 175B model (BS/GPU=16). The sequence length is 1024 for both cases.* + +## Experimental Setup +We share the details of our experimental setup and some of the best practices we followed. The users can either directly use them to reproduce our results or modify them to fit their own setup in terms of model scale as well as the scale of Azure hardware being provisioned. + +### Hardware (Azure instances) + +We used [NDm A100 v4-series](https://docs.microsoft.com/en-us/azure/virtual-machines/ndm-a100-v4-series) instances in our experiments. Each instance includes two socket AMD EPYC 7V12 64-Core CPUs, 1.7TB main memory and eight A100 80GB GPUs. The system has a balanced PCIe topology connecting 4 GPU devices to each CPU socket. 
Each GPU within the VM is provided with its own dedicated, topology-agnostic 200 Gb/s NVIDIA Mellanox HDR InfiniBand connection providing an accelerated 200 Gbps high speed fabric. The DeepSpeed library exploits offload capabilities where the activation and optimizer states are allocated in the main memory. Hence, 1.7TB memory capacity per node helps us to scale to large model sizes. + +### Training setup using AzureML +Users can directly use the AzureML studio and use our published [recipes](https://github.com/microsoft/Megatron-DeepSpeed/tree/main/examples/azureml) to run experiments without any additional setup. This is the easiest and recommended way of running experiments on Azure. + +### Training setup using Azure VMSS + +Existing VMSS customers and others who have custom Azure VM based environments can follow the setup as follows. The scripts to make these steps easy will be released in the coming weeks. +A cluster is created using Azure Virtual Machine Scale Sets (VMSS) to provision the desired number of compute nodes running the new Azure HPAI VM image specialized for extreme-scale deep learning applications using the software stack listed in *Table 1*. + +| Name | Description (Version) | +| ------------------------------: | :----------------: | +| PyTorch | 1.10.2 (installed from source) | +| DeepSpeed | 0.6.2 (installed from source) | +| Megatron-LM | [https://github.com/microsoft/Megatron-DeepSpeed](https://github.com/microsoft/Megatron-DeepSpeed) | +| Apex | 0.1 | +| NCCL | 2.12.10 | +| CUDNN | 8.2.4.15 | +| CUDA | 11.4 | +| CUDA Driver | R470.82 | +| VM Image | Ubuntu-HPC 20.04 Image | + +*Table 1: Detailed version information of the software packages in the Azure HPC VM image* + +Users can create a VMSS with up to 600 VM instances enabling up to 4,800 A100 GPUs. 
In addition to the VMSS for the compute nodes, we provision a distinct login node using an inexpensive D4s v4 (or similar) instance with 4-core Intel VCPU, running the same image, for compiling, launching, and monitoring jobs. The login node, compute nodes, and a shared storage filesystem are grouped within an Azure Virtual Network (vnet) allowing VMs to connect to each other over SSH and to shared NFS volume shown in *Figure 4*. + +![VMSS-overview](/assets/images/vmss-setup.png){: .align-center} + +*Figure 4: Organization of our VMSS-based experimental setup* + +## Performance Evaluation on Various Model Configurations + +We ran our experiments with four different model sizes – 175B, 530B, 1T, and 2T – using the configurations shown in *Table 2*. + +| Model Size | 175B | 530B | 1T | 2T | +| :---------: | ---: | ---: | ---: | ---: | +| Number of layers | 96 | 105 | 128 | 160 | +| Hidden Dimension | 12,288 | 20,480 | 25,600 | 32,768 | +| Attention Heads | 96 | 128 | 160 | 128 | + +*Table 2: Model configuration* + +For each of these configurations, we report peak throughput of the system using TFLOPs/GPU as the main performance metric. To calculate TFLOPs, we use the formula used by the Megatron paper as shown below. + +```FLOPs/GPU = 96 * B * s * l * h2 * (1 + s/6h + V/(16*l*h))``` + +B is batch size, s is sequence length, l is the number of layers, h is hidden size, and V is vocabulary size. + +### Scaling the 175B and 530B models +*Figures 5a* and *5b* show the results of 175B model with sequence length 512 and 1024, respectively. We only scale to 512 GPUs for seq-length 512 as adding more GPUs shows similar performance. On the other hand, with sequence length 1024, we saw linear performance increase to 1024 GPUs. Overall, the peak throughput of **204.49 TFLOPs/GPU** was achieved on 256 GPUs with a micro batch size of 32 and sequence length of 512. 
+ +![175b-overview](/assets/images/175b-trend.png){: .align-center} + +*Figure 5: Performance characteristics of 175B model on 512 and 1K GPUs respectively. The colored columns signify different micro batch sizes.* + +Next, we report the 530B model scaling. Previous results on the 530B MT-NLG model using DeepSpeed and Megatron-LM on 280 DGX A100 servers on the Selene supercomputer showed the peak throughput of 126 TFLOPS/GPU. However, we were able to surpass that throughput and achieved up to **171.37 TFLOPs/GPU** on 128 NDm A100 v4-series A100 systems (i.e., 1024 GPUs) as shown in *Figure 6*. + +The benefit of this 530B model is its simpler parallelization configuration as there is no tensor/pipeline parallelism. With ZeRO powered data parallelism, there are fewer heuristics required to optimally configure the distributed model. In addition, the consistent steady state performance of more than 140 TFLOPs/GPU for micro batch sizes >1 demonstrates a robust software and hardware platform. + +![530b-overview](/assets/images/530b-trend.png){: .align-center} + +*Figure 6: Throughput achieved with a 530B parameter model on 512 and 1024 GPUs for micro-batch sizes per GPU of 1, 2, 4, and 8, with sequence length 1,024.* + +### Scaling the 1T and 2T models + +The 1T parameter model contains 128 layers with 160 attention heads. Training such an extreme-scale model is not an easy task. *Figure 7* shows the throughput achieved for each of the model configurations we explored on 512 and 1024 GPUs. Peak throughput achieved was **165.36 TFLOPs/GPU** for micro batch size of 8 across 1024 GPUs and the model reached steady state performance within the first 3-4 iterations. 
+ +![1t-overview](/assets/images/1t-trend.png){: .align-center} + +*Figure 7: Performance characteristics of 1T parameter model on 512 and 1024 GPUs with 1, 2, 4, and 8 micro batch sizes, with sequence length 1,024.*{: .align-center} + +The 2T parameter model consists of 160 layers, 32k hidden dimension, and 128 attention heads. Given the large size of the model and the significant time required on 1024 GPUs, we limited our benchmark runs for the 2T model to a batch size of 8 per GPU with a sequence length of 1024. We were able to achieve 157 TFLOPs/GPU on 1,024 GPUs. + +## How to run training experiments on Azure? + +We recognize that DeepSpeed users are diverse and have different environments. In this tutorial, our focus is on making things simpler for users who plan to run large model training experiments on Azure. + +> The easiest way to do model training on Azure is via the Azure ML recipes. The job submission and data preparation scripts have been made available [here](https://github.com/microsoft/Megatron-DeepSpeed/tree/main/examples/azureml). Users simply need to setup their Azure ML workspace following the [guide](https://github.com/Azure/azureml-examples/tree/main/python-sdk#set-up) and submit experiment using the aml_submit.py file. + +Some users have customized environments built on top of Azure VMs and VMSS based clusters. To simplify training on such setups, we are working on an easy-to-use cluster setup script that will be published in the next few weeks. If you already have a cluster setup running, you can use the [azure recipes](https://github.com/microsoft/Megatron-DeepSpeed/tree/main/examples/azure) for the 175B and the 1T model. The recipes can easily be modified to train other model configurations. + +## Acknowledgement + +This blog post was written by the DeepSpeed team in collaboration with the AzureML and the AzureHPC team. We would like to acknowledge several individuals who made this work possible: +- AzureHPC team: Russell J. 
Hewett, Kushal Datta, Prabhat Ram, Jithin Jose, and Nidhi Chappell +- AzureML team: Vijay Aski, Razvan Tanase, Miseon Park, Savita Mittal, Ravi Shankar Kolli, Prasanth Pulavarthi, and Daniel Moth +- DeepSpeed team: Ammar Ahmad Awan, Jeff Rasley, Samyam Rajbhandari, Martin Cai, and Yuxiong He +- CTO office: Gopi Kumar and Luis Vargas diff --git a/docs/_posts/2022-09-10-zero-inference.md b/docs/_posts/2022-09-10-zero-inference.md new file mode 100644 index 0000000..dd718b9 --- /dev/null +++ b/docs/_posts/2022-09-10-zero-inference.md @@ -0,0 +1,122 @@ +--- +title: "ZeRO-Inference: Democratizing massive model inference" +excerpt: "" +date: 2022-09-10 00:09:00 +tags: inference ZeRO +--- + +## Introduction +The current trends in artificial intelligence (AI) domains such as image, speech, and natural language, demonstrate that model quality can be improved by increasing model size. In natural language processing, for example, the state-of-the-art (SOTA) model has grown from 300 million parameters (Bert-Large) to 500 billion parameters (Megatron-Turing-530B) in less than four years. However, this dramatic growth in model sizes has significantly increased the GPU cost to train, finetune or inference these models, making them unaffordable to most users. To democratize access to AI innovations, large organizations, such as Hugging Face (BigScience), Meta, and Yandex have recently publicly released pre-trained massive models. Unfortunately, even these publicly available models are not broadly usable because many users cannot afford the dozens of GPUs required to fit them for inference computation. For example, half-precision inference computation on Megatron-Turing-530B (SOTA model for natural language) requires at least 40 A100-40GB GPUs, which is unaffordable to many students, model scientists, hobbyists, and small businesses that could benefit from using these powerful models. 
And so, a real concern is that if the dramatic increase in model sizes continues, then a growing fraction of users could be excluded from the benefits of these AI innovations. + +DeepSpeed, a part of Microsoft’s AI at Scale Initiative, has developed the ZeRO-Inference technology to address these obstacles to AI democratization. ZeRO-Inference comes from the family of ZeRO technologies, which are a collection of powerful memory and parallelism optimizations for efficient large scale model training and inference on modern GPU clusters. DeepSpeed had previously developed ZeRO-Infinity, a technology that leverages heterogeneous memory (GPU, CPU, and NVMe) to efficiently scale model training to extreme levels. ZeRO-Inference adapts and optimizes ZeRO-Infinity techniques for model inference on GPUs by hosting the model weights in CPU or NVMe memory, thus hosting no (**zero**) weights in GPU. This approach is inspired by the observation that the aggregate capacity of CPU and NVMe memories in most commodity computing devices (e.g., laptops, desktops, workstations, etc.) is on the order of terabytes and sufficient to host the largest known models for inference computation. By leveraging this non-GPU memory, ZeRO-Inference enables inference computation of massive models (with hundreds of billions of parameters) on as few as a single GPU, thereby making massive model inference accessible to almost everyone. Moreover, by dramatically reducing GPU memory requirements with CPU or NVMe memory which are significantly cheaper, it significantly reduces the cost of massive model inference, offering an affordable inference path to SOTA models. + +## How ZeRO-Inference works +The massive computational requirements of large model inference means that accelerators like GPUs are required for efficient execution. 
Therefore, an important design decision for large model inference on limited GPU budget is how to apportion GPU memory among model weights, inference inputs, and intermediate results. + +### Offload all model weights +ZeRO-Inference pins the entire model weights in CPU or NVMe (whichever is sufficient to accommodate the full model) and streams the weights layer-by-layer into the GPU for inference computation. After computing a layer, the outputs are retained in GPU memory as inputs for the next layer, while memory consumed by the layer weights is released for use by the next layer. Thus, model inference time is composed of the time to compute the layers on GPU, and the time to fetch the layers over PCIe. For large model inference, this approach provides scaling and efficiency benefits, as explained below. + +ZeRO-Inference offers scaling benefits in two ways. First, by keeping just one (or a few) model layers in GPU memory at any time, ZeRO-Inference significantly reduces the amount of GPU memory required to inference massive models. For current SOTA models which have about a hundred layers (e.g., 96 and 105 layers in GPT3-175B and Megatron-Turing-530B respectively), ZeRO-Inference reduces the GPU memory requirements by up to two orders of magnitude. For example, with ZeRO-Inference, GPU memory consumption of Megaton-Turing-530B for half-precision inference drops from 1TB to 10GB. Second, by fitting the model into CPU or NVMe memory which are orders of magnitude cheaper than GPU memory, ZeRO-Inference makes scaling to future SOTA models (e.g., with trillions or tens-of-trillions of parameters) more affordable compared to approaches that fit the entire model into GPU memory. + +ZeRO-Inference delivers efficient computation for throughput-oriented inference applications despite the latency of fetching model weights from CPU or NVMe over PCIe interconnect. 
The primary reason for this is that by limiting GPU memory usage of the model to one or a few layers of weights, ZeRO-Inference can use the majority of GPU memory to support a large amount of input tokens in the form of long sequences or large batch sizes. A large model layer requires a significant amount of computation, especially when processing inputs with many input tokens. For example, one GPT3-175B layer requires about 7 TFlops to process an input of batch size 1 and sequence length of 2048. Therefore, for inference scenarios with long sequence length and large batch sizes, the computation time dominates the latency of fetching model weights, which ultimately improves efficiency. In summary, ZeRO-Inference's strategy to utilize GPU memory to support large number of input tokens results in high performance inference for large models. + +### Optimizations +To further improve system efficiency, ZeRO-Inference leverages two additional optimizations to reduce the latency of fetching layer weights from CPU or NVMe memory into GPU memory. + +The first optimization involves overlapping the fetch of a layer with the computation of an earlier layer, a.k.a., layer prefetching. Layer prefetching allows ZeRO-Inference to hide portions of the transfer latency of the prefetched layers. This is especially useful when computation time is not large enough or cannot be sufficiently increased (e.g., with larger batch size) to dominate the latency of fetching layer weights. + +The second optimization, which is applicable for inference on multiple GPUs, involves parallelizing the fetch of each layer across multiple GPUs by using each GPU to fetch only a portion of the layer. Employing the aggregate PCIe links of the GPUs in this manner essentially increases the transfer bandwidth linearly, thus reducing the latency. With this approach, fetching layers into GPU memory occurs in two phases. First, each GPU independently fetches a partition of the layer over PCIe into its memory. 
At this point, only a partition of the layer will be resident on each GPU. Next, each GPU assembles the full layer for computation by fetching the missing layer pieces from other GPUs over the high-bandwidth GPU-GPU interconnect (e.g., NVLink, xGMI, etc.). Since GPU-GPU interconnect bandwidth is typically over an order of magnitude higher than PCIe bandwidth, efficient multi-GPU or multi-node communication primitives, such as NCCL or RCCL all-gather, can be used to efficiently assemble the full layer on all GPUs with negligible latency compared to the PCIe latency. + +### Alternative approach: Host some model weights in GPU memory +An alternative approach to ZeRO-Inference is to pin as many of the model weights as possible into GPU memory and fetch the remainder (from CPU or NVMe) when needed for computation. A benefit of this approach is avoidance of the latency of fetching weights that are already pinned in GPU memory. However, this approach has two downsides: (i) the latency savings for hundred-billion parameter models are negligible since only a small fraction of the weights can fit in GPU memory, and (ii) even when a decent portion of the model weights can fit (e.g., > 50% for ~10B models), the remaining GPU memory can only fit small batch sizes which hurts inference throughput. We later show evaluation results to demonstrate that this approach is sub-optimal. + + +## Model Scaling on 1 GPU +ZeRO-Inference enables significant model scaling for inference on a single GPU compared to a baseline that hosts the model in GPU memory (i.e., HBM). As an example, we consider half-precision model inference using a single NVIDIA Tesla V100 GPU in a NVIDIA DGX2 system. While the V100 GPU has 32GB of memory, the system is equipped with 1.5TB of CPU DRAM and 30TB of NVMe storage. The maximum model size supported for inference computation on GPU depends on the memory in which the model is hosted. 
*Figure 1* below shows the achievable model scales in this system for GPU inference with ZeRO-Inference. In comparison, the baseline cannot support models larger than 16 billion parameters for GPU inference[^model_scale]. In contrast, ZeRO-Inference has the flexibility to host the model in a different memory (DRAM or NVMe) than HBM. This flexibility allows ZeRO-Inference to support much larger models than baseline. For example, by hosting a model on NVMe memory, Zero-Inference can support models with up to 15 trillion parameters for GPU inference, which is almost a thousand times larger compared to baseline. A practical takeaway from *Figure 1* is that ZeRO-Inference enables single GPU inference computation of current SOTA models, since they are smaller than 15 trillion parameters. + +![Model-Scaling](/assets/images/zero_inference_model_scale.png){: .align-center} + +[^model_scale]: 16 billion parameters model won’t fit in V100-32GB for half-precision inference since no memory will be left for inputs and intermediate results. + +## Token Generation Performance +An important inference workload is token generation based on an input prompt. In this workload the model is provided a text sequence as input prompt, and based on this prompt, the model generates output text of configurable length. We use this workload to demonstrate the performance of ZeRO-Inference. This workload consists of two phases: (1) the prompt processing phase where the model processes the input prompt, and (2) the generation phase where the model generates the output tokens. + +ZeRO-Inference is targeted for throughput-oriented inference applications, and so the performance metric that we use for this workload is the number of tokens generated per second in the generation phase. We use the Hugging Face token generation pipeline in our experiments to measure the performance of using a greedy search algorithm to generate ten output tokens given an input prompt of four tokens. 
The generation pipeline in our experiments uses KV-caching optimization to improve performance by caching generated tokens to avoid re-computation. We consider the performance impact of three aspects of ZeRO-Inference design choices and optimizations: (1) full offloading model weights as opposed to partial offloading, (2) prefetching layer weights ahead of use, and (3) using multiple GPUs to parallelize layer fetching over PCIe. Additionally, we measure the performance impact of varying the number of output tokens. + +### Models +For our experiments, we use the three publicly available massive language models listed in *Table 1*. We configure these models for half-precision inference computations. ZeRO-Inference is required to inference these models on a single V100-32GB since they are bigger than GPU memory. + +![Public-models](/assets/images/zero_inference_models.png){: .align-center} + +### Full Offload vs. Partial Offload of model weights +A key design choice in ZeRO-Offload is to offload all the weights of models larger than GPU memory rather than host a subset of the weights in GPU memory. Our intuition for this approach is that for throughput-oriented inference applications, the larger batch sizes enabled by full offload yields better performance than partial offload. In *Table 2*, we present results for OPT-30B token generation on a single V100-32GB that compare fully offloading the model weights versus hosting a portion (i.e., 10 and 12 billion parameters[^partial_offload]) in GPU memory. The results show that full offload delivers the best performance for both CPU memory (43 tokens per second) and NVMe memory (30 tokens per second). With both CPU and NVMe memory, full offload is over 1.3x and 2.4x faster than partial offload of 18 and 20 billion parameters respectively. The performance advantage of full offload comes from the larger batch sizes compared to the partial offload options. 
**Thus when a model does not fit in GPU, using GPU memory to increase batch size rather than to partially fit the model leads to faster token generation.** + +![Full-offload](/assets/images/zero_inference_full_offload.png){: .align-center} + +[^partial_offload]: Pinning more parameters in GPU memory resulted in out of memory errors for small batch sizes. + +### Prefetching layer weights +ZeRO-Inference fetches layers ahead of use, overlapping with current layer computation, to hide layer transfer latency. We measure the impact of prefetching on token generation performance on a single V100-32GB and summarize the results in *Table 3*. We observe that prefetching did not improve CPU offload. This is because the relatively short sequences in token generation (i.e., less than 50 tokens) resulted in layer computation time that is insufficient to hide a significant portion of layer fetch time from CPU. In contrast, prefetching improves NVMe offloading performance by 1.13x, 1.14x and 1.21x for OPT-30B, OPT-175B, and BLOOM-176B respectively. This is because transferring weights from NVMe through CPU memory allows prefetching to overlap transfers from CPU to GPU memory with transfers from NVMe to CPU boosting the effective transfer bandwidth. + +![Prefetch-Layer](/assets/images/zero_inference_prefetch.png){: .align-center} + +### Parallelizing layer fetching on multiple GPUs +ZeRO-Inference leverages the four PCIe interconnects between GPUs and CPU memory to parallelize layer fetching for faster inference computations on multiple GPUs. In *Table 4*, we report the throughput improvements for token generation on two and four GPUs compared to a single GPU[^multi_gpu_pcie] . These results were collected with layer prefetching enabled. The reported throughput numbers are per GPU showing that token generation becomes faster on each GPU as the aggregated PCIe links reduce the layer fetch latencies. **The improved per GPU throughput translates to super-linear scaling performance**. 
Additionally, these results suggest improved bandwidths of future PCIe generations could help to improve ZeRO-Inference performance. + +![Multi-GPU](/assets/images/zero_inference_multi_gpu.png){: .align-center} + +[^multi_gpu_pcie]: For multiple GPU runs, we select GPUs with independent PCIe interconnects to CPU memory. + + +### Impact of generation output length +We measure the performance impact of the number of output tokens since the memory overhead of KV-caching optimization increases with longer output tokens and could limit batch size. First, we consider the impact of token lengths 10, 20, 50, and 100 on batch size that can fit one V100-32GB GPU. The results in *Table 5* show a 2X reduction in batch size for a 5X increase in token count (compared to baseline count of 10). + +![Token-count-batch-size](/assets/images/zero_inference_token_count_batch_size.png){: .align-center} + +Next, we measure the impact on generation throughput using four V100-32GB GPUs. The results are presented in Table 6 for CPU offload, and Table 7 for NVMe-Offload. We observe an impact that is consistent across models and offload memory, which is that increasing the number of output tokens reduces throughput proportionally to batch size reduction. These results also demonstrate the importance of large batch sizes to the performance of ZeRO-Inference. + +![Token-count-cpu-throughput](/assets/images/zero_inference_token_count_cpu_throughput.png){: .align-center} + +![Token-count-nvme-throughput](/assets/images/zero_inference_token_count_nvme_throughput.png){: .align-center} + +## Using ZeRO-Inference +We briefly discuss how users can determine when ZeRO-Inference is suitable for their application and how to enable ZeRO-Inference in DeepSpeed. + +### When to use ZeRO-Inference +ZeRO-Inference is designed for inference applications that require GPU acceleration but lack sufficient GPU memory to host the model. 
Also, ZeRO-Inference is optimized for inference applications that are **throughput-oriented** and allow **large batch sizes**. Alternative techniques, such as [Accelerate](https://github.com/huggingface/accelerate), [DeepSpeed-Inference](https://www.deepspeed.ai/inference/), and [DeepSpeed-MII](https://github.com/microsoft/deepspeed-mii) that fit the entire model into GPU memory, possibly using multiple GPUs, are more suitable for inference applications that are latency sensitive or have small batch sizes. + +### How to use ZeRO-Inference +ZeRO-Inference is available in the DeepSpeed library versions >= 0.6.6. Integrating ZeRO-Inference into token generation pipelines, such as [Hugging Face generate](https://huggingface.co/docs/transformers/main_classes/text_generation), requires updating the DeepSpeed configuration to set [ZeRO optimization](https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training) to stage 3 and [parameter offloading](https://www.deepspeed.ai/docs/config-json/#parameter-offloading) to CPU or NVMe. + +Below is a configuration snippet for enabling ZeRO-Inference with offloading to CPU memory. +```json + "zero_optimization": { + "stage": 3, + "offload_param": { + "device": "cpu", + ... + }, + ... + } +``` + +Below is a configuration snippet for offloading to a NVMe device mounted on "/local_nvme". +```json + "zero_optimization": { + "stage": 3, + "offload_param": { + "device": "nvme", + "nvme_path": "/local_nvme", + ... + }, + ... + } +``` + + +## Conclusion +Recent advances in AI technology have primarily come from extreme scaling of model sizes. However, extreme model scaling has also made the hardware cost of training and inferencing prohibitive for all but the largest organizations, severely restricting access to AI innovations. To help democratize AI, we developed ZeRO-Inference, a technology that enables inference computations of massive models on as few as a single GPU. 
ZeRO-Inference reduces the GPU cost of SOTA model inference by hosting the model on CPU or NVMe memory and streaming the model layers into GPU memory for inference computation. ZeRO-Inference complements the democratization efforts of large organizations that publicly release pre-trained SOTA models by ensuring that inference computation of these models is affordable for most users (e.g., students, hobbyists, model scientists, etc.). + + +## Acknowledgement +The DeepSpeed team would like to acknowledge Stas Bekman for previewing this blog and providing valuable feedback. diff --git a/docs/_posts/2022-10-11-mii.md b/docs/_posts/2022-10-11-mii.md new file mode 100644 index 0000000..8a39731 --- /dev/null +++ b/docs/_posts/2022-10-11-mii.md @@ -0,0 +1,216 @@ +--- +title: "DeepSpeed-MII: instant speedup on 24,000+ open-source DL models with up to 40x cheaper inference" +excerpt: "" +date: 2022-10-11 00:09:00 +tags: inference +--- + +[ ![Text Generation Models](/assets/images/mii/hero.png) ](/assets/images/mii/hero.png){: .align-center} + +The Deep Learning (DL) open-source community has seen tremendous growth in the last few months. Incredibly powerful text generation models such as the Bloom 176B, or image generation models such as Stable Diffusion are now available to anyone with access to a handful or even a single GPU through platforms such as Hugging Face. While open-sourcing has democratized access to AI capabilities, their application is still restricted by two critical factors: 1) inference latency and 2) cost. + +There has been significant progress in system optimizations for DL model inference that can drastically reduce both latency and cost, but those are not easily accessible. The main reason for this limited accessibility is that the DL model inference landscape is diverse with models varying in size, architecture, system performance characteristics, hardware requirements, etc. 
Identifying the appropriate set of system optimizations applicable to a given model and applying them correctly is often beyond the scope of most data scientists, making low latency and low-cost inference mostly inaccessible. + +[DeepSpeed Model Implementations for Inference (MII)](https://github.com/microsoft/DeepSpeed-MII) is a new open-source python library from DeepSpeed, aimed towards making low-latency, low-cost inference of powerful models not only feasible but also easily accessible. + +* MII offers access to highly optimized implementations of **thousands of widely used DL models.** +* MII supported models achieve significantly lower latency and cost compared to their original implementation. + + MII reduces the **latency of Big-Science Bloom 176B model by 5.7x**, while reducing the **cost by over 40x as shown in *Figures 2 (left) and 8***. + + MII reduces the latency and cost of deploying **Stable Diffusion by 1.9x as shown in *Figure 2 (right)***. +* To enable low latency/cost inference, MII leverages an extensive set of optimizations from DeepSpeed-Inference such as *deepfusion* for transformers, automated *tensor-slicing* for multi-GPU inference, on-the-fly quantization with *ZeroQuant*, and several others (see below for more details). +* With state-of-the-art performance, MII supports low-cost deployment of these models both on-premises and on Azure via AML with just a **few lines of codes**. + +# How does MII work? + +[ ![Text Generation Models](/assets/images/mii/mii-arch.png) ](/assets/images/mii/mii-arch.png) + +*Figure 1: MII Architecture, showing how MII automatically optimizes OSS models using DS-Inference before deploying them on-premises using GRPC, or on Microsoft Azure using AML Inference.* + + +Under-the-hood MII is powered by [DeepSpeed-Inference](https://arxiv.org/abs/2207.00032). 
Based on the model type, model size, batch size, and available hardware resources, MII automatically applies the appropriate set of system optimizations from DeepSpeed-Inference to minimize latency and maximize throughput. It does so by using one of many pre-specified model injection policies, that allows MII and DeepSpeed-Inference to identify the underlying PyTorch model architecture and replace it with an optimized implementation (see *Figure 1*). In doing so, MII makes the expansive set of optimizations in DeepSpeed-Inference automatically available for thousands of popular models that it supports. + +# Supported Models and Tasks + +MII supports a growing list of tasks such as text generation, question-answering, text classification, etc, across thousands of transformer models available through multiple open-sourced model repositories such as Hugging Face, FairSeq, EluetherAI, etc. It supports dense models based on BERT, RoBERTa, GPT, OPT, and BLOOM architectures ranging from a few hundred million parameters in size to hundreds of billions of parameters in size. At the same time, it supports recent image generation models such as Stable Diffusion. + +See the MII GitHub repo for an up-to-date list of [models and tasks supported by MII](https://github.com/microsoft/deepspeed-mii#supported-models-and-tasks). + +# Inference Optimizations with MII + +Here we provide a summary of the expansive set of optimizations from DeepSpeed-inference made available via MII. For more details, please refer to \[[1](https://arxiv.org/abs/2207.00032), [2](https://arxiv.org/abs/2206.01861)\]: + +**DeepFusion for Transformers:** For transformer-based models such as Bert, Roberta, GPT-2, and GPT-J, MII leverages the transformer kernels in DeepSpeed-Inference that are optimized to achieve low latency at small batch sizes and high throughput at large batch sizes using DeepFusion. 
+ +**Multi-GPU Inference with Tensor-Slicing:** For massive models such as Bloom 176B, MII automatically enables tensor-parallelism within a node to leverage aggregate memory bandwidth and compute across multiple GPUs to achieve the lowest latency and throughput compared to anything else that is currently available. + +**INT8 Inference with ZeroQuant:** For massive models with tens or hundreds of billions of parameters, MII supports INT8 Inference with ZeroQuant. Using this feature not only reduces the memory footprint and the number of GPUs required for inference but also increases the inference throughput by supporting larger batch sizes and using INT8 compute, thus lowering cost compared to FP16. + +**ZeRO-Inference for Resource Constrained Systems:** Models such as Bloom 176B, require over 176 GB of memory to just fit the model even with INT8 support. In the absence of the aggregate GPU memory across multiple GPUs required to deploy such models, MII enables [ZeRO-Inference](https://www.deepspeed.ai/2022/09/09/zero-inference.html) that can leverage the system CPU memory to deploy these massive models with a single GPU with limited memory. + +**Compiler Optimizations:** When applicable, MII automatically applies compiler-based optimizations via [TorchScript](https://pytorch.org/docs/stable/jit.html), [nvFuser](https://pytorch.org/blog/introducing-nvfuser-a-deep-learning-compiler-for-pytorch/), and [CUDA graph](https://developer.nvidia.com/blog/cuda-graphs/), in addition to the above optimizations, to further lower latency and improve throughput. + +# MII-Public and MII-Azure + +MII can work with two variations of DeepSpeed-Inference. The first, referred to as ds-public, contains most of the optimizations discussed above and is also available via our open-source DeepSpeed library. The second referred to as ds-azure, offers tighter integration with Azure, and is available via MII to all Microsoft Azure customers. 
We refer to MII running the two DeepSpeed-Inference variants as MII-Public and MII-Azure, respectively. + +Both MII-Public and MII-Azure offer significant latency and cost reduction compared to open-sourced PyTorch implementation (Baseline). However for certain generative workloads, they can have differentiated performance: MII-Azure provides further improvements beyond MII-Public. We quantify the latency and cost reduction for both variations in the next section. + +# Quantifying Latency and Cost Reduction + +Inference workloads can be either latency critical, where the primary objective is to minimize latency, or cost sensitive, where the primary objective is to minimize cost. In this section, we quantify the benefits of using MII for both latency-critical and cost-sensitive scenarios. + +## Latency Critical Scenarios + +For latency-critical scenarios, where a small batch size of 1 is often used, MII can reduce the latency by up to 6x for a wide range of open-source models, across multiple tasks. More specifically, we show model latency reduction of [^overhead_details]: + +1. Up to 5.7x for multi-GPU inference for text generation using massive models such as Big Science Bloom, Facebook OPT, and EluetherAI NeoX (*Figure 2 (left)*) + +2. Up to 1.9x for image generation tasks model using Stable Diffusion (*Figure 2 (right)*) + +3. Up to 3x for relatively smaller text generation models (up to 7B parameters) based on OPT, BLOOM, and GPT architectures, running on a single GPU (*Figures 3 and 4*) + +4. Up to 9x for various text representation tasks like fill-mask, text classification, question answering, and token classification using RoBERTa- and BERT- based models (*Figures 5 and 6*). + +[ ![multi gpu latency](/assets/images/mii/llm-latency-sd-latency.png) ](/assets/images/mii/llm-latency-sd-latency-zoom.png){: .align-center} +*Figure 2: (left) Best achievable latency for large models. MII-Azure (int8) offers 5.7X lower latency compared to Baseline for Bloom-176B. 
(right) Stable Diffusion text to image generation latency comparison.* + + + +[ ![OPT and BLOOM Models](/assets/images/mii/opt-bloom.png) ](/assets/images/mii/opt-bloom.png){: .align-center} +*Figure 3: Latency comparison for OPT and BLOOM models. MII-Azure is up to 2.8x faster than baseline.* + +[ ![GPT Models](/assets/images/mii/gpt.png) ](/assets/images/mii/gpt.png){: .align-center} +*Figure 4: Latency comparison for GPT models. MII-Azure is up to 3x faster than baseline.* + +[ ![Roberta Models](/assets/images/mii/roberta.png) ](/assets/images/mii/roberta.png){: .align-center} +*Figure 5: Latency comparison for RoBERTa models. MII offers up to 9x lower model latency and up to 3x lower end-to-end latency than baseline on several tasks and RoBERTa variants [^overhead_details].* + +[ ![Bert Models](/assets/images/mii/bert.png) ](/assets/images/mii/bert.png){: .align-center} +*Figure 6: Latency comparison for BERT models. MII offers up to 8.9x lower model latency and up to 4.5x end-to-end latency across several tasks and BERT variants[^overhead_details].* + +[^overhead_details]: The end-to-end latency of an inference workload is comprised of two components: i) actual model execution, and ii) pre-/post-processing before and after the model execution. MII optimizes the actual model execution but leaves the pre-/post-processing pipeline for future optimizations. We notice that text representation tasks have significant pre-/post-processing overhead (*Figures G and H*). We plan to address those in a future update. + +## Cost Sensitive Scenarios + +MII can significantly reduce the inference cost of very expensive language models like Bloom, OPT, etc. To get the lowest cost, we use a large batch size that maximizes throughput for both baseline and MII. Here we look at the cost reduction from MII using two different metrics: i) tokens generated per second per GPU, and ii) dollars per million tokens generated. 
+ +*Figures 7 and 8* show that MII-Public offers over 10x throughput improvement and cost reduction compared to the baseline, respectively. Furthermore, MII-Azure offers over 30x improvement in throughput and cost compared to the baseline. + +[ ![tput large models](/assets/images/mii/tput-llms.png) ](/assets/images/mii/tput-llms.png){: .align-center} +*Figure 7: Throughput comparison per A100-80GB GPU for large models. MII-Public offers over 15x throughput improvement while MII-Azure offers over 40x throughput improvement.* + +[ ![azure cost](/assets/images/mii/azure-cost.png) ](/assets/images/mii/azure-cost.png){: .align-center} +*Figure 8: Cost of generating 1 million tokens on Azure with different model types. MII-Azure reduces the cost of generation by over 40x.* + +# Deployment Options + +MII supported models can be deployed in two different ways as shown in *Figure 1* with just a few lines of code. + +## MII-Public Deployment + +MII-Public can be deployed on-premises or on any cloud offering. MII creates a lightweight GRPC server to support this form of deployment and provides a GRPC inference endpoint for queries. The code below shows how a supported model can be deployed with MII-Public Deployment. + +```python +import mii +mii.deploy(task="text-to-image", + model="CompVis/stable-diffusion-v1-4", + deployment_name="sd-deployment") +``` + +## MII-Azure Deployment + +MII supports deployment on Azure via AML Inference. To enable this, MII generates AML deployment assets for a given model that can be deployed using the [Azure-CLI](https://learn.microsoft.com/en-us/cli/azure/what-is-azure-cli), as shown in the code below. Furthermore, deploying on Azure, allows MII to leverage DeepSpeed-Azure as its optimization backend, which offers better latency and cost reduction than DeepSpeed-Public. 
 + +```python +import mii +mii.deploy(task="text-to-image", + model="CompVis/stable-diffusion-v1-4", + deployment_name="sd-deployment", + deployment_type=mii.DeploymentType.AML) +``` + +To learn more about these deployment options and get started with MII, please see the [MII getting started guide](https://github.com/microsoft/deepspeed-mii#getting-started-with-mii). + +# Concluding Remarks + +We are very excited to share MII with the community and improve it with your feedback. We will continue to add support for more models in MII as well as enhance both MII-Public and MII-Azure for both on-premise and Azure users. Our hope is that while open sourcing has made powerful AI capabilities accessible to many, MII will allow for a wider infusion of these capabilities into a diverse set of applications and product offerings by instantly reducing the latency and cost of inferencing. + +# Appendix + +The table below shows the mapping between model aliases used in *Figures 3, 4, 5, and 6* and real model names. 
+ +| Alias | Model Name | +| --- | --- | +| text-gen-m1 | [sberbank-ai/rugpt3large_based_on_gpt2](https://huggingface.co/sberbank-ai/rugpt3large_based_on_gpt2) | +| text-gen-m2 | [skt/kogpt2-base-v2](https://huggingface.co/skt/kogpt2-base-v2) | +| text-gen-m3 | [geralt/MechDistilGPT2](https://huggingface.co/geralt/MechDistilGPT2) | +| text-gen-m4 | [mrm8488/distilgpt2-finetuned-wsb-tweets](https://huggingface.co/mrm8488/distilgpt2-finetuned-wsb-tweets) | +| text-gen-m5 | [Norod78/hebrew-bad_wiki-gpt_neo-tiny](https://huggingface.co/Norod78/hebrew-bad_wiki-gpt_neo-tiny) | +| text-gen-m6 | [shibing624/code-autocomplete-distilgpt2-python](https://huggingface.co/shibing624/code-autocomplete-distilgpt2-python) | +| text-gen-m7 | [mrm8488/diltilgpt2-finetuned-bookcopus-10](https://huggingface.co/mrm8488/diltilgpt2-finetuned-bookcopus-10) | +| bert-q&a-m1 | [bert-large-uncased-whole-word-masking-finetuned-squad](https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad) | +| bert-q&a-m2 | [deepset/bert-large-uncased-whole-word-masking-squad2](https://huggingface.co/deepset/bert-large-uncased-whole-word-masking-squad2) | +| bert-q&a-m3 | [nyust-eb210/braslab-bert-drcd-384](https://huggingface.co/nyust-eb210/braslab-bert-drcd-384) | +| bert-q&a-m4 | [deepset/minilm-uncased-squad2](https://huggingface.co/deepset/minilm-uncased-squad2) | +| bert-token-class-m1 | [dslim/bert-large-NER](https://huggingface.co/dslim/bert-large-NER) | +| bert-token-class-m2 | [dbmdz/bert-large-cased-finetuned-conll03-english](https://huggingface.co/dbmdz/bert-large-cased-finetuned-conll03-english) | +| bert-token-class-m3 | [dslim/bert-base-NER](https://huggingface.co/dslim/bert-base-NER) | +| bert-token-class-m4 | [CAMeL-Lab/bert-base-arabic-camelbert-mix-ner](https://huggingface.co/CAMeL-Lab/bert-base-arabic-camelbert-mix-ner) | +| bert-fill-mask-m1 | [bert-base-multilingual-cased](https://huggingface.co/bert-base-multilingual-cased) | +| bert-fill-mask-m2 | 
[bert-base-multilingual-uncased](https://huggingface.co/bert-base-multilingual-uncased) | +| bert-fill-mask-m3 | [wietsedv/bert-base-dutch-cased](https://huggingface.co/wietsedv/bert-base-dutch-cased) | +| bert-fill-mask-m4 | [nlpaueb/bert-base-greek-uncased-v1](https://huggingface.co/nlpaueb/bert-base-greek-uncased-v1) | +| bert-fill-mask-m5 | [dbmdz/bert-base-italian-xxl-cased](https://huggingface.co/dbmdz/bert-base-italian-xxl-cased) | +| bert-fill-mask-m6 | [aubmindlab/bert-base-arabertv02](https://huggingface.co/aubmindlab/bert-base-arabertv02) | +| bert-fill-mask-m7 | [dccuchile/bert-base-spanish-wwm-uncased](https://huggingface.co/dccuchile/bert-base-spanish-wwm-uncased) | +| bert-fill-mask-m8 | [bert-base-german-cased](https://huggingface.co/bert-base-german-cased) | +| bert-fill-mask-m9 | [bert-base-uncased](https://huggingface.co/bert-base-uncased) | +| bert-fill-mask-m10 | [dbmdz/bert-base-german-cased](https://huggingface.co/dbmdz/bert-base-german-cased) | +| bert-fill-mask-m11 | [nlpaueb/legal-bert-base-uncased](https://huggingface.co/nlpaueb/legal-bert-base-uncased) | +| bert-fill-mask-m12 | [KB/bert-base-swedish-cased](https://huggingface.co/KB/bert-base-swedish-cased) | +| bert-fill-mask-m13 | [indolem/indobertweet-base-uncased](https://huggingface.co/indolem/indobertweet-base-uncased) | +| bert-fill-mask-m14 | [emilyalsentzer/Bio_ClinicalBERT](https://huggingface.co/emilyalsentzer/Bio_ClinicalBERT) | +| bert-fill-mask-m15 | [asafaya/bert-mini-arabic](https://huggingface.co/asafaya/bert-mini-arabic) | +| bert-text-class-m1 | [DTAI-KULeuven/mbert-corona-tweets-belgium-topics](https://huggingface.co/DTAI-KULeuven/mbert-corona-tweets-belgium-topics) | +| bert-text-class-m2 | [avichr/heBERT_sentiment_analysis](https://huggingface.co/avichr/heBERT_sentiment_analysis) | +| bert-text-class-m3 | [finiteautomata/beto-sentiment-analysis](https://huggingface.co/finiteautomata/beto-sentiment-analysis) | +| bert-text-class-m4 | 
[ProsusAI/finbert](https://huggingface.co/ProsusAI/finbert) | +| bert-text-class-m5 | [cross-encoder/ms-marco-MiniLM-L-12-v2](https://huggingface.co/cross-encoder/ms-marco-MiniLM-L-12-v2) | +| bert-text-class-m6 | [nlptown/bert-base-multilingual-uncased-sentiment](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment) | +| bert-text-class-m7 | [microsoft/xtremedistil-l6-h256-uncased](https://huggingface.co/microsoft/xtremedistil-l6-h256-uncased) | +| bert-text-class-m8 | [cross-encoder/ms-marco-MiniLM-L-6-v2](https://huggingface.co/cross-encoder/ms-marco-MiniLM-L-6-v2) | +| fill-mask-m1 | [vinai/bertweet-large](https://huggingface.co/vinai/bertweet-large) | +| fill-mask-m2 | [klue/roberta-large](https://huggingface.co/klue/roberta-large) | +| fill-mask-m3 | [sberbank-ai/ruRoberta-large](https://huggingface.co/sberbank-ai/ruRoberta-large) | +| q&a-m1 | [deepset/roberta-large-squad2](https://huggingface.co/deepset/roberta-large-squad2) | +| token-class-m1 | [Jean-Baptiste/roberta-large-ner-english](https://huggingface.co/Jean-Baptiste/roberta-large-ner-english) | +| text-class-m1 | [cross-encoder/stsb-roberta-large](https://huggingface.co/cross-encoder/stsb-roberta-large) | +| text-class-m2 | [siebert/sentiment-roberta-large-english](https://huggingface.co/siebert/sentiment-roberta-large-english) | +| text-class-m3 | [roberta-large-mnli](https://huggingface.co/roberta-large-mnli) | +| fill-mask-m4 | [vinai/bertweet-base](https://huggingface.co/vinai/bertweet-base) | +| fill-mask-m5 | [vinai/phobert-base](https://huggingface.co/vinai/phobert-base) | +| fill-mask-m6 | [microsoft/graphcodebert-base](https://huggingface.co/microsoft/graphcodebert-base) | +| fill-mask-m7 | [vinai/bertweet-covid19-base-uncased](https://huggingface.co/vinai/bertweet-covid19-base-uncased) | +| fill-mask-m8 | [uklfr/gottbert-base](https://huggingface.co/uklfr/gottbert-base) | +| fill-mask-m9 | 
[cardiffnlp/twitter-roberta-base](https://huggingface.co/cardiffnlp/twitter-roberta-base) | +| fill-mask-m10 | [microsoft/codebert-base-mlm](https://huggingface.co/microsoft/codebert-base-mlm) | +| fill-mask-m11 | [pdelobelle/robbert-v2-dutch-base](https://huggingface.co/pdelobelle/robbert-v2-dutch-base) | +| fill-mask-m12 | [ufal/robeczech-base](https://huggingface.co/ufal/robeczech-base) | +| q&a-m2 | [Rakib/roberta-base-on-cuad](https://huggingface.co/Rakib/roberta-base-on-cuad) | +| q&a-m3 | [thatdramebaazguy/roberta-base-squad](https://huggingface.co/thatdramebaazguy/roberta-base-squad) | +| text-class-m4 | [roberta-base-openai-detector](https://huggingface.co/roberta-base-openai-detector) | +| text-class-m5 | [pysentimiento/robertuito-emotion-analysis](https://huggingface.co/pysentimiento/robertuito-emotion-analysis) | +| text-class-m6 | [cardiffnlp/twitter-roberta-base-sentiment](https://huggingface.co/cardiffnlp/twitter-roberta-base-sentiment) | +| text-class-m7 | [cardiffnlp/twitter-roberta-base-sentiment-latest](https://huggingface.co/cardiffnlp/twitter-roberta-base-sentiment-latest) | +| q&a-m4 | [deepset/roberta-base-squad2](https://huggingface.co/deepset/roberta-base-squad2) | +| text-class-m8 | [textattack/roberta-base-SST-2](https://huggingface.co/textattack/roberta-base-SST-2) | +| text-class-m9 | [cardiffnlp/twitter-roberta-base-emotion](https://huggingface.co/cardiffnlp/twitter-roberta-base-emotion) | +| text-class-m10 | [pysentimiento/robertuito-sentiment-analysis](https://huggingface.co/pysentimiento/robertuito-sentiment-analysis) | +| text-class-m11 | [finiteautomata/bertweet-base-sentiment-analysis](https://huggingface.co/finiteautomata/bertweet-base-sentiment-analysis) | +| fill-mask-m13 | [huggingface/CodeBERTa-small-v1](https://huggingface.co/huggingface/CodeBERTa-small-v1) | +| q&a-m5 | [deepset/tinyroberta-squad2](https://huggingface.co/deepset/tinyroberta-squad2) | +| text-class-m12 | 
[j-hartmann/emotion-english-distilroberta-base](https://huggingface.co/j-hartmann/emotion-english-distilroberta-base) | diff --git a/docs/_posts/2022-12-12-data-efficiency.md b/docs/_posts/2022-12-12-data-efficiency.md new file mode 100644 index 0000000..3b6adb4 --- /dev/null +++ b/docs/_posts/2022-12-12-data-efficiency.md @@ -0,0 +1,144 @@ +--- +title: "DeepSpeed Data Efficiency: A composable library that makes better use of data, increases training efficiency, and improves model quality" +excerpt: "" +date: 2022-12-12 00:09:00 +tags: training +--- + +[ ![DeepSpeed Data Efficiency](/assets/images/data_efficiency/data_efficiecy_fig0.png) ](/assets/images/data_efficiency/data_efficiecy_fig0.png){: .align-center} + +Recently, large-scale deep learning models are empowering us to achieve more in many ways, such as [improving programming efficiency by code generation](https://github.com/features/copilot) and [providing art inspiration by text-to-image generation](https://www.microsoft.com/en-us/microsoft-365/blog/2022/10/12/new-tools-from-microsoft-365-edge-and-bing-bridge-the-gap-between-productivity-and-creativity/). To enable these services and keep improving the quality, deep learning model architecture evolves rapidly, and the model size is also growing at a tremendous speed. For example, from GPT to GPT-3 the model size increased 1500x in 2 years. The increasing model size leads to unprecedented training cost, making it challenging for many AI practitioners to train their own models. On the other hand, a less-emphasized perspective is that **data scale is actually increasing at a similar speed as model scale, and the training cost is proportional to both of them.** In Figure 1 below we plot the model and data scales of several representative language models in the last 5 years. From the oldest model on the left to the newest models on the right, both the model and data scales increase at similar speed. 
This demonstrates the importance of improving data efficiency: achieve same model quality with less data and reduced training cost, or achieve better model quality with the same amount of data and similar training cost. + +[ ![Model and data scales](/assets/images/data_efficiency/data_efficiecy_fig1.png) ](/assets/images/data_efficiency/data_efficiecy_fig1.png){: .align-center} + +*Figure 1: Model scale (number of parameters) and data scale (number of tokens consumed during training) of representative language models in the last 5 years.* + +There are two popular research directions among existing data efficiency techniques: Data sampling techniques aim to improve the convergence speed by sampling the most suitable next data batch from the whole data pool; Data routing techniques aim to reduce the computation by routing each data to only a subset of the model components. These techniques improve data and training efficiency, but existing solutions on them have limitations on **extensibility, flexibility, and composability.** They are commonly designed for specific training tasks, making them hard to be extended with customized strategies and making them less flexible to be applied on diverse workloads from different users. Furthermore, different techniques are implemented separately, making it challenging to compose multiple solutions to further improve data and training efficiency. + +To address these challenges, we, the DeepSpeed team as part of Microsoft’s [AI at Scale](https://www.microsoft.com/en-us/research/project/ai-at-scale/) initiative, are proud to announce **DeepSpeed Data Efficiency Library** – a composable framework that makes better use of data, increases training efficiency, and improves model quality. 
DeepSpeed Data Efficiency takes extensibility, flexibility, and composability into consideration, and it specifically demonstrates the following innovations: + +**Efficient data sampling via curriculum learning.** Curriculum learning (CL) improves data efficiency by sampling from easier data. We present a general curriculum learning library which enables users to employ curriculum learning to their models at **maximum extensibility**: users can easily analyze, index, and sample their training data based on various customizable strategies. Using this library, we were able to explore different CL strategies for GPT-3 and BERT pretraining and identify the best solution that provides up to **1.5x data saving** while still maintaining similar model quality. + +**Efficient data routing via random layerwise token dropping.** We present a novel data routing technique called random layerwise token dropping (random-LTD) to skip the computation of a subset of the input tokens at all middle layers. Random-LTD employs a simple yet effective routing strategy and requires **minimal model architecture change.** It is **flexible** to apply random-LTD to various tasks (GPT-3/BERT pretraining and GPT/ViT finetuning), and we achieve great data efficiency improvement (up to **1.5x data saving** while still maintaining the model quality). + +**Seamlessly composing multiple methods.** The proposed DeepSpeed Data Efficiency framework seamlessly composes the curriculum learning and random-LTD techniques, and only requires minimal changes on the user code side. Furthermore, by composing both methods we can achieve even better data and training efficiency: for GPT-3 1.3B pretraining, we achieve **2x data and 2x time savings** together with better or similar model quality compared to the baseline training. When using the same amount of data, our approach further improves the model quality over the baseline. 
Users can also extend and contribute to the library by adding additional data efficiency techniques to compose together. + +Each of these advances is explored further in the blog post below. For more about the technical details, please read our papers, “[Random-LTD: Random and Layerwise Token Dropping Brings Efficient Training for Large-scale Transformers](https://arxiv.org/abs/2211.11586)” which describes the random-LTD technique, and “[DeepSpeed Data Efficiency: Improving Deep Learning Model Quality and Training Efficiency via Efficient Data Sampling and Routing](https://arxiv.org/abs/2212.03597)” which describes the curriculum learning technique and overall DeepSpeed Data Efficiency framework. + +# Efficient Data Sampling via Curriculum Learning + +## Motivation + +Curriculum learning aims to improve training convergence speed by presenting relatively easier or simpler examples earlier during training. Building a curriculum learning solution usually requires two components: the difficulty metric (i.e., how to quantify the difficulty of each data sample) and the pacing function (i.e., how to decide the curriculum difficulty range when sampling next training data batch). Curriculum learning has been successfully applied to various training tasks, and last year we also released a specific curriculum learning technique (sequence length warmup) for GPT-style model pretraining (see technical details in our paper “[The Stability-Efficiency Dilemma: Investigating Sequence Length Warmup for Training GPT Models](https://openreview.net/forum?id=JpZ5du_Kdh)” published in NeurIPS 2022). However, one common limitation among existing works is that there does not exist a generalized and extensible curriculum learning library, which allows practitioners to easily apply custom curriculum difficulty metrics, the combination of metrics, and pacing functions. 
+ +## Design + +To solve the limitation of existing solutions, we design and implement a general curriculum learning library emphasizing the extensibility. It consists of three components as shown in Figure 2 below (top part). First, we use a data analyzer to perform the offline CPU-only data analysis which indexes the whole data pool based on any difficulty metric such as the sequence length, the vocabulary rarity, or anything defined by user. Next, during training, the curriculum scheduler determines the difficulty threshold for the current step based on a pacing function such as linear, rooted, or any strategy provided by users. Then the data sampler will sample the data with desired difficulty from the indexed data pool. Overall, this general implementation would enable users to explore curriculum learning on their workloads with maximum customizability (more technical details in [our DeepSpeed Data Efficiency paper](https://arxiv.org/abs/2212.03597)). + +[ ![DeepSpeed Data Efficiency framework](/assets/images/data_efficiency/data_efficiecy_fig2.png) ](/assets/images/data_efficiency/data_efficiecy_fig2.png){: .align-center} + +*Figure 2: Design of the DeepSpeed Data Efficiency framework.* + +## Evaluation Results + +Using this general and extensible curriculum learning solution for GPT-3 and BERT-Large model pretraining, we are able to easily analyze and index the huge training data based on up to 7 difficulty metrics and enable better data and training efficiency. For GPT-3 pretraining, our solution with the best difficulty metric (combination of truncation-based sequence length and vocabulary rarity) achieves 1.5x data and training cost saving while still maintaining model quality as baseline (Table 1 Case (8) vs. (1)). For BERT-Large pretraining, our solution with the best difficulty metric (vocabulary rarity) achieves 1.5x saving while still maintaining model quality (Table 2 Case (8) vs. (1)). 
On the other hand, our solutions can further improve model quality when using the same amount of data as baseline (Table 1 Case (2) to (6), Table 2 Case (2) to (6)). + +| **Case** | **Pretrain data** | **Avg 0-shot accuracy** | **Avg 10-shot accuracy** | +| ---------- |---------- |---------- |---------- | +| (1) Baseline | 300B | 42.5 | 44.0 | +| (2) CL truncation-based sequence length | 300B | 43.4 | 44.8 | +| (3) CL reshape-based sequence length | 300B | 43.0 | 44.5 | +| (4) CL vocabulary rarity | 300B | 42.3 | 44.5 | +| (5) CL combining (2) and (4) | 300B | **43.6** | **44.9** | +| (6) CL combining (3) and (4) | 300B | 43.0 | 44.4 | +| (7) Baseline | 200B (1.5x) | 41.9 | 44.0 | +| (8) CL combining (2) and (4) | **200B (1.5x)** | 42.7 | 44.5 | + +*Table 1: GPT-3 1.3B pretraining data consumption and average evaluation accuracy on 19 tasks.* + +| **Case** | **Pretrain data** | **GLUE finetune score** | +| ---------- |---------- |---------- | +| (1) Baseline | 1049B | 87.29 | +| (2) CL truncation-based sequence length | 1049B | 87.31 | +| (3) CL reorder-based sequence length | 1049B | 87.48 | +| (4) CL vocabulary rarity | 1049B | 87.36 | +| (5) CL combining (2) and (4) | 1049B | **87.60** | +| (6) CL combining (3) and (4) | 1049B | 87.06 | +| (7) Baseline | 703B (1.5x) | 87.19 | +| (8) CL combining (2) and (4) | **703B (1.5x)** | 87.29 | + +*Table 2: BERT-Large pretraining data consumption and average GLUE finetuning score on 8 tasks.* + +# Efficient Data Routing via Random Layerwise Token Dropping + +## Motivation + +Standard data routing usually feeds the full images/sequences into all layers of a model. However, this process may not be optimal for training efficiency since some parts of an image (or words of a sentence) do not require a frequent feature update. 
As such, the token dropping method has been proposed, which is illustrated in Figure 3 (b) below, to skip the compute of some tokens/words (i.e., G-2 tokens in Figure 3 (b)) of a sentence in order to save the compute cost. + +Although existing methods show promising results, they also exhibit several caveats: (1) most works solely focus on BERT (encoder-only on text data) pretraining and do not include decoder pretraining and/or other modalities (e.g., images); (2) the ability to skip layers is limited, which bounds the total amount of compute saving. By analyzing existing methods, we found out the potential main issue that limits their skipping and coverage abilities is the loss of attention mechanism for G-2 tokens for all skipped layers, since multi-head attention focuses on different tokens at different layer depths and the attention map aligns with the dependency relation most strongly in the middle of transformer architectures. + +## Design + +To resolve this main issue, we propose random-LTD, a **random** and **layerwise** token dropping mechanism, which processes only a subset of tokens among the entire data batch for all middle layers in order to save compute cost (see more details in [our Random-LTD paper](https://arxiv.org/abs/2211.11586)). As such, each token rarely bypasses all middle layers and its dependency with other tokens can be captured by the model. The illustration of random-LTD compared to baseline is shown in Figure 3 below, where random-LTD splits the input tokens into two groups and only the first group involves the compute. + +[ ![random-LTD](/assets/images/data_efficiency/data_efficiecy_fig3.png) ](/assets/images/data_efficiency/data_efficiecy_fig3.png){: .align-center} + +*Figure 3: Comparison between baseline, existing token dropping methods, and random-LTD. Note that for random-LTD, only part of the inputs (Group 1) is used for Layer i.* + +Random-LTD is simple yet very effective. 
Particularly, compared to other existing token dropping methods, random-LTD (1) does a purely random selection for each layer for two different groups, as such we do not require any expert design for the selection criterion; (2) is able to apply to all middle layers to achieve better saving ratio; (3) demonstrates great generalizability for both encoder and decoder models; and (4) is easy to use without much modeling change. These advantages enable maximum flexibility when applying random-LTD to various workloads. + +## Evaluation Results + +Thanks to its great flexibility, we were able to apply the random-LTD method to broader applications, including BERT and GPT pretraining as well as ViT and GPT finetuning tasks. For all cases, random-LTD achieves similar model quality as baseline while using less data, and/or achieves better model quality while using the same amount of data (Tables 3 to 6). For GPT-3 and BERT-Large pretraining, random-LTD achieves 1.5-2x data saving while still maintaining the same model quality. For GPT-3 we also tested random-LTD with full data which further improves the model quality compared to baseline. 
+ +| **Case** | **Pretrain data** | **Avg 0-shot accuracy** | +| ---------- |---------- |---------- | +| (1) Baseline | 300B | 42.5 | +| (2) Random-LTD | 300B | **43.7** | +| (3) Random-LTD | **200B (1.5x)** | 42.5 | + +*Table 3: GPT-3 1.3B pretraining data consumption and average evaluation accuracy on 19 tasks.* + +| **Case** | **Pretrain data** | **GLUE finetune score** | +| ---------- |---------- |---------- | +| (1) Baseline | 1049B | 87.29 | +| (2) Random-LTD | **524B (2x)** | **87.32** | + +*Table 4: BERT-Large pretraining data consumption and average GLUE finetuning score on 8 tasks.* + +| **Case** | **Train data** | **ImageNet Top-1 Acc** | +| ---------- |---------- |---------- | +| (1) Baseline | 100% | 84.65 | +| (2) Random-LTD | **77.7% (1.3x)** | **84.70** | + +*Table 5: Finetuning result of ViT on ImageNet.* + +| **Case** | **Train data** | **PTB PPL** | +| ---------- |---------- |---------- | +| (1) Baseline | 100% | 16.11 | +| (2) Random-LTD | 100% | **15.9** | + +*Table 6: GPT-2 350M finetuning result on the PTB task.* + +# Composing Data Efficiency Techniques to Achieve More + +The curriculum learning and random-LTD techniques are complementary. Inside DeepSpeed Data Efficiency framework, we seamlessly compose the two techniques as shown in Figure 2 above, where curriculum learning helps to sample the next data batch and random-LTD helps to decide how to route each sampled data inside the model. DeepSpeed Data Efficiency solves several complexities when composing the two techniques so that users can easily apply each technique or both to their training pipeline. The composability of DeepSpeed Data Efficiency also applies to data sampling and routing techniques in general, so that it provides a platform to implement and compose additional data efficiency techniques. + +The composed DeepSpeed Data Efficiency solution leverages both data efficiency techniques and achieves even better data and training efficiency. 
Take the GPT-3 pretraining task as an example, composing CL and random-LTD, with 100% data, leads to the best model quality in our experiments (Table 7 Case (1) to (4)). When pretraining with 50% data, the baseline training results in worse zero-shot and 10-shot evaluation accuracy, and using either CL or random-LTD can only recover part of the 10-shot accuracy loss. On the other hand, the composed data efficiency solution achieves the same or better accuracy results as baseline with 100% data, demonstrating a 2x data and 2x time saving (Case (5) to (8)). Similar benefit such as 2x data saving was also observed when applying our solution to BERT pretraining. + +| **Case** | **Pretrain data** | **Pretrain time (on 64 V100)** | **Avg 0-shot accuracy** | **Avg 10-shot accuracy** | +| ---------- |---------- |---------- |---------- |---------- | +| (1) Baseline | 300B | 260hr | 42.5 | 44.0 | +| (2) CL best metric | 300B | 259hr | 43.6 | 44.9 | +| (3) random-LTD | 300B | 263hr | 43.7 | 44.9 | +| (4) CL + random-LTD | 300B | 260hr | **43.8** | **45.1** | +| (5) Baseline | 150B (2x) | 130hr (2x) | 42.0 | 42.7 | +| (6) CL best metric | 150B (2x) | 129hr (2x) | 42.6 | 43.7 | +| (7) random-LTD | 150B (2x) | 131hr (2x) | 42.7 | 43.5 | +| (8) CL + random-LTD | **150B (2x)** | **130hr (2x)** | 42.8 | 44.0 | + +*Table 7: GPT-3 1.3B pretraining data/time consumption and average evaluation accuracy on 19 tasks.* + +# Concluding Remarks + +We are very excited to share DeepSpeed Data Efficiency library with the community and improve it with your feedback. Please find the code, tutorial, and documents at the [DeepSpeed GitHub](https://github.com/microsoft/DeepSpeed), and [website](/tutorials/data-efficiency/). And for more technical details please read our [Random-LTD paper](https://arxiv.org/abs/2211.11586) and [DeepSpeed Data Efficiency paper](https://arxiv.org/abs/2212.03597). 
We believe that our composable library and novel data efficiency techniques will help users reduce training cost while maintaining model quality or achieve better quality under similar cost. And we hope DeepSpeed Data Efficiency could become a platform that motivates and accelerates future research on deep learning data efficiency. diff --git a/docs/_tutorials/advanced-install.md b/docs/_tutorials/advanced-install.md old mode 100644 new mode 100755 diff --git a/docs/_tutorials/automatic-tensor-parallelism.md b/docs/_tutorials/automatic-tensor-parallelism.md new file mode 100644 index 0000000..6991d5c --- /dev/null +++ b/docs/_tutorials/automatic-tensor-parallelism.md @@ -0,0 +1,154 @@ +--- +title: "Automatic Tensor Parallelism for HuggingFace Models" +tags: inference +--- + +# Contents + * [Introduction](#introduction) + * [Example Script](#example-script) + * [Launching](#launching) + * [OPT 13B Inference Performance Comparison](#opt-13b-inference-performance-comparison) + * [Supported Models](#supported-models) + * [Unsupported Models](#unsupported-models) + +# Introduction +This tutorial demonstrates the new automatic tensor parallelism feature for inference. Previously, the user needed to provide an injection policy to DeepSpeed to enable tensor parallelism. DeepSpeed now supports automatic tensor parallelism for HuggingFace models by default as long as kernel injection is not enabled and an injection policy is not provided. This allows our users to improve performance of models that are not currently supported via kernel injection, without providing the injection policy. 
Below is an example of the new method: + +```python +# --------------------------------------- +# New automatic tensor parallelism method +# --------------------------------------- +import os +import torch +import transformers +import deepspeed +local_rank = int(os.getenv("LOCAL_RANK", "0")) +world_size = int(os.getenv("WORLD_SIZE", "1")) +# create the model pipeline +pipe = transformers.pipeline(task="text2text-generation", model="google/t5-v1_1-small", device=local_rank) +# Initialize the DeepSpeed-Inference engine +pipe.model = deepspeed.init_inference( + pipe.model, + mp_size=world_size, + dtype=torch.float +) +output = pipe('Input String') +``` + +Previously, to run inference with only tensor parallelism for the models that don't have kernel injection support, you could pass an injection policy that showed the two specific linear layers on a Transformer Encoder/Decoder layer: 1) the attention output GeMM and 2) layer output GeMM. We needed these parts of the layer to add the required all-reduce communication between GPUs to merge the partial results across model-parallel ranks. 
Below, we show an example of this previous method: + +```python +# ---------------------------------- +# Previous tensor parallelism method +# ---------------------------------- +import os +import torch +import transformers +import deepspeed +from transformers.models.t5.modeling_t5 import T5Block +local_rank = int(os.getenv("LOCAL_RANK", "0")) +world_size = int(os.getenv("WORLD_SIZE", "1")) +# create the model pipeline +pipe = transformers.pipeline(task="text2text-generation", model="google/t5-v1_1-small", device=local_rank) +# Initialize the DeepSpeed-Inference engine +pipe.model = deepspeed.init_inference( + pipe.model, + mp_size=world_size, + dtype=torch.float, + injection_policy={T5Block: ('SelfAttention.o', 'EncDecAttention.o', 'DenseReluDense.wo')} +) +output = pipe('Input String') +``` + +With automatic tensor parallelism, we do not need to provide the injection policy for supported models. The injection policy will be determined at runtime and applied automatically. + + +# Example Script + +We can observe performance improvement with automatic tensor parallelism using the [inference test suite](https://github.com/microsoft/DeepSpeedExamples/blob/master/inference/huggingface/text-generation/inference-test.py). The script includes per token latency, bandwidth, throughput and memory checks for comparison. See the [README](https://github.com/microsoft/DeepSpeedExamples/tree/master/inference/huggingface/text-generation#deepspeed-huggingface-text-generation-examples) for more information. + + +## Launching + +Use the following command to run without DeepSpeed and without tensor parallelism. 
Set the `test_performance` flag to collect performance data: + +```bash +deepspeed --num_gpus DeepSpeedExamples/inference/huggingface/text-generation/inference-test.py --name --batch_size --test_performance +``` + + +To enable tensor parallelism, you need to use the flag `ds_inference` for the compatible models: + +```bash +deepspeed --num_gpus DeepSpeedExamples/inference/huggingface/text-generation/inference-test.py --name --batch_size --test_performance --ds_inference +``` + +## OPT 13B Inference Performance Comparison + +The following results were collected using V100 SXM2 32GB GPUs. + +### Max New Tokens = 50 + +| Test | Memory Allocated per GPU | Max Batch Size | Max Throughput per GPU | +| ---------- | -------------------------- | ---------------- | ------------------------ | +| No TP | 23.94 GB | 64 | 18.84 TFlops | +| 2 GPU TP | 12.23 GB | 320 | 27.17 TFlops | +| 4 GPU TP | 6.36 GB | 664 | 27.63 TFlops | + +### Max New Tokens = 1024 + +| Test | Memory Allocated per GPU | Max Batch Size | Max Throughput per GPU | +| ---------- | -------------------------- | ---------------- | ------------------------ | +| No TP | 23.94 GB | 2 | 1.65 TFlops | +| 2 GPU TP | 12.23 GB | 20 | 4.61 TFlops | +| 4 GPU TP | 6.36 GB | 56 | 4.90 TFlops | + +# Supported Models + +The following model families have been successfully tested with automatic tensor parallelism. Other models may work but have not been tested yet. + +- albert +- bert +- bigbird_pegasus +- camembert +- deberta_v2 +- electra +- ernie +- esm +- gpt-j +- gpt-neo +- gpt-neox +- longt5 +- luke +- m2m_100 +- marian +- mvp +- nezha +- openai +- opt +- pegasus +- perceiver +- plbart +- reformer +- roberta +- roformer +- splinter +- t5 +- xglm +- xlm_roberta +- yoso + +# Unsupported Models + +The following models are not currently supported with automatic tensor parallelism. 
They may still be compatible with other DeepSpeed features (e.g., kernel injection for Bloom): + +- bloom +- codegen +- deberta +- flaubert +- fsmt +- gpt2 +- led +- longformer +- xlm +- xlnet diff --git a/docs/_tutorials/autotuning.md b/docs/_tutorials/autotuning.md index 303087d..38648da 100644 --- a/docs/_tutorials/autotuning.md +++ b/docs/_tutorials/autotuning.md @@ -120,3 +120,7 @@ Note that the performance metric used in autotuning is calculated using the timi Tuning completed in 0:27:33.988447. Total number of experiments: 13. As we can see the DeepSpeed Autotuner can select a better than hand-tuned configuration with a reasonable number of experiments. Examples in [Autotuning Hugging Face Examples](https://github.com/microsoft/DeepSpeedExamples/tree/master/autotuning/hf#autotuning-hugging-face-examples) would demonstrate the effectiveness of autotuning across different models. + +### DeepSpeed Autotuning with AzureML + +To try DeepSpeed autotuning with AzureML, please see the example [here](https://github.com/Azure/azureml-examples/tree/main/cli/jobs/deepspeed/deepspeed-autotuning). diff --git a/docs/_tutorials/azure.md b/docs/_tutorials/azure.md index 1016aea..6c7cded 100644 --- a/docs/_tutorials/azure.md +++ b/docs/_tutorials/azure.md @@ -3,132 +3,20 @@ title: "Getting Started with DeepSpeed on Azure" tags: getting-started --- -This tutorial will help you get started running DeepSpeed on [Azure virtual -machines](https://azure.microsoft.com/en-us/services/virtual-machines/). -Looking forward, we will be integrating these techniques and additional enhancements -into the [Azure ML](https://azure.microsoft.com/en-us/services/machine-learning/) platform to -benefit all your large model training jobs. +This tutorial will help you get started with DeepSpeed on Azure. If you don't already have an Azure account please see more details here: [https://azure.microsoft.com/](https://azure.microsoft.com/). 
-To use DeepSpeed on [Azure ML](https://azure.microsoft.com/en-us/services/machine-learning/), please take a look at easy-to-use examples for Transformers and CIFAR training from [AzureML Examples GitHub](https://github.com/Azure/azureml-examples/tree/main/python-sdk/workflows/train/deepspeed). +# DeepSpeed on Azure via AzureML -To help with launching Azure instances we suggest using the [Azure -CLI](https://docs.microsoft.com/en-us/cli/azure/?view=azure-cli-latest). We have created -several helper scripts to get you quickly started using DeepSpeed with Azure. - * Install Azure CLI on your local box: [https://docs.microsoft.com/en-us/cli/azure/install-azure-cli](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli). - * Alternatively, you can use the Azure in-browser shell: [https://shell.azure.com/](https://shell.azure.com/). +The recommended and simplest method to try DeepSpeed on Azure is through [AzureML](https://azure.microsoft.com/en-us/services/machine-learning/). A training example and a DeepSpeed autotuning example using AzureML v2 can be found [here](https://github.com/Azure/azureml-examples/tree/main/cli/jobs/deepspeed). -## Create an SSH key -Generate an SSH key that will be used across this tutorial to SSH into your VMs and -between Docker containers. `ssh-keygen` is the recommended way of doing this. Our scripts -assume your key is located inside the same directory as the Azure scripts. +For AzureML v1 examples, please take a look at easy-to-use examples for Megatron-DeepSpeed, Transformers and CIFAR training [here](https://github.com/Azure/azureml-examples/tree/main/v1/python-sdk/workflows/train/deepspeed). -## Azure Config JSON -Our helper scripts depend on the following a configuration JSON for deployment -and setup. We have provided a simple example JSON in `azure_config.json` that -sets up a basic environment with two VMs. This config uses the NV6_Promo -instance type which has one NVIDIA Tesla M60 GPU per VM. 
You can read more -details about the VM on the [Linux Virtual Machines -Pricing](https://azure.microsoft.com/en-us/pricing/details/virtual-machines/linux/) -page. +> Our [Megatron-DeepSpeed](https://github.com/microsoft/megatron-deepspeed) contains the most up to date [recipe](https://github.com/microsoft/Megatron-DeepSpeed/tree/main/examples/azureml) for end-to-end training on AzureML. -See the example below: - ```json -{ - "num_vms": 2, - "location": "southcentralus", - "azure_sku": "Standard_NV6_Promo", - "ssh_private_key": "id_rsa", - "docker_ssh_port": 2222 -} -``` +# DeepSpeed on Azure VMs -## Dependencies -The scripts in this tutorial require [jq](https://stedolan.github.io/jq/) to help with -parsing JSON from the command line. Also it is recommended to install -[pdsh](https://linux.die.net/man/1/pdsh) to help launch ssh connections in parallel. +If you don't have access to AzureML or if want to build a custom environments using [Azure virtual machines](https://azure.microsoft.com/en-us/services/virtual-machines/) or Azure VM Scale-Sets ([VMSS](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/overview)), we are working on easy-to-use cluster setup scripts that will be published in the next few weeks. -## Create Azure VMs -We first need to allocate the VMs. We provide a script -```bash -./create_vms.sh -``` -to create VMs with the Azure SKU in the region specified in `azure_config.json`. Feel -free to customize your JSON to your desired region/SKU. This step will take a few minutes -to complete while it sets up all of your VMs on Azure. - -## Setup VM environment to use DeepSpeed -Next, we need to configure the VM environment for DeepSpeed. We provide a script -```bash -./setup_vms.sh -``` -to generate a [hostfile](/getting-started/#resource-configuration-multi-node) and SSH -configuration on all of the VMs. This configuration will be used by the DeepSpeed -Docker containers in the next step. 
- -## Start the DeepSpeed docker container -We now setup the DeepSpeed Docker containers on the VMs. We provide a script -```bash -./setup_docker.sh -``` -to pull the DeepSpeed image onto all VMs and start a container instance in the -background. This will take several minutes since it needs to pull the entire Docker -image. - -## Access VMs -The tool `azure_ssh.sh` will let you SSH into any of the VMs with this -syntax: -```bash -./azure_ssh.sh [command] -``` -where the `node-id` is a number between `0` and `num_vms-1`. This script will find the -public IP address of your VM and use the SSH key provided in the Azure configuration -JSON. - -## Access DeepSpeed container -Everything should be up and running at this point. Let's access the running DeepSpeed -container on the first VM and make sure we can talk to the other containers in our deployment. - - * SSH into the first VM via: `./azure_ssh.sh 0` - * Change directories into the azure folder of this repo via: `cd ~/workdir/DeepSpeed/azure` - * Attach the running docker container via: `./attach.sh` - * You should now be able to `ssh` into any other docker container, the containers can be - accessed via their SSH alias of `worker-N`, where `N` is the VM number between `0` - and `num_vms-1`. In this example we should be able to successfully run `ssh worker-1 - hostname` which will return the hostname of worker-1. - -## Parallel SSH across containers - DeepSpeed comes installed with a helper script `ds_ssh` which is a wrapper around - the [pdsh](https://linux.die.net/man/1/pdsh) command that lets you issue commands - to groups of hosts (via SSH) in parallel. This wrapper simply connects with the - hostfile that defines all the containers in your deployment. For example if you run - `ds_ssh hostname` you should see a list of all the hostnames in your deployment. - -## Run CIFAR-10 example model -We will now run the DeepSpeed CIFAR-10 model example to test the VM setup. 
From inside -the first DeepSpeed container: - - 1) Install the python dependencies necessary to run the CIFAR-10 example model. You can - do this across your cluster via: - ```bash - ds_ssh pip install -r ~/workdir/DeepSpeed/DeepSpeedExamples/cifar/requirements.txt - ``` - - 2) Now change directories to the CIFAR example: - ```bash - cd ~/workdir/DeepSpeed/DeepSpeedExamples/cifar - ``` - - 3) Finally, launch training across all VMs: - ```bash - deepspeed cifar10_deepspeed.py --deepspeed --deepspeed_config ds_config.json - ``` - -## Megatron-LM GPT2 -DeepSpeed includes an example model using Megatron-LM's GPT2. Please refer to the full -[Megatron tutorial](/tutorials/megatron/) for more details. - * In order to fully train GPT2 with DeepSpeed and ZeRO we recommend using 8 instances of - Azure's Standard_ND40rs_v2 SKU for a total of 64 NVIDIA V100 GPUs. With this setup and - a batch size of 1536 you should be able to complete 100k training steps (153.6 million - samples) in less than 2 weeks of training. +If you already have a cluster setup, you can use the [azure recipes](https://github.com/microsoft/Megatron-DeepSpeed/tree/main/examples/azure) that can easily be modified to train various model configurations. diff --git a/docs/_tutorials/bert-finetuning.md b/docs/_tutorials/bert-finetuning.md old mode 100644 new mode 100755 diff --git a/docs/_tutorials/bert-pretraining.md b/docs/_tutorials/bert-pretraining.md old mode 100644 new mode 100755 index e3771b7..a094394 --- a/docs/_tutorials/bert-pretraining.md +++ b/docs/_tutorials/bert-pretraining.md @@ -4,6 +4,10 @@ excerpt: "" tags: training pre-training --- +**Note:** +On 08/15/2022 we have added another BERT pre-training/fine-tuning example at [github.com/microsoft/Megatron-DeepSpeed/tree/main/examples/bert_with_pile](https://github.com/microsoft/Megatron-DeepSpeed/tree/main/examples/bert_with_pile), which includes a README.md that describes how to use it. 
Compared to the example described below, the new example in Megatron-DeepSpeed adds support for ZeRO and tensor-slicing model parallelism (thus supporting larger model scales), uses a public and richer [Pile dataset](https://github.com/EleutherAI/the-pile) (user can also use their own data), together with some changes to the model architecture and training hyperparameters as described in [this paper](https://arxiv.org/abs/1909.08053). As a result, the BERT models trained by the new example are able to provide better MNLI results than original BERT, but with a slightly different model architecture and larger computation requirements. If you want to train a larger-scale or better quality BERT-style model, we recommend following the new example in Megatron-DeepSpeed. If your goal is to strictly reproduce the original BERT model, we recommend following the example under DeepSpeedExamples/bing_bert as described below. On the other hand, the tutorial below helps explain how to integrate DeepSpeed into a pre-training codebase, regardless of which BERT example you use. +{: .notice--info} + In this tutorial we will apply DeepSpeed to pre-train the BERT (**B**idirectional **E**ncoder **R**epresentations from **T**ransformers), which is widely used for many Natural Language Processing (NLP) tasks. 
The diff --git a/docs/_tutorials/cifar-10.md b/docs/_tutorials/cifar-10.md index 11a05a7..74ee045 100644 --- a/docs/_tutorials/cifar-10.md +++ b/docs/_tutorials/cifar-10.md @@ -140,7 +140,8 @@ Here we initialize DeepSpeed with CIFAR-10 model (`net`), `args`, `parameters` a After initializing DeepSpeed, the original `device` and `optimizer` are removed: ```python - #device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + #from deepspeed.accelerator import get_accelerator + #device = torch.device(get_accelerator().device_name(0) if get_accelerator().is_available() else "cpu") #net.to(device) #optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9) diff --git a/docs/_tutorials/comms-logging.md b/docs/_tutorials/comms-logging.md new file mode 100644 index 0000000..52d93ed --- /dev/null +++ b/docs/_tutorials/comms-logging.md @@ -0,0 +1,116 @@ +--- +title: "Communication Logging" +excerpt: "Log all DeepSpeed communication calls" +tags: profiling performance-tuning +--- + +In this tutorial, we introduce DeepSpeed communication logging and provide examples of its usage. + + - [Overview](#overview) + - [Usage](#usage) + +## Overview + +NOTE: All logging communication calls are synchronized in order to provide accurate timing information. This may hamper performance if your model heavily uses asynchronous communication operations. + +Logging communication calls is vital to ensure networking resources are fully utilized. The DeepSpeed communication logger enables the detection and logging of all communication operations launched under `deepspeed.comm`. Each communication operation can all be directly printed to the console immediately after completion (via the `verbose` config option), or a summary may be printed with a call to `deepspeed.comm.log_summary()` in the client code at the completion of training, an epoch, after N training iterations, etc. 
+ +## Usage + +Communication logging in DeepSpeed is configured within the deepspeed [configuration file](/docs/config-json/#communication-logging). DeepSpeed will automatically log either all communication operations (`prof_all`) or user-specified operations (`prof_ops`). + + - [Configuration Setup](#configuration-setup) + - [Verbose Logging](#verbose-logging) + - [Log Summaries](#log-summaries) + +### Configuration Setup + +Communication logging can be configured in the DeepSpeed [configuration file](/docs/config-json/#communication-logging). Communication logging can be enabled by adding the following field to DeepSpeed's configuration json file. Refer to [Communication Logging](/docs/config-json/#communication-logging) for details. + +```json +"comms_logger": { + "enabled": true, + "verbose": false, + "prof_all": true, + "debug": false +} +``` + +There are currently two ways to view communication log records: + +1. Print all communication operations with `verbose` config option. See [Verbose Logging](#verbose-logging) +2. (Recommended) Print log summary with `deepspeed.comm.log_summary()` function call. See [Log Summaries](#log-summaries) + +### Verbose Logging + +If the `verbose` configuration option is selected, all communication operations will be immediately printed to the console. This mode is intended for detailed debugging, and is not recommended for most users. 
The following is an example snippet of `verbose` output: + +``` +[2022-06-26 01:39:55,722] [INFO] [logging.py:69:log_dist] [Rank 0] rank=0 | comm op: reduce_scatter_base | time (ms): 9.46 | msg size: 678.86 MB | algbw (Gbps): 1204.52 | busbw (Gbps): 1129.23 +[2022-06-26 01:39:56,470] [INFO] [logging.py:69:log_dist] [Rank 0] rank=0 | comm op: all_gather_base | time (ms): 0.11 | msg size: 6.0 MB | algbw (Gbps): 954.41 | busbw (Gbps): 894.76 +[2022-06-26 01:39:56,471] [INFO] [logging.py:69:log_dist] [Rank 0] rank=0 | comm op: all_gather_base | time (ms): 0.08 | msg size: 6.0 MB | algbw (Gbps): 1293.47 | busbw (Gbps): 1212.63 +``` + +For advanced users, the `debug` option will append the calling function of each communication operation to that operation's `log_name`. See [Log Summaries](#log-summaries) for an example of a `deepspeed.comm.log_summary()` call with `debug` enabled. + + +### Log Summaries + +It's recommended that users add a call to `deepspeed.comm.log_summary()` at training milestones (e.g. every epoch or N iterations). This enables high-level communication logging without having to sift through logs from `verbose`. + +The steps to add DeepSpeed communication log summaries are as follows: + +1. Modify configuration file with desired settings +2. (Optional) If your application contains `torch.distributed` calls that you wish to log, import `deepspeed.comm` package and modify `torch.distributed` calls to use `deepspeed.comm` (Note: The `deepspeed.comm` collective and pt2pt APIs exactly match `torch.distributed`) +3. Call `deepspeed.comm.log_summary` + +For example usage, see the following modified [DeepSpeedExamples/cifar](https://github.com/microsoft/DeepSpeedExamples/tree/master/cifar) example: + +```python +# Step 2: (Optional) Import deepspeed.comm +import deepspeed.comm as dist + +# Note that any communication operations using `import torch.distributed as dist` calls can remain unchanged, and will be automatically logged under deepspeed.comm! 
+dist.all_reduce(tensor) + +for epoch in range(2): + + running_loss = 0.0 + for i, data in enumerate(trainloader): + pre = time.time() + inputs, labels = data[0].to(model_engine.local_rank), data[1].to( + model_engine.local_rank) + if fp16: + inputs = inputs.half() + outputs = model_engine(inputs) + loss = criterion(outputs, labels) + + model_engine.backward(loss) + model_engine.step() + post = time.time() + # Step 3: Call `deepspeed.comm.log_summary()` + dist.log_summary() +``` + +The following is a truncated example output of `deepspeed.comm.log_summary()` at the end of 10 iterations of Megatron-DeepSpeed with ZeRO-3: + +``` +Comm. Op Message Size Count Total Latency(ms) Avg Latency(ms) tput_avg (Gbps) busbw_avg (Gbps) +broadcast + 2.0 KB 146 11.12 0.08 0.43 0.41 + 98.25 MB 1 8317.12 8317.12 0.20 0.19 +reduce_scatter_base + 678.86 MB 40 602.29 9.69 1468.06 1376.31 +``` + + +And the following is a call to `deepspeed.comm.log_summary` under the same configuration with `debug` enabled: + +``` +Comm. Op Message Size Count Total Latency(ms) Avg Latency(ms) tput_avg (Gbps) busbw_avg (Gbps) +broadcast | [Caller Func: _broadcast_model] + 2.0 KB 146 9.39 0.06 0.52 0.48 + 98.25 MB 1 8540.60 8540.60 0.19 0.18 +reduce_scatter_base | [Caller Func: reduce_scatter_fn] + 678.86 MB 80 1527.17 13.94 1211.75 1136.01 +``` diff --git a/docs/_tutorials/curriculum-learning.md b/docs/_tutorials/curriculum-learning.md index 938955a..161c29c 100644 --- a/docs/_tutorials/curriculum-learning.md +++ b/docs/_tutorials/curriculum-learning.md @@ -3,6 +3,10 @@ title: "Curriculum Learning: A Regularization Method for Efficient and Stable Bi tags: training pre-training --- +**Watch out!** +On 12/12/2022, we released DeepSpeed Data Efficiency Library which provides a more general curriculum learning support. This legacy curriculum learning feature below is still supported but we recommend to use the Data Efficiency Library ([tutorial](/tutorials/data-efficiency/)). 
+{: .notice--warning} + + **Note:** This tutorial was updated on 10/29/2021. Changes include: 1) A more detailed tuning strategy. 2) Pipeline parallelism support. 3) Token-based learning rate decay. 4) A new GPT-2 example at [github.com/microsoft/Megatron-DeepSpeed](https://github.com/microsoft/Megatron-DeepSpeed). See details below. {: .notice--info} diff --git a/docs/_tutorials/data-efficiency.md b/docs/_tutorials/data-efficiency.md new file mode 100644 index 0000000..329e3bb --- /dev/null +++ b/docs/_tutorials/data-efficiency.md @@ -0,0 +1,100 @@ +--- +title: "DeepSpeed Data Efficiency: A composable library that makes better use of data, increases training efficiency, and improves model quality" +tags: training pre-training +--- + +**What is DeepSpeed Data Efficiency:** DeepSpeed Data Efficiency is a library purposely built to make better use of data, increase training efficiency, and improve model quality. + +**Why use DeepSpeed Data Efficiency:** DeepSpeed Data Efficiency offers novel data efficiency techniques to achieve better training efficiency and/or better model quality. DeepSpeed Data Efficiency takes extensibility, flexibility, and composability into consideration, which makes it easier to customize the techniques, apply the techniques to various training tasks, and compose multiple techniques together. We highly recommend that you also read [our blog](https://www.deepspeed.ai/2022/12/11/data-efficiency.html) to learn more about (at a high level) why we built DeepSpeed Data Efficiency and what benefits it provides to users. 
Additional technical details can be found in our papers, “[Random-LTD: Random and Layerwise Token Dropping Brings Efficient Training for Large-scale Transformers](https://arxiv.org/abs/2211.11586)” which describes the random-LTD technique, and “[DeepSpeed Data Efficiency: Improving Deep Learning Model Quality and Training Efficiency via Efficient Data Sampling and Routing](https://arxiv.org/abs/2212.03597)” which describes the curriculum learning technique and overall DeepSpeed Data Efficiency framework. + +**How to use DeepSpeed Data Efficiency:** In the following tutorial, the first two sections will describe the data efficiency techniques supported by the library. The third section will describe how to compose the two techniques to achieve even better training efficiency/model quality. + +## 1. Curriculum Learning + +### 1.1 What is Curriculum Learning +Curriculum learning (proposed by [Yoshua Bengio et al.](https://dl.acm.org/doi/abs/10.1145/1553374.1553380)) aims to improve training convergence speed by presenting relatively easier or simpler examples earlier during training. Building a curriculum learning solution usually requires two components: the difficulty metric (i.e., how to quantify the difficulty of each data sample) and the pacing function (i.e., how to decide the curriculum difficulty range when sampling next training data batch). 
+ +### 1.2 When to use Curriculum Learning +Curriculum learning has been successfully applied to various training tasks (see details in, for example, [this survey paper](https://arxiv.org/abs/2010.13166)), and last year we also released a specific curriculum learning technique (sequence length warmup) for GPT-style model pretraining (see technical details in our paper “[The Stability-Efficiency Dilemma: Investigating Sequence Length Warmup for Training GPT Models](https://openreview.net/forum?id=JpZ5du_Kdh)” published in NeurIPS 2022 and the [tutorial for this legacy curriculum learning feature](/tutorials/curriculum-learning/)). This new general curriculum learning library inside DeepSpeed Data Efficiency enables users to employ curriculum learning to their models at **maximum extensibility**: users can easily analyze, index, and sample their training data based on various customizable strategies. Using this library, we were able to explore different CL strategies for GPT-3 and BERT pretraining and identify the best solution that provides up to **1.5x data saving** while still maintaining similar model quality. + +### 1.3 How to use Curriculum Learning + +#### 1.3.1 GPT-3 and BERT pretraining +The `examples/data_efficiency` directory in our [Megatron-DeepSpeed repo](https://github.com/microsoft/Megatron-DeepSpeed) includes our examples of how to apply curriculum learning to GPT-3 and BERT pretraining. There are 3 steps: data analysis, pretraining, and eval/finetuning. + +**Data analysis:** Curriculum learning requires a data analysis before pretraining that calculates the difficulty of each data sample (based on the metric provided by the user), and builds an index that maps difficulty values to corresponding data samples. (There are exceptions: for example the truncation-based sequence length metric can be achieved by data postprocessing without data analysis.) We provide a data analyzer to perform the offline CPU-only data analysis. 
+ +`examples/data_efficiency/gpt/ds_analyze_*.sh` and `examples/data_efficiency/bert/ds_analyze_*.sh` are example scripts for GPT-3 and BERT's data analysis. Our data analyzer employs a simple Map-Reduce scheme. First, at the Map stage the `ds_analyze_*_data_map.sh` is used to split the dataset and compute the difficulty value for each data sample. Users need to provide a function to compute the metric (we implement ours in `examples/data_efficiency/analyze_data.py`), the raw training dataset, and other configurations such as number of CPU nodes and number of threads per node. Then the data analyzer will automatically split the dataset based on the number of workers, compute the difficulty values in a batched fashion, and write the results to two indexes: one index maps each data sample to its difficulty value, and another index maps each distinct difficulty value to the corresponding samples. Second, at the Reduce stage the `ds_analyze_*_data_reduce.sh` is used to merge the index files produced by all workers. One thing to note is that in order to enable speedup by distribution yet still being able to merge all the output, the Map stage will potentially generate a lot of output files, which is proportional to the number of CPU nodes, number of threads per node, and number of possible metric values. Thus to avoid generating too many output files, we recommend starting with a smaller number of nodes/threads (in the output log we provide an estimated required time for users to judge if they want to increase the number of workers), and we recommend limiting the number of possible difficulty values when designing your difficulty metric (our experience shows that a few thousand distinct values are already sufficient to enjoy the benefit of curriculum learning). + +**Pretraining** `examples/data_efficiency/gpt/pretrain` and `examples/data_efficiency/bert/pretrain` include the example pretraining scripts with the curriculum learning feature. 
Several changes are needed to enable curriculum learning during pretraining: (1) User need to provide a DeepSpeed json config file which includes configurations for curriculum learning (see [list of configuration](/docs/config-json/#data-efficiency) for details). We provide tested example configurations in `examples/data_efficiency/gpt/pretrain/ds_pretrain_gpt_1.3B_dense_run.sh` and `examples/data_efficiency/bert/pretrain/ds_pretrain_bert_336M_run.sh`. (2) When initializing the DeepSpeed engine via `deepspeed.initialize`, user needs to provide the train dataset and use the dataloader returned by the initialization (this dataloader includes the curriculum learning capability). We provide an example implementation of this change in `megatron/training.py` function `setup_model_and_optimizer`. (3) If the curriculum learning metric requires data postprocessing (such as truncation-based sequence length), user needs to use the DeepSpeed engine's `set_data_post_process_func` API to provide the postprocessing function. We provide an example implementation of this change in `megatron/training.py`, `pretrain_bert.py`, and `pretrain_gpt.py`. (4) If the curriculum learning metric requires a custom scheduling strategy (the pacing function), user needs to use the DeepSpeed engine's `set_custom_curriculum_learning_schedule` API to provide the function to update the max accepted difficulty during training. DeepSpeed engine will provide a global train step input to this callback function. + +**Eval/finetuning** `examples/data_efficiency/gpt/eval/` and `examples/data_efficiency/bert/finetune` include the example scripts for GPT-3 model's zero-/few-shot evaluation and BERT model's finetuning. Our [paper](https://arxiv.org/abs/2212.03597) includes the reference eval/finetune results if you follow our example scripts to perform the pretraining/eval/finetuning. 
+ +#### 1.3.2 GPT-2 finetuning +The `data_efficiency/gpt_finetuning` directory in our [DeepSpeedExamples repo](https://github.com/microsoft/DeepSpeedExamples) includes our examples of how to apply curriculum learning to GPT-2 finetuning. `data_efficiency/gpt_finetuning/finetune/ds_finetune_gpt2_run.sh` is the example finetuning script. For CL metrics that require data analysis (e.g., the vocabulary rarity metric), you need to first use ```data_efficiency/gpt_finetuning/finetune/ds_analyze_gpt_data_*``` to analyze and index the dataset, similar to the GPT-3 pre-training case described above in 1.3.1. + +## 2. Random layerwise token dropping (random-LTD) + +### 2.1 What is random-LTD +Random-LTD is an efficient token drop method applied to each layer with random assignment. Precisely, for each layer, as compared to the baseline, random-LTD randomly selects a subset of the tokens and feeds them into the transformer layer. Afterward, we combine the output of transformer layer with the dropped tokens to recover the full sequence length. Thus, the next layer still receives the full sequence and can repeat this process. For more technical details please read [our random-LTD paper](https://arxiv.org/abs/2211.11586). + +### 2.2 When to use random-LTD +When you want to pretrain/fine-tune a transformer-based model, it is always a good idea to try random-LTD, as it can achieve a better performance than the standard baseline training given the same amount of computational cost. If you have limited resources, random-LTD achieves similar accuracy as the original baseline method with up to 33.3% theoretical cost saving and up to 25.6% wall-clock time saving. Particularly, if you need to train a much larger model with >=24 layers and with >=2048 sequence length, our method will be much more efficient than baseline. 
+ +### 2.3 How to use random-LTD + +#### 2.3.1 GPT-3 and BERT pretraining +The `examples/data_efficiency` directory in our [Megatron-DeepSpeed repo](https://github.com/microsoft/Megatron-DeepSpeed) includes our examples of how to apply random-LTD to GPT-3 and BERT pretraining. + +`examples/data_efficiency/gpt/pretrain` and `examples/data_efficiency/bert/pretrain` include the example pretraining scripts with random-LTD feature. Several changes are needed to enable random-LTD during pretraining: (1) User need to provide a DeepSpeed json config file which includes configurations for random-LTD (see [list of configuration](/docs/config-json/#data-efficiency) for details). We provide tested example configurations in `examples/data_efficiency/gpt/pretrain/ds_pretrain_gpt_1.3B_dense_run.sh` and `examples/data_efficiency/bert/pretrain/ds_pretrain_bert_336M_run.sh`. (2) After initializing the DeepSpeed engine via `deepspeed.initialize`, user needs to use the `convert_to_random_ltd` API to convert and wrap the model layers in order to enable the random-LTD feature. We provide an example implementation of this change in `megatron/training.py` function `setup_model_and_optimizer`. (3) In order for random-LTD to understand the input argument mapping of the forward function, user need to change all the input arguments (except the hidden_states input) into keyword/named argument. For example, in `megatron/model/transformer.py` we changed the forward function from `def forward(self, hidden_states, attention_mask, encoder_output=None, enc_dec_attn_mask=None, layer_past=None, get_key_value=False):` to `def forward(self, hidden_states, attention_mask=None, encoder_output=None, enc_dec_attn_mask=None, layer_past=None, get_key_value=False):`. (4) When saving model checkpoints, (especially if the state dictionary has non-traditional structure) user needs to use the `remove_random_ltd_state_dict` API to convert the random-LTD-wrapped layers back to original model layers. 
We provide an example implementation of this change in `megatron/model/language_model.py`. + +For eval/finetuning of the pretrained model, see [previous section](#131-gpt-3-and-bert-pretraining) about how to use our example scripts. + +#### 2.3.2 GPT-2 and ViT finetuning +The `data_efficiency` directory in our [DeepSpeedExamples repo](https://github.com/microsoft/DeepSpeedExamples) includes our examples of how to apply random-LTD to GPT-2 and ViT finetuning. + +Just like pretraining case, similar changes are required to enable random-LTD for finetuning: (1) DeepSpeed json config file. (2) Use the `convert_to_random_ltd` API to convert and wrap the model layers. (3) When saving model checkpoints, use the `remove_random_ltd_state_dict` API to convert the random-LTD-wrapped layers back to original model layers. + +One can run our GPT finetuning example by: + +```shell +DeepSpeedExamples/data_efficiency/gpt_finetuning$ pip install -r requirement.txt +DeepSpeedExamples/data_efficiency/gpt_finetuning$ bash ./bash_script/run_base_random_ltd.sh +DeepSpeedExamples/data_efficiency/gpt_finetuning$ bash ./bash_script/run_medium_random_ltd.sh +``` + +And the reference final result is: + +```shell +For run_base_random_ltd.sh: +End of training epoch 3 step 1344 consumed_token 2148032 best perplexity 22.552324221233757 time 0.17486039188173083 hr + +For run_medium_random_ltd.sh: +End of training epoch 3 step 1373 consumed_token 2147024 best perplexity 17.332243199130996 time 0.4661190489927928 hr +``` + +One can run our ViT finetuning example by: + +```shell +DeepSpeedExamples/data_efficiency/vit_finetuning$ pip install -r requirement.txt +DeepSpeedExamples/data_efficiency/vit_finetuning$ bash ./bash_script/run_cifar.sh +DeepSpeedExamples/data_efficiency/vit_finetuning$ bash ./bash_script/run_imagenet.sh +``` + +And the reference final result is: + +```shell +For run_cifar.sh: +13 epoch at time 480.6546013355255s | researved_length 197 +iter 5474 | LR [0.0001]| val_acc 
97.97000122070312 | layer_token 305784192 +``` + +## 3. Composing curriculum learning and random-LTD to achieve more + +### 3.1 GPT-3 and BERT pretraining +The `examples/data_efficiency` directory in our [Megatron-DeepSpeed repo](https://github.com/microsoft/Megatron-DeepSpeed) includes our examples of how to compose curriculum learning random-LTD, and apply both of them to GPT-3 and BERT pretraining. + +The changes needed are the same as described in previous two sections, since DeepSpeed Data Efficiency already handles the complexity when composing the two techniques. However, one thing to note is that since both random-LTD and some of the curriculum learning metrics will change the sequence length, it could require some extra code to calculate the effective sequence length at each step. We provide an example implementation of this change in `megatron/training.py` function `train` where we calculate the `actual_seq_length`. + +#### 3.2 GPT-2 finetuning +The `data_efficiency/gpt_finetuning` directory in our [DeepSpeedExamples repo](https://github.com/microsoft/DeepSpeedExamples) includes our examples of how to compose curriculum learning random-LTD for GPT-2 finetuning. `data_efficiency/gpt_finetuning/finetune/ds_finetune_gpt2_run.sh` is the example finetuning script. diff --git a/docs/_tutorials/flops-profiler.md b/docs/_tutorials/flops-profiler.md index b90a55e..169bfb1 100644 --- a/docs/_tutorials/flops-profiler.md +++ b/docs/_tutorials/flops-profiler.md @@ -184,7 +184,7 @@ When using DeepSpeed for model training, the profiler can be configured in the d #### Example: Megatron-LM -For information on running Megatron-LM with DeepSpeed, please refer to our tutorial [Megatron-LM](https://github.com/microsoft/DeepSpeedExamples/tree/master/Megatron-LM). +For information on running Megatron-LM with DeepSpeed, please refer to our tutorial [Megatron-LM](https://github.com/microsoft/DeepSpeedExamples/tree/master/megatron/Megatron-LM). 
An example output of 12-layer Megatron-LM model (`hidden_size = 8192, num_attention_heads = 32, batch_size = 1024, seq_length = 1024`) is shown below. diff --git a/docs/_tutorials/gan.md b/docs/_tutorials/gan.md old mode 100644 new mode 100755 diff --git a/docs/_tutorials/inference-tutorial.md b/docs/_tutorials/inference-tutorial.md index 253bb70..1766622 100644 --- a/docs/_tutorials/inference-tutorial.md +++ b/docs/_tutorials/inference-tutorial.md @@ -11,7 +11,7 @@ DeepSpeed provides a seamless inference mode for compatible transformer based mo For inference with DeepSpeed, use `init_inference` API to load the model for inference. Here, you can specify the MP degree, and if the model has not been loaded with the appropriate checkpoint, you can also provide the checkpoint description using a `json` file or the checkpoint path. -To inject the high-performance kernels, you need to set the `replace_with_kernel_inject` to True and pass int the `replace_method` as `'auto'` for the compatible models, or define a new policy in [replace_policy class](https://github.com/microsoft/DeepSpeed/blob/master/deepspeed/module_inject/replace_policy.py) and pass in the `injection_policy` that specifies the different parameters of a Transformer layer, such as attention and feed-forward parts. The `injection_policy` shows the mapping between the parameters of the original layer implementation with the inference-customized Transformer layer. +To inject the high-performance kernels, you need to set the `replace_with_kernel_inject` to True for the compatible models. For models not supported by DeepSpeed, the users can submit a PR that defines a new policy in [replace_policy class](https://github.com/microsoft/DeepSpeed/blob/master/deepspeed/module_inject/replace_policy.py) that specifies the different parameters of a Transformer layer, such as attention and feed-forward parts. 
The policy classes in DeepSpeed create a mapping between the parameters of the original user-supplied layer implementation with DeepSpeed's inference-optimized Transformer layer. ```python # create the model @@ -28,7 +28,6 @@ ds_engine = deepspeed.init_inference(model, mp_size=2, dtype=torch.half, checkpoint=None if args.pre_load_checkpoint else args.checkpoint_json, - replace_method='auto', replace_with_kernel_inject=True) model = ds_engine.module output = model('Input String') @@ -111,8 +110,7 @@ generator = pipeline('text-generation', model='EleutherAI/gpt-neo-2.7B', generator.model = deepspeed.init_inference(generator.model, mp_size=world_size, dtype=torch.float, - replace_method='auto', - replace_with_kernel_inject=True) + replace_with_kernel_inject=True) string = generator("DeepSpeed is", do_sample=True, min_length=50) if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0: diff --git a/docs/_tutorials/large-models-w-deepspeed.md b/docs/_tutorials/large-models-w-deepspeed.md index ea6145c..21b9956 100644 --- a/docs/_tutorials/large-models-w-deepspeed.md +++ b/docs/_tutorials/large-models-w-deepspeed.md @@ -24,7 +24,7 @@ At a broad level, there are two primary paths to training a large model: Since, ZeRO is a replacement to data parallelism, it offers a seamless integration that does not require model code refactoring for existing data-parallel models. For majority of cases, ZeRO based technologies offers model scalability, training throughput efficiency without compromising ease of use. -**3D Parallelism based technologies**: 3D Parallelism refers to a combination of three different forms of parallel technologies namely tensor-slicing, pipeline-parallelism, and data parallelism (or ZeRO powered data parallelism). Combing these three forms allows for harnessing the strength of each of these technologies without the drawback of any. 
3D Parallelism enables DeepSeed to achieve excellent training throughput efficiency in the scenarios where relying on ZeRO based technologies alone might be insufficient. However, 3D parallelism requires non-trivial model code refactoring, and therefore a careful consideration is important to identify cases where 3D-Parallelism can bring non-trivial throughput benefits. +**3D Parallelism based technologies**: 3D Parallelism refers to a combination of three different forms of parallel technologies namely tensor-slicing, pipeline-parallelism, and data parallelism (or ZeRO powered data parallelism). Combining these three forms allows for harnessing the strength of each of these technologies without the drawback of any. 3D Parallelism enables DeepSpeed to achieve excellent training throughput efficiency in the scenarios where relying on ZeRO based technologies alone might be insufficient. However, 3D parallelism requires non-trivial model code refactoring, and therefore a careful consideration is important to identify cases where 3D-Parallelism can bring non-trivial throughput benefits. ## Deciding which technology to use diff --git a/docs/_tutorials/megatron.md b/docs/_tutorials/megatron.md index 7d81ecd..2977f57 100644 --- a/docs/_tutorials/megatron.md +++ b/docs/_tutorials/megatron.md @@ -19,7 +19,7 @@ reduction_** from using DeepSpeed. ## Training GPT-2 with the Original Megatron-LM -We've copied the original model code from [Megatron-LM](https://github.com/NVIDIA/Megatron-LM) into DeepSpeed [Megatron-LM](https://github.com/microsoft/DeepSpeedExamples/tree/master/Megatron-LM-v1.1.5-ZeRO3) and made it available as a submodule. To download, execute: +We've copied the original model code from [Megatron-LM](https://github.com/NVIDIA/Megatron-LM) into DeepSpeed [Megatron-LM](https://github.com/microsoft/Megatron-DeepSpeed) and made it available as a submodule. 
To download, execute: ```bash git submodule update --init --recursive ``` diff --git a/docs/_tutorials/mixture-of-experts-inference.md b/docs/_tutorials/mixture-of-experts-inference.md index 42df78d..7a75c84 100644 --- a/docs/_tutorials/mixture-of-experts-inference.md +++ b/docs/_tutorials/mixture-of-experts-inference.md @@ -23,7 +23,7 @@ In this part, we elaborate the usage of MoE inference support in the DeepSpeed l ### Initializing for Inference -For inference with DeepSpeed-MoE, use `init_inference` API to load the DeepSpeed MoE model for inference. Here, you can specify the model-parallelism/tensor-slicing degree (mp_size), expert parallelism degree (ep_size), and number of experts (moe_exeperts). We create various process groups based on minimum of the world\_size (total number of GPUs) and expert parallel size. By using this group, we can partition the experts among expert-parallel GPUs. If number of experts is lower than total number of GPUs, DeepSpeed-MoE leverages expert-slicing for partitioning the expert parameters between the expert-parallel GPUs. Furthermore, if the model has not been loaded with the appropriate checkpoint, you can also provide the checkpoint description using a `json` file or simply pass the `'checkpoint'` path to load the model. To inject the high-performance inference kernels, you can pass int the `replace_method` as `'auto'` and set the `replace_with_kernel_inject` to True. +For inference with DeepSpeed-MoE, use `init_inference` API to load the DeepSpeed MoE model for inference. Here, you can specify the model-parallelism/tensor-slicing degree (mp_size), expert parallelism degree (ep_size), and number of experts (moe_experts). We create various process groups based on minimum of the world\_size (total number of GPUs) and expert parallel size. By using this group, we can partition the experts among expert-parallel GPUs. 
If number of experts is lower than total number of GPUs, DeepSpeed-MoE leverages expert-slicing for partitioning the expert parameters between the expert-parallel GPUs. Furthermore, if the model has not been loaded with the appropriate checkpoint, you can also provide the checkpoint description using a `json` file or simply pass the `'checkpoint'` path to load the model. To inject the high-performance inference kernels, you can set `replace_with_kernel_inject` to True. ```python @@ -44,7 +44,6 @@ ds_engine = deepspeed.init_inference(moe_model, dtype=torch.half, moe_experts=args.num_experts, checkpoint=args.checkpoint_path, - replace_method='auto', replace_with_kernel_inject=True,) model = ds_engine.module output = model('Input String') @@ -55,7 +54,7 @@ output = model('Input String') Here, we show a text-generation example using an MoE model for which we can specify the model-parallel size and number of experts. DeepSpeed inference-engine takes care of creating the different parallelism groups using the tensor-slicing degree, number of experts, and the total number of GPUs used for running the MoE model. Regarding the expert parameters, we first use the expert-parallelism to assign each group of experts to one GPU. If number of GPUs is higher than number of experts, we use expert-slicing to partition each expert vertically/horizontally across the GPUs. -Let's take a look at some of the parameters passed to run our example. Please refer to [DeepSpeed-Example](https://github.com/microsoft/Megatron-DeepSpeed/blob/moe/examples/generate_text.sh) for a complete generate-text inference example. +Let's take a look at some of the parameters passed to run our example. Please refer to [DeepSpeed-Example](https://github.com/microsoft/Megatron-DeepSpeed/blob/main/examples/generate_text.sh) for a complete generate-text inference example. 
```bash @@ -97,7 +96,7 @@ generate_samples_gpt.py \ --num-attention-heads 16 \ --max-position-embeddings 1024 \ --tokenizer-type GPT2BPETokenizer \ - --load $checpoint_path \ + --load $checkpoint_path \ --fp16 \ --ds-inference \ ``` diff --git a/docs/_tutorials/mixture-of-experts-nlg.md b/docs/_tutorials/mixture-of-experts-nlg.md old mode 100644 new mode 100755 index e43cb83..c88df2d --- a/docs/_tutorials/mixture-of-experts-nlg.md +++ b/docs/_tutorials/mixture-of-experts-nlg.md @@ -7,7 +7,7 @@ In this tutorial, we introduce how to apply DeepSpeed Mixture of Experts (MoE) t ## 1. Installation -You would need to install DeepSpeed v0.6.0 or higher to use the MoE feature. The MoE for NLG model examples are in the [Megatron-DeepSpeed](https://github.com/microsoft/Megatron-DeepSpeed) repo (currently under [the moe branch](https://github.com/microsoft/Megatron-DeepSpeed/tree/moe) but later could be merged to main branch). +You would need to install DeepSpeed v0.6.0 or higher to use the MoE feature. The MoE for NLG model examples are in the [Megatron-DeepSpeed](https://github.com/microsoft/Megatron-DeepSpeed) repo under the MoE folder. ## 2. Training NLG+MoE models @@ -15,7 +15,7 @@ You would need to install DeepSpeed v0.6.0 or higher to use the MoE feature. The To apply MoE to the GPT-style model, we made several changes in Megatron framework, mostly in `megatron/model/` where we add the MoE layers into the model. ### 2.2. Pre-training the Standard MoE model -We provide example training scripts under [examples/MoE](https://github.com/microsoft/Megatron-DeepSpeed/tree/moe/examples/MoE) which we used to perform the experiments in our [Blog]({{ site.press_release_v6 }}). There are a few new hyperparameters for standard MoE model: +We provide example training scripts under [examples/MoE](https://github.com/microsoft/Megatron-DeepSpeed/tree/main/examples/MoE) which we used to perform the experiments in our [Blog]({{ site.press_release_v6 }}). 
There are a few new hyperparameters for standard MoE model: `--num-experts`: the number of experts per MoE layer. In our experiments we set it to 128. Larger number of experts tend to provide better convergence, but it's a diminishing return. @@ -30,7 +30,7 @@ We provide example training scripts under [examples/MoE](https://github.com/micr ### 2.3. Pre-training the PR-MoE model -PR-MoE is a new designed MoE models, standing for Pyramid-Residual-MoE, which improves the parameter efficiency up to 3x as compared to standard MoE. Please see our [Blog]({{ site.press_release_v6 }}) for more details. We provide example training scripts under [examples/MoE](https://github.com/microsoft/Megatron-DeepSpeed/tree/moe/examples/MoE). There are a few different hyperparameters for PR-MoE model compared to standard MoE: +PR-MoE is a newly designed MoE model, standing for Pyramid-Residual-MoE, which improves the parameter efficiency up to 3x as compared to standard MoE. Please see our [Blog]({{ site.press_release_v6 }}) for more details. We provide example training scripts under [examples/MoE](https://github.com/microsoft/Megatron-DeepSpeed/tree/main/examples/MoE). There are a few different hyperparameters for PR-MoE model compared to standard MoE: `--num-experts`: Instead of providing a single number, to enable Pyramid-MoE, you need to provide a list, whose length is the same as the number of MoE layers. We suggest to use more experts in the latter stage (close to output) of the model. @@ -67,4 +67,4 @@ MoS, standing for Mixture-of-Students, is a staged distillation-based technique In addition to the new parameters above, we observe that using the teacher PR-MoE during the entire training process may adversely impact the final student model accuracy. 
In our experiments, we use a staged distillation method by stopping distillation early in the training process (e.g., after 400K steps) and perform optimization only against the standard language modeling loss for the rest of the training. -We provide example training scripts under [examples/MoE](https://github.com/microsoft/Megatron-DeepSpeed/tree/moe/examples/MoE). Details of our parameter settings can be found in the example training scripts. The performance results of MoS can be seen from our [blog post](https://www.microsoft.com/en-us/research/blog/deepspeed-powers-8x-larger-moe-model-training-with-high-performance/) and our [paper](https://arxiv.org/abs/2201.05596). +We provide example training scripts under [examples/MoE](https://github.com/microsoft/Megatron-DeepSpeed/tree/main/examples/MoE). Details of our parameter settings can be found in the example training scripts. The performance results of MoS can be seen from our [blog post](https://www.microsoft.com/en-us/research/blog/deepspeed-powers-8x-larger-moe-model-training-with-high-performance/) and our [paper](https://arxiv.org/abs/2201.05596). diff --git a/docs/_tutorials/model-compression.md b/docs/_tutorials/model-compression.md new file mode 100644 index 0000000..20f2e6a --- /dev/null +++ b/docs/_tutorials/model-compression.md @@ -0,0 +1,441 @@ +--- +title: "DeepSpeed Model Compression Library" +tags: model-compression +--- + +**What is DeepSpeed Compression:** DeepSpeed Compression is a library purposely built to make it easy to compress models for researchers and practitioners while delivering faster speed, smaller model size, and significantly reduced compression cost. + +**Why use DeepSpeed Compression:** DeepSpeed Compression offers novel state-of-the-art compression techniques to achieve faster model compression with better model quality and lower compression cost. 
DeepSpeed Compression also takes an end-to-end approach to improve the computation efficiency of compressed models via a highly optimized inference engine. Furthermore, our library has multiple built-in state-of-the-art compression methods. It supports the synergistic composition of these methods and the system optimizations, offering the best of both worlds while allowing a seamless and easy-to-use pipeline for efficient DL model inference. We highly recommend you also to read [our blog](https://www.microsoft.com/en-us/research/blog/deepspeed-compression-a-composable-library-for-extreme-compression-and-zero-cost-quantization/) to learn more about (at a high level) why we build DeepSpeed Compression and what benefits it provides to users. + +**How to use DeepSpeed Compression:** The first section General Tutorial will describe the compression methods supported by the library. The following sections will describe our research work on how to compose different compression methods to perform [zero-cost quantization (ZeroQuant)](#2-tutorial-for-zeroquant-efficient-and-affordable-post-training-quantization) and [extreme compression (XTC)](#3-tutorial-for-xtc-simple-yet-effective-compression-pipeline-for-extreme-compression). Unless otherwise stated, experiment results listed below are based on NVIDIA A100 GPU, and we observe slightly different result numbers when using different GPU hardwares. + +## 1. General Tutorial +To use DeepSpeed Compression library, you need to install DeepSpeed >= 0.7.0 following the [installation guide](/tutorials/advanced-install/). Currently the DeepSpeed Compression includes seven compression methods: layer reduction via knowledge distillation, weight quantization, activation quantization, sparse pruning, row pruning, head pruning, and channel pruning. In the following subsections, we will describe what these methods are, when to use them, and how to use them via our library. 
+ +### 1.1 Layer Reduction +**What is layer reduction** + +Neural networks are constructed from input layer, output layer and hidden layer. For example, the BERT-base language model consists of embedding layer (input layer), classification layer (output layer) and 12 hidden layers. Layer reduction means reducing the number of hidden layers while keeping the width of the network intact (i.e., it does not reduce the dimension of the hidden layer). This method can linearly reduce the inference latency of hidden layers regardless of the hardware and/or scenarios. + +**When to use layer reduction** + +If the model is very deep, you may consider using this method. It works much better when applying knowledge distillation. Layer reduction can be applied in both the pre-training and fine-tuning stages. The former generates a distilled task-agnostic model, while the latter generates a task-specific distilled model. In our XTC work ([paper](https://arxiv.org/abs/2206.01859), [tutorial](#3-tutorial-for-xtc-simple-yet-effective-compression-pipeline-for-extreme-compression)), we also discuss when to apply layer reduction. + +**How to use layer reduction** + +Layer reduction can be enabled and configured using the DeepSpeed config JSON file ([configuration details](/docs/config-json/#layer-reduction)). Users have the freedom to select any depth by `keep_number_layer` and any subset of the network layers by `teacher_layer`. In addition, users also can choose whether to reinitialize the input/output layers from the given model (teacher model) by `other_module_name`. + +To apply layer reduction for task-specific compression, we provide an example on how to do so for BERT fine-tuning. Layer reduction is about resetting the depth of network architecture and reinitialization of weight parameters, which happens before the training process. 
The example includes the following changes to the client code (`model_compression/bert/run_glue_no_trainer.py` in [DeepSpeedExamples](https://github.com/microsoft/DeepSpeedExamples)): + +(1) When initializing the model, the number of layers in the model config should be the same as `keep_number_layer` in DeepSpeed config JSON file. For Hugging Face BERT example, set `config.num_hidden_layers = ds_config["compression_training"]["layer_reduction"]["keep_number_layer"]`. + +(2) Then we need to re-initialize the model based on the DeepSpeed JSON configurations using the function `init_compression` imported from `deepspeed.compression.compress`. + +(3) During training, if KD is not used, nothing needs to be done. Otherwise, one needs to consider applying KD with the `teacher_layer` JSON configuration when calculating the difference between teacher’s and student’s output. + +One can run our layer reduction example in [DeepSpeedExamples](https://github.com/microsoft/DeepSpeedExamples) by: + +```shell +DeepSpeedExamples/model_compression/bert$ pip install -r requirements.txt +DeepSpeedExamples/model_compression/bert$ bash bash_script/layer_reduction.sh +``` + +And the final result is: + +```shell +Epoch: 18 | Time: 12m 38s +Clean the best model, and the accuracy of the clean model is acc/mm-acc:0.8340295466123281/0.8339096826688365 +``` + +To apply layer reduction for task-agnostic compression, we provide an example on how to do so in the GPT pre-training stage. + +Step 1: Obtain the latest version of the [Megatron-DeepSpeed](https://github.com/microsoft/Megatron-DeepSpeed). + +Step 2: Enter `Megatron-DeepSpeed/examples/compression` directory. + +Step 3: Run the example bash script such as `ds_pretrain_gpt_125M_dense_cl_kd.sh`. The args related to the pre-training distillation are: + +(1)`--kd`, this enables knowledge distillation. + +(2)`--kd-beta-ce`, this specifies the knowledge distillation coefficient. 
You can often leave it set to the default value 1, but sometimes tuning this hyperparameter leads to better distillation results. + +(3)`--num-layers-teacher`, `--hidden-size-teacher`, `--num-attention-heads-teacher`, these parameters specify the network configuration of the teacher model. Please make sure they match the teacher model dimensions in the checkpoint. + +(4)`--load-teacher`, this is where one specifies the teacher model checkpoint. + +(5)`--load`, this is where one specifies the initial checkpoint for the student model that is going to be loaded. By default, it will load the bottom layers of the teacher models for initialization, but you can pass your own checkpoints for initialization. + +Apart from the above configs, you may also need to modify the data path in the `data_options` so that the trainer knows the data location. To make things slightly easier, we provide several example scripts for running distillation for different model sizes, including 350M (`ds_pretrain_gpt_350M_dense_kd.sh`) and 1.3B models (`ds_pretrain_gpt_1.3B_dense_cl_kd.sh`). We also empirically found that a staged KD often led to a better pre-trained distilled model on downstream tasks. Therefore, we suggest an easy approach to early-stop KD by not setting `--kd` in the script provided (e.g., disabling KD in the remaining 40% of training). + +Step 4: After distilling the model, one can also choose to further quantize the distilled model by running the script `125M-L10-Int8-test-64gpu-distilled-group48.sh`, which quantizes both the weights and activations of a distilled model with INT8 quantizer (the weight and activation quantization are introduced in the following sections). Note that you need to set the `-reset-iteration` flag when performing the quantization. We provide the zero-shot perplexity result from WikiText-2 and LAMBADA in the following table. 
+ +| **GPT (125M)** | **#Layers** | **wikitext2 perplexity** | **LAMBADA** | +| ---------- |---------- |---------- |---------- | +| Uncompressed | 12 | 29.6 | 39.5 | +| Quantization only | 12 | 29.8 | 39.7 | +| Distillation only | 10 | 31.9 | 39.2 | +| Distillation + quantization | 10 | 32.28 | 38.7 | + +### 1.2 Weight Quantization +**What is weight quantization** + +Weight quantization maps the full precision weight (FP32/FP16) to the low bit ones, like INT8 and INT4. Quoted from [this Coursera lecture](https://www.coursera.org/lecture/machine-learning-modeling-pipelines-in-production/benefits-and-process-of-quantization-WAjyJ): “Quantization involves transforming a model into an equivalent representation that uses parameters and computations at a lower precision. This improves the model's execution performance and efficiency, but it can often result in lower model accuracy”. + +**When to use weight quantization** + +On the one hand, again quoted from [this Coursera lecture](https://www.coursera.org/lecture/machine-learning-modeling-pipelines-in-production/benefits-and-process-of-quantization-WAjyJ): “Mobile and embedded devices have limited computational resources, so it's important to keep your application resource efficient. Depending on the task, you will need to make a trade-off between model accuracy and model complexity. If your task requires high accuracy, then you may need a large and complex model. For tasks that require less precision, it's better to use a smaller, less complex model.”. On the other hand, recent server accelerators, like GPU, support low-precision arithmetic. Therefore, combining weight quantization with activation quantization (introduced in later section) can offer better efficiency as well. + +**How to use weight quantization** + +Weight quantization can be enabled and configured using the DeepSpeed config JSON file ([configuration details](/docs/config-json/#weight-quantization)). 
The key configurations we would like to point out are: + +(1)`quantize_groups`, a group-wise weight matrix quantization: a weight matrix W is partitioned into multiple groups, and each group is quantized separately. See more details in [this paper](https://ojs.aaai.org/index.php/AAAI/article/view/6409). + +(2)`quantize_weight_in_forward` must be set to true for FP32 optimizer training and false for FP16. + +(3)`wq1`/`wq2`, users can expand more groups such as `wq3`, `wq4`, etc. + +(4)`start_bit` and `target_bit`, to simplify the first experiment we suggest to set them the same such that we apply quantization to the target bit once the iteration reaches `schedule_offset`. + +There are two changes to the client code (`model_compression/bert/run_glue_no_trainer.py` in [DeepSpeedExamples](https://github.com/microsoft/DeepSpeedExamples)): + +(1) After initialization of the model, apply `init_compression` function to the model with DeepSpeed JSON configurations. + +(2) After training, apply `redundancy_clean` function to save the quantized weight. + +One can run our weight quantization example in [DeepSpeedExamples](https://github.com/microsoft/DeepSpeedExamples) by: + +```shell +DeepSpeedExamples/model_compression/bert$ pip install -r requirements.txt +DeepSpeedExamples/model_compression/bert$ bash bash_script/quant_weight.sh +``` + +And the final result is: + +```shell +Epoch: 09 | Time: 27m 10s +Clean the best model, and the accuracy of the clean model is acc/mm-acc:0.8414671421293938/0.8422497965825875 +``` + +### 1.3 Activation Quantization +**What is activation quantization** + +Activation means the input to each layer. Activation quantization maps the input from full/half precision to low precision. See more in [this blog](https://medium.com/@joel_34050/quantization-in-deep-learning-478417eab72b). + +**When to use activation quantization** + +It can improve computation efficiency similar to [weight quantization](#12-weight-quantization). 
+ +**How to use activation quantization** + +Activation quantization can be enabled and configured using the DeepSpeed config JSON file ([configuration details](/docs/config-json/#activation-quantization)). Some of the components are same as weight quantization, such as `schedule_offset` and `quantization_type`. The key configurations we would like to point out are: + +(1)`range_calibration`, user has option to set dynamic or static. When using “dynamic”, the activation quantization groups will be automatically set to be token-wise (for Transformer-based models) and image-wise (for CNN-based models). See more in [our ZeroQuant paper](https://arxiv.org/abs/2206.01861) and the code (`deepspeed/compression/basic_layer.py` in [DeepSpeed](https://github.com/microsoft/DeepSpeed)). + +(2)`aq1`/`aq2`, users can expand more groups such as `aq3`, `aq4`, etc. + +The client code change is the same as [weight quantization](#12-weight-quantization). + +One can run our activation quantization example in [DeepSpeedExamples](https://github.com/microsoft/DeepSpeedExamples) by: + +```shell +DeepSpeedExamples/model_compression/bert$ pip install -r requirements.txt +DeepSpeedExamples/model_compression/bert$ bash bash_script/quant_activation.sh +``` + +And the final result is: + +```shell +Epoch: 02 | Time: 28m 50s +Clean the best model, and the accuracy of the clean model is acc/mm-acc:0.8375955170657158/0.8422497965825875 +``` + +### 1.4 Pruning +**What is pruning** + +Pruning aims to reduce the number of parameters and operations involved in generating a prediction by removing network connections. With pruning, you can lower the overall parameter count in the network (see more in [this Coursera lecture](https://www.coursera.org/lecture/machine-learning-modeling-pipelines-in-production/pruning-uNSOG)). We can divide the pruning strategy into two types: structured and unstructured pruning (see more in [this paper](https://arxiv.org/abs/1506.02626)). 
+ + +| **Method** | **Type** | +| --------------------- | ------------ | +| [Sparse pruning](#141-sparse-pruning) | Unstructured | +| [Row pruning](#142-row-pruning) | Structured | +| [Head pruning](#143-head-pruning) | Structured | +| [Channel pruning](#144-channel-pruning) | Structured | + +#### 1.4.1 Sparse Pruning +**What is sparse pruning** + +Sparse pruning means we set some of the elements in each weight matrix with zero values. There is no structure pattern in the zero values. One way to perform pruning is based on the absolute value of the weight parameters, see for instance [this paper](https://arxiv.org/abs/1506.02626). + +**When to use sparse pruning** + +If your model is significantly over-parameterized, you may consider using sparse pruning. However, to see the real benefit of hardware computation efficiency, the density ratio (percentage of weights to keep after pruning) must be considerably low. + +**How to use sparse pruning** + +Sparse pruning can be enabled and configured using the DeepSpeed config JSON file ([configuration details](/docs/config-json/#sparse-pruning)). The key configurations we would like to point out are: + +(1)`schedule_offset`, we empirically find that when using `method: topk`, it’s better to set the `schedule_offset` to a large value such as 10% of the total training steps. + +(2)`method`, we support L1 norm and topk methods. Users are welcome to contribute more methods. + +(3)`sp1`, users can expand more groups such as `sp2`, `sp3`, etc. + +(4)`dense_ratio`, for unstructured sparse pruning, the dense ratio could be less than 0.1 for BRET-base model while still yielding a good accuracy. For ResNet-50, the dense ratio could be as low as 0.3 while still having good accuracy on ImageNet. + +The client code change is the same as [weight quantization](#12-weight-quantization). 
+ +One can run our sparse pruning example in [DeepSpeedExamples](https://github.com/microsoft/DeepSpeedExamples) by: + +```shell +DeepSpeedExamples/model_compression/bert$ pip install -r requirements.txt +DeepSpeedExamples/model_compression/bert$ bash bash_script/pruning_sparse.sh +``` + +And the final result is: + +```shell +Epoch: 02 | Time: 26m 14s +Clean the best model, and the accuracy of the clean model is acc/mm-acc:0.8416709118695873/0.8447925142392189 +``` + +#### 1.4.2 Row Pruning +**What is row pruning** + +Row pruning sets all the elements in certain rows of the weight matrix with zero values. If a row is pruned, all elements in that row are set to zero. + +**When to use row pruning** + +Row pruning can be beneficial to hardware speedup, much better than sparse pruning (but may result in larger accuracy loss compared to sparse pruning). It is a feature designed for two back-to-back linear layers (e.g., Feed Forward Network in Transformers). As such, we suggested using row pruning for the first linear layer (i.e., the `intermediate.dense` layer for BERT). Reducing the row dimension of this matrix can help to reduce the column of the follow-up matrix (i.e., `layer.\\w+.output.dense` layer for BERT). Row pruning would also work for other kinds of linear layers. + +**How to use row pruning** + +Row pruning can be enabled and configured using the DeepSpeed config JSON file ([configuration details](/docs/config-json/#row-pruning)). The key configurations we would like to point out are: + +(1)`method`, only `topk` method is supported currently. Users are welcome to contribute more methods. + +(2)`rp1`, users can expand more groups such as `rp2`, `rp3`, etc. + +(3)`related_modules`, as mentioned in “when to use row pruning”, if we do row pruning, the follow-up matrix will be affected. Thus, one needs to know the connection between the modules. + +The client code change is the same as [weight quantization](#12-weight-quantization). 
+ +One can run our row pruning example in [DeepSpeedExamples](https://github.com/microsoft/DeepSpeedExamples) by: + +```shell +DeepSpeedExamples/model_compression/bert$ pip install -r requirements.txt +DeepSpeedExamples/model_compression/bert$ bash bash_script/pruning_row.sh +``` + +And the final result is: + +```shell +Epoch: 02 | Time: 27m 43s +Clean the best model, and the accuracy of the clean model is acc/mm-acc:0.8440142638818136/0.8425549227013832 +``` + +#### 1.4.3 Head Pruning +**What is head pruning** + +Head pruning is designed specifically for networks with multi-head attention, such as transformer-based models (see more in [this blog](https://towardsdatascience.com/transformers-explained-visually-part-3-multi-head-attention-deep-dive-1c1ff1024853)). For example, the BERT-base (BERT-large) model has 12 heads (24 heads). + +**When to use head pruning** + +Head pruning is beneficial to hardware speedup. Moreover, as stated in [this blog](https://towardsdatascience.com/head-pruning-in-transformer-models-ec222ca9ece7): “Surprising observations are made in the [paper](https://arxiv.org/abs/1905.09418), that even after training models normally (with all heads), many heads can be removed at a test time and it will not significantly affect the BLEU score, in fact, some cases removing few heads led to improving BLEU scores.”. + +NOTE: Head pruning is a feature designed for the attention layers (e.g., Multi Head Attention in Transformers). For now, it can only be applied to output matrix of the Transformer (i.e., `attention.output.dense` in BERT). Pruning the output matrix can lead to the pruning of Query/Key/Value matrix as well. + +**How to use head pruning** + +Head pruning can be enabled and configured using the DeepSpeed config JSON file ([configuration details](/docs/config-json/#head-pruning)). The key configurations we would like to point out are: + +(1)`num_heads`: users need to provide the correct number of heads for their models. 
+ +(2)`modules`: the module `attention.output.dense` is made specific for Hugging Face BERT model. Currently, we only support this case when Query/Key/Values are separated matrices and followed by `attention.output.dense`. We are happy to assist and welcome contributions on variants of attention models. + +(3)`related_modules`: as mentioned in “when to use head pruning”, pruning the attention output matrix can lead to pruning QKV matrices as well. Thus, the input here is [“self.query”, “self.key”, “self.value”]. + +The client code change is the same as [weight quantization](#12-weight-quantization). + +One can run our head pruning example in [DeepSpeedExamples](https://github.com/microsoft/DeepSpeedExamples) by: + +```shell +DeepSpeedExamples/model_compression/bert$ pip install -r requirements.txt +DeepSpeedExamples/model_compression/bert$ bash bash_script/pruning_head.sh +``` + +And the final result is: + +```shell +Clean the best model, and the accuracy of the clean model is acc/mm-acc:0.8397350993377484/0.8377746135069162 +``` + +#### 1.4.4 Channel Pruning +**What is channel pruning** + +Channel pruning is made specifically for convolutional layers and computer vision. According to wikipedia.org, “The color data of an image is stored in three arrays of values, known as channels.”. For example, an image with three channels passing through ResNet-18 produces 64 channels after the first layer. + +**When to use channel pruning** + +Channel pruning is a feature designed for two back-to-back CONV2d layers (e.g., residual connection in ResNet). As such, we suggest using channel pruning for the first CONV2d layer. Reducing the number of output channels of this layer can help reduce the number of input channels of the next layer. Channel pruning would also work for other kinds of CONV2d layers. 
+ +**How to use channel pruning** + +Channel pruning can be enabled and configured using the DeepSpeed config JSON file ([configuration details](/docs/config-json/#channel-pruning)). + +One can run our channel pruning example in [DeepSpeedExamples](https://github.com/microsoft/DeepSpeedExamples) by: + +```shell +pip install torch torchvision +DeepSpeedExamples/model_compression/cifar$ bash run_compress.sh +``` + +And the final result is: + +```shell +after_clean +epoch 10 testing_correct: 0.7664 +``` + +Note that the above result is when not using batch-norm (BN) in the “ResNet” model. If you use BN for the model and apply channel pruning, the validation after cleaning the model will be different from the model before cleaning. We suggest users to further finetune the model after applying `redundancy_clean` for such cases. + +## 2. Tutorial for ZeroQuant: efficient and affordable post-training quantization +In this section, we introduce how to apply DS-Compression to perform cost-free INT8 quantization and lightweight INT4/INT8 mixed-precision quantization. For more details, please refer to [our paper](https://arxiv.org/abs/2206.01861). + +**What is ZeroQuant** + +ZeroQuant is an efficient Post Training Quantization method that includes (1) a fine-grained hardware-friendly quantization scheme for both weight and activations, which can significantly reduce the quantization error; (2) a novel affordable layer-by-layer knowledge distillation algorithm (LKD) even without the access to the original training data; (3) a highly-optimized quantization system backend support to remove the quantization/dequantization overhead. By these techniques, ZeroQuant is able to (1) quantize models to INT8 without any cost and (2) quantize models to INT4/INT8 mixed-precision quantization with minimal resource requirements (e.g., 31s for BERT-base quantization). 
+ +**When to use ZeroQuant** + +When you want to quantize the transformer-based model to INT8 or INT4/INT8 format, it is always a good idea to try ZeroQuant first, especially when the model is very resource-hungry (GPU and/or time) to do quantization aware training and/or when the original training data is not accessible. + +**How to use ZeroQuant** + +One can run our BERT example in [DeepSpeedExamples](https://github.com/microsoft/DeepSpeedExamples) by: + +```shell +DeepSpeedExamples/model_compression/bert$ pip install -r requirements.txt +DeepSpeedExamples/model_compression/bert$ bash bash_script/ZeroQuant/zero_quant.sh +``` + +And the final result is: + +```shell +Clean the best model, and the accuracy of the clean model is acc/mm-acc:0.8427916454406521/0.8453010577705452 +``` + +One can run our GPT example by: + +```shell +DeepSpeedExamples/model_compression/gpt2$ pip install -r requirements.txt +DeepSpeedExamples/model_compression/gpt2$ bash bash_script/run_zero_quant.sh +``` + +And the final result is: + +```shell +Before converting the module COVN1D to linear and init_compression: 19.371443732303174 +Before cleaning, Epoch at 0 with Perplexity: 19.47031304212775 +After cleaning with Perplexity: 19.47031304212775 +``` + +NOTE: right now, we only support zero cost quantization. Stay tuned for the code release on layer-by-layer knowledge distillation proposed in the ZeroQuant paper. + +## 3. Tutorial for XTC: simple yet effective compression pipeline for extreme compression +In this section, we introduce how to apply the DeepSpeed Compression library to perform the light-weight layer reduction and ultra-low bit precision (binary/ternary) quantization. In particular, we will guide you on implementing the [XTC methods](https://arxiv.org/abs/2206.01859), namely: + +(1) Obtaining a 1-bit or 2-bit BERT-base (12-layer) with 8-bit activation quantization. + +(2) Reducing the 12-layer BERT-base to a 5-layer one and then obtaining its 1-bit or 2-bit counterparts. 
+ +**What is XTC** + +XTC (short for eXTreme Compression) is our new simple yet efficient method that compresses a model to its limit with lightweight layer reduction and robust binarization. XTC reduces the model size by 32x with almost no loss in the average score on the GLUE tasks via simple yet effective binarization technique. By combining extreme quantization and lightweight layer reduction, we can further improve the binarized model, achieving 50x model size reduction while keeping 97% of the accuracy. +For more details, see how we derive our method in [our paper](https://arxiv.org/abs/2206.01859) where we perform a systematic study on the impacts of various techniques currently used for extreme compression. + +**When to use XTC** + +If you want to significantly compress your models while retaining competitive performance, XTC could be a desirable choice. It is a simple and hyper-parameter tuning friendly method. + +**How to use XTC** + +**Installation:** Examples of XTC extreme compression for BERT models are at `model_compression/bert/bash_script/XTC` in [DeepSpeedExamples](https://github.com/microsoft/DeepSpeedExamples). You will need to install the requirements by: + +```shell +DeepSpeedExamples/model_compression/bert$ pip install -r requirements.txt +``` + +**Implementation of XTC methods:** +To accommodate users who do not have a fine-tuned model or task-specific model for compression, with the arg `--model_name_or_path yoshitomo-matsubara/bert-base-uncased-${TASK_NAME}` our python script `run_glue_no_trainer.py` automatically downloads the models from Hugging Face. Users can also use their own models with better accuracy as the teacher and the student model initialization. + +### 3.1 One-bit or Two-bit BERT-base (12-layer) with 8-bit activation quantization +For the configurations, see `model_compression/bert/config/XTC/ds_config_W1A8_Qgroup1_fp32.json` in [DeepSpeedExamples](https://github.com/microsoft/DeepSpeedExamples). 
In our paper, we used FP32 (`"fp16": {"enabled": false}`) to perform training, while directly applying 8-bit quantization (`"bits": 8`) to the activations and 1-bit quantization (`"start_bits": 1, "target_bits": 1`) to the attention (query, key, val) and feedforward weight matrices (`"modules": ["attention.self", "intermediate", "output.dense"]`) at the beginning of the training (`"schedule_offset": 0`). In addition, we also apply 1-bit quantization to `word_embeddings` as weight quantization. + +One can run this example by: + +```shell +DeepSpeedExamples/model_compression/bert$ bash bash_script/XTC/quant_1bit.sh +``` + +And the final result is: + +```shell +Clean the best model, and the accuracy of the clean model is acc/mm-acc:0.8293428425878757/0.8396053702196908 +``` + +The other important feature we would like to mention is the `quantize_groups` inside `weight_quantization`, which is set to be 1 here to match our XTC paper's FP32 training setup. We find that under FP16 training, smaller number of quantization group (e.g., 1 or 2) could lead to unstable training. Thus, we recommend using larger number of groups (e.g., 64) under FP16. `model_compression/bert/config/ds_config_W1A8_Qgroup64_fp16.json` in [DeepSpeedExamples](https://github.com/microsoft/DeepSpeedExamples) is the FP16 example configurations, where `"fp16": {"enabled": true}` and `"weight_quantization": {"shared_parameters": {"quantize_weight_in_forward": false}}` are different from FP32 case. + +With this config, we quantize the existing fined-tuned models downloaded from Hugging Face. For 2-bit weight quantization, user needs to update the ds_config JSON file. To give a sense of the compression performance of downloaded models compared to our paper, we collect the results (1/2-bit BERT on MNLI and QQP with 18 training epochs) in table below. The difference between this tutorial and paper is because they use different checkpoints. 
Data augmentation introduced in [TinyBERT](https://github.com/huawei-noah/Pretrained-Language-Model/tree/master/TinyBERT) will help significantly for smaller tasks (such as mrpc, rte, sst-b and cola). See more details in [our paper](https://arxiv.org/abs/2206.01859). + +![XTC quantization results](/assets/images/xtc-1.png){: .align-center} + +### 3.2 Compressing the 12-layer BERT-base to 1-bit or 2-bit 6/5-layer BERT + +This section consists of two parts: (a) we first perform a light-weight layer reduction, and (b) based on the model in (a), we perform 1-bit or 2-bit quantization. + +**3.2.1 Light-weight Layer Reduction** + +`model_compression/bert/config/XTC/ds_config_layer_reduction_fp16.json` in [DeepSpeedExamples](https://github.com/microsoft/DeepSpeedExamples) is the example configuration for reducing the 12-layer BERT-base to a 6-layer one. The student’s layers are initialized from i-layer of the teacher with i= [1, 3 ,5 ,7 ,9 ,11] (note that the layer starts from 0), which is called `Skip-BERT_5` in our XTC paper. In addition, student’s modules including embedding, pooler and classifier are also initialized from teacher. For 5-layer layer reduction, one needs to change the configs in `ds_config_layer_reduction_fp16.json` to `"keep_number_layer": 5`, `"teacher_layer": [2, 4 ,6, 8, 10]` (like in `model_compression/bert/config/ds_config_TEMPLATE.json`). + +One can run this example by: + +```shell +DeepSpeedExamples/model_compression/bert$ bash bash_script/XTC/layer_reduction.sh +``` + +And the final result is: + +```shell +Clean the best model, and the accuracy of the clean model is acc/mm-acc:0.8377992868059093/0.8365541090317331 +``` + +Notably, when using one-stage knowledge distillation (`--distill_method one_stage`), the difference between the outputs of teacher and student models (att_loss and rep_loss) also needs to be consistent with the initialization. See the function `_kd_function` under `forward_loss` in `model_compression/bert/util.py`. 
+ +For mnli/qqp, we set `--num_train_epochs 36`, `--learning_rate 5e-5`, and with the JSON config above. The results are given below (we also include the fp16 training results). Using fp32 clearly results in more stable performance than fp16, although fp16 can speed up the training time. + +![XTC layer reduction results](/assets/images/xtc-2.png){: .align-center} + +**3.2.2 One-bit or Two-bit quantization for 6-layer (5-layer) BERT** + +Given the above layer-reduced models ready, we now continue to compress the model with 1/2-bit quantization. `model_compression/bert/config/XTC/ds_config_layer_reduction_W1Q8_fp32.json` in [DeepSpeedExamples](https://github.com/microsoft/DeepSpeedExamples) is the example configuration where we set the layer reduction to be true on top of `model_compression/bert/config/XTC/ds_config_W1A8_Qgroup1_fp32.json`. In addition to the configuration, we need to update the path for the student model using `--pretrained_dir_student` in the script `model_compression/bert/bash_script/XTC/layer_reduction_1bit.sh`. User can train with a different teacher model by adding `--pretrained_dir_teacher`. + +One can run this example by: + +```shell +DeepSpeedExamples/model_compression/bert$ bash bash_script/XTC/layer_reduction_1bit.sh +``` + +And the final result is: + +```shell +Epoch: 18 | Time: 18m 11s +Clean the best model, and the accuracy of the clean model is acc/mm-acc:0.8140601120733572/0.8199755899104963 +``` + +With the command above, one can now obtain the results of 1-bit 6-layer model. Now we list more results for 2-/1-bit 6/5-layer models in the following table. Note that the checkpoints we used for the compression below are from the above table in section 3.2.1. 
+ +![XTC 6-layer and quantization](/assets/images/xtc-3.png){: .align-center} + +![XTC 5-layer and quantization](/assets/images/xtc-4.png){: .align-center} diff --git a/docs/_tutorials/monitor.md b/docs/_tutorials/monitor.md new file mode 100644 index 0000000..a9c111f --- /dev/null +++ b/docs/_tutorials/monitor.md @@ -0,0 +1,105 @@ +--- +title: "Monitor" +excerpt: "Monitor your model's training metrics live and log for future analysis" +tags: profiling performance-tuning +--- + +In this tutorial, we introduce the DeepSpeed Monitor and provide examples of its usage. + + - [Overview](#overview) + - [Usage](#usage) + +## Overview + +Monitoring model and system metrics during training is vital to ensure hardware resources are fully utilized. The DeepSpeed Monitor enables live logging of metrics through one or more monitoring backends such as PyTorch's [TensorBoard](https://pytorch.org/docs/1.8.0/tensorboard.html), [WandB](https://docs.wandb.ai/quickstart), and simple CSV files. + +Below is a live monitoring view for TensorBoard: + +![TensorBoard Example Output](/assets/images/tensorboard_monitor.PNG){: .align-center} + +Below is a live monitoring view for WandB: + +![WandB Example Output](/assets/images/wandb_monitor.PNG){: .align-center} + +## Usage + +The DeepSpeed Monitor is configured within the deepspeed [configuration file](/docs/config-json/#monitoring-module-tensorboard-wandb-csv). DeepSpeed will automatically monitor key training metrics, including those tracked with the `wall_clock_breakdown` configuration option. In addition, users can log their own custom events and metrics. + + - [Automatic Monitoring](#automatic-monitoring) + - [Custom Monitoring](#custom-monitoring) + +### Automatic Monitoring + +When using DeepSpeed for model training, the Monitor can be configured in the DeepSpeed [configuration file](/docs/config-json/#monitoring-module-tensorboard-wandb-csv). No explicit API calls are needed to use the Monitor. 
The Monitor can be enabled by adding the following field to DeepSpeed's configuration json file. Refer to [Monitoring](/docs/config-json/#monitoring-module-tensorboard-wandb-csv) for details. + +```json +{ + "tensorboard": { + "enabled": true, + "output_path": "output/ds_logs/", + "job_name": "train_bert" + }, + "wandb": { + "enabled": true, + "team": "my_team", + "group": "my_group", + "project": "my_project" + }, + "csv_monitor": { + "enabled": true, + "output_path": "output/ds_logs/", + "job_name": "train_bert" + } +} +``` + +DeepSpeed will automatically log to all available and enabled monitoring backends listed in the config, and will generate live monitoring views such as those listed above. + +### Custom Monitoring + +In addition to automatic monitoring, users can log their own custom metrics in client scripts. Currently, there are two ways to initialize Monitor objects: + +1. (Recommended) - Create a `MonitorMaster(ds_config.monitor_config)` object, which automatically initializes all monitor backends present in the DeepSpeed configuration +2. Create a specific `TensorBoardMonitor(ds_config.monitor_config)`, `WandbMonitor(ds_config.monitor_config)`, `csvMonitor(ds_config.monitor_config)` object which will only initialize a specific monitor backend present in the DeepSpeed configuration + + +The steps to create a custom monitor are as follows: + +1. Add import to your desired Monitor +2. Initialize monitor with DeepSpeed config's `monitor_config` +3. Create a list of one or more 3-tuples in the format `[("label", value, ds_engine.global_samples), ...]`\* +4. Call `monitor.write_events` on the list from step 3 + +\* Note - Some Monitor backends don't support mixed sample values. 
Be sure to use your DeepSpeed engine object's `global_samples` attribute in each 3-tuple + +For example usage, see the following modified [DeepSpeedExamples/cifar](https://github.com/microsoft/DeepSpeedExamples/tree/master/cifar) example: + +```python +# Step 1: Import monitor (and DeepSpeed config, if needed) +from deepspeed.monitor.monitor import MonitorMaster +from deepspeed.runtime.config import DeepSpeedConfig + +# Step 2: Initialized monitor with DeepSpeed config (get DeepSpeed config object, if needed) +ds_config = DeepSpeedConfig("ds_config.json") +monitor = MonitorMaster(ds_config.monitor_config) + +for epoch in range(2): + + running_loss = 0.0 + for i, data in enumerate(trainloader): + pre = time.time() + inputs, labels = data[0].to(model_engine.local_rank), data[1].to( + model_engine.local_rank) + if fp16: + inputs = inputs.half() + outputs = model_engine(inputs) + loss = criterion(outputs, labels) + + model_engine.backward(loss) + model_engine.step() + post = time.time() + # Step 3: Create list of 3-tuple records (single entry in this case) + events = [("Time per step", post-pre, model_engine.global_samples)] + # Step 4: Call monitor.write_events on the list from step 3 + monitor.write_events(events) +``` diff --git a/docs/_tutorials/progressive_layer_dropping.md b/docs/_tutorials/progressive_layer_dropping.md old mode 100644 new mode 100755 diff --git a/docs/_tutorials/transformer_kernel.md b/docs/_tutorials/transformer_kernel.md old mode 100644 new mode 100755 diff --git a/docs/_tutorials/zero-offload.md b/docs/_tutorials/zero-offload.md index 420760f..764e80b 100644 --- a/docs/_tutorials/zero-offload.md +++ b/docs/_tutorials/zero-offload.md @@ -38,20 +38,22 @@ Second, we need to apply the following changes to ensure that only one GPU is us ``` ### DeepSpeed Configuration Changes -ZeRO-Offload leverages many ZeRO stage 2 mechanisms, and so the configuration changes to enable ZeRO-Offload are an extension of those required to enable ZeRO stage 2. 
The `zero_optimization` configuration to enable ZeRO-Offload is shown below: +ZeRO-Offload leverages many ZeRO stage 1 and 2 mechanisms, and so the configuration changes to enable ZeRO-Offload are an extension of those required to enable ZeRO stage 1 or 2. The `zero_optimization` configuration to enable ZeRO-Offload is shown below: ```json { "zero_optimization": { "stage": 2, - "cpu_offload": true, + "offload_optimizer": { + "device": "cpu", + } "contiguous_gradients": true, "overlap_comm": true } } ``` -As seen above, in addition to setting the _stage_ field to **2** (to enable ZeRO stage 2), we also need to set _cpu_offload_ flag to **true** to enable ZeRO-Offload optimizations. In addition, we can set other ZeRO stage 2 optimization flags, such as _overlap_comm_ to tune ZeRO-Offload performance. With these changes we can now run the model. We share some screenshots of the training below. +As seen above, in addition to setting the _stage_ field to **2** (to enable ZeRO stage 2, but stage 1 also works), we also need to set the _offload\_optimizer_ device to **cpu** to enable ZeRO-Offload optimizations. In addition, we can set other ZeRO stage 2 optimization flags, such as _overlap\_comm_ to tune ZeRO-Offload performance. With these changes we can now run the model. We share some screenshots of the training below. Here is a screenshot of the training log: diff --git a/docs/assets/images/175b-trend.png b/docs/assets/images/175b-trend.png new file mode 100755 index 0000000000000000000000000000000000000000..6a500d53fb61d0e2b384e14294bbd2575aace848 GIT binary patch literal 90652 zcmdSBcT`hd`z;z!UKJD-DGE{qB27?~A|Oray(1-{6zL$nNfA*YG(n1VX#ql$5?Uy> z2uSY`2q+-Z0@6YW<*x1feZO<>Kljfw&dC@K3<+!Rz4ltqGoLxKmFH}JDqA)I!a&Ta|p$M-@z0(;-NGD_oX1udG5dehed=bF#pe+3sjLJ z|MP}}n$F>W-pJ8&g9-T0E0k9y|9_3G`$px&e?NicM$o+Um$kDWMjO^X>1K|KimFab zq<0g9zVOIxxWaOp%=a*Ol98R=P0DBG&j=;-!R@z((aB{B_$L2N=ifbJiJ3!(Y_wkrU7_ImlJW{;DLEblH#=I1i86ukT4^e(N(rxBi~SN;4f? 
z%}O-tg+0LcEcTI}E zjyP8Ng)AuMvpVU(BJS!%1LLTLR+3KWR~&A9e|wo&m>patZ2fJl)TH``gv9s+bB4=A zZG-jJ@@Oxs-5^SHiV+%0PDxKs&v|36L+t4fIr4fNJL0nd<9v6z-CEOTtW?jdb1Y35 zSm?tD8r!^@pq>ngRn6nj6SNTz-Yf$ZYD`-|*uYjD2QcvRlsj^%( zo)Z_wsfer(ViYjyGIJ7=uXZI!GM%pawZ+GS9t)@^#VY+SKTLq+Ao?yjGJ zNmV{sys*}lu(XPGtzHdT94OdXtX7YUOi(u6s&*|TB_$@#Avo6#{(Lz8vaJ7N7T+xbu{F-$>tePwK?&k?=SbN+-eVGptzv{^V zdn0vlSp#Q}l&3f58|jD;!iwXbB9-X{7;aij(p|dLj457Ei(PD!N$uax#VIzHlX8L- zCtk;vEbmvEX)WGGj&HqN^ut$Jm6p1Oh%^#RpCN6d7zG=($!}tw}d6rzV)sN(bL;+07+*_DZh?FF!0ZtLxeFKLs1}1;KzLdyrf~r`sI3Qyb5v z+!#LW|2xxe)H0;mHa3-aGN-z!fD+b_gLT(Tyw1Vi?`+cU zY}`bZZRccTG;hu(n4F1JyR1b@rfeB2qjGbHIGOz-&2%wA!HQ6u4h0maa?)ulOWB{cl(W5?DUprUI4)S;U;*sZ%bw zw{G@GAt(hZVpyd{v}6yxm_nCYwigF^u1D6;2-SbQu9;>N!y;aLUS{{>Aq}j(6)Z}* zslIj&oD;u{e#Yhzd2J$+C#&AfnSK%cd`e(X)aleZ$eYv0I1ZZ+cXsm?`GJ}$p+7|kmDnv@k$DyLf%Btf&kH(m{Lj7N+jp;msc+x zM@p}3OX4eH*=fh_|LRORPw<@%%6Z!N6A`$!*y6i1)Il^O4=22Nb0J`}Tl~TDNZF=P zk|=@{BR-`v@tTpiITg5euFRqtk2RROa_5OgkxfU!Y6~J@u^=Z&z&OAxEU)#!-7%jX zto!V@9NDAYFB6oo_KJggm)UPIQ_L{A8z97R4OLjS>f|ZK+>eZmba0Qgs_#x04@|-9 zml%~*F<3kFOSB%98I~B?_q~@F`nDlW8ney;v7tW579-1gCoSi}>sM&QlH{K<^fYid zyrGkq^sv#Z*tfg6&hvLqUvXhp8l$!2bmJkD<+I9(T5;%}v+cA`jMchK^dT7pn$+hq zLyq=%ypHyMV<&vNgxkVLN=;^6&ctS&y4k#yAf+^3W&0pcIgS=2uRt>jn7nnY2&}!( z;7SovGKP+PxHCOy%7w;j4clGOEyz{!Mfi-{`gDtq+SHo|5-~mm@6iSl$Ispqqru!B zC(~ndRHF_XM-uw{W+=(lDl?ZF#d-w(jZ2-y?Rfg~&K2%lfvD zmI{V({iX&`z8-6}XM2phgTVTDXWrwqzKr8cZP;HbA#8*k?}WIXG^|lvp9HhCG$W)7D8`|~xn{>EXj*I8EeI#Fia-6c*sCZ^v?Sm~WfPtt(;&4zNOIBC+m zOwS&Pa_nm>Zk!>a(W{a{z{qcODo`>xw)mncQqh|nLMDo*3tPYPjV$H+reDV>y_$Q` z9!YgrdjMcq3_vq;cc?$M9FojNTin4NE-%Z&U$1o>{_bM@=sgDy7j?Oo%%06?g6?Qk z?Y)h5E}ao_%snEVX)PpQC1R za-8GyT)8r%GE<=oLqW!Y--0e)5m|J%A4J7CCFKU8393Ga;0}_y#i#4D8Jd)8+c^K? 
z5|SnI)KH_Y`x{dlB$DQiV>5gAxLr1JMi}>i(C8i2SoG(^gV(eUl@WA~ZES%nu4NpD zchxK7=G&I0n#pYBeqlDh9~3DUI-{*5x?@Vi(^q>9jS;q)QocTQvmHFyUiuWgx&@@9 zU`i*;-MYOWFV6dT-de8jOu6a%GJ3qgXh*+L%RBC+m4>DwmweaHe_(PVe`9!m`;F~F z~j%x z2(TMt8(Uw{O7fa{Ru;9j?AMi8>39Y#Hd2qcw(LD&qDRtMfV5Ig_IFz(?bn&aJx_>V zx>#}17Hrx)n!c)Bwa)%LWo(lGj&%;~MB`?#QJS2wS1q{4D7c-k1ro-XF%sLUQz6ZPCsBbBhq-dJj7 z(oOLB&R3Z4ZPIab>m6M+h1}2k0QqfP$@CJ)DNubetoFd=Zq?g-yjj`>v!q07qs06{ z?sn>o+cP|q9{0}(Vt%Sc}<Ipk zFY?LJ!Ctwe{#(R(wtHhSed!_&0~Q$UNibz^mclP-JzY1guxK`KLMbFoTgmXY9_^O| z9Utxp(GDIB4|8#;;a8jj4^*xYl_UARrPqt)gbR0kLG<;Ri&sVwjbG-c4RDw?T~;l- z#=x`aBjz?W2_LAbXb*q%L)VWAmZpa|KEMxPySv)Fve zcPoMlUUG2BQ&$Id(J^&AUG9-3ZwKn5W}B{+;AwQO!36zb$;XvA*`q(THX13ZckE(I zz7AzjHfkPSbx7_1B~n3_U4y$G3OU58pitiO4Ya5%b(tsT92L)WuRMf4_e51ZC;AXvm&TQRyUn@DyPd7ACx8Qx) zHF)sSQ1Pr%!mmV1IP3DHXRoHy^IZNfUHf|ik*IjSvu~113d~I*M;@wjNYPO;_b!)_wW5SB6|F;4w_>n?+CL&o21z{wxgH*pdPJfqLnQAH#sbcFuI8N*FJdmYN_QjypDJ6gje5f-DH70Yo$ zbTOZ?Kf-7xdnIxX`N#9K$VUerX@T9^iHdo0Dm&U|c&K#L?4Km=M&bta<%DZ!f}28m7iJQ~7+?5>pxer-PMH(LUF_GZom&z>G{Va+Juv zf_vE5goIB&+tf@6*Z&>a%-WH^%NfK&2T-b0R`9;V4p!;8! 
z_32a%!}?1>eG~Igf4fi#$Cp~jp{{O}c(B2K!tgA=-11mup8AhsL}c*M!DcVPx@<1% z=pSZyau(iYpTZbm&BzJQexN84T#9nfRX=4rr9I8*c4(-RZ5LOErDAw3h$%U4lLG)9om(Bwm97eeyuiA^}_H zv{Kj_p!r7qx3VB+u<(kZOe>+U;%pTJ?qVdrNIST-ZlW)SSmXQwwGF9@1U)=(iV1 zL*7*su8Un}l2#fZ4!LjD3oIsQiY3EB#~@mXe343@`k$fl`)35UU*9xDC)RoUqR$Sg zlFi(jeKtOppU&kC5c{&8Y#b(KdRqdL^m0{ zDU|pamit3vv$gmzg7PXn+92j#7T%wiqF^E~-gi7FNMLNpg^8*mwR;Waa>V^}pM|&h zH_tNtn_`#Oyd0zx9b5F51;+H=Go*%ZF4h*cwl26EilUE_`|WMgWW}@40u1yJ%h%2# z*=igmE9+j69IJ-1QOLY0lBP-!swukk(o~AyNB`6XoReBhQF9Zi#81$4JSoK>VUukqV~Wj&^$p!?g-F`N zlj$>}-42Z^|jhYzR(Dn4?>-g16WT5Ycm(9J!Uq#|)mPz~c)%M{Tu7%lWJ9+mK zvl5;nXEd;zb26%0r{1etX$QZRD9G4iJxA zIJuZ`9nyg$jb&8sn9%K4RIoWAXvVYqB3tPY%v1$(imHN`3tJN=D}q%2pidga`OEg{ z{74yp=Ab6xAd`kQm8nz~lCDZ|F?bvPj|^1)5d2&&JO=eG zv9`#=&=boCGo(?$=y&uGPl2hua1Tehbo4Xs>p+Hy+JOys$z81pLyn?&i=`(yV{$}_ zyckD4=bTOfHk~LHcw}O})c!&q;%b4!;83#V8A%);e}KXBCpP!q>Mu3r%&5gs>u#tDE7yf49PpybCMC zNq!Xrfw<<2$3!v+wgP|Oy~@p)vQAd2TEeU-okJzqcbWvonUjOD;279)^CS{ zZ_l;`?n{+;qh>0dDw@TX28#80uv-J_2<3|*U)f+BJMk(fLJz;zO$R#xil>llBE718 zZ(U)>Q;XM6=jrddHl2_0Z}}xKibL(Z{q5-YX6Mj~1*ZD^eg;;$LGwxG8B)IG6Xo1# zN?o=otF3fiM?pP3#NkQ%TxumMOfbx-`iZ9TA+K4TE5Su8bn7+^UgT4~Wlpi2zKeWT z5XnTm!A~hlsfaIf@l@I0>4=*-je{SVx263ai;MD5bAN5QTa>*cN|FDLr%E(;ewwyw z_!JBy)qw8gY^DTEZoCj%r3v48|4*CvA^ZYTCmy zW7nA2&?h3MlV#IN*{tDTy+V>y2B%Fo@eZyV_EC};dn`5gA@}wFPDd!65&%FR!_TOL zh3bEV1<}6eEKYC*oR@=PjM|Sm?jQO(v!}$U`@(4H6BV4ss|v^_XUeWUcvXLlIG$u`hbN+umohm$JrBB?8yg_Aq^eeaH#;*RY z`QAa)BV9q=Dr&~Rh&B@xho-H;hfUL6<@GbjX06M%3c~NFgIfV3S>AozqW39VHVqA! 
z#OG~J%t@$=cKk*epqyk$LfNFDcy#V-2wvZ*Wo@1=*AtGn%8OQ-+%vE#KAAIv*@_f_ zf2^_KTBo0|RZwl~njwBq62JzP_}%spx(ky#QiSl$35bYQM|E6#eKkSsr+UB8*?sr>BI7Pp6=7|~`h|^*k-^t2pOCww>2w|A)_8Ig z4XAUsZ32F01-jyc89KLC#s?2ez0>l{cC6j(MJ$>FwkLa+`f_L5q8DWNNA!H@z3=oN zjLYCG(mc&7v)N8CB=T2Y?gpjz+sF5KklqaEU8kDH0m0%7)QV|{IH&j-7cg+<*}era zwO_U6`Yj{%vlvzc7!;hJ1U#aC=ST}o~ zzcUkc6W_)jvJyNJfv)bAk@};M#=E=Nm-7N^>6kfFXL|2&Yw%8HmlQKW2L7=}YVF?6 z7#OO;H9VdF`k+3nq|Ip{U)6PP3Wx%${6awi=JZ7l>GMyQr&~iz_4*5K?Bm#^C#{oG zznqty+{TY8%;4j~K6PoN-o}CawR{MPevjOW1j3c<_l6uev0SZ*Z}v7iZxVWu=Y<;P z=;~U#cxiQ4L>_$%J^Q=JC}AGZsg)3TJuJ^v#+>TnwHTCPs8D3FMfqvFYXMSb0J$Y% z_z}`|f@u@xG3A!6Eo2kbn=vDUv&ENP-R6}xiw)-#nGkcqMwA;_fvc6%A;*M6HdU@J zN#E6s5et1)>`0jzHd}E$ZARvJ?H;JH>J;>5Hh>iI0dVz#gBP6&Gkwwo(DZ~>z#ZNY z6pB&G+wj8PMs`!cDw&v$E_75yD}E^+W>fQQSRr0;gngXSFUUZz9f-Z4wjX+>!_&9l z;4p|1TF+C6oXU=qT;i45n|q6(iB`d*(DGC#nSbTS+Y_+9+D>loE+ULn*|)8YJ&FMX zUrzZ!D^pVFxmeJ?*Jyx_>NP6yMosF0AxEy;kA@k=psr1H-YntpCL07b zI1clt0M?-B$2z!Wb%3Xlbm{r9GBAt!sMt2-l6mWKn7W4I^|`G|P^GSo1CH}onQEq# z=Hc%oo2a?zBZy+aI5+)DHk&*-adg#2CP>SX(jK0__0T>`RQg+5l-6_fY92pbTMWzZ z*b155fM77KfA1;$ zVZ=ooqEn)e;IBjhN<)oslgghOAAGcDw^Kps&e~hpT4EZy3W7wqe7}(HWi_-N)>4&(80jNHY+_Fk4N)P;+MO75ygK7bc{rMK)a z2{2hZ9oaM7Qi@gCBs~3c4$NR@u~cM&)joDn*x0#%(?~j2O$J?nBIUHcIQl2v>&Brp z{TF_rd%rV0C#QAu{@Es!(7Dqv-gsK4>7JY57chCKP$;v;@M0QLsa)BBN)=Vd6(tH( zD(a93TL;q0cn%2Vuf$yNb7d5;ugEDUp!<6>NQ8fuPc{2YkwTXC=$CYfiiuQpW`m8Y#K0E*GmCOr7b(TyF+`uxea$hs}of!tSlUSjS|P0+$+z(#p2 z0B+8^b$Qx8u6aZnY{r~#F)W2d^*ZI&*C%A^w2%E6XqWE)eBXpte+hav!_IT#ckT(=%n7Z9T3Xy&-GKN{0fM@ z%V{8k`VyP-+ExLp;zf1>Qp*C6jdp5RTme&^10q%-D;9u%-C0T90|Ts&i*ExcekE%F zR&?tWLM{gb*{KhTdNZaWtpMxK?zb+xV|Qnw9>^a2d0;oSG$7582m>MKaGSUR#e z+NS@Vbi-ueu(Zwo&yPOu`)56j;<0+j-M(F7o;1;A|4um-MFI*|D+n(DE^K;FLZ zy*#36W|?#Ur49fH_W`onE@!1K$j=YY3OVwxL%47M`T6C+M4j7CEhQ{0`NGi;B3tla zccKz?_|0YhtDoI^rc8mVX#dstZo9k7fk>`uQZ)0$((fYOZm1B$b(EuLNIH7*GtZB} z=pTAXX=pgm7?=2I45^BjZ58v-UaXj&J{2Cr-j^?^eJ8dg-hY1(6XpciJ%ZPu<`u2| zQfei`uf<`yTzIbu?`MQ3f{ll!g+awkM?j5PR&sLqnj+)NshnW@8cjKztK*`vj@ZyDL(|%M 
zbX)Jx_YDOfc(r;T8_E|a!_m$|vkZIU28snrr$PPyuG4k70Zc0^N+L`CxexVvsR%p-c1*$0VJld(L_iAqrmSbP>>J0$;|=(y z48$)I#9GO~;qtJ?=Va(J75QkqoXv0Uhicn>2=V}(WVK!3nT6%Qo{*YI|4fY$(*r| zadEe$&X4Y4Y6XJzZp3HxG%yJ8`RDqCFPhOkNtt2Bv4eR0G?Fh9YnSMUoXb95u6`i+ z57%2|)55!QWSXiX3-?Gy99LuO72IW{H$B!KOQ&{4w>0mwCN5O%h|i)^>uMSI5HLz0 zE~4n{#$EKUT)9*HRIhA@;SS$VhSWzx(*|{_XM}OxCc4)OT2Ay$>uf#ichS?k!_`RK z$1an;TD3^RJ_`M7Q+t2mKYpoCosA0r%bhlM9@KA8;q>>F`+u3I>OZIb|L4B@KjMl^ z*-pQ_Kc)Sj(*^8N#mxQj_NA`WJ8h8j>(te!p-H^DpUtA8o$)mt^S#+_ke*luJUcV9 zvt=M-8`ZfYoW?%nFbJ9s!3Sq%W}2MxnWcP61!XA=WR3a8$4b2jOSEr!eAHPsVCp41lFgg$QX)-j#8tIHO=`#2DM+&rDT3<{=g;F|;7 z34qWTD;#woaNH0qMN?0MGaCi zAOGR;3-84N94Ja8m^ch8EC^bF?NA5Kx4AhRGRXFb1kK?{Fy))&m`)(*xPlPy;Q_70 zS1PyHYym~AK4Xs^)ImjA3S4{+d3XiGL@uT151m5TF8F%QT3)Q=cZBh$9DY{W!!3Q^ zLr~D(T~dK27e-}iGx^OwcecF~wst0y zh(cNFZj<%yu7SmR1*QI*^G10}=TdHMrhsC70w7I0o{Xnq*l4$@CV!tnP4NbWG-@FR zZ1F^dmDnNi6)mfy?f1k=uDE*%v7F%0ax$N7sy3Kf$H3kLc;-a+aQDqW2hQ{$T%4c9AU1bQ|<@5>p~5v zuXre@qj{5V4ilkE3_VOuFaj4;JsT%GbzLjN9>U3)(jax(K6rK3T~!q0Hq?9Ry2hV! 
zOw^26%&ru9rP^ie!($<>D0BKK0BgG8L!1uW>9uvh+v3gHZvfjjCLX%A9{PwguMQhq zVt{k+GZrdlLk~g~Ohak!{gDftjS*h~wi67nDFry`7c0I-pHmdP2MMOZvDaA$Vwu1O z*LX6WvI0OgyioI&p7FUwfN0XB|9m{*nk7Q@ntCAlrsb*&WN{8Xj6*W5H7*MUl8JSM zh$`TjYV-zjZ8dNUj>5+jaL7A$JzlcPdQGU2!!(lIXumh$XT$h|hisoQ<52k-crkug zDImuWmX{c1I8FmXnK@JJ+3r)OR%qR~v#aD&sXR}hL`BcX>thuSub^!s(3kT%T!bYV>8ov%Z>Vc%Yz2SMn32W28Jctk z)mr`XRl1n@#*upy>M24PP{@s%K?mTdh*#-U1aIpgL!}`Ln)0?Z{OeG;`}agX5{N?+ z;?vKpp-PfPl`)1=3O76SVca|Tzumw>ug{|TXh?&bF+jr@TR}4B{hw# ztA0G9BLUuw8DmkiB7lHL0xm~Wfcc3a2z=l-MEjZPOM{vnGKrdjB7_JWE-K4+b03z0 zgMJhOEkPBi zE6{Xpya*2uclJfLpP_STkULWeXskhaW#R>WIwH7AQtfK)8I*DyTPEYzq5)lsRDRB3 ze4Z-u3vzqbuBnnlyC~Z=S2pG z3JufvT}$0JbdIx?Gd|hVtz{1tH3}?(Pe4tC!LBxTPE>UQcFU>v-e#Apd-{T};F8Qh zfjSd*{TmCx^U3ODgAiXK?KL2!)PV}Fp|h~i=m1Xw2Al?KS+HrQlE(%sy(c0G;7~d` zygNC$bs*dYI$6C|FpcJmg!U0sOd%$|`A+$_l{YT&yE&2U6~+bYCaoY}R-hF`qd~U9 zu_E2P6{ybrWadNX@V$rs);(aQXfCTP_@PgcidTmOAga`!5&@&qcRvq|_9!B2GE&9e zvLIVx`NZqSNZ`)C>1d_O)knX~ORG;-J}mhKvQr(v>b>8hugqrgfc0E04Xk#qtu7$p zk4B1U^2h<^6bu__#SZTWw|lNEl9kRuIKNG!d1C_OWCxD*;S`(SM^`LSO#J!Ql-^oU zjKbiNNMjkO-XXS0YBB$Ieo{JECW(rA@?hlm9J-F`FJ60CbDWQ?xjb;X3f#zv?eSJfDY)eY zIO{;Ooh(W$aFRCuSOWB*JCO~N3Cvo9n<3k>(BqWgDbbh+mV165yjLg3sfH@Rek+F$ zC`8_aM!%)={9*8FtsJMzbSbdH#+)Prlt7H&iv7To<@dSUvbjQpS~Id9Tu$R%O|C1$ zGnStD&A{LAH@n3Dkd2ZZl(o%|Rysu|SeDpd>Quo-lcz*aL(p z5xBj4Tg9En`p|$!_-h3%O}G9FZdA32S3zhZO2!qP%P;?%M?zE<%IB9&bU2k6{2gf? 
z=Br$E6CBXLRlto0!?=O#_uNbwE;Ad2Fa-p$)9;xcJL+!d(LD~vegn`H4-AktU_L#G ze<6TS;oKNR2$D*4ZI=owBnE0i4h4Zr!xb=L*z`B1F893%fX1r#iT(2GoTq1Qv0^(E zO_W=)n-MjGG+dQ(Ttk=fNct59?8t0%~!t31fSe`=xC7o49Mp~ zQ)VsC8q}i!p!tN*ZaoHJ-qj#|<_?1}q(HlJrzLnecF4NVC;*#gS`-ySZxX-Zch(0T z61c$WU~(D8zW=jp{m)ldwV`={aolzPDJRgG#c-5A?8v3UdEMGEr|4)uja}35HRMy3 zj0>9=&`;3J>pDn|qy84Kv+B}%NJ=LoA=PyUU`&4DxDQD;cl1PmTS&|dcg&<+1FhMTwQTsBt7Wi zfOjRDU&6T~X>J0MVMhuaDfHm^MF$GgN~@PN??rBF#3hA2q_W%>_vmlGP35%ITG81N zugtbcXF>f#&U)Nba@zSz4&fK%7mn$dvS+jJSjI2i|yZ;q2IJP>aSnW|4M$jl*m(1XA9F-1-~r zVbl~#`L=P?EW>*bTv(DWbl%`Lx>h_d5v8GlX3bA{#2-4;o6DUplt-x)qZ2ifs$g(X zWb~%-D3(=fQW9YlaOdl|LkGs?no8>VYSKb^6Go*Aj!&j0izQ{FoV;r zty(SAl>-d2DTva*qJN`PF$MHgBrjJ&W?A26An%UlDn>OmL&t_~M3IOSW}s$s0XMK|_R2R!#E^9E57T zHLftleE*B1VeQ&TM!_XhV|0Nmbd)(pItO=m*B%i~hLo7a8R%EAh8r?6QxL@f)p)?C z>i;*|bC5`6uyE`FZBPr!7E|D!YQ{ic4?UOxg*pb13nox42?zy1|AJRU^Px;Ugi)a$ zMRU3`fIR>GKn@}#sxU<_tvx)doIc6gv|oU3?Bt}qplQuhPjiTckaLX$ooLxVL2r#Y zhv!F%Qx7BQ;7yplsQtO+@*+d)Cp8F)@h~cym6D28`sO=<+B7LiQB?K)%jtu00-*^|yCCeP(JwKgUG$l{?NKLn&&X?Rk`dd(pf% zpY8)rrm+XAYhiC#z!C$2&PB|=kHf6eO0gE?t&vf0h(E~=BBi zqE#EP?dYaS;(d(r?*|o%opiv~2XHa*f9W`#N58<%rM|2!xyVfhB~P^>tT>h{jfMyk zK^<5t6HvR#Did_tbG6W*)lXu)SH}8}fL`o;%4WF4xGq`HtT9xB#xoCE9=r=1o}8MH zZZt33DTMQZ8h_M{D%qd`zTVGC<@&p8NOpVD|MtOBU2H!4cV$9f2<5%}?{8nq$uq~J zCicM+7zfLu)V@C#;+UkCit=vo@pb$)+XLkCQYfATxQ_`??OQ=W6`*!AkVPTd3D7J^ z|CGOjelFzT9sre=jrF(aN`U^q;as5zs-6WUFPsYi8s(xO&(qP-H90|lAUDM?5?KSa zG_~x`(69hw`XMMOpjq@aGaMevSG|M<2$u-NmYQ9^MiU5?^FG@My>ysBfB@)-j_e^$uz6BX0ggY(jJGk2lmw4bL|c8|6R@Gb+?#m0J1m#eA8L zfU2**yT*(qgD((>7T`XS5u?U$io+iPQEjr(K2a_M)Cy893wK2whdx-xO1!%04_)oG zl<7+j%+Gy*GTEr>05~}hS@xi<)?n@mdvtxh_D1!Rv!Vg z+EOyKn&gi3eq3^(4hCIwKf`WOajO08Hu8a5lXM+U{B!o{MAFz29#cX9DCFP%sZZ(B zfj*`s382Rj1;^0qVBDssfGqC}rWyk}B86MeC6xBr1YFqAs&^G|Fq>>SHB{_ zPF1FN)_Hq<{A{~d@{pESv4IWu7!|3)7-4TJY1OD0V%{F zQC|`@`rAn2OV{Z1&WKTjq9);<+E*LHq?8Hzk4>N;6?2`uy9^wH+Y9|)K#Z7AuvNPY z7)leLKKWDQAWGRm&O?Sc06jkV5+4<#|3Ew^gnV>u8_$7*G)oqbY<0>`u?Q$W1lxVpEnnBSF^;*v 
z)~17w<+e?PsD0nZvb@h1!R_SD;GC^J@R_RcQ&HuC$CbAp1we#Svmh3<0Lqq~))UU0 z*NmdV>F}EyoGqAj2quEcp@Dl8*wD?*p_Z1E#Gxja#l_n}`&;g+va(lD%Qr1sCS6!$ zp4CTv5m^$dy5PM&gY%hisWSn|98rpD?az75wiMe6m|_;-=oNTxa4%(g#uc2)qY$GX zJZA?ZD!}c=Q$gp|)c^`rXEK1wPobiv>DE@TL6oONyWr!HZ0nl}EXosL zf6kkt>VX+D|Xenhz2^v z)0V+|wrkAA^n-lKg9mO)8M(U}d4GW4z}Xn#orH}p&s)6?9eg7WgzZaIwvIU$1tYI7 zc~(3Ex5)6HZ%Cy#%_O`NR7pJq6$lj)8} zai{X%PC9FlpxGg@J!AZ%FtPzmnhU!V;It8)lLz)`W^%{;IzvVG3{|0kDCY{=3^wCz z#Z0xh>P6_fva4?}io7289!1#wF(xNeL%o#uaiJJLNSUOOmE8Bb3~@`o6-JzC6M_l& z5Z(rqQ7i~(J0q*P6CnUlV{6|yvJ}79`jj4agQ)KL-a#mQ%Ye98rwI`*>ev5XqlHfV zd;Dh94Cmp>O&OdwGFzciYtGgyGeb`UMT+D%*ovFgpDfUIcPyx|-t)8|2k-toHL7R1 ziRFt=4wk9*`1O5Ul_zsfserEqqWGfZhaD6;DglhWCaBA#N*>&%*GJpyoe_oZ;@$&* z(D;7nlw6=Pb)=$euy3w)>R%8QL_dvg(zqeO7Nqu}c>i_~r>3lvNSZ6u$p|93<5V)C z__J=LFy^%ZJU?K!q^jTTIh~81BNf__HP^*a{7UCYGDz$%|L|aU^jCh(SHj+Z(7lTK zn%L+FwQ0DPe`r@+9-7xVa|dHGB#F_AZt4R0c8$)YGT-JM_2nekpi6ANwUdPi&V{*y zF-9qsPq&bazO1o64Z`f)j8<6%;DdZS2*m=CNG^so;lJfayX?&8=km$Zrya_Ls4hBY zUzRZO2!4M=V*XSRZ^T?#F!HKjDbu0K9!TpEH!(dbmK{Z}JR({=$RxbRl;PKtZNv{Mm%xl&Q+#7pw z>~(xs@)*PRA`?`Y-99+dUnBkN=_{0QJg(VFp#2>sd;9i@Qh~XxYv|&~5Ov z0F_M|hh(9M!xrSsIApwGC}a_|J-j*%o=(sP?>cCp5OW^;vHC)(z` z-<|!8a#<$UzRjw$UiSJk0&?@)>`M+v^~B2q_9B=f(YSp1Xxurz%0QRPhypF+4-lLtYjID9&}61AEW<80i$jK6QLjYz>o};#qh*| zxc2WtR6fvS5Y+9dG|N;IcL!`nG}A=f<&!A+`oC0&#iRk&@(+1y@$VxPCX%yEBY5Lc z7oRkCnb>9i+TSR_d#`*r!_J*nAn)8wSh+&C|!dK~fA{QV;l6@47#KmXs`< zIq^jFrl&<`Z)8o_{k(R1%X0IE0r(L_g}FtIgG9FixI^E~Oxt7?{ZV?*6|xIDf&tMExZFv_(LYYxWc z7>PVz`v4xw!v80-=0HoeMmJAs{P=MCxFOz-x|XwqXZugdF{mDhE`atO2cw06%>FtP zFrMX~b;<2ooSKH&YV**8E~ZAt^gy4^kCyA=6tMQ#+;vqkzoY6}`&kVEI?FCImQokK z^s+A$qh_BLAp6oga|GTiQo%6 zsdFKbm(Gs;ppp=5nrMZ)R-=0cD`^mz`!vk$rA86O1L z51(oU2+;NO0>pB|KZ55u{6ugj%?nGdf4zjby#lGMpo$<&R7p{ZLqv~ybg`P=r+4!v zgI66)-~)+I2lGdkdMhOG9xvz|9JQf7;|uC)`RCF`2N}QJimw&5eVb|YTS+R{%8Ab5 zqlHSefYpildrt)FgsQ^phid0PoiS0fe~mCY)-A~}==2FsXx^^;$uj_)YBOaWAu9Pq zEGP)E-1s-w9;`rexN50AK`l{5AKSK4>ofFgt75*|=%>w-GS*`td)0+>#}JwWeMhZs 
z*L8ssPx>F=qM$#Ci+Xo1%YoEMSIyBV+^Aq0Hje22QVBgGi1!wKo*^hqjjco=*8XI& zQb=)?u4jex(eA8KYj6-S#Hr=;eHgfBcV713L;77T0OV66h@qKTVAu5ui32XxE{xAT zAn*`W#Jfe`hiA&JrD+?!r2faPXs9P+Y)s=hI~vN3zIFWA8hx%g6>_ zr4z9}=mo-aC2wTrBWbnzoE(Y!E6>;2@wTt^|`L+g+uF&xJ40*mDJ0~ZvQ)S=OEaO@b zlh*u6Ej*-$58ji2{5rn*d+2Q2C3o(D3%t^qTNyFucklSv?M`|Pk{->UIZ4frvzM%h z^>Jl|CNVcM|1a-X0m_p5fIs_FyQ&pe#(!RRe}M}NbwS+L{Nxq@2t_aH{cRromvd)$ zG9Ce+Wvu!br1DdFLI^`BQ!Vc^WNb_M3ORTQ+alUPrF_}ZX}wmN+IhWAB;F^evyJ%f zgx>GFRBwW$l_wuXjrPQ-atcb=V!fAlvkdtRJI$&?I@cmaV*K_2^-VChUP^Eu(pC01 z(<@O0tf^UwGafv(#r2H3tWB9gaRTD$C#vU08T=OD887MSZKXgh6PnUHNR}zza7cXD z7*Q0VtB8Dk)gz+LmI-GEwJ||$>8|C@#N(Plt!n5Q0Ld|?#tj~}YXjJ#|A(#j0H^wo z|G!Zx+Gb09(~%JxM#xCE?3G=zIb?52sSvVd7P3diIW`d?A$uGnvUm1&zutcT|Np+P z`@UV*_qw_+eLI}<`F!55*X#LwK3+u2Q1Lqj`H8u#%$}< z@9$aK&z)p6$-$i_DA%;+K8b(AGGdq1J~HV^I5u9+O=OU*xH_Oh`k916?wFv_PbzC{ z25J}Ca?1@+VrAAY_jPzxnnOLchRz6u#*7nv7$^dLh*uCP2A%NA9L!T2Z}~EjQ-mk0 z9_)dA9?g5o*~>aSyLk20Ld^f7A-j{{7EFR5qbs*Cxe9sAY42{Bu+O)tQxLtVVc_+X zcvxaf_34)1KZ_f)r6OaL<~upPQ!qYr!p<_L%f6McVhK*ZCWcAa6_;jVkCt@>>%z-` zQ{Bur16WlkvO(-yht{z;N=dr#{fwLgA8u2%C%CV`1eNgTjo?S9ba&yUV84C44KqNs zG0hJ%+cl07fjaMK;WDuAgXiL{i^a>{zq(YUN<2SE=eRMQFCJguGW)o8xvY2S>V`E# zllF+m=~${gcM=gxs!g$IRu1Z9W=$TKWy8o@euWOR>W7Mca?7QEUp&|M6U`flj~pvL zuPvjTe%e5u<(BfoN2{a|6Tj-tYsx+!kRB5BQtMs{7$#VG4dnj8&n#C!{gC8ATY<;?1Ral8 z9gX%^bBn-8uX3Z~(qu%>WoZ11+NFKfGaifHl6BU{aDS$Mp%eOBcidC+w{!&w{hC(~ zbK0&L*$qs?o3r!_5|Qaf5{5g{I8r5}BoYiMr{ooz*L3bn)mBQgXF%qUR(udZ67gQk z+3>Bz4+K52A~Bm+$fi=0PGgXL8CI1>p+cM-INoZOLP%^gCeNVZ(3z5GH+%h&l>#Tf z@pP^?k7LBKiM^=!kFwWE%rFwv{3R<6jF?WgAD=K@qw4Y*>vVyz1@1jgz_wfV^jQ>2J zLf1a?9V=O3CtJM^=6txG`rxx3AwSdAf7_Ph2=-w3V1F`1-duJVv~^zkLxX;)Z<=6* z-%$;iicdtU?}kyLK+No;L}#U)o-1a47sDHr%Ot&&3?3CVT63qj8Td_t;GU&k;p)&M zqhbS=^K0^hX9Gmbpe+QVgyVv%Oa@E5%0Nn#T;n}zzL`zG(+*x>vOF0nqFUy)cXBL+ z(^KC}uu|*Ev&k&n$X)eG%a4`)m3wyKIN@?{i^$`(HDRQeEr*ub)wym@K0C@)oGm3< z9%p0tWoe7Ng6@3zV$GueWqiuf^Q8W8%p~=b zi1($i+!iofAjY&%y*^FH--(1$=J(t0E9#BwoC6DIwS|?2e<#= 
z6*Iaa9P5{jSbb-&umm{7yl0C42HGXH=z$A;(BP7iWy(BwjO=V;w8kWr-8&)bO?x@L zLGkvVO6BtPT1tgtzD6nmv#^ik|8)e3stlxoN@e9oxn~GNp^hw&8#$~x96~}ubd*xo zGZQ70C=~x0jR|o_=fFc}VOuMgZ&>wDdd~Iz20WTuWB$AtSZ~Z_o7*Fk9h7<-+aCF7 zJ7+Xpc2?UbWGnN+Wgp35`iNfM4s}v~$^Kc5E9T-Kub;mCyysg9-f(_>Y3uo|*4(kM z4{Ds+oqFZYttDE-wGuC_=hHHT1ICm?q}YY8g_;Jd>ORg;Wr=*!E_G`!YiJ$%&3uIO z!wiHu|0E;LlyRTFKwe9A(_B?^Tb%k`sh}s}T!K}VL?H{a3foz=kPK0!YzeNmLe>i% z$79LAA^#6;ayqr|g8zEv8usXSzA*Sewv>ZJe)-a~b)rMyq`Eo(ph5(L|$?QR1 znGY-+ly#vUsR~8^{??am^<0Z2qhu;xn$C=%Sjqz($~<+ISl+em*zj~Ij1uj`1I&FwC^J5(3W$>`W)!Ltf>-?mKZ`~8 zJDR@Mbd`x_+4{oQMsL$s_~ z%5M&T9ptf+lOA*-R)^owlY~9##Uldua1;%x)$}yKc~zQ+m~7!Z`@F&fb#>W7!i+yY z=oiPLtXJHe-dXes8A#{(l9P3jip_6zbiBJ8u%uk(iJMSRunCy(RCfvnteLaLMgaf2 zG`F`D8mw=5$W5BQ-@!X0WX)(XK zB404Ma_Co<7tx4#hOgK=tsniM0bEUU$&V8^O^sxIiXH;_9Us+jBKv@8}pb8(!O{w5S5yXHuKSd-G&A*a3p+cl;(PO ziMC>XJZ{k|?bExG*PutgV*RJdpOLn2qoDi3D?aG&y_3ZH&P!eyk+48rEozRfW!KSJ zoikpc#hO7Ugnrqv2=szRNm@^tKc~3VPqf6%VjKJ66$hla*G8@T3)5v&h#L+fvIdRy z<+P2yn`E7F-{LPHo|aXPYVN7*l}TuV+Xp^g$K$QC6=t)wyhu$C2mBHzH&=igTIxSa^b%{KQ%hyXMVm z8Jd7*Wv;w)Tnq5wG?)O&=GkPNQ@no}@~4)8PQ{NZ92mkB@4{E>)?^;2l-NG>C66( z7S*Th$9XksT0E%R+f5r>cglM#63>W#f zz5lhg@+1DILuO-oSloYXN?g@?Rilb*(cfNApBU0TGT9%3BYdCyUVXoc%1Pt!9N9c) zefr!L!78guw+J4R(dQCYCbQP<%#+BwOC02^<*HTFD9jS|cE}q%_trEv%IM#1?Urdn zmEV(8rH~-x2xhKD8$O~fT3;>(FW1?6j8%L!gLj|l>UMdWX4jH(op!-V_|^e<{M^W8 zuE~X6M+d%K{QW*^0duNotFqBMdzrP-5}62VXGIlFQ}_Iv=j5cT@#TXvDG7=)&Md>V zb5mfsx)v<;a6iQaW^Dxc(*u|YkdpZpPLbq%l6+bz?Glh1=PePX5gBUoegCXW8T}E(V|CQ4VHEFL&PxI_2Ky+Q5o*-Z&ybt zR6;8sa9kRo(Z5w%LfnfeM*sWk|C{IdYAq`8D?}q^{#|wAUP{)+86?i3FOeY#csv=l zj56TKN$gWl^VTmMHcNFoj-Ej;cAr5%!(%%pby?8Wo}me;au9%9UO$7r_eSi}3lFM0 zgp2zoCNigJ<~`pEpt^$`Wa4{^534~wwM05dV=hAkV9R2$?s@vLkuSJRP$moTa zS{tS-GPS2X`e*gSd6juJI|ggIOvJTkLEz)~+XRXVhmyb#Qy zv*4NLD$pk!FEG`gz2Ph%sZMTm4Ry_6+j|Cc9-QJk&TS&19shd;>?^+_ye#p{9lbZ% zj=fPR^pp{#AtK^?4yd+%meg1S)F*;u*M;d`_M>o}F-{D&(QNKZnJ?8b_Fs^d$V^tK1N@sM9-ca)MqhxO2>d6bPsm5?yL?CM^ z-f>>bT6Ur1bv4RP%`^2t_mLeSA4)xI3s$j!(%p4}wbfPPxUXVKSpEpDhPns1 
z&DyUEKn^quJS5!^dA7#dBi{hb^14`6)1E*#Iq-}{De<)~q%)D;3c@~fL`V5^hC51kDYULMD}#|FGaFLwY~JO@vPLhv44|2ht#0l)35Xwt97Olg*3iIijACw6V( zw0%%0&WAiA{~6W`nUaT{n5j=j;4VW>6x!=_E@Qzw=AF#evi68AS2my|Nk{Eq2K7da zFoTDve03CsF}bM2N5J$JH}OJ=jbx6PTTe;s>WIJXUlU?+4OjXgE1JIK1y#XCkCGh- zMLq^pq|W;l5DK(V`Uccu4AdeWrC7btWCkAsGrM?~S?B5Ghq9SiiF-9iVub{jh& z+?g`FN!3gk2l$gwb4a|ZP6FlBIPg4Dreh+-QJwG`wB26Gy2rBdA$0)2?b z_}2xEuuVvG5*{3<8^aq>7nzm$U5By|dk75OyB7H@NU$5lrvUuCmh&`+w2I=*_Od49 zEEnrM^wjo|!XU291IE;WPMk}B07j2SY#KSMIml+nkV_4}e+Ic)cG}R&%#m#Is?3br zN#cYcfrWe4XLsbYuc)=9CHICZ2$^w~FL@twxTCoPcvLQ*!!#kMyq1R;$4uX%#-6G2 z3_?yf`=+!x!LPwNi^TMJQ7rhUn38>mbPa0x9? zXd%O&w(IKD$SCt(jAz?6cr}hu6tO6MDfzWfGI_P9Ux`kJ>h1h#9BUSHCDlGZL;9+UY_+8NZ>5iP$J{V3I8*hQ=7{8`tw5wq@vIKXYKTAeDwN#YfA zcC28YLJ5S(C>3qV=AH;O*?OrAE7ecobnvD+Bz04LOV57CG%KGAAF3l#!4tapF=kJbAvdX`gkmd^T(*_1L zcnTZfC;zrDp0F1Elc~YTx-y3Jg(*1}>dxABou|=rnkx}VBW(_!P%-+0_9@i7CR@mV zkf!`ara#uyO_sP1+(o({z7W1dT%Gw+1*yPAin5O5{@&67&Io7;C%`X4zCVQ>`8l!3Qk4BceyBoZmPH;zWOZ`#?JUMNeE2V?&#GV!t|-`Mk$>-0jEsr;=|)- zu9$+6^|WOl9vf5SMAXX*bytetx9V5N3GnzaM?&kNvGvStOqaWi7WiR*T7kwCbI&I6 zr1NqJi72UI=^%M6K;bxO!w`}Noo$gPI zf>GYCJYTu>#`Y$z#iKJF^`%Z{-erjQ-RdcbX&{`7U-f#s@=? 
z-E@f#q43=QF@vEPS_~XpvS1vvQ1REh>l#qYI+DC+b=HeVo7pAk+2X0;aYbf)uEN2+ z=^#3PBWvx0|Lpu|!!8JnhpFhyewPZSdq_2Dx__(gVf1l<+{v>m3CGQ=P$mmi26JFu zC_qv^w4zTSkS6ah;6M-khA}W{pI4bkM0QbR_ONnWN_3Gpnm#CX%H?2OR86knI^mWB1BxV|Qz1&_=0v?mgR6Op(G@{!ET; zfu~C~i!$1NJb6`Y9#8)X@Vc_8U@*~p`PU7Pn7q!~j4%0ng;4rEOT78sPSy9nma;<= z-7hO5DD=Bot>>Ppb+5hGO@FkxR&smVQ2X&y5%^Q)y(H)J1=S-xu0`eU&me+11eQnAQg_GTF^|70=-LbQ8u)=7 z>=;{ww#F#kpjOp=Feb}cF{XQm`$q@=pky;Pp<9V72>~85n$m=1?g1UHxJk+ z#j|N_?+os;XMF0xqFR?5iZ&GGDuerL5Nk!iXIu27-NQkRI~4Dj8l^<&974ihQM};y zgvk;^n5%G{!8kTWlWVT+)j-N7(d%~ES*k^P+lK!ZaAj;y=}kTt8>r6?Q)V@+S9$xE z)lAwDLIU$K+!Bv<#A4MFrD8q!+{(09?c}FrQx#)z=yV;41(g+-|zR9irvFY?a{eBJVMM>sy!e7%Du(BuuQ06t&jclCPhTy z8f|9f*smv5J>-j?9woDuMqehs=f2_|Y3=GfI$t1cg{O`Y9a(w0XvpDy!Wnm!K8?h4(0}!kYxiri_iEW=zY81lnX850QsqMW zM2M8k+}9b(E@WmNmFHMi21<&~^DmA8U^KO3Y@kX;Z3>7Wf=-D!=gmv4OXv4PbjcTN zwa4ZSS?lhwh{EbM)^ZT=OlJ!^!l@X9H74Iq@5;wrOM+xf1q*qP;$YWfvcu#v&mMj$@ z&5t1fG>2+oSn5(bDo6b=B}lh3LytgB7%2l=??zTZ%Ri#BTX zMl>DV0f=nrIv zKg~VryEwG?yV`gWF4%{**@@S61bYq3#V7|&4pqK#W%b!8 zV8md`hX)f1E`P6ABH?V9%(B{GTJ#jV>W^`vt*HNz_?2;QGS+h_pL=t|R?x&@hu3FO za>hnE2zU2bpJp7J+3naO&6M6jWnAz{IJZ zK=HofVsONH!8X-!XN3L9I90{}58+ePLS1W^&!-V_2nI;0kmSHA%JHW|#|HL$->deP*RCCn*lud4KMV3iz-dQw@b^)J~KHR}o0`C(`_x{q0)xul%GWVZO= z`hwlXUJF-`eTS;LgB43ly@Zj}@s?K!`Ed?cd*^X1+jH(uHw|{ohFsb7xHkFuJsgEP z{}BnKSWPc|b~>oxz3twzwEAv#e%N6pOG9{YJ#HDY|?f&I0!OZ}%1mJGGG#x#{0-!Ba3_0Jf|EuC++bmj}}U znwZSybVq7gn3)%l*0Y1f*vN#=h)jFpn94D_eab~%$o%;|wf5cLGR;-@ zix&;PxAvSqel~yJsFkd$ew(p<)Y5HpYwcTPlclUJGt6e=7-K@6`lXMD7hcwmI9nfB z9dA9uo#U?dZ!~XuAP7wBi&NfH~t=3qzl9>ruif@$mt-x*-+ zD;%1W*=B~AYO?Rlq6(P)s^i{}6o$(4n+2V{829@W81|dbrZ#4-^-kG}1Y$S)EZg&L z%O-33L>Nm3DC`Nw+1T~;t$e5b!4!;VuamdXY0I%z`>uV2>ej&<;SJ*6!6n-BbQBEiGRILrWJuTejgTy_o#lyW8c-E^1|In$*_K9GpQQ^>V~q=@9E*W2Y~A zcEH zKHb*yyuG71yJrznv)%s$GnF$Uii{7`&q3D0*Ad={egsDAMrJ?~IUCdq#^=Jf!c#^i zF%rklT$xHaYl>pf#}~lnZmV)y?6M7y{gfhYrn#P_;4#FQDptZ9Yx_xf#|LdrX0G~~N{liC_bUxHkGMMA^5v?#%FV%Laf^9vV!yqfh(u-wRy-h4BWk4w8dNCEB9(oPj+%_t!9#W 
zEnZ=7(5~m33#iL@5lXYJCTbilr{8kP%NmQdTLr@{`$*DWZ*W>YKVpPRqleqO-Op2Q9gn{pY*W$%SMX97^Nh-OQv|J@s+I)ena?*ZS)?!EkKmwLyk z!6MJqN_@~-%4_9?aO7S7gP`ikDhl%kH_v4@DJ22pFSUPgpJV9ON-n;dq*3VEQ(!*U z+Q+=}NSbV)5VRZX)R+UUJpzEJL4L&eS5$EK$$snVko^E>fHLG@6Pcu%=Y>|?9{S!2 zu%g8Lrn+z5kT@YviE~ud|Gm0;osQQkwybh{Qv=8`u&4>XTqGO6z|^e`rV~%m^rrES z$L-4(3(`%WL%I=RbV|NFcy-K5FaMJ`e(u*?lR%vgqyk%?xBQ1oQnv6=n)^~C8KA`v z*`qAT;BoM%07|ug5yb{5fmxrr=3OAwf%rg!^V_9BV#Z;2LGa6hSx7g;6rA*_ z-ew3^jTVFZpa(FPjJ)*GQ>A{yU1yZZ?%~!uV!O9II#JO4t1wF#kQIKr!MIfuF4Ory zfraOGpI91`k49^mrETn@>fJjQg>rs=gUaB>hx29`3_9h`c`*!HWcM-kyAl4|y8ifd z>Su=3NfRa=^PTsJ5mza)E%6B&^4eD?X}!>a`HtiEbZXfwb8-8g>W6hS{2(h;YfMkT zF$Zq~M57=2`++@+q4F)OaB%q}Zv$0KTCc)NQu^NK38vYu%t$oGK+F!rU(khMdUutV zm(vy)YzCV5u82jR4j@OtuWZnf9bq#F3`XN3A_kOi8$)(+kO%Wk)j9ojBr{7&unlt1 zT*js(_n^37|5WBZJ5WS9a>_^j*aqBhRss-1JCZ|JXZd%+u-5!#2OVkwRx6d8E|4%Q z^*|q6gD^Q(U7S+93am#t%}FG=XagK6Q*^l&aXF6E47FBYQ^WV zuG72&e?oLo%vshQ5vi~Ud!TzOWgHX^2j4phT1tVSv8%B|IwW}EP6qxy#0LlV!5U(U z#NU^OhHR8_*K_>`aNpMq@Nr1yPNEMQihBGFa+VpsHbl;1v842vG< zQXeh!bH_r+k3(5L8zKv?#S+;oy1K!EDg0RVoccqlQM*#pg|1ewoKceg zh9XC#pUg+nf-N9{8SID{_&dt}=g{@jVBMLoSN9t2Kk#n@JOCZW4HVU_)9dMV{Ns!J6mgwp{fqO9pM!>L>vvNJ1?AzT0c+O7@_6 zx2{quM`Sz&wm;?dAK0kz(NU+)rIg)IKS>vvOGJkJPKxumx4xj@&-f$wJtYao`(SM*SFk9TW5*!>QULNLaX{=If4I9>( zIhjpq$|}P4x7Mfo)&o(4dpTZRNwSl2g65B8=sJr0&PJ1ljfU zpZrd%nQl-`@7{r@n(Zx49Q^IKbyP`&eF4ieYgKS{2#=1U&I+itImJar88 z_ws(q{P=@ox%@r1)jvip&p$?E4Y2+ubAmh;eTHliQU6SgUVvPfAO>oBvod+M|3llOYu-(Ow?m%v_9V>YGb*2^3Ji^eG`}+tONZt8S~gqrsIKW z{00P2JGCkQccJj}lWuI+wJRn*&r0ltwj6vagrAx*+>WR;Y@eO`AHP^0Ka05Ibiq@A zXO(#zgsE)r%dkIJ`|~wMaZ`F-Wqj%Q>ZJmPA!BMc)v9lEOVv`f&FmHK``k1e7SS?R(lS5D5EuR*RF2VoO#Mu*q50E01K_wY*$q0P;sTO33F5i)H7TzZP= z5}{(kfFVN&qePVm3nw4@venlw=jtoHVPR<_|FxsB&vIoa-3jW=Si*A9=XD|ew4Cef=<^z5k1N>zKQBgV2uf8U$p6~F^(e=+p%pS5 z3<%az20j9iuhsz_H!<;mO(o?$o8zxXK!2EJN_5;cA3Qtv<|h{ZMy(=rU}_nvjWAQ3 z&!B}kb_K32owkbPUUOy&DYF#URhxA)-P4^n*a*yS@&PrlewgBPQNv0zSk64Q1^ 
zZP!PoZ^#0{Yb)2t3zVkT0q}FwTJ{zCenNTSyw5Jb596jS2s=qcK#GToX{yo}um#8RZ-faRx6wfwtBU@Fql@w5{$$4h?X>B1DxYP!l`$ zDpz9x@0{hj+Nh^UnXoNP}4I0Uw3!FGYzA1aacw zXAhA`H-j|lM`Se%6Z3yIhKhw37ntFWS^KSF`-q(mY9))~boL+6#fT$efh!jAqvb5o z;a`T;0t9CQURM?xnZfbStKgW2Wh`0nR`sF4Y|$=|D0a%-(ex8m-&yDW&$rP7(LczS z@1iB1flTnuc47LUUPP*lh=_hY!$NMv-se%P~ivD)bq=Oif@n!~)!F=#O!V;kVqt{(}HLW<9_*f2nIz zM}2s#0DGcB7X5}I`+2Y`YB5#S^Gd2{HJUpEIPMHsoF)f$lkyTpf57~?A62?L;0>+7 zeN?F)%Ptx+ho@aWvT?9S>Nx@4xE3nDeA>$UhQB_?9F-vn=s2owzcaA8;jKouT5BB#BQ$;MsLxLZ&Y@kX&8y2cEPh84uq{M6&QeN@%oNN+uqqEf>}y zk+YXTc$<^@c<~U#@A0pWR#*89^lip)hV^yZEcSIL92;}r%B#XA2#I#x9U3BL@L~(| zEd)|-9{`NAk=F^75o_#B8V9NVrSz0;U&H{qY*k#1$}0)G>ET%Auc;Z*m|RueEhu}C z9SSmEDqoXj!~#TBJ~=~IrU@;^7R<0h3@%{EvF?UH_k^~wO`FGshmG>Vli`qC4q0Tc!n-%jb@^Y2$R=jvY`Z_?fIs7qvZ?A%iU|sEm1^XFvn%r=u8a4cx-Tr-_Ij;Jh4X;&^3`V*TQ2Z9~M%~h4 zagXeGRI}3LZ%l_;%dE5TbnLtw<)nMKg9f^DaI)qhD6_+9-$z$|BKr#(d+liY)*>v? z-YFsx&-0P0{i8BrCpnFdJg(VcQEw;Qg>1CXopy~@clKa?L*~9@ml{>}tdh2{`yJ~D zl!1GygEJ3DHFVi(rOLHOjeMkPwU{jbEjTVr4*_-j~{A1BcZ$OqGIIMv@;lTNmVRWvUsbVWW@p-x*2H zp)YlJB&RM&ye1aFq+;P7a8u=1O7TrLKZ5Fu^kT%90@y|lSn@f3sFtS~NMp#M?-(~w z(w&4tZk?l~3{?P_6)smf#tC$|9CiXJ>VD?aY3pxgCZ=%R?7-naVT6`%*2+EZ!5pQJgf$2r0ZxS|GO00ceSK(!{#4xhhl#Owai>P7J zg$}^E_4JS|=OQN_fNYj4qM1e&TJxFQ6%(%AC%C*`t6!(4VKx5AQs=|}hBI?iGJ*dU z-~`W``RygP&8xVloVg}*iuFON*B2mfxV@KH0Z-CU$KP|E_n)Jm-!=z~hn?~d9|W^r z9ERb^4p>SDYg=Wm;b7}FPGFVAH)Rd^_YT7{c9HrXETyL}Vpgd>n8gzX*sAzTV955? z#rjk5A=74VP^9SO%k6Eck^0s=^cub&8d;C;y&98l_0l8!WN!SaAvfE<4wvi8j@V}1 z5`KH9%=&iau#2m_cdX(yxs`_&S0~Yo57PzA?U@O8^|wI{Vvzr@YPmRNE?%B>`7DDZ)7DTAmjy$N z!n3klPP=pCp#|(TK`KuvHWTi@+N+ycJh#SoIrZ6i3!Q&miHoTF(#}oQf$x-mE{h-I zBoQpi*M>3P_JaM{JZJ2bv1En*{UY*$$L?=TcCTLk%DnUbY!&S>k06787sm0$OP5t5 z79Y(hS(&0N1~nM(PzJH7NfCH1rE zJ32gKxy(W=Oww;;G0mteMr-fBIA7h-_%(hw+WJ9j(x>4Mt3@5NdxyJX7T49!3poyy zlF{&fG1|1O)uz4f(8b~vJ8!uhY9%$pAGEM>)~@?;?s@-g)x1AvE6ue194u9;`phP+*k30 zS(S~l;j^-Hvu2rD|3}K2iehz?_RlCAw5BU-?RB&4&Tmw|d3!RcDXFg7hICS5DG#=gZpZctM%o4I~6! 
zjs8k|;>Pz^ix8X;kP;S9<`+85_jiFgj{FiQG>VaPWfRdBb6y8K2}MRuteu@gdd?%H zh`L_^7+@=N;);V+C@%-AObY?Re7KIvV~hqDqI&YD_clf{?J{hmq-?6oU zv-QFw7fhSR_t;faTsM#2#1W}4S|FbwR%p%coT^Ud;h3-@rmUVPRzCl3k6>4= zl3a{AUD7;CdobbIl1E2iSIx`8maJwMPG+S2(PGqrGW(*GwoXEP>h>8KMmr}#lQmZ< z{3S1Xqkk+GN8DPhSnNu()AsY~7Edj9Bu!42RaxO}4lIjCiSL^@y-CQG_O)4;ifK<# zl&E4<-n<>C@O6}4$H>NlHU=}cd!L`f#s5pEnU!|SEY^nL`#v^wlBdKFRoaXziX?^>$S#7As*o|ci>EdMfk8lNJQyEOYGKs)^0(1FUZbs=?( zqn3e4efy62c*wu?Khj@r`b=8Hx3Abk7HvU?HEl9W@3?Er%>D0PEer2Cu^3)=1L{bf z%L8^iyZ5y;S3C&HGw%ymNV+I3&k6_8)8ne~3nxm3?aXCu8v=#Drs;MSzQ&$8xN<%2 zd*(UzBvBj#YQm=cPhlFlYzpIl&A53XQ-$89knrs_)T?}YwDO*m?w;X|;1t$6=oJ># zuE|BVz3y}il?nyq?t~WLH*c&})2cpyIXGRCF}VeUv4w21i1--(M#T`miZ~lgRxLgu zYu)eQ@^-FrCyjF;+2q*5p6i6JHO>Irf zD|4c%&5iA4#U~kF50?~7-auTMXD^-^l2XEJ3p zwRcp8{>`gC-K5wx*DDu1b@!<}J!pc&Ll_?AHsaSt=YKtP^75bwP{@C|5|8(}Y^)W$ z`KKx^GMgo4Zk0c7{+-N?K75$&%d6xx6-?YcSuNOCCv4k0Tp}wK+nHk;I7Km-%2+kc zop)-F4~#*VB+;%~#CX=dl1B0q zh&z54bCE?^N@et`Nozd9tCw55#-6mimTk(Cd29V5+rJ)Dg68$K#hW!Z1V^GwxB57> zWzsW7cwcsIT&OFO$!LkX&G_}PM#4pz#PqevoYtUwC+mROqh3^GAh3UiX@PwB3%hvW zSC?_&v-O%WV78dF`94|S_s*+E`1lgvXU>nSavk&aue02hXJ{*Xb^{2MHb;_|$S~t?7tsia3>AN0vQ66bx4Q3*L95$sQu5Ikgvi43YI@~y^$X8+mSA<4XY`q+`xoo-^3HpDSUuuzqzR+{DtQenwduh3 zZ}O6>oDurJng*aSqF8mXf`H$MiO4p1>^tDoNfNO0GePch1|zmp1YghD9l`~rcV~Y ztgIx#1KdzHLv0bo4tlo|=tf%o#zN$<4i)rH^ug*}EkbelMxhw|?GM3Cepjf49If@M zS;HTls1mj~?Wj`t8spyEH%cQ%W-8Xtb5bqCge7k1tguFr8=H#c284Gcc4sC(O}V(D zeKVtPbtmt;E?n0h=XLWLxkQbR*0VAvdnXvn z=Ha+m{^XT*IR79&{|~*F0{R zAn$BX@`ZEM``*7G3H=H4-qMWawF1tt$2ETYJMXDK4tE)}^UK$~l}&e!##$_?Z62}g znJo~@JL>lLtr3sAv87ghV+vMaewS%{ewilR(anSANw4a2H;(iW0&PSdiAxDmzwG_G%K@22`wYp%M(kta}7 zFT6xX(A#6c*b?3n;`>z zpX=E(VLqIreCH8aE(Gb=W>y}ktwJ&{*}uZ7behQx21A3iEvzq8GCdqFf*UO7o7M8k@n@8Egiu-w4d{rWu4D%{I5>^)ea5$}~US-yqD2l7Figr;^ zU1}?TD}B1=cr5iV+6sOT*5UsZ20pci{Zb*YFU%Gmwz8AlptSfgS9j>!PwJp%m#0s^ zj$~(Icgo3VG`J>*3w%O~G!53a)z{ZQ4@K*)#xKKA6j1lo9%p9nOU8wB(T`pIp>Zl9 zEb9zYaLN_kfRoLaPRZ;EidNAd6yzQ}h7xrvEbjBtma_NSynPhr=uVf+=lqDo)bYHPHri?iT60rGyGlP_R 
zHIuvN7cSjv1-t=`5dE16vWQrU{F!mTet%l=tpSl76`KI^6z_MG_VO9Yi-@q8?fca= zs?XYllvZIzaD8g=*H0Yy&63_N{BSZUndbJ65>Qqlwl~#jv$CWfznetFVsFCbn&g3& zCFz(#exY1y?9LHNrAI6Kk8EU8Ve?)Y9dG$# z1LaLaR+cNVq}tVgIxLu!ZfKCzt>5E0DeFj6!8^N_KB;_jT2TF^_*kCoo7O%*s?$}^ z4P2&{Dk&)_YhF$)_UDK|CE4x4nHz_@%)b*XhL~sZG9&zsbUQApuGXif67>p&@>6_35SEu9QjT ziTMZBCHGz(#2jW;Ruflpc4NiZm{m`tybjT7VQWn0#CLKi&7bY6(ertiw|ldZWrL3@ zMae`a3TTD7eyyu|pTzawZR#JJ;lEhtX6~OGEYB~YNEW>(ofXdu89_}QTFnM3DnW(i zc6mWh;v44@1s!`aOR1K}cKo~S*k(K*ZpR#VufOB`$>3kKhUn>wvK|db+>;u{tS{T= zId^W%|6kO-^HD!FaId&mPBRv} ziv?A7sr;Q>s=EF*f_fLvRxGI8rWgQ&Gw?cZ;3^zu69F=u4-`!E;QP=tGmbO%oWy zDt7#jQ6?v|NgSOHzvaW(xGfMhyKm~X&@Pr>>b>CY85;JJCtiM4SZwp#Gft)M>-Hh( z2eyuDf6_`?pPb#5E6~>*%zmB{Vcu<-o)mNuz+(@!Zgr&O>2GK^gm-6Cs9j#`la=H) zepu|&)cGKGDhc;YR0}Fh&G!Q?g=e~6w2notxk|d1+fM07!*}wqCU0)Ri(%*MLovjt z1+_GV-MI!!P(4jcz21W=a$%B;XRg66)JF~PDlDGoy%5iu+s~)AqrD@wR;{Fb`8Vpr zxeb9@kbC=s*VJHUk>_yZr&EKO%FM@H&#k=K(Y^;pdz7 z55>TQSehM})EW8GJA2=ZTZ*Pa@49lzxU5`JgCggyw4s)BiKWZiVLNNHN$B6%LSp?G ziyJ>Uk@@2l??^$_(M*P-Q6_3=s!!qZ0><%(+rq)mTUsdMfop?_!S z-I2m2qR8Y0C{DT&$e^XLg+7Y?h01*DC#Y;Kiv#Dg4!}JOG)++!7BVx}pA>3WR48EH zL!7myUo$hR)T(yUJSp)Jn6keDLpIZI(HW;DHbh@%{1s8D9Q;ro`Rgz8zlLn;29BUd zG*#JNucWf9PQC)8oIaqf3k)ipIGKCAMT@EJrm{_B#k5*&mFPm^dxc*yXs)+hODRv+ zy+SKyy~Iwd${Lt?NSW5tT8XzqpwMgrwtMTA$8*d8_;bdPJG)RjPB75Ys8><{bKAg! 
zm=Jx}L9OXq&?yw>Wc@YXNg6D!M@>rpp(0iWUXN9%&|5)t1{TV{6@Lyx_j%&bEta01 zqkf0d&a1f>pK-AZ|vtpbMrT1kobSIzqEH2<0HeZ?J;jUb11bA35Wp7EACPcx{^ zb4U7wAH_?511K*9t#F>5hpttgtHv!=0zquZKsEN(6E!{#BIo$5ItZbRGfX0s6yd+W0r# z@O}u8+`>@YSm8}o$AlE`JcEi7wgF+_LXT#b<`q`9qBJ4=sg4cT$q@d^=Q_}29TD|L zR6OGUM-MtkrE;#H-GqjgOYa$IQnfEC-;W6hyqcpd^B31Pe=3V;HF~iJ@gqF`@-3G- zXfs?dYUXKy=gJ90l6nxXVd(WwNM{fWrH^%1HeayZ@O|9k*Xuk5+O%o7jH0`Ji`t(R zWcjeiyJh%T`X!Iup|pnnT+PmCR&EPwoN3z4`R016iEQywD_tI-$+wbBIK+Q&fsTWZ zmiIlZLxBBHDqG(O=gEqrwwfDx*VY(8Z3VyB*||gn9M1<>`8#rb$I{_=UD%N>)sm6o z09eTZ1hhwhz+i&t&cbc{DNwXXhok&v49)UP>H0yC*k5Yz`=V^syR$-zKC6f@2OUO6 zZ>oP$`*G?*K$>ylAr8%(xq>(2QFlx>x!?El`(y8(rRCnm#-o6N1a`gP2)+HZajfE} zh+LXp_0P)vujCrH2-=kCfVs+6uJtd3#E4;Jk&oqBEy_wy8aBukkzv2qkF-ut@&REN zA<6JG=5zz+oFGJqdpaoEUia;jDFGhi5HG=Ck`6=Hoi=eFbcJ%hIUQ*il&#F-Bc8^q ziLXA+6Up%N*qiU_DlpUc4ARE>*^wXMQ#aA@ww8H0{tu&5PFElEVjY;%S1tx~E)F&WViSo7)7Wdn$l_(c4ak zOy*sxc8$?lV`cn%@2!>prN=njIGrK+)-pZ2@L^ zKu-by#T(pDw~jNJX3@jnl;dtb#4qko;2(@=yUe3e1?(m7OC3cj8vg~tQKeuI8z z4DRO!C^gDkn?S!%waz%p^&S(x`u0D8%KHgv?64oSZR(Z|NO5#k{bHL=*p;qI(n;xgoX33W{yD34OY&{`WcOc#B#RdFN~)RScyJ zO4q!;8U(Wx$SUMVB~FTNek-F36gWNax`+PnpB$LzVfCV-=4&L0#iAC_O#jw5t3HH(}MN)r$V9EflN zgk*|xpmZ+zW}r_8epNbdH%`3@fGab_)rkIV)q3R11%blI%zerzP;>oSr=s|zSLg9k zWMy4pp3aEb=h}>XEP-66T37g2-BC{%&QqaLX2d($d3KxMP7*i2j|v#^{U+J!U-Qn6 z)FV+0jsx?Pfr6rY!JzcZfUrke!J>zpLJY4m9*3<_!ha2296OD!H;ow)d^XK3ZFZ94 zZTwt#()T-BF^@x-X(IWcJ3>S;)cUdm@jqt_u`ICrJo;Q$lmJJBMBFGi z2{m69UohG~ZUef~0?P1p1rEh9QbIw;AQ|bKs(=lH*3#>Nz(T)y&;$VHVksJq zoRHjA1$co);7qu18vuEv0GjNs4Yb;WpzN{(J0_ip6T%u$c%-VZ^gx`s5L%&NitI3g zSOsu8r1(Gw0);m+a+^Fq2>OK=O$3d@14MK|@f#=|hBS9PFpyE zw5BR16yALiqa(`4sk$VC{b3BLe~NE!H5eLQ?uMue3*^UFOcLxjq(?vNBoC9y4adxZ z5Wv%vf*el|R+t445ImL`1dp~U6bmv@>Q#tLZ#K3|bf+EbhJsy@e*h^u5ors5uttf0 zg|givIEg>L|C-gMPuLRe8DLuYFAWDWX>Xag>cHhUG$25b4#Cb0xC2WYC~^bg;cN!- zm)HLStO8riiLlwszbUHN;5LP|xcpBDZG)%5f_kp%%iCdpvvkz8->A(+5ftVqCh-^^ zzJCz;Bi{bpRqwio%|Z;53j@VU0%+KWVTE0kN!#AI7H(OYbb6r+AYIQxJkDNhRkH 
zYNg&Ld9SpL#pEMvwUNgUT0)qyMAC2|T-1v=*Ij<%fbhWQ0Z|{PZ*B2SVb~L-u?41M z;=b<=HA8+!1H@)oqeL+jr9zD0Wg+q?$5T6i#ukB{JR#w0u&#g@Wn14!eq^j{U_Jil z2XE0OG5s>{Qa~&`L7U!)$jAek|K+w&(QQk@%H=AWH;8V8KVju&}(#jz`fp48_Ld=0l|=ba|DaL=Ru$tg@o#%d=M2B z9&fH~cgz$RK+!xw7lA1WB~~?`Xw{a9>&2`QZcX#*-rn3q`@26l!S^&!vd(6-6FM6 z`Y6)e!^1PL3622}QI}ptloI7oOg8m-Y1a_oVq|{Z~xw;1MR zgrB4)f>(!=8|8(Thg+d6i-o{z$sdquM>2wrC63wWw<`V4^>=95Ly03IfWo#<##SR% zHxdw|6et8}fGi@F;aiBl1P5z7@Jzs9u>g2i_OE3S^4cU{>k|Y+)L61m~p6dJwU2_#H&UdyULG%dAoF= zdKTvpE5r2tEQaG-g{mtg)Z;)UpUwdn+l>qXgTpN39wgr^0L;VEzGYdGRO*cz_=ojJ z_{mR(jdK?>B9L61;thRYJh?}n3nP1lmIvmy;RY0wiMd>ZlzjYxiY>PrZX@n8<6b{B z=IMKXZhI)=CAen*!F~{1SY;O;JR#qZ8Z_FogK7z{a|ajDdW=q9hBFcJoQ@62L8DPB z01WeYLvNVEqu}e6o{7mxM2zFK+(PHUzj#Bw^vH0gaH`|&4e9$VchKsCU}R{Ds*)U2 zuZl$at`~owo5-c6m9g&bqGHDoVg8>r^rWYJ94{nJ8;?qQj8C)L^6r*^a}5V*9fFZ$ zf8@b?jE%ae{_>v&0fu*({xS^`dQk?IKVtL&P&T0NFa$@*N3b$D6WUH)#`ewL8@(I# zXWw;!kmO(ko(QGDVptbF2{IXJjR;%PU>4NPldI_LIxl#^ck^AaDla*~L_kqn{6B)8 z7)`und=a3_IGZaGea$>YkBL=hnSO0t`-5}+3I0LEvqQO_@XMTE-MFrSnf&+Vd6NTD zImncu1X5a-0a&Z0{haOYEaD!_JC#9FEZ+ghBaxlUh)F`*0ZNN_vK<`v3oF$lT@E%A zM(S*y*eO3GJ?v2kj*y?bk``{s8uL4c_+nThSLSBX4cFfD1h0Dmi3}cDN|zBS3WiA+ zs9`QIfl_zgHyP<%P95l7Nwk%c@yfCpuw57iU*AKNl&9Y*M)tM=XEH?Q-7M&Mk-J;F zzimU%lU%&F5>IXFb>@|HvIhHti*p?)npzRY=yCjEB_5A8wCnkgvg$AH*aJg( za=KyQXMw*Z>=JR0zu!fwe75-4(I;DHt@*W?~v zjh_|+dgV}tWc;+Zy4PvS=-i*Lr3%zh^d5|f7JAx5i3C2s8pXGz1HMU&3Fa#cSe%tj zkQ+JyQdB5%hf)2zde2KbaDgU*iFHJyJH0XuFcl5@RYI7H-LAB9AeR4MR6N(>Mj#JM zA;{4rR3I?82J0dH={u-TZ6_N;aKN$0UmuD+O&NjsQerS?ip=d=rB!@)5u=CTL8KTS zpkqF@o?y>k`>Sen*vncWW0Wo3|#Xgc#l! zSHPa?kyGyWyXPj{-TrFd-_fC)-botr30~ft4sox0=WN{Jl|<0kEQdJ;-Bk%GdI4{1 zyf$!%-E_C$RM838Y!c$?HIr~kM{)f^>>qTEpOJBleC zg1;XDu@8yQ36$ouGfwKfnHLmtow#AQw3lcEcgN`SLF?0LQwH|ki@`sr_rpR>Sb|t| za?<}~(-r2vDxdR6S2S?7lsTkgPio2JtAZ)9+W-?s}N2Cr~ zELIOInJLz#`bxdOGmD92G%lFKoZT{@C6AB1aZSOx*EJb@Dc)_+XL7Czjb0jDsw>9! 
zdXWN!`PxL;PluW3XNckIIQgNj9Xc1vRL`H;JSoY!kJDiuA=`$!F8tCXjSv3ndf9V9 zNbC3q@G)ppY(Z4PATY|mO@CX0kPNgm+ROdXp}GfCvq!8_#@1+*_Ziy97E%B7B@X#M zePkh9;eYAhe>ZBKUTKCu^xjQoGBpYR+A)8{v6{K=$@m16vbPMqzjX~6jt`~NZ>sYl zv0DW#rq1#Is%a~~I^~;EOt(4C?_C%P1>#Dg0LUd7eI%`SHg; zIM1oaT~Dn*iIlL_cRR*LDeCb9?Jw|`MMS!Pe{x5;qjXVo30&I=3MrzaiZV5jN{W{7 zgfpBBhD-j(IX{4Om*}~^V1J*O1*Yk|%8q;RB*Y}xgx>Y2E8Cp1>gmHmA3o@l`CQ+h zeemGvA!^p3i#Vox$szHr&OlvfBl-sN3e>^@tfE`^_seqggU zBCGXizs~5`)v6`K){Nd>BALvz@`RSx%9QecTV-sYcl3LC4X zi6Y$zeE+^cx%<&C9XrFv8SO11wLM4Yu1s$o59VFrTc-D;8R`{d$2P9*1#s(?_u1z4 z&*`Lu%nkWZ$kjLB3#?o#Sn{jW&m#+Uu>CRhO*~xk9m^blG5I8uNuXy)^@Y9(#tPQd zt~l;`phnO9{OtL+%N~K`2)<@H%CiBK9CiT$cs(Ndh$rE)-@}pWB7ve7rFCgyR)M_< zuI_7ov75cx8n=ZaVr*`x~JvEOWeNo_=K80wV!}(91M73Qeg`JpnkgA0f5_qWJQyn@N#q zxi%--R++Uu*GtGY=@p#MpR_M|NjqR=⪙qqSsZ)#XpnfX1`CIyPGh!ayL((r_ESL zp)To^AHJ`aT-D4l{cX3;VaCyLe(37r++bRI@`ri4K73P?fmJV^0?rdNLE3w0)lDr_ z>Fj4ibjPqMxYGxo?-51h&DI#neu;nZIn;)^$*e0wuzRvR2s4J0yC!v4`U>=_58$Jo zM~~T4{oK&0@vGilX|um@9`-lLU0%7g@dLrc#%jEX=(J{1iB8lU%9(|mz+b$}e{uxb z50!qqPMs`6G(!5F?FvorIcLQLSA zw&N-?=yg)i&Z~N0D_qAkM$jF$bxy!8J!ZR}IM2mnGn^V)bJH4^z+)2=Am zw!s2pC;Po*#TuhkXY_RMwsT;TlfEN9*KuSup(^!j=omIBsl2I`uPK46J2h}<^4eQP z=2@S|rhX4u*bU8a6T=qQgn#Tk@MwB-;WDIVjyHx~AicQN?QP(aXf5oEP2&)-I6mjj z-K}P`wkl|J&elSBgQ{8)SKcAY_oyJY#&liG@8L4H7kP#}^m1}qH*z~#%p$nDqraI* zKD=Sn!r)2Dl6$+A8jfd@QciZ5`J0(EJ(+5{zWYq8)CXtusY6KcT+~^=Z#PTNMRv?J zsATF3kLOSFSxEoQr(d>E8@63RuIgT z05}$`excQ9QwU>huVg%DjMGr$ZRcc}c{de#eYfas-1R6KXSQ!iciC!P;`@`t>Zt|f z(iMdse4ypix@0$$WJ>#|#skeiF4MbMeRV8{&Et-IjOwL=*@>=#`q!dkL7T@Xr+gME zPgD*&*ck;Xr2FtTEL3y{CXVD2IzDm`6PT)f{|GD-j>#r|oa@*f)$(3bAXT^9U9ubg zW255BlK{v$+shm8G`QBoH}bgN{OOad4@&a-j!R4f_Uk8>j~sqDwk3e?4&^KEKVwd9 z#r0Ztklv3jC95LU*(r;+V|`9zd)HPaE4=3Bcc~qprMN_v9y;1i{HV5AH=liPeZsYH zt#g9L47<07wa$9JlfT=f*izcC$XDjKQQ1vJBOLHEu}`PC@?BIgXeT)2O7bCvl*O=| zGD=3@d_5ZXdE}^*v#)2Yzn@;B1DQvN>sI*s=BZ;l594*~&l|-2CVKpCq{MXx32N^K z)Ou2`E#@o}27gof>C~Jz2#Xe;@16A?^B&#hs(R`IYM>CWx&T@lK-=Tlo zwdD_vk!l)VOKUsP%1`mH$sdENS9FD|H`doT#CFEa13I)=0~^Z7yu1FaiVfJB-wDMa 
zX0Du@Iy_)7q#_vV@XariqfucCZ$0d<qdzx zmF#K{$F3%fTJk6KImzS<-ZM|2Z=@M#e06#K>z-S5c;{^4VA|%{&Z8%)Nuv9t48Csx zaIQXml&SRE!bqR%aCta~PPMK4mAO7_w2?sDTu1T*YoS(5>uj*rXnQ8fH>6*;guS+h zJMZ~(*3>WOlpV^trIwhrRgTDO{Hf~#r=nh!JE!AE zcH%2LzdX#*UN?SMJoI)9!>7Db8;Waa_qe}K?8_+D{GfWMptp4`|9+$(3-QvsFZ(_B z2bMC_YfL+KYMcwq*|It(Jop?&M;GguXR5ZIgsL~h&i%m|YO2+gT}TkLs5kHY>~Aq( z{XGx2oqu~$$f9PEH>cl+uj>2K*s|Vk((eKJ>dATB=>|um^Hs8>UOU=R8;<0#NZ)5* z+mChH{eGwam1I0$oZhtAFHJescW&zoL#`luC8FTQq0%nVDuXjtIhNIB00HPm_3Mp$ z)Po9R8V&Tdy^(zioWm zrZ&y(0~gyr^PBzaK2W9~DNbQcExNuaX1*F-7Ogg;J({WIwk|#JvnhG3x{h6G>?)^q zaR$fWuj3@4SfzB{WjQQ~e5+|`e=0@CTK;MRX>r_85dWc8L~DiWjjY?@aDFP-yQEpw z4NZJH+URF=r0;IoLvAu0Lw4IvV6X5m<%-~a_*;~Sbx(%VdBR=-$5c=lG z9?y8=9tZN1!2ht}7{KcH+UVAN5zRc!Zk;;Z5d*Ib#}doJ2C+PSXDHsIbPR zMJQcj$a5Z=5{?;#4;>q68aR5WUB?cq+b*T04cf=u@WAcQohTrmUX(K_m8_p0m@3H2 z|HMVoNs};Ws--8pH!Y6Tzi=;ZX=N#nvyZqTr9J=o&KWD~j(le~-TSW($UuU2tIV`1Xlo&aky-L^cvem519 z?b5#+J5AQhe1s02r15Q^SGU76CpGvzFq0)yaclbN1^YV|T3L^^gq!m5Ez|tO-hKOx zsQw%Avs~<=7Aa@3&7uiqU)YEm`_EinvHnYNczM-GSasJ;RAs)xIE%f6jX8zE_)>?E zlJY#(f3eu(Hb3d7+4XFjj%0!Ex%TbbF=wkjvXye_zE9{FC$KTTjG$(<$x*9aa&Gi! z2MS9Ec16|WE;S}zr#|3v{$5ngq=44pF=CfJCQZ>ns9EULzH@>ShqwKCzVXZs`7iM@ zHpKLgoJ-wbgp5|R?(0Rqz}NzwX(9emrf#p)vhC|FqjQsUmx;O?7)CCe9HXp4`2jw! zm9uPLmuc?>^VD=roLDc}sxq>m3=YzF2n$JlgyW$R_WIzSmzT(k_IPKpSyAovK2;UtIsGma50*0x%3MBJRimBZ`KkWBhF{$|LT+>=`l-Z}W zyIwydI_yUK)1H;Q#2 z$@^vJ_!sRd9bJS@XJmh+V&E+u*b`lsqsu6&%2h19;#O#nWA(_qd)JI@O3dLlqfa6I z(@no7MaPxanhrj1>?Y~WQL(+Y7oqV>5!Mwq2|{8!E)I`gy0Yo_{_Ird{Lt+e_Tj~v zQ*WZ*{)B+*3XU*1okyDDk}#*);~n0wvh!oJJnL7X)>697gfFAJCN{@)Ls4Q2eAeF% z&6dx)f&IRdW%O|@cX7&4`O^vkF-%wfYKTRiq0X*bg#P$W{foWr6yEO9cX2SOwE76@ z5&GY^$r9?kO7^&&ZI!64fwGjtRDt?YESxQX@X zTf1N0%j5rU_}yw4+xsd{d<_PtP3%+GAr$^FDfO(%DU8JcLG&BoQtm9N=# zEt&hhw-~F$(<&u=gI$Z3Nw=`gR}X0$-w~hv2y@g$=6@}Fz6o&M3NMJL4EYtMzy__eXZ|s3h;VzJ}M? 
z*m`>L^_o`#^H$~DjXEEmlGmgz=#p`2Q%m@$3#^_&>(Km$M5%8o_1cd=~e`nbfI zueUBcPPo66yEI{-Q`n@U>Ft^umhIoAgtf~vNziWL)|4+FFLF0&{>yW+uj$@D3Nv!U zrrd<4S*P8F-*p`V`wKsGneS>%WM|~XWebL>U;92UzUle?dA#*m8*dxUSFh8w7IWXc zC*{z|Z56V1bs0ocrj7B_r$pzib!l8WN|-Zfva5K*w^WoGRmHayCb#UX>0wXfH#FjH zq^Z)D>>U}omoO(XI&r71yF2AQDJr;i!hFhvSfO&|C`tbr4at}s7AKi>w4u#pgkIqF zzwaeehyr(TYPMD7VmIi2yv~h5#o@R<6ANHTI6nKeSiRb*tL!-n`6=c$1=tvJVSgCs zX71Y6?aogWp7E!w@8s@CVMfp1#|rT5$6J@@kT4Y4RNErVyuL9$w_P)Rgj}^wX1-nT zD7tN@s-|s1L6PdInsFaHpq2MENNFy>v@ct=&|Djn5vRx-aUUC4U%-o$0hJa5cL;NI zH-q%jLO@?A=<@STC5G*Za3q=k)E^Ytiz}kxnRHqx`nLwW=*SRHgqBC_S~?-;J`s#t zq&l1a-Uog0F>5btUst9;i|dqOnnGWL#aB`mwH2I~-r1OEl9o~P>BmC`4P&A-sT8^= zQf~BrPzZc5vkFi0BAjv^?ie4M#W#wr)nKBMgntSp@ZH{+oPe5hka`E=01P$UqD|8H zS0ym#$58!?h^Gu>_S|5(24`(UO!WMkZYl)Bj=lz`^ zS%+Qou)5*1^Uztr0b;P~$pR=TT?)D+Wd$(j()Ks}wtmnPy;nO5fO*4PY9me2ul^L) zU`&D4q^H2!_==oD#%d5sXgDxD_W=MHXJd=D9(jjUH;%xSLXcIm2o-?R1c%z84mM78 zgRJjIPYVAM=a<67Ray&Aytr^MzC%n)g-4C z&F`|)nZ;-O80JCS!j*GYhv^0#uzC{Yvq!;2&3UqFDnfo_7s_yRCb(d(+Cjt^(znC* z?`m_r|1%-ED8|Zq6z$Y2X6Dy&ce4jcfPYP=M{2KBhL#=4`0Z3NP}3URT!H4*wtVbc zF~8a*%z!TL(-FqM5NJK+pp&R6O}nGv_zU#CZX#A!)G1{#Z?gxLMoHS*C}F0h z6zNy7a_T=CfI%_L?6vR^rnpShvjst(1FUCn$w58P{<6tRbP7URqop;_FIRc_6dBbR zD0q$gYzzU!^QRbOcvFaD-fRSxeg1rxfUk~FmSbR17ta(|Jj~YxJRD=aJR`pvB!J5h%lPfQM;upM`i>q{W z!{(>gsq)08lb|Qox+G4jsN0D4t#v_vHo&|b`{fIeP z5&!dUBaSGCke?1~!r5R4U%ixn6;1Dkv?-cSee?aSibWR$U9$H`e&kY3E!_q`B%yjT zcZqbyimI8$)+$&1+V=eQmh=Y&(D_Es*-{q5H{8tfg+?a_^VXU#cm7SZmR^gWmi2Zr z!yCOKf(>s@une{ldXInUmy|V&ZrfKTh=zJ>{~}tg4~5;{RsIi=)NBmn8XhFRPDRxo z4^N}GmLIs`kQnLJ^`gJ(&>46Y@Xf2%kVVNcaPUR{Jq_hc-_5@quoH4486suWKV&R_ zy%H`H)o$}QHd|aF?3yxq&zow^`lpN-#{3m1aHTRG&5K9YMN`;zo9$z9TPk%{}?hZS>UQyHOOV zalQy*V;o9ML9*q8Ja-r9sz82Ly}`mdD4e4`gyUd*+}Qzg#je|*2QMm*JFpDlsl;tc zJNQcBYU*w*joJls;65<94t_H7+r#Js@ihr3MFY^Vk7%mz1-QPP_<@2^AT=liqTXCM zx=`!=Kx6^a75m#LVMYJ~$sa@ktV4@58s1v6&$=!`>yiu6D}LjJ=8ugFHu=Rb@xq-HL6AP?qr+WtDP z#W1w*Z9%}ItLVugallWTdci8n37rTu%XtB}cQGd`xz*DXM9H?$rrkdP+KDfr649X& 
z(B0}l{DWX1Ro%V;6r(Yqzb^GlGatur4<0}3V9U0b_z$iE7XP5>D1PU zzvrpOb)_d;_3{Q6`j$oj+AQU!BPgDgy{ zS0mwY3FI#7koWgS?>NjYrGtVF2hCL?RE(`Ao5D6y4#rm7%StEP{@mz;A*5Etu3Pd2 z0deW^H=bsi#b?5LH^vD~E0mgHm^=s*O80%G4gV58p zQ($Hg0RtIrKRiA7oKT3JmcE@iNP;kvKI5MA1+c`|Z5`0_RG6mVF3>HtKt70J;1b%_ ztPQNj4lu)Z-pTN3HE5{@-b_0JrL6ZX!~8u!?{@G8zXr{ty)VtWccwf6vk{ z0Xw-q4jDqBp&dwAF@^zsSLYbaHh+ERq|gW2+aeU<@rYH9gGwk7HXH{^FIgOTc8Q?o zU4%AG$=2%3PSL>#nTq#3?ExR~0yGgZA~~oCfrFCzb{O3}5nqH9z76gd{C2qwh0zG= zmkBw0xd0ar0@fO6M;2-hqmwW&(lW@+w?F`wharuPQaS+q>jNMUx5Lo8rU0fT_Zq`3 z80CcESCyF8C4gDg0Vei=6b?{sZ3nZ;0}n_>ZHkgXJ%rI}y>~grd)I{)%OV+X+pSEs zAR(FG4fJdm382xkelHc`6wOG`F+G6{8e|zOz~ zHeQxq^i&$BuJo)JfFM(z+Da~lMg5ZE9?6Ei0*U3JuQ4O$zx(Dfnd>q6=$nEG=d4ZP zL;rjd5^-{T!9HFZ>dY*@cFSwsmcUfU1N({wOVz&mS0v(qvAPEGl4 zBCph%gc|4AlNmk!*0R*o+a<;+aFkEhHcIWU&)G;ZIY%ei7Vm4fUUExp@CaG+J>I+P zqVedMLw(n3XleFj(WS^%O3yB{%4B->w-?}7n(0k3?HK7^mQxLP8)dDD_UB3vnh?!CDe>|QR{K;fW zHLcDJwt<^F);6V;`{Ryk4^unnED}Pm?+dkDGkr94!a)gV5i&vkB!zS?JKC!)))a3fEbUs|FiBIEx0=YD;?9a=ejt77}+=zi71M{L>iNwM517b33f zSz{RZ@LJk`Z*)HEnW=3AyIBYSWwtwCBnYmUvORgzDZAiWHt2tT_KY^S6BCq=v|h!k@fDO_0@4>-FnZ88(Ho8 z!bBzFlHfe9v2Z_Pm)~n&jhHn~Kei^iuWn}b@Uxcf&_^R?71v#@8{4bb5sZSrSo8?;U?I)tiPfiWAE#>G%|!6vS9R+jW|aCHl*qX|I7@v z!<={0&|EAXImzs+P#v+UuCwgw38z(}$6Bq;kcEw8Y~+z@=fu7J@gl7{`^$c^2GqE8 z%VNXPc`N;#p}GB%VUAZsEn}axsL|T@)>eM6nS=&H4EJEAAYKj=HfcKV{F-cumK!`0 z)?&u}T3BKSKR!LYC++00G*V%2IQjaZW$7|HJfa&f?(5L8D+0BJ;C7=e<QYw_PQ+a z<>|CFVmjF9rYIZOZqINQ3++2p3;9moF^hgf^eCmZJ5{usyG6amFzoMUkIztV(v_%b zZTdU#Gm93!pLHNTR(s&rMAQ>jqxs?Y<3~H1?l;X!G1{526_iB;D+YP!vw!ah6Y2N* z%ueb$D_Zn8SvN1)!shpCEurMfhtMH@zfD5zj%gvk@s>!&B=+e>a+5<eM^EvvbzJ|Ef>CUhqy$p}|L-I6?Qf?I!&vJa&dAg2)~qw? 
zOeXDkHno*Jj}Pqy$m9@&)R~A_R6*R`#A*cb8|4@;=F?eH)5an4Hbd8X>E{XyHfGy_ zF`-jDr0#+@(l5z&WRGR-(YQH!8qcasOr)o4J0F|AYVG+C%zKTyzlgS86Qkn=j?dir z8b5<-rG3dpXNEU{XyP!o%sCO5ArCtE!_}Xg54r6^X|d&Q-nf;G4G4gY9-8U;8#m2@ zuG$Pqd`=j#$~R>*eEAv2p~;!oJ7_y_vs9MgE~eA%s6F5kviNm8zd>t^7q=-idnVTX zX#c&G{U6b#8bDTMGY{7`BAQ_{afC*@nys%GmLtPPL5t3($FIR!m8GH5{iGj?_$fj; zw`9|;v|-!1=lWIR=vLTtjQ!O;*pOr>Z^u!Rc6Z7smog0tMdkR@llow0YT>bx;7Myx~8!OrR~zBo&R zE@7&>0=}>ynS^9uhrL)9j0GeSGuf(Xg#}QeZ}@t;&o;} z6`T^)T_QwLFu@&to^5?@}`)=eLWXl2;E#73IlpLtL9 zi(eUQUP)Jv2JP32wcHHNoI_^OMY7Z^wdR5gPR$+`T{fQvs7zkMvw zy@)c9(z$w1XMN+!5|z5deDJL~?1~@%#HP_$qdfZ!C8sNkXIM^0CB9nJo2%K|y?@`_ zmp)f$&tfhvB)IdB(J8~9dq-l&CdN!pBzRP5rO$C`dvW$oS-dv)upaKgCfSO7cjxbt(CoGRDAs>k8%x8j_&RY}hC z*|0t$W5#AG^&&Fxd4)Q;pWZW;Riy1zUwIigG<>yCuV6NNAlx@JcJUTd?AXje zmtzPG)0@+ys@ZQ;XbH6?Gg)}i+zNw#$cpLa)dl$pzXN+_viJh59j3Q!@@x|g9Pcs- z^cD4f)Q_n;MQGyS(3|k_4dSF*(ctV-W_rrPd#K*!!%y*Uhew4!U(8UGMihp%S=Z-A z+J9{2bbYPRUBA=4HJg+mDDCXz>zNY!>(Ir~izkv2)M#h2I^~m6T8yhUcWvW(S1qi* zn~?a$`MsPr&ZX@^RZgG3Q4@3jQh}!)FnaAOe#9Jve1qE^`Sdp~+)!*`XXycGGkq0O z!NP8P<3fu_|1LpsIsP{zxGH78Ba zj#RxGx;ER}1cxTxSQ+Fi*_C#d=Vt^W$o-peuy|6PXnv-Q5m^60U(| z&}kn;+ISAVY(7UTH z62bD<;W$~fM2oa3`j~#rJ!$kBC@M1D?NGHARryh{6mnwBFL5i3F8e{rqi{zBGc*o$ zZ1RC2xJ`dFuW!p!U>^Xpgt;7uQqZcaJt8BGNxdi^Sd3IK;LOtQxmy6fz9mPNfx?<~ zJ)~P+1XBrWWa_1(m&mJe#~xw9>uA~%Sb16Pdj}s`hof3v0vH(-6Yq3mnoz_*)zT)s zglj@mZ|*HX#jYUYk6!fCW-qvVRuZ>~+Q!?UqDY1qp6UwBTwlxLHOy=|oV4f_PF)Zk z2?@!(h(Z^{JL=C`^(R!iR73rK0kFInS>IvU{^+xK@Rkli-uG2PXjLu!b1i>#(Iv(1iooo;G?N*;ba}S~WS*ZzIg+SWxV0 z;mdF^>+1t+VgVI*z(m%o8H6f}Dm7m(6}*UpDko~suEQa>BE~B1qk_cS2wV6zg*D%Q z*F}6+=HU8>pAf%vP$HkuZX!bq<9~m||N5`Kb@1iIL;p{I@r3n_%#~e%)Hz8eH%$z? 
zQ~O~GYyU%}qXi?zWyXGc{Xs!NwjsO&AE59H!+I(KYWE4k9wlf$gE;TR$Mq+mCfK*- zlC%M=`wwtHYJ$7|htYVd#Nmx$G?w=gT45{nNIg@*k$fcfEqr%Kj;&xSb9-&R%3Rzo z)A`;^b6<|S9aQ|5&i7V;KjgSuC>AVKeil}qODNGb`NG=mLTBwd0+fb)TgMmCro`rH zCkal?RRkW}!LL3R;P&lF9v4^SgSrB8xd~lUOEQMN5*{jiUC0c`6~gDXWZ+iF2{!2g zLpMWI8`+a?jR)$2&;_{dj5l_{kW#s-UH3LGza)svezE4Fy}r*{KZ;=6SEGnthS4&F z>OE7<;R67@6Iqo$(rE%`$G{4g0A z6x+D!Y=J@jeuwF+Q-duk=q`S!Kbc67k>zdxcg1;+5rk+*1(z7Xr9(&nE&|;#PQI@; z5y&j;I&5`K2oTrESGnKQgLtk`gR4Hs$y#{7p$p9=TWH5PHraz^d+*U93;r?p_sH(I z6c@~DGaq+QQi?MTb#!zr`Q^9^Hx>~r;@?evLa7GK;;JqRp=Y^2qhj?A^fO2II2x*g zV!hVpeiRmoZw_w59fN%3AD~)PW$GHK$miJBy9M{xBFt!b*ddi~bUO|KHecKRYjKQ1 zQ${P@C&!!}VZrHVlTJY3{+*&LI!f$*gi^|IpQ+deVfC<~H+cM%P5z?Pn1mW9N%{^q zEG^IJzwO9VOeMgFdkyjEpKHI-lx69LQsoza%HzAzZ7WnCm$o47(%j_P3BJcg$1-8! zXLW?t<7)K<8uCyZ4ISOpYJZo{JMU!O>B(wEt5jow&~+7EhHChB*AZ*3Br!NVYMOj} z@<$-B)?);<`G~fqU+nOsF>$vag_0L~z=5SVx1}0MFL?!I302Rv@2nbQU(;J&1Al$U zb9#+y41BgaEAdcRy@CVx^%rPP{SZ5o?UU?25vVq!)|WocA05&QcZeS~f&!-@eHkxQ zC{kgi5R`5TyO81zHuE#Ept+^N=weAJZDq~70+BB5UtrRo6JwO&dPUR-J*?uv>dfTw zRf6S525MINrL!A$t?k0+eXV#v&!%D~&z9DpS6k02p+r<7!bvF8blVAQ)ZcASDjzn-c8FKK~7-3m78CSmEJH<=- zyi}JPXRQhe5dDQ$UnEZ&RvUKs^oO2>;0ve~ci={x>kT>r+JOTt)xiQ{l=PM3g<-n581WHaTnqMcboJc!GBP7 z*N^s}5IQSyVVY85nBLam?k2)H-r$@f$7Ywn%z4x=*?Sb7X|s+3w_u-Ehw9}6wLV_j zC)JN*p_N)OC|?lP-!p-le&9&oEaKUyM6n)N7f}ch%m; zP3>Vn=e7}&=IN)>)&}~quZE16Fu5W^jWu?myO<%fiN=it;YUlWF7`-X&v{a-O%a>c zSBJDH`dJ-(#MgAf*f_m_SvNA$hZ0nn#44}O?PvPNs(0I<@Flkn_2WjPrmD(U1aYE! 
zztst?1|01!eo{?w6S7QAf}CjI=VssvJ0&JB*0%X_4fgDDAM9DJzvGt5cVX5*2}1`V zqIc9bT!7*70B>YX`|>PsRheOXvXzsa-Q3@&+QFE#Wb|nOE7`jXyWgae`|;KfvK!x9 zi+ToxKBt$d91io`|Mk7zFodV(rCuDW&$D39S6_Jc1(J;Bm{+ML0ueT&|J4B2^!w+4 zV@CE2ZBlAy&r8qh?gw%QYV2)e-0!y@fCwI~{qsQMn&v5A|!fbWh zyt5zNytlidAxIGX4dcO$ltnliNv~4n%vGV4qn7?Z^EM@=AVHL@(`9Wz)vfw^uP>w+nh5GCC_@``+i*cxyXpK%i*c zitB@%Lj3<>@6E%Z?%()PlzJj9vXmk%iV;Obw#Yu#F*C?sDj7?OQFdv?gOa_7vCU$M zj2NVPETOW^AWMoQBfBB{xj%i+`Rn|B{yEprbv@6O{)qW3@B6-A_iIsVN^cF^fEC|S zSo!3|Tkk>&i2C3iKX|WkUt!fb9@yPRzXc785!jZW1NNSMMJ-rvcy3&{B6HPhI}vP9 zk|?}I3pcn_4|Y9*h&M;=(bI&itVL#!vtPTuyZzjakB5xy+OwY3up}xx!+X+4ND;g`Ww~Z>EQ<99E zJ4_xp7qsxZY@CaES?xIP$QY71qpYMHs&gZDOat@DjP(Q8qLl7R-6pIr$AV0UkLJ{E#3 zCtRWTnmpS%hLt&IMhcyqKg(8eR(5HHoRMxZv(Z;yo$1@+G-i#uOO2`}Tj-AwvkSuA z+Sz8=B7-GZRXRqCvxJiUW}+`0i@8hsBU`zpqG1BF&jPn+4c-C&Ig;HdA-y0;599ih z%!`?}f81}vojE+2fHjZQnDjH{ZA>4Eoc0mT8&<j+gO9M4Sy4OM4X=J8UOxO zfva61|0}*Ux9copClSe&?W%2*2=eT8wOGvZvlvGrRSQ4A_j5o=boKlo%^m5R<5GSY zy2#>Ld#t;|M^2&@>>zHf=421sF4{18Oc&1)Yn20@2g&gV4ll#=`5U&6#G25tuhdu* zx&c2w{GTA2xRV);K@of?85T?)^TO|(XW{*apt`v*RL8fZGaM{$MFV{&=0JgRYT9;> z8_mESYOFC-Bz9*WYgle?ceeTKIbnb)fG>>iv@Q-Yuu^sd$XWTs|3&%!LD z)AF#r%C*EYlRWKfNP{-V5V;)qW)m06+$R=w;I9mB=&7_n$SZN+EeHL}rn&_bK=I=TmuRt0YjRv+n~ zQcpLA|=G85wK+87=j)7r(VkP)^+HP9ZBZ&lcWZ>O_mOsHj4J!%KTJj6U({+ zGPVue!$gY=klXieEmPan(Ycg3F_nNSAX``1dCMv!$?wjPk?NeUqIld1y~Pdgc+Xxj z4v8!Z;A>tSTa&+6JQ~&M_n)asPe#DceJTycJs?${x+EQw4{g!P&1}&N+hUV{49j20 zGd@!jHxw*&OZiwY>;Zn9&~m;@x!??MUQ^Sd@hH_gw4iBkgWmU%X(4K2D;kB)9}C#XO;O?>hNYBvL7J3-;a7 zUt5koZi>?hP|BncFq@|i3Ni4yH$nbRJf{VIOLPR(fs?QQTmQ08``doL?f6$%r|B4c^pm+qrff zT-$;BvE{lN<1Fo&Bh;V#stlUd#S}sUjr5dIPNP+;PgeYw$UFj7D{|&o9Mwxb5ilg zNA5Jr8>iV#UrV`*(0#;xH*MYF*IkPv1tpn2#qg z^l?RpW)jXPGmz8a%1EZJXpCUqn=RhfYrMi-VfWOqsWgXrh${=K+U$FvaP)};pbQr< zcMMg8h!?vBLY2dI+)^loLk1yRI6nP}EI4`ZBQDkmE2`+sh={%CMZ}%4VhvSz~} z4DR54T zmm~mT-B%?a!lw+4qYwwYfS7%SA|EeC#Y|SunTsADGkE5W@^~&1^P&{6pkA1ry^vDn zsD4=XMewX~@348ouFe=2FdPGe_2`jAqrVQBVD)r!cr)b^MtkHaN9hPc#*sbZ?qHi3 
zqht^l??KgDfnlX$#+h?K@`O=aC>+Dyp6A4U(f^Q3=?^3;z*op5w#31zdbQW1xDs6; z_QGmgv}sTfx1Zc|t&qT`ndg&>7vf~L75pixE?Q;{*$8J4r|@#2M2BOG$8z{O2;|K1 z^2xDZTC_GXxkbQ-K1A^i13FPE>vFBlbb!g<-D|00%Qm-qm%)&3pHu!-ZpZM0mYVmW znCc#FC8_oVKkJL*;hT2cP*!{DVIWhIXJ{B~=>&q3os*U3LD@uv1_c-fLfir89 z@lDx@se2v)U3WvWLvcT+V)-IAY>m*)Eln?JpqKKm{tS^9fpho%K? zNk?-F#UXQ8u`*c!6TA$@i4{RK#s)Qu%Tm8u+_nhpz@)wl_X~mn+gvi&2OE|C8+VI( zBN1ev1bB^M?pL95#nmB(M^011HwB$6f1R`Hg`?$}Dwt9Z%z@=kLeHgWIYA`5Dh#&P zXBB`6x{=ED9tq{G1G*Xeb0XyW9j%sxF}wf(sD!f*kDEVFQ3V=q|2TrIPV?a>1BQef z1R&D1)bwr&u$^RS1)sH_?1}Y%oH>BXPqz_rzJ(f^*1S6 z{-&cgmudy3=bkEdOhj(DO2@HYdf-h4x?pFsMn!v}N?T1M8)=z!$`B@e_?W z*0BHj@1NXUOS*I>OY^DS`uHu~fg1+_`ayoh=wpuKtXVgZlDtI1YGa<{%{~uYYMZ*l zh_F&I7mG%b*LLW&&t#=I$OZfL%S^_qO&%Sv#R6n_<3A;GeHxkg#*P#@B*Jkjfm@e6 zcJoB$%&L&DbP+kqd-?H(P(JmR5Is|fQxQ*Coocp>+k?D4vMn_IxCa`R;|C@8GeQZ$ zzrSUeGF*x8B6ufz3&wMu_UIRW^gCy?XhGcZ%i)ojtaIVNtX|vUJoja=v~n1~SmPu& zc(+96u20u|55P48ia4Z#jaYrl`?-ifVO5x7NlTT!r(=0FJm-XOGo{i)H#0Synz%R< z8_@DHexyazBYaq*4Ns=(Ue=#Y?1uB)_db`vMBWmnq(0I2`j)FCxlI{B-EQ&6o4JdV z45i-b-`AGfnQ)^;zm4Zdm2#Kuf5eMN`HcKR$Eu?`O-|NaMxNoIFrpMCV2jpPFtO#{ zP?qn_dMLq0IX(w;buz6;?}DP#CuZSmhOqVQi34sFljFp>{eCTrND#y#FG<>aXRJDL zci|xsr5!i*tJbU>GE3vg(YIq{a-TIuNzE50<)VM1lG>J~7alS8ZP&x_sHpxFYH84v zpFwbxg2KzMCNYk|GxbFzRm>q*?FH#{`{TKj3kfPn_oBkAM@Wo+%bjz_Xq z$nu}Kggk3EblzA*@%f-X4KE0P$PK-4oQ&D!!MJt3=)o~AUuA=>K(zn7o8E)FBTWrY zlo6OM%t_(%r>-3hyq^9P)$iUPBfV^zPD5}1wPja$DekNu_tVdN(w)^>hi2h@XS|sf zegvG&u0eFsCa&4&1|?)e3O+>dFJVCAlIEWZlnxFH`ghX zLn2(|ADIo4hB1qjh?4iLPCb3uVjdEjN37FH=~xI=FKsf6s&hw7OfQkwuO<{$QV#dc z#FGK>T7Wg@s9r_4P)%_j0#tu}n{bW)^FGH{bcfMB-QWJal;Y0Gv6d0rZ0-1n{2<4B z(p}PIK<{6^uYSW8#R#m94mFLJ*_SVN4%NTaEcQ;XX`w1h&}`F9+KP}=J8sWO_JNl9 zyht$aewRO|Kwc^k`@}ZT=G9Q%h$H>npbD_^%=Qz0W_2~quXYF=VZG^^BZkTtMcyR{ zpeRL>Mv3M)AN>Ji9=L>zQOMzP91tix{aN)t$3EiRiKtMMh`|?r+qMqu?Q}((*9{1Y zDgOs7R_mMnl>G#v47H+K(QnwM$=)42VViskQk)VO{5IJ%RY|kYTn96P^0B;UjHGkg z$B+UTzoJLoOl{8eX(Re0?bLsw^Wepb#Q$}#Nn7qi=ZtAZqeQkJ)Y2H?Od|%S3OSQc 
z0Z(8#+j+wEEQcLtFW{!54;Fv1XvuIWs~!KWKBbZT66^5^fb2w5yF6dRMK68f8-8){ zl+_Y7MGi?E?FhFsFpJc`V>|l?eUTjnJ&Mx}-f3v=+Ihp-Nf}&Do%`bPvtXZk1qq-1XUnUav51A&Y^O6 zxy$0egIlTp9O}oI=6l>8yFxVG;l2*ZZYvM;$05JP|+{U~& zkE@&?kzb<~O(h0Dn(g;(t;{&bWEjbTrDxrpwC^DKx!IU5=={#-HNg6R;_~vm>yT#1 zX2CcwMRy?D{a-(Ntj+6-QQwKbWg2>*C+YWDL4*)v?gnz9I47%-3-t$|d6q_Q{6kRZ z`1yg7r{bumB0U2mXj|wFgO+0-@EkgmUg>Z9`YkYp5bPuV5UnM>1##svvHlI^cs*Nt z{(5a}{ z(7?`Ft88K^>gO}$E5z#THsNWfkg({gXTgD{yk1i8BM$Nr6-YkqxVK-f*RJ)Oh0;?n zhmM+x-dx*7=KTciz=ZeuSXx#@IpZEK>^I{Y?aIG=!-RlrZA*H`B;d=6bcOKVOR#p6 zZxaUz2fv_SEX6-x(C(sIqu(Zj(Ui{?0$P82|MT(7Rb4j+m>30zSD!p2Ls6V(3~+#UqxAHTj{w!acuUV{2Pp zUHw#w@U8Jq7~VacIGAZylE=jKiaVF6gRE~4&c( zB5t_^Zo=kWuiPwdy|Pgr$Oq3>@3%<1K6#zvlsRi(;c=Yl{e9THF^92LZmI&yt6smA zZRb>SxB09i!_TKyoN+GmkDkii#J0A%h@?b@%zjIi%hYSMlCXq%%jm5&PQnB*2Uu(i zBv&U;TdbH`>~iyNY~me!hIOumxGNQx=W5=qJ8;YTzPA)hX#Bxg1-9N|V7NmW(|twC zmsn^Ww*&Q`$0Hu%lY9}kz*xZ=b_eE)2lRqx#%-+wG#@?HAAK}53%L5n^sQvaglnvv zVZFu}yMVC_N>Q9xkf~2v>~6e2vnFA>=P9F++Pt0NOl_7AESfAqjf{uI`2ONFIYsYMF&nmQ{xCI*1pHK%-&QKQ!%UMMchdpA}q!&`uComdPsKw zF_X#rDzX$%%0^FeKi+w@H6vXWpzS^TuUGrLuj;94l6^#0N}rCXxo7ji$PXri@4Po6 z0h(4)022D5TJfpps^bN(`sb7a?(bjhg*!T^c}^pM_eq`o^_s+KzC)9U1i%Z~0ZP62 zF6<--Vmz1kvIW2`eq%q$n;CgX_d&(zCVD$8+Y|o|sh>;0k~UwX@l21)-8$%6aI^bY zk;G+R)a^o>o|~=P*1G_U&#!~!!$iixpTc|tqk(q}ntVC?-c{RtKp%h}c-zTojx#q? 
z{zd4rI|&RGrAF((I@{srYDa+DwKmBO;oJaj%zOT~182#;Cbh-y#^+c15Q*g2={Fne zL5pd}Uj!?D=e;y}7KBu$K>7PY*Pa^Tb&f)0NdHr9#~8sdGSo6u<~cxx)Wt z*}BGj7`+Yxu}CC)cQEmClo2ui zmlhcSt*0Ou_PJdGF~Pbl8qn>zevZNtz3J)D$joqKRnR3wX7j_Blm{SNU(FVJtAI-X zx6r%UvC-3IusW;^QoIN+of!fVT^TYki#iVc(s`>zTj1ji)&c9`!p}-d9^g#bKSI6K zfEZyk589xbtDr?SIRQeZu+2VM`eK%MDOj0D^6dRB&|fNDrA!EDQ?Wc(||Oc6z~RolWFUCv=4FjlUR>!S$a$mUO` zRiQl~ua`)mkS^|X_XEddE@M3%T(6JXpq{b?ORfQd4+&?-RP2}}GdU;3_}ZpVKFaPP z_1Fu-67PUIK^%8_3b1dV7km6V`IrXOFUv@*yL=>5-%#@D;3?K4yq6x`$z*YbBC?g; zW#C9=uWBJu)z=?J)_zj27KIkyE!lbJa+%R4ltuTA#8>k{nKx6&^{GYe3<=ta+&R<2 z_lWYhyYs=2BumxCAxCT?(6p7mXz-$4QPC_j7uzPL%ZxB|^cx?{RDMpT$2BZq#wA!M zx`XnTYqTe@Uxv+nA{hes#m9sg%81$mv%{+J$lYOi;rdnfVXMVmAhG-L1%`P~P>@pR z_j-|2nNgU==L{tq+ClN_Y7Ksc*yL!!lV{CvOn-1#nzUJk68jHD2y3PU{((jAx~f&+&5=(6dOTvvmqb_#?O0rUm51l2(E z$uHYZORm73?Q8q#vAET}-XmlYcePj+vFNMHfDh%64s+S2(J!F!N?YgSArr?c3$?MF z_^I&QkBv4FPNlyl%ohEy{)6k@d485s>}xL?Abe?FO<2oscWj_{iiC~t%D0CGep%f+ z|JAaY<92*Y{Jv9bE4i@rY@^;l%h(n(*t;wR)_i_-3n$TSbvX)&THxYhav{M8pt=cF+J-Q%{`YklKEVI;XRiPMe({RPlr(8UPNEH9y(b$T5xOXt9-yy z(^_YQJPp}>b>7u821thHn&E7mpbapoYW}LI5fK_NY{mth2v7XM*SX@`32b0781Vj8 zavbF27v>Mx&cp&n|D8vrDQwSK98 zPwjFzp%=iD`wIX*d-weUdqL<*%Rz3|1MHLuD5w|xOjxqZj#RI`lef|J-Ar0YAlrObo6igRfVIB#)T%8t+ z5;jjc_0H=^jg5B6Hvqt{0bW~*#MI;OZSV9DI`a9MN~^Qg3GzJ#;#F6Ao~ecTcM&#d z6{FJDSg9uIC~=NP{m))EU~An)q~}0sB}g#*H*@`v8j#*jCU(IOn%clyPxo(!uHJ*8 zvkdror8Wg@ofiNa=s{9HAoPifSl6t-YF-3BP=$+mWD&F@EM9H!6Q>f#I_j-2ToFbR zNPzEZ?~e#%V2=AO=-m|I)eCHn@&3wJUZqXP+J)-OMeIF6>c{Eq4vZ`Rk|v$4;+LV;zo=E(koHam>0>%r$1gx- zX5@g4&ZY?{GbKP1Nig=$&A$B`F>XM1(_ZKB(*3|0G&zQM(17O3K}w$^nn71p*f)v!Z<7NEBmLiUZ>`vCFUPjRX#9bXQqi2=8y2;RJM-9)CtJ8anDFT_ zH8Tm#YtS$v5aSn67_Slc9QiQCz+R%Oi-d$p^2iTB==MxdZ{=(UIrQq z!}=#-Ag?QsFM?TW(SkoR4|DNLWXT1Lo1{_^lAZeBJ&|Q+PPc%#8gqyG?^CZc(DN|2 z1mzMewya2YhCG>CL9z?TUQ9(}g($eqA2Yk`FcI=2EGyKDmUVOvrBO&~6TB56h{D#P zUXOF&Ce)!T{T=30zAbXEdw`Na_&cN-QT|wH-v12%QITNw(CLt*ct@zb=Bf9fK+u%y zgV^-TKpVVKY%@*=`}4}p1Mp!dhUk}dqWx>itJii1AD^i56ui&%IIg0Zn`<_nXS?vE 
zh3fasqGTCWghsGt=y8l!Pprk1yM<4-;M>Vh{Q!nK*u?T(748RvG*^Gvs`SBRKeg@z zc(dc8IAZ@WzGDt3kpA=oiKZrb6UBO0r_+!DeytB2_ta*Jb3!3KBfwef^@WKgrHU+>%T0imy6Rp7coprApa%g^E>IZ<~ z3(Q(S0}o>?PAnrTUw^r;LRI?sVB8ghm``~r7K!34{5tf;jyy%M*hJD65c$8OXaZOG z>=UR_KXN5i;eeQMn9d9xX7!@Eb@Z=k$g@6T3*X2VRlG-JxHh^L1Cm?jQ+*J>4G-SD z^*L6^1G@+A+f{haHaVtAbNd4h!tg$j_}t3oAYWiv2I3780#{|96b%W#c^$(H83sgR zf*?gOeTBo#VO(=__aDS_JKdbHk{U^Pw1<=WqF*JOE2_5JxLu1kY@d2d-;M=fD@QXw!0yNN7vNlRIc&YhafNNeBSen80|=Q&c;2d8e%>F0nuoufLt-1ATdS<2+w?J> z)W*9NvKMw$J!ll!Ne(iS8p7$!!Io$)xxpe*O=<;q7W*_l!cZb0rv{Mpvoz0bT$Sg6 zA^x~qXs@)AC<)iw-9NU0uPB1bcs@5jLw#;+B-D9wKDG)_7C zBHjoJgKn?2tKy})f0Ex~chR&@u`1kr+O)Ru93hUxkNxpVwerxS_y3(~w3roKy*tnm zsEV#LR*E~}y1t#w;=9MLyp$bt=H^UV^EAB1Hr9n5^UJViX)`}NB4qOg6R9dy!94PC zNVQ&<`kc&{=+nL|0WE!q)_I>Q{5nfyo4aycWa4bVOh`$tA zJBzxEsw8JeG!&r{vl3RzCAECr!nt_iC}1A&KOL zpQ~Tz+PUyx-Tz(8iUJ0kDqoVbISTv=60MI3^^lXY7*@E#bG+{8IwU?sUKNR?^8`9q zx$)Cuhy&<;6E98PU@C=tFKJ6g*SB_dsjf~IL7u9g#v{ioL6FJ+=JQFp+fR8B_Y4y$ zL)=69#s=t)__W~|KV!GC{<}d4m~y;%1GlI-O_!8OPcVP*s zxfQ6+E+bg3`)yOAyI3tbX+9v^?2uCj{KE0dd8w;RMYF=Lq0Fu;fg1>gvS^xxjnu+! 
zQ^NORpZkKRNY&T#^J~zzT?Fn~bhPn&F>0Q2IET?;Q$TVuRLbyEL!%H+D(@;Xue1Eq zYowe!yKKtMTrF8V?RAtGC{)HV`ZILYzl0ST8Or7n*kw%G387NAaxL9MPJ=0#_H64b zd~LXFbKZs7c&EUOczN`)C!J+oM0^@ieM+udyOkaP1BwpKkEX=ydN6qUjz<;|kjqNq z4(u-2Ga#P6CXQiMRbykasEt)l1x*6SfU@+Lq+Oa z$F?qGAzRcFie1*S7EH;mzjdrm8HDqkE2Qz*B3F)7!UC-OHG%};u``O9DF!834SCil@;EFvX^HI( zhq3o-kW`-f7aNOV8>6W6sLxg-CITv%o6{~OA}b=Gqn{iHG&?$9$RaOW#4Ab|jD(%g zYFi@Eanw^(r?c;$Cb<_(Ne8BPNmU*f3`s)8pu7h}GB z6e>XiwVHZJ04JlgD$Xr=5SfjBX!0IFcab*&g}cBa&ak`L#EO`mRZ^Ak8?!6N8q4ld zNR2p5oFO+}1rGuqTJs-Rt~eB;d97hJ{1BG0^uNYZ&uqFyeHO)wC$hp(raDOxWMnoDPK2?7N-G(@S72Rh6 z2OUr8F^*K1YGevzHJrgltJLd#SJf33R5o}ul!5TWLgK@yAJ~c1z9jB*sKIV>k_;gH@?x^%UyrHFQk~-bS5<0=F z{EzSf`~#kUc=xc`r|`GL3GTVAxJ#C;F?>;|^HbSV*_%lbsV^ZylM%Yks>xwUW0K>k z0TdVWViVz%NV_rjw_d#I)By4*HSaqDOvigPIhr@jgp`HE!fL>#3d@}dOpFx#g!LMt z3(5@qXGj6xKi-)i7rm1iLPDKG@3J|Hjm|?dL@FrIgn&+iseZq~Wsg);f+|(Io*=f> zY$~f2J7Q?;bdE?XzL#0YO22H2+fKerLWQAH$?ni!$d45{&sg9{=)McWXIaCMf!-Ki zz51G{bms7*hQiK@es<(hZ}tOhq7q(a4q=qckWd%dRk=~E+?fod;Uz0D`OpJ;WZ~3! 
zHTJr$j{Z&lg774d4(%4o@_idF(vPGZzFEcG){Y?ZeQU<#4o<4iBDUBo+wKksF5>tV z|C_(SC&Uu-Ley}D%R-mk1E^C4-^>SfT(=;6YwvGfX2@{H|ADFY5|PKeO2%}{@^uMm z=dQ#q_qzlctA6V!%?uKa(a)^Q;h90Q2+X0&wAqh%BWZK8dUTA%i3f|ld09iI3i{L( z$ww^Q*vU$d&5koi8TFNtYeMnQpBJJY>t`zg1u=F8{&uis^JLMF{ z&zzwn;lcV)XDqYH9`C?0po1=frAbTKT5%GK-IUx@it*))@j zdQBzeGS+OK=T+DZSW?x|Qas#}gUG4Hl;)F1rta=b#*TZb#P|jklRQbOU!O4p5AJ!a z-KXpfW0A0rGm|V5v%>Yp^X+bf;=Kk=E2&s!Tupo!n|k!@9UKO&26lQ^y`A1AM0EKn zZrdSz!o4aNecr$wOp*9|5pdwgo-p?dffg(reitPqmD;kK_c5FL18I$c$oZ8O$XsW- z4F5f?c1qo0>Q~v>QX&QsPL!Y_#`utw8}11vahx<%8IICNxeNVkVT}na)UtR%Yc*k+ z8!h)tRa(4060whoe)nMKL2npQ(JPK+86zP!Av-xnB(K6BJ&1_|N`rdy3iIWb zJCg1c)f4!CPf%~~``0(W>uQq^ZGjS~iK#b{V&a18 zMUGteViiw3VgM>obmDtj6raSI!C+re>1tTUcyoSa0b8k}AU!E86Ms>Nm_^PwvA#NI(VcA|RPPbhB=kxelY8WD_7w9u}9FID_qP_2gy_(-g3E%tBvnv zyvzq=Sa6um&WVW-=Ik%t94DxZek1cF+u9s`hSK-`#$mI_0|L(rGQR;i>m(&6IW}Gc zQ3pRm?D=S^`A45K0&p?sc!j{U>$OR#d$l4*_JwiFOH&zzeDA%9E~|j8^)J(iX?BzF zW~R>%u=p%oKgvkr&lguq%I&9eQ@VWkqJTN-Rs}=NfZNzB`i|vbUOW<}YH|bh_ABgc z(J{KyfKAyCz6lwq;Om|2b?Y?*YL@Sy$sIC}J942}t$=fQBPE>XLUuwW{5Fm~duG57 zn`~?4umJ0r9)#|CuJj{fVJw05kd`96Z#q!Fms*jp@LYH*!BCFu5462xQ!LLIfGG@bq6`NeeLCFp(|U zVrm8t&Y}ZcC-;OlE73dXUg@MUDCZl^m)LAag?G^OJ}W2MXbb-%yptmM0>zfmss?1o zQC~CULA*TzehmOF=pDkkk`-_6W0h+twV5USCeXT_A?uH|vKX`%-hmIW95f4yzC_Jb zZ%~z9?+X}Bcho5CVFNaJLvHKciD{(W^2*eEVK*To(dqwBV{_gLVS`TkJ%6ttY^bJR)PNq$RS*u{H$h(k zc}`C`p-ZH_zU8|DXnu6b#)iq(nr3kv(=HAKN7)uG+X(HR&&41@tk|T z!|6s!4`^Ub3Uz_5*za~*$KgGHPyZrV$;~@o@C0MBNC<|jnxKv%#&2OqeS1}$cKy`K z|0{%?;=L0Ih~!Bhu!uYc3Dt?V6*sarqOO!_fo|4y2#z=lC7>AppgXmJ2SWM(1 z2+5M(&(v=f)o$hrr``et`mgv=6>>NQk%p$WtPJ4wV?cEN9bXy@p}%_o8Ri_6%S*`x zGmqTnRT$Q!Bm0D^z;kU!q>Xb3D@?xxWUj}NHPzyCm=ccsrGq}1mFXG->7ns<3I)Ou zrRG3jZ1LQuVc(TYJL+v=Xum*mstJq)Qsrp{n9etz{QeaJ%#Q^OB>BsAh@_o4~%2?z{})y2}s)O9Uno~aNGw5zvI>*?u>8kT#+%~zTTxZ z215BEfVM%6`VqEx&Vu8FzOjI)LAuwxTKz}@9#peDYpqo;b92`p;8&cK7dr)vwt;bH z+uK$U6j{5l|8t}Z{ipoxJu~K^T^xm&NYJ2i@QOsUED>|iJDnWUb)qlh_$`Vf#a8W# z_koO>rb8KQgqTxCDCcyph&ww^Z)3}V57|VJV|Y+v=ZTN}XB@{>h{GkYJg7>GO3X@2 
zvfEm7{gey+4J~w>#Wv|fQT?;uyke9y%Td=?^0TK3Tf6er9y^-} zn+cT({+ctlK||@a>c*`-Az0_N9MeV@;tC)64rjH0VI>o7I($jSI*OEZDbT^Ok0zDC zxM30N!_<8Q2!ZlbJ6##?_8e8S8n5S|!F}SIO`+W;b`}rn(H%PXUQX>$y++`0z}%v$ zq)d2JMjm+eRrb#Gx9HfXW#*f*G#W5v?uF^V2Sc%tfO`r+02_pF*26 zFH%j%Gp7oPM(bbG%Dj~Iy-wB`^x<1Or#COZK0IF;c(Ty&#)|5nV6PQFVdJ|Yw8ej% zXrng%Re%a1Fcy3fxS#^S*RI+q{9mVNF)&KqV-*2=x|DSLE&ozKS?*FUjVAkO9 zlj%_i^{wHVdYIquobmrQqR0$HY8p`uBpCl!w8mFgSKNFP@w$YX$h_B8bra8ePF> z6j`OFDSKS?>Md((nMx>l`LYeP0N0_sZWMss3bHRI$@^4di1Fp391&E_J`HcCLPwL(>x`^m%O9ZuQ>0Vr!q=*W$?q6xseVe!{PkHq! z1%Y>%fSkWH6idDqiZ;yO^>oP~sm3{t=Y1qq}Uy+!$$T8z62c7!h` z0Q~g5G6JNbKE!AHQhadsA4D_qTEVrU=lNOR{&mcOYM4&GAjsL!_u%~7qX2Et5ik?6 z=nkx$O5IlT6M8zoQKZ5QC#R5H3kkIH4Y~z@)JJHpRwrapUs?c>@8q zupJ2fiqKUda_>3>z-^e}f9S6t3NXoXx2+I3F)SAN|)AsAVYUJYmbZtuiu|$zC_|B!^%>{0cEiU8{l6 z>>kj~dV|0ze??v5>n+rC`_=1R)x$5nPlstKOSL0ruR-8<@i(_;Tl`z_b#O&o?2-0Q z9wV4cBHfdl{roB_;~KH1xJxp6&h$z+vfKs5fE#py853Yhu>eC&!n67I^7a6V*BlZB z4&FZBlg5dNgTJ^>-EM!l=8Z`jc2*TVe9=j_`=nX~hP|F{;fP&GkC=TO$>YR3gAgo3 zU_1eIx-};MSTE5O&aF z1i*@MUMz~GYhiU%-(C)G7z87H5bXDgFw`v*E5-xW`g+H;bBz5qJe z->7vcRyPNIXocmaFqa?o($~doFI2^@;5XP)k_Nc?mcL%R&@SJbEQp*61$APrO(q_tUF9e#$i?bcRK7W}szREpRaF%NnC&N0E7A8A5tK zTd)ocKpFxd#MZt|$}B79Al&IdPvZu++R9H_`!-6Ii(U+zWn3HMQ4hU`a%|BX zT|p=!@Uqvg?B^;4R7?BK()G`RU~BpEyj-3TwxEr4N15y#dPO?1RX9xgPTmi!c4Brc zXr(ub{|J3r?ptxbxFB48^rChCm8!DDQ;^Yj5b+;(tS>^W>IaI)VcZ9b>4-8XDeKhQMDQ)5XjmJvr`1t_X z>zlIvF>!oqv0P_4>2j1X65Sxm4}JWHdMwqJJVDXRiZ@H^uK5=P)tyL=c5UfR5jnZi z3;aoegH)cCk1NHP8nrHx&yr8sl@KEtBdDsNs~`Cmj!)t<0yoWt1Q`Aardk z38#?Mtf-cB4)j*`zMqsQN*X1iy{fph^#Rn{MSJ))zUR2Mn$J$eTDJLy3;j;jYJ zvsR@j6yaVF`VRA16FAxXzp(u$5ey1Z+l5(I|I`SGm0O=wmc5n`@Lu|{?Ww-B7k?fr z|2i;WrGLyKF-g8z{@Co$A&>I%3yVJWVTNs?7>}cc#e%xaT72(W zftIzY=v}C5mgn_r(~(G5&%%i}DY`>%ebo_Gn7bzYs6T*rzPvGsT+V${F& zz>RPq%-oXC#K@jc_dvWq3aM7&C()#_6#VQB*f-1n!57+k?dSc9HX6J7fCt69?n7*! zVSdnAXs-~Jg)4Mjbn0epA^rW9_4sRiI}-J5BHphsQpFdek@2^XAV`v{xeD+NaN=@4TF4qBd6Gvo3@?z?MDj_Vw*tWMxD{sWSR? 
z?XJLv&jV%ml#m08x87M*tvr|S3R&xdABcKJ9eRd3O4W~!-%H?-E+=?@K1gI@)=v-5 z4Zh8=RvJGk3o)I}@sxW~)#JCvO|+!V^KvugrxP4{5rk$aCfu&yJ@w5CyR$N9v%P0f zCXo{OJetvAvsyDdLk{zafD{H7U|lkcjI(d%U^(li`(S(I>uQY+(N`zcs?3c`Njp4x zy{~t1o(|O>k#@2k*QwW46A?C~oOgdI&Jre-(@>Jl2!3NH*zldU?Nzj_i}klvtNw~Y zQ5B~$!{rLhigjV@Ul`{UF1R3w9}t(saWx_&0m$reSIaBY^xJ_thC6wT@!Asm;1!qU zt2*CNKCWOCKbYfKewTTwSjjNJl8o<<0pq_LVlf;#ibJ*#H=zo&2i^$Z=CxeB&YO2r z@hnU@*gV$}Kk!!UtvL3kbLOZz8~^46o=8NlMs#m4v{$kD zW^I`}QkfOw(2DLocm7hM~wba}w|K zS@10$wj4NpReh}gv&+*8g^%+gOg?MJ0@Rm6U)*RB&!F7vevo?hnK$aE2)d!e2fK~v`~}$^-+L^s{PE2D zru@Z{#>meIY&M8JFa{6Z);%>A=D$KXU0=-QKQ1oVh*4cAUsV&7tOQRj1(Ai57}_|8 zU5ksIRV>yLKFfIK>dZ<`cJl-3u7aa!4|*-17bjh_a_T4d7z*#wZ}P5xcC?)fXEQTU z!xN4oMX;dL7>|&Y>R>9$p1u9SkUgZc?VaSWZNP)h8sFs9is2VwU2d_V#^hg)Abh3x zUq&Wl!`bMF1LD!W2hoqDn+|_>Ii%tLoWLI0!^>bx`~-^7%@yMT59!@W+vv?!FUhV; zfVh2Jiix6<<}3$gjQSr!2#Fw-z)hMveb$HSks#51#w;(bow1KJtUr3+ZX5`mOgPbt~E8*nzDwzlW1XA0k1=8YWn)3E)51#C*Xt!4`bgVx% z;c-3h0K0S@aljDHVD2(dyau2J8LJjL;;3(9yf?}s9t=h zU0vz%2^S*$J?GiH&sJT$@w+<=|F)is2qp>f7%jr3b2BT_k{v16_*_`57mRZWH`+3b zjF@W1O`>$|=Huuz_iw~>51k<+|^W?x>`)mdV#%mbT# z^5#*58QAcPKY3C32rWpL{>#clo^vhU0w-gsS#nR->8ju$Z7Ayj+fq>BV694c??XMY z@apWMIi{SU*J+pqo`DaUEcxfSWR;j98*x^-bB{n9InCn1N0{nVGE6a|3T!8RpqMBH zBax#$E_|1SSL@H_IpQrdMOlGV4K;&A4V5>Tib*MY>4a%1g>>i_ARCk06w-uXJ#uoF^t}7xI6vfBkbnCMjeR9fV7*^DD@QMF zVH`g2B?PKZ4`fq{l7^7H!-X22Aw(sRmbX-~LByVSHM?ddi07V08bv{OB<5)F^fM86 zJ`ypM_A{@+g1KG ze6JmGhfWVZrEmd}K#A+aCTP2J_#ZIO8x2VOaJ9NR{q5ZXXeYmb)!GPQ*jVd$yWTsB zUfe_AC6unn9Y3u(t=g0nI-6HGC;9wGz~`;9bhpX>*@ZF{H0=FH?u zgob5fnOfL?%Rt&w(RG1-e}MzM+kxr_dMN;b_H9XaxEbk@o3CmzP(tw3W9fx246|$!dQwk&3e?k_f4J{oN_Ob;pBQ}gaWlH`6$A7=b zGQ{_&NS*2RFdjvK>u=|kC5e%v3BB}abLWjjXHpHmhjVXocsvgSQdQAGq9?6^67Riq zo_7cSm-CG5GRfPIA`sTHr zWBScznIiR;-i}$6(KLGynW$pUO88BX_b!7U4N;+7@G*P9K$lm#VN3W3!b`@viiw&v zohFLsy9{OC-7$Nx=_DaCN2`@p^TLkFi@Vm9!pnD0=;&Xn@X>S9yGil~@E+#Fc9bja zQ0sp++`%Ttp?AUQTeL2;mnqUK-r(!iO*k5y|MvQFP@42h;r1F{2wr4SyJn;^^|Mzvk5(Y>jn#eZN3&un5 
zw~oT6<|6qT*rmWrV2X|XRqWfrYmI&vZ#Pe7r7;?@w*m-R*Z~S}JB-m+kg1%8A+Jxc z>|TO{W)VLh?5_*Y3V0Uz@Bwv1E}5V>;mbRVj%QbmPhDm&I&VAE?*B2FOEptr2lEr* z6eX{iu!;(xDr{GF%4krteETAb@C**NEF`W)MdUrQhhuC*5^hR4NU9S$n))GMeiYG^ zdJHh=8`n7b9P1G!&ebe*5@J2fnH=at-tPD?ypy%(3~YhEfYIU<>Kfz_Wv{E_oC#*R zQ`s4^Ok$Rx8(Yjt$cD@x978WqwQ@D8g_E|ZYBkM##mXmh;TBcoL7@3Ai@YF-|E=&W z1)x&JyqkxAuZo+Ln1gX(*|I8=cejYO=6m0QYhxKA{lJPQPMjb& z2a9E27?}ws^;p#4Hp{g!8Rm(CoB_nT0d<**x0!ap<8Yq)>QY&$mw4!Bu&deVoWFUH zMhf*&)JK%40Ctlu6kr>sAN5~GgP2D}1tyV@Jrw43nr^!XZ&fh4_zxG;6Ut0x+7f^e z6L3??!4l_PIXEd+4MoX1c&Q6h=VIcefuF({@5Siow(*#CBE!J?1gro(Ds$?dGD;Cm zp5u8Eqx%dtGhgg3s$cdCMIQB(l384b1|K+w8<9ep92TB@;%Cqy(ooc3FHXU#P;YI-(1^&R0KI!O)1a(IKQ$EaWhMe zOA;9WwWzR5&R-eYSgGCc?L_uGiZb$DJ$1|8I;)xJRCJlUXCd1>@^H+pq7*~pGUzZ% z-o(2Hnyy&QSefNVJrdh6^+{@|w?{T-9~99$JWSm&Cp9q&DqJ|>$zCqF#!wfO(9_P#tE%J=`5N{VPRQbeC*8=^?IER`ku zGBZflB#dk^St4x`iZXUG*1>3ogbagh38AuOZ!B5H)>sB(IrroH`=0Z?&R^%B-#P!B z{^+_a*Ldc6?z!*x`}KY;RvE4pV9y$=0Zto!;dscz_q9BJA@#6ht;AQ;^UB@Ho8b63 z+q7;*RKmC^IkyX(I45rT)wy;(XX*UlhO~iAjCeT=(~;Sq)!cPV-zMILpHL7V#K&*k zRE(}gzpp{cYsM*d`5hnn{QZLv-uxB%ZNB$XY*6 z>K+x7=D2_|A%4y7z5CNjEW);s_1Q{c#Tq2f*tzlQ*sxmXjFW1dyDkhbeaP>Z1G_@u z-=a$;)@;6%H*)>KB{{;C7OAwWD#hWNc;hzp3@kUEnFn z>s-g%iz!}4AucHH2L`rNXza`yO2dDBBB9d5V%gdq3%Q01`u}G2JLL9F{q$J=Q{XQv zLK*w2(i=hXkuT7l%-a#14g&dS(tRpG58eCjl~uwtv!FC1<^|*!!X&wo$aq?hTy_P9E1RzqH zAj0pdQn=22dk`oH>1!%6=C^e_0gf{0chFexT3#xF9;nu-%7U^+e#=nQ^46|43DtXT z2X@s#f1r{mPozQulB2@f<@1tpM<7!Ekm650!9R#=NQMXy?cDP7m`?0mJvY_s&jfIu{t-`ToeT)dXgV(L**AD#E{Ax;Pf1d`(swUzIf=#M) zj~SQPkXH|vJM4GYsLuHm)n8lUpFS^gZRxrZ@y2Wx08S(rXy?37=Ik8~7U}F(AOc2Q9?f!#0ligoP_~%sYQ6$z(K|Bgjcw#hkbv4;G1FP8~&{x20#u1D@;Y(9b=ff>!BaTl7X@Kvbqd2_hE zipI{IZ3-dk*3Y(<`kdhDSDk$DVV?GB6+9CyX_0rAUJzUUzzf)64LrthZc`@_+ECKzdQ@pw%N*gHR9HvJ9;#S@Uw2g&0zo{RRK)38M?{PpMs`+!D4Y&v8`Q! 
zGAJq3F&#%by!!nik+H71Of|6qdgbjsThgeK$+zk9AY^Br_J!Mm23X({%1a>^nS+sR z#qJ*ICb`5DV}7U2H~l!!^8P@mGBEWc%VxrEj?-|%_PCgluXjw#Hl#J65#&hm7IsF7 z?!A=DKax?uHwTMrahof&kiqZ&1bL z5b@5g>CmY2>B5MM&mw*X&H+_~D^!30IfQCi`B4`5Q_y}|<~Qn_iniI9<=x8tJmZg0 zCRF95Q$bg~4DJ}!K)#wsvBAtI-z(ojT&g&NXQ)JrW2R?mJE7tZDP;*gm$_Zrnq__k zh`V3WbX2*oW@evdLsXG6!}Wqm6>3OXJ;!V)L!A^EWeJO$Qx+EP=VhDTYe(FPl|TTN zvY=0?WgO3;V{nd^Zduy%<3?VNHI02-p^wN3U=eCXYrVUz-22n?r-4mF9?U$zxBhBJ zx7s0baWlgPPU-T`B5$9LY6Gdk91wjx(7oK&lwTft^X-nsgBjkg)~Pj%^n!C4R?#mC zuT}(P%xjm_blW)6l`=eZOU@&O%dbGhK-J~%*W;@DSkqUbeSXAxYl@G5z16_Y`U%JF zC0^-LH{-$26%X`Af;RvXHwnKzqg^$6EpGj31}jp#JDYzEwBcz5hjNN7o!TmxCX&fp z(I@ua?-96CeLVJo#NI8>DoNd3Or zdfB7P6AKSt1u%wP%xTnRM5gw|uw(Bc)Nj8Q;oZKwy25Ll-u2z?%|EDL_dZtk?Uz~O zG()&GiKY411s{IR%rSr4AEh@wlc@41(Gjv*-u^iDcW9jafXS@6)yZN0c`)<3j|wq0 zqCFxE6Qa$y7<+}YUB*yZ^w~qMh{Kp~f?&Eu`stHc77B;a%6lICd^ly>1X-rVL^gM@ zKc0B9i&C7?kw1V+LuRe+-QA<&dNt6?%^SN5?VjTvv0j`iaBH5 zzFgJ4q08nDd$Z)7x<{m=WW@I#T;oWvQ1A(n@ASVa(+~4RGpa4l%v?D>ryxRl#(#`8 z?(|c}cZ&G;ULX01`hX;Zkz1wSr5&X;d-YOhtg^OFV{B*097iye1iweL;I6H#;o~gD zSN}y`7}fdTNeuTM?(9ZRf^!Fu7$A1rzyJNW4X^(j=i>jbUi$f~GKw ze;^o!cqZe}O$e-U2B=vnG%tdJ7GluQuRNZWcm3Ibi|hrnFEmJ}?a8>#e&W-vW*jb) zrP&X92pjtQyu6q3yN|@=AaL$kog9s`W;Oc@1x2%*slpqkZ20_;)yv)jEKex8F+oF^ z4-%R`j<^LXoh`DzDeG?LZLx)|Q`4(|J=@1KuQ`DKFDNsr=x~*o{}iTo{r!)%aOA)E zF3_*Cy!@|}kN@jm}-$5{8T@$^`tKs z2%fRDl?|@}MJg-`jTOahcr-~@+6@Dp6|adu%D$5KqtuK0OBdM37pQ^wz~@m zV~olB&VYmFv$G9^!%!8iaF~UB38dGCv_m1g+X7%!b+=sFhIB&e&X++kA40nSvL1*K z8y&zyZ#y2ws<8@2HQfzRjkZTZX|AO6Jz&3is3klA+A*lLO%b}{?`4F(4tZ`u8bZKk zyQOPrWVp+HW!2*{kn$wleBb}Mi-UB!uCzri2_9%L)&B|1^J6I;AcS;FM>e+ua;zCz zkyX@|pH>0B38)h5{VeHtE5W}Oj};Gz4_(u1a(y#?buNSUhNN4&G7W5wmV}z4;M!9a z@d662)n~mCs741*Fqq9>L*jBe*i2M`&=L66Qop_x-qMMvf@IxLG&S%%w4v4=JHp;A#qE(fH3GSw1aw`g$lxKN(!%dW zA7t~cz)$xp0O5Z~ejfy~bi~P&KbqO_hg6X?C$Co#)%9Qq<_7RKZ-vv#Nnkarvots^T2^a3Wl5csJ8Zd?~V*hHy1GDJ#}DYu>0^Tge?6jr~|g*qaCo`7q6ai>}zH_ z+XqhDq3i`d@QixN$%?>RtbHJTHFZyT@{Q6);K#nO6PHGsfU;UE@u=9RpYmwV@QUel z`{+Bs=^^P>+U{-F(3O96 
z(s77&dVq8(y^s(VRHWu96D}w~U^}%Ba@XrXy$0TyvHOr4N27(`D1%HBeb@nya_WAI za(@WF&Ne>Z zb>B9G#aU|dI7bnPYN4TD-UXM_Rhl-AOtYW6fZ zu%J0hQxSgQSBv#v{Fu`aRgtN3E3oh}$w@C&e%o1AKU3WvbzZt zO1p}&qSPwTaDXLvH^u!=M-E=dw%eWz?9m2Uh=7<3KY7O-yKT<;&w$!$eXuN)RE2A` z1<58`Vbc+%fzNU?TfmLl*>U^*qSu_2heJrTS7N5(m5$lfn)Cbu*&Q=i%ZWW zI#+%!#YCq>2tGsUmWs>elM^|cdvAgDUVU$@5Q)S`MhF+5&pj3vH~gsYi$kE{Vrn`i z*{EEqpx9aTsjR=#=mk|7f19<+ecspCg5_SHjA=X?)>u_oU?xDb#f}!)&;Fq!Q3Cix zR}hW!!Pjx^Ut5#KOVF2usaw34ZMqKtuTApop_8)NcWPV|S*-T$4G?9uCnu=|S7@I* zpY(!@|Bw2yHfF6MFu<+oa{Flr&Wwk*!M)oq=mcKdcKN-iE|?4Wb}eOt(7oJ6t+h+$|2OgboA2@x&UwleYb z9#x7_?O&c@KT|}&fcymz|L~q9T$|mIK<^O_&>K#p_Og!OD2E}v?wFp##3F-d6FLp7TL2o@`El>jSMr*&oZ0rgZbEwQXppI#dUDeTotl@HoB?E5h{Me%WURUO>YjV&k zV|`2jrLSdFOvu+($ycK4c36(f8yVd^s3X0vdkhA_j+0|+ak80~QG;^H%NxgZ`l*3a z-1>|}sSEV<`vuJ9)mdt~ZVw()bpZWg1rnY!|lD+kr?ub&7nxy}u zMb*qXf`ZXw4CV|I?Zke8RTIASUt2BCSLf4kwR#iZ*$yJ)BA8YJI@Z zVyRY6+askzefkPd-xn$aD@VSJfN$n6#s}4_VKuMcH$fylmL$L7Gq75?H=+3OajGnVmKj*Cn%=I0#0RgGdMYZG{-y(LxwhwJ4z=a{SONn zCmRp%pXSJG?&zL8Cxla$uItdpPlhov()hbG&6r{Whg?IUDCF*po8mj%ttkyZR{!#92f#Zea)VWjKM)lYpneG+&TV#W5 z5j*EnBB%ocVG@S2kvl)iN`}@P1n@lT$6DUr3qmUpBZomx|grBXwEZYNc)42q)>?wksNA(tSb?)FkTlB z+rp!rnU@Ze&U+ySyC4_5<5iS17iUEGd&!|#VoROOmOP>jyM!S6t&w38n_53M!f>GT z<~>k&9Z5CFtVR&r)5pXqTZrJo@dY#6C&&@u86{S_79%o`I+;>{DQ$Pvka$^^z1qR|0tF$STZQvQ9vCtV^` zxb;7^q0!KeGWd1eCx-EMyB)cPnRq9YzYL6&yO?)=5*__-X%jum27na6v`IymRZ5J? 
zYBt8_C7;fKN|2p|UC$hHR}8z-7LcQDk|Tb6ra&J20ii4*&9Wd^tPD(@hq;PUJnU_` zwr60nTQ%Q+M(aw5baM4G3yr@!vb(k$WG9Hj7((*5Usz)R~a>!3V8(zB?BFPQA}RrAXfcO5cy5FB-r}&H<|-`s6ptpGW2fN#;^Vd?YjwDAl;Z^ixIK)?7SK8EkgrlO?;?EO z%qhC;%qXAwj){LlU}X`va~hM0%K)Ky#Mr`ay1q2IVW*(nKn>j`4lb&}!3ho_C7xEuF@Sk~7bG{{ndvmiJT9 zDe;UTmhKav*5|!}S~PNo+zPE5C0I<-mG1p2`l?|RdJ%S~UiW1YNMY*4NlK^RIW`OB zO5x;~U_i4;cB*6h456nueRpuAZ(Dai1mxu{rH_z(G*16KSwjt|@4y2pbFE-82UWar zh;%7Hy_;~7+=G($5u>1a@(qvF2&g51v$7T^$A9tmb9IXq!tJ3>HqV%5=NG6uLKD}l zo9ywvQ9+w9UDHN+xX2cYLv>i9*oT` zf)a0WtwgW$pXqz*kaaXd`Zk!KMr=N!N~E6Q_ADU8Q|3)+$yd%tW1s+3u#SXcl2N}GnTgXlEb<0bvzX*G3#a71=A9oLte*F7t1i_}_N-F4?LDUuKLC(4p%|R(w z8q-=&_CUpOds{X>Z=D40ZxNf-%?L}i-48Y)RtCI#bGdU(4xsP!&*zOY?JB;U=UcE( z*_;2(raxg0k+v9!@+IW8JPNaZ z|1e-cRc~!QAAG34%LGJiEa4!T;8)p-vgad`Ljf_HLvlwM7ZW9$aRqZR#PX&ao#ltA z7`UeZ;7^!XDOP{Y7C1{C88&DQrz)Pu%4)93Ag)JC7BgEybeD_9s9knm64 z%la0J0kD`CXx1&c7~`n=o}136M~f1XzShkKw>GsJ9e^OhoorM6BD!#KoBd4Bp1XCWx;-bz%5Su2Ew9ee5ZsSZ}zVwzn8e?Q0DFsgaFw zn|xo^kKP`nA0JkuTD^nCS(|{C2e(P~Z~Q>Y=uB9z<_((x&%0zfJ5{CY5T_Y zdR)X)i|(R>=T4jg!JE*>V#a`5_G8X9V?}WQKPGIFySh9BMC+P)h;9K-$QxbK#^qjl zPpwH$w^}%&KcsIDOP(cjRzZ{uT){TDuIXcERg@>|%kH@*8H@6R>( zvc11o;bG#SF2Lt>mfRW<(eoVLiGR%*C@UeA2KMZ2>kr_04dNurXr#(`bTLl%w^f=~ zQHFagsff66RQg^HmJ$-fKFjU0h1z=_yLEh65H4T~GKaVAy7kMC6tHOT70u0ZPa71f ziu&Y5H4Pqgo2?0YLz(AjFBk(GCND}kVf0ruQ2ZG6IYpGGt_W2Y~r?^MO@HL#<-$Xl;sEf7l$OhVFr}2+aHVzYSV`e z91UZ=0XV8f0+{*1KCqoYRRih$xK$AFcn-L8^=hayZ1dy!sj$l08@=E=JTBr>KPvXy z@IL3nB=;5rC?Jj?pW9&|c*%TZJw{*p?~WC3IljO>`}c1E<4R(VUzr6XKT!NZQZ}cj z=ZRTrVx7MYc|M0SqW5!E18K=-O~ka%a&=MQLtiG(O=N&I1~-6 zj{%ls-49Hhhq#_Xv;*udoc&KVDVeYO^LKB5PX|ER2fD3YT#5SfX-x19Kkn%Oj>I~J zo1mUcgL9^pcql)5+9Bq@m*tEaSfcrP0y*Y)n=d}!Hu9(?c?`B(7br8(8oe-NayT;3 zF4%L9KRDVzv(X{qLucGSOY+>UjnyRhdhvX-wFo z-u5g2M%Ne2%<0=dcqpFq4Uxj@@P6rLTDFA$h`rwMycMs=COJRtIDUcu(FhU>M*bQs z^xh87oCF)1^wJf>TBb`xzu@$$en3o(qbRo8tyr;FwR298_t7K5Tn6UbszA5WoB2(H z;ZMy?l&IH5)~BCMgexz!4k#qck_90q9Y8h`>lUyypID^NkeivnDrjFeI;7&KFT!3* za&t~C)ici})(waR)VN0UPd>?W^Yoh6QN_C_>veg@wk<<2@ItIA(Q{EPncYMcwXJs` 
z-K;aIv=Scps4EsdBzdglImqu*SB zgx{)(1pTvy3)r^mA0JYiON%m_qVahPg-)g&ZN7S#`MoTUZv%LXI&!ml{DnXM;Kk9R zVr-ea)@TuZR3r|AlIBVnYPn_MjssJw_g&0bg&cTfXsdrxY;WB_K4s;VBzNRPy9BX; zj?UN_xiKq7M1vr8GFRCsK}W!R=$I>&(egk~YtjYJ&}*rbc1zK$lJ1-a_mMFo@sQfl zerE*WD)11IeI^p{uP;J^HGL`#gb%3K$>;Tkh?%M@f?)2)hY0cWfgdut|sFe3YPHljAJfZu9LNXwd> zk*97Qf3M?p$Z6xzw2Pfsh;{&?!u@Ddg>&VQ$mUJcCJjZepASXZ%^d=R?`tH?m;ZFk zk6OR8F%j14=Yu+Eo zwDeawA$#HKqD@D^#(D3yz?KJlVK1`pKJj_0GL>N-AR-=i_hVG%h7CnAM|DL2i+mGsdIU`{#DvG`8~45_0x@wdI51iM z>e(u{JEfBs^5$dd5`cz3zkyIz(sjPDt{WmNgETK8!DO} z_*7_!Qya1~>jCo-G{|^J2Z#w6?s|ZTjlVl!c>BLz+YTv!C-Ecx0q=As@*?;Xqd;<8 z5-C&ttIYznvj+c6{MfE6BAVA6npugGf z_GRx;P0*BUCQSwdM*lu9py1HHxjQnIl523tI|ycA>34xY74mqiaH3h!uUumxg%EIH z^g+X_Kc=Tx1<-qJ9HIUkB1dkCOdz;!*qkW;0B#04c_QiCS?<*A8gyZuHBmmX=1@ap#sHh za<)HpR7tNXwT`9*1gh17A%9+Yyv`=<%zyp~(53AT;i3OD)3tU@=Xbwx1dvbu=TH9s k7r>z7|LlgCyy)B6o5Y?yRE4qVg>Im(mXRjun*G!N0Uff#YybcN literal 0 HcmV?d00001 diff --git a/docs/assets/images/1t-trend.png b/docs/assets/images/1t-trend.png new file mode 100755 index 0000000000000000000000000000000000000000..7164eb0819ad2dfec2090af00ec1f0a6def9bb78 GIT binary patch literal 39821 zcmdqJbyU>f_dcqkgp{DbTR=LbhLRXW8UZP#8$m)EhHgO`>29UFL%O?^ZUkpQTDswP z2K~hMckdr}-L>vocl}t)VKFnW*EwgOy`SgVd!LtqujM4L?-Sp@b?X+kl%$x#ty`$D zTenaa&{4oUxF789f?v076(mG%6%UcFfj{mV3(E@Mx>Xj2d7*a?{9XB4MoIko`ufL@ zALHZWJ3BkyzJ05%u0A|GoSB(9J3FhdukY{gM<5UZ0Rigj>Xw$4@$vER-o0~ja?;V! zDJv`U@bEA(F-cEP|M>A^OG`^iN{Se)BPS5?jy5&}3Vd47vdPqo! 
zt*!0#gxd8tq3hGt>mG{hbYIwQ7xf3FNxr6Y{n6`}_ z-P$|V@)`KGb+Wm+IXyiMCXQK>r!Fi;hY{VqN~v&be8SineAT(IFjUCr^ME5l-kD(`176vO zNOoMeD^$3!O>i1veb+$!5?nvQZ$@4w(fgfn^{2V-DnYB~Pxa=B*=C(1*Q@us@Jqx& zx@YI51{*Nv4^IuW0wEnAL5*=}dvn)y1CC0}aZMPJg{2Ohz@tFMt;w?(Lj|+QtykB& zqW@HPqBTEKS2SoCGWh_5(nsmFe(c-vaH5~btHUL_{%^ov%24q^-l~$i@fBt>?7IcM zis!DqTxEk0G+HAJl^7M{i{O6U5UbS(B0USAJD6MZZ6=OKHG8MO^fOdWo)oc8_4Omv z)?gaP_K{|0*J`l!xRbqpJQ4KM+fUi0OHGpx5RPe(oZp=+)*5fMF1~KUW^^}{%{l~i z)CxjI;&em7eQ|{X)xF32wrN-0>-z&?71M~GbBMfd4K4Bx`IW41SPQq#Y(;@L5hLHx zn7(Y&*nm|p^fMq{%95(tYs{M}OKL=WZo&?>E!;v>x>PI`DSRI6Ss~|ZYPbWD;EO8K zjHs9yD|-rs?FcEl+Cden4_Ij&eLN3YX~8#-|9&%)TCk0Ms(jy<%lVf896>zW6vaC_qaXvD`sB~LYT%N( zFq?MU?s`??m~U#zn+slD0<^2@eV@xOd8olWss9YU;mcK9c+{yHERu1*uKryS0dd1x zdA~2$DHVB|isRHTVAHEVD;kEZKIid091JmqmNHhddTTBm{er=(y;Sp_ojuOhz1#xv zdBOE(^a^+n8m_qmEq3%{B9go1 zrL*WzC6L%r+sJ%k@wN(Yr{V53wF*QSY^Cy-^OGm1m~AvZ56X%D+`FVd;=HZt%Ke$j zd$$uMNTGIY@`m9Zq2M^5zq7Lx(= z=c)_-zrU>*NBv^RWnS%va+eu4bK%jZb<{zv%Gh?u6R;2GACQB@xWAG+XI#7xmS*Bq zu{UT7^V6>~_D{-0u==HSeJ3A0UC@mL!yn(QHN$h%;ODkcbUdw^@^A6v5Gp!?m!>jU zNu@K?+C#w0N9)HHbTpk^W5P7f*NLepeZrejk?RiWEYh1?47j<6@JvML#$kW(Cq_x+ zb-G97tc71zJ`CcLBAxSQ)5o2hUu=9hi-W9AH}^~%{LFeH{xW7-<3s7p{>5%*SZq3Q zbw;O=tL!SBoh0300KC&5S0C;u0k8Q#2!sD0E(ZEY{+sXW=-Oi=m!bJdpUw9wJvTa~?)LQ2V6gB-(C+*40>q571K3G!w9 z*zxMZ*@SFy0C^vjGfcB}kAQ2^PKj4?o@+(?slWwJA(y^f_0~s6r_6&`3oUEG1ADv3 zMe?X?sZ}r;2YDU|c`_6=&59xiyr&>?hG{r_WMgX6{UJKp?e0z_9k{V5q>l9ZQS!+% zuG+CKh(EYmu!0%7+}xO4IogX`+Z#E$*bKxfM zAU%I^b-7LB%G7(}TQ0S7Zw5=(hAH@-hnVO2hl>$;EB%qs^Q^QDKIG(NQ-@zMQLRp9 zS4H+{n_7{lrR59{=We6=UenT>~RYX0?eHES5%zR#(v=5ac9|(|!(52=P zB)V`(@YwwDxjBdU1g#`xpZI3UJNz$FAcCVSW8N22x50$K=>I={=_y%m##+5@;{-0| zY<*R8ZTO5C>Fkfje7!DqoapwbQ`6qk{b6Z&h~+hz`{c8IO4#=!_2K=rEN`{ z%4(G;_K8T(&O5hWy|{hs)T-*_ACHGiG_-wieXiW7zKO)m<5K#Nk~%;oZpCC4P{|XY zIeA*dbW4X;^OhJFiIZ>IqEj|wg8@tXPlM=9Yy@WB)RS(xTAyxWV6w6tFn%%4dr?A2 z;}fonQlGq*ocW_cQcxUm&heKMUm9c$KM+%&k5 z2YI2SaBpNrnrGB6Z4_AhEomI^cKb05$J-9-{6$7^WJOtk<4j5rOG2fi=kGl?VK{B# 
zakfXHFM}f7zRE5l4v3bKcl%Js_P_zUn9d3<>hC2#h>p!+s0}XKfjOPuqJ%rPg0Zx; z)cWPOI>LS&9OoXAf$%~-(HRo{RZm(kMf|z$AZ}#B0|xv&6dSqxxa94h<_1!hH18R$ z{`o~F42*W?uhBrfZkH#h#%1rB&enST03zt({uF}Lr>Q?82_-%Kr136{#A~shi5FyWt8oDmeH^N%4qw?YQxiM+Mx|Sfq(fqZ+1Rz<{^S2!cwirA)!TjM@~(`D9Bw^2G4-OuuA-Vo-n7GeP`@lbA0)4`6O! z+VMG~Id0{~-TQ0%`yH-8`!(a=tP=!rdTBp+g;AgAAn}W&^#%?@gtY%buO|foe*fl` z$9o3>!o3!669!^_4*mc8i~aU={o40mmJ@*yPZ1QOsBJWKt{@E1r@i9h;)ct*IaAm0 z>DW(Sfax2c2`maGeIn(Q#)*pGyjMLYO)u%Q*&@w= zOJ|aFruv`T=3*8r9gVxO`p=G&?>&ztNEI7JT&xT>v=*&23$^Vb&E73Fi_+zAUn;B$!gDpC~B_9s9PKB(2JK*!{T-Z;2>=6cbeb{%l z#eaX8i$%GzZqAp*ck2H(VV#ayCwpF4Wv}+l5GoES#ac;zNxQ|REY8DeC!!y=xkLAHH{zj zB~)ZBYlHaDp#hlQrx{;S?CULuS7xs;hhWbM7S*4&V1_;8Yp8UfD17#rujhlA!#Z~@ z#&FK3)}e40r*wk&4*2m07On@W6z*P+VvSk&gE7s9i;DSUB7q1Xr@}3%AqekPTPt0E ztH9_d^?VFHrmk?zn*Q&a)*9R-pQx|F77)wo4A8-09%~IKUpoz2OP`1?L)+exB0CZ0 zYg&BVS{+!p35p(mRO1hf5#jLaELJGX-maFkTmgr(eU^?j^}f1FumUsl6a%(PPsbRQ z0?N$*0B*+$jJm*nWW~m=8dcp2=Up6t;^gAy;ujD45Or~=Z1Krx{R~ujiyyU!aub#Z zERecnqI%dwTr%^kING3r%yGOtEZzp0+&lWcw^J#e+-0Dw>Nm++A*cf?Z+eZjB4T>1 z?z^mI_N=RQVnnE`Q6d1g%`Q*_Tb+8hVu`>QsZrBXd#KuMrVfL*TbB3h^+E|%h||SR z4Bjz#jE!&GSP%FDtniYOxx$%Mr|up|O{SMz)VI;w8u#3!lMYoo4T(j`~GylF&L-LdxV&{(XnMB25?(;%!<&4g!+&WE6~TM@RYV0i>)g>ai`LF5ocU#7w{(~% zsStGrpQ!n3q=lZPE6-$d?x<1M>pkDn&#?dEx$o?4ha7YB-cG;kSot}n1jQbNa~Q_j zHjcwtIn=K!sO`Ih2%mpX@~S#|o7w#9(79t8y`+Cmz4;NPa<-^tgaxBuZ-u?!h1xKR z-WpvbYUP<;5`65r(Sh(&Bsg#MzB9A0&htE+Mv$g%x6O=>O8Lp`ZOkUd=Em2}{Rl&l zVo6s@Ht+>8v>TjSPYsq%ccRGXmyZOh zWBqqtvFI+x?=Rm)BAErG3+g##Nq^g1y1t_O&@pPq6otxy;f{~NM9(u`H7)xPX{K4t zDFe!QmwWY(J;a#lxwHzj7K?7!WrV1QwyI5~_jG^xxZJ|6hdM#{xzz#XGAh;fS2;vz z;JvmUYef*nI#Z#M?c8w|m23kl<8&b+nqzLt7Jc?%+)EfE5oMaAZWO=xSw-acELIv# z{YCc&MzE#+VXP!BzWh#?J=)jp8}^q^ciq2Raq-N;Up@`4b*RWqH~R!=1xs8JdoA`( zh!plDQ&dQPKGFj})64%ipXi{{8L+Y3lkTgn_6k7zk*}U(>PIOjec=Sp?XM0hbE(T6LO{Q1S1T(j7H=CCLM#PvF{BT zT_^=$k;5t*grh@K1N$%OcHQx22gP`Zqe+O&!sP<-qJsUL0q4GE{vHlvsgF6I<7_#YEZI8xzS=mHAA1LE zE$DC@zr2WNkCK~2a3j4h+TB;14vk8@rdQTUz!1y9Z>;@c(I3i*K)dCvrij~X9O_cc 
zna-%?q}*!yQ5$DExWDLWMwCM?9;3Z_-jWWkMj74KH z<%qv}Io2@b<$#N)lTUH(0=t)wWUZW@K z4lEPsf%V^cjwdl~Fy+ka_HwMVt@!N`AmQ*br)u$$q7!?eAGHKHp8}r!bit=aD$6ET za|u01`-kb<6Y=ud=5Qr9XDpys9=}>KR4)BNz0BC5BqyGfhYHlC@0K{pJlv?R5iRHO z+E?T3$3UMx4n~mR4`s^t7#(8@$na?sQ3d>Ejzz_6i=@1i-A97!rpLUzD{U?Ru`LB` zFh){i>6|h8)m0$3uLXVq2uI6kmI=B=Lp;Sm$-^M-_Iq;wAnzYUJ0pIlt@z4UotA7p zmR$4SE-{1WI`H3A(lG5C-m2CoKBT^I>hpOz#e^C=2@gY_06B3h=0z=Tv5VX$Y4&k5 zMt=KIec;M!&%V1w=Q$ZURbu)%eP%wv2j3K*4=x{<(IqMVyfysjIabXKdjto#|wIKH26w2spJfc$@KszGv16Ug|6@1@U)Ai;)D!LOhOG$y%8vQ`I+% zRe4(Y7}Vxaqf{P_z#Ys@$Yw?IoQx_9kLZTE<6EljU?6VrI0)srl*d*2?c@sT^7I{q zZQ?rsF3WeW1idizn4*IohCX(G5CnW!lwo?MPR{h}PeuBzmiCdnASoG#b{^%;V$#lN zy>d6+O{1yJ<=ephdfr;w&+V~rmifrk-;=ynp(b-&{J)s zj29KYlX#-M-6NNo@i;qJljV^A0oA(|XKE02zn=uiq9XY20qGy@66_JF(zrdakS5Wa@32^f9H)f@N23?I9mlT=JPuO`nxb_ zEOK2l#cdO9E9Fuj^$nG13X|uQCXIMxjgH_*WD}wz*~5*UrGq9ESEF%^og*Q-Vd`ja z5b-~d8+_H^sdl>2p6`;}*Sxv5a#FgLclPVX`$!Wn>P%@F#wF0E`xL-{R!%({_dIyWDg^7(no?0-`1H{zUw z+&9Z^=ek$$L=C98%crx&?@0ojZWSlm=NtW#KG^jie{R8tJR~?>C9X!B`{avWx8o3x z^yeRq$>DPT>-Ir1PVuB{daXC+4vg-H+e#ysKJbOIX4JPXwc|!q?_io2BnRmPgeAjs zhUz*iIlDl{N$)BWSt?GgF;kJT(f)Oimg*7r znCezhd+$}B(cgQ|9=B&0Vby*gIrviXX+)%pog4W!1zNPY;J$yFA9 zvJ0;Ln!X=LER=L_wB6dg6)OcPIiDuU)x?>k%pY&9G8S4dDc>u=2EsTs=Xl@j*uurb z`9*Vzu3*{^65>m>m%Vn+zKhM}GN@HOrd#rRL)du`kejd?0yIqujlA7%2uPfSe%q$xr~$|`64#IsWDvJM*RbEeww5V<_1IZ{@G9KQl-|UP zTL2n1M=Jd+$&Py~TS|lLmw>)r;!gk+(;;=k2F!FMb^A(jrIJ>JCFu3ZDEl;rKb48& zM-UAuyDvtiO^mK0&@@ieWvY7EdkjQTXUzR(w*8friXT;|f;@fG&x$iQe0}V;*QyFH z8`TI_Dh2kc!D5L^a?>%HClmK-lmi>0G5I26WHp|Cd{vyKf(KF+uKCb;b9;N_Qv4{*psWQQIV6F%k=B>2oCN;F^6nBjfT@M%wSAa!o zz3*HV#r{$Sb>mqTdS7hPH=Ro*y_6p&@s&g;(L=CG91a4pv8B3gNMVgfv*P?7IbBLI zCSJ!)B!;6wg-pVW%gC65qraficeFW<^t2Nci-$=rTCX}CZ|&*IagAUxL@WQO-wF}~I9 z+}`^yLK_)Ky=avW&wTB+DjR1;G?7_3g?Q#H=0VLV7b?_S&0TBUo};BS?#w74GV_QV z8U3{w%Dzu#S+N|)Ab!)egb;HF=ze%oa>?~I+NEIyO=5CqR%JuM2h!W{6e*fiO}&%f z3?1%I z{b1b=kn9=okBih-0FmlUxusSyj+CAOvc6kY0KmI*d-9B6|thIvRMK0qN#1lYAYyce}VkFriFR@V>oNQq?6duRA* 
zdORXZ9RSnsS*@{*(%FE)_cH~TUFpMms7754xO-&#YfO8S8PoCDchv;adFg=KMHUg_ zY<1zqrufagr9MgpdUapN6^2nLIh?$oMNA;vGT@SNpwj~Lw~vjv!c%}1dusL;2~SWi z$;{z0yy1fRh_x8Wq$hxIfvM(Ah`y4awlq>jE1oUO-yYGAdu@qI$=kTxf{TyNbauon#o?q1au$wbY?!0IYdScpyZ}z;zsbgEGBel6!Z@@o za1C;goxPuhiI>1aKmmq`Am^H97}4IVRsKvUX{n4O0S{uQ_&X@idptz*^*))qg~icRTJ&e9}U zrXFXq>9oc}WMRP4f6P{6TYE+<*L5cLafv%qBDWUYD5+uGYam`w`)f9~oRK>BK zhOUozn~tSMhG9_`Id)v>TduT?-_`lwic3Gw9voEt%nu4$M^&lkZzP z7cA{m%E67I&6yZ!sEwHK`SX!g;&p*!%mI2VKCLJHUs7NhW5haEoa$r`)udKZIZx*HgA*A(9#{^O?CfP27r)f{?5PA zb|m9d+@85k+#SdMCRlq&d)M=7IAfYuzI4m*oN;A2Xh_%emIa{?{MsdL(tCiE!iTLyV9jFgldO+2f3#Zb3P9}cvoYTSC_uojha_P zb5d^}Y~ZoS$Kj2d-l(~25hGo-iY^#3X}$_=5Y&hwHS;_ zfHp$;{l;W<5_eqJqmpjd%J*Wv5Cb{cy{yEWzyO5b5sZ>0v;TYYZ%aKHTpf&_XU z^`w>q2)!+<;y1ghl0>7~O?^>1x?OGd+Wx%rf(riski7PgwMH*P+z6aw7K6K%Kv7!G zC<&29{1{7^H>J+!F(#em5>g6Yi_pa-S7#BDKrF@yMGW#Nw)H!!(Enuo{5=>WW}Z`Rn`y?bYw#AWKKV{ z=GrMIhV)z@SL^zk{y9fge``(s6G^@p)A|I(`#AP&Sw(0&YW1uuj?ku7>yFTVn=kqA zdk)){h7ohO+yV~xE)J5C*s9j7x zPl~-AFiCL!2ZcL%S3X3%%VgE-t;LY6@-mM}?%|G-wotK3Bte}pSFr&|kz(V6X~wrI z*RFMkOqp6j@~(LoO(L+Zz#DB9=hlxsuZKK#`oAU1dQTKKQn zHp}RY&6a}!T&ej-4f^?SL3V5iibZn)dD@|dr1%g;qUzf5-b1y%2boXQEFab<*Xz5) z{Uj-f=O$DPeP-^b$f9Wmibnjm{+gk7ES?#aGkG~ChLk(2m8OXa$6BxG|4O)jN>XzH zD0Wc3S_cVP$I*i-M!h-?YlB_G{wKTqW48O{eXHK5bzd0zT{l^?YM*9*9J(L1Nkc-FTMYt$3`tZ@1n%4Lvh zrbwvvk5TMFAwAHRhJlDyXe=Vbw>UF2pa}s3!nrS6o|fI}g|A?a_;`Qkm95NFZri<0GY(dko08G>|#UT#{Eo1qkgTz zLHMOS-H#FIdw$`W;INS)=2t^ZZ3fI0^`yfu;3-MkiHXeF502Y~lx{2xhjep4=8iXK zl`-^~#Q00275$Hg@?v70CbMIbaZl+m6l@>Ry-V=WljVTW#X)!|vjwHQ+CL(af?iU=@NND>K}=dMf5!)NK6$igwJP|y zeuLKQB9b=L6Ew`DNCaBb#&L+(D7C%T>(f)P5Bb?2pWFE`b17LmX6@CGEfe&DW-WW4oJR`2+Ewnu>YN5IE9oT`nnZ>+NLCqw^`{?|b zy$C5Lfo}uwRbYZTv)VC1)eRTaVU`^O%oFBOZk+c@rs9lZxYstNUpG;pBBKe`+dARB zSAV8Y@%H4EBZcZ+<%;&j3&^k zml9GqO=tVRQ?Y!w!*a#~jdlJIQ++ZCdktJ|n{q=TEAQnid$!JunY-NE_|NmQf&_{)rW;yHFMjouo^7 zq2Fg7pF+JX!R$AW1!xQaPqiQix;F$O-ec^dth^HAhI2~sj;M+h2@BRz0heq|MCY) zV&Z%s=_T9O1l&KvEHQQ6duH|vVk6E<&w4-G^pz6R;3!y7kkFz{6Y1v}qaMKA@fgVH 
zNWwReRp6eFG`lQN$+%Ibm^oX+ZXu zoSOWcrH&o>)+H_@3iqK*d91)(sq%&J8c_u{lCYcmd$mys>Sk1BD=Zw!C;=?)Hq$aD(JZPsMw*XZJrqT@5a%Gs^yS1kBK|??EhV_Dt+mog` zfzJT&p=8Rhu{tiXB(hqPIXOqEMqM>hb7J)s~aXKL`-2Dv_aF7p(wwkd@fvx^CEBd_Wy>BsGeq=**Xw@gSp|yE_)& zB{4T}G5@^(43Yz#CXr%z*hox+0XBCQY_1>8v%CzSd}GB#ZgW@#28aZ@T~-5FZ=eO8 zzZFMKf28{3o;S+JY+S}i@Gv`X8vA#xlY)`&>}VynaeB_ErHgJyzC98~k8GQRNZ`v> z41SWc9(&L*1X$LRxeOQ%sOEkP!smq*^8*YZ0gE4*_~=+TA7rehWy^iFN@PbgaprQT zgy|JClu3pK8fzKr-iYULMrW}no;KI~2^ z1)DcpKM$=0VzZf^F2mD02h9tF6@UPBx;pgZ^T?$j^h@~Np^DZ=1yB)dGuO);P>d#J z+)UxZGajtM>B40~puzVq?9PHsGwf)^wq^TSKP)oU5!3kY(i#Jn8s_TfPZ?`@(=8JF zCE&#SvCJ}(soyQn$dcf(+ggZYSDCWW0eAb;_pJE^iS9+z2CtWc8?0%E(vg%8;fgJ- zEqQk*CU|G~*bOj4WtQiG7J(VF$dIn(T5$4D=`owzf)d4}`oYu1Uywp6`(7T?%(_b2 zH5kM@^Meve&&%FC^;PFpKl%K5{5L$yDG5=G;LMk?%nooxVO8lyObMfaQ*FsC7dKX8 z<{hh5&nt^NBt#RXm0%LR+Hz7~Bcn~fn!Kr);~ouz==f?6x!dGon(4(`SlYv0#B$s< zL5rPzW?LiBf)RPeKeW-Ty|NCJLa0jI(US(OYze?I^c92Anw|SnVq54d=z#=j0Dss5 zJt?E4<)+n&+10U(csrSxpyscBy7ab!yj9c=+I22C_F=nCW3jDA;T)$JRZ7o@u=#zt zdXrv|5Oytf^^@w--?sQ~fF_L49@_pR3GGMhI04Q}`kjPx+m{gjV=@W{Xgz05|87e53~2@7 zNHzyGJh)i@%U!@|;JggE+!01<2zgV{s4lmeoX2aq^O3V@(*og0(jRS!*_z*%TzWz} z?*jW?-bWJ1kuTT0Ze1Bgd|QuHjAYe@w+Yt|fC2Yd&w}s!Op-U0ENZK52v$}Uy?3nm z6G$Yq-BGHxIMwyyZtMIc0>VI#3qIW)ieE&)$v&31w6V>~#*M92GlcxrGtT$tL;&o5Rv9ZHJWr!LoJKqLeD^qCN$ez4nM-_XmYe zL#;(QP%q_I!C&;52L91__F}>xuN`ir(G){%D$MoAPL5Y+7Ad1^Yj%;|*jp=-} z?D0=PAJVVDxt@LLi@N0FlEsOLtLzW^AD!Oi;^~6LCW9b553Q1^BW=9Ti9%Ou+TtGe zH(@mh?u194vvfv3R%6YNx8(XOfa2*ThZj9HB{X9@O5zY9lZfFGRE&5JJ(dnSQqVy_j->rJ2pA?eXM!u*l#J^e*|P)u)E!52;bo^5=i`l*_lmm)(_k%7)g*ly+o1hn4uX7Zh9=`!+Rz2!Vi6dx3UYce7)O|&IvZaAt~{-Nq&UF6Pevy?p7PHe9Es^ zvJ~fk5KP_3(17A1>nE|I@d--4d2%TdKSrfPHyISO?T-Q&D)bye1IWw%ao{X&p3wv4 zlnD87HABn3F-W)VU=-`7b#RTfX-kY46p0lN>bx^j#k`Q=}Z6sqT{@}ZHooh^T=^RHcVix zTW8mT(-9!NomCVdwpMsMWEl<`%Br(6NIe=4p{JiUeNC8q6vH6q{vH%vO?+tu7u0u@ zKe9|tomdXZA~l&+qP6&}Bzx7Ty2@T9jIV#oXW4P25e{~)g`xQQsj?Jg4HE%N#VRtEp7MyV3gdT@8~@hTJkR{>flVd^dI(yC(A`RF^wcx+bW6LiGl1qg}tUf5IZ}A 
zSTDzFgiiOM_p%>-vH!7r0f&cPkrfQ?VG^61#CG16NSlJtwu$g*)Qac*k(uQ;faLu> zKnbe$!L?sP$r%HD_2~F_$GyryYmel$C3(0btWW+;C1e#V3hQ1TH%OX)GORwl6F2hW zDHS)>Yr6QG^OViu)OOHpwo@JH;jX`eg41;mw9O)uG7pf!gsfzBI=tQwMfoO{#qaJTYz`AT9PizvIH4A8b6P1& z+2Vt+SsB4PCjtj3&kuD$8==$O>&22mf2l5{?Dg!@fNPdX=T`K+3iMOGCjerJS27L- zp|b0S|K_$!nyXvpBQ0Yw zAj8th|H=w-w{x+U^4i3&+M?S57XB>E7s+BuyLZ><8r)I^hte)3u#>0Zb6A-(AQO{Y zNzNo!z;z(UGORYK8LxWd+swK;6hDg?D;lRk>u9hp)5o+t_8>|Msen%ONA09FGO({T_IY;_xWoOW zmS;h6ttYl`KKmioPohQzg^7ZM1kSpJ} zjDF}iwuwf7RHxpcOfy5PhhYIvg(UwKytehpQ^uvG-R2`Gez;LO?Kb35o!~47Xr2)H3ZpW~Dps(YS~*K3&+b4%mrPS>tn0VIq2n*Np<^~{m4XA*pr?i<;c;%aklPg& zmU{ap*OmYeX$QDh~coND}3QDYad8=Bu=35jHv0Xo(-;w#cmeEU&>?wTBw+g3|;u^lx1Dt>!-Y+ zA~O0)E^iZ-5YimCH33pD+Ijeg$7TAn)NRGVd2=>{(gcoOn-Kb=SW5^w9h8MDd+0Zs zQogVCYrO2{D;*{zC3#1RH}#)hQC9Yx`Su9@=Ca+rOO4 z!M96^T7M0ScsTXRr@(-O-UCcJ#XiOU!GgCW_~m4gWrG@chd?`|J+qE6qO?z+XALhF zS?j6Tgb9GmmlU7?_p8vXL?hBlMCvK!(XD^1%Sy9mZ_Za=V1n{R1zVeJ1uvyu{#J(E zUozs=9;$UmOP)Y9a2qbhg*;R$r{=7f5|rXI=HjpWW_qrt--k6W4YGb!V6)TO)#}DIuI=*m5~XTdwL_X|5AijL&nqfGDr9>!)1ybH>!BC&$b8 z&ib!Hib$NILmUbIrfdaUp3YaktODen29Pd5hwFQtYZsNY7dNo#E_!Mhvx%Fu_W7Hr(e9>s?0C0P5}1U57a1q#X!86 z6ak&9_wDxZUdB&KUva_rZHIDl1apW?9KMb87!x-%)|BKN2DlGJqG+jc z20k;7di>zUj1t)ovufg2(xw{((1Mwp>A`H5?7WnFC32Msrw|t$@KH>i(GIJ{hyqiU z_b0Vq%#d>Y4UBo?**?wb7#kvkcso7-*YFGMB;%&eUIYJ?+6I4aF3b1OGSoC66cG8T zy``~yjUc?5H(a(Ij7G28P zwc@g{ATp%ZBMPEM4$$ULPdTyI-XD-mdGGdqQSbnY#ygKzB69F$njc<~@VMb)eTNr) zRLJg&r`{k>oCRwx<{H@}JQBR84`u#c3cwAofnCtPH0K(!DJ!BBJVA;2P^%r9m6VfQ zz$^AyL8c(W11C)h)NF35N+54?7-f)PH~djXmmR!8?NG*d@nA;RUrTMDZiS#T%gX#r zH&)1Jmv%My`NVn;gTIspUW-;kz|?!#i@*rv;}bhWc#y)SZ(GV2kO$|(H1wn6#dJVi zrc!JTjIN%aUhE#(2sE>L2JKsgKfkv*n0zvC1@mI@y!*_B9ABonly2ESmpdkaGCZJ#JU4J!ao!*lcZqs>1O;MSq9{+`wnk^1sg z@H`NzYfl>`?*554+Ek2}RqQfvXR*4e^YmMp{|aEx0}QrM5lK;2dG)M;@pnlxH&W|+ z$%SX~GoP-xUTD}3v5sv0fN-6zpN7D9o-`UD%Le&|ecGhT)?@yOL@x z2sGi+&(Z6h|Cc)WlvjTU+OY60gtW&QU9)HP;OVlFiTEpg&wnb7*L|xbt0Uc^B5g&t zBdpm~-vE{dPh*95OD!h>S{b`U)DLY5{L^PXYFS{GabG^%@{YjX%Flg8LD&z(!!&i< zD-%wAqBWgK=}bPX= 
z7Q+)kr{}A4e58$*N6c1hzl?AGdx6D??z=$FhD2tp`E_uz&)3_Z!2O|&h}&${5M|IO z*rSkglg1fEOeyhC(Jdt|>2Yo3Qa$|Rcs`GO#*bAp4KD000?4~eB){*?W>sVdqD6g3 zP&aQjF1w#Sq>Y~K5%5DY=O$kVbtX@yUo)AEMban=VN`r&pkiLYeA2LbZzS|$D`n&m zh5s-z#cl1%@3zNT3#~0{;i0oC{+lSaPmDxz&zJS$+q(Z6ETC2GA=%%xqX;!N*m zK7ZQ>sx*8Wol_^Mg|Ii;nP1j6zK!D_w*U}#C+p+l9_3iu#AVBC@5tej2Pd!o3SGHv z_HoOk|Dj=wd5g%j)(kalPG)&*&&=Lm(aBwkxL;ECokUh3rGrPRv-O;P_omu*;ohyJ zxo5REXG}{$9~eTEjHu|RecqDz30^|ywK!aH5otJJv-~q^~|Ncia&TE!u^u= z*PDb{#QS35gZEKrrhG3Yb4b^c>~P<6RKStT?WOHLir%WKR8VE$Gq-gg#8uP>r8wRq zHm$tP)#he*q3hRXCPO90Tn+KP!@kSZ7{vr%C5!+pM@e=m8MSfN6T4{jEJyqiy3R-u zWGfB!BMtI|eA`!!$cTN+ybbiVDsc=hvk_^H->;dUVDn1Ye^0~YQnP`8Bq}-+vz>;X zXS6ZE8U58fcnryL_4>-{diaOUWs=_wSN=?rc8s&)oi)kV=h8}2R?u`a2?D*E(1x^T zm;S^#tD`8Mv$VzGqRr&t9c>$EV|BjmM1h$>FHq!%Pt3$ymDqXiWIsS}PZr2bM?OIn zb>e?fb8YhMnl*JT5*%9yEARYVr%i8EwV}`Gs5RW!yeC{oYGO2@fg93J_YT_TTh#-7 zO0LYIk4joYz1$A`t{w=Z3uFFKT5v$&vJh*NJ<8YO&hv|9gSWeA(}+(JbqW0rFBdMZ zTQ#9@`adMezZ4Ej^^#ufm|uuyu!yjpWe3f6xu_GHdeT|72a09qgntDVcy#LFf%YZB zkS)+%>w1UY73Kq>=NzWlT+Vw3`pZcj>1K4hw?KXM8h6;PcN5c$vZ;ThKjzz^^)A|v zfbP6r$SUd->wxq5i5oQKia$jzZO6gv@#0!DI870XNdEL-R4*QT6e~DLu7GFsPdHM7 zhqR%f$vimz$j-d7^5Qnz4}Cr98lAnws#SbgN(Wr}Pz>&tORnmG%tGqH1!w?19&fk3iD-Wnp>~{$$AD*-z=Ai6>?e zR+j;v%`g$@5x%7|V=qbnoUU+8> zL)cKExSvq;Nf80x$~;nT8JB{JgpT_CZ?xS!s?+c0V}0=M&{J8m`WFGxemtCWtXkjV z;Nf0-buEYbr^fZ&Toh*o&Yj#@x5ieNWm*t?lLm^uCPIZxMMnLv;@&c>%B>3tRDbV1n4@JE%_P+j|yq$Z0fix z1THC;->8{0&RXeo(N?KA`8l(3BFUcD$&xV@;Ik)BK<6dsOC!$i2&>-Y9vSs;w}Hjm z{i4#s^sYE=zUj&v*-*g_2o*?_fea=|7BkNuHKF>8l9u7r8$S<|-ZZq~i6J>;UNdr*XzK9@}YYUP zH^jH@^eK%Or(i1dC;JyKM=-Rw#^FP@M{P2u1B18fhA3ZUA!MfDiUzTTf&o^dXNqt) zsGp(?kKz$+`=%>0Y70c_%bhb3#O45e6{U;J{t>JF7v~tC{hQTKoX8@9)82qDHv?3x zklW>eZ@Ug={(}<7^NmK3R}+WtQRi8B&IG^A*C99$k9lBnvkb>t3M<4yhLG3Gn|7BX zBjVnT3_U~v?@UGPjO)*63+Cl)zrV*nKPj3ZR3}!4Eb;a z#HZAbS_4WE^orOq$7LXB>$7{cT$o&Nhpa033}%2vR;=PKnYLqEh&$*IZ%(5=F-B@YbK4e#@>fd zbVTsUwnRB%|C$YPPg*K!uB!~{mBGi0khK7@95B6*LdAdc>S6X5B^KLNA(n07t71Iu 
zMhNy;_6#H)1MHBs2)Q4_+aKgkHQK~t8-R)P>X!`I^X`nCLHZN<5g{p-H7pq#PgcCW zbZ?QJ$D~2jk4pXy8i`K6H+(7xyyEhzyU%|!-P0o<07L5_8jbOk%=T*buM_~q!C$-p z3H0~3?#?8kf9A>omUJYb)ncxpBAfvc@(%}WN^NN^GK%@LuJ>U!gmTa%`JTL7wq4m+W$mNw-#0sio)heo9TGEYAY-ck@KjCZR^LcpK{FUC z2J6gPHA{RLz!~~YHwJwI8=njeGJPY!f^yL2)tgWO4?c~>oGFg)5^_{;Ah=3m_dSrf zHCq54(ZCe3*+*j5T$+p(u6$iu+DrW!cD&=bL#o6(cf6#;Duli|a#uOG&BNG^bE`C{ zk@Bp+KMcCUG$vY=advVAQ54E|8FT7N;64FM;B-3 zPeWCSZ{84{&hNH?{GISenx6P8vSl{YG5|ObctpYn$jE_wWXy4r2|b36D~k4i@^U?F zfNryt3LO*;Ama5D3jd7q#xx|iHP^YU$g?Umi~~6UfK?wg!JYUrOMDto#G>=+-;`Vn z0Tm`MDq|qcCW)1Ll!61q8P_#3`_NsZ?oH^B{LhE>nZ!{H*oiax$ z!*xDwqn@h2*BEyV8*FK;qx)u6x$u~>e-m=TaHya-V$=8c_ts9yj!oW^J+Y&GW4Gvm zDl8IDuNAnKX@}DKO)<|guKF>a13=P^=_2rO4;hra57XNozjfd;fF$q&FEAi2<%w@6 z_R*rU^&K9=h3t2h+aI9bK^kFykSWHeeLXs(7@JjP>30ecj$^ZU2mlt?VQmRW&|YNZ zid-w#LiMx19q^QU(D|Npu6?D2EC~R;Et?SCkm3h|WmqYk%SXTl@`qb2;}!${{PyOf zHT=p-lJA}O6B?=)deD7+S~P*oRo>7DzsK_5P@m0|x(plq?Zu{;>aEK0*}EbKsDvh( zByM4ui{)gfT5nPmiDkRLohR()T2(=MC|cChcziwJRQFMl7uQqfUAIP%rU;Xlh|% zz#RxnKslgncDAbgS7QgcUTdXy2G#YC8Gyy&O7d5A9Z+kV=-uD!?RR@eHu@CIWDGrq zoy!iTIOSlo@i1zY^veTz{CC!&Z413t0M)Nk&Hu5_N-tF|xQd*58~5A&j@CU;_duFh z=K0M)OWabSUES+n8aMQ!ZJx0NHtanT*m8zNTXMZhyxLnFY^Q3KY5g)EIYeRy6Sc>| zv9;E@g{Edqlmn*5*<*_ytXn7fsA_QoyCu~=Eh(h+L(<*4=w!7y4X5-mTjrG@2ZQSF z|Gb_Tg8Gal;rF-@m7ZTO3+5@y#I)ZSQO-dSLscP=e7Q`v3sQ1p*W5l}d-Q5tht#Ku z5vJKH(f#aIiDasUcVtMqh%E$z*zM`f6rtC0Ifv=;{ zmfcl=Spe*#*;#=Pzj8jdb_!3ZhgDB|2^{j?!cv0|q8NIZZHG-oo(4=0NBd`hLE!Jm zeewbOrP+`f=bRo2j9noGZY60Io+4K367ZwTE152{G?;_Ca;{u23B!x*ov~{69*vi} zTmr@OO32Lp0hvXPYnlW$&yyOyz~`J{Xi(V6Ez_5AN#kJmOt&`oF8yc;ubc%l0RTv0 zzors%;5SRu=gw#eoL=}wq1ka+aC7cMMXYp5h_~tU20r!A^4=!gFT)*wLsKne8)oIP zd41c-EDH9ucQDEP;UqLIZ0uG^Ckw!`pqBWDjSO~$t*6<2vPF& zhOFrw7j+-lOuM7`bIv)EF&P%vna^zNS(AZP38wKb6OHRtG0Ok~FT(z?#A-z1gJL;L zPdg%k&ng2gHiOdB$5CbK`JmS7L+Lb?-7Rp4Q?yU@f4LYr20%f5ZE~LNNjb@)Q;Kr? 
zte6#3MeNcpgQQMP9y)@wKvhZ^K{S70Ls~|3bS{NFQm`unw0&X`Y@?JOk zM-}nSPHOzed;u;&Uxaf>b!fYB$h?Bfo6QDro@jIg3%b-i@{%YL?QNH@UNGY-e+;5j z6c~Uio89}_U5Wpo*zwP?{5t51Qkw!THhXNpuf}|?lEn-qaJkxqo%fWkAzL6P$(2brTCxeR2oz3#>a7JR{48q+ z6j=stt0h^#95yJB=5k7YuJ&&iB7jPaCwecZNFh;MEhl+@DWEqAZJg4@J;eHtTnNDj zpWAmOI*)!U)4b~0JWDWq&+34Jn7f>KLjRZOC;jQrm^+HA2b2v!f@1|v$Sl%jvVwY` zuW%-q+xpjBb8eErv~7{|X<4j4Z$HCGkW;k76Z5^R66~oDZnV5TW0uQ$9#uVSfh{=0 z^6TqxiCTqz57t~su^IhTNC|mNK;a44p=D~F280o2!m{@qSVMRHw5ZYqNu6pES{)ZK z%%Uygts{fn5DJ7NcYkgK7PbU}X{+G8UIE(?&aron4a+hgd ztXZkB1pDv@(W!qgye#`#=L5cJURdZ^LR6;`v&L#cgr@4060jKB`83^X(Bry5^zNq` z0dF{@x$ZoU>AV^tcxkn+J00_!E}e3Q2@01JQC8>JasQ03{%9jcT8>=*>^HmgvFe+r z;6p}kCUnwfppYf~fn-%RB{Ua6T!18~rm3um72F%Umb*4~tx&G={-OR1FApI=@K`Q~ zmp0msF+2*5y#dq5!Z~Zoka*D+ zmS1V^u7tFUk>c%roZ^FtLk#na2Rb>BTaTXnF3O34&z~u(f;!5AqBH)O+u_ku>$3US zL*6A`F3hQ57X4NJVw*gwo|>Df-KL%OmCFU09K_`HIB*R0x;4ExI5ao%zNcejz|H57 zIHKk(*DXYRZ%27bQz=iAYO1j?jdHDnafa*Q4RA?bpp?z_M;d#;y=qx*N+Q)xoLcJfihftExks&ly>C? zP!Fy0St1m7HDpa|5R`q%?qb?Ez{EPJbD*>pEzbWkPhNN~#m<=A0(nF(tk76+jdw>R~Xbkz{qV52Q+hB%o zs--h#XUqD?*doIAG9+Ai$~tGSdLS zIF5RKFT{Fs0+Be9@hXxzMXy+5-mRFhJ zZXDfC+tLEFL5G{afS4`Zl-PSP?RgjNN*bjI4ODGH5A#Aw7GaG+gO=CM+BvGSLfw}K zo&rQtI8@Ssr{wLI)(hpVC&tU_eQ!2M>g`~AQPQ>4+rv0X>e*RW>@+{izonRk#s`d6 zp*{Pq3#H>cq~VV%Kk|Iour|Bn1#o>B^<0nNx@kp>BJmB;JkOr4^QNpY z&;!9IcfkyRt{rnx#cDDMZs78(;S2i`J6K=?I4V?^sjle?)zul)19AMGN;Gh60~2r+ zww5jH*~!B@gcOU)i-njtifrL&kjvUx!J0UGPAExMrDLIllOC8lXFca_`pl&*r*~eA zHz8u{Pc#BnXhTgidx3FKWGfpVzwaMlNJj~xLQ9xCN{o;r(+LCU9wBhE3@1?RGRefb z4D&TJ;{vpguw}h42=gT=qwmcWRak)wklMd*REKgk6_%;ykluy~^#m}>R~p{9_;l1f>dSa9J>i584Qn-5<8t zW6{&3>CxR}V=2QquS?Xa&`r>(K2m<80Nx}mdaFt=5~E=s>Xh4F<)J~2Fz_< z{Ie@{nX|~(q|<4ek|Q_JZLpPk3{C!`W{|+6K;G9L&F^1f*&R0U96q_D$hq1|(kIXW zbY9+J3X%-ubBY*0mk>n*%M>QR!AohL+qObhNYzwYfp?SxbyO2p zjUXOVFpTsd0dp7Lj^Z@;EX$gpRZ52nQmDWRwl|FoH%SZlZEE|%VeTQr^gqXLK`zA8 zrsGqu#f?rQ?tDGmxeMm2XjXa!9%Q zXqcS`ur~r*usUz2&p3;Go^9J{qzF6sCF$GT>mv;p$Z@xl*gR`hzJ*b$?X zYe;hL=?+~bP4CX9T1nzIc&8|7)7^ZW*>bkc7|ot0!13p 
zzcp$Yu^cy2oSg#08Iz_hXxa2bd>h6 zU@NX(`}`OVnPol5j;nzm zBif`oldqQB!kE5U#zf8~`yN9Tx~`x;@cS$KC-dc}^TR-9o7Sy}MHsFQ5nHPEw-9q9 z6^40Y2rkGKNa6Jxo|kT*%}FFVuNR3>yXt!VrBf~tPV!DK+VJq#3Xq;xnIj2c9A|Fm z^>bUpTNBG>io+Rwpp-kb23hV%j;uNE`u{Bn{<*sND|Y;W@sqlFFOl(at`3TnjXcn* z(&)129&(qzZ-rd8kwMK!|D}g@NG10J}Kq{m*MS&&szGFB{Q-#)xs`#@IgLM#jnonZ;e znv=)-l=usYF6u?N5No0h$8tJ+UyTS=omWh4m+W~pDiw|e)GQU7HDDI-eppOU9?KAm zNhlE*l>x>Y2jP4rbsK7u9nPC8iNy_0N=rIWqVEN8-MqOxD4DB5q}~$)Jsx)FFaHH- zK6W=I_$@qlKFtralCCS3YFc5ZSz$}kL zynQqBkA@en#GCdsNidiF4-5AldGl>kAk%B*ZG!JAY(Q!FYlI(+w{v;=H^oKoR` zGB04km}eFJBqNB+aw7a?Tmt8x{w*lsF6_p`z!=Bi`Bh3IW$8xo7u;~{*GnNm!R_IU?~hYs#;!>iOBLOIaA(eb)03Ue;F^qd>rFXkG$bF;})&Z(w7 z-y0ftZ(4zyzlxeAz{3;UGwfpsz2Wx|Mfz%mHtVzg^aK<2N3-I<$N>-rpD|;n+d_H~x3NVo+lfnwn z&`rjVI(oiyZGnZ)yd_-_p)X-vVEBcxj&ayaLKY-<(8o`Rs=x;^N?kKxB|~;Ut-DFu z>qDgfIH*i8x}4D;sKO7VOpXnU>md-?JAz)krec=Z$q$o``Wm7rp31tN0gaUXg;?&V zRm6`uQKfekPu{P7H7#e50ecJ>Us7i$=P#KINCJJ0T4+FHqPEHse#&^>{Q!wRoUEm9 ze`&YDjtpLgN>}fvWwTcbd5v1znC21WyI3mNp=&4&J4l-Yk!=WKGJ&X_`jBC+= zX1g3v+Y2hVkJ38pFu6k^~*JrLLWbos&z6@B3H4by;3Ahnbqu*DnlvD2`J|G#G zg$0atYI`<3KUB@bZri)+TFe9#MVLldafw}-E4|?<@-Ulc=pW``rw9Gr8{Lh#>&Urs z0%fOFom(b&ZrUAhgfpeZ4hgj0P0F}V4!Zr9YkMrnV&bPX}K=2~@0xDKi*&;L-qzC z^vJo0$xeBx=XPYeMogU!$f8h^(J-Vd@Rar%V=U{USD9-c&9-*}6H*)V@WQq|&TjXU z)sN^^c}A+}C?Dg{n7&)pGDde%);}+#pl4MF1kvB1RzIo}_x1h$_NVQcN<-jzBYPQs z;?s><*yeIUISOzuy%)T3&`d>x+o^z!WrSD6@2lg%b4@J3>R(Wq&D3P4yvmdyy=4AC z&vo#(Q1$}IhDU0qEx}|1CS&dMFw@Q{uw4vHTZXFiSJ*ByOA~WVzGThza%xPtXrqN# z&w?_@`B`euU7LXNd!ZRY55SZXdhdhgf~U9>3lO#J7rnE4Gt5_V>(LL=KLwUy>k>cS9$r#_mZhnHv4Dx1 z*h(piDL$IZ$uM)}Tm*X7RNFGn3fGKK9?o&`PTKHx<{V4`7QPj+E{;w-Bt_#K;83iG z(es$I3$rATNxU#Bu03Z_^2a_MqeAZk^GdHn49)P)i!y9BjfzIG;}NH_P#L9y1>s)h zG$2vI&L&Yc#6=~CWSz$4l)WT)obH%YkOZhds8=hFL?t4C^B}S3aqJ0za3QlTV2$R9k$$Xx?ySHxoOz| zLO`@-G(6rlJ!2|I&h_qM5zv8O{{wJZB|@LlPVEK3O6-l~mo!DRP-S!mcSg_&d{PzP zaMv4gSM({>hIFu>NlT!nyh~HGkDQSRi=I*A%g=Xh>&-z83tE~6a$vjX$A%VKYw8Dw zM`jF&YN)ID10DZA#Uv0TOemX$Vg_58?`Bm@I6>@Ihbj6<>HK6^N0Oi;!V5WlUoLSI%WhWlnzjbOlw1W#WZ$7KU_DVkf 
z#F4yR!mv>PD8l#tTYd$o_g;VmW7!5$i&ekzMaKN(Lq&TF@4HSugMwD1lP)GmTVSTE zWzO?PTl3H4!o+)~p70f>3ub2$#QN~|L48||tHKmP%5lq)L!kvo$46k$XGjyPyS-6N zd_HYJOkv;rGFYNR0Lf*SnMO=aBK2KwnmiPyTQ}}z@4F7@P>80Qt)Zg9ljTqIPQ5mP zUCKnTas?NRF9KNwU)5~eQCV!RfPVWubSc~NxqHw*pVF-$mMN63 z$YXn`m%*TFZp(T#8W2!`!?rohl;tXKA(WQi7u!~;Z;%}kl_y{N=-Q5n}cYZU36B)^7Xq2RHw!K2< zs))S|iYTbp@=8+{Z$dYxo{Rme;&9wuLnESpo-O@Mb>kQN`KfW4P*#WcPc`X`zkDiK zOXBTal+LT64mP3(^j}AD(u*;Uw4imQhbZpZ``+8AzaA1JohlW#*Te zdn6XGa69BVv3p5$LGI%tkl)hTtOUnY`H{NkOZ=X@ZF$y%2G8+8xdpMsY4%yV)@6IL z?ul9rIMPyA#mEZ1(gE2D&2n7igXT3q1%GU^}I36zns za+*IkkO7x*lt8QoAmStZg5eZV-nuChk3O=DdL=482d@;&65jw7A=aUm?8u8jm6QWu z`Oe?_b7~cz@j_)fh~mQqNJ*u9@$OHBk-VF-yEkqK@htrXvF@6vP*SOIMINql6H(~n zpn)_05p<|Rrvq6w!YabnGVNmyeGZ56ZCl^&Pj-N7ht8^ODAYsz-3ckUYI{ej76RI- zog!EAljk5h3v`(Bb-yXE^9(e2HAjbrVuAGlZ4G@Q#@$Ho5hKspyWO6$D<23-e}Qt0 zrW4$T8J^m=@XTQnM4E{29IVOn*D0v0uAq|&>RVibsMS9tW>r0PTbQp%tlL@0QhZ!~ z?)w7bqB;mMz&$nUwt44x(BX1)tFS$c-1g(tLAME*y-{QtHTfTc1a7^k>=#EZ#yPs( z5=c?W1%9|Tv-LGubgwyOZs=Bkm4Hz@xK9QE)BWE8oQM!-mO8BU^hmaCE@{}`4Gc8| ze(l2Hh%zj20RUIUPJJ!%z5ie2F55G3&V^kPU!dxRq%#D#`ODpB!8F#uzm%VNku;AYOWqedC($xYLN|HNZ(tLhi90G{QE9&w%3J)WGfmjG z3)leges4_wM6YOiBVukydc{6qa1MwKfU=PZRcD>Q=30Gs)Hvd1SOk0mk0rW;9U`1a z!F2=TDmbjH_<_41(O&|u2U0A3N1A-3w2!`@M{z}+upygE8sJm_=PSiqx}5uG%hlq4 zS3wu2^5LFWe4OotqSHfn3nUmEbQ-r>ZbLd|NC!Amg-}GlO2n8Vufo-*@m2&u+y4c` z*>j~r_4+ihbJ5$hcNk7F0ldQ7d8+wCO{{nJoOf}oZSBy@t@}O$Vg)*+^T0&K^NEdL zy5~mvYwq?JiMwN;fW6d&R3Ur!DzJH_>)L*^#PKhu7*5<-#xlAlL={qyVWkAe1rV|&KfXEk$c!GC= zljSOf7~71KY9L7*uRWq_WMRQ3ei%(FifddEA_ji2dG_*TwPu zPS2A&`H`xB;B1WSKpExJ8(!);0d7<50gslbo|$`qP#z9qKJHmk+9lsb%BAw2GG zsH}W+a#rpACNVSA9!eS(3&&j|lqLxModT-!H4!@5s%~$@M6>(&BYu#)c0-fQI>Kk^ zSA{6BsHW`*Uq(4X7Q|k}g-FMicBPVNGOFQ5jnr(jygS@x=Zix0&|i9AKITM0%`IvL zQfQpfjrI}NEW-NAD1G3PmvkI zO49Tmx){b$4G}7D4W-h*NIW#3f&FSxx>Of(+4u)RK9Td52_G@Txx=ay!BAX&?+S1y z+x?24dib;GDHlIk(Fw5^U>S+*vx@kT9_$!g8YMCmAz(mECZp*BA?#HGGe~n~S|S!= z?aYrRW%qPno!cdXpmHPq$^Oz){QXLEQrN;68q@dcp4AzcnNsEZ$aJ2*gYeV|L=ro&tIA 
z^lS_>fPR6-6!5X=CuOmXmVW;WFaTE_)O^V0N194r9X9OB9K*pUc7$oM3&_ASkJeUjf#bP;pEuBr4`7{;{is{RLngq{%vWO)p*1w~Z% zgD10wMP5v>V2hN#1!-$107RHdO>*vCPmT+$R){{bRb7faRy>ZbQn z)A{4t1S1pN`N?y?LUoA^i{ap zH`VUGdbH-I>8Kioa3f8)u_<)Dqc9c#t?KP1Lqh*T6L43-S(!t;UpEYo1gb!587GqB z4zAvF==O7(=Oo)vu;~2 zq(bIU*{UPUZIKvgHY*CSCcRIq7U1xLS~{)y=)F6t*6?m{Rn=MFsTi1t&c-Hd+{P<1 z@6hqiO+lwjlKCgS0$lgM=)`~27o4Jjf;}qh+(+?IWgf74d6mafl{7$|31ti#L~P^S z#!b&9sZ<%Gr_!}nso;KqbJ4-`mS-85vj;{UqsT%(9Qra1nK;Bc7^iYNSCRt?jJIFI zz-u2&E|3f0E2fNl(n9$BqD%$!4GkN(?|@XL%VaT4m3Vu&`HLzvT>=z#gAo>HeM6W; z)OpJ0aW9`q7o9zuS7jbSi_!ggI%1sNN#}P{rAngzt9#70`8d3YkK)oede-ON zvD`8Q;YW4&a!HHXb)(gN9Gp#NhPS>h1 z@eRjy(V2IMS5svN^ev0i9diCMd}02Ep6wX_MGTObAk<-I=Q5yBilqVq>2O}4Wbdt3 z6t>JUrJVcuo+w&Ytumxnw3zP|?^KA|8^ zX{Vz8JEC)5!?_+@a%D<=6z?CB@;gU@p{RuAdX5?d>4&-x+(2Cu2O3922?zqBN))oZWct~jmWk~w*Jhdqv167 z3}s~pqewpI=ja8TZ_K8+ko^VW*9U9K>hwII=!Qf%0haVcRgLCqUAi&p&lw^utQmmO zN6FM%sQ`oM`500rt9on+5^OrXU|c|NL*5!#&P{4VYDO`5wV#GwI*_Zpo_-GTITuAZ zAW#DiB)U0Q41SAc1W8%cC&?1%Lmd4-0umj?p`BKjxd)2j-oiemW7m}aQ6VT+#J2PH zN9%n5v5EF94qlpo{o(r;TEhDSq4)k`v>7N5V;a3~%Xr6-B%}LM-PjtSUGR*{EyBPh z^lD+RB9MAhpnj#b(+Nmt7@q5Y`hf!%k<|R?Yk8MaN#i=qL@=TX*p&0eCrEAPJgTev zAHK*6IALRlp)qCIfWLWB%R%JSpsG===TfnR2-`#49Et4)Pz z^9i*Ts1JwYKy=M3%d7XTq_+5&Ei1sm$juLKS3YAdv78Uzr$na^d3w{b>+Prn_vR@@ zc?zEKvH$2tTz(FtJU4-Vdb?GdT&EB@Fc%cDYy_lpym>RPz3x1V#7cb%i!T>o4K6t6 ziA8u`Nt>tHha0uG`<5IyzOkgNDnr_&I7DpjstSAU-PUP&(K-)(x^Hv>06<-W6hOVZ za2qjQ{V3G1#bXiCQDwncl_mf4bN5kG_H{vbCOwPLaFX@SPxFut;Vf_xo99@D+NQuN zf!2m_l;hRy7Z*Ib!MU6XI3%3%VN-1e`8HM*vhK6o9HEIbK0}ny5X$~MT-w}xgc?V+ zfxrx+AI{bc(vGj7g}A0r?Aoxc#78wQCClWs*ET@BeHAi2rah^ojvo7E0pfOJ1N0KL zRFG|(;tTRqk-h1(1AaaiLZ9EpS-O7D4#lt0SDI#wG2%->*Y2sW*Fc`MqQ~FJO+HO@ zPAP@{3&xh=dfI)ICF+_AM49Q`SyHReDY*e<@`y79Y7YY{gTa>7`k^U%zqxPJ8fawz zb=~p=q6Zsqz|<~BBDcPO$aDBVsco;4I7tIYOoTr%6=UP|m^ws4pSBF219-T8+Dg@w zlXrk0+w;R~AbtC*s#9)x*7_{n;A)Deja{5x>)7;D)(eT`aaru3<~QreG(+p_!tXfw z?~z+X|E2QkZD2~1hgPTu!B)Li79SxZmd_lq8RFb#7ez*A)D)}xm?12AJk*Cq{DNK% z(Jy=5ATkvdOpVVN8nR`ApFnGjt!Cb5zP 
z3kE{6|H*!vR~NOOR94;0rP&jknW`XuMP~~i`5c6sshIl`@UfeKwqHr}YhUm>Oz+u_ zjIp4LHv=R2b(<9g#8dcR-9g)144zF!9ye4*jbk1fLrV~SIP?R2;ss42kRc)x&Z}Fx z2ByR+BB2)inXajTtbaPz77!euX%TR|h(_U>mZYp+X@#WhX0I+a1-H-CwBbF1w6=Dz z@>GLXe(L%oj{oMt)sJ%N9CGar2e12o`ZVxC2_DGrTz;yxl#60Tr;T~_YTU~qz>Ycp zGEhO_!faetEA(%61a`Z-HP)Hz#)H{&?`rt&f({90S<1d=y+t2W+|1>yn&J;$9xZ2q z2DC`hV=~1fxyU2l7MXJiuS}N#yFI2LZuk)|EoR^ruNZKw1Mmq<7&;6Wp*fcR3h@cD zV%G`1KgXc_Ptk7&E33$`8ICLbs;4QmsO0ib$6G6QiRel8ta+@2G`M9Pw)f^*k#2WN zo}Ur{#hl&az-Y=I$T9e=qO0vh=@_GGp5L~Y(My+E9W>XArV+P)ukp(HL-uwP^LLV) z;0$BE)0M%)@{+{D8|0$IeNcQTkk<-EXn~y)#99NXr<(bb&{cHHhQJs8qoIh_a};;{ zKz6LCa96)7iXG5`K%~3DfXft4cBA>59Kosv^gPvPF)I{uyo2RDF7Wm zU;JhpgzJ_?wBZfBlgDU)IDUA@x8lT0%vtWRpk=}VcECS#iDi2KCe@jn-6X#bqT#eM zk{$oh9Ou&^;25aBV(;|-oVLo3Jt*Gi^=}wHnTLOwekz|zygD!Zg#naMfb3s=e=fiGXivF7lgNkndX1Yg{{ zeH7(z@1s=NSEvBl<3AEtEw3}qSqm19AEka3KEW7fj~We(c$0O6Oqy)|6@+9em)W?w z@o7Y*ZKHvHihs_b8|h}l$}5(tw|8}=_Q7_fv*qGTLil8Lxl(L-^$y97;7Gjm$$tGj zT2)CtWx|-;w0O&hef-XqnA}qd3e&snpWOmmmR!HZNOf{G_<9}rEg-@WgFD+Cy;yE= zAN2c8TW^X#?=aVHY6^TCprG&H_+T38>2v9bgM#T<4BYY*Kg8JS^MgI+KAJ{t(Ln9h zCFS5yN|)ene!6+QJ#-AqBk_6@$?7ZtMryZJ+-4JW-rF%E>CeS&hpAn%HrW?=dMRdy zN%*+u>=s;wEbehER~Pd~@uLR!tE6PL4Nks+d+u)x#bt7XYn)wz7uU2YJ?;qEi3qqn zTzlcnzx>7o{Qr%mMM3>+Yj8Vykfw>h%qSRU=ya*J6}>@jK(Sr5fUWfk{1RJ`GF%|5 zD0rgZuBMapA-Hy(S^Vq1gB#4HnAA%Kx_>?RN3jB0z~12ybj>;rbN2*?0GI0$xX1p^ zcMLh^gaqc@V_i7-bnxTab&Eyb<$yQ$dmOd&BF>4!O%6*PAVYvsbnr!WkeF**A}jt9vp=&vMmm;2pxUn2LTDMmoA3=3$u9w2ZCd5|g7j_QT0F?b9iyhku6m*%u9Wyc?KKJS^d5H0@hR zJjRJ27Mi1=@kCIi8k&Qfy+x&ls~YN)=2Qak*htf9b2~S*q!;0>q+tr*8W;BYO+&Vx zvV`wMBJ8>wLWG+&`YU1sxh2Mx9r`CME6blYpS=71iJcZUgyLXs^@`+CD#6ufJOwS}+$ov#;H@roQQbkfu8eOg3)bD~n zhqVp&$m>TH`qp#9o3Dc3vnIGEPA7ZvJhfQ2($*M!@vlMfD~e^y@GU&2ip)8WQO_J? 
zZ~)(gym`?h?i@ZQdf7@!yg&ZpLIJhk8-Uo2vF z6Axl{LmnonJI=&xf#6U0)s(#&@f}DoY}~hY^(WtL;%Fz4ZYE#oBql;Ki8;ltUiKLM zoP+G*Hve%cyaIjpnV(Cz~z6<=Cg%=W{#X9t}+qUqHxuo2_ei#e4;9O*5P+6jak|H?nJ5GbQl8@8tB~_@N_cU#XGpR zCm8N+&ayyzMIdX`tc8M5cEMq#I~rKg(|nw zIXw1Nq?bdVu%^+W;)McUVAh-C4 zs2)FM%3NQCDkdq}iQS9uprqwAF}0?C_OC*+1l+5hDe;yFGLvcX&nC<#i<-OLW6r`6 zYJOc<(@g)S5at{aTdUa-ZtM{8V0a9->yKN=(Wah+8qt;ng0P%>-KdgqDXQa0MF!_1u7<_VlrD-(x<`4zJLqny(KOZhkUyeOAzej)nypJ-`DDN;mm zOm@nSCJnU=dUn-f_YT@!pBbt~#HOkH;yr;7j??+=j#LBZ|tQ!w722-~w8ea&!IUK$t~lE@ppJ9o{^1?0V74q5m<+jcx5~@Y{1I-&dGa$)~c7gqRBd6kD3#uK1i8rW@w9_Hh(Ao&NHNKvJ@n^rX*a= zcH#--qK)bNmMd?b-yolTaQ^_A!mPS}f zpTUNlm}xTIPoE`M`d0MtKNgfvxxa7aqRE|ke`%n8_BiId{E2IOIfKH+z?M+bC#)~= z3uJCQ{4T>p(l3l8KU_|J(jt85Bm%x-Zt#GTGix|P$&r?sc~ME?(=7z_<%RbdY%!CWubSfXe7IB{CAnQc9m3DtCa>0@0r{yf3`92%Q)93K4Yw?5* ziVq*1=$KK{l@YOPwy>3=CvlmY0bvjJdyWSfY_tpVj?fz4^h<_+cYEISZFVcUu{6#6 zz}8gJI^>cWK-Nc*PTjePqpJS>be4 zorSLrX5V~jE@GPhepgZ*4Uf;HOZBh^e1oc#iugy&O$ieWu>0Vv;~~<^1=ULl5T6)K zR4X2|t7R%E&>t{w`!l8f5WUKD1v4FH4l06KW1HL>aL5~^WS!SLUO79Coq&A5`W~&l z0QpiOtbg3&%~T(g*y+x62>8qnv%0aKaR8juO?Me!t04Wusez{1kJgkV@`gbDl!NAFp(1Dn;_9-gi z7bj(xo^4;M7(weBm0ugqsxfLoR9<(4Ts?opBVOsa0a2~Q_KzC*n_Wk2B4 zx}WgPa3kg+NU2Bs8@Dt%P}|wN@09vB9#$tQxi(S;z$Mfq~nj z6QzWcF0_L+mD96j(R1v6lT0N$yG_7Y*}tKg4BqJSWrX+q;Dw+9U(x3kYtMmr3F-aiHd-9 zArOc(3nf4bNK2?HkSHxeAR&Y#cSX17c=oy9bN_$O`Ju2D$y#&GIp!Gec*i@|!z-7~ zw{G6MSxii9>-lqMu8N6mB8Z8t?cBH?IMXK2$^w3@fnGH~EmlU}_Z2w!4PtW1L`d`21M2LU zhUZZa#I0UjG{5%Z!SOta3(N-29}S%Cr%(U>`?m^+#`m~~soyXUIJbk^A`=iw+A!Nf^-~*?|X-nq{2$gw;N#j>@oohws{-Qc1 z<;B{0+|3X&-^z(D^Yls?p|Zt5>+mLVJ<&1gw8{6ue83<0K5HiiTU12nAGGiO{-_6^ zDQ*A`pV}WGtO0(W(*H4vXJ_kfiHu526Aw%rlaO|B*M#WsxWtdkp8NaRfYFzW&MO|a z^Pqi}lX-kMY9Da;7x>3z#lHSMzP+=bcZ)vPJQ1VnD0-|biR*yFF4Z5GRm@hY44IFK zyOQf%|NXhroW4(%#qXPgMPJLf_x-YBFQ8^A>Mlt;e5zvYMCZ3ZR;l!xPl!AJ<0(&J ze_Zyjkh1T~dOALAUbLaa+=9(g)JBsu&;NKzFR34wO(gIBZX_?e)m^?ny~Y0~Pwan_ zC)%d}vpkVo`QNq$oX7usp2&s%FJJ(Z@V|iZzi9jaKd5qikb%)__T#S3(_wt_G2YhK 
z&HFT0*F)dE93o=Yy9VHxu-Dwn6jT}-#D|1B0{h?z=KqbmjtoX)4ByP0_;wn~pMVgX z^RN>=X*q{{Q!*i7$rH*Ak>;hx3rIkAbBbI0Ul{YL?+~{&iM}dtP0CebWTfV+YUP5} zPVA(s>zWp5Xd+zgy#m6$Q}hJg59zXutygNyoj~=sUU4bbkBK99AeqR=QW>ALQ_>## zGus}dU$hXm#}XS%!I?cdIey5XzGI9K9(VFh*24nf z^EnCNVJAQR7iRsd%$qsnqGL@jPh`IE3%}Lu#F`t27qhn0H%LorH5^t=w9|KNrP$l5 z^m?;^Cye|Bj~;n9pBtbZYpe0t(xu+X;H!5i3b@G;$^Qn>Ax6A0O^3!i3k5tXcgkX! z_t{<}!^XG2T_sCHBDK8GznlbM^q^mmy`m29NZ)fU*yMpw%>UO#)dCGG3&Q=s`f#@` zbS6nA*g@V??!0FZRHjpT(IhLzTCu-se?jfrd;G289R;Pek?iz4eFy-xKi&6V;M~b6 z6VpFtIKSWv{!l-uRz8##PM3fLGm47zW&}YCBjcD&(9~jV(wH3?dETY(6%}JN7zx04 z^uJ+$QOqR;8YxncZc4O-V`|s%C}S&xxv4O(tjq#K6-<>IE0=+`=12m`Mexl-_SMVg ztv_-Xhwf-&4`R2uEOaEab#cz0`0;IPoSZK)O|x~&J}+XAr7X1lQ0u;%e`#j==TpS4 z7;X*@8qqyIbNPx*r;-xAsC-?=^i-qv#E3*^9hzB6HaD&tJ$}`t`_;^?_ZvfvfymeP z?<^si7RbxKA)iGXi*tN5OUW8fC6*xgOdckWY#PMgbMtn+L<=O-Sh=_jn0{cOzoZ?1 z*;ck6K0mJ(=$C#ubHmf?yCy!<3 za0hml6IAx5V)-BAR;o#(}$^ z();(eTEI$=jtPGd1U_S9nAMY$g??dkkx@)}iRL~JZ3#+sE ze!Ep@>FS*RoRiI=bp`4vFm+W>W5_Fg1#VUkBBuWN4ZX?s^Hcv=!9R*e%Ntb9XHy%? 
z<82uS8qXRzR2vQN-ws&Ju+w>4HNp4%tB=#bJS5$rQAbQu&N(OFiStRWzj;AQ^%!`# z7I*KOtGj#zAx)cn3_Ro?AI{2L;sVKK8G@bU+Oehr4ekT4JF;FJe=Dp`kDU8^aS9>; zWsfkra@ifC@faB7kdx-MY;$^9WhZ(C+c|>4HLTq zoxW#-Pcr}I%jQB|`9+$_7(WZNd~3<|{Tf&9mN@%baOPUGLLO;41{5-7#l-GTkGUeb z>zf6Q{eKUJUFTl4&t{5feovq7!t7j(zZHE@6+>;;wb~hnQQCSTa2V+0 zP3+#QiJKur_rh-Bj0Cw=2xT{Eh!|yOJ2opupmja;F@grCF=lEv4^gW}(0uE_vj$$hy&8 zjBeuC$-uwz#ZJmWoE@QG$b%}b^uHn{lVz69OnUaMJ0;zS5~W~f3{XiKv)vG!1T zx!!&CNHZfTV6vV>|Gju0`Zfp4`g;)VA?@v{2aD}wp^M!s^MEh&!%c4%{A1a%JCpkR9 zCuzoEwQPT6*Yx#Jr1=;#G)^y~`4U}wWZPAt5ipvL_kWWrUoVK`vtgIjBT`SGK>C{Ql1QT|Bzc~(ki9V6A%)yN`S0#vj0 zmW_*jQ`QI~Mqe5*T!pV!wYCxoNh^_JEl(AtDRz|b^9e=1lLpdTt^vk7aCwa7a-Mv(KTA49FR%t^8;P~UXAI#mf{J*>$6houDOldjx?!h z$oS>#HL@ISU!)%BcV)^KlTX)5@{7yyUt8=KdA=y8dEl@;LI=xk(?RzNC(KW*}Kw;UZbl*5{xrMFjP|-Sk z;#Qb*m_nFq7^bGG0#;esvJY;^uAP~ujn_Y)y`=$1M$9gA0H+oq0LF7l1t8CUl5U=6 zCS|KBCHCsThz(C-oBlCkHO2_#l(7R#uFA|d$o5S{sLXID;jXTThisj zeCIT=sfADx-{$cT$Bb|I`YC%NncZnqZdgbLoZ+dZpY&s?Z6a?WHQv7J+~u-&dUGfF zt3Z4m@~>&vutrlKHRyZ~T{fm(CIO4Z7yCq(C{`KtpW_}L>2cj#PZFOa%OV9%Va0aE0*`Q4YQiP|J7@aa zKd)~*GOHM#$6vs7!~XmFbFVL6F8WyF2q8|iw;BrTHDSXyh$jXPO83K;w7|Vb+@+$O z4T`?=oS$%``r&?Dap{9i<$+BjX|jRG>6M*vdY|r2Bm@P*GaJ?ZYJCcXj_=&*U-W-0 z&#=gE)_*{LXJEVjAWVFLdwve{NYh?XJZu|QU)`{brGZUt~bwH&}w3UVZV4=a; z%%)MJ{uw0e2#dtpka0=SeQ_k;i)q|g!(Elp$iO)1H_rt3oP&Nrj1w_)``p9o!e4Ng z`rZ@K>N0+fVPr8@yEU(HC|7fIg~A|7Qz9zZIr}0h*Rjl5jm+6HnSe&<+c$%77S27# zuEtbTNB^@Ps}MKPz=v~zsHJJ(1bVmn?ekkYwt!QPaP=OtAsHkfMk80p_(UnjVbHqK zP$%CkX|WKSf8+V@UdPePWbWO(WUIPVo#H=Bo^QAiW(-Q$Ey(#A73=mQ|-2D!$WgeU+8!`u`?>mqQk z{T|?#^6bI0`g+gw!@WBWKreHSRv*aLQEnDF>X`^&UaTnI^cEo1dBRm8cAPl6q4#K~ zZRrUO3kR>VDed?BLMYc+cVagb@_Y_DZsE=6-5~{*%ets6g?dP!Kuy6fxr^TXK7UhM zWf#t$<=BCt$N6%Gh1Yhu z2GNO8o29XstH|q3K0F3)dy&8N9lq*J$T(qXRB{?N5P!^7k9R0EvjmCaZ-jC_(!dM# zyBA{W+?(d!G4lH_c zo>n2Wr1O&d(pqCIvo?brG{t=Iq;=g}EFm=%_p z-PPvAd1pa80FStI{VBO~++B!#yE<8>Nza#tq*q|&V%y$OpO4T+3VP?X3gpbKW}NHR zU+NU#Gb(Q8;Ty+@4)Ye7snKdJG%fcODrHW-ChjW3%deZ6aJ#&tU+|UJ@y_9))sXoP 
zzfC1=wOlZFsyg_$IaPantD!bHhiiMcGD{6?q~#Wn|Bg{5u0lL&c^UYABmC1-hr*El z=gC@cHO3jL#R!aFWC(O_S78N6=`uW`GJLXnGWf~ldFI1Upml4+WPf#>Hn^x^p%J&u zq3~Ubzu(*E*=nDtYi-V%5K39dns7{V02!HCIn#@-c{87FWx+V| zOlbqlKIfflKlOt{H*5uqMs7V%$Yz%W$YVJCxjm(1AI{syD;ag`DJkU%(r`OHc!7>T zCf*I?TeEee#to_L$MPsMBc#}0pkITV=TQPJs)Jx=?NMaFXT+B#J5}jr(@w`VsLk0jW@P$5;YJ-j5ym?Qi1LUIC^q`<1y}~T9z(uY^-u;MUoPH>A1pF~-Q)SJ`bX;T^ z-t&SamUod+$Wx1Hzz#{US_xLQ+7a_r4tR^qo^EgVxg@npM&E7;;sy-WGF!SsYCBpX zG&3AIcZBAcMU5IU>fxoWi*_oUt2u&O2FFJD4&n_vCcC2%^ni)P(3VTwgz;D`s3bZYAR^mqYAnn&@)nrtn(;vkJRgE(#WG&<{O(H{R$%l3hwlW3v@{F$)!$rwzqP423z5Yt z+0B|f64Mz|T}(Xh4|3Fv_`;bA5)&b=A3F zds!(pUxunHB_rRY1)_%1vct6KOq8X718edn3i%kpQfaFolG|<>M}r5tZ!-9tWYWl}?ym8&4b z${gE+9dKz~oegZAsSCU+2oTCwkG&scd%86h=;y@p{bRH`LO=D>`1= z0DrXIArXr<>Z7+#I3`XHjuU+M_s2KNGXens%kSjAGaD3ynp!R0BV69;HL-bw`Rk^nP($MjODR zKMgZ41vjTi`)gVnlc*MGGS@ZMLNI~KpT*7HVIkgp$UAllP%6SU~*kU0B~ z^JB}Y>L8{Nxl4;;OpoOjb>%+V27;knk)*c&2o6nB#SNpgC~^M*te0~1L6(%o7@ z{5G~2z3l}3bL?YExsJwq$0xPC0QY>4Rc))9fY#MX4U(D-^+4p;5_3hNvtRApVlp1l6Qo=W$T28NSQR1an zn_2kRMKsQDbuViiOfx&`+k4}E9Dd<~`zXg4aoG>& zMRvZ9V~x=QXZpv{!XfTqL;eWdvOh{t&&poGk=)R2@?-J3d$oO2G>f4EaVH*jrq3=O zFBD8PRD#x3@sHn8J17)zriFgk8IT0u1=pk<@VY&1)6Pc&ifZh+1~w!|?V}H?>m&ft zo+V2JgYY2)A$WDQ0|$C#9@9{Ky={Ej?=fmoOA?)?;_6JMns*_E&zI4h5;v1Mw9r|Y zl*1MVo-Lcg-q2LNwp${&Mlv$AL1wJ+&!_2HO;ia3hm}hXUO=A1TuL~ELkzi$6}1Yo zddQo^KK@FB+W@y_S3Crqcm9}nwRwup9|duZz+NCs)h5++JX!GQ8$2;p0rtFT7O&px z#T|cov2R>r(lOD%>x<1a>7i8u79Vi6W{JM?>R$Gqw%;?{p@#j(H84@bGA&opeW#fXZ%Qp8dK*p*=Om4?;OFw+u8uP-8G!etfrC3B9IlYFluIWR*ca3 zH~m&9Von~pO;S+(obKJ!Iv(|1ikjr^B=);3wbNcajYmc`x7SUcy6P5dQa4vb`8rb? 
zcxmnoEAk^o&R>%x^|lHzv4p3{K&J{@EjCA1qSfy67YkO5-LQGP;fz+*mUr{xkAstR z%vs6hd!T(bl|iTc!r3#}$93-ByoaQe&n^+?!*YJ-H6*jS6`(N$(suBJwH54M6^v=X z^>AO|q-LANsO=aDG7r{O^KT-3dn<`kyN|xKxvGLSCOqtU-M`6fxhpzOZ?4u2X-DOd zM#C=;m6u=AIgea#k^P53d*Xi6NAL1=%6H6AN>Qut)8|3IS0cx8&7S}YUV0%P@)RLr7URfPE!6=pRF zZApCqUOBY0BU#t5DmS9uTRr{TON%4s3Hy+DBC_^JN(2k{1oa5PR0D-UgYzwfJKDT* zSY&wi$*HjPYYAR?ReK^g2d7?&jBJnr%_5!;jE9=Ch%?XV@shJuqZPZng(j&TIhmO} zaQ#zxOnE6Bb1mk87kq~4!egB=uoWZxBx^<1GAeWEY{b$BMvXr2Wp6!9b2F*OQg=>W zjctWAK3_MT-Ee1Mm4KqXad}e1jK(z`XLsLclmD!6zqr;6q*G%qNwZJzYp6CZ&Q?P> zIdoyd{gbW@K~zPA^j>sRB3NPnOA;1#l<0s1Ty;qvY02|qmhPD9dx^^!g10a} zMo$~>UJ_<#_6io-DGN_E@wYW_j8gr~!SLl!g#9bVR{KOjZp91a_^OBAy6bA<5KwTc z{@s;WyE)iZ&)}n^&!q>5xR+t_jYUq02J`c8Y{OneGNNwFW@VNGvF>$zy4?`FW?*Y7 z>cj9p!JFl+WR+eWyXLxFPwWC4pilsk??1gl@vaH7Ea&NJ4}F69zRZ!CRe2INysk17 zzkoAmjp4s06gGHF2T%5D!iw9bJ8Kj<%x+j84h5v-X#&<~XOGA*@~f>0n#Hk)4G&m% z?B?C_n7w7X4A3Z9!+g|G;21N6TR>_DfOrYa*v#9uz1GFeC;~Q+N=|+_8G|5B19j{( zMqn{Q$4^J#W)jdQH~CTNVCl^spBZ;0(+k=_Y}||Mvt5~Qabq4C-wEHK;h&=x>kI%v zD-*r12N7dS`yyXy#=bAJ)^}&tdni~I%^Ix7JSuKrCFyilqWj_akIEFPfrURluTgjf zKwUEjAyo!}{254V$;QanS4tM*5JZ2X0g~Q+NAfVNq$i>H2z#4HXG;5;oUi-_NwtrGvF60Z$?|lx@sjcqb&aCLPecr$EtT>ocO;1j z&R3RWx6aTm4pnE2YS84>w)ajT;Q2K~2rXJ|<#Z&xvi>kYm;z+S0omhR+;Esn>2&1PE2TMZLe zd1EUTlb#Y_oD-EuZTsfamSRSe2o8*}Uf9^J)ndGU*|X95ZS5!N zrk4sEK8p%{ch*V-M zeMSyn#VTf$RB+l;COh)cGx_r6tx{D*+u<9I%`USxA0=cj2CteYP}>51+P+@Bf7&}` zHI$G%HP8D7tPW^g97-gtce!TKKH-UNP(Wq1j;_JR;lUqzrsl!b18*KVUwQes%ly!~ zFM~geO@OyJY>{|J(t_c3zPJ+2RQ2`q)|joxHwGq`y`xBuu@4%z8oNH<<>;H0!GSoL z6Bim^-R6!k7soe!vQ9J@k6VIJ4UTQycSKnE_96fJ#j&X2H5=-U-y@BsR1;A<01Dhm zxV)f0ttxZv(L{r(yFA;fjla^Kkdb!d{o;7%xT%5`-K@#C)ykdO|5DCX%2S;{Yh&Iw z6jT$a=7!9QXyouSAX-q2Y6M$&wPVjN-Rhv$2DSP%a#z}-mVdC_%UTD(TK`_r}cT>ZG?l^8O?um8$DzjR5UKwXiM; z4p7lNadZP|QCE=Vhn>1e zY3#aX?y{&<`n>PbEx#nl_Yf*Cme~5!4?394b-JdklO#Sj>r>r8Un)nnZ%RG+_^=FX zTw%J63Ao}KW@9ozM73{=S0 zt7}JJjyzu3H8Gkru=YPh{BiZWY{ZLcZ%a!ZX+zyUr#{I2@P)|%;%5Td 
zD{kqf6HpdkNblhlGtxHzih+k#2dX`Bi9~N6{#%#)kHV3&s9A_AA8?ghv(kGEz`muIYv4FHQC;^y0QbTDj{<8{C7(;>9T$pCpts z+rV3oS=Q?l;;tX3sX8@W6ZmHhYvn~psE030O~k}B$YzCQWvE5$+wa)Y{^&#}>7|)9 z$+~=-4w@L*zQz7qOt`-%6}9eb)?5xcah6!1XxFOh(%@tSat)01Qq~Unq@vXj8D2HZ zxE(fZZ%AW94}$&L*<5@s(YYsL0Li@MtYas*R3ABw&+P;15Yj*0rO%-XCG?YZl5uJ` zy;zRz)dL;vwg=@py`KTF4|fhIte-x`<5JtQbPOnltd;rZkJgF4sNZIBLmeQmVB6|a zGdH7@V^xZfi$g2ijC;3$-S+;kqp=fld^d@(!Qr$H+r*&(tG)Vl zyj|R)nTmTS9V2-op-RLy;fQ5+F_*U6l;pq8Q1H!JS;4Td{58f9sMK~=pe?RZv?WDr zEDCZ^;#5z;QtEkR1yA_kYFtk}v)R~c4vY`WE=&%Zr5u5$zE09k&0s}X*l;$9ZTv^w zahT}Xhr4f02E0dt4Q>@S$eOAMTLhwAQL@=-_s(1FiYicq;AAMMe*nJETcG**u(p>; zmbu3)AW8@Y*(aAO!iIWtrh&*})%gb6YePxuu^UDo>nIdD0U5*!-n8mbxTJRW{YI zLS8BS%QLN4XTpWNCHG5E>J6=@XTq6q;CE|Wl#>id**>_}YK6NDgo;Z!S%#x^78Rg^ zYL@9q0BfEyQAdf&a6nMoDW7@7b&_0kVw!kKgRIr$XQ1nUS-zjH*RbB7znUEE%%CY!=72ooxV< zBfv29nG$w2~DbgnqkE2+T!dZw@<_57N z3I8eQm}k`?Aukz)755%!d7{@yzigv*%UQ$Q?RbnSyPP6N{lYe>JU4eTM^;t6*7Pc|dL?=pk#C zW7x)4hg&;`lVgdOtaDB6&x*q6tSi&U??o?%@z_YLSULLDYo=e)@7qYX!!lpw!+TTX{=*QVGwiU zNSC3Y7I`_GD>c!`sym=t5(2 z+|kKfRT9Hlaa@A=B8$+G^2$X5zF@VV`^9`|Je20A?G6OtHHSK)K_v=-8Ctt103Jog zp}YCDX}+aNfcLPn<(YP8*jGmM&F^)iGe5TVUaQ1uPR=PHv9a!~I-EV9 zqB1bSlbkB67d9JDAuD|*qlN+PUIx?oU6i_t6pGx)j3ukoL<&+rV zj)4~pJ9>=SAEYkKxSN#*V58G=pys_U18Z1q105#&W$<1X;t`QwKZ@`4)#j08t?M$= z)EKn7NE%`gA%sqlrZmHLjcaDJ6D$U2RB^H2yap&n&y=arl7DBmcq z_edVTL3j29r4tBB{nY2Z`I%FU^yEO3vX@T550P0J3c~9;F;C25<76}s;CIs9(K6b8 zX__p4*+4I8o#YqBBOBs@$_6-{kkrmKUQE=a)RZ!2#t)^cJ=P%Xa18577?_OH_88w- z2vf(U<>=zR(_!A^w@(AbP}ZxK+{COCY~#=XsrVxV+SM0om*jP31_zx>w#h_P<7;OT zRj}%*EJ}#MIU{m{-+7gT`^)X>6hNpb3)aOHbK&b<_K9_n2#iC}wV^-*p7G`DS{HE} z(bGF>?5UJXaV7RO{XF9J_)0$m?D==`TMXyM&mRi*I6}SSZ@0VfwnsB zx)Khy0T^OLeTf?O9{AH~clD;>v{Kv$jyqyfr4^8uYHHl&we>g*({|4EFKm50o7@0CaH&5$ApmfNXV$kHv3cUp}l(eArL# zYiUOf%j>v5#WBoUscuP5C4vtA!mY^y@p znuDW8mdzsi*^_Qes+YnBxxMfKeSc(zg3Ami{1ptsJM>V-|H(aXFWNNetBSU2TsmtIdj!49}TXrWLLk2lX> zIYi+EL(Fzf99GXxE`Es<)f0f~L1+Jsiac|`u(NgNOwxCybQ$pM%N9P8HF9zTtkG)F 
z`_0COZ89?lBqQz_=}!SL1Qh-$&bw``MjKj;($*&R1tY$yeG^>S)ZZXA5ap=DjzdgL0JUXGSOsH&(%mk|wHHl-68E+Kq)5L zJMRC`cg?T?R2p-HkPQegUzevD2gjgjodfIMuS)m;avB>5U1V+8oK3vARYu_}gB2RJ zZPf>h^#Q`ZT0sMUkI;OlQ0pBf2~cwuV|!*oa>vHVpz^kBp)M|s7&G{=C=C755l~d( zP^}2dXE>M7D5nvv=w&jjWJ*@8{ zb7;>ln?yKJDP~p6M;G(9%<}}0cKOwX>>S}0#>(C%k9-R0Ds)O*TIf70mJ9H7>rEHkZH8W6V`WnLWgs3|P+(}`PHexAL z*Y+?2-GI%}*(rAXmyl60*IGG?TUq^s$E8e89B7u>DOhDh!Vav^b>s1qQZn0*?ni6- zo-qmNQv6BX<9Fo#>C@CSt7Ku2Z!XFAbv`X$?sL(Sy)9~R z*bq4%yOo*9A!@G-0GSNn!-#$*PN6o9W6`2=t4OH#I#f&X^bF(7kew~yqK)_=!8LUV z&LyPsP~A3#;B!~+#%HVHx+psZA}T@nCMs*hB~#XVCTh6FsGyMNEv{@NA2vN-BfcxK z1Q6oU+WLHWgwD}qjf3nm$l!2^LmrUypKCIx3DpS-ST;)`T_&Ig+V3#u>%MKDt{}42 zQ;hXio^6`?FT<%&l#bxY@lLjN-5vc@==@WLhS=hI(mtO~X)+(nBX8NUU@RidL%|sb zfGBS$7`xMC$B~}uwqNI=!F6YTC?IXw5gc6(C{C;);2l^+7~~^w_sWAmGxtllSV15` z&EI4_Vo+si%Xjq+dPSm@6^g8HN}2tQ1ljQ7l@m+UwRwS3WiAfzUYyRrKn=#gA)i5c z-q+ciF7_1ANPcg;F1nT&8ya0D#WLn*FGr=?U-iUZ-N{ic}iHZO-sfiRdZ9s^) z$j4z}1F}XAI$o#z@XeubM;i;+4e`mb8vUHd=1BncvDpG1l2<*M(rTz67~3dF4H#ME zdvrelwDPR(r2*x!0)sg(;+UWxCsz9|?cvTziGvqY55w|5Bw%wBgOf=B_XkAWG2i_a zjB-+Fh1s<+>VR|R*UnKA>-F@b;4Dj8Ika8f63KH!4%#)@-D{c}!8sJxgWAixy8IY892Z(|Dx zWrt8*c~iy<%N8|vJTg$H=f3q?0fqs{>IBSPaEsvPUe#=cGf9! 
z(ilM=Hx$nAeS}#wHp!NKXZ-Giu9DOxTfiWS6(OB*gRAxO=`w5u=m7Ps* zl+hsUYN9Um^DtDq>=6qJd0rUksL8CNb-%iY^tH(Z$c|^^-~J;1izVDWF(c>cHbDyP zXV~>AgN`aorD2BzTgph3PM0oplDNbw^YXQ80AsKNE2TPHG4$L#sRBkBM8FWjj0R51 z)^$6t{^o0U#)M3^P`MBSW*+ zGsve$O&zCt!7H^BzspP|O?JkIPdhH*m+l(c>bJ@bxa~c8WgAgR*8c^?4+oZ>IDH{{ zwgUE)6&_OBnxgF9JeqVZ6JxdmP`vC}E&&w-q%JsaIOMUh!uP%2bY5Z-ATmT)bXT#W zE~LHa=~~;}tIHqW2IG+2e(sHXSw+$luhH4%WcCuEy=?brqEbq9bc=zrQmU^0jko$^ zr~F)@LoPjuk+WHArkqlXc`HWG35y#9M4kPJ&~}bcKVtXT4H!6NGPGpwu&1h9@hjY*o4SH)S~1R(rqK)3=vyi z>S*8R7N-zg78pioW**sMXRhKv!7LjI(C}MqOEYI~->0k1JP7 zNPK%+@7pN^QsjVAUCWrAlj91y;C@w^?RwhaJ%I4zD=$>96QwraWPvH8kd?xK%x_Sq z(|LSq(@~>Pi_v#y>z;28w#4!AOIS=FN5)Oa_uL?FU)P^TL0ra6yZ2m*51&0DGq;0x zwQ+1Aya<++k>_jIlrpDcGO1_^FqVdL($;Qh4=V+ug{}GXrrjijToL0ax{z1srTDcsGv@oJCj~Pt@YKkIl_g5_nW2+)Y zfp{F)=S+_N*=*SZ6Z4rm7&v`~hpoxnD13=)>=&eRoW+>N_B5*#M#2jkF?$*p578b% z8rdfh0T29fNuT7~9h<8skBvq0hE|B+iaet+YTJU3c4le1xE`ESlemiuc``TRsl0{; zQ2_3J9XMOiwFljfw(^JzPXz(m_?RwH?Xtr)G4mJZk{UI9UoijeH`*OnTphSpf53ALH_)w|x|opwV0h1?`FZ=)a6JIL842)H*_qJ|P}_4c5s}Z9x`=|x)KzZJ$IiZVb^iedXB+l5tNrOZrBuqPxH^J=#XaMpL%{qG`Yv(9op-QPWdzcWjWI!1f}NPe+iSQ> zslLB2Q0;&Yy%oog zzLl(ar9QlR6TY8G-gBow&6Y8_V*xRrTG#qgdyUxquk~kuWf+_A6u>ZPfDBn*yBzRg zdOJ1;8#LWYuwZp=$N>bivhUw(^M!_CKs#*j_w^C!?)44w0ipbp8IlGFDflfkU<MSwNHo4}V55wutV;AuiK=7!{{cH8B<%(bG9&voyBzm?c z_1WzTSo;S$Z_lw)_andq&P#avd%#k#75FcW4Vrga61J^fh;{vfbp{qFrV%^PXV(d= zPE8-U^%yvArdbKJeB0O>K$w0L>5hSY3?0)ZWbl<7JQs`z&^`f_#AYhA-e@<%FaQJi zO#NWvPr@jYo9$bD{m(H_&=Z4e%_|07dR8`M3WJYe2Ig!aa2vWQ=TG z2%jQ~cAAKc674hz=W~ANn=4yhQROoyQEyurngxKs7+ya(Ox-5G_on7^wIQ}%-Yd>b zf8iXavy5u+)SHoc_=Tcw#QgD4@Jm3s5k2!(L)5Af8eo)Z)L7&96(ebcZ_=+zKTVm= zIBzMUbl|J^ovH92b7aqN!``_ce;EyJ$c-1vaAEY!SJ*qE zp6Pwwh`?PeiOQlh0;P7^zK}#??fcORDcQFs06`kWXKte-v+qN3sz|MC+>`_~hHX2d zNHI!%@~oOm>MDsE&P8{KQHF2cG1kqOP*2Pb2Txu=1c>6d3;Zk8^e$3?S@qah8u_DY zUB+n^pBhecz$<>2y91r1o@HN*MhuN2TxNq_k+z8IA;KPHx@_UeegitrD;c|f2lNS^ zoh6Ojub!qNcZCdVT%EcKxS*$del2+(A@u1+e)6E13ZCTs-Ru=Ud=g)0;alo-QgCA_ z(z)nrqJ)}j@(D@emx3ICua%k*bxHs$5iSvonF~ww`gXPJ 
zRNY3>ZF~l>25JOvZBqcnY~fh!$ZIHz1Gph-l;gQ0gvpxOWK$B*+>*A&TB?0Vit?_B zMcV!{_>Fwv*`2EuIX))hc%uNBL-230fcT}k%{6HgS36X&8OtN(_TrcJ@wzqm|8E64+O!gfH@b5hM}3WyoQ7%CBKGU+*>|krT>z;0ZdmI zb*H&c1#Eo}*sj|%wf%fJoouj#uaJv)Ide=g98>`CDfZ7MH~E7fpzfENlR$!J6jD)% zxSsFlGqfZ~WK1ovBzYI(xy{OpG)?|0qtcWTIUl<+yvNU^Q&-4E8(Xs~(Cue;xHFH` za7I@IF}rfN59%4gb!rTI;{bS966EE_y_PB0lb_i^Ui*j<5Ifs(J#olgQf4l_Y1IrP z0s6u3b5?)b9BeOli<36!ex)<3%n7JHm-W%_KUZX=0@N&my(rLn3&J&XQOX<}Mh#D3 zc86&Cp3xyCpe;Ol3%s1*KTAE;r%mRlNi{E_rVYm^!uffi(L)plLIdOxYe7n$q*Cia zsiZ;ubprC8B7Gb@`Vc15+G1QMj%b zy8f?(U+{73vQS2VO4l^SIGgQ9>s5(_Wq?^HSUsz_x4}Qm2na&7VDP!B^!__ z5_apYvjGF1v9^)oxI{Mup4m3)+5|q?6UY`SvMXa!sw<{OZ$##o)Kp%+S+TLwG4RsdH zbdb5soD46=7y3!mk}I<52&=@4p}lv;NiV`lh!CItD~Uaj=%+vC**=x+7f zbsNT5Ke!BE%Q(*B@Y>SH<+HsmGj;H6*2%LuGTN3vGtH@>U(-|oY5yPg-ZQMptZN&N zI*el<3t$6OR8*RPf^^FO3Me)}x)K2q0zw3Y5U>}JF1<+;qzZvxq*>@KNDE1TSb$Ix zP)aBv@UDv)%go%*{XXBH@7Mdw1A%Z|Ywx}GD(88w9OT{!AZdZ0m;9~35^+=I8FwxR zWbt#yw3QhLHuSjeFDknpXFnhqKN7Cl;n=jx`EHgV&`$HdiDy9RXo#W*=?2p1%;+zm z^dzgGrB~Zw(Y{U(5{um`_o0tun<&6b!Jf?`2EU23h4x@dI7B*xD?w_Mlzn`p71B?H zh!;k^$RouB`wJIYDrLNPHSc7sa1#( zgmahwK)q>0^IIhR&gyz-kBntlvyBzElRbyAGo3p?s6u(-amnW=@`Z8Wguw^i$7x1y z`ov{~|7nUEu{!_7!IL(2knsQ#PT4-U(=FbsR7(0l|Eo}Z@k^d=DN}XEsr+=SS&vec z7lU3D-d7@iMHmcXCPdtw!_UemX}tWjoci3x{?>))p|h2@JIP+V->DuuA|-_&Og*X+ z;y&-Kw)MhK?3c57H1GYv!GPst#|+`;Bdt0UIHn^m(31(wUW47CatbAfbW|Nshjaha zM)4<^#70-WBF@`Z%ntP`w~o0y*FNI#HrObys=d}Pq$p$I-k)^#1zT=(i3V?ntyF#5 zh>^#I*BjL59TaaH;F^BSc!}6T&iY`qMu4fMtG&XK2PGxws8 zNR7+s{bexYK%B#@r;CFw^zemTyT#!#cxQn&b%9* z2a~qh6fUcIvvdgSd$I1u_S~|%e3I0LocIV(*i{_F#31rYZ#~ zA!uzmAGoFDWsIBt>Kb5N7?KaJfUK%@xKuEhp2|7Tl&9}a8hIjWA3JW#Ey;`(M_u{p zZu3_;3ug+^p2-Pr(&9G6W{@RoZ@%8LvqQ`L>i4~3V!vH^`SI>g2+=ys1Tv(CXfg0S z9a`a6nbB>IUg-Mz7j{wRM!9bzN+#O_YQjtEu2xT`I)+Ir07+~DU7G|aNoH=|FE@-a zZys-Q_s;6zSP$zj7xCGkL7R-Q5IL)_ZFdg?-Yz0a#F9A`mT8I>7M7vJP|)d$UxsB0 zpktIf`{5JtHQO*w!}mEws;p1je?DluNTX)}yX~Ciy!tqpyBM|kXO0UvuNf%M+ti6d zi-^_Lo*o9^YXq%Bu*EoJ^HtKt1xHJ;j1<7$X0uZB3#0yT{NZ*}2h9h2O zZa_;qvFbyd+-eS2KKZxh 
zPwl?R)xy_}pw^;6L!vJ@mr&_ExvyZ6Z$2Q*~4gP>0d~Nam4CkY$#XPjUxh>al0j8qr4j zU^7)!+(XuT=@A?#9XRMCu8pPq#gf`AO)O4nz!23}zdG;VB(0&ExCOPj;zt_Z7`Dc; z7vtD6d4@kh&{a^fnJKVqJohmoj*(L02EMAPYrIaY zTZnL+J^(8J%ddTp9b1#7F+dg@Jp6zC0Fxxi`U@#-N?7%Usf)?~U0DUfWZ<=O{54i_Xd!M~f{s`s4 zpGoK692j!dk=$2CsGC2V?gjcva${Cv`i7q2$Zjo8f4H8T^6^*`{q1}t;Dx8l9Xt4H z+a!S3s>8gD@~lZph18?o-sMx9mZ2KHCsxn5*_@2ovs5@j$gP!WsL?OF%`m%_DPYwu z0qzR`9C!y+@B%|Q6z2V>+X_o%1$#hX#kQ4Qz%YAP!26IQOjyn{e`PqjJN6L(~l4SG~woxWS zUueyA%74&zaWIF?f>;M+RIH^Ex7lDu1I(>Ib4=qvk0VB(9n}tm-m_h^MpxtAm5~aPr=|w%+kTBXEE1RP!*vp!1 zi)Lhs=%h^+r{-!Xa`AJO3WHUjkg!~iTwNJA-4YLBIrO6-q>?H={oQPL@PAx|3{%yn z)XU*Ql1FQ1N}rz==e!M514^D7dgOJXjR6`h)@jxodRBXHn-@B1Zt{v&Y|_omk`}Q9 z*|V;Cqu`N5>I#rQToD&-tZdEQ*xy*iw;(V0qeu9s1z&nsu8?hzw&?dtQ!4ak)x>%4$qJTb8D;xkZ};4(BI5hzSjA}+R>nZ zes0=rw-TLGJGS{L&7YJH68d%LNbcD{9iW+&qWyz}+!qoOWQ*MsFzf#EE8ZakH)P^B zJ!IiGu-;W`N*C^2-bTyZfIaH{+t85b2nj@d_HPMzlLEH^ZX^`qs?i#Vt2#Y0V2RYK zwEw&^7wq=xOrTs*m&#dAPq>id#)QU+BVe>b@P^yKa*qM2_$9w0&xONstGeP6#uJ{N zhAe)Oo4my13G>`A6lfQ@9ke~6uTwe z;_yO%0oAP9(}Bvsr3G{1))d2O)qpn?I@F_0O|m$3HMi;%P_Nz;PXu*Y>CBt%pWE)@ zDp!JN(Dl#CLXE{m3U&V8w}Mmwq{yw-nOkw0r2+d+OYQ7yG%y(;pjSv{P&4jZwbqUSoeDc*A9Ul;3!p0_-n*ZjI>1ic#_;Den!SwE{psV#SYBs3Ghw`JD(>s_rjlKy#! 
zPX$!p4o8hbMSZX{J#kz3#=H;wPzjNij(Y|%yovLQ8W5(q16pV4ufKVDATFGA)D{jG z6cyEk&E>U6R!P{BzizrQAY$-@Z7oB_i|70Cd-OYK>abG!+X z#trsCIG(QEM*lvZjh>bB>ep9Cfh@%UVwpYFn2k`~-VN-dx!Ia%MBs{mLS+FT6`4VR z$jgT!(ZrtWU25Y!bFfSBG&MA>b05?VRb|I^xj3v9$THuJYlt7Hvp8pC06$w^5E2)b z#T)CjS#!=tp~WTSRn7ZpD@0imyXfiiZ_!vfe@|vpQT+ACc-XHLD9Ng zSCGf0mIkWKq^VKvE!>1kCdS4k#^B-!jm39Tf3U@vYVUGY(pJC9t=0o?_PWp@LNcy)o|aEhD}_`NAA zZSl9P2Eq|6IrL7z@3=EFB_nN<=UgW{Ma4cw28lvtfirkU<^WpqWHn06G-J+fQ3iK);mGIz1Q=Y{fZ zimYp>iW{_t}X`x*}v)sa5k`?dBSLn_b?(Dn+4Mu{;_jP8U>CYbw1&%VqKukM{23O zYqk=TQk4n5)ibFoD9`1B8n^z<;Os_@(2d+pMVIfpC$O20?OO3(SydmMFVDF%)^pPc zi=MK+Z5>3*Ky=2kZxD=3^Qx7~PqReYrUbM*rRcKOqUmWetSnNdQvNtysb+8?dQ)O8 zA8C4wKKEh92=9;v{4bj4bRD!iAX4#~IY&*fad>qi-j_mEzJ5=@hh_GA0o9L_3sB&T zH@7~qG01GPp|rsL$7M)wS=z%QOi*RIyoy-$a&DaoEDvPp10u@R zrgdc41yR@BJ!1uI^ab`*V|MU9{vJfnnAN7P?ii${cQz2$HF}ehsH$+%-5^%T^78{X z>W4GWktQI|xZ!P^j+J=Xldp|8P=Vh)G%%lC=osh4L6&vaG_TF<_3=&fq&^$?I=mL6k{x54j_+K;8rl7jw|7c;%P+7nf!_BS6M5*1hk= zyKxfai`!R0RqBSv0Y1`cF%|hUaQo@S(e#jdTh+;!A;aA*d~g;xi5RPCqZ zy+GTrZ5nm69ppy;R(=NCPKMgdOReW2GRQ8jp$HDtUu*KWefOyy%I=t%YKOtueXyZl zfm~4VvP+L92oF~CmxDo;B;|H|L8GZr&`1@9`Mr?yvlwEpRlnNBDZ zHUq!WX6(zm8G8$v;o2*{+h_|0G)i#$h3;X8muj~ut2%miT(t;%sxwJ zPxS!d*l8$TWk4LhS;d67k_@Uv^_fIUYuF{NdQ(gDnBp|fZr(k0BqMxU$I|IFNSoQzV3RPH&x!^Eu+qI$($J(P24uAgfo17LGdMf zYd-$+1OPegrJ|=^>eBbW6Fg;5*kTWe5NfTuTo~DR6uU#(??vnLPjy11d#jr=LpuKKfO0_ctnN@6}ha(U+nzJx^7} zQltm42chY5Phc|!($aXLcjYgl1tJx(<0PZIVOI+x6F|iij?L8@&@p_0$=SLOH%cyz z(@0*ZAC|*dn;$N-Mz45>Ntie&gJ*{ikoM+^_6P3iEG`0xrgbK zEoZtPghjl`8}s%=vT2!yBG z9>Bp!zjfR6rJw(eMhS}&15Te{5%0+4V|}Kb{U#QOyVbORJ&N>zkxCr`S_Orz+OXb6 zK+M!X3MjJ#sPz2gHq>j7jQKXs2hq+&s+CPQr$1XcU-(rj;vzzRc@^+GouiuKO38nu zGcnPr1IQS6QtR&Ls=v#}EPDFAELzK_C|)Vlnh)?~nSfM6z$w+)FXPH2 zl`l6-@iue^@h!hVlsz$BnoZLDb!kr=Zk%eA4=8uF#mGe&y4+cwD3Q(<^Brpp*?wyo z>D^iQFF+!};t=;IHDvop1iV>YKrtV+biX`t75}Vnm1EPqyYBXs+EDR{$cY00xh&Z_ zCsDkF$1IX7qi);Mlh%`dAZg5h4fS1F3w;ySF}<*qioS1x>SE-Zea!YjeAU zx1S-K_Zb?lk(<<_g~P0)AMSsMCb*-sx%d|8*~m&ljk?EkkbB?rbK&sYYQ-;Is9Zy^34Wz22lDL 
zam8#qZhUc#NgzR~TSiusWwr;CJ0cV?oG?u84bc<1;_5w6Ofy?c=q@17skB-z`oUjn zTj9J^c#qiG)2D2?W9>`T(?*m7Mm)v#eyp#Z3ZEoE|KZKV3L44z^`e>C;zpHozy4aO z7OWQ?&j%MYKxPTTTW*Uvw*9 zTVp1~CU>jP8_h>;Y*aNg-xHu4n@(o2(vpI6L=IF(sPj(1cUWe^c=QT1t==Qb2ne;R z$B?^gwwTx{yI1FaU*(nwq*$~N9|CF8f5`}^Y`;FD>MtXG%KS3ODqegh{Y83n?{|Nf z3!)+mc|VKk0TR`+37a=5*cg2wFF_r4{YT(q2*q%yrep0c=-EBhbZXGu3PdAf_&>i7G49JT zwj7TBdK(0L6tIY5V0>W`y<5)YDVN+|RIi8`iNve3g$S(AqL?G^#Xv0W;zThhn-?a4 z3WRt_?H2BIvws%?WUFoeiY^Oqh#R$M4?A;gf_c-IC*5lMmF?V)lDI+P^23!i zK`Mg%YgEt6`vk6#Zk-IOpfeomCn$AKU*A>Zgx)rNWdQMPN0oq$BGIk*U4-|%3DDL8 z&)R_B2dE9}(Eb3G;91BI*R}+BU9RUh+$dN2E_Q5wOoV6GQXeb6WGBa|88?#Uoh6N1 zntN#xWy-?iL_D23JDqpya{p;Gr}Nyrv4zOgVfrWBUDj(+-VC%(Y!Ux-ost=JIbruyYX3m~mfEl-7xHCBx>mZh8NuW&|U38PofjJ=;HqY+_X z4|CXdE&Td1k3*4MGeEgviQG)h-N9ZEi+Ilb<%h|$P6r~DnN7+Dr_zq(@;wR?^&9B! zil93x^u1Z?=gy%fKiTh*?|J6%=!SB6>dtPBYgSINrLaSo5a z0MRgv>wpe31%f|LNBlC5Q>FSmNk(Mmbenss zk6D%r)cd@|Q4LIX?dp|rnd1iV9$%r8>~rtgXO^ptas!XO6*0Q%ol12+e|&CQ5AUkQ zn0@A%yN=6cf4K8bWEYoMxT&SAY~=*Zh8Q9&tivlMR@qYfK%`6VMk_tQ@h@=e+OZ^>o=o={I+{*79IZj z&-UfYKLHrJ<2nF)tLs*Lp6<>~@;*)8>zfYFM|J5hD%r_3(}RQu8O$gAe2QHP9$WXe z)J8h#Sf^FR_Tlwd=-tx3+_W~$if*c&vKT52S`o-B!kxDF0I#DXo%MtnPazZHbfkh- zi86lbzi1z&Sms?yYldU>uKldPsag;(b^~j6!`(;l5G@4 zU}?u=!Y*Yh25C!K2JnHLH52di*SdgGGxt3MV~fC?>cC5dO+uVDlTogqF)WZ3}Rv zzdGtM6VP;sW>(smwm#Xjlp$xJDuOz^^=DrPRX6%{1@>~31%fNc3I;>ZpYr`8A`>0x zCp=T)p=NZRw9STE_0&Kn+Krn5Uqh8>1nIbxkET#Mi4zIV^+vOajy+nJ4(_Q6cy$u~ z!VkMch&7ROm9fgbq^IQYy!X6KeWp>K(>=6EKx9fs&@&`h48qgue{YbgZpiMl@Hmau z*-KOGLma1-NR81SEap3b>p3Ozq!pbf#CF9in*5BNuuV@&$P&o+;y4F$P8m9%5}W?r?PLh_ARQipbAwW(L2SKwacwa>g4u>$ zjJo@e4lHCY4xCf=0C^2p)fZi1AgNE@g^(LE5&Gf#^I{`-Pmu{zxDc4G^w!_pMD}be zEEEcNEjAKT9u0s6HSs-%p2yGwU z$i7G!uZ`f5JWQ&u!@OY`)4zIyO8}cEjx10tbJ35$f$f5iXmSJ0tQo-~xfW>KY#Y!ZfKD z!j6opc*GDi1lX>a?#z5{xD@^iG+FEs^&4GBuk7g7ld75`WgZ{Z``Y+dTy`dnC5TNL zk{A6*l7OR28TME1`9(YmO10`a5^>m^P@PD`B~}0`2RiYedZHPy!E^S(*0akFTJrpY zeWbQ>yoVP@j`|`wSDl;xx!t}f%l>>bQnhkQ1p;wn-mdh0)(?p453Vvl`~K&6ea-v|E(sSO{LUms@Iob*IZtAfy=YjI7R`iOm 
zFs{lnE?Vv==X9efulC$v?vk6n-_BGzeCF*HJv+t2?yc~nUJ>QRBmPC$1i3KLiUsYOUvUPo)I@iy5|0_`ma?(RitH)?6)I-R`K3lntZ^2#s1Q<=q)YE0jqHNK9U`_*e$*J zsbY0OJ|o-E|>MroU`iKi6?0V(bbesS{8jGZ*iBB6&=p*b-5J_o#P26IfRza6%-C zYQbrJ*})J~AHn8%=;qF%VL}YhJ?sW{rQ~5`y8yv^6 zAa2+2*nRZGrVF1;y-U1i6JX~TdK9S1{Q(({>wH7K#av>1cn-ijN=ERP;>R>?ns@S4 znltx(wD?1rxs`Xpj5XqIQYvZEsHCIbjp&vT>_6tPav5tNQh<;9^Tr)~QSlUJss+oD z!xmvI7}d(lcrfd z%p!TiM(b(jqPWz97;_0N^#j`t-5ssy%-MnWW9_)h**Wa&P3A>}3+*F9IA*R*RfB?h zk!6~abbx{|wZ)V(dwp#16U)1aXn}Jg+gc}q8lsH7H~S0CtbMdx?d{YgrKpT~fLJ~l zt%Eyx&SvJu$TIxZ9J|L0mlq!OcX2x9JIH+O1Tt$UZKflPh}J2jOQUku^wQL{s{Wz(MsxECZC?IR=Q>)KL(F{M;5SeR zL-(W`*KmE~jhvzSkyysG<%nE=zG**WUV3|FwH)C=l=`aDPW#y}QiC#!4-0%FV1sr} zCVujuwMTK;ZzdOZpL*=g49j@u!@XCT+7zU}X@zruDuDEkLbttR6SO7f_3mJ&K5mKA zOM~50pgd`+m=?^;E*k%=hn?riKHaG}ah`XGaHD_wrnA9_)}2$|nM&B|57ajjvfbx} z8X|8k@tbZ<9slfq>rY=9x1gR?fc-TB8O(uTXbPOEFtW#l%izq<1H2rDW~{#Aq4+lJ zzP-Cm>J}}Bi*)e^?ZXk{VM(~>^sqPC3VsKe6A{L^Uj}{Js6M84?b#WsnoecPnoKXA z`i-*|3)3ocDD9xpaaOmbLwTM`ek{okO-R>#dV{E)8;5K9L8#VL4d>xa2xXOxxzFce z7>uH8f$mkgemn_B*Q4GOn0)?3G`IIEI7-#Mo{&U0VqJo!3N*aD9) zd!aQ)^GtslzX{b8`V-JaR@rlE!ureYN7lWxHs@c5CCmtk22oREKp=8gBM#%wvNyJ8 z*3E^>Xi7z$E#k(=OAFu(MNIN*qwZydN0~eu-;cyhE^%v2oC$6-b>~bs7hb-vc=3zX zV6mph{CaNGIZcMGM}v^9L5?`bQ78xGrd*fqGqImNF|xk(^P@Lv>0fVc;$3ya*G8St zv7ro`rZk907pfHdDgv(b8J)G=x+cord#}XKC4##9L1v(QR`#vX+#r&!?5F5?Ro25P zm6mS2phk{q;}|vfRpbrnFr#uYj0|^K`;WX_MMn+A9b_nG7g9?U5i?5i9anYRw zB3Uzc*I^t*hpTFQ9fT;XhNIex3>vwJ;6>FEl>0Xhni+tqsADR98`0*tlc$O=E6#hO zyJxL=9ml9u!=ZcSXmgK2@?j>o^5_vMGl(1{aKh-yYIXa=qiU7186Y=eNP=!kk{F(NYO_2o|TN{eS5%A`_|U;YXoh#>41 zQ9UPO!mqkj6H=Zd`cZRJ3?MHKX|>KpN(PoNLD$k1Rhu;;PL)3IR$rWwu|hoCn1Jnk zujec2eifwEe%AKWx9?`>SZ8#LMS`B?3z%96fdbKYAcHJyj`RF*N;+ za}YyMoCPV6%&tc)ige7YH!>IyG$r(oRYflh)}%+U)JiF1@ddI$l9Y~hog%-+IC`~= zDu+m_>$B3C)>U^k-P>#^C@_5 zz5aX};?n0_i71}elC>w8Y&MtP9W=UR)k|d) zEkwYCHe_G-A~v8VkI#}@()E~sVc(J>zsON$HHZ*h&pU&HR3!F-BJW}`O4Aihon5Q zvW_C5e{2>0o2#hJoj74eohLFcWJuLNGjzW8J5dnm0~Vi5+{e8QT+7X-|ywve%dy4Uax|AvfS0 
zo{Squ@6)cv*@@I;G^}(Qe=pl9N;G}iurl|(CTKi*3HGfT&9E525`3TlS~%8N37~6> z-}e$b+`qh;nX_EszF|CmE}1N`gxjwP`0d^bMn$m$41(-S&v-mNpf`aYp9n8IKs1$i zPL{#3Qvgl1WR0$H0X6OM#!L0Uz^YnTI~O<6P&SuBmUw$cYm=PZ?0`OLv?t_i*_!4K z^(9l!aBg_(G(QD-AW%^sY1yAJh(^dID3n=S*BzcI+O?`_>MHaTlmo-L6FK&tCG;rHy)we8uIH)j<=GqP)k??X1>faX^@(4j zo+aZaH;sJ_7#cf@AHh|w6prakkg}`vKiO@FF&73&pUjgACx`0~DX^rAqSf$^fnJ6N zOebDlW9pM&$Y^bIiu3QAWG~&8c_5rl7T?0%sKtFq6IS(+uFEou?%X@viQRJUa&DPc zc5z-r(QWGDwhF;5WA8);x&94dsrHdC9zw;SqqIM<=S&i<6b*WDIV9uv2}ANe;jBD25rQBV1)NJ~V7OUmukEi;82Mj6&Cw*?YfY%vY+?zWO&V}OeMjbwElE!iE%rYv}vhktY8a&$&$HW4NXd&IXZyX_<8gSGWrrF^X<3; z7JqUM=B{xWVktfjU60+Tdp!ft1t~Egz5#TMp0!ViSH83t4s!Rcz}YC^n35*L8|5@l z?bI^JJTB!yr_>3&F6CkH>J67gWQy7hYH&p@r-rz_(jkc*bVjzL$uGD!47E?K-S@J_ zzRq$Aks%Qkp`QkGdo!~N<*loP6jYvB%)X5n@lqJJA*J0m)hgr0$`p1-d1|)gUAuHv zKm}&8jU_hNC6m46d2(ETw|=_8J)SQ0XtSByCOIpg;oaZx`?zg0Iqe)`PWOom9K%U;uy|RJ(+n18bQ`)I&Oj+CJ zA!c)?vID+Pu7>=g%T>-gKouI@Wvoe~pk0~8_G_ENsP;jv%+)qIC1IVmUxIal<8L{_ zgt&O?T<^v_=F2YEEpzsE?IUF{goE_Xp@eaZBU8?C%D2Kvnm}s-Tvz^pI-*o)Q`jme|n4S)-s|>5Qzf@tqtw6AatzlX>$6(ADoK3T> z6g}r_IWm4(`n2~%Moq;aDEnxyD;kM2-2)iN$G{rfY_ji|o$Vh-vPHS7o#u=416=Y` zPU9xGnV|OJ9u;J~C<8&TE3ed&4ohI*~baW0zDqJq)Zn1Jg$>7 zSwW=lR@Y&>v-5nB+NbkUCGN;(_o|~7oPrTgDNb%H2bcbtG|QMDpo_+zl$Ys4RD#Fi z^?Y*KFlKBzw=9}%_K_8T1m;1`={^`tM0TqWOQ_uw5twzc)*TD$d)elhDq&RIH z`tuw_#2CMa{FPt7tvHWrXzIVc6~;J=0dwhQc0Wuz3uNr{njVIJG)ZRb*Vi|wz80WU zsRb_PXZ2C!bH2NnySKOAw0?@r7DUGG-ORo+RQI6Taa~!b>J1{u5)chnAIzX+hm=fN zQKB`|35!sdew3yDxjajnRwAD6rpA%gs3v~=++)FBg-71VY)Yh@G3Y6RV{n|WDLT9v zZG&m2hY$S#*#7e%q7E#9je@#(CU6Pry2gkKv2b?xvbwvZx&{dK?c|k?GV}!KW^$b> z)1!8>G7^T9u?fRV{x+Bm8TEKr_phu4*>w$v;i*s;7F@A@&axFfg(q(GNg3(mSc3_Z zesTzfYPx!_bi_R6`GLtwQC!qyTvL@NuXEADh(KT5DLJ&YbjY%g$Q0|bTOl*qg?gyN zDnBa2%Mr6$0w8fgnFIp9D^S*fvAGK?oQ{Nhqx9sL5@w|KdZWBr7>TJlcfu(*MVYO# z2<7ynswIwm%tL@;E!5#*p5oI@3loE(RD^n(k>_?J8fMeYYCd;mbq(cXf$#j2tSs`Y ze_vQ=SZA4Kkbi**pMZ9Pl!5J3w`bb#Y}fckg)&aR0UK1Kf0_>bAO1Kf3y2#I$}NJi zn5U7&Z~@t4eXor3(vzzr&5G8?xtjldbT{%0Kb=_E5`ZC3(V2*Zxq}|an|z5OVNbu_ 
zK{|mjzb8yls0!fReFJeKzx{oSL*RT^?(T*j0m)_?7^*vn43|9oiJ^BYm?72MIL;^D z;N|w%04jEW8>)_c#7{0Ab>P@>kZV-I?}ji;dPFYqrET%FlvE0 z;QBXu*nimCK;kIM0xdGsc_`YVQ1aJb$0jvE*`c@aB$jZsK?w_be)W@5Ta$&wQBP(5 z>nQ<~Y?2%QZj#dse}%{UDJA<`>My)J>os;jO0RU*n0+QadiIWT<)Xuz{_FOD6*~8< z|N9DUi-zex9f*MBfTGu}NYWmMOAhaS4i=DLB&;>3gl9FD>s|I#cnRvM`~U43YeIf3 z+Xp6y&d4*$uAF8|IohbnQBB|DUXglJjQVa}!s-^vjOl_ZQ3H& z5(c5k!{NNtIILEI3~eM-NhZ|`HhP``P1!x57X1w#{@>0&IF#%EyJH3`Kk#4QDL`$X z`dt-g27X2tW@f4)z5q^4><{3`V)*)I#9t99OpilFeax^p2r=FCktd>hDwpa7OP_u} zP4KhLKT`02pPfjxu}x?WwJL(pDBD*n?nmOeqJ_NT4U5fd@0YIUqe)88>%!t1*eI%P~>Bz@z|LKbJ0g(z9 zV8~G^{zbnpK0K7T?ZH0hP*2PIZ+|yl^-tU61NR<&|H`j_RVa}3ArVkJLVSwqwmDmN zDWcM#IiD7#8Ty%)B(&tT{y&$d>Cda~>82k~a9sRpM@Nx@e!|1x3DbPcS`j`HB?T{E zfC=<0d!|8OjXEu~q3852cae;%W!-AK3q-l(%uEes_Mw}==lS~_&U@_|N2G+g# zVP*oJ)a;w;&e0>;c6Dd>3`NL>*Nxr{HbwvS)JBPzrKmKR%$T;^#G!7T$!M-cMnh9_ zXm38|fBJkw)=(w}Zn`G@AFJ|8KK{wqQ_e=ivTcgAt>qej+Vt`X!{XU{oeNfv_Q~Lc3QY;U(ft7j#;#0zEMN4!P|5}?SmCbfW8LlhrB<&S%uGN zSXj{5e<-cwkIOplw6dk?KL6TJ(5t>-x)2e(0M*Wg37kPP81 zz#h2#h$w`5ssy`qdGoNaR_@a3`~T~Y+hU^K(s$6>vKGpHrKExU)x__Vrw#xG?NAB*i?eb!4^vhvg~4+m!!(ZvKa5 z5hpgGb*^P!k=s(CUz%5SN-<;l#u{_@AQTD?!}(j{Pdsk2kjUi2PwH<8E`al@0{g(u z9D>vNSFY%S$?iC+*PdhhtD*f@A7_KHufoOt$P2+@1HZ^Pw-wvL`-~{fq+9Lgk|nq! 
z1(@gr-i3nC1-9zub#c-1N$*W_a!+!n^F<2flY;WLI)Lue zCsVO3TsWtcIw|j%S1);#%WPF>IAjenWt5ky9M1xhMzflZt?j&GLIbwMrs0q}3YA{| zqy3%WqF=DhCZJBZ=%8!ON=4cPdfvTAb3?1Ct?Q9&-h?(Ti)L{SqiK{M?;9o?d#s&& z#mFD#nQH#cZux>e%|5DZ3(E0q^i_4?TBC+4+?QARBck7YV(-Uu@>ex$(l#}pB~lX1 z2l)e!YMZca&y)66=DO;Nb}JXHi66=Fur0S^pL;yC6&Dk0VL=&cN+bV{Lh1MZD9onU z>#xk4&n$VGh@ta$VBIQ8y=bm}efu^K3^J7KQ6q|-g z)1%#X7700|A(#H+I3dhv{g<|S{1TM$gTKl+h*!dmC1CU)hK$XLbyW8sruC*}f+}stC1yvThNo?MDG_h-Nx)ArPx--S+HreIm(6Qz#(v z@7uM3pK$H`d85>odBvx!l;%AKE*$nTyFD1uk)sh*MT^@}Ya~-vHn@vpXLddzhd1F; zvRk7Vick6Leq=uSyJTvQm_7e+rv0`lE7-8)^4=XTt!YzUzaFc*oLxR6(dx@FFP~kx zVcJM)kT9s!myZ{iz}5?gh-;KK0=MYf#G2EA@U%7Otz%l-Q{ z)4XlU8ifs${2O+$Bk1HhmrSc0YglSDvW5;nkcFR)9MYeOQc*n|=P(Vgwg z?6vVLC?u+Ps*oULZ9mfZjP&E;4m~HjKb_YP1v%|8e0{LWh)dF^u~&ln&4Upe;~#ho zOsQ>{*z}O+?oe)1&@GdS9}$?4lfaZA2l~4*hJkc7X*nO;>UOs2&TAE=@?Wl3( z)Jpo~F|O%Mz5iEyWLQMJ2+n_dm1*nbMpQY}O%+E4u8k&3?8@!PE43!-reYY!CoIX^ z?UvQXJ>bz?IrHQ((xWr3wH}$S7TJ^%-V|YHB{!T&unagEeW};Ip@2PBJ!4#$%Il!_ zDz>>}fvna0>I4{laWw{#po;I>3C9}nbzPdi? z#jjL!D8d-CWUkQL(KgoZlKB`y?P#1ezJyTUyv_sei2R>YeSKFA0ezZ|aux``Dso*o zQ-eZDyh)=v3nK3ybTaT)Rh~o3X9ozYmWa~_3YfSF?REW7V2$}cxVdxUgeGlqSiRVr? zo{F|xKUxyRiJEF`bnbMw2$7Ie7VA;vgF5$5ML;XV>&B$Cz0;PC8(45?OR zmc==+A7p=WfYwI7`?fzalieGVPm#8JBet6zVYyYyc#-%x-nAkwgn05{zVnll)k9$% zkLkw7Ex4`joX}pEW$rn?F%~1-_bzj`BOye;fc!D3PjahKS1{kfJBak`_40iqf4+Ox z#ZTv!*VOW}OMHwnooxS5%)q8QvuxwTH!_1Jx3Va1PsFxYMRSbdNF0l!lrsxjIRop) zxQ8$X^#Ia~nC$R0=W;r7DE^C(%*_ zo?iJt+&w^L4IdiNNU7}b2!Tp zbx9b%O*&i7OzoeP%KE+6WN}?chX%p_J+W>_R$w2lUdW~U!BB~=GUY;iNus(VG%|jI zvYU20AC9}b!7}}Is0ZH~%8=RluW$9G(hJEE6;U3eo*Wj1Hui#5s_YzVQF2$fjGX&G z7|T2`xRn-Q7JZ589q$rG#0%KwgcDCD0e4oC9nXisiL(g_n9sqky6)=7{CtKE4ocJw zIjoV@@hduym0%zmkjYWfrsYS1yNXlQ8Zm2DZ;U@1s3HLc;! 
zBn!D^Qj~R0EChTGKPsK+na#^pWY#B*pVc(W8V`0YMNF_RBe_+IO<<&*eL_4LmAQ-; zrvsr_l!Q9g5>L(#@5w7(4Ob*JQ~(KoO#ezFf&m(x7q-X~y7%jBN;#PSWcIqqvK zRu~TQU$zvqQ>r`H*i58!plg*BdI*smTE19au1##pt{&CuJVtSRg+xx-$WR;ypM+_- zV#|!1D06dMf@Zy*-5EPN-2{kAkE!+o{=SfA0E>3+D_H zb}2?DE-83YKyr8Got(!kaQ(wl@qc@uEHZ;U{C&?5P;Tu<|`FDJ7a}Rp#aO8)FoO&F|UTfRC?v{AAbC7r`U!q5I?H-!E0nK&$h#k#-OZ-`#B8f4KCo`4b*?zRK zQI>%9@x*)DnAkEu$rV9rVmKPMO8KgHnWuGf31#~TAtxVQi;c65KM2t{JMXx1V;~jv z`+7c7rS0`2*%?pFOs!HfAMuP+c3CSnYU5S`WyO@>Go?!ko_L8CVCOy5-=V8K+)+`Q z&*T3(w;0u={iAfPZ*)+JCUf4*7*R`f%`TVvowN>Tkv$r!7bGc}JAA{M^l3~fMT7oq zZF6bBG4=JfVn4yuUSp(Dv| z_AgVV5v7bu9X&0giy0vrY_Xn_f@X7E{GG=cja=H??b|d-(}EOh&}mmC z>;XdhrW_H}bxl~_&1Qjw_x?%z)K7ZGCnR(%`GZX#nR(}qBENlApt-9eQXq{1mv%nmuX4gwb;=9k|w=UTb%on*dUs` zb$woz4vj>=LZ3dayfnI)8N|V_H?FO5u4jLBU(?)IHN#3NA;TZZWmfblm0rks9*{pt zXUc8ZVf|486Grq~V;D8vz4$QXg?=Fh_xJ9gIpxc8H$M>QwS02dG`A`Z1Ea)c1sg0` zEuq%M-A0p=XL#2TV(L%*X=2> zi9QEyQ^GppL-TpmEA+MyuuO3?_~fbLrP<`WendZM!_iW*pNfM>_Ge#SWMfj=2wKFp zDtM9^CoW?#mgfyRGjMMw^WU;e6%U80N3jNK+B(%;MW(izm+Ca>&j|VCG~&s${3@@j zbWjUFlT$b}7U~k8yOr^`bjG2Lfg{0r4MXPbM!%9 zN={8oqofM%{NxBTU2(r%yR zr&7B0S2_c>zA_Zl2l6&lG+TFgTl`{}|CzOi&DEw`tjxab9?;?+jZ_qRf9e<4?#o)a zPI*I*@iFih9Nx8*v`GNuAM~*Vsq4u@(G%{_?_Kr1g-&9@VIMTDIUBVn?^>^R7+M@w z1$NxVX0;Kc6;jz7&eT5Q2`87VrpYIm)?jBF4sleP{mD%3uKOMF+hN;%7%Ta#-9Exp zF&%MS%iy8{fsRpm^>RMX%|%T^F}^ua9407-%Gg$6EXaAg>TGsrZ42+^jeeYoneuF3 zgzEphgM76u6}b`kuhyF1%&h`!=M@`uL8$=_Xiw5vP)Axfl8SANj$#$tE154AQn zVm0WRA)HrLYQw*0cTAq8Ka0Bnp6q*Q3;`_XO4S*c?8|#?lS|f=Y6NUdiP+(DFnfcM z>A6*{Bskb=6~yg zTZI~QKa7;+zi0{fMjgQ7q~E245zrC7aC@ZuG=Uxbd6SutouOhTztfw3X*}_-HP? 
zln2kOHx1qj$Ab$%Qpaa1=4JWZ%HB{;>9pr?)K?>@V8DTWdS*}cL37cw=Sk1nUe+EJ z3Mo9)vz(9Q`5uNH0V~L|*aDbBe=mE58YP`2CSH)RvLvZp86agA*4|Z7moKt+CQ&;It7f!iMFVkNx zo?YlVn=U@)`vUT95zq?t1O<}}mzj1B3qn{?(f=1bjUt1T&`%krFcJ(~kZctNX9>JYGAgrz3&JXg<7KdMc zaj9o4gRDDd$u9J%z#2o|Q;f9RysWrsj;Kdzygb)-=lPqc{qe3O|4hu4s;gl0U3 zMQU}%=#Hz=KXlAQjp%Hx1*m3cS_2pK+4jf}`h8YE!RKeMpZh&nl|iAUzGXNP$m&pF{A|)+4cJ^iWU~Sv48YjU}dTp82$9qWxw6-jRY7h8( z?>auz?M<*JV?WZtO_!>U9=&r*@*>lzu&7E>-aWk#@dyxf^`=BU54(#yb)12f34Mm; zRS6qu`EbbH+v{F3U~#FTa&(Bt?LVdzD^ZSF;^DL|OIXRJ7r*bQa{}#f(5HW)`Wq&j zj^S?wuXLVav#K-KrjaIYsC8!<3ME-qOxA^6z+Y)6CUz(9fT)3}D6m<#-6mNf-NA|0 z{qWf%un`moy*4EgLY9%=2u4K7Ar*<3g>E8fO}^AF#r6k2c;RW{z!|LWiT~~(Nv6k%)C^Q*H_DY5{cW@#*}H%&MO^QapBD8?MLQ;fKb z%&ofM8>gj*?r@+SE}HiQ!2lX4klSQ5vhsqs66gTn*fBOVGese}?wK&xU(|gvE9TQR z#mmzs-vG%RwIp%MT>JSYSBos}?2tb4_!JT1$OxrCVtm)pQ$*}7*yF>`ZoalNtWB%= zXgdZP=lWBe2`ke);(Sk+)|4oy^ThIZ1WWU==B*YZCRGl?U^R9CR%UVP59-XGWa> z-St7*_>bExLHnS-1St~v!VTuH>38yAmDBdRZx!pDhAsEWAGpi|IrfTn=|Y+BNs4Pf zJ~yi#T(O!{_=|2&RN*n#!-;9%zd62Oef=WJ9&g0+O05s%AM@KgY*2EX%b^FKNxj z1_vxaQzcT4W-TrIs=I%4>F2MV4_Ul8|Kh;8N%wPL2teB=0nq}%=`ZdLpZvlbx`K>o zxzpajz051t6iO9m4M)-*7Unb?fpfEI9P~WcLr9MTUiG>U_TszXX*iU1G>?B-ACl+=Ah3%0cAYaNePx=L}qts!K02{tX)P!Y>c~ zN25_05_4hwGy^N9TlmDh^~dUf>o9Tk?+-1~v+7^W?>a3T_a?g+2kup|g8LT^56^ov zd~%s5lp-!?R1I?;mc>^|YCUFF5|b8G11zK~H>-EU!0aHuF~Hd4sseJ79_Am=s}MfT z$~rEUB@rr{p`U((DFQhf!}EX1$i%Wy_pZAz4brW^22+M>fY9w&8$6R`k8+b7Z=DLK zyEh(zK-RAbmqqDUQoklA_8=arZ>xmZC{gB>lY8P1xj}sd8%P`d6y29+MyB zo@Pq+Tve8<9karx^zWFB1TBjTsw6E@LJ+k1wn+9uGF0Udd$v`QGO}`z#W~FQatria*pr%dy!nA{9Ln4h zRy9gjK%+XmFH^r$O^9^BxT}X`1>~*O*f#{kN*G5>80c0WOsSQDAm-2DKlf%vD*ZR} z7RPOe)DWb0?yMHl>)NY#IZ_TEAI`hQySV3u%^(r`ShzXSS55HvONUP#9aZD^{O9C^ zPoqL;OhCbR@q{Xg-bn=>eluLUynWD-hu8%9Y81KKxu^T`!-(2FH*&I5pQL73)=xGb zsa&!(2{}A8c|9u|v|~*aIlBD|ZWwQR zd(|#>Pf-jZuGY2}-FMNfXmP#VdFku2lKBiBCuEuSRh07r=Q$K9;l|@VS$2|7Au*+I z2`LB5#udFu*^cuRH&`1|^X?JFk)J|z+ZlXzN5>0=fL=behqjd~JT7Cy3&;K(?S)&? 
zt5|b+JDW|O@HVFI7bq1RVPQY}k$m@wYM1g$%lkM|%PIjE_hn`-N#9z378^|<&XQNu zcOwH?P*?6W`lA>vP>j|x&Mqi!vUBZZeyp>WZV z>S2vIKl5}Mc(WGu<3(1`W3j6IQKbeLwCi};o+Fr^IEH^(s)t#L5$>!*m1A#gy(QM1 zI*qQfW`AXUxbUf3wtu}Fxll5*J)e`VkPV5E%n(xa-i#l11|8(Tffs2eX?F_~cSW05 z9AKP5x7wjTPjF9CwOQuW5_)>m6Oi5gWPLy7$Q6=)mTKX!t0L}frZ)R4$UJB@97-!E zWGRM)Tj6|cccq14+><18=Im~}qd2`2B4!vnEpFG#mS5;z8^DPHpyp&2RBtMLentQgv{) zXnfH!JjJnArQGJ0l;`B3+kLzb|IK4lpNOm!@95P{O?NKb`Zc{;xgeYo(0aJvv(7G*ai#_bhy zx2tWp4$}!ved}uT?@J=LEu8A?F@3x=m)@_}7jUJubQa$EI9&sFJH8>j0 zw~LT0Y|6mynL5YTwAE|mFCdUB8^y4^{bKesVAO@O>rRvdZjNiHc=Rn<`lOAVaR`Oa&vrZ&!1wUQIA)Tf0r(HmJ3##a|O zR8|hD$ST}zABf=nZx6#9V|VKeS3QOKxGFuy7+}nNbx2dqI%uAt9=qVjUh>c zeH$W8ct+@ov74c(9vBZAA@D&<)p%b>Um*XsP&%-E!4;=L#|T(~q%eT{ez{S1fWu>> zGY)0D(7hFLW*rDrMioJ1Wx&yfequqxKQoZ%_YIqsJzw>;hPPRG4$(LxOeG*sGmkv~RmDAK~>W0-43tBZaYrC)aF^zUh zW+%dvWdcf_-FVj6*Utyl@3r@f^MUWFPEMzF_X@-LzBbi`Cj48owJ>)0UaYxfRrm2O zc8&vG^5zT?TmdNY>Gj_Pc@Vns%n=D^{D{fUt|WwS`x%S!lB4PQru`}w-q%#0-I+AX zsWo-W|9FCbe!K%ofz~4h!(F)UH0g)1%y~z=XCpd{a zNxGL%ip+ZTj*yvE%Gu_Asy(&W3T0y@9=<9`X4zq<*P|=DhT_t2YE1)ik5#t zE;J{ccvxKY>v^gx%H)%yEI})6RovU{hc}?*GdJ)_T zbIM6RB5|OVc7ne&@|-Qi6C!C~Zq?r`2kDb2;i>aFGi#%9~7$JwjDNcSP-Soc(M;V_Wm>Uw3p~PC{#9 zUNmSq;3 z9%Cqv#!`~Ab^P0Rk+fU#+w~3k)q8n|hSi)#j%G?aC+XP>hWsRUu#s?b7TCb450npq znQ4JrxJQWa)WCV4JfA4wpV5axW0TCscB2bJ&XtR!Ri_HxRg$e4Z6Dt}&#U#t?=<+5oz)Cf$~Wjp%npMm;1vA3-MjCG}~rM)M+cwmBfp`cR1H9)%5YR7K-TUlej+4>{EI~!lgzQ{%WUw zf;uaK)UF2jGMXRg_2ukXg1w3nG{Gp9z4_udwt2ZI&$5G~d7L+{H;930VGdad2K=ai zljoTOrnxVUc`FE4Qs-O}dfW3PlfpC)bBY4Q9$+qipIw9Izs%n-GRRBgVhAtReLv)) z+z0P;y_Fz;l%2CJv-!5N!BAqFjVS1W8NN5IoR)MXL}ZlXdN6IF5|_;OPC}DkpE&9> zr*;}QksBRED|AJast#%5Jq1cbFvxEJ*m!}pMu3P30dCZ~^9YF%-gHj_Xp-9-%(jKa z6R3Nl;=d^_nI}dXQM++kfo}PQBB|1di|0ye+ijut15frK%>jp1^;y0qhbRadG)%1} z3RvZlH^qL{-S|g~c@%aJertOeIkEcZT}ogW1)+tTnn02Ep9kUhvd~cuay<6kAhzWA zz#rW#f0DH%q1Vr3Z0=~dhU;t>2Ph>&@8luO9V`Yg~ zNPW6W_-NJ9I{6R5(n8><3?N@M2`2FMvh=(}1r_q3pl+fNfg$#$})Hru|)-z<+>|3M^WU;kjz;4}1n6eIhy>}1IPeY#P% 
zfzS4T7Y*6hZf-oA;Pd_eE!_VWZi5n($;kgC4miF4KONPWQYqhb#maPgHP3XFwQ3j* zob`(z|9@+OrgV$Xx)O{&{=a@(Rv>^_Z^$ypHufL%+AG}3JSSROXq)+Hu@o`dn6NA% zg}q(l#W&f7Zm{Fvw(iws`06rmb=ht4i@B5~QQ_)}>6$K&kpMOO7c&v@Uu^u&c5F`N zSZk!r%72(ZyK0NwZtJw&VWd&mc)IK}6M~ZmFxt|;Yc}rLcz?%e&1?G&N|{$RcHsUt zt+#mbsev=v9bfO2oK~z&Ayok1V({dhr{Q!%W)DK;--}eN5Q9!mzYzRH#vFS+ z5uGrvkk(Z5eyyYKsOYq#!3IG7h&64m;o=mBMncF4T^n?l+JA!nzJHX0Gd3p~mbRWp z(^lEGKc<0)qZMmgN&b+AUlmsCHbrE=GoTP`as%Mk3h&MF8#^}p)xwM`Y>uM zf6ZKP6w|loY(sGt)`;)nOxIY#zq&>WDg?+{M_}U{|NH;;UGB(6LQw!L=j#*-b8mrQWPU)H^sl*UdF=tj|e99HIkT>)+IV%z{X z#_!9_;(05-Az6orHD~$#rkh2ZT&q$K`B9gFHvI&$^bPRpV!(A1>*oNZOBF{JmljH| z4IoMSU~LGyGCXHKn^3U6<2z8LM|%(ZIgeGrJl%#+mvn}*gC z?rSM*?KaWclKz$g=FtS!%1=hMS@Y}Pt*|rUy84E z-q2&0F_vpt5aZhzc7punT@`{I?G0UU zaBW6VzvKxcpHuD?xqYE;ki!?y=M08JAqD?^=Wv3I5ukXisgJvWGRSMo@Q*6_X~(FZ zbPt~tKLBUzW7;w1@)xmCYU{*x7bN;^j@v@wX2@HG_j;oD&J=u^tg_N-d)-J}J`zE% z_cs;u^O6QWVb>Rtq+KuE_!4p+2gZvw3T_suBgDP^qmAt>lKwW;kn@KNmvlkk{(LB3 zo$pI6-`85fe)~}_zshH%!Pkh^U;<<7jC|FHCx#@=)K$)oNR^w0K2Q?!0c)BZGFP-G zq_O){azhc+#o*`*N%*@g<9A7ZE|^Uef~Ig>t{uP&5X^1XOCdSZ_0RZ{_T8r zpj$bB&{7^Ij!gsmF66!6cW&|Pl2xWb_l~NJv)l`Za%{}|?EF`_i(Rumg1%vO zJWCF{yD&X(GUph(n;5Yi z{p-KsAZ7f%hGKoN%|i8Dn_6te7+iBCmohJ{NP#m@?{Hzz$3YIfqM1 z$^;H>W=J#2l-WTn{-6GH8OWn4& z_^vE{UUgV&I#b&i*;>_-7lz*hd>Atdjdf&L`Z{Me+<-tgG4JBvhwhU^$M$jFe8rzn z&E&tR9Qsw2&Yz-F3BsH#1T;Vrsc?6_@G{eT-Q`{$?XlVHM_A9TWPO-Lx3@Ws_YKa3 zXFnM!!jxO#9V?OzF-Ox$(5p$5*_=zPBHZ!Wydn>0V09mW%W|mIXVO6d@=aN|cmVF^QdGcAFI;-4?} zmE{aiLJizHPE1Ky=U0RU{7hE~#VaY`b9A~9c9P%aJ*N(BQ^N!{cuN;L)b(cDgw8mS zw;kGnPN-=q@T8vW@E03lhGT263%N$#TC-i}gZ&@_zpjf4=Hx->RkPH94p5h>x6i1e znNvM*;ZzYZ$bvLnb6-2%wPd2;t#~@(+{h}0!Nflf$ZrkBT|?Y6qFfJ2=^XHZG)cfJ z`5_xHO_y~%Gz@K-YF|rtBfFZ_cGf~KR=Wh^t~AqoY!6qqROg^>-KiZLUrb?w!ywq8 z)eEi#*VCHrh(^Ey+fr`ItN9rC*sHZGAN167oQ^txD7-k75r8! 
z@zI&%m>Lq|aWI}#L52$3#{-{*=R4pAd2BppY*t*&tK2sAW)}j>sweYw-{d-);RLm% z!Mx>3*Jr8OrQ9M&%eo46gSLgmCUNtuj=7DFQqrtOgRWSxsLhs)k{HTM5os1x~$i^nZ3x`+|lyEg|EoS8WW}F zM@8D~9A$845!TzKFpww#F$HsjUMnD#UbuDc-qE)olQmOJ=w2z7dN>XAi`sGj^WFj` z9r3ueTOXEipu2*kuk0tRETnVi1o)2S99R6|5O40Ku2lTBpL3=7pds<(ztxQyZ9ZhL z749r`u^uB9VN7wOB{O)1rpon%otsWur}q7&Qx;tviF<7ldfy;eN^*xSyoGqqf!3oF z7T%Gk_2g=~MxtU`lTl!Ps|&gBg;jeZ4fEq!*<1l$g{+k64V~@XeE#8nEwoJr4(--k z-6t%KKgGF^Mm|)XH81=QclPJ(-jZ_=$f2$c!rw=X5XnVaZ5vist$ci8N2akkXNOWi zc!&00%q$|;Bd=O9f#k2sr9TTih?-Rw?Z+o(@o`BB%;#IT4en&;)P>^rsP^5EE$h(N zdW9A2+T-23&V;Xov|BZYpAK7?SY6o+V18cSCrA032v82cPs(cRFD?!YiX3S?ty$m^ zPMM8AWiO1>Y8*<793mmiSPfybX7wIzY6O!L*wy@{Arp>yixZCjbzoHsrt*?&&C>bc zFW8T^al@(X&mc_zxr8G#vlWi+(?qm0PoeF{7iWT|(@cbyOv(D&;F%nsmi|qUrpOH% z-mY3viD+xn7D~+8`?Hd24Z&%fD~~v1W*42dLNygW%TM7I9fZ5wThz!LlO1^qN!C2rK-;qW3X*$gR_Av|w}Sde(DvN)AE2 zWS2_KHmM)*7hwBNv-fdcsPQiAd}%?dFd^W()$E#Kx)vim+E(^+m|h{7iwNNkaBHjb zZY&+|-)b?2J%)=THLKwd@RP;OYFdQZPHZU*3l%Ft-U569c}J!f(GEQqM(9q<^E9_P z7;iKNtM?SPPNelQdN4nxI4oDZA$tCrY^^f)2Eg`T2fQo z9T@E(aI-%|)V^aTNiZ6VKjLV;Y5cxsUu_Bjm)D1bvxSnkY-fKr`<#kW6o!FkKO$uf zR|{^a@FkA3Ga2|DAWQp43KLAbR;lb@_VKuEw!3w8Ae8+z=MZB&C|TZ%og;69@bz3J zM0Rv(Ol4+rU!$tvX{opqxF}VR&Z()OAM>GBxb85Sl79=-mru}1!Dp+JWAfdm*?nn5 zdc3@i8(c4(^IAs^;g@81!VudKWeUD(^ghcuQRdK!`87K+Yh=G+JODN%fJGwTAFuQ= zU)UFKbniwsHzXlHuMDSi*f28`DNMYi1YBN+ID=s+OB!(oL%p*$E0ExxUk@r} z-uKz|13UTiZZDf*Bg`r{&cyI(Nynu)?b}0YcK@|%-en$p|LkWZN#z|q8nPWbd;ivf zc1l>t3G*NIfb1C54~X(NK|+s!8~I4Up&-L9))zE+B% zNq02Mb~I#}%XJulb@STQDs8J8xcGA0s&C?quG#nc(+6s)#&Bz=-7$ziW4Wv&DkFr~ z=e^834qA_nCU~j9{dC)qg_~q5gV`Wb95FwE@Ajv0j`vH5tgedvDpTM6NKqR4zEZpc z^7e&H5S1dmpJH0G+jpfXkD6Orbytw}-Dc*lyNtS!1xF7+AUVLWt;akjy%S44R^in9 z&(w705)~`Vs$NqB8J!v|0*30U9EYY+zlP?kE#s{62#2R^* zj2jv*MjGtJIEx~K6r0ro$26sg@)55w?&e495%kR1^#Mji_57>R1i9M#FFlej8wCo$ zu@Ux71SX8Vu(O$ncTb8lt7u^c>pnx5J=*P{WUvX+2yhX{Y;5iwYh2=#c~Va&1mre~ z$3|5sfWy$3u+rx$Q?;Q7-X{q}_T{rU_?wc%?U|=A2DZM_2K?~QHwf>lkV;{x%(`F= zkarl@`?{<|4S3GNIk{S`jN*FyN%tglbKN&-xuf(;0Fjb(DY=f?m>rMb-DJ3K`26(R 
zs+F%0ZPDL^ZX*z$2PcKy&sTXxFu9bFP8hYSVKyFHrMt!BMto{iMzL;Mz|fvgc?eMMveXj zSpvpdb|{uleO;b88TnA#r_(2nB78x)k(Y8SNa(rM1WV(=hwGg@C}z0hh1N=N_A#K- z!E&t!xaFm3*$c4Q@;@gY@hUTRec*vE_EcE_g02b?Ln`lZ@$LiiKiCYO^` zUwN6M+l?E!c&||`l2c!^hc_x@gN3tAqw5KQJlzdX}LT3l_Rp`Zuy*|2FCJh!_YVA=&IA~9A6uf z{w{dm_JkVVj3MsqQ>);7MXFB>6a-GLuuz5tfwhQ5iH1z_K4@66TaQhhPhS-~t32xMj`3UR-90 zE8{yNy&eW)7_Qybk-pn3*+%7xA>b}0<9}NJvGOMh;m%`fRap@Z*dg|u;zNSS*@KA0 zjqA8{9@{M+u*$pxuiNd;7L|h=K7kNF@z9E9M1{hCqJkKi3DsVNcvje4k-H$LXjyfIBUSU)9{W zo~MD!2Vq_A23b!hTUw`%tBA4=k`P^lQ!I@)I=*K7nil(VU2!NUwD^&c2NvGzb!URF z?pnzVu{q3Z6P0hD8qOd3uOY>P3XU_3>(iBxSiZ6}S6&zNi=B){k@%fpm zI5N`j;>8Xs+EWuXhz5IBU@SNSX%gPuQ-igiSN$T|gkp`e(1nK5%l2sD&>*Q9y|oYp zDw_Z$^G-dY?TPo>b};sVOxl^`%Q_jPM-S!uFE0i3#%Be^&c&R@QN2=j!W!ncpk< zx67S;65@69G4t}w(X#R5T|AfOgz7FBOkx?DS`C1FnfOC?RgE*X0Pi{7QyC4~t7^9m zdW^XVXvv?rb%<^<&id)J4cdTYDFFUmNd6b(q};DDr#ldx$3f9C8br})^f-#EJu%Hw@(9x$d8}gxTiMkLcQzpl|J16N*KQ^9 zQEm!c_m;te$H|VC9Jcs#&!pP)41Wv*)92KBrJ6P;dODXuaS&v_?Xi0sZUthfFC}@i z6F51@JgB`242$+liW_ak-{@7eOmTsH>9w{a+H=^|c*)!|g#bl1C{RA#uoJFUx%+ow znGzg*mrdDzXbI`O-)9FfjYi(ea6QQ*M}n%JQ)kW#FuHJdkbu7GMGZDV+X#HOE~bgcctK_e{)QiKiGV zYn26Q*idn$|65U!XgA%n>R6qDC-nT9u3)GqJCxq4t_j)W$IR>9&x;Mmy?&(o7y`(O z&=6+5_&HJ<%EJ^@qlWCVfY5)A$Ucj{HWv-2lb*XlB5|JXbu| zEwx?ED^;O#_d+wxyyCSoY|UlLgb5syef6woXTJ76;t>V!m7Eg=iN!^Gl{i;^zW+3g z$6E$E+CX|+JdobEx;z)a12*QJ(;~i}mxC^*B}+810^OwnaHO1Ymqoy&F(biUD@kB8 z_6Y;$pU%ob>H!5EkWK;|bj~nLUJD4l*~mCcPwYrP+QZ={1CPSZ%3re9ZbNkeNLm!I zVe1+A@O@#waGejo(u~mSvxeK04Y$YUDt9jcd5C8RqOMGXYi!lS3ZGq>ZnY9c+C}D5 zX;kB5T$CpqM&amMuhgH$b#P0`1+*hvpCH!G6P1vHcj*dndoNSlK60VZVKA(=9P%ts zZ#1OjD>9Q!}`oPY3E6{%Zra&?tI9%cb)HiVO|F!ZpU zCqU>x1eFBH&;}?5_bisAz;A9?IJkEm8*pJ)?hc;G!oc)dL1^z2Fch31aQYNX^jWA%%=$3w{#Ey()% zH@iOmt#v)=VtMlXoMJg)=@jC8Xl>Ek9ZIEQ&kh6-j_*gYpTSKewu(6BV0|dBVNq=p zBuWKTLw19>A-Vo~&QrC@C&b1)F>rb96`7Q6pd7WaLg{H%lAL852ZY}+RW{~IBx1F} z@#iuqIkdG6>mf5dV91#hMy4q63Me-c3ex#;f{I>E;U%~Me|iQcF@2k`?ybX>M$GId zZqEE1V512SnM7uE8!dwka8C+L>m7x`u@;kO1*UKioO?<7jX7@8f{!3fP>y+SHmVb6 
zB+!Ni4x*Zfy18Km*jqb*jRMQPq2{wm)uybdWbOwa9WVPISJ*jy;R)YhlN*m*zdm5< z;uW-M;^3Mjls8b5!Q8!P)A|(Ph7#o_`ugaoN+UmI+zn3%u$`aJY*Y&w@e2Cvz+-PE z{(>b)f5I&QWSNT*8|Pg=gXDl(#Dg+u4HCHK<|hTa35_$Y zrj?-0CuKufbx#rD!uHIR0++m->}yH6=i(i($F^3DA@m8B7f5L4#=t? z@#x5@a5G%_(phy;gtz0&iJDv^6zdaN#&-?Nlm%BF3ATNGYbF8Jl1fMz*J+JN&5l2X zNx2_k56I4pQvjU{vvPx_82vgS!oOMAa+AIM`)j9+Ss|WRyNCz9t|LJa>s$u=wSC4F zn=4NNoq`nIP!&GaeXQhL9{_=ZY#>2=YYsLSZNDWKWm)#6k}pXgX4V(&hx)$R_`G;A zyzJl$dsiPJcD3aC1(^_lohhnk-58?9*)LHkWO0(F5aYhG+oy4@Lh6MKE z$2B%@&Ze1|R|VtF?vKw)0cb~Ab?zPTS8gue@;j5*8>Px-dr{40zTJ<-KzS7eK23B_ z1im!7Hz=8L*P6~MN0v22nGl48$>ng($6A1~p;Yfl>Hc8oYlSff5Ruqt1~k~-O6VGX zqj@hAuUS7g+3PH!hYSK)1LZh58T}y;VHtm=ceErcz{Rf?X4&kfV=PRnX}RC@4XpsM zYKlX)hVW*Ox2o-k8_T$m2G$zjZ{K0chAOhZEN(hf{=+ldiE*6^W^%=)Sz3f3XP_b} zDWn(9cu^EfyCw8ZtpsE5%FALUO}3I)Q+8%jor)GOb;-cu0=lCgO0~|Y6U2^VJ4qeV zUAKTKs4sjM6ReeKIX+_pGDBG!iaU88gp|imW-LqUag~e4KsI*DXfu@fv$o?5WlFGZ zjIU0s1!=k;rZ{-hMotU;v71NGqaMK)^vM-39=)Avqh8)US-Y z$_^?Zo8wH_PSzO{Jr1$N*S~*mquMMD6PXNa!0@hgdurp%u@p1$#94Om{z6urCCY@* zR*N1E&!et^!t<+(-BU)@i=2jarL_+WJVxq608?XccODA&$y?p`&iBw5ptKfVy0%bj zbg)C}b4atdwR{ecDXYXpd^M&@upBilC@(G82|xp~=@9}lifeSWnv1i-B%TRp_1>WC zNWa3k$k|X2$y^?;BcgTArKo5egZ99*)jl-qGQ@BF6x~?uxDQ%StjD`jq-<2K^q^KvGx=X6us8ZF>xhcQLHW_?0GU z>D^VsRp}OS(+8bZIRPssv%B{NrnpGTmtUkWM0C%b=f>({1nA6`$sp;IfkYC9 z$ujAsWf^(@l+DG%Fm+GWBp+eLk#7SEwnAd$WxNL)#Du!e-p^D+*v{i=ZAWpgdKd+# zwBg5I1F1_jS#3fL|9Lyhc`yG#OG1GtFMY6MJai4-|8#oMX?9Awk5F4F1we$2Irg_2 zdT;c)+JrTV$M)~jyutSHR8TwB^#bba$<|Oh0d$0yK~eE*SI*>ged56GtO@==c%kFH zaBFClnV1+fHPa^+KlIs(q1v#P@uESG5=l7vO<$d>eRBHTl3y!2 zVOE#5_4@65A*aKyMdJ;_5Jk64j$xv$<~rT<#bJyLng{;Ea9y zZbZ^~CdbWJ?iiWI@IELdmMiGn{QGM0WUnpot~~JIye8h1w@%Z_8j)!R^U!uhmcwLS zqUlf#TPV7PsBE9y0fFcNF@Gb_K7`83b|ASWf3NYv0B3?fDfC?_RUUb&A#t+$1=#!J z3GW@PCm{p5@jcg>{*E$eyKNkp9+$4J*XS~Bso&=jUJKpq)%m!qyS{R!IKam0dm@z1 zi);<)PGo)OWiHGo;u{tXw_F6Nf%kMB+G8N7$u$0DJ(&hEAuzSetBV&O1(3y&olFf^ z9FF-ZJomLDu3NWG=qMqH>gcqJvCWXXn*kBvzl_b#5?}W)MLulj+FiN)M?aU>76J#p 
z+aJ0Q?@0#oc>K->V@FNg*Bz`c$uTHDiYn8pwA8|W=4<}-{l(*8LyQH}9wei|<~mbi z8lB~UEuB-ia|uz%%3rqrwzDjxXXI_8Xld`_Ccd82t66h zZab2%iM5~5Ssjp8;4D}omkKzUf8ri55SRgQHE*bb&i9?Vx4jfx$V3jD zuc0%GA~p9{)_|p-#Gf52h%EdtSE|%7s#mflCGXexp4Ni13&UT?)m}H% zWZHze7-Awl*a&Owmqc=Ct=kts2A)2WLs#wF2Z8JbipAX4fCx1pAJk2vxVEGy4ky!M z5BYuJ7??~#Cx>(~X}P4{W8Mc0IiEow>>9;|CWA7bm>wEUn1PY3$G5D8S-(n z6VCSyV>{7dS5t`WXrmOfDDR~9nPknd9+U9>{CKv&i*52?r)%NJ8(bc}!PPqn#+JLA zyupAED&h3%&?QCmal^Vjj@3~(L7Rpq0^x?!D*(Bir^GZD??S4XG@3cS0+7*bV4e>^ z$gx9Tk8Z9|(XPM#WF-&~(m;3Ja15t$RS?BxM>NR??8YJmC~!7whv>|s-qquQQ7ehg zfy}Di(l@v{wpJe}loL{1(^ALz_+sK;hIP8Pq{ySa^^Ww>&)bTS)v1mBy>?M*=YsRQ z)yVq$>g^Sq05sfO5DPQmSIc--asPm6zIYH`$uPCmompc~#BY4Vv`XuKHl_<|c{(7< zz?xs*I<(V>R#Q}>jcCo~IE?Mm$wG42YKBj&kSDJ>qR*=j4xj13oEClufne6KB7cEw zGl!LWN8p}l52);~1Ml&GH*%3s81D&YzumET29lj;$oSS~8`E<1*vNeDa^?1^%hYn* zg|NiI-k_YI&eP%u_O86@fjcnJ3<&ZEp7aCsb<=USsYdN-%PXLeB*SLOSR#J{MPBKN zU#BYGB$1%XUZJ?`)ram=Qnh-M40X;v&pe~u?s}3*e<~T_{TRd9CClXZt`RBz0#F|d zb3b`)%#8vXkF95$z-wL}2}_;H1G$PA-!(wkU%>CP1E^cG$yUJZ9Rhmv&n);B5P2^( zAA^T1A&ntTeru_j4@5@}lEX{BKCpXL>NfXju(vV*WPJI~2}6K<)f!s?5KLvL`wxNd zIjt~IK+-Q-tE`;<;78as4gzcq^ZC~;+80gkv=W(ysAUclL8H@{_sqOcK=C6Q3yO;S z|8ah1s{>_bmbe)_d)~p|1Qb}H3ILXW4K7e}RhsuyuhCxG#kc$T7lKfCjSpozYi^Ed4BF zf? 
zI@W(-V$44@UHJ{Zl<<}_49?y^$uyYXMoz}tNVaJOwD~B*ACQ0kNhtsF0`T+yQ9b_0 zk@+9F=<>rqj^qDWS^pv8X$888QM?< zf&5-6@6+npaRuwm1lcI?*U$~-1bmcHFWoe~9;&{AcYskWZ2%?6ITc^=S{7BB^ck?J zZ^hSAj1Iom3_Ldie~NsC0WF`f8{7r(B%p0?P*dIWGM;kjO(9v=v>Ol**n9b#Vu)gVN`u02(OMth(3%ZU)G{=KX zvJ&E9^#PK9(A0tgC`8JbIqQ{;&0XP7+*B&7pQ@=$8O7(8?3rK?5Lp!Y0POolmQdu-ohPP)3!m%#!u%94 z8f8otQO2;?Jlqojq9(7@#YR zJIo`Ps4DTxuhuXvrtchw0SR3PlskUWWFG}@$AZYz!8RDzc}8C_Nz?`x$ozfq&%IW{ zdQI37_jz8$p-0wESQH>9uXk6GaurvgeTG#})joN-b7j%WbiDMf#updCy_um!An}>e zN184o*@Dhc_%(f3z9AoYk*5h|O|wXpS8H@l89iq`A0ly8(kS?fp~Bh|-OfrFumhtN zszk1k^&yAAtxeqfqlFInsN@$G7Q#o#j7HlV!GBxxFR`f>41e30*_o#SY?VLgHXHcX zlAXnG5AB8awX?|nCfcng2`=e@V*Zuo6ES`dPDoop9{A!%Ai6!rcT1^k>DnWw2beT} zaJEnzj?bR@#xHABsDwP8*&hsM8F^*Boz8Ek7&xrQulEB=HN2#sToPnsdop=C3 zzIR8YetY_oSM9Q7-p4`s2kU&plB&z1x|kurNN)l$&f5bvs+Kb)jk~mXuo3E5STw>q z>Oi9wFN(8JLke2WXBaUe{(ev{E$~nwHuJE6z`w2M4bU|Pgi(LYw3at*Y2Law!${mQ zl2_Emb@CW8>zXBAWT&I>+mwE z8ENbg)>pRy>Y`}6#@(dP`$#Pei2@zYBR)Yl;-CHCVU9wvWkV-NqhJ{ z%`*TX(DW_?{^7RZQOZdT*tCS#N~D4-!3aSP=;Xx?_rmf+uQ$q~&;szob&v|(0{M0P zYlf+3f{o4BzRkOaY!Y8Q3y2D!hKJdn?0R7(Jnye3Q+r(jWr!>l+EZ-CWjX4@LFkv!tF z_-@q`bNZKY|o@t7QkFe!GS9US7Fb9+>XoCHw-{b?L+c+Wu^X zEnQ-arXKYf3C&#?;jxdxyRnW85_~5jx%y|VTaj(vVo_QxjMCjkAM)^o15EAf23HZ{ zqqo;&BoEA2jFAM*+N{Ps-br{`0sh-4NAb69A``|^P&LzJuLoh0U)7RuiO$X$WU`nA z%m$#chc-^|E@)Qd|cMaod;M=$>Dv$`=VTyI0ARaX`q90{9#^K4J>~Ju=Mh zq`>9#%Dr9{lSWr^-Fl20vs0IcICt{@BTLpfZ3KM{PA>W8Hp7&vmE%EK-P{a$pKXwR z)AD>#2@$Ocg@-7QP}*(-1Y|f8ZxX)62iEl&B&KHFziqI;Rj&J$z4GpcJy2cHfz1bS zlT@ZIf1@PbLDmaQWP=`mgbU(h7;h3)3jjkpABUu<)}}aOi&BFGfOiE-21+APQ#_<0 z0US%4+)@Mf^bpkY;o0CDMea3sIOtx-CGO-xRwQy`Ftq(5Do8Bim| z+3ZQL>(I8|mZ`Hg`nCwYX2m}11v(%t&_{J~u4IA{12_=7{FV*e<`7wh&eH8k#s?|| z`H4w1YPT9(Y~*7Y?h2cmlq`f#@!)|~%zcf1pTgnr`b2E$ZoevULpR%;Y~}`$wO% zA9*XUu8Jips9-JjjUu`H`A$YFFwm2?}Oacv2`h+^#;r`%DDT$n*9i zWnHq_03JAQ;F%dGy2TWQK^>f9M@b@;r^;o9ea}V!Im(_?1~e!r#P&2o^*oh^Zi@ zpn<9c6s#0v4G>ycZB!JD5W-?=pR0}Iz+V9#H2 zvTIQ~5~9`g)EI~#he`>`+=X<)&p3w3Eot2p1JKXiIr8Q)NgO6i2Uon6%yIAAL* zow2MtZRn?&{jJSGqjiEYHLh{V 
zLZUrXM9=HDqN3tbjCbNowJEyz0+j3Xr-z9JBOd|~c_hS_v0p4SBxmM$O?0YAqoQWJ zR0Ea$*KXC4-QKH_BT;uQxif+rFKNUvIR}BRW1j8s+Y{*+jm!W%e@|%c76k46QH>ZR z_4Fbxo`%cRtFb8pg4Z2VLU+Aso5E#DVh)$BK`P5zuNl;C$CvWOx~q&K(+Wg~WkY*; zab#9QIa3pLMNbrpwST`QC1D>thKoTO-mxgUirzh_%a%tWCN=}q1IF2N8Evdl?-_yA zF09xWEB*H7*L0b%L{i2L@NgN>M@jFxN=Gn;sP1FGjsE>r)fs7FWsR74N_xb2G4;z| z^_$$ajsIfkSM-!wEwoa*J>n5T$h4E25!@AZnn^;Q)7~KgdchmtObXorf*8H^h z#IBvadZ_R1#fg+c&5j91BoOG2nq$+(h|$P+%?W+S&V~WqV;`Q@FMd33EfJy!yfDYR zvIVO`KFCuZkKaq4A|^5_VL2>*Be>`;-5-Vh$TdcuxdV9r4qSlci(Wym91%d9IB?h7 zaXkvP_Au=%otf;=K%C}f_rZHNiMyd7phX?<=J-lTaF7FDaaoD3)pbn(cg@0Rue);< zG?Xy4MX}HBMR$n;oc1$DyDgeZ8{H;nsZUHgvg!{2=x8;Um{k(N^*#6?K!Z%+gr6?5 z4-Z%PVb!W?B5rZDlQ$rCik`3W3yQUJD~t6GKnI^N4j0LFo34eKTK@Tt1w@?fuFfGJ zpmVTgJG5kbbACKOn3@e8K8v<&XHF+A)2YlrDL*vn_v<1<&qk-PR~vQFUfG0Mo?;Wy zT*0!Mt+6bD-HbRdK{{bwn2)ab#)vaUBpoh%j*pt{Ugo!qa>Cb&%XHH#?!*jONb?PS zQnwes_oI>RW!JolFYQyiB2rJLE}O(*`!C8G?F&&OSmD{WLaG{)5?Xmp0)o__U1qWJ zppSAYbU-_T0jftdndFBY4=7sC@Ye`5(A*{nr|tJfV}>NR~rF` zveP89`PV{n%D;tUU4?NQD+OOV9M&=EIMnsE#p9jVrca0zL48er@LVsCb&a?g#4$~N zOGymT;Uj-MmbpCS@gSl~gEjFhbH=zsDZkHJf^XkZX3pOm_?{!>G;_x7TR${yH^!$o zB9&?1--@Bi3g*dA+GNPDT~;XoDF%Bni%Z+#MpwERnNeJ`r80In z-p)4V-;WWcR5z49P9fym^i}ye`A4>@=xoG3O|SK)cil^m8s)=N zNZwL-=JR_Ru=-A~%iE+#zxpC+lQq%S%VIyk?Oz=-{w;KlGzX+abNQp3u*CcyeBui> z_HpP^>^b!&8j4L{q`j#CDV(?k34I4|Qn0JLIFIrs3!HrACOvk1${@YathJV{#g9`_a2(2-hY;2||Rnbjm0PLN*V`Y%Qle zm=#;LJ2sjuqFvlmChgpzDlZMZ>hW@L!0m%N~OF0%{kPh z+1Y~Jwv|UqGNobVuc~JUSAV%96Pb|n|)aOM|?3W97ms<;v{juDP;T!`$I>tm^& zS&N)l$7oHNUpzT7{~ssf3(9M@1Z@qh+!lyFhG;FV$o3Ug=I^wWTN&>{J?~9DnMAjHTKqO*1Fyv=X(`b@O4qZtv9H8N+U`F+ zOJvsmupJj0z#{)!a#dh%g~g}mX*;aSH~O%zD7}OyN)OwWgS~A4@h5N`?zMJiwbz7$ z9lXq2B5Yu;-KQ;$CIa*|hj*%<EFwLsx7M?Ums>sD*biWH8XZ|h06;E!6{W9qCH4V$@mVg<}#vfi}lnmtY|o# z*973bJOCSCo-!Cx{8f1+Qb@k=uRl_X5(~SppXwcvKk*0+2?$wZ;nY=3u)kju(`#wK zK3cZg5nn2O&W9Yp^fV7svwfvCLt>HZ`q_z=V^%1ek8A%GFbwRuVCm62egnB7#Xc!6 zzuUDwe@pk3Li_%HEh^zWP)ZPMC10s-+F9DfEny_HOu3UU%vudHF$WGirm;fI8*XI(z!lhgSf`^+ZlxN9A_l&kXl-s^B%D;yi!g3ib@~W(xqX+= 
z1!WBlq5hQL?;lLf+KM=r{RsBj&HH@Iskgydd;nLl);YBtLePRc#MCS9@*kwXd3Sf$ zfnWHCTke5NZnP#&ez!b4*?TWsT&*Q)>KAclXSPh8YY5YfnGlYO3vxzk)UH%lzOZLC z!B*74!&&oAgVZ+EEH?K5q3xvYS3j!;<_7G7JlG0W$UP&t9AYYB9^f|ATHW!w^Cpb# z*y;d*u~kC7h;Cfl1W*4Zv|xy^%J`$9Z$?%v8QW=Ib@2SyQG&KO{542XHvtmMtT`jGcN`!5IX9UJ#q#RS|H!oMvSK|pB#|-wsr)a%OA9x zVppFITF|b{-*?_A{fzhiT{XyH=__*Tb=y(ebkv-=$WOlHDCbXhN>_BjCbY|m97as63ilz?xm${tN_D~i>}7bXrVx(@==H3~-UDwXaA z@%wWu@*1P#jV2g;2;*+2{HwlAvY-Sbi*P~ULM^E!+!SRlJvOX9{@8n{4z5av#d4)_ z`inUJr=Ic!MdOyZdtF_V#bsFUvixT4f?a`|`M%x_>Mm4*Up%yDfrf?T~( zOpFKFM}BdSj(aBm=K$J{R8hQ@U)k)f29h}8sk@W?<=UdRp~6%LteBVC-p8)g`V>Q4 z@Tj6Tiij!rveT1S!}{X#G&VbaB&bvdxn1-hK&%@LS5^mf4t;>$I5-Av@y73NYVrLM zV2~F_)Ki@YU=0H#zD(?pknkcldmK--`(<}0_IA+|m)5h?2iI;MXX(iv9#gy0TvNEH zdYy9Qh8}WxRQLgyQZI`c_bP+Ex3`QY+t^zrI}e<1X(V5rh3GmkR-)|W2>ml3&yQ5%QsK_ramrcX{)L%oN{LF$&EUL>Uo2L`wvz%4q zOv`}qnHu=UXHZ=U*3~&F@ft)&AKtwaRav3b17z>yqFs+Og`2u8*Sj0RZP&`71nQnE zITIKCqmRd@>!1!Dras{wy7`E>4V$lJ98R3mF%bilySWtG zF6E{O)c-+1Jhj571M>U}pkMKa=KJNy6iQ6#F^iP^Ulmi^QDw@*G~&z;Tcm4kQM0uU zGb+_q=lvlRcKAJ@G%k-?{Xr)-rK^E55;_ZOd_|}Ew^3g$S?j+scRuTM=aey{&cc=? 
zWw7!WLeKWUwOqT}bu4+WMWcrILsuVB^ui+kkqJoU)lOs)WBlk%74Gr=?jGXq^ZE>J znMshaqGhS)+N(7i-B8OJehEP4%TtnRXDziN=5((o*=>YC(yw}v8{VwufUP;=nD!hL z!CwYvP+G*9UyL5LJ7hom;hgU0db!ttC_3h~THk>8bn-#Sd$IQF2-Ssl}qdNSe^$NwcTXhiY@YS$m^tMWfy1r!IWZ4|zwS6egkT zs94?6lG1Tr*E*n!$gb*AfuOUHhzo?E?H?o-`vWdl~V78 z2zy8^cu#S+ed>e4S$nN1Qw;?s)(5(yWlco-(WCa|yzB`n0vZN929oSVL z!FK8{I#S^K!Vm8su4}R2rFQiw;!G2}IITeOz~`lT@wZXQP7e&0NOf5H)`FcgV`O%*tIq2)ld2&#}aY_w{Dv&G|ei3=D4qV%l44jZuVwE z%Y+UP=RpDJx8%hoCEGW!G3$6$w)CNma~PMO@XhOFOXJ??L^o~MU6Xb!C$2FLSVEGH z6ub}Z;edAFdsGR>%BR>jdLq_yu&gX%mv^tN2+xK>=)=qa1E}d#$s{py8XWpsUYL$j6gLIDsgAK<0?Bl&%BdWf|HeT$| zlz~@y*pKBm?+*;=XlIX^~6P`;Bd*Tqu{n)-o}@ollMP$ zu+9S(6={3)jg^cT$$CM-1p?i4PORa!4db;?A`&i#C}W0E9bY->%P`i2-9?%#AuHe% z*(X$~@a3*m1NL0;ud(iS-*vlmMQp3zTf0u!#jh>yVxOhoyXJdqAPYZKWkEXX0sd|C# z?K&+TPm_$vz`Vttqc1}+5HarG@Xg3+z*b7Ywe7f)5v%0{=Yg+Ps!gEmg$g&r#)$QDAis*-=R_!{FL<$H{4ko9apW{ zq>PagXYm=izT>PUOld+Fs)lc&B{Z#ROqV&4isP0yAQYLP`1Pv#_A@Wwa}y07r%r9< zeaM82zHgrgJkgb2`jESidTIW zwILN;|Iu>bQT_)i=|<|t%`~dHcR?=p7Hxehj?tkOMU;n zYsZFT88+W%)TRm~9^-T`FZ51$0yDXW+b1b*a&7uAWPGkV%s8twyQlISAN z707hFdqMnH4=Yt-<3Eu;$MLYcEHhmsfn>z+9}(AM`ptEktKy1|CoF_IK7FVDuC8AD zjwN&Ln?@TGsGQt|pt;PwrrG56r#x~}!^UM|m+7|h;>oYWk_S`)5I@BW64od%VLKsD zWhLDHQ9+jTHnMxers48b`lUe1Qo^6#tLE)D__SqX3=($yQEEk`+{XS!0 z*Qmp-1g_ryHmPk~k2i|`5pj5o{9JSvE?Fy2?=G4utMyd#m-2*UE$Ou*{z6bBsF*;oRxjA;n z+}zRd?Bl=qSE20L5d$z_=q>RQ4EMf5Xi&-^ZmHT4kb}!<{Z}6j@~JCZ5PWvsZ0Q$t zM|6^>bJIHtkBa`nSHU}8L5t5M)lY0&S~%h0wJQc0>#_**t7ApJWo18vEjH``yVD=o zo;Vs_ccovLZMzB@#xJjN`HFxQdDs6sL9BF+$cp5@ESxW=s+N0|ApiWo78*$RL%^>6 zHRSq#3kNH((ksJ{uWWqy@h`FJ@bCX;Esi#|46&EOC$FUQ9?kWA^C8-+n+7pa7B&J|dvCqKYC8e4%OfLDz$~6qu=T=) zjYG5wXzke?l%DE>eU#=raQl0=AlBV^W(UTN_3>Kgt$Fp8d;@j|yvDu*eA_Vug@ zn8Iw@&?B^6Dc{H$l=Y~J5CQ_l<+pER z#w}K)vp(f-vI4RgG}P?FlZ}**xym!2=l;q{HVK&;U`py#nKheC&$z|58~} zKt>#<^*&Bnf%&<8Z)xMc&r^=- zBDdaW>9mY}sJn1H6}n;=KLYV>SXGxvn+*1D>N6Yqo53dk`Dq=5UbgI$yyv!#lMsVl zK>y(EjXYmOyYAU1ct^#DII93&mDLn~?B?w=6-smB1L4A!hIhpK5`~y^Nfx;$PkXRue2Gc;oQ3 
z(5j)!;{gu+b^lbr`EBS@GZ3&WU9$CIhR)(cD!q7%&a#i1rY4Tfr4tj$EsXjg4z+@# zA8<pe!VKS3@!%llR3 z=6BET2fPgpQfZ|mxX2(VOU_rUEeZMq4Py^UNtp7+wsr2s?22Y$N3VucfgrEQZL@Fr z)t^K9TgCA=L$Vq68x1>>DvIaB^O;5lgt{1_JVYEVAb;6dk>Vw8a1j~j`G!GhX=y}Q zySzYW7@6^CYxJ+=?T2p7jwN3wXawOPWX=TV2NMuN69#OwxqKOM-BdRkQ{p>)ITn!@$Usv>?)HhXnaI1+UHbi_y*Qul~5k4E$l#w2xG0 z1(hn&&Nz7$J5Iu>Ga~asQwP+#D8LHI)fR0<0|Qx4J-ns%{d~r&9I_$)Zlto z55|DEBhYc&Ln29>WvgnHO7F+XS-93_VT1Lo8j6?RH7#(X4~_n6zQW{;rQt4RZg z5`-DrP2CFR#!g!wYqgHKJgY2 zGeyRSbXe13+(4Q;Ev=ViX3qaX6DTn2>A6DYPL>cwuJfR1&H1ykb3hEl4Lf|&IB(i` zOxSrXsbUEdj4vx)uNX@j0OR!0)n(}U{vD`OhXhOwCu}s=p0bLeNvI63AF3gpVBMM` zYgOYS8XbJblWqpcvnqS@YGYV^$0p@=)mfgX;)rq!!jT4yZAtxJv05p4NSnNrzzF2Y z&m~l(jr`h$IkA3okLp`+C|2S**6bs7i^m_fZWgLHs1BS6n>!WH(hOt^f>pX|i_Du+ zh%VFPI5XX;T0sd_{&kneVR2n{g_~X%I&)2E1pOde)rFptCtFyw*Ks%_yk{XBzN`t`H zbqVHCw5d^(7e69ZPJW8g~7>2Q#y4u+1U5hB!&c{fT{g#ez@A8Ua}nPn=INGGAbYQ?GWBi%3yIF&;Y zb)u(>9|UINX75{bjIlC#WS~# zXD80e3}kcF8~UW_qTr3~{_!e`eU_d4TiA_+I3Zp7?K8XGOIZ zHI1Fv9&PykqU3j_?NIHE!ae!|a>b)(ZA8GtEUN z9mXV&H5E+Vk(K+hrfMzqN)n#BY&Kp?*h~lgdOA6^eZ==?SC8`%Ob}AJE_W{#eyzu4 z>F3aR0TFM4EQM>)koVUP6FXFCk8*1+|EyVai;ix}QJ!}g`fen*%5d9*Ye}MqR{$yJ z&MDjBz;`kiUhc|P3B)C2f6Ei!&SH*}xve7@gMr#D8^SpBI#X_#yj<2`-4GN-VT<$o zV8-l+_5Q8~^Q=vfrjF=;O=Rw|qnU(mur$E!45p4;tO9X3FO2lU0%Po(%OdhGrAETI zUM;62LpIa4L%3xrNQ7?|Pl%!#%zC>|kxvfRt#VW+Wl;wvBVdG1*%`N^p1h)ex_oU5*X{Ic+OY{qrNZFVI` z{Syk^`fR44EyaLETh*P<-T@PF^l5Az+y2bX;lb1OsYh_d2lpfzZfmt3t~=2(QV~5X zyqaWZ4On`Tx%N7ce=HgbX7P<<04mMvos@t4^|16_TXk_8%G}$WKzbqvnOf3Qd57Qb zf$*r|6z73bT!NE4Ot+xQEJt>t(eU-I6K-80)^D=ro~(J2uW(s!dv9*V&p!v^?d<(B zg9g>BtA!vkE|Up2ws|yE(r}BiclCQ*;wg8iun=?zHC8@rbL`zHX&WN99?So9THiAQ#S0yF z_dLuYnMvQn!K0=q`c-%hu})npX~qS1*6RT>?OdC`RN=ePqU$wBxQMZ zXvC`CEOOp)mW!;WkiRa>5vh?M68nBk&PWqwXZF-a zl=1JjUg1^8GI9*J>Bbmt`w5s9YofqcY!CU!ObgNKUuHh2@vhq9L#+RaM_DW4ZHuP5 zo$!f%47S`j_IP|rFzLWIn(Q`AmL3W?$T$aqkWQU`DxYYH;ywG0JkM@}n^F3tabcZC z-Lo)VrhWELg-h=84X+CsKs1_3dED8Ob~@`;uK8qI;3=IfK#cf|yAmwet1&9ZM`uWF zX?WZ*+mWbb<4cu-JV8wvf9fgIi&H%`FIJIHWD<)>+2dov4(s#9zO+DPK$mUPHQ|kf 
zX;MACir=^lQe2rM4dZdkQvN~Aws;1i^%}D+?l)ZUEXEG>F{-UpHNZb9-QuT+{K%_8q!plak;h+-)qLAE?Gt_M!xbMc1!Q83E z+Mn7E*bjvX3TxJN7CkXIo0kq}n!cxhq4*L&I8mumAfSrY(R);7pjr^?0 zNNJGCVlZbBJg3{FzAQW*=JuQ`KNar+O@-t^m)PXVu>=B{FYW7S-Ml2@5{Bhlm~GE@ zI`pr_3!a75PD2YW!L!XvdtRSe>YhqfZqyr60ghcT_8*7ue7U~B^z=r;5sQ?t;MW?+$qzUSzJd8LIVAO5T|`DFTkG> ze4?vt1P^4BVZcaAFOv`O%6rTb)t8$(PO^|VHT$37b{GfBDF%sQLx%yy^M0g%L0ER05fTzg>ZhD} zkP{A~)AmsvH@y3)#)YxbHfJpNcc>bmti%=9C*_ZGhZf7*7P4H!Hv2C6S5(?;0CNRC zRRuysP!4`ddc$2CwbLA3mP2_MbTdeUWnb4Ooh_W|w#2R>#0@>Y_+GT(@Q#{#`tBl< zOG%TuaoyNg<%vkof}t=38*H0;F<+f~R=3`4pG_S^pDNk8YzV{%4MF=MI78;xJBdL^ zU9U?TBJ`e9xY>3vSiD8-x~$O$k(85%@9UR+8n7qF{7G&=Vge{EQz&`;d3u2}O7yH< zCccCtUX8w*8#b3oYucYLK1-Inah!z{38D|RWpmT|qP>cUKh+Ic?~>{O`V)_a5~dS2 zupW63ZDwvtS1~DU%A5f2fS2X_eHC9~pJrAHP9}l5Z;0Fo_R9C{6n2}OFY=3r$pwXN zL%0g8$IJdE4JE!*J3)v3YahnKhCI_G{jq0-@FQ|(Us2^Cw6^KDTItDv0-rph=2QPD z@pwEy=co4hC|M>@37+xM!RbUgfXh4T&Sc*iwAQW$*>8z}Y*1&$^fKFIP%oUQO5n<` zcIEBuc*}49{?jZ)mr2d%yMW2l`NuRcpabmQqnLfHOBr<2wIE5kxT|KkvxN6E3Rh(4 zV?Wb%Z8AN|^eqE%l2PBK9a5COlb}vlTor4LWbqUMv#lgY2cKMnz zBJT~B+Re&-vYrjZK-dGxH#p2E+j_M?k)&GUQ~z;<)d_DUbP)#=QjIQY+}vKLq5o8G zLvTT8x_I1Ss3MV3WZ1aDy~k+dC-AUpk%y%mJsV;{BbLWZ*bv&FRhEN1Ju1OyT0Oky zHy0yV>+!sCIwmX4}ra&Q#@AJyG z2mZ1{op`PeU2|TOBS0i*+0b4vC;wC6%weLPB0(S=g}rz&EBPA=yEL+LnERyJ_FRMu zI2j*B6DpaOn=+LuD<9hXQ%qaGt3XA&DJ3j%)^wBTYSM@a;_&!1seyRhSM0VIO_vUL z!Lb3xt67tVArtw=hz>F2me$NDqA!Stv9rIZ8d%R9uO7nAet;iMoU)tK@9!yM6Espk zq3uqKk$Kv~*_8BR%8p}aIYHG63|fIC(4C0JQDp#?LYZW<#kIAj5&kU1ke~U;?AJY-F*t(VS^N!L>aX?`o>$QDfs;XT;EKN|$GD$Rs$>JT0jepco1W zvL&D1R(!}rdro?Gb?vf6m$db2dDRXB$;NfXWeG zGHd-{D)Z6MDQN*TB#r!hqe@SyKZYu@2i4ys#38Q6hltI@HNIkRbQed0Jms1MO_TE? 
zA=~;q=NlAge<-su;!0~ao;O14N;N}DU?XK>9_KFZKtn4uEr>>)oE$a*Y0FU+0a z_xIKf#WUrm9F?$wMRVJ%MQU$rjL5-d-;^C`Gm~?8C<{w|;d_fiWZEZmZU&Qj!a_E- zZ^Cx4yq7ffIit>^D03-NR-*`HoF35_YfPK$-vI{CV5JY z)YUyb^<0-3m$o*<{6tu^{5#5g4%42|FN?EB0zNQG?!GcHKZb108(E%jdbWl*JjLFv zzmUw`K<`KFSpx|dnqox9NwVJ^7uU`IH`m1tBgD@UUw4FpEWkg-xzGJ*11$G)j~r}d z7n9~p_MU9EzO(iCBzt`-S!q1}rGM53#BytE&%`n7JrOBs80LnsOYJcgX@}0Ce=}#M zyhG3m7qj6P54RR1C~{ z=5q0f$sK0I^r0 zS=l6iPFcUkiUk-E#7&$mUF6uY|$uqMnhNJP&U$ycx&OqW16Mr*_GTmjDP)$P-#^5Q4Mq|L2-oz zLN~a^T-saruHbb=lp$bzJZ7nhKfhHqsl<&92afY^OiF&B7|Nh}!31@^1hw`ZACl4? z#`3EgA!tlRi61KIdz?E0V#|J+W@L}Hs)!szSHyM6+c-O*po*_o5%u;FHzzWK$+O>8 zytN*10GrSb+Hc#;=)1the_oNf91_;S-c9?0N^o^Q|0y&fG>)A;K0NPAswPAfCAgB% zS)^>pxB1wgGiCK>T5@~KmZW!EH%%bdZsfTJ%a-E}`h{P z$3!({Oc>H=5)|atR5qsSGqL6iC<_Z9$@HfAjDAF-A@2j$4_R||^^p{I1z9!&)P-}W zeEZ6{#xrgdF!@ww+rk*ozE6X#egYjBxx0tp9K8Q=XyUAuo0#-_2&`UpDmEg;jlO)Dd z52m`_a$K&r>z4uBRH6T&?0|=PAZK6kYCOtgn2d&wJUeRCL45>H*&W3rnol8^c@sHWJFe9V)lV3Tu5#sdPt+w--$Q&M`*a}Ul27kiScDu@z5hjERl6WE zRzk8=Z-SeQ*-34aeGJ!ivDGg%ygH9mx2dA+2gZ!@2}VQ4`n zsIip3oeyAEsDn4I30=>+-)|7GFseMJOG4sDR&z1jP$hR7gTaEs`tS4>^Qv@hzd3L` z-W3?{>4VMIoo92%O(6GCW8)Jl1j+L~z7nA*2724Cy>~sL(X^vVgNi9MO&n3%%KJ2t zGAb?~k@fve=v?P^`KO8z@A|<4G$&_+^&K}Z+ghAOD%h9kpdGEUN(*hkAa=?nxw3Qi z<;r@Zr2cBF&N z<~Zf&+}2r7+9aJy`Ng1v9<%I^)Ey>l?1OJ3Q`K$RxSwI(qNZy#L=>0>(gQ;Z-1xw~ zI+BjhJSa1aWs8NPWs}cvn>*f(mJ?)3`s%$~bElDwo}v4^$ba7!I$LxBt^E zN9HbwoyG_$;~+%k3<)skl6#}_%%j{=<|8_}9J#}NIp}S1H)m8e;;AWp9lb?0a4>f? 
z)Tg&TN01M3x3UGhow!^Cnt$Y2-UlIRluYqtaA_io3Ih6`n6Q$#boxBeeMoB z&D)LfQ>#DRK!|Hv-nI;A28wsTe8t{Fv;)%=JC!CYzc<2qoI6^kKd=~I=UY_o>8r-6 z%`O#5UrhD-S${7qbWrM)ntt)ki;2?iG?rhc3~7eAoXmZKt@SgWZ8pY1VKK93`9ctZ zl457?0|(?mh=;dWTZM1O`!Ky;yKAnPcUnG?PGQT$J8N!rkxPe6CQ2HmQSd!ZR9U_> z7>}y|02lf={3`K%C&(xD_0;>b>?889pd9e}ox5=`+|}B#?hs`|q#7FaJ~X8}dH3E2 zid*$%u>NX}irPLYhp05S`unOuL%Pxv7rEr0tUF3}^l6qzlDl;MJI3UL z+GGrp671WQ%Kvyn-!U1EDbJs8Q*b~aP0Z#s1X5RzRd{_~=$G2EHy5l#AzpIcbEQ%r zm52njo_To`C5f2jLJ(YWuf@(b#X_ecQN_41rHF9Cdj>b7mL~~Z-+m(Acv?TCBWuJo zX$IdaaGsv=tG>mp7^@a=jqh}5Q8^Fm7$}}Ej92mu`+yu8hElxs>_v^ zKsmZR)MqQ5{N<#+ezCTR#@`!4501SlSrPp(5j~xwf6(6Xvc0krv-F#-WtNbU>8^RjW8TpA@Tz{M#tXss1(H>iPh=ydOeOq+=qqbS|qD))(x4VER#$KV*|z%h&^ zyUL9dg1e-gR+8x>kkSLJS~SyMgG7qZM*S;8OLSSe*NKp{R9F8?BK|vTnla>nz@>}C zI>zQy!k6(UIXE(!P&6(fORgiwo{0ybmVrs+}M%;~uzYTfPLfkRZ37RPwQO9+6CX z;_J{sSlJE4G&H`JvRB5c%iRXXdLhNEUAils zKTkrV^+d@P!747kd!*`wlQ)OvHLhS1(*m#Vu0%8oCm#f6tG7D~ZQ-!qsh2G+@{%fF z1)`l`xMOVSZGwBJVCHx~;vmG>aIDT(%Hu5P49q6DR-SMw=3o+LC4q#AR&LwooV|nM}3{4*doa%VmCr>-7+*dkZDWtbQNb^bX*wnp=Fr zbNdnL*mY_miuoBM*tXv~!Nu0z@AFx;&`I$FbNl?;jWf8jr;1`XT5{FnZ%T0? 
z>DBLAcFNAWRT@dpzNcj&Zr~A=#;?HL($^(3OExtwJ=ies%J4pyk!Fl*-seAGC$W=f zZkq=Ag}KqUNh<`rmGR*PP&TvtkPucRG0{MS1W!UG`PH|Il(?Ooq2r>%#JX zQvIOimiH1)oXy3Az&(>#-y9)!$&TXjYby1~zXoQ8Fj~|JSy9riX-(mSQa+tuFRj^3 zY8vU9))hj@NDcd0XhHg5!=(xrO6=WrR>IVf{==dUhM`8^A%S4|UHx^+z zk8&w%%4#ceG?x0fxpu`mNKX&Vd-tWwygMOTHgP@8ncs0;`kb7@42#o*-mV-fX0+Ph zDNl+Ik;y-rl~r2+wi1FQ>bsWH>gM+*OkhRvg3T@z zciKs|Q|KjM%#R_svij+RR|CdQ4l*rgC8Kp!1T%f|`tffJ^WsXvgT5Pc@(H%L2uip3svP1l8N$k_uWkMh<>h zyJKpvU^mSW%Vec^dUq6TXOT}%&h7$P&at5WIAWGV!?nFR+a1UXZ9wt~-cGWus~?o& zXIeS4{i&2b=Ckm+Gy=Jf!P^&fxnhh^S(^Q@?mQu(*sm)#Z{nNE*#X1y#(T5FviQ^|VK&PiVTnHOvO-bl%=fZAH3^C>f}k?qo#LkJOUyJSfv;OQ+e(Zb!Wk^`M`Sg0 zlZfltXrd=Li?gdv%aS#!uHColKaS&|IeT@^#S2Bvy-mr7R;BwlhLq*Lmsr`s9%ylM za&h5-D`aL!^PhCLJ|87NfE(4GnyhsD@ly@mG{Sg0d4c690K1QkAycQ+*9oq@5cn9I z_oYpVbMtu2YPahsRj)tkN*R$Y%8W`3^7^gQ~JJ#r2 zzp1)=ufM5^mYMkT^YQEbF$kDr@av;pMWe>rUB(!FUw%!I7?@R@o;fqu6Uhrr% zD~>7tt7c3voRuAKV2NOg^{5y9j1Y)EGT^^sg#9*wB5J*QlT=Ia-M;Ko&B>-`B68oPm z{l_Z^7W)w&@{S5Nmj6z@MAl{nc>=Uv77T9|Ar@i1`21k~!mfKBi9fmi1vwescfo1} zch+Xx#dSqnw-0>^$KyK+b?o`=jzfEs1Jnd@|M`KyXZ?Ri*!p``DXa+6J_Y{wKc<^W z`?6~8i?%Juimap#BR{XSU-e$c(SP1$tv>Kh{C=}9)_$$aR8&&< zQR!VIQVVi-akua`cae}F_Zez03y&t?^7z2 z-P@73{_g|+w+8=L>>zDP{@Kywt%sBEw2tbdwBRpWp9a2a>d0?rYD;V4*#Cgv9-en| zac%T&YxxF3zS|AoWFT*mGibYV^ba7fG<~s==4Btjwz@r(U3w=pk7>R98y824t;$Ep zXJqRVTC3uc;N>?tsTD(ehuo1D%(VRJOX85v4r9>t`gO#}3vSw^AzShg{QZHD-L1dN v6L|qL-npYB%JPd#|1=OST-C`emd+s4KwDJkjm=g+RLt_B7M<>lqx-rnZs<~cbz zzP`R~ZEfl4=@O96f`WoQFZS;4?(g5f$Hc@uv>_B17jJKGhlhtdI5^bS){cyfTwPsl zY-|)26=h~-Ha9o#?d>@`JBx^jSXfx7s;bV<&wu;&O-f2iK|$f<$yB_*Y= zuP-7Z0t^OgX=%m9#d&&qc64++Olv)iiacCxJoHjLTr78Xb~ZINou8lo`t|D+`1oyhR;cp?ceRF$hK9zB4qon*F^f8yzM-F8RD#h_@k?|4m`T1k1~C z$M=RPlz!M^|F`_t8~A5;;mVk`aj$F9?T_BNd+UHR?|}~yrC*dw_}#N4!mdecBJFd0Qrp))6bI>iCZ?{ z2iTu9X5T-ej`{nT>5;0XOv}2zZ0B-=hV!7eGoL)ms1C+#Cg-biLbh?lP?B*HlZ0Ak zL`)#JRP+yJccjk2TWcQii({lyY0Wp{BA+}5mO#w{_R$Y~V20RGVm+hlO9ROzs#7ao z@o@B(_-48tz2T1rh(d1TFs1yU-%?s$5l<j-4pvSJgR3Qx$?Qclo#W7$6Un8Vx?kNM@OSe6d#Ce0 
zv+1(Z@<1QQ=XmQD-ghND8Qeq$$2Md?^j;Q&^%8*qVd--@*b+(;SySA12Aq>x!q<(+$qcYj;foJV4I8 z1+#|GRwfO3JixMeaLd+$*O`%PH|fg^By~Q7M5wjQj8-GIlW1ddA?HvZ{sf|AvW+%B z@_;4Ttt+Kz5ge1dJis2yGNsRr9j@{{nRWojPPoAOe9#^+HPXMQH)#(`N*G;dbI97i zi0!{%?72K&7tkJ-T2kf?jMSp`iz2G)JUwcrty~)TZCtJSteB1WZJ{UIujk|2!5X*Q zA>7T06B)czB`k?p*kUrz;T~Oz0ad0A6`C7*RA$WmkcD}CZ3?EBa{z-}$*LD$#awuu z`7pxc`O$3&gICQY0!i+wRDLZBNd(%%w3dI-FCbYuN8FU}HCZVs{Gw*<;ZY=SG6w7!*%_F88#|hF%&npe<$<2fE<1Xl|7It8d@%R zmPOf|=kjHldya6!UuJt3AB;gxtB68LVE1qJ0SE;_?8S9>#-!hs!6#x!Y~BW8McRHP zw)3%b90}lo*Z(|F%8VRx;vT`707G9kayuIvd0Z~ag`t9E%YF!O!C)9X0(Ch)Bv6NN zUYS)tn=VN1$4}v;10CnLGJpyo@pHu9PvU`;_4s$Nb+>B-GQPtaHl37qSL=XPkH*1Y zM%GNWIFY0xROdAhG61^S_Q3theK?~vno2hwW@rzA@0!3PvePdpdAM#n=``!OS%3Y~ zAR-S$#dl1p^;__&@&yo*LVk}awP^w7Zk8AJ)*ghf^L#Xb;a4y}T{f4j-QZcXz6r1c z4XGk@x$IS7TepuM?{;4PB#{1S90H$6KP;>)P`GUPPWZW(ZQ@WN=*}YH1U3LO%W>dS zY9Rz5m;;&kC&vE`P~6(NZ@iIVH|umY(Gx!$E$F_9;_Ck9pw*Pt z{JZ9HU?f^IwLB|5|Mi7%jS$Bf90SN=PRMv2aeSFA{CkB2Q$bMZb?=~5_k)wbL4_dMcHndCph4p@`fonFY(jQJNTLg1_3MVj+Jxx4M*ol~riiF-tzfb6w65O$T(=47 z`}-c-b&5sv!M&*EAZ!j!V)Yo3Pxi_DE4-?;?-cq7+iw04grb}%dKU?|T)m-22any{Mf zol@5ZGwyL&Q(YDG^oG2y;^n8M+oK*FwlS}%w9?)D}{+mM^`vS-4SwomDWeH~FmgsZ6d;?ONtKfj>xtEjv-gP(%WhF-f3>wcb+~1Wqnz^I0)Z4dWtN?*p zLEc4DLA}DDtgr3|yf)&QH*%OvOEnBJn{X(>8Z^BD&sLTpn152{Z{LI8XrVebE4i0N z^JTzPAiX5OA6*gI83_#h7F1Y?6jqwKL0$?%hcr(f48V5UT)JAl|Dr*`i(z2U18tzy zL^L|;pQ6x3+~qnFc4iQ@m_%>m>pp$3()mGum?;RGdL?q*wb|!ycrU_6zByk`C8(Bz zW|L#;FD&n~wP}<9AFh{pj6<*H1T=`MZ0ki7CGuKOy2P&2E0@v%oMSZf4OZ6$5g$i^ zfh)-M(lwsFQnRqXxUEF#C8Hd~ti>H`SHf;CUr@wl9CgbivSY48l<~e*4!1*lco{cJ z$i=SZ(ufti?@HOlcaOX##QJUKZ>KOyMQZ3GurLSh9g&@hz<_;CRQeKOcfeB6=^sz= z)$uCHgcEwC2)2!rmIeEm;VoAo?;eU15?qG3SL1n-X z1Nvg#;lCH2GwB4Bo+-&kco9@NytY0Jmr2z3FvAqa#2`Du-ldIYR}^hnd&q3*Wj{}R zp14wH693Fs8}lV}C9d;KjBh;u(L@k-%yGMp z@WAidja&SUYt?s3JZ|C%gPkh-kgL1Mudi|JQ z@~>Lqr6OgO7BD)pq#uYLq<`p_z3j=q%BKE1nHLr)Ab@MC8RJ{_HQ!b8@TIg&nL|4j zA0FCxDGgBc*reE#L%%K<(?oh(Cd5A2MKs~OM$lH;XxEW5GN8$RUmysrwlo~J?jUaH 
zeBBRez*3D>87yFEPD7sgdvA81dly+FxHbO4e0=^+;Pzfx04pxa(3HaE^*!o0USp7J zVPktQar6_`q3pP{8tYpfB%7VhOk{h^tA~e@Q`c=-bkwRA=&8m&SL?(5;^J2Yq(o|O z!tfYBDRe&_>x@(OcYcFiPWw;I-zY`mHdDJ4Oe>{ExEUFOhu4c*05C;rJN~-r0ovT2 zGYg6|4oQt*aH0(n;6c<@vV}!J+3On*km-~#8#|{^adL$X_{V6Vqo8&_V}Mzn+kYo?LGwUMWy`SQccvjDE;%{Rt9SKn(V9;B z%1f1dT0wF}JVr)z7hpyc&A(;ZCpz3g-a;K9p`$E*&E#U&{)$QU*RQt4#VO98d@;V{ zZL8YU$*6-R-=auBq}$5Nzo|bYIEsFF5SoIXI#rU~b5nSP zD+ae96%EDg|6Nx&2+I=<%ZLb?623ny3tFAeaa(CtmBC~kjz4@B6E$^Moe10fELhvv z%n)j*vs3zhhn0>Z^2Mn*+uKcx%Bo=nr0eCSrrQBb-0{J$99=Msf~Yh?!FL@8zPJ@c z?%KncSnqw?Jgp@?PER2-t-qxDC@<}!zSR{YlyB04o|cxNc1>K-L~fN~UfM$Zl#7=H z7rcceP_CLB4`6?T5xv8`9B}P_XZn`}sR}(+X6k(^Arm-I{rAh_e%;P~eTF#k{Zk6c zoq|-@?!-jy^9LP<4CSZ?y^nV(jX=ro_1FY*(p11+1SGGV5+0=L{pzqbe`uMG9(Zv{ ziGHA7ARbHaW~epFVOwaLlP!}D{(I34gW>OPjHTJ>sTwmcNG0PBN6iuZiCXlOfo4`= z0^rp`1Op-G_h?ylUU<12-Lyr(UkOQEyted^b+|PdUQe>wO~X_^Cruw}0g+tR5d$>w z0PKK!E=)5JeGOK&YdgtJJ@;HnAF+-lb9ab_7MOddT`a@?F7aLhPl(d|-I0Su_0~1W zG$s*)%;_@}*<-sSYJ_XNI@slzCJp{TTqk^L#AVbAZj@=KGYbu;AjI>#@-iPNlD z6FoiXqsq}*$&~(IZN9-m_cPnKhWE0-Y01(a?pmhCG7QopwI?J{k%N$3wayV zr#CqQxyO2-5tAWsE&S$5_D&b|3hJ7t?kB3S-d><4nFPaDB{29&k%g*c<9?zjN1+tHs3bXd`G?J^E?_oq+91jZTLDd-$lKK!H-5OqgP74)NFLi>}{@MSu%B`tM*w5mdX z2hU9_2^KYiSefP_68B7O{C#w2MEY=#m&d!Sq#zZKKL-~vINbCG2GmHtNuFe`->L)% z^@R9kw^dC*sj@ z2fdJN@kO(5s*=M0AC@-wG|wZgCvBN3X2i2tTJZ*qbThIaiyWwpc_r|6Ks#-b%*B{h z5lD`5^OnZccXB*XRt_=)?f-1YkzTLi0y5`FO@GoX$_FKx&$mw$c=yk%FbEHnKEx0a1a-+x=5HSpMA*wDJNXOT$O>RDq^Wvo&H3A5o2 zn>lqP>OZRi+e|rr&2w$VjYA?}A`&O9iDC8KP;U5)(c(Die~VMsZ>My>j9BD*TLi0y zaPH7DE(#MzhsT{O(~7vQp_HEz#D%a1A^U#&A>4hCan8xZ@buQaF5f?Z3oZWzbsqo9 zxOQZXZ7TWxYWMIH!{1x1{kMZ&$g5{g+5yw3hB1BeyiL8gS5&MJiz8P z^NIaD_hoz08aPPo$!M9X1S_cHshdJxfBzUoSNVRo$dOts_$B&ff!3w%iUFoOrgt@MgbHoBm`~&A&KKz;cI*hEaul%dW|AKY zxD>?^Qu8%kQ#E(lu78QN)2yM?X4Mr#sjs_}VNu*|czOKUsj!dsB77x_yd|ZyZOWUS z%cOY%S9TTe-cYiC73q;vGiv1-_5gt~{-x{Evx+n^aiuFt%Prz}jhCuFJ}P%oy7+D) za#7j7UnhF#W)3WVVghmzJJ1w8<1LtK47#jq_$#07%qxvw03#GVKy6OUCBm_mX3J|6 
z67m+iSrVW-gjg~vxd)b-)84xeD-i?xB9-)s^S%{FA(tX!Ua{ety_NE}1wi1l0 z_uA@vGDE~0Y4o4oP)ge+fr#TKs6RK{iNJXWja^z!W0Qwqo=f&6bBj0-%KUF@iwvby z^}eg67u`MYBSHEk{`?(N-&T08z=;8-HX?w2U?eec z(^kJ^Eji(G6`?N!yFiu}$0j+~7ik^ISDj#1PdRlzdKu^w6mu1)ASFgwiQ?)0o{pg| zWJS;8|1!Vdpop0?id5~NA6tqxACx9pgf^QQ@U$`irHDi?x+7=ocr6P??rF~ei`2#=tBx zsE01K5ob+>Yyaoykc7`V+3K}FGCFN6uW6gVlFbX6+fG_uG6 z463@65gb!ZLr5&AyOd4MbH8_`ts~_1rT8gOcLqg-na2A4J6+6Ps$Dzh^@mootL%n+ zHT7l-EPe`<6%NGjB1tmf<&ZZKZrUa}AB(BQ+_Vg@2Me&G)Zx|21mP)eMfCSvW^BxP z>V(^OI;lp*BnedF_o9gZBVr`Tz7S5lU}D6FX}B+~i;_>!N#(AE(}48Kr8J&Yl8BjP z6}@6ua+Kdy(+<&iUSQVWQT&m+UHCS(6gUk$Pf?P;ijhl!JLDi@1`BK{ddI|q4qZgr z$d6&br-N%`um9F2m6UKcg4`AvMzu!8qbh%dMbt)JLWOR`K`8W{u^o@*7y$NZl{3r13-6yxrm zc!|F$Ba}1G6dLBuFPwUP2t*?4;}-fAB$-gUMorG9+Ejw2VoC0SSDydpsc|u!bd(yW zI>`eO9v#m;tUBxmA-#<7N{;U5UYDRi*qpF#p&H>Lzw6~aE&ku&1caGSjHBGAe6)xU z^yC&VxatH$w*@!{D2u+(%lHj`kq`RWxg#0DLH}j|JjVU3DN%g-%fIq3fH2>r#G(ea zm?*Mc{d2_kR^u1>GKw)gdLOObq;&o>D62htTc+H=r!rT~ZuA-%qwT-A1P7uwTaS|% z6JGev_)uUmO#$;C>wV7FL~)I$kB;Evh^C?X@j`Sfp*@_#(~Mmp8TNlV+mceYet2}6 z%|BKC2opH0+~Z*{NeKzj@CndL4A`j?IC(QS0Jn&b>e23w26 z%K(@Izzf7!TSaspmTJ|+Yd1^Ol5)nSl>(n}?*Aju|8~W-!=BL6P)9bET<)AUj}Lqw z`G>GTjBh;Q`C-kpP-`7csyz}oc~uMi{eM^%CpJ~I{7OspCVw7tfv5OulGgvABoO>j zp<*-&{BmQi%}{y_e0O8pW9IowfA0Um7J3mW`lG7FrV5y|g(h@yNr->Nbh`rS1Asg5 z=pcpZkcu$$i;yk(3qduz$=m-`d4Px}oCs89dcn3(__SKIfR(iABZTD{Tt^SXixe zrmAR7nUtfJI&FOT^dC5_!KaSrn00B&iW5~m=@)(JDgv?%t8FYY5Wr}Dzx!tL+bYzr90-ICC7;e?DGH|T+hkH zp#+>dRDLU0xhN&yZZn@=5 z`Y?YaWh)3sR0o`Km7yfq0w*ELu=aLMCM+$r;FU_R-CCM~Ztj1rvB zOx2N z#FFdJi|Qzwe)ay9i+EGDUu&E`1aX4_`dSWOla|4s6C8SWxf<)alaLa&vGfm{Y&+ z>O)z&EGB-;Npq8rLqc4SJ_NZbY)hd~yv$lte6B4zK^rp}Emh~&?hts1W1!xn#h^rs zpL(^wA?>L-_FN@w^Jtp#jxM>lSgjtsWT`J!{ z*DBQsoV6rYS`z6=4FKa42|DtxlE}?y_F&NlohR9cB_!S@X2w+1&zYtoHcZJ^Cy;xs zlK8SX;*$o{Y$uHq94Pvs=9e1Ra&KXXBhPOx-1`W6WO#-AuC1x!PNB21J$Lr3WoLJZoT}L6azKIZe85gXIwAMw!^Aevu zI#W>01wI;*1V9%TaIl|a)T)FuHJ9O+o#OZGZ`pMbC5M;1@Kt21RoWm_V6+0bZH7&P zZ(vcfrU?i}#LY~{z}6XU+&8AN-6Q>CNlI63q{bZ~6a~oaM;h;{-z!ml#h2MNU%v)$ 
zkw-koX_&_ECoP{GlnOSX)19f?I+_a1O5?chY2!(X@BWT3zm2pX2|vy4D#$I zN{$qxYVy}I!^^ehw){(jmHyu$KFm3-6aD&@CzSw)P``yPSJ^5b$ z^Nf2w2OvQXpO~h_s2n+GJPxXqj4>29R+>pT*mx=%OlWV- zItHc>3UWhyfzhiNBd2o01dA3`$=9$Q%MrNOD5$@3ohl&Td~Uvv*T;aDg*=XzW|S}e zZvnG4P;J3^c2EN5Lh- zYpcz{?~oHH@&QVWsak)GVla}d>UX@!UcI#ic;>$zE5*)$yG~Vxck_J!zH_z0cB3l7&J;!vh-eM zR$e7^cvClt#oZ}Qw24)Xz{sT8LR`@4#mu1K8=zeNOXOa}EV1`-dd;~q#j6!5Tq9<0 z@qZ644^W*2T^Xf1fa#@VI?c^8c{$@rlE1sScd`L-&f(K4 zHZINwtHHdAxwHsvyhXhuxHxx8E4?~gpZqcCo#q{fVFTE!mG`0OKCHZy8$bbAnJiWQ7{2pDm)v5}VZI+fE=< zCShG%)N-{K?$K}6%ggye`8=-}sFLAeGsv)*S8)YX{FF~BIm74xnMoN32n~>iG~;rp zZx(o8mT=XNEg*IIyqp{Y!V!txjXax#Z_zy?mAjr;oyfjcQ@e533Wf^wwKeumgU6{C zCZP8p&vm?Z5!|Un-U2=4V8Eb8vVJy>&7JqBnVcN;x^tZquOzs2u0#wbn6fqN5`9gv z%n(A?H^si4oYZFiM_FiMRiLs8p+&o6;GaIDbIKKq?Wqn~`A;>8M*gcwE8UJCUK)B{ zfYqy|XT%@!3{D2WZoMRWpTfO=fo!Bep9eCjuXxD~^fy0O`#)_T-Iv_(sgrDCm6pk( z??~48+|cU-4mntplb4HCOotQ;Pab_xM}Kt3?X^EQzB+NC{2h*XOb5{1&i0Ofv{uDu z;v@4|05dhbV&K2)NCpJkL^m2~biQ1}=kcRwqKc50&3wZY287v!+1S&yFLKA;po?o# zdlzf=(5Zy#+sXF7m;sL6V?o>S|9j?eQ$%YzGXJZX08%9o)|6* zhXj(xi4|SMpSH(AJIx|Y;Cz4(DN}FkXl~H6sKcp8U{xBt6Gr@Y=K5(_eV3A4-R)Tfl1cNh5iROG)^vJV!hMeyOJYB7@y#cBC|+~|KMi-w29EKqAFvlMOLSJ~9+6XET#rwT zeW}(z@M(HEYtw;$=-U@R$u`{1Rggf)aDsvTe z0pr1J9;062;h!g7y(v}WfSKzV;;84|SJE|%IV;p0=B@%dn!4N3ZfA{)DF`?3T+S6_ z$DSW?`WjHk`pwT@rfvtzPT7OkP{$V?KpE%hKKVn=J{C$hCtWuI;Ti6^V0(eT6@Gpf zHMnKdZPhr-U*i!Q90Y;{;X=Lvq3mnNmf1XF_QiDfrS}CSoZ_%DElADrvn`f1EX9f4 zChA*(T1AJpj~_CY_Q8`$@3v^su&Ho#V$*5~%TbnGGlM?<7`_yK2TV4E#E2DWJ;!Nx z&To?hipAd(_}csNJQnNbVHnNvI1k8aHZ29XI+){NrX+L;8_)owbX4Td-n^Ft=MXz8C>pmnP(v-SRpVWWU7zPq5s{VR|*Kcf~WxZCN>E=>YrJJP7on zjw_9P;IHwR7rfug26;&$eE@?Dk7b|kAO9f^zUI@^L7n`Kutbl(D`k zFIG5tJByeu0>t{dSV8g#cc#H&saEZViQ-anz%i>s5=?C)aB=Fu-n~Qr-WrV*N6;LZ z)`H$VqJ-(4Tud&e87(Iyap6f0Q7KtIfogJCQ%ikr>1#fXtr^JM&I4G~m5RR1gknJ= zYH;LrZ}l0{_~;Z_28Ft*&t}%eWqDIEOG38dS7y`0(eS3wy?5MQDPKR&ANJ3YeqGj9 ziw???uQ<{f`&>yVhF^_*azqTDTaAn*6>ZC{=}wm0wy#p03{|b7sydx(Jy99W`e-~$ zoS1OOQbs-i*=?Uw9xJOWrHhqvFEdqvYETR+P{$;@DkQ#ZkBE??l{q24BSqX(dIo1D 
zNw8ux1?6P(KQVpl5$-(44cZRetN(4+^>Nb+$bx153Ej}|@RU^pC8U^KA8Q_E*dp1& zpxb;i6$DM~wHhg4RI$NS#w``6JgHyTWNahuy2$_4Ihi*|T8fVi3=>;r^)Q=K@_3jH z>z0;$GM>xwOIyvyn63i4Wci6p7BUEyUxQhbxz6g+q)B`WHsbb`lH;$BL)|osD?le> z<<}7O=_i3x7y%+XpM@skG0`KLRC^6(yJ}2Z3o{1!kP#W?(@}kp8J2p+U}!=uzt+F< zyn69ShX?zT{3#u>1INWe9&^pBxdsi!FsW@s(*#?|XwZ z#~u0CZ+=`+)2X|lOkHLQ0KL51C0<^~3d@`wj_B)NNR1oie6sd1Y3_Jb=$6!qbFHV! zenDyS>27M)pHc1OrfBWqhPLbeaU{MmmL7oQH23$t{&w;u z7E)oGN%5AkjA=&rKoxxnz0EfsckLB1gd7TNeYmb|G$`k}AGuDty?D46W=uSK=s~U| zL2a%e{H|clTp7$hd2PoXOU94#dHe>hCc+t6LM7|l`qE;v>fwz2HXfV1Cx8!u5d?Ha zIqKd$7OID2bG0j}QGxEn_I+iCZ;dyOJoe$Jb<*Tk`5+1^vl+k0kiOC``LzbhWWi(d zTe#d4(4^hSzrU*^D7l#vwe%t;sk}$1OnZLiZAPC_L?kmk?$JFbg) zVoHcl<1l2eKFl{niU|9bkq=_zxA$8b)ILoGMA+%Bc(J@EYHlj2&{RC0skIA80@U={ zXo%+0K#6w%r)w*sO40tq=H)c~mH>9BoKhhPL(a>k(^}6#r|8r5E8=Ns`5t1ww+8i4 zH)?oTg!rZwNx6(N|Mb>O2QQLUrosM%Cg_!*)va&8wsf@-)%4|4j`6-h6ekZft|W;M z)cc6*xh=*Xa&+_!u~{_Zvp7-vhsS}5320|kj_u<#KK0m9{%(E-hW~#mxZ5FKUWu9b z%Mkl(W%76HY|v+}w<752nJKkRJD=YhLhe%m+rPBSewmNIo;}?if`LJ>TkZjx3nw3P zn|`$p$Emm4b~`nNsr+N%8jG)Fl}-nZ6DqM2V;P21$y#SUt`YXv^o-f9iIpRl`u2nI zf{|f16(pQYUcqk`yvtMH1!+}nIY$Im0;C%djD^JBeB`Si9M|$`o0|;B{8^MyFChAJ zSn{6l)H*WMLb+_wVbVGTMf1Hio&B=*d-Uq zqx7jXd4&>8c9mB`$JQf-|D#RW{=q$_qb<9&DI8_M1my1s#iV-c$)uBfiEhg*%jB}Ie7g00*)@$NY;(ow_c zn|PB1tx`vUfT8gWBXr$M@g6piuG=>Wo6Z>R;hMO(@zB+9QaXZ{xm&e%6mM7LznTW> z6oIfF#hpymPpVxev7^Y%nHTsh0kNMAwnG7GjMXN{Opa1=M3-3Au{K`aQ<1hXcACGS zl5qq_ePHm+^d~1wgu9L_5L@NYmCf5>KI3Adk1`P|iD! zZ5IOYtwG7%X$prQG(|B4Re6P@#1grv_IF*AX1 z%~e8wNbvua9BbZ}N8ZNe=c+t%4QF3w3E#s3IP^M|A|qbhrk|146zNV~mx_JXd6l+v zjnF9S_5D9~V2~@S$c-gXI&~UYy z5qBBKVpwcmpJ!)YMK`@xPbt!Zdx_?@)?PPrfzYKfiI3T-wkqj;P1|0*FM;Lz3M;=z zkuVMYXuL#%*KvXg+pd4MSxC=z?aGhnWjTZTZZ;XDs>Z&aHGFE;?3~9B2lUuEj6X6a z4v>d|Qo%##;l27$0!xPdfc+o2Ul|0;(-t>{8e=~vPTPgjV@^>6rn&kZc5MFLM4cV? 
zv!FOZx*lyTBk<9aso!NwP=AY?+PiD6WP%z05Cz!?*|4pE_@yt;bKM#yoGsTdZC0Qi z6cV7HSHOJ-)`rz=n}VWd8$)KS@aoxa+8&_qC4)ZO(v60CUO{l4tj1UCR)Q1Ol*yw$ zekjN8;qTLoAA8A!D4`qGj^9(n#((>bC}31p`>})K$ONli4IV39+T68hEf0u~nH+se z_>lhD#n^G;B?zk?HCW|VVh?`nw zSWJ6NK`ag4xkXAofs1Py6hoc&quP1XJVV3!5GI=_*sbmTdrjA3+3z~BAxT@I&g1*N z#-p2`ZL10pH*a_yp#$(;(@B<9ckX)e+Fp~zu znN=2VGz^fMdOw?h=z)bcGWKB-_09^m#Z!lB@8#I4B1rLFAcq$Bj0`|xtLqbQ%6{k8 zQPXthMu=4ehJ91fRrwyD!nKEofU9S{Y6)xZck# zG)@aV%mY;bF6qqRj*}l!a`8MMDby?ly=H{7_GK=Q+swTxMc;>mGpO@6hd^?)FNf$izUPt)cnv&XjzN;7~w7(DxR5c2Z`QMs`% z2#RlYN1+;TOiRCm(F6${+qHz%LVg7|{UCo5`g8XqJy4YEUnPVXjCk!py=O|1e#r{5 zf`Cc+F@v^}2a1I4DZ8aLo)CGRGv2oR@MLTra>(;f)1)Ov2+_gfT&=$^>_^3dz$Edf z)xSkw83Bm%6z`{Pvw=pex9*&XGUF_~u|bbU9eSfC?PUVuT|v?w?xZp&2&+gMb;U!( zjnXj>W%J8F`a@;2I5RKw>I91jxU#kss9fG&db3h6ja8lzseb_$ANdzDUF#gfm)tlU z)B}cEp(PCdMU;g&g|#-jr5M+Tj+J3OSZ7kLxzzB6@VQCch!n3{x=_P*w+g!ZOtD?H zDY6V8I|?9_q7fv=q=_-~tD>7h2ES^1Th3k^CX8wU2MT zZ4lliQt_i$6uyDSi5YG^j=TxzyqP36RH@IVvtTXFLSCJpj zy5P$5Ve(W-u|)hrRF)AwAdeWCU;vUn+1KkB&NG^=Ve8Zi$Lb#aP~|&op2oa@p$Ss< zLc7UI^w}f$=s=u4v&diL1`?zyDbz%HXf7C!=0E4zbhK26Wm+!-<;KFOPiAGI3ivpwV@z@w< zGtK!i$05r*?kyM5qoT>=)fq8a#f0WW>aPj@#!VDt(NkvkM!{i%W+HJt7rGUn%Pj*t z;Fpa8ri|La1Kwq%?y|?O-_$ZIgXC`wo_GrQBESmWRyn`hb^ul1wPQUwB9DWWo9->c zA6wxx>QY)U4kI42&j0Ek#`ENyxj$-Xq?gaYF$>;h#o^`ESfB5EgVx^jzRx&*SJ)pD zv(}o(sgE7@L(-zND=461sp&V>)8-^PTt6mDjg@y2znZ>1&X$79v?0y?h_-J5sF;;C zi1m(B1m^4-_u&S4RmzGx9eJz(XT&w7Wa*$ix0Ips#~J-O{>Ns>US-@~^;7I8Kt~L% z-3+wa##fxXXn>EE@$!=2)dm(bZ-~d<3qNFLq^#Rs$f^TrdP=obOMogfU@B&;!qv(? 
zK$W)^vm(Q^2Jc8a^@w@XA=|pv3C*&x?Ra_HX({WejmKOD#H@P zYHa*dOJ;p7`)!S?qV$Tm?Pv;Cto1*s38=nJjAYZUFr}VU4!DK`$4%cfpnpsybaCp^ z8D@T1E8L6NLB35^DH@=8*`URPYJi*0)>^8NnvBAv{4fGgZ|o~YXEFWTY71v-Wa*V(uj1i= z*>0*{VS{C&SO(c~d@+$bX$X)arGyVMO38mnh)ZKyFaTGKhR=j(1SuSdeWIEdEnSOd zYAIBagX9tY6KDbUE;Q8CuSB-vGAO&Kv||#k3D6H@kf_B#X6jP)mTtCT%kEWI8gcm# zXq}&*R+vpnQ(kFuzL5KlRABY-1?L*j{L&|>CutbuN*X5=O^%88_s>Bsc|D8(?4IBW zj8-SIJ~;(G*GYVZynDx~KYTN>Rks&`iHY0glub1ennydWz?hHrjl=le5!Hv+dz)Eu zDhq3F>+oeuX;ibvPz&N~noHGN>I0=b);Gq$|F*|hGvwmZsF#sK_3&E!IOn#B1V%;e z`cY{DUzO*bOd2s5RK^+;@yxjRV=E13K5NXOi0bHXIdi;MXxd=$H~H0{u**UoPJC`J z`i*aIdoyX$O*GYhsYG&pVdv)&sH~E|dxfzE6lCY2Y^@3nJBy5z8A!~Kfr0)Kdd0;F z{W|nU%-5Gj>)ap$HH7EG^Vv`+GbMMQiep>D_MGZd+iv>k$GSYIbk}!n`;hm z+}a5PGm$1L%v5s4i8O;0@$&v_v~jW>8*m_71sMrVuraMP)zHDMvTauv=b4H^u$7vR zG_qWtv{VO$!RrCJ2u1G7_A@e~%n1FN>!mu&=9Yh{I%_70bl%*GmN>B^JFKCaxR)48 zBkK<(nRBc~s&#Am$@@+QH&ia7PZ7+~A;}iI!WzyhTG?e8g&(%#jczuW!b))6$!u!8 zAerzvfyw%NIiDVH(Q)+3oY{!$H%G+b9M-^^rjpiK>K?aiwk6U}MOdbdT#Bi{)gwWd zR?p}Ksbwd-D6qgJBL^ku=j#t$4kYY}1baE(ib9bcCHVN((Wc(!wZkt+0Olv73$pEL zkHH(y;>3t$AtA38bqf8R{gWRX-k8AK(_vIxE@`!udD{f<-QjVZKDRTW)(q^AEILQ} z86A{9i5m>&*mJ){V+zT;K7{76ynoGv8(=o5re-ArlM38D8`q++EV^*v;?+5a3sJiZ zeg53(=0@8VACjt@OT)E*i!Ik8uPW10zs!TQ+7`IzgRt-m%rm_`nSuKW4eBhbddW>0JNhwTD({_9VjwhhsLRx7t(UcN@>L4bDaN zS2LQCboNip#SK_3REQxZnV&W7W;Ru4PK8|-_gBh$B-xWzGB&g{lE?v97Jz#}IH{FT z5b_M^8@z{+i#AQ0m@GZM?bYVrz+AWjN=l!u3ofVq4ylfV?LyD4 zmIo(N?>`RES_&q_lZ4h5v(#VOO@7~+m~y;J`4P#c73xLB!^oep6^6`SnWo$;2nMlF zEP!_+v~(Ryz&niq@H%|vNgTL*fOUAXRHj)2m$Qj)-!ytRk}>;a1tMTohbYKIz}W86 zkh%h!k=Yay3sFtID84pNm;DOv%a7m%kpFFvFr`Zr%uM}NKtGP{jksj%!8-hYQ3k~# zYA-}}`kVAiu#>15HR=ajl?Wf@muD!T4a_+(lr6; zZ%ZGY#^#-cW@+nKi6a>46*0K`u4^OR^(T{t=rGE9sF~yy;1usoYKp?gB+RDHo>J;i zg1)}W0fn2w#D=af+OzVDO62n~m1j?sqK19aysu`S@o5kDHm5rd zuG*1oHDgG@L-fuw4?ztj{~ms!V#Si$KYrxq8Nfbp<;U#i4ONbrW>Kug4g*iKAPx2_ zZMz$L#R2LnVj9JrzvP(DT?&F@MVkJE0L-T{6jent&@&33*{wv(h-E;o79B%aSYFN# zL4E7}`*Oh?4tM<_C|%&m)fN1XCHksz@13631j||bICLk>>1T_iue)rYn;&yeYlwqF 
z%8@s)=X^It)ueP9u4A@KQ~I)_$FLL8(&O#VS<%MK`l!GXqVeZ*38t89RSa;wr@H(X zbTh4gdhd6GVr%Ro7qpbX1Q_g)>{-53oKU(o6jTI0jO0jT%G8)QE91Z|^8$__w4k5( zGpR{*T(E?8wHYbhn9sb9RZJYdzc=tZ8+>jzlz~r`93zN&S^_bd`oX_hh$3O0>(5CC zjC1;DCI9LtM3r=`rD?#8hI7YbRz2dmBN~=ih`uzHo(U6dMm)vp_{}CJbRLp+?!1mo zHMew__wbBc&`kJ%E}?!Jv*W!zpXF9q*3GX%_Zd-I^3_`9|H zy4Fgrs^2lMmxgl3+RuPSgkFZT0<<%-_V?+0qwS7kW!iDDrS8$xYZ97dfL6DlY<5Y| zLTPEbQ*EM?PA8EB(R8r1Jgo6&D7oPhDr*HvVN7h1T2ZdzSO8PJbIHt69*}NKtiZV9 zw&%tMz98Gshl#nJBXK_OVTwRKte8%W9N>eC<56fO5ccQwL~)dYlNkb8T{bI!Fi3a( zHv;?ZUBCX~V z{eQUn3Wun=?QInW6)6#rZjcrj!9lvaq*JXEL3}_xS;^k2Ek_^o`@fzqc#&!24DjwOz>I5 zqxYD$kJ9?aqLzyoWl|JR!VogkKN2BUcwL8M(hiG>JA&2%z0Bx`#SQhCHkFZ-idy9i zI<6xqV;Y$DZ6p8Kk(XPS4}g>Ag7-D7Q@dA|z3bHkMTKJl1CIAtQ<$8MU^X!}jMf}l zVvIP>slv25=s4cetgSS#AwTH|h=Map_gBB5NItT9mV2MVnt(WCfSOW8mx*t?7^8>h z?tPt>xVZi0LXI40u+sz7a|b7;yPB+FoxbPNbbjJRQ55x0Rg^pP%6!Yz?92L^p-$^a z=biOCnGoKOGU^SErNK7mnERX3JMK=7&KLqr*36R6-~WJVbhi^t`1s|0v`{7R^H_4@ zn_vK5pCR(6f1-rfjZ_da#<#R((rP7VLifc-8b0*-6h=0+_J7|EMsBJkqhFbe_+JR- zFmo|M6U~kJn!Om}#}Rx6(?8l)*T7#Db`Zzx{1W~lH9`3SQB>-%cw4Y|PGj=t&gf@n zp=0BCzPKrvH~}8&b7JrS)w{tdwuQ&SMylF&<_g>Sc!IvK?y>*+uHZbSbeTuR?aTo( zR&qAq7f6iXKJP!~oojBhBW?E235i%2F+RAXr;d+Pp>?+XGY8#Cf-r$>;OQ9Ol5AYx zHfLPf3#5A<$={j{;M|LXy{(OT9lHE?_VRkuvX^9ICg<@oEzC%Fp^DPm|WhEI5vq%N_02 zoOGtKKc7D?3J=ne$WN!kN()wCK7D^*6sa>NZEAbdfAvMVhH{dHlB=GR=@ln)c~AT3 zQrf&^kSRk?Kd$c#hotz=4!RCtt2UHWQpg#PzXyE^mHLFvIA;QR%tKNm;H>IJ*NGm`!l&m44UiWXeIy) zI-kEtH_UvF!N0P8YkX;YbG9;fRr-@zQcPxlT1ob=F6G^l=WfnYWH`DnlqFj@E>8Fl zwUn+jl_c^E6@sLnQpei#6AsP$nFLas8rXEYu#bX3Q;9m-y5;fQ<=Akb=T80M-@k}( zRwn30c%7)jsB)$c~VdiWRa=3ji3r z;TlOXziu<9eLMPREiJ4Uhw{<~A3g|^SGwqP)~BP%kWiDith7|%CL^Hs{UdCNi~jOK zeTqd&m}zm77`LIr?srRZ#lMAnk~X|er&&2EKVQ92*!9ZlvS z8J5Sv42kUs6xVKJqfjA#q)S#-M_iK{4{+sF3t5_mO53Nn?ex=G$1ufhuKuFI{6|Uq zzf(*`_vh)=50y)L0o@iCx^){v;uex*R?Ulwv(V{p_Yd*Mj4R+RR5`*;MrCk`9w7V;rA#SskKdUR*- z!D}3)GNnnimd~^f-lZG~lk%K8VHn)5=!;9feoDc^g`x6-GBXVEwi-CP#a~xdG~@T9 zc|5@i**G-+`{E{tKjGh~@8>mlc#TyMDtG+7q(6B+(*tw|5jNeO9^F4VilMWLQ6it1 
z30o`8otf9C5+$}Ymn^xz9(hA~zYT-k*5*NTZ|mfIwn@z1$V+k+)tEe>i{ddYNQda< zH)mlBu~%q+k;Tvtj(Nv^LD30DUDR3)YCdOy3KmGRHCj0{j`I;J`ia#3LxAL@Ql-&1 za?Y%;n%r0%S)$oWsjOWM{Lo-CZ$Bb3idQr`Pga&4uwp9BQIz*M{et-kY9`k=h33FN zDYB@$m%b=3do}>Gf3wiu_N*?SjibbOnm1^<>UJ&-sIy6MG6GDdgYOR(-Xz6N0CJr-&b3`8=sbD>a;WZU_eCzV6kRjY8p|4?f{T;BULKsf*i z*7WD1&*;YFbtJXD|rfNBULoCrC*K( zD;jRfSNNQ+ngkxcobdI-;jOJo(&vOJ>tO}@WH*11rk^v}&20Sg6m!!D@0~BFDgRjB zcPF94XyWXm5hj*^+GsogpV?1SxN!e6ml}kbw!ckNWTiE7f1Jf$+O-zhJMPP@H1Qu_ zpM`}HVq{u9`B|f^#>uGkbd*0Nv`WHaM=XuX&TW?5VVy#f5g!_b_T%sU)qSXFJc&92UvG0Lv4Bc~kBsr^RGJbqG$lu(apPtX7u?tH!6Mp*Q1{P>z*OPB=plY2oiN7ST^YPHGSq-|P zM_LZbEeVF-WBYrj&MADHvz004ZZq4YR8n`n7$qm z$-Jsaas&z&d-|DbMi&Ucu1`koR5rXKYY|`Pj#AFDZ=3uIbm0<>T#WA+FgUI)1sm zuaI8?ewk&*J9kzkK^fN#YusY(VUNw~&T0xt7JrK$PZ6pk@1~u83`T?|Em-w7ja7>E z{-gBMIpuiY*D|k_<%_F_fz(%!Z>4nh`lKo*(Bks1v+6w*<~4)T;OBMg5!k4itNC|+ z&2E?r28$b>JwN*VrvOQEBj6`~b?NG>cEqRl{K#{O;d#YLukEB+QZ`@`R?ATEd}cR! z#0w*QHH~S*z_=peCMFJ3n#?Qe-fSUZWeX0MlqcI2sS*J*nwA<2!Z>L~pgzJW)zbbW zX1NkMT&@WSz#6+51`f4ZUbc)CY z4$ArEc(`n2*?|}9KtA*aLZXAH$>V1Qss1qODbUO1ik!8~s;~T>aVvDBMF}BLJ8Ia$C8$;**#CC>q-=`+ng-ryrp-Lq{Y`{-U8GsO|h6yGFl=x%X!D7)_bhoEkJFS&VK*3g2g5QTw~t4!-$(CsN*c zmBSdf;y&uNeMl934jbQ^lHPK63)fNu(?Fytl<{{EWF|l6@8Zor!Qp@ZDw&(AAnxS} zzfi5@E7OWJ&q*FiWZ`C5=x{g?_V8f@79b}|?MRKv?46b|gq=mMg_#mcOvJ#U{9+3uFaGK&3_Ol+J4Jx8Mjk z+WnY6EoQsDldV)jsQBrn%OC(8#xW2YEabQQoEoZXq9%^-6u2peT~7=Yo7w^>@2cNX zbv1_9FvIzU+V8~+r0ZCIyI>|wnyTcgOWFzp7wnw|s zge-gFwl2y)2YaO30lHj?(_S9PrH+lQdYh>xQ}qB{f>PjH7Up3Lfc)at0;BrH%Q44SGcQ_zsZ%wvnd(+PfXe$CgJq=TQ_Pp2!@pL^+y87atYc)j4K7E(%$Y8Fu%mGfu2A~WZF;%_FXLvSW^>Mtra>gx^Umq~l!1!$?lsURV{tgMM;4yQ zUy75W<`7^>8F*4-X(Ul^nPXvZHL(rH=u!rpmgy&H!IkobFDe-IVPkkaup64VCt5uH zBDoR8ig5BlM`he(_>fjfq{mtKvq7)uOOIk_12vGJQd3j8`byDAXKS_Tqh;<k<%bWzT377e(km`nX}HOeGauQdaf+k$xyht2;PJ+2tgz}t&AcSsz2tR9enFBq zGGqGOihC1h?s_>$0YbMl)Vmj^EU)}Yw_%>aXK@eCA|&~12KasBoUGE3T=l*tLO&Qn z6sim^rXY*+^pU7?Kqx~yCL6H@ujP&_G_>><++Olt#LV+;ppMSHs&2K5aoHkl{l1MYKFq3VFA=0Y zWH1dVu+PnP{0~=mVsj7V_f~fqVVFg52=DcO}L#L(sgRCe&p)(X0L{1 
za%8#j_HguL()W+4@kI56IKaXT7)VjAyi0DR^6P6 zr}5%Ty>L8c>us-@-?&|euUpehq3560XqttJEo{tG5Qojr#3bBvUqPTMoFh6fmWAEa zT=`y9iTQ9|(EteR&j~p-oaSrl!?M*S{+p-8fQ_jCR`}LrxOqf213&uv&(Bf)_bTjX zd@onhQp9w+%wszk%UrcD+GiYz?@O!T5)taMV)?e&k&}Pb`F^amro{C4N=M^Y#9a&r zEFCv-jq8gR`}B^MKJxF0D7tt3_7>|sga}vMo@Wpvw(W4w1Oznb-&VUaS-)`M&KvuK z5@$~FE(!EGKfCBm8?@;C@7@!cQDxwg`7jWn!_ynV{O}Bn`luo;Gm{CQ9eP!fqSlqN z+IHw)3FufbtLsB*pztIN`@LXGpVh4Ni*XPmmcnRQfiA-WHfpO=v=xcKF!1S!ZmY`p zA^EvBcFLV0AcO*A(b7~FC~y!tYRr@8lCbQmD)qJ_Ij3L#%N_D)GuVw z_nJsi1JFgBD=Y?LO08R;Fd+ML&12DpMGA};Y5LBVK}hV&!Zr;1jc?h>t+VDFA(4V6 zd#v9BC%%Q&*}(ao#-uTgh}qxW+NxULS-v6eB#mdCxGi58HRXZpeT`vIM}{RV*N&tZ ztGP0lwtB6tq+^Uv;2)1H=z*2|UIz<(I3w>5KNHg0hV`5OJvZH10Le3-@hG0{u{!~t zgeZjjWlR>5yuLDABXso{-<#Wv1tP7Q3U0vKNB{=BIB2{%kxzzsUICopmBXf2r3I+s zv{r}%G|N_5*wMnT%fGl?2=_NLjDvAaHe;>N?7Y5_Z~_$F+L&x=g~)r%W2NHh-3f0V25YyY? zvhh3}<_a+M!LuL-0;T9;?mwm*l;8hakzSH1Dq0Q=ir^%cMKXF{ORk!O_LZUfvMc^2=H|0bB!RVwlCQ#!yFXySl?RbxD z<|n=K@AfExV|1aB_2oLNh1YfVzvrkz8Hr?cL&Eb6>2SrwYjfOT%AU*t3OPK)N0bEA z^I+ovHRvvL7t@R?#hz)umlfDy0O7!%76qfc)5^LXVCs^FbHj}G68)qv_P$L+|BSuW zpY2xvm_sMz?bC;C$VvWJ%2y4XM<1r~E*A^SQBx^?PL{`T5_e})ZboaNPG&%x=>y2{ zVSjCEOxBf)RC*q=eVQvn4?5Cz@Y8TM%I9PY8PVT*Fq2=WpB4Q5;yfWbpfi%156$xW z!Q6lWJf$Enz|r4q&!0L##qcHBJZ&Ah|I9Jz7!vsSHM#+kGL@ZXh1B@%TgNpmC9kAu z{s(PF zd&*g@kUKJI-jMMDR~IpALcx#C@#0j6JY4e7O71Q^#v(xz>b|txvU-~{91G5N=(`#+ zuq@eC4#{2Yxw)S7&##Q~%PHxdkSM=ZV37#{FkVj5>AS&@Ys_=`$%%njhY&0hN-?F+ z`S?N0G-N0SuyFOke7^`mK%R`cM@NqyFx77dY&aSi?jtwkjxiziS$>a{1Z0}V655ul zb7aC2?_>Hsz`(m-G;&`8pQ5GOJZSR|IJeyW*M!;->}ku>A1*NZNw2&$YUgYllbrvE z!2|amQqa%4_7B*SVV@V*ml*Vh@o_bGba>J;^X>_m?L$VIvq=oB7r`h7&Rv zKz*IjI=bv_O;`a$>1PLcQ114)3Bx<>A*_bYs1-=Tv77rjtZ{c}F&6}FnkdmzIoGV# zh4Pw0?@Db3*KQUKO=Sw2uzj!U9XH8UCyoU>Bv+7Z^*qKL%2#p`Kh%(Z`pdZRQUdM$ zwG=tn)9rT$LOXw0u@gPr=zth;l#d}H4(|&!<9%HH(E~k20~c8S0Ih&vu2J70Om=M9 zaW1bdxyfnsvK}&vVBKJ_JyRxT(xHUxK%Jf)526r%?P{X@0|$7OG}|&YQv^FMF69e# z3rB&$=5v&JPo5itR?(lrFT?m(pL`msmz=>NJ5{VcR zxs4&@A_jcSDwoDhTxxVTYEGzR?UZ?Y2WkY|zqrffy+CpzC*>RmU>aTRu_pL(EY5s0 
zs>y1=qhDJc#{#&S)s6j6Ju5e7)?MZz|Nd2EuH1e1Y$RxjhWas7k&Y+5t2f`oivxo= znqGM7#f&Q(!n%oW+Og3$E-1V6 zO+4E=q#jd+%!hQG_(diOoW2J#1sDZGiq=LsXEskt7qJs<3+F+s4aHkh@k7#dIspy3 zhw5gPd3c-mR7o;TxNh)PQ*oDd2cQ_C2g@i#SlCdx&OM+rZQM{h=H^Cde-ir7CW@WX zJjYJ74p5{9O-&0EClVcmZWcc@AsS^>?*D||`kJLj0RuG+f~oE3T^;Mg5e9#z0T!#! zrQP+cIj~efg_UvPLwjv_$ba(tp=_MD+tvV2iyiHqayK(IUk4gE2&FIq2;E9UVz$JK zK^+##S%Xy42Z3JF&Xx`Kdkk|RwG>mW^A^5<6*WKKDzCdwJySgSy}h3V6T5X(!h1A>SUHqt!3}0sFCdSk)=L9-l^hn>0v#w^qrd9++L3r;ZLp z_=Qk11U=N#dTWj=>+4Kv`Z*qL5CK}z_kSD~m-kd>pY}Im$;n0n2h}NCW9&+BTqip4 zEpHUMd$NX~!cqX(y%I!>-9lv4?PYjn<75TgO$JII2UD9v<&Y3I`VEF_k%$&aMPTNi z6zXng946OaCR^Virjt+QEHul&j`AXQ+iN{$5EO zLAg%(tC99`hN$U6)J9OQRip79k4K@psSlUUfYkz$jpU8lBv+Ed=BupQGuw_zKEhh~ zY%k{672OU9q?+;kq{Q<^!(C?d(#Z7sb+evXv#a$aau1FTNg$TDXH;qQVTJ}8j<~)P zVT-`TA}O-%N{;+lNH)l7Q3q%XHU)0Z;~a~Lz4PE7eIx1g0DHX!&yxUDP0L<&COsx7 zdvm>0r@U)!@YvLlL-Ks50eGbc#Og6Jh!LiEdFGNhSH5{PzMQm=Pp@V=wKQ7kwbIZwBN)NG6Q+oO<;36 zjmWse`&%vsEK`B4AhJMXc!Jt}A}=%rkz2I!6j6hhy)e@Zk5+?0Q7yhz|HsFfKzb!IDL(?W_KHnubdw)2Ra0v?e&k<4OR0oPtLMj> zB9mQM3_C1Jv8~lv>3kFL38BX>lE5u&;YF&Sl=T|wMy%E3+c%msn zKaoj3U$*!X>LfN1AhIeLO=sme%!pk_6eiP!qPn2kQq`&DO&;Uwu}Hr1iK1cp->cN1 zxT(0_&FZBO3nB=tG=u7-JX(vl^_aL3o9yw_U-| zf-0oX``dD5Yf3EMS(LfoXFTf+R@=_22g2PyOYin5V}T1^bh*}!&;o<(1kq<3T9FVZ zJI>4iw_J)R1otl*PSFy6z$PSqBlsGnbr_R1_6zLo$M;-9Kl0Kj*v^;%QSNw4>ToZG z{Buan4KUn~Dm$+|W-)%=XFpmPG%~lzM&_~Xcl-KDqmPAQ>ew0*kk{ z74Bq0YV#n>SfepsU}wYV!P*Rl+zyuYe|EiP;m2dC2Nij{y5BcXFN-)LDpH7W`hbpGOdEpL z_YzI4WKaYnB?RL9HbY9Yl&8hd*~Mtq-F4Gm>*&)VA!Fo9SgzPh0~X_lG4M`dZE>q; z#4F)g(&n@kW7GSOmIkQO(_Af}abdZ>szv7y;`-{RV%Q9D(H@JIHD~LRfd9qWK6*WI zJGeJc#`U=mYriLF%Uk<2AIpvJA?tz}>McjC7Cu~3sz*91@i8t8i(lFZo^d*{zNeiZ znMd|U|MwkKT(-}Q(QJv5&nat%6g&D#gT!ZKT4JN2b{2&b8XWRh~m7Pg|TpfqW=93Kk8O`R&Wq%S)kejg*`Za&OAH(ezgZ zB(4nrGaj;!IqJ;T>q}7Q(mkYw$!x9NU&FmTXzjjVBV=&NB<$h#(U(~KCeFHdo6>q- zuzT;R^rG-}K-kWC>&mk-SB(#zSs9tK-O=~Uwu zLOUkof9%sq-0>6gnd}#BXV$^C^urDVEz({M*%}_#uCHvr z>V@V%(@yvq4GL`~_p0hK z8gSf|*C&7TID*<2gIuJCy}pP8z}{W{(p6i|9G_&Y#Al67usChq3&D2qaE}-Ip)zn< 
zuN9^Gs?R(d>yBU3`ZTA#gGzEKwP5nkMFro7-jgOXnH=R;h`M?HF?yF68Df zaw&3kK1;u%RQEq4;`lDfUE(|1BMgg8D-%2awBkhG{&W9JNv^cte{ylCzf7Gg%MPtM zCKwKQ>LKnac*^0$iJkIbhvHb5wueD_%4@XmW-tl@J+yNBedrqhM?kiKjru#_{DGfYIGNKY^C?#`9ETShUm8F}xG zO}Ji>1PneX&e4v1j$hLc`ZNpHau0Bg>ifpo`Iv6rF|R(A%!lKk3SN#Wbgkm;AFjV6 z>lJIZbQvOGUNdgH@qJAXSimXtZ|7?ECT#sO4bqkQt==N5^!YALM;(4wkc+pQ zoWD7b1sWRA5V{$Un`6m>vGzv`H;(Xgius(VZcBfL(|QAz9Zbf!z;LMz=}+>CtsN+A zRo3F>igg#q46}ECB!JT3O8O5v?#xqKJ1_jUOnyxW3R>Bx0>}#l>_}+UAuV;oUdtQ4 zlHsDT`ov+{Cq~B7E}9v{9c)qX$CmsvE=p)fS3W65X7}lryjWR# z69FtgpqU6e>`vNVtj)*PaAdgG)Scve|5#_ezp*cH*br+Hg4nqer8@d0usSpG!L<2J z7T`IG=x@6=w3z0Jd|I_V1-*m`;q=aNM?Ei(*?mMysm#$X2pgo(ena`t;?};^Pa~Dy z&TzclCj9*T0DI^NeP0x&qhPr@ABwS1JE@f1iU#L*&{Kmg{Pc#ZMz@e<@k+bflKXE) zpPq&$Z$OiOZ64k=^5ZDD1>ORXn&bive;SHO(s=8K^>yDC* z%_9;Qm!@k6_5ZlFWp6~(H}L-fUb zLHL@7jR6c+QV!9Hx*~#}_MhK}-ooZC)TYdikym6K=Y=L?l9a*`9o_YZD&G z7#U$^U05Y2usfk52eb9z3r#-y$BcW%?w~;AHAShuQJ>v05$A*hCz&75hhP+w95fHpdfa z=#6oQqX0Vj>E!tbiq*2T~dBzYJ(&vzK+b%N>_JH|51pNfSFav2*W= zO<*P*hKcIG4-E@~7P>%ZYAR#{R>VN=fwH#hVs^3!{|lC3*kv60m%#Qpu485&XD_#B z$~v$?DHNAHkhT`wx;x7#V>PsPr*=?VaO3Mo{J+`#M_;rRgZmw0o??ZDU>kz@d%N2s;K|Bo!OsQ z*=0XG zLb6q(C55|Xf=k)_iWJbRdO~}ig>DFKr*QTsWw;t@d9TSJIjh>^Wt+FV(`_};-gO*vAW?{iG1d z^uw*63);6-+H+`INR6;Vb(7ZkfxJy_s093iCHJ) zwMQHjhGvJ1M?V9kPfZQD<8Zq@c}Sc)Ekff5vvX|3Np*reGXSnUfNfzf#LXs*+zd(H ztgC4-3wTS|9NKSnW`Cga>k2Ycfq?{8-;ZG&xs~7gV=0GdD3P86AYYJ>)rf_|3Z=m< ze%eB_6jXXgJG>4?7E2T%Z{?VzAh*ixm8i+{hBj$U?VC{z*>9}bL@fcLma)Ti^_?Fe zq?UB)-?#H&)ii#KKrNra3o%epA!DYb*)PFrC>3Y2cgj_+!hf~s2~dF0K5&DmCTjET z4AgaFgbwKfg%(6znM0#G`)X1TC4@PmLzigqg2|-R#JP#*+GRSLdP3=V+;aDcxOwkz zgK6LM6=JJU97(#Dr}BIqdwN{B07O^Ly{|LWO7V(4N!+x&D{IymSs1p;*ZTAZ8sXFv zuR1myX?Q^Sg)um!V1*|5-_Om{ECw<#o1?_(-~-ts4C6_$ZHbAz>eqB!jHmP)ikhW% z`sDHqO{4?+%Ua0+yAlG;eed;?r#9Is;kPCYqJZD&k`=0(0DKg-GIc$+@0u3jpN#CuUho4Ai=Xgf+ z;TnA7cn9pf+%KY7pgei)O#*-&F5AGEIv@oJjg45~=O;yzq3l*NkqoreW-W-UZKIN) z!jnSH-Rb}Z;{lEzC8)of>d*B_(N-K_R%?s{+}xnc=*_(X^ErTBLH=7(T+krQrEBJt 
z*O~4TVu&}nzb;Pi+wwYxBC;1_6ML0kFjB+{Y=U8Pl)A>#1J{?0xt%?1qc0ca-PRke zp%$dAu%4*|S9{zD)!xpI^qP=D5aX;O&Nj6e33Px-TQ;BpG+#r(Zva3$48f3(oF|iw z2f#L&q7+->KL8q-ilRDWMSQ~Wd%%RFCXwgV`Q`hJhvNz6Yeu-3J|!+MYuU%@>0 zoDY=+8UfOidjnSiYr8tmit*MzJJQuwITvlQhtsNU(&f@1g3>IgO~Isk7CTNM9005h ze|ik=`|-L3In5Vh=I843linPf{}y1WWq%N05J_v!MVl``fV0;{Yp)#LO;vKpiaxFI zX%2#(5-C&S0GLvOLLuGnr#+aqid}PkcGYcJg|E^gf3&oNoO~E5g%%9gpW%7-qlc~x zynLkskK&Ubci7JpmD?4@;7ku)&ljMoa0%-AUU_AFR3%KfOGoi{u(f$lF=kO=pL6d) zk$0d%^-Oj8raq&1dZ?gjm;Lf)C7X4JBAozy(ee(_uV-(&fP>}u3(@{t}{ZE%fj=7 zz|Q8fA5YxECgr}0CFucmz72F;j;KTUC>D*@aSZoaJoI@q-9b)>m@W%u0d`CEP;=K5gOl{=&dhG&*t4*#_Hmvcc7do$4C?dO&}gy{ z=qQP6DTcsdOM_4jNx+U>zKBlJ&M(JC1pyFW_%F^O&bXo%4x=>)}Z!%wz+c1HCa-eJjTddDl+=bIYj>KGW&k#)BM&3bBobHBe5PG&unEumaweDMLJ(yV`9j_J+h;5E* zR_`H!m7~VC{o*(B{_u479 zrA{U{DUc51el~$#?$c}t&$^fK{D(D5L$5Rw)h@uRftkHRP(}v?*o58uzP^+$qki(M zEE(KHv&zF?zPi~}OGhr_>R2?;$?!flXGJv!XV@UzSWuF7I>!3{o8vl#l9>Wa0xmYK z@#ifu$}|bs9-xxO!S@X~iI_slPWNVGg${PBP%oYIV_ru!wa$70YR)3scyJey0J4QC z^lC-A#ub48s54P_U-x^H!h}`c_E~%B)dT}RDN$2gr+g}vObT{!8d}xaoB8}PUw`fg zYwd;7K3vuJYqa-6F%2V_Hh*N$0`*LHPJgkny1F)R6XHpv(cK{cOQLs19?=S!P z{ojEOBwHxQ>q#L5^q^s_Pr|HY7Ax9rxM_28Fr&g?Q7ub0;Aa-S6k~kJ-H+`@x!gT- zo1hY`=68cP2e6_M{1|hWWNDSW@^@rRep-V@(-WIvm?Utv2j+Xc4ai;!dce59kQkmh zA>vZIgVBJCcu&+GC&^0&;w0TB*ic=Av%=U|BA(4tC5hWyHC`VW^Wgoe6TM1+`R8fNdatnoCiW=yA)Qs5e&cb66f+Qtcw2C`OR}!yfvazX4k=pZ?HU zWtgFbF6F0S=VsY}=qE@oe(o?r<4*qRLPADCKD72o$%l_D`HJa`b*=yr{Z64Y46@m+ z;zpJsowRzoyst!xy#jWu-&^CwlF2#Gp?cR>qKQ{cGHWkcSv?#3Ob{^L@W5gGTqT5L zY?ANshw9Xv6T7xwDjzH>7Sh^S3qVng{Vc4RN%=7NJUK}%d2G|EY54Y&9aVPwD**vT zfT77nDijah?zboDTc|?-O@|qEmj&Qv0K&ql5rWL-ta2~R*D4^1r^n|^lEn~Ap`Gc_ zgDjROy}pnN|L-a2MVL`*&oxXsB|&7(z)*(;G|>%r(zQI{V24-wbHXSaGmV!6dTYGH z*_|}9@`)NcX@{+E5=GS}1K>HneEo6ZdlbxqJ9KnR zn}!ThGxGH0D=-a{SAqTjSWk`9Ct;7<@t>*nhzuU>Z5t7vO!@>h;m3b$EBYq5wEF=iY1_21MHLK?;;MP^3 zOKEiY3R(Lj0CWW-6#~pCksk14_hYr-fpRDrAc7~-*M2hUqd7IOoMYhiQpxRwyl1@ z^H;Lt{$iK$rO>+Jo+7JL=9#JxZ?N^xR-IabZ0^9o{cpvlhGubT$ZNoaw+G4MAI`^| 
zbn?vp+D#771Ob#fRlvdodT7F@2cH^N8wko3rm|-_)zVrTWzE@`uf2|7p-XCt{L)LO zHNn}wznQlec9fRZ?60k;aKSpBuuub8|h^gSOQ$nbpcP3}ood+e`2Y$%L)lUGqtFZo56U}5{ z`Ycj5|I?N1fl)z2(@TtIHsB|@7Im(OI0P#r1JOF%vq;EZQ=lYA(+%ADhp4=RdvAeOgsiYN1$hlBLjfP2v6oQfgr=6>xh$~?A*Jxi09^ef{k<=7F+et(RKQM%L)5D14a9VWC zS5zB=j14Ca!>k%g0;uB{C`JDX2mvRT<#Xj3&>Z8|K0YRUnzFWj8oA%_QX6GydfM+P z^qL$wYI`(_@lk1wq%Q3QoGLK7%Zn;+&@hNtu297DWXCSsj6SuToOMa4^Qe2yX9^;; z%`FhAJ|LS#TQOM|F-S|Rp@IC6GmWyK>p>#!hK{!P*rkNI9Oc#84>|Mx_W;mCqwkf; zv)PIb8P}O|N*kKiKEKQB}+K9ko@*(n|pB2;Q3 zuLip&F+Wjr?C4~TJ`o-t78hDO1Mdb7ka;G=Zx7E&HfoQ-Z0DHCq2nQmS0?=Y0o~Aj zTjjapKzu(R$Tu4p>f@C?OXGwc_MoDo{2@>ihkl3xir zYiM8rRPm+U>@JSZG|OC-<`sF7`+|`pwp+|Geb;_yoo@SBy=OlhbV(jS7i+jax?As* z36kC`)+`m{qV8!91dVV3+Z<|4VvmJu=e@+VTfzYztKDSnF3`i|&YPbTu`*kpZQxHl zlIQQT6!{nZfNgY=SZW>(mm7GG(U^H}t%|?5TY4i|NKSMws#W~gNH%I&;#y|_|NHvr zpoNBY&Zz4kNPk4XDD1gPOH5q5e0aWv)?VIKeM=wen-O~4psh#d^@%G_u+xvQ?}jm$JhJxE?Vr%`jXJMC;~ zFMK8;JGpz+9Z7$Md-~vWU=L z{!6)b3fXZ5U-k799qwL-HEOotfyA%sVXb4OrgaG#HWyl=vgHk7v(aD(UOU&b<5OT* zyv?Kg8IFDu0oNA40@O;*O zkf0fcNLVXa?N$>{6llK>z-n3{=l=At-mR9jOzn#Oh&`U6*?Jm(_cY9C+J(Q=(Wu3* zKIs3FLVyAZOuDDQ&PT^UViLI%dNHF=GU*e=D0l|5@~zMSrWe!{E>V-JNhs4ieG;mz z@gQZTWq*izrwIVHdZTM##8LiqUZIBMXYA}$qMEUWxMpQdy(n8tOq4_dK*2a*zpWrE zX}Ibj%xE@iiQlc6jlS8GGD(+6=zkY z@pgbtiHA?JOX;Ryp`uF@mI0YB7$j>tSf$t?60Djta5T>QJ1(+v6j!tdU%yD@ySYLG zDw~lTwZe}YtH_&7spy5Kaum<+&!4a{;*pU-a6PqI|ipbQocx3Jo}v$Z`4_F zc7!?V(%4y)`gzTDgNz+Kn|!9_pd5FHO#0#{GA8oAj_L{yOHtJZo(XBnVt7U2pUoL2wQo+1rfNA`_UPQz82j8*|*!Z(6@0$2J*aqwF zsv+YXy+55vpz$;_1oP-I6)g>C`sijRdA^ja2~$ks#TmLPVBpgL1D_HNC}-UaSbNmP)J034e0_w_|JGEPxZUaJOMJUGUQ+5(K+hhvY5X58 zSvFEpjZ;dCR`A(ylc~DF0ttl`R(je1}R zHq=-hcT_h0Tx>`ArKD7obA03IT-6bs|J()&^j><45)=wLqq9oWZ_+kVv^RS9!me=4 zdGTM!CJiVcZ{O}@8#I??uTh^nFddbwNy*R zcdvFw)}hq_$!sVVdJe4F%y-2grJJu?-PU*@ zPHMj&$ZHCY>AysefK%$FLFA*kRJC&jv|}M*_Qk>ne-5@Z@%FxnW;BhI8j%cp!BMv z+g4kcHeCa6BTiJ#bJ=Ei6aSrlVmw|UVuZP;AJ2#ygnC1(MP9i~T?iHu<3gVNg*-9n z06iHHe?J+qzQxIwE}jj{j47JiQe#%_0_l}?(Wnn^*f=89wk!4yhs}QnraxsIQ 
z3gq|X6`bXxzjegRlD|;1SsVtxN8;Ml9FiHNaH|_ls*i~Equxfm+)#;9?E55eY4QHc z`Kt8w=kRsms7k>>Zi?MQcjQe1^4*OVus40J*pY%fsX5Ha0)uLUc-F?Itv9YdE{N&A zYnG?KTq{M34(qV;4Y&H1J-jqHQ`*=hNg+$P!8b=I;3k=gOcd5;8}cbAVPN+SMB*i{ zO|ACX!Cq>XYMSkw_(+rQ05ycy7!b@qP0y4AL?oK-aOEH#yj8gXQgtCsh>Brk;L0#v zbhkmNl=MHsJ6P17S&hpzlk5r~RA2N`H$rRCLtVdHzvNFZCCc__xtOXZ#!(HgsxYv? zGkj0jAP2)5t!8Tc@kdAy=)NFDg`TlQ=zxJiySR2o!G#a_0V*^RiYU_PzfC7_wNP2Q zL4>QRI)H+w9|qZw$uU+1u~%W|ZYfe?Q@1VuLx}n5A*w<)jydv;iU%8u(d`N!{;yKy zYouT@XeyRAr7W+$dN=NoDKGrOvN!-;YMRA`(tiD?+!YMPVS(zVsSDPOofetbK{XdL zwMR=p57a&EC7R$<&54iHqBSZkzLgg8|El}Su&CO$YZV0%1r!lMKpN?g8XDBz1`m30*{>J|uL&TC|a}yACVt#KU zyWjL;`I=wrqIYD{dMVQ433)l^yVnf-p_G5y-mFV*-ddQ3@Irm*Xv1(ZX749Ow%{0( zEkVv6HyZHV-C|VyUnNNdM;hM|GtIschIRJj(YJ(38P|Q82?Z1DCc>hl@QULCFgyAU z$1&8lFo`@)6#7muy#a_%n6aRr9!7Ft$%ZGNx$!Tfu-)5-b6D#@xRdWIgn} zPxR$-v0PLXvK+p-ZCumh9d1btOs~tzauCcJ$X6pZ2bgsv_7cFk#!iRcc$o+TGCTXq+2_;i>Q!8x82Yh-ntUf5QNv@XLX|WegV`abZ?kxv%efbFCi0x}x-^ zz=HZ{Sn79c3ck;{TU%-l|C!WC?DBY?a~-uk$HSiGuj8y2o(m-W(56*+U;{ZOV$8U3g9!Tb8$!a{jNEGJ*_XDLyI#o>w< zQ}*1Js>2^tD*M)Jou-`oQdxIZ{e<6Ht01paS9-F{3^eOc@0Cy^@&Eh=D+xGO>}guL zP98?f@B=ABv{yq$cv>e|MZroXsHm$W5PIh6zvo~OK1&E{sKid0a+4X^X9TTuw0X>n zQt8_oT;^cUV<_0zidvD?eA_R?czFbIZH4Jl9V?X zx9t!=P_G`A;Ke&+ZpwK&?gY>|!~WR8@us<&>W<%4;kGkP^x&RD549rY1LUz7?>yTG zYzc@wVo!E7g15;z-D;n6vTRQVR6799fWGF=(>ppAqpk^39-~`D$nH6}A#|!38<$Nq zVu&*4@o*B=ao!9{FnIaM;QGE_wEW2j{tpqR44erHgIe~!;SSG+v zTLwITGfE_Xjcx_WnmGLg*zMkKiZSG5D@fhitzrL|!FFew5I}|K8za%fy(|3r zEuUnS67jd*y>WWYlUzGFA_ZFRUvhwlC8?iRX@lFITi!(5Q`2hpSqP|x#&AlQNh@VX zv*_C6hz`^emtccu70C;)G!c+-dh{IgZ*}KV>zPS^-|(Yj>HA1ymloq+j}1Rt3yPh5 zvc?VSK#jkuD0c(&np98=+pOM{Tg%=*)Ot38t?BrhC)|4?lRemM#U3bCm4(DhMrU5! 
z(R#6Lyq90_gtAT2HFabAgxRKCk>ir`$b50GM5@BoRDL^#L(ayIuiE^ zxPN_lz5HN@h3ubp+WS>k$;2qf#3!CT##*v6%UAgoXz2)=hzJgvFU&AuvN8o(jy$QT z16_Xq5tRiIQACXj(S0>aW1gDFaILq=?YvIwuEc}AM0`T!0Ezkwoj6DMwCR|SMM+oWw<@AatTc-bE8O#DPML3C+Ybk-!`I^ltU@?>RALHg_cFCat(?iNWi3o3$!^Hydt z`x>RGJ8W32QeIs(Q)Zt+m;nX>X-n>@x=I%m;Hl_XP^A_5PM?AMx$*RVt4TX+ z=KBj7EdLV;Ze@?%g9<);BX|f_&>wDG{U#V&$1d%uvuM9`bojABBR#b$Dcj)z$+$cH zIV2b33SNDe@~&WITH0X?VD>uLbCd*Ud94Pn1-VY{IAW=@juPNB$YM?Mp{Y)~Kp}_R zjn>Yv;4v*$Y?{kFRDQ3mp|6)3)eNgsRPeV(LU*XG>%vB5T4|IU} zrx!n~%@k^3z^KIdjwXg;vMPjFwYiS2#kq@0?|B;RCATiJ+do;lDB3*mpQvk2|%xK|7 zv)U!m9!&_hp3G~|$6yJGn(3P$&6uv^O}xD*sZ4s;96xw4YTfnrGLx}$(oAkM!5G*1 z5QZSs>C?*&Q#649O;%uzjgM{)~ zhf3)1Jp!HK;}OAvA4QXf5f4nOEQhZ~-`{AfCI#EyF$J!wQV5LeuUsmm{n0$?8U2^7 z)@orq?rwt}u_GG~vM2T$UjR*^nGu^-NERYLjfesR#|mY5345c+{@f_+4r5e_0@34Q~7(UUuSmWdz84jV>dZTIF4r4latN!okluF3V0)Y@Ah@!w;`jT5+7Vv8LA>UkitRJP0{{y{(`XAgkI*kU0 zFwUOz*?%R1uX^kPxd6Ea)LQDjZRQSxDTd)}NuJA1epRH>g4y5wqOTL5vcWcfb9&z@ z;Q|?ur3+II>PYJ=O{F`vhIRCrR41P(@I+Y3`LrZ#w(`rO3L8 zC&ndYtx8Lim5<($FLtTljI=AkK^xie&Mvmu8^)B=(yKb*_~BQuD2`B<4$hA%k-#%e zAbgV~pKTt4Y4E7mYa4V?&5&fR;c{YGLVsdAAqjU`Sp*&woi^yXHh66Q!7}B6-#_+J zoP5e|@iW67zgx_y^|H;e$rLjA{Mh$q4S9q}+mAoswv&9nP6iE2f|Vp#O60_kjPWXC zto^6Vk~TyFuoku7NQ1oj`MKMW25|iIoWMLCQG(sy%7tZ(+t+S^^?`p`BiqM$4n6g& zgk)uCj#!|RI~wc)dO0*a9(?2qJJ~jab~kHJAe12`plQxCceJ!WJ>+t%#ZuB8;hhFEoJFw zZQJ%jmjQyqc(B`zC8Mtq{%7e@ZRLzV?JsbQ^3T&jkqqJj!f!a^;Mlx zvMv<=I6U}QB#JIPWxLRNDcP~R*qla2#=)@r8eHiw6Bt;hbkWP!3A6eOjQDM>P)E0f4T=&;+9`04anLYbrE~ ziRY4^aZOf~qQ8qjT5WNeZr?9Py#&7_?2if` z;BNqi%~O*%)3BJ)w@^)R;t(8+v-|@r06;Te#d?TQbVLuGivvL*BBWJn^@Xy<1%GQ)>c^Oc1thnc+mBbRRpZn+GpkPKct zjkGi~SxZmbceKU!|F+DRoMgad8E7=Gr%rHpqro$V*V~0|2^IZ8xV>u;`8h7$SDEs6X zOPwlkBtp}pt-#o`J1$54sV>WOYxFXE^u0d&7dBt5`}Q8a;Ua8dQt`x>wz8!G9=sD; zE{S{HRBOh8MGgJ0%KewsHOLDTn#{93viSiuX-P$j7<3Os(Sy|>mV!r)a8^9}g&ey;J+Ij8xir8k?I_h=eYA(ehSl>2ExRvadDig@k) z3&}a@da$(V^3ZImLls`N?cU%XxzCbEAzZ8#zQX2)#b3yxRp>a*-y>XM31*Txi2wDP z(y`Fj;w3rU>x@2_`H1Fjy+e!em_`xxLlW0+QBo3-G4a1kflQi9dvVkm8DYm-gz?xL 
zhkJo^Ap(#eFt{lWl~8xCs2CE2+GyH>xe$m30PeVFnHUxPEPz1#ULYOU-4t!=DF)o9 z{^R3G&VoXIe@D5SKsu_-Q1&9hpZ~s6dbJSKjAJza<;Pf56C2{>Y!nxc`+BM#=pqzC6nGxGJCgzc$jA4+y&@2dj zuUJP;$DYQdiY^AsZd==#Pp`rAXpmYprVGsy)!?I&w$#GeT5{f<@9t z>ig{NI^wpnB{XOYSe=0Ty7+iQ?p2$i@bBKK)gV}^z7?4_ct)Wou@2xF=_@>vAZwAe z%Avjf+)dxe#g;w2lw_tL;UI1>$EB46J)-;5dL-Fkfc7ykoF+~Yt}3E!^}X6KV)`aX z+|w?J)I+8=`7f7Bigx{REO_1uRTt86Gp?=8I=&^3#DHH~e4v9%B5ChOraj`xDt(cN z`xxhAto)ZR5_nM+{q7{#)W{GOE);|_;EFyzeLi*d_v9I_h6OO8eNScz(Hz~1(#cQg zuA2p$&23x%)RMsTypFG0Azsp7MS`~F`~LNYaf0H)7{?t($3+=k-#=#6Xzd#!5)5;7 z2TnFVI%&TcjSxe_ieG;-uO!gR%9p?I2$+1Se7lgU(aD;0a?!*1yAx(mcFy7M*wdZ@ z(esH~#0VDfsx{I$w&v|BjNc!yHwC5pLB|2ptt}?sh76}##0gg$)`ST-TGgO{<>|Up|=u* z+fr$4+=Gub^X9oJoi}LOY-d*^%a!3j2n_JP3hiDK^b&>NZ3a7qP0eJxw0^f}4`Zci zYXqiDekc1{>TkO{jULr~1?-~>*)f^Hz(tuhHh?DcapS*2?P$fYu_pwywRNh zm&aM7+0}e~q=qg5ve`awl~`n#TcREDchZqVFY9r|(uA6hLgv-HL~?fVY_IQ{vL9V* z9R)JPC4RS;UUmRYfJrjbFiB8h!_1zmm=$#%>iR;2h9>{bK}&JO_Id8v36MM@{`sNg zB0EW)UJ7=)rw>OoC#+0F;r}EdS~iS1<;0}~+wIeH>!Ao=^B$pDAYwqrT=ZMv6I(7D zAR9>78Elv|bvbfd$3Nd7d-kM_$*78rT}?l?en+!sH#TuHcD5F)pi^nF!qI9W`SIe#eO+C}L7 z>O37`kZ<KF=?V-sJBQ=|7f}kcBIzcw_}Yo^pw8n_NmT@U8CI1j@-2-FZivdoPKkw|T!FjFdE4g`RI?u+CltM(q)Zd)UBMH}P2Jxff0l*>1m6cQe#0Gk}P z%1un11GE)dTC|(=Y!WiQxW9b3R_L}a8PsO{5{gjk>=E9(9Qa~Eobc^wW zDQ8?*VF*KvOo*fLm1IAvCA*TTcl)A5k3H0FfsGoN?OuxA6-hnQO}6BWY8y0J)WMeK z=8o$tcIL@oU1l8{nj)3c+=P+L8zCaEqms${n~-ehgS0t?T(Lqab*NsngWEIwf5lCu ztGm7+T>Xmo0~;$_p8oo+U$qVtm0%6SKwG25WZp~Gb&fFP`EQj@Q?B1)ts{hIZYzrj>Wm-uW z?33M$F;i`zz-v{>AYcOWEf2y)e>f#RQtP29cofKOZ8Ag=FQzM`wjug*k$p?1NKX;E zZ`YLmZY+n$yd50(z3SMJn4tB%#*N3jaOSE>z<0x#ORkLY>9QH&B0Ai{)_=()-2pBI(96wrc=sRYtKk|-0otWkT zwhmVFj-9-AJT_Abx|UkuC$VL#p%o#L!}rXztW;xONV)|31!Tt*a8emnmWp~j;WyO$ z5seT{U#GpjNCM&rQR7!k`@#1~LTM(WRldffd7BkT*gH5u4&hgeWS+Su2)B){aUB{} zDYZs(VAeid>%%_~rJ^o*(pdfQE=Q<zG0Rt~}Zon$*6P*RQBI*v?0GjgDY3)to=Z|43@nSmnS>%jxuCpVm9Oo^(vuUME zG%l%xwX|lylby53+Ej09``b5H3n5@wbZcBLaS1GI(3VreL#w1@A}u?dhOQ_?CWXp= zjHD6F;@{TGFdoEBNXJno{bPZ4+2H?~rgTR1b3XmjayVpNs%6ukKv?%f98jkSza}ul z^Kh 
z239f&I<~5xHOd2tvQ0;wgPtcLKfQH`OO(OsbfaS(Weo(4$L0$t7*@yK#gV-IOyPyW z3y1O~`iNYfEHU@DERL-}tNj8f_`}&Aqm7mrfN#RXMBQHq+g(HU)Q1h^Dw^np%OG=u zgs4C&V9+ks?+}Eit-~1%OGw3!QYt}6`QZy4+B}pE=Xn?T>sCeo7Gj)f2w2njV)a8E zHT2(CEVP*~ND<;rZ)XoDf*Z)k6Ql$zJQU7=ftIc_FQWmQ59+*PbwIDtyIopF4_E5= zr0M}AXW6I4&W*t56!T=G$TKkEF)^~}>`e!Xo_k}f3B@V`Ws$r9Ol8fZIjzm}gRBuJ z$I3}#7Pzgtsmc0AH(u|JvVvXfc9E67ykBJc=E*Tez_y5mfMWd_^y67$Ks}!FI4AqRVqW zmucf}aI9NhdS_zJ02QB&ihnA&<|<&s5^;f5jB}j7N`cbZ@j*_n>gb<`g-mRYYxaMw zQJ5VS$;MfZrrW#J=&GRC{F;&vM-cdj6|5$`=!}w4uAsWJ zX3}RjDXm`30hfyNQmaW1&!a1p0ntux4=^a7<}b7Q)V&4&LC$=t!zX*=$xnx%0dk4} z{L%-FA%o@cjZNm^RvPe$GK!H@-1Bzxw=BSiIZaez8S~i)>=b!`OLsEsR<7*v_y~a? zyNUiT|CPUv)p_b>I`quSrqWN10;U6i&OzU>O5n}u-t!LcI(~dzWwU3UeGqPxW$cEh z%0z{t1Jqz*ZLZo4c$r9>=SbBW^F1|qC4 zER!oIJd~?b5#KaIx6|^&5P7L4?v;JYTfXvBq{cUq+dtru9FitHE=n23!b^5?xRf~5 zu_NyDa$Rzzl1&6zH&2$~_CB#h@v0IsHmvy=0XYo)jy#-rDd)#$&{&I!ivlAU#tU!Nc@Ku8%Is>1CJdyf9 z;Wrk4gIL1teIylsdx~AXinvf&eEH-A}P#=XyE{F$@|(@y^9Fh#i?AANNLP>8sZVNeXvWgKQi+siD8Cc=J(@tkH3|9zrjc(@0CNq65Z4nDWx}qFwdtN_nBz3zHf?Ho zk7{Pq3`{F+nF;-Ip<9Z3GkPXZdhWI}Xyo66GHolS>^G$9xg{vN)G*a^1&Los(i>2K z;XOW7bKS#Wv9TYzEudxN5^p2~2tp#9E%#V5Y%xDlG~WTzw~l!(gx zFYootAg9X(RfQ4JQ-d|I5v|yF4!K;l7hi&D44bGGJ=|`IHAP&vA+h>s*l7UYfz}&3~!C3#~y=eyNw67LS zW)@%ZfEZK;(L0WrvTF`y*j|*G{ z_e0YjcgOz*r@c^#Sme_}K7&ZbDJ^pGn>E~Z%d3o~?Qc3=2y=mh#y1}L7ubx+VU=|q z>3P8PTj0~E&@aB1VaH1UK8eSkEsJkMGc8-&ksTOk1u#dXKH%AidMxt0Hl1GY`j{;> zee7iUQG&tQGugcyufS$`+qVAv)IKQ~4D~zUVl9A;9gfuZyulTDt)Y(ROdq^|4nvZH zB)K*_4^xEReQn8C|Iyqv#FDK_BTtCzoZ-vcK@voxz)xLx%52$@YxwORUYf z#tWv=%LOp^KEQEH{$0*C7MX4nqMT_Yy>*>mM#hO%cqx@{>==G5!tOBTP&-|@N9{MO zHB@=%c;{Isr>?8fJ5RY#B-?*0g8#p#ifBChi4C4j4dW`I43|xUXE)tca6vq;BB`sq z`q>4WGGLm!&O6d>LT~z%J6XKUrRu>{9)uL(fZMwE-PA=mqv-&(vL0Wgxn6u>W)KAu z)bR}-8G^}O<)SmHds&zj;Qg^k89DZ}#H2B!ZdIxqC_TxiF;HQ+{WuIYGiN+s zblRaM<=PdA|7=b~H-Wb}71`-y=@8)XnJ^oRMQUz8cDwGU)VQ{&F6>ZV?g$a$-o+P<7v=CzEj3y3xk zT|wzcLfsFcG>?Bqpl=G{et(2Dq_25yK^e*>y<74@c|6rv+DtY89~H-6gX7q-m$Rg< 
z1cD&=O_0oMS6Bs+Tv+sn88kf>$<4~+p&H@SOUXLs8CotG%fcN&BtycmQYE$Ba*J`9z(YKE-hI<=eVmWQJQk?z}5wTA37 zpTYC#&6t$zi>)NdaBNBhM{`5!ms&X%*5?f}oAW*|B|}54jGB;SKhKZ#NZvLbr8FR} z$BNz`L*!J!t9PopT=U2rq2v{37V4qp3>%5b$G&1MZCHwb~Up%#u7>s!pM25_F zTAlrHOc|t&H#S~79O(HZ@+?F^7x2QA1;J^oC+vet=nPMI0}ca&kFi7T*FK+kr1F*$ zs&q+MXjYW}C-$0h>H4gJ@pRLTxTb4mkX(N}MH;1(bl2p!q|*lPFooj8mm(Y$UNA{b z7E7GdiX(@8S!bqmfw1w-iGIJqiHJsz`Go{H7bFPYT^GVC5y6b+F0XEAchgsa<07!d zB)rGj9r9SxF`2+Qt$&L8g61^C4$?`p;CSIKESvHGcHXw??Q!fj5!y)O6fIWYIJ5O7 z@xy{V&&CDv*XXPaDvoDmBkX9-pnt_dDdC&!1y*=IRVS>pT=by#Q8kQuYe{+$l^$2W zV&am802QEgbCun;i~7a<$=n(hFbtyOh(*pbCObR3JN>d#L5^oG!9~Smv&`iXK25Yn zU$YO6DvvFMBez>UG~2i59v*6K8^CD9JNBHKL@%;4I=&P+L1C&E^jpZ_<-ptC5hynh zlV-6?Nzd;7*mEUg(D-yvO1gH8+h-t1|IJ|8k+$gQkuAwRpUfD6iO7h0zRyov<1)_) z+8#Ap=JF?UUFTI_BtX!P#X9&Rszt8YQs-LdC7>8lY#E*>qNnk(9H3%-Oj}BN{;SC6 z6@6(co$dF|jPuqv%ZtjM_FEE>A)Z%4Oaxgg}e0ox80Il|}j zX{blI$8g1SG(@e25r|7}LYs90O%;W`GWSpzpwf==Wy|;&sk_MLw4$$6yF85k-7D;ckNU!hPK45q2*@a_o&m9_Wf*j8xMVlRq!hTmo zJ6yYA;sVJi(Pqh4?u1HhA?HOt6iK6Rj-qe^Jp1CqlQia)!9M8Xf)!^vYSf1fmb9SM zRJSV}NHHIq()jtTi;WZ0Gx_`n%%y1*PlZP5idHzikUv|qO?E1hBF4gLC{-SFn16)g zgR4}tabmVh9JE8~CMANJ!t85Pk--jJYqGx~RVk6Wfd&$o>^T6oF|t!ac3}lJZT z>bc+G>ppKsF3MpCzYIrg9UvWWzUrOFvuPp+_juq zbf0lbR0DMs)ZI@s!N8Fg#0vdTH8;=qY8RNw#*j66UZ{-5)#Ir&)b>(J#%A~2>N7?v z$dHbR-`O3Q$l5bcZ)L4}(JjwNwjUE_jGbSXew;h+t->rV-U6NUH(G&-(U*~)J-jAS zcq}ypo&}zoC-LI}16!Uq-B3hyYD_APnO#*JvU5Rl5*i~i?#fnv^y@r_yIW-A?dHP! 
zv`$q9$kBFmYBRlT)h>2S8ee<%jk7Qr&H2|3Gn&Zj_|_$j&-AB`mC+#%w_D9k-hr+( zwOqVjDKTyfc@}+WSLF~rYv}vxL*JIxppc+HjtLUyqW63OCX@986|gvT%_2k}?-SfX z%qcVdB8}37{ta^tF*k^>!Z*p)ANcsP^ffEOj;5G=937UQuj8|!KC?Y2a4JjfcjsiL z@qcBLLl&Rd?~&*@x6&htyDxIuAk?pMp}BvLSWugSW`!|@gOziZkn!xgUtK@u@DC3w zaM0i6_CmRc`G~5l5pUc}URs~&E7?aIU)yO*=@&b84lT#fY&{7g>WtjIVbrC8srjpR z3DlqG9z0SdjLpO+c<0KH!W5jlAO$m3JH#oU*f(8MW({j9Olvono`A$7%Z)(rS;0|r zO--Klfd4G{kg~lC_uSUUE&a$@_Ko`c8FmgeTk}S8F*`-i4l2Rb@pm+0l_m4Yo#|WM zS^g=uVASiE579AHioR#)zG!S>Jd(mJq~Rnm@x%V2$NM{HPfAH4-7w5m7Sbk`fG>x) zXYh#6jUtHLtEHsG#(yZtKi3n}s+2gmT4u8DVD~h?KII;eB8#VDV^ppS z{eVX~>Sr>!1-Tx-X*ly5m=JSglQtN-pnH=Yl@h5rPo0u|ZZl`C?7uZ})gtmjvvbb7 zn!$(I3w!gh6BT*x<}tDE9NQ!_fYJ~6ihK6}kGV%n=|V>&5!q?5otFFPNPNbv^7d@`M8@w@ z`i6W`VNW)=E$U`Sh8-t&-=qz0MVlx%PcCeZKnH^ zr(ELd{w5D%$TS&cVHiJsn>ThCiI(jnnV5D727o5uDJvh?|3St+gbxfAjTlEPN(sGAv z2g^^`hIhQ&e}>nu9A)f9(C}+tlpQI1AZc}sb*mM|*TaIcZIFr`G}6DjSLSKy8zNzl zDOBjocxz>WV; gE_B9d(0GRG+{DsyyC*_$1^khfRFWuu_R8;n0P$>W;s5{u literal 0 HcmV?d00001 diff --git a/docs/assets/images/DeepSpeed_dark_transparent.svg b/docs/assets/images/DeepSpeed_dark_transparent.svg old mode 100644 new mode 100755 diff --git a/docs/assets/images/DeepSpeed_light_transparent.svg b/docs/assets/images/DeepSpeed_light_transparent.svg old mode 100644 new mode 100755 diff --git a/docs/assets/images/accelerate-dark.png b/docs/assets/images/accelerate-dark.png new file mode 100755 index 0000000000000000000000000000000000000000..37f870cc3f828f78d07b6dc819deac8c19027bf8 GIT binary patch literal 9208 zcmZ8nWmFu&(ggwuE{nS^?kw)^PLN$BKyW8$umpF4ySuwvaCd@BaCi4d-uv^NZ%&W( zsp+Y%d#mT1xfPp#}v7J@tOA4gc}|Sb{Vr{C+~Pm(lqK1%-t9 zZ-<6TNyB^Zg#M-`Ee2IFPIT~o18XL#C<+Bt9fkC42nPj486hhns^JEGl8$6gG~~Yi zR@?d;rd1mqigbWG5zBphl}$ioqE=~_2}qMr)+@a@4J@XjaBI`9a3qQ;k#MojmnmRu zj*;6b%1hg(xP&jE&kx-mj&1;s?0iaeHG{->37wMw3|w_PBBLk~(4*k0e%rQoChL4L zZ7_3jamCSwS+BILZWVmr+WlVd=A$a10}b*1Z`*|y5%;6$@Y6(Y6oB^U8nQbg7dzT{ 
zvNQ)5dY=6d33?`qRXr06hIXg%V?S>2?4*hs!CjM-Xe&4&=!E$d6s^vz)(7^V6+$pCuIj~D!p)@En;ba)P1k$X_dA8X0pj6X z5bd*pE}?Kfu>>@74=b`d@Edi`X5X%~YOtqX=-x~cl7ACC7V4LY`$kU1VTN~$9*x1W zu%dh6sUQqgA4UDgD-%j9$F^G8NE^|PHHq}m?}D2u7Gf~(c@>Ei3RyD?=eY2Oyb1AW-+3ZI2Im~e=RZVojWhp zUcnMI8zHFk*UHAZ3{YLkcJbeiJ8~mAkle_KR$e^xjkNp`u*+~t56a)2DbF3 zL9`EAQmqAc^CYv8&J1T+s7|P zgF|tvV59FFcLp;39uLi`nO$(@A>LBn2!;)KnQgvdx$|G|&IY!fd-5@Q6(1gfi+xY? z|NO?oJ$w(C`3TLAN!J}<0t4#$uT6K95wNgtvw!dP^I%n-0;xI)?!Dqk^VA#iKQL5P zaNpfD?xcm=zcb;siGBHz)SFX~SY7-=32$*?j%6Fs9>|c(5N}){Ml_IdY@5+F@;*Br z1YL=~u!~hSx5#Oc!Yw(iYF>v=A-g0Xiid92HG-C4UHxd`3u&3JJ~|#BdH;nYTsCX# z*QU(#x~#`Z9{}10HGgU*-dr0R*qseaa<<pb;-L!LYpx8sX6oS^Etr-ouM${tS4t2`Qd7(St{_{!mG19-d;($0KLPFmnX%VAifiNwg$Cpp~A|VL{*b6aD zlxI$}I29XJYi|FutXE_V?RzaGI$SYFva)(4iLvSY@gSDtPAN3$Jw{SgF9J$OXCeFe z9(>a-wff*tyN0~HA3|10;pW1E$9GRne(di>`*t~&M_W+A8p-aXV= zy$O=WS74D4X`Guu(}A&MC?;f#hxPwDONITOJU9tMjq5dV{oTU9jI$xE>j)N#+PJMI zNHf0x(nfEv@8FI4<~qtd)i`$wRhtHiT3f7#26Ru)4NT>|%;FSm2>br;eIRQ3ND63- zPWlNjh`0&#_@d8&M_4t&qB?w__Rz zuwnuZyq@6NZe)5pqSuSf-;d#HZtkw`#u$4D`yfqtmIPY0IaHEj=6F1|qWcsHCca4i zV;cwU5nZE9MEE96m#aS{S|(Oo)rEIbn7kHL60ECkspRhfz+GaIn_fAk|F_kU5~v3 zDsby>s;z}T0PFWJCTf3~v%&iW`Y_-tPJp7-&cZ&gLg<*(j{-xrr^L$-cAH}wFd_TQ zl5XG*_Aht^*%wSX=-I1Wt(*jp42uzvcw;z%OvXPHnu6z!utKd(GeHy>tK;AVsXJof0p>utg$Mev>*z=xG+)>%!l8*iz zRgO;f=k8z!QcU-op=cG%oF#mSHtn-4h`M!{XdU4_2?{8l%P~h);wrs%^OysR5q!ML z#1Q@JuNo*11@oOJ`w)r74^~^gzC>$#-)R(*D2Uzt(F^1jvVj`lsR0A|3(=!jT&-lZ{1)z^423G*0f*oE)RXh?upgD33-_+{EkX zuUEC{DP;fr=o2CsuOjRWkOemz9iJIYw}uc3dCsgea9u4_yl`iv+2MOA_+&afIz!_(G zaA55%lnn<#kXI-xU`QzOzWu+XQtja9#`?a%)iO`yPU`JOYuZKv%2B)FuGex=g7Ac6 zjN}0o1;YD9cT_{mR0xpxb^&j!|CaBXIBpcA6t8|$tgSD{y(wu0hln7k-KELA%}IpX z5w3Ec1ojEzcf(cGCgYj}w!eAq5fpvb9PBUN8b9S<=_>EW>SiKXGZ?RAALP6A z`{yplvXrz5Y=`73_U>2Bn+8+PL^;wy1n5`IGKRa&WCfWPE=KH~bSybAV+}h@b!?o* zg}h&JReu7Lj0NTb!G+g%T}jbLx}coEY&KFmpv z>K*)Am@e6W#Kwv~u-pw?ypU2~uWI=QK5u#NP4Hh^%?1SLDu8U*b~DnMH)z606-s4+ z8pNVb2Pt7o#bOMamL?2xn4!xjG&u^H{(dePJn*D2mSPt!k0JXq>&mYlbRF_Ly11k3 
zhtBsClzqa~6CNbcUl5B7Hm0{A8DTX!cI0#hpLUuP6-JzuvOb3CxOkuOB5XDD^oawn z&`1-CxJ+v$klbhe-p;!Z4<@#4_7^ACiWlveL-xYfAryU!NeU}-d;uq`XJl@OASLVc z2#rj!66DWXwo)7BuY@XEu$<-ioD5-!e>;84Kwff*Wt0X++B=*@Ic0Mcg~KE}Wd&e1 z$ye4AzqZ@FuOuESz=EXDkNh)s31T-?hVW_=)~=b5(|Ik*2~mJlhlfc=lwUILJs##C zDeHA4_%#hDksRqt%f9Qg@%qiEF;H1}_~%pDbzF7d;la%xV#vm*7utj=E(p1ygE|N< zTCJwDPKCFCLw~xguW%ab(GTrXywE_T4sBc82Qu=p+0yQ7@+|yO>%!f+c%{y(EAQ3@ zY!zdMkiUaTm8cwg+ztiC*H1*fjE3%~G0dv_?t@v+-yF0HoKsp|_C9%2<3z|*MKpXg`P5(SoC#dFwv_iSvJYvndk#Vz=Ij#M;O9NgW0$x=Kj zOSK~ma=zeWn%_*al+=u*`>VfZ_;JiL&+tr6DP`URafe}N^S*@80&jKB;_ACZwOa0z zf_oYSi>8sCYUHx2-obEEoWzCIfYxDVBFp#z ze_h#rO82?83^k~nj_;;MX_w|0I*04*p*|N(m~v4>^^|q`T5dW?-o;*fGnedFzGzgxYjt;^@KNU-2S32jl7fF^r^JUs2+F3z|v&hGViW%O$mj3 za;(&|ek<;2{H{aF#eN}D7(Evnxl?LYY;+}wFo)8Cx1gy(2_xsGN^{*9B5yg*_PlE~ zi@E}_UzlTB#a5>ea@?7Wn`nRhYX}9K$ezp+B#)G6h|K1WlrT5P60oKBXhkny~ ztgrr=#haoxC|%wIWJsi6m?zynSYzP?v8HKUcJ_g2O(Zp*0H-2An3d6D`t z;yyk!XfI=Wf6GO;o~k;>s?x2&96uvUkM@v58X%<>4J@nka2EC%_^hXiYZjSPZLf<`BWd2dZpD?kAW-9QkH{pAWCHt^S`Va!6wP zl^$&*k}fnHNTqj(rcDif!|5dGv-SNS)opraLrbPa;OfWdoO062&oZTh=qc4jm-O>gX-1v<`NvS|KIqJVfhCUkWmOvq4hWUs z-7#kH?$fBK^rQduxLk4A^EZxh@P&*i=kE(lE7`*2|CU2KcWjuI%P*he>Mgv(UtJSC zBDZ;;Fr_3nP(vDVijllFPwYTaEiaJdAVHw226N$*dR-XGg%tR@{J z5E2$AAg(XY5y1z%TMdwBFNQC5QULw>7}KL@2naL>PVvwZ;&|50>m?p2t+nwGwtl(Ear0{&%pzUCAIWC&JQou$|CYtyLXVStt z=fy1PgFwr}HK&zOxh(EC;;$&jEF7mhnzFD!d)dTuWM9gIcRoMn>(SZnq=}tSsp8{n z$O3H<_W)=UGPdG0@zYgznc4B@50O)MSDnSKq-Q zO+XUoi57$+PUR9$mM9noqyCOu?mTRdF{9O(dj%cBX(~!UCuL_NAamTZ2n#Un zgOS=Ryl|J1k$_UeoDS2na9!*(Z>*+KU9I9T1d(*2PMrwNy(q6qdc+_REItW5(+T6e z1RaiuEtthIjPj}fulxa|B}Sp^I4*g8?n zP3<-aIO8=<&MaSY(KdNoezRI1ab^-~3|v2;2dMW<$Z-rQsu2g+-*7lS@KKRr+t=~U_;`#=$(4Pd7tfe7e| z2WdzZmvZW}m-!+tep6gOOraVxJp zgz6MRH*4C@6n$t^iJOeH0~s144srJA|%v(i?s?PglBOBh3~hc+jEaZk^B zA!Mf`%yVOo-f^~XPc0dv9p1DpfdHoDQX#uqLZi=*)wJC5>jM}DyK10NE}LDMx0`R_ zj!8U@4?@dBa@}z5Zb2**puLT$jd>=jyoZ&*+yCiIcB8eRYBZsdU^QDZ&FW;3U5}(b zs2%cY-V=m#SSM!Tsi_`*hLi2-%2Q^<_k8;6xWwi7YpvXx*8(u0~R_Y#W1{1_Dn6@`egxotGZ9t 
z=vxN_dN6+l-GEWzxC^mcsK5kg6@4vUvr`cetVxfA5Lu_UD5e;%VK(|97imk*%5j#U zfhCn3K;l>W6(Z2F8bWCEzR-!|_)j?oH{7YiZy`z!3_ACyi5@aNeO8zyWvqg|x%3KQ z?^1 z*ty^OYqQQ|g^}r6*!=DH`t!KVn#(Es3#Po;q&SwAOnufXUjPPYqVYm2%Nogdz}>fe zJpzoebD26JB|7|)Lcnx=!eY4{DqbXh*`$J=RU^w-mY12tiBBV7|M@ENrP@NO;@fCE z>@~{B+nr@l(ag&V=qyCe+BntFtHjOcxK3Z+MQPr`rDAz$5@eYd{^jJ7XJDj{>@!cI zfAW^}#mK22qjNTc^AB?RJG;qf`gx}bFeOZj{L?@>azg4|GJCRwcR>%R%F#?D+f z#^OfBbtLjM-KK@+zEeE?h=CSz-7URF`F!&ME+gBij7P%7t$%n^6RBUVEu}wW31UX@ zrVZaYEzuzH!p*kW#*sm{f8FIL8Kz8zXDFW#+i0t+lMI2Ao0J;dIF2Nv!Esl3C3k^k z+vqA&SZQJ7{~Q}IdMYuTxL(1|?b3R(ZlT#!4|e-nUg;{Ed3id|HuJa%;?P48TW>x{%|G`!)iq`e;Clx#sE&)ymEcFl$CZPot6J8Ff#9GQ= z_?7<@+fPXcrVd=4AUk<$r97O* z7a5kQ`Hv);h_u-jtAhiN=!~|QOuqZWOpXg%Nz=pZ0rB)%-m?hmx_7xsNntN970?PP zm5VjIPK-Ng7Z`#eyo0%zSFQ|qx>Tz40{FA99`kK8QKP~y!bF!C@x#0bw-t3{Rdk^p zZIa)dI3?Pz8% z$X(#|Wz{AlhnT@GMfo!nYx%Eq=Aaq)V0L3sWsh_-=E@88jiwEY`c!NJof!OCnwJiO z85VuYUwYOWNn7Dmc{D17UP*#NLBqpwo2T$!LS{u_IYu4Or9LqD9S&tVBq*?P8gY*x zcI&gTM7-iHw`d+zqPgnL%!dUUx|x#VmO7Z)#~ydLhtiFF%?{QWakd#-Sh0maKna6t z7(87*Wge`X^)2w=w`9RqOTpWAt+l(PavIoB44zhZC#D$8eh$JVe5e5zbB_(l92tpU zdLL6?ekBhY9gH&JK9|06B$EL(4qiS?Ze|~_A6fGm^H-uPk>l;VI@Z$rEPZpyA6@$P?eJ-j;M{`@$4;B>G+hAm z1N}P~#wVzp^jtuF>6~V#lGl^!LzGDnVzgmNu{r9b_z>pgC(Zfp*3(j|+Q=UdVOTFi%lhO|GPf7N?A z6#dvJ#>Y0m^uR}}7h)bMbm}o7VDNgWm~8C~!@A_J^AXJW_$~)=uvDcwXL~N)rnk^f zpsr1tW$=-2kWB~(roYUrCd@{BuO5BP?)X-Kf5clJM@ZPdqYW01B=JR8*<2wjfQvi5 z6W2RCAPLog;foi}jfw}u#SsukUxb32-sKSmW8+jyIV=3W=mpsnSPo(t4kd$Fy;25< zL0@Qs348!aLm~QMf4>WO^RfhMGD6Kf;YbsLK1W-=M`cyBP%c06#nFASzI1`WW}#Xf zDIM2NjBIkwcF~vZwAp=%Q=*4dy`S=8G#t?j=NrOiFXJiG@Vh883Uhm^yX4=BUI@2v zs=l$@z4t4;Gb*9LX4rO3cY}%7yBS;5!5Oy6!eL-fcR!D!=!ctdt}UQdW%9$pI`y!2#w3Z%=B+WOhcu}E#5K<{OnxX~x| zPXpyRs|qt01*ejaxNWtEjA;|{+Yx?Ziu|dx?c@LLPRm%VOpN*|j)=w+|72_#19V3e z8(?(k)hpk9raU_UARfPwhvqz02WCi`vnQyFP#Th8n&1T{OUFHq3j#>%#(45=mk0R9 zWuu>q*x>*(*4{*66eIZy-@_3!e_AGwNaK`lP5b9^phi#>qJn=XSY_Lz-WA$XVCdvw zDf_xO@bcIF5#raIlhmlY^xR@+j|%E5m(6l0t-iS$n%)z1da 
z^hRrE*8QDD9k-xKtiM2@1CxpiW}5)bh{yNDH;MgwT&MigJXr>*MW8VQf{D0oVk&71 zoa~rxllvffA8ArlwH&7jI@t+6v)D{6mY?9CeE1jGO8fQK!Grce1a}TFc4u(puHw&$ z@qC*<=C1Lc*n8dQARZL_q3!!M8^(ID`RLVYFUkth)9<>smFcpTM4iO}n0L-{2wxDf zg6?omVoBT0)~1)Hg1~lJxfUiN9D#D#Cj%AX35c{i8uzll$#Y;4EZBv@D-vKWyQ1us z%*4|qnyM<5QXM$n8H)0wQipr@amtUDrT|t^!qTqAzha+kiK0B%Jl!N9z8@fc<=4cn zrl00XUUNAohZ`Gn)7Ef8%H*?*QtqQ-5hq~1tBrgD=3zQc<_C@Pf+eVBczQ}Q=> zX(g?oC_uE84w$ivu5~OM9eYi-7&Td`V#W4pzRPagZwTan4q3S*!Z*Q^yZ7;T{@v0+Y^v3k=e)6kOFM zSZpvodKX}mr+d*$6h2m{A{C7oRpK}x>jioy3&mN7=RKov1b#kEa?nubZx`kyX2AD9Xo>TY literal 0 HcmV?d00001 diff --git a/docs/assets/images/accelerate-light.png b/docs/assets/images/accelerate-light.png new file mode 100755 index 0000000000000000000000000000000000000000..d60173cf582a4f3b01bb0575d1dced41f9414a53 GIT binary patch literal 9020 zcmZ8HWl$VVun^oGf(L@T9Kj{HyK8U`3(f%n0>R~QcXtSK$dTX@2(G~`IL8HtV2^zD ze!O~Bvpc(6J<~mHv)w(>n(7KTm=u@@2naYzin7`W2#C`!&xYuzFZXJ!amkks!%fl1 z69EB>=--8ike&Pb*EFAdN&bvh&iJSS2gdbDmaDBFX9r1r#2inwBaTL6W=q+^uLK3IP z$G5UJB@f-C*{z08@DQnlhup-9^-z4@-znH5Un$R@{rpT_L3q4p5!b&5)=6Vr<5ip`2$fC;G?0Zz0bA#9p>nVxl^MJ`H?@W<{) zD=1HB?&?s*Fi-lVrPu~I*Dxluop$~%i+Hxd>3+xpwf_b6Tu*IuC5YfB9pSU``nh1B z#z|ZkxXgu@W;5=WinPZOaPOM*VaMXx@Mt)%hHqmDz z0xfrzJAu2KCGC&K3AzsZUyEUU3k*Jr?A{pJq4@3mey=iyJTp1Owmp|`2u7aLuZblP z{vp4e(j_3_{y=Uu0v}E2MuvA5^hFGCa=>pa9jX;u_-D{dCvQ$6t<4;kqW4@f$hBmc&)+V7Gsj2G3v+;uYhk5!~%II#}yeI;6*|ieSvCCc2i~mje+L3trw`9iiQ#|JZlSGq^F;v)~SboAh zW`}aaD7raAlnwki%WXEi@py?(5q^5s$z3}HF@cc`Uk9IwXdqy!C7vHqWf{CBvzAaI|3l;f_T^X%Wo{d<+k?E*e^`^-E1^qr zb(6KQXw};0h+ZA1fg24yB-5}$TsQ~?nuq5nn^QkUUUQo+mEnvIwp!3v`t#rqZ|sDs5& ztl=MK-$Osj!>RFsAUx{C`&ZrB4+}*>S2H4Z&3hXGMTCa+xcN07Val7Hm}?BvEDpHS zEuXQ8>^rar+-k<6+9rV7)HgJjgQP*&Iqqe{8 z?UL4oFw@04*)Y9JloK#bhen$?k&wf{S+W@&UKLW#FXiL$HF;Vdj7z!&BJVV ztZzXsIH-5dp0HZi9u|@J5Q#)~R<`ErwvlY=-c3*&@URUgcv#P9AZ@eR_Zk$Z*8J`?EKzi%5Kp4ek!dl|eef@gBo62R1;S2!|f4|l;H>cCFN5#pfx!lj~g zWbbFPk3!t^H_i6Yz}n@qGqv`;_EUc8A8Zk*Aq#YhmhWFFf0yO|ka-t=M!R( 
z*`NKMK*UT`45nfViM?MI1QuHXL7lp*xseeq3ic8-pzR3f*s-mXGO#kQ6Cp#bO;Xpp zU*V4y&=B3GP33|{$ zA2OLV^<)RP1{{B_$N(Kw6iyH~V$*#yPfbLl(Q&Zck;8e5r0YICkwwJjFpI;abSaij znWpYVwp4O_aCEHPh2JRO&9c!~_n+wg-fLf7#j2R~({(C%&oEKUE!uUJXXYlw*<)AjAMI<#sDUXK>!2FJf=_as3&_x?aLM zVCq*4RN=ij(F&t&4s3P2b*;_zE;-=A0@P??X=}dPH4emA6mlR52|^~)WH$?VmG6`w zsrdI^d0#Si=`Cj)5qiG7NP+B`2|C)}`cTY_$4|?lQC;D<7vz&~Z++LWS-z+GzArF} zyfdFmRu~hPMo`hE^ffFF7t8ML23f+!?6X}CswOv_LF=Z3lVrhPRb!f$vf67rDLVZ( zKUtdzOrkOw#XP7}{hW`>p%3 zJ9~bPh2b1ci}unP*a4G$Jg9sOSpPCKAREc5n0mV8Q*g{vAn3q28OT}IiC512ISUQh zfoNWy`ab-#4$3=(Xd0KRm_v)a(}9BY?XUbI=JyP@A(^D93auC{1L%9C)a&88@&xiI zNuBsDrVUrb+PaEeaeMC%i~B0`LpfOHkyAN9>>Ij=2a!hZK>ZLSH7T6bwMaGt=xzkg z(Xx+B8YlIZWom1__BS^7a_h^-OY7^e>!N1vx$9_1^BY)yC%|9KIxj-sP{+!b2^g;z zj5=j9a`3J-`MlEjwD-=(mBs**%_y%S?}RPC!MLWySA|r~lA_)Z5z;}brMCsw%bdl_ zH~r@NbKX&>ZhmxnOHy=9la-oW$ZW*_FestOzsMQVQ2nFlQ#tl%8bXa`$-ZhoZH>z` zRuczapHat@%0B!?==lRHr=^Li$lS)bR^}mwyFL>PW8DQ*2=|;#-kABrJ09!p|X6rG}gt}n$^`AYHKk8^}1?K?>X>b&?=3(!y zT{(=I)6o_o#MO>)OV6P&LO;M@?%NPbv19T2RM|61?mDg@^A3Q4X_4)GGB7Sqq12n4 zmCIh=1<{p$eu;tmST|^f{0&8pc(?nvp$H01vSg$`KFY{zs+%_-4a?qgEI zfsdNAXpkN)M0wKr&)c^j4h<=e-l?qZKLwDuQ=~*yu=K(50#~Peg)7@z zX>PuoQL(Jso4Ok=l#-=`{G$u=uvHt+nh*k%fv?@dJ2#djK(Q>*E|Q_sXmGGU>ewZD zjJ3t*!!&m+AhgA{XLT0YecasbM2#eS2DsOMgVs!_#G#R^M9Mj_s9qjZ@CBWv(bFmA z<=>2TUo2FbpU1a!%goL8Q+<)OOiX*IArWbyCRw&kU^OPC;G-s1k@5p}ri%?yr8=r3 zd(Thpw;U?{2|=L!ixl(>iI)zcI#wEW9o7PzTMt^Zj-7Q9H`W^#DDeuC3^h9Ndb)_Q zhI}iCFZ73pew)?3JNKXXx`AbfTgg>COH@I1kJ5-J9p*X%^9K=++bx2b)~stcb4MiT z)h$moA9-pZKdZp-_ihi!^?30woK&pgST+7?`pXp?xy|s~xt2nE`1d2xAl%;9nqfCZ zKG>CVeekC*mGOvj?clwX2NgiE6C2o-CwclL^^%MS44v!&FZF=)iC3 z^2XqN%3}T1It|~ALMUeh%9nl^Hib1ru3(@|aK@>w-t3hs@R8IWBl?y})?Vm7(nv(mH#w^NUa;^`l{ z*n6t7)Nwu!A2n12DaASb)T3y=mk1^b@=_9b-c>BR-IypbMr5zy*ti?0fP-i&lgqa~ zr}(Gkk4yKIC*__SRHh|agBNwS5f=sEA6zx(o%t(q^LF$Ur}4z&k4gi#1@Li!av)B6 z1C0lAt*pY$69@I{ytZg1U5Yf_ zOeladL}mbv#dwYqJnhwAG+v#DrxyHP-*W;}Pi$v_I!$Ahx%Y{Hq{&Qz7Ur-zqO5>$HjPU)WXo3XP0X6e6A;UH;(hVIsWh=?*Je{$!gK96ZGW0#BtwU 
z>JY?f+)5+KEO)J=t=la^-QSU%$Rt{Ec5l(Or@YdV^{yM>qNJ2WeL*%wPF4IE%p-X4 zhwDQfuM<#L!LpvLF2sIwVvCQYEr2igY(6NbAtn?A7W^6DaC$XTQP1RH9FF=qC=Aoi}*J}edplHHs zGaDkLVuXE-eW#~7tmE`R4>)MvYSbn@H9!ogzn`;n<-Mi4}^RmkRy z9X9-3vk;<9xr{QWKylPqP=^{zwG-PA74!BSWt5pVqc2e&v8^nqo(1X&lZFD!W0Wv| zG6A*9ZA6QB%fVh%UK#4jbd(T?WHQ88&Ae^sGDe&EEn&Aza0^AjxML*=;Y@I1X5WU; zpCDc?G#fK0iWd;5{%9hRS6n+9iW1lw>mZz^5#2umE& zaB;=+!@P_#NNqy6Zt_v8;?mHTnRg2`NPV#|WT>XQ3Srb(Kg+7<Ko8Lnb zuQVfgwsDctu6&8>p4SdEeep*N#hj9lZurRZVs-r--{h+6jdM|EGVQPyZ!0z#80Ysg zrA;WuHmxj-WDHoB5SMR>&NoWNrbCZ0*42Fx{l@FP^=3$0d5tHeLhkV7a)lNR%`O7D9S?*GuEcQARy1{1RNY2|(LDu8T;ti49JI zgY61ruO{SX`AolC*E+b@7aeGZv)7Zy>O=hz6~vJJ12n?V%v!o0O&8QlRm7xSr^Fp_ zgQ%%Ed=2XkG$-|z8`>jGUCl$&<%PmKtA|{#mT_c$AJ9OzX~)%c(zk3v}`;$*3_KEhH-_xD$k zJ8Q2tIXBUYt)tr#{$T*j-O<8T`@T{+7iUfql@kX(p{UQj5jV`Mx210R=wSN07Bj?P zYd+F#*{OTrQLj?{(u}v zEfFDmqGOn`8l4wg(bzXH0W!hdu*Li7Y+G@N3&H2Q;W8>`-r0wj+7PX5fl?l@eln zsassv^sefw@MkwolJ-e@s-JI4UiQ0%Mls#>aDZ_@ue})oO^b&>#>E(oL73!=_;px| zrQ0nWE6xY$8@GQ=*3F!&E^}3^Lj=ucuagv~K`OBX3Ba2LA^S0Y`&Z|U=Eos3d#%zL zgY1)?w#}}zULEe7h~Hg~Gi4{Ar|1uL)HT(8I;}1Wq*h;NHazH20xZl9f5QbejR6u(qOrP!EQq~Pz>Wn zJ)>LToW=XO-Z3mTIlz%|hz+2dWT{HBL8!q5Dh2e|1Yp+*(&}1@ z7O)M{n#cPkwc}bIN1uxcOcVG^HJ2D5q!?1tA8^RRu0}rS?e&C#%$|EKBkrTG=U3@{ z)R8}P2Z;&5f6xbSI-!IfNR_mEM63hU`_79QxxhwIf zXWXzW?zi){6x;<34rii(dvUUqDa0vN{^ruq^@Yx>I?=rWY@f=C`Zbt}xWfkZReo?A z>A_$A`m4q&h5F9%G~`RQ6?j0M%G{w}eOi9BhqGq}Fwp<3_x8*xNLYBibiJdxv4|X? 
zj(P-oF#QpvgRLFnNn=-(BHw{T$JMj0{h*-ie!09|F*%tIPO&Xw zOFhgi67uOu4{8Xth%^%Fau#hMw<+`~Sh;uub6<7*+1wQyG?riEuSuYOH@}bBoQ)W& zA9vKjz1B|%pthYJ(7d4#vUaK(uj{2iq9Ec$Tzt8$|MZo# z4aZ8yv7?2CU9``xqUy^u@(@-zP`M8}W>*Fs4!<5fSaIHw99cU$9ao_aZuKhRs?CZX zGD{hSo1UC-{3g}BZ`TS%zWl_xHlj%5&vpb1B*nBZE#qY4(3FmPFWcNHb846xv7Ege z?-tfV>nK=0rdW2IGP7%Mh8%)fhd9_M0dUvv|yQ>hZV&+BnB!D}QE2yi++a%2<6D9nt)wD36kiNbaC z8U^km_K7M1cqVDtOkPgHjLPX_(^}Ti;3kY~HGfQ(>aC?;Av0Kx*AsyeWAf|xqmoOa zkJCkR&0E3_jWt)Z?AKcl=trRDjg#uU63XsAZm{n!Slqfxv&L}1FAP)w)>gg{PlbZ z-K`$np;VHCs2j}frHh#mGSm&{(f?bt(eH^kQSxb_7nYCpDzcb3vdw|ZK*qFFdzItd zkio1D_|5yC&*QEc>{-%OxyT85?`on;Ix|SakC3zro!fOR+nrw( zcdB{ccbuOUwJaS=}DC|9rs&G%?@+n4dyWciK(~7TDgIzvcP9;Db4PLz%;*%eqL5+A1tyE{ z%gqXut{ogH;+zQ&z?f~FkmjvQr-UvyLRN@pYThB9B%ZFUGbXzGVHvx(mYgcnm4V;C zC%Z4t3Uh_p5f$Hlv!kYWkiJXc5}37Z({4x;dF!Hx{d3SMjz!eTye_`J;;aLpLZJOUpN)F`t494Uxx@@a z7NvwvxCV`2W`JrcT9&~p!qAFw|2EbJ-%vTP^-Z3-fi2l?Y__%#mm?8ynY%riS4ijU zQ5pc;?XVDl{Zf@OmL2R@5`N*r95uZ_5HP^VTCPH{kTrEWCrMPm zeqLK|CCmFo6ycQdoL5lt$Y~0o^0D}{KA8&{h<1GTfNe-UUM`<*frEeeB*@p5%TH-d zAdT8PQR}c504ZhLYc}R$=SN2J3sN&uyRfq}60Yx!{cOI!T?jD50_t`xx(zBl@1KAF zkR7HXn-cg{@WIz1qcKpaO{}KL7ccrS?PLX<<(IB>>|sX6wwiw>CH;5E#s1e27yK-b z^!GIc&z_EqQ%&hL2A#N#O93U<2ODVZ_Az0@cGpCt?UOE7yQ)hWvukwm{Yr7-M&091 zH1@w#psZR~G;aG-}?J_XeH#P28~?2FyrQU}Ow$VJI@&Qosz4=0yzoN5wSVZ3tS zy*NdYeL#ne$f%BIDKB;CC)xD-aT{CC{FO+aq>(c_1dBya#EJ;A;Pz6K%p@I}o=+X( zi;;=8p2M(XO7zd|qBE7lqfWw9*kf0Nb288f`0{~Z@sL=E%?3+#xy+| zd3HV=L3mm&yzcBX;*HCzs#0sZb|jMT`zM#(Q!D`9s+Y>LEaVCXQ$+=u%h3X zr??}8SH*AYe_U@~oaYy0W4slFLw|?0q_)vn2qg%@owE-=>3xe>k$qv@9??V=61~=1 zV>d&Pvatm%caZwP0_j?-O%i#Dv6iZmlw$kkfIA2L6ZUw|X)$NHQ0cn~f4ZL+k0&{d zX8?9QhJ61FH^b`74wDFzG~AcWwcYv_@pNd*ySJ^^Ln|pJjS}ud)3}b15cR#_&{^L_ zEA85lq(IeHqOO?JYcuqmmHG*347m3Idmmi=-7q2b2#zwMK*6kv7-uDax`~vvmMf9% z3#S!yC-MSVgnu8nE4!{l`m3uXJ+#|nGn6kqAd?R2D>q&NiE5D=EV*b+jsm&7g+vZ# zShDBdV=Z}b607f=zJxATv0{TMg?gsyI3InlCrHRzDw;+9Q_3%qM$CUp&%cQ^t4K@n zUvMNmBOj+Yu`f$jRXB>pl)JgdF->rCD?(e$0=ce!sSV~jrJGYw4*jhcvHxlyMG`&; 
zTTcu3fUhdBgQKNs|C=(rB8v%^5w#R$U%Xei?st=fFss}#kpmN;_IIk&`CSxL4s3K71;JYW#NWj>P(P9|FV^5JA5@(ZrYd)WD%ssc~=XB0iA0O;s z_p~}%x|*gW-1BOb2b*8=%!P-ej#C7|69psy@VV@os5tgRlMS<>q-wK; z%5D;Bg@);BfIlcHV8-Li3fU*qf$5Berb-9f|1-BYnhiNKx6?toL^`1&^xkcG!^yY! zP^-KsU~CO0JA@Iu8|miW0u@4$w%IMw!)0PT_6uC>Y=U{|%>$A?sK<)F&Z8NIjnL2< z)EzHqi^*-hNkIE2=;ygPZ%F&PgE>`V2J9@Ce*sU$6sAfd@@Rj@+C)~>_#9DmtrjJB z`eiD8!3+*l8q*w(M6PE?P6x#LTeL{MF zflVuC6SqJ3LYmQMfLPQFUFFnF5qdg4H(jC3;)8`B%yZ$80)Do8Nb+oJ7O*#xLeFG*PQ2YeKvT zJOKCn8rU>7_LQ^CJ3VkI1Mt4Mg8$!$*n)9{Cr6{Ow=k(+4!a;I$*IfMOMeLcA3m6P A8~^|S literal 0 HcmV?d00001 diff --git a/docs/assets/images/accelerate.png b/docs/assets/images/accelerate.png new file mode 100755 index 0000000000000000000000000000000000000000..9e9111ac178c8a4f117c5e84063a74a01c23becd GIT binary patch literal 12653 zcmX}SbzD^4_clyOr_votOQ%R9-7Vc9jYxNcbR*r3Al)F{CEY0@T|>jWxxc^X{R3v^ zd}hwsvDVsGtnZ5Q5@^VT$WTyFXi}1&l%b$tP{DgGM0oIjdJKI83JN;VLR3^yN>r5G z-oehy!rBxHiZ<3OmS3`4h`3i@rGOF#Av}!7q*xyJQvr`OPPjo%EsmBGOVcGvD#trI zrYL`_&d%uG(tz-q3iJwp@*C*Qot^wWN~$Mjy4ujQ^+vLjWB%h+Xpy`w)f%feik!oe zy~ajd4fODO6b6{0p_G)YDhl~{?5D)~*QaM!OkFjt56L0-`5xmfJ>kQ`vuYg=i4{KBU>d zs>c<8xohwXKd7fZflmJT`!t@3Anxt01#%F~skiuA0xZ0(-zsNA!>oWjnJ=nSohK|Y zG0c|OklwYci-Nh`bEd`BHSDdf!-{FD1{K&Acq18!Pf)+0s-S9o?0en86(oB}&2L}_ z=U)DxMQGDp!9@f|DLFBOzc9FXAIQj%Vyi)Zt*ur&@qUy- zlp+uO6)30J^I8e}RjlFH+Y;1>?A$G;h%I3dIN$tHULV z)xR^LG6^{Juv;U8%WtVsbK;e9aah~kaFe|LslA)otb09O;xVSA`BR+%#!rYG6Y!lLHi1oKZ?XbVM){|j0+ z%Bzk{gFS>NBuo}eePJ2ayDB!h{NG5liBOYx2OenB zrTvGdNLVZxdzRrIv!J`7SW*5PxIIHar%_;SV$1_~vA~q4b{#qlRwp-hYh5;{e<`u5 z_^(;0#O!mJGK_V!z0PeEahz#2kwJ^%Q7gks%g|=HH+WL32>+Wpi8TxM6I2LkK=qbf z!`TsZ&i8d=7$;oCsiAmre^%@vne_)|15Qqx^h-Bl`1c7A{{OAq7xRFe3Pkkrpz?8F zarpvBaA?_YGIBK4BrZY=@@etzTMk+Un=XUz!NaTm#dJny|NUyh7GCsB_~t9GY9aT7 zd|5IooENm>bfzf0j6P2?230?y-5VD{oq7>Av;QWES)15tkO-k8)}}qu{`E1Buvs7B z$xTSJKWg<4nS(rMdlscDPlOJ_-KtU2fA_~nW#Yy!H@o>uF>q77;R5|g!GMc?P$zu+U}jC2qTeb9o(q0xx&E)~+!>e3B%f@vCp?6T5NArF(8g@$dtp 
zrKP3o{ZT^(FC<<;pi|c}4e1q(u(kmQ;X;)$0{4m|R>O)j5p{gYR8*Z4yyzq}55)h` z+?y41A8uB9d@c%tf7v?O88H$FTVuh2q(-xSy3ze`KFlmn(&Bui6&D}>8u69?Sn!PH z-}TObk@OGoRmT0*R&y2df@ZsD!OfI6XY%)opH37*kKwUhNYUKL0!WRI1c#SU>2!^a zr~4v_w4ELxM@zjKV3Qo_wlpXaC6t66Pb z+lMg%)Lm!&Vl+e1OCH6Hy?jeH(MI@Ik$-~K)15i}AFsCOYs?kO?s>lLpdz3XW4aLf z39VOQIT9kLWfF*w#w``%&nNSDgy+Q=dy39 zl4doVibI8;Ys@0&A80WV`I0aMZhQqF_Z4*YFow{AYp;)FRLiC@7v$WLPY%Dx3+>@R zvPitdhwK*ydECz5rE%FfNa;V{^b1PL$Y8mUfPmR!gAeFMq(dio$gO~Fo-o3+gpO2r zT$?c`--? sHhe!A}1oTCPDP8y-EbTFcD$jxhcmF*(Hs`8J(j_U-2oPUnU zhYaN{yegZa3BfAwTvMgOt`1MXrrm?*`v`o+-av-o)3a)x1!I9vk->*2{{*Gw{Tn@m z3y!ekbIZ#kOox-2YRCT$C$k&(p9l7Pvrwf;Y%?y2*TUnO#`n^r4`AfoLE^KLlJ)9#h)uRk3T>Auhn&SIjT1 za#$rDTCB6;FV%m^$Hw^1n5&YKT zF@RE1QVI+Tngo&j=FOYX$Ve(KE?gfUABq~((y}rs8JVr^!T9{bLWje7RyH;^Nd*NO zUf$H>m6oYWeZkGGt?$v%MG9F0mwS`2by{-fc{O`omURyQrs*Ju3*D=29v@s!$>9V1 zo*wVN{Y}(hGa0}PMkfiYtgM{crDbBGW?%?UPbdB}H3goOS6m$0)g_{=tjv-k+tb(g zhL~9P^Jn}v_bW< zTy!Fk{Q_xEy_E6cTjAt58a(W{;4|M=U!?Krdgq=aFGUS3sGZiUttoT*K5c_qlyZZ- zPXAPo+wW~FI-cD90|NM;`i`TTT`xwtQ$D)bh95Z}FIzP17K0gq)fVUebh1#htGt@j z)AI@U{pa5abSfr9rD|$whie^KKeZdq=Q^J1dLfKK79Hn949kvFa;OAsuOh|sBn6q< zZ*mXsHH|O9k`JCFG3gHZ!=dmw3k$ z${lhz=ykm^ci6Q={P*u)zKaod9q-$}%N{3f#Qg561J)NNH+@8Qm_$Uw0T=>%svA|2 zMBENbHVu;m^L}JdQXmt*lyVM-VVbO}YY+9&X2doTYr=;NX_QA}xRICrRho(4^b_n8p+q z7WRYP9Q~v7p`zn*JaURVIUP`XUJO8tt0Ob(-SZz3$J!`QmO}uj!Jiz(|D3 zX`a*7m#XC9$6KZbGv1cxYb#tRgK{l%ong9+V=7Cm*=H4IS(U{T)5S_xN|dXst9+1o zHIDnrg>q@Lg>b(z-T%>LdOMKeEg4`13wZ>{8ccr~ZLw(VkSYC4j}9i6P_UZUIgb(pci zRVU2dCH-Y)z39hVzn_E-a2HPv-j-#K8G-rmakFP_JT9)z6e@Shd>dmk`wXWbH@>T& zGTu~ZEMX^e#_USzaebV);tVRs<>A8Aag9}FoBHG}NTr3st&-Bx&D-59ms>1%mGaM3 z{v8j0nWv|x9qCf$erosIG_1@PIwU10f7^=W=B@iuN0Xt1N5o~l9?89G|8HJ60G{)% zX~BA@Zcfj5rc~|7-OFq=wO~(xW+$EDHy-$Qk;$k#(9yJE_@-H(YGsM*K z-@%D@ad%MJP(~_vvktrJ^XyfWX~d_rW7qMRB>1p_aYTJE%Dr}YDG*g($o}u;;97Ru z%>d}genSKXh7d!0t?}zynWXpoF$WxWt9+tk!#dwstj(d_!Kq$pj$8DwRoxA*b#2U7RF9P}1-0BrVRRC(}GdEdeyTuzP>Y1)p>{?s;Y&~0`78c*@ztwfZn<0=TBuD`}oa&o31@WY}~ 
zSPWrafam`HenMiRAzMmuNeP9ZU{;&QH5-_i*kc=jg<-8UnbhCpNiZU=T zFm|`11$V&g5|WaPI0!*vg8)&~(UFywM%MSfr2>nj{w}S>y@bz5KvUc3fV}KVF!jzy zLR{;ogi){HlFxI?te(WebG8U-#ACCmw=f}$_g;B`$9~tJsW0YSz+uM!S z@6Bo!?$!e^=y{(@)ym2f6B9=vppuk-bUCgiM1@~?^uC(W$WY$_Q__iTeF0iOot&IT zA#UIlJ=mMf)8)b-85|ghS-B3wVKBKq_}=`v$|#63CGpAQZoi0#-%d#>_E3OYF4bgo zY%Ga0v$T}{AdX7L%Qts{$tC@ zS>AV2^}Sg?ehdJFoo16aoe1f8ejGP7z6H6f+2l}g2+8t-5WEybZ*T9>dX*AY9hVg+ zPXPL>@8zYXEm`4LuuPelnawx4g2Gxg8rHl$!UuETU}0%?c=P8;L?_Lh0HQI!p)f0g zC+E>YD0*B570~&6?fJ#Hl%{Mh#4TN_w zALqmuxIbiHu-P5U2#=0dsIh^^AYI-g4JmZp&2T9$E3>+;dYR@~a79^VduoA9@{nbV z)f|&1>sTiBMTKq)kM~1k`O^#C>We00)}9?0kX%R+*Z8URSM{7w;}4EgmFKMkIO62R z#Nfs7MY)`^4_bPa@Oz=_B zKT50BF=?o}B?kLEatDgic{eU9)_+zdmOe9HfnW}maoBt-Ai4OKwmj}`gGv=tD5STS z-_XE~A^7m?P~aNCpV+q&TKq}N%Q{bxhK@mR|EB9z52ccXBKQu#FH9RAk6TW76JM$> zr`7DhKN>#1^o1I8h9oDDxYcVLyfsbJYK~&Mhl_RLMOogd(6I2?a|cRgGdH)lu`w~f zM^e~9LUXTqUZ5Eo8p^r<8p{$?5EqA+h$1$etI+iX5$Jh2DbafOuZ-_)^B0kK6IgAXt7n$x8Z3D%~x;b;Kx_v*ISkE#g@XZR!o9V z(BV(_XBF8T+Mp(ie?F|C_1f6ikvQ%AeGMM~PF0m6c`6EuPSfBZ#+Qm4#!&HP<}*5( z!26aXz+v}hf2OJI0+eVz@`JUfzaNW;NXFHbCnqQ86#@blK7MGY!EXZ=Ffr|syb}vH zb5qkQ+ZA5TUC)gmJVaz<2kF%%yAItXjct(IW&mR(vqja@=*r;%s<)eJRbeq%Y*T@s zk(`W2K|v817+Cu~0zN?4d8r8td)oi}i{X`#3tA$@CDF`Yw8_#5H z`J<_$L+tagMP{l!QrUWr94+|7^ZNB``DlqjJ;Va|$St{(`xD*x(Wxnuqyr|nzpvTr z2RLOxEQrd>M;Tq$G3-e<<8xXjY)S@334O16XYLdGLji49Wtc%Ipm%)f?-262-CorI zf92Mdj~I)MH=mT%+j$>Zg`ZVta2>P7M$cdQ!Id9(I((It;a?2i`>NLD%CUH4c4p^K zVEVo!X0q&mBh5WdQ zGnVbkI6cWN{(${qlGwhZoR_rsP1$5a+m)_0*>>Z$ke&b2B4M+nW0M!)A%nLL`7cl| zc1GW_u_-G(Yv{QZ3~i7ZiNG+*9JRT`u0!bABD(yQyw`o^U&TzOqn> zmoVRI#d$yYK{_SvT_LW%9m-7I=tn-wf7AJyXVg~_4$L8y4M74nrYx!-O`NCuo9e2v zYbT~_EvB%u-tzGLppr=%K*w4HWlJKO)O7xrVc59Z=g;%hnQ`?!QzlG|jB49(=MdWr zn(C6sZ;>n0flQ0?c;CN&@9F7roNp~FFMmfv6H;FO9@L((dYh&BS__r3`^0;9wGo;E zQg~6p`wlt!WFl0;y2LUBjnlONn`;vR_b+gSPTD>62ic2-=eg2ocdp9>RB)dGN5o02 zs5jm0`5v|z*70;_RfHP6X>?1Z`M1xxfisi3<_`-;r1%t*!_j;dY}w2|)yvQcv0>R% zj*^w_jz-r)K-|DGIUg++(1yt%9evQ}o$^$+Wtmp@$57NmE5K1ShAGdU^6$j+e-Qrp zU?fYRwpa+?onWPW<# 
z<5CA5(~e1ucB<24t{C6LX?HcFfjREl@83c|^;{M(b_ zd@%d(VmHgj3~YaOcf`l|kk0rWm0>ORcf3D*En@Uho5U2=Xc!A|8Kwz0bO`cyrTPNj zsxzq0NUy2QZ2qv{IQLdV-J9_&mT6TV#kcb~7}AF`Zsi=c?Ug-L8#VPJq|33XiY;l* zk+tt+ksc4}VzY?AE#ufJ)sjMJT0p-eT%TQq11X*9c`l_&pQ!TO+SZ0M_tk|uv$a{v zn~jy3`5MvfiX&x(5p`5k7g?9~EgxU2T;$G7nFhQ4x{z(-MzAwG?(%5&y3|F9DTe4N zi^be??_m&U)5P-42(ASMC!Ek}&?rGKj-o1)m&;&fZm?p&b}W=Cy{o0Wz_+AcqpC&y z5yZMMiHEjE9hwF1x+EGf z4EensNGY^T$*4g)wBuFJElXcOm;@E_`U45!?0)T8Z*oV1Dur@ss%NKp~dzYSX+{4X-x zfIdUM?Fn5VLG&!}J6fjQn~Zsngre=FV^7XLy+w=h)j(lS=)l`0QOWAIlKK#TLyszFfZA&uM%k%f+@ZRG}tX8G4uc znm&;Oa)IvUCh^-(^G{ztX@kh=Ut6cML~P!Fl9?XIA4px-p%=!vW3p?ZYnxzgCh zFirF%15*XuR@}&la&mH#$%D{^1|xuGw5ZR#JEvcZ3l~bdl>Gg#979YTWb^SQAJR99 zWVR*B!q)}^OtmKr1A1S(3p0CNRY*fR_R(ozzL%U?auDX?4nRr*o~TeW(k0Rx3M|}g z_DOY4D3nUbN63#1e_3Hs+604mEEpA2AGpC+CB$#9?*312IR5=_rmShdFwJCz9B4$Q z*Y^jN9nHNrAe5-m)_~}>KjKKG+c;zZR5kO3YSZea@l5_SI27E8A_cOAe%ghD|66`E z^X{gV7$)}qwqD9MJ;XmBf5-}CU;9a?*(2M9ETXsb?w34nVtHjKokP8vx2v9X({9|X zCniS-*OXhe5F8r!Ff3KeyQ()p{P~uxJq%F}rJ}K#exJC06_X4h#gmv1+MSH9;S;QJ z&YSntDH=DTdu|Qih$M7B$;n0b^Z&J;|K$fjUhIz_^G4cKIr`cSwguJIZ`$tm-YqpUyrc6v$ldPVf9$SIX_U_%g2|$UPoesn$B@u;%gWl*s5YjEsytFgkiPkZ|*I=~AO}nHE6nXv8WOg%~_W(Gb0lGJA>9N^^ba#ES zMrT3_uxEK1p^&t+bP*?hAX9=NR~le+ZRPbKoARa-HODhP?m~j&i^hw@9|+OI^NoK* ztr9&nOG!nC4IgxjzxQpl|K+t?ejVG2F;l0!S!lrTY=}aS^Xb(B5!&uh{#Gp$GAC;a zk+}FLj(RA~f*6??li7QoF?YIzKk5)n*ua~B(p*g(=XbZaw_&R!0)SfEB$agsu@=thBaUx73jU1dJrF zu{x_chsUeAk5?wnr|ZIct{ZSb#RddhB5xHFsMrsWk7pMb2Qg^YB6#N|B_**(QZkZ~ z_IG?Q=2)&r?3)DZdPi#IUX>jc(EtkjVlRACTdqI^-=-1oDpYpKK2h7)T3YS*%G;tk z@4i^WzroQV^RjO*-=H28;>T8-_JCrdRtR%Y`mpA&d_REj6BmZ?j0ro*Z(rnN|F<;e zh$ACGt;p%VqS!qIIr422o`|oKA&;K{9mp$w1RC(-v~(7yRnlQE%K8htdAK=aTt#GV zk@bEGzdD#>_j&RJT6w7}Dvd(scFRe7{ERa&8TTBU<==h-alE(GzzjSqMo5kOmF3Q8 zT0hW{aE4oKmbmJz7bqEmZUMLL2V&&nqYIT|&PX?T@>YNJM>Qb^8X5^DON)Z==C}!o z_`zw5U6t+@`$|oSPnICGo7t)5c1iafin}wcU{<&WbT#s!C^}~<$~{~Rh~!9YKL?#` zhEl2%F)KRyjniO z+NY4NUgQAr_)GULh?;n4jzF}qQcwc@;<4o(`2@ua2vx1krIiqq>+X?;Zb!@GM3#P+ 
zk}t7-cZY`;86=w)5!DGFid5rs1f#r{Z^n~8PqQx_CBFo1jx1MqCI0~I2cX=S?%_8n z^iLHjXqbe)u<|fxb-R`3#D-OO8TF;Xc$#7QnIj7}g&!=!kS1>Hc_C%xxWi3e@x0jl z{C5kF<5_}a0i6?uC^-{Be6?DPK+2C@@djdMb=IFq@wb3YW#-ORK-JaN*_d6bs;W9J z-kNmWWi7#WFj{w2s-n=e2X7ioaK0fT+C*dJFA$si5|=q-Gexy1PK>P~S;=bs9WnL| zoo)~EHjZ#4R@0^8V|;jj(s5FIF}P|V z={4*ft6@dah_Po~Ue@*2-U#nAJZw}Gj_gEPazbm#EG>>(bLgt}Ift(aK1UKNU13_c z?l`@eCO%FpRw=twKaZKw;lj_2GQ`NjRY|0_Uo=~kZoSH@@ITSrKMDDEQ%lwf|LHt? z7S0*X#K_S5ld=fuvkFS!Ub;fiQdjSLv78#n_}y#EF~v1lzD$sdMy2SnuNW?p0zl!X=-=4ih2(A*%GKlxG-gq+gq z+6oD`0M#iaNNj@YZVtqva=o@xEE;)5Ev>Kx>%K1>ammU3KtgTWiu!2MA|pIRh)+PU zu<>8ucwe`y(OEu&m0~v^BZeHW$S5hjrjAF-n;f{k*nKhUfbN{*az~QQqstWFh>WnJDt;t{ob^3zJp;QOGF38=6?sQ|=$E z_dWA9+Qlb>j?5(jEVG(grKxoULyGKgvalp5=6|w>#K(SEk<&u`i}IU!r3bs-aKx1v zs`w*)%~o&%cz7?u|B!8m*(jmeSo(tPIdIkFV?TNW#mMvFtiPH|^rvpiYyq5Y!i$mF z+m~E4_yEip=XsT*e5v>w;J+2@U1^$`Q3JOzZpImC%zS4(SQ+YgcI~&+-*(3mXSm+J z4ans8sMi|%^G7)tgG^paD^(kx<)rS@RR{Zk#O+@VXfqD`fXL1A5h_=;MW|bBxb`WNf#hqsSHa; zx!OO~dxHwM1$3K{sYu~{K@ANAwFaF2{Uk1@T`0_s0~ofWshsj|mV9zMpxj(+hBMc< zlmnRjl42y8B`SL&1b%*gVb=t;R4-alE(CbPOsx3rfZX76i#1<(R_l*mp~qsD1`lep zWCr_+;Pa6BGu2kohKz{Z_egv``o=`tIfv3k(#0H3Pq-8KOP}h1n_2H2AL2u6M!c|T z75BSRD~dAwG2Qu2B5dY;@@%MxNX=37v!1)vBj*ivHa2XW)Kp->9u_fcfHNGU_k^^; zZjGrdL%r;8ig~8tYOC83Zy{(a_zEOqUZ?V+!Scy3uIGa^IQ#`o{@~2S!o%BD4}YnU zw(YlOh4LATW{&`tYCz!E0C`u@(Ru&;VRk7^g3N20%Ax*}wGRtLx$;&GC>`H+<;9MGLB2}SGz07(XR)LBxVYl- zgxwN=OSV7+ngP}1h%E{D${93Sp4AJjC>e5QZF&mI%5Ok=IkxaUK7eEmTMKHy0}#gX z60&61FsN6;YTWnB?ZQl~@@jjV&))Og1&)e}iu7aFAzE;zkE5q`$o6QA8hsF&C9!ju z*1_)w_IH;-pvwoD-cpx4&ORg}LT&U?cmGt^B<5hd7y@60>>eH8XL+kh*piWB5aXDj z^olO63S$z=y?*@3H#L<|z5H{|-VcV#)@o5(@??GQCQ5F7kN~-qTnji8*2D!^Mb;P|b6G->fKz!gzqX9+Aq(6#;gi9faNk>9f7WKs$0j^cx z@Lf8$9ch-&(}%OOGa%#IKR;d%^V)*%hK+$BCKLN0mzrB#JYf8?T`^OiM*T`X>8x2e ziADklgVj=zg6lUJ^x zB2nXheVjBUa7Bzx!kZ7Q(wcFBvM&g~7+!Xyq$pF_G2%}f0byjYwq08yfJ`qqEP3ep zux8w0Z-SywHf8hAzpp@flP@y9yW9t@5$8Y$Ym`+7_J?_vS0B)|R!b8)kG=yP?j1Gt zIdJ+0^ybry24X0z!D$c9auJb#qC0=Z5k|8F^}jxT2b%xI)BQ<<^l_3%?uwfL0Qk4s 
zM;(!57_jhNzpb*@OcXsj_kY?SBG8cq%&{mgydUY~`<3ppGU9LNHsNXRnY|zP>1u zvf@WzV5OHLk&%#$0@HA0f2zn0U75R2+?ebUbZs#Uk*7Lu?@0$Q{2l~iWqnOfOt|{{zfUd*9{Qkiyqe;Dt zghlQ~xtr1v&rq@^97=pCC4It0`5J=Z2aztO64#N8RL6?Pys3^a) zz(R_3AJNA*>UZh1-m`!4V2UOAe%~FEV=-lf-ImEb-m$JfC|BMf>-(m3ua7)ehaf4W5Qf#*ViXSM4&3GsDK`n zx15}b2?^M2?CgLo82c8}d0Hv$|z6La(U)KqD3 zZh#!$x!j-H{`)sJB}EFj1TSqXY;18Lm}SoUdU|N-AzMg@x*U#k>o`zfJFh5$DmIM-0>f|IIZ>NhCDkdW=MNb|qpg2*HO-J>Fy?24G$T*ki7caK zk3!XSQ{yJWmu^uEQ*UWpOW9wE|JSW~`sr}Ac5mqjEjZ$Y;*JopxYozTjtP3EGG>BE zwXGmV{GzfVxnenIBFcpwr`xj?6sKxSM2Jh3l!d>*oc@gu;SUTyV+t;tR|&VB1RqP26ewN<~nb^&{3SoFWYg;X^vKoZN7^kl%&jy5%mPiXp#}-HKSMC zaK{&CyS)D*cl5S$QDI2?>m5SzH(GZbo>#Uvucvugw_b)ifrJY0JLEX;gr&+Ar#4Q5 z?XHKmsKc^TpP{NwUW8d{%?D3thu8+g!K-NXk6ltr)_}{e3O)?Oua{b@Yn;eEkaL-D zU%FPwAj)^7O>s)HF~Wze(F|p8tH&A(3|E+j4Ij!9#bE8Hy|<4_i17AbOhTj`m5OU# z>;w_1AgeoMNQkW;6NGwCcnWW9S9o=tW1z`CcA{+EMc>z@@rMN%BKck*Jgj6T%^yKu zW0ahVluuY%f}zcCP~Cr-1T^6oCYs@FXN81W&PB2PCZp^UGIeankfSX&&Tf4(_VlXI zEaF|IB7Wnm%^hE9Mza*qqK3#@)oCBwd=Yc!BB@R1MyjQgZcOox^x;8R7cEJIg)^R5 z6bn9;wzyts)|Q30@JVfoTsm*zbxDCS@msFeOJy-Bea;`>Qn{^bSATSa;f-@MEoj}4!R|8Y)ewkQT7kU$#|I&tSG0Mm+^eE}Vg6M>_s zjgDiUs_EMzb@J@hmBR;TiPHI*Ud@|BzgRN|1?FeRrNSB!Dw3izv9m(pq8TdA^OsXD z-!ZajdJBwCx*t?`xKH@ML}VLI+zW?hKogR&mi@P-L1^&WLkdE(yaRd+HgVP{{>&i^ zf^a@GQ`ze8x}(_>Kx@+Eb?}dUOb5P4|QrlRd z&DyUKL5nQ-3#)XcvOJaW=)&w(!K_qf;OSRPPBFGGir)$T@7J__I#dLhy@FD7g)-41 zriA#zsj)(^!QPl$gv|=Upn)@>GT;|#q24~{C4^RG~z=^iOGMe5Haxo F{{SlV_8kBK literal 0 HcmV?d00001 diff --git a/docs/assets/images/adam-convergence.png b/docs/assets/images/adam-convergence.png old mode 100644 new mode 100755 diff --git a/docs/assets/images/bert-ib.png b/docs/assets/images/bert-ib.png old mode 100644 new mode 100755 diff --git a/docs/assets/images/bert-scaling.png b/docs/assets/images/bert-scaling.png old mode 100644 new mode 100755 diff --git a/docs/assets/images/bert-tcp.png b/docs/assets/images/bert-tcp.png old mode 100644 new mode 100755 diff --git a/docs/assets/images/bingbert-mixedbit.png 
b/docs/assets/images/bingbert-mixedbit.png old mode 100644 new mode 100755 diff --git a/docs/assets/images/convergence-table.png b/docs/assets/images/convergence-table.png old mode 100644 new mode 100755 diff --git a/docs/assets/images/data_efficiency/data_efficiecy_fig0.png b/docs/assets/images/data_efficiency/data_efficiecy_fig0.png new file mode 100644 index 0000000000000000000000000000000000000000..1a32eaef49ee8378352b61733f4fb1d0f85e6b61 GIT binary patch literal 382954 zcmeEugL4ZvDhLWoSaeAXh#)AvAlnjSiO?T@?_>*^jMt1Oz4c1B$S5Rp+OoQ;lCd12emr@WOaCTp>P!oPxSjg@AC3?{Qd#{&-U`l^*_FQ z@xOnK*yDd@S@VkjxxC{oGSxDQ!&1vz)afHG26-Hg$9G*?GvErV{TKPv3N z6G|83GCiT*5!vRTqp8lJdLeJ^7h0Q|rw!~Afq}h#`STZZorc}o`CA3JKWs#q4t|O6 z>YSsbf1jB4Z=yywyU}bw>C)F3rb=?alf@l9Lj%uO^iAl^M%1{7MB&^@izthjPJC@j z5-Z>Hok9-Z+RHs#$W9`zD?A>HI;+FgWP3{C%^b_wxnRykpD<}`@#8s{4Z6`@Q;n>{ zihe3QD3q+!t<7-J`=1;y{ejYr?=ehUn*Sd8y>B|Qdr{H9wMamH@>6n4>Jm*^O`ND< zuS)^yonooWkZ%s>OX<19>WC?oqflp$AqV%S5pK3RT)28-{(Db)y(o?D5Mrpa z4T8LNsfUD0rlXH%JV1;FR+%uA>Faz{hS=m2&mIvSRO?A)ff8iDAHLf zT(>S>Lu_GGLhddNz3T#|OzWUwHvF;*kN(%SSAPqyuCngc9STb#wdYz2)58nx471~} z2UbS;u<#h$=To))Kcc+qVXPTESR%Vk&{rwN5uY#l22I3}BvXW>o9dMCa ziPeuWb4gRv^Oa!f<*DtWEJBvtsT?TI-VFh_4{qByF2XJ#k#dbpZ9H7E2o`1$b!Ev9 zZi)@}Y4A`D{<1lu(C);Ftr(p1z^t~!r-AK7*P85FR%gac6PIlH{`pWV$Uz-H5eJWQ zt2VBRU~XWd)vY~;>S?5_3o>tK9x+r3tSmWEOKSV*&wSx+MXynl<+%<&N7?Y zmCwUDfv+xJSk1OwstP!lH@Qn!A3s$&7b(*gEXul^C2(d3&A}7&2|G$8cio@W$g=); z#h4tSsV@m$V`7NA5uh6H>(+?lAMgD%JfSc=*-y!B-kNwstr*_i2HyO_9jcjC{7ORf zkzWB{WRtRdYyf;{x_Z?u!<=*vwJ=^+rXx3z{puBA#W)vr&})mzliNqP7A>YM3+VZ# zjDO4+`7I_BMpi0*6deN3u9=;I$&?Dw0V3G7B52_&}I4Gwh%ix zqgt3v|D|IkTE}g+;f!$gSUQI>`^DnI{o0p{$HBhh1kn2xHYMr^2?-Me0mginCsuyfy%vf=f4`h?Ra z9&i|M)*Xh~97amZabjquNo&^EZNk-r6=jD9dxJ}g0szrPn+DSyf*wuS;T53eGA7ie7~$_@9p>J zCAcjceoDTvLyOdwI+MNvk%}1(No{C1%a{oY0XKc+b_5TZrEaI0`^UoM8t<+ZY1?x` z;2HkSP8Th|B}%HMTU7n?5hN%~%^`h&Q3L560{2GDOwQl;<|_(6#x4c^{_|Bwce7vC zEx}9*L?MxU3vVswWD<4OqO)KTQZ>L8Cdr0A(#3U#x?E_8y;biUGQF9K>laL5@1FWY 
zH%I7A=Hq|*WH%v(z8`=+{!CZ9eXoj%;C=6Ux<~HzJA6frr+&HP+y^i*V^GrWSfs05zP{)cO}2c9=%5plGe z_8e0l-eC3&l4Gay_(i&Bp?-}Ue|rqC_`_GG+QZo8MH0Q}>dHVgwtjUJd-sNb4@*EK z65eyexv)5JQqKhu`)6K|3R%#mvYDF?Z6LWb!xVpReUP4Or2;Luw-g~ z9V3p?Jqv;M#P92sb9-WPsj+ByC&(ht^M|m@>CI*GtDD93{A+h($jYYEhLA=ffDbSn zh-JP(+9jfj<52D@a+Q$Bh#Cd;f`2FOS3~go|3b0%T*h^Ij*FwyeW6GO<=XZx5G*$P zt!en;Msp=4H0tcZHQ2mYcW_(gV_%V#A;=~OuhF)squ!^5R2FdCWpbt|0e_Nmy@3v$ z7|5XcXlIIR1C_Ffji1!u0vFp0f2mJ)xRJGKT_n*Fk8uS#)3Z1~;C@F%Xx?~v*2Qtm z4JGB@x6D4cjmI}y$7Qucq0r?WJ7oidI2?G%$&sn5$#krajA}}=liAl;M*HXmbH|0H zz62Ily;6ABeU6vz8t)aHk&yd~1H_eJq+|vLr-H8D!fig-3I6YKwX^>i-ygriD9Fd} z&)fy0T2`4hK9=jZas2a4=X!S+P+O!W=7x6;XFNbsg=(4f=}(PA3p=Sk%Il@wafi9f8Wn%6&-3xI@>jm`Jw zvbHPURV<~#t#HV#h9*~hAf3_^bTT#}QK}61jcdzF3|ojUScl(==%@H#gkCKAx&)=H9qfe^UUw-W{#y%hfD5tq@NfmoDJDa zMZ*s`Ys{p&IoE~pal->Z^c7PYo=yUd0URGgTX8TH8zCvcgEYEOS`*>D1mLXx7dV4U zseTHVDspNCz|G&{+w+BA7qHoFu(Sh`tkd|%Y@D>GN$8gvb9m6D?t!i#w_C>owiW3L zz2Vux{vqk$HuI3;F`q3e2UF!lBybpo)H;9uo8BDg0f0dEJQ+9d3-2gLfze>f#%~m^ z@&d@^g6Si5<}4qx(oP}a?uL+*NMS}wqGi6J543FUx0Jk%kQiBsejl=!^kDa-h7Fw&f&5H>WNT@#w5yFpuq(* z0;2N^CX1`|SUa|5#<)swOe1`j(zHK^9@WgiEPiFqVI7}9U>&L<%?Egq`Gp}MO-%@h z^+Rwdww>&$IjUO^L}L*+(8Z{kU~ob#uz`G$pv+{op25ls1M?aI(@%dqsL|C_qd1R) z+YWCt_aRu9#&-%Jp+h@66=-MBB}nk?(%mu@i=|i_k~A(dh-@}3e1JkF*E^-wDI`5> z%d+(Fjb9fsV57CHIxj2$)z1X4dck~cDr4cYszZL((NWoral^ThZqM~KERa}-;z->s zOlYaiUFO1Qfy0cTnyEy6;g+Assp|-yf34dd*o0Rgx^wgOxvzK8ym7^4x|9pMP`au% zEf+4k2(3d0{V3uwN+W;o5*|IvBIKj0VZ7opT?>_C{o##X?I_VmDYr13SpkjgJDoK$Nn;) z`+piR-$Et?IKl|H(+p&f8rnUm>%DU=dpW9+N~8Ni6Q3K<4!-3BzlaL;F#89-{xnnm2k6cPwbo+HmvCCvM@TJy zpP^p6YtzKUy6ogb5=c4M6?mR^sagslx2ZHmTZIuKsb%Z9G(8WrH-KZ~{d`C?z`vlPzM99M!GR> zNvX%_DT|@TFaMwbC^!k3zpSEld%H7I+&6sxx9h5y%Y+!4qN}#sXmLRaC5NZ+Dg!d) zMWd#Tz(&#^sD7XG4qs2hNqQ0*8U{cQC!7TPD8iN*VYBA|-m>i*5V z=Yj~957_AS(NjIW=ep`EU=Wc>sQUsx4hxvv>7@Vm zqN8w8DB7`5i<(J^&i4){Co0_!Kq}^Mf2sx{I*KMrSN_LI?PtHdU+KR!=mH@oZVc_; z?z++I`aBH>n91v5<8QSxZkU#8V*?^Ac^M2Tf6T*CCZ8jkgEKFs;XV9_Sysd 
z{$%}cJw3G=P8`q)Q6J217;9!AVbruCMVj*b4Wcq7(xP5RuDSZ0i1@>i)c?|vG~>xH z5#Ck$M0Eg?n3Sy?B5;wpr{~>S3n9yaEU*H2$g5?+>;~}Cj5Vv#$!%EagCiyBAmRvz zHw8y&WfHPXFM+FtZeyyYCZ~3erM+`Q^(e_=)qW9cCQJqhB4TxHca6i@k|g$5tL$d~ zAi!Jar@LrJWHzw7(OSQN(kgq9Dqq};HJ0c}#kP^MmNDpeF?uH;n6d=u6I$Jm$KMmR zFlN=`h%DZ3-W8-WcQ9pn^UEsnJE>P&3&v7;3vJI@5PZhD$g9r0=tiRD%|)`>6%Ky} zo{ua6kSB~H#TX^bET({P5b2{_3k^I|mqXi%^3GkpaH5PmE}7z%@klL(QlIEj`Xk9D zS78|vnf1w{p|%W4Y&LZZ+Nn}R#?|NLo9ed8SxRiw%>wN&E_&x#1O2?0DQ$4D&0;|$ z*@aOGp%yo>3i;2&d+f^)-z%7Kcf-YZrl<9IE5N*2QqX{u4Sn6e}$ywAVGu@CLh=Di^g# zK_^2JkBC3Cj&qez667l&-QgE3#w%Q>-nn4Ypf>f#YbwtxD$?^&Sr|Hst4{f8N zH6DRxAlw_Kk6%l*jXdd3T-kA->50 zs`Q469dR=c3HHhTDh^&I%A@=g-NRucPxPu zsYj{R9lePpDW+Tl`_X0-opF`Q*9?S9i!8ClEiXqG>Iq|9%&{*kKOxL+a?e76$N9;t zY75_UC6Co6;uX+Ac!GyoD4N2K)bB+K2VVc_6p7=t@so~FPDj72@v7Nz$rmv-qIcv# z9g*P}YaIPvrS4)ix5r0m`j`A1=K4Ky4uv6v4sNxKBZ8NLf>Z7dOHteuIw+u4L?%T7EH0^J-h1nExCIJ$`N|R zeSV2NN9)VaUOsM*HJ=01wNLQJHNVoH6|UKv!8;W9&@5GMB#E z+@?Alzl(9su#6`rO_C$~CL(B0E^P*Lk)~8q^x)xHRVH^&*LikA{N2=`ziT zq#&;#FG{$g5+#ciEPD>jmyUgXZtYr?C*-yuq=k{j#ah9;R}>o>z88g0AxhK`ruUDrh)lvbLuR+vimevePHXf)E<4gKmrj zjUPX2HztdoY3#hWzYm^VNd8G{t_NT8=co(!=|m-33a*(7xZ>{Sl%-A1&#M_H>EUP> z87X8OTgirwnb=SKP`=*eP6Br8$Wjvd)&Swi>`S}7pmd0NIon(idW`i$)Uv<(>pw$u zQf`8&oEgm|>Vr>}_uCkt_P+o{5{lmH9;7hhI$eDctM0lZD3M?oK1$m}aDHZsysrgP zW`GBdTc?nxK)6y{(R?EkN7pi=_;^p9IMhI6)pR;FYe7k$ntsd8Pxmd3-X>z=;hHrr z`tmVu^O*{ z0idp!HO+$@Fq=N=dJv$Qc~~(Bh=sq&Z5f;*V)J(;Eo8T{-YGCE$)eE=cN{C<46@Eab{+%IvKU>_$zxfBrd+D@%xLICd;glvJ-mrcy} zN%D@0$bt6Ueztr$gP&pktE)3Flv-NzTk|Pd zW+tHR}E=MWxW;KWY0pfW69t^e8a@fn14n0yfN#?q~pvW zXt8iAhTyZSG`J5Zzzb=&byo_!o0T93G7jwi3Mz>+(}o{pPC(A}=*8^HQZhpEZ;gq8 zsf0$ZgeFp%GD_q1A%wVPrf;la3`+o2V|HTn<%*JRh37Xo7M5*SM;t#UsoaTRyab6v z!)>^;K6rsKq-CU4RfKBh?Voh;?V}SlpN8yeUeHhi8nksZ&8)$aeNiEZ1^8+Nda3#5 z;la415>Gr5S@Bd?FRW&e5W9qf<>Fjs{P( z^C!N5jA5k1o>P`w`O7HwM=uUmmTDPq$ygx)s5obB$M68CB*wp;rUeZ@{D(!J+u=kZ zVjawbB6(Hg^G>FLsRMXFM(Ctm6QhpW;^^-_W}kj85}g@XmIl=g>uTD`R9DTbv6`iE z<8&l%C#^upPI{pL;M@lg%{8y#bNZ2Rr1l>#(h~*~%hS?7CpWmN#e~br?y^0A^pCAF 
zi98tHkb%NkSCw?2mNO)?ZD>-BMaqbUnDJ=Ki)Y6kV zp+Cc*BiLw>IkvLbcVrNQgmL zJeUJNbqdk_mWW)VA^cTn5QL4b#l^_n97*Z$c)4&Yq5)Dk;PotX**+_7dCqf~Lxo0& zgA%iTqX{c>OGM@r$NQhwF!tU=K9S2G8`pRTL{Ved< zM;*v9d779VB5xyGT7zFI-`e{LGG5-5C#{|yL!6TZi&T;wKGhR@zCyO|Pyk-kE}%=r zX>p}1(L+%H#IhT8HHccAI6f1XTYzirhv%sj<3o&a;cc=c!i1C3j(j?d_)P55T%oXx z=o`I<32G`j;MjrZken=I2u=7O5R0T`r>;O75LR1jau5wsrSdFD`L+=9H=Y|KJsc1x zMKCP~jmUl1I*_{v08hlF)vUl=0vi@jpxV6P87Fw@;AFt$S|j7`Os$L2n8h!ewpPTO z@sTfo0_=rUC`3+2`QT;9#to%*(8Shmb2kFEXX+wIyGiX?lJs)kATu`11O#tHQ+ z|HwQ%*~ya0P7NAs?=QRf=Qr%=iJvBcOR7v(N!lF^501?;4nK7NkF{#Uj?VPCr%Qv< zN=P#Q8)?-YZ+3RaiTH}jnC55X6K;~Zxj29WIT}Ry8__5bs7|RkX3?Ms8g0?KlVG0O z*6!_Va7uma`kbVGs@Y7?7~=r-yq4Av-G=+l6%ers9NGnw@yk;52_qHU*Lr-20(fB# zek<*l^4~#WoqAT70=G~_wB5sbTeF7(GuI5!eMs*N-2HP+L@D$vk579qg1BhBT&8dB zqkX|`k#ogXu~bMi{jd?zFb&9~+O!1iC*K5M$WDlZ@D6_Nv#dV3okr0fj;b1}*F}*J zT*7-1??6mp#hV21Ves5c*Ko7$hM)Lxc&hcyHP+em9WL2-L9APKnO0~NUdH)Ri*~-> zf+33`LnBrVbT$^pXZF0I9J0#9k7-SlQm`1$^m)~OW ztOL2ZP1mi(n-#1!V|=N7^i1en*S+F;(LUOJ>7uM;o8iD>x&e^MmAXw8{_Ym$z@Iyk zC7}@@7urBfcn{X;hv}V9tIb;d*t9>-T~^^lPC5Hf6Xe~jYy9tcY2Mw>+fif#3OARf zpM~ba+}$d+F6pLH?_rx*lfVP%;uCZ#7_uTXKvX7ix0H#dn?ucn0aFz3MrJz=udM7| z-8XhNiVKjNP;%f06)A^4Bp`7$5_WA*ehdR_5B?%~FaHb+5qI?(sW%qnq#p?Kc#F#M zS2kC4e&cBglqB*g71O^h^7PKL4&?Tf;XF_fHjM5&UAuK`m0YCX0@S~Uo&(WxEk)kC zB9Z;6=FrH!b1eTL^oMVXA#9nHa7!!|T$#A@`Rd!+)2=VhgEdTG=*jUh)I3#DZACRZ ziZ=eLFh7bF7`I`kD<=tx16%=Mr*kZHH}7FoMAU7h}!LZ^dVO*g)8liOK4Ucf|+? 
zoWHiV-F7Q8a9+K`cKn%%SO7!Yt&xlNfF16c!T-Pxf8`baOaQTXj)gBz;)b2TK_W&+ zL!J`2b_b~5Nv?w4E`CgXspUD+%`Ca;$82^uIhLZCnuFWOcqHV_5v{wVUnp|twfdc% zFAzZ`JcdvIv^|KlRNbTR6B402(*4bHI6csYef!_6^7pj+$hc=1nOF(95%el-GGq-# zZXeVOdRD=aY1`F_$gQjAKOkqZy47vP$=_F-lFW>~{tAYUgjF-F-1Dm~J2OZZ))Usx zYQe)c491`B#eNt;w>A?Jo$n5T%llN-?l@=kwENi@?5{Ae`&r%~Li{oANjdp>a$=+i zHaO`dQGQC!`OdN2NJ8o!KExol3p1;McH?a%+^vAZ#yh@1$I?_UJduf@@uZ}~{p{vA z*7%rkBBeH62>3l^Onh9J(DVxO;J>Dh+mLr;a3 ze#3|EGwow0NuNgw@~PM@&Ks^PXmsbb2ksQL>ww1iT&M?*Ig|OE5<{{oUx6FUq_py5 z4*o4#Qh8cNIO=tC&B4D@c@PO_DSjpEi}fGm;6IZXW{{UUHwL-Z+CU$@F4v;ex2vW3 z+Uv{+_a-o}Q2h9LmOwPD2HgeG5Df@(AX&^EaE)s!nBGxYkHZ8cBp%7y_%zJ#(CLyT zJzMkgerkob7UCz)`3~)L zF!8t{?DY*eX4Y?%M>Clily!x zCuAQShWbH0(;lQ6Ncqk-3W z%M@;-=j8p0w2>?4@dI0->^b>2!1+?vg9l7HmHr$ld`3gr?A}7MrM%N<4ba4pL;??+ zob}P>#iz3ycYRk`p=DnFws<48=1_m<^zQogWe={td4{7NTHeG9nhH^w9#ynVbGP4~ z6GF(_^9l|g&U;G;`U+n`k*I@D#Do)blA0R7Ti(CIS_M?havI?0j+rV85B zMqn^S@w#!KR_rk&vZkVhn5HodGpe0F9RS^b7;}tgu!3Pw>WrCY2|LR^3%E2OpY5elUn3CItR_w}EYgF_D?e&Hp*4((`CBM%KaHgy?lA0xLShMOy z5Z;66r8BfPf#w? 
z-TsED0sEIl2<)ZM_4<5L9uz4A6ime}OB6EA-67g6z6ONlQ;iAWe-3=L|Is<#ZD)hlL5D|0 zm-v^4b{)o5i9gm_y$};}47PQpv3;VA;}F&{nia-mE5#2S$XH7AWLD6!jV08@m5eSe zxQ;Wb9sX<&iyf6v>ps0XDwRmvAQlbA#+Gq zo_ha4l*dwDn-1hSv={V4N5QzPNJTQ+?(B08nJGhJP!+yMYw^hGB-`uJ3pJ3XX~z;` z2Fwmb8Rsg4M%6rLlQ*{NdV4&I*7qppg1o!EUqQ`UEieO$MYbsAQSR(l%SMtlGPw;L z4#sS7(hVl3DWKG1@4agfsh(D^{DDEfYo#uLtnT~+x=!%=w*5i>Q!j@vUNHoZ(rV`x$VUl&AW zoK<<^c8TJ!f3Vvsb<|Nf-wj@X5RVO<5Q_6qtjaQfoR z495D|x8g-{n53v{Dre0|{b7JQ;~Xm1&<+4naEM_#FsX8!74g3WD_a-?wM7qc3u6W4 zGcYBkbcI{CP*ud(Fri5aJVg`Pw%1L`*{(rba$D|j0#-co`?}}&xjkW1-zFdrz|0as z{~giptI3wqgaYnIwqC@2ZuT%l6hX^M^P{>LbciqB&obh-;-1p5xzq_t<@QKwtS5)F z{BZg7g_;YO8|$tj1C=;pr1UC0<8p^^7B7WHj`xDTI{xAI zyVPR6?ZU^6`K+ zEB8ua_7u>m+A~YQ(6?!mVk)HQ1`^QJ&Y-_c3z90Ml~(;0i5}0m zB_-4OKc3$Yh}s)vRJN=A71-aY#M@r3WD zP+_&q@D%cfqsxmA@a9q}a*9t1EwjOp*=vT3Mhxk2DV82a3Nu)cjP`b>mpIBgbPDsT zMWjo`q6;Q}c$W7|GztDlcpr8-4^7nrBzH92%k|E5UKk~Qa8635_tADv$d7eC))7Gg zRly`hnz$p&&`0C8PV(JogW6wtZO6-*_Z})U!*|eP%nTaeolFl!*iXw+sA=M^&tl-%0Hj)HfWj&3^8Vg3` zlR2x%Y}7kb#e8`JoxlVU%GKiK_@k6}*^}xqHoHUXup{HFy+tCP{suh#1WZ|Pe5^$n zDdL0R6p)#DU=2~tBA~PiAKb=$yO;e#@T|6w2o%HEprHc@Yx^ju8e=P}H1EpUzB6tH z>BdICVjV3FYo3q^75$q>UZtdpq8ogQ5=+@2K#Q9njbd13R)?(&VgsP3Q?tuCaIi*Z zcGjk%3TUrWNYJpAL{At~E)`i5s9qr)#he(l6jacKdL@LSaf7gS=r|7r7q z*zvSi{4B|?0VE)&$_)7??Bpb8?#i*5I)c_8uZ2KTD1h%Wn@i=Q{C?YTFAXCT*U7F=oTJ z$>o5V7i^J+4v}`W7vUVpRRrJIRbzn+4ESpc&&SS+YXh<5>^RqFiJexdCCz1*zxt=A z$9b3t*tuSBa($7NGM=hHxqf|8gLvi1lHVr_RG2%nxe}?TRs)MGIqm;$AzR;5FvJqsepEiiDKev^w&a1UGZ?InfN8=A zpGqDoon7|w_?g|$`EDY{c$PtKKeV!I;W9o~2R#@w>8$#G(scyYfe4yf4!slirCFPe zY4j15o*PNk35kN3*M-H-_w58BvkV8suD%azEe5s16#B0U=;D}Sw1<_m3?nt}@M1&R z_9vPaXPcBNGln0TU@sYe9cUoP(0IopogSrf#sos{D2!sSwFRbwXL3Cn8~U#eKDzVz zsph~Pq^3G2DUN676XZPw60aw$gF)x@b?VG#<#|(MC~Wi}c`DheOEVho@62lTNXls> zCh!~h1bh!Hu1h4yonoDr$j-m(j_KTY>tRPkK)bjy-;`1)Kwda&^%?0}_U2q-Yxh8R zQN*{H?1rsmf*lK%lQWO5gBnP>up$1JKT#y_<)(IOqTPIkVxLHL+hf1VIDr@Wfe?Lw zbO%51%+K%``|!HdkY)??D>`H-WvJ%U?T_N1kKg>f3w>8}s-D8wDDpu?AG>vqAz2$a`RtJIr!jgFCU;N>R>2<95)S!9fls#}l`YOOJ_i 
z9ts=%fOt*xsu$*8z2^B&?ut<2n9_V@qHBOA`GV5?qr@DaNf_%$suU&h$G*cDMzwcl zP6ox!Ogx#RDyH@Nc{RTl&Tmi9CuzFEkQI*5v(L00wC|~Vx(b%{?Z%HW1*$WkibQmGg2Liy2 z>jrV0-FJ)1!aI4!gKO;_WRjMl=~dj$OX6;V-%k>wDtpPJ~ zeFMgd*9Fl$I11n{-R`4aPo>+C-;l zBsw@4Ag~=79`$4yEf8)Su-G})4MXNRz*$p1ey&$DTS_nUw532JNkHJ;n3sRQe6$G} zT7Y7fwvKbXZ3ns4r>AT3_WzLl1TsF@O7+>dhb(F;>v7Ivt~E zc1ui+$9V~A9#}ijm^W1uk6IsA{(J=T+FygYM!wvzwM^#Y*6mHnPk_}*jcjMo%+JB-HEIsr)5@a zV$paK%cgT1I2v!iK*Nnn7QLsD{xpkeh})=Ov=%)QHAn*iCK>?lxeY8Y9@nS?Cj{Z-?`rZHDc(-Ydd0v8It54#u}O z0>&gC)Ef(ts**_`A6FG|{?bralwryS{D4`jRe;mc!hTWw+Du0#RYATcalH+8sr&6` z5StBzCOU6T;z1-`J?!KmF&lfjwLdCmGU)o8{&&odNLroe zpeuZ$wUsPq7G1M~9D}>s05p@Hisrr;(tMi~?3tSPeTw*Gpe}^C6z-yG%*h|YnC^e$ z(-7dxKv98d*m--t2m;p_Hf@QLO=dp1;5fbSr*rgmeNB+_W7Tx3<#lVwVp=d&giZ$Ic*2ajkT|T{Gj8jovvQ3r@0s=mSo8{kHn; z&%*DJ`NaUAe}8)=KHav6>TKiV{78pm&y!Z?n4VqExhFHuBkiIlo_D0%GOVFJt@s2) zl0IW)W-@?G1_yK{C;2WSN&4jlHH>}7ud*<)<~XznAcrNzMxP``diC!9mKgeRlcLHO zh2t4esNfJH!~SzT!CXdDFmXxD6_!dAuMglpsB*ZdCZ!eGwsYYrET@(#B%a4QPEk7~ z)AG#iAT*&WU?j`Ka&I=gX6|dqOC?WYoKwYXNorb>L*#g&vJO4D10IyF9T8yBcJo{ zuG)F^+SM~HyW)ha+y(xYa*+18^seGYBUE6`l3H`=>ej9?5@%Ie$ z-Q2lGHUt!WLdxBnQN9+YoV-n$KNYRp&+;Woh0ZL)Y>^T}NwV<8SF@VTxq-?vE-j~# zkgXV#YBKS)raG&d6khwCYK?D-Y0AGMUnLRZb8e94PCScx$inj$813T=1 zYO0?75EDbqXMB`WH?x2pSE_5cekghXwo4MnBS8M)O&SUGlO6HBwmv>8`)`UNDK9As z;zLkBHxF&w6ZxkHZ4z%bm}T>Q_daBt)sC(bBUKsOqkSs14GeOKeBuUQtJZ?yUL0kM z@{IpJ#hcotE{aJeK4yVHy;NOk-H}ExGQh`B_Nw{j^0MI6k{)IVGDjrM;+7tuVijb< z5}y@Ps;PRFAzC;-<=GF)8rJ$X%0Z+X_A2W3ckuKs_rY{F`jWkJyZiCCKZ^8*GssZ> zG}l`0*h5j-6MLW!Svp*24%VuR1F>bis#@sxpiA|(;&Fu7jOuXSR^6C8>Nbq_uyHnf zzrt#5+G{9NQQc2zo>?rr>Ci|SEC0tezZjA=2f=8A?(Ls#Wzq}sOQdoIt%zA?R&}x> zBXD;SgOm;pj@Aa+71^k+H56@xDdg~*!}6g zVZ{S$p&@z@@S!;lV|WVt^kypQKq7Rc)@mEFk+@ z8Bh*sodo`n%lmOfH>s4McvG>IcWdfGY={Q!fFg6QAcsqO*;;SCPg-QuPROjxTV?V| zG9}HTaYW;nA_KZkqu4&p5buS~o%&0AuPI(OKAr=K0`>0oPr(-R86%~tP;L=hd5Jh~ ztK73RCq4SvEV^A~L<%mfJ)Z>&NE?!Qh_raboUdNRACB6Iz?HGBobVq6QZv! 
z;a*a|p_Cm;XwM44J4->LPt@1Ngh!^ax*zmILMz6O&~jDNcD*5{t)+@r22nsFcKj;T zn3|tJqFB5&5V*Dt@mi;e{HG>u`ni1^j`|hQ8dA=f`a+nxlTGM2<-u7o*A=4n-X1U8 zCwO!5rZhFD7*SP@>C`3^tT)CxOmbRE6;Cm7U{qyJHu`5FtLVMy1O+UqsZX#lYFgd` zH1oWF=TA)dn>(n?Fz(Z8!$to2`nfmEMP;+YWFJqlx;cV*Qf}O5H-csXKB>lVM}5grnYnFr{5&Md#=G@vZ@xC@iKIy6&7DJa zWrA7)5#W{{M6fURr9@OO-KW0MMHt5st_@lh1x ztl^J9E|8Tr;Tf8zd9I+rDf7M7v=}D-3#}YYpgJ!AMNwIK@{F?HDjCfFWV&6Z%>t_S z{Ajy?IS^0lHib6~ zU{u$Gq>%RUFI^~l8PPX(A_XMUsZ;s1#ByMn=|g@-fnIluN6U%R_NU~@HU3GJVK9+; ze8Tz&C}oF|wO*;Le5RnSkcDqG*H1a|lu~4&F7-}&)Hjbc;1E#&qKz*mhmj!<)C(tTZIaa5g|4B9C^C zE56YFWaNtfb!VT0jzVwhpW2d&ZukOvppk)+S6Y7F%c{iI732?iK1Q5i4+}DX)TT0L z!cbG7<~_%hD?Ng?N*YRQzcu#ybQ?{JS>w%ELTXMM&7y$*sG477#wk;nS9mYe05>+8>f}z1x@v<#W2NInqf$Ildi!4zy>z$4{;gDkumny!A3tss4txQ# zf}+)WkGXlM!&-Xh*&m5TPxZ?ZxJfi(c)dsE2qzW>^xPG^to014ZF*wPZ`P5 zi*^#6HZ`C@Ds>;DgZ*HR?=SdeFTzwYM=%80zuD>dMBW!&ev^kMIZjp_MrQs{Rh&P0 zIU+DpoKv&{S$G)STxeFEik6bhH)`Sh@RX%fho&Sn7=kP4NzI*K=RoC*ojeia+ToV! zV4+ur^poPZrG!USfbT$&CV~au(Nrbx?+k9_3Gm#Hk={d>b2)Q1a96SLB6tT*)PIG}1T^)0mkqtX8hy@iSw5ctH8obq#aHwmH7B%@)3Q;{rIa!DH;Q> z%pf79Ce;$Xt(D7NnKyJ}RadBZVK!Fdwv% zserz5^uGD)`#S^>51Ic49&#%CtiXL}tQ<7G>6d6Wv&9v@u7y7%rFkCKOO5VpiA87r zX%jhmbOon~@O+!uPOAqE3KI zGYk~Se#(rcl5vV1GrxyrjnT3kE@9%%ZRe#=S3sL0Xj@c1;E9Is8cWaqH3quZTqX&V z5y&0gj`T23O};&yV{Jm7=Ot`^;9f{-Pi{8PC}q&dYO#Q`p~3=(Mw2g^nAEWF$`d{Z z^b1Hv&Ug|qlyIrFI*K*Ec!hx0n?**r;MH@H@5;dQF2jI}dQ(qQ^$Tx}_a|f*K;Qb} zlq~#BfO$-%wYaC zlL_SJt4 ziO<$5w5c+wtU?GPy#4XD4M=Plt;9)g6f)JP2x-O661u&xpr0wwTOp{d=KsBOr24T6tem|IR}_NW6Z^0wvAYBQ4CVG?{#b=}&| z@G*N8tE@p4^F*C(>jw#*B}+1kJ!e2xa}vj;crW8#z^8|Xafr*0|1L=$lV8&62=d7U zd7i|KABz3_H1qqWWJ0*;G5;1tH3+w8Rp&Wj7zyCOV&J?9)RhrOWTx`gfY;jAf`sV~ zblzI(SUd6CwOPz6&KVO)6$O2;AH&Vnmm8$~QW#v?> z(p~O0U-XoJhq!ps_6P7i9C=sK4zS_{DOIo_o0?p0@s6K-7W-vHtk90wc7^Zvv z`PPby+Zhwbz4yq$ZkpABC#_TQz5DjwRfVYpXX}xnt7t`WPn$|WBjs`#&bA4pjmZ&6 zS=Vfjv%V=d#4Vlj`LIie>@F@Y@aYg{qOAdH?$_vU>M}+OW;RsKOcVucXj6xiPc3|o zqOnC>`uOFjl$zi#!ng2c{)qM(P>6g*N8KTvx}`zh&-jm|Yk!36Kh=5m*9mujirB_{ z4m{e_E-@|;KJ`<|zbC8S0 
zjSW?%>R~gjEmU8f6?wt)(nOdJx)_ib1_GU}=rj0$L1uuzv;lgZcMpbbV?JwoqHC@1 zucHZ({QxVU9k?yZC+eYxtdj%@Q6A`jNl~P(z{bta8Hr+pb5B-IiuHev+M67`7sN?cI#WG~Xir9d#|3 zJvt=F9HOCc;{+oX_Nc9nL-<%WXTQ)<>2o-lD0+dQM@eH zY{qfxA1|n~xVM*!*>E{(Uog8TzwJ_Rm7k*a*7FTHY{=)=+%9$U!TTww20g1s|Lcn# z%YS7HF8d|yT? z_`^$DR?)rJRFRXklpuvdt?2xtskgSbf$YyucH5h-s#GqK7OtJ*k=P%i+eFxYSLMLQ z3wCicAp0aP$$gO*bX|Q_0Rqx2NG?re?N^-wYvuBpeJ83{GQUgluB0469)`mO2k!jy zSf?1tlH-p^5v)ySvQP3Z!=5}ZsUS@I8Oi!_!-PEYfsB0nUo8jjsE-~y#*@wU_amM> zSr4l{(!VilKd`nEU+r=L8E?D6Kx8iZYz8~j7vU=-pb==Tu~O}8|Bt<|468DW+CB~{;~EQU?J6V~_@=l@g^>lu%Noq&qxEKtY;AcYbRh#9`w7uJ8JOzy9%>nP;B!?7i1s zd9Qnc+%=qu+0*z3xF4Lgd!1r}BJ?1t$nxi;EK-Ng@2bZP`B;G=QZklX# zph`JZb!GZG4jbb$eC5TnHu5ZBltXMDG%wu&zWrw9P}9jg8=@PkeG*;Mm>fZf5LSTt z^6uAk7B_Jp86jbpCmUaz_)tiM=#PL@cj&#t0pkb#Kv(|Hn&VW{ zT=3eb^2e@MUr~bn)p~wx?#ihz4_$Vsm$w+$C>mXlIKN!|2OfLH z(CgE_s>jg`^9D>8688CRw{LBGJ1l4;7&ohd2&qQiW^?v~eh~(=Lk>6Y1b!oL%xK&_2aZ@k(Z00bf9ht5oaciG(oly9vEgZW(Qf z^w+IMhm}TZiou~*Pq>;Fft4z+bD=f-uVcSurs<~n>?p2_#tmI;J#qCQxY*;HF$s4O z#}M?>XFjzXx?uYi{DqU(`al0V`)_|5sHYfI8GK2U`hC2)n~N2Sd6C1zC#wixj-g6G z<%P^~qPgjK*8~|W6A%~Be7|-_#f>-Gt{xOG+5rC1tZ^NDGotoW$8+=+-z~WD-QG~q z`C|TcR)><-D8>jsdsefpYMV0+8}|CUeZ2@9>MJ{cb<|D4+RGC{qnz*^Wc+tK#qarP zI-5og7)4qjmO)kr*%N}(Uu4SfJ^2sa{e{=9Tj|W@Aj$OoAO`#SoA#p{uKDwZ=gBGf zD=*fEh3vL?#SZ$r?rK?ddmMB9iNz>>YQ7DQd{fkdRw1REsmYd5L zfaNzl*{B-*=lZX&N%|Rk{?hKB6Z_*(A2n%3dJ!V)fTgP~UAek^{iVlkggaAY9tYl| zUk!SjI$fcX5pWvC`;3Sk__zj0ivQ}>Y{v(fXc&hC7(YEJ^*6(Og)1$sQ!kFq8XRWD zH3QU8j3<35FL?w1N!|#gpK8A=yG!?%cj6z%R({BP|{QJL- zLB0>bqf~M85uk(D;ieM8Th-%#%;ldF(!Bmo#V3~2kjjL5fMbnK(P_qpuPf=bTgChN z^i7DQD0mj+!Wc0m0$)XgPTIu&a?iZqu)dtmNeKw$Hs=f8M{6PPG zE3eC1df&kzSR0LaPLp0jZV_}mC)1j0zXuY3UA*kzm@i;tOC{}PB{**bF-@I*7UM^b zRl&&oVC;;Mc3e(LY_gQ}ug@prbsRh$dAD}(+SdI^5oWI96P$kS zJG%&Mq~cT}5CX()%E8|jZ&Em?xJHw>bgGh&w6r=2Jv(o;Wi<7@$08a}Q$ixtqkK35 z?m}DF_=TM6=#*Kgi-_d6#9`aLu59=-y`@MxA6c7_SaTC-lz|4c<5UcA^0_!$5CM5W*M}(8EkqU zg8oLSURyT<((Wc42h3ZJNv(`Rn_)QvXZWtwiSfgO>6{$UkwHZ23_u*J`PWuNHbRje 
z)M0EuyC-PF{GSf1awXGjN6?DjEy>oSD^{IO>^sA#!VUp%jS+I!rAI)e!Nqt3j*#LS zGe6X(v!smwpuH$Lx)Ge9Uep5wg{R=D%L3Sot@GlTM<)P=NqrrdF4#cB!W4s_6I4x> zdv?#g$(_^|=t=@IoGHe)nnr9_$F2l^BNA&(T$u0LOljtyJn}F*_7ktNJK!mF&OuT9MQ}z(*_V{)FPPB{H?!-@2pQ{Gb-^+ISeFkc`aPyx5B?D?IE56BN{yL)SmgT>^ z9ydg>LC@@w)71U@c~;bokD}5iKP#iAwH8u1po}H>EV8?Wx*??E$t)Wb+WgUd2Zz;Z zRE^{c{<`Dh z?*6m-6)dgbUjY%Ve#vmt<9WASB5QXQAp(*j37>c)a_Cggqs&8Z%c|fJJP`r7c)o8w z_>e3Igh0U5(R>Yim)MB2*3wm&8NhZ`>SO^!wbBEqb&ct+Fn;U@H$vWaAWN$`c`S~0 zy$tfK#r!GG2wo=mKS_7NP{HlM;@Dp)Hn?cStL>**a&qVx!t}GwIpZuCT$_*SFOT^| zZ!z$&Jp!`&MP@X_Gu{tpoUp0jmO#6ITSsgs2&@?(G5qubcwdl$Dtp}82dOg&N;~-r z+TSdEM2OOTJn-;3(4g}w=N?DCU|;iB_}%l@<^SLu7Wbqidh>{aNZ8HaBn$DJDXu|` z%wT$mHx-gg6VA{LV3w?9-chBdYp=IL9{sb{XJWzD3u2M{Q3s)W55{4&z-l-+Zr@h2z zEpG&8#t4_&X=y}9WCt8n1_R`r*Y<20w{Uy_E^NSRHf3G|cXAL&7?RRYqg=75eVxg; z5A`vkcJq?%nVoRU?aImOq6ssfM=YPF2+dXYV zx#vHxy7t-*^K!6`*x)*GOsVq~&yP-Hh{-9VW=F4_vbZC?LF~81&9|mfve&L{;$uxC z3rg*K8Dh&Xo_nAuqGl^Ek6i0UiF(zPjkXPkkOl4>UrwhwDvD5_K= zb}E!LK|Dxw?omfmbaFN8TU5MJT>JffsnB7@kD4;KnN(Zr-$TU3>Y#(*j!Hv-A!M&Q z9R&BYbm$!rIsO8@d@Vycg)83H(AJyeMZOOYj0h;rMB-ikGq=Va7Etn8VEi}B<$Eh5 zw}E?KY7Y&^sZ+Rciu#0dDTT1` z&kH>#h37R|IndYXD(md@bzeRyW_3Rl{NDg^e zCHa|SSwjw?LoeRD(N38~aVtwoUZ{9NKUvSr#Hn|~SHU*Z>{52dBC5p`doqUmtB=*w z!F5*BEY2k(_>74wIQiZAm$xq8O0N30VCXw(H}kQaVmL9s-I?wOIg|hqa_@L1Mg$ zJhbN39kO_?siCQ>t;L6X{W`#YSv>oC?KPo8-ZA&y#13m`Q}Jch;5zE5x@z!Ilj#BY zO7ID{5B#rWlpo?a>N|9st2p@=a3j_z35;0ZBT(hGfXFwBzuio6)T!W3%<^l!6-#+) zjxCD(6wuRXio8c^`{q33B0IB3|2|3-_Ld{1=DMNOYJ@vMi6>@H*iYYZT#cN>MgM*$ zVHu0VUTc&VrJl}AoXGpeo_@AiuC-uT`H$|rgC`D^{dKpCw6gkO=aal9jl^-P)i2Fj zPRxlyh$3ypq-y7=x!8%S(%i~%HV|L#&2Ol?d+CH##ZMklXK!Nf#Vg#nv6QPYM#x~x zBI7p~31oIp1jjCpErw23etUM8;g3illUgvOa>CUUVQr|5dQz2d&XGi{t5<7<^yVqa348TEfkHzo> zY4{^4s#F^;XddIovCuwsKuf}+7i=4zXY4b5Xq|F{Ep}@uyt5NL zpM#n~Q`a21FWP8FJuq{{ zRqriANI+NC8Q{EwYJmV;=B$h!7E^4b$nb^uy9iqI#BFPYsmb$$Re@o;OWyXECZl~I zM$}}ypvhn!(HULn`fJYPNLuEabV3~QB{!c8u8joZn|Iztys=7V?B@JoMz8XGCd@Ap z?_~N(C47gUqP}U$m`O%YJ?hX_tOhqF?=LqcfcKesJt~qZrGA|pX7GBpA@-x3&i>o; 
zwF6Dn7FUZ9@iMTOJ^mVj8<%ChcNiNJskYefOAJ+lDDzKmmJ~~8MhpLQZ-<r)C2qSt2VEh$&uXVICJ-X1M+Ko>)44# zO~-X;cDYsdjl4{57N;hw!?m;lHK!Z6C+(k1i}_2s%Z#XS+`R>5u5KptHvfW$oIpIr zk{Q)?PWEl8M6BTnFmCXDXh)#&C5MR&+m19nZ`2y^u6C!U2xvim2${E z+Ep$)$fF7ALXHC>s0PglfTGt!Ke5Z)p7;RB8mRG0cXE@+O&-)THahzP2QC~VP2%}i zjM0Z&w)n7id|c87HKWgw+_MWYWv$J_-j+4R+qaJG-9Br5xC2Q9oT||dw&*>@L${#gx`3cT@3kC$RkR5 zo-71mYK8JJ43{`33$+JYj-T!;WAwT?E#7(XoLoA^T6#Rs3ok&_NoR|~($y3A7#+C3 zfuvN8KfZ^??c6rTXZ#)o5U16Qz_+L(6)k6+~luD$#j1CtxPD>#yVd_0oqlC4{9%W$3Om#q|ial=AqeE@FtD={he ztC{$8W`;YjTy9p087(d$uT8=14%~2}5TxDsDFiwQP7gw0#xRT(uFidgS2kuB-{RvW zzb#jJie0;MC79^Y{^vHlD&#;z;N~V@eb4U)?r1iLWd!=sC-*v++`XvDVa|Ww=sA5; z^Ro3JYsAE8KCEz8Wpa*GQ61W++hoayx}~pGbpLSHHj%p{Z?p$&V>0{v$t`b8m&H* z9yQ9fYS9QlPCZo;t;im-YKo0bO(YPkL6X@`X?OfeYk)>lVHUO9+}>Xj(bPOiKy=(M zAI#cHRVm{ro|)Y8eyU^s_{c7?;K;WDoS&hT4%Cdh9hR+^Uw|G;{UO>fZ$HrC))od@ zGDo)Fa@1tA0LkiHz;ic5W+B^mv*~HiQC!^dl*k~wam}7YFqEmwb+5SvyJTx8qeHx+ zbhpAohK`v!P$tC{^PUwY+J>8{$^(#`6({b;W602V+(ePPDo`onUf902@2f*=Uh5YU zN}N0WOs&i3TOn>5O>&SEmQFQaHpO!^o?b;E1>2(LD}#1J;k*H)X(;U`i-OH6vYJCJ z+l0azQ?O#DBS%cNpHB^ahGrTtAKyQW#eRu~K2_%oM2Wzm@SIXMetur@yYfxejz>>* zba&G4QntJM*j0o8&ZA(b8xH{K(wvTDE5|xNFPZ;3dWE0szms>@88Ti(#ramKaF@Jx z=kYTtNuOZ|J%C4yDwJ18jccWH>)D`!35|zwR--fu^W6{3NC?WZrX7BCQ<=^(%v z)$qKmkciT~Db9lg#*Ve-h-}E$ne!j;ZPHkJ_i^=+qoci)6rs^zeCXTU5Sm-UR&bXm zA%{}1Rx}3;2|j9j9fcSKhA2xQ8i(C7UHLx5BUuXDDO8ivr?YEiOf9Rl6PEQd=DfR zChvA>HnrWtmN>Ug+{y7hA#sB0T)Yk!noA-2#ATMyjvR?=^59~ZM2 zs|d=BWGM@xwkuNsfPdcNz}A*Zk_@9ad}po6U72`i!^mUlkE_&qBxrc<)x_BJwNgU3u%k-lF^x!e!H^m)6DB9hvzZbf@i#<7=dL1aRK!@tbc`c^1-_3vFEgc z0!R0W&O_;d^y2FT*BjUH@vb-G+NC%jY5Hmn7BrvTkgfTwM@^Dq53}7Lwq;B8g_<&a zfiWQB&a1b+;Y0COLcq`n{lq(qZ0b&Daj_MXxS0Jr;g$xIC+2KRy4ln4Tq|-e=JsG1 zRfE?feH|~woh_AJk5^VihK#s>gzDDJY>i!-@jdtj=g%77+rziyXJ(G5Jni4tisDn` z*T-)rT>FvUy8bicI6UrHjvx^!xTrrlP%_ZBWF?Bd9c7ABc&Ezd$?p>J}({pGL zCF+2*^2Yv!!v3u_^TdviT!JuXbUY77pm*WTp;Lb~Bick!Mv%(-dcL$hQf#B9*=CCNf|~`X^=C^;vBJyi-~E7J z=%0EFH9s?NGM9rhzXbc1hXyp#&YDw+`k!MfU@_iP=vTuaRA-USYt>wKgd^*!o?^2h 
zjv_F5Oo?IHP>wDKSXHqG-!zR@pwTVOq4})qejM)!ZmF2Rp*pPJ1DdSR@=t{@Y>lP% z6J3j03zES3NShRkS3Zcv?6jdrXEacJKpS(ggaFcSu@F;wD5;4mJ4L8*<&C86PzbL% z^>ZIt@|Opuyb0` zSUl1-vZhz4zz^RO4V@@N`i9+Mry!49=%HxL0DNyhw+(XVD<~+FMOn5QU5-%D=&jxC zSbG%YQ4&B5x<*Er4AuFg3+}E?-peS3ZBu$>?zF#kcN7pLplBrP9BRZjGcAWUF;UM# zH2l$R{twIciW46YHM#eN+Xf2_e!b!+$s;B)i2#neIJoOvZKX{ibY9ornq z@Xa(#X;(9F4lJzcviOf7?@ZHXpR4m`tS+wDV>;Xvc6Pay*I#@IDRa$V^h<%qqIX-a zt^ma5U>7t#FxXJ!^Gs|DF5jEaA}=EfS+MhvvdvQR&8Fr-ZhhJ=O^EXa=X{D$^UX~~ zuDi^(ltWN~q#H;dI5>+22<&9`P*~bDu{6k;MNA@jwxA99@myVZpTj_pN+d}S~)i~bKX4HbSZrH@F<;K z+Is_aQ`fhI_6~ec+j>jta2d-fLU>m&8!1PuO6Q`0Ala*oPbqA%SY>k-zd2tG zqio*kZQ%PwiEyJ_O4E`2oy~0pE4LRJq*F4TOO0g)fk=6Xx)_>*h&(K8lKk&{*I5-t z6l3iMC0Y^QW0XQ0WSqDyTP_K1V|sy zj5uV#UzeDkSt?THh79oD)>J;g>KDMIiqRy`xy!jbyPI%#=DJ;BQuxbI`TPU|CaLTX za-fRNQdl@t82Qpq$&kx_L34v^IW|pKkw-^8JWK!6=(h%S;}MU>Ec&U@Vgz#0e4ZW` zto+W-JXoPQGmhKtd|=H}0Cq0BTu-B`fK;*2I)^90sGE~wzuYu`zGY7HKzl83^Yx=?1Iw`QkGtK(>Mp*Fv%9A7_Tb+EQ}Y9m zgTzKL?v&y`mn)a%OJ=~53Jnt-bM6G3>leOIj#a;a+Iz^>k6k5NdfV?E++p7bf*c9T z=J7dlV!ak7xX`oNl@UEZ)kP}bt3t1qyhD#V+3A%~4IN21ZY@GNNs<_-?1cW>4t8Yl z&$x5DBKod(cuO3G*|N8yyBuv{J(^VdWu*lRiX?J1BU?DQ0i>J*#<;*Rj}y#kVo|^n zPY*pN*{U(FKszkH6lDOW^cS6=`y990cW2tw+t>w;g1iQyJ_4Z}EB0{OFam=2>U3iA zOrrv({q4&U4s4@{kz(*Okkc-`lAG0fjnJI?J>yYb;M`E`?AJ$7Y)2cH?kwGSo;E^D z-0kmJQmx1_g6L<)%5tF>O+Zu&^CQ5V`$4X+@o{eEm8-YaLzP)B+ZRUKA``S$1cuvM z-gh(vR2$vRm5PjzL)mC^h{fkLF_UOkVuF|U8gN<@hY6J&X^XfB%=|xGvaBFx<}do_ zXiYt;rD3iCGdw<{f(94|RaNgS5<8Q0bqKgo(||L>#H)BHM5b(jDsHX%b%C&&IZ5GR zlrc=CzEkIeWx5N9C)F|{XL=ef*P3_5I9J`Hi7TrxXk%`7F_~|KjR^3L=KW$%f(iCB zQi=tBNqLq0h!Ow2@Z%0N06~%POo@5^z9bkAA6&xcIa&T&akwGbzgGs$2Ji^<=a&?G zd!0~Y!5lW$yrXyJM9N&D?8tjkSSqSj_!&XcifR+{y+>AyR33EuwfWKK&Yg#DcQVfLLLcg^_}QpKF6qQ;W1?leHi ztNenl$ZLa$T%Fg>Tq`nvmHzt1>5WUnprTH#uK;&{X8tKQ`t;%N_(=RbU)qnZ(JRoV zA=z`J5rpvcW^_n@X{07=r_XRZ8|~VC$>yTlk2{p$;Gl2zPgu)Nr>>&breVqK=E%*} zfS|FgZvUjLo}n{$;OkYdJn#VM_Z$$>_@zIeA^mx6!{`@snxE(IWRR8M85o;>Fpdr^ 
z7!K_1Z{XGN&sMg9hrGu|<46B!xf9}4K`e^hJA*c6w^Og&^Mx5C!BN_)q&J(tX_w2gC4%C0E8fB(c;8++yVoc=RM>sdI z^#@);7#AI;gxFnZ!rxQ(Eb zKk8}ztRWBSAG2U%cg`O~YEyT>Mv0U)OPi+8{KmR*goe1T4vHH|F=7^8;`f12DKC5N zhHKt80KjMN?7gq zH4j_s(&79QcRmmCOP$oX?PtDUKL+)dD{%&SawF`LA~a8dXzYR#TF1c+<(n-Cx?u%T z-+8u`GvCZmuPn^*%^@K5GN*?i;VbPDaoKZEs8V=-=*`g^le<6-p@qHB%Zu$zq3R}8 zv5<7GQf~1NGf){YmGPZmApI@`PVewJ^#dmE;8k3P<*2e4RZ~A+Xew$qCLhqHGWicd zZ7O0+cBgJNJX&agIhbv9Y4(-d!n_ain$A#%0# z5|Jsu!am4QH2)MlDu{UgXZqK+5hqFDH#yE@hJUO1@MX5@<;D<=Y8s#I_^RABX|wuw z(@;rI2-bd-Kqh8ae-npFLo6ZnaTU+i~3Mr^9#yNa@F5C|~!yO(?oUc-NEqcZt1M{=U0j0)&J}>?4K% z4(=CIK{Mu9p;DmANwx zwjwHtUo#=TgSyS!c>|>fy5_t1H$*p5>>QL-2My+!EZLUjIZw_MI# z-zBg=ne-{L#Ki|{8ui(K@b~@4+D{MP+c zn6O$FtDaBEW3ctk(Nu>^3nMg`EWWHm2cx}gBDoG1!*w)*2r??M@rR{}gIKtJ@slsW zt;pQpTi2y>2qrASz~+{tH|#5R5u)WSI5zyh~^}&*+t;ALO}}10HFh zSG_mUmnwarx@5<)zwkH978&P<5ND?d2wWYFgBd7EIIfh`T4YgFzP&XgJIelLKQA&6S#~_HeeLnHW~*e&Wy}fASr4aGN&L!8~^;Mxe$8fFkPe zA`?$=?^Y3k%JGo^AmWckz1%|+Sp;ihu$$|%7}W{RN7|k7&MZnq}vB`v7?^Z_+H-1`jZoNCa{u;l9AbKx*l{|r|)-9~h* zHn7DNy=yT(U^y7T#Y>x$Ed2uoN$MM|7=K^8A;k#X2q1{6KBwE#u@>+4AUWSlFP9ooy!#Cb0>UV+d1^#3Fpry>YFQvfclRqLnCO=Q`obktW+p|0r2gI=ZMm*0#~mq=(4l|=Z)&xv~@Lb z?W2mZEHtJMslRLHXk-ztyljJ*C=fCthg${n2^j8xFTS%b(znejyj0WRzZ~Gd$3aX+ zSQGMjJ~5l81-03zxXm}zsP!5^7pMwBbp~;mQYt^knyq|uHX}&UtmkQnS-Z|bV$@uN zZY0YK8RWPV9d1VdkOBHfJSIk@P5X-Qdj45LTHk%AhzPWl+a85a60-r&Mg16BzlEZ* z)`7ZRfup5K(~|T-3$x8%YEN+rOYm@nT2)WCFQO|!jGtLiDU_zlupOV#?1H~9?YX8=3FPLtrV0e;L0NK)K9~>1KIT}LDL9!2Z$EpcH0nl+CL~OE8^Z{TY;~y+ zVZvLU=?|~s^1EaYCCT(3bVX>tjX5}xxXVS64|}yUP7_pHguXcUI#sBpBB>74d6-BH zrp|!}ZF(Ac(577Ot&igYdL<00BK*rN-Oq9E4d` zFxL*IWqwMwpfjyVnPf=9Ss-F{;^c0}hsM$40QIeKNcB*H={}3jn>%7!tHqx8Ny_If zt!8NB~th(>mVnPotrG)f}-~&9pl?whJa%YZScE`5<%RwJlWm9EgxiRO zW(7)_gk1T16qUIM8A;q?%1TaDK-NbUIbm<2oD0xD?m%CPcFK~=U$f5y`p$O_CEcq6 zM5kac)s|h-gz-vPH>&M;-4(#+8jn0_Ghx!>0LL}zj{_n=h7FRAg8nulW}TVO9Rb03 zF06pyfiddsaOt-jh6W?T1y_L>=N>{YJ)S$t%?K|@G&BXd;KA*70Ivvxyw|jng>0@e 
z7&~qU?aaLeEupBGTJDng6^=32jc6?rl$3cI)$VX36Vjs=7lKq|RYC;h-ML&Q=i#}Za}BTZV>@Z*2HMdn)hVYB z`bO`1Irfje5#x8~&Yv`j_hku(RcB?>5vsY`q)i8S#!P$}qAP~cV%KKJO-E+lo8mPg z0|biMA8Tfuw$HVjNW%^MHnSbRdt9+G=G7ha%E-I|g*)C@FNSv}<9aHZ<~04fB$S8? zPHSx|b`~Uk=hNaxf>J(9VSs;yap4w0MdN4gP8Pee8GDj80k8hP&ZfB##i?s(ATeF@ z&cTc84bzW4Vq;{uU|iV~W9_aMcDKKiaxk_Fl>!d}1(pmqhdK_GI%NfogO~OalM%$3 znRW!Fb`F=VfSu7K3Eg58)@?=MOzklTbgEOtJXjTzjq__cdbXlwTJ z)i%3|cJOV<{6}KU+vd7DzYg_~VrLtDQ|~FbiR>FAHD^#WU02JvKq?j8_-GPOwk|m_0kmt(pb^0tx11hRxe+WQs z28x;81LC`&WOrN)1^kiF9HPUu((bxmKRWlE1Cfm6gCtt4Ys09K)ls?Ur`85PKH*6- z*Dpm{J~*wvv$Av1DLlIgENUbn1(976YH9%x48_o^G(>8jrfnr?4eTnB*2j7nKZ^aMhnAOfL zZ1a5@?ktTL&L=RK>U-42Uc?ewa`^h6=l8+k#Rfc>wE=wk`qF%M+M^%Ke$j;PEXD*D z1s<X6cu#krwsQ&vXP>rVmg3?6So@#17JA+Umi#N zg=+m44gf7;rN-!Bz!r~ia*|*^9M>C!<2~ndeKZw5iv)enhc48_Dt*?z4y;G;AEcN; zXp#Sb;x@E9OaI)R*SR1zE@C3nR- z7meS7lv>gq%>5#uBr)>>t%vdU*mWPyN%v1ZL$HF>#P@d%6t6A2>RG?HK}-UQ7ciH) zf98!a_eD#$Ujk`9b0zaFBtYQ>@MoMj9~K@*y{?C^Vr2n6~agK=)&$SPwnhxXZE{wWQk!=Sp(zWsU;$?LDrfnI&|IY<8WIr)x2+E&9( zlr|{j|2i*&pI@}|Utg4fbe}YveH|#BfXxMz|4)w}g*mnOrI;F&oQ>y&eDdixDB*1^p z#(cflb%6r2TN-4>26Pz6jPy>3HvV>FU-yCTJ{$h?QMtb!ts(vAqi3c;MeDP^KmO~j zuQuNL??)R}pvIy{{~k4K&XrqXbPxwvIJmUv8c9M2$F0LL9cm!IJCU5+$I5ZexZ z!@LlNC+}X`fw?dH?+0}P5uF?aM`s`14ANe|8)UK#Gl<$Xk8QkJ`JZpr(MOgL;?9a0 zD|rMC1AC^p4|9nD}^71tF1dQ*JXj_gMH&?^#1iT zFHyk*yhng32tY_PBV1SyiVC?^gbcl^cYLlwODwIwVVHJ`NaGekBYbN>UeLBFM?8FBpFL`xA%>6x#oPcx+Ddsi8Gqe5_vHkztPU)6wV?+0VMH-UAO?lfe_nyFs1#c+qe#EQv zHzmew!ZdG7BvRDFV5R{^vbi<^1eM>aUp|QVevhgHfLDC>ZNX#YiEoXJ+-)BYH)x22iY7rN_4Wo+q z6E+vL``^EBN>zs_Ll-)FG1C}A3M zfy5XFZdTKlcf0=k-B-Js9Y91u$;z-{Fb!^heRKTYwe`_X;m>rU-DlQ#Zc9o@RR@I} zYgO`ebJLlQ^{OS=wf{O9<&TdCx4q}^Q4Krprhe_U=g}VW_%m)_kDuHYzMVaQ`Om%A z8v5s#i^jhU6b#fveKB3H8T<0KeW2Ps+^NvO5Nl{?IB?6b?$kbsx-(am^f};fboDVu z5{>(9g2VUP*0`Xp)NlMn7?%#BMsFJK1%SZtwb9 zeKf;8tLj`I5==8!*tQq3wzTLG^k=ZAW|1wU*md<0r^V{BuiTvOHjJD%{w`D^p{#P7``|!Ww@W0}) zu>)Jhq1kaVd%kXMzPPJksg+dwtNQzeO5REBD*@IoD*yQH=kxauuylgYdS3!i9DT27 
zySoe5C)TSgperN2J8ycJKbmD3drs~02T$wCK+Cq2P9D+dz}W3N5-+3W9M^)^m!fGU zB<~lVktSS@Lm}{`vqL=am(M7ud}I?^o1S2;R@)+(0(qF(e!uVlsKyoTj^K44p8N1d zR9Jg<0l%-Lj_Je)t;6wiFPhLdy$B;(4l$dh!6~p_Da?+xp6MfE=>N`?)+4#_jQ#pH zgSs=>q`^d2yNqZM&iGMjJB^R6y0^0X`!yPOe!jJ18_P2ZGYm`M4%QDf)c<_>o&J?z z@J*F&2;_+`+c5@GZjZRboVhJR`cNW<%Pa(y?d z@|tBjHHIy)10x&%kW6uR-u<&1+x49K9h_ssWn?2S9ZT>(&9WZn+sAQzabbiZX%%r9!~9dG()|vz3asevndyR7MfXQ!gsLhADkIqUwOYYK6`gIvYz+G>B>q0 z)4?hxp8J7G-+rTfbXsTk<0Qev^Y;yQro%qiJ^?2NbNy49ToEJ_zQD5O)|N958^3x-w-d5-8X&ldZl7G9!ntiG4 z1=(RwU6Vbm4j%}Z(gO7YnX^R8K6-ssE_ew06{LQqv5(2w6293%S?xzJ{BWg~``E20?iv04 zv>z%JZ$0q3Om&97q=jCQ=8_-rlOvI5gqg9PX-lvl7f#ixTD82lSi-64ny)WGrZ`+} z*S0U=856yp&%?TiVujmJ{lkSxjagcfn>&a@GAt)cR_D%J{vg zShFPE2UT$KkiZ}C(7)!F?A|OMn$nwIh7NrHp0iq)-`jDHoK%pvu{l@2wB2#YcTcZD zcfr?qExX(MHAu*JtG*s=NKQBB=i|7(Yp3RiYrAhaT{kB8A?U7^7s-*|1T`_pjC9V6 zY~Bo)*_CnEWGbt5TBw52OwrjHPLnB8nC|=l-=ak~j}`HSvpXdrPG`UUi~WD|RRrNGDm zy*Ka4ybbr`WoZt-zS|h1q>0ChkcP#YmAoK3NmPJ2fti9e-9ht7L}@f9O5^z|XU&^W zTTkguk}1CY4PGvzeEsHgDlG@zMSpMmbM=m2YR+S~G8s@<|PscXtyJPBTYmQ0a3yi1pUm#v8_|Cfy+$V#B*}(z<&SjPUEbO+_Opb=9x;0JP=~rk^W?uj zY1eBiqq?$fVb4h6Hl*J%JSab`L&Ya)h=w1X5{`uZ!g914XPj1=2hs^AR|-M zVX>i-JnL!B0-{*3A&SUmd_p#ZxMk7H2SNTXqEHLdfd!oowYImzeO=gt+VHmGOMP*? 
zLedHpihUa1Jv%<03(LRmC1J*Itn;Bq)Lq%m9IyM}k244U^V>c=Mi#MteJ#Q9Z7!XD zDsi1tJ*ML)PLU>0rHM}d*J58^+7Y(vKR4`~tFvwI@^GXUhs)Y(*Wr{K4|lL?DlN8s z`i-)q@~fMIv>sl9gyaw>Uskdyo`OvA{xhN>CfqXHGd&;#21mz_c*)l?Msmj#IcH3) zVB6pf0%zh>Uo};vyVCWw+m77 z8>J32@zxVB-+MXO=df4E^nEKeRgIj2-45+qI3uAQ^8dA*`v*or9C=-r(j5I&ya!y@ zr>ERfDo>ZG{eLa#LXvD`{!C}2aB}F~?7M68!s3?B&qvSgBvZ_T6LZ_a`a1AESzK6Y zPT{_-)O6@sx$V<>g8Wmp&8nxaTOwETQ#OueRFg+aPQQ0;8^J7ACFZK12sR)Ce zUjU=Lz8WoD(tqT6{yamu>*AEV_VLSEFVW@vvUI^9!b5 z_aU>sTqkMEZpDEN0i0y1?)uVzc8WLUyAg}l6~Eb)Kf}SXLqGc zdz}yLWS8$5#sq|2B?;HHBS_i4wGvJ5EGXm~sVy(aqr0N0s93hWm@f3K&&VuT>U77b z)&u69gw*KYCKCp20t$DkD`_g|iL9*K#kjZD^`4^U;Gw1zNlmfhJ`*+?+oPjQ`b6Z2 zM8MSj8k>eDiHJ|h&A(SZa%nX#ttf2OW>gC&U46sR|LooU`205oy2IOj2rPR~q*EOA zyqFbNHlyF`Nk281Ca01sp;*>$bEZ5|qUZR_(jVo!N1o!B9+ivp7Wr?t)LKohEv(FZ zDXGF8QTl0VMTV%zNRJ{7biqNEE|(Aaxh(lU1Ea^-uRDN`tjt+Sqe!ah-MahJ!{mFW zjoB|ETb;l|WcpHdt6gvQ(-fGTk!Obpz2IQV?}Dv$xD{{DXy3R@s{DttT`UkVnJ z)>8dyaqy%PIfhgMxBa7kikO zKnD0U{X7z%bYy;?`_j&o(<+^6#9`&HBAVkJ?OHrPXV^_qpccS$oi}%9PWSNh@VocK zm290~z_1=O?%a%#=2&+Jtqhm?hZml@8uoj&GV&!R{&>K(<5j*lX^*4cjc^Y`3m>4t zXy(5ovQIxOF=RHjJml=~2b~js#jIxhJznrJN_QIUJRXG{cN8A%CD9oT`;lXut(A8= znNPyuWJpWJ=@&zf`^aywye^Q$kNZCRYHtVgA3{f%^uvN;zaAvabv!V-`PIL8E%USW z%;D1Vt8W*zm2EO<{A4H0eEB4X@7#Xlennvnmw9!P$+`2^ym7JkoGi=SkR|faB90;t z?ZYmc;dia4Cfjd?%0~+Rxr3FQ)ZtyGjjV2q!|VWI@a(qCl=kl8u5p5kVAOiIaRwoQ zPWoP()gG;tAK~fKQo~k|hVpXVWz>##e-YA5c9D|dBDaKoh$P=+_KY2T?-|9aE^3$R ztk2EuIzZrBWy(3;dByhkM#)!c%+GQa`_4%)iQ03!4?p*Rl`BN&!Y^We4;WP#Wk$oN zXNPClJ(fK62QyP`Ddl$w(r>e>V(45Dk}a-^bAnu%lMfr-QbhiTF3r9;pYseU->w<^ zt5GHmd@fiROes7rsPQ$eam_xeY-Z2WBzwF+Ihj$Ou0u)7uEY3op9j1K!PQJp4|cYC z(%}dADvc^7s)D_v?**FH@V}~fqE7%lCm$W@W$JPFc!iGDL7uFg@P^32rOy_3ev4j1!v4WC5P9kmXKF3qjd!_Tm`oXYoXs%p5Z7|&i90R7 z_QON>7I9jNmhIa6jHw%=0^eYtPo?^IE!i$K1=*#>ho8U4VWAS4e0=|&dum10g@y)- z%X0fA8mA{ev5mWcgWD>9$jg-1l10Y!P5U~@K>oOoQ407<%%>84P1YrjpUR6|f#$V}Fb1V6gcS(A14yUTAaU%4a;&-;EpHYH(gF|oAV?yaaZX=pUa_fj=Y zjRQ~ra{jbX*tnw>`CPCPQ&h=|xZFX$$Mq9ZFI~DhbfUUHEp^-(l;JHERckt9M?;p9 
zrPdkNx)vy+U@Jx1=t4-lvRRpouiMjSjsy{Vm4|-GndUazhUjUS^n-0II$?`<{>kAg>wkF<`w0cE4-ZZ z?W)07z{cOxhx_H`JGOoGMR(Vl7{HtCSKLtL`ikUQnF)SgBm z@6_#hYOsvv(?$G)4lZOP`Xw>EoxvyI_UfB;3;7WE%VK;8Hj%g9TspCHV0A2r{;D6` ztf+i#NzotcDBAC%IB2NeR|F=`OiaP{Wgla{?}4cqUKG8`W^I`8ci3s{Y&!FSxc+zc zdJn3K^ahq&9&&1jqbRFmry5g$wf*yNsbF1}t~L9#$^URMzCj80iF*8|V*z)@Tm(}9 z9C3QtwZQOpWWZ);o@Q#V`}5LTa#^g+Xe3k)wpyU$eCwdx?&3UAmNxuO9PJ7>f9i=7Hkaf*$?vN0&dJ2*lMLVMsbB1S^a3c)9XExAoGDQ^Z9aEd$bNabxF79_7hh;J%b&y>A$Jzbbc6|3)qUjBR#VwE{A+@i=`7f0|k}Ht=7E$ z^m8!2HPgRw?+)L|!q5CDPzsDxk-A~PU2<89<*Gq9*6ZYL?#BY94ldOjg7G?5cleeM z!O1<-IU#JlL#SOMohotnJ)s;1>(aQ&PUc?G>4yYaxHi(cCBFsNUh|Uuq5kTDX(Wm_ z?(blQYa=lO21?n|_}paQo?@na>HaONuN|8A6UKV#ceGejh$}3~zKXItJ$=(GN`G*M z@CbZl!*An9xmC&_ILdMDzFB;$l|fx(6z_7yJi8t)MWMK$!Rr$$vCYRb-;I&mk9{cO ztBb2j)EBrrb8Fh?%1Y?4Wre7~p}>;QN!T#hAp7SI;IAaKp0XM@YrC4R%VISukQ_-d zUR!?CHX3Yn3%psF;LzgS7hIZA#MO z)6U8_8(JH)E{?*R*S7ENbw-f7V62rfhHZB)E4D9`<5-xoOGvB?8DC%!aI_}$6I8kv z1uBmrMF1WTt3tn)H1m)>&3LMR;j;21xU-NSL4i`(Xwf0pC5YwC52XlzPvY`6QH3VC z2QpgxSQF8kZeX`3A$=o}V@i;VT^0Fm;tmSjQ5#nu>`6w&`|;aa^TkH`?>_}}v-rVsq zkKs$x`^YZz6B5a{VkKpnQDbt8~%Ac;f;3a0!vCWX4G`!#x?`7Uy1Ez0lI1&Ohb(@VMH|5tHch z!)uyDwF9k{=hbSl4NVHm5-wJ6;^=24pPo@MhHoZFvuF4V&cxFm;*pMgoLQLZ93G}b zJTmG8hJmiH@5{9H=WDH;e4guZt@S#dJUrjHlyTAORoZ?ATC0pQI;utS%M#4tJsDPn zo`ZgbA1eF4@XSnels!}aVlI9@fT{0V@{~!Y!vkz`lMKj%=S@?yX0QF^YSi!2vt7OD zdQjd)wFv18oklI^f$8B@X=bn(0paz$zK`c8!9r&_#&xDbFpKt-sqYd*ScB>fb@R`6 zXcx7W?W13=1H^N_LwIwFxcns9*Dk4zArtRn-@=dq!22HQmCtw)z-AK|v&T1B#$18ObV1kRUnd43Z8xi?$>Q zl2no+h-45D1SF{B4C3LCk(@y!=UeN5-S+dn?>EN%cm2_h(duTewQ5z(nl-ETvW8RG zaWfZh;-jvm-TdWYe#2%1B$X2!G9}%1DwRqaG+v8$x#Y#N3!96&a#~skXZcke(uqVD zX`{$Ka=>JP$CYg*F%%U{2~G5yj$hohw}vYq7m_oSl7nbv-Aw%NSGp^F&c z9y$&_PP^Q}B+}$Za$NQG>FYjB&!kTBbioIFJt4t2eD)Nh)OPw*vFwgEA|>t4TvV1b z|00zYA?$8O73nym_z;-Gj^(Odb;Y%u@Te))w-+gReL*-uP{;3DH74;Ls;dHBNxuFR zov&0>Yyn$j1zW2i2zzM!bT)&~GQ~BjiRFMp=@_$f3nJ6qvy~2NLa9z-OQPw91npt8 zzGK(Y`%|ThN=MVK@&=qLD^#-ls?TY1xh}0VWvlRaCG99b+24ha1Q3izzUJ@vU5&GH 
z>nJnpb^ev%o@YW^Y$G>mq-}?eZad4|rlDZneM6Cu0r^5rL9HZv!F*+B+12J^-pX0k zQw`HZveoihU9cO}n|}n;O|G_q_N%qz%Ad5vCwa0p4PNXXj5LsTd^x6WKzSqVhiXv< z`R9+Egcu3Lrs=LDF5~_qSJsDUr8z8at_G0~pwFb%nempfXkT!Q*x7x73)&oDpt7 z#L(L^6s1`>8+rkx7Jl*C0%flCtY?5Br0Ys5?R8luW0ZcPbam*>^WFF)mn`#7M?Ds6 z`Tc2=xqQM~Ys4r;+6f+0z6rJrjWoVD;Gg56g_FUom1kv$n)JBmRf>mro;ZRTGK?8t zJ+&?Z8AbTEf{l_k^M=RUZjLUGXp11*0}}NH4wjv^zT|~jSBz0Qc0~stf<$V{`~oHx zcr8!z4b)^0z13z^g7o+t^IaaRv8Io>tO^pLX&>t)FJMcO%2Jo4hRGv6kG@O~h(q?C zkS>9lNOL)UJJgB^w?#OYNK^QALh z(qEi7Z4dD+O);gvY9MDjR~~<;RD1U2;fm>^!DOKXy80kv-vJ$h;gLUQOFXcd`n5k^ z6dT-`*Rn2(b>n}-P-|{NSs6e2aI0lTgmUhoCqH~cjgHW$r1Y!aR7&9N=K0BLP4ECvpRkOBD6*{21pv@Tq;hV94RAi)HbJxrq4Xc#MOZYFro4M*5hOqz7e`*;ZK2N^ z;tA$Ah&R6r=AP-~2)6c@ztvIxA?V5e^X{+RvP$~f{xa9&j7CZZu#B%})M!eV&GL7% zF8x^cwtzwm;K<9aQkG`B_58e1gYyk4_~JnEWgqUD!eH<`wyFY+5V$*8ER$wF>_!%{ zYBV1pQw@)i^$xLe*Gbc|47itm&>WAq{6BynrgC+NjMV5k&3&g_CzNUKMz%_W$xV)( zL6fr;ZJOgx`SNMGRVot4hqJw;h~MLKrh2G-a5m9KcU5rhRQ7W z4r8cY#j_BRw(Xs^E4b&(cg?OLe~G909r5%xY4YImBCk(5;T5Ar4|ihUWyw^;)_O6h z0KZCmn8ba(_=}APU4T2|H=DyP8&HNh8uCJM6E`%(FhD=)Yf(7hCWSLyuipap#ps$Q zrFox!73ba5AG#=^7Ek_3G8OED^h=zUQ_N4pA&t z63n!wr)Owx2KwF!!{`7e%y%IL0wv?C{CMxfd*jQ@g(-Ba6O*L@bnn~)#R7A9jgnkk zM`KjVCBM86<;#8$zIfa?vhwCh^`ZYzcw)y}Q4V1Pkl@Df?BP=iHF@9m9i7#+IKxH$ zorSsj{ISWM?WeNiM4({=Ia?MlGvNL zgf)vIrUCV{j49t5aIGw7XosLGcwuri>6@AtICE@sKA9Plx#Ew$MwLN90CQP+zC9&u zA@_8cw%dOvXuk>7)d>1IbEq2(n-yNQcb5%P;7=-%Qa4 zwD9+QWnuAKTW%>2Te2K!PX9Ji@<7e9`qfEwRE~=jWhVU|Pt)>Xv-1$SHFs=qM@-(# z{?=qkUkr6TdpOd6p?#`vA)j<#pr?TbJvPe%&wH9m-ef6`55Z zT~N*T7soW$`u=Hth;k{Cojmv3yF!ZnvA&B~7)_UzO=d0sBO{FjGEvXX?WP6S; z_{THdQ(3ws;G`{}{`vR4ozRb0oz;1K%sBMwFeD3;o1K2J85F1j?E~R(c+mEk|KN(j&^8k)-WZ`K3Af<*V-x}v&b$&&~QN6 z_7Z2*ri3@5ei-?9mX17SmvL~7e_>cm|JM(3 zG#U;>XQTuP33Pt%AD+RCV-4J|-&CHy9&z(dh2(HyevQUpNx^f#4j1PPPMMZiB3Au! 
zRcBfFddMs;c$S{dHKV|?mBl=O(B;9=1$Fm;I_Aj91%q=%)%Uz5-HJL)zZC7RBhO>k?79oR~i3QL1HzUoMWo8aW`alJ>k(Dap ztJ=7-U(jl_OQxP>BLI_&{uCS~@3lqCi!3Z-1&d>20v6hW=EE!Fq_~{)=!@3d<+QGq zF6f0h(>LkJxm@COfmCeD@4dk{D%S~uD<-d}m{MMqN6&OT56Q))udA_$QJAi=hC2i< z9HQBhZU`?73wmgXT1WSPZm{lz@-`#El#lt#{QA+Kv^H3G-#2@7c}IfS`AT5 zVI_y?C`UgRDi25M-y5jgs#&q9aSUs_AFkzNO8rP?H)jagP9-B;!gTwe|9D>~ALMOp zzTis|n@H@?yLwKn7b>1vi`C?BlCy`qx6015?$?0Epw5nKw01@5VbOM=Y-pY*XZat= zGwl`#WZ?K)|;wYg>*iF%ofF&|ibe9l*ws<5xLZjnU# z{@B=UO4m^mFQIBn?psb@^=1vg^HP+tkv6dC9Z*|AgYu2vYDDWe|{Qwzvte0K>Si&HPo%nJ*bkW z{3OSNVUeaMo6{+b$f(>Uc~D0o+5L1j=Mu5j(K|oQqjOpPuQ4CXZ{7ZAM_3f_cWDIG zQO0XL?${@<@YCkK zWI;7u5s3@H)M>*b*I2^nt)B8_XAM`d+*D4-U!{khL-~#;oLfUoas|1D?pOEV%aaf4 z`=46?S&!?LAl$0)F81O4Vx;mJu+3jYab>d?o`viOCkp|5dIMEbP`hNfZ1fqwbAN@| zn7~wML^(+D_q`I>tAQ<EfW}ZfZ zUtW*o_r5O{JGqXHLk@S)cFfndddIV=*vA)l^Q)n0p{(!H%cFfC$paO&4vW6OOLdp| z%@*O0s7r38<= z-6PCJ793s2$Mb#8`^sS@hQf`}4&05l!k0oxIKJp&J3Z&4`Ra0qw|vZ_cCs`rR1LNe z)#*<$@^^T)AjVgT_9z&ZNMKt%fcKlSlt4v!wV3xJ1ZPfk;5|X<7;rLKP$rUl9Z!2r zhvK40HS-ht7Y_$b`o2GA)iQBic7*$uR;&JQw`|2nW$a92i|Lv&E}W+(WiUE;dpp{J zz|vrcvk2LQw(D>Oy^))5&&Ob;I0df44%PScJ@d$~D5|NchOKopl^UrNJ03=&i+ zxij+yQP_LlEmHB<4ytW)(|AyY>TXzD%#8peN(D80eZdp(E#*H~cY=jX*3?h|7c1Dq z+VyxhviC~Q%79Vl_j>o}Dp4i9{OeJ6ff%7DoS#-LI;xs9NOT?_a^2pAlw&IXeFZ2t zs#CIZ$;-GnX?LPu>l8ddPK2dDh0TXm>3lxfvfjqx{A!whLE->&?_Gz)t9f;1Yuevs zLY8}7NMd7ESknE3&%bM&x=E8Zw6_&z8wjo80ZIS>6vG{~dKoq!ONk-x{lyCwFL*dy z>S>$Q!D52C4o0qEGTS~n;N$KACj17xgCr)*?NBj0x%T3_I+LOG1%cS6Yj@W{#AaA%Y(tn+i4Qa-@Z+gZLM!GCKEm5%g(uib6@&(V* zpRSj`YtFo04Aq7(40+Lo(4z|*LK|Dadu}bXn_zERi*IpC4D=6jy*&b`&6dF6m1&M} zaquDeEkyZNZ(@3V4l^NAN<#DQ9H~`#ft`XdcDcbO-Y1)MZt47=PTI`kk}(%+nUD{$ zb88)MzJw~C3BSgey9*R{P2fJrdFyh^Lp#4;<2kdHAevXGjeYesVY}RL>~9A$ZH%`E zMSPN27bjTUYZg?W{-I1VbI^AgN0ijKc=R$?9Bc{CPpJR68h>j{*BWr9n4+kwctVc< z%%PTIy{Q|$GZI5`9zYDh<-zMGjbth=FPb%9SpDpF+nS}T_fhUpa2DY^(oNnUtXa)u zh>-#r8c6V`yPJA!m+jbdxQ6Lv^UrrALro|>1D0VmGS6{ymyG~9H=1z}~Mu~TjLTTl7J9*>?^Nf?zPRh=c z5FqSr?VfeCx5!d$%QqG#nDz)$8Ox$C@y1% 
z(v{^aDFLadM01hxM19|9(6rJsGKBMeIt+#!ar5!f?TDqEm?K<$@yn3kOhF-yhsR)3 zli|FaBd5w-mODr_!!Xw{(mC6wuRaqlm$Nzrg;%ovk&7Q>?J`*j{{RA7Q~5|%DYG+5 zbtu#DR^f%hqJh_cj7Z9@w*2bBA7t-X2m()znd(hizG~;#aWFC%0x&CRdyAZ&hL_sS z>U~qw0r=Fdm({lOV>C@a?*uvaqskax(B8u5WOf?(EjGc6Lfo&>Qg$R)CcYD0$+b8| zUIzu-0?NChqi4OYo?>m8j&vhMioksfSPmPt8uV0H#VEb6iP9)K6SxZk$tQf-GEo^o zN<-SqKfz$5>M`?*V{ZpK&<=R-?ZCfI|_o@P)0Ro{MVWhy}QyCEKrum-|;c83^YDi zLc1WhOqHVq)&~v|xEqzVlex%zb2T1pH^e1a4dHgv>r_KDw%Dq_tA3;}5Khfnc&C#{ zOyrP1V8p~;uD$6>s#x_^|2Hr~A-~6$e{e8%(vL+c2^CeJIr{W2N> z&x1y*ti!KTcpIt|>?N1E34}^C4GL}~m1-2-mgol%h3r`ElFj z3ob?HZpGx(JQEg(&A*Y92p+J}(7V2IU}2u)dj_3aeuZY@c1h7s7ZREiJCLMX(0j~AKy^C>!%K!P48tm2EL8;eU8W1 z8xDp&MA)#`l$Ykpn5Jh{x2LE3M}2H&y=>@yW?63hxhM8kIdSx#RU&?`^cK%mjCEev zbI*d&(pwrYCM~Wm7_uO z%Yw_!+KY{8FExk#58aWUi|Fy_f_!PhpogN<{$WQ?MO_95EG}?X1ruku>D6Jnr#H8* zp`NdohR9RK8$WBRbLwZ>wCArsWo(>RH7n{9*!ZXRaRf6U+(CyIQ7=aE{o;gr0|TN; zh6l{Psg3vhH#2m%=#upxI6}Tw@1D&1BlgCe@?IckODx+F@~|)ZmS{r)ChniU3+eSW zAdmPhw4F!$dKoEu=?f|{v&`I~@(<*^0+rPVKRcTF62dvx*=e=ABnqJL^zr*m)!{VY zktfGr;P~^V7>=g}^afvlOFt^ZFnd#o+T)(ep%|-HS2vad8tUqmxFMUGl_D87s{}CC zqT^k|T1AZ23E1B(#oZevI@K@wU;jMv-0s(7WvS3GE5Yr8VeoDVZa_AP z=%}wP;MH2BUFss)!1*o_%*3Z#uq{ZDR}9O&`DroQ zM0tB;ii_}yM3KVW3#}2L2sS#Y_im+40#j3G*%^b)O2atAtXnJ2Dpmm%DTFDc67{iA zS(*7A_%_y(I;gIY-U|xnoQd_0nx22S^e1|G)opItAm6m~yq_tV8L9%jPh z5*``|E)a6Dggpy@F9BwEP-;R+@PAnBYus**8;<4!o+d2Y8hmEj56R00NaZfNjE%?) z-oKawl(qB~LeAz=-nkAUr!FAy!ZJ2^TBk{TwSdJl5IT{jZw@N-(?w@ymf9T;w^+PT zTMkd}**oBIVAZa(c-%X+cf{9uA|2zo$GX2$ybc%6&SDjbQxq?ts^Y6V-yaz>w4r?T z9TnIa{+R9l6@X3@RM(L^Hh+}lP}um*>p#tV{a~cb;vWoL z-!ts2UB=z(c`7JA-Z-QaHP0dT;>`2GOpfa5T@a30@?4k(XbSq9S>UcuMm2}=Jra(p zEVw__{TP+&ouV*EfQ#B4e@srrgjTd4@@l7LpmzY&CozRqTi1#9+-A|03Q$zPbS9$wn#=A2=kSivm$J=vI zOogY&DC#0*0N)JhuetB6A0Mv-kJo=KS8;237X<5wbR7-z2e#fF zkk##|nVuH1yxfZxx(z8=0s$4x4!9yR4AH3#*D8M%KI@v!za3VVo;~lU2jiSA85?z+ zFfRqJZBgH|rFfbUK}SDtX>QX}f!Mz!`6ejrEY}FWd|^woI9ViY(=lgn_2)OrubUz6JRP?4&C<2jWbS=k52;D+v(`Kf3fe;V? 
z1%JK1N~I3c&F1266cLXL`CGDL^PN~b>K(fI5(k(b3IB|yKxFqPFpiZ)Z$n*Onwb02 ze|zWL;cPw_)3wp*y?3&2*Y=6>n_1yk%5ID{(0Hci_6G}q5o2}>NOuJ!qM(LXWx>Y&$q2i!XsPURJcm*mXgm27rDP#TD1q%yFwj_7$zm)-n|biecx`wnoE zf4QBuFz5FS#H+wLqhvk|vx6KLX1>IC>4?Dyk9Tk8*${vFQN#MOopO*SV|PB#TBXBI zT=;NOWmNm|x~#s2ltYsUKWoo5g-MPOuhG_@CawDMVB~~QctpFY>UMu>2-8nM(VlB{ zZWn*f@q6b?)j0=X@_mp*_j z7HY48VB^qbW?U@)FF{ELFQ#yid{zixNJ8-F-#Pci6N-Y(86D1e94dI^#WJ*4q&ap5 zdo+nr={p}HGyQgXF%E$YgsBMa`Bpl!Fat$Bi5F!#Jxy%NT?T7VwHeB7tcw!&?xba@ zxj?zV;CH1sB?C&R>g8HLJgk}RA%Of<4hW(`UJBlkm!WgwCqjB`IQ5_F#sev{y@>3) z7m*+4c-TT{qHIO7D^eY0A|)-OQYy#`E?|O2P_YgIlk!=qMeEu`uA*ae?UBDMzbTOh zwDit+m)+Rg;QpJcuzj>17x%v|a}5pBYhLs>(f_8#?G8*$VuQp`6veMWxgyiv{IB_T z%S5I!QUUYa!K@vTKl)s@Ge`UA_E@AVh+xBW=`ct{vT0~&<` zORbt%(V3*_W@ZXcAbfz2Ivi7p;xf*8=wny}AM+*($}=`UgYvazAovfFBgyN~2}Q)l zGEJ+OELHnT4Eb|r%$e>^2O~_j(6l(+o``XvoU$U z(tO~(*G^4>h8A@9)C4pA!0sEo*wYWCUXEC0O(+av=K z>BW9>Ewl_c@(Dr&UUicj) zUTJIU_L+4)>o$oi!~}~6eWgOrfAD6`C?30l`z>n-*hb4Ta>w(y%PJ&$aXKdu^o~Wg zC>W5GQ)y`!e7eZ`i(@CCc`y_#LoEA;prUdWE&-Y^n`^?;$e}!0S|O`w=PTY_|RJ1!$A8O4$RK*MjRPr^Y#dIQGQ&vEaLw0?7)%T0sD;~v9 z(<`V$W*(?5v%mdntUYz6Lq<}jQUzL&+>?D>&6=S{SSt7L1_!>hp3%eF#QowTM!}G_xWlkwo6Y=Os0QCT)^Uk@C(^Ul z8w5Vnc4r*du3qlO<;63-A#H$s#BmnO$uWkSS|oZibiuot?vwn@HsS;U#hM(dxU!^6D&a(l!C5Mp z_wtqhz3!6zb%)~DEpIjVnF;!qp?4UQ+u;2-N74di<%>i}qAtOIw4$YWFk+mmCs7S8 z_q)oN1H`%@AD#AS&tfU704X^5#wL2Q{3y{=Y9x7=$N2p%^{|{?wsxas-%6SqW8mEP zJieU!rF=%G6X&3lZPvWLZ8LRCWqDwFC+XY2oF5OtsSSL$6Mw-2d`9+|tlz#u-ZwRo zt>9WM_`wRi9|S_bvPh)}m(VW%Bo`mf{xENw6#q_Mwq^R@8o&93p$~Ua&U|}5gaO+M zx1mbH!e;39TRdW>vwmYfp&Ni}BF?-TSC?LJ+n+}5>A4G2KN zb8Z))DzgN?i75HU(-k^VIG{^7$S=NGis9+&0nSJH@O(fPXVeGU#QrO&7m~?XrO}1B z=X2WhA0uVtT-D&(ivMdj>H@Ca^>^Oho|f2H5K&V~&7ZI}|F7*JFdlNrX%hO@wG&wV zG~LeQ9z#WpmFg6WgJSi{;qyIC7*)h9@Kgh@b+{Cu`}t*VOL7r|6v6~jC{5HXsC>ZE zMvFk*dd&9cHQ3iOk>GdbDc__2;61}GzIX9?5|r1bttC+FRr0QJQ%>AZ;=yF!Gng=6 zTzgZU1*JYn@Y4DU=&VK#6|vvd%T0)2{YHid2bzdN#->N^QlOUXFx~S%Jwd=(#i<~(3U2!q z=VZYGT9a+a!qfZASr^U?5z3|gw#2m+>7sSDC8wO@onP-?VSaTTuBAXqDTPK&xH@3Y 
zyoR2pxYt*b1~=1GSgYv^ZD)z9pMIuR!d}L^t*3Mf=z)Bet>%UfWS?Atp%Vl$m3-eiM!!BFbi5) zFT71$+}oDS0a_vFr4bOtgqL5f{+SOzb(5p~8h^yG({atkdI_?P4Ygh|Az{_P{7jhpS0R1E}e&UXQxXW~!%8Y(C_u1vj%&(ApP)fytcw+$Eu zV%x%suQ|-QUx0UGEf7+k<2pYtE<_GV92DCx_=_(ia_A|`G6f08N%rbZr!k*fD*MYH zRW}F`cLXAbaleVfGz{-4u~U5GJ8&aH;s{(?e;S2x{*4P9C2S=SE;;7ux2Yp}r+v%_ zhNZa~({&F`B-dVE6jAhyY;dHkJQ!K1fMHjp1$NRktgI0H+uQX9M!I6{xL&0lkgiMA z(+0=UoI%WT=e}t~o?g8bmTUF5XGy!w(K+jnPsTk% zB;y<^KJ5A(8>1jldHdUv!bqgc5Oh^2UF~5_7we4I%fxjGHMbjMAY`S7f1pKuziV{a zO-qmb{!}omw$g)WpRBtBz<*6P{Oi$FKQ}(57lBx_rwDQF+5OstMW5ru+xYqCij%19|8yy zzO*U-Jp;yzE6!>@&Kak$ZhbCTqy}T%0be?#Rl$Eu$P!TV=EEe}B zJZ2>n_TfxW9!QIfK0JEbevTFjEqmyZ@RrpB^_Ukn{~SNacPc?z@%Zxd89vRC^E@g> zU-fI}aU}PM|DiFt+>km;z)AFEa`@8WAUs1*;3qH5eZ~4Y?{5a1{bHr8xWL)ft2l{Z zS0|1ug#LcYp|7u9b@e&A{!)b;a{#4vm{igEqy`?80~qy@A(~*fR^k6=D?q)yS^aMP z1mCc%k%{h-GLSxMnW)={7cO#FQeg@f%Ns@QbhqnBt8l8ySPVXWc{XdhXz(BQwvrBu z+oK%LP;pcGtwt1g$n<%Ed_fYvkXKey^@Nelfcxzu(3q7bcXPY_xx1)iseI+i-{AtE zPmMRHoKU2#UL(mK^R0Tq_x@L#_@UZ_%hIvn?{MCO5|=vdD?a*8r4wNYRrm zdLkqE9-o+2JTb;&m`Ti)G`mvOY1#TZ-|*bqm%RSd8b*qb`-K{F+aVte&-9O;p>aW& zx=$TU1H)WWBaAQ5?TPR1mzq1PxT{4lF*JPjvn{}=?)Q@)s+t78iH^_iOHZ|3=O}E=CbBAdouI=_xPDP|;@nC0%#@l0@ z9QWf`(>p$x=|vXY-z$M*oW#Fsgyj~Z$^_GuGqxw?Dv*K^l6Hc0aoT`)#bb5b6-UXS zg-QffeYfdkvpC5M$7D>X@GYqlXae4We;khJaq{6H#7Q)r;G#v8cxpg^d&e2U73fSv z@>r(FQX-E~T56c>2?|io)!c#_Dh{zj6R$z0n6G&va97*Wvo0mr<1!7i*SE7Hz|oIf zy(zp@$SZd@e=!U3S@UQ7`DnR$#(Q1_Lf?6amt*-x{Z zNNx7Lv*^OBuWvMyxtk6~`aui!ITfzD*3o58j(K>5}F&Id`e3+&Sl75EO>l54Y9H1*yca-~XQ>#EY%=C*0qK~#NxZRF8(8EML2O;PPd zWNZwaOiY9y)}<{&p5@YU-d_nQXVWozZbW=RY*^w8t`w+~b}2uzR^v<@+}jN-78=2N z_$9LRBo)euW9CCIU7i9DpcZ?}?nS8H+_YI@h$Lp(aPRgSjV~bf7zlA``2t%Q8--!! 
zAA;IJ9JQ>k=g$ZTwK}Zo`C?y8eqWyT{o5G3V3<8B00LFsci- zM7Gwn_@6}P(P)Kvc5;6~$M_)#1^1rw5*Tb6pBZ=_1EJtDwEY^^rRSI=^j*L>qFC%Z(#b0Pz1k#2@ z)Gz_jD0f$B#BphQQTUxK9z8N6^mxF{p`hx^BQyP4l_Su{10}fJum&E)jcJuTj4!Wd zzmGlc4id21bwGq&>pwR8cSuu^lyr0M3q5TYXn4(dNWSgsv3uP{>0L`AsIq^1r??fl zF8Ng>vMtNcF)T)tnCdG`hrwOz3on$^X+nu549}uMR zt#ht;IsvNkzZ8HIB5L1u*NVK0dMX@0(LbC(!}sSES~jVb+2nsYkJ3WDkXZp*F$MGQBaD)HQ=(m}FyBYVXc}=H$rh@7^L7<@m-$<32>q<9LPKLvDH3T<&$9^kNAA z-X$;zO!iVVj!9P43=ZeV_ar|-*)L4g+Aq~yl5b71meK(YIG6QR6(YvOR&+sdqfn0# z;X4L&2zkt(J8oOcoxO9j*4~n6TmXQ1MVEX(pg2))d-|Ec;%@!{sKi!&;*jM{6^Hwg z$aSA9As(ckWIGRvMse|CO5%@dcDr~kT&T0VmZ0~}`j~A;I=lLSh+KHO*fJ1N1*zdm z$cbGlUYp*%p2qaJ8W`xhsSdvi)i}|umsmAQzHKX#={l(*1wVeAIJV_?f_oBJB@s#! zha2mDi>1I~r?=~1#r&c*v0@ETCIDk(0<4DPf70uh1S7i7GANPcQP~~~i?9Uk?55nO z;=VF)mr50=#omB@$F0GUg2U68&qxJ{8uykS=3P9T5c z#1I}+_>!eg`lJL^om61RdyR@P?0Wag#rHzxvcJ?fI5DmUo(`gCjM1N_iYuR6IwTEs z6>nTyFr&pS%mo(88w7f`#Fl@-+$W0rl7@YRouJKVxgb*@`78ummBwW*msTfjgF3uM zo`NI@GLt&a&;1@d0r>uiUHssJf1<=9a4SO?CoLmZ`ng#LI4>9imdweNM9Wk(K+%;$ z@i9lt)rcA)-$>m3+dq1rK^Ty7q!GO-zPm}1W>B(I4oP`rgJ}*=IHS|?isV|U_K&0r zTB4P&l#g;4i_pA?=;k7!2XTg%mZh^uyXCrpz2(_op{OJ0=>2t21ZJFaMmfn+ zPH7O%z9I|^oV8&7EUGcz2VsAa3}#XD;@fPmM*kfBK(g?Mj2Bei8yN^3&n&RmU$}YW zM~85<*c%s6HBLACXZA&?59@=bw7CoV3w{s6zBrLDXMj@2-Xl!y>$KmHtE}ns_}@4v z<|AE@0y%AFLo95NTH21LuXLB?xcWKXFVnkn9apRfDO{mD!rgt z9OU7ZClr-0?$pGZo*K@SQS+a&+;uxAKr5pME8S6GKF)ANNJk)!T=LuHJ;H-xF;!u4wV9u~ByD@Q)Aa*U%uu#XgVLUDUHYzh3 zdM@4$?-Wz7Bys9R^U5MPr%w9pFBHa!DN(gvCtBur57_no`&eHeX3~2NjS{95qA#BN z|9u;hu6_b+iD=Hq$^V2(DV<`UUzuLY*$(Wpuh92NRe-7#A!J&%etU77B=U7^HB{|s zI*GeQG;>kHc2*z^jSB&xA9!l+&8VsJjs)FsicLZ0HGG}QKxiA^2Zh&FKY#X9G(dY; zdcArH0zi$M%lV_RB}MmGn$$vyuP~VB*4`PdDrXDn8G&>ojct}%!paU!$bgn_c`BV( zeoWm(Ul><6%4@MMkYOA)sY53ir!~~cPPSKe9YhSiW-=?iqs%15(6_Ap3UjARS<*;! 
zt1}#0!fNnSraTBM3`4>j$sK(b-NSaGT?G{Q{B4=O+bPA#pVEY3102d*KwzKlkDn zL0f0~FFUbY{ci1SEWW+fay7z4%TcWviZ4Z;)6LE6V%xEQsf-~2E@>7FO*j|o%r6cF z;>D;D75MoN&z}jcG?pW5HaEc3UPhGM+ImQe<+4TYk%GV}CR*}Y#LYaUAWfH9ex%Yb z9Y8ks_UE3EIRwki`B1reOZq)s0iR78^@Lny7p1>jC;kQRC@;OZ(x#Z!>syuftf5aq zDM7u_>%~jKg7CU%E7e982g&M{n4Hy5X*t_Vuq)K(QZh2ctgvlo0}3bH+-skz-#LxV zT|A_?Jg$D$8Z+FNydTMOlgsf<8kl`B2eya^nQ`53d`0x{^M@8S!?$n&XYE1NDN%5l z#%tRkEH?wd_ZCzrgtdc^fEYE5>RgcW>M+^e)UhoqI!0Sl4V$+P%^8TUb)N24Ii&jd zL0E!HK+A`qJEyypNxI2kQXyqx+G95uQ-U(rLZYSyGU0qDU0vL*O&U#ev%NhX0=vx~C~kMl^0KeBJsL;7p8ojMsH>=&#Kp@{5DFXW z5S#V3Yl-u$_)N{-l7JP3fmO9k-IkQQzrQ#9+tC=zcX;~nPtYXIOuLUiU{zfCIW*pqBjs zv<#wzLp-`@j?tpAd#=D*+c{?s$i~i3U9fdfUWc+#8?-m0!KY(f9wYIh4d~Y4r>kVH zr=~!mTeUVY{@}$hiKamRa6`mtE;x8wz|-QWG47{*`tlcRxf1le$s+| zND%6&DLpA81~2KRk4F{iIFyoR)Etb=2P^&#Vu7HDrfG-a?9huxPE{Xb=$IP(0X?7?^Dpp}F@IV3JGW%e)x>4HE>jp`t?d$P1yYf}T+!CK#l&RKAJQkb!R zw3#0Bn0c^^^^c9~IyIjG(w{G1rULLDnc)^vY`&R^W#bgjkSlZDlfz6g3D0DrMOf~{#J*1bi@H>EMm`@m zi?6Z&cspTWa~O91qf=*^-4c#ir16doeq$)T#zxw}kw`10SjJl~X%>HVO|q zY2bbQ=oDa@G-yQz(+;nMB=s6TW0k^RO!zw$s_~z_Q578h9pEY?V2*$1Ig}9SzQG$5 z08qUvcL#51!DI&Wj2<+0^#UV&10@9Cm==YfGYHS|$+_`J*dE5(!W~T|pLT?X9|j4S z)xN2ZUz-a4ME2K~0P#g3|EoH_FbnFWnaB9G>HNAjKgb7E^S{obp>@|veprQ(fmpZ- zFR#u{GI$?|IQV7z#J-n7^^=*>FMQq86ms^RG%GX#$pm-4^^|o%VqPr|~qVpy zpxxgBjX5f4M~3=CS|GFN(FtNeT6_xF_sd#ey)=9=z=VX!(4;bWW=FmG`(tz%DC2ZI zdf-0i|L`y37PmT3`}l1ea4YDJb+L|vO(72sZ+*F2JMzwBKiV*NI6FK_{Al&dk+;e& z@|V-2S&;h!S_dJoiQS&}Dh1?^9Rn1Sm?Qdt$!r?E1)2%L12cMV{|^hcj~-XyhZp~{ zb2;m`1J~>Rhs|8M-ZBqr2vqf{TY%dngOA`Jez_F4gszpX^h27thAJ*qQpCW2GaMHH zh>l@7aNvmnvg)@QzgYE2TQGjOh(%Cfz*>nthLjoRJ24Lk5gfQ7gnxYMFGg~Smi!C2 z-ydi|>=DJQ%IsT6duxKE9LU3}vSoFKaAcUwW7fvTq95>R8exrE0sygZtoS=)3{ z=)i#hc9bo`v`YM;Uz&k9kG%ghUdVdE9s~J0Y8-8QlE9;6xBA0&{!`*@M-a%P5Sh2T zolOsboh$?&uy6g`0-HZj*AMzrX$dGQJU@hv`9=^g)L(0}5?_m{N)e|YhwhySu8#TW%?cP z($VPyQrj?;YIqX;4k^}eKVRH9i9(S5CQMc*t7gY58W{3uBXIK@f?@6T$m@yqPpO~0bg^aFI`p%)&)~j9@ zQ3Qo4c3jkRP(t}X*u9SMKuIFV19)85ArleE)dhekd6g=b{|A*#pP%_dxe4 znnMK?BFfQ!41^eYACLXr_Lzy=g?o+|Iw 
za|caXSalJb!M!_7%P`2?$<#HpDO{>~-+~ z8=@b>w4#}YCekuaYN+?mmfYK4M555q`ww;Pzcz|% z{d2@!Ks$Uv!khly9cz?eOXM1prhFViz7gB>Gz7`^-8eNmGvbDn6kmJ796-t}(^IG# zSp2=`?~%K+%+|#H51R7wqC>hmLUt$*5I$;?W2bR|N@VEd}>LOzgNRO|9R}GF1xr4e^M`Z76+LNOtWxXH#Y&?55JArnYz4}o@ z8)MS|43bjPTAn2nw6oBAznHtfkQT%3qGrkiN7#7H#}co?p6y-mkJ@oCQ)#Dy5q4o5 zNsZ+TzQTk8xTjJEgbj?tBXj)Ryh<(Rc}ML67f0nq)}D>?`9ip;LpK<~$R)8YDZ+`m_P;|!$iTbk zw?cq2rhv0iv{v5bhS?Y#BN_)<#?GZUJ89fiH1;r(`%BW89MuKq1v>CecH23UO_EU| zxk`#@o&h?AoN-_DTwpSO`sH_2ZTwJ+LBlm%ep&l%{Mzeb?aFDM_zV0^1dm8`X<%3J zojH8iAcU3{l405ph{T2mcGvULI%em|JVx|IBIHZV)YV5a;_jve)OW~!I`O_BYS4HhVvNzL6tC!* z(M$bjd9n*T?(FZIT495QV1w&BLPCn+fMf6CovPI3)khCRm6Lxzq4;L-Fi##a_YN>R zF^*oC z@DP63c%l^=Ha-XqwBg?$#6D8n%82tG(-GoLs(=sQCr%x=6c3#qbrh`A+&1R51J4{7 z9DSY&H8bqFeW@Y9^}Uy|Kyg7$tQ|dq)gn1cjV~e+s@3U8y&qmZ zkrLHL^p63rA%}u=uhw_pC(cYo=z7<4l>A|$*Vt`l+(Brh^emZ_G==2}L>v>s0}MfM zl@sayaT%c~7{G_GGh982sj5_d?$QZ^4jJ}5$4lwO{Q1sM+YXNqD2$V3Qz?&vJ0wF6 zn;|9+5801$1{>MCMGFZ3I3@S6R{!%3V?vjW%J+6cUh9Q=34C-xqMo|9nIY$w__7%$kryUp!@3HTfs?p_`g25S(a5%$V3@bakcj&(^ z=k1M0-v7%%1jg&($A$+;2{q;1Fe2A}lE-2^%fW8po14~v`gzD||K~%vVMdUy06=^N zHIGa-B$En$HUBDjBpqs!(5bg)_ zORroUBr6T)rJ3(WRX6H9DTmVndS(o6BkEqYO&Np&r7e6*IK63YegSvA8wF?&lM8bh zre8p{H47el=&ct7VI=`^H2l)xDpI7tpmrd4;D#mA-tZ?Z8=JR}zSg`&WpQrcC&4>9MSq;*>67 zVLU0Y9WYMch&ANZLxriS>BiLy!LNDW%=*#E%46d7Pm`@S8?P4f=$8P9+ImLTEB1iw z5NcMen``$YS_3{7hI9g#SHRosAoP~H19jZq{yv7yUU7&OK;^r11r6*js6I5}z82eQ zVBMjOLQben`$(hRKv)jtakff+fn@C_e2K9Um9r zxA%-`W%tDH=Ab#b%+P?kEJv`4=6L(SJfLn7%0=fhRsj5RU2-j&G8e#u+&+DD z1>M2KdqlQ!;3c)(CA)GE^1tLH(DTL!KZu)(f%gf0#nTO}rOb>HRhQ$t*vKEK)pRmr zeBxc8T#xo8RnbG!CJN<=lEgTYnTVkyQacUmlx_t`APB5<`JRQ0v&S?8ro0F2!`^SX}`e(W2`OdE`ar2sI56E5c zpT>KJ>j&_@`lKCr#x;xFh7>|mZ%GH$9&|~nJuh#;SW=i|pen1IS-{}G?7d7H^c}*T zecVBL^br&;8**hWToGaVn zIoafHADXvHEoRa`KQ|Go5vd>MH)KENcJ94bxy$!uRpuzN;b*54PK8ka!M*+70&W)l zRr@6-3C63M*NP1{7?y$K% zQ;CBpKYqRvg=P{{wKLcRuZb{OAPU# zmH^{rGIQtp7CqRW8rYt)08c0JklwVj@XE>T1BuS0_S-zEPl-yUx<&H@S3h-KAl;*VK?AMN&Aw1{0JhJZbuTj|1tNj)1owwNJ`{79gea1| 
z`ed?SA!ArB`Po=;V&2geFvr8=iPmCWUAGUXo6f5#`Cs(Ebk(ZPJdt#GxrGOuaWn5Wk3$6orP+bwn1Z?e=138bO=E(~G%0;8s z>Xvz2Z<0g>=`e()aoLUIc_y^#wVlHs{ZLk+KnS=SiWi#q+*4b+e2p3{nXx$CfZ>wJV=6cEJD)h25}Qv4 zJco`=*nY@2&W23<=F=j^^vi-hqAV&Sga}Ewt(DA~t5wbJR!XSGhh0u^UUECx2PR5}!-rBNEBOF*Th1!*2YxP3#9B--ylX$_6N zy3Rb_d6V%e# zzWRFoKwB8_y{IfW!+4W=+MUVsi4@C;wrEA%Ti1(Rq*LVJWZd6o; zL*He zJp_D>I(_S(#OULm`)s!(mv1TrnbwIE%YvMR$~qGmVdU$)LMW2O)2P|3?^yDSfvwLH zMG$uj5^*J^qOtp(;1!<<2c{v5XaW+61nLBT^x`4F@R3=~U_Klx&!Xw<;_QMA!Y*$Z zH^u;O)5Ct8XkrqtfGai}(zh3^K5LKqoWEeF7!n@=ql{nqu^)a*%L4^pz>3@ig5fOy zu=HR8*M<4`YCoJ*R&ixjx%Y_HYl?H$8NlS%I$MeBa5M$Hno$?!Augn-KyAT`&8BG| zCPMeO9db&x>sLN8H70=5%+v#Lzl#M7^q$$$<`?(kr`owi{9JePFNC*bt(EIL5`EW9$Mu)@I( zq8CQ|zOogsc?^X@lZirD>}eN)&oEjIA+Mk*vyJaK{8q2yu%tb8?@tfITHeL%^XO*# z8fsY*yGyG4JmQ;|kIuH)y>1t5V@RnYJl5xRS>zIZMS`N)n-UGQN?@-Ybc{RkxUN)s zLiWy(!cT{S#i_lz?}-onR)6KnBp{~@)H_9-y(v-37FEqI9no~wpT>&N0lhG^h0bmg* z8oFAgMr0*%H7ae1ItH&Q1m@J}n_k4R^i@g3h5~sjJ`7&GXSMVvg^2I1%ILS?82A>t zNDQJ?4aOLvmi-C=zusJ%MECb*3o?=YM%k`{`Yp}rn8SJXg4F^FL&MvAhvAk}wo9>f z_tJNZU5P4@ao0rVB<7uE<5HK^u8SDVcOv3IdZ^Vt$AUUWnurY7= zuts3w>0f%E1x4$Eo;+E#mT&^;Tz(FRjfMg}K!KL>IWxGc5qFc_@n&k^c{jrNJ*mMe!ypv%cCP(k$W2BtFRsJ=qMdnr zt^J=z$lSmsE`_T@PTz@m9yy#NZ%-PV`2N~759jHu-;>E%bZ!}kPaB7|I^$&(;`1ys zUipTf1&0+@fCB@>np0UTNr`xjfo8LIlsJPcYelmy9bVP}m%fSZSI%%TC`yIZREw1! zq9vDI9F^I!WswZYPd&cvgwEOVd%~(M&HDu?Fvy=~pU!P~L3ILKq_j&XN1sZF=<cOA2PldZIVxX+Wh<{&YBo@f4BCsUjNnjp!i-*JkLrVYheyY5c=-9A; zOKnrzZV5V9Ep)t{2{ajJi3t=soQSd=mYVN2`94ym^&hs-a{_F&CiN`_L#`ss?a9M3 z#})KGf<{r}VjI>O-J`)?FPVj-mOUjT&J+zjL;{cBFgWu0%seh0xJC!N6vC^wCu|8a zo@op2Q)7lDj^FFKC=NMSHV{D$g_1CQ|^| z=7{;cIldEq9@b^oF}(=b+6J6majg=bQTkF2C&eAdQyZ^0s>gD;gZt^Ht)DzO=bnR> zDw-G*uO;Yh?lDLJlO6Zjug(ZpMko0W67g*~ZC3HO%#>a}P_3|(comVhBxR!82XE+i zJX90S?Re-UwPDxTOH??NtTBatH^UaNq9fLR)W$c#aPhzeF&x&3D@!88If&*D?#V)2pXRUJ-mm&W0{{U&j}XkTfHZk z^7kq7D+kO7lKbPt#D}0JD}9-Q5u8<6HI9rQ>3)}iEBy)@ffq?B-!>2RfhHbw40UuAKKIXMf9i^`y2!`-o=Q#aNe|D@5A+1Sx*`+z0(b? 
z&t94e@uMVTK<^xTj+)xQM6w`D9dVt-32f zr=abx$|N|~7HFE_ubW1~nv#4>_4d%`xpsEQ+z$P>fe`D@g5~$EZ`oZo5+xH`IU`1 zb^2=f{XN$7Lr6h9;W^=+OwZ=P`dS9lH}+!g*{+7)1>210+7`R=QS@=cQcGIXg=C7VU{Ud|con1cDW8oP-T>y+ z`-C>n^>qY>?8EK&kKT=IuJmb1tYscrd8%xI0vHVnhrm^}!bYNB*Kv6uEAVy>asXco zqvV))CKmd4qx#oHR?@cl`52;mX%TVZB317X-iQ4s=#1LN8ZZ|)EawfHc)vO~ zlL8U{zAW1i{m#%ah!uDb%g< z2Nwt>mz6iz*M0MwKUV36AXRxVm7AH*D6BTbyaZ7 zK))F5Bc*L1KQQ1Y7O_r{((a?SvoWl_ehKHww+Yhl`i2H(Nd1`getAwgxZ?bUY(i9k z!^*9aldJk=Cg0XH8VkTS9;faC$63hE;Rgnst9f&tP>+mK~a6^v(g zysDG!ZXVtZBP>3dn&@{{}9q=hF9%YG=GDt&<^(`!YWmhLb5L5wgnfi zNxA%xoNFkJdtce;svb%3D6vLWZ35bC5!scyXeY`8O~59Ok&$~xq9ENi$ev|!jD?sU zO15xc>swd5fcqERwt?yvQ~s24Y9mm_134BAYJbfD3zsVbAEnmG15O-_ff|VK2FO@zT0O9EY}*Z_ zH5ta1{f2oZUB6_3ULtT4n`x5$RrKtffj7A^KHrA7#`D68-1-`D^%l>FhW?-{=2F)? zOB?Gk?Jw_Q4fKS6i?tZW)9K&lbaY&!KL%NQK%rehTO9ye!XwgHC&0|OoC_IZwGk;e zDg<`6oqp3L8@#|!sf(ykgIHC)RgeDmA2DzvT~uKR(uDXrOfv?q_J+*McGJC?b?~*Y z$Z{|>2|ESJOboyn~&_u1>cC+@PR)vl9yKIabDXBheSQum}}1@tGBkE5>e<#IFq6p_;Bt>(3{ z@K@Dhi7&LS^L>1C!BlD#$=Pwgowrqu;STiMu+Q^1)W7Ix- zhSJzc7l&;3p73Uuc-`wIq#+NTlblMj`nyADno3X)RtiBb6w4VxC>HfjKe1AbSn9Tx z*)?^QTJ;z)V)^xuom&L;*$N&%{z*``vU;0Vrm+T_X~U~$;M8+i7(p)%$8T^DcYA?S zKlR!jaL8Ma`oJ%N2-OUv=^eWc2Cs)2h<3L0&O8P+~9P>EKV zYr{6i{|Bg)H5rw!do|%*q!ws8_d#MmIh322;oaiWcb^dJ_lbqPu(2~pv)+{k5Nmr} zCNg}|qAy4lIn-ftFzDdc8`BS!M@EUOEaUwf!buhG(r>@0%=te;_%JVVoR^rKVS)>767dViXirvv7RZXZ@% zE3XUis`7Sb1L$}QqfSs7Y^K0bGw;!G;YDm@&9!Bd?*xBKfm5ae&I$(Z4>sq3@4Wfw z8i!)MIx`@6fwjB%8E%NN{lCQcVh+yKkQT((h$wt>xP^n*-e{jA7hLcZZk!PUL( z%XI4>S>@=DpwcM>=NQVglMzbokH1L5tUsk`A%~{qumml+Mfj}k*HCg4Q4m4~*t_}d z0nEc!tdd#!`Rs|n9uI5sDL%@Nn|k>J&EVoYEUALji^^|EqpX#jU~}~!p>ym{$JK$N zbXHfgY#A*xPaBJtc!`YJ>b#9UM6&GMj-jr?(dTzf42v$Dp>u5+J zwsKI&Cy>$jH4M9L3o@C_va$n2I5yXYr#RN$w~-TH%YX&T-Ni>|J>y;FrnKYlAQPTd zka6uhM!rBLbWK#gWUziF@!T<-pr^0+YUxD<&&|~sUymI0`$#yuGYyRB;DrTlISqVe z;Gyaw7`B3wJMQ!>hW7Z?&b3ThI*N-*xZ{tYLX zpbVp!32%Fxe7&eq1t;IFAa!RXX+qy{ojHr0=WJjt?R;oSbYyMj4fcjJ8pFqat+t@lZNmJ8ubuNLHp5S%@0~PqSHu+ 
zLAS1YL^av6$=O+;MqrF^Se*WsEvf0eUA~kge=qgxNKDVEok__t1NOb@U;0B!+yI8y zyMcDR?3^+xxB2_0`)p^jAOj^0eN-}EwdU1|5udUG8bKf8HWLU0uOxF^I#YDP$U)2B zBU`(ass+&drE#pLjbG^-HL0T`ep|YleP!TPT=KA4ynJ4a*oh&LGOTn*RR|>? zF>T9MV9@czu~hmD;u^t&YV{9s1;|+_hxvCTBy&3M6}LX!4O@V@sU%zO^Ea}CL}e%2`mPM!t~SYD7q z3|V9;QCE!2Hhy$Pi9K4@ki^j+%_~wuciZ^m;|Gimda})GtJW>aQ?|pfy7Yr zyborcXYXNBQ^UI-0>gK3vpBqYWCY9>ewCdE@@Tv-I^Ku-41RYwAbX0wP=~m?9m@W{ zhFt!*;j$Qfw3h-{wLptnY#P%|HA4*j_Sbw#rkeQ9??onRpv6U#SprO@v5EIw+a0)B zb%zGiUen!v?!f%RoZ3^g%z_zb~YQY{A11x;*=kF2jTDjiWTmY~S zRHU;w%_gy8YqSDIC ziF?{UP>UWs9$i8*dg!JMle%z`ERww1)+#2e{s(Kjd4&TZ<&(J6_|QsbKks>zntj1>z3!1C11w+*+p`y^U6IH4*i5@IcvMwt3PAw=4RcUSsI4zlO6 z4b0gW-U+K28WCiWqB$n0i#IAfwk_vD!(r$oeHA5*zc)0-st^QgJBfju=Q!f1=IEiL zdZ0mEGs5lPOAucJT3A4e>=(Z6(r;%NfWKXsJz?|Zy-w~ibK$&v6^ zfr|0^%*LRXNisCquw5S>l~fKr`e>D#zl4S=x8DTVFEIa^p66r<;Uo&hub5uRe7O?Oo6+$OA-}ee z+wd>Ea$!fPGTQ4BN4*mN2AjB5(@#L}`!%MF$DMZVp}pSG=zKa!V|aG6oUnUAen=i> zwS`TQ=v5-S%R_rD?n0;;(bRxI6?|dItJ**g%wG3DhX0*`N8SZVynMM2o5ep&fXUD1 zTa~NjpR4&~C*F+`v!I@|fcpX=@kif5y9({@!WTuo7xDsisS-;@L!!cl+L^GR5oHD0 zwk1MiI6>^mGV-W}pHaz@a3n0a7!LGtAR@tun@SHUSoDWN7F5_~3V^EG2xOW-j#o2Yo)!jt52M(F_PyODIi5$NILzOe$x%;+R z3WJp+2N7AV?L%f>_s+Z-?m+h5Vn7QLFOobxC}dx-R60i$iW32*tNLV z#tbUirqLJS1udwolEOY<<5|s&zu&uZb%SOXyiBuvhtc(G85dvjf1E+~9JUu#z~+^@a{=sy z0n()Su;}c7Eoc9R$?%C(Ere8(u%-jP7M#F0nB{9ATL0J9BL}_EHC^WrlQm4k(E;wH zA7E79fUK}pLHYn2RJe=_pLnhU4QT%egew`sYm#>U!m=z|#0~38AMThykVX==t#tUI z5;|4eW2(B})Ba^nm7q$2jBa_q(e#~7IzTMRUbmXYn$6dV@cM_5i!g(#4f_eXv7h#; z<6OKeVQQ&EFFuIirMqIvUdGXln58Um8hd6`yj=&Wb!wxh9!`!wD6!G!xp&CvlH+HW z7K>)3IFkvls>GCr-2Mi0_*v7S(ldO5CAPxAih2?dTFF11#RwEM(f~OMCP00>0w-Kc3KH}n1r6)tN8vxW{v(~zVD`r4_lx<3zr zB*I#MD;*=|ze{IIkgFa1MdwwuEEXP%^rL6*z3I4q z*1+%p5(Go2wj+y9Px5MkG~>C-JH_D(%E;mYWG*S#>1sGcMc;@Vit%MP=l_}C(s8|f%Uo<5;0#Pgu9N4Y56l=euZ)+ zh_{Vx_#i?Ggr)R6dWQ&I_w0|u{2OF4sd*mlWGx*% z$x?P|c0eXz`LEvc6o389d35ymM8Qd^X-{keP}iSWeB?mG0y@vVB_ya+oPL6eLJY>O zL_#O^Vo2>p7^V$dJ%HVHk!uS$K{~)6xn>n1 
z1nNxl?2b9SK@i(I&M^Lh_ftKbwvsr6RjoyN>R7p=_mXEcAEl2v1!?661HZ-u76pq6ZwY2CQ_>u z+#C<5o*td%`|ja%${#IHDT)I&JIHRaL=pdf;2CghsvI%!M?q_Y8_JWN>6;b-KdHul zS^DDnRFJjRz^ol9yuEYWAs5IR{k=3~JO7!9X(pH{ChW5T{25>Yc?U?Y$~O(&U(U9` zltE3>=C>LvJ800CEf_=~v!4Tw6Que@pgkGMD<}cH$q@-0dycy>aS6u{AnEMw-X74| zRKk#Xt$l;bL4KCpp#xXZsq`mck8OYWUjjU2`EfWDU1s08pZMrKcY}8v>aSQt+l|ie zkT#ZY4Z(3jfcr@lq^%)bztQ5kZAo}dx5|}YQd&g*q1=1EKjrf>HztC6 zAcUkB|A=?J`A?u;2!Q%iAO1yn#|g&&V_bI`&HKJDzKthmF+{sr!O1#3qg?%TSM`$Y zL539|_HzuogB&g~5HCDp+$3hP3pLl4X&qC3NB8mNM|ucRN&(Yx@;S$hTOLW01>;CPM{*H)<>mYaA?6m^Z2c|;5ak`n6qGq zoP}cK(v$5ezg+8EWjVr4+Pk^Y6*{_K3`$b*HRYV;$0F<#TvVs!nw)sVvK%W!FMMVBhfL zNF2aQ(>0{G95b{9i|zw(>se~vdyr%bgyppSmduRxm(i1up({_Zd$27;~*+%L%+laDYsxzK(`w$eg z42Q%!^xF%!+o{J7-|6M;N9H`nh?4F{#E$>-sDxSAfMT&dlmQM;Kg8R>VARci6qI}E z1psLF(+sinf64)Og{WZHS@+9}&V&kzXGB}hfv7A%!oB&Zg(-77AAjlmEKF?369PQ; zPZ@};WEWBhdz3c$pE5L!&R}n^8uqpyyZ;C@wC-Dc`wp(V%;;_tBnq={1ACEp1QST$ za=_2^f4RU9|b(mIJeE_F2`)jGAc6xfSNVI zNvo7==5y>dLu7DmgD@)&k-&eb-!&z1*nDBr-ya|vy%M+>y;uTbw8Em)xI5OdDAE4G z@n3X-(s(_{)YUu?T!Y*?UW&1Jk{WW>q4sV;0LZgb28AH82!9ZWehk?Viy47f%y7so zx4-!uHV?pHUea|A+-suQG2l3B$Bv^~i2tUL;h)Wg`zYMw3a@DS|Em?fB!1uCH6`1y#hq~XEFD_Cvs&yI0 z9uSu4l7L`Hz3o&4uK*S0kT6x`NJs0!(I7|vYQz2Ok{U3JVK4GU1^5;NN7$k^?e?j8 z#SzK(U`7Gh8bI*UTlMjTMB3wSIVgierjQkq3*gRJJoTs}=tG2|tYjJ0t)=~z+Pf=f z_Ml=aZEwTkhm1v+&0pQRK`=0=mN)v`e3Wxvv{l2}XpuW=nD6agK_+7=O)fn6(7}mP-J4AdvD`|nO{bBR*(!3z-4_$WiUMIpxyj_9vvrC zg$LqD-;G?*7HS1X0U!&s-rt}Os@o5_y7(rB0MW;5YB|oHI}hWiS8|t>ljC7j$M98C zRFJfVWCiL$RFx8{mdu{(ik&5u9zF1$O+^9c-@3x&fmCKow%EtadxBY}FjvpDMNe>f z%J_S}3GVbvBTh^|$&U2v13AH=wuf09`9BKU#b%`ljBuK3%Z@Kgl|ZtcNIHnAhU3!4 zYG>-$Fz=2dz5{BmemVn`{>41^GpjSo=8Uro%To11IU95P#R1bRuxCcAoql=3;>F*b zbye%$*pQ!^PWa=@i5X4T7Xej8+v^1()n$hbK_M56OEN9x2RdsIze$SB3V3{F5>#*j z4m<$2|1Ej<4(`MY9-cvR!F&^%#fJv1z;Y_{_oVcw8P3pFM0(sEWg9nsPjtfw!4zm^ zpXxE(3f6JVsF&A;PI0YP9F2L!+VNm8G>0rRgMEOY{3rI{^yMl$5fCY8p75e&zmQ^-Y?#{H)nKCY{>YhmfnLg_1b7 ziLX&T>2g6+WNliP;>R5B_$j2}?;J8E;2Sk;yEoyoGQH?qbLY)?oWyYFbF0s3?PX~o 
zP4UJJ$yxmg^JvpmjF8J9%+m*A2wiKb_E8&O*lq&`brdH?tH4mo3QkRLjC(>*W`$w* zJ~^mPi~kW^#L8uAXxC@;Xo8E`G|D8yU52+N-lF{>Wh(V_j+#s@=FqZve5 zzy<97RJ}{upum4+IL7zsTqg+wok2@$kGOTAW&OSI)O# zOeL6ul4k;Us-bfcWbT42vrnzFcb~#fO?6x1$(E&ah1mMTM+{km2Cl~wsg*c9;5y*I zL0W$jPMNzm*KzrI>QZ;FRzh>7(GNL$;ujwJf%`S^EM_JU~>a?GKYN1*ofkf#f!{$l6Yz|7^v#K8f7yRqRcY}?DLN@8p2b+ zRIKVt4}KykNG`6MfX6;Mr#7cF?5a^Mj1rg54(m&?bRkeXfz{`}U?7EvKT=bvx8r)> zmWO5ql}=J)+8mJ430O?6cUcw8cHeH3pG%BuztUwWCo%z&1G(!%AroVv(WsBL0dS1W z=S!bB0~ZTplhBh4h@I5tFm+`yYy-F1#%cyV&x!y9VQm)2-q`<&yK@V6(T3H$!E~F+ z_R({-w&MlWeQp;ZFZ^_4=QGKqbbMkNULc7jvM4QS?ND-AqYmaVi4ktOC8SCug3Jf_P&j4mB6g^jZx$cU z>#=!Kw8Qhj;nj}*+!0@z|6f83>DF1CX1KDopIwJz`(~}~5#RY^`11qo!3R`Y+-EJH zZcld6-S+{Vtc>zkbuM+R2o+V;o@`Z>N1Lq1#D}6*D%2cKO!8K?rn<(hjdzFyRgFcJ zr4zgIs3=l;7TJSy2^YV88;IPtp-8z9Z1(ssyEU;vHD~dUR)eT-4+!@%yw#0M$#oRn zpS76oSmnQ&7SHjzg(50j^#$%Yimu{q;&{N~Na4Y!@#&76 zpv=ySZ^At(RoT)M9rm!NT6f&zo_A_mnQG5s6n7NA6j+)j5{TYHg## zbMR~puM!`-l znQ%v9@Gs-mP1=l2V(zuZI2#;~);$Lovd@v-_(ndbp7k!~cU!6QhKZI3%)(IaSsag_r^e4(tS>*Y?fRA$$-Qy7Fv29Jw{DG6XuXVw7 z)&$aiWO2;wtOmZX_9F3CQp{PhMyN^N9PeMMlS-L-SE~E;6yw(BzV={%bHhC!_2Bm* zvRg)`3~mHhO!eKKH@?TOiziFf(QI|+p3z?z+A#9HFR^vcn4VQL*YxYz1s0#%e1m*j ze(J`1&PgMgaW->KceE4>Z-q|wFj5}G<`FiRR7DMBELrdwo;uHf7hWMQN|AcFnlqR& z+k3b?n9Yd4ww|3Im2+aJuuu1wOH(y6+Y8#hMs~@cPdcR#y0e$sF zS?N`SQ_M1nU!o;0x;O_pn@P{)vS0c-kdVJ}S>~UEcF5jJxy`f1dN3$%ozfOX}YsOaTkwO9OU}0s-)Te`>$F5V&BH7pi(w>uC zE-QkzYlMp4HxBkgQ~XXfYqt`QVLkVr2rSHB3i2Kfs~V=by}M-F zxiRXw=;0y$halLj(>S?=%Qed-#X^kefGP?%13z}jHK$e<*YYPW%i`sMSrr-e&q&?~ zxdo5Nr%c@#^=LW*`)f!Y$7VfP2{iL;MM!3d?1+(R0HT?kGt5-l%7n|^k!@OrV&F_E+Z=J~NMtA>|~?->nc(nkvm z^XCJI?kQ+|w5b0>euF4uw!$Nq;c$Ct?0k?7(WWe3f!97O$F+fM0s z{29DU|B$1YxRfkyz*xw#Q@U$;(+ihHj?Lt6A|ei3UXByvR1&CMchb~>sW<2D&?asd zx*hdale5N(UJYF4bK1aZzHHy1x>PfHEa4X9E;XTap1}&wcXtLw!E#H}wB5&|Y^lOz zu#RW$Lu%K`u#j}118>|Mo?TXB$dKdFsL?rIv67(pQsupa`g@HILAC@c4vPNgd89lx ziOpY&Hvbl1&{TNFsqxR$K3Z0wni=-qEj>CuZ!=ujW3GuCl8dAM)=Kafjhw`{hBtJb zuKP#nc~pOE?!D}LJJNkeM7~}lhB<{f1($j{(63JUJx(Y`*#mx}D4&76fl5l8zY>S| 
zU;I>4hocPk;BZL?Q!iKl?flN@r zmmf{Gm$jg|S$6&Us?Xszme!w4;a-u99Bf+HQWLiQK4@8b3NPCQzPp-la6g);%Izp` zSTnppsz9h!sGq}MnkHjPtS99tRA^+VpdehUbj(dprtz5dw)n#KQ7DaGbq)~su{5LZ zTBLJBq~K1mbHfV%N@(*g>Z1-G-t=(4l_?cktXEsP3`o0LG&_zt+eP$^)C}>xVqc)L zRGJ5A54nu8hw-V1nmKFuxW%H9ck?V0^89S%FI+NtRvqYH!bZH&l>n`3)t*^fU2)lL zS2;7kHHtpvA!$Czjv(PVmi>U-aEna+pxUPNR+i=O3g;PaI6Iw`DbwKjUhhjUvFWg^GT;5Rn(Jm{OGb{cfUbtW$6GZoO}* z6`W)&j5!cr>Qo)G`MQPDTD*4hRdaY3^|J4l^IwZ3bm2*t6Bo#UDviu!A^WiT_daC( z9svyRW@Qtp7wc}&IHlVNZRkyh*+<}Oir&2Pdl+-nAO!e|44y%Y-XtWW`qr-iSK%gQ z&CP?Bj+^DYVrS1sgpMs$#*`WwMZYX=rz z9II}zJ9ai;(1JB?jv7BtQz_5B>U+$Re!si_#uzn?NOiU&T^r2ARpV4m%3G} z1B8?cZ;E?cKCJeW9s!G{E&yc(iu*#y2OyTpc zXcG>XMq866)2PVk@J0a{6l&Cd$cSf)SXyxXW_VKft#NLFq1VY?Qd7i>nf(&mTsrI= z=Na^G_4@%8*QuFT@8j&-U^v;fB(m7Lw|cD^xT8hm07YJAV{!_VL7me(#~NN$`Y zsCU^n=kUWICEl#fdauX;u!v#!Irr*6jsTa_tQN)7cRxQAqO{1%%AeHp@f2wc$RcYj z&~Nh+)b`3uTBEZhmg-h`+aiK$NvWYS6{8VdRqF%d0FauLAQ>3tzAYz0Za|G;0WUB1BJil9D%n zOSZeDpswn!xE&F%=)Nd7;x3Z^{FOgi@%Cw=v+~GE!8U3?E z8};*QEB-T#!$FD?%DG=zs_6F`lN$#4YLx3t|Fo{z8SkpOZPZ*YU#$yI#7pfelz%Wl z7aaR1eJe(1^6~Ic((fZ3Oz*38G9C;UjR3>9+0sEqxh=f!xFX3p{(Ho=>eOxZL9wk{ zmoT_Y=?5;;ysW$*G^1U+Seo`dBWFR-nk%JkCr{UF%Xac!3w2|0kTW5g#HOZX=fg)l zp%I5ur?2iWI%yj;goPbhy`Sk2iZDqsU7q5qt0^{V28ZR2bYMj7-l(s{EhhR%uZ0#Ip`d&P~0n0<$RjZe3oEsD4no1nea?QX$B{v*zVB=DQIKg z5Uwmmf2iOd4t9-xvMtd6c5#QCcVDWMJry1e!TRVWO3mdv+gwxux^?XmMyJws^AKQ9 zGMs&e(8XJdaGY!6`^(hHstW|{PFwH2YXz@#Ni~swt}z^V1h(8mvlL%a;k=uKG=QrM zhBFv!|K^ABFpC=5{2cC^^{spKJeCEw_Q8jbCgJPMlb*WkL(jFsY_to3K+?V^X<~P- zNbdyCCU}`0o@Z?!VlgO0TV2qrwt@Uq@3cc)fc z!!)^Cd-qg=YHsNJg%lY5~@*t`b%i02`JCml?m7c2)#XqTaF92ow1U@{v`0^{1^VL^1(#}a^@n? 
z{qbN4fRPpi)@Ef&h9kzq;LuBMJ4AQh=m$C$x8(caRU@f6#f=_vo+wYZmDv@(7tGll zoYuWlw>aN$&J+jbvtQakC(zNUW!TrQ^|bTIv}Sed#rz)O%;6dwo#LjuHkMG4O+fZ3 zZGf|zKgB2qbJkUmvySy)E@#Y1i~C+WV~eozE(nxNTFnD@4V5lF4r?5WKDW#(Gmvpv z8EdAm9J6ED$XUR8+y(b)@Opg5P2od3;6sXxoEo>aR;8s*DRl1=ZzKu&HisvH|5nF* ztl-DT@OY_+l*DQwv@j$uo@H|dPiq0(`0$D=eWV=1 z4b4W$Gj?DLUXnxCb88Dg7V78cA`U)irZW5{Zk-eC`X!-mN>bssn}%r2+SGrn?Izga z$!Rsc&c=DiMi^;dC0AxTgwF3;|FtMjPd%l}k8fd?ne)ea1}qW4=n9`Z|MR&QV7N|_ z_7y)}$N4@uU5hRwk<8i5qmMRnK|C)97Tc&!0vC&aB-1)bXb%2{ZHVeVwv-pxpiuJ z=+E9EnzU|vx=pJ3?05Xf<1$GIq;r&p&>E2TSe>NGo4_Z6RqFx60Ny!WuYyiXTkHMW zjMjA2#XD66+%tDkxQt@n7e3fFjv&5q0r8DkSg#zw0W*5)SUCYg7P$+T#$o;fhzbVW zKqGE7jzEpbxe7%UeFqU$W)x);)q#$w)4Sup`TRBWxtWPCd zK9f@uIBpyN@mG0)DkNRU{MGOu4=x8rz?jN$eWJM4TB_x=!d+sA=qo_cFJQ0<+dL~NbTA)Ek!EE(h9lFT`0*wn!KnWPbU>!(2&gN0ShyIdFt;{9)L631Nj zBTz#pH5emO;c;R(IPjX>ZfcHEvCu;ti&owcw-}s7jHamn{Jch$tu!~*tSC&o_=b|U z(?$kH3u;`)3QuQ0Hv|sT7eq6deyH-0W9Se)IXs1hoLH*Q8c9v*?w-L&x8NQ|_e?o9 zGva|0j%T{?I3ndge#u21^I32d@`2sexQfLn_8(u2$e^bK05Sc~xOd)|wmoqMVnViv zFz|;B;eHx2Gzc-_Wh`|&Vuz&o-|?{!aW_(8HZe(dO_3jnPXH)X*x+v&r-FIE$a!CQ zJPJug4PRpR4e1~XNb$B0nm>`fyb?YLGPU(0{rni;XRD@{`eK^ zG!l#b{0-~D^>>Hb(gC5(Rb^ zgpZC##&(`q)a<XT& z*DNnt3GaTa-I&y>`TlZoWvFmAVWzzny<2foyHBu^yIjF@Og`adP z_1Orc4w_d-_eNGnwghK&@lTGRAUj-EL0;M!vCv7YD7{>h|T%`7@QO#X-B5DtNg&hEsMr?nenp2#+i2_5Z(gFnXXNLwWi z0GCwq5EtOPJ96fI4(Mo_Pzi{KtPZEpbsW8yf%7l$lb;l zy;dnsvR7-r4Nd*jFQ085){{Z6_571mC?s-3sd5g!mDCPMDeN7d?B$;t^F+YZscY*fnA=2B04 zH;)L?NTk_r1owMbmOrPd)<20v`BgCkW9_k;}o?OQN^ z8ZG$0KZP>=pEQ2eu>VQpf71BBVi^dN_4b}q>^Gi1VHKaq)^nFAL_r|%c%rb%CGe$t zJvGQ%{OU~E{z`xC)oh_BL@!zV;I^>anppD+GIuPXb!C)Ud>t>G`WGWgozeC*+%sj* z%7nXSvTepCe(*4%DAq=A*u80JCQ%n~hnBPp7nK$mTWuV4T6>-hi643$(j1c~dae#N zCJHP*+KXNs?lkpWBx)~nV!rlRyI<#^EKC&J$F{%o_+FtQ@~TfBx~yKK+VhhP1OB@$ zAAxjEmz9l&C57z-dZ%7F1=k)Y%#7)d39KGAzP-emH!v{Zsy`WfiN|xHZp(JWm!#jT zoII~;Zg;ALpw~sGC&S}i!Jc>HxzVxPiypzXYom>Nx6}7LEJ(&Y#Vg&hDswfpP|jn+ z;f~m6*C(RnbRAZvn?n#GoDCbP)(V=U`iJKMWmQ2Ne#3FC&fCW`;anCZUdtoF4jmTO 
zLtoT)6(-YJ^Tv(~_C2C{ytck>ndD_0$>km!K=2L3YHuHP8g14oDpIy1S}k0x^K6b3j+S<~JijkZ z+T54y`~C5|8FZ`O!Mdd6!Wpsm8=qvGJ3y1ggJ%F~!+tlLv6BISd(C0jg!Vqr2T)3Q zb?$+i(SKCGw$;_!1Y(Bd?9k ztzZh>{=*QCuTH0<9kg5$UZ&~3%Sk+#ccHDq!+K1UuI)HdkaydC%2X++8Rl`FFAhy~ zcdkkY+w|hvyljc^M?drOLKRBUHUxy1Sh@q3SFK583n( z%WHS-Mn(0Wj7yM2>3-R@Shdbu=f?lMQBj5Ks8xNukVaT!(mOPyR=XQQ{o7L^-NWwY z?9M%zLXzK-E~IQYgFHVdt-s;YUI#VsJXmGoa5A<}HU9r7dk?oJv$bD%&vw+Yi-3SM zr56F|O<2)=)Uf3sNmh!<=L`XJ6?-aZoBrOF>0gOh4Q=2xVn=SqmlJ}rQ@fQi#gAF$MPyn%LRPmaHFXgOxKMc^yZkuWZM} zfO9LgkM_!Znq?>1Av=@!{sLcmQ;n1a8wK0=3i6}3kASU z8eiCQ$&3R8(v(zXF+?&F%d14x7>fF#lj@{n-#(Rq)VJl5bYh_&x8S{DkivG2DM+F` z|16u#y{^^#JSujhE<~=;nd5)BjXh_N6|f*sCO)M0lEGhP=Y zMl1e^iHa3lru?YxuorU52)3LFmF_GzxScRTyB-~O0jPpo@A%rT?VXFd2Y2poIodtJ z*K>E#b6A3W5o$LxZ%Z&70hBeQApk5juk4-kaRBSC1;;Ylq&OOqsxE(iWMe=`2BfV2 zQR)Zc!%OWT*1l67q1CUywP!_E@E|2_LKH(jvkFtO4|b87Y9UZB+%eJil92L|9Iv!8 zIPdd+R82tV-I1!zs`{eoU2rU(a`J~;UfD|idF5dXo$j5tvIz3%rjr{EJ?vcH0_Dg-Chf4>{>5O&PBto`G^S;@`kFeG(w)MZCj|RiM zJm6)XgVg8Jw%rPy@w-V&DCU`do|K%S6^7STmhbN=1G$?-SLf{wQHAYF+hW7d;Cr0A z6-4A7h!UsIMN73ovXAPwe?UxEi%Ri!B#=t7wRdy;b3 z)9PiHBV<3^^v#7RdWoj!xo>fLkHrlZW*}NeNf4`y>Tm>576vbRv;bBV_8reVK}ARUVMAb%+H=7+Pz#q4-oKd+;FMQ>siUqPpGqq6 zf8yd>2$BIGL=U2Ud^#Xa1S(&)RrvUpFdT)Z4-o%k<+JN1jEqa+08q%=BlS}Vi2H#Z zA6mY=Y9}~mMc$l((izFZ49B^avxZu9O*3l5x|TDC#;Mb~Ou(V4YFy>o=_3(RuX`w& zT^9OO|2S-uQQ?i?L>@6|;@`{`iJ@sy{QR=7Gw(dGn}o`y8tj~gV8cEsnYZjg zbB}|;25D0W`oUdBimZT_?{XS=l&=}q4Foi{8obPkm)Bx?tlfOnk2U~s7_oLyk{tME zVB8|lxVra`Q@wHy^~RT6I+Y*RH_LJeFTb8LNdM<2$UTs-^t6YG%i0toXT?GMO+fr} z+E&RE)*P(DYX1e#QQrI;4b?LoTCybX|M1%%CJ+QkNc6w9hBB`TBk5%xa5}kD>XVtI z54u5Tfmj(kQOzbNgb)$NUw-E`^C)AHhl{1-1g2Nu`byYU9JjCQD;7N_{!hotDUNEK zF*bCvPLSBF?Xw`Co-cDCR5N7GJO1x1346U_^Z-{U7Ba(u=Tc!;sYKE$V$L1>yj3?K z$&N@&1zR2$MXgZ2eXb*<*V#0sfymS8>o|IJqUBP!_KuAA+C z8dz4S@CWQ@uw}d0ns;uc>l2ZfKJ|&qGR;M%EDOlXjFHYu9(C6 z%7^K)lW!wq?(U_>GjRl=Aj(Eix-Fx<-U`cd%EQ=hvF@vu_B5<25dXRJ3#YqZX1~~> z?QsT&qD_DBPK`p+YTmLGpr;$pW4NKKGyxQsVe9@-!_eVOZUzK(kKl;3HkGbU^{vl0 
z`9)Mjmwhnz6F??yTJ`J$8fk9+3EET5fs|2@vaiU(fkizhz7bDe&hm)R#d{R?Ka;2O z_*VJDuOWRsxS{EeX?*vY%ehC_Ema+NK0$umuq3!pXFR`y)5GOtzbg0q z&Gz+UrK96`?rYorL4LmU4fnmP*$E71uH>759|$4_y zcMP4KyG}|Yq-GjlRn&I^mFQNev?42{IM{5tW`Aj!DlH(1mhL6Gu~u2{nFyp}-r3Hi z%FGB;yA?cnPM_l!^x>alKzE~-YT39D4C=C4>WMXDcHl{&%RIF%hupKX%>99qiC52Q zUQ+Y}mFKT-alk>$a*8Ux+VcAU_A$hdBSq5{T3a`E<@G4L*_ACrCCuZx3fL9DG}W|2 zVOk%F53Ft#$785ES$S9}Wj|)vUL3iWDNNM*(r*m(1bTM~fUW2X=5h#x+9?&4R?oDxQ*qAa|Q4YX}=I&B1=c&9_6y>adZQbV{P_GY2vX#_5EvG#|slE<$ z|MDO5%N4%I`w*Y5T>juNPaHV<(u6dKk>1j2`<{%P&|`+oOtQL7(?FjiB>%l7VtiL2 zNuZG*(xJ*<31=9QWAjn&aG&E!YEKnW5^=@Qb#J#>ckrg7WU260*+WCeH&D|P*u5Q& zHoFGb>^CKR#AWQutPB5Vz@GvQuRS>Z(~^($T#%r>&=Y=w{y9QIyOrGUXo;;l>B&Y@ z>I05P`#UxyU%RY#AEd}s(Kg*Y>3q1B+v99xVGv3PJVg|~iLy6Ejxn6!EUC?C+z%(K z2Gp~ioXU0XqfpG){(I5SvLMJ|LxY?90;A_#oltfy7*rD`^r%`aXRZtXSWQd<`SbQ+ zSS7tzhMeadxI=cdb4pN8P|(}39S{taSq{#qMQw3MJfzu5bRis@e}C}mETnLhScr`9 zxy}2z2Pks;=cTFig?WhovPy9Z{|-a^ic*6g-+D-Pj{*9%|H}_I+?=qNkpF(tF&Rg= zHq7YmB6~6&KZCT5bF6K+6Ixqw^03CA!o8Ag@oBHdUUG7zZolRZ^t*xk?6b_L^nDpkUw>>OGh%Sn-U|#}S4$)0Q~v;(8s;g-ac))ozf=R3v}61LXQ-Cy z(WA8(04g8S8b8T@&Sz*v2Lm#WZujj6E3?`&#=OFgwj5{+C+}@Tfu0YNFQN){?{rUci zHjDCOlIjXL!W$O$g@I2bpd)P4@GI(*+-f!L#%=)zh+_?iV>pDhsSioaw?kvDt&u^e z{wvFm+JpU&gca(7_55m~UT?=P6B-08Qp!fB^z`xyzYlRDu`%BJCW19j|~#!|HW zaS1si=>g^q1Vr@tv+b}*r6exh^yJ)kp?2U=!T;AFhW$!g^b+xl4eF7A96I~^lqiphlfkdhO^r00EZZsIl3+ouua zE`bz}*oc0}PN$~!cMPxmFQjlX<@_m9SXmNbx#rpqoyM1{ZlH64s`Toij1AEAa=yF+ z+8}k9vbIuuErP0>S#bwA{*>*s)o<1V=^A+r(Qqp3)nM`e@ffzKbUFSrsvwPK$Wdb zE3=Q|ZMh9pcLV87g{%I2tD(>)1biKzz9#UOv%3Cu6m3vuHa*!wknjtO#zJ#G(pqCW zAN|Q1`u%!25w>qvKr^U*kpogE@&~X@-_OiTW@h+$Nx$#TuFqXAe;-@P zEHYf3UXls9Lp}y4`enzb8EEwQ^Wo&9c$oZ7dPr$Ql1v*HVuS*w2{pnjw} zp++?Ctl*{$*mc-;DUX&BgA?WL%76eJG`tr7XXmZ?c`pm}pWt2$(xyurV$9$NIvzhZSWEb+g!f ze@$%tSm_w`Y;APd%>MfX?;4g6XVJ38XjXqe$RxPuJY(omMn{)@(V=ab9aLO2!_>)s zbCGg`*+2dY;3qhllM#70mca)3RFLt}2itnv1(GzE7dT}T=T!m$g~n+^;I zw{8infd&eH18Q9xQUq}xQ!h3;!R=(#V41Fi3W76^@cWTQ&swkr(xHwzIpdvrwq@zbE4TI$=T1jr78g5B;FD5a(=yr(g^D&MUpC%0TVOe*#Z>9mnL6LF 
z$RBBZ3A5t&S#l_c(N{nA~(}tbSDQ2fh)Ec{Bi^tebQfSf(cwO7eKFvi~ zNXA-8DrN?Tx=v)3ALJ#KVg)hI1M!EO`SuN+; zjr(>S#Kfy$jQj?<^p^Sl{1leA{&=L7F@Js!ll-GFbHC=!FmetVuD=d)hma^OvMpLVobJ{nXe^p|*nn zJr^llCmC|?;7uEAdd>x>s_kNxj@7#FE(tO%xY+5R&9ko0_SJx!HFsu9XV7SDEnU_; z5u5Vs%dE->EB0^q9B4CuH~V!-_~a1tuXd^u%XVWOV%4wOWp%|Yv%~xVoDfi0fCYY* zh8^X7+Y6jR5&0sYu1_`q<76lAYt#yJ)n>kJ>bVNrm}Q%wz*a+kWK^JAJ4-CTu5akw zdM$q9*II+-jt;on6vY@5>m1mgtYYt&Lp4O*6r%OJi=T^j#&3gK-6IX|wT(p{i|5B| z1iHfOXD#_>;?$~qX7(0}d{D0Kz^TklBgzV}&4n82bSgSxCYd@7t z2l*9{UCnmw^X)3z3}jDGU#q@VMKzv3xXfo}ZSNK5y3EO^Q$L+Ah-5Ge=worFHJpaD z=FUV0T?Unq+i>D<&Mnq{Dw0#+|$L$axXuYrDbbw$>O z=%LMderrn?K5O@mHY)#=%Xth+((0GFUV zR%EcVS$n?@;tKcf?VVe!H|dJd?UD3iy4-x8pi2X$@tI}f1V7`-n;N8i^6sat+7$M;RtC?REZy4qt^_x$gXe1O6+pHM@cs}0$>Nz0`jka+x-IQ+^> z#OB+v>hI)OwO|=SxHAWbN_XuGwG-q!Y8PIW&{C=0u<;&owHBAqUOs7?@?=jVPg5d< zt5yicc+A-7>pRQF{MH=}ZdViC;`>ZwF8VaTf)Q2F7U)@ZZ|O0-Q}i}Lip!1>klBg8Hp*ZJVYyDB(P_LYvyWjw+tH*~cfZ5XjG|hDU8mj_B@{#1 zd-qtzX7LVH`N-PG7FLC=3bNg+lU)ciI|hlF-Y~73m(HRdIEL5_ZE#4N=x=d#t-s&od0 zT0ubgFYkRf2rUoxHEX}ch40|@b$4eS?6HNvDwK!Xa$^kxJc#Y$b|uQPQQtgZ+sC@g zB$9fN-?fO1OX98Xzl;5u5=*>a#On3MeB*QKlE8jk)lGdh{&rFKn(qMsLcOEP%w`7< zTi60VYFP9MU)g0fh?irvJIDNBoTX}5@rcZCl7B$IpCx3%&k{QSea}f5!JGyhW*?d*bk82SLE=-+>sM%h9_c&d#8ZpJT69oZvX4z6^t&tcWtHpbkN zG8*17PLOEZj`pHxmafKH*U$rTO~c zpp3L0;C(?d5uJhX*n^A#j&yTgNSx27Zx$W+1jdScQI*7Orf>VG?|DaNj$Xk9Q=i!Y znvaO2h!eUMl5hB3{GaJLqZA@tz4C^JqCZYL=0$5`K{BZ#>Z+z5y09q>ubk^QZSAXW zobeyE)FjAK$jeY}a1;eoGH0ssW>(&ke$Ww|))~Umb#JOLVq2}%+Jtj_^rS}hjEI*| z3GIg~K^a{!>a$qx7b-3WE4JB5zoMkfda^|Z>939r3qes_%VJflh>&9%XVfVF+89&X z(XlhXqkBFEd@PflFIl78&P*!9LauRP$rWg?CHmdet4?_b5ze&9Tv3-&$EeUJ+m zO~;8^8*$(Rx}GgY>-Jf~Ngtlu$TwDe9W0GyZQ0A?z}P%955nWRI$-JAf$p4Bt_*A-9RW zWlM%<&NDv}d)RW$bC>loy}cK#QUDS6aVdXxu|uB!CgG(^0B%uKyOQFNyC+t83osZr zlPfr@et;T`mYvE0ZuWsOdEksu`{vQZ+>n~cknONj16E;76N*mYE1F7}UsE8!7zF67 zRUYteHq#Umq9?3`$yMp>x-Wl4#b?a7lqo4ORYQiDjCqh<8AJWShLGTZtPS0G_3DBF ziJOD=JEWe+E(S-zj!_RaCxQK3Ek=vYw0TF!VgxX$NRqchobfP(aPILbe?!u>>n_zS 
z%;b}tSc?_61jp*dlzOnM^otEJY^ZlAr?UZT{@CVOXXV;EYqCxvb$MSKm8-fiW)#*R z8*3gu+_ZOho}!JjaR=8C`vU$zsh?lUs#@;r0bDY4`xCFhVE6`!^5EYa1(E!ux;a+g76I_`-!}R$E-uQ+Rds}933swODMiH3st!uLR#Fz@OgeDh3r9+uNEfufGO9EPJ zH)n_gV%B%oEqpf5Yju5mwh}GXs#axL%^;J1v|4omXC|{a89~1lQERr5uh09x$k7Wd z`fC8I+A@-G)0U`l3b^tw(|1+3S~@(Z0C-(s+a#~(q|PVZST%s)Tt`IqD7n}K-gsy( zCkc3*l`X3Sn&WfGT2vpC@ZExUrJ4(D*6GsEpQa7Z5 zHxV}GGFbA<6j5e*^eaB()V39y&a0w*pE2GYQwyTm zE)K+-QoR!AZR1$hTNz6DIFTnpz)&IFm+JD_tovr@1)8`9Lw1&3v26AE)f~j{NuCr!q$pdQ_=9ji*IqFS@gJS3Gt8is%?|A}C*AwVof;o%C^nLZP(zocV4?&CPbTy`$MkrFKV! z{3Hj~p2g($n+9#H_ON-Cw<~f+csOLfcoOCEO&bz;;RKW*;T*51xLweW93`CyM+5EZV%gjH7_=17e4Mtkng-5t5lEAh+m zD>-jfgQZa;k6$dn>99MmD#-6+fF)t+)$GiMoPTxFb5SFo<>WKQm`q318@0id?AhyY z$b*?2>fjv;Y|hSEDn|JVG{%5aw$>AOR)v)-DI;--UFICIHRczR3gX{~^=J4wSxH!0KT((;o8YHd%z#b=5+`A5r>u(~YB{ zJr+lrk>!e*!E%owyd>A0Ckm|_oVbekQ+F6)tW7!k1BU(xwGTVXa3w;XA3nnAb>9d> z&(#B+S;6A3PSIxBOpy(juY5T~#KJIqRKkneQDiF#d$Y@1aeVBA@(SY(mAkc=afFvN z*U`!|HQN3yOImi`kshgMaq4x`EoqNSeNvWwjxU-#;LaxA-wlotvd}yHPyw(-ci9t} zc#cpT76839Ijyj|yUXvCj&Tb)pyl}|pmT*MFyPlOVu;UOedmAY&yv}C|LKK#jq=@& zzt<#$`3ma*$Ta(#$XWNQQy^Ohw1TO(thG!f?V{oe7ej`=>8%2%J)6}8w{o$OwNAZD zct20;H{#_?UHFde>uvyeTl#oT8}F8-QYn(GF&_s@b1)$r%xY2Xfb(+v=>@bVy+qS9~|9v!vDSko4dvMvW9qS;k@d_8{HV69&ju9 zOMz{uZf3RgxOI-tGfnKSpOAx?)dz8^!9{$3SJDte75Di@nt9lpX-bL6Re%Y+A8M?4 zl*}%1hkt@$=EPe+UR18$I&n3r_jgQ@)7rhy{67^hRqH{5vvn&oN_%*RVzR)zW023` z5myPMmaBYOj4843emW!6+T=gPu+*9+LlkK3=RxMcyOjySA9i^ z_dqZoCT6tAL7I(?rm-$jtz_#@W~;-}GPGO$mn1n;S@f5~dImHkD$?Mpj+cemoND1Z zuTL)gYw)h^+*sZoNH6NK7yB%#q&$LqhomwDbc%d`6^Q6vz)h!S9!a-6);6Q@k; z?$fw^YlDF~y}(uobgd*}G8=J%lxiYL}{nbzgUv_b=;AFprwDz6Jt0yydBI+|NO@Sw4ja84XQL zVwR#EvJ8Pt+UB`}orF+}&|c9u{59Ip%*e zswx0=hDv%4GVmg_qO8E!8tlP2R-KzyS?#q9%xU^@%_FlchjgI-_ubX-dhv>$Cgo!V zBVN>016y_MJNSEM38!87TWs&F%ADmbw(e+&*juB;!vZz4-?pYU2g|O-`!^I2(p3-z zseh;Qtuk5&T1dQGsnZ&)wC`R1Fec{YxrC1WY)uVMPdKVt!zC+cjz=@ z_V=}7C|=6PepvvXi7R%hwRh`{99=HI8N22r-&W|;Ip1+O;2tzZJiVO9EmU|&)FSQx zzg?Aqbk1CtwCu@}y<&V1=?jcoN0*at+f_18tD 
z{je5e3I6O_ddZi;(xTyopm{LNXA}f!#sm6$h0%(R{2^sH1?PJ>%m|K&iSNk8M@*ndXAbCW3jR4o{9KGlF z?9i6)j;R@0Xp4-_JFdMKFu_q%MKMpy7(QTU{VLn~Ip|OIokt*a!9KdFgx0xO5%j~6 zK6O_EjN8-wb50P9(X)&YiTw4$QfLQ?$&~rwsO#1!+)Zv1JH(NED;yTYJdlbFo`cM=eoW2{Ulkn+#s(q?*2 zb-BNb>I~j=jjXZEI`wJqq=vBz%B9CGLmOZP55GtjW)48>w$7kGh-LZ6+;Olc`YORX zA9eV(OH%!Ed~dLp>fPs%_6RxgUt{H=nehJXTr8u!-kxwJN=62?-E|#1m^O#h64tKIXQgM#n#gY4?uC6Vax#D%W(2cIJBmQSE3Mp-tFePsTbpvqx{1b zSoPQNg?@RYZXEtRgKv)RRuftUnRB6oX!HiEjzNuO>dwZT#HHHWBKM*HN%teW^(B4Sq6R^t7 z{*r6dwA>^~rOq$gi3tjqI_8Qwwo?0qpjFtvMLLF6o9V%KV9+H?`)@AUNwQ~JvOAix zsWU>+sMzT=a|Qyed9eqacij#ND|P3E%B?{4!?bXp=180*0@|?z@?{#ZSxVJrD%)aO(coDxHo-$_UZ<&NGmu080h`9jFA94&d~_+ zrsh<02x?YQTQ^xxUMFBu?*6bP11VNY@Jb{P@(*ZjN?j|@NE>Gob%J4!OJvfW@iHvW zWZj|!xP!IT-pkQpI!;2wj-4@OXz`dH{B}#F_1U=jRaXNyQ`3w{wW@DQ8b|;F{naFA z#nE@A>{+_Yl3GbkSanbXyS&u)kg2Okx%8lwb3>Z)CRdvY(Z?v&aej2SQJ#kksqy*RqKd|d$;x1_qcgIi$o*gS# z=}sMPbQB9Y94W09ddm8Yl^pHiI_XYkRv10DDDS2F6zZ_RT2nNf-@H0M`Dg~nuE&7X zKmIV;1|`d6OZ-I;qGLgrA?grp&s&@E?|wpl8_PCub^zg{vXP(qSLheTb8rzrz}^R1 zIOTI+Vc?N?qO1^UYb;queMq5founbb!Ki2F!Dc&LD0=bg1YZ?&rWHg8JyyQdw+C==Vx)?yZvmHF7SPT$W)|9 z3`CA)bM%hZd*G$vTPU=br9puZ;fB2x*3?z^*+hqLS{#fl%s0x;cN%5RL%xA zT;0^9UZ8jGP9F@Z64>8yw4Yy5mmhOL@luEnn(Zx}!KL`F-0~qvmn> zZNP{$<)`DhZQ{7iBh-YG42KvT2oUe`II^3Ky*UBK2DPeofH{6d_;dgvLYE{;`9LAW7B}WS>cIgN zmO8N7eV2VL&~R@Ng`ZrkQD|Zp^yeQVzq zrPe|r#dNs4m1+$EuM>0GaA+~?(0zN1Vm$g|egyv| z5vTL`!W0QRckQN=>aDt|F}=fW{*_nQF9{Y#lkNn_s}x#n89J-Koi13}wth69R`YpT z7fhY*E~k2Csb_Y#Av6nn!&TSz@&j`I(>Ndy*Hl!4w^bf#%|SYQupA2!E#C!QSl0o7 zog-_KSyeJ*y&67x%9Z=%P)Ww<@7j&_|n(jDIG~YmRFT0Y%g=Bw!Q)tUL?o5Xw zt_k264ZnXo7RBn~FiVWCjXy3*XsrdT?T4Xo5yl4CPyLS&oKPALCnILB>R}#Ll)z(J zfWoI{$khY53^?%NuMf*B_Fk`$I&y>3MQcgS!wQ8#QaIeoMrVO>CCP2P`((7#g&j8Z zsF4XLsuDdz&>HUFLej;1^s?x-zU-1K>e+GP8~^zv(zvhr*#FR^&w_3wbfp{EA?lgO ziAVVromNxA!tQq1X55>(ikAXYi`}17^v72Orp$xVyVEaik7}$Px{6_k+i*1-F>9SQ zRv{ZR+n1)Mj^26ybw3FV$LosFHl~Y^kRTM;mI5YTTOxQKpWIx!9=po#@*bj{unqmjJI1ziGWmN{y?+P#QL?PM3$gq0yL|m%I`a@q`|~#=Q+&@-m%y9S 
z>?%`iMNt@N_DLV04aExEPeG}xT@@W)(s)*TXjzW>^N6b={cIu;lE zqlkCVc_8llD?`lX$rCGGh&1ss;;4s3W78$QpkVj4yp1U++buH%P_jzMF(0q5YRt^q zQxA)tq{*9Sbt*~zQAeWvd!Xdw4*>`R=(d6YWDe!N9lV9Et?dV(1+pt_VoPGu zp#O%A_Wt5R4l&;O`V5NXy)+1Pl0M9#tu`ZnW=gKOOnYBO|C>?Bw^vAm*B1{Uw+MQt zRthSBK%-1Cczr=CL-MGS;@1uJSM>T}-6f_cqXNVmfl8}?It0Nmi7e>dc20RlEE~>l>FszR0%aqf8TQGVZ&TssVD*!~c-dgn# zNDuR8&w3aHWO}GoA*)~ds04ZIp)F2Rbf7$l?9QBazC}Ia%P7g4(V1qo81w`bCutaN zMKpGG5j;|`g$3K45p#%3b`pwacMp4l(V$P>7WpRF*&Z@su=aN^Ww7HrJNt}XZo1fy$ zs}XS|lydDcqT!5s&}u~7H91#&2}`d(=rL(~mAhiX*clI89<>AYIHdZDzuzn3OQY># zgtqs`$=VZNq@z}%r_ZqeT|2WKSOVG2U}o*CpvS(*l3Dt^dq25(XD$Kx=j*cl!6+eh zSfJ@_?2(@nPV!OoFd!=7)HL~7+??a5c*3WAMl-n4U7$3o9koynUvC$TxzZI(+i^cC z${qVcCf^9AqQP_w07fO7EE<0H-IUlSwJ9*Ptfp%#iJisq-ozcQ0ZGg5a+i`S|1O(& ztT0-6mOMFa5#=@`-#1=i8S4GzElKj)WxBH~hCnkEq_y=tpT!I`O>m)1XxZZ5QH1XX z9c8H_K*dxz%ods{zoEFkPr5%lXsH!$3C%m`#b4R^bgHl7@?`8pYn9*mcRr2yY%5jO z>#~#?Pt!x&b{ov^gA{vw1L&7D&<4pi)@^RpXSMDfWLE&$2fu6}>xXTpYB+WDi7F(X z{Qvu+`_>r^1ksda0S))ZrCPA#z0idA%VPp-h4JIM?kx}Io>#Q)&TL;Z+PlKH2JfV< z&7+*@Ka5~-2Xqjrso;gmyZ9@R@>TXePb83T<;~_~P{p$F(0`aP5)4^^kUTDlK%oy; zJWIb0rrta?j{o^u9k*g1%3<=2cc`4x{N4Eo73$0iCHpPb9{JWg;+&l`^Od6jUPgfr ze-%bnpYMPKG;50krC1BI`r?QZ<%uRX$!yU^ zv59hDkT~G_CP{yZx!o`Zg66fCiMMK%Q(NEts=awV&=07|ucfKCxX`Q?CcD^l5|pk< zE?3!sYsODtunT$V&F}w!3AM4d?^E^GWzcmnG&I&7uafSd8*ds9+BiP6nHbh&@Pdj# zmaRB^4%QR48LNmrmnj@eR>xSKSF1Ara13WH2O0nk7`BIs3%u7D23j9PRdvm-uo9-I z`=8p_@n2EpLTCkm;jD7|GBr6lr!l{~JZ=do^ng|$Sf|(1vVKLKuG|DW^8u=(t-b?k zb0OL`Fgs+CN51+S01armbWiQ8(5g-2aoY>ZTeqC$TaA4pbYPXDh^d1@HQg zK@hzO`YjL=U-r#S6>d482THWT@~(?@_}^-%MgVs=FuU%LO>!kmzI<-KaHFCnk=!^`w0VW^ zsbSpt4Am`G49N|M{W?67gLC4qRAHgUQDOlOZIXv5RnW(2-5j#oo$2ai?zy_r)$Oga zVb%uN4&f?dAcI?q#@V=j8fj=lz_Uln5LH%P_Sp^stiV(Udp-&yJG{3$-#Hs=&CvBa z4LRe{S#Lct-2^9Kjg<0h*_M7mArEB|Bjk+`mKQi)SXKviY0XBwK+!PfE*9Had9vm8 zB+9-Qs@H%WnSdH}qX~GDfX5_)`rhFImglo1{jTFAzbi%uabo|8T2QBe;Vh>V5 z+xSGNWmY2AolfnsW0ys0#9p9Ybu>iJGfZWFkd6?rYjtk0BHN&vqzxU~AJR55tyx>eED^NM4s0Q5ZS3sVZ6YkS 
zQ2&6JN!DNBQ}EY?P_a?*rc=QZa}4cmt-|C{fv(nDxtV=7*%Llsw*X;zJM?Jd0MB?! zp|&#KAnr%CY+$Bvd+?&t?o6ZE>C-sK39vL^aAki{84?jfD@`G2Bhbh@BK`_wa)y#j zpVjT@@+gIKaLu^JRqB6=m=|t6iB%h^2qU!}7K(SNYiC;OyotRj#49a~D>Vnbh$e!w zL$csm!(fa?N4<4RJ5U*~f4~cgow2Zcrh{=wI8md+HTfD4<#CQSFIvSb8>9{Z&z2w^E9WPfX=>F0<;{eUiTnkdrNWp?e>y)GJ}9A zoUyY$Dx7}1!8S>vjPSVMxC`8eQ^jek+>{`gqqnBAL8QLz1d$JvpkM)jbIjc)CXA6~ zm^VFFK&9z3Vx=y>qe{M|m7h21{|MLg>c{TwSRjgM#5=_u5wtMs;vuWYhnn>{fiBBi70fXsj0V8i+P1fGpjxzWg)f(| z)&+_6c!_I}yRh(3<$QP9A&mo&HY65ApwYgT4ZJ2@j$6Q<3cPm=YK&40E!Ahk8MUO4cf|FR zbB3T7O$K>rd!wStHTT0k0_*~FMFZ>{YG<>=AsnBEQJY*-?hYn;Jr@J(E?^>zI8`Fo z6deALbTI8dwdI&cFJ97;SK!-)o6pnGPdTzQ)$8~QFF*a#IPGlttk$8+m1#=wmi3nz zq{&zb2^AU9#?9H2lKm^{+s?us%%wK+CL~7ZHR*65R8Ux0zC2$U;r9=$J#>`w2-VcOz3v^v?goWoiP|eI)5%BiV7) z73!$4n(XOuL@6bS}-1V=2@cP#y(#Uk_l9g^77eFb!& zh5LTo$C?4^&g($dgZR0u4S=Ctvsd0^OY;BQjDB)W-JN0oV>s~G$71Bor5tlV+T8X` z9_emmNU-y@z}^5Ib&Pw&JW>2;8p?h7W3#eRqwS}h|K9t|2UxmiTr@+kf`pZvhh^xf7Z215l~AtGl37( z>UdffsT5DU0qx`o?zcw{r|Y$(b$7K9Z?_6c(p81(Pa$OsioPbG4#$CjpEwQZyv2^B zjhJnQM`#sFbYH~ikZq+j)i%fYSkm9S)tp#-O0^VnH&tm1Gp_4h z1COHOcF}ucGSTGrL$ME8;Q+OFDcYOR(=1?_{B+3`g4Du};zcqAIWEjD7woa35@kOSXB>RRDxC8FR02^eA=OtDU@*4maNbmNj4+(|yAzG8HKbmY^dq%NX z8M~1=sBp&?dVmM+DvZ?qh!gkc&>)Y<`3XA(>T$&b8|c~2aO{eFU*{G)MIP8NM1o%k zXU^lm0|WV(%PnQN3b6T~qsTWO{t6x|K_w#@S-S_uU@$-_0kzrQJCs1JKvJ;Y?`NCX zbngK8N_Z@HUy%UmLP?r~7j_Yzb1a{v0AxX((3C$t4Q2}=$!ob^>0%DEJ;EQqQ$g!l z#QxEK>+10-Lg|K2F|IF|T@AkMd!gLr|CLcVCe2VTrndn% z*2A?b9amzo?o9NqU7+J^$PuYSr#+M)IlV}E>6sSl()rFE zoJC`6r|d7`{pTBi!HLK=X3fOT2-zLTPtpn(MV%yeCy%5$B50Dq2t~uX_!_VY)afnB z-@mIP7Ltc#R&aU12GnL^wo2-8t`ld7R11s;g&NN$KFP=B)mm&;-MG z&cwQ-2R>QP)od>!q=O>W$-f{LLkkzhFXD~@wc*)M!)P zJsm`@o{NsDOr9jPTUGXiJ;|&NR@Ni@rxd1UeGwsV?*{Nk5<@IIQc-G_TBGp6Ou4<=P^&OaqBx?~ZczfxsKlyuG|$pjl);k9;)O zf1QN&{+B*ejh%WEP&^0xjFQ~tVrzMQODH;Mw-v|BPLADQBY95luG!WG^Q&`R-s)E5 z5oxTl3$h9BUKB_0?>yc2sQp|>sCFQ!;MA||BGiXU< zu{9p|wF>22Z9-P1TGucFU$R>BcCHClQry>FIS!qcZR@Yd+HmaVdY;$W^$)Qmi?a7b zGsUcTCk@@SVJ3cZZrFi@YM}R1`<4{ruVWdvXc+SEYVpOtX_(V?l5{T_AV^Ezz;>uM 
z`LX?|SEq&FB$fE7{9PoE2oh5O;s3JHyeGy%5oY9^7LmA8W`REqaI8Upv8pf&Y? znRpxlvYbc8{6y;g3i`%@Pb?PKM6yWg~O+V>xYZWj;m8t48QmkmCsDrT>)UI7-lo`Xl0%drxctIc8N(IPok0 z6?G8m{-qV}9}FDXzOAZZ>aCW$TwpJou^wCvmM0n@Cki(K<{)U%N=;!czCl%Ds}Vox ztO(ondr$tvXlV&x5|3_kt`aCg)tzenmOuX5yjPfDR8hg{EW!vSCrM=vHq*8r_FAli zZy~`tG1pzhgO$!0JG zV>lCYstX7Mg?A>szEsF6xbi|2HmC1^s*)8OuXF}UUX3XcP#|UA$^mQTfAxeU^1JJf zsrj@5npi$p;r$+mCXykdeE2s{XHT8->2*IQa`K_EVFguG0vrFAoBX3764%?@_`HiS zE`L=ULivSZm!G!@nZKe?KkNzOLI=OkfQFnBBTxrcz`$LMJhiC}tjm2R;?^9F2&QWq z+33*uunwyb-%VGGCg(=m-OIB1N5PAb4gqvT0dn3=MqFkav9(|yeB)nSqzHQ;*a&x<_ZG*|=ifhMaimel`T5>yZ}i7`PQszkt)h6O^WXQ9{R zz~}7)FLO<6oWDi#fo1|vta9o*!(gP(#aOq?GmI46}{}0aII@oRQ89zU|9!y*e;o-O^;VWw@-%U{w@0+Gh2Ib zJM6xgAwU>`I8gVmCSuT^rdX#zta28MJ$T0iY~kCLRU_$dgAvq>2RWtl_jpu@ZB6W2 zd$B!f#Wz#Bo-W!vaaljIz_4U)qLn;eJC9!0!`>opJ~k*x^ydnSU^yedVJehS85uLN zj^%7A!#K~??TXGsY&GeaJe>DCwGJpgjNFx!Gj#%(4kchhjKY?o=}YB^$^4&&D&L>O zY&qHW{$lspkXmhG9=Mc|*Y{v@koTBZNCKe-sb~J!BdFcPC=)VyZb0OfN^<*iLCrh{ zv)q=+vd-qbVwotNOkkd-F(_%L2Mb;ddFYH;NnCYh|91t6tpqzY(4-gydk@wSE5Up_ zgi#=Ma+U>P0zT{=P+^j%yLGXL*YGo0LuPG@t7Lm*W&rXx$geBSWFZi+X8KzS>#o+ahPG?dd=Cxi}=YAW}%`l z{Bte9v|}AiXWLlyfZ!@u)9(#b$tV2LJ9j=?#hlPz^cvREUHfJqWtlnonTbr_$FY38 zkFb<~FUA=<@Q|k$qOSF)={J|pry7-FtO}VozhNI{1R75J9I#&N=+v2_KDhp`@~5B> zTjlCOJ(s1ZP+zGH=eekr6vt?!+%`?*qO+`Ogd(Q*P;vUT%J=3jyV0!)2zvaR&w|N2O4OGg8 zwuVLo%5P&q@h9C9xam<4@SK}s2fhLE1B6(5Cuyp3-K(WBh8~az7GNrs6HkqE}KWZ0txrU!QTO{;{vgy^Ay30Xe!Ul1 zX%gIn9d=zgs40Ck%%Z~U9M-CUrhbq{VQlZ%k}~yr@>8O1op@l>;91_W-~g%qlCgb~ z{~ROE^{MzOfcywmd-Hqh>*+xS16a?h&fpgSPw8@^Q&9qch^6o+Z5L3bYGp8MKWi=Z zda@^NrBoP$s5@O<>-V7WiNRKF)Xx8+fCUG8$(u=lwvixua?@QnLC_r4IRwhBszS7z zY|7Gl*`HGf7HWWP)&p{D>JHVDWigQ~S{nPEgL9zP1Ofa3qPARfDP&)61tuoYXR^-m z#73?cb}=3>=ctnoo4ZrYe0lozV(iuaPE%7gIl1vxy7YeyzUo6`XctrEzLcRfK&$&e z%i)kyWm(9pMN^NQ>By^2j8868le<1J3@IzxN_ZItzRo^&L`h6CmSgk&_s?5|u6 z(ynG}RO6W`{AN|sKh)U}q*trUUA~2r+m|I++S~YQwl=_?Yxq{8?%710KUfM4h)d{F zmg{;^xfPK=Kk!P_t9%WS^5vB;cgwp`Pju42WW%CTi==YA_NU2D#Wh#-A|l|p80Od~ 
zWT2h|CE^n8!sbM>D#xQmpy0IvIyz*AEi??C+jU?X=tX6Lh<{w4ahvtF%jdiKR)wiA zn(sU*yyPR4Y*?i?5MCfZ_)4(1Y;<}vPVp-_&D7vgQJ{YFq?vK_>kpf*t)+49R$y==pHHlTO`~h=Y4^3BkW;6l? zb-u`h6pig_It<$uTb$*Q)0b2uPX{Z38Anl~WmPrHCt0*v|ppisxQk2n6Dlqs^O?SX@`14W{UQ z%Q&2kcRd-1WCxEv9wG2wxJ$Mb@Fg-YQ+q1!CgPHXuP~&F z*3nM@yBV}SlzL4AB4gWsVhMG53Rh3aIuA=nmM50|BJri&0IsC^yhq?x%kTk({OLWL zKkP&Yntje-|2y=pTAP6jq;6{be0#>RH*PCk8s&%}fC{_}P@rlp%qNQh6^MK#vg9MF zFJV(T8Nq~wpcV$b@!MIoqS+1j$Q)AYc1{2)WqcOBD9bP<&S!P?yalkk!03^>!um1~ z+22)1){M=K6B|jCtl}kkv1$IOTZrb9zu(oSPZ77 zc*6>4PDRTew)7V zp@ug(qb@f0zcnd>d%$4cy1l(23)DA8e~x*3@fC=w5xBjk>-E_lUjf)6j@iuQu%zNj z2UzJ1GUc|m$meqsIa>lyf*Mf1X)I}|3Ya17;9mitn{W|mxjKQE_n_)zG;r$by`0gt z?KEpFaVt{^UzUY}p<+@(^+vrpsdR(wlQqZ_5A#XiPiuSGbCFP53!l0}7^FL$fr-QX zO<6Cp9st`CICHt@TW40PU6%puNU+Wud#q|nNVZ=;8(=)mTv%HaE~Loj1#k(~V6k}~ z>pHZ88{%y8MAQ`lbm1lgr3Z5q$z!PfUEKUdgf=sk_Zr3c5*MOc%&A$1io4ntWUYGl z$L3^&iF_*e5mhbCi+e|E1R8OX*0?pw97usuTA-!(>lt zBl59Dk_sm~i(j+U@S@1ynBM}oo(T;0$tlQkBGQuy_0h`ME zZh6Xs(FRM_fOv-Mfjftdbe3~+1j7T4`xWd#_Z&)%iW;EtevP+iZRR4#%ODeivahbG z>~HQ#3hDVOI4XFb5+=!_>MtN_-_Q4f>-_GLNc*k1?7i{bD)_lxV!H9?sk9cIxh-4M zW&7x67C-a%uwiV|6;^M)G}(NA;-nfq(mZT1f9n2OP6L#T^Mp26eYo<9ytD64(c)vA za8SeZ`nBPrRQ1)K#O4m8Y5bI|&r{pVEAeD{6}VO8lhsj# ze$??B!dUP`2;!l6>yq)eEQQ=@1}EF@*ooPVsh@(?!P-Lb=eogO$L zm2s_913(?V?dPPIp{I?{nk$6d`#kN}*ZKf81wdMeTgO7x`&M?Gtw(W(n^M--ai z3Rsk+X<^{L&-i-S#Ljjyd=G=LrgZlH$(kJoygq`FK!V-g>(X9=5g7t)H%7YxfXqbv!N=({N z+?V0H2WOw?z@E)!tt1nDLM z@e*uOt|1^Q? 
z$rfss7@pFB2vwhS)(bdevI+yT^38{&M`_NOOHxSD95>JmI+{{t(v$Dx2)6a{ zbJ1M(PA#QR$14Ajnldt#!nVpTL(+n*ehf=`d!@!RsnwDtu_3J1$8#Fv@aJ+-tATU@ zAq4H`0Z%lJ_c02fIJ&T_E1X%F9TJU&w3hv?`)Ig^!uO~k@!+zZ(-f~Ss4cryu}6TfLzY&grJf6JFZ~m=p#`cKD@cvP z7trj+1Ha!^A8`n&HWkR+43i}qTh|c7=$F8o&F&rzA-`tG5149qRI`Qmbc-{S{WCFo z4V&)z)_aGLk!IKOEAwz3D@AQ5F5X{Neiv~D;H{tXOg4t#SW<(8cS6Kal&=S%jRbWl z?u?&-*q{uKLw(@IwS1>^2P|%aOjgb#cb+!7-pXWXC9pK;NsxOKysf;d(LPEF3l)Aw6dE#pD0)>^v^aVM_KRY2bL;DlhtPPAGB?5R26 zJ3!37vq?Q3Ah(LD^{m>9+@2NXShyX8&){2Q&`L+D>8DZ)(!!0#@Z-+K6)<{90Y)z` z>g6z923ikrtzhME2ii^vm+m-(CNAssdMp9cx_I$9SQJobIUWx&LHZFcMAlB+KOm#$ z&oF{I7Xm3fp+?&kb|S7r=AJF7($G~7uWNLQutjl(E+wYf#-WXD`y=p=?(6~)-sCqH za|pqkyNlyWu`gSkfQ?0I2Qn7D%)3gDiCSkrzY526tL#ssC$F$|tI6)lZUw8i!UR&>~wq%FLnEzM6)#$NCA_xi05nkRH4CT6jIXKU6tiT$K!fI+%%+M_x+jvwUw@ zx~8&CKse<)X?UC9%gM*|YL5_ErSovUp=DU}*4oa}e6~kbm|PTLQPhT=^x?=8M%s(Y ztpUxqiv!!T-}9(kUPUq<0%Tkl5v%KEP1O)Enf82p62LXCB5rF-fWXm!cb~kIXKn@m zuzi@d28UbDhE;He!qp92`JODrk{3AyO&qT7FBWR4)vU}N>$o~yK=GO4Ye6az9@Z7S zwq-*K)7f+z8nq!M6b<+I3T<=NcHUdVkB^`)&}_m%e@?;~pU*1uHs!Ms7(QFC+8l>R zk-bCPxWLJr^*&U?K%WQY2;PT-FFQ@T=ZyP7dL1c%ek-u0lzw0E3o@-6n4h%8k;- zu{0a0aC}<#Tx)CgDPAXa2C2(`<(?TLx~AfGc0!b1l(Z<7Hb(7HJOeIliuU28Nq;ny zkZLIz4Fcv3r2>;kR00Gcg7V-%brZzO`wfBZRFzBVH9H`BDG7@SS`KVgX$_C?32q8> z60sA>$1}lmDO6{E?~n`HBIIbCUIGl9{g?5FGHn2!rEQQqwwQ4rzeWOMzZQOJ{CKB{ zXH}Rmg0KkGftx6#cK~QwguuWpm0)ei;fhY@-oQ5Xd&lrY=N_-glrJgp0Gu|o7%e|_ zkIEDj;@qv6*7>)+U&NA-9Jcd-^C6A93NpD>Rd_iw`2}o z;Tu5{mHCd9D)19bgsf)YslD+9$Qo|cXJ|KN?iYfX5rd;fHPAg4Om04e7C<`>z)geU zyG9VnJ=FA-7Jg2qxBqsMFT|&bp*x!Q1sb0hM?9Q0&?)3kjokqn>+syD_s=0ucKimg ztm52V6+?N85@TZm5ZnX2zLNgemOE){F}oc zF9avDM*eF2GB5@FdN=yfTCCG1JB3fxS9CldZ_2H-Cr@48I2yJtn!No;*kY&Yn38rB zdnMg1KYOP-q^w1BzG~&TuD@m(DFs(<)f4L2t3Rf4K8uT0IoooOO2h7#3c$X11RBvv zgS{U>SJTwA3ACU4S&DQx&z|f^gCceNAj0gi$hhu%kssE9Z%zP1zUfIN02U1ZkY&n) zgI9M!Ubrtl{pcMuk<^Xt%N$@oORIAhBE;x+W(?x91Z#oxBi!k!)26H4CyY7pYB^p< zM7ch1JTX2otC1^TIT#44IIWQ#a&*hy9+@6E-DHIT8t0B#yg{ZdB=1@kMlQiElA{6;EWV0tPHw_Kdp8`HN8l)K zUfil4c)$tp7@9ClYfNYgbfm;e6tv26J}87d1+%3P 
zqD5a|^2*-qa-Y}_2Oh``i08-n3d#Zd((i|xFG4y%y5%6Cy$m2h$eW+}Y5@i*eajWw#6w-0P}>SKKMEI|3lO@FO_iyp$( zIlsH{*#MQMIS!g=GjB|vDJ~^|kcij9`ekDS`{`NR`>9thn7X*|+b#x9e*yGwYI`yz zo}3mz5P&T;Dt2niFDOz!1b%W|;7y7rG=m`M>b2HgZG`PpU^+k{%P6-VFARYBCQSk} z3$XY2_8LZzJ&YdC%Wvz)1S}W2$y98t)=>RDT73kGesg+s~8^v9cV z=&eRZa0!sB#m7-37N=Du17|3Yoc|iaOvE~XU^#AGL5VeP7w{-=5V)_PW=4ndf$Rsj z*jzfZ9T$k3_1xSuD$v!TD+(8WzC!|LlC9k~oC?o<&OD78+1suM-2(+>i%9=Q*Cr($w29p7mnIA81C}lPP(TT}$mQEk`g+?t4 z@)-x$Dm+U)_MHKrK&C6oOL?`j^8F#kdQu5~wPDBrBe;_#aIT^-EkWpzbJvlfP%}e< zIr}<04$9LXSnOWv4Kqyc0KtNgnT}i;ASS2qUuc&>eRvYMp@lG}MysPG(fO^wq~?Hil5$+Y&nH-Smbd69&{uhYS%&;GdsRO8+otJ$IJqB~51pl9I6NsA4B5Eaz%2w|B%FCkdL53}6rIDioRd2zM1vd<=DM+%YRXV!(* zV1TZA10{G;OW=f5;-(~tPSyOi@UcrMP#dt=v&kap0uA(=(ro;oG)(s(EXvJbVqOUC z99rH{K#7e73@9fZs-E+Gw{(#BH)x`^`9Q*im_C*T1bt0e=ANYv+f$?y=B?!;1Ce2- z80PoZJl$wl;SPoy!(l_c+=WM3BOKxRsLzOo6I_qB&VsCE@5Dg47Rw)`F$RM2S^MBj1&N{$lo{L?1yJlh^>6pZ^Q zZ_yQCsGq(+W>p=82p%Tnb=<-_o-byvTM$k3zW}NMNDMvq1C9ah;(6Ctu_6k^QeeJ$ zgl43sz^>w!q!CbAduU(-ny`cp3ydAR_u)!5Udb*(c~tR3l_B^OM24{$Ob-zqN!>(= zC#aKaSC@Yw5FdOnFA1V*HB-m?pm54(x&Hji(wM}}EvgHbmEYhKc+ zrzkC}5Z7gRX!wgiaGn-_L5XDmZ-Tc$bqbANpWt@&d;^?kx&l-p_D5vKU_X@HPVM@f+1H?D1Q6F*y-B<1u^7o#cAA~cxDKav`T@Rf}>9doL0!EXYXju ztm!Sd&pE;qyPMgf3czmEn6^NDk}7=@5?TT3rCg1%wmALbl%;X)W7FxRESyzBQq69L zULXMr6(v?w3b{H;um^|cn_hWA)&WXhafXP&c7u+>J>PWTBQSs1`g*y2F`9D%DBB@y zwgts!;bhrLlM({`5FsdljhWmp8ZXo;`hxHo=d7Tw7c&%7o=N}g@~LyKMsa#v{X~7^ zg<}ULgUM+hcZfA_$y*Cz6wDHU>I#kUL<~lL1xX>#ms6?m1{9P5DrY*GCO|-UK`c?k z54|HOQy0J6z2GXMV`BS{du@d#?ycU^w}5U>3>FFnKo3$ntS^Zt;ewz0JV8JO3OG9s z>!X5cmvvZv{=+vn1F&n%W)G`3Eb1ro&RTm1J;5jjCfE*flc^R#k)EbpTOu2yo-Ii} zE6TtF#P6&3HsM%c2WA}5hyDeT zCP>ClaFg?(iGs!B#fg#kYiN%|^1hM7z`^kVgYOJ@#y^%qQwtDqCbY+KGSYI__OPuV z*pQ_e8hl!Cd3gU~TgRp^Un?Orp`2^$SDh4p4d`@PG9h+Z9cQQc)7{k(w@TYBa>P zX%JC@nwQKw8YZi|O7F*EtZ|+Qz7Q$KY-EGj+8r&AQ(_7obGA?pLTd)P_ixwL+tw|> zig#PFih6iLgq0Mze!=jGLUeR_`nH(H8d5)XUxyGeQ(L?bKv+)Z7LSMT49N;q6dmV1 zM+fx6&#oo{P98ASe*}n1q4*WrU4sf(!|+Ef$kOv-5Ob%0e7|;(WY_P|zRsiz-M{(| 
zaUEWmB15$m7!r~K&>g>n!O)@V?qOmu!3)uxK}+TNHC7v}w{^jARYcS)ZZPgO?tX&b z4L{smF5se;T02*CDA>WoT9?HjE#_I3}S7%~qTh`j;`FO=F5 zpYQIrr0H=NmF8A>*lTR~xSXki56cYWEI6^5S0p6d;)QQR;(QN?^Tkip+aJ#xB8Dwq zj|hN>x5B@@5Nhp4@CDN_;7*iP8v#UTi{*FN&%?Syz|idYmPL+xojh-K-wVUkbAfmS z=x2IX$^nASpms`LUY)?=++E~+I$(UV$~~JcKPF)_N7~!&&*)?$4@5WMgR97GFrd#b zkDL*he1SabhVN`b{LhCKo}KtB>JZH{3d#qsf3bgel~Q;B%XBPMQa(y*@jc))BP8@Z zXhcl#tDI2Au)*Eb&Sn7U_^L4!ADtbnUbL`eg^${aTOGH$O6Ctw6v~IRO(6nAXux+g z%M2Vbd?~yIkp6LcWE5=(h1`L|x6_w6M)ID(i7(#dW#A9EsUKg6%e`8ClfvR!ztPnK zh_2mQ)wv0*1;yvH!Kt~>LZN2DMzNKyRC91b(^=fau zSr=>J@or}!0rMhE$Gu~yv6lUU?Hc8`*0}*dP5at}5^zgCvduI^d1(QT;q*<)=P{4% zc2(rQp=wz5DNHKKc8E2}8IfVEmf;>=A_mUzVJy8+%j zpFZdB-ybDq0HR!neqQ!qCQnB_z!&C){w0u>{xS+mqKwzgi~mGeq?A3fG$@8YdG;6~pTo+m{uFamMv)1bAh|ThFhle3)HU$bcmI8Kk6J`! zwOmdYa$GaB-k|w3PG2*BJu{Zc$wA@L^`D$?u@WV(;YRcn=3>EZ zY!{l=OPMABM!f0JIjc7zWL~rKL*bgq&;J4otHpX(V{5hkrnrd3l=}x3#!N9H%|KL4 zC9j1cTK)Hh;xs5f`SN#a#hX2`>^mmwYdq?dugFwrMCNbMq-QaN%tz`C^qfgtee~Zl z`5R$61tX99qD2=0bu&2b#@22bm~tROvL8@G{7Tc>&!BmoHD3Ui>bIXIVe|uqH%9Rgv2%jjvi#j7TM`5q!vCc z!#V?4NPG^aMEO{qoEEY)k3RiG>kOU^E{HIzWni_+CJ}G|5Y()~r!~-DxY0m27mLdP z4kixG$LlBh)%G!Vg;uXvoO;-LB0?#fW|%?56I1E&8P}rEEpU%dd>{Y(`5ki((DUt8 zEM;hvu+3xA;5TT1t_A2vr@h53e`G8E-zLN088{_o#SE+noV+K0{WAFIF#Tw~g{C8k zt5hsMIeRqd?2Q5=^}6=3C469d8wz`zg5>Y!$p@mzu~XW9BDgF}hTGM#)Cz%e(tkbH zykb2RJa%s|^M8A6aJ++k7Z!DMf^vMsP37kC%w@o=0Q7A>HWQE8LtEiep!83q?lL5v z?;d21mfCwNq{5)vdLqZ|md6{a7HB|)mGBddMfD9ybUyAHi>0%hZP<>;3jg+WRlsC& z$NuoClftE&Ke55T5kl7jNgAZse@dEKrI8enH1PE@|BIyQfA%V4*2%!~3!Q|tHbdrt z3O%H5Ku$Ab!)XOu-A+^s6Tu9O1NpK9`D~I zJV+wknN;%q@o70J*kAq3=;$lr6b7x3V#|9(b$qogp*3&O&CCXsRlU<%ra5q@dUb)L z)hEvs@Tq>Lv)V&MQFhR4v{&;ozAL9w3i6~80a+1 zA2^jqx4MyoO;I}U9?du4NR{7*d}iO>?;YwR4G^Pj0%OJ;pP^31C~~8j*tMo(H4N{u zBoTB>*aeK9~AOj5}ZUZP|Zze2xf67YR! 
zk+R_%7S9JBqd!HdmnMPK2X=qnXevP(CmXP_|8Gxadhwo4yTY+^6SBh2ln(Px4+4NZ z_NbLP0|V6l{N?e*(UIrivOO5{h2G$i)DQ6sHxsx$<^k2Nr+^wRjy`}oXv9Y_^)buM zJ}zLs`W{QHUJViduM__k!aTb5^GAFOk?12cwEO%G2rq&qdW`QwFav#H32%*9tT5Zn zon}1FMI+=bu@jAv)`(U@`9FNd*wad*PM<(LR(BDFRG)nZ;*gRm8(k2u8c_Xqaq|*& zKe_ofQC>`?#k_GMW5vMr>EU05BL8uBFYoL2z`A+&9m&Cg$0((viv}mhtos*d5Xc2# z5-6MEJ@@_oW`85xke0*ew)IGOLfw!TaZ}C)gaRo(;e)pb-dYHuq}0#O8y!54(F8F7 z&_Hr*-?UK8oKWa852MUQ4)-18$Yx$02rHmxH{S?>#aZKR+O)m?*N5B)bx8KNueQ-$ z+kD+FNA=HJlWSehk)nmK+_VM3(hYl6$|2uq@Sq2M$-Dw&*^eR;`m$l|zO%Re68ygp zK#ThquS5Z9Zspdhw8=XWX2H5J(0^*es00JhUAKKTxTWmTh(bgt_5BXdJJ^rQt|KFLS8r7+M#bUg}y25$8Wkd0f3Z2)S943 z9iIA+b8Aes0)h>UkOthc7ucIQ$~V5G!FNT`(%15Td87<9ho}(WT@T!q#Fx}GQdQV* zdrLhac4FYtY0@Y14`TQGCSFR)JV>fOKHQDdq(}>xj#>l3o9gn%M)n*HmTU!++n-oZ2G_l zfKWYlN$#mCT@Af9*>Tej`b0#Bkce1;lHhW0A_+*%C1gyLhNK&9{DeGy(C0A|4KK(z z-yyD|?0Jt>zwhJyH(9nZ!3SRc&>us42USgTS%4^gA(`1J3Svu~9W-z_@2~6rfV6IW0A? zYq;dilvAEEJActnd|G2z5ytvRjDXwrJUEK*z=0TjsoF+Q+Bf|Ah?fn<4cwTWq5Hy> z^=OD&!rOX;$MA6_4yO7FVB=CFb|TvRUQtI#(rj6M*CjuK0e9^cj%$W ziw9XW;NLkmCgC>SI62gE$$yh`re5G!-9cD+I@nLykkYRhLy?X)>Rpw)mT`7x9i1Mf zD(y8RnnU%*qTiLj4l`4?5C2E=00rtEVH$zJN0MQ`EPo4f`#)XTqFN$uJQ4r!)XwCg zZHZMuRJz^i0(H*eN4c#3>tFv?*ZJF@9FA;Wo4XdvUsSlZ3@QNO$Mo`)VumBH8i7TX zUcHQVpXabaE^ETdLtM_3a-=YH0q6+64v`BR0man!?#BML;Gx^rej0K=((A7!;id%GlpPV^`k>zp39 zkyu=T)em*jvZ8i)R!MF8@JnOHk)mzubyxT#S_frBk_racr!EnjxT~}?sW;(BjD{IJ zQSrbR(8l&|5;m|xj$L4p8a`jfa=_w6Ef5g&V zoFPLD?oFlz9meqVaP&pz`x?jfjlkS96QVVKX5eKczVLwSz~46=H7VN(b(#BGw8_-^ z!Mh#H4Dy5~*tQD_c~ix%eF7Us74>D_8#c|w6QG)aNr6YHMcVMd$4qxSoO(}nKA-A( z$pnxSO)q|o=qjB~#p&9z@YW6t0!ywJtd=)GrRz>fHXILB9niafqq7_pY+v4x-)v3D z9-n1g_sLLyAWie`Mdp68vfcFJ$?C{*ssla9BNH*j@vu1b;pI=2iK`S4KFu2Sx+kfm z2TfHfK{oX_(cfFck$o-^QS?%O`PaCv6Xs+9ydIjf?q{EQTQ! 
z**xK<(c})O79eg(wH>y`p0yz9;dZZ(H5n}u-4chPKTXoGct7%~Tnk=2aK5IbvjHb1 zemR-c2ctQ(vRs<7_9PTdd+EfrG0Dru@Pj!K-3hjV${Ic@_`u_S2|m3|vPb^6$hs@O z-aV$X|6E%U7#Ds}nM!aMf!;D_O!bfr0A&jUGG0rRxl$J~g{)B2niEKV^NfflDGAoC z*r5cn1~&1*!&|<=zkTrTY>Dv@L;QxU!Rp(U<9+P$CG1YCT!-4hu*Ng#TM}%efektg zF5c*u9Qc8L3GE= z$`~`&bjck{ihmq3jCf~!vMx_-Xp{Or!*~_h(RG#3mYU{4jI1Hva2^NE(#CG!%-3qS z(ui<3OxW}GI^#4yw)xR!Atjrr-5uw1MAb2OC{PQhCJc4%^jYQ7w@m<^vG3BKgJrE`(DfKA1 z4p)dBa?Q->xD{eTH6Ezi>!sskXKgXBrU8znC$Ri%L>YHk6Q0GN|R;2vTJ ze+5<1Tayb%Gl^0zR_^@KsQTy72XSU=lKy*5#nOA~I5->Y-A7ydVZk9+V~{`P9)Len;%$Vim>CZgVR8?T7GawTppx z|92AZg5aS2bQo()&}Jfz*~IaF_IPPr0P&gUwrQdY8%X*oc%^&-RC~;UQf0*-HZR+O zXg$j=?rg}wSNbkJ_Z_weY%s3Q_vs!dUB`LhyBstAJn44`d3VzaV@%0bEk3IA*k#hN zkM1)N%wl0#J@%FKl01|>RkUY*$uOmCJoT5_5-6?lm%uT~dqK2PR<68I)m(nWGg`i; znp?VBxfXCqGi%&aZ2WE&U+UGXGx@Jw4>aJqno5r8OAEJ+{XICSQ~o7ob@fb6GWC*_ zJDOL}a3YcZR(Jj@^0zmHGQ8KqA_<^Lk~1u$%-c>3na1b~xg$>$_`@vn5L-d6o=%5u z^?6PX5)%&coZHx-Hh6FGDbBf$LnL&7<4WL^Hzrfc^R2^}-g3#5T_3nHaL2XkVGYQy z(3*qJPwlhcb!FD8=VzNG=Y!JFidxcdr^qy77)6(sR#Jakz;5F#2~r(HH?G2>I@c}^ zSF82a*$c;CMf0lB_%O+GobRrW_NVuqWLLkC@V=U@nZYEP|C*Dnq@R8I zYjjry`l`hdunaqH8jRrI7t&_6_i%C+ZK{{zD%SVejDf!H*499Y zj9kts>OL@ef0VWVEaWk%!uB<1`jN@;mZ?wJ@7@=cNk|^di*&m}EB1E>r%r4IqK>f& zS>>uQqr+1?$lIO5CzoQ_-$CcKeS4{)?p|Yvy*pPzxUj0(hhpw{vVp>iZ4?0V5e6WHJ$cELyU~-vxudx;x6+DevR_^w6$DpXux6W`t%Yi{u7p|13E=$B?C(HFFqR8w<=dWV9 z|6}~^<#n@KnR7*|lm~g}4W=OHdF=}i-Y`{geD=6Xe62^BWURbqN}$P-18nt|NYbtc z>Aat#fjgzATDKyOOy`%+)g>D;Yd3*zb3u2jukv3tGf`{Jd_YA4&|(Vxvp8`Cl^PE* z(wx|)H7xu6fPlIui6S1F}JcH!YyvxjCWRx0sfL#r8(_ki)GL9u;VO&2J6tRAFD zdWYqLE_JrKX=jR(@4H279| z<3t4yjewqM3&wF1$VowzL3Arj^iYb$ryh3C<;(IG=Y3sV5^!onRZiYo!)-<+{8sx= zlZLB~^pr1W@}4ef6M42jMFdI-Z*Jt5@@~JYQsBBA_7D^MN{*OlVsdAll4GFq;&L3n zAL8M_u-+DR^YLzW*+644`-_n4ex5Umazo;x7O&NL8ID*SfptQ^M?JfIYpIk=J^4E2 zYK8}Wd#ZZqUv64a$3!HGzJvvo8ci0wunh*GyRvs6?wUIuqLG-tbIp{uP*ImtIq|WH zz`N;ho<@+_$y_G%SRI`-M@0%JeQEYKrRtIa!H24uu{CBM^3 zgYn`+>NF=v$Q4Qw8z$%N_Ck-PqM&=4pVC|PA6O-pNaQg?jI6XXa7{fm6Vy>=b#53$ 
zEk7YR?IXYe&ZH-_PAg;&(=19*Z6+nllzm#YP$8eQ*SkA*FfZMVb~V7m(p#OIM%Ovq z$~lCyQ+)s}C~+{aaM-M1rrh{xBm5(=+3@N$jt}oYXY_rZTc>@HekE$Lm&UNlayKOT z^BM`u{f2V0Q_7)6(d?nIqPJuP_dqZ&m`}d%!C;_ z^eEjYSVx-zu&<(?F&Qwf{OPxYIgBgo)0oaC*v?i(Ww&<)t8N?F0`HyFB0x2g^QoHD zG62NtUV59u2}B5G-1P0}H_gjY%uk)TxX1oAVDCLx`O>y0(Fo*V&O)+iNOtRy_1_De zr`hAXbi(njlH_Le%4g@p?P4G7*rw^H zn`JP7!Z|z?L_oD#*GejZ88KOU{UVXzv(xVvPJJ@}hDkom!bXBL z>A#eIEL1OQskE;5y_bK#Ey2X}&+{9Svc71gIf5#phi2%sJiOIy2P2Ot#3?A}TZ*un zJu_j!$3J_h&J+YY65Z;nrD-432#{B2iq$&@FfT7V zDI<)tgxJnY#*|O6z+{EvGd^E)`d+9W6sKmc0n%zV`)`LQA=}!Qjau#qlH;(5W#SOd zeo#kMuX{x(uzzw9xJ#Ca(^Lqakruq0&eY}gbH}Ar!k7&UdEU~!ktnE`}gSkNPdm7;M(w7n(`kS?5+tssYR`Z^tVhvQ7rK`=VfvFWem|^#TDYj@m9m^ zLhGS>G9k!nVGtmuCl|_6^Fpm=9eUJ0*@|J^iQjRt$~=e#64Jxt&_SwfCf2wZ!Fin9 zeS@YuL0O$Ph+`%^DfFVZ&zY@4Ygv0>wv;~zg1%-97qN(I>L!iK6SCal9hP!PfAbv{ zrQB9C(xrX3>7zk2rS4>pcxy-zxodAPg8l$Gs$=3!LJpM;VxcdgY!#HHD)^6U*j2^c zaVn^Uj&D=?{hMV&YXg!$&*{${` zJ^tW|&A?&jm16>aE5`)MEt8`#(uF2Dof!>|DeRca2LxMR)N-+j2~9X2=j#hJz4UzO z`i$?FPS|;yZA9I(-2HH(kDa=4gP?h1)I*aWB@%C4pEVEM73yiG8eY+sb(1&VUS7PQ zW)3Wf?e|@UOm(;y$%C9R$aniMx@!4ZDwlzSpv(map~RQGlmP|Ei(h$0w{Or~u8~1U z@O}nnNZs)I#;_(aLYYuB;^G9dUJJxOFaKUJo;`wj4Dc;8|BkCIWDNC$|Eip}g09BA zalrH+pHJ#`QwpZsVa(b6ThMJzLuJ>O=!dFCyVl}B1op>XP-dy;n|#!|vGzQZ*haM$ zHWLB*!%R8Ds`(X#XupdvM1ht9B-kKb0vV$nyp&Zi^Qc7fmSO*5{MO3o3l<4wXnp__ z<}XU%tmc=m#MRVr30o}NUnMcxyl$B~XBhf?sk?+;Fi!oDggn^2i+aj8+X918PXGGN zEFab0c69UVcv+|@bo_@H(A2P7Ux>kz(0h6xHf?xLN+gVfzHg?X8URJ#CTx1x{8EPr zJ@LukYh5H|$6jPZ=g&xU#~ zycT`rg-GZ}xWZR`j6iOhp-xX+u28l!c@fJJ#j3BxE1ADmm--9~?JSdN!<{NoKG2${ zhyCFfV5_f4^uU~yME9#4M3B5h84Sb%yUxVxhgYI5>16rt5u1WMGoMAt(R@1e7>1tS zt`V8)>KD;#&l2)OEg97SH2Ia`(nNWnA>)*LFH3Og9|iVSC`yvWs?!I-uSB=w&E)bq z$on$}wdrzp7lSAp^)xB)p0Z~emv8}3PDS4v?HO3z!}#?)lC*aulOez&>WWH~wJ2|# zxGuNxNtD&Y3ba_IKI=m@*{VRiuja<6?-PN&NLE&-0`|f{wiR?M$%l?}R||)*95Z1_ z5gMa06=ny0ze<#GWIHsKeGoGp4YN(dw(XBlzP;rJqj4mA{$RRv z4i4vfK6y@b4js>-k&}vPRPeZ41Rt@Gt2hjyI#f6I;#$XTjU1>uJdWKT-f5*aboCZc z<<~Hc`lHa2DA>6jQp8nc>1$7x*~o-S-MjnOm!i4)fwn_u#F2b)Hb?zk4rbyrua@?` 
zhwloRR4i^}_s-#Dw)bD#zD=3@ZwA$X)EN*`$_nV;5F!10RV+*$_>e=w>dQ=^0tg*Q z!{iV+5GbH2&6v?mP}ve+9h5I??7mQlS2^Po#Y8}NDl!P46fAhUgCP`)$-H=>#u|{d zfG)rClV4T>K6JG&mnm6_JaN36KaPN%%d6;cSvTbskI8i!aeDFD&mq&nWW^8a2%<-%a8dK(Wr) zGn~wChy9^g6k_mY7M;P5n>Kc?GVd|I@0LB{z>$=DHTmWrmKS6k^OVuk%$3PHuzkN< z@}4>2Vb8vp{LK>&-R8_+&0q(I2_t53)dR``TDxCPD-sA}auLK(**qdOg#9yfff?JV z218rggi5Syf}9AO{krN#k3+vho=dG?9TA7=F05c4WbNUGZAwGfXr0kYYcbNjG!AGk z7eSxH9%u(r4jr3+B<}0vG0kMi*?thjR0cksy{b$jdo}tFP$YJJ(9O542Kol_&4)#1 zB5c!Xe}w$?u%yk$r5TFWV=pyCKxx~m@U@yIym+{LZLqKIaxc)h(4L8W?uSsQM`0}= zN8}F7^Z)snK!p|eJ~ZwzXpyCE$_mCUCCyYMzpE3f9bC>Gzd;uOjK&mbWgipTWaI52 z&^K7u>zRVMN+{AkP1^}Z)YZdaB@{Uhbm9PFk_TD@k>b`v+ zci$E5Ds7Uqi?U=%)@hd%5-k*oQY0b!G8Iv>gb=b-vhUf#OcAn^eVyzv7~9xp`(3Yb zSDxy*pWk2K`NJ;?mq#k}8Whz7Pc3#gfwy2PGVjpyvZ?N!Ryz7l%-(SD0n6tYg`wA?kI;M>86oT1pSoUY2Pl;S&^~v=>r$brwo_;gTiCD?;zr){36I& z4z@ilfVvAcXHM{#vR=18IvxZvQAMd@^05{v;)Qj+TOeca2$~j{i3$E7_})X^cK?k{ zM#Bpa+~ZJlz86&4`D_Vm>tDHXKIdZwyjrH|M#p?Kvmc2xkNL8u%jS!Gih;-?T!@aV#`UB(ov<>YjFHWO=OVmGoPUnqMlQk7}KVbPc!t;ie z{6}-k`(h$>Jz@J%F+)^8W@x+xZGNrujyqRrS-jL+GJ@3`GUeg?N z11*4QNSMgChUk=SmTVy{92%DnTcCLHG52_J9k@(|9meyPrk#8SiR;izy;Jt zcWX z);j)f@~eTVVTMY*{N*Yb)V02|?&zH+5qH)LGEkW;Xum0tqeR}QF;~qMG{rB_s`;zv zxMwT+i41O~w_ZNu27hQ<|M5o}kUwmU^Av{b?FvLTFr>k?r`4`_{paSYZvv6}+h9u9 z?M;u9td7pA^Iu9NA!M?jSO^=gvp?@n?zJYPjz-QV-tyN5Y|(|brPr%nq0n*Se?{_^ zxO0_wk^H$_thsf`?a#ZS?wts#yvd<{B^p451_Zu5JKbE@n?oP#y6f+RpZ3g5k&Hc1 z)PMfdae}O>xzIbkk8`&xQ6|H6TgL5x6OMaB3dpg$OihDNE%#+!?WJj&W>&QlO~R~} z6B?`*8WkPw;U>Q)0|0m!&~|J&`o4&Lx%vEB_VSX@^VcdLx0_T;`02eFFdlZ!{3ah0 zb44ML)Uf8Crijruw)zXxhu-V81taJ%Q|GQCAm*sI^r+$q<}sh6qFstXRRD~?zgy!4 zpWL35o5yvAp=-9&=uM8W#G#9=FD` z2KpOc`<&-5lc@Bk5z{&ebS&zFfgDg-0jBjT44ef@^!t>S~hZ0WN%S*oxOi zxBhvxU*bK~1-iGFISr6@?4t9_DZmGED3-!BqQ>r_zQ4O&-AgWByyZJ9=0$lYz5tbb%J-&OKJQmERGcoEXFX zW)o@P`S(kIqxr*!dJ`yzAelSm`lxZecLt+c-n5y^kF3VZ9Ehl7w_D6&P`+ra1< zEWU)ol>00Pb6~fI{bvIyJvGlTgQwN*y!I9uq^GVA7te zi$sCtxV*mTi`m=j?=%Ig5fdMf4Wnz}G^|+4gURPDuc^~|an9o@{UW96OWEwo*wGu| 
z5T;!YEA*UjI}q$OYnR@=5}rC3?iy9&Km00Y%RcDAXxzHBprt}mOl^&tR9w0K zy-uSs*D8g*>amm?%RE~VBB&XHML!VrzDFof_KT3Zv=nN+g0f(c1_vYW%WedvhU@_Q z0*3s26g5f8hs=eIq5taGJs?$=dAaRD-;mS!GK2yg?jl44DBwzk6#Y*XYx5nb%pj?? zy)$EU7_=QO*`J9&!ODH&KUSxMqXZ!J=V$(Izr(cTu?6} zA5qUg-os6*b$KH+pNMvAR}h10c?MY1;XNL^A4TsH=Tb9&IF}vO+x9Kc`WR-woAfk6 zH+});cJiCvRqEyqeZq$$@|*2W@V3Xe%N$dU_k%2{v7#3`M&5x7)}I$pdS?kGG!U%D zIxb@x*)-W?-4>{ajFIZ8goQ(c60Ao(=BH2> zgWO|1&_f!j-Vy!MYh>G=iZn~H#tm-_B+8kI8_G<*qdV&cK1b!B5goV}v8i=Y>1URD z394*;)MMz}Da42;7#uao$+)~eK5F0q=UvMWXlYK=Cl7evJAE#$`K*Y`5`OObUQwyh zm2q>}0hEJ9Hn}@mm3E4=#noRqtQx-}=FSYnG3cjRfVQRc*j}VFg;IFTqE9rgFld!V zBmaRqt2!;!_{5hPjdogyG8Nx(`&s6yb^9MTME|w%1NW`c9ymCSZGjoggJWv$!qIK? zhki?#0HCHUND$^)Gr`Apu-iiZcTW?#y!hOfrel~j-)_&(Ih$kZB85L8fltY$DBo>0 zR@ms6+;5xu36}_n9Qtu;#Q5wS;@%grB@=Ys&SvA(r!+({LRWsAm?4|GvJK1Br5}i= zc&JNnwmf}qGe2`tQX7vQt~;@1JJeyo`1CY*%T~wOI!k5jQYc54=;%9sM@!JvnYga2 z{_I!Q5v*KieyeUq=AEkse)f57+&_hC4vhB8u3;OM5*McB?l8*A39ee2tvPN{d!g-) zQTFj$rjT&${tLU2A^|<4d~t{4(dWg+%=KT~j=SPIIl<)^escaTQcj!Q;rt@S>WXSs z8mKSSVlNP&MDr<0+ZpOlgX+@}zD;(GK6d6UR(%3{ z*8ZwcB*A@|u0|paF*FHLYwQarx?x~xQ=;76G>7b2*XtP&FgD2P6hu!vd2@rC(>(K5 zJJL*kQ9IW1z9>x_7Uf;;#n;;hCK6&3&mT3gGt~bM#R44O&z%zl_tI<;2k8@d^laTq z+?g#mFTHbFX*kXqY{k}ElN^^1MTdg;vGJ#C)$E&&OL_+ysXz^Ole){;TrJotGjxfi zJfs0rtG;u4U3Rh~-O4|#M)~I!bAu18Q!8bRdEey?*XMAP&$R0YrHB zt%~L;=OCBJLak83)AnS+sp_lBaWEF3F8AbivE#yTGV`xZYfSU%_{|+=^6Mq}L3Nhs z8*B>N$mtxP+QdWy(g#+Irh1*8Wqu08TQQ-*s!XwU)#<|~Lk)@K_-0MP!IC!`jt1gy z3NtL$$L}eVRf^i4w=F=;@N%8f)fEX{h_GI~Zncv1864YI_7fLu3l3jQd!vEZtKNc= zQu33Be@)%nM@;fX}_=};YA{qyA(6_AvypO$x+3qK`Z5FN7!-jwtaDs_mypP6KOopW`%=JVb1i4g%U z=_~2Czcl^UlPLpT&iLK5l{_G`h$~a#4AHYW9OU+n-_L)qBea`h!e z>Q-6rRVSvjltt`p<#M999}G9}sx?M5*=98KIC4w$Wvc->;tmzJ%f(VIZFpzUt#vv( z8n75^RlFZGur~h_jg27=bG1s3K=g6V(KIt>Lb{ctyHeAC%H1NV5Q0YyOXU&IDYay0 zvMS#u?C$;Vr#u6UnzJy`Eb~>ctv*-gxrAK|eE?PMMs zh84>HdXZ`{juHSSy$DomU4auGcX6i-4hTM- ztb%Zr&0&n7c1+zD>%G2qyq7o^XeQmw*&H*fl@@aH#4WUnym?n@J_tTmPdv44woX|V zt)Yh6L0_gqS3;I1fCW5U6I^)0I# 
zaIvXYa)D;maDSW`P}TAF(MsG#S_F+N4(vR{J9d8wHa}~wzl4ur!ba%etHzt4Scda3 zcbKvPY`UO@^bwAD3GYh>`|>Xu>XZ!e8a{!!g?!Cn4CJk}X@r6Ksg>1)>&p~Pt8w&g z-pL3tCA!_U=_mOor@iGARHsR?yX1?)GOBAoQSqRsyC&%f1qQF#&KZl^sc;%K9cTt8 zloHN4nzOa@TT6$pv&-o1i!vVDqI(yvZL#!AbZtNWc5Tt9aRl}HbxAZ<2V%Lo0ZcqD z_$9i3@oSB2L;gDYXg+pgZ0e89gp2`Ph()!hq%0bggOnsi`@0l1LvE-J8#mp%@KPld zaZS(=hGcO$YdG!wmfgB_UT0l`OffO5fa7)Hu1<76kaA+>7sUORAFC6LL>*2u7 zz`c#3l^t<%uln42)G+bokvhSU4MPGAJHA(IAo{x~50TM5QCFppnv^)a>{jg3zT}dj z1CN707{p(|Hblv<9}1sCrn_yWEV@tCNqzL&vCqtLY6)V#nork}S9rNKvZndH5y_L} ze`)5uI(DExKtr7Mu0lCrj>dlVN|v6*k-q+jKG{v>fC{(N!1XG+Z6*r6mD`9Ms@L zBc^KinbXwYyKpO)?BJqY`nIYgzZ3+vTrfMQ(iFsNr^LrVqk!h`1=S0IzGbCqzPYaR zHcd+M5r{h9Rd8YQG8K593rE9Nov63lbE{pYpPFUD9N{8Rr+*iHvlhW1VL|3&FbvS0dIB7Ljbc!=r zT`Ia<|J{?o!Tv_Cz3Xaco{X;QSiX7X1F2y8QA!hNTH|Opoff3eEHdQ8bd_Mmm|xU8Wc z0b-!VrRyt>=&U=!!5?Z)G|q6VKvafk^RY9N_ds&QA8YpD+mpK*D-|_wL4~bOAv;^F z@d)(WcTusnc`QxvbVryVPql2`TEV~xu7pdp*>d_Ro%JUC;;+gr<*vKGbH(wFC%rUc z3SzR}GjGKFMQ@6?J~&_~?gsr!6u<=}1^NbFmL31?LU5kM!J8l;N6iha(=0@4VMgDf zgO775)7(H~&sjRwN$!*UlxuI`)i2x+>a zc7prPz{KlKhs3_P$0=#Q3o-Hyo=h|rhAG~IM>tk*I`pFUgH0*<^Q(30LqhAAwAb#2 zWckJhbz2q*PREudaTSS<7u;uC1YW0}s@iWP_xemN^c_LAk+QyBz)ABVB-dp%w~HU9 z9SauVp}Y6=zS0iPqLj1*D_^=8BmD88saf^dd9U&HL9xWEhKbtcToc6FuvV{h6(GGV zh+#b6@#edMcdcGf!m9r!lzss1oC0G`#j|AJDSPArK+5nbKANVpA}48)>-@={gWYZc zxL3Qld14E}R|BznWyuO4`n_ZNAY)jru_kd3J=$`jjQ9$2SxugUn(tms`QSTm<83Hs zxZ#$k#AGLqF6OB#<*8(=*G!QAfjCWf*KKO(mO77kRfx+bYvR$A=oO$d^aO|227Ww_ zy^|v{z&udbgR6g@sF+&4t|hBi`rMuNkF3vIWT;9LAW$_j-niH#4BPSdWT|&sH;tKb z-yQ;bG_cj=j8-Ho(c_&uXh2gn*`}fr3BrN~C{I0#?4iE&i5B|(dIGD~wZT1tIivsG z;6TBl45lURPlb)o$4xF#BUx|L+C<=NG}go>_5}7v2fWezgrD>%SxUYRW!f%U6lA+> zfMUhZ4VRQ|mM3zcl)#^H8PtLD8+UmSDx>U#s{T1F%)Y=eQSIGgSsqW&NHG4>{;!K` zZdtEyWpS#xQfH(kKDQvUQeowNsgb&%!`%Mv!C#D$X<(-JN|VzkpMP;uiNl#RjU4Fh>8s#fUBAEP{OVVcN3cDh=jfAa%mcym@=kMuRiu-e+bem+vdvd9 zy9AtTEDz2|@}86b{n$Lpl^%uzu{~Nv9V%HmXvEEO0OGv)doA*6`7V*Y`wh%IdLf~- zXi&~2tfw#ZIG8jrrAF@7-zZDoXLHlwr$cbHb3!u&~Qn*N*mzh 
zm~^NMty(#>EjqyKK%#L()n!T@5moEOkz>0l@i)F_h<*BigU%%AszZ_41`at&v7u-# zuzb|Wj>mI!bgoHgD82`SLs3uxIi5(j zJ9vpKA*lyw>e7UAd4906w$2$!OQOUeu|%owx`1)Q;R?;$N(qkf?;X;rw!+~W^>}A3 ziGkewF>Ev0UCd1i6~if+diNRuN&e@vlhAWyAS{hIW2tihVg+c4|At8QgD(`w|~VIi6d!u4F*~W zQkXoCW%_S4R`f%`ou%*=in#qPea7v;**f2)-Uey+01b(N#5O6LI3+5W5vn08 zo*z?Sv#y#zQq+7kD!@~}EADLY(cQ-mzRfdIc-vOPMyrVQyUVdrsh6yIoJP;)6mQtt zCvbqb;bW4=87;hq;N}oV^WddLWkI^D=%HIf_PGTx}Z0t#$B%i_<)k^#8i z(C2cb9i>ht&o_GUOiZ5~-ub*=S81c^e4SdbK6E<4yk~KqerQMt4=|tt3=^vsj*SLc zaMY7zWED61rRO*)q-~4O@zCj-in~!6m~J5Nq;LlOu>i^#4Le>`=c><0OzP}7hpik$ zQ{y)-#1`=jtc4l0+QW^#vMtze!w*e~sAZ(Hu7tR5r@85oApH-}o#D|?yiTaOyJ!hE z>)X1WwND79=Reh3ZTvCv~6s{W`-`Y+;egFJ>;S6w9Z-K#`*?ECa~lhw7I) zM|nHGth)Bs)w{oETJVCp(WCgmGgQWR&W#5TZ$Gws0VYYmIUukMAjJ>$RqA%sX;Axb z|6~)YF74K+yCU;~b+fMOiq(k=eIFU7f2uy!kJ4!nuRQ39>@wq``&rxBx>DO z-gg&SHeUXf`#*GGx4=cN&||)xRbIA`^8;!XKE*`blwsOG`5GH8sXi&Pe68!ekhY5D zz6O$*;d|m9!$`T-M5+45wHo_amcRd1@wSx9#0v5>G7q~KEG zlbL;pn}6I`SZYP!ZcQK8KaT8<{#l2r- zG0RLBvG17)w(%46_>eV%Lv<6g+pFe$II`gHLx1p5h$X*btO9Yr7SxvrlGSyFo&rUy zTgdwf;2_Zy`pRJrz}cJ8ncs8B6Aq{%C??J;%uW0M#Uk|qz{G}xCU5G($`4@BV+`pF zcB368yKky;vYvodQ~GUi|6KuanK8IwJg>L^E2mBnP+TX=FW)A;Xn+gTccnuGS<4A{ zeRop1cOP!TaPL*8r0vz(`>iQ~vXJlmby;K#Oa>k3<}3({C)? 
z30Y*O#uA%K{AO>AzT}MJkKb`9D;tXn_iV7{LD$fH0GYO-e)!Iq0yI2+_UZ??Xy4R+ zyJ)uMm(pe?3fdokx%!XqQ1QeAoR&>KMUIh1vf$1oIZrvD0W&ea@BS>`SbRZQ-U&$s zxZ*OfvOxw}v=ktLm0x7r%s3B0_qfo+;1hhkk4ZQuT*7-Xzh1)mwmO|YvGgMNkBcBn z2JBNQeXs1$pN*MSXsD)WO$~d(OuP_hbJ%o&1$gtZsO&MG=``~hTsE*wb04zI;SMvf zWZ=+D4p9ZGe`!?CQC5R+;5@akB&VH z9QqclvV+Z=!c135;ISmbJM_m8!NBgYSD-8`n*j=7hY2HUBI=$zga+{bdyeJ%Kcn!_*VN)dRz#@-3HMhE3LmmaWotPDuCDQXuq zW;(syjD$;scn~x^;~oA?0>${_zaDF7nS)S-7GO_67lru=-uY`O!6MaW)|aaZXPf!C z_u|(>dUfQoh*1;3WKFOFfqVWDc1TcW6SR8H(RDXU>Gl+=h;nYY`EQ+q@`Fo7Y95YM1%>-S=~=B05XUyTjk7{J;G*7n-355K+c$Ev7N; z?E~!-7)9b$nyPMuA}5-4A>?}I=MRRYO=wZdHd%`?6&$zP;e2$}^0-4;==bjqbBI=b zA)@MvxTvM^Oa~@;KJe~Q`d1rE&W$je2ER$Sgf?%)4zCJ6VB+voTxux6TkbHorllrR<1$lTB8ABK~;IKTU` ze*OU275PP?Fq2DGCwlx8)57CODQclq#I1!b0@p#WfAhE83SOvS zQ0cu?AOjxsV5%yz{fK5cjz;>KpWpA;vj=EA_OzEOSlJ0iSDAuzn-iW8E#&|Bva8_E zfcz-o;GO1w2MVI>uCi0B*KhqF-{+33Bw@6~cezm(lX-cS0cIr$1e>jQtd z-P*nc0xc8uKloQ`(K1jq0_fr5kQcms&@RpvnO_9&eze57xRCStH2@+C-2dj4s0fv# z`;#3JH;p{L+i@wqXF8pVTc1^3fh5_?&mRtHBZL^;*^_)gux^wf_P(iLLFcAeX-Ty= z-%;j##DD!uK)044+vMEo`JJ$@l{jWM@X`V zjwEpGfCe*D#y}0^lQ!3q_%FXG$`B}%{mb;e9mBVm{C&J?Y~i|XPOFYAp7(!_C2Y5T zFaOiaxfWbWr0)Oz-$_Y(N80{$Up{mNjMNsq+X|q4+3NrIxNuzmuU*}ZQmB^-oo6t7RRWJ^sy>?L zS`-GVDdsJl|ItpuoO4w~u$4a`S6```ySop*D#aseN`cee+;L>F{ZOUY0J zBGu7B(|jaRf-(&`hB80%zbNXokfeR@AezBlN=D_s2lb}YlgrSv@Z#w6-;2OJlpClx zV#92LBs>_f_>WJ_iAb`t4SWURASzymO6gmh$E#6>L9@hZS79@Nb@1>#wc22}Im@5+ zJnx2!2!k&c!d$4#ahNgWB?UaCTU3fh*R=xW~Xah_bG=7;3VDs?F7G(;jpj_7lx$h86N z){&@po?rKW{9;h4>%RT&Fm8ZUz)q@S^x5ZE;qY|bj9@y>&U4M4x6Uc36~Xko27HW~ zXKE+mlw5)aH1qGFF##^?R*p;6ew2|vL+kz9$g6Vq(>W{nN2k#8!tgV=R^zoo*FgF_ z2$tJ$@ObB$h0kAYE@+6j7jO40gbh<1m_UZzaT7Odqgxx0(B>v`n^0x@>GeJDsYQHp z>UMsDg*ny!)D)^W`&eKIH7@nNf?=o)Mf;9p1rudOUW#blp=8=^j^0x7PjLE~IJepYsAgJ`A6DG~80#A; z``km+aXY7l-mk#+1DehIp7f34*44lw>8?>=3Qiyt^1c>BU=rdEYN{~l*I&b6)d?sP zLVyke@6nPx2E(Jn^W+S0Fv-$3-&Mvx#zDb=ZR@ zbZy+dQ3AciT^(@#gePyV4ipXHrXn^iGQ(dC9f(b44C*e4ou&0%iIMx3ESn+Ftt((% z*-Rv%56;-D97?8C+S&&XwemB@3yi6Wm4O_+FZLZR{eg&oE;Gx~PeR%bZH%ds3@RmM 
zg>g%gJ;Y5?bZ;9Eg&G$mN1aNvGUGDcVlXo_YQ_t!Trc}=MZfr>7*D(5_m*~6gW`L! z`gvqeY)P1azJRjbKpCi(a=YD0D_SHkjnyxe9yKS?8=*+g+w61wrsN;e?bSV2Wj>LN zsSW!w2aGbH4?E(5p*JZ2_)k;*@cz&!uv}@*W z^~Ss()z9aQB1}(UYngp2ezHGaI;VqSdzoXjg{-W!Vh z<_=Jin%97{rBiq8SAJ!&{QScbw1(H8HrRJ3(9{W1W1n~A`q@3+9VG(?Zt4je9=LQN zp4oQ&NqZt%)<_b)7n1FQ^(XMRy7U<{;;{}hV?^&ipXRMakW?Ao61b@aq0MF!xUT)m zoq3PU`YJ;uZHGI|`g}flu0kitX@7$jAlepVf=Ty9%Zo{wdo~EZ#4t|oVbyui6?$s_ zF%C8O`MpGW5Bs}COln`$u!N)VG9^;T zrVD+gC=u@^Wovj?C z)GvsvoXFYNLucce9lU@uZ%NP2CtxQ(M33>o3DUkERFyF)v3usMken~KeQ zVVLisw0`TdKcuxog0#u$Ea+j(V&rhdo5wqg0D-Y@k+Y~NtgNtakZ-UbQ=Fnt&<}pC zzG(&v7b-Q*&>F4JAmCm@uSlOq?t+%{cppy_^n7GBE#Ov2W2jAuc0 zi5v*Jxz;rmkX1_@ZC-)v24GsMkAksPk2}wFtv}vmAUxG%iQr|ck%@p6p*RX0B!+7Q zPbOJc`=HhI#c9RO=Uul2hnWt!veOFif=VZfF@FfxTlGVPdz%RXcWHQ`mZb5$1n{C| z>~f$^>+@nJXA(gfYXXdOrNdAR#OP-nY9SLZt#wW?Mn|s@q-?Zb(jG0-uLzNbR~KZF zX1@_~sfEc6adt&6a$L^Nn8J?{=(2}WJHrF^O3>>{{oeI05*I+i9p+~qj_EFgOKc2X z7DC#L877ltRp!12ReY@@?aHWBBwA z59GCa`QF& z!xlYr2Y~AiTD~IG$XKdI{uEh3E!yi(fXA`xFeJpErcO64h=hugK_wbpJwUMUv*TU$739Kx1&4SMULEKd^wjoiJA3( zgrX$WqLUu%_U9j-+VTgObLaq4u~NwXooJe8UDSX=Wxy0{HS-j}6^3Yf(wZ17L#_hs z*oPy&#vYh)R_Pm6&g$iC$Q_51%IMZ-f4+C(nC%)hdJCqK6KGQ4u+1S!$^#GU4Ueygi^-6=;_M5-$X(cuKuvDFI-9u$< z2%``ii^?rKjzGh>9ny7a!llvJN-OiINkxBc^cn+4U*^Iv@?-tl&}#M*Es(ddbMTZ$$J{0Xu=2?KsNsA#NTh z8h%<3i5V?O#7{U+p=;k02;~JmvMFQvsfQ(9g1Ni*6_&0~TxK={kYh5ZtEa5VOnY|L z3m`YOt<-ohT4plPm(;xP@U9JiT7?B@=vEr`#P5tHJc1$c5RCM0nx$RGAxJu1&e?Z6 z9@OaL#^QN$W)`&2JJd^UOqCqQFC}uvE%ar~#z}%`%BsbA2(A%fW_-W&u20nz50_lK z88=Hc9DiSm;rWv)#Y_HhW9MGt6-ifRveSnmm_SojcM_wOMTI>$R|1eNgBJHT1eLpI6Qzmh8kPQC0rPltY;4v`ofzN ze+%jNmm|8sVnC<>O@S>{n~f+c_Qa(uNa2vO?C zfl9&T7&PgI$mB+jUELK$g|mv9z~~Dt0U$w-X>%^g>Z13P#B4Hq2xGQnHrPLGO}fR! 
z!Ps^S3XihQ(d)FNx1QM(fnSsT>G-zi5`T;qNbBc7i`WRhcB&6j5G2A{)uljn4G;?> zN3LSn+Lr*nh0sm`EFq!5yYcx#UtYJYNpxtlOuG;!ag40|6aepz0Zf$iI?s$}SaIS^@wc3XzM<`muLx+l8)i7W zGyH|FY?kR6$N(wI?4xD%PVO8zH#?fqxkSNPcovl@PD^$A=)H}>JvVOpRigG8&8#^*qF zV2rmfqH2;dZDxjaES*+toYW9`Z4oGJnfATket+MOW`f=gGq^rKH8uJGD13>QnpVMx)gaT&T}$%^@Ox?j}8@BP{@8#QK_LI+$@SrmleyaEO5*K7OWdf}{;q_E5dOHI1b4U?Y$;|$Z z-fbv_lQZbAO+aS^?YU(kZ^xuL60j97>(C)K=FSzmVH^ZETjVU|&|_0zr@A+=Um2fdr}sZ?f8qA!H9R!yW*ssQzBpMYjil*JwX- z7c%Jj+&+#U0Y?LkK{I<B{Fco-ro=4_p~0Hz+(%7HsDD!*<5 z?Oc7RZougb`Hh6KV<-3On7{XEx1Bi7&i$D7s%1!6NtLaqgFlB*(YOGeCwTuKlDOEe1IVq0EIhkFULtUpV|{%^KLoF9qG`KSGnb2 z8gy!!NUak^KJCxkyf=PX!mpcnDQot1C!KuP&Fo-wJz7TWyZvt4;az3Od=VLQ*fm^G4zY>q;}+_`XwTHMx2Nk+dPLht4J$K0 z8yYy@dwA;N7%DI9iGl;pGc&jd~!$2LZp0Q#(0}_sW<09qUNp5AF z!3=;d87c*!zpQ@60LqXJ3DZ?@aPLe{v{CRh$CB2Noy-t+*1F@NIG#LsL-YhHxEPa_ zX7sU$r}^G41&9oD7|sa|g82neZF{q6<%?qr2(+_s|BtlTelhFa^%`Ee%#sC7Yv$Uh zl`HWrbM!7@Ipp{=8gDxskyv!arrcQ8b{LXbOA7Jo^*hB`<9&F_>(CsEZYXxvT!NN& zy5FX}wkFIej`{ zZk>ocS4NBX6k$3vw0Sxp(XK}mlQ{(?Jm8JXGwrJvn+wWpv2&drkeei4Mbd`p2aG=zW~jU4y|% zY*+vo1Nz=h;3ph)JuY~M|vULnyF3F5Wi~1;} zRl%N*0|2~%DyF&G*i;!Jy{v}qS*wmSLJGURU!?RF6z^dH7Sa&;64*7{De44)`j&K^J<4`)QoX%4l#=b?6WV-nb_EfP3OrV-{M7}FAmI=9}~N6Tmvg#^%|^_6KR?7|Jk@0 z5bB%+C1vWVfB7^a(XLceE(wSrs!5sRo&sP2OiL3S(OPL!iJf}SHDK|rp|+M+zB1da zdBC`3+#TqmLO!j`yOSMe(9az&Ow(jEC=%8V?9{Cj0lqM01kux_{(>usno7#9n&I2g zPj!@@M7IMWv;(+Dl%A~db^#_jb6yIMG;wq2c*i4&+?@d@uZ?Bz=6%h=;*N^BId)hV zXnk?S4aE8ra7L99P{VWitT|-@fmmZAM45_(AAwrR$p*!OP_TvUx9ud}X@gr!1q2C* z2Pg9pD5|#c8lan`m;Zx}RxaW94VkVDz>X$9?aL*;w#ALoLZL;V0V~TB%<6 zfcF#rn{2(5u*{Cd{cT8;gO2xf>|X7V(>q7?6K$@GEM=+sxvCZJf12Y`$|ZmOa1+Ty%f+;AID3&6rgiizg)PU?(2RM8s`(0z^DMo@c5h7 zPdUhRq{K2HMk-$#@8q(S6}rv91O(1JUDS+`-`c0)aMr4`CgCMDp&0V(sIm2}gNt_g zvX#NFdG!N@Y!$^@HMt?u7ppl&_r!J_y53ndGw#WZ;7~=0LyuA0zA)A&ShkYK&V*XEum5JAh{ElAe8pf}aN5i9Ll) zeT~}ibl!to#+Mj~rtT6P(%xSFZ1PQCne9g+qYL;e?36s_NlRz7)pYAjqC=m{xN&Y5 z^wG9)#_mEPk>$gx?T4>d$pUEbqp0J}E6w(1U@f@gS}^igqdtFoH3A2$N0cz}MR*je 
zjp3u}cjIyV(Za$NH(1u93qB_s3z#ux$n?R5vL^!48RDTH+`avB<_N?%&vr|@a_(%w z&{2;ExtQ)`e61j$Dq(P(4ue z^*&$JuWMt}SGLZvM$G`5Kc-6CJqPMwk5i=;u}M_Z$rRiz$Galie9ffSd}~MbSOTyX|3Nl;HCBy;#Uwsu!l-EWohq&LLkRsu*EBwU}o~m%CajnDB7XhP$9(a+VX099?rJXuZ`3G5*U%f1#n&k z?;ucoxb)dxI1^V-{)kC8Uqk;EL?0B0gvp8yLzy=pmVhM7rac=KQwO4-v0+8c`{Q{i zdC;Nxl*V5GjoLO~4p7Gr)zZLpT!rQ#3Zh)7&T59bpbhqy9ZOB^F5kJWA{5(QgptmI z8LcQkw~hNW8T}yKq(h6kCe;ZA5Reg6yzkG~31pwtsz?gLO$PHALGDX>R(x;M>h)h0 zAyz}P3QipbsQXV({5k=}=O5W3V>f7~O{)mj9!PtM7{hj2a-}v9=1FQOhsl;U?cZYA zFJ2jFuh{s;L{sfp<35IUpFe}N&iKrUTXU?bu+vEu&dY1mn4pp6=)>>#{E#M)YG$BD zLb?U%F717rKz0tZ7Ri&3mg$tBu@3&_@8Pi%=->J%xIVq4YcPpW)1E?~3Z0q_*+qqp zG!~lM?4B%no(Kf>M^u!vqJU!hZRi=Hv@V7C)uoNh`5WxfpeP9Xy6z1Uc z68>$V%CUme0J6c>=();nCLysfE&j1fWiVl+KRXnPM`oNvvju-#lfo0qS8PBA)X0WE zfkw~3e99$o7L#! z$KplxD+_EKH_3aL6K0czWN7a_>>W!`p;cX}4vUpFj!{g zfhn2~S-u3$@7!I!6yp<|0^%o7&A2R;_3gS3TDC9|aN{VM@MO+hI1))(4qX(v(E>#J z2@i6`8$o~=-u#43$)7ki4Fpo|?a}uH z`(dJu-w4oXtJjYKQ-g(sLV<9vcK*%@Yh2GG8LZru&q<9Pl}I4)u}0s9-=7ABO)IUO zgolif#3L$`q5Rehk)RMq4*-|)I}~ek&~x2nKU_Mn}1Jk zHY*V-3xX`TtU*J?B#p-%IzogRdj|BT@;4J0#X>H#I-i73S^tH|EmTLNAUC&HU1Qw`|xmT{DBe+{_$)3qVO!Khn-*m^dlP=z(O0Ep6|O31w;PojPa7cN3z)NRcG z8z8z%qaQ1CuJyCg0Y^_vM_#Uhix{4hhC~g(en~_6JZL=-ZfQhs)y}=G@Sk}FL=@d< zhM5WV=cL^!`@v`dbv6>F5HofU))wA5lB~tINm*sdk)DIi5HQ}7`@SgYC@`}$)bSzI z!YNeBC@c#c1H%oiky*y#8fzf*G7<3%tzsf{|2hpYo1m9Gs1#I~135qPL^-%A)d_VUlAa;y&t|wblPybDl9nD~~RL3}lJ#;z@1=eY`IF=GarV5rAMjn}WVhoZ#R zFIAK}AgV>fCR!Ob?$hBZfD{aMVI|^7LeJp-6KDPD>@;1>yrDQ~sqG5O`|MfhQi}Uq~1jJ za#Ft*xtEM&2*%VJ!y`1|Ya4r@S2Mn78q&--v=yf^6&Zveg)QUV{W{xFxku|w)#7@~ z<0l&RvY*8%z&ZL+6RnggPGWTX<1H(ddE^?PAq5+ePH01(AJD#_f?jFyiq|<>hc7x4 zwqT~9_@Tf>tWJH#y(6xzTLRaIctR+7H)RyOUcK@eVhM=Gv}x2K`rqdWjddgw?fICa z```8=Rq7h%#ggAU*G_Z%2AC4t8qpv_=fiBE*Srx<6Anht(Uf?F+#at@Ew0@17?uMh z8N-`+C^kCKDvNB z(5kNG4g<0>@dUTWC zow45VZrqI@@}2X-?*%h75@9;WB3G#3`&?1~-g-f_AP?ak0KJcv!4%={qtazih=!W9 zNuz9I@UR4=NoHd!0~8Z&C?K_+on&`tl{g&n(ruL2r+p;TwIS4E{!)u^ch2_^|+TL4XYWte3K#(G;sSQznr+L1<83>hr!M 
zp^uj%i|!+|7#j-Z5!Sq?)MqtYsj(aWS{X6YDPn2+`@XE=zu6LL)0O_#R81|ZF>P&M zw-!^K6UD8zdbJ>la*lNI}ol`P@R>e2=1q)wZfp97sI5eRHY#Vvlz1_k3Nc8Sar+E8Wg{1&Teeed~CTpG2Rpb}E z$_x}plv9nZ0Ys^DsKI5K23dza^&G3@=#G7T*Bj!j8hR`3`&^(>O&^q+CY2tbHg$73 z0O&7%>~0J9mKi-k@5F!`h4d;2Ovs`p0S)wp(-^JH5r`<#oUwkwX^0>|bmv2DgIj)| zk;KUOtza;Y>WFG%=*bnutE|>>Y{l6dHyYw8tqrm|Wxz-MIJ1Vcgbg#6yk~-7V-$KE ztcQxUt12$&Ai`YEct+9t`nN==ar-lPSH7_vLp86Ss=$UD`!mf36F;`?P$y3vTD$^! z4_Y#5yH6kB+<$>SsGfLRPhep2>#4S7<{Do=Xeb3A%iL$mnkVLyrFM#+y^KSUVm)yD zW`6M}wqED#5JH}q?_ZbK4jA5eXe1X@2j0T;LVt2?+2`qu+RKEyZKwGcFvk<;P#h<6 zLK(Etc17Clbuq4Kiq(XLzDRezUy-RTK2r27>`ND9x|u$qtd8T$8gTvgp>_;jyd7mS zS5|H$T9!0jrLR51&?jL(#2zF`ld$LfH?^LBdL`rA68oGO%ceYITE0w$oVeD&v4g9Y zHJ$ZZb$BcNBQ|J#Yr%TwldwdW!s*Cw4${~ur99Z2;X{eMM7q-7;r6v+zNA|*R6DYB!?vNtV6 zcJ^+$uD!`D3Yp2sPFD8b>v!H-`h0)iuRr>$`@YZnJm)#*bzbLno@<%Iz4S(t?&77Y zSzY0p54~p|&>@xKEIoum9e$V|_cF3&Bk{FWrxf<0Vr09NTp|vcZB?zDJqEuL zABU`arrfkmczW|&*jDw-kB?XD*w4i)uU8wT})? z_wSUE(%fu!&9vwp42hy!9qd|>v)q}^4Slkll*?JX6r|cw zyZ+GRJJY$$bG1iAeQIfHYd0hc_15&;DkSVCdiYs}miLqK=G4}`i+fi&i?{A<<*6Vc zO-oyucU;L%dy|^40f#JWK69ee2W4#ji9azHjmmfoAg+d`O4wofpJ3-%)9#0{f z)DS!4MSeq##OhNn?M)&Q%%rdrOU`gJPx2a0o9_;;@!sxB5w(Y z`DTxH#G8f6|vXrdke)q~u()8qZ_gmY5of~z_ zJ1XS^_DN~z=)$7ODf?Tzrd!?p8}Eek-OzUPlj5-(?RDboEABfh^^t;MI-B#?U7JW# zdpG8&r@xYqb1>~zFer(K4rgg-`ARFBvnd&FquI!F@h>QNsVI{ z%pR5omCX8=D6V7)>d;Nc@8V{%PJzRhj_K!m z^Jan6QfGmnAdURG8t5pWIS-f5TNZJ;FHSd__J3J2=I9=E{RaOZIhdPr;;WsnJFMTR zzS1mPPMg^o4q+W;yL`R(Wh1Kw-p`>3nZul~D__2`f4rWy^S~I*l1m=OIQwox@QRKu z&DN0xpEmHgI*CH;o3!R<3O)PS241V4Ro9`4tB{O8c@QV`VXBBoQbZ@e3axTbIxn%c z}o?i@a9y6 zN(^_jme`YGxAL-&aU0Z0MyIBW@o46q|Jr_C;Eh;>r1gy2xAsVZY4cnq=f>$Z&qLSv zW8>7+dlUWR6zo#?iDOQ_$h~sjw;ODF@3<-bvj`KPtA#*BJAV%DF0fh5I1KFxpI&jxmEdOBSrT37GY}VbW%Kr)tx+n z=&eNMKz+N@6zTkgF()Ny(6r8NACvVjrkKzjs2pT?bd3)mC2<(x&&OVOz`v4Q^I>F- z6%`+@>!|)Z??f1kNPWe({cXC)J{)99V03n#-jr)3RC1fE)7hGAnGoL|Go{d8>m)r_ zw%$T|ZX4{*X)d-uQd8yJ5d=phc8bglkZZXn=tYLDq%V6{8T@1nK?;Xixr z?OZ zkbLU{GgYB28)200*WK{+D^9#zx#a$a()~_k=URkGAjcP*J7-QOw6s~D&*^N=p7}xb 
z%h3E_=#XP>vj(hXn|?Ef%lcZ{?t|gtG(_ppmJ$!y*#^ruYWV5}E$gg~`_ip%G?f`k zHO1GLaYhMC|8`F9;~)A8u|@h9P3=zQZoV?wcz^B7!3eCR-E9nWz5?V@bB(|hldgH% z+qK(8eMu&f&0WC)x2!9zIy9b8cy7~m^t?Dh!1#%Y<%fR+!NcXDL<_=OpChPD~G3YAH35!ga)LwT)(ps(I=mn-2;7Nh94UrCWU^VKXPC`?8cV z74~-y&X|XZ_L=j{sttapq7_`L;4=G;DmNhcjoLwa%#CO<<;|*7nRKU>d@}IFIr!zW=ycE zyG1I#{j{n4g2P1x=J_1C4Dp308FrJz^a(0OlZgak>t;J0Woe+Ny(BnoK)18mT1*si zad>3K&T@Uic>8y%w>!Q#{}ME5K=5rpaJ`b0@pY>`a>C7c+{*R@1H*3*r6h7;?-01% z@kDMz(lnU^qUKO+rJk{ST6wJR5!D$j$FEB_A5ZpNv4Nl1I)~S9;}oWO>7DN^QEKlc zyDG!pBT@xDg;+;@uR{(sf)2HJ$QI1-q#G};Uy3bP$#G3AXI#x9ks}!kGQ&fg&2M(t zHm$gBI}*IlFB5WKX%!(y8*uLHPvj^mh}8SYd`VRF85_ieqha~bx_Z{#xvzfK7$i#V60tZ#+$3@uHw)=1W) zq(&ONG3zo6RV6UIKhuCTWNDy(| zxgx$c?)xZyt3_bwx%$-h27ra_rh>kgiuC9|2}$aKW&Uy>zB^eH@^U8epFUNJ8OLBc zr*D2^2ZW?{_KsS#19P~Un0Y@hnfG7wT9cGUDJELEhIENE&f{155EE*teKCr#kc4m+ zL{PXo8%&I6Ep$iL`0PCMgPCUXzHYPDMgQ0vIbNOnNc||MU(T#B|K|vgOnb$DLjNc) zI^R>Elz0Er`4cxq@X>Fb{Ehe>tjFBuUzzaMYk<8n+!hVuCwCy(@`g`oD7wvUI`c19 zF(mLzP2bw^k6Y_2FAWyV5eP@%O#J%y@^centK#Ixsg)z0I;6VA?Y^`3Z(102_)GhD zFF+GUtE|pILfXm9Y-;267Pi-YC+MZ|5aA|RPP}jx8)I%1P!=YaQ4XUpo3pDFjiNTQ zU#5iPV#@Sa3Oz}mrTd(ZW&8uZJ@@InzHey|G$0#3&EHJpkZM z&Zy3()-h6!aBX)31U>QR%fC$gASZE3IF`p@Aap(|58KJL5EdQ2HL5uiSz_b0IP@i{ zoRMbOX7k`^{#FBP|8^g#_VRKI0eU-rgaGlN0N3oDOFJDgtd6-e`vq&uH5R_QC--x` zuqx_mE@pp|Rsz0QmmeSs7~X22QL5(1a3Q<>))v$#u#>MMCugH>Ozd3mZgiwuttkyS z*|%1ew7ve7LTtOpeGO7syrbT?q6N;aIsuSzS-S2}!zM ze(j*0PvzxH^8xtyoHdqBrK|Jd<#Vxsex(IN%6IC+?Y4T2krPBU<2`yZ(P~0|RJDf z6%Rx^rnZ@po;9;pQ|N~9khf!Yh7?odYa{Pk!P?W;ozU3Kvi0VKvmWKaWFicu71MXm zVP7@p{vjr%sh+UZ+9(w2$lgSCcfN%ta?;yi%-K!XWpE4ox@Obqj^J%#%=3;^QM-{R z@~YKVE0s#gVU;a2^0&kQgL(?*tM%s?8;u$$&S-D_P%szn7qX76MqkFHmzg(!ZQo>k zy_I8>vA}Fuz9PW7hE$ON?AFr?HpH9y-KuX{dGGXZD$EovJo>#URyKDN@_7P7s2uBj zQm`{yoJ-W(I|mDTzP>dWzeReLwW%m0>2`N@-%6s?Ezv6l?vqtaUSh~;p#vU5DQ5Ni zJ5}TO^~La^@>16I703dl?%(O`#g`%p#O?H_`T{t6`!?c8L<`b4n|DmQ&#;~;NPYaJ zYZnqh;JjGUI&3+SaxkB$M{Bh>r#wG_Gl{M{6Fv3rOduiSS=-+9+f`-dqhhW5CSn*b z=pUW&4u7Crj}|<|o!EB1xt%7SW^y=!)cC5k#HnYhWCDj2>c4-izDQ!gpoT-HS7Xn& 
zRIlo{6~z4`Y%(|iQnN7H8HIv455d}L%oaoqfqdp-flmW2kejlFepXw#plReLkXyJP zNray~DRf*pzBFc?6kd2}LX7v_{>{1a^?bg;6Jqx&X}bIFj}llG8!K&Xh>OhN(%r~i ztN2c^ZB&a(7qWA%C%1f{%++>+Hh!+`iiMlyitRyxfaI~~ByH)kPkuY#_t)p#^{?pa zkXiD&7?+&>VLBK-8K1&muwV0`W#cV0gxjgo1H~0Wns`^_ztha|9QHZ;CLnI$vNsnk z*TNC$C|ouMD!x*w*u;>4f&#s>uQ(ET%m*3sG&PmSf~dnd)t&QCS(LG$B&v{@KJoWHmlEMIP}lsArHG5xdEc{6mXUj*Nd$JHhE zrSWreAgK}lm}UCjy7XH=ittMo2=&Mi5xJ6QZ_uao?Q;}l8`}e#7ZXote%;^R`Cd#P ze#RgfU(x|?A`CR?ZwO+zC~=B-{vI|YckRc$N(~uJiPx!b4IuG%HjJOne-r13&7#nw&@eu@;Z0b|aggZ<)J?c^!2Z7 zO0cLNZMGU)5@;7$&lNh1`r{M&>2;*G?cT`td0EeEXq#cQRs>!@jnsR1K=4@GSKk9f zsiBzx5zWH-v63Axmj!m<(o%V5{aLSD{n} z-1%a860Lr#)%4xH!S@cD`%s=#d)Ac;3$f{=x+ZcCs{Ut|B|1}d68yjS&%5ZHd)G&4 ztgFXm`BG~zztD>YImh%wf30OL!?AmcBi747dfo{YHwl-#YfG6%9w=|336JnzIs3-O z^8)p=wPrAq?t%SBl+VO5OsEYh--Hy^m3Zn^!6}q(#O`MNMDCX?Y}!OkXC?6~@fM;>>2u zxkL)c=`!wDihZF{KGM==vcvBlkoy%-cUS1`-nnKb!++@qly2c4@(7STHJIb$1M1Q3 z4?FKg+~scAdYR0eH*xYZp?j|DJHy1Dng7JT>QX;|_DfvDGMR?oCMPr0uIoj&+(0u| z_(n|~Y`bWVhc2O3?Prn72GD5}B@#b>63aU$$&W5>r~_{YboVt&W2%3IQ;8?BZ7D}- zx)9k_08hWeK5%d}zd67v28o8(>|(8JRI8sBk$_DNbsf-TKnkojF|?e^yp;UN;4I_%`Hz63nxRaiM{y zl#s~sn(EB^5a!}`o8Ij;_+sL}FhVTR3DUr36r7h~(mJ{}V$Pgy=iVaDO`ZGU^%KQB&z zMh|iwR<2D+efu)qxlB*RKLKw;Mb=v416O~zNIW*H^Le9D9zplxc4h4iaL=cI-E;5m zKfW$B86<=WDSEG=sFcT9q-*-_7=`lHMH+6KI)J%oc*7oVb5~pJeaZtEN`Sl!v4{Jf zpOMYvJmpSD^z$fC&NPL3Jra!RzDse!1_$-(VB0H5~q?Nmq4U$>vIW=tr_fs z9T8HcRiw&C%+1d~BUM4Zs&T;b9@X;6j~muit-`%;%7BOo+f>hMsx`N%G3Q8`Rt6%J zYY!88$pSLBxp3SMxA~#u(MDf{3!ab&|8$!)nQ7mdKQ6Vqy+F`Q-m@MnN<2`8ewemj zNel6)pC+Il7(BA{?%uw6c1SFGhn439gofS^hc$GH>hQ-mcYWuV93C~1iVhH_{k_7L z4b|x;QcDkI@mj!>UgIE`0-A+J}|y@a;#V4Wz*{3v6RkulpIWukV}KJq9XW4-TJ| zQaK$(eNp1WU3u4C?0-!9M17}dIZ%CF?M(aFIwhUlecG-Vr5N0>TkaTXI9{Ys5uDI; z7A4`bm;O_~RzA?<%W0g=6m%B8{ZHOk>3ZuTb!rQWiZ%^yCE0`%)#^Z~|Mgd^_v1&+ zz>U-fxz10UqpR~?b0uVh`SS=~8*NA~m|b%`qFhU^E%9MC%eHd<12*f6ce4Fk>CiXk zTCYyvq5>)Y9use z)ZJooxw=-d4^>69XEo;`ef74Sc|@VzX}>6cz3DT!@eLNv-b8+a!^i(1ki-gshch-h z!1Vk8;t_3d8-k>_NMQENflEM`?|RJpl*E8I!86A%zmVd#G5UrR%H{b~Q0>Ku=vrra 
zbBpdy9d@b9c`MaijzK*Kf3LGHb98ne+k10=`l2My+IwBj)M)3r;4N!4ITWg7&xw0% z)Rg!oQpCqV&~9YE(FGjF%YO;L)poNLRaxyo2waf!5wFhTfqd3gb>>Xs!ygAzD~0u6 z%*Z(KEq>-zLh)o7WSP)ikcz9)kvR2Tk8uNi<-Uqv193(nKn84<<-9jGw)@otlKtrp z4v?gK(-B7x5Jt8{Zuoq{-qHy;LVOifk$DOE_Fr6HHB@~(LC*!kftcXVS>SFXmcZMS z`1=u_>V9VZ?M1SK@N0Bt$mVJS)Lu(K3^#rx#Q6$hxIS=~>D@$oP}sqUwgZkFOD?{+ zJn^yM18rX`M2^Yb$eTb@KZ(k}AJkQ(@K%wpk$pDnaNjqcNx@bRpL@<7MYe_AqFoTV zObAgPSN_IKk9$Zv>zKTwbLK!1hwbhvWSwhB#`h8 zF^8Af#_cwggy9|w>TsJl>GsdXHz-S$&u1pRFtI+f3FJ7|l~{U@1vv?&s@@x{*9k~z zNUpM13ryx9U{Xt@GUY1fBfjUCLw2;$z~KT|+QpuK*L1s$#$w_4ZLc@P)?;`Vf)1hR z&G#&fA)*zEg$SNq0uc&S3_prdYLD;v%*rlQw-zKD2s)6(49DC0bOBYHXli;a z7-^{)IE6!u?fMmCpRDuO)lF?15uEL%)$M$Q?6duzeLh@2>2fwif$C;socmQi)ZtTm z4)68H*TqaR7bpzIOBM6MrH3i6PmK~B-Q zhEKWf$LES)>P6l)UJP0}igtcRj_ zmbOLQut;^2_1e?23ZY3RmPP;P`7NTv7JPGITp#m(2;Imy^lJ4WCc=1x&`vS}3H)2^ z*wP1y0=p4+!{C6p29Zo28QthN_-nd;wx< zCs5u;kyXRrQ9!1qga${+I+KKaAbK8LRdKW)c7n!qgwl%^z!9-I9Iwt^`FchE2hADB z*UkV-Z(z<<-5nlyXnoLeIM(?RtgnXm82VL%nSQl?-h!FQFxbBvIzbc>uU3MZtwQx3gh3QmDP?vVW9jp%u z9-939nhBQYaFFdx#PJOEQV2(i;k*E{N*5+1P7g|4x>v>A7c^_~Lk>#!%ERM_FDiW3 zqw3CVb-CbM?%~6wWXAKROd)UOVxP!_|7fI`G$xH0!fc1R_3EEebJ!-?k)xwKqc4kJ_GI7wV^BaSi$_ZZeOBE3fA@FtEPslTy?XDFqd|= z>qp3?vT1D#B#eTTQWvAU5*td-#B->1=kCjw&RX z35nKS`MeB=_mii|bwx<6thxD^G#~px>h99yD6_GKo7D?^gbsq?@& z-|{_LQ;2l!xzxPB7dwf~Fk$|KoW9Mt!KzZn1ULm-eN%|11^IKR;O=LHgN zGyu%1b1z^3&1bF4#0ipHXiVoAb;#Xcd)`=~kAJ&uCyBBD%2t8uhllDL#5ro|mMT-e z6uYD=7a&c>` z=%y<3hZK%Ot+YgpO&(o|X%Tb#Q8Fj@0Pzb5qr(!%lM4h!0)&;_x)WXu!irGI{Jm)% zNTkn_S8gt8s`nxijVXq1ppjm^?#ib}8f*;}NF~Obe|GVR4@RRoiueA&%WX|=B?$&r zvqPjZ<+Vl0bWzgQ&|cepJqy{Sq_K+t&mHL=&Esb#rQYuXz*Ww}eAVYn!phP7Abh@V z3a6MjN!r9o4?jdmvviC#?-*~vkB47kdbJI1hE7ChS3lpZy7&I4@3sE)J&}d+LL=#q zMl5!PeHJrx4NzQq>~TE*Uyg?~F@9#p7Z?9z$M8ec*{5K)Vp}>V)$a%cCNs16TJ!}r z%Kg}d(}Pi(t#Q3@s8V2C2R1nj#=BqnX^rJeu%1@#DZAZ&7lZ^>wrt|`N!L3>b(}Jq zU+mQQMTkmF~P^@mBnpToOwfzJQqMS4LOGS+saYYTnMfkgDjGtU1Ldt+faS)fXms(LWM8f z-A4>5kNcEUJP(PPW~AA;JmzTHsa4L3>m_LmppLD2FO|O 
zjl}PbtJM^8QB#O6dR0s*jH{5OQ9?mPASt9A>p^O5N1^^cef4yv|Gf9pLWGZP zsg*c)y6YMSlEpGH*}Ma$zsrXd_nEoIxM-mA;m)+h;FFABp*K24!?0Acg-H@jU<4Bz zQFAXTa?(AIR%{9mGwFU%pcsK zE$anJi9;_LMV{ICNZV1X5IQ4m)A1kBtJ9vvqfithN@Wt$-<*;8%C~s`u(pc+@PMG5 zko4mdAj1?vBFkjQwM-l`2H~?tg~QCQ=LA~2+&|&6A;)_@I3#%@E+e1qd3R|TtuhA= zs*-k3+%kmq)ElbyX|2c6QT98Oo8Z%n{0>`(1724eP>11b>XuHgPTp@we5HC8kiS4| z-0MRp&UWsQ)KR1iuEShj@kpP}1KX-A&(>(JN=ZoXHW*dh5wh|s!}~)RZ^ASVoW@D! zjF;R?<&nZ4jygz4*RY?j7^J=IBn zJ8qLv_8I@-uaOi+c48mR*!EN>ec-MNGaA|M=_Z4fjoV#Rn&eoYWv&_qz);6^$%+-u zB^o);hI6HN8aI^A?3?fopd1xGdXdD{1<>q$Wxb~W6HP|nr+I3xx~b>v4~+`xh!Q1x zdg_^n&jDr{=79C*idzy#m5+c)b0UpT&)?_FMyiM2n;=`mhCwLVlf!h+C$BvKx#EWj zXE*v|^0f$xy$eMj)|H!WTmX}TQt?MT1O5nA?u^Yo<2dMh#@uqo{PZ^a&_Ss8u7H-M zUne~SbXmr-#yM=pq=7d6MOA`H`b4QO#q~vIWX|&~&M}Cp=k)Sj4i4`r17CrC!uH-A znp2)K$MM9eg3PX-ZaGimI4*P}o>%vnio^$`q0^tC?C3sH&u~~d>(UOt>d<`^VS6IO zk7+SV#Uiy>Dil4*o_gUjA}6$BuDt;YxL4`>WCVYMaG6^xo5hD3Ywt>$`kI&(>#co9 zx*M0<6G{v8WQ`Zp_<}C0#sJN0q1ASFbTT9CmXpE9uqQ#WJHRxulNN{B2&V~)2?rkY z-S)m;%(RUUC+}9^gEJkNw;d9+9wkrdi}*~~qc>&w&c(&l1DQu8%Tn{mxG=4Sqme-3 znx88roH)nTQl%Y6gG+Hrm0!dFh6!DbHpA>PINlETq<8ppYC~Qg((4AmFc)G`>!)6v zw^~^DZ^YLHG)Ia5`w$+5;t3K50N<~zyff*KtW%vPAZw`)--q2;YXe`W5HFU_gzf(H zw9QQCPHa+6`mJ;C))Q^F1=3CIqL~=?wOF<|`#Dojsb-(;6u;i6gXd-e8m8dq!aUDv z)56O_i<|qp>2H-zJB*O0ifl&5#pi4Y7&yWXSC0j}%5o(jTMIQgNxy{ZCJol|PyJ*n z>whletmFM;5;^I{ONttAJB0L{Z!!p4-^hD=Yg%sZo#%13+HU!e-nIle`-2(oMDarf#d*(6Ev`XSIe-aTWt{(V)WW6YqimwMz z%bTCAI;SoUh;^6c+ooI#@2ll>-k4S>K7=Wb-PA<$q9m|;h(Z+cZfT7e)1#F!;o%|@ zC7o)_`MPnPyAaaEt}HF%9A**raaZmAtG$cz6#i%bWhvh%LCpgYFR^Qb7h$D}?f_#F zg@bC@(?a^o(F3bQjzjk9vEKS~6fQFYks3cB83Q~mcTf8MuO;qs0j=d!Vh%M)2KdL6 zYaOj}_RbMiTX?IVt^S|t$M@H+l_!E0{Mre~T0#Ur#Z$;TBF_3Q)BdNdV#%R<{OxGU z@&`)Qe*W~L4|jpqZNoY3|FM8wX&67tXO8#=*PYOot-&BsS2 z-5^>iiMD(Kc-5v3!sf?&Fo!=SK00-{Wx_Id%0V)ggRA79Llr!MjpCoUF(q`vz2luA zRM;bvahy9KgEbcaXz@$)Y6m~wDYNHn4|tnPe7FfRX+7w!j>kOssnAEv69_G_KrK6o z*UGSQ)qDk*QXJG_TI9`tmHss#Xw!#xiq!HpprS0vT&l29nHT3rpL1kFQ>@Ug=VgM_|fb@s^*nkrUzce95p)Tzzl73B0L?L~X<2B7z 
zKXP=`Nqp>~se2pdArK8_$=#62XFiVOWV#N&7}VeI4C23A1fxhy?%wx{cl)hK1Q_tV z)}5FG=yeeI5Vf%!23cI~NrO9cJXh}fpT6T^8OTDWLUfPJ~y(!#!M)qyINTgs(vr69h zkv971t|6#1$#Z_YBJ%qzkR1{dDr~oiWkR87B-=F#Umd zMRSRXwUtSJwa?eh{)-vr$WVg{{cWl;Zv7#huo@;vrC$RQ6pjKAmLA9xWTb7?;s z0g)#Rqk)1(KD0G-wGBn7_$LR#ha{j;3mf`VTg0Fo5@IGgn`&~n(Dh1_;UWH9hscYs zU${xZhZ&v0?*G@S=loWXH%1i6>LmL*Kq51uD^-V_)<>Y%3Iv_a@3)2M^iS|5)^<1* zvPpTfME>sgZur2t((TJ5GE%-Kfe;XW=HlNU{l`~MIvGo`a?KxHRVl2*JBd$rM|J6 zhnkp!wqW1U-NN$sLjKvYDA1G`zK3`3T;SYgp;SwdxhdR|B6O>zvboi`+q|LCqr}&s zgu&u)#r#sj>^{l^y7GT#3tqlC9fTBG4;wJ(G^Zg*BFHRNjMH?9$>19ZbM^c`E&6}@ z-WWW9XDvX0V(qH(wZKUmpR+(uDmPdKTi>$~=5n|QezPka`u!^Z_)P@3S$r%5340RE!~B%>P*u7i zCpxVM6qmNs#-KX_3vd3fPh-3W87}5a90qwJbl!!RM8yNwVmDNjg`|77W>q6Oyu#85 z;?%&^y?#}TyJq@lyAh)5L8lQ$~Nl+v|X) zU4F5uFh+EzB~B`ddb&Gl8JC{(cTka#-?by9(Vh@7$K^QYk5vmmS|0oW*bvFWGK%O= zIQ0hHWAP0PUOmP#%nXtd4qx zLJ|)0N6!M~!#?OxBq!C`4PuSjDB+i*_iA70_y4m2M=+gn8{FM6v;-nCVp#2-J}&Ct z)#~$S8O%YKaop>44brX?5|tCZ6^8VPxphAV;-NVA81)I|En6i>J0QROz34w5s ziXs?>KOeRVfDF@s@7??wc@#Jg)FmhqzOFMK%ZthcrhV|?o zE&8_`{R_CzD~LwW2Zlc%iFB8PI3Zc9u;T7gyNG2G&ja!ALfddRJn$n@(TSTQikXid zehh^FxP*hMb`F2Y&T7DJvXBG83BCH>*Z=UHzs@V+L>Z+8_alIEei2w|Tz*ex)bTyA z>l-4ShM>1oh(uftP(^tB7Kx%zRd_gL2is=#fWc@))N$e*$xd&-;YuC-$J=I&KwE;O z9+rIoUmXU>+Uh|g;7zvoG-N50(0PI!2+PX>fV&PWj5erv`rp@S2N|wB-cZAB5P4($ z((W!iP;H9kd4M{aQ4=EcgVvk{|^{f8&?yfG^A*Kh#E91IOW?w?@6H zFco(n)G6QEau&0>Bd?IO{_DJYoIawA>_IBhTBPOx+DA=}p7lSzfTG{;w)v%F8-5i? 
zr*!o0pZ&Z0^Ro%Si(*O55fJ=Q0&(vD#1WUJ5AR^{-^$9@y>=!z+p#*oI1!|;3vPsS z6AJlpsXFNWsL553{KtzR3dFCqbTonS8a4vTZAXNb*pc&q3bcW00S@j^`S|E-K9hCJ zMz*5<9?K={x*p}n)&jz%N{17`u#Fun${8QM%hGfe+G$IbCHiE_X9kr0-1%Ho`V2rN zc_w`?AUc6;^VciQ zwn^1`4>j-W38++u?M8rMAvt);_0)>=@`xRM(fu#{=X|6tmh`ZU9TQZ_f=+xaRs)p6 zn^;gd(GmJR4#v6ND@=78D^T@zuzU=D7}i3g?CU1xIa|Wff<(67*N4n~%T9=Wp5ZQF zl@(+0aW{2j0P1uy?K6Xwo5+pwU|RploU1j%bzVlm4o>{&w7liJq$%iLE4zA; za|2C~4T0XHWKV2GseEg-9*x|+*8>U{uyG^xb-n^%ksn^;grO)HFsRey*1=?t`@_NZ z)K|{|-vUbVq3Ee%Ji+eA>K&iDKqA;8KEBu^W87}0^CQFM#1&k2>gpK`qE>WQXK(SD9QL*%T7L8VtH^O^=b|5fkd@d%ro6Ny~4$?@6$+fxPRa zQO%q=C=mKr8i6s3$u^0-<3pctG$hfauq9#jN7jXMc!4BzY%oX3yVEnL7SOzXwQdic zCZCjiC5hkvB-o4jVT9~+y2ZJZ!6+_~J@JiE0MG;w4mRSB^RP=FV5oM(HbHJU+pF~m z&aVtbe;?7Tg9$t{cvl91B5u7#qyi3FqrVk1l)Bk)^~Ley%0Z^r%!%5NCMa(=B;D;1 za%IpUyg%n2eu!{ZFJ%Hik|;+a_mcU={gkf^+D?jjo|uy`DIVlS8)%TwEv!V4T9BFq zak>c1a}mtnvV>rJ6(Ks@M+$K4zSMK;z^8NGJ{eNV}ouc0z?LuiWDk5XEh7 z?=#af(rZK>g?q?;+yBmjD)cFhFRs#s$3fSUHJVRMhOY>RzS1stuu^J!g7?AgCNbal z53P39_b?xmPKceE$&y558Kb(0cpx*w7h-N=8GGwMNys5mrja1%7%#Mac4coPaCL#` z{Sz<$9_UePwY;$My)JT6h8e*Ee{895R@pM^<1u(r`3O5{)0sCfy5uB~q1^H7AL`EC zC_}DxU^)HH?a9vL_OJ@o^FAW=wVyDL!xghM0$wCuE{W)xraaWPimqfG7F9ni7}z~3 zoyqXvOzgmN+>D1T=16hOkkqTy4bAtY>#M|9Is2>@oH?DVEjQI~-dgrQv^L}ZcC8d^ zjH-f~^v~T7{|YfZpw8Wm^)in^i&f(OdL44ajL?N77%HaH4>cd1jI)eifN0x6!}*PUEtcvTC|-z@ zNsEN7&W_$<|DlmRnlNHK>!z8ueYdCkPJGT{Xkr1vbwQBFysTEeICi|q6stMZKYz@+ zx5#EBVx9kPs+&cKPIAutIZr9IpJVlsH0P6El~fby7h+HZvCr)DtaC!W+`l|%6u`DU z@N`t%+>aibC*kZiYxgo*PLaoi8A%%j%5GfbbXK0iW%W`f;HYn~_b(hSEf;>dg)P@u z_sY@yGU)5CC7X$wQ16X6SX`{w4guC3Vw3f|X*Is` zGoMye z9rw!DHz_u_fF6E6^widXQcL|-8HLpW+vXVkuYw<}eO)f}%Sg}9CmRNZ%^&3@Fu1HR z6Q~%Lu3n(Mu9H`93cWmQQv7-U;A6b~M<&eM3kyEi$!W;QO4Liyc>eTm3pZi_?|(6pq(%u4$_M43Qe(Jm%mLf{p5)@sq@5)7I9P>@#@6@+37p zGh9!zxx0eAo773}xm1fn=al)5_0sz%bSb4PIWyV|MN!U*y~|}GXY9uL#~$#-XWW>p z?KTpikE+Cx{*^pLuX3f5oQ>aU~q>PBih&J|T8LHY8jmWITbKE+=S+xj+Is_$Bg0uH|Ru--eX2{fK#O z(7?&f@F+dvxj%7t3EgR{F0r`yuWrqnFHQyvmJUzZZ(nn*E6#7av9dC#L@DbcWy~ 
z<{=3a_M8ju&6={b(C(m_ah>QnRp2@+y0Y~pEnsVPxm5hEd53)p?Y3fDdTy}kPV)7j8b0t=Eq_?(#9XOg|=&R-8-9Qj@KKs*24Jn&%=S^X+I75pU-nY;9uyGTjt-5Ggq9WTJ9U4j&)IKw>=Fg5zT6STn24F)0;F+MY)fPd4xmnVU!m0-l`;#G6E3Afy)!%s z!QQ{mgTY7`-a$k4kUJ`x+Ytn2nJ~1!mUm|KWfyhT+M~Xs!-4f<1?>GB?xg$C?QYuw zW6!v#%d-YqC;~F{;*hq!0w9GGtw}Uy4IP6xA-eWT=VZDv% zdXeg>q%}%OX=HK6$n$ixAl^|K{Eu`C9=+|?uWwFrcMh>cd8Ms9P5Qoh(ekO3#BcyL z#dZ5~J#==QSHGl|It%d{i0kBTO%Ta4+;5*`peJD3sIGa z3MovUUd1lpqxVIYNm+Qo>xeaqM-NOayUI7}e$~vpN^E_UC2-$*0_FOp$JYYQWo9(= zRg$KUuPWz-mYgVa7_jw!&}~gEgQ^#C^(k@m30PY*qRkXYCo+A=vf!d_rH(C)UUhFi zru>v)ADhI(Jr#mvGhF+EXwrB2_`P;lBOFKPh}!L+xJbA>G1Pz)3R&qvt6DTGe-<8} zlDDIk=fiWxfRVd;&dl8i|^OV8EpBjV$sU3*D}SDg5fhmW_z zP2x(X<1+Y7_XMqaj^fwX*9tg}VwT_W*!a#~u=efxbcTfvZfnr@_8cY@L$^gW!CJnn~&u zs5~6-(GO{``2$Ck36rgR3PlSZyt;x2lIGya|=ir_V-1wtWoIb}jhKSK6<`lJ0y zdU}{t61#SPxJ}JuNAVp#F)_t(DNFaP&3@{n`!``2s-87BOw3vL^kRRf4z=CmkIjN} z#27qbwE3wFPtr>=u^}PE8ny@I=m~h_U6^I5yA8;{V)_VPchM5i9Uu&%d8m!&E3W`2Q>okL zs!bwog_I*a)4C}RB>kN;qP4nTYjeG2jPtfwO27-{OERk%|**Y7(8dwkx9GQAC-gBwQCrX*vkr}6Bm7~@eM#Bb^By; zPPW1rvFk&MF&tZE&F8VI*hCZFXPvR{hTrmJ_;ZeHjkuWY;q6rjdb54p^vRvI&--bAtQ9k8qeIM>&x@5IU=tdi_sna(0 zUZ-W9t}&G_J%e|2>&mkb#|rGnL@a!`7*NXJ_5RVSc3)+uHhFv~N zvx(Jm!F34VpQsRKn)Js9cNAYato(EV?1?=gb|iN2JTe@Uqkvxs$~zZH`c9VIw{hJF zU4H>Rf9-{0g?e+Hcs||Y>l48cE&j(!Z>)r{@**n*{vXjBMByAD`<2RqZ-ZiNG7z0B-Zz7Y*hs7LzxD7vl8QdLM!Yesw1 z+%^QkiiIb{Zp0dZcpPa2nKbRTFtRM?2p3uFsGQSK6#|9pJhR!-r>XAie)7L7SU-oR_7y@NLOZzwlJxC8jrhH-I|O zylbTmvzv!ry|4lC>5TQg;MgA9dy3H0gYX4c(A{Wkm^EMgx!oYcuVH${f3`>hyX>p? zqb=0y<%NRpmZpkVsj&Q&5hNu@321;oemm13-ZRL6xhtf_E7m`2H zei-_ReBu6w|FL@L#@7tV1m~d7Im_JIk3ymu{4rm@VY{n?Zc71a+GT|mohma7F+Yqj zA!;?a(KEaOMwSZ6@=y45RwZ(`2cC&2@o2#DK1}>vV zX`V{@qhFCOEG*Q%*yIMW6vcikvJYG_n&+SLL=R}!8Rwd41-7^~cQaB4TL54MI`aP! 
z_TKSW_wV~SrBFtLGKwgB6S8+!xa^E5p^_1@x9Sp-m5~)iE<1$mB6}vXNmkkOvR&Wv zrThJU-=BN@e*N9U<9a>E`8?0#JdWeMMD)Kj^&_NOe?Z1idj0}_Gf>OlgX5O2z>ESY z=@2p|StV~PNit+a_W>(_zzUDT(+qbhTYs)Oh^cQyl79&8`S*!jJ}bY5&=Mn`#ir=@ zo`01?6Oghwn0e_wX29I2NZ81?XNh?JoGSEPiZl**(CD$3B)dg=tXp&1>E_^zO46Zq z;f_`WxqJFubSgf66kOu}qlq}6?-Ks8lq9smoTk>S3{HihiLNsNn5>d>Fy0GTtG3j8&|@MapWHv;j1r3Kox-MYWc zVPQzq16|}-fjsUu8y1mr8l^L;-UO476?VyFv!CXgdN*|LdCNvsoEZ&^W4)Xl@j1on zcsX>65-$$@I02nl#cMqw!RBOs1l}L~b)aIL9@jCi!dws>ykXjiGzrJ?o~+5d;+kW4 zdG_zi^gl`hi5)x?d0jl% zraILLyv$tnj+0_8*<%K3a?u(Or-|HNVa^+hSJfXkBlD9#t^VM)>*TRtK0T7+h+pZ? zVqV*zQ=5cd#qR>lO$c<5zSXHe(MX|*`JVuC6l&)Ypj@-Fw@sz(dn2i8^O>8}xwjaP z(?1zTHxFDU)B#u#1wUp`K=~FZcsg0o_J((*nF8f{&NvHtz7}f;^B}$`XF=viv3>pM zeMlo*u)Ueu1qyDqXcqPYmyC0)81ilFTjWj9Is5Ak{2#UA0ux+6JvwJD-lA^n4(;{> zA?9agGGHbPA-+|P@xK#+cWJdv$1qQw8g5iStVVbp2}fat`UDsVVhNOfqKcjvlD=AN zH-I+kK)k}pQl|cQTvjyu&bQqQ{(MB|Gh)_P%GE7GZPp?#4SM&ikxKZjqS0ENHfqSCx$ig4 zCqRd)$J=^kMKk0PlSY;3YwgR9cRv_Fm4!Y=f9siltfFn-Mx7cT{hEk^fECC!{(96u z#sf!YZ@ag8#AU4&C~Mt$5f;EVgBKp5;BAZim&<+OiH=i7WNFuX1jVvLQU?I5Zw286 zht@1Kvfq!SLP1LgBjlnoeTjXFh47Od}$fe5@|T4_?HO{PI-pqm7RM zR}!%as&cZ~EHw4mmYTd&tp$Qv;J*Z=I1KD6Y-WW%HVKS(p(BQ{5w81vwg$FeqYsn#bIjXarkE93IS9knllYm{UjqT&FGRN*!7Nh1l9p?qKU?_;uv(LDuy)p-wiFy>`Ymld^HdKczhh6ebAAZWQ8N zKXXS;%rvgMDqnV=AuAIfNLU*rzS02DU}m(^p=}EeJ%}%x-SJDl_K33Zs3C}{sE)lk zuEPHmjKg*mk(X*dKw_2ZWgF63j@J7v?=szZU7p{04Z@$JsU5g@~Pe9N%mgtY8 zT((2%9~nE|ZH-^lnUY5-O(WLCw~$pBHQqha6|y_&}C3rWO6(4YmnYz)ue<<7_{aS zol~igpZEqbee?yxP_@vlqgn!Y4#T|dMzl5;J>s#z} zV=xi~k8<^2qu7!qkhj5b?^U476@*r+2W+a4@!P;;Y7f+&O9P5xCKGZCWWulN73*_j zC~Y`6%7u1c_Vr4P!R{FeSG5@?_G?MpbAB{%toG56sqTgQ&~dE)_^!8d(p~U`^@^F! 
z@5PX|LfjgFn$CFqEv4McUl3AsZr7a%I-%fwt72<*3toRt{G&Q6-<%ic@)A2~JoAel z?iW{;nVFG{@x{0tFHM0oSMLePGHQ9_1O5mDFIhgb7EDL>iD-+)_t0_j3q9KNx)!YRNHs(ZoU*l~pP@b0?7$BG0^BZy z;J@m~sr+*D2ocCrHU->l_u`6#*Uz)+*M-}1OFC|)Nc4_)Ew+u{r|roNGbYeE@iKFY z#tq}3#ig3&Ad^FLB*@r*fB^4UjmJ?X0gff4(GJ0EJ4y;LzcMe<>c$!lz+8x z7&`Sb_aZ-dbxu>vgsp)C;a`h}W8Du%tB{&o*<-f>p;;sEbUp~ULA>L!+|$cHy+$7c ztz4iCE7}}_44IN5wPNP^0#~o8=ahr)34^fPp{ClU$$InD%H7aB(G+o6QdxgyKsH~h zSHwwK`~GP3$c^!CW<1o15drBqk;CM$ZwGz-DDa$hnV?0$5J_0)fus(Cv=1$rEa zpjJ}o)iup^?s!aMAB+wtSd=Y>3+_eYm52_R7T8YCO?k|`IQk3?j927xmfJ#sbIwHK9X&0D}Amu#KBM&AGs~ z06aPPAbMM6y;HODzKKY5A{z~X>u1iG=n9VR!irhRJw~lt9SRz2hZeYUh4av6F50KM3fs(n5oH&V_(SEfSnHI#bvWAIC4h!F-Kv%{} zv`BrSjl?eiXan8WrdLvm>UO!-p>zYCmffbLQwzi?@ozOMg%zf+cqX0#QEcL;m17GS zLcp=Jb$Y*IBp03^3bSYA=RaSSu4}q&QxZn_Hsj6+`0JIL7oBEt!%J-g7?V`vW;h@E zU6Zsn%iLSwf^;%K2fne^GWKnKTo1!XRXtPE$KYi4 z|GdX#n>Ho`*SA@@aBsKT=mnVg$tPFLkd>6if6J~^7UWKmAU8=@*Z4IosBUuz@5q?M z;nbHYwmtU%7Xr|8d?G*Y8z=*P7bEgkdAEGO0_;-xDpvO+jnb_WH6cy0{F5lnie6{2 z{*7Mdj#R7ht29}TPop3auq7sV6cuoMHUtQ>h5^U1#mu%CQMcBx>#1qy)Q8y|Lz&VX zhqUk=nx;BsF%z94EQgv^Nkb~|c^A)5>ile@{C06-Vvor(oNU=e@yFIdO?dC>-_M!? 
z?Wijb%u7WgiJD5T---f~vl%>saMVtL_Zn2>1$VL#q0l#G6Ir%zh}aFDMtC1mu*8Up zjY?;zig(e)iSWk{ftb)d8y!VO;Qh-G4AB+Bb>_$`lY~3vDEXjC*KD&wp9Q%TI8tkW%lEI$38c+z zP8dGYK$6fZ5&fdwz(ssRS!Eg%{Uxh^fu%$5#HhxX7wt-;x4S532r7c73BA-)$o{E1 zCu)SM9GV(;x){8XEq4Z8Cdkg3vUd_>o@=!>P0#0V zx;%d-4^=s$D*bS2f|6#Y{B>-)lDi#J8sGXpu@by{j?eP}LCwZNH2x>3h9I-TavsE@ zbckzMuzWcHjIs#pkPZI?fzJ0SNM$mY_|iXJ)SsJ8>MR=Tpc$~sF-D2G8wOi*;G%pw ztTRT+mTkNUn|qjT+p#7R1qH)yWKTh>FQOJVd_iNs^pU=bRHHPiT0v$)@P}t^0&~Z@ z!Qj~u(g^+0{DwUHq|`K*-Yc`iM0g56;A-e$oU)+pnt8o5^ZCzVKtKuzi!??d&enX$A*RXfH^)`2lAE}5P9+v1 zzJe*>`GdDD#Om0r&1!L)S>VZvOa&xa)A*>6i=&^Vt!H%7HWX z5cTeL%^h{~B_xXx+3^GKS%+`8f(tA4v!dM;oF7fp^_AZrg7rDJ70|G83D30wlAsg- zm3UC~x?cp2Mb9y#+1C0t)>m5*XwaV^5SUTTA z_$&A0#mfb?$&!-L3^{;>PXE1Pd29{r1}LsHBwY3pA=V)#8BRUcCtF&0p|B1iM$(gA zfi1ZnWyG2366y>e-2A3OrE zYv}>_JtbEi2D!yN@E}_O@Fsx3+=b%>Pj_>z zBQT%>GzHF6Q%fSPWG6S4M~HP~2_MyrO$ZLeL`gIbqhWT4ztIzIy;P~jG zfciqg;x@Zh=k&v9AZ+LiOg%{g)pC?L|EH zneK4{=SPn9$P$!oLlY#Jwezg>@NI@?QaxJMVcGm+5h@% z0i}@c?ZdVw@IZP9j+4lzRv$v@GssDy`*S6=z_Q;)rl=YkRsheM>VAC-et^N1v)>aE z%;kEn@E}jAP>Nt9&Ci0F(Q9>Kk!#gRhv@+rkH5>H4K|LO)61Oe(qV^gQ92(M{&1=cLgU91|jT zLxVprE?tzkSj*mhRsjk~Fe>))jA&*qxYd41%NMv6$U*rf|M#~d4J<(sO?*FC03cE) z;mp(>I2y=1ShRe{bc0|zsSu(~u@G7?`|lsxm6`|&jUUAakBp4p z?M%qrrsvuxxOFUVPL5q8egD1`oIVhgo37?|6!Rd9rBBp8lzbMt-rVUA;UG0O-#Lkf zG|hqjelgGcg@VH9G5{skIQjO_KfL$-|hQ$adaGd%0oz*~X)n z=&eplSv$l`GB=m@cSIzr6?CY1TMXa7mn}pde~u4YODY6Nj`i1|0%{awY1$(eez2(9 z(-`#IV$Pm@qW9{J4yZ~Rr5>l;>lf3#GjOc!oju$U_!kKL%kS4N_8}w!!$bGLhxZ>* zz@a{-;mY!QB)6;P>Pqb=b@p>DM)Ep|AA!C>Pj=;~AUx-Ye(iEeHZE!nrUf|~@FJrj zlVFcAiw8oL;L$GMzh8s{jo62Lm9h;d4U`ml&|JR(b1uP^TgPkUr0W2qcz`A`lH1A2 zv^N>Lfc!=20>lWSlmbZ)byP1f{T*96fKmP8wRh^#aeTZQuL~^a)iNc3NrL>y?^toy z#2MulcsD(-{rpUNAlJC`AKjROHYb9Ih4_J2$X}b~czA3eDY9k64ANaOej8M0GuAfb zkN5kWeSb?001U^au6mlHR;#`z>OUcK?^%_djK2h*j6+v=tllD6)D+#h7fy zfIGoSy$?#B0IYGAq5FS6ybfMK*dFMUzot11rXl~l^(vT@Jw5nywD93H7tsgjh(O8x z+IotOrD~DY+(}^~|dVz_h%)`dF1MP-K7?Hcw+VI z{2zab2;wnfX8wgj6=`-;DS2hJ3A!YF3Q8M_WL@0Q87ppd=yoOJ!&(m@M 
z=1@Ak3fagc3)}?+|7&ub?Ln(#9AbCaUs-Bo$X=C#H5c86^ObFC14U)#R9tw}ycPMU zh{GAD-zmKEd1^FGVcs?n-`#J>o%kUeQu&^~^6SINtb*@H*){x0?)<`c%Mj%;_GYX; zHnB4K?J}1~`dpiP+7)v1N{xwW2j;q=Psxp6x$iJ)R3$`Rfx>=AXHxL)-co1M{Xspw z`Y~%cK>&q@iU1AB@_yj_Wavt14nKQCuTW-NZP-s2{kV{%Z;W#F-iNQyvuu^@ex}iO z@aaJ_F_0IEcYQO`G{^1TmcY8<_5si&>Oi*gavj2|?K!3N4;bkmo8U6)QF!Al#+4YO z!;LALIX-+13Bzn%$R5aD(G~2)VC@B}iu3Q+zOvBYqiGBY%W}@*h^oxM!q7N)W3mc% z;pZn#rj9g`Mzr-vR^EpLJYA_rl76W&ho(GS4)e%ef1sVX6Ulc=4cuKqw7H^Uhc47| z)WBTEQ8J50NugiAQfZ%QFl@<`sBh&+H05bwp;q_O%fK z*(5@@!>;stD-=!nDiNF!F;U_l+0*YZ-?`tINiF#>XzMae3j-Gxpka!LAQLia4&Rge z*ZTy|c;>lIXRFccJy%%ny%l;W`hME5r-}-IEK=QAIA6Lu*TW-)H*`(lm|4WUQ#_5z zFh-TQ}+A^gttYHOgsjgwViVF&oXF!&Ni5)*p z7qvTF-{V0u{ZewuFCsbbfOER9G}jY%!MX>Gb1`gX$~K^ zy}kSH_+^f>-DfV31}mmxL%1GR5y9uCbj|f{Q@g-@r|^oqTnine@7hTmWSL7h)daKI>c)O)KgwfA%RVt@!w*FIjd&n&PcrO zZQ-e!hdgQXCR9_-C;U7UJ*ncND;<{_DQB#Mk<6^!2&Ps|!K1m)?7zfX-q+o_8+)Y2 zBeX8Z@sK7&cqbYm9=k`eL&>MkJSXfqJk|1IkmBb1WS8C#VT|++Nw=PzEX8WomFyC; zt0r%fVLs0fTf`nwlANn)x!9CMO5f%->dNHVp|o*y^C*#%G^LbN1fz!|7CjaA67fF-WJY9d<#?Mof|`q3?!ztqsUtx8kl_STcML( zspKL<5L@Tv20wqsP*aGDJt@PFdtVAc0Z`jc{CLpw5M9gX+ps5ESGf01cXQ&ODd9*I zFOlr~7=H!3DLBJg7pW+W3DhoVYEb(uS6hOtb?d1sTgNeHG7QT zJhzAxP8zz>W5T8@owg|wSMcP8CZDlp_?&dXm|Vknx(M<~vS8!>mxAiIIhG0rpkMZ1 zj_jD5_Hb~EO25J_sG#^jOVxaWC-DR4b}4fOZ-V0m%VM4QP_1izmR}28GAo~?E=rd+ z+_^ZnPeJFwiB8lO7YQ?3xdm`mM!iVg#L=kK2jb7P=H`NPta6E+MGqM*Q_H3+7t$We zOVNk7qE70~DJzu*uqsC{a^JIb&8$_@Fd+FlfH1+VncO;Jah4qDMPj$1$IB`Y3r=;T z$HJU47Bqr>gZMOu`8grY24SZVMD{w;T5-`A$EfQ>AOH z(7x)bQ=0s3$3AC295%?nZKtWavO}2hBFD3P^#1$>1rm#5L(F?Ty|$G&50{Wa+fgV# zGcDD-q|bi|cC9KWk>mWx@>MzMT(qk-Fd{kyfcZu8K(~ds<|9RP z$8{^^FNkBUfD;TgUb?I8kxwg*JzZ+oY{OFYYgy05lqX1t6NVZy9R50wA7)fN{b{zM z`*}-N$4IkH4WZj?bN1fvTJp_(-{)jM6q|;VpL!eiE(=so zC0A`|>AU7->Ha{!uR?sQM$~cZxFVfH5uqya!TXJO^PjSL#Q2PrQts9$ zQ}w8U30ybl$qCM4Z2LP6k&e4o8^`(GY%4`OtR1F~sO@UOnJZa&_{i+o<>48ZoFJ=` zpss`oy2(V!PdhI2<$Ny+_1!l`cl|Hrf)nRg6i`?=irdfhlLi$$sIalHz+OJvHPX^b zxAJ_QACse>FnDo#?>aj6Rogk$M%DJuVVduuwB7J7x41kxoRV_6`Z3AjW(PRY6Opnu 
z7R?!VQ(`Km%%d_RL(Zjj6Wr_S!3 z5Rv=l`TP6J`|lKR8{ij(hX?BYzvYdH`<{&Q3;p3SY+84XX~bu<+>4y=FS(docsTEs zaG{SpojP57&SRG*R3R;BZ3Iaa6272 zMT1?nRwkZnMf5Cg@_Uu)au&}IQ?3$oOz=f#Vb2aeoiA?@KjEoS9UVERHb2VDGPi6W zv?38HK``+I^)u`0#Q7A5t-6XOi@}OO{?ao(F4{?5bG^wjBsfW=_(B6g>R-M+c9tS% zac(0Wl25s(KdcvfGAsL$R86AKTwf-tosiXz_`_PoKW<8pRt z&x6P3@{6tJj^Mm((jiFsX2hcyvjn)@s^5>CQIcl**!=w^R}Z$-LJS@$=n|`(LII{5 zTa=Tn6HvIdlev?C_L5duNFL}ETg(s$u0DPeV3s!fLp~d3o;Gh7A{eSyBoqS_)CYM5 zqKq|{!$a|?SIE04(-Ie%+84QNIb@H*zy*@p!U+kicIL)4K145L^A&kmHmxxB4-T@yJ=UR}yS)go+d2!tKE*a=efKRP-a3jVID7BJH5> zCR7wCHAVF-rbec=#J+{;^UTf)*VL(1z-^SKv($pZHwK#(_>ROuYWzszR`9I?GR#H?f%v-CC5Ux7&Y;194ma z$pi6`Z`eN-+K3M)qkyQXXxgH*o9`#`+Kn~j1uCsKRIi=au^5EhKxgL)@&JdvF0sVJ z!>uXH%jnGLB-U*D`_z&ML{pu)+w7&!@@pb5xA&BVG}EO6@y^iMM~b;9uHehh4zurD zp4Xfh>b7+}c4_P;QEdWb?^X#h;6$JFnsNNHI@0#uPn1=8uyw%;)82Nbi=6v)U(4?&Sk0Lem3%Jz`EKY+fbUubOzmF1EXbJTWq}#to_ZW0Bdq{}1Y430n+qw& z)5mB~Z0~c2L)^?xBUsC!43uz(i)N=W;gtTXiYDD1M(U*3A44YxKaTM`*%rh9KIvCc zPI6k57&_gi7--@bX&9wA5-QN6h?j_kt8I?WA9G`Cq7SoT5uz@HiPo)-#ZO)L559!t z@V_MpM9-4GA#jxLBAb9wW{+N7{N?gbD1u)?CxcRcY(dzQ@p|O?jwFyq)6*b_cC|OU zp-}p(O*2_O)LB+@!%IXMl0(?o zuJTc-puw{=?2=q#UOD9{o~4kfT3Oq3e64u+gTLgO zY7_NT5b+z8K_Ngc&bPBA9f@OziX{*ARr$QWVXG}cKvnjRwAWV!fzc}MrYc?%@d~EG zghNzX)mYEj`ke+|8(ECqfIk)8`euUpclhBs1|Hz6FXTD_P7fF0I1Cr$tuX6^tRjgA z4`EN0659JrX8y}Dzl|^XBTUN&dD=~?zhh>;vS6-e4%X0!)29^Si92xXGvQtw4K+@g z!M<4PVly+hC}*IHl(1=m$ebC%RXmA?ZahZY$*>U1+#@ONaJTlH;PM<_ZAs3n0bxBs ztnGm~D?tV9e^{ut*=EAH$YL;PrGO<-i&=zwMMNltOlG^+I9y~8iz2KJU?npy) zaB#X1pAc$gd}@l2`J=pcHo^J=JAg$t3p%akH4o5|l5a11cMwQ6B#I2D(8Nd0gq~R4 z`Rz@1;?&=tQdW8BN>kCv%g3h-w_T<0VPN9WOWE`s>H=KBUVx^UU$|@)nZ}bKN zCC-0hexxagZJ(!{SiCLTyzwmI)^{J=Go(pszq0?7hFS2`EvXq>AHHJ;Olrgl^{+g= z=7plE{q@95}8Q!56PsR_RPi}N+ zw^(0q(b(&XYGG*j#1DKsW1Udnv{WBFce*Y62lmO#=NAAu*YdnM^}W0$%n7+n`>$!x z!XwC%)tks~B@tsM?X;2a^DNuzXKp`Vp(RLW{9_+Mv+xkd=f7q|tdEaj`i{fbK|Ae; zU)Be&lrga>uCuR7#hWwtzI-)Aj%xcSGmY0;P!Auu6?HcceDM2oR^pL?}f26!_Xo2Z#9qErJ*w-^A0Ls zT6Qt6!vQU?Uxj^MO8kk0F`5r;1qe4|zlu+SA5BJnkN;SDE~(F3G7!Q`YcFWRS$5l< 
zNZu)9Nf;0oT6B!f=@hkOk<0OHI?T?rcut|cAt%`wonm$FMI)3+H$|6fmT#7~oI?>i zw}(iN(4r4*-0b+)#A~Da!cJUZ)%=Q(8+qd$`2?x&D(edlJ08-T!4i%1r>vyTxSh_7 zreoTWxE(2Vq{q*CWVFflJV)2+!N(!)mVX5@P((0Y(IggiU?E1-+}P|~8>2k8Kti-{Z;9TgWzUDkf?)>!SVfK|w&G}%>U60z`84lu7?U_rejV%rN7J&1>0&-O- zX~>wZEbw3QoX9>FvPOlS@*rY?&)|?uX7st1Nt(E;rluYCWArv0MZABi`kyROq4Li# z>)<2CdI&C51669KB#+TO4s0_F+gWSr&1us8(Q2$s_ep5l$=Zv z`ROSq?mKgP;+qfaQ3(}R|G8S9)KA{@eu)7^(|#obdBwoHi3#1R!Je5)OdrVf6#nYV>P*c0x3iKFm~lTrV$^iRQb`0jvbGU5z;v~CbN zVc`BHvkc2OP3lcTSIQX2?^ReF6*cpj1TG`Lzpr9=s(#Nsv5a~cgDdU0_C8whzFov% z3YPwZ89!C{B>fBdhtDG!^uOoj4!rpt8$MyL7aU@r(l=Em5WMQo$U{h<5eiXYMTOY8 zCIM7-3uO?0fR20+Lxdu$eY)v5f44behBlt%Y=T6|iYA37fWg1o2U&WUW%$=4^u1-X zerMyHs24)6qoyElN$VZ$18VX_npk%(-eb^B2AN|z3eS7&#t~)Skl^pm-~eVumd))( zRGI|xOkN>LIdk`Uwskf9l;KQ|!}`~}+@Lr#@)vDrvf;-s4MVNo3?II5h~0z&Hxdhk zGLRf4*DLv&UpqhGm?NqY3fa5rYPstE-BNhdi#URJhjw0F)@>8Ca*P+R&CG{<`7z)j zrHY8wYi|dz=7TA%fBg>BV`M)**o`87#r&)z(J+g52$+<}z?VvVC@v^Bh1?BQN1A(q z9b*JBy6D!ukUX1*S)xt@K(nx_bwxTyPE!YsJ$tGej?Nci7OXc(<@nYoVQ)B_X3h=5 z{VM5?E-Nw4qaoxdpp}=d3;r=mi`F;IAmG1R^I)S=csg;iHN;rw-Qi0zPyv}<0qQCs zq{us9!==EiM2sPlQ{npMgF`ms!9RfHG8oJ#VxWysV$4zEoYydirP#`@_~T*rz}a*0WA;({L00Fo8*NYEQ}P7)*im6o?0(O}!vG)1hlj44 z*uw-<%}w<$B-)&T1nJ3%He*}3v!s#Q^!8>4H~AM}f_UvIwc zjP*W4VB&40?KhA!**D&ciS`SgJ9-ke{Cabv9;xt2Rk_3XPO+EPp&Qupa%e~OS{vMd zjNEX@M~V)QLghP|*nW~2l;062HRqYBpBF(z$FonT`mflS}l=B@zE2>`B}DS z=GN#L$RE4ChaSK96lT*iVvRpVe`-MFj0kCwMtnl5)wvIMZNp&dufp;oX{j^pP5slM zg`dbXooFuJ6fy$U*uEwIFEP{tQ*1a_$|84Zx|N;K){<^WVN*(vUNq8-HMR`U)$4}4W^}g`Shd3J07~_RqH|!@HZG3l znYjHZW}~ISt(M1BO5c3NAR*1W-X9Gs6%ULt!_$IdHW| z*YjDn^&*_&_I@=bg_ocBhr*E#yFHaZS0`s)S?yBDdNB}pA-L}BaO1*eVuz1~E1Wo3o*J8o!t!wMKi|u`Tm6;26>bMrW z)IQ;KpCuoh<-Kp}CoxnJp%lhtU;>oXUZTLig5^Zn7%ghS%9G2R74FW8HrQws7 z7S5-0+Oiz7w@DBlNPJd{w5P#`7>cAJ=Xu>_cUmqGlt{fi6gcsp z$82+lCGLgcyWeqV>E}k~AajoK<@7t>ZZzP`>H@1j%=XQ%MuB z;$E^)U_8CZ3W1?Jb&Qf z*ZA_RLF~dwV4_7(qWG@aFiaTgc*Fwg2pzhvFltn4T7&q^11}fi)xw!*YvM~~($We# z&|oD^1Q{V9F9)#aqPK@t1AR}4`p>&n_VfO18*R8%(hzN}g{pd-)2KhrZE+V@y!rf# 
zpiT8Ub|aJS@NZZ9KTI|nLvqW~fo2Kn3lX@f#c*Wv4A-9yP$CkdkHJVPif#V|bqS%? zO`1$-Z`<~S!Gl|lgiCT8hA7LUb*Lx~<~CczxvVms%!=bZ?5M@WYe>RitSqXFX4+Tf zwU{D;XAGn$B+>WqUjkyHik@_xXYt93ZFCkuQEre`WlA|l#6%1TrKeVxlN3rd(NkvT zx1C5nb}2$pS|Jvy{q^?EHuuG)Qi4 zZm+^E3}ihbL}A*z6{A0Fk05LvTn_X0`~2RHQO*RvXi9hj&ptVGx6Fb{-k0y_uhT`2 zMhWo1ES!^@cSTM9LPidA7~n@CQ~$sv%cfSXk_-7<8q-Z0xyVv(ay9Y17uU)m%sz;sCNRrmKqAW>#gb4;OkNYnt zIGS3tZY@D?+XxTrs(;5BS18;p;igelUGw_>z#yTPR!Gjv6W)*l$_&xXpU`()xV_Rh;^8_Fs1cTRMLkO|_#*4piN;wl+TZIC{Gha=XCDvW*cFo@yyMJalToH{EZV11$%hTsB>+!~@%H}X!SA-oUN6A+%s z9Enp~81>;P0V?!W_@thFn|u(5mN}Z^m04gGsx~ZpuE4~v6%}LY0}$E9=2rE#7Ngx& zCcAphUp&t7V|he9S|5%WrsnUarO@mE>&UE6dDPFL=gM{b-vAQ~>G^XI7x?xVVQao> zN_I7Z(1uJKe*{gV%!zhqsEF~sH{38omUbtg(Ijd_Q^j;6SGf4zIk{W83C{4GXL|XP z1e`fSc^Ba;6qV+{mV*fhT3o~W;Ix#m`imzxi*te=+ zvkZF3$G03p5ei}bQbSnVi78M0F4MUK*d~c<NU)GfWh5}vWX-ppe_!jAf!_+%ZCh)0X>+{+CqDN(PB*1 zUY=vw{?1T{nQ<-Q#HbJdPL))y7cMrg9p^bKQN8!Pyt|kcC{Yi_^@7m!01}G=q%vw>9FT&Ah?j4udTs$7sKj z1Aj#DIg4ET1WLtpI@#i}jw)bj5~7w`8dq(9yjSklmE+9XZVQMxx^B!U+F1>5eR~P? 
z?p6MxQbmUsqlv{Up(Ddvrqd7AjNj{)?M0W<tG@`*-6SRFd9uKJFC%k*hkcEe2ubPcjKtK?c@hE`EGl6rvL=du&>`j?6m8g zi^EhXA0Y%YBZiDdqa9Y#Zf13)zT1gF#1@IEX0d$r0g%I^7m6r#lKY`=@CT6G7w|{i z!DdrvH&|oBhane7LdbxKt(OD5sBc0^AG<$FAi}tw)PdJMoj+^`AOO@<$JwnbsQJW* zeKY-`qi?*pZ0}KT#W#;A!%*@*5IMOF%7mpU!I=QNU_3%W%T{PZplx z6fz2*H(3iWtIReCfnx1CGY1|C(+_u#PZ#gCv5fAIkegt!<=s;zje5APZyx&(N9ymn zESBA#RTSIs-sO2EUW$9kBkr}$QLqZeE!)-D`8?0natE=h{VsBt3*k9CN$)jHc(!_{ zmfmM^)Fd3&Tn*mp^i3Vr%Zc>f;CBw=oNYD55WxN33Jx19DpaS&IwNi^?2ENe=U8Pl8pua zvsoW80(Z23Rc`M$zB-rq&<>ujB3uIUdixN&AG|s#B*NW#*UlHFe6QNHdjM7;jgI_3 z`DbO7`47YOPoFsRoaJ&dV`4Hkxz##DFP=bwZslhBXduff1rvhDW7U;?@;T5;nNb-J zXmk8BE4a^i`AeAk z=EtfxqkCha0%uPQtULn24P2VtPc~US#7GHN0Gwu*9q*sgRV(7w=%DK&KB1KPU9C$@ z{ptQ3uB1Vw=<6isxH+n>>LrB$P2>%B?rrj+Hl$l>v+%ydD7!JlZMxXZ_WoeAN|A6n zIZH>5z7eGc)(wD+6%$=49>N+l|%C)k& zrC{9ld^N5$2FB6qjTTPhwu6giel9PkuL{|Y`0S3tP=MC{fZ~dkuc}?N1UjfwNH$P& z#psElmJN2XsRt!2BL+}<~kCQsD_$!h)epzJT47*TBsodZzQCU-M8JW4FhdGc+n zsAVnYF_- zR#xQsD?5vZf@ym^!h7oi9f>|{j`iFvwAGuF%RC8LKDdCrBRV6Zj>D$eZga~To+Aw* zOCKt>2Re2jppfs+XABgD;$pT*FS5sg81E78+matC{eHlb=qD5J&D)kJnI@{YG_1Pz zrHFEsN`>yYS}&5kzYsTpbx>yZ=Z%FvC+}>q!ge}TTsA+S7+X6}e202%V}T~!`z}(A zJy?7kN){t_m$j@72@B_1I=4pB7wjXm+xb#uYA`)m#yMJZRd^d64zt&&&buOjlYp$v z^%MP`P_`tTF8-YJV94$CWXSxYjh%o`>M@?YMEUBz!cx-HTbEm3hDN0;s9&E?bwyAl z5^{8?Z!?rNXyFQl=RfV_8(9porjHyxAYcnfVqHV2B~!Dv+zyspy6b@1@p~$ViWMY9 zFf9&DAw1vS99{jRKGO%Syx7BtQD-T)u5t4NlK^B`5jo%TD2?O;m;al6XyW_n)J7lCR4 zZmIIM?P||e4|);zxAxQsNnO8rz|4v01brH<_XtHJ)CboX<&N7?G%WK;=)0!WzXct>LU#1NE z+!&eXJbBf2GKnxR66+>+5-PEk{#%XrK1(H=g{w*e`;q#P8^>^(#)Koo;wyaq>*DR6 z5o>2XghUcVA2W$nUz%v`netOw@TflngQ{I2^ewzF&Kq+(y<&7Pay99go1aqr&C%Po zuTcE1x~d1;9|oj-sbMsZ`vS@;qEwSD*dk1SI!`z1%yttl2s9M8_H ztr|#GS3fGgx`7+JnB|PyOC0tof9SDz-bf0m8J@&rTZZp>y>r=;^Ch;#LZDcgQN+l6NA}$z>G=$k)x|gm1lol zz1rUJpr(J~6e0@&(BwuhS_;u+7Flt#Er_9Po$kGZs;kl>a?t8kvH1HHyVZ?7Ray~w zSEp78+O8S3#Y%*Rdvly!f*aShL>s~mhkr?>QDlRt%U2E!qGon?{H*B*5Gp%xLs`8 zbBk`sG*eh*4jBG+ep?8#2*+!aF!2#ifvJryy92X}Gj2ZI$8YFl|~7p=vbSA|{e 
z_1>?~^Il(kNncq?L5*^oZFLg7kCk-abOcb*V0#R?ws%L5d%W5oLNYe4K}qaFPv}Tr z*|^5E&F=WX2JTC_SYDM)8~sr6cIJ#nUcxef4hDO*gS{|+Zg0_1SdZHSv;Jx1hR?>! z4)0&IQXe*Fczo8gTG>RSA)vSTY)x$};2H%^pSAI7Y2%BNXM!`ZF6_;wG2S4x!9y## zOHR5mp9RGf*d3;3?QP-u%Flkt%&fqT;~bc&L5n$UhlbpYg@*s2Drb z%aasmuEt%b&|$Tr(PF!2!cVKEs-hZqS5~0b$QNy9c<(Hez?+}`7N@PKU!EmU*jMeC z5-%V1Yb$jW+9@y4T3&#R9u4id*P6zhza zHqvpJerb|?)19{(TX<*vWW&SY<)<$#9>mR>%c#3(QoDM4J(_D3v#%f}(0OsDw`rM6 z;HXZ;9aG5`Crh7qVgj2wGo_T{_Q&y~reZg$ed1<|h2J2Y2|PUigBi^?Z#d8bnXkhv zowJ?CO)-9a(IHFHLA>OFNA=)z;F~QK`L2Z5^B(UbMT(j;LiPyXZuv5;oobmYC|`pEkN{5!$hdc6YNUv6U4g`4#s zcJa`RuLe#HqeC(cdu7fwt6H(1^TN)GPjzC&7p$A8ov&YKu3J1N?`rbutD|781g#y@YmrR6Y}#b!6}hFy3ZP$ zZ&XaJlS@e@udf8C-T1zzk)wTwBqZt5^G?G!63n@*46luG9$cyC*W~=;-u$i=G$D?J zsYSHB-)_%HXeko4cJaZIw`!c;sbFx&9IP&P!M5)8HLCnz!e@8hWENg|+jKP4w)X1Q zZVqSTFFKm2Y^h)==D4gc*fCRSp0@edi-pzGe6;7P6>^_l4hHx!FFi9o%svxQX>jUuH@WkjnU~D!m;N7PUmaHU z+N=#y1_DY;sR$?tSad2NDlMh7ARpc4>gfC8**f!l6w@|`+MhhKm=HaaqRAq9o||Z{ zVk|K*I&7mYbrWPysIC+3ujAvb9 z*R@V+;Ri)!I9IuxG)c1}jq0K7FDEiL-bsqwo*T`Ejf{>sSFJm&&39}zMh|tLtQ`Bu zN!G4H-SjQ4cJn?5+tj(QgrT*}5#k-W7aeTBKevCZQ5%ka%;tW)Y^qxbdS`id*9Lwb z3AnZXt6k-XVb3?NR{0q9vK949i6;}$B{JV<223$z=r?bB4I^(XJ+ch;yyfS*Nvuj{ z3(n*=XOC=l_GFC|gBI)W#Ht*xcD@rb0K$@`mv63)Q725$S@wQuns6xAWD|*2&&mfI z{7y)8ZtQiZj*WZI>35Sp^`0U&BSk!%;_DrKLfokoBF9;3Ei8t2)};KpOC>Q?W;US( zYs(=8!u+aZO44#74wYe=SHJrWsYewHn>P~`zqE1B(k>g*F=Xoh^;tmI!7Xm+K& z8nFVpG0$7$QN8n|j+aW)p%xz*cJ`C;1u@lg4U2Q%q-@yeBoZw2$!rgo`8Vzxhn#G+ z>Pd>@(3i6rd{>brk38Tkd-te^8G$3q_aC8{gtlt;i7p3;z$+$;VK2OX{vcUmF=M}g zq|T2Y-n$!6j<C47?^VT%dGF~P<(x)4{*=DfA!>W&(&i!?_6il2Vu3>E z{pVOo&Z>gz@acB=S;CL;{l{{}_N-$<)U!H8P(O`%`pUWP=~(~xv^?thEOA>^D_dpu zV}>a&FHP{X<@Z%%IAP;$36WF>t+~`psdVh*TpS;6XlcFa@+#j=cgN}Y$Xdg`(SBNu zUCi@)jLRDmSjAuKD&@P|Ycn|X*-ngdbV`rIQcQyoNeeFV5#Oh{h4=Cwt@6G!*+^Z` zpF2JdRi3->V!ssYKlh!E)rWVl#<^*)oPJ5vDogmKLF>DWShYIikdfHy>@Hetg%5Z` zGN(UWs(Qf8dx3#ErqM!ECj4#hu<(g9NlZN>L4oq*DpY$B?Iudzi<#QqHcwtB7H`)>0&kJTJ4L2Ktb(xSZYQnfL62ZfA>sfZrfP-M8f<^BCrxbZZu7 
z*iB4u7Lv;j=^*TknCJo@vSTQ5#UJ;%`D7<`gt!I`(?dRHhCIV4{DoBFKjiC)sSt;AeuiN1h%J=KLc%D<8ISa+1*wDG1_(-mQ!zYJ8*g;xnV>VaAV?|*| zZRfhFYlt$EI1basef2j{84172dIx+tl^VyKsZsc=S~` zyreC$+iJ(9p4~XJ3*Me*FV#2F`Cb8BNH@x$n>zP*!GL3`X&2rt*1#U@f{rbXv7fNGaehb zdRQ{XYEGBX(Thqv?!9qG?Ny2Cxy8&ezI=5d&pi~@y#B|W4HF}MW3~)Eib?R6oA|tT z885%0UXK1z!PUT>rd-^<8a9yqpf&G- z&*jG77wq=~MerEKlK}s1wcnAkh#d}~_3|o2#Ma3<^gv1wbIZA&zQuK?QO~P!W~1ck zpGykn_im}t5lEG}8n1nq3#!PQC{w|ytLvIA)+KLmXO@fXf;GdTuERrUVG>^OJrp!S zXN}uNvz@u4aiq`F7t`WVtCBw~j^a`=AGERrKNJ!?S2UL)Uu}>MDu&aYc>O;$`OsXPWmw2x@w+S%%X!JrlFy`v%OF?M(3-q1K*`1Ab8Q+ zzT;b$Z@+ugkwChi`l9$%*I|()en~aUlUKO!&RK`N5taQ1BIW+OA$+d(yoG%`1}KRg z!Uv02j6GqA7Wlc2P0gKWGjAOpw78Yu-$*ppr~s5FIWSckzLzQX?nu z>E0r;1U3&NF)fv91^ zWh0UQ4kO+BbKFfF&ZDR2ouBfW4qZ~4+UoT9M|xDn2NSdrVU>edblJ_5Fx9M`BKN>S`m85N}5!rND#n6ogBdnpvy*^x1{ zJ#@7FqkTE|=RpGA$4`{+hcjxc&QJ;b$}DVda(&Xogq9L>N-==dCh)Be235E&8CJVp zZ=rL^r}OfxFWn6QqayotK%fT6r^V^OQXA7yK1!_IzJvr9#IW<6U=$rvdF`P_4y+tr-x=Anaw`Atd0%g7+(S|pVB#1jT9YbysVGx|%eVXaBBim>6!F-G%U)rn(|1fi*|A%lk`gUG?>RANeQHlR;D`3@fZ)A=M zJnH~g9Ctak^}Vdpn^`J>dp~gDgmWFV$Tu^J&SGCxCgji3(uABLP9nm96gK`3EYCJop>(LVf$NR`)h; zNE+f`^+Nr0UR4}k)wx6!OCtd;`}gOumwPfjM9 zW)|%%Kd2vOl!0M; z0~JE0d%fDj9o;yi`48=hD%x5sI-8@Z$d&CT9a=ZN`TXj8!eD9?7j6~nU;`9i(T?bw_r~Yp(A@Sgv-Y<{G z*f=jiZcoE?m;ryyA0`~=-and0Jn}6tUF-F=^3fe-e#Bwhq`~fmuL0tWL!61U>ao9E z^}z!Cg$d-Nbfiv1@LH9hJ#r0l<|sCSdrL(xRWke6tDo6`X;zHagpj2I7GQWA>{69Q zo*mNWOGt~tK<=B&j_VOrR+E}e*6yQJT%;0+9eab;q{Q(oluHGMvO1Zq_sDCP78R%E zV1CxAV|Dmj3e>S!O!e+NhwIIbf<33Hl}I%-a6!ycu{C-k5d8H5?bJyOJsjd)7x%S? 
z_8+TNeOx2q@m6w=Ynkq?DCS%(eKz);agW!&-WWLp2f$k?R|*y0Te6Sg?>?ctPc~W> zo^O`VuS3QT9H!5ee&0Ixf4Fru#LBl@d!8IA^b0@v6jD_KcC$bTOaX&2ol)XnP-U@K z_e#y><#g74qhDWk@6h-WPYc<0D2w;N9zrBYx$1!Xtw684duMbHW*Y3a2ek8-IIRz> zvlg!`$YHdK9hUnz9vKentuND)m3JC`E_1^+{i6M1nvy2}mFO9zCBvgj_y7zi<6AJV zqfYM*pLHj5Ni8VdsapvUdoX2Hbg%C=%GLI|2ObF!RK^1g1C%x_)W~r&H(EWa!hE~p!}pn%v+QQ#sRpHc zzg)5Qif)*REgr#&qGHQhc|+zj$8kOK7H+%LGEpT)Qb%34 z+yPhNJ5-8lUbQ&mVPE|n%;~lIPF+@=ie;Z#>?=2zjtn~?NO&iJZF`k_?3h%wMmYKV z>CO*%vy35U*Dobttk}oawgY_V1(3h@JmBD$>DT`c1d*pA)rjNbYuk{msCh()8#M!r}Iu^O#Qs6PSAt&!DFL!|lt8L?^ zayK6p66dbUpQ3Ivu-pn^(`B(8Hz#%{UGy_5HJc?hSu-T9p4Gj4yoM}#YgLJIzlRQj ze9soBsB8czcHZkmyDpCAV^~#XxRvL*5Qel&0EzAZ0sKVi!jtT=T%O7lgq8P}A8;x-Q$kI(vi_K`VPp z=H|UTLp^(8?x?rywJMfQ8}D7{Gxz$fRUBvR8#gOGE8)#Q(3RqBe@bxuISz1_X zROzzKMPcW*ubR;BuB5np@0Iv;Jx8YF#gSL0Psuxzfoq@&IH%e^`QW0M8xZl14`_F9W`bIdYybTstSM}V3+K05pxU!m z{j8Cz`$hGA-$BF|oO~!kromjlBGwW;BqZqc5TPTr-^066iDAAACKvqB_jYlL31Bv1 zWaOD=!L1j!*JL>eg#SJ5U_VI24Ma;KR;K&8;HMRne&NnSdBd}AxZF&OS!GBU9|~)! zql@RpJ~%Z1DC$3Yq5dP-*_T<=hDr9u+9>Wdnt?Fh{Sjq}y4tp&} z@6vNhCK;$@)|&F&j4E`6*c<{=cY%R-^_Y{jQ%LUVRzp0mPrL|FkX+j%A7k_{W_#4L zZ;YoItoAJcJ0Z1$iBQ3<<`35HP^5Tlux%R*GNnkpXXW2SHPU2@!C4Rx^TLN64S+`g zP6q&0DhAuZgF#hStNMDSEDD`Ac#4m%Oq~DWZNg#Xxlii=s~vZqAyVk^03_IDJUcJJ z7DS%xzUb=CJ>p-wG2<~QY1k$;@z##)3fkURe>UUOB{7?C8@2nHQ+x#g9C%VGtQ=Ea ztNbDs-Aj#RwXA{H8_aC604y7Mnjz`f0-cn=R_s(raOB-16UA5?6_@P^KvIToEz}{L zQoG(S$T~K?A@5$#QP0F-Fz4Xp9IIc{T6+k4G}|#5IAaIl$WfaQ0|e=$EHQ$ri8rDO zcfp2TsQW~uEUv}0=GHd4TmSOQ%y6*h_twlnQV1ZT<#X(Oam zvN?K3k~X#A#EyNVmQ_K{Fwg2g#G-S9cVoex5Da}cRgCM>d|~#60jya8eD0!AephSm ze1lgf;1D_hKbaZUA|gM1;q$ZlF1Wlz!+o%Hb1u8$C`q^NJ4*A8{wxFei`lnXCNT6wBTaTJ}UA)SEg;gkPC021NNtAp!@-vd^;s(^!BpiNj<$ANkJb zY_S2nt(d;Z-T5{McuWbrO$OyNO_b{x_M6*~YIuj(48+#EUwoRgFKn%5Q`m=isSUK7 zfyVaNQ`D>##$QcBf;<%Wn4QcDwtqbv__`svr&3++bywkthn><-H&Ns~K8ZY|O!_cs ze<_yu?q#-?rHW@=_|v<@*sMmg(%Hgo%Pvx|XAkGikNW+%IR0{2XY(;Rp--Ux-2y(=``_s{de9V=g1?Zrac zqP2641I1H zGDz5}&3&k5&w0{ms_gWn=_|bs%I2}NchTzL)A^;=6R69nt?ELrrG|v% z1}`udPX5!^-KP8o!v!)%=td& 
ztYZhyt$xIw5NTKG^*)CPVgM@w9Jb_oYONU%%g**JQWs(D#+fG+5Ll@qaDl_|pxKusH}6=e=aq1;Ti=O{cqj>2;1#mj z(~VigX`wf3v>aACxD4GTfRphku`TIo;6f-R0BQZ#KEDGnA0F?{0h9%Vk+46p{AR5_ zV5f)e<4XOIRj7o18*n#cBPO;Sb?}0%{IEvPhfMhR8)a3>nL`{$R# zjUDcddgVsz<67y-)*iiLhH~#DnM>|RI;g8p0mmW5C~g2}RV8N;0@2w+Z}Qz#SA`qk z|M^b=>2Md{bSJ?7LXIb!{khHEXgi3Hsb5Fo1F*p9-9N%|R|OI5x4mYbUK^lPwmE&| z3k1{Dpb!>qmi`=jHCn zQHW3iQme)T$Vc$C+6d^td8vcI1Bf{(ARd^KbLSr_&F{pxA&12!VSo@;g-uNrd-paJ zUFW`l_uQ^oXz7T}^Aap5d7s!(1=pj4DV;Fp=inTx1Ahfs^2u$+3rG%$si|RMgPN~_ zA^2Jp1jNEU3LK4j=SVv#1*mY>L)uV=JD8ALSm?GHHcEG83lZ*Ha9BqJ9ywP zi1cr{3v8hMLHlfY(z9>XH|?Tzc52>^apk=g?S~-l;W76eep|!F!tmaV_M*pnXLLPx zAwoa4Y`~Xu$BRTi*#E4thoeT7xRIqw_3&)mj^=g0{99x4C7(|1P(Us|dqt9!S)98$_5Y{`x;&!;xqAA<#XTVMyJz=FLld9PF0T(B4v8yo>I))pZk5vzB67>N`V;xUi6lY@}9iVC8?}8F-9T z$u?OS+Sx=2q*Z~!fZG%WG|1t98~lSx_)o`L$wI6#uo7~SQbN6`Y+?MIRlE+mOn=`A z?4<%-(0PEHC5|)cVlf0dB{A;6m`$jZz&il~SSrM38(+~BoNOES?$oy(FY@O_wmVQ@ zCablOX_A2Ny!!pS51#o8cA_}IZJkY~0hDF;^^|9 z{v#(~uisgE0&voJNa+}}t|&aM$^60YA@rFm&hJ`=VXDKw_DAqnGd!47G|?g?Av{%l z)P*~Boh5C5w6bt$#}yamQ6YY359=Tw*wxY19n&@Bg&5pScwdQ4xmZf`ox7+X2AMr# zOQVxH$Ll z&F7$$$|Cs7wynV}obsyiD=URY(0?uUZ%+6z86j=q)7NPJdpZjPQwppe<_#u1hWeXv z+H|EwTdnlBI&D9uxw!#OhG&zRymz%mCLfqMmsAQ`>;BKpNzJL4&OnP0ITAC`3)cI7 zd+Mz4M$W)PM*<2YV(oT*1&hfvQHT>M+myDxheSVEv&B+oOvzesp;QN-Zg+=EVcPv# z$Upk5EAM27Z2T|m^<#{IWVYF;KcR!9GbC$(At+d ztLd=vp=&1hUoRY~ya$DItXo`ehU)Ow?_^w(>unKibDgk$xJy1@Ejae?O2g0!g4DXE z@~nHSxJPYBF@b>~&7X`De+Tw|dkPQ(gBQ}MWREiU9cAc0WpVA{T0B%ZTKHcnFFgIC z-C+x;_MK=mZr}|Hgo7;=E6q^;&3i0klA4^(<23ax!fa9>v~&>PQl0ZZ_f-!)16R2W z5Vx7-4JTYg3T2ATBZGc7U`>;^ozvU#pqQ^GXLj9t6%-#OJ*iEqAs!QhesK2+kd5`3 z3JD6FKvYL>O_^K*BG0X_90GGsOR#DzT=qIV@#x=|2+{6u93GEI)j5b_B>918XPL&JDGCgnp30nz5n_9dTO91F77!Gya zm!6mq8iIOMb`J*w-Vp*Ts{UwLcxiq^#{o6(Ri;TfPPdQkDh8ID*WQu|#BQ$e;96*` zhjmdm%9P*;3vjM`gT8G!Z!bq5^{b;1+eX@SwM^DM5&1-1KLw@@2zkeEQkLYFnw?@n72!wyjJ^5J29+pgHs*$I=&DsxaL8i;j7Vr zH?z~;z_ky&jRif{2Fh@8l_s!d!>6lrNYalQ@V(MKc(KyK_v3EfXN&PJ<1 
zs4x;pph#jkFKHA~k>%Y(LLffb|6gX}^fxo{l>%=J`3xZZHC6!y7W>UT6-s8YVQI%Ahg4i#X|_b6jB7cg5GJg8(7}A=8!!oHx!vw%zP&6at3)Nhs_LEZZP!&=fQG6$ zpsc)xu6(ewsFD|;{?Rgfnq9a6deKS?MHbcGkMx||=aCv1n!vNxb*FOt5I)Pf!2f4Z zOydSsnDwvqwkTT6FpzTJZI0#b9#b_JGMcl=sACqk-v@YuL-iN1~XRx2;W;@dFL0cvQB}(-7K`sho#i-M+ z9gT?P>aAznr?C}48EdJ}C8UU9OSOA5kts2Cd*@@CCi~-!F%lq1_8UUIJ2-%>-3L5k zEhq18$&@(6nZF1M(;EIeTOhO4Vi8S4&3ezHKi6tX*+i}G0toQ&+Jw-$5|f>k+!=VIWMZ^!k&_yQ&|?gm7=tdzMO^r zrQx*^Uc3E00NoC*Yt{V|F_}=s2N-k$D2qpz=r+nGV)<-0ihI7nixnKNXU;o#cW`#w$CFP5PL#`psn?1b=3F< z0V9JyW7Qp%`cl)rxlOhLC{W6N$wj0~n3L83PUHHmUc<}4gaW*;*L?p3aQ7>d*r3b` zu@y+s8-x}@R|j69a}0ezmjEgsgLk`+VYhs6ScuU3?NTM2!e$IpPDcgC3|o&>a*@nb zV&d(+_c29dZzqDvRZ~P)p^SWmQAuqc2+akRircj~2;^W8T+W?wz^HXHF78epnH@f_ z?cGMv*VCbz2H+cx2+g3XCa>NdENW9M+P6%N!W@J|SPKeMKt}i-E&wI3GiYuMwQ13)My|$v`c_5M5+{9_0_%M@}1=na1#dL!srF(HAsNMMFFQD){+skzSaOS zQ*5G605c3#r`?eX-_BVm&cx{A?14)9BE~ZW^%bIF_u=vSQ|pKrYW~UAX00gDxKHb7 z*FEc5g3fD)xQ=fgf_xM?+wg9CNd9NEq~%!B$D=4&+$1#(J+iiMYz^)84`rTrU-b%( z7}nI@_@2_e4`S~$vu-n9-cp;ouwy@5p9Gu;=yhV6%F;W+ZaKyRI zh0ufmp(pdt(32Gk^c~n^0$%xUaPm_dW|u~F3-m~-i%!PCES2vs_}Hi60XAS(&{KCu ziKE6hUwd`ROAh1ap%UAJtqWOn-9#Q%D(ERd=;Q$&3#re=EyQzcp|6BAsaEU(6$sRM zaG7g6Jg=Qo8?MS5N=B{fC-_j2g*<7;R7=EsEXI&tCjg@`8meijhz* zpb{vZGThG%3j(MFG3u3Dq!hBvr8`^9GYLs9H3F*ZwD$C9KL;HHm7`F!2B~(Tt6bu% zCR_^?J9i6|F1bi^T;VQ$cvHL8j>F>~C6)*DWV(;4w*^$XnrPtbP?p z0CMMJ!}Wk9N7hVlsvj?g`1Sb#IJ2IjF?N)BpBj08^7dsX+MR<%MSd2;?jCcU4pK-U z1H1;()ecRc=uINnRyb@6q|WNaDWBclFA&nI6Kl*JzwHe^N3A^WJ(P?i;s^*zrxGEK zL>p=C+v8u#X7Jx$3RES5BGpg;dJ98xfIEEwpa()2M(ln^g3f-ZqG?BW3?HE4UmH^M z)?mS_K!@-;+S&oIUFT5EW8Y5dZ9hNEB4GgabtvpZDR|#`b|6q@K`W6i%2prnL)rD{ z*B_D;GJyM!O3qHcOlLJrEoPcM%agPVG=E{^mIJ&hL}(chZ-3G-4kc=Iul%-TG$Cow;+&TUzjd@~e z5LJ9Q?p~_sQ(;gB(;Okz&>0kYYYx8k7I5`ELKUg{OO*XkVMi}NGaERH&($Fwmw;BF z_+@_~=pv9L7V0rj1!cROwh{{>rs(|>gEd8^e*4q?@4xMBSv*x?JxpGPVx3M zoa`z{%5e7mrI0(F5gpM+_Is{r{xp8S2;1?zzkH;QMJb%F+T<1A+fvmS$D9xx= zzI&cKO72WkosJH}I7r`U2dx#FUn|bKYStiO*Lko2WN%Vnu!{iuf%U>6z=2j(4N06p 
zxCsKRa!^T~Q^~js;TB{G&~)T#(-{k7iK@J$W@ewcoWOjbc}mn4cIL8a%Fs{@@IL5? zK^2510*GULDVm&3U=_waflDr(_9O$*PQ``gDu6^C9NKIm3k<~QZ}N{07tEDxM|0_s zCnY`G>!qc}xwd;*5VP)TFRx~n=9+CJ(d-cybU5Rs0F$}Iatfa z&BlJ{74VOG;k!!!uf(M+o(rQZK(~Rc%tU*$g)e1a_MKQe;Kckq5e9o5R&A#}yk zr6LDt7}=xG-%H5bwl?J==m(N_w}i*&p?CS2D!|KuAggeYVB-L{ujmStFc1{ZQAdfC zJ~5l>(R!;(s~ninhFkV&L)$~I3hQB_-54my5vT#6bwK^DCOi{RL8#)&alP6YgaRId zMlS4Nj8&gnHdLX3xm!)nw@G4GgtQEp*Db(nNQq=LerU7R)Wid*TtG#g90((Pkp`MJ zqRWjYfSBt*y=IB{Oo~X9A=t7B+q;jhnbk;^j6ziq!~#=v*4o`j^&%7WGe|At32{fu zqT4o`zyXkmX$MHxiLs>|+u3+kNKb1C){;c3&a&#zJ+$a-<_eP*jRX{bae(dd;k&0G z<%{~$nRFqV5J!@#Mfs3`dExN|TwshdoFVSg zBUJ{fDXX9jWq~!rYXr#;6F@OgkVI|GZ9~6<9*Emi(wvp*&h?axR5xLQAsn*);4S4c zLjigM+02RH@}+k`UL7kjjKR)^o&$1c$x2*?h4=nX$4<>o71Xb0nPtdP_FR9Q+2iB2 z02Ho58npOyVtlw}sGrvz13g+pP<{ms(&EZ;(9sIqjPB&F7>KcVKJE@5OnT(;1RHi$ zRSIXf#VK$AYO7rQI>Ea?S1rDi4s0c1UWxjgP(&#@gyb*7=hHkpFT4x^%AyEbV306N z@E3}L;PKVkQx3R=z8%EUffRJ@)ekbb_$wR0nGY1Lib1&f^QH)f7LYPdX>Y39Oy*KW zDJI&EEU($()c#M9mKu^r?Ls#mun98^&r&%jk{$&$=*5v<4S3~7yqze1{Oy$~kS!!1 zqb4&)tUvYMVSWHokB`AWke|6fV?V*ji5<*oL}dmNV}6E@%W#swmbEMIrGe@J%9Pun z2>~6y@?5_l2%9*sk3jhxBR?`F!--TyY70hAz-Z|I)%K-zfkVqWk+1}FJPGLW0wcg9 z%72m_1P^C{xzhSZ1Jeo#Mo$035%&Bgk0h{Sfxu`H`c(k}g5JM)da*yrm)L70?2ymaK=4MtPh%2$Kf`o;!*U>qMP3vo4xlOx~tRvfC18 zp6DfO_pBwG1F4Pvr&*gw=ZcQvU6v|~cg}hMxY*x4snM|MjOrceH(NS)q~Pw(LeA9f z$T2F?j`R%HohG;AZjxGou5T|(nI5aYyC(Z3L}vs2`KBaglH)4z<)RRp7!Ne#fcY(g z=b4zx9Lgz)k|}Yl_(ReDGdCo_zY@}csE>V@`LY46s?GSqg%5<^iFqS>A7tagK7PoX*6c zWgj?TlDY^0b?A-k zX(ht6eSceK#>6<)6;8y*-84UHU2v*W-BE(-qIi{dD~*Kh$H^j;iFeO!yUAC6mnf4| zyh|)>AGhQ!xY`Pw#JAm0IzBc*^PG3~S7D{UwdJd9>_p|I5*88@s1-YO#4CBpTCK3e znE^w&u$paQLKr-2FPA#)wT<|asP8t|f{B+R+you?GkYO>r_!yN~K4_|s ztj4$H+}n!+CKBSO=q>b%#folEann7 zsrL96=mA|gGyAOl3~n%kVl-Ou2FvNg zYvSl<`u8uW;<;y7GSy$y1^?HIk6MUNijQ3uJy%occS<;3C6H$7Nyv%Awu(|f$@(^aj*hTo0~Fna&sq8^SeaytLBr; zx{#8U_jq?W9WD3j5c-gWj2$N+{!8_q4m@NQG~8VHIePxK*H*siRurf!lGwrVe0EnI z&~Z5h&9I-DpzFZDTNyTy5Eye=NY8dd5ByrG;*lbOx}qnV@-CE`+AQp2&QxEsPe049 
zRQYA4wmp8s_61!p8Pa7estr+RU1?10YGj@W?@$jkh>~6&>D)10^mnEs!5ttTDp({RLI8`)1aNNd zk3gdPSybDu{cUG4Se5OBoAzAIz9s$XNbr9m&x8ij;&Sca7MPv z>b;}rv>{>C`zsqCw019Q(H%-6-iPxct=(uRs`Z>5=47=i=jzlwX|dKZ&x>)pR<<)L zOEHwW`#b0J|DTTOg5MR>=jm`o%W)}`AowG*rK6LmF%r!6auc+&Q+L#t(uPqM*_oHe z7%OYSXYZL?hrkjnAqhMBUfIoHhkh<5&=C*h%r+W+gEY2vR z`}fzoRW#{Inl47O4vrH~Lqn^6=}nEWs_({Rs4`>uZl}v-=$owMKHCS;4CmB{+D60A z_-+Ull(1SGA|^lYrzn!` z{u5Fzgv#mY4;VmLY-=8*Jry2_%>eDY1cUjJv6;`8WYr<`a#6aOS=BtBdyOsnj^GA?7&v#^5m&cn~VKQ zZYs&R4w$I2w8B2tc=Foh>hG~Rm8d@>&p*hA{J~pHH76ylSV=}Tf!E0LTql)o%5tNA z5SN=MC!^W@b^3~x&=a@iYa@}KE6m{WESQ(Dp#uI*@@8UDLp4)>#D!N;%k$~hH`EqS z$EYk)ydy-4I2ir-1zp>-SM@*jR1?7^^YV!s(>D?ZyR(nV*^sFFYcd^4BHqH^1gl4N z{}#5t=_E;Fbb#b8-{s9WDM7*ixT|-68sBexPpF-cJ_WvL{fWZ&C1P7?B7LGqll)Cxh<7=gA4JK z@}g+7BOEh~Ez#Ov<7$2M_3lNwEUy&D!nhALx9#uCQ6%u?mXWc{P*Wt}SJMo$JaZj! z-0Hy27Dg~AO|jVO}Nk_Pb`zC@F2xv1j^$M!t%Ek*>Tr~X5ZhF>>prf8fHGWWppJ&elu9>{v( z;I|K%(`S5jwIQ0f%1GEeSSAHRdsKY0E6TKg6u=dOu>!4&6Xz+E53JoRIY_#hzAzO4 zv^J6PuG7ZI+jkINnPppY55BoL_VexWJi=hX%UV|KpGINA*22MYqS}Z|!Rp~*Ytq>) z@NKJgGHoZjCccpb;-6~tUpnA^KUE`ft3hp##(u|p)MADtr2e?PQmOsWGGr>wafzo( z4@g}J4sh4xwn^OK_*SUXXo6rDw4>r+AzV%MG7BVWaOMZcU2VnKD14Tu^an-ahC-K@ zw#{Q%$998{M-tLviv$^*TCA(-w#?Izku3AS-3qJfMR!-HUMDdxo0jQ#24KD6=ir4u z0qpM_nW(0{YcT4TwGonQ6pf@F8iu>Cev3sqps?vYfM8o?$go{L#@$;khclNyJY^da zWW8LT?G{}6{4k;3OqjfXS>|eZxtD&kIiftW*4dS|{~wZy)ENiN>S!Wc02K3{>)6 zM+&4=tS`h4c-^d_*pL1ifxLk<19*>WY>qV$k(lJlLj1i5<9cYH0j}P3m4@8dPks8e zu?eYAuHlqjEqI>9_9M-|>zyiO4t7_qFH#(X&w!8(yE_oCJfq%)od~|AO0U7G`?b%I+>@>@`sNht4QYxWl=2+wDbGc=w~ z=dxAQbr9wF_R(~@M6sAG+AP%Pio4wpK| z=jCLpGejbWr3vv-WXDdJAZBxSO^@vjh0z%;eASKy%%)MAccg84{nfIp9lu%HVN&2Q z+v|@`eju}wt~RnI_mE`~H+f76RKi(bw`-AdnSdnETC1I$@^i6EWTY}{wUaPH8u;gC zRRIc~T`vZK_V=4C(6tp;az=E9$`chpZv2c`W4Edh?_EPr!#BnORn2n-Emk6P6=XzY zxvG7Pk#$O^{>K*L(PQ(HKK%v2V%6y@ft5Zg8?G^zWN;l`tylBXOLA_ z68%qMcoN=K^$>g>bkqK;t=1~fA>s!*PCmSyf|vM!kP?kL@vCisk96kcq$BnE(k#so z%y}6Nr9ePKb$azkjaJ3+YRP*SVk7|`l<&ohZQhqgeBF*H;@PStyD=)q3mL@kHwtTY 
zK`%iU-dR03l*EumJK4LUl;ScfOmTHn93Q(H{?r)!>up`|>nOa;89>C1v}KnGxk_r5 z$Q8V`g@H5ktFMpJ1wQs8d+*P(EBaEKS(P7_4MHALp@Nv(S0;}pZO+L2+Ve!-5epVt zU1iH!NcL}zUK}G!+lbkw+j31ie_Zv@yTw3h$;XYy;U#IMyk>|#>8pGH0%!T2%1Pt| zJ%#>!NS+?RYvh3Xw~aon5h^e@U^JL-GXWj2pQPju6t~xd7MNOM=&B74H|DKCF9YTw zX~S#BbHYU2HH)8PI0IcOM{1gpt8)8VN=Pfkn+c0#7`6n=5;rt;VD-%M>jEJyE8$Pa zU;H*p7`XIU8_o2TJo}3vy)-vGR=m2MApORl)as@(sN$g2+3rYT$l~DePv@qOf`(7Y z@d@Zi#ohSl%e&@ka~v@$!Cf%13RYnVPeVATg~Uwy3p^hHGiR;k2+`RrjZCVC`UsKp zuoD=ye#Uv(8LTZ@+Xpy$^WW13(TP?8(ClQXzx65>x`ykjZiFD$g&&oGHq3a5k`_ z2aP7vVGqk4?Mm6+XjSy?uANyX^U6*^{McUAlu75u1Kd@(!FzFJ$|s4qZ$XwidY-~M zDx-KT34QJN_!FJKC<>O8?rbB+L?CYq51zh;qHh+=KJDviSIkd{=>)rTy1sHcokx35-S`Bs( z216=`fP`4_&w=XU{YY`U^F6Ez-hkXQna89Wec+2_E2nu!US-t;evn(qXWy!nFWr{XMM?p>yoH6+PWZ z#_$^v=kfg+qDPWJ53h6j%#VLRwjuEG=bcP~Q$;^c+SkF%l&CAB=Mn%C+unyBCgr(_ zk*YH$WH_Z?0t~t~qH}itcBxcecL$#p_qy(!Sx(i$8Epg01q1(NhgmqCN0L&h$soVz zPZz-iP;os`T?{;HeH$VAhnw}MG!+WMU`bf3o|p; zn2s5$w<$Bni-0_{W7*`r_jQ<%Rcjt-!^J4JX>&O9>Vr?fxTp>IHip*zyxi)#Jn1NP zl9X+{@_hH$H?G$ejII~%UagiJJ?|XXMmxFvuTyNAfqru3@i!pNIBTCUq@Eq(Ho(0YcB%L3`8wUA7l4VJQWDjrr#@Pha7;USuZL zaa*HJ%kI*67)^m3HfzaJ%_m@_v)H>`ZgercFy2uy#}(>bn) zHtb?htl2+X(5U?$NC7WGE?Wv@Ali}LRkDP^mHxDZv=Z%Z4x!V}+PT3ewP+(laMI`@ z5#~#YXXOWVEBsKA|HYw4z;KfEFEWujAnpSt8_c(iw1hbxJQm^i6(S6?OhXD`_yp+3~9SM4~HQz3Fuh{}gFQJbj(*UjyTAZ1P|iF{VUKP>8KRT#(=eI+kXZ zQC4*CLoU8g!13mv9>0hkgaHW!%6_k=!=1D1G}T{NEoxhcT`7Qis%!uu)EYVIZICHc zP^CRfI`S{K@)QB-)E|(9VUf}6+-4Ocq!Da;LQ*~Yi2H}{_YG0{z#K&kiIKAda7(tg z$?QlpAefi$N$3CY9#;MDwcctTcx)?*4u5A)>h}v#$YJLFO?Z*R_SE`IQabRq+~^D4 z;nsZ6@=w3^lQn&hs0nwj6hPCWY(O@&=z-PeMWNE4n#nhngh2y+ z`PHoIUgs$m5D%R-eG!bL>hh!EfAfEBkVwxZA@i>~cJx5pXnWh+=tvTSl#yG`eea7g zL7gx%)|Q#fW1I?PH$MM<{Ln~1>Tsi;IO1#p#7JxF!C(T{GkBcxx9o4>EOZ4ayqFH3 zP5p#SyX;TQ4%VEb7RdMkvtauTyF2{i$p32xaD@IY{Xj@3ZQMcl+sED`w5&N7MPv}^4mm1e(Uvl)>vN(2J!i(f4jhba;Jl;#@feg)@ z?`7wJCyYpZeG4ffc93b&;59Ah0Znc|Xg^C&nRH0ZPU73&`!Sg9DLw{~sH9y1a?D}W)aV zX@aMwu<2jpoyf`XfmDU``=@?zgm4dXWt5D>1RqY4Cow$j88$JETv)P9BOh`GrP-W> 
zm$?6F-=m>xco(V#k{3!fyIC4jptGtWQIZSr+Wr^8MERF+ngji&$QCTkSLMQ7r@B$vg6@!6(yYznu`|5xw)9&rTRs>W^S}{OUK{{PT z1f)wEL6A-XCC5b4HRu#Xy1NksloT1dTWO^GJI@U6`o8kLzxiv~-I?dP@B74cu5<2l zoSk$PrZRwywy}obOBn&U8AY4z7Ieote5YaZ-P{*+e5t4F(q_i%=i0bNY&QHQ$lJ58 z4V7gmLU0Z@J*#Yxo(|snvvARq{9ZG0)uCJK2$1sAE$h{I3%-muA$&oov&Y@zz!x=a zP!Ywq2d*;&*JX0)gtnby)v0kqIc&2htw6YV4K;tNt|PqO{{TIE&;f+(5a3_6)-Rk_ zJl}WAfGenC@=adrz{?e8SdjMN?`sa%g2>pfVW=+uz#^~hZ3oT|5!yrV3H7z#j?tngN?HQ30<>p) z7xlJpf3Y6?)&ahYoC|x)ue%^)G_d%bsTMien*2LcEdQk`}qMb~#M0Ynhp=WHtaNj(f= zg$sZYzsE%sR7(ln{baL0O{n#8b;sZc>ev&Vr5a3r% znLLAiX^WY(bb$xN@>hadkjxp0b2TrwJmdvRhV=tQf9XOidJ(CRz=vXlDz-lPxTH%a z46=hP5R)N^H-Xu1ip>d5OKRn;g$K#U+m0#(Ca|euoQ<#?8E1y~?#U3Nt#}L$MRIJ= zi%$rdriu#bEP7xa^MSK_DK%2}edMOW!kJ_G(xGY7$!5^=AiCZjx%n;8oCC#E(Z|y{ zc7f^GvG~(*?8T9%EiQX@ehdXDePq>?^ch)PMG`Ue#H-T5Gw2h19_~5j%~RvCA2~tl zYjV9Fr6J2HGq=@-j!8irKr(#Ko06ICV$G(I>LH$#=u~A4#F`2T49gsP-)0Gkg{=3@ zO}9mlMcjlSUZu>!(lZooj~$9RK(N>QgLlr}^hhlTWmQ<}iYWZx$5N|B4i$e6B-&Ka z-|a1)D)ec@1}R#2{z&FQLWs*ax{tN`u;bjtsMj1HFZz8LPx3$P6Q|D%)DPmybo#7? zJWixjbptFNsKhg|tjK%9AGlKBeL~X|3EMt5%=6wKwA#Ay$vgEJuxTYb%83wE#Tz{2b#;XGWj>Ow|HxV7lhQ8Z+XY^Xy8c5|4Fz-MjNE-$@?IHPw`HNAfpk*Po z3_DQ%@hi8W&CRtVA5$SC7n$4vniDQmlExqYpoDK1JeAV?jnE}$9tTp8+H)2~$SOd; zo9OKPbWivfd9MY%cnl0TpU#iT06TO=XTOx(l-aJ{92o0P5l0eZO5F)^0qbqV>`VZ4 zTMtW6BdrioC!X$sU+dtyy~!Gp_JUr@Yq*@!)#%F_r7vc3RXITiA@346QM+ zU>n?r*ZimNfY(yf#_k}M7hXI-da^w!Xou&nPBhi8P@U`_I&WoP2vMfKL>`2?Ib}2x zy7Nh6K|(L@VD0iWI^Q-ff%66sWHj0*q27TH6(bAjH|^r@C+dN#Ha4|M#w;bCg*?w+ zG+II-pPDiE1feJg(xssprdRym_1!1pV48SE|2!UmgPYQcA}Y@O1ycLIGgu7Fpa%#b zWTWZ0z5Nhr(UiLDC!~E93iWX>1RMjGJQ33Y7yDm?kTo=RM}Y;z+!6QHOoSHjU;u$; zfGK&{qm?0-M?%vnW@TuD{CR42sQ`<0aiYa`h&Kkdh1@@&hC#nYQQsPoEOB54{nFV*BQIbUpN@_gw zj;sX^v2gW(63Bh1GZXC8s!1}23Zvg~L&&Rj$qVW_OFcuhw#9qj43P@ioWP?0_Wj7p zA$#zqS{=zUVb6R|gE{kAr&Q7#eKi+212JgGWmpaK2ihFka*zOKEc#-*JyaEc|K+i? 
zMXUTho28(k4$^bAlyD+Z>0(Dns7%t04n5WEK&<)>p&u6(g1E~xwy3zKYP5e`0?qS& z3UUnQf-6FZ8v4vXzY1qK@uVXh?RDxY5FsNAz&r|G=|ze~3U!_keTa5TG7Zt>5$knJ ztd@J{U2d&2{$6szLXq5T&hKf5qHj%Y>D8w@)rnQ&%2Swkp7b)M+@ z9{e!6UK}o%$I<-N4$cHzw?AwIURfi3E7Eu33a?)l;A1HM7^x>vb52lD5eb9~U|dd# zH^M92@%#Z(sa=%pwy48bic?yAx39g1=~KYOy0Uf=^e0~COtg__Py{C=A6ot7Lm`7~ zZy36_gRr#0V6}M1i0(uYP<{-!ic;hTQunoTJ7JhggzSJya$z_mH@5!5#7J6D77r)(xg8y{ z7jkvix^tPb@$q0$r4~_`s9O;Qq~nHoqppYu8DIg<_L)D^X+6y&wii0CT z_-Mu2kjzi`2OtLrRBu$lw2g^7h9ZG!5$98?dVRc#K$rN)66PjvC-C$xi9Xg`3_Rl& zI}Xm|)=KZ4!o#xYvfhyQeMiO(OT!YJ>$d~2?|tjq9-*yC_lCd5vG*CRn6_uoc&gS1 zdaz>v8BCnP-a2DO0!NRp|K&-67GX^6POTTUmj-e7yVUGP5Dy%%GpA7RlS{=wnpItM zk=FL(HL$CXsJYzhVsj64SZHZ}eyo}s5(P@2b~Tyxsl&6&-5{mkGYp;bY)(f&8j5hm$uj<8N zfB*pJiIAeh3c!XM3ruQ2CuV>GNRiMkN5u#ZaKpOlIS}wb3;R9U;EIYq4dWalWCnd@ zXeY`JvN1t*$t3RGKhhO=LnN~pDN~gv_qr{*IuYDbvpc)zc+e?yhL^0@{2)Km#CG#x za}tH9ICi^m&r*T<)$*g8d7)WItDVn{pDeI7C`WAfK=v|lNmLE)4jNvg40WpN6<4lZ znQ6zK0T#{N{?T~M;+{5i;+1}jho!&~(h_*>3oK6cNQliygw2LbbxT&uc)`1OCR@IY zn|praLXxX1uYXS}P;FbbhpUmZ71GVmz2d|OdtV4Rx zlC=SBcGAoD3wF2W*Oc-kny8suj<@>RBrY`xUVG zX3v$mf=ZaEBQeVsu}a!>f*JUaeA@dawyfl>h}H1_7x3GQR>@9J&PP^Sv1y$|gY5%{ zcZT&VH9M?C{3YU0s9QJ_7iRi=jhYOC?n83FY(JC3#78Vn>$iva5$wfxC_klcYaBSV z+!D3szkbt+Qx&1AVF}2P1g8mp{ES$q1xB#E?+UhGNWpU0toW&eW(gOBtu;owW#m9; z^CJpgn`7lnd=D{816GX{KiUi~q4a26kNx6*Tba4Ps0_LMka2=i=u)0_QEnws=wY)0 zlm$X#eFpv?vQm-_GS`=HK|bBaMMz;P7AkEaAzd6sNd*S~MtzFNU?jwWA8#TwsnC66 z+u(gx@n()T^j7vZ8(Px&PFLvJVhZkZPCw$2mZ080} zx%9bb5xux#FKVBZ&m~s^9<`7&Jh1sIXV6_bWE4Q?Gs-0Jm;n&@q<%(Ib|fbRQi_~V ze$-L-(c=#4@Y1uP8I^ddrUyqaZm}}GkRpO&4v$Vc-jtuyM!@lr##zT-GKq6zp1_)u zYkQQPKY0Je<)-XMRRyEuFXGC|&=vq<4tu1lULPA=?0xfv%(+fOgK+{6E-mUZqBvNJWGuE0$T!K3k5aF`~8R0bw2w zCMw~M%?R1`yW;jYN{s0WfyNYWx?Fw?V-Yh$w?Uu^h2A!fowIXLd@+&dwNCPaCiHH4 z-wPu0M<%SxbGLy|+py+~Fj)2o(x+o4A3l)y?F#Q>U=WtiZm4ubG69%g9M93Mf?{fI z_nQ6X#r-rN(^N`(F?s&OG9(xe>0RC@@=xAp?k12T7nSH)+1#Q5%WvkcFy{z`nj}U+ zi*?}iC;Ab>L=FChG^qCw3ks>EA4ml|q+eaIACKuBkYl2gOh8Qb%n>3e9hV)Q+aoz1 
zfw-5^zwW2N%}-!7#A6>p6Mm%SrNeS681w?tmw3rfklPe9r@S}6XOuq!YLbiuKz7eG zA(BVOGUw$ad%#_CV0xH()z5x{dr-AL2~Mo~L)y~7RZ&wQL>n|T)xW${jL=aq;N!MD zLR48gVp%4vXnfeiLp~OTD=G*Z_VrWD4gQ0d;Z;zmJRj&)Zxkkgg!fgD8%l!wR2$H} zjz!^4LeK{qgC*DYa$E;IAj*I{j%GH*jv6u+;YDV-C{;1+Z;lx0GF}W9NaubG&EwN4 zm#9zV6+Urt!eyD!>VIa#(NQNsbD@P_UV!2k)}B5nt>z^}h;tMw6%2+dgu(E8Na?Q= z@LR1&ag3$C2U7Kf+d!5-H&FENAJFR2dF=yo*2krkQZgsxWk0sTI|Is+LGA*fC-vWW zcu|ZKBFvXLu-<^4FCBXM0u5Tn29C1!f}`a!4eBqtNBed-xws2e)~Wbd;Ym}6t)lmh0;Q#qMo-d5$3E;)cD(!g zK_Nmr^3a`CPN}?wL1Ka-L;#o%q{eLO;Ng8t#tRUefaM~|%M(M!z~Hv>#-B&KyOnyT z>zmMo&=J~RC=6r(?OxDH&QI7EhK>Cp*0|OU9-0-Bj58L<=}bt{!)1B+1ikwI?r%_t z-S#_Ml^U@8PFabPa8U*zCm=}Aa{@4OhHV+K3KqAaNE6Wd3o6Z*q14C^b{+pSw8xb< zp)PYh*%VWE8;2jPzDQ(mbMOi10<4{E$%z>wUq&hIVRo@KN${WW zT|{G{r8y84-w%a=?HL=j5jDKurDwy4d=uTd2aD)dx!3%I^L<2<%Zml^uFxKh{GZ=|n=A;+d~|3Vgi#0z z-gyO(u4`kXmZH#LU&t&Dc_&fLKX3V>daj8cynGI>h@rV*@Xjhy!gJR~7ik=Xlq;M= z?u3nvs)`zF>YOIR-pn(u{L|o}1;C9NA=k&g8o_<5-g!-M3Iq-;VOZU=i1<@cK8R=RdJn>t^+6|ulcyLq8!9TVyepOjz6Rg65K39q zHoRg!8pO05O3I6ptp6eLH@-J*j3s7__RNImt5DZFkf4MlD14%1_KIu4=L8{F`_QBi zzjWWI=8}Xuqy1>4+-<(+Of?p>tDf5<-uty@$WJ4mJz8&JeLyP|hkOAH1Q)ecJP6GS z-h9x2atOrnA3;IJ9QWn_&-cGh2J@bS8HeEB0;JaNAQKN^6i3HFBtQDk7x${fFQP-u{|!_HyDePOSLV3*?n)JQw38p9*7 zV@Hmncdv%RchGo$+9y2?s1t%_W+orsRtG?4gmFI>W&DpPWFbWsmt(QvZ`EBd4&>g> z|6G9)8MMfNU{ZrSHUqX;0~e9~`pTZGDCnt}MxKG4SAp{WKl(ZbX~B(n)}UxJNGRlY zT=Is?mcKq%_vmUBq;>n&hXASO96)~J6nchm@$%Q__#Wmrb3DO0YlQrmD2a=@WLYN+`Z9@L{%X>h*`X;`p*E!kp7?i35 z^v)3wUXMl^4%;HFl|QUv)7HpW(N%jG$OYJ*)6Pr1FNs_>wxBCU(9e;ObnFxjYzw^vUXUNP=Zo{9)-g=vT?Qv%RiZV;~1lav9% z#eLFw_YhWF=#Y@3?3KDylp zj=jpggbZ|vK855ub|1N?NkRuB$*Wk!`zd;LYzl{NYXK&QUM{IH`QhSkHicmipOd}b zD=g>zely)SA{d6U{GVcsbnHZGt!~y5VW%FQPS1z26u*gQ$R1|(*5?53vV!|k%RglR z1poF%ds!c4pmvZ370(pH5PAn}sy;c4HvMP)d&xiNi<=>DVB}q9qJp-GlRt4+owtJ(Z!_x$w$U2PlZ7ryVY?izm5eGdpcgG`lyeS@^Jr|iBH z$UDu$J3ZGGaJp-{xux{HA&B3+ZA-1OaJDvhg&^lc=S0^m9skyL630bt*J)Os`*aRT zr`dM5dye!^2_Nu_Iu;KtiHTuam(vksj88d>8^@fVS@n6rmAR;w5k(_!$l2D;@1#Wv 
z?zGEoTc}KQE$pn%;|ESTjeR@V;<0J)>E;!!-6sqrcYp47I)9-^8Zl6@-q+KhVLfu( zzed3w5EG4sFbiY{35!bcj0uqk@*gie_oH#L$i@aG{a-eseWk-ZK9Xr45*AOxd<|ybvs#ch#@6dNh zv&*IV0*V#vXOOsaH_g#J0(SMqmo3jqD0ND{`!tY$>3A+4CK(rBC-O#tuW&tjQ{oVz z)Gd3mE+KL{qyC8=X=?yk0DqwPMDN2qK=|B(W?_sbSt@$(Y;Al60ThM(Zt0TA=K>zPKH zS(UCt+$oAESX^YjCq_v|7>_BV)>;3w^TEe^L#&>4ymRJ+!LQ9}Fi8a*NNvP>AHhyzk zxp#b$Lg0oEmhA*V$!47Mj!$Icc0=sai$S`O?zTaCRf3$Lkl z@-uD|v994iHjy8hyZqCu+k-*3%;6(?*o@t@gQQRXZaAs36PNkK)^x3Kf!!gZ;!E@Q zo+vijP}1$Hhqd?_+)LK}@sbod-o=wv?JqShyMB&q+Q~`1T>LIxP0S{ZbH|HULQ}u2 zx+W9sDf0x6E*2=9a^o&*c1W3RO}_#SzC>nSOK0uu4p;R%xyHqlT)DXWt^)z(ELe$z$`TNQ z-zD|4fzut_(WHu>Pf;obOWp9whBU8{1EikM@3tE$`0U zS+ymT4wvR&UXLU2i0VFgg!@v3ZbTS`{aXB|#=tC>U(SziVXvQu$`MMI7EF~_-Qina zU{M^v^t{NoTr4(GbBpL-xi9&k(kTw@@sKopYsG&~iK@bJYx3I{$C~>^6K!+^dTS)R zZ(lmzjnywS^cVXjvq)2E6epONY`i;SmeMrN(;vil;&ocb?+->c-Bq4~t!5S;R=$(A z#jaD#(M{$)xdh!;N!zh=HsE)K2mj}9ie$us6RB^c%-jf;o@{&0V}8u4;{;nJt&#Gn zRBH9s4d1G*IHThmb@n{o_`mD`-OpC+CX2rOZIht|HJSnxqYG_!Zgz7v$x`N)r~7J>gt0A6~%Go_*irxF3jl2kMjSi)yjGLi> z^v5#Uy^AgkGJx%0+SM_1)5EeC$Liux2-HELaHe$z<`cqL==RX?ylZuKSNox;DNUH~a8$SgG-c0q=aeD!0sKoiBsQU6HM;*I%sV&cY%H4c`{?QUvME1$ zvs2ov{b{87b;Boo?FyGB3}-i+Tggkd?w3&nt8HCy%g(>OD>rk*R%D2!|Bs=L5m4qW z3`8_5%Qt0Y@+RC@`1)NEu7?9%1OwqRp$fy{dYn;H{EHG4Yz#X0{YGn^U%^7xWApBJ zd98;2z<&iQRqr07D)4`yCJ_Z_^)!SsP3 z9z(w3HksfC=5is(`HG|V#su+>^X8~GCHK?Fqwzt^G5WJ}%!B=9(KIr(n`%~+# z!ib{si?HI2z}|lhkqo56O+8Qi!_cfF;fhRZ%xhpr;twFsp)11giPAcFLHk*MIzea2 zO0d?kB{IbF>U4eS1$pa(z8AI_WPOg7IV{SVCk##OF7wP~Eu4Oy#xhI0%S=GZgbw2h z@9O)3g+huU!gbn_mID4rqRXPG;FY5eC@)W3wUx%=OX6oGO)Sy$wr8#-<%5-(JD0@f zG#I%*jZ!GuK-nw*>aR^b-HzK-By{>mcOjOGId}~h%aWZ&YO965=793T&{RCqC>c}o zhjG)@p#k+?T}8>WUji$elA{!RnB&_Onw?h1i6@=h=cX=X&C)qF=SYrUubup5Cq!)x zv943^KL#B^XVvU8FH#S>t&w;j(H{e)CXgZu`@v&>Royx}7Lf6yJ=vY*XDFy)n@6O& zL~O?;vQvV}L_85kzqu4l4UyAQJW#5g)0ODIn0jP)UHj_&an1wcx?eZ~Er66%JwD@` z0-G6s^!-J(`x!v=VrygZx)LLaWJ{*1Luo^tMmKf}v+ua3J^$86P&LQ_595ByV_y0l z9C+V@%pIXO`rEd>-y=I_PN-pPoZy*{?iJ%?1l!JRcciKzh@;ub6WT~-wss#30R6ko 
zzdArOUmJtHaA%y7pY{&Qhq{yq88~m|5W?CIMMvVj4+cjJZUL%~C8l`6IyGmT)tSHs;aNW#i|H*UY zO@*5x?3=NryZ{{oZ#)&#P%}uAC0pA5UQpIp?D##>;7!*n+1&@7xEg|kB>9hjb88$B zma*=O#ukIJlp{r3g;+if%0EQA+qNb&TNtVX#x|{%q+J_FM0Ji@F*lLh1sExgNDc-mr~STZtWL zL1xBA-373kd)dIk%@ym$=&OY;Zo;=66xv zsGA{C+g%qbrS2hC2ywmJc4N;vxWQz)_Ocq8Glt1NmcsLk%0tYug$Skfz4jza8v7di zXZVLPsXuzt*P=Neoj7^YL_00)XuQjYVq|f_lQh}F!FDlA0Ukx2Ux8LO+c-`Pab4YT zu8|EU%}ar?Y~{lRKWJ~Hdg=qOF$3;GZKf_$V$&lEmT_)L&OJqpFZpKNx;KPbcvw@B=C;9rY` zJD-@JbiOHY-ASQ^32LrQXiUu$lR!=&gh7)601G%uoc7RlzQ0VjHSXP96Tzf27YW}% z@qKBlKI?(bu3L(cf#lcyq)KMU)ea2nQfK@6n00LK6stkWGTr@*P$(q*TnObZKc28T zL`(ltML8Tgu*T-Mf2qblr<+po52p#+kp4K5K-QDiet4KOmC=3q^}2lt*7u%Z8>WA> zBK4rMdM<=(&`P9r_M@}Ma?zcUm+bh3qe^d~?n4}N{xp(7&SZnuJK}BPp0Zvi)dP_t zp?$k&pZ~cOCub1i|6|Avp|v$qREtsvDy(kdc&qC*Px`?Mf$GxTPQN2 zpF~Q5;Gdy3?-x-ZN@awz@<`g|A84KlGjx!uW$Z{!6h2%*v;S4Uk5I!bLz|Bhwk%H< zSg(`nmdpj}sb7mdzq!afINGFRyYu#rP?{uvlY_^1yT8-_60Pz;8gHqSi^GGV6qbGx zsR8y8#2w0sr)^p`#^J?LHQI6F*Qmv!#yXCzL_q$gtD}RfwQl z1y^_EguA)LY>RjarI#~Tyt}yZ`OA$Fg@6vbTV1{Ayo)axt~MQxdqH^kC8SpApFBce zf~0CD8`AJB*_%F-O$uoRE&MA!`^xmY<6(qZdE%lt>vgdlQA+LR9N3iWI0Al>*IjQy zC5AX6gmh`%&Q&fI--_|5)$M+%$@pRb=(@cdy**_@f?VsfJU)Vs<>AqJAJ4&1FnTUL z&O|7hhi5}U#d?%CQr?}(=9Pb8S42_&A)7RaYX`IK9gz=12Nj_uwuZ=k=wkXBxMc8L zdNdGYZ4Ptgz4s|u9zFQV1E0;bqo-)lzmJ3e6biP5ds#J6Cx5ly?(l{4L_2BBZ^YG} zNWmyy-b=d8l6SS?!a?g{?J1;4Az2#k>e@W{WtzI+X6wJs3Iwv1{!Ae>^Ja&xp$ZhI zPfFTl%YN5@qBS6xA&@UubmQh7g{4Bk^0O+l?5eqG1G@HyT7{#C-H#H8TuB2-^zr*Z zJ765&9Oo;01!&=jIjsE_>SXCBqo3Y5XrGTQHiupKJ(}&wp5BsTJ%X$F%PdFEWJ$DL zBZ*V=D@0zmYx!O|ZY|f2WH09PL{8``brikJzj7s-=evg=#z8!!a^f>qOKp#!%V6gGnHe$ho+M--`%o$j9?|Jp`*kFdzr&wP zt{<^)1Z6G=ewK1aYWM}Vh%^VB_Z{E^SQgpYADCEp;r5KL8oB^eFr^ z3Qh-?sr4HAGUojFI|j1=>hUzLi-#Kmw8Iz=nUeh^@|R{>(+eB{|7P}-^*T4TlI!xU zZ$7FQAR_Qbv*S^NH6rqjWpKxZZBFS3=ie9|Ik%Kln9j? 
zm3d{eqsv=I=LFwo+oV{GA`n@wVpJ!f@<9<#>V}c~Nm}Sg!NzgO>+@L@UOFrLtsW1k zDKZGU=z$y!=dfpML%3Q=AG8XF z3^43u`Ug5)hZ_GF2|*smuYv4ZEE(vRK)u@{*YX^+XT98YIo#L(F4GqA zx_?Zo8Ol=L??ZHY4y_2CKR%yET6Wb!u!G1UB6qZADv^R72Nk$} z_es0gqJz<9d^c5MjCAPz#s9NIU>+AM7GeUWnQ%Fd29gdA_qCxE4|--pSZ^X>{$Z5N zl3ZXPk_*4x#?DqIWIE!DFuud?J4?4j4VA1!*rGFqi|;Im6Nr~Muxxc>e3Et?w&EiR zrAW8%>SIC6=t)mNN150ArQk9+y@7l{zxv1tH0s34JEY|iPUy8k=pSh9zM;-Q%oYU?oupYUy730>^oi%b_m3grhn_lsx^Z{y zU>D7<98L0_t-A`doY2rBHR@Z^yKKjXkiI0^lBq6DtM4vV(?C2bEgnwKL<-ZG^q>yq zByA8tog=3ik-IL``v$C_Z-Cc^{K>qe*KlXIrxg_E(=Ua%jrSYaK#Hi5H2R&B?HY!y z?;zZfN#wtOo7px6fHwl^(=&k$9rjY-5$186dqjS2!n>jgTIpXe{zT=dKhWYi}wVI)#yD?2hfG{pHhOrh~#UN+DJ1a;P98Z zIKc3mn_*vs$Q^NUch93OAn(#ljO9(pR>sXU!fYvZ)x!IT;3-fHB>Z9P`il zH6$O1HAfPqzIgEP+K7dtcDvzsi)bR4Y7503VCkVh5S$}a2g5l+=R?raZB;O^Js!1C z^I0_=0?v2FRQ#8vM-y$iFVE~3^fKMqS*26Bru@l|#(Do>;)+g5=9dp@f9*T_O!Vf3 zU^^oo-=~Qrv0l;9QBhHk>FM=jJ<+5JCo|q1d93(Y?|8|@&Yj@9mF%=9jz0bSZvXg0 zA;I8=`e4p`au*Bw)~1mD*QhzIb#5rtAuyQ;<#cP4m>Kx|#frJzqLjfPoBI$-0=9q7 zn*7k+r{}9gmh=Xilx7_o%%(Lk)$3~b!C^;QyN9@AXZuRczq!**&^hU9tz@|#w>XXx zyrxw2PJG|nsePejw5LT9bI2~sFS8jZi=RZlu^9DP$g$@&+qCbpRiSSY%TaNaR(_nj ziu3pbgIMg-6l(=Sjfo#9wy9G~arC$;t3f4f=%&<9J&i$aBe}N9@E@$H1ai|qzGr{` zu_0yDKck`5g6F@MqU}A~u{1`L=9}Rt>uo{Nla(Puw;!dfmNa*rmGndy^}7p)->MI% zwQw9_z897@dIGHxl_F-i8Q<1r+rXA?lB8iW7@5k7Yq*NS2w;_d{97Kpp6tG&;dyns zRO>iN(^OJ23p#kCr0<8bTkv?1n4&S0sCqE=@c7%I0-0>;eb$+zr7gEK zJ-l+Y{CUsa)NEb(Bw@z9Yru1`MYgzeE0?sBu6Xk1o_lU?`sZ+VzDL~SS+m23WZTvc zTO$Qc@|K>rXuk?fE!>;!Tr;9-59?0OWUwQW4pk1ExU)TNINPVX=v1^)ozrCPP{;pDe7+EqfVn)%-o;myUN(-c0EmH(+e#)hQp-CoETYR`KQ#5Ro!h>oBDidvc zP4q_1NWN#~mYQ5i-LZHU)8UYI4WcmR_RY4`XDLLbz3Y=-c4)XRe@ZgT%bI%bpo1z` z$euhE7rSrB=L{=flri4^*tu{c4MIHLPDHxk80*t>;TY?4%#t*kRfVy*Np%13Z>MY= zC|EJ`K>W=jfFtqKQV~JlEtGN@x$x-qGAzcKlr=sqXcVy3%uPUVEpdUpeQkA~! 
zJJ!V?#yM|lQNZ--$Gfh?(hE%2G&gZzE)JV@8Yoz_K>j8`;y}{Id9JCWN1J@-*h4xS zm)xk@i!1jrv@S*XE2s-hr=DE#=xM%G;FFxNIet3kSL z^&0F&{6Xmt*_eQ#kt=zobvbfn&N(V6*>iI3WFpunm)l^yt6*R$0sI~H4x;&}zs`j| zH1K%4T_V5|x!5mc)jt^G8frE(oyzA?CF>`?_Q=rAvU~eRx42w?;NXkG^NeBwG$<4) zBD}$7?G^hCp69ggJ5<|u&^PB_;jNuJ>uAEv{bevNWx{zVCdVHqEP_yM2U7C;!acI* znx@{8ZB*PW)72?Wn(FPYorDfkl?*jQC2gq(Q&!4a$BM>xwpItuL_R%G3&n+|*XjmH zSV^Odzu*~mbx`G?KmIf!H{m-FMHc@ojPPed5Wl{_31~ndW*2PP!Zh%I1^`MXJt4J3N$pO)oKB zO6upVs+MqiYIin%kdM&Cs-!PWO-Z;I*VCR3A+x_T9 zj)4*}7>WE%P3a8xosEpk1KPJo!K!lYhQo|>@MQfKtcKJ3(Y;1rS%JgFwPl%bcP2;DeoYBNj6xX^EE!!{1?uwcy z78U%*c_+4*|LmH>2p^C~$6Yji`>e$8RR_L18xmHo`bHgJX zpeuErX7s6SWzoA%@n9FmZWU|PolbU*DZoDb^(EGvSl);ADP6YHEwefnD$J};l(KQQ zK~xAd9fiV~29&56z%}%T0$rnVOovH~tDEy^<7-E&Jg@kAfiZq2)A6;Gm6G1|xJ!=U zY@+1yp0<=cZ3D55Kewx6V_i9O<;eb`A7wgTGhp>Vg_&2`g&I%FQwWc+SyWx*+ssx; z+1+w<7g8bmU(e2v$(?@tHj+HUc+<70uedTN-q-9JmCy^@>voootmeZ`Wesh?7X1(E znPnVGQ^i8s_+Suqyv8#I(^u$$kt1Mr>M7mBVe5sb>EZF1$Nrp+?E%pmVc^U#9BpK| z8WX$XHt_v7Plhr^{^IR<_K0G;_f)kANxsQ?xT@&rmH}b>+cgcj*BM;ZALy>=*l0Btf1Iodbr}y-qWqUKav z*>QC>*x9kv#$$4eJN4kGy|SM%*t8YRqOrWbb&YPv$aIRv_(_5HrX}FqqY_~sVcDGr zxmo!xyD6Ko>O~o!1&cz`WD&jVS-f^V3(k~2pu}Rv-!*!>vFZ`US)rjwTFSX@X2(LZ z+>ClYmJr1|@LX{Vtp!c@l?wOggh#e?werT?p0^<*zGNd1-MT4iZTl2-%gDD(7XNf| zm`y37TaPpzbSeJkLMl|HwQ*-Hzk)3>h>-3_%7(vwg}>$;Wmf{gOxf8AxN-f6c)d~! 
zqnqG*Y)NT)!?t60y?bpNYgXNbYZP%0e_&T!`js9}$|zM^n(#QCZ6H82uW{~XWV64v zz2kr#u&7%8;g|*zMz;L}C)DUS*o!Lvam+Eib(YsZR5!R_O6^sPzIBe``4@PID`A-7 zfO+E@tXgJWjEvE&rQJVt6ekfw9FbsX?FE($PME`D%2%Tti!4s;lgB^SZ}NdvTwF6% z>UE_Y4)x%yC*dD9Y~cHLYeCIG={xDj631~WM?|bkR@(WQ?5y&blIktAuV(c+%jGy4 zN$8MAd{~0VqA)KUw1>cmg`IqV;V=oSb`%MKtbdUWFT4vVG1!mnV{#bm5u=@a|4^uy zqO1A$?n+H%Rc^M+6xG#6vzRXg(lae+6p{76(^;E&wj;PHnlATO#lzBI4aDV%YPVE~ zXzwc2g~N#E9z0IM`V?$O2|lG6m@dxblw5sIyXme(Rj_jwTSzz;k1ei6*%oCB6><*RaR;hFW2d zr>PpiIm_c?Dq8+DSJaVk#9qtvs3<(Z11Qw*-eLeoL!G2~J!~Xw%I+2u?te4bZeu6d z*|~GGE!3!IZBYzwrP^MkMV23IEsFQ7#(B{%fWN*xx>DI?inrfz_Hp%pffD9i94MK) z%Z-xzVEN?P?A1B|-f8<$luUnUl%IHaqa@K1(U$2?p`w1qv?z`phjzgwD>J~dUiWge zH*al(h53Qh=fuF{QDJ`pE=?N4dyBKxx4>TP+G=FK5p*3Jj|h9N{u^qyu8L9yVGYnH z`I;(Y>q&`d|G zoozUHZeGSH=k_tT;4TH^Whi|R+@mZC!i-a~05*9LaR`0k91ox{qpFGfn`bvn|#166ZIt< zohXMX1hsh8BFL3A^dHnRNrxg~jZ(bRwmSLwb1RLnxgV#wky7Zx+(7dgi-=hZox42j z^H(&x8UlpJMumS$6%~K0DXyp~u9rwu!%R*Tk#{wa^^FzuWd;X!elm)%eB(_Sf;FEWv;GW_LGw_Y@kUGl zSB|>L8>-C)f&DVhFQ!gkBo>hBlq9=eho?PK&ZaLqYp9_e~8#i!8VGEsTZ~be0t$ipv>$$^UBgXwsZCM?}qapoHI|3E~ zw~Bcf;)Yf3m_UqytYY?R4i%y#Ao5=x&Bth%;gqC{xEj110J>Zf2JpF>8D6=Gq_pmb zWm%V=By91jsR-st!T~BiP6*(#w2Js)7%_f9bM5oa0rj(Q-ro%FSNHKDZen=h_~z5M z`x#71$$`=Gr^(b4To%+SSB1q~!Y&#uooG7$;;_MW)q9;eF~^T^bszj(=Q`5K{qTcK zgi%Wqez_QSRlmCw>nS#ZnNF^lx}=w@o5C@@eR%KaJHwinu~+VZNFMK`Q~%2##!x`Dv_|s!Fct-N^Q_Tuw_sL?0K7E?ezd^*uuFfXI>DGAJ>pkNa4y_XE)IPpc zVF|^*M{BuO_{86_$R)m~oW1B7NIaT<;L26iyN;)9Xx_`c$p6wWSR*YWfRe*r%lI9E zmS>oa8Dx9Q*4B9kZM?Tw8`&x4t`-n?D9jM6rRY=aCdQuAlWajGHcz6Yl7CKAAT3SK zh#*Zl!TmXU2!j!eWiY%{9=|!4{S3!oJUS%Vn~@V4QkLb?B|gC*Eg|J?*{@K>NHY`^3dfi1qD+Ul zhhOmbacq;fH@u?Z%GWhcj8W>A6@u`RvU5=g%d^-kApz8>DC3M@0ThDbYd$>cddDh` zB}2UaB|}o&qx^yd$5buC>B2RZrwc!sgb!T5y1-#I$J2c{G@SqDy?cBYbs8nf%s71a z%T{`YjNDTvRdDjZCu{p&GdKM%g?o~|$1ziPdvo@<)|V5k?L9v(mrk3g7(Qf_UG{l> z=g!amYh(VxV?R862}Ga#0op}#uI7Fra|>>_zgfh}u@;hk_1h)nga*Fg?19aVzX3QU z-oR%<9J=!+W&AHA6xMb?ZqGX+HB_j3$Ev@m&FBJRqoFk_YJYjF~sb< zs`ZCA!@eLo=Ic)s4vG3uwB?#+M^zwPLQU->4Xhc#LEtJZi&BAOQFtXWq0uE~HFp5l 
zw1&nn`JCf#*VGxmcj#&Dw)UsuyBR7*0X5>aNg>Ly;!?7QrhWJ_>w^obFY8FXz45k~ zp`*B`MzBtrP6T}jTLt?&qQ&i*ynqIHj8`aeZvw`a>b~1t+EFZ#4zHHY`Q-%wL?8ip zuKorDv?@iBCau%S-S*tIV@IPKjmt_G?@XEE0;13NJPyHDTr=z(={QCo-A8#eIOcJ0eAy($>Tros;vcu z^WQd<6uw2OY54H!1(7-?jn}NTVZlyXBg%|PtI^TX-mh8PD5{GjtM_Za7rLH1*Oqof zbn7=};Q?B;nW%&2rL!N|$F;8GQPo_Te6kC+SC?&>;+!=xRjIg z;t#N%IuT5pSas+vA8}gSzC#Q@5?lwq|EUiqraak8?kuu>itOOa2$oiFwEi7t#0lW1 zanM$gXI|-fM%|@k#+#|x(P!d{t!oEsz2lXS&*}v*a{ID>^EsJO!Td=u)aTe&`QNsY zR`2WkPBuAHhnit{;X~nfnoLiIxvST>r9{)^pS7H0JxbKqHhp$GXaO?IqarY4EAHGr~6oHWV+KeiVUG>a!JO4uF=e3aJC@> z^SBjcEFwc2>!0T1`*+jD7b+Cb>wQXz~qjeqNNHjw>hDnlOk}`rr^~Mv&33pdNXB zOS1+VhF8h^vls-5C4at5>l;{l7BHxrFKUA3-{7JgCM}qprgY{@dqHU}SN$Dc`4dry z%k?Wnw-f{2RpK_}1edL4(WDXuB^zIzIq8mmeweAW0GwPB>Iczo4wk}*xlDk*drsiI+fDgd8eu#uE{k>(abB3Z#S*;(pkOX zof?`GUZPKkK9s;#tg^U&h2mwRblZnKBk`rs4jmr_tF|9~vKFVgu=VNKy-;o!ns3(V zxd2qEp*Dg=>I$(~n^&o{oTm8qYRq-r0qo0XRJTo7xwWF@* z2#EE5m+_5^p8dVwG5Mz7>7s-#8PyfTxm5WX%~@1{Ri${wdB5Z+ukuQs=V#OwI*N9 zJjjgfY&FqndO0$MdOMh}8*Dsht(rXMt(wyRvzI-zbaS4^9+PF|9uSHd-{SPi^Gb32(T;7`KTA{yKIEsvEx8_!PS>aq z$#jHI-^0Wu!t}WJ$(4h3obfUHF!%oTP3Xfgf(8{fGhGbF4>P65lT;>GXn4PuV$S7M z)w%o8@Fo;0{C$JFrgOVM&AxC)_shAzoP4DzgjK@LDl{YOb$)KOi&!zTbm=H+ObB1I z@A~-qoBcEKS=agDm=^M7YA%@)uJ-F9Uok81$FKNl8nBNqWjuNPT=wBYjFrP|?`)~h zn-+(ePUJwU2*O+}^&+@^D1kb~E*ps}5h5d58i))-ZDa?qRII6}7;vJ)5O(JxZ2zq% z%@y6|bS!j!q#}z~r7~;l;_l*0w+=`M=@ay=JHD)3Wo-RC&0=-*a_GZJHG-(p-_inz zg0H7$1ZEjtFKgV249_rLYZTh(X&xMnU1~RZgf-sr zudR<0m~mTf!QnFmwJ=^{?JcwzueA>q*>qZJv2y7#`2JRtnAUXYO$tGjU;L5y^F*QR z%MAUrLPzcoSy_M&Uyq#a}Zs4ve?H@V+- z9MJPDDhjbIGhZJbT`8@n-l{EoErI!qX_yyWj!w6s9vHh@U>Djf1bT;s`7CN?T&>)H zvXcHX+bDucR)wP)Z5Md?{(|*oRqKLgt2XlLN~;eXNCB%I;xbK;SVq}Nt+pDH@akvy z`3!~H*Hb@#Uh)c9iGNwkem6~N^X}*I!Y^}|ANmElt)Y;y@K7g(BO~J%ENA%N>-A94 zxn~)5rwYwjSN3OnMN@}ws0l=UqwlCK7o6`JBGB~eb)^v#uui`V?bKL^H4p|#FJ;#^hcS`;eE2~mV zQtISOi=teTS>=8e40(-&Omm(Yx6sCN9F3ER>++Y#-?}WkM4}BiT#YvIVD9A`tdp4# z6>2QQ%m)KMAeq?85W&qQxN0X*_w15wWQ+CaJ#{=%)-i?tQ9iAbFijZ)C&zQ|VvI+b 
zsXD2<1X1zBzeRdcgaRV(2m0zIz*^3BbA|TsdB)@p6UJ*%_c8EV^?Z(8k zh~EC>PF}|!MC|qI>m5nnv=5ehlWTPA9z4(h7kyML;w265rLjkn+J{9VIb0GA>U|zH zl?XsBz(PQ(mfGzR;qCO?0%MN}R=Y)Jo=*g+N;}WC81ejjombNO((MN`Ra*LWxqtLt zAnscq>C6*R(WR!dp8_*S857qZ&YkEfm-`>a3r*RKQ2h2l({gt&4$+ zqcW|u4t^O-eW_pSROqc1|8v?U)NN%N40+S8}CMt$plY)KJV z>z<6XrX812MDh4Mf4;vy-zZeHJi~seRr+n%&4d0>PXOiWW23Q(kPhuLm=B@ZG`o8p5CU!mk!S)tlg# zD%T{Q_4oHzS$r6upoSrp311Pv+kZ6h{&!7B4kiicBW=-NV%?c|QfZulD3nrX?!Mm4 zn|Eu&m5FjRN%00-Sv7sZ96vMtwlI=8cQ8;c{!4!_eh9dy<$H*Pgl@`{NDA{b($7*e0vY+@<3wD!%S_X-Vbh zxtu088yCmAYFimFwEwU)lm4j-v9Tgs%Nue1+dnG{E8slUiXX&>-xR%Y)qT_3E^wx! zL~beTUjHMjHglJaH&&5u!^3#Q{jwCoTp(dILeF*EGWs*#ZWz;Ik}ST>o4q(?fHn zN;OOq9r%?_NuG)e>iBEWnd#rn=z^UrZAk{wSaUVb?4H%l#$^txpFD;PYu6GLCa!9& zsS|L9+ulq~NVN$yxixEf-NQS{CB?;$n*lZPd(t)j4JdQ23}v5tMK=_ch}{0^?yu;! zV^>2(m-4`N>@Z@+KwD`K9{p#uht^x{61uzq$8!& zAJ$Ur$0ha7LDh9TZ~VKkT-h36Ce!~%)>p?xnRf51sO%bGA&7)1ozfkOfPi$jv~*c*WCU8i0zTrl9wda>ulTRcwRn>oi%>Mlkjm1i{>O9Qyh-}se5z}Tb?^7<(k zaMS)*+#ZyM7ZuBIsXcT<#==4iE&r;5w6ORYgs9UWC(hQVfMz5xm9)f#-b}Z)jyMezYek7_%q%~-(4DR)4n+-oG3BC z{j>mYPP}*{@shD;%wh*?uN~L2Uto1LnN|Wz1z-0ohED-pI^$dXrxneA!>eNGfk9q6 z=AUn8;k2@@GL{ut;?!GTq+Z|)=LZFB8gslbNc>7-f~npmhulC6!^PGLx3<#7vrW{~ z8ZPI9Dur@sGFK%{B&WALeX{abz~WSOEy5zk>pNB#;g|IjkTD}{)*Z{mIbqcLTV>|k z>1g74<`9bS%S1|eWf@qJ-(SthtBO$DTX1mfI=8zB)duEK)qr8twqv#9BRqv{RmG7w zD~c|sW6OZ>;+dFJ-twmge+9UI={kz7Nh?TCol5`nPrMGG4WY*ZHYyyJDscZpO0Ktp^}s4uigp!uxk`7SXEdaXvw9qUJYx1KiCmNl}Q*B z$lPAv7)o9f{bMv&%J)35w3@b6@CsyyR79wVSB9POg!p6(iWe$6=% zk%oSauKyp@AqxYQ zT_9z|?&qK)7X9Pa3cXe9mz`?r>f3vSOO7dVx%HlgnrnVZbaeGc?J+aSt9BX{zh1$? zV>z#q-uLFYMdfQLd_#LD9F+0jhw1$3EL9IIhxXxx+!0+Q43*8ucRIL98juz$@qEA)! 
zsiV^T{>N12-|Ty89^ zaxZbV<-^Bwvs1eptK%SYd7p`a(UtqUWBnc0&lp0_Gf#BPMVa23cw+1j*tSYtv_9~Eene_kdGNPb{{3ip zTs6og(btk>ePuk+m$SC?}c*pM4Td0R(xwP+p-_v=XO@`DUZ$`4zd7zsVbiQXEI2>ck zj0tnfg1>>G%(`3nRz^7*n^Ah`=#{J>1a$=lAVLW~f|w}4n7*bTO9<+aBG8SEdM`e_ z_;9mz1pfBStKNcJNWQNH3OJ00 zJll(GP!XzZ|3}&qcK>j3nIkh}ms%|f^H_r|+dNZ+KxrfeS>9_=n?Qf2u}MpRpZRo- z)o-w1@MX-^BQx(0pN{VMZt0XW=5*~Xd+r!U+wA3S$$2}Kp|PLTcBJtJ>~r^Etw74$J;ZExOM;(h>4KU%D(0qM!KJQqF?WaWoz+S z)#3U+p_;#kK?)bV8DcZv?UU|izHUl01#iZJAI6@Ds+VVM>`P+lao+y+S`h}W%e&}`uMINC_ACEOdN)@n6SJ_k^VCH_> z41WI6VkNQTqH>YbiCZZV{7>VM1r6$tch{rf)B%Rl`YCVOu<0)Qr zI+rT{n@z#~jP}_DHiPo1qr>Ar%Pe8{HW(tS!}6^?cPO9T>Qk5~s8<~Lw&Z05+=B?+Lh_oOZrwwd?BnabgbaL90nbdF23s#M0?Q!k(j;3dqFW5Oen*pOV*FEj z?q>aCl8r+s^dnGKqzH3r@0;f)Oze|<|2fP5=Xb!=#S*1#2=1^IbVgZnTGgbK2&C7iHLvq>qfRy_*P@(RR zUGNyOgV1Nu_!<>}ga=(wY=vRuQGkmH>yHt#>+(CzA9N>NSN4oBMQ~retv-pXxfN!g zjs{$ffww8r&UdWds>m}xdL?_>$9KLvEMs4;Hh(rZjD(yfwFG+L!j#sv7}0td_3{pQ z*V{!p*|Kx(dg4X2Tq;T+DDmsf0wPJ!uR<#i*srQOsV-N0Q;Fok2r z6?LL;p#dj6Mnouw`Fux;k*-D`dzbZMh63^bzF!XfG0rl8lLl^j#X}L&xke%6oQhum za4?~!4L??9`#LB5R*RJgsb@7q-V5jF&tXKiJ|$)9RIcP}i(qODc!c>kUwl4XCw{$E zSnVr(Ij2mXOaO~HhvKy^@CYGVSwiv#do2o6F3b|Yy!lLOsNOd}^_uXPG5D5C&Z6)D z9SXRX#2NoGX|k-F`V;TIR=QDo07=?;?*N4i&8p|eCie@7zWTR}DGCz6hj$iu!1z`e z^O0ekU{^4EtjqUYn@FG0gTP|77=JFucPW$KuVBZWOr>LrSqI{#^r+pqboQ_V|L0BB zo%hXqjdj#ft*I2Tnd1Mo<_pfhom`ezn<3?_Z0r+(_}#6WO=y_@2A|g4Ul{s6hRo zVSuup!6zVw*uKSJ6^mO{S?TI*V^NY) z_>9}SjFs!1?~pP`uKD3F>bC4^@S?LE_#wij3;qi6)3{nhh4dg+x5e6WyRMhb{WqK3y;ZEl-xN!OK#&(;Q~!eON%c2?LYwabtCn@6`A1gpldN zv3L2Jb%s!ZaYX@&uFv}I`)4%= z@kb36{7CPUu)VjW@Wm^q6{z=U3DaLi7VQiG6eeMqpxJu#su#l=x)zEOPgL1lS5KA! 
zV~;Dz)LqO;y8d>7hVnMM%Q5gGRZ?1+E8{i39ksnSj$I6w>{MN%{5qb$pn(a=8eQQ8 ztP{);s~RWuE24_HJC6hMvl&G93_RGN*opMC+DW^Ak5W~6vn=@2clPVj=X54`#1~(6 zlrI@wWG+gc`Fc7!u(S8(tNysSJIBP-<%|DYxpJ}^mts`Q0H6(Q3#G#kqlYT`bv3;N zcA;rB`f6&F7q->o;V-c0#jk%agVsk`ocdzoHixWo`{g5_#tVZcvcxlIgOOYXm3rk#taakJ@~f; zLNu;tD*OR!2IPNpXDO0Hj}CNObrAJ1$dm&lQn4_dJLV??IB=CUugtam9yIXOC1d=| z5cj<;!uqKXE{UF#7;av5yHNon>|u?vRElA5KS8cy*cnF4obE%X7mmmTp@Y&boj_UH zt!J%Sq7|rHkD(l_KB9#?%fFsARTQS4PqDS}7j#

zWj{~u>r=8CF4(VMY_V5R>&RkRpsgM1%oa+xKo9>iCG^=h3y}^jTfIbF0 zU^}6sMb=Xz9zOELO}LOnV7ryIeTyBWy?R;daHr=ejupkMGF;RRvClEavjs+@vPa17 zf$CS;Y;^XP$30YA+NcaiUx)EGhp9}r>v`J($bXHpzz9_-t+MD(zX(sdJAo`hCuV&i zViN&;o339#L{M#6p4~vYu}jXPu<=H_h4|&bKR#&WO7+YqFM=;TN4G;18UYP4HVJy2 zRpmeJm!;ht?-+sXepD+S)FDzYR+3xGtl=R4!E+veA#63w2rOjPK^P z0V4{nhWXDH8b@|UDI3e|p@ZM>jKnlzMl`9X^yC-N22-=u>>E6i1al|ND3DdR_tqy2 z9r*h-ZXB3PG!LXuMKF9uiUBI=-G7!^h6`TwD?4CJwn12T8S>MWOm*28TAPPf62S=A zuPxP%YhV&BG+Ye7_Bc+;ksg`mP!7hlD(#`r#Gqh+&hUpnx0_+t#-IjVP$ZW`_}iRA zP?I2hFJ{2Iy*laU$ZPUJVL+~wZG%=?QYPMNGeH0lNR2+|Kl;P|TWKWY)_Xf*vG z5vu0Nr;`;ygruIJVuMM)6DWNvg+C!7!9T8|hd4!=0Oq2dKAIK&fv|R(>m|xj)7YMw z*PJf+CHiPff5_jhUn=*>zOd5CUz`|xi2Cu2qrIQSwxrv*>pzv@SC3bOWGc~7Ek3}( zv@tOG{YNa)sc`q!O04_j^OFzBrpO+i^E7V@AdN|f+3t@k(y@3R62AS&rWwY8zf+s_wxA}kM z`RuR!gvt;y@z6AF?E2RzBIe50BxXcAwNd@B48-6eZ8nQ`1&N~fehqNefGYCpOf3|tD;>p9w`Lxs02j}w3Zn~zHu#g z*XWq2Sy9Zf1g1wi^6Qyf^67MupEMWRKNp`qPhn0QnvFl~wCn1X=S-aR3tVu{PgC=0 zNV@Cm;6li(ij>R9RGwnBE!CXi60mJaSQGBa1~qJx0R|+ISp;Z>V8}Qr;I}uZPi!BZ z2YHM6&`eHp%k6h0SJ5Q(vjlFruNqPt_LiDMiV|P@rwyOcC<;sXU#1}_xTW;N-u}?jD-`}5J>ow)`Q)SaXZ9-V%`a!WN42+}x+8D<6m8fXZ4CA(l2PFH9YrSE%tkPr zS%lKQ)abi`6hdeu<8x#(n8Ktg5Px%B;XVOvGM%lMrDMs(C*4^GpWZ;0ee;GeP@l(V z%d<#&mbwz1+%Y!U8%&BbB67)$ET8CVdR~`)WI0X5@1b@<_A|U_VR#}y zkbh{g7PK$MCqD<0V8luEYeGjm@xvrU!DSNnHFMor{|b6lA4!xnz^b#J%@vNnzMK!I zJ$0*GR*SkOj4&-7j#s-DN`ciZHRntezf@zlfG4p=(+Iet5mLY058bA-D50GwpZYBv0J(Zt3Z~l7!^IKxR|&MvaOU;+YuJYo89A=VtyapCY`8 z$dsQ8kxz9#3!Ap#Ux1_q5`MC9qQ6X)|EPBV2ZJ-HT}M~e%{}%tJk>iWzW15w)E?P$ z$4M>Njzxob=?7sGpQ>m2wgdcGryzU-n<(3E=HRo^ohwk0&xYGXCXI`2Uj>xecR(ayxtIw3|0JN)&bEz!`2 zCi339{3wL5kLNM0P&C{pjx5Kn(Z%+|wapiY#&gl8(tw`#V1SoV<0?a^dt>nxp18Jr zQpcW#U~{T3jU65DFwkXG!91Y%^^Qt zZFr0yL#K}4h}kR3-~c9@O}vT7Txb@0t)*+Z_1U(dv@kXG2#fbI@9lo-_cEQyvO^!E z?4SSf*fD=d?3CvuqCmt_<+4TZsn^Rf`^v5^701aGn;Q}u@ICd#i@G}%Z#*y`F1hnX z_)>i=nE?!QdminfuboHpA+|)yuj>4PvRaMuBHd=8SC_Ix@77NU-|CWy1(|_g+-QSLD(AjSECWksty8w*3pZIg z(_3(69fxnKw$1p4^>w058lF)OA1&Q^zQ_sIbZi_mz*ruYDSG#$DQARM$VA!^)H26$FlQF9 
zw27q1jU|p!9?-}4tl)epuvx>dD$bebd42Qxn+oZn_ z+q?!Dd6a^Js+XJ(=aPU`RfaNkyoubP;_0ffW4^HhG=}i;4Y?UG3BW|LQlyWUkG)S! zY8K2XF@X0pXH9_#@sBJ{mMdbA8kn{;=Qqzr9mK`O33wvNBtyuL=bj?(DixJ~)Wwq? zEHv=m_ikkjq`Y0BlL32#P2cfRyrZgH@{|_tzfsKxT#&L2;Ku`X0LRPpk_ylcS1zgb z{Vr##z&O5harom#JuAC53cnBH7%uu446Cgyc}yG)hk^7rx;#s4JTF@+*jtG{6$#JW zO*uCAm@5Z~fRL)UN zSK1+*ody3I&#@~M+$V>l%BS3o#)hhUke7^>omQqkMX^Bg52lAvP{eHlo7b8k?pD*g zP~MR(M9jQ+7x1HWkDGNNsE$Xl&;1X_G{@J#I6Y2A+LP~Hz!TE5Ahw6@m@L3GGzj+C z-h)8h=UY>U?D#{ldr@PZFl~BIPUaI#Cz@*A*`J5(rg`Ph2G+0sMqB?Mi{aqcINh2M z)<-sD;<{{<#^V3*j6I{tp)#;e2V3<+Ni}V~&JGl>r=H&Pef3jVinSoQq16Z~2V6WKUbj0#8q+$Pedbn3?;lTSr7#hWzCAD{$tRV& z?-_}{Sv~sP(M;E%S%#35LV;tQ`?=-0c;BOU$O2grHjfPvlyL(3<-v9+{qkOy(NB#> zj5WGJk@xaAL(62|aFa$D^&sJ}+T}aHvkD`lfO^{n@Vr5S4+_`gP!`3-@eA+tiGVeG z1?%o)di0>#e-{=WUwkJ>yFv{psTWw%qx7$m^jiS(zf!L9+V(@cnE>~o&yLk4OPeGu zunNmV>dwGJRk0}38Gv)cE%_#I5?cami=AhtLfIpj3cAutwH&w$A*_^3&slW(TyRVY zKr0i#`es}@G~GwhPV;TBC^EzkD-3$=@-{*p)PHm6`+IXuFKr2-?C23^)jqL;rZ|9? zR4Y?|iKIJsyOQe(V%-NwBG=j8!yrl@+S!vWmCuqy6k*~`sL2M_2I2sum2!Ka*s-={ z^H-j{NUEEIvsP!rBf0_KhG?obz0U%*BH%pkJIB*zmz(_iRoI!(q+U`C|1==L05P)w1PErjNH5$7DZ z%Ybj*fBWZXq5TZiL7{5Y7f4Km+mAX3has`G5_aOrczz)7hjn-9rGAo3eA3RiT7mXn3i zLU-}!oo^mk8Zx?+ zY(AVdzqYpG=h!Don;!mQ+8@t_zl5#OAT6gWM90-c>llkGGSB&5BBF(EUAPr%9sd5c zRy?p4vo=@E^E4~u1M^;^0ZrJT)R8yTLv-DK5>t4_CDxp#AJp`(cv6 zEv!)R3pf|9H@hx>K@=hE+8@XyWUrv3Y}hsj^R!SG6|pycV7<#YXCZ$GkBg&K8X)Lx zOq!im$SzP#E_|e*X=R$hVl+3I{c39h7ht$pcSOrRqo^b;k26semHfEIUbqi)?50#B2)Oq({*}yPr}lVo~+2wU}L(8e9OrK&eV8)q8LL-=^D=4qQCVU?f&;_HK4>*y*rg z+*xX3GIlVcd&`|7>Nh|$ZN|_C72lDbB!2gTjuw1Q>+1WkNptIO5YFI06!W+?bYf@` zlynI+ap~RL{)b63Q)8=+lN~|BX_8TJ==Le zMgKYx6e4NM1-aO5uR*f`RhpF|@_U|w+We&s2VgAdEm2_HG(uEzfLgw-7`tZk+8FSY zi?94Io$@yt8)WJAdc{T;4!D8dBubJC!gm_KX$_~I1XX^3e1IV)l)T_?e6~UPYQ8vr z6Va2@Ond>-%5?m7yaWk+kl(`*Ea1k~Trm-J-)Bb(tzgY<{1hDK=%S|V2cn}RbELg&2)CJ@mSz5~U zJV|U_y|^MZ6R;g_#d<3b?tuLG!5@rvpCAE?0k;17Sa|e79gjB}(>4`sHtmz0j5TR}NFa5O^h`!@Sq3$=UTw3;RzDQWz~ 
zuZMU(f=fyIykAF3v*0qDgeJQ>i%%zlYEBoZgya5XPLrE6w~^BzjM$Ao9<1a&Do{AI%uS8Flo9r~aj zCXRmzT1tFY*y9RLK>TH${ektoKSuX5$I_#MUcfxuDv|CAL#vTc^@ece^p3AiW1r$P zPzMr{lVcC-$3|@c{8~F$L_Xtno!KrIc1h?pKuWW2+EtE#&5&jVNpC>zRz5#{GvG(k z){#ddPBZo~hA^LaXX>r`$KYE#`&ieMsm-Byv2VN82Wq4okNg$6L|Kx-Cr`H<#AkqKmY0{09@zcDbSWT>>58+aAO7L2rQdcZ=0)9Cq}+pA04y>#!>+akQq%F7H$AB@d?KcQKNUuZOjX#tmohViPuQkQ9HiW<%s8I# zYf_CI$V^ixtrZG&>tF;kWCS!O($bdgxF?zEE0zZa)JK%j=3$zR39*rBD?Yebs3z#) z7X&(WssUEhi)#*eAqNJqSCceMI!y`IQAr6>I8*%!*$EzFHVz8dg8bJ%3f>t|$z56^ zd$^E=uW@dtzuiO)LF#cUKiv`8^DBJ9u#to_V7iKmkDI{mX_HDQCGn@ss8{G?+Kd4& z4APgar&&;8HuMtzif<^2agTrtB{YREa5nZdVBAVjZdB5tC>ls8S{t5TN{Ac*Tb1;) z$VExNMnb8u5G#dKj{<7qrOklEoOOKeJ;k6Cz;=#uS>(FU7x%gz zb&EuC`Bz}uV&Vj}z&Mz@`DoX{V6)xF+moFJ)6+H&b3`?}LSINJJ__mE6EAl>pbH)y zS_k4oV^{|)ITM>+xutp$+t@^a>}`n@?kjf-8`L+m4lkHJfmfdd7T&7*`O=ydLm*b2 zYj3pqJWug92KyAV&W&JZTA6c3Tj(6!!B^1GanQ=7vij`VMk8~GPB&Wmj|o& zj_M!I?q+7$Wyu}iS6*;BS$w1G_aQ&j$L58ie)_YoM;A20(PTQvuY|Uk>ff=W&{OZ! z_;kz0(^eyXbGkLUp-{Vu`c0#HRnPl}-tF4hqZI|U9GaT00zjk_{0BE-crwb8OK|?6 zy&z%Rwg^W6F1LkMvt6J|W*L>#pP&XaVlk^ruI#2s9PMGzd_u0M?i}Dsz4Lw)@il5P zM%qzSL(MnxG7XW^AByBA?gTMbqb0}I z*xz7pe#{tLw62?@5{FwzDhjj4y~T*Z&G$M^AGaPO@EPpx-fz* znRU;xHtr*?g;^yp@rPRGqC`CLr3^uGw5@cCl*UHUae?Pfa;2YSWsmSU)BXLHGmhW5 zuaGA0GE;e7mowjA7PdS&Z?`PhKhSt+Li}d%t;Z@BXRAcUF|XV;l4nV1F?^K5j3nk6 zOjxqYi^~I4TiRS}DU7nAnlZaCwB&I=>iP5DZg?ukSi^+&!zS%(MSeP&jxqJJ2t}Ic z1vWF`P2zN)&OixKO?}%O`Y@#?`1Zx9=OEHsncs2p^lE~&xBCj+ z_!X@`@hp76nKzg6-K$@VkqMX3^0UKF8VzJuxGQsCUT)Xu2iw=vBjakH3<4! 
zeO>_~bvbSKm-_;BI?RRlpQIUk?rbVjy_YMw^4TbF@%xX(IK{Qaf)??t?1|$SAGiya zdatE7Y(|Qc^sBtX8=3XYSenlqM^~u&Y2);<2Yo!-)~9=KwSQfuFb#v-oQQacG-Kh|8cI_o}v$KYp2*gQpD^_`>V%5t( z4tB_(-S56tu%ssL5SOBlZ996=;!)oonq$%c3yfY)hM#noM|`}~OWnu_7ecv#`0%`7 zvpJg1lQg}EQVGDp^l-@uRpP_c*iK^@IgfT5YijP7p9#lY2Ot0@Fr<@<= zD0idJXi#s4jHRK(kB6{~b$;ev^38gIxN%3O4`%t?@nynX?^BAE7p8q5^3&p)ybm70 zuj+51EaYVfC)qLms2^)V=O7&`mu$~I_0&M+^=5JSmFETDW-RKM>o1~x&8LD_o7^y) z9@HU*UIdXXKZa3sbo_2THuY5J0^m?Iz^uKbqR@(M(I~!1vsE|Yl$6^qU1e~3c`)nK z+S=M))bUnC2oDiund*!i9yJhwx>LD}5jUu)qQ@lQI{MK(m}Lf(FY)MQ9$;UKD$PI( zUKVL}9L(+*!u+#K9U(jG4_y#g?aMN;{<)>}+h#R&nc2{->al&Qfs;uYu?AhL(v-Ep zFmq$O(-&B2-;!DIV9ZEOp)dpEKt^}RE~`vdFduY3BV$acP@HC?1o z9#Lhsm4$w9!;@%dZ!CQ)$_au}o+8)8LGZ+$9cAP<*ygS1R^b%b-9NDZBD)of_cr5t z;I!`p;YTH8mTQ|~u{3{e4<;nS~g-St)|f{<3tD)HF)NQt)TR8BZcsk6+ooSFrxtie)gdtw@>wM#&jZ zc10=7`y+1u@`K6FvDDeUI5u^n-TCj|6Rh}E?pb-d*k`bF5hU$AVR{}5*P~h$%h9p; z$ZKpqD@q>wxY3}LzfhotK%cV^S$QLdD7*jpNYT?5qkMI>Y73)-=PU>4De*6xOh^(8 za&pI0N%IyI9+}DSa%o21s~7kYZ99{4GnbEB@!*%Vq0-WMv54fL;eJM0HX_Q8hI-!5 zRUbjmj&nXko`H`_gV!N;F*>|4rh=i@8?QYp#jjuIf<0aIz=MSxmODKU7gh!OcW!@@ z8m(I{leE3V++B8!fOU$%pIM|iSLupqkhY`#)F z^e?rH3*R#5X?lB4$WY+$aDo_BiZ=8_^oms9guBVnD~RR4kN!+)eXB z1RFv}tUcM^+x+p(_DMZBRX}WPeQx1tCVG0D9M0nZ9Oojh%%pKO|JA6>Fug?#qgMU% zEMo9(oT9hUZkxY?@AW@)@ssRH>p2MP+Nty|tbCC~VQSyGHkM4>`(ow?ci|P9?b+_#t!@;{%*@K-IJB(GFD$53f=H($vU}WTg8qmRp zoMEdu9LP^TGIUR?2ZWVcz&)xyK^{Sj-1344^z`~z?Jbz(i8r4HJXX9%TE-ntC1?OA z()#TDhLG=pHC1G#>&~5GxhBLSNb1{hvnFp3?M3=ppgYjOKi#Hi5gXq-vtf#3}sFX)PUZ^Xx1`N9loCqEH*N z&xnku!o?=GXS?J^Ggfzee9ToAw$mT9D0+R7MMm;C-F&Y+6Q3N__WPqzZA8cz&tXej57JA`ya^kZm-_z z&Ky<~#F}w^q2ZPLD&kx*$EVE?GcsmDIdqDi?btHC4QL&HqG1sf=+c$0nS!%#Le?6d zezr0j*3h`25?-8>`UPNNdFjeiNcc{6Oj^w66S}h*z@5ZI&yQv*Au+nw zecwFzNnC!>bfwTh=o3?n*cethzZdtl3la?!DJcYQx93M}-@WAL|KqyH)M&i+15z*% zdC*kNE}r3`(PXf7z9`Putv!}pm6iDveZ@E5vAHrWjn^o5TEu^57g5BF4ze~bEAL^Z zv)C(sU5vd)Vzo?+3V;a6IrV{EErDJn$lsBhk+tLGsRu8G_g_LBCpR!02^t&2s^vSv z&Ph`5!<>wk)pAMiUcp_hVe-^`li;k4SRa7)yv^!|j8|tc5YnSE3f#X)(|nN|pYqh& 
z8liH5oqA8k#e1*Gm$r;{Z31BT^&9lBcRD5W&EPw;_DOCob7oo*L2qu>vY_@D$rAHt={*HFE|65LPmdQc`GVN6? zIXeh&d%&!tM!*Z$+Zoa|p87Zfh~VD$0r;ERuFX%+mdGbr&4e@O>U`6(ZQBUHkWh{N z(v#hTs3LGKrqjBuZJr+KZugh6r_F>j2^r9=?nKcQTws7tosad_p@BzT-{;UY*ClFS zmZHA0-(DazjE~vAEDGl-UNAv?yGV#PYg2PhzE!p>_E1IxH*`^gBRkAWZ;i}nupvj> zf%&oI<*)MVejxUpaLT2j?k<1ScZ1kOaUiX4Hy`&vj89W#$cqK8=caDJk$|SA-$^?E zY~3{ObJIELy{&91@`SG{Pr{Sa>XOz?CLUHY@7}dzDyZdEn)tf1{xZ#Bku@}mWqdR{ z8Dy5RShcsJ?p1v6;vB*(=7TC)x#7;d60)S_8s+O#_CMY_+{$uQf2IgQ*J)s;Gf4#MtMJVj2v1O zVy5wp6mce7cX#)V8F4LkfQ=jD_!S{!^U^9z-Q!tp(Mx0u;V7g(!CLg7u1yg>qH*KY z7p?@8&UmQ{o_a*=PXc->)Q1=11g%-wL%O#MuBD4qT%U5E)-Pr+^OB0ox6;z#2~u~k z$1sPNqSB*fwWpEtrnG+bjmtGFS2w-678R*7hj#PVl`Kyh*kiErib7drG@?F?xL*G- z8HOKM1`g1HdPkYJv#(H5kUzO3rC2eZ>+uNRXhh4MY?eexw>|}9HsCKbU?V64y{{kHxw|(zPF{xIWWs|*v%0db_mtyl5jPxz< zOWN_PPCiMZQ+2YOp2ijCL?-`_0&5^VIo)albQ!-jsQ0rMZ?W`T5+Yy^PW$> zEX*_qLuuFA|k;M#t$}%%bO{UA%5~!mp3y&I;oKEbuO*iY$DoE+G z?XA={WiuoIK^}2-X!K4pZfSee@MFm47`0sAS$fQNrQ5cEO7*+F&%?Efo(;YMah0zUM_ z>u6_b8A(o5MU1|;@DqLWZF?!NpsGS9z>R#uca$;-SJ|!INpaX+aOYC`f0K;HgqH@Nbc9Y*$>-axYX$>OLvY*fnXG486`j9$fH{?YL>*+yZhT*=}19{oF9W=@`v9*^?a*riAGq(VtdWpg?hl#?xv z98R)-4u?itA<i`f3vp;u#YngSPty7f8ij2{;eF*%MjM`kko2n0E#0*qb>7v zLGS>iH-P3!P45p3F71NxhlPtj^e_seht(|Ze$&o;F}1*p*p-i~d@xQYWt(y#wCf+8>A#81tb({kPZpy zlm8-9$Y1kjUOdZV3mGl0IL+`l;tf5+Cmshn6E1p52-CXWyr z8!9MgzZbWsDcKJ`L#70&)nR!D3(kvl$~yE^&a*F{sutUYW1jNFbr)L&L`_WS-&mq`B-ATG{&Y z161a+Recv%(PJZ)-VBj-1acR7m(nZ0ta!f6BBq(iQ6j2CzOuBG2E78`fQvlKByOK& zqW;N!>HJ{HMPKwE z+Ms(bA*k=xemP6nz;kEg)U7*LAFG@)_td{hEir&6bJqHNkIWJq_c>*MwM9v|-Yc~p zbN_v?t${m^Hj+b1z6hPI6s3;w=rdObM`kt#yODdt z_X9r)AqnilV+dcfp)Mo|zPd%Y@*PyA^$JyK0SfO@GJ9tHoRcauS%Z7b*MfyRoDEZP z3?1D{qRSnr9lTtX(3geXg}4Kb}rBJ=OA>|#-s+Fr%G{uEnW(py;fEvNtI&LZ7+(R zw0V1?_=?5-Ltlf|Ig29iEF03n$Ot0a_>3J^Ko})TY1Z^8Zlt5#IXJb_`9FFpNfP(!G&oAklF z3nt0W*6*C9@84a^XSB`~P*N#x;=)QGJIxQu`P`vOT5Ec?AL5#EY8mr0X9}Bdx0Xpt z$=z9E>v-MSfXr1;y=O+v)KgFMIPrjiOTEIXE+9=StB>yBz7s7ep-$aei0d2Wl#zBd z5`tD63C^DPu1{ItboZRI37!$K{DB_P^$Xin8P#d2nPP8JPRxmfQ 
zq0y&iF3Q%e7|Q}BcprU*4C`?wE4er;-vUK)3cdqF=G|F@dp&1py%3@ zmfv}&mUOG9fci}%?mtGqaqTq}ta`%@`*2;&07f@jH^kw+ZbyVvS8Pe6?=y>Z&@{;UnI&Qgkb^OBi1*_4tJm+L0NnOrZ) z%Oq})m*`8y^%a?OR_YBr1m}{XExp8U2-mV6Oo85NVT-~O8*w_yhEP-2-q#e5Q28-o zu_5}m3B7eSeUI_w_K7p>8X^J`Y(?0to$A4 z1yY%d!kJM@uLxf|#-vJ$xy8|US59iq1@yjE8)Yrc`wsgQPN=AQdsI}lD(lvD_ae_P zRrm3F76zj%Je~yL2PgN0?BI2oQRvs*{2XFx297laDOUzr)k)RBTOzjXYN*--!KRhK zfY?YSI{gfx%IqNH9(H^21<#W}E`kj!8{0DzbL-@#Wsf+^W2%`%AF&VdT>eC&HPfiC#@w{fzeVM`ivzhWpOo?jKdnLBU0hTE8N{GHBy?CK>OOf8p=|J!1lTvF=Z16sFAz zR=UO1Q)Zya&V)apDEcaqHawGhygz9v^J)GA<^|G-%mr1F(klccVv(Ur=3!(F*lMJM z7QI*;8oFoB&P;r&C!G2%5h97`y_R!%Q@xwxCDM+sw@)k4&$L@N#7k;8;~f^CQ(Pv| zPy}&IYs;lL1|18ZxA>Ji(!)NptW-33wtCbiM53By4fH)&59WVi@fV?BkpTtVBF26} zo5nn40#tc@WC@q5A26?{@L>Ph8ctm0ifdfoY*)sFzs;$nY1TY*nxy*zN?AR6`If6P zyPpVYE}#>XmqLd=y7mxRMWNvCLf~v=yOkfvdM`g1_RRM$ke)1I+Wf0 zNz-=%?ind0x=x+JGyU{gcUJXuALfmiDGupM@^Evy7`ApQUExlTII^DixQOX*m5dV4 z?A3yQel~_DXQt)u3|(w52cxLRpm7JriulI^v>aZFBULTY9~)*<8l#@o!07LzBeR} ztIQ|kxzMv*?7#cgnllLoo}i|5SZuK#Nbe(Z8{{&wqpu9DAj&Fuc>dbjb6~!S+@%ss ztxeIquE02ulnEHW&5^3;8!&~8WYg4BjwLB+7*F4*dbV>D;kePOvQrxb+Wm*u1DhU_ zp#r^h8hq()Ilj*!T!|pXE(u%Mw!!vEl}|}OXGLAz_5@=KNx<+kH`eQ7x$2#yW+SG3 zMb_WOG(Sv+p0Tv^3oL4p#JuSq;v(=_oK<1KOH9~mz%!CV%hS*h8O+@~&p!GHGQZSj zp%{`)H#GH^t!38|sN;-_GiAx5F%&V?_`>o6mg4SMCvMnsj)v3?5$k5^KGFQe!c)P9 zYG4ebBE4Bkl8g#x;QFl1qz0x&6kMEJ;G^;z#Q7YX1LIFn)`%1aNEVgXm3-9+rjJ3% zDTE25_h^Y0&R+_6#M&!;D(C6-Zux|QTp?7gK#2k%OSSk9bbS~d377b)ylBWcZ40{P z1GXJVNBp1odoipN41wXVO6NDh4v$;RD# z;TE5d?9!9>TGbTzD-H4q@@C$jZd!J_B#|wbn%N8can2fN!D{i1Q=~~Ht+5fQ zSo-S*XqubyLLM8pC96M)gBZpG!Z})C(?_%`>}3Q)Xw1Gm?@G@TzAPqGx~At{+xmk= zk%alk%ciA|x_L zttC{p*lL1_Ej;m(-8+u=3VT=@CGpzY%>U!Lqx*@vM0)P{GX=@@_; z2(Xf3TRVrx**daD^dRpAWX^~j2cj?f*HL*BtAx)N8qlhE@G1yeE z>@CLQ*&!#5bZyJySS{^XY62Y^&(U&LRk^>3_HUUUP3EkM!E6 zFyY07WQc!h@TTUGY`hyit5JtMipMzn_Yy!n77m9ck^`DVNT@bAD7_CeWxI(^58j0@ z+5}cT=z#~1Jd6uKX22n zdLZi9%{?-H0%{tz#;XB8$d;h_*>i#Ud+_7M1bU)H z^_11bH~5+-2>2%n)ZA_z&Wj#oa1b+&on15tIs&Yg-+sYBZ&@(AqeqJuyc@;30(3Qm 
zQrhDxY-aFm&99?+OCTFVwST9-K|frMdhQDA=jvICI#(rEn(%`;9B;`u-ijCxbzi%2 zcn9eXR$i&`0k!hn5@_WG0>>+FZFFklTtG%29JtO7DWY7E;5D7TZ;ZLBvCZJ2%{g(6-b$5sP9tv;TZI#cBaP*z5fcJs$k=-M%sr(g`f!gUk-rT(f{P zG9y^abw(@7kn$RKgp8pk@k!^jjn z*s-lY0)B&JBW3le-w1~QV1&OOuI-mxGoX)M(b5n0ql?t=TAvCL!daFAe{w@Ac!D-m9R8t5#qg#h;U*ScUw!3j}x zPW^LiW{3y+v}Ek9X4eNl6^4a%3lV=+t+!y%i6FR+k9--UnikzgnpX;W_P&nAmHoO} ze535C%#cAlNJg59ii$mo0q~FaB7v%#sm?{<#_b<2%`o)01@}x!%C0h@F0Z`wW!RFEpNm z5K(?VF-9hQmS71!zv$>(dTQm_yG7A$UT@zD>0>3hqay|rAKgWGHeGKsdtY-k%3$~# z=pyxHpjYWc3|4{;1Zb}g^3a}xcGNF|2^hNaI$-ZT+iBHC%t=-O={@&fyzY(B*V*4~ z!V1XW9wHDtmi<_Vy}C2(?PdtuqzpqL_g;KYHJ+=~7cUBe4d#rErrn>w0ps-315zID zuRec9m|nt%uJTvq$=Htok#4tC;-u{Vuu{|dfpo0`Xkh~3Nw)7-8_c4j193@O`GKJ} zJEo9NjAz`;LB2w8#^MMIPw$v2e36utl+NiwA`#P&@;UfNDcC_#=R9H}2k}dp61r9i zOZD^~ARPvA!;A&61GY>scp%v`ekns$Z+v{f;{p3*-yF;fzTe0(6Kh+=FPy*2j3*ON zG~(l_sVui$tgPxDFN(>2g$+^n6=c+$MNZq@q6gjmS}s8h)CBApfQW;zRf5a&uMrcG zAFyDPyx~@%QOCk5xCDd`)s}$BB@jZ~1tQ@M(asH?s&QDjirx!+VbS(yTgDlV&E8%$ znxPHhR`kmA%${k$>aBF zKpZU3A4K-}JR8gU#K{N72h4Zf0imgJ*^ zY@&eT7k*?8!CpS#vZhSR?qVKq!d0NBlMbIJ@RvN9W@1lzV8a;*!a^1X0khtpotW_O zqJUm&fW%raey_&|J$Q6{y*0j&!H0HdV1F%L2Xcn`XTSHx-48bUohUEW0J0;2qm*Z| z993ZWADq|kXHz^|c-Q8h^IXbbx?!^sK=u;S#jS;pVv(4*R zhFKuRbRZnka_Ra$8DLEvg zZk;X;$Xo3un93N;jnTla5O%2jw&^j{b$0&wlGLmlx_D++k3JQ0ys=YWL?YXOYJyzD zUjszGq#JqqS1L}O`n`Vdwz%T#L((Yki+tH)@>fGSqjNu@qb`w7KPE^9_mXC&!G{em z9c+#NUe6G1iw!0QzB^_j5?dbYx?VU_{!P^@?YanX?Hj-7#yK@oQ&Ii+@gt7k7TbL- zrebjd^x&+Y(cVB&T|u7hGe7^CQ#oH616<@n>rj+28bb|Vs?xKtdK{#N8kcgqNEgAz zD97@hJ$9r4wIao5W~TvHM6@`Hbs1=gPR3f+e5NE=>&s?NuJ z=eW-x&VA#5?haccDt5o z-&0h~o}_gqRz5O1N(BvZjmX)~y`jyCB(nd$0s{A=tLd&trxTHzVF}!RFGm2FpapmB zH~)84bUbl+mUYamTWN8PW?UF>&TH5>vG4|p@Hkc5ULXJ?OPjU90~FBv2j51$9aksn zzq%jVwiiEACCE9W@<6Ag;Ah-tvk%r>T+nX3B}ZMnRkofrcCaAr)kwB3&90<*hB97<<4=f>VKmCv80qC(F1iI%C!vU_UAlSZ}%fBfV z(_#l?GAhu+3 zA;=P+v{NUNeWRnry2<@ge`pjper7~@huD6|kAvHeJkDw2Y^7SUZ*2FkhiH^HFEq6K=+M?I!kip=` zIp`)?U4cSk;qwqUSE-ol=A`0`<$!v55ykV~x-V9Y 
zMbD3V9h9LU1TWS6ieGsK^d@J5Zn?7<$gxz4nw{qHON%m3BXalO!z@BV!JW#OrseRk zhA>iP>BkQe3Bpm)8x7IzarKvwuVu^L7=MOq6}|ScF`D0bVms2Wh}t_h>nTRX5b;`m zo;II5Z{7*2TymR*>i8CmXwwfMOiLxh!-HuOKo?HL@3z# z1O&RimiIKAdHj=0M(gT=EuYud-ItXsg}%8Fa^_C^fe|^)Iwtx&mBa*6P7MX3jZ8y@^01a*`>$Bi|;Fl)axo#>d z?r|{WTu=_-jAk|Qgbmp2ln4(dGA52z9Z69QM767=R0#tqNdmRnp^!la((-9XwWFKd zu^jizLCtRFOE}lMyPaKeZEZr(M1M1T#LL~w6xdfIQ*6`M4glv;*EWABYI1c|t{>0D z^jon{vdHz4M-_2wmE_!@U6p*TM4aKkh)~OLJk7Uv7*~4&+fn`?oqNi&jIWJ1oj{;50pRm5)j(a4WE-~j5IQ~OS1n~O_e+O&`Z)a8C+FGl^pvvesh8p z5&adxVkTQk36Z_My||Hm-54(~jCOgIyGt_PxL#3UX2nK}&?|oUkqLi+^fk&K7ocpK zQ)8&C_FxH0jP!|_M0P&QPBr`5<^VJ%yPj0wxza~GTf(AtMnX9KQxBBo0`=>65Uj5{ z%AP|Z;StgHjsHr=gsvXA<)=U2yDQo&fAH6LlfaEb zt{9S4b-A-p1?-zePmZM;O&OQDS&3u-)9iF0yJ>y{%9S@9yK*(G6$W$!pdW#9r^o_T z2@{N8BR;wYR$s`=XzF(yn2rLWv@&dlwT_5m8PbZS_+Ov?4Q}KmJk2yBtVrX_FSb z0c4xQeEI>Z2tDP3pQwfELEs6N<)wet$Y(?j6*SmYm_x)sdZda1vff;&CoWZy1bPE_@YzK{<&taL0B6WW< zBMIgBePG9r7JS0-x7C4CXBA|4))(~TSwLULS$-fBdaZ<|^+(e~6mo)Z1u6-WDp!z7 zT_vLi-6|wP7AhHpsHn?t_^DnxppP~?T)tzSZk!_G6NP5m&$__bRnr}U8P0n-_sNaH zw+l7IB?1WCpyyWr5~`_=xCp16k3(QzZFl6WMcASYvNyroMAoPD_*pE#O8CdOn|W2^ z29fYGinIW?464z{-UGOVGkYTFR$UI*QfL~D#|5=t>p@f`vnyI8hBiGJYLUs6ypD=d z+DuC;E2a|}9r?yrJ`4{IeilLWxH6g8BLxVq^uIrUv*U5tP<;tK5LWJ^(v&x|) zyoe33F#;(T^!;o&U1nB`AuNOplo5gLr}hAk|J`IwuH0H;+;)2wK2Vyl&ctq z+sse$6v$^NJwpV-#>NR7+iXDpE+}r>`D~82PEFPnMJzGx{eWZ8LJ2P2^}jY&gaxF- zif|?8)m6X#liX)M?ix?1oj)4DT~g2D7fZT+Xkmvly?(@q@5pq z_HC)EYO-=mvhR>Xi9GG6O%?8_3 z`YuYb9XqN;{UwxhFaQ}-kf#IHKhXgdd=)xjt21$TPjE1v{7@Uyyv+S$O|P*(I9&e} zkcjTMnD~^WX5ZZGJ_feD`Giz|)g~v$?zOZ9L*L-w;KEQMt07|n4er9O2k=6r>$vYQr6->tIJxB#qkhl9VkbpEuoJGKQL7jGpQ|^MhG9=m z4JsQvh_Ws`XZtVPK$>mByL+p9&2C_L_<`3RV3vWrK;Tx(96I6ysKv>oL}tG-ek7Mp z*{7hY1oE~l-$2ngxKF`;m+f(RDE5|oo*OG=Pct@GJErPo3%Z4%b3RddrljOZBM-Ff zbNJ$uS5vPscBZ<>d;Rzz(Ssw$nBkMJHbQ+)Yw>%goJc^|c{%FRE3OJu%D%)hI;Voy35=fjRdMhNbi9%zilI%1&gP@E=+ZkW|SkJrCY{_u_hX7X7pldkH_nJrDz z9vmejBc@^Ns2JDuEIb!ljV7$7pYpUA@Q9|N&l%(PYKR$M_eF%Nn{5g4_ 
zIpf09C`c#>+{?eux#;@`&Oh2&F5q1u?OT8(WZv4}c95;%&FSsz>;&bU-AkH0`zBG> zVGZ*ObnnPs1EC%4Gji#YbonHVCD_@QNM6`KkY@(a>28p2fm$f7t%+`_3B3`q%yNo~ zeCrQEF0;pY?y(5T>D1Q+6i8Clv-Z93#pvfedu1q9MQG%V;|L6HK1gi~`E@Gb<13Gw zhYXEf+w2=k31SLCU79}*8ju`>n9E*3|Ev9rEz1zR|UtwFM(V!YRBLL zx>M-f5eoM_MWhs{A2t6{sfJ;e|A${nCFOeJ1Vo#`jWD||YXBi#WPA(y%o%Wq2Vxpf zrnYjFscD%5OzFRUyBR<5YLHgrZ5SFoS`UEAgalv|YVA0M+A9Jnm4N;+W00lj_$wD* zLVRPmk+B^n0zu@U{Yh9m!dh1gr78$~s*Qs1?rzSvwEM3gKcuId-y?QctBg z9_bmBwr>wWLJMw296-8(65n$DC|s2SoKb`S@2y8N)S|?Y=4S2|VFm9Fd^t|SPppQq{(4*Ut5NxvZ&agm zIYkDsL5-Or0{7lb;|9jp>ra#}AzHy)8diX)4VwA`at_NQy6*?Qk1g(K+q1s|HxSQ3 z2e>ZAJi$ML^-`O@n8j1$qo(|UA8uP8gnDB~-LVk99Kaeg>;F-8bp=fIiPZL;zt&|pkQN+eG4al--w7z=QcN9(}uG%W{>7cT#d zah}gd_wphwC1@a6j09|`*R@>xr?dKAgmE>GrdlYm&5Q=#@=5^Y{#g{y3#~@blS;oQ zbWnQl*sLzB9yu?hSsT=zO(#0M03Zmc*$K?TdHtscPmxmxWTiZ5W>u&}-D{3}1RPCF zQ@3U8(Uo3&dFvD^{R91%i%{$DUq*4XQz$_H=)`*M>vMRV2|!;fhH-6um=Sekg90vU zD0HyA2CXP&zQp67t&*mt@*p}cJbV9a8k_`9Qy;T5H@ zbbkNOYNJ_O5gGqjDzCual6td+&_K}~3sz^&3zF%S2RxfwKb#IHms zn>N3H;=1tNSVZuk3->$jnui_x7VAF^*gmjqM;daF7C@E$MNSP**aRl{FM@c%CDa4_ zs?-f4BMp+>aY)@h$Yu777NFdCbq}RtSBkhTo6P7&xr|dK9p|1;~Cn2U~F@ z&hTT9K$5dPo~Z9cSXzbz#?M_4d>$Vk4+xBGPcKn!&Ht>YKoW~4de!2)pT#jf&m08y zS3y2%9jK#ONqD$Vr(Q9jWtAvm@auMNxNaE>>3ZBHPl6jL=IkAJM*B?*Mmq{P{>fjU+o~VUT*Lr{v zlM8k-FNC>{q?Ru9$>VBfP1DJTyH&lL#~%SzWedMRi|7}VVUy=X=X)a*hDo|_y8`+t zcW12BI0j5cHeS#KUJAARHofC^%IC5r8wNY1o!Jc2ITL(07;R2n>0wz7>ihS)Zg~E+u89)gt9ZN@ z2OlAq?zziVQuOfi@y%WET)1M&Ts^*^>eHMI>R@G>jZSu=J_3FU8Op3tGksK$rcs6I zC$~T{35AK$w-FV(cgw4A7#<#%EU5I2N96`SqNLLl_{6YJG&`+X+U=bjPthI4W^XdB zM&*cB`6X9A%V%_LksQx^p|X32dKLJZRKurSFKWbb$Y{(y&n03eMhtC@mP#-iv<1KB zUn=IhDo?%eE)D0_)(Xq#P5tgKNWZ@w`L^UVNH(E~2J8A9gfr{JOg#_i{F2ROMqF&_ z%vz(UF5iriKKkxP(!?_Ax!H|F%R0;JtwZ;DQ>Tg4GB*9oZaDu|945&HT4zH^6g~l4 zfiaxF_h@2a8td-Fb5h7#f5k;P>&4IVw3PlwV9=jDKt@1EnG%X-BDk)PERSE1fw9Z=DFT?ao9O>scpL;4VsxSHuF;A+B%dsk{`Tl1NW+OFwdM z6mPzw`JY?|lrI=sRj09#Ch1TxG_z&xYrN8WX3D6SgE47E*tn83lda@Jwebu4q%hFf 
zx~cOCC(ArMnNN_~P2hByfi>cV?)B=gndSj~7qgs~I*JdC%A&k@Uh;dqvU)#w9orEC9^2_+ABQoz>JJ3q1l*0%5|N!wKKm_%B%b26@sdt0Uff%$=3$h z*2N1wi;KguCN6D5X=I_JynvawQ0u6c_~k}V(#_(#}!>Y`aHtP%*eV5!HJt6TO1Vw zv~U;SIgqFte(QN!GH;$6h0}VoOnjj{R+%z>8b~rJQ^#hG(5Lf0O=Y8*Nfji{89-kt zlcwI4Tj`Bwg*;rq!0B@*lIX_}dX*id)##*_;4N}3#a4<}v(5lB@(eYvp-E_s1Z!tj zSVNF>DzsfWXCiO7Rlcx&d3K|d@>2~jlYmXKgIe4(ij1I(8=ZGv+mZ9i{znE#i9{EU zyRVN|2bgyzRqqkY0Ci~5Hi=Y!>EeD_m68 zyf=ljgP#e& znMYXES!HAdHGj^qR{vg|!s_)DVIf~R*%UCp%QZUR_OHB79G zNegR7)`rXjORTKyp5h*KE7`DRnoIk`ij2D}e3)LfgC6oyCFW_<3O*X%T$Cn;%6=M3 z@ktxh59Jpr4PSH7g^v<0+zor58x04%*@a4!Cmf^5tv?g)8in1O-ky9f@2zJmmr%2z zt21#=CN-Out;BFFm;Ket_-Q(iO4C$tb!d^Y%Pz5cZ}v-D-V>*7s}QV1KF(?M^X~`m zvjSQHNTora{HWF^_^J3FBpb5H8}gMeA3P0a&df2X$#S{n-<1aVd6D1kr~@u896tZ( zQ`!|vWp?hnQObi!5xkkzgx))@U7EBOr#H;=MtMz~PZ|%v9zIk3lUM^^($V;?kETp4 zyHAjn+WoUHBGLF=%{pfT?s9R=m`&lcYOp6CwT!o)ybO++XAc8L^!{~w{;7yZQkGR& zWnL6thPLY&7iEr;@CT}>Jh_^C{dI8rY2gO}8i`)bcSHh9dL+4ZqA7j7TX}_=ZPlX! zNFQtwSqm%3;;H#%7exPfe``L&(~h%K+LR&De_abbuSD=Y0n7XuW8nhQIrGk1{p(u1 zqX|7-=&_rhn*XyrjEmg3XCzfo*DW0RgGxnCo%`?(?tTUzT^K8HjFay~GC&GDBq3Lw z-xOAQzwF^?_T7R^N8&VAGQ&?+kV)y^^wA78x;4p$?_jiV*zuGy?(Y!ecoWbc0R+Kw zVQKQq(}fyKX_t{gldX43h7Rfi`L7AuNFo+wTJpuDJgu1I1@pnSNjosj*JPCXh+`-U zP^3Aym1L)KvUsHR+~d_!H3W?*T!RG6Li3}%`7V?<6A+c4@)BpkxeY4v{q*hUn_1FC z1Gyn{npQi$>KzHm-NtysIx`hkX}`MMN>_^$b{AP}Qg~UV9ZstQ`NXs#ZGATxjTbd4 zk!rCIj(RA^wv-Ijl1!buS2dfj_`TSDvc)8A`SVkR`a`n*&dPN}x>al0DKh6S|LK@WT zTD{EKmL}De1lgCYTeF1UE60bw+#?z>u^O6x=aVMzWvuiiB}KeG&FoxikH z{*l6@`rmm=!J_|B{(N-d*m}O2^m0h8$gY17R-hu3E=**S?_!(e>L}?!F(U{PcSzoQi*U@ zOB1$G+tAEbw+9|-oHV=I-szpPw53L0S+!V6w$>p}_Wbhg70SXgBCq%CTAaK_)k@~^ zNm(0Ycy|3yvfs?bo7(peKRt@&s4W6u;DolYvy+%5n%yg*837?4(|$QYoz66O zb5G#IF{-C~!(|u!$-(S!yZ^}>hiJ9#X~FK!_RBU|vc@l77!qW4ownLN`*87*fjXkp zk$*L8?04a+)x)G*JTGbEcMBgvG%S@g3^NxBZz(8D`1-1m3Mdb(*BB4;z{voNJiZeO z2MEqOSx3vzP)PE1p)H#}U&9{;78lO5)f)9=Sm7tF6l4ayN%h;wFDiBsmt0Kc2#Y9UJi7%}sH?3_%)Pl1-vr?s@0-1K&_3y^VWfxhVIIwURO~Wc zDYo+^;WyIE60-!tvC?s)r_$;|2obFNrGpY0^8v=i_ZD-PslQpN-%==Um+P&(WglKx 
zrz(kI=2ffzsqfW^Laoe0q~>YGOWQs|T0(@f`(w8xak8u95_7R!;P^kx|F|b&P9D*) zh`vNtG(G)7G4zbU4HDJ(=M^ zkS9p_nqCXvh@@g$z}T&b_+fjwqge4qxnF0jck0&t2<%%fg3My#P_8Ms6iJq{pY`ZL5NN3Oi%3{D zPaZQvrU$c8>AV;7LQndL(soC} z!ZW>z3Ch&HQZS?8uPBQRk@wKdp&*Gu|)%ClxD(v zLRh!Lt1X90Ls4K{h&5pAP!i-l2Gj-EN6Za>jQp-W>q|fRlK=yAj<<;ZrRR_ZN09Ga zENSsd_NGBxe$&^(dh`Q(H2k3$qM?PO#vQ5(Mj;%#my5rfe>A{kvhxTiN8;wjs4-a) z)!{O&IL&xZJ<@zaPg^A!Ovop;boySvlet{^8`jpJEXLN`^LIaApr-jel)n-2x|{6L zsJ8#u*}l@=`YQKf)xXgZAwLYj>j$;y%Q00F=W<54DD>eV90^Vpr>g3xDP8&WV(dD1 zb@Y`A`tfB7)}4)F({`n#C)=a)z0hSH5dAgOvyac1%S$4-Y2EQ8LaXef*< zB=t|*4HZY0&aG0ljv(Snl7`jct+AJ)PKL7f{s=+G{+6+ZeKN&_b#*=Q!B0{7Mmb&sfp|2kmed$ z;UZCBeh>L|Y)QS^_U@KtuSQ<*)KqA=M~!?3$T%x_{D{4hno=JxWu=tN)}a?}LROhr!t|TZr`>YcOHno5-v#`XqpnE(qQy z5wIB*sJqmOIOqZuLk3el${%fPNGIRTVq^^F1T%(kO5wIRDta zdCag?<6SuF3Xnmh{6FZzGSe}-;D5zMi>*SA<#skA;?vv2?%X?0?{2aZJ8%ywIMJ`B zY~AUtdnom|=6!A#E14EMvsksyhdgMLD7h9CTmn_o%5+P%f|ygkQ^?kuzeY9L25p>) z9M`gdToK%<$ZzXS^J6xGLocWcT)S@<)*VDgq*&T~2JpDi$d&pwsl`t!VQl6~qsRqw zwJzuEA%R;;x>pJ&<2t^-mBo*nj2s?%gEAu`pwom<=tPCytxLrsh_EA5u1hg79T08UrS_aR?e`F{xo?1!sspYWc0qvIkug74YS8xF%-pRN;`4?quz6m^N#?XJB+ z=iA^*lK9>fIK*aiY!|i?Yq;9C_@qaZ+r8LWu;gPzz6BusIZw^1O>CZmHY8OrLPF5& z0t#)Q_+MxP9C{vSmi1;u+p8}Ts4hOjawbxC5%i-l#Sk?X&ykI@MJvF}EDdkIYnMY; zOVR+zkh4>pRb$2!-6>|KYw}N)J8M~%)xhH&ImTfZ9?C;=@B#4`z zdiQyG*@CLRllQA6^$PuKHuG@TM9MD1%4~HwB?FR0>6C@agTkh|wr0~P5LS8)GzdVQ z>wN17`(~XLAhg zPwN1mg8M*iDps{G7`BgCcs}g7$DR!as^Ip>?&XLdP8Hwy0FUPeUZ9elTGqWtm;4`3 z3D+6(t;l-?Nn11}7pF&VVKj+lOCD_bZnwN%GLCEyr_>7ve^!E_w#X>s*Y?Tcj!y9iRQ9au3mAGd*gekVM&qCxJ^#kTP%`!&Vk;#WLo;3!~AC2vtQ3$ zf_qqIWHI$kSk9V1a@q2wjtN*8XfVD~M4h$c8Y^sm4!JPO#>0W?{?bkZ79vlKw9G6zciAbt(s5;v4c6B2wIi3mFjddk;&h&*!I;1fnXFVI z)t4045Kz)phVAfUix;Lt03Beo%iL!Ec+rCh)@p2?>gI&r1Q(?D$n@NwS9k`%S-on({Qba07dNi z(;I(U^%v|u#t3*lyB?bOva>TS6Ma(aY~{}y_4n)RpNm8o=AZ6k&y{p~!Ni==I;7B+^7Vh?1lDB9xC^xaKA*Y)!_t)4pDc3F+b2MIcBx@HhMhVaMlfN&@rYDrK6FItq zY3xCT3bNIo&?{u{gzkDh<@dkS3RD92Vu(1+Uqu zVot*j5hQDaYYNZw&9bK$U4J9iV4TRY0LbjX9X052Gnl5~nok5~s(C1C6c~?MB|+&J 
zXAdp9qEXZvh@DUl-FImLgTm%6UoKHDNxRQD-NTh3FzJ__P=sa{23YENEugp(~}%1LK1Xal(BoJHIz{}fx`k2{Kll(kil^&2xMK?T&gE&Zl{ z<}!FxXvvR=4@?DYz;kQQ`12)+k|+hy8vx+`wFm~u{K|6O1@QlnGOKVH^hhl(?A6*X$Qa(?%Hp5+&&Kn77gu8{vj5sR;6;mp z7v;712zRpp!y6t{=KxdGb4NYF(u%Hfpq_ZB9UFCmxBU-c=D#r>w$EMw&@aR@8;n80 zX)vwhXle!KH&8CDu7(P_@r*ZG82o*c{ufar0RvTo{KH8(K}MvhmWhoA@+DuW-Di=% zSAOr-7QU0=l>Li814z^mE9X0>*T3j98iqKYsJ>nmpi@=1b(?gkHaTCTQVI~%!tl#JiIPa48~~x$)iaHJHTTNpaE@&{VzrcqIF();v-Dm z`q$q=F`w7x>;ZAeeCi_efH~&LUkT02{`W6{6G{Za6joJqU2Q1`WD=;$21f1&8Q&Io zeii+zgMZvK+Cl342Oq{S4iXjJy44D^)zl~cXhrCQSp>#lk-yEB_ADQt`G5S5e^^{6 z{&V=o-7Ml>FFXP44ATM8#(Lv_?NDj^F?$PE8tCH#oW3Il5?56xBqf0${f8HWgGng! zFQNnhF^=cNVX|Ei@m{RRz#2@5Cn^C4ico%a#0zTc1$3gJISh21jDInp1_k!0A+Zk{ zk8&`-0pf}X-znN7E)8O^at9i<{q{Kw$WBy|O&sYboIUQTR7}N#*D`nn&bR%?*Fooh z3T74p?S~_Z78f)RCk;*KfxrumHy^rSJZv}<6@)(^=_3A@BZt_4B~I5FIsr|w`|?Q= z7$_ zgANAJW)PqHLtX)A0=+YZA`3!7E0BDKvLDRSLQMa^z0r}&kNpfD^f~S}ye5zYZ=Okq zo}@&r9gAat-_Jt^92v4Od>7J_t=$f+-Cj_sbp}$q&4v^sRZhRa1b^4wO6~8WgoBs( z_)qlCBkIf^C!>=}ZzOTV2lVPe(09KyeNEn{y>}3JiJ=&m2=t!Juga81MZB9@BIO z`U(~VUHkR4`}Klgun`)d25Riwt^@NDO$;r@GLL1kB(<40^*OlVCnPul7&?<3<}8CnP}bOw<03dccMmm;xyP z#Oj8#Gbx8Wh%vAJ!+Uy54kR&RpC5k!ijaD$2^tIluYP%9tU}q}FKaNjI0znGSX^=2 z`T)%dZ$ru(&0P};3A;TSU~F{B(tXboD92ZE0BA6E97MJ`|u!^*Yaj7k0y$t32wo$o&mU7@c3HM##T3&&w?Z@Ac+NEg&5aa z&IG6FAYLGZo+sqIJ~s-UApo<=s!fSi>_iw~neR`~QbkBqT*j8Vow6Q9@7<6_74zxHL+NNC-#> z5=w`3mmu8@(sAjKlJ0K!?Q=oLnf3WT>v7HeH{-qcoW0-qiclnR$HEKrG2)DyvN++c z1E?d%(gg6-1k!P5Ah=75NfFi}7_yrOoz4`b?vlDH+(`cu3~QRy(_}BW@lm?|iGS(4 zJ(jsV$hLcBYT8{HSt{6z{Yd7~=p09~`#Yd4@Cz$G}C{b3WTUkS0}R zqazd`Gv(!`1lqf%szCMN4`3JgAKhFo0ao{)3>>ELfPn)B0V=I)##9VrlLx(ML{=OX0>Ua-bIO27(B%j4E#NPseLZ-MU`i@j87^y-ZI7b4^H!Yi&E+-|%<-6v^y|^sxx(^fs_5nXH8sa$;8_ z=Dbj@;l^3kNBYgvg#AuiFE={qwL+K~PVY$`w6+xTnQyF5iWb(!A0yvTyZZ;#yZUel zN?Lj57pgO_uRa+CFO>CclXd!mor^QyL#>BQ6m#r(`iO1-HsX0=azS<|>;U+Le=yRs z-ftFxBv8t}qkhq=@zqV-5a3d4nVSXV?BU3jeavt7*QqhSGY9-0(}99I63x5>OrYu} zDK~syIk;J&fg@BsMfbrfY$$%}m4!I?UMqDSTo;5nHG}_N4kZB1?;i@Jl?FmNBTGju 
z*f=;P>x5;FN8FJKv_(IEKT!lB!FTnyD2EFrB+7BpK7Nd4kd6=%LiPsoi$KDnjj-1I z|NJzD(|GB6CKD?MhnM?9(XQa0gRqkuv{JZURMDXMn4gz-{m7a}<2O27} zl#7G7=4EB=0mNQ}C&@*{L7l;XsByNce_DeNY>W6k87QQW7IY+;#==-93Q_=Ouc2A+ z*~6VB_H4H*@Jbu2qF!&BNNo5_1NroiMBwmld%E;r1Y5wrLx7Me-m{VZ>~z}AD;jr- zye%*Vr9Ww>O}vg?Ji_`ZrYFEqsR_upAXO^-7U*DWopm{xhv+d}trw}#)a`-eVgA6Y zgGb7DoTM7=frx3E5Px&HIaVUq?s&LpdCql;wNCQMD|?H(;`lNw|drb7C!TYJA7V$_&~{3+Z( z`s%N_;zH?Mp>}yVZ)fBuP>kb!(>*yk3EX*KjaQN|aa$k4nDBD&Ui~a57JZ6+C42}$i>`($DqwNsIZ3qP z&gw8JE_8@%K!OiaYNi-uZv>9};(F^C_@-GM@HOXgKuR$GIG1vPxc?({1r4ZG41niH zR?G{D;~gD!{K^%x>Hth{kcQ0RtVr51Qw=9adnc{Ju>Q{=Ah+5B;;#@?z%5!%^=GL8 z&!t(igogyFUJB9GEn2~l$W*%;cmFb|+&t^I_(uR!(A}G z%dLS#K}ktTLDBfl5zc$P{%H)z^;@&;Ai7YHk?~^daf=o~I*LF#M;DULS^TY4qvc~a z_w|hvD3vUeKgY!n`x9$lNY%_TkeUECPlV{y0X@_QfFO2-13`f(Eb+VV&=*7TA!7{qFJ* zIMBpchO?bsOu|(wX71V?%TxQU!o|!VDMk1N!9xykBL4A^DsO>(0-{HO_IGC&s+bA} zG<09i)I)+z)}>Lpk8`dlV`X<;%VbatvkD7${ydW0KnhkxDGS0|<6%RK^UXiD;50!O zC(RTuDFBDU*3aKRZw>X?@s2shv+>=+P3l-o+Y|6=bDQ8z<)F6lI<|f=feZ^9F^)95 zgMIlT0Sg@O00A1Js{O|va`gb)?sU2pW0f3SYpkVTta>HGH+b$jY{d6Z8y#j7fz4^P zJk+N6P+BZHhCA+$V3|cVzfOovAdq8%0RdtsnJ{KdEZ(`!u6i7#22B)i&qwi9nAX4> zdq3&5HZ=!no9Eg+K}74cepn3P?}Sh-=?N4kJYLiTp@VP`eUAPToq4ZN>uo`RjBvAQ zD)3GJPFNwj{k_fO{A>Cy;TQD&ulChUOA|nT5(GY{gPG91-QB6z z&R=|w+=SM1&A`LL!Ny)i-7i{i73SqWPI?U^jXh^eg$yMhb-bzE8tQZk`HbgspSF@g zhaRY}pJDl!_MYT!aUqyh1)&<$N}CeKYN3 zUJMiMao8OAGErJ1V5+eWq7N_IR7#DXo85;;JgVAmo(uTNzcYz>54XC6W-Z#f9 z10nwH_#7GNKB|Nah158j)iUMvb-Td)1R{qRpkwgg3y4sp%ltWZc2e~%WqWxjW8m8w zIYniBc!c@*=~>LD*|QYWFYskJeh(`WD3zEg7(i_P;U7aFN~59W5oQGA0OA}f2|!@N z|KXRJoMOl##bKIsT+0Gvom>C<&4Oo$%JcUVKaAH3>3c_wxwP}-&D!Z!sL-)}(Rj<| zS6GZ9m1EzC*+J}0aQA@yTy-LdhC**Q>tiT5F`{L{=YCAih0m=ZxCMiA*=Kz{Qtd;vyfKLh_v<4JQ!HYOOIf81qsy=W;5lH}tWF%zzwmwKe?Nhbd zg>P>LfPVBs_hHr(=n3RWy<>b z=Tl6vB8^o#9^u98Kf*k&PnAB_r>gb=0l4vb9@cNz9FgDtm^p9;%XIzwN)-!Ew@z~xa?h)(>byn5AeozF7{KP?A@&8i@*y^3!V+O0N`eT_RJ($e>|99` z`H#H;7+sO{K%wdz+N-tN6b%Y|1))s@!-opv;L^R4#KEB46%O-$J9P}(0<{{TC$o})5&ylv 
zA1qiABtq9)1J}=hb-N@G+4btj##i}Kn|p3p6gx%?dT-u@TFDD0<=^zJf%GxH**j52 zIOT|Geq&g4OR?y1oc-j)kN%CXiN7oGGbqPs5EI|?7|Ed~3WN;bhEr3%?lpMV6T&Yj zCLodE$`RNN+dfqCRj4~{dd&7 zO9X%>O`cb2ERY-d?DTdRuo8K-`BX4zj(V{T82AH}@wp#H>L|#>;Li??jWtvu*grI} zw6{B@BVSxpws{3}shzubGTs0uLOVOOFjNa6O%7!vGyhPRWMOTGOD`j!o7QxQX8HGb2mH% z&(7VOouXZ3;no6KO`gn%I6&Y1|R6`bJN6VcP#YW&E_jV zG!`sy{+N+II)N@#uRf#L+wnpD5L>#H)`k3 z^CZdKFR9Y1&YVZR5Pn=@d($zfv2>o&Dl z4tmhN^P~f!F;#gZy+<0zw`4nY!iK(fpd8I>YwSI$#l-=O5Fm)~9!Q&=YZ~>Ri<4#~ zoZrQ00uHZ3;U%SvKEN(gf7_d(AZkXYPuzF&lheIquG&POrkJ+wWMzq7T1Q0*B@O2V zgLQ7QkLS^X=I>~MD31#JK!Y;HYg1R&K)kaszdVGaD`fGx;ECVi6ICIo;n|Jh+v<77 z%KiogWHpN;a-mBfI2h~SpUOBmcY+FkniUCa-9bnFl>;k<8zoRgvVb(BXRjmax(|25whZNo zR{!a#pp|IGy`oi~z*z~X)OICBjL`pdm(b`$Z2^>&8~4+o$s*@5onuw1biSAi_+odm zV#L3nv1nGdjUMn}gBY{apXCqaP|s$^k0T*B+#`J|FHzJ7HXvxoo%@r>&+fh0 z{}l^={%pU!h=h>-!GLix7rakMxUKFC;&=D>esdz4!iVqQNsUo9FZf9`;)ZQz^cIIs#Zx+!!ssFI2Ww04-t zTrttW7`O20=m5}|E6OIq&I|uXn*yj3$Lwoe2P^gSkhC6c%?+M(&X0~}*3~&;=k9Ynw8{PO96VbXWILbrrh8Ix@*ZKX zK$AK!pV?@}e*NXv78GoERz`}ZLu=jd8uKpZDNI%RfpL%~FA(=Ys9J>ExS^(iLK!g| zz36=yYYUcRN%Wfq8RcGJJu~dO#rPHwtnnDM-D+jWjFhi%9=i=lx=?no8}_2+xU`uK z-pa(GjX8Y~z}-*}9>7Mlv6aztw3X&jVni~Kq`TTwsDHJM#}+Gx@ysU3-C>1UK^zgWB0Tf2I20Ayoo3Qs4Om$YQt==aPamF&ant;bKOo(AlxL;O6JEqqEbh zY1zV>^AR9M6IlNts$zs(3(e2A$LACo(2xrRpBFd--`y5d_@N*3`HSVTn2=;VZo;N) z*7|7DMk0Pdb+VBY*Lt2_MHl{#3(%FcsP)t=c23vumF!nvPerYfk*OuPZ zNui$+WN2sx>WmVrq<`hSNXA_tG?l3_kcuNq6X{Lrt5zg<%u>95pXqG=_<$pnHKqg# z&=!?wJ=0AF9nmvU%G@wP`|Zi4Am@!K&+drf2D#c>sdt%OZG?T2AH5v`;;8K==e#dOxwOEd8Pzp=cnDas_79L&U04VNbx5 z5yix&7KN~0;m6uBmMO^B%T#Flgsg-0*^R&}e6BO5X?7r60xt+J)XcSSIx{5>_k~hA z$vO$&4}Xpci`%F7x(ZANWP9h{IPbmDy^hAy3A$$oFqoco*j*;(zEj{V=bM|A<7-ja zFA&L%C5dmPbB&#YCAzHH~^$(vth_kMF>|B3ywDA;bb3`v)om$N*5A z1@!>zk>fiOoP=L$IkKJwFASvVgO@uYUS%z{RmNDa#aSQh-egVxfb$+tS8~|B#RN$i ztgAG`VM7s}zKGOwA&vNZHbzk9`(8!yRZZt%BGsekmWix0K!FOC zezM2Y=59!oCX%pkdcTkcV((GK)JTGsa)j99uu%iWO8H2|SVp!$r;Sz{V4ll3E z=FL-l%VK%#$Y!8wP@bA?R_GhsNfgGHfqVD0U}VuQhB(dY1SqAzpTg@Gd2ZL-2_Td< 
zb$`VIa*Fj(@;7q$HyXE_``ySy*jholY$Ugb#>f4`OYhNhbQeT0Ee0Yt@~iU7b7P$+ zYSDisYGAowR=N*61B1HLafNris72RQUd$~I9Y5Louv!}-Ac1kkPH`DFjuRDS9U|jw@Q29NCTWL6vWRyH-Bds z&kr%booiY+0hGi8(zbLxcscZDCIk;;_horcov(yjoPzIInTn;LHyMuaSYo$vEAJ{d zhm>zlaY&Rma!}o%R5!LZX3+6s%!)M~>90cGS)~&$iCMWKPKG z(tL9uE&G+A<5uSbRvF~%8)5iw2zN!$=M2MThU3w!7SVN$RSDaFjN%z_lnY2Ag%63GAL<)x%i10wqh1RKn0=;8aP}{s=f4+de@O*LI+A$6 zWot*?uV%h9$xA-HXjhIaE0RO3S3E(k0>2W&d}dqk*8zR;We9`wdxA7e?co4y!#unX zgY@Hw5<#%XjComkJc)5K-z>AAOPSP-kvN>%*_ZK&LED%x_#r{*{z50I!%g0g1(cM| zH@~EbS$|LUcNm-&4Wo=C6!QH5>|cWekYVXElx_YizCH%-w94pm(60@T19d`sIK4DUJq>%hw&1?j7HGX`ZjZ}~7kRMoB)QxJVg@8v2I)+Y%T zO#8&<>EZ7pP@KNMS1c%i8>HkE_Fcij!8scNNB~Gk`>eg{fT#G;@do7?`@QLJS0D2r zF`bTDbt)7oB?y98(K zlefP4SEltqe4WQxs=90ok#H)fEu=G}R3)AiTaSK{jF$WO(m7H6<3LyYSvVzhioJCq zx#i!TBe54XQiqPz3#90 zp7Wc{Vfc?zoj4OEGI{ZsbL%LGBYU!Y+Bm^bO}RHqEIK;pSV zoC8tVtkQXEZz&T?t$yP|v6^nglERiD!+O&P=t&%sz2a`#>hZj#d0k;`UlC98PrnQpJaL#fT`jadHp4UVd#bedNS?$-o;Wr0T_2u1 z^lX=g64SvJ4gXHD!+~Y!UJVfU+&?o?2Lg-*z8u0f8?L?h?y1sImtTvw1FiUzyjLRd z;I$i@F1$Fs=B=R41LKMpV+}q;qL}lP0`GP)m4cmjzMb}gg<3Z_p=Zpn>3bfwk$**)|74ukb4RcLnlG~gM$=-0L?4#K6egQ#FKzoJcN*9H zFDf$hLS};aiZ>!V!)tfBC47(HRqeei=8&$!+y=WzYO(iU_KB%pzJuRtTM8JsNupmI>xITzw7UFUnG<-@BjID7?v`J$)xzyg#fxX(m zH_7>+8%Rf3R>Dgi8j}=5Z+^vTrQPc}tG1EzQ_zoJmp2{%#qAUH!#gm8bnVgwoZQq<^)DE?yhu1zo!%*b8^N~ZCI#xE=m0u;5MuOu~ z9!X+woA=X$P*zPTvq5=sLtk}0GuH;^r6Q-LBIl!`v$3k9H=wi2`$)iE+OeYZ-MNwjXn3wCEcIU*Ec>`iIzp+do^_t&AIc!lx~D zo{#__aZ#D2 z#>Xfi-Gp&4`#qJxMK8DCNe}s0rr5y@nug(g>gxvYGkd<{>i{h!H=+g7pRq=2`Hd{! 
z2{X(;g8_i|i32cvX?SopQ*}&K{&Qr+rwS8){MU!E4?DfmG?#akb~u!|rB{gjFyB}L zpcSyNb9omEn(5y|CNSiwk93@GL0ChyPW zU#9gsocG>A|7jTB>hSod-T(Eh^mm361fQrdQDI2wQEr1$rLObq4bO0)q}tD(Ip8q zr8@09OC14LJBxiHkK%qmJuWfJPw?5RijgHYJ&FeGd5bM3nc22Y4QX8_YVrNduKDQ} z!8yKmf~C%9PRFwn*muVHebDT;<57s%6@pIZnJp-;N zako0;T^>*IZ;$=LKdyEiw4GJdO6cM4Z?wE&Ry|B0=If}&dy?~_(8g#+M!xBiY`ofs zhVc8ei0zKUAg}`Y2PCn}|Jc#bU+B{|-HBdXvoNRr_*sY}F7bS?e!O7cm%R6Qupf$Q zt(}BTQ9mT)Oy*oficJnv{jcH*$`6WD2PeK3f$uwU{p@1sz)_#VQdy^xI8c4!EoQA5 zo(Mg|Sf#|oXP@pkqNgvEtzCLkZb9+GRkSZyNKmlfOKC@xnw|;FBbi|~{rWx;wl7^# zu^tH4v%1&Sb+3Uir#%Zbm@@(!t*n}5wpeoPw2GCbrz1?%R7bEv%U)EIK1MdAzG?}DsLS>Q(f#OQ3UTAj{o7zaEG?*M zdwTp9_BB=gZB`M<^QS(s`IYR!@|*W$VlZJxv}eC!w3<#pwMi)iAw{dDkF~m>8mkJ7 zILZMe?$p=EG;1&inr(jfZU=r%uKzz^8JhTQ-|ay++$UX;NF{C~tHOXM(U>6=;jaQG6}C@QR?;3uz*5cC-=g&4?=+LJl{oOl1l})qajH`zeDfAe z7|;)dxr?1a!v?0vX(R*PT1a`d1C?OzpGNYuyZEspSTKqUVI{mEn)PfAWgRcI$>1Ii zQp~)b!&>4zY*KuO%7%7j*>nVivkdaw$zluD(zz zD9zXGN^DE7PUQ8^zEOyAbZdlPc;$!xR!SPk>b{ey07J00veHnEGhq{D4_0UZIjpld zIjKEf-KUBG z4JsAcGL(sd-ReEXBHU?A%t8fb)9?oQ3CH4J>RC!5D79SypY;C8kZ8AFuDu= z>t(!WGJwkzpqe;o2Wr*L441}tx5!6PXFa?`UPAMN(xAp0vL7zlXczK2w|rPOFr|`g zzrv=>HcKR5#}GhRj{#GFZVylr$LdB?ew&GOYH9giVT$Zw)Maax)D!gL+7^>yIyJN9 zD^h4LhzR|=CiZz|tX%Lg#2wf9VLD4W)N$!bxgI3WQ<*kkb3rNSFu2_+{Nte@?9$o2 zuEAkevhZTJ+y8we7rr2?`govUXaY4^yD0%8qk-|~-_sQcQIj!ndr|I0@A)wePol{7 z%<>S&W|AW1Q{-FBzzc{$-f=w;<-?^i*lZC-MjOJc)#0wbF3~ zrx}#u@DvFR0rFQ}-&tM;oz(&2{nIqMOV3lWYt&z%5+mNdI!2jww_xuySRjH zgj*Y~Dg+iN^Q1cx+Be*HCV$~6C*hx^)+R1*@k?mWbW_m}dffOPQt!>RGJ18TvqYMS zYzHit8BD4A^A<){jJ4Ik0|AR02=ii2qQQoKrwUWg&7|tJUU48`WW8hPL`XWH8>%|4 z&jOk^Sm12@1@ZY0+y!AA)u-Dl-qgHQN%l#?Aj9c+=4XW*$HY!w24$mqKo8n$Mc$`R zbi2dmYTV@HbbD}?V%toOpR01U)ZALAV}H^0RbD`x?KjBN))eTupDZloc^D zax#7F?UMANU4mocaRJ@0dCZ|%6HZ19G^3tb-|yp;{1Pi*i}-4+G)V@p+>bI;VtT*t zJ!HHF8I;bI6_k+LR)WHxfH&QD`&Sj#JG%ul{BVp`*8R!-^(>8Iju2~_ZE;X z`$EUop@@ZcckqoxbY|l))+4?G#{L0lqvq+7tyx9ksXvA4X8QUo3mL=eve&=mtQ$?u z7XCk;srU4I+`LQ*Ag^umm6HvnpoU|0-8VpRQx=Un(F>WamAluX+L;>-nyUg%XUv01 
zR7b=I{7X>!h>T1f5XPmuvM?Yirj9QAeQEuM?PP1T1wR} zb6_PV^N|No?7_tWbGdUB@~P<{Ff>9~%)*->7sQKgPISu#gsKdz{~O!4!nc^WE8&zh zBYOVV_82&_Xp}1-S;T;eSyD_O@h?xkwq&Be{`P+iY~iSXZ(RhU0ec7b#qRH&HPC6* z-vO?AYhX@Ki5EZn?TsjS7FG|sfi1L#zy{cXCWT$sx)I8*#4VkNkVAU3ZOIhyJLo5IdSA^07#A9Efk=8)hR`m0caNI zTG-Isss2ActjG4@=U;n80#+!6jj(vEt@)-1-P zT85kyA+Mzez&fzt66-qaJ-(qf!rvHV@PkaRp7VsjvicoBU92rHPi)tXgYoU`a2JS( zK-Jk2K)CF3>oAT-a%(F5GeZz6G+v%Mztg0?$vgzS2TK5FXdzg|0}%J`hl<~a@B9X! z0s%s;5QwN|cP?H|buM0BP1dvsj9NhundBV}=K!uJTG?2ToejHbAmlndN4F`cT3u6f zdDuA5P48SN(^DRyzYcjpFHIkfpPdc>?@-_mYxQZx`h9pV8lfI8tY7l?-k9+x9{^s1 zWBA+1mkaK~=hqkB?r|Gk_daMyunww$?Zl68?{fdcVW7?Y?kitjjSJ;DOCJS5_F=YD zVCLq$3;qG#TF!^?7}y;HYGpF_SUUV4@n&BgAN_@lIVVT}BA{y^^anoi@ud9_KzW`3 zLf(Q&CIp3h8ObP)-*P z=)3PfcLPO~6R@m>FyPL>-OxhY<@q_qkW=_^Dt2(ZF^j2*>sOP%8n#Hr(?nnu&3*X( zA08*X9YnRjrIW7B#~@TlCmHc77IhbLELzR91Q$1xJEtX0fK!m0>h5;6f;XOxn|t*} zlLz>M3)3C|#KvPjsBspFL6Yr&H8%{g=79UI&00H{s=xEGE=$Qd^AW?$1E7|*cZw5-6o13hNgq*O-v^Z13k3q}x(g1f0RW${ znaKDZntpjGg}Qb-1JdG%6x>%_K~RhD0>2~4c|Z`{c~f;XCUjgU1T_S@Ojv?DY0?!t z{c4b(DPY~5g<#-ZMUGbnF2~9$NSSvO2^dE=5+1`M?_4Y)&8|BNPXKoF@BD-1c?l6< zVyO8R>oOmMWFrBrIW{hYmSqvcI#D1`?u$d@{mOAkE#_p|fLO|CH@wqf)|cn!ev4A& z=6rVoV8`qM(?wleH<9M|48V4^%w~x&h)_Vk;)LwTPk%|D z{@RK;dvKc;Lt6!4voUIBwGwiSn8169_!ryut0!%gBk$&C!mMX4}zC zmBr@g9VSl8EUIWHUD?Y)ZpT7};iNCX(w3piF~emY0q40I^YgG_&_KeqNXltswXl)#mhOjuNX)+SNcqyT`}I`A%zIm4q*QiUC# zvqc}4{Q6*3G&!mRgjTSu21GarJvv%(k@F)7oxn&^>biRf*u0|2zfKMmeXK+yr)DFo z;fXIZj}hr_uksXWdEEH<-3Dg#DVjGjKgGN3F@+0ox64iVyKk9N&1H<>0*jSyUb$q4 z{Ui_X6p0n5Pgb@;t}Ou~A0wY4RA3ABL7`lA(EbXt5XJn6|G#LAXOLqsW6nmd&)*{w zs~te}2M6`<%hGNJ5f+>b55ho&Vn$NfF9uhBvQk`#tsUCr6Gy+EPylQdxEg$Kv{m1g zp8f^wLizDEx;E<2vl4b+oP<5*8;*;Jr*ZcZ>CfrAkpS zvJu|*>gO*nPn>;4dPPiTr*4Zb?=NfCM>8hWAPbz-3up4VRVKuVG8;eOD+X^Vh|sG{ zCp73x8_e0jE9;^xP|6MY!}v^ih7}89UJu_v1o}MYt<$5mCdyX&qXePD+knf!bXN-* zT$#%mOpC?ChF2A}rgez`P4N$L2?~?z;WfX79u_Zv}FwAM1<=YSZ1b_&3vc=Z2 z8wP7X){qG^{rdYPV5k~5q`j}09Y)!gjhVuXzaztM*Pi*fpg&lz?nLR=NLv@0VPn3c 
zj9J*j47itw$q&??BB#q525!p$Q?l((R3N%;kzN~BikJ69m}@pp{EFf|mauxURIyf7 z{YXljto&=dirwZUgVmTSeV?;6R&WO&!+}Ydd?rrBGlOcAG4a@Uml@keb56m?GvbN@ z8)KK@_1WfrtTSWMey2?u^7!GfjVF!AgHg$^W$>5-fGF*SOz>VBkSjRQ{^Mb!#^aCy zc&C3Rk_h>Me=b0BB(E?LJ4D^VTzGhv+WEBw^&voeTjLj#uyv`%FM0_XzPyU8XfTlv z3_oKf^FOAW-7F}4ub7gsYbZU-{lQ^(CL`bY4JTyn;pZX3y8^l9wU{U~-^nuIGw%)+ zi!>fU4@i^o9TqbbATKo>7RR+^-!+9_zC^O$e9M_XP1W+lZC|UX(i!O=p0r zXNe6OX@ppYP2u7LRg7LS{9ELT*&%h;Om6OU=R7jIPwW2BxoffK{X6s|VY!jy_u_n` z@sHV`y%u?amYo=PY^;t{=-^7C8~#4sWPqV3hj3Oe8~Qn^W8z~*gEU8jfgY<7w)qrB zhmUO|G<{oXco%r(##&OwhzN9))aU)wiVFt35Whm}*@Yf4K&K1+1&2-fgZco9uo=Gp zIkK-fu@MS;*C7R$WXZbzVZ*GtQT`9=M_+Y6`=uzgcB_7OS^nf_qIB-enkf8)kLmL8 z7b1qLSBZTM4-Z(VDY5PZi7ytMWVoG1U9aVH&+4is$dgKx$Xe<8>SUp^GuS#`s;wLT zE`G;)KEBldaH`g(DTU%m8-Lb99Y1~50@cTs&o_a*8|p{Gge>(jqR0zLo+N}SA72eq zB;{Xle)u2s?S_=Ve*+IFZ*lB$r=gZ3(j%*&8@!z|9Br%Q(#eTcw*~(BmnVs6kac9r zP9*%E(R+$8n_DB~hs~cvnn{T0*B*_@^-)pMZX2C3c;r=N6X zJ!;bqc^>&O6E*?Ih|6I%t|gA~*lqmG>V&N`*4?T^hKX^l1^2Tk$bN)cyC`~^@M{*R zkK54WYrHs}A(fhkpANBG-Kio&8XS1{?%gxL{gf+_f~WD@FEYir?8#Fl(IdxkBY0Re z%nW?TMQ)gD$kxwma^P8NBYbEiDSeZ^T%^nh2heVC#H}d z(4ThWT=VKP3IAk)u?O`~x?wfTFlggcV7@XM&(;&*a3g z_UfwWAV7_(!c~13J~43=tym^6wPnI(30gG?)s|dW&2BCXI&xPLCQSxzdKd=UDvilS zb)}qSjJ$jOI@YCP9;lqoe2Zq(!X4YCVOfeQGe6ZpH4a=YBbBrOOS?9IE}1mP^@u!l z-!}aUdHy?WIC)pplUCCUZhOK#ybco!vc}rl5z0p#d3VndZ%Z`0dH>p&R?-2RhR>AhYZM3j8Jewm=>X`zQvx;dJMro1uA3s{uKwdd_ZX@wxY#1fS6{#ioX~77n zyAmnopXTsk99Loxt(Nof?e&0P+28I7w(9ehCbDIZ51HSRFu?sU>r-<=zt4hKARw%O z4y%fJw0G(8c|f;O2Yh7D%i#d4sm61zVSfQ^M|sPDgXRDmv|eb&_pfXFPV93ZgCDz4 znU$a6dtP3kSRJ5KUrF{8@P1?66>RlCB3AM*VcgW{GlLJ0-8|^LxXed95*=03l1%kQ0q>>5$#N`02Bu06bs;~A89>?er%^sbJQi1u7B*IsaEHz?+RBK z`%LMUTDeeon`O`PazMpz6=JHRFU zI)^x)SH7mKtsUZftWB?WIbD%dI_isY|MqFg{V|9sEPdJ(V|w!iUtT7ktsTmb3&hFg z5&*y2@`HsY(@Tt>rsMdfmp0Pe69uBD4Ep@jRzcM*k+W{=vPuJEeyWx>z1Nq0|GXb> zgo=E~R{O6Z(wxZm_$dBl2wL5eZcDL#T)$|aqDSq@& z_x3&gs~AY;ZHE&G*&zGgjc{!^*8ZasmTm3wqxC0Qmwgt!g0a}GVG4(~8j#9hh5z&O znjKibO%Y!Qf~wqDY9#vHH4K)*RZWDylN2)ji|$QyTj;L;4Mi&=$rvYPL* 
z_!?2{3Y^o+-=U*>a>=A1D;;UOpbTkdiS<}1lp%D8+~+3kA`<;*Uq)Gv^u2<{{GM!2 zW3MNq>AN5w<{a?+qJ4|5X%eFNy$gHcrB~By;}6MHPovPY#I;-sDe?#l1}im85HIF7 zw-RF1MC*u{+WsE#ZG6{Kbo*aF-X0RT`sc@62gD7mKk0k_*rvXva3R<6saq8G%b!O- zK0xKec?333unj9aLK8GdFY7mCeu4HNF;^>9F=e1WS=uJ{bq6lnUjD=EESaX=Zjx5b z&15S@4AXQL$6lUv6d~)y+8E^7O|vnHJzbhW9i^E~j?`N$$h=MwIyeQhRRKB1L@Hz&n)ZQ?4&qSEFO##?l`kGx$$v4leVtzZW+SO%To6x z4b;Bp_2SSHG=AG*EU}HKa#K*s-2I|HNUn;kr!emdT4Fi}JQDB6eSJ8pmqTAINku5$ z2-X>i6u~|u*Doj_3KCkx*LTSsKJj}lMM1j=x6hxr*}wig?rTAb^_EEyP)O~Tk6Mcg$YkR>?lV8|c*CwGL!NsNOxM_dEL8tK+sTn8(5Q)g=b-pcPX=E1 z%~G}xUnig243K2qEdiFvBX*| z59ppg6Sgr7!52zhVVk-`2XuHDKMt_tnYfnrpfrXIR4A6MAgBF8Q**fjpH1M*{1P&b zPuydfgJ$)PVfnVe`&_{xTvuls%7-Oi;T=zNcvJeCrPw*ID6$sFj$-gjw<+0s-7}#c zev}$o5_#@xV)D%2fMV)`?~cE00(Mtjmdg1DcJFEW5j@q%g$9bD9$gsBz@I0k97 zG+A*{sOF)rJSVx?O}o0EAL)Vof(pj?z*py#mzI!TC_g#HE=y{>d*td}tJV0sKD8~= zm&ur83dwJIi((D=Lv^vPv6B&fCYS-Z_ zU|{)bw79#}l$cPQTKVo6eZ;9~n6m7!_EAEzo1;erz#d$Mzq5u}_LakSW0*5b!aWl~P2(=nlF&n~5l-*qa zYIQYLp4AFjIVWw;Y>Zs>apCP3Fq2l=;@4K}HAW4qSHWpl(wikD*mb&m$iOm=a&k2^ z4eDL!)^3}UryvjBHhkWt9%!pm@>OmwA}BKZ2p6q@_Da8JV5rYw?7H$3TR&57D4{_z z_7LD>qfK=5LN4LTp3}8F5<@4?lzti=_&_Ie; z#_+)K)hisv<`QZ6Zc`Oqx)EnW;bLMrwsPUCUsN?DIu?ZTs5^>p&@FjOysX|-Ba`gP zgz8>e3g?Q&R_pV_O7AU4_$XC4Y!hM&nbDPJkNMj-+I+^QF4yY!K;$T%Tt~12>Hzbz zg^%*@YUh&*?Q3MA=N{N6%e!UKZCa76H$BU+ORP!`o*DX$pr&?{Ng(u{^-V)O$J{;G zA6-enaEv@vM2Ti3B`gAEG;-qUu=RKyA4zFoV$K)3l2KL|<0!^rx-C-r&U_2D{OoRT zsXuztrT*)ZfQvQS(MP8mdPJaJp{k79<|gWnfhJ%UUJ#HU*+N>32rY0_=T8V(nkixu z!1NM`e(QCilVC-yitWQaa?hS{{k(*DaGghe3GZ%-`)A^>C5mzrw9H5@%#88?IJpYG`jiw9a~rjb!S z#IUMtF*NSqzdn50fN%d1*fCYYyGxRhggc8_ij~=6<;3Tu8Y$g*sdl3y+nV49ix}=< zm7OiuS8oZ16#J=#g^1*MeqZ5fIWo}mO&^xGM-;6 zTU-qwPsUBjTwFx`cxv7&eXWGRn`nm6gQX0~H}Gk8MTmBd9fpFsOR{vFSQu@`OEpth z0a611pMOI)yePJhX_`*X`r_Wb`>QASdj0y~epfSG76Zmht#YpzWwl7kbXid9GZL#& zWmAWEhtJreEPv;v4Z5%E-u_5Em7V8sX;l51Kg2q(lh%QYN<7F$xOqfSZCz(rURWav7zsg3i>>oGN;}+ry|F zdF8!=r1Z>tV&kUEynLUOfJD7fR2SRo=SEJd)rV4OIW+t?y&PMviHrz!TW~KwBJcXq z&-PUONMKvea~hq#2ATk_!2tBmgc+gcJE}3jQv|(aLY7 
zvOftfYy^g+_iV}V;CTjeUx|2v!&csZ;#11|aK1abpMwGb#!(4c4IYYN)K9gIy+D*k zvxU%}8>r!)Gc3z3T33IH6v+4YPnEl%3Qui(5R^6!hHC`TxNn|0X-#&e>WAn0% zUn;2}2sM-s9u+S*G$NW|hL**ADaw{V{xwz1WVO$5fEWWYM{hqi$$nZRLHWjK8Nfpdb)lOgP9oATi6y`EZfE@g{9;!y(NIBhUdpzHB&V26 z+H1oq=G8?ePl&it&!-e+d>7yi(b2-^_V{~rTgC{QbG;)zdxlF$UA&ni<~I+Q(8nk z!q0hPe4Jd^SNw!}G+rj$7;bbVD#ypK#@+dz`5ULa;!h0vTUQFOxSs5_&qV4z(iO5D7Gotdom`Xw$ik*ZM6Ekr`nB#(E4={4I zGRsDU0@T&&8$j{NkD~KIy;&>fW|(m+g0(q8fgaW;+^XGODCi)FEpq;f*Y9A0p5C9I z!YeX+$G*WxJGO-FM+POHc1V}%`gZ*&Kfh~Cd)nj&%eMC%?KF$<4LOp}g}W> z^XkMDsQ}aVC;Mx~ZvlPi=nNBENyPc(0R>`qDxda;PTf zV!}!kxxBq;@2ZLK&4sdnwI6SKg2unT^s1ll_H#2kc*B>{-zd|R*^bz4?M$Y29(KcR zQZurKLg3in*Re?=uOaeg7BdvLM?#2yvG;|fKn%zl0em*Zi%YSww*C+q4wkF)amf2b z;e8gVGvpsMxw;lRY)OAR`|=Z7AF3q1 zGJu@ntCkCzFT{yOyw)TZLe_qBI~|M*Cs++aKrnzxI}{qjx-V{AP$3MQ`VI^OpyzK@>%`LaNCpUbx+# z!*Cm(;DLF``|2mKfqOnmz_E9g_JhB; zHRgeE6jlz+AQ3X1cb?*I@0At*E5MU!k5$X#uC9=MQ_l=abRPFI2A~tOb~{=f;KmCE z*uJni;i3sDFdk-MavwlmPRnHyf$RJu~3Mn8L`j`dnHO&Xbz(fTII2;o1T61wY zKI`BvSJ3`@1_!{MQ#Sxr7&-~x2u4FD*xl3yF*~W<${E?*hIg)^e)o(jh{gKBbgsX| z@(3LLz-qT5zy18$wr64zJJ{)_;qd>**INfIawZ*m6E_JIXSnJm(MmX4jav+@x^*n2 zRrXxS#=ff5Z|-SbeVj)9Q;o2|df@-c`=%qU#43Ez%bK&l<7g4^$H*NW{=NkPxueM2 zI^pNh`ICYd1?~6N?w{aP+T^{Mz--C#Na(}u3FP*g}WieY%y#tPYA-W5Vx_>V)_!Okq# z;2+}o_^al3*1pnPx}K|wSKBb4^5`MOxy5`Xz4ho_g_CwG$;K*n6v#W^s4>?Y1OGf&g)W_Ap`+K z3pR+^+nxN2sNaO3;xj#(QerRyMK8M=cUsQ3<_hOOg%WE=z$*!nD1`Kofauzdcu_@Y zp&w_7CIkVASLw}>&Jrj@ zB3b{ZP?1kKi$?IvdOO^8kvKcda(l*2t%eSwrC*4RVD=XMV?gfzFPd6RNZTPN7w+@r z{8o6}W5XxvzP-NT7b||vhkBY}{WYv$wRw)3Zt!kx?=AV*Vh6b9?E>dfL#it4eD~|e z*zz%2l9tn>>>P`YQ)NG!1pDiqxts%`b-#7r43yVVrUQAJa`KF&@66sn%9=ar7hX!U zgwQnu>dEo(r&j7+poeYhm>*Zs|fmrwsPm;p_2^%9|>neYm5oDIjO@vYK5QrHnD zUet0oXW;L4Xe}W+n__vm7ydxH4hj%Bub;a&Fxl}MTQ|4EMBX#PPIw=aeIl7b!9dFp zI`mP|JN^e*n_`*%-+j_U?hW?HUSvssmb(A4#7XG2UA6BROYSYji{#yiU5A2%z(Q^Z1USSTKQJoELACqA9Pk$6m`@x*c&!AOFel1SiIMM-bz@J}^xX5gA z#mu*AdAu9;^;5as3V|3=QEH9!a|=qyj>*Fib?;T&0=F=}A%jme9REXY{&A4uZ};zZ z&erX-5xZSGY`WyUqJBxNde?yIt}JA7~Da=>Gf{q@Ijy@MrB^_>n5ZUoc%T 
z`=<~4pyn_}p}}-qU0;rrRpcYY1sn}4X6U7|2MzBant!KNbTzck94g!v{1puW49H;$ zvsyTWlG&>6sNEA6c*bON!#>Er{Z(w?v%02-mGQ9aU6eIi>WHJ}BVC@7Bdx6Y_imUK zhI&X(UD&u=$KAaQS-86&_3jBEI3gprpjEv|wLT>-sj>vezt2c^Is-0#2E{Z#+bH(Yh`OfA!l#GO#~YuR8;mV4$gl9uNBY^ zVF3!(ka9(#Qu6q?;HQK1vd_n^aHl@88YX8NJpQz4=+qPQz9yB!M;fA%+&KO{&yBV* z69+4DG|jn|PWJ^8d(m+3O;O@|cC&uxvlwkV%crTyGH7kFzLod;AJKvuQVHE* z=WjDa{-!bRo0Cn<=HfT{lNUEItEqt`W!z@|G}@!9BLHsE`T}6QwMuIe=Bb^zujzCiWo(wxF5zp-S*txj9~0slIDAmvw_*Ad5< zGemTC0i4=Z%CfJV)eCA z#_o?-KMtUVyS3L6%eNk43`z4Wa39%rw4=-9`|0Yy{SC_R->ADmu2yy4t)8%kcm1(1 zRLH^ZSJCwZMHO)2`X5Fw5R#d3>22K_7p=0kcmV{#S!51=i)FBa<+MNiqPS#Q5R;ns zX9aq?KI@}*Fz#Wnz4F95P!`-+U$Ns%XeiYXcrDuP2bp*LFM2*f+6<~OLG3qEc%Y-1 zX%KmfVEL7;D@du|H-grU&VJp|1a#iy9Kf)cot0HFWJh8Mm;Zudvj|tyC|%DbWa+K_ zt^|U?ljI(gOZXkoJ}i_Y%QDZThJO1a6LbHj*#K5up2Zyz=BC)!l zHk43JlBDfn5~vfsY$v#uIQk?kt<;bzZIPYkF*+QdG0QIvsd}?Qr$dldtoLf`ZtLE(rtL~%dPL%MtWBinCVs+FK!TqY%lQ{I4TM8_*cMq}FT z)8{jH<_3yS>4Xszlapm8PLFHpr7R;*1yzN$f#&HiOVTabFZM@H`SR&PJQKf!-=g#` z;;l$P5y1BONJyc(LP6xTzM>?MOZmX{W3|bBt}0aH`4dQjBYDVe6izP5=wH`BZvybP z>+^2-dHl>gXBAL}MfoWN1Tu1h!2#0)KOaP|_~C$QJ%251%Ql|RJWmlAQG~K$;Knoe z_Lf+hb?H7icRBxQ5i=Kww%%*Xuwl+6*sreTZleX#%})9B^1mZ^Q^=dr%~hp&C(W?O zjI_meBFp=xNyEKpqWHh1{}A{m#^OBf#9!?sRb>BEdPwlIFYoou6C(4q6DlFT4Mb6M z`$I9cI29c=wis14PAMEI%U^bjZx)H{Td26HpN^qtgHU>cGNmY&IO3tUtwKur6v*+2 z5@WPVD!JCnC67DYjwX3;m#dl$&3sj@Dqu&`G)fVoS09nHm&6^OR*TO%=o7j4UUooD ztM1Ar+Oj_=1zSQqG<}Rjkk}J+%%GT@T@s_Am|Ap!GB}!#Ao2n6%8MkccRR9YVlRD{ z9j1scLDC9?NqE~IWe-@g?4Cx=wg;@x*_%Pau%E2CC>kep?A1*V)`a$jR_CEo1G!Ld z*aumA6| zuk(1D@{saS;8K+8bDx-vBD^P*oZnB1M9GX$=tO{XX93`73zBF{knmJI?4PkgYj?3 z>+I^Dq|;Iva|=Tev34Vr7bln9_=ytzK_p?kd~N)*#q>-`p z(OZWdcE{RR(7kND_?cyG{|-W%v-^hhYmG|A7cu3FLz%=_chrW>vX|noI?q>d@Ms-ttiQ8lyrDvkU&i%tiD#}qAut#2-vy|-8Y^DRQPt5o>s)*)5qiy8jcc+PIfmioSe_+QXtHPP08Hl=>H!5!O1U%gG| z4#mu|+cV8R)O*ycR@;dC#A zfGNy00tD3U^zsO7>zTG8%j9i*yAQl+afMGmjw266NxAltanV9AZ|-CqLS@n=&o6u2q2Wi5oG)xtLbE^KZ>C))KY3Q;;MTrQr*TO`S@0XWx^3e4=i8%?0-y2N z)DQSVusJI#sF~kHpt(%?)~|&N#~JOZ 
zPX!2ZD>zP;WJycooG#{oeDxMp#3bEK{Qa>OL=c5VfEPg>nDpp zKc%6#Yy=cm9gWOv&DP^LF{ZjS(6WCE0~RI z(@ycqI3&^h!lI2t7*FL`Z&S7rro|jlaT(tC+BjeDZP`moqqnSt(gz8HA6ZA)W7rwY zt+tDLvYS%N33+W!#GA-ipOU(t4uMPK_UNOCu(~)D3e*`VGYfa0qru(I^*@8oF&q_L zv7Mebl7_$b}diinpkcXFNm+o*ER5HCp03uMkIf^ zkT|W9nmzGhkd;OMJ;+dWD?8mfIAUw_T%xyw=`cv+_dezFyaLEZ7{svjVOk*XuT z!FK;WN#gxQc;E5;Nk#LDIPPyl&Vb2c-u*(O9nW}`RoKwbp^lFN&+e0^=<%?P{8-ra zDc?>kUC<@B;h>@|#Rq;n*#1IlVUZ!3} z2{U5a?~-Y5&CI1#ttesV?%oyz5O^6ys^!!koOT}VFejkB_2d=v>q5%gk8il4(o0Zz zMI`gs7epVpE@<$?KPIB<%4R=se`h_#@eeJ(wkVG;m9O4m#hYH52{pl<;46Fj#&dBX z6;3keSA1eL{JV#-y&O3y@Kt~^MTC|$!qY=P1>>v6f^5`)@nk^?3;$`JV--@iyveh$ z*F#5mwlgXhK4~w!h1}?g%+dK@A-~*_i<=hK75jG59)A5uZol^lbZlHf(7(oX;iqTl zyLq7JQkb_pX|mFM{uBKIK)cxnFj1t%^I&0S=*;7b1#<4g6BpDh_m!j226@y!_d^>H$;XCJ?AM;9i{sL%OAhH{hf@l=-GTw}j3 z^F6)BQuNd3%r2uZ1S}mL9b;|jevM-diMe1MHZl<|D>S9@sqQF0wm^;pJ25xI%0yE* z)STjBNc~z!@cDC#=!Jwnf`%_9zbrJ4U%^)c7VINPI|M50P77@w@DYju3i(Z`=?^wX zC~En6lZ-_L>8N1 zJo=(}e^5|mH(P-;=Feb!;>!OSjj%MQp`usrD-x+bY!+b)U&w$A`QRO$iY?gzEItF^ zAqpCiCb$ltVFsy$l|lQTfi9B<14`B&J;HJAsHtw=C#c--3MrgTs+;TtbO*zc z`Ra%Sr5g0Mxg*`P&f6n4S-i~86<=?s>*=kXc8vUqB5vFj{|eZ?q=*42v;H2&hTi=& z_+QVWO8d+aAJA;6;nal5cn>~u_E75@1fdSPi#QQ|KdV3wE0A{@z%#3qO_wn={JhvR zNAUNSngY}!WwuYhnYo9#B>F=3Ma zMBb-AJ~#a@cO)l=4fQ)qo%~FkC4d3WY)GL{w-aHxP&Z8`w5+5&GG0u6C$G19<*{9F zt^45C(1$xH#tLXN6AT)PjtBGMgh8Wa zc(KV=&mlIz@D`H21jbB+d_c1I?ZL?OWj-tOP<)5Z7B@=UDj~(mCB_sVdwdkMqRJXe zBrnDqKgNzlhO5>n+u}^|^-RZD*7cE-%>yE-G3VQV9g9Ltl$CSb*q7~6goXPJRDcOpb( zQNvd(hxd&b-Vr*8YD{J>ZjfvWR%T&{{#!PkQd z&6oiC;TNmiY3R5#`d3bVt2a^Rk6BVzQc{{9(c@x%IkrjXlbLrT3NynyhWdkDM=u|o z39mCZ`N;jp>a zac{E-m||}}wE8`7%ZT^s<7&U>*RSB3JQ;%c7dMfINI>6eDv$T1mo-k09}vVI>~0cK zZdv6wQ6Y4r8J6)#fU~!9go+U${GR6OuFrS^ZoFBE@+(HuVTX~PH95p_kUE=1vRgc? 
zbI6R(pB#!2>k=?|1H$r8A5h)ik{L>th<@h8>j=9iAK;`}nSv)heC4RDL!UL1mB_6d zQpowlwemFymNJ~L>M2)d{|LdjkG;JixCr25-hoE2?0#kX(QCy!G*+i`QuD)y9xqLO;s z0Y1tpEs5~GKU(X8jg`}EX^KB4yH*F@A^z1%yN_06e|&`UA1BAoqw`muK2QO^W2!lj z)Z?ZDQ%0%r1YssBs2-0q441u3wVdh3TX2sadl{gIPZDa z1Li-e+*7g7ps0cD3fWLjs{w_I7EGPJIFs7!cx*491E&sjsuCT-iz|D3BYYnm1A-d? zYWJXBLfI5jx+oL|Dl04D28hyrFXJ|J-jN-xCt0zGyrOINhkH^;#LZHwS!ZDkeAZ1} z#MGxr)&D%zqDUGAgxwl?yY#H3E2#(+kr= z1P+xv&vLbxhXs@|Ls`kav32aj^bLEKy`=vNhhG+_m@^s6Nz{O(jWFmwtQHanFZpZw z3W}Oc;JNnn?s9JaZNYypfT7|0t0oMOLqMciOlS<{SOQ!xx?Y&qn<%WO8jagUav;O~ zQG}8T^1+j=NH7Gke};h8`rncE9)X?}SObKAV9@YI*h3&6P(=RFr4@Q7o1#SOO*8?C{DI9ww8oXqPILNIHVTxgX z{U@mZzhAW&C+`aa=q|_^7RS7kuLl1z;0*kR5ZT2!z=Yk^jBmZQqdK4UD;g;>I$hu2eV*K8}ZZ-sATkgiv7j zLdEIsY6EcObmD;)TkW^%YtSsdbj$&XeUNDJXRBgyQ>r-g7I>^V*&XXXjcT*Jsn@cSlwZ;zZ0jDsp|E>XHNSCcVsT!f?a_xE(cu5VKAgha}&=8{*y&H z|IHExkUg!OoS^u1v&MLTv%zna@v8Y>!e4k4?AD40Xa@4U#nMJ95@z55DwA1|d`_aLm)%!Qru1cTo7vx}1K~hnKIa2@ zpP^xH)5BIixd0-Y#T-UN5ALTQNJYCpH!>t_q|X8`AOFwG6Q_t~YwUM`&@5p1aJso< z_Fea_2M7RW%+&22Q>XblFgc66*yJBYgjr2kFWNE))z{>T5+#Oq^;wx+OMg{)HedYxI0T_zIiZlqs{dq(IoRW#5AtBw3 z#H;G*TI&@NL?|~yHsCOiggCV+emCDe7)2!(wWNU*?;s~r^HUl&4H zBG~U}Qza)c!J3y3!omVnocizQHje<8xaLbeDf&_#@?zi}FGf{9Ez}J7{Y)4~eEmc@agw%Ip z!~bjbST}G+Axp{*q0(@0bQF%{_se{3z7M($t3~5&exF-pG?syJ6IwTBKKvF21O$J% zmVbWR4`d7NKxP+<&+fPs=l;IRF3Q%v2wzC9h8Ewl&2)`05mmHIj82oJ#urp;B0|LrR& z!Bg|y+Qb)lAu3>|cfXL*4!o8umJ$lUC%E_q4(?X;$PL2dG3Xnn4bq1m1v_b`_F@(-l*sr@v}YymPR zfCIcb3<8{8El>cEK!6X44ieRNvIZ8C5)&l_Ar*T=c4>dl)mWe`L=s<|@1-%Ik#uo7 zBJJAQ0p2LZiQdWfH_G)T1Gn63?g6QT=ozvCRh1Q>bY1G(5D7G22EhBO9cw|Z_uZut zd={W5`X4U_VGxK3BO!(IUUN&5*DfFKMEQ{JZR7V&08jG%9L{Js&;(|p4jKsRGJp(N z3h)1Xed+OHldqsK-$$@T8O+rv!*;o9>Cu$4?9wL&G9FVggx zuY(xc(C{QZ90jsS!2lEezuuj{5t77A*?KY3ecLb`LoG zmxg!(!-_pla8+OZN8-OHzZ7hlSWvqu&TN4EuDZw~zNMw5C=}FAds?^v%^&(b0wVKK zVj9FB8yY_U*Y{@viXuwJ%Wgq720gV6;5GnSED>88fZYbDe-IVEKmP^39*kdn1;Trk zL<*@OB}^rQU?I?m6B(Eb{!B$bm}tQ7-tfT*AgCxY-n>Gn> z;&vMC7@eP=*I5A?C8&Th3i1cyu<+uB22Y?xoB7pc8^~N|;sD7EPe4-y*d;-vkni

OsNSgV1dG$f2k@CoeXYr4vOY(sk_AQJK$0Sr7XOe-seC5tY9fH;9d69Z&#J~YZyie^t>K7oOOhlaR}e=O$`laq^uAg zj50Q}>qP}e=^Ch)8vvRve?6YJ(y!03aG6TQA*EVi`@3K;fPMw|$YsYgJ${d`Ez>fz zK=|*mDFkAK`+CiDxAwgLf{UxxbHfU-66wITZuRta1A)PwHXND`sYSpGu5JkBcm_zp z*LLMIqdxTczGAF96bgzoz-LpJpmbVKG>S@OLV>dZN@o4W5@$J;t3%K7;C7bl$YtJ% z7gHGY^B^0LJR{@t4Jl(u4Z@p#F%?@7AKk8K1wHrqFpM^G;p!$_sc{T9I0}E0%#b^q`5SaTd~M{=0*l9HL^vs96`(P#;;Hf58U z^YJltvd`EUS(WN#RKnB=Gl5)F6asgU)g=5eXQ1`*WL=V>q7ffk>Kk950S>p)-0#bt zOWALr0?qJ^)DcK>7=>+V9=ESX5L}ZzW?8s0$1x&pbEYLZ8$xjRx!&;Gr5u4k$&@iK z+F7;3o~jP#i@RoG2#?p`YQA;C|88El?$nZ@Tl zd&`TkPmi_IaO@+lPa~v3iAB(`%I#?DS-Wymmh`bEPk({0dn;8E%SPr3;n~pJ?`vOm zN@7JGus;2WZIpLvgjYit&_u6<`Li1JuX>;n|K`?-Q6gl{H_*av^Pd<#y8)^A!w3Iz zOH6x;B(016&f|w}eAaJajs+lW1GFc9m6rPMlpUT96eD88U*7s1WepItlH!Nm`!~HC zm^Y;T{y!)EErRSm#ytYNx=nW4V~tlh8=1Yd6W>il-EF1-Rk&W*I_V*k9ntfNybt&G zgQ*v3ZI9DIn}Knf^|@?vC7`3aq~X<XWql7R_qLm}eFce>%L8lMA%H`$dDN>u zdNUulU~tk0%09bkGYW`cDmh`J19S$uK>nz;MuyWE3y&>2ilywLpP-dN7M$l-G=WeD zG))$da29xflhN^7kj9I=&TA_3ud6AooC^707Go+3)Vr-XUC-(C?C|-w)2*D$8XOr0 zWfQaKmD*RolmKCGh6YsU_%96Qm6er9>3m1_wg6(OvkvjMIl86j9^f>*-B!k0o&3C{ z3EWG5el_r#wymzeVP^_R!HxEyxp970s`p2Rx$$Ur8Z|;w?{sM1`k%W(JU|mqHT-=9 z$yt*UaSL@AKIMCQ(S`7FlF^}|&|}O|G6`#0#iQ($YlYV0tV85-J<_uwAHc#lvQ&oD z(^nDq#{gHjvaHkv5j@x%3d8ZwvIaOw&Jwd*+CdBqQ7}!lNBXnCUabUGQG0$PGf?y7 z5UNFpGfbK~1{7f>T$ar67H0dTwql>^Wg4wozF78(f1QgmNC) z8sL81z+`B24)~DZbQ$X~(QnbJ-?^uPze8ZTbjS%h2Tv)e(1pi#y*Ge=|LHN(%`b5o zJK&TAP}&aO0_0hHKN%Gag4UcgzvA|tMBUl$ zft{gxU!!yH_F)>2i~nUdupgKUQjO})8;1C7*HN2~;urYuE`eMe`7u~NyivHJ>7Mrc ziEs>5Q$Z4-`bYHHA8?zutNUXjYVr8DwKPybXZRNG?d5pG2|wHObipdYy_e=hzT&?F zY}3+rUHnK#uLfwICyZPz!~vxBSr+^_bK66T^O0aut{L+mtJBAg1e&Sbd8( z5l5&j6b#@*;l3J+k3zqfNm-0_|5~a>OZRz;B49SsJ}TN1%do|GhF(ETh>Zi=N~grs zi_9F&>8pzTioQ3bgW#N#;@=cW9Zv0l6ER z0d^>X3E%JCQvc~76>0>1;R{Y2(PwL~zvg8bM$4ety^TY7KUrQrH$A+-@0MLwd(i9tF1*TpCqL#;5h6o)23c0`%Ar-KznBp)uXcxx?_CZipcCo zT!PO!6=ut1!dpl<)cEU(7m0m9_`wN@mmr>elA7?eXbO7*m(CBkuZW4$ja%!TcQ`P-$JIqPWN+dm8Ge}XZ3 
zI@o4en42H5g^7MMZ&${gW%h}>rUDNyXf9M{pV$3(HP|7H#!ru-L0`lj$hK#tgat=I zr;i6Gn#AiMVwkk=x4m%HU*F{PE0ir?YI@>k>QBdLbs}1 zhPkKYqNa?c%5X(XxoW_v=XHq`5ZO=-&g3uf?ZFmrPTNtE(U>!?mf)^-<9H`v+kQXG zY#$zbo}FCHs!4In~SzhuBN}PA8J}>lDHN z0wz;gK%}a%pDmN%49Yz*X`A>^8C^bpZ}^2?H|?0xa2xsO72>nt`*`Y`*fg<@f##&m zUhan|2;i{$wurkfLgM)sIC7%P`o}zHhu(tp8#w#}6C`DmO_{{bvcsjpy{%#1{cF(A zwxHg$7a7W=K(18$5pdEH2>DRDipSG_ zHVs)N^-L!Da4!4osOU4NfDf#&nAfgB@neUN9|!ld;OIpf zCj?#J1-dO4Ot2ks!9Igg7jcOy7VE{4pgO2G^>cz$ z?aww9Rs7~@h<-)<&Ac&VZfIF3+&BC}A!XRItZ0Nm;*fzoVqA(v;a>P-oJeS6Bay<^ zsEr~l91L=0#ua+)`kKh)XPrfcsr9}B6L3ocBfIoU#;GdMZWi5n4bygtQf`eU;$M!6 z^07V(J!4K$FCmbto~Js8<_qL3Rz4*6gC(fGO9;;{j|@!^!*p;ABSq!DPmmP)pTHJ^ z{(Vs@AZ@H_)h?Y60Rh396KYyzTiuk4d-I7lt()h zZPvim$%br9{pnE(!0o%IKQCP>1>Tph_t`t>{`XRT)v%NJ7F}8Vu$tCJN5eFmyTq>4 zFfat<1&E}0Wtc`yAV6D@yG$+hVS3+IAmTRo)m^|)_g>qz9u9~bVUoCmqm#R&hAYCd zqy+zRvj(G>#MCGozjo?Y?{g6yTw_Ml4OJW6OrYhGnjWPZ^l0x>tSHB{o>YyK-0L#^ zn2uLPA9hCq^&9g9Xw~Gt7JqlZk);cKWWBuHSE#?MNv}wuH4?@RBSxUWK_ri*j+E`)Xk}!sX`|h7AEO^;F2$zQvrQm^UEr`YE zi=(R`ejI$JyEolH+UlP1o4N5B^Q%?AG?awUx6QNqDsJNhD85o;dO2u{ea>k5569AHe47iHt;{sqhO+B$aL=M2Ac_9yBKR@lS}asT0f1E(8WPPf&A>uRO1O6mQ2yCr@$~gk|IsRD_n%wcb%2cLz%pUdXm&Ph4 zCDkwVP909c84@Zn%XTTOept5MP)Ek}iaxhxJmFi`zr)Y7(KAhxdkSA1JQp@~p8n~S zJIvpAF${}B4VpIw?zQ~5McCHije~G|v!e5OG(}Efjj71h9>R!AP!&`+j_&u6`KS@gY)r{}k{tLW0p1J9TZ?+EMB7dWaLoQV-gRMOCiZy)k`!tl(m%T+=duia5q zwT)?c&;1=M3NK{fnQVxi2?7OCiaV+YHSiE*O_3?7Pul`NRsm{LP^kcV z(U4+~bGXfCePAx0(*YTD19XGn7e;BqUJ zuT;I38hN>%VN3Z^X^reN4AwYyZEz2U!fF7$9z607jVugn#k9dY;M#dxL6&>Izz0%X z*!Fx{qgpvJo7Nu7#u$16G%Z$CFW=Y-uclJ`z z3{J`&xXV-c+*Lh?%nY9n|C7sPlb%5_P;jOTxniX;MfmU-^&5W?p1rnd-yFiF{E^Sd zNAczxgHOMa<+2$3aZ9Rq^Kw+!{^p|O?=C!IoOj)aRU+=E#Fi-rKMp);8>;)ISpJHq z<)t+OwRbIH;OUvlunYR+k3$bQb6=B0?M&qElA~FBE#Ts*&Q4AuDR0V2tX`v$`w3rO zgBlX3n;dclsZCEosfuw0g>I0XA{99W!B-b7RaAX2n)jK-Q&T?&j@p_wMxmpBGoyhI zR>=7S*>kl`f2FnNUInh8899d41dr+@&I0P}^R+LSSnO$a_JX_O9ZM&mBo`>)UI0y% zNR%P$Z|Aj8+X@xtAd9=o&J>_`a5}z+S6#F{8xi#56XBpwmHF+@HadDG3*9vdn_Sb1 
z8hSeDoB8IcD;iu!JIi?NsPId(ad84E;fQU8&hcte@yCZ4vq;afdrQv^aYWqhCMZ=xPN}T+ zG_2tNGJ%bhnqT>s+M>0z^nNf6p_3Q!H|yNzf|a#Awzn?+1p?U1j~X}tx$f2~-H`B? z9S%Q8=;c%HhnS-F0nGYi8~(~jA-1f8);GOHVn*}dt32(ao-1OPxmf2(g|a>WI{E_- zy~|kUySkkP^ZS&!onp!vY4{JHg@a)HWuXSq^kvcwZFQeVBl_YXmwvAb@jM^6o6@1+ zb;fbTOy;tMx6k%4W@`Fyj7_4eh!TgR=B@iCAbl3ohh*X-P)l#t)*-5H%1N|*&&x=~ z^9)u<5@`j-_uB8WNWDCG>DAazJ|#>XC=aWpT{rL5?bU?(M+t<6OEA8wrXf_Dz541z zA`!k{B>$uzGe1u1uT%Of=l8Vu9iaC%49iwFcc)6@FveRw!ur2eDh~H!nk%WlVt|Xz zvm6~xc^`ax>#p&V5BJ)(J8Fh9yFdP1@1WH<&H$U|mkjio`73Ru0b!iRYI}(8 zf>v!P*F+&MtIXzsFDAleeHyXztL|Kxl-mG7L*W^#xkYpzRJ2O z&Y{b9V^VIrW`4|4_|y%r7=*%4(NZLL@{) z9ski1#*F>N{{A@)WC4*ICqVYO2dMHH`SciDSC|`r(MAf}EjPEhMeniWJ)?NQai&Pa>Z^z9sZBk zpWwQT6(tK?>_3k^fuZaSTc_Wo3+dKErR~#qUr0Z90fFe3hq@c98f}Bi$iq^p?DrB{ zcVUD4QzVE2q5eJgrY&pAy%oG9q7lb3kQq{drn_B%W>`>A{K8CZF??2W(O(>PZTy)n zH(1{*Jk3z(uPn4~WMTuab$-&pQ4P#K?zP^{Gb^JggtzIW z`n}4{K@NsH-+whtWg1~9`B+4QNzWG{HtPlK8Myo9da=d6*~@Q>U4Fxl;s8u zC^r#gOk3!bNGJt}rL8^bx6tleZ=IXde5<2~r37_(4T#56+Clvp95fVDV3OnW2`-_! 
zjC+S-Uug82miz$Z&@Uq|U{eT#By_Fk;=D#Ic}EOyEP z5;XOh=ih8gqJ*v)sp$u^7B0u+g6NC5_HK}j7k0Y#gExAt9iLptD+lzOR=5sSZ9lA3 z4aMn%tw`Ku*3y&)Z zhFMpkDG-yKZ|W%tX;Yixc%SwM@^s9KdxVa8qw2js`Hs6FsWXRCE?)o3{-SJ zG16?KQexePx&B^M6!PVVjZ2{TBQ#rT#%X*j&gaoDY5UahqK6&IPO`tu_bt_J ziMVrnn#!iH8@+NE{<~^uN5j3Sd!XzFJhDpTfoPj2(G%AwJ&eNhE|F@FdOXejVDyJ* zp~l=wPpZK7sSqbP(g9a1+UU0*45@YKla06W3&Y!VLhJ1Qx@>(^Mvw5FkUj%EmFiNb zbk*O1H_DW~&~$Bs48^Y`ZfgU)(gLEXlVwm+5OS{`n-$R=BA`Vp?j*&xX!(qQkZN1m*2wtn}!q_IQp z+RI5R%laBFRPKzyNbO(4t~( zm#y}A{d(saaRA8=6@x*cUF|)$Mq`ueeo4foqVKY#bCc@c0z?7gHDNuFIN55zWR{WK zaC2PxGxB4|amRr5W!mi^UaPD()0)nn#{;Ph);lqp9{Dri z!Yep8vYv8q7npC(Q&-}|iG(5FyCw~A19ACBaNvXqU2jt1e>qrfM2_>^ICk5JCnNs~7j8(p_DYytp_aeWn@a&8tCnc@ z{lQCMRN>*m)5rrm!?3gpL-9NGqej!9sd~xQ)Zo|He$r8qFgD#$(cco>_xX+Bgw05r z!z=oe!frpos6Tdmjons^W~V*QTPwc`%2?FC5g$-(-d`E<<$)U>3Q)O1cOaH*fzJGa_pu&Ffq zhtbT#L5X4dQJDPh`>{0t*&-;Tjao!K(R-j!xxCR2McH|_v*k;c;$HYgL3>k3;#_^* z*=Ow`riS<%I{R~}_D{CANvdSk45NX^o^dvC^Dobnp+WZ!p|!pE*A*#Izrpk`JL?fC z%~6EBLD_M?yA^^W6)AG#A#mLqiTMDgmet)$01t%BTZv31D{V911Xop$63-6&U& z`H#Phr@0G(#+7pPgNA~bXj0~}03rpzvuaT6kKH5~ZOSQ+&6uTCV0=k?D{j3;6GM(e zU)=wFGkowfXi9UwW;J#j^FeEfW$*cOs&h756_M%Rc3evT&uT#t&kwT4l;&=g7KjC= z;e(cIYrN-;&-{0MH6^ID6t8nJ=Xz)L#8v4aw)7n6xqpuv%MG80uKee4CoFcWx(E<{9FJDD+@w0+DX)F0XGFBAPWfZ51ZsnxOlp_s7a->>XhBy) zyOATGutJ2??|#hsW)DEMmIzgp&*~x%?)$$f-g(Yih8}~A;&U$QIPT4f`t&_TFb=Ii z*x*zHMorQoIlhfR!E#Nihjd#=CXfqzno7yTVG&HCFJKD(RpsKtM#WBAn!UKA?qGpQ zjj5QakeoQ)v;c8S33-!fB*`8dI0w7yv+{^V(H9Bkw)TdjhjknD<-lJGLSU>Y)3CKW ziH#9Vqk07!wWOTTHLENmPEMk z#ju|sX{D7?yHJ!-CF&b%&iX0KFvF4-g_95FNkRg+hoeV>%|wnSDka|S*x!yU$r|+H znSVv1wYYO5Z|?UUSg>B??Jyr4Ar)lgvt#vLyBT% zC`uiJNo@U!7MSt#>1IVo%L@i*>K|2IDYV1Tg0Oj1bcmfs$vtqe&-_#bsWtm_h{lB^ z802*%AiggFdl|J`;V<`xV54ru)0|Krl`M|Qm4}GJ(vFqR=LOr6_lTzkC|7)`YZV_e zr+heLu)zJQ#jZUnTBrw6v@9f7^Zhu+Rfyo#_@6I^QY%6#(G!**{3uG z4~tvQs*=i27I8JPATE8D@ac~$>s35owx>~5rsJP^=G$Lqp>ER#G_}JOTb+eB-z|X* z(3oB~HWK49XJG0PJLvR0bgqt%v%cE)PEKX|^*tKj4r@b_dE0l#OBwO9aS*v?#ilT) 
zH1?mxmbY8)O*D$UgRT#kJ<;&PtbGHMEX0*gCrkTvW`X&Jpydv3X3oM(P_bFlJ>h6Ijhh+(NbSIwdw2 ze%xWS7}LrWTM3n9;FqDyi+CA7|AYG-Oh99&mZtP+x>%Kihw4t!W70>(&%V&LsrH~J zM`}F^snZo(s7n+X!=oT|%5|MjYz>y8=aRwKWK5|+?&bWcvQ{3WhTs#4HlbBNH}Q0+ zwk+5g|2E@Gu8LAVl_<+=kBZhDJy7irjoFp_>xbDNviJ~ZYWKm@tf3*uEo zJwBg>LM%&z!862WE*aXzDb^xkFB6?TH zaQ7)<{!^0`w^-z1<*&!LP`*ERpUfMD&mn6FHZOf7z-U)a5F^P>r=v5{EWggLz+@d%1key8#%@TGovQK?cY1HA?iGTs9OHY8*Cw2HnOGz#ts+ zaKyEt+6}_%Zp!J3SY~0+UMtJQ>!mCzn`d>+NPz_JwpH)^z!IU-nEt5O?)55uyMuFr z)qK^_!eS%yYTQ5ahlJvaBuft@_{F1Yyi}XZrU2r#=%NiF0UUCmsy{H7Mw(IRIQeXvjk0VqeveZKUMFN_xB= z6I8hB2!y6@I>ku05p)*in$8O{$?ZkL^wM^t5l9Ijca(>sCck7+4L6Hw40Q|%=?OM$ zlJ!!C9^NMI<|i}Qmun}QuEB67}(teT`rc^hJFMYP)=H> z_0JD&o|#GLx$@F9Da|ox&?^Zp-*6RgusSQErL9Xeey-R$<)^s;-urfMk7EO}nj(TJ zgWvF3NkXF6F=nTpd&VUKs#o@Y&o65jX*_anvbp=+t+59PQnt(3&6i(8XTGelX0H^e zeJ8WkV_r1fF2#?kBoS+Mb2c_>DWL6>IHQ?U*+LV#k z?O~uD|7|wlr-LfL2{@+Cq!(&Wv)f}hlJy)cw6-b~X11|s2riBQ1Z+4fFjqvLes5$Z zesZW<1o7z%{XZY|4~#EWEj*YqRKuaOMxynb{!%4K3;?=re_TnF4ABn?pzD^pt?STK zu5-_|!w4j(O!4P+czqC$I{j*5iACzz6|O?=>x$@lij?UBFB;9URfu&Yq91(e_;Zx} zYCB8OBAKfVM*xCrnJnPXRopz!ik>$s_czr33uhcCmUuR`F^~$$_)3W!eO;e>Nl-gj z3F2x#A)!XMohg8t*`K{0>KOP&^v8my8fAPe#UFBdI}x`M>oIh{)p?NGKVtUKAc2DX znK0iuTh4ynFE)4Vw%Ptq_oVwDAw6}qEjUDAchcNE8omDI5)Dunm^DKwOPdX*j7!!g zbTNN$4}Y*Td?)`l zf5*{2B-wq=<4T=de8_T?KFw{Jt&~WeONOpzJz?SRZ;<&dB6i5ojb!ou>FC{;ZH}_y z_JB?s^RNg!(#vQ6;mloY7RZvs^-)W0T&umy{AR@%V4@_!mbxF9eLF*=;A8 zJYt5cRtJi!aPWeF*{b0eUaag)JH*ET87Y3sD_Xb=@PZZG{~Q(Dq3dImqe+YNM&#gR zlH*aX#Lj+r(Q{XMPWC77l+{D8%Zue{lqQcbw~u5jnM^syPWJHc`$S&bk{)%sng{-` zWvajk7Hk0t#oJ83x$AR3cwvCZ1r`4kHiN0>K3`TkqktdQ+4AgtZ!Z=G4 zs#!j<{8g-qu7l6sz3IZ(7wI(cDvy@(rqA+HZi}%u@Ws^G%bSYD0MEc}pXd^mdEOXc zJN?eL-l`0fgRap703b*E9m=>5I4pvj@7!rMVCi}6`6j)r{XNRF5>X>CuT1xQ=DAp1 zu`B%=XeLr9LvZ1#2GuN7psQnn;~HQc$LD8G@LzU_8a(Z`f^-bQ=9##*`_bPeiGd|4 za{2>&Kei>HDir)K`{1l9obE5xDCC0BwoTx#WfTv#!E` z9wVS&@sXp4K)DUT?axEby5A_Wr;-h6#cAt66P?TS3P_$6W zi*CXO^4SYnAjog(ZMjMhEy=^c;{wLIn36`%j=t zWNI$?+h!2$n-)&&j{QI*-tVm6U|m@W&71-**S;F$TcLrO0JweeIjrIkgpL5H6o#1v 
zUcMUX1CXmU4hcWpelS~#H`Vzduw^6D|LS0^AD}alN8yU`&@f5#=jiEcYL|yaE|?`7 zI#kF(9}`n@kn>N0Nnqv+C?+(GSST<)r(y=u)Otm+KRH)ZLq?E%P(_EOv{eJC^&4MKwE1vqr=^eK$7=Z1;I zC8K9jFSY)J#V)Ppz!5Zb=Jds*hvFI@ELKASl|%c)6eLKQ$M`M*B|TV#h3Ze#1!QpW z^z#bh1*39!C3GkfU(W%^G?BJ=ccgb_NqM5=&FQR~QXyA*qxkJ;*TEOfL*!dokJue4 zd4i+W-Y0udBE-P@7cBqNb}%1RPyg4wW6)ZKemVt-$QLGyd!e22vf<`Ds`l&!PY%Lr z0J_^nCiHX>q$y-%$8`V#EJ-*w3}bgyw5ZGsh$r5V5{H^Yf6;QzCNe{V43May@%T(2 zx$23p$;uczIIt0)GVu9dJ@dT38A_;c7>A6a{+^kMj4H4#YQmMM;3mGq=X^IzfkDzZ z$`y|+x!ZAD_=j>P-cBkyT>p4(AaMSu<_jP>iU)VF!Nr%!CWHf+Gb)sa0h#v%)t8Ls z;InOw=JP0n%b@r>a2!$bcW}g2qfM)egM)*A2tD$t&402<8o9kxLy&1)=Y7l=OaP=M zMv3aNk@6`!HW~*H0gRRw-XsF3S+c}HCz(zZM=fHOmJER&1_G(*cJLDqPD)k&gwZgY zwAK9#o?<3qCqP7y=WHQ%W9;9CHs~gV%tJn`M<)ZU?*me&LUeK>Z8|T-qEc!1oIn2h zYw!5`g6m)7nV+C+*K}+!0d(bL$(yMIRGZh(0};wRANK(OcW4BlJ=g_!{TfEmc?%ug z3qFgI&9wsvSgg4#dBEMpjzL4F4mTz4t?mRiW^=zG0=rK5r78ou3TUAgtS=k`Uopag zhfb0hUJ_=5<@JC;M8W%H_lyWPMP zzN@kh7b4&@F@j-F(=N#Q5df1MtB6%-G^aH3)jUvHVJ(vh?acIAC#ELqhQJsADg)YZ zxnVr}&){bC_qdq^Z9$=g9m~edFIbYETgS;VUk;(~u&bG&bU&sQ#LF~c0HZ|{vnP)- zsbdyws=5CJ!27FKG=Ev&N)orM?{|j?w3_!Ks5!t*Kk)`iZg2ubh0&p*Awc9f`&_(V z0mkey8JwY(INY)S;(wG&&Y_AR)kUeR`M{|-DBgA*icJ$RU_|m;$z_*s#9+oOe9D>@ z4JI5|qblLC?EmPCmhpx03g!!$8hW-@h_&etKerlFW%9NL{u^}K`EMwC)d64@B(Aq( ziC)f4f9mo>-UnpC<)ctisWTZ9TD1fh%%7f181dbcc(4m831cC<%!8X+m7X&Uid0t!bZ*z=uZ4&M*%b7BZD zDc3Sa&lPs$%kA%pQFtl}U)bfSG z;fjM2-+*j$mN|~@PtaNSPPd!-DUvg-%V)1?`mFl=wNKg|g5@^22n|d!Kw0^)1h78< zQUfJz?zyY}+6P?e?;g$l{VBQMmzuA^Snt>SGLovYadPCL=nqJvEg%zuKXAJxSaoYl z7i14dG}=2=a$sh^MvFMLLc0)_$Yj=Sk5Y(u1!VeT&O7vQ2R3JEbs)R1ObjyqjZl3R zbDXwxxqdutV8I)F3UwMq2EQWX5rJA-ItofLauJ{%Ln;PA>ALnNqBz_fr9zyIIBEw9 z70@xtCE6X}#jl31&qSlS7^tH<>PP}@A3&l_A<*IuVDW|cU7LQPyV}dlfBx)&h`-DA zl`kTGXH6x-u_WJqGwfvZUDiiei6b}+_e$1ed=v7m3Ca@X1lMOYEdC9MHqV6yQ=Z@J zHfCB&0dr~>_<`pfGGdgnkrAwhZkh}x0NHH^Bf4n<64>*->tD@>#?nywnT@9=aZyWT z?V~_uFKXvcATk=ef0}9vIDZhXq}T*%2koM3EMX(*<6%VUnflWf+yVO@Bw$>v_l>@F z;asElDkA^yHDlQVy!P3YPe(NFjNyRSE{OZ-=^Z%=aaxvAYtsOrkv8yR5=hxJR#X7& 
z@9>^`!k3^`5WhBx-M!vz{#8T%Yx7}rC4nZ{jS$bUh5{rB9nel!;VH|s5Z*}Z)z>&IX5MZ1=+A&`v1ZwQ?_&FZkSuIVmwtWs0F-e=L7WbzlEwoN zUqHt4;+SzZtnZWu4zMSH9p49>(N#J@4nH6#(DZZ8rjh>cu<*%(jziNi;3h}xPa!jP zzMmg9f-kQ>D)QJR0uDy1Th#C}E>=XLElVW2sbxgO>6P`S=v%L!vli_^?Bf*V;B`K{ zbRQ@y8TvOma3IX#WK9McLTVH~k(1P6He0rfqg-?Gcw ze5Rlub?^8C7!W?=pAZBrdtgMRhlg|6lLxoAVF-|)_)N3}NLsUUOpjK;Vx+6!RroHq zAs|iy^tz(v-ROGq-FyU8Evr>;&zsFoC^OLWYYea7 z^~DloEYe6iBbJ!Lq~|^rlrr|A3raBG1AYTaiJ}QVZD_t|fI5a(^IIQk7L9QdjkopU zQiuGxLk3K~hwOc=O7f;;^Fr$q-GR7HJj`qi?3=FYY)uoV;)V7)C3U}M9_$y!+Hu@k z&#zxgUmsuG2D+1fJUqwF$#c=m>_fs_gR6rSFsD8fGDc#QugMLeh?6r-g4)pl?J<;& z`^h&gg{++;35W_Tms0p?dmnDa4ToG$TWl5%ul1)8H#mV~HAElDUoy;StjM?O@(pAK zO!;Ikz-bH4f-5rv@T5`+1u%eKiA9>$tqdgqat?T3o_32~wGn=5;c;26+jt}R^I9wB zW*6YfBhewiTc*z90fD^FU^SY*PIjq4r?{WJ!y12$m#9y`gPPV=Y1>XMDMF!)LIyrhio@05N~+J5@<_x2!t;Ht*{u-=Qn_jqn5CD8ASpags+D1v1?m) zOe2IDG{3H#z98LACOvIcr9d!L*1y+`0@C4bVrh^EqwO{0kQ(WhEe}0veiW5BJZ8{jY$p#A1uu ztGx1e;uSWuTIMJr#XD;Rg!L`w_+cK3PEUSO$_m{F19T} zMe8EwoW?0Khu-SH(Ywse$Vc*PTWQ(zI@w#Yn2=ek5Tv+Hl9a-pc0M~WLnI3wj8=l< zu>7jPx&5RfJmc{Vg_q9J`vMrw*r-N50$7Q@&kY`D(hoQZ+>tT!2}qH{({zKQ>>7cehH5o+*)#LLx1bXr;Rld>F_EujF5qUMZDO{aDDnSg zQ(y+&Vb(?hp!jtoX_8jaN0Dhb&;m8`_VOkhIT_!!3O*;(92X)gT0cj}maH72=B~~t z!n&C*`AE!AuKK6VXUybqw&P4Sa5fknO7qMwVUPX zPd~Z!Pr|45eRlV*ul9)FAyf#Eqf3B$9pHR-z%lto-Uu|~mp^~+H%gl|gxWX8M`wna z#|5BdVP5Us`0Ve})PV#G!`pE~@k+r4!O6DZ#9$9_Kl6mW1LGF@`&K+v_nt@vVmeRE ztOmwqQ)y;6w0v!pvA6@NNZj&y*zhJOMc;c1sx%m#0*$E&Z*_4Rz-!pT*|LzL zgF_C;>A(xwe%(LF|D$QL-Es2%7?YDiBKjHN3_`#5N8zK>SaFb{E}w`{%zDoYZztxl zA9ebgo9jRyv2(Aq_t#YxOq8QhUBT(SB0@IEY zi@H`!IfyHL{Z47mL=*h4J?M3@(LW1?WE1%LD~J6PJ(~0Q1CvjKA?^Lc76$?0yyrj5 zRbjoqYYq4Y=p=NP29!S}8T(O`9wA6xYb2)5CjU{x*=Mv^2E5Y$B8QU-6~ehQBk@-V z2OOZTuWRyuIPGdar$3B{3~NC?7%UTh4GSH+j9yN$@*TN=BodT{GN69b;Cw8_)0gVg zVaDEN^YzI!krfy9@veh^njBg&39%v2cY&S`g#bH~KmQh-Q#&F3Pk)H)v)Iu3fC>7r z=(SLAGPjW`UE-%jyTohX_|(~LXt?MDt~XCJDt)iDB&er}rwuiY=hL8y!!Qj4_7!%s z>eZ3|U#kw^Ib9A0TRs<;hYr4!wb;6@a!lx?%MYTtlCDdgU4TwA$h=tC@YJgKad|jg 
zviu=i^n5h7l=7o9Y^r!NQTxDp7vNCi8Ycc#*8xUL8k@rQ=ZQSJ&mM1ps!V^ACc1nm z9!N&|9+0jqw?B89=r*%7ryFGR+mC`u;526a4+)%2(xp>G4dyrw(^&8wBDdBP_UFGs zPsYEkN8ao7$1-pVDfivN9q1bTuSKBZALl>>$NzMy=2NFU@%|c{{~1s}h^~~;+yXKD zE0n;;lttizaRD`;3Y1dfznV%Q_>JlAPx)h2q{~2IvC;b&2zwKrHfeh*t}WELK&#MG z8{i(@C;apSLBE?>=w!hKq~Xy{kFPeaFEe`(A(a8el;g0oPL?qFj&KnhF*aV{ueqj0_r`8ob9$KrlhGB)R1|@8$;J={_{$|4gH3%4^HZn zUt3E4fGYX5D(V#I{9GRc$Liw+qC5g15cg!*^VjioJcqf8A#N$fREuw9RLrZ5 zL@s}{12-3w!u-(pXGP}PTao`1l8Lx^^!mqtJsLcHz_tSFkH3u&Kko%{9kGYM_WUz@ z!D$1*COCm3|ER~<1w489Yt7(_#^Tu}P$9sSRLTI~jTQ)8yV1zIC;rD8{`hdPnZ(}P zcq<`d%$-?)pRSF3r#LkG?bW`-cW}_r1>Cj))X;phoBVn<^OqKK2NI(1B?o1QSW6Ik z(%?Q2-yvEBsu!M;iUVPVzWI`lU*1SbIha8a=v3AAIqJ z03yFh>A(NO<=4sW_dsiD((9m-8&{{NN>+UrDnvEY1yz2Sb-Z_J1$Fs6h(19ie=dOP zpohMbziQb%u=K*-9>{1?xk z+KbmSc`Kk6W?sz!1Z4i)m(}>dC7PQT!s`6Y-@MOFH;OejO?V;<9T3sH*Bxf{WW{&;hydK@?Spf=5t=4 zK==RuZ7u#UKe&-Qx%n2le#o#8?)m!A8{z&3I)R|-Y5)L)0MnjbIh-ll+|b|yg1=em z#fMY!x9`3R34&V5zZMFV50rk(m_Y6Ko2eJzRJDb}vG){IH#GE9ifls*u+fxzR6F@M z%jPxKH*gN5m~Ls<2X6?_In(ez{CFc|w*-|_xj83tZ)Um%QlbBCKfLYz=O&+&wfTbq z$S}KV{7L-rt;yc(SG8S87l+VwWHT7rgU$`n!GPj4Rzr)v2Z)yOE1ALnTWjl#q%O{C9dO2P~yxc^l zJWSfl&^94RaO7yKIb1;3qU;%|DBFD%&3Ay&LGV3X1!*sr2IOzlY*UgP&Wk+158_!R zIKkSfTFrK(XUhn5XD&c0Pb-R>O%G=zgn#nE++DP1xjNKRP)_!D|}~E z)IS4|Do(Fy%IcQBBQnwJ&VSq610_ris+I_nOwFN)_oDJpR7%Y3&oR_tlNR7l_@IUg zJv&{m^U{3WYg<9?olDDquQ8S}l(OD=E%MVx>N{=j9zi!A+4c?87nj^u!X1nV%4F<9 zAp((;Jn^i^6!WK=nr@<)L4REBV!!#%h6vo#Jm;@<`S|xJb^bnAL7%?&_N4vio=^Ia z`vH|?cHXNR(}m7mpz!+x*fH=f?-mfS4eYW{ew;=leBJvM{~!n7{_e2>Y|6Ep3#jmw z)?;g`AF9U3%aPr?_oa(y{{D3Zm*HXj{Gz|yXP@a9Jmd2EGgRVnVPvD-wT$qWCZ>pP zyHXQ{i__i@;0QXZI}iYhNrkzo|EEOX48dmsJY>#@AG#~r@AH11n(Wn(6;^gu*s^L{N5o^ zv7bbhF;Js2;ZcGfP>tBP5$2aNN@i<`L{!jC<8J3cNQ%l;Fv^RZE+A{rJgIJIMLiNT zQ6_9EXCaSu&)FWvffl6_2_JC+@!Vp|&3yO-kG3^sBOWzz6)gU9hi06 z3s%g!*8ETY2H8#KyKT{ur5UJ`N?2kb^^DDM^TVcH1qa2=OYM)#=pL7H85(_ptN11~ z6O(-m&*D96)919bCDe{}zXsCctdlv~5wczQ58tFFGijg=DO$FdrY#RD zv_}7zxo$UMZ8WZ9eD?02X5jsSOW3ikU+Ag4Za+=MHbVtybtO~%)_aZN)SpH_TgC-@5V~rR17*W 
z46~kAH4QjC5a4~ASkUz#TawePS0zjO>E=|3X50Bw0>gKv248KSTyCvBY1qm#xP6XaeeUGn7|WNx)E7eIkm*dd_V*`uawHhTCHs_rcph)iF4omvh>V`3(<|+ooG_6p<|A zrOx$(2@E`G&C;x^HbgFQM7L40m|^tYpem|NZLEFb$y(h8W38>Kdjzf1wL1IRyczWO z@0Jzr!;tCuFV*MMh1ASy&IO@4|W0Yoo>i^7BF;jlsoz zDZ^Bk3}`qOvJ>`gO&Chx%Nh9)SIvrb#HPg>OT~9xOlBS0>pCnGO^BK1^%Cnf{iIh? zAh(&u>9PoMTpaa#E=wg<`(ZMpuxOL{@|FK8&pSqXQ1D*PAk`1s#pp%!u8CEU?5bSI zI*cG6vHMos%k6wYAMT=}-XpUx6E~bU{a^qbN2Z1PZp^!~Jh*~Ukk@3-;WnZX$D-Ac z$rI|bkPM?E9vh1fwAIJ0kA)J1nr2sA*<$>+&6nc|bP&9xw!JD^9kE|;D9#Zkd z9=}C4%ALo`S4_#?$sV}_Md*zlZVvz+JAN9JW~#*GP78bD{SoEU*myI7R*X65=9@Jn9+nYvR~#3@(#6qoUQrOt>1d^kT$ooVFTfhkWsj2u&>3a2|df>g!3J!UZazpNxamDL%`52&}7rUt~$% z;|+rtKCTsQ!oIb_V-NdZwn2|S&5Cqd#1|o5PY7w7xHfC#1w^KT!0t7ba9!t5IC589 z0SfVS=VR(6hUrfe4%!ya1*e9HFkFmYAX@40MxWpySk2 z>K1L%DCih9Brt|-KRZBO z`_2h@se+lz9%3vS{zR|el_5vF_$X1)Z!9F~l%ri`GrtgtDk;^|wY@n`Ce=-qZHGiq z$uUJ}SbY;5dhfOKm&7-yi5WINY}su>@x>)`=tnn}Paeu3(e zovw&MJCX8o@e%&HpmKt@|0F9tOD}eiG}rzR2Uf@Ox654{?CC{Tfe(yvq`IS)a`!$5 zhe{AVa&YSR5%+JOna#*cH4u2d>&q4XLXjqQg6#7w(_2A-vm{sD>O4X&ce8HuopP$%YzG>uNDQ6vky#noZJ?wda=epZ>q9MInJMj1_&`-b@$+qU-mgJ)AYVsH2*JCJ4RzS|U|N#jvg z#=agp4{MLBNW-TAV+-T~*^#eA>jDP}uY+=znF&8f++yI>3kmU1=Ip0sWv|HxP|+A! zyi7Gu&>m5-z7Z@?zg5M&jWa8%B-`(%Syh^$SG@~Gd*hiOC_|9Knn6fv_a|q%+1w+> z=EG0nhv%$E#{^O05)QI`9%-Q$s;03JP7h|4J>RFV(AHe7*|UvRXRu}7*H(>_j{a1_ ztD@e=_mPrk_E>z#XJRUYMoA`aG!mqqWXdJ6)kSNj`ifhok+AdmK~teQ%vzUn_WX<& z--PVXQRPtYk8Zgxgqts>`T{bBMm|Ns`%=?0zzz~c*6YS6ci}(1$52u5PcqizEu?Ty zSZ7SP5)MaJy~$bM|#VeiJ6a{ zRq(sNh)UW8OG2@6Y-Anm1@U=6Q8TOx(S04(6>Hr({(jG{vt^3f#+tI%B?|G2&{QT2 z7LI3zg+yLr;>=@k+ye`)yK-zwrgv`!anUqym z{E`c%Rbxxd8Jemayy)?^0+k5!A)#$Z@}h{_Vt|wWl5j-9_^0u4QXE9HO$;OF3Jw3D zsG|Ng{(;bax<|B6AHRQwl-7blo$J0P${^SDfhAA(e&UmQ^29Wmj7cpuN||kvsz{Hd z5gk14BN?s}n@9Ny%)wH>cnBt@f8JwVc>k2IoR11(owRc_N}{>QD%_Xw1}|fBvEs=x z>bWAH3p7O_s<_FDp&L=DnBX1eaBchoQa@)S+(5fA9Ks_I-mBc|g&`%YJr+_;Sl1&! 
zgjF|+$@MO}>|0fgzDlJFXSwf0XMZWK;J87j2qQ1oYYRj&d=CSM;C#W?;*<>O3zP9O zHr>n7&)`2d@SD}6n1#>s6Xf`mK5HewT6J$#PsX1L)f<-mAkNt|-Geb7aQ}(?#MuEX zLYeG*S}O8r7R3B966(gim}J{3{V!{NHoBSf?{1^HRhGYzY_GOiPsNBW_SC;NWz4v z%Sn^CE-K23OQ3ry%|%&b6h4((QuB8Mu@L7WpDkf8&PSstHm2Y3kE{{uQ4ujZLK_&9 zFu%~8u^qkLbf@q^2+fq8CJ!)iMDV3cuN(iT!lN(zhE{unlI(%FOVn}?<{#(hxWKRa9cBXX$RnZXQ`%U5Fsu-o>K_u#5bWQp zTIh`Ukf_hdJo-b6tx1g^0NpIBMh_?(pQ+)3(gnuaJvL0V=7d?ezbW{nk5ZeLOY4kn4_@^^Zc2GgDW~O+pWhRr0>yfaT_rgIU#>y#-nn?4 zp;s#{z00VK0^;ThzY9oDeix9S`%ph59r}b`vLSg)JZP~?R;hY7kB{n5%3Q*wKZ}(^ z`>ca#rNtQxm+Vzq6xm+kuO$K!hR}sadj$0@j3;OvuUa0c7*t13)Wgv4HDu8$2HHzt zew0V68L3AiewmGsK&@$@?%kB97b6>&t>;G7>iOY1Ni(;mWoJhz95I8Vzlv$>l3F98 zNsY{UNOZAudx^K{>uEuLp$YT+2XiMI?IuOyu>{Dn^%08U46@(_y zx6VYr!(+;4h}79(0R#(Scfbm_6-)i+3gfNpFgH1cwpm4Sgz+tTKYt9Jc-faC`kg1y z#Xdlt{e-kV{ahDe?Nsu~&O}2)S&ZhWmDhQ}gYF&$r2|+ds`+mW3(6Y-;q_9xAeTat~U{qdh zhl)$C^1y1i3KaS7V|QAH2M;57N4&z5_0`&X2V=!~Wcj(J%3jk1^DB$n6p;xb!KyGqLos`E5$TacQQYy?hP*<;+&tOuXxil!+1lrwIBrC11=q`&A6}DPR z!U}aEAyJ@Kv(%@1YRO=WQ`W0rZ|_Nt5t=6cYgy%G!N- z;J^HerSRd2m&aFUg$78U08^6tLPwM+9!YTh;#P?k$WYw#KxeMiYXDTDzk`QqEK{aI zNb5k}AGMRqUl&C@r2qA1@#&fGum3b>=(>X#ab9kg_mR8n^RyRPXq@M6*+Rad7w%D3 zfBx_+JPiV8;eF1<(Umd0ZG88AXC%A_im#%u(EENnZJ<2qWtca`uN<2)^!Bx5Rqt%k z(3>&taTO zH{~csM)nz39bhSqo@w*Ht4Q~M8)rj^g!-+Hvw9~N_DwAR5@4B zL2<`#PdgIwepo2_GkzvyJAtv1kWq%|E%AyND@mk82{Gy%OJ~o|G7qO1>@=QEFPnvj zuW0}Vq_cftk-77aN2!vr%XME+{-c1CYFGV6p6=Cq(wg32-YPpQ#H;3nn zkHH=4X`&pqC_=L}k@;n1+t~}##pp630R=yCtZl1lw(B!quKTYqAN3OWGPVYvSr|mH zRvmrThT1#H*eePRfqSZ%av$(B=HGVUBBQ4o`ctCQa*a#!-ez=I zVzc|_MS2zL3TU^(;*dpvDv;VYnG| zl|wHE#=mcr9&xYUR@zRa{<$}We4nQ8RLkrgO9~6Som6{&P0~RnjSUrCar`0jK2O-8 z_7%$W*!yWVd|5A|{;9|Tr@kY|ku=^~o<4n28nviL-MqK+tRovkzEkRU*=ByWLND>+!J&-^@@UzIfo~TvO0pwXIeG4DlW3XSl4#VhwGmyPsF~fQeCyTcMiW@|xMVB^6XNkde^fTM1_63S zZP#pwZ{8VDr%Ad%Fn(+pSIDL#v#p&gGLZ&Zrd3GA|R2!A2UVOe`#J|Ic4OIbWk)qgK_(_(?C z_F8gVm&x)AzMCR+JN8Mi_i;}5Itgas*POrg@ut6)9d@S%qgHDrI=6DE*~KkAWp7E8 z@BE8A>u|boXD1TyETOtvg^%qI#Z6Yg^s=s?Wa?lNZP#6#sfH(bdV;eg$8Tk2NHiP0 
z$wfBtip^C0degeT>2^{NirJHk8>xphwG*9%>3#)z?b0WH-#Om(k!S|MEguv$wtYb@ z;N#&e?ik8!gu!{(FmU5YEPq;ItJQ;( zF!nhHHQ2Z=%_~4tp8IXNvDlsnsxn;m=G>VSYBcqVz4YaQJzU;B$_++}NH~=3_wk8~ zHls7-L|9@^OEu%;NXF2Yxg}7j=Yw$2iUNk5XE6T>WOerV*~S$g0uLC1mcb%rD-t{e z;e6pW^u0c&JCJPdf<9%#6RwTKN6FeONv@LzJ+61?74pQT$D58SYU0YQBFoHTH}P~< zG%e{~lYRGGHUCJ{JoPh^Cgk3X9k&gF^67$h=?pcda2DcK#x7tqM&wra1;!t)`|OBi zb>b8D+7WIRQ21qcN_WL6HqJz153NNJlSVLY3N8<1-1c32G5&Qa`EF1Olco$C&ISBj z#I`Qqbz!iW$mrl=Ky-^3?XwEw8+k!cQsQ`Jp&Uh^(hxc(lP6rGv9_Z9p!dZqNAk|G zmPt%-nF#i0<*sO>=95_Z5m9gVC&z0y@D;>`C>1uMz_KnH&q}Bs)TcK2us_7LT0*98 zT6y%*ig-=~78RI}vVp$@*XBYqcLO4qD7{P=k1?v;={{6!Q9-}Q|4${}Q0GU_KzeJ! z6PmfZcF%A_tAjZRHgq~Be#$NnWbUwPuDFt@6S5a#nGjkjqPp(F@_g{n(p6p)&~Y6X z&Q?h*^WW--&K6nXhZELFgh$^ z0#hFGAc|=;IT)qbr-_A^LYD@hwYcB-iapT4XX@ypQ_wi4*IU1n$VPkbMqyNZc z<>~C~6(UK+Ay=oD8(o=%PquNT^J@ys%3LQI4g@>(N;hO>%k-M37n*go)d_OybPEpw z&G1r^PQV-n2xg5NDa-JiLoZUB24mEWdBKRjm`P`OOs*zumc3Ru7N#D(pN2J@GA^x& z#quTmJ`FqqPRnF_%SUh-7VN^@ z%OS-zMH$X-eL>N#MhBosfEycM~MS}12 zDQE;be~AV$EUQgy)I=sF@jJDvriS}{KB7pSp%NxI(z4A>Rw^JseR-miFzm_Eu@^%` zHya$`?$)Q4mh}ly;zDfXz5bA9{$f;JhqE3eP3||+0}0Gzyn##T3!B%h=uXUjcV*3| z+gRAObSBufrR6Kxd(SYEC>mVfaT6|CK8&i->G8La4@@gMD2TBo-yxi~rJja=vP)G- zoBr7?E#|;VgLFvnQEn3N`^MZIwCBJRXx1RVOJYMzezF*Xyf!R5h>s~r$aVLnK1F7& zY3E8|^5v&2Gkj`arU27^HH6Rsufe-2^ez)(%r7QReT~4)eti5qp3@?rPcK&|#@N)7 z3Nx5uc=tev^+xpZwv%le<>(za1bOx5HD$Wn*E90L$Tw@w7J5U;7nU6zb*#FwhIi8! 
zjw*#Fw<-_6eNey;PM&P|NMCo=TAhV%qGhCI=bGj6N?aqQ60e>QwXTWqNcGO$goPEG z1@001CGmdIv5auFO&(hQI~@^sOY2uTail{&h~4ocm*6F5Fr1WS8EDgPlSTtlhzoZt zEUXP<+C<QjM!^T&P^Y9cnC2gIzXTs}^sVXvci z+jO7VYcz_x;>+QGru`pRNy%QvQ^kkFUrt|h)uVEty(MaK`uL6^kN7;)#2nR2@pV&S z1p;G{5^7p<|FaN?;j(V>OVhMBn-sHze1to*jkxNgTDZ^v4|t8XPiS#9y704T5hpJw z4R_b(xaA>#6av4EU+AY^K+%aS!6S#sbwpICn87F-gu-GkwPX z;~iT5 z_@XD=e3KeJ%5-7#w?rn__FPAmFWErv(qihe$LoO&>5P3+T20bw43+lAL0 z{z7rb>F5(Cxg%;}Ghyd8xRLHTMvgww8FcZk$dS(UHZaK7p>81TB1H#N`K zdHA#;KI-UMOpalpn7s<;F8YIgT~b6%MljHlAV+-R19Z@5I$fM!>Y!TibZtJVtv8IWpXcu| z`exiLc3Zb_ugq(LscZ}{u;ZZYNPKo7BOB!uF`swdIV!=SpGXtN`nWtcOOFYMKGjnW zM)w=Nt~+f$Nw$JTo{ZPSX+lgYw_W3xY2OFozOB7x%8~NIeW@Qxy9RUYgFSaPQQpS7 z{M`qe3`mpc6!X^rV@ZIcnT!TI~2%P94EADEtp*jqJQeS=2&0~}#X zKgn9Rr>jE>Zd0v=m8GO}EN6)ogXn+W)m@uZxn1ia%i1$$TM`0s@lO0IhY*=XS`2O7 zy^r%vKfj)+D4(vunrCyBQu+<=Wh@XOsv1ViFl$2dxqC9(bDhC5Pf#-$_}3Ja=WJk3 z`-ssrtG=%&YU)|2P@Ab0P9n1r(t_%(vU2$N;vLXeK}b4&o}DV$;5_{R3-|%dVq%FX zRN?nGlH3aSuk*pU)4v7L2r3uE@h4AwuqBp5?&Y`@;F`k@4TR$2nRDn*?;hg39UmHR z;9=RL6RnddwQplTAr(~%Q(Xh+uK&limOz|rJf!8dHUbJQZv6{h$|gF_8t zR_u>AACO!gFgEWWrS-=b4K1Dg7o=0a$+KPglsmP}SH2Om^|S1^3w#6meaZK`Z`6pO<^_D<7Wn558I+x_B(M?j#s_uZz^Ln9!sDyRg(CC*edtEPJD{adMvzBawdzYmnaB(VdzaH@{nf^MDe?0xc2Cy)y ztZa)&{1dW5mFU;+BGY*+-qa?pm&mye(ORMiS3dmcaA6h)-+S`; zwjSX`W*7Y6;$ZHu%+7Y)%QuH;)9Ceh#yLc2L?t##Te_giFEk>fs;zhT>E`}7NEZ-BkX9|p0S$7mS zu0=&Re@@x#y`$-CZffeq(uK1{GiE(8N$Jy-u6IX78Pe=8#F}%}C7>JviVmgqvM0>B zk$PUR&Ic_qm;qKs*2##=2WZ;yakP?jRbgtjaDPg`>#%f$_jTMh9zmGK8;+cP9Dpjy zExLIl9x*s<pb z=Cf*f0`Qt2J@m4i`pk&%R_H2@K$)X;8+^jGqFKbjupL|5acTk#McpYvQe9-no8CgV!^oJvNS=3 z&{;r*fG7fr(u*L{T!Ab_V-S!o28bAGiBzR#=|w;hK6i%?~@Xta9koB1?Ve{X6W)C$CkM8+PAGk5H&;?p1Q6hO3P z?UH^YU4MzqPoD#WV{R$Cr{h>yO5#8PsO>Lb({L@866?X{D#{q^Z&+64pWxmLd^ksy zD+!Gk#a)b$DO~B~pKNjLJF`a?RK27GpIkAGLv)cKG3IR^%lF##zJC3&zDonQ$J&bn zx94j)Rb0xF`z9HE$Mh7LmmI%@vd_1?Fn7Bmg0%q>a_y|SJl%=;v3k`H%A_9!+op`R zB^aA0(Z@Yvn9Swsvc&Oe*}-pY+UPOH;$#!7MF-~|HCKFIEagb>aX42?&WIanb8(#Z zbg=RfaEQ2OZ|~=~e>PrL#MIL_H|_ZSug12DC~_n(zvTK6&V)W?QnV!HKT7BCoxZ4l 
zKvhI(gUZipJzAdVLD#NsiLBM^iv4YP2Qx%9`fhGmXxv_&Da9bKc6BL^^T5dP&WtCU z{_vUdT*?()x}}pe-8n{jgK266?!Hr-A7kg_rCKr0ra4>BBsWwi0x1h=u^3sDNIC&k ztg4;Lgf3j~EebS!7>aL`Ll@>gl=_tVI?){Bic;V=hoSP-%MwWEmOIVG8lvp@tfr_9 z4nkb=lyE1m)VCy)=MI9Yh%FTl6butiAJ+__xSAb-;5Y4NZp$-7Ty2s%^w~Ff-J%4C z${Q%i%V=4YI&B8IQV1aa^1}psA75lCaSVfv>Ji5^Ky zkswwPh|pag|?C%d{=Zkyz&QrJgqBj?DuSwDZx3S)tsgpVP< zphZ7+C5La+iR`hdshBL5;y9hvSRdJTN_Yf0I%x5vWaZI8D=QvtVwkqX-ZW}{1`jsj ziv6xRq72Y_#B6Wq1G()syrT0@lp%mt3!t3sEf3wJU78cHbeR6z8>iQzSc0vy=aaKO zXXYZDn{Y+>m6op;26JjPcV&+^8@9&TjFuQW_{lsoj_~z1FjQE`lTHV&bnIDY3BtgQ z%WFVFP_(GG5JYZMpQ6`@NE5GL@34_?;;12`w2#rNWU`QAtuS|lZxn&xL?~IoPGs>4 zM0wllx7uZK+{1mhyFIhftNuG;e_{-K@$VvZCZzZnh^|VH2YFgN6i@nLCq4=fyvt|? zuP$(^RPqKUEH5vYzu50Jc?Sl&4)obyqZRe)xm>1wL7`&ACB2MUwBAHg{r_dqkt|-p zHhEfIvZn_4{c9k?h9)s)q~!RyDa5U`F5`^A_f5e}&rohSX9)t+6|$z}#YcXg*3FJ= zhEYK5Ep>s7>tHczKUOu$%-CR=SxA9bXPrPaE(L@VbGI*Yf^oXBv!-hw|f0#O9k1)7F{y`G^+Dp;qxAlw1xl2r|-` z8-fN$TzygCEL7lhEk+%#R@vcl1rKLXcIW|~cw*W>WGZ9@)lae!y20j4EMOyJT4 zfLjgEWNyJBt{{Lcomp7>t75l`YS-}zfzFPVB;<|rShE0sz$tj8HyG>c5^APbs>Z1Y z-teP%uIQx zr3czx_g8*U^hHREd`;06jSssB5dmq^_d(xs)$ndstL0=D7ZoiNC?Fyz01feJ-jzAs z!b#oNdAIpoqHj-RYr7KvM;cF##P9$NwVR(E@8QhkDzwMwa$czOpO&JQb^UEhwP1%9 zt#dRi80S5xU=5Rby2jj7bJpE!)cAx{XXKI9Kf_ak_sN$y@lP5V6CLDgytGhrpDh9q z3dm!QT*r@GRkTO59ZozWs#@DWtHY_lQDB2~RMrZmFXBoU?*WVrq>gu|wwSy*z~uG) z!U0ZQT7E46r7xB2_S6~T<4`jNjK=)Smdc0i>hat42fC+I6G}y4tJR^f!2z^~ZVYQv z+TRv2Fbkn5<6{b4UhRTCdmtEyn6^?2JbpllCUZH@nm$RSdqY?9{}u$`aqU?fFUrDF zY5dmw!>I#U($a(2XDeVK4BRNbQU3#kWIzAjOnZsa!>hXi02pi%502t+!WMLjsvyQlXDR|LlPr7z_X=xoPR)2$>rzgk^v9U+ThGRhxRz3(cU7c;Z zEx?T17pAu%T!2N?To9IjV%xkIeC!sfJ+K9KYN>+vrSOievU5VPJx9ClJXdVKRUQGw?5nm7$X-K*K95X9gNl(kZ_FAj@$H?Dz$XJ@F@z}MSyUq(Z*zogtZpku3a5;f^v z1WTHOxQwV6tZGNe$1j-mnAT&8t8gRGYu_8b|GE!@-ZwH+=+rhw~E(axbeG~n3 z0D}U@gAU7A)*NC9^0wn=Knu$ j*9G+Yzv^RTdN_77w30soeSVI_$^?>$k(pr;(*D-pgd{AD literal 0 HcmV?d00001 diff --git a/docs/assets/images/determined.svg b/docs/assets/images/determined.svg new file mode 100644 index 0000000..8163db1 --- 
/dev/null +++ b/docs/assets/images/determined.svg @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/assets/images/gpu-numbers.png b/docs/assets/images/gpu-numbers.png old mode 100644 new mode 100755 diff --git a/docs/assets/images/hf-logo.png b/docs/assets/images/hf-logo.png new file mode 100755 index 0000000000000000000000000000000000000000..7708a9f4d9413a7a80f48feb59e15ce6e673a24e GIT binary patch literal 164177 zcmYhi2UwF^vo`#mqZ@G3oAh3!Nk=5KO_M6UNE7K*5$OcTC!NrH4M->SDxGYki9o2K zLll(IBQ-!E`5(`BuIvAF8aIfaNeygPVxwPxZwPZuY-Uo7g9neiW<4;yM+r}w9E1KTj^gt6X1|v4y+E?#v zC4GQfeY0O+v8zkKjqrBLqG`ADlM6a3nm<3heD*)D&-bT1p!k1(KjjGH82HVRzUnhL@jv&U?#&QW3WW4 z?$7Tr*&8(rRo3T~xfHAM6P~2@Q+U#9n5PuR(CuloBdQ_ym-(2U;ZtohO^ary*=V{L zzqc)m{nnVP@p}9?KKwU*{c>NgN>fSZ>Cop=ed)J=4Dz~vq%QJMJ)TPS`@V5{zmHT| z!fQB$G++MY+boT|{&{g7U4z5gZIlFbM=tD{uE|HmNuT}SVtg>3ip#BBxbfbjlnj=_DvrKq`c|YpK|6C_+0dB-F8VDBK1G>upr`b*;5kZQo)S#EPz}Ch$YrT}2udJ1!7l@P5RnG3n{<~mu zie1LN(4&Ke8OFlW7acLCmJ*%%>y4YWrFClHVD|}DIk*jh4_{5I&~9Siy8aGMdl&ip zblCMKJje>NepwT_iW-kxpW~ZZMJwf2?4b%_OFLQp82Xw&W_K|8WA#{-bN%6ygO9%? 
zU-x$Po=#;gfw?vjXY)8T_$C&sJc#dnVt9fEeYo4ssR}OX{?Hxy{S&U#ZdX)(EDXba zI8b#SIb|k;dTyp>Zi*=nDCZT=?p~yrPG5Qe$pMvA9xI(v3_~4iI~ly5dUDkqewooH5xGAX z9g4gCm^Dtlf#|u|vN+$8#@w@a7YX2n_A3{9Txfi0w1l6wLHASC)uThAW8&fr7MDFA z9T+6UAE=MJ7@XFHS=i+V@|*^`b)J zAB$h%j%fo;qK#4{&grvGs7y`$L(>Erx;)k>OZR@kME?{Sv%`=4bWAo*z2=xTUUAB8 zbxlUm%eo))pZGcTrXLG1DoWMLXzAMj&DXKUTOQDzu3Nn7Nj(+%Ch$#XzIB83+OPLE zsY`3Hjy`bTW{#p&YSZTW=Jj{?^Tjm6gR}vnL-%c&d$C>5all~l6T*fU5GU860KA_5 z$Z5W7uh;>UC<99c^>uS2zgxAx?m%_p=hy8cMS5}N$iFh^nq_w8-#0aq zSJW1i)6E;Bjcs-L$+eQWU7SWgDT*-{>I&A`PircU{MV-AKJ0YycKc+%&zm#C(eCCu zQ?trdtHa>!iZ&7X1N9{G_}hZ2pA#Uv?H}1wXPXJUec+0&*FDd;R6}e^?o0a5z!09U z$Q{TclL|g#feGQUuNx%2CMD~U*=t@DT;lXBQ}1Gw60iG+XGunz5jR)UY*vU{Om0>lRD|6;<<@k9dQ`|rHN zZ%j?>Dpys}+Gt!q8y0bhOT$ItgU)4Co!eLY&;Kk&S6wh6vUp=~ejS0oK5dj3gmmId zHlJT7E+|S67h&rT_rG&?W(F~D2H$7GCqCbLQ(GtTVNiJ44oRdiCM67pl%w#-gu`)es{R~eju^JMVyRoz58*pkHwq5 zi(J<4AuMBoP=XZ#T$0eZXB8Q_44zT(!_^}$%ka3tJLhF8BZ4|M;zpu)>*h>*Y+^Fr zxg^0eFYl>BHA;b!F^8neHHN}~fv2o9G+vZ7=98b}NKdMy_gjJFm{~)!DX}$X_ zo;rm8tgyQ?QtFj^&prc(QODa+c>YW-Z4=8kXb* zON4Si!+L0GM9q8A@K@{ce&J_HK*H?<7mX*3qkCR~-;}Z$DwSJp5$ESFsqYTpr{wsB zEOh_PAhl{A_!d9-Gvmo%W;F4)0Du%DEgOC~ugql=0 z4(PdV`K(laJ8=?a>H!#kP6VEs8*z}6ulwXc==6;^%8`gT=>y+&aec)-KN=N*)_(L56s(?lupd=kIT8!9(u9eMi2SjloyAKS`Fyu=9>@-=uV$|!t5%i8 zpkgF>d}FsN4f%1gH-HEY`#tD@xVj!E?9atVUYG8UY7=KM%MmI! 
zx__9f1b{I4MR(Qd9Pn}jsd@&?h!w}a|d(u^4X7m zB8MjG3LeZGoA?QHiWJl?kMztY$fS@$xjREKg=J8(7a9?;>H~v1xKjYL|J-7gmrf0- zDO~2%SQsQrde!uNg72yiw_)3)bUGT58hQTxmL3syTtP(EoUKq3!)m2HA_T3~G{+ zdrocw^&-Ltc6#X~Wo1L3)R?mjih`R8YW*|DYOPSQ23j(VFQ*rKT*stI6In%A-9@T} zM6_t#qskWxYGF18iZXzdlR3Dp!6I$dKJ)-lo)dCAbNA&3sVYH#ciR0v+Wq90dt@~r zAR+^BP2X8XRa|rlu@muusE;^fN<&=5Vz*%ZXRw{0Ncz6enaqIGZ#x>@xZ;$oFZSRTBsC!Y2b@o2E?{Wg7HmIN33k&zhkscEt<)YN6x z*xjvsx`&|Ugr14)#ec{J3tY;@N2`96m2(0vYvXYzy8qj_WnB1CP@&eGL94tC zQ|~NyWxku2pp%ELD04K3iFI_1uO_tN@FWpWg$8@~oR&d)@p}$L1gFX^0df*Fn2EiH zqkveyOb66ruY8F+d^qBF?UV3*f5_YL8^jF`OpHxC!8I#=PFC!T-jLh1_oSR+nUppU zY-*z!wiD&uRH>MicV8ag<2XfOw5WGSRJrluoR zv$Y8_u>4B#SPI5%g90r{%@(gG)t}U;rhk21gLNt%cKZ=SLJwj^)56EG_JFksZ;c}E zh&vY(61&Q!D*!zs>N&NC=7Da$sD__;&kznE@3B|DcQa!IeJ;q^&t=Y@`DRM^Gq+b` zhNz>C%3ZtB?2(?&ysv1eUoy2h&3@#tFHTBi1iwtwx2K`VgQ9v0_DyLR^%K|nqXXx4 ziy!$3dOepPt|j-Yel3S`a-%hC)6*xl--}9|SgD8sAh|Ca27NQ6uJI)L$~=fX(a_Re z+SP#8ySXQixc^@S|j7?KlV@jSQ^h--@J+;${|q$sh{tHFj6mu!CB?WZI@=ZLV83 z??ANtYgbKQT1lE6i{kQ=vk4k^ZgOpAnhH; z3F7KT8#6RrsxokSLp-`(!rt}mT&Fd?R_#8VRkp__=7=S9*)xq)?U!Zt8dIa#T+_=3 zGu*;n9?U2B!%hpIXLYB~0;2GgWf@YuRhE^~c^FuY3gb?Em?#f?I}Lm4NoLBlZBTqz z9Q8SXn=DG#5|eoidepe;b!bP2$%LA^N2x{qP(Lh7+^5_&Fjr#GJg|t)U99?)PqxkW zEud^mh(O6GpxAE3KBEm2!yfkEDkC80ijjB3{oNhnuam1h)k9LkoxMXySZP|slv{fb zL(z0s!IgW@r7H7@vj4?ASNJWwqG%GmM~=8G-R!$mR1J}JX#1%-<~6a^lGo#&`!NI` zs7-3!nwhg0OA;-f@V8%et-jho!D5vG^&P213Q?f z{omz0e7g>9&#OA2TkRbmq$xI=Yk{E%Qt=j-OZi-B;j6 zjfmH1#h%k2#PghcK16X1g7}vN`qlX&H41Kn>_RhP1Yraw3HMUCuBEX%>3A(^(ti7TGFaTbQyk zoc=Rw`j9Vyq;0&)G($skTV`B;1%k1 zKQUSCF>v$B7Mypio-Y$1aa^kxW=_oT;PXd~F?)2>%Z?O$D%?vT?e(e`Nqo)}Lo!^Y zlh`Xe5#U+=dLhK3SoDR)&d2L4l*VbUMQ7F#bCU4x#=5n@LeHy)9lzjRGsEWaWqtXQ z+d;%a#V=yyuLIP~?Z4>jC|exvCbhI~deh$eqwv=Ps!?C%VbpCdH+Wce{$d~mUUaVhgV_HX;GidFpSV&`BB%^5f z`4oN0W>xe@T@1f# zHVulQ%0g}=mgE7zp{gs9TFSHgl%@_60U_L5oRl(T`o*@5uLP=+rP#6YyS*oz{?g4W zn<3jJ{2*Rm1rm~J6kZP0@dYbBgRz(X5cwum+*d*m z{A+3KFqLz}D!T8|Sz(~l&i>?UzXr}_+|D{(XbrlT5X365eY|9zK{DI}Bw2$+Z&t6` 
z1SsI>7Gc{m(qIOiTk|mTQRJri4-af<9wXTLI9W){$j&mdkx8qkp z0lh#v?r?)!&O=-z*5g%sdoD`ql64I$!6t6+KgE$$xa8#0oz|4A+12R>mH~W$0kue> zC8^tBg{vWuS5nyaYJ)_~Z`y(&t^clW%({ zQ^NGFtqvaEjk}v(a3_x9HX}~DAYKE+Z3mX|uE-C?;lC0!BovE-Q<^|QZjamRD_@9s zSCNy{SxUPC2synXzpM-URHl}oU*KY4kwfK4_EK`u*kIT|cKFrfo$tm}LCI>vAH3K# zsi;()uS&0{9`{b5|M8a!>>*-%Mi%iB%57`a@9! z(JBw@aCho=h8hn(Pko-TI4|5-@J5P~(WIg?pvtvZwmP-Ynbp4XG|0Oc?oZ3g1fqe0 z%$Q7H0IY;tu)2jpL215%@BI5e!_to@e|%);++zmSPl{$bQ_CVptDc)R|NV4Zy&uji zRDKXhXF)-0@&8`t<`xebO>(HTG%YCdq6*<|dJ(X01_m|Sj=gfK2`UZCw&u%_&J!!z z0*ydjevnY9=9uUXd8m}TtYX_zG@SH2jDKygExa*_@chyL!uJby~Cp0`CB0L26so^ zaB=jU{lc!Ro{K_$A%XHNYllEy`X0NAb6}BJ?eV_S98;etE@x#btQCr5^)HuYj#2Q6 ztE$`zqO`Hmq-6&q6C{WjeSVwj(8Hu&z zML=75D{F7&y$J5a8oM>uRelQFt|gnAzFx^pE-9n6Ic{7jZpc^yJR>Q-{~7TX7Z$ne z?$v(tjJ{}E@f9}wHsfYPm0F)_ImW1kj4PbMX8+iD+tc%#tmWjEkB@L`0mkCy7OHOW=O zlQm^$Jt$+n%_H4CzzvmXm=28E^q#0?sL|$u`sd#BAQ>)~7y^ZbqIhM=pdrz{48G!o zhiWFK2IjVmZIZ#;I^!9sqEip7TUD3tTag1>;L-g7tEtNsC&q=QoF2WzgpWf3Wfish zS(>0uGrGLEc=hPaFwrHcMrT%;+#Vg$6#c9QaiAQCql%%(UM#M!9&3|P%$M>X-alJB zAGcDutJgM)yhC(fWvCMUL(d@B+JH0hwXXI*xI8aiQ@A}Y?LUuoxGO~??(wS1hNZ;c zIX9+MOW=76kd{DMR-mgUQ(fl19SA6U|FQF^)rn_~E8hwNS_%y;g)mR|ms3(=@$pnm z_CNO0O`Oexl1``$+;>HE>R`3Co%NOFnb3VZHmSYe#{C6j%ZZ8mWg!*Mt!T}a%uRi# zHDTn?1YYD~>6=-#e)yRDl3r4|I0chPvLn(X{?%8*QJ>@%eUHlk-=+nI9T(rNfy0Zf zh{@uyjRjN-YopMrXV99iDEy)DsGU74=igDlvbg0l>)0olQU|8<%F0&qi#NuQJZ+2# zI(Xi~walLOWNGKuscJE4TI8-W*XHu?c<8imSY$maW{uY;Jk@j z`VP15#M2+QqgaGABJAz5;+PmOOGoCsB;W$O(y~%|*-NowvD~r?4>+8<&3zGjVnc2I zYtK8oKb)I{Uifin7&tF(yqp#`oTvjjzWOL@DY40NWPq19gI}3UW-kM)>Hob(*cdNU zWEXq5YnPKm{`VfM>BynMOXb;+mo&|A)h3cSeKlGJ6D{M1mxb9vl+fo^f#Q>fj231C zA|68fypLOFIlM0e3Mb2sjJm;AScFr_*23kYaqFHPKYyi}qCiee&!5QhP~dV&Q<2}I zzPi%XotGm(l2rXbU9j5UY+B)|vku87eI1Zuo*%b>7B`&_SCswgwXt{sBcc)UoXrS= zZvMGe+UMMU^B5??6*Y|rht!+Ic_x+6TU+8N4a1pc*oktlwb^s-H7P#)FGDxUwjPG6Mq5c2n5R*~uLpbeB6e47CHGXM=+ z*~#d%}|RV$WkWj=r3og3XR==gvLq? 
ze7jU`|7+l?R!k#z$h56cVXh>THA%mHDnX4zxvHMrd3K(Tf21mAsfxQ0Uak5rM5gTY zpJ*D$)CEbwpkZ`~cht3&+2RA?1GYL}57ug(?F}8+WP`s}agjK~BTACTrXacc7{RqY z7Z&ZQA-fi5aW6@uU&y!Tzw$1AYGxbu%D%xrleyNL3=__Xsrs#(t+Q0gO7dM%>Py9n`2!$Ey@AfF)kz zzMVsEkmra?#3tfe0AVBD`4y3-$D{-ZRzRJ_o}mb?e@ zbY%hQlei*g!|II zTS6L8VKdN@1eK=vN`nF zPewkkShB2p%hE9d*ZWITj7N=(Mn@gEz6ydB!ARm2VX%}S(FYD{FQR$&kO!|a1H;`7 zE|fwJv8%=RNBUHGeIV>wa^t(M7tD!9pL>@yNB2aaYVl*S`+DD)cW=C0GFn@jI`{?K zd>Cvq22uhkitd$*isThmYHS~m#6AV|hruu?%FQ>KUR~m@N3B#;oZnsQ)6o7w7^%-s zuB1m54zu@ldB_u2QN{JHj|xgMdnG++Qpg!EC#%X?sy!90oB=6{eQ}MGqQ{6MeaQR4 zJHV{pzgwXD!nd#pXLpQM%gX8v|6y8$V!XTa?W|}QF#kJ5a)D>Klpq)5h_e>zRe!6t zURlKylZq-8_`x7l&`Hblp^VgljlJ$;m=?c=z0hIXURyp(8={r?@6&gjj?y!$F+I|H zmb=i*H{3x%CEk)!T}PB8_BF3$N!zG)qhbS5J7DC=mLu5By2jOh;-7*lv#~c6y9&@= z%jKxrkLBBW+O2={=m*#XFN^IeeR%eI|Kxf~+qeJV5)yRvvw7=w8+}UHAoR*8DkA=S z6p4Id%|#r?yJPS04k3})2z0;F-I6cM6Kv%BWW|GrjB=7OxgkFeR=4%;zbu(pm<)MT zMpN~sOFA`S4!*Rt=8Cbc-)rFGwR6|IRvcDJ=KY_c45X$FSsq#}$H@W;SP6a(sGZen ztIc2cw0>+Q2zBL3DtSBI7d#CP4(xN?v;k4N6zpa2*jQ5f`Mp)64f0q^kGi2(-gZ!T4Dt+6%Bl<{vxkcR|2+)jLLz z?M>g?l%0!V$lis9@BM*o1Y$r1`P^NT9wsDJsiQl5U_-mwfpC-SRNWR!u8&|X=P=Fw4jr5mbMj|rv$;wOKk_r#K} zQiJoS4jV4~G7+JtQOQN2D}^XN)lzql|@GnQ0*F>fJRL&c*S9|XTY!}HbN7@%VugNYus#)i?KuT40aEmF| z;U8swA#eOUBuMctHZXnOvbX@#ZFTkfGD|ZSmyxR0%(d0fCP@ucCB=KImNQ6K!(kdG8)&!XNN4Y3F8luX!p?Nr0ST16B7DHa z=~+cv#7Jg)QN-RB;+ebzu|W|xcAR%#I79qc5{^#$^;;eWXoL|83W|L67IG#LzBF@x zq&Rry>Q4M_iTv%9*IWK>bj_a*^g?`nf}sp zi|Hp5?6nIUC-mA5zrPZWXYNi;GiUC-sGEF4H>=Z*{= z0O3$becP0NA&X8^a6g^Vz5eprz{v3OsQ7rPg;l(RpLY%Fu47+^-;S*(hs%;xi&CbD zs?|m5BzYDo<8t#3LPk$lh{;@N@b7lS`EQiNEhG$eh|V2u2DP`JPl^Qvsi;Nl5kvNe zRZqttqQQ_|G+Gh=?_pm1_&fIN|MMgHrG0_pyFJcF*3&+6^=}wtWRqzmbT*l0W`7j0 z)`{?oEYUhc(9c0_>|1Xjl6B4kCr5KSlS90YCO%?|*+dnT|YH#E$eIK4@oo)H;ANQ1z9D_U{y2k!Z z2~>|&Wzp}xa+}Ejv$PTRR*sBgl?eFdcNx{6R~W+Zv}ojU={k7RYEW1kakGI$N>r?@ zhJFmgP2;!lWAaL;AG^ZO!;fGNU0wIzz4LBH+FIorgq)oU;Ztui6zo`RIc&{idd4^0 zi!=6PYrRh?0v)*v^I;$ozR$2iN9^x(Hqx=7lGfE0F_-y7>2kp+Xr}2h$U}$LRA#ZN 
zSJvE4MC(wG#K|4QCxZYLxgga&SJ`OBX)A6VkrHyg2RaR+NR{5-~ z@N8F>qQJ~8rJHV{D(rK}N6?=7XO}}yjsP_>89>QwDrfORug}uD)Qy}0y|_9sM!Wgu z3BL5y6>_>~QA)&}LDBv*fSy??Fc{M(E<#gL@D>3#_zpiyeLpmi#YIffUdN&BcI~BH zt@FXzNN#CM>)Cl-(iDc%$~S7^yxY2$w0b1Yep{YUAasDW<&y^}-w7^l?=$gREN8ga3D9p{|OvWU0 z`HCtY>|DfMp3o!HjKmUMS=_8DRCKrX^fXBAThieLAj`$7ZU*ZKx)&=YuX5KMX5L-= zOoZH=%y1&YN`9f%yND7H=hgn*x$rCB&+X?EkAXx?HgLWPHdMVE9mn1fd_ja&`TLXm z%LmO{rCdrYY)^Vi^y_r^)W2v7@-()&68v6NQCmxC|w9UAzSXcd%X-)%`! zUAa=^=1*ESMlHtnK29X}Yg93v>o(S)Jz&`u)-p5FimV(ec@Zcw-- zLk5-4-l7jma;&`Z*%CX*_VwCM_!{ZE$hAY_+z(8|ed)xUK02F?2!+9nssZz*nV~NP zYp{1e@=CYx1*-3FKX%=K~fY9>ihEl~=+cXeKjVl|46_&$j#J`Dg(8{oC&5 z%908-*@Y?v%e)tS_W9(L&>ShbEc2xYZQUKm9h;hYj+<%IKRynVzT2ncxXk7jXY6cm z_ZNna#TvgbVTwDZ#}z9_8X0vxOL>{dY`k)kJ$ZDA#vRZLAyt8mLj)&@7;mW_y}-m=e+@}5mx!? zK;>dr^=eje@SJ+%Ik8|m<)(Htmud_!g-D7NZciO4wJC4Mb|MPLc+f}#`wpx?Be4pS zWYSV540t!v#K0E&SMS)@5E0SQ%Gu@?8hT1qK%|FxYwf7h2>Uq(I<{1eKlvPdVr_&> zZnJh;IRHsLVky&kRpUL}`SFLTS!=KP3@~l9srFu2=$)nfoZMJV#=)DP`j`+q`-IFD`O)?pEh2 z%qc5>T}Gb-DvkWA7j10BTJpyXC!%#x3A&^J@hn97Oh~r1IK3F#4xgp1UsF_S3B8Fv z!b)`sRg7do*H%|=R#*?_(=){krld*NEsT&p*sV@wh1Mw)^YFhOo&^FtO`{PUO*su@ z+n$u$o&_auo#sw5{M!QIaY1}e0i%`msIucBQKi9%)tlWbZ}i!BO!z4T+)G43FdqGG zyB5vOFqp%Cx3-_thG9x6$ElASx$aINHyOc*9oz9$>WI!6;dV+~&SHetv?ePpD>P0@&diLZ_4BUFeo01(A@a(v8}t3@ZI9o`RdTkg zhZRdn*&lnPHI|7}S_uw+5|4>_s;t~O0G_$}IU+p)dg}i-?^MnYy&V=k5lSd`$r5)- z?*3{uFohGg%=4;EVU@0tQlEKr7}P4><$@JJjvsS@@Ke6G6%wKQ-jN%V4#cvJ@Zpa3 z^GAT+yYEdDJ>!HdyDJk0gTq&cW(dv1`}{OW5XQN|Xr*>8R$OQ`)5|WA9M<8&UGjJt zWNbQPVIEDjAJ=Ov-I^&G7uY99zbaO!b==Cn(*N_{?pCAO1GfxAIaijcM1U0qMf$nL z>Ay{(@Pt31bh9p~vBd;q{XPJ7QLCZwi%NQwL0+M-IAcaH&4_I)~cQ|eOUlkHyuqT&^Smoazxd?$ZM*W94 zrCWbry!`7REEghd2iYQ4=k$%je~4R7M$U}2;(;j4MNR2x6RLC3A63PgAS(ndZ^{+~ zJwBB7$s7F;FuZlV&i<)Bru^_*f>=iGQfJs_UO)+zUO{mSV>kOGesNc--8c4)!D(-? 
zoNVi!Q*BKoKgehOVck>Q^mU8eVvP|vb5Dz z5=i{KhSI4l5!LdfQB+jqS~sWDwJY@Z5_M*Nyc$>T*tfPd;10%We(s#|EMTM;6|RHJ zGB+)bONIwHhMAIm>G(!>xWTF(*Cdy#TwnlpOD*C&`SF(=yk`z z7d&BUdVK3D!l6hTeaiLD5PgmUpJU$nY?RshAI&*VxLE3sbyUyKH}VMS)qd16SX?lj zF=w*L+ty9=&U*YHdP!3z0OIg&qm7-zue-&1-*Pc$sp_+MN_yC9iUQ-QJ$p|(FFT*P4z-{4_5pPw_DiHqIiwNJ9i(zmQIFm7hM2*J0_Z6rrnMA0xXY)t^w}KCm)3EpG0@}56t-Mj`Cohg$6z|wFp<1$p(;VY^$&d)2&{s`$#|pE z->mdSWDP$!q{lTFu^$F?y6DvVqhh-m7CXaU!|`4LB87z{i2+budtHq$6 z?Tr#@iS=c2zL7MPGsvK-Sm^Z9@rIL^seP@3S2uNRt3$(HH|hghvKw>jl4qsUpBpJW zm*fnY{SZn~96sp4oqz3* zy;wA24Wy^fJ;gL+{u18($4=DQtFD%Z<3)6$Zk)H4J^8k&t)|vk)Lw&2>)_A-+F5$( z?CWSyEK>&$Vw~fZlBUtK)m1jrP_|9~!$7pWYTa4I|AONWYy{zTP9dhAqK_X&8!B6e zUTDrB1HeuGtd5l(C!z;7G_0Ti1Mh0VDAyOepO{gth45_Ic{NWbSKfq*xXeygv-1u#Ts@l5ckoiWmflpA);5;j z7g3fse*uv7+*OaSzu7PS>Hv$l*I{ZgR zWwfql(dyF7SmIDQ=6Q{Q4ZF2&b@~q;_%-GOM;`>?;!=u%nJ)nD$xSX=bh+4g^LuXh zr#<@jp#1%&YUJ$#`V?S=D$tdi>}BfvMhPVXF!Pk3;kuuj>DfQ|@xkUsyt$PwVP-l$B*3wDONyo*>+3HqmuzE7o{|E09}s8ksPSLOosGcol#O6Pl>2N)AWS&3dM1@oRdlG=fZsF))Pvw#K7@)SL!yV7XtM3TANHj zN-hnHdJ|Jw{cUJ)I+}V#>Ajgen;8qRehcg$!_qsyDw7W*Es4;a&Qd+x**BH*PRO$Uh7T8$k zVZ{f`)@hUsdk-#6xjriH2_iDb5ClahIk;NDK3 zF4Hy&t7`94F|F`z>GJkkvFKwj`Te`6T*!me-9|! 
zoq1xk-a2E<^tgz~=^fR3qrsndn8@FVb;U^R@)mpWRqnAcrPdcgH>1w1bgGKzv8sYv zwVfNFufZTt;#!xig9Ebmx2?hHX}+DIqNDK*$>qj5Ry_Hk0%alqPo}bR(K42RZ&9|| zS2&yx*TBvN(jGk7;?mN@;G7SPI_UnVVSKmU1Ja#3USjrnmedrm$$Bt5jN~NF<-Nsh zqSr6C1P^y-F0V@m6}1tLYoT*%fIH2NZ*>RbY6cG{kt)~p5GU+~y+cGtxQB1?%UmtW z0yA7(9Eu^ihnYjRsS?;@>MXoHm>tJ0?$Rx5OX<9t4`!QewB6_(;*?n^&N7$j%rYNA zB9TePNz!rM#=sE{5pdG9K4;4jwg6zkMl(;DS?WxUk{m#9T{WdJOJ8|eGWYCKdOAyG zYQ}vb-!FbdqdQ-nq+B8$4koAv72_QS*FsJHh&dZS=!`|K58^KmHB`^W(~#&JO8m}` z0R^mQ>d)dJQ9Rhqs1f$8s7$m1IELj|9#@-CUSh;hHN;(cQba!KQcX2(nK}S(4=18#*_0aE< zj$;s*>|hCQeBz7mPys=R)_tqUXd-;vH!L`HOY*pX&*$i_ZdsAIOc>}QsQTt}g^fen zf^WCbAM&Y>_Y;$$6A5A(<51Qr(F!(JE1HM1zR?})bpg9IjP4fomja1J;K8elqgLO;Q`OMm)X1HV^YXUV z^LiUDV(Nf4+?F{)G#_1=VsITkcIT~vjWAG@6-2Ql4ad)6AV(sXP zx;P{BJznDCCn>sUSfVuOwd}K?NYud&x-%H}*7xpGi*%|Lx?b3k z1v=E?3Q0$dtF*B2yqsWWXs}W4V2t_^Apno}KNx!r7bTYcj z)mP(lGV5w>PE&rNai*GmzLTVe7Z6^x>Jq#iXSICorqoa&0UzT_YRZe*ay!+|-|CJQAzmHxm z?%x5wMgu1xi65{heUX!c)LfOpCet)hV)U(69pODvup_(NKgVQ0j*4&2fYN07 z060|fCYynh=itVI%|B623aHxgp9gH^1zJIl6waD%1-9xBc^E7-DNGrFp4_<;!^1#f z@x=&W6d0vrpV*e}if*(1nV539)mj3xT}JV4o7&au!;N(G>{yeBlxjp94Kq<{C$G{V zXN*Suio~tsVXKH1T;#?2haG*+)^p*f0A%+jaEjo)Gd<>_1sybbG z^0TO{)a?t&9#!jm{#jt=zx*1-E$pG>`W;zgRSa5jZuu-@{D&^{3;s3Ks!r{&TZ$y+gyF!s)`>SI<0HH}M%X2xEX!GRMR3-=vEs3-CvPG)|!L(Y3(M%Kf~ zW>&7dA!2ao1q^%b9C7462ozf1|0|e9wAxWm5Z+3`H^7!{S6@!ql9y}YGarXG$gNd zc4qUUU}pRKp;K4l%TGK!1|R9>a~}=4=&eu98rEFOE=(8@CuSj_1>KsgiS!QHx6*K3 zzr#-ypCHo@KfmwSv>EavuzW~do zz|P2k$CJmi_UXi2=4UG!I-Y3LIFejiIV#v7b6>ll*Uj+s_*;whY~xu8RXxQ62qpQ0 zze)tq1ds`3U2$}x4Tx@^qil9fSl+?^ef^k=ETpI^f~Vl=DKi+z9)(JGra;XoyPa7Y;VoD!)Dk@~zh`68>ViQ$& zHB;9#agnDHt{4ZlHsAqeU3Fn0vK%JrAZ_8lL%BVZ)YpE%*YTk+(hvBY$_Q0l_?AWP zp~bj=A_=OUmH9a@cJ_@90pNXoI5}|&!RrC*TI#IG4cY@UcHnslz0_=k-{1!D&C=PW z=ZgDUMXo<`kn$~SBqK~+sj#AAka-JYQJ&F13UuG)E%J|Ad$lik1&!=iTY-G`SJo#& zL@MR2)2s~_jl}+y&QNuJ6(K=m_LcB$bNS6e2S)JFHKSLVH>!>T8dm+Dxw0s;tws226Kk(`{0pGKJ6O6dtFn7QElCblGd~x@=ozr>gybdl7mpcT$!I3Kv zX2E_%zVQj^9o5{^V!lABV2)yIwBN)8Sld~lcI@V=3?r-e?;j1LgeAFsO`QsH>Jo>V 
zM*x-E%}N-~Q!Hc;GN7@^^V%PFZ{y#*xMFd6ZYM$qvV629Fs}s^(n%Rd8<90hE(|e! z`@w_tP_z}e{X7P-TY-^p>A>sY_n;?X`Tl}Kgo|c>t)=Bo{>zGB+l}Ywz64h@pXNb<^9U8su6=99VV_Xn z=9Oh|eW87TVkhK!=^^< z4xuqcfJUL88lL|7K4}HY2el~GUsHC1p$ z#>as2%9;V|k01k|7>h19jb_YP-+yQ&89hrcli5Eu1DT@BL&niiFPUL>25-2{f~}@U z8v@&R(z)?_(kJ-ztjHHlIQeS#zhzDZ_SA!MOOtnb3g>5p8}tMUzpquM!*(Jz@*Pbh zIpi9iYEvM`NF!i3Pm=@^*ArF`E}7v7)1@&BUuzKDqMj+naPq^DVyS+|Z`>JE| zj$7cFYw9#TDGQ(70dg1lC@x7eH;P!L6T90S~V8`bji!pZyuzNtgsU4{0uy4F`)|r(Gj5wpG3U2 zX8aa9v1j0^cmxhVk*jdois1LaHiV5!=jM(xod}1ZiZBWo0q}a60)CQt4Ldf!=A|`M zu^cA)>Tp%eE_~Xryg1e+-i36Z;vO|M&`XyQMI2>oG-z!WTV=BI(`b#n+M8RFita!_ zLeEp^?;d*8`)PC~mqyDpC0KOWmyxx4ywM@|S)R^v2Dlc_J!{K>Mox{WI*th-2h{{} zm4S07uz|m3vw>gS?7_%|f-g1>x}ew2LWDe*^Vbee=ivu*gpCX|V#^6X74!#(ut5+~ zHX3%Guuk!33XREo@`Ok21O3C5FiMgJ>*RI{VV6!mMMJZ2Nx{WhQ}=ZWh`=*-%@|jK z39boRT9iIO%LAks;x%(BQvEbO2Jq#X80L&v>n2rYJh$lzUVH(x%dwM|!t}`Jl@x>8 zZoSTQJ3l*{*a@b7Y^jx2*=O^wPQCWq08|=%9XL=616O>;3yK@RP^+ z*gIjuw=gG#vy2QZ8+Luo@5Nx$avFux!PqNbZ#CaX0N;3`BuSOgrJsHJS~Ap6ky?CO zH1=>&aItQfuP8Q|zvzB0sLW4sOqm)Lnwko*u(KE6bXlhY$vgJQ3`295fjL9{tG=%R zfQ?A5ozz4{OQ_mOSfx@qZKctwbaDN?@ok!AwUUesG2FcI!RTF_Fi_NGa|d6cnDeF+9InGDsD*<1C`!i;x4L9>9VJV2IRs)e`h3$R@N>5bY z>Jh09+FB<2#RYP-DxU^3D#Yi19nfOc-04ScjSe4G=GxByX{Fw!ebX^z73z;v#elKy z!i~nJbWt2}5)zR-jEut3GcFH16ziG1j-%IFs?v+>Mkt)30do6mBNvJ~pgPU>?3qNQ zBOj>jk$Ai6{QDId+{NXa{2O7ypZ&8Y#J7%m*3dT9ECUN(z(`tMtV}#5Br~kDgG8PbgCho6Ps)Z{b*`MQ%NBh6P2V z)F-r_waA)ip3-s@?Z5ZGZc2BuN?egbd4NP^+y63j`QTA+q(!&A?`2jjzzqY5cYXHc znOo$06sibt>}qVLg`VwL=dY|{=UNf_Y&cv`$RS&hm&f^QPN}+%kWGZ|yWp!BLN*a$ z?*w0h3mfu4du7Np*{Y~W=t&q**aPf%@=tS0ik8kf*&P)ybI0L4GL{&nGPty$bnVX` zTxr`}dmepXCVwsmhm}Tf+h1FdHl~Co1SmEkC!R|Tepn6swLaT=q)nbaQZL3^%CpPv z;FlS|Cp4$De;S$z5}Xw?Tlmbia5Pt6+`Q1TJKdoZ&x-qye~!hc5nO}_OIWw{i}3No zN*#QzC!u0{!~d`dp!qi`z+kO8>NyONsdML*6)?qU8EWB*h|t8ZiG^b>M?Us@o1bmu za~L~vahHca9)X5LhXIL{41JF{DbEgpq~hPRkE5#t_*7g2G!Bo4hJ?CsdSx~(SxvEu z4>=wk_+LtDmCcPBHYU3^Xm{X#?d%shc?6@$uloZD5mFm8z=}#+LwaAlIKRi6kBcB= z;P#!m-+}XBKjn6jS+>?{4-Z!NMEXr0g|e$l8l6o0*7~|Ao8hx)7RP 
zzM?d3(Ef*Ni;k@yD{Pmat7L7p?PcitJ+y!9B@V`HLGvv0?3S>Hnp)g*=88rCH0E`VYAA6CE`&2_GUq?(GAhPE4%?68Me0Ed1f90|kGMpd2%H=9VJ;dc%4MVdcRe*gW~0QukmTpRz>mMD)sAW{VaYN^(9#f= z%jou^Fp|(Y=Qy#{%WqT*E*W2sK5VB53t{r_`aObAkJJdxYy{uBrA~g*>_tNl+;{X< z1b(-*jt#rt;|0eJ7j<~PQ5*gxvO}T8F##}YiGlJu#m7Qto>HW3v`e|3Dnki_wpM)n zvB$c7&n(XN>YH_LS)r+qS1 z#)~@E=(MkY^%BN zoi!-g#v{W)DHKkj>YQ;IZ%Z=`#w#oSVUZ9TyJurQCgiwPU}MU{rqBvS2j52M-}ee@P#LUfT~?3TuoG@w@H5f`*t+!|!3$4upNt**j-JG}1&i}g;CNu@k&Y=K0i-=B4t`r}V zYYSPk$QNC7@zyj;B!NJUN_W|xKeTif)nlH^{Mc`3$9x}Kn?5USCFg&D1A93!uyx*c z5&Fx{V#l+;*WZL6>fr5z@LfHaoeJ{1e)NU-23(>?Cb@C|*(XLs`u!Wyen8oof~1{Y zVnWi2#wj|Sgk*3cONMK3{%!i(3>ijy*B__4T89Pfk#uu^6&tJm36;qJXlL9Kk_NBi zwA6g0J&>dI@%1Y%;a_Mj(?X|Y3?Qjw?R}xi_qJ23>9h7^62nS7S6AX$gAQ^dc)@>o z-Gh(7gnL~C*pQURKYhDQC%hqx9q1QV`cz9Mn-BmX%dl{)IJ~_)J%W;jc^G03O~~iOI;t zf||nZY)=vRCPm7l)8?FX9!nUQM=9W-xTVd7vXGs)l=2@i>O<}Y2fauda8C~ zk_RQ#BO)3{iW9?OG>0E>btbSYZ`u@jekWW`=vX%RE0Jb<&b$nP$o2X;Nz zCRgMN*jnyZhq)Y;=KzE@K!~GJiDL`~ zD?r1sk#sbQU0i?h$(uKnP)kZ^#YC#4q96?E%PQmlCw*@#CjeXwfli0VbNv}Sk^!U$Jqt%N*+UH5if@W?JH@Iba~olZmVJjUfF z@BDP~gBNE4Rm+-_U5;LD{hm<|`)r>a(8f+`%pfYKLMn>#ej1ZGfc`;?eQRRIQ3{tA zDZ2)O4GKUbZrN>1`bk~!jK;I4_eSe2Kt4go&+Mkn25oBvs7GyU4QN#~08x)fiAZ4+ z8WoY8nCP&9U^5SB$N3mF;SM$lfTmz{S-jgiMc?jx;1<{qwT9|G9n#6epPn!T8xOt0 zRL4{+8P6@&@N1dUEvUQ_lY~2ax4HS^vMfqvWu!?O{^B>sbH;xjb0!r!n8hBNw0ZYW zPXx$F4zeqeS2esDYeANq%uM-dJY<+q8Q@aUg@`BR9MmL)VEk_T5V##`!g9w-kHh&b zlT@AN$sM@v#bp)ZB;Wt^q7MIQyKj4R+V8Vn7K165+_^|6?4}-$zh7|NoIp!OdZtorR3A6 zih}ZA^U8+yGuAO`j<~j)Hr~3+355OmEu$2)RpHXt>w0Me%aQ>wzw1~w2>oE6RO}3O zUzEM;lfm`jt@%A`$K+2fk;@>N#-5u<&+UJ(AwtJ- zZ%r(Xft=xUb66nsC%huh`+Zm;LT%PCQew8R-s=(_g1#F#FZtKqb4PDRD656!9 zfGVQ=}`_#xnjazNrf zyWWn8id$*Q1BQA^q>_7|``WzeeQZAqd5ie$zWADs$_WA#9tk@WFoIWp;3RGK;&}bd zcn5Y?w(If}qH{9jq7Ho$e8keR-uo=?Irq8vRdKq@8I7=V)BE*Rg4?Sf#Vp zLlva45nz!`{&6%aUBxNdkk=Z^!afP>&FBav6T8-;*~QZGx~X0ZdooS&ksqd#v%}B2 zwl(v8PGCJ3Ci3SaV7K#j24D*!h7{VM9ve3eDZC*!J;5V%BlpBaL=1=f`4zida~;f> 
z89SH~Ox>*(bM{PINR%^6V)h}W|L(jGhiXwtLBZ3fm6gVWb49s;gJ@@}8|vmiD0sydmQr_iQAAbOyBrAU*K+^m6u!0jLKHcyrFmoV33#gBz5ouB#tX8f`jbu~4o zrwonebejy#bdEf%GTxfh+#7v4{1R|;KcWi@`J$sLD&TX>M$|T+F_!D{DIHM14K@Yb z78dWVgV?o{s%x`3&xYOQN-qFje=*?j-bU*oi}|aXtTc$?(Ow?eT$N~_h0lVg8|I70 zwzHVFnQQ;2M?i0&y57H=pDUrFXE^t$sp{yVF;>k3iOroA(TX7Zg7=FR`O^aPZobABbh7q)?WRvfTFTo;xFKqs;f{{);xHnZzv}k*MG#t z)e^Wu8nGraDo9I3LkkT{h3L(_xaZ$>AUXltwK;E*yZJ>f zkNJeb&wJi3#SO5Cn)#TEg|c%1{hV9#GMO=JElfU(bz5`Zi4cB%Jye#nb18e=|88BV zE`Sx}Q`@Iz_#Z##{9mD^@yO6nod(Wx#`Mpt?eBb}vD56apN_`wF*pP-%-<}bPd|}| zV2;TNfr#c-#P7!Ow~k9e#|qedTmrdH&xtSF@rleB^+&P>>#~QW@KPkQ*ecC)8NVy% zy>62uLl+|$CQAtjU8ww3=>(46Jj~BZ&&X)jo&jW{p*~jI*bk&oJog)83uo8i+Ux7wJu9)d}M2AuRb25q&jbUKEQFCLv%VDRWs+*ZT- ztV1HQjHuDM!kV3rhriThduC;wyTgv5>Z&_dq(R%9?v(-buM{BQ3o6CWl0gP2NBYqz z|3n8H%l&7ZWTkB+ZC)yah0klawjhVxDu7dB{K*LfWYkH0)2mFEfS#d-)+aH*@e+b)jlhvPL31 zeY3Zio>L{VU}8)|iKwQ}bd|xRCgIL_bRH|HBTAO6F2Mie(X$!=$VG|&|2tv`(xamy z*%#SOR8`K=7)h;uLNwyK-s#wVOnKNyA1?86*=W8koNS>1xh$KZJyNk_=*2EzCl>li zdqmOe8JTLtqtW*}wZ8Hg;uEwJ0gdRG5m z^ieghn`8qgDsxUs_|v_Y1DRyX5E^doiTjG&DlU}wMIl4y=vs1j#K{^UKCBjS=p?if z)JK|KhhAIUI2*v=P=rlrMWEJ$Pxce={FA{uL~h-i;p25xgapE| z-{~giF0<$|bE6ylBG0tToRl<<&Oz{}>Eis-33Fpp559bD`n%stKnEE}_deINOV!(+ z5tXhr%TRm*a(+Q0#&1p&kqh43c{l@vsg77YQulHqYSG%y4E{K;xFHu$o&QCyU)u7jISCx zLejKMswet0RoeTCXO%o_E9y+8Q;ll@ZArz`L{4#-rY<#Z4i z5gA1Jfjcu^9B+f|NKhQMQ&X;`NAb*-{&Pzw4p5e=zsjjn*{$V}^emf7&;4Z2-(rss zM*A~2Ee;MB#mL2MWlXiZzizDE9D(7tZG_v1Rjiy`wnwDRd73*IhC|yCF7sD^Utt7; z;Aq^r|<{YhKnNP0bFo3uPKK_ zKR8bX@VwOfm1h;$IazeO$*yN=kGM`4{jH5Q`3qE5>WeuW3$ZYn)HRTVlC>a9pH*I7 zY7K`DM{drcFZj2Ctkw0-8R9~zcB9??ypl9qc$OD&@S|%;;WWYr1IEb3-wK*JkQ`rB z7B|}#*gP-bblt-z|30AgN&DnuJM%9H@lY%P7zdRV#^VyT%uO5Fh2j3?4J(<4$^%tX zQ$S*|=}NX?9e;t?evdUFTnGKh2+_i(NYzc$`|A-FHhzRn2A_7Iy2=Q@y#DNr7kL&Q zx>jaJ-3E-Cg_<$MM7C**i~6d0{bn4J`}49XQd^h2ZzU*=BgHvM89(r((p zhJ-}aSr6K3%|JgU@^++RJH+MUikGn0B^-R<+ksIUlB)Yr??1vO`{K{9Omc!V>m6Y^ zY|-f$6DBmah{AT9wB%jpT=An4*+1(mDpqh9(bKf(>dZE1@9)jgO=g`L&lXG<;tD-*TYZYx}yx 
zfK6<=zgu%SEKM|6Ud)--(q&DY%v-jemD#-3%AsJ{1@mx5-SX@E&F`PU(Twf%4B=7p z>LYnaZC?G;NwVP$%chvv22W_0v5|p>eKN`MgwN{M^_lee>& z#?Ty6pI~c$??m-R71uz3bdc9#)04t?E&(88E&OgAPu`^ofFs;oy3At+M z@!BrQSl=IjG6;3@^gReZKp8b}jX8@P0#5mZveYE=u9B*||B0gpxSqDYsFTxum24iykUGnlVcw8*%nG+uBC8@cyG9S=Djy=EyVnp-b1V6TD+B|SwIrh z(K&}f0?l6v)UqIwuf=CcW~Z2)$=|EfQDMBravxvPGCG=4AJQ*Sic6>fEXb|z0+J3n z^vknRr1JeA9E+kd`7=t7v+KTg)ZiYGA!7hEdrPIFms*G{c$VVQ;8?XRGNK@L=q&+# z?}wR=5c2RKOrk^n=v-aP_kA*Xw;g)fkkI*#x#!d_`)1}|A6eK()sJ7xI7+W^uar8g z6@<(0J23jZy`1^bC#_I;zBCqZXn4^5^)!n{k>Y-uz6$rfzMLw1)AYCrji|(*Cpr!0 z>2W?w`N?jAqqJ`$aNWtnV%0G2278e*?|I0`M7^Di_?ihrn5r^r&X@-H1(}`tT2?`$ z7{*>sOo)1<@1hGKQ5|c`FqC4lEc-IpSkU{D^0_Gen|4#ws|fq2#VJoDQFGcO>%&&n zV;W_#6{L^v)np|nvKNQ+q+0WsXC=-=i^mbyEpQb8pn$>%Q{&}g7kHD58@J0QIT$-g z`zIPir$;$HRjUsO!dbHH#AoCM9i3Q&T(=Pp_cmL>2j8o{oPidd5W9ckHgHirG9E#H zCauPwyBJYK2~+l6%nt-BuBH?=moFE_j$VliLE8kE_g|!y=yN=Z3e*&{^plKs8GoYG zk2FY4*Ip)}Zc!frIk<`J<<&Ow*Lb_OHZpJir|i74YxL^Ng3y8_<4o6dmqRVt(Yt`5 zJyNfg;fl7rAFmpbhn3>#;~!!Nl;_`&~^Ju!iCf|rbm#w zpKCl{E*QW_VQ*uV9L+GMBi|bb7P^TR*8Xx=YMm3Rmz{}tjK$&_niz&=-lY4_4zl!B z*eB=4nodI`RRrz&s#~gJ)z?x~86l`}mqB^b{Xu!p&H01Vv;zwgBdYIS$CZ~?mdA;T z)!m=FzgW-7i$W+3AWwuxt8ppCcWZ_yeT1VO0yJjf*tWYcvLHTL8-k&pyuPqY zxLKWTWyycvn2pXkI$GSH(X&usu7!5*jFVelUbVjX#p2P0KKAf0Qe(A>^?ZXOjj14kXd6{iD%5dcU%BpLt2`~ zS9it6;qc$ITCZ0PT7zyI=;Y6KPW%s+<>F7~;+J zVM`|j%hZK??cw|Ty=1_f1-m9`hYrS3Kw_@?(8}CSuyoTRM}Q}K#>P4;vQXW4?o*vk zj)r7PA$p`li6}!@uFHQ&O0-k3+@zpS7b2!dstF>IBWGUy^WE+Q+d9=H+GP~zqO^x7_ zXK{U2U*Yo+2vodp58*KzVeGYZJe_4cWO3l0Y9C`}pP1C@&rF}IL;lj8c!yO_{|>)C zrI-^!ck8YA#tqp?5H{*C!|J_^`%m`>QQxuL;$kjS7d{Y@Yl7sp5kp`q10`VLMstbb z+JoLY5u&ZlWYUY(hZF8B_MTaszLuWn=r=E}+Nu;T3UqEyBf!6-Cf7Uz0?N0eGl?X~ z&533uw4Se;@70eV=%3uSK>-Z~UJJR(6F(Fjv58&Z3dCQ$k@vflH5mj{Hu{Du#;NWb zB?(1>Qsj+AU7RU6<)~j9ag``q_&H=feIA_*%1G`f0jVQ5KfGPo?hre)z7>ng7%-uQ zRw64GHuK88B?l`z%G?&m0Ze!YWe!3Sc>?dMpjFxJbpG&u445#40_108fsBGL>xI9T zPVsbuXwzpPag~c^>PG?n00R8aqd~2$NykYk7CtvC8zcA!UKu4cED~AeiP5=Bv(LA_ zFB-=@S}yPoOP1H9BY9U1Gta2We2dOw^*g(^>A{XLtz)}&uzN%90qDVd3ungI7r(sS 
ztNDw<=XJBm_P;Y$bqS>?QvrR6*+?>&?e>F=)=7UX)5&cTTR;Q=MWp?9etI{r$=D28}F?m^yGjs0N=^Fkrle#?;GRW76G799UzRgx*2Df6ttE zV>(!`P(7&E-G@3ajtWM7-wlzjcn-aPl~_eL__B6x*^PJ6((9WVjRI!xWZ!Ej|i-#-?r&R`5VfQUQ-Nlf&&H3O96 zZtEV4FRiAm0ZUf}RiJn{D-&nC_J}GetiKI8)3sz5^Y!!}d$>;6=BT~v+X?#Y(JQjG zbr%0WZ2}6ndn{l~o}9vEf5rB8V%0$-?gbZd7`)naUb8C2G`3-$j8PN9Y0%{A`uMnc zY2_+jY4YYvt8mC(k6+md-oWD`UQGe@r#Hraz^XB$HeCF<@%?)-j9K!sY3{`_#2aIw zHljeMlUsKrevyf~|jDydRaWZ`*;cpVC zv8N^B7FA(l9Q+|;#2apqjb{0J+|i92CdUm(-{hTGbp9^rh?i=OT~E(Rq{_;A)hZEg zRPAjo=0ooqrf%u?>F@UQX|9UrGKDJ=Rf)&=W9HUSQ16@T+B!fs{WCgdvT8EhI}vHa zqCz^0OiKS0EF8FhQNw!MMEFHu*tmt_i8#?q zuw;9>Rk4ayELq6x(fCE87rxuW9zi?dQ2e}F@PRn5drnBzY`3D^T)2=}xJyfIAVlK< z%VxxOS8lRnbK~GlfJfbT8ZKkR>Qkf-x}(x5sKaV>gbL8@4QJrem^_ z>fGG(+Ab{(_7og58fD8!&e%^M$(ob`M?m4h%PZJlL;09JC)UgS;5WEmHC@))8#WU|xE%Gs;ZMG)S^B9+StV$*ve~uaT6L_h*Vtr`ykZa6QZ%ON zLjh3y?V^$ISvYJyRa^0svRHHV_O~=5Hd|kVp$f`&Z3ZS#lsH_6Cl|fX?zu_(!+t@Y zVF50gPVeAjchZ<$wix^;YLjl`yc!xvU=dymF%29zNS;;^A5%3rKCl=Jor+YCf{8Lp z>KW=|0I{B8MT&PdvdsbgYyJZsK|v_-{NS;C0{Spy56gCCxe>C~mmyU>FADD`(fSV3 z)7by^)c7yFndeF)$>i>HwQE1RzW7hi`TjjH7G2hL(&xL{d0v0TD4!@|bO2Fq;ZD)^ zbWbiI4UhKjRqZ1Ybt!4^-nQUvGDxpk!;`un*Ikb%^z5bRC>&!$Jg(n@PZPnotGDrA z%2Yj=hRxGmU8QW3fD$kJgd{DURg;VXN)_nh(Bv3RTqt>KQZ0QlT>W&0s<}D#I?Twx z)6tkjX^x${Su*`o%NKbcFVtBTuTJou+RywO?>|Mpme*NQRuNaiX!6Ajg)8#bL#HN| zDxb=EdSpQu!>b%7U?SaPMwW8%5^95-nAm|FNv%O!pgi=Oa}bbgbX#Wq!Q7tQU=0_l zJg~@N66*9i4xJgcUKU)yS!L98MZ7>9+OS;@zwS9*pS(FvSUekfGXfu9IN~p2bmidV zILITTRL+fo!3=8(^*Lso!=wMHkS+b^%aVxy=EOxU28zk0wo(_+LgvHd2M|7boNwY5-a?14ozKH|~bB7ZV4CSop{nIxE8 zP?>q36z(|G+0b>D9g(K8%C$|kx5 zGjhg8^V&Suggdd}P`?uxD`Gdk2z_s4LL!YLgra|uUFS_m3pCif|NE^ zUx}LPZDD2>ziXC=y{%uEwC5 zL9SwmKeLg+>$g~0*}|IDcOYL+PD%ClSuRj{lY&Tw;_R87%eZ;9Jc4h* zm}LxM@%q2S(Ah0`X)EIDU}U@JCSd`!j2&4lb9m7jvF<$R~p>Go>6)=wAOOL!O z*hxrlim*k~`Da-nGov>w-VjnxA&wdKh)DLxWZ-;gCHr3{%21P#NGem1Fph*iH5+S# zx3hwB-{c1z!Q9tm&n5sAFOVViBv90WM1NL!pzZ5veyGjvw+SVMv*ow=%MSNzU|&&f 
zatTXq?mnZ;AdLunQzH)Qn?0Ap3n!B^G5?To|`R^gPH6z*EkM)j_|nJK(jnC}3^P%mV+*sQ)FZHkR!;!uM_tKPYAVNU6{tQ`A(%pT6=?S4drp-g(>rCA z3UNhs3LrXoM_(N~KIlHU-+45v!*WBwU(T+df3TDG^^nLAxN?|~zgJPr7Y7q9?&h0# zO_Mg`w;cPZ&=X3TsxtpsPr_(kd3VZ8+;esGs>ApC6BJiHR(7}6gZDuk)x06{*F*7} z>Cuj>0+P%C?vuzkI{)v+&D_Q8Nx)zo-m%N?k0aLefx%q8D9Ll!j3TxET=2cKM#Zf> zW03mXOG#QM^P*ns!7-6V*YFClCyJ=bQo$HYwu_x}6I_SyhR@-ZIQ}X@;a5Tz{Hug< zQQ*N?xX?3O+A5h=&&{+-VZLYGhMj`Z6`DMwfMCkkKwC6MRiO8x@w$*>LP9ly&$ATy zk*_efkzuTB$#l0OOaTng(LB0vUlQ=Zz|GN~JZU@tG{94XhvTzs*AeKTJ2~lXq&5iY zO+{RU&IXw8ER6b2TUrBcGgKI53DJSwUoYe1$9{ilnN9xG_B6#IN$IaVB|wt|2wTp7FX-& z_EVtBwOzaze0!z@#=-J0yA%53Iy@S8)kpbUV#TOf<|GUaL6rCJ!J(of7me(ZzyzIS zz8PR_(QB?MWHQhv2XMLpAN^A}L*Ccd4@2Z(Hgt~1Zv~fa_J1-d zLKAOQRbiu}VJRYHtx{?cyESQ4`E)%xL5lSd4*T-xC{TvJQ#umHSmBU2-1swYoO4p) ze3K1vn(KkPp9kM0m*|{00-t~A@tQ)Bg&V+f?Rz<3PnkfE#^-?AvL4i+swo<7Eap}V zW@m_+hEsk{D_Zm#k)R1O}K`BV9{f%N6NJQmc2DQdoRceV$akHp#9nk8VyE z92*F+@;$$J9&+5OeK$85GBZ2*ZtlYv|DhFVjgGFnZQSj}<*Fs&iVl1%4eo>;qLm!u z$BZFGZXl9r?b%8m^C5BvM z(C(ke32ahgN&E!qILZ}zGsy=2wHR_<>)VADcW3cvuz9H1pBYXDvhbrdhYA6`qgKX- zhB+GcW`Y?)8sX7_TUC#?i1nQc!v`~y3qb<5ug5>MIiX|G@Y;+k-~2mwV4zqTF`_(f zF)I0TI{#YyW|(j~^*e_i#=Xa7nz-K`Jq6S`Amf7J(P?IV;r;G&!>;^~XKmw0nXNYe zgde}NTX-wYP!11#?GQuCH zyekjH?{;7iYLwfsgjeX(HP}C@hD$lcuvVbk0V`#3e^yfx=0w zq$(=XhC`dh-v;Azay!LBv}g>*w|37f5fOcHpP}}87V4bymeKV`k72!oV{_vNuvYv>#7!P&&nXj|&-PC8exy+PM+V7p#hL>PilP{L zS9Y$0Rd48CsA90ys!0PQz}=hr3`AXT>1UqDO(*ipN`m6xdNH!!z1Gfo`+#wn4Tt1i zeNZf(=!N$MLoRQ6N&7~WqUgg?;yT?>8M=~gzL1`;WfOB2=wFEfi#tN+15Dv8J{%LY z;u3*coYX9_8j&*}^f6o${ieBqR)@7`eV$v-{x0n(VXv?_Ya{lO}F7!}6-m%yLmdfdfH58<^@^931b=9@I~7yESUxUVTU^}*yu zT#9Js5xyP3D4o_z>29>e`famdMSqfB?LIrohXoQI(&1nB9S)>zV9|2I!KWvapg&o!F{GGS z+Nu&sV3GzC?TS`Jdf&a*4qs0z`$tRlpd7aJPXK0Tqah$I=)c_5T6?=l*tN6r+pk<> z9_)2JS;`3vcu8JwZSCp4RMbuBsu$zkTPWpfx?7*>eRg+#hK2@?sIhk0xs%Z|0h5*g z_TjLo(iQdXiTr)YfzF6ei_cuk9{|1Jrr+6Do@E2|I3@#@BE>aHb(R9`LCE6zordCQ!*p$?p z)^7i?X-wW)3iE_M=XRLukv>*vI$zOdCHx|96}tOR_+nJZpG7?0D1k6y8c}CL#9~hy 
zs8yCKwX~!IFtE}~#7tBHAEg>m$75@sH1gh2`yIUB6*RJPHxrnD6*aW&t>A2uET1{( zsK?ZzS5Qzc&V3n2+0L$?_2H>{#1hg!Ob2n*=!^JeCcLhotG4x0EJ#m@&TTK$Z@ItU zds8x6yZ~rTqKJ0bi%gmAS_?Vw8abC1zL|%D&z7iTWTh8y^=lS|N={=V^fAIexZqH| z4Uxy$1C5q@_6zl*sHxvi*#f*zGB&WG+p~wsoP+5iWKx2Cp9rS5k<*P)o;SJ~DojB8 zDF40)=+RaSu^oTc`IQqJ;n&9dKOStJn^lZfM7KvBb@JFHY{>N=Jf?Q=!aR-)G|Vm` zz(e{lDl<+ff|*RnXlM=xpk4hq7=ML$3IrwtlWdHsKiuv%N;FadHxH)NnYy+=FXRAg`7XV%X-AET-hmA|f^CLtgKYqD)y(g@VjWDly8$(OZZgB)({ zQ)C(2fP`hiE`J|n_x@~eT3N0MoFIB3iLd)U%5oBGH-$c_3=6kQa_JJM7ctldI&-?>)jwV*Q!$`E-YA&*hPT`ANc$ zSysfAVk<_?Yk!KSZ^6Q@cEWHPz&mqox2jc+(ejy&rG?+U_*NobTz*f2;x4oE{CG-9 zYp^^A4nPrY&Lo<+PLXIU57Ic4M_Gn-$%Qw{{kb+E3b|R#^V;5_JoH=gftk6D$u^^x z-CKX!+TJ>fr4Kj;vWGuSZ)jMl@j0}Ytrtr0Lr;X^Z*=hK*}kmxD;{wTqP$H)cSAFu zk*2bdp5DWaqSVm2r!4yS&)|GrZ=1UJqUn0JFB-`2UPA9mzMj&CgAd3~tU4~Qv4r&x z9UkjHZvu+%DGeF+XSNH&g;qGX5AM0y*0zpt5d(z;8mG{&ywtN9|Cl>fhR?&<2s;%C zA$N1fvk}Wy4f%nhvF`R{D-JPH6;U7))BzK3bXv8&u#at7=0d-OMD>*;>(yQv9oy~N z8*P};&?_Duo`bwXw1Ak#8eKa(V0pdog}6(nHGE>zDDB+#2Hgq&O#u6hM1VsM>@Au` zZNZt`oQ?haArUTuXTLerH8g^i#x$5jk9@17(#MSl>kqv zb*aInucvHQ%3`{WS$2{{qa{l6OUS5b;jF!HbJw7`df)n0$%Oon?HVTqoYQIuI*S-; zbIn;jaK=kGT4Q3vCbb2gwUaQE4Q> zmdaWd2sk*K46}YJv*+MKWFPLuS@72H5m(c-N6}fxBEe>XxW0g$K=OoO9206ifUs2H ztE*paZ|iy(LHvZIHN;R+nRIrqKe~N}p{Q?|Wh1y1|C@k|PwCU?Kq{rvbBftc80lxf zu0H=SEwzgC>Z2`(C@t}18?z_xTBs$dMjgK{4Hfn2+-v}%P%X9Ov8&>Xx~(CCuOiyT zxb0JqRa==@!ur+B`$^*Hq#A$L>I_bsB96B@5P#m_iU{29!QW3cvCs@$D`6bm)ze96 z8OW5f8Ws$9$$HKr5uIrxq_)~IIsMIbA}crc>!sO!Mdcu_T1jdL{l0_v)@~-mAGUB_ z)GTv~d&wJ1Ou8r>9Xn^x5Y16Lmg>iEYs=wn4d_Gay)B*ZeZ-&prYf)-AIIB`cZyjH zvV3DW^aE)MM@J=8FzNNtzz@|-=Hh0ek_*D0?zYPu#Rr|&bb;~YWj^cH3@q59^Z0o>mp`10{-;o-}#1ww4oMwN$MI?7Q4OwtmhMQ8v-$x+$c~f z-SmU=Z#k+D86LTs@1PH*hL{$p_3hl&OxJF!w=@rw;Lo||jhjpZ?Q`pHw!!{;Z#+C+ zGcha2C2M=Wqy*_U+QLhKF}qgr?5cL&)9$aCBtoq3<}=f*Cr_SB6IJ_Dqt9qMas zk(d7$`hh-^d7ZF7=1XYcMc|qK9{fKY*H?>(m7A2h=hz`TdyjF$NntJZI1%rVnbR4~{%36S;XT^O_m+}tQT(_WY;mM%)fBx%6N+deKC6F2U4@apCy)lGV zvQv;&vN%cg-Rn10_c&W7!t6~4t%ZaZr0@2nBYfA(WTl#>mEVYQ!#U}JAy 
zYh(M~zyH6dEpl+T;skdo4hPrb?k>TaqD8+23M9B|aM$7vhZ0iU-FgVc5~L}`?U^$d z@4_|L?3q1#)?RDf_s2Ja8UWdn0=D0h^-|Znqs}iEYd<=>SNIoufc{TFqp5}OscIrF zF1|cPA<>w}%~`KZV>@c|c6IFJdHm(_>PB9=-|Nhw=gQg0Se8nwk|OiWsW(gm*q4ze ziIQXhtb^gSFLW|Kzon-3;KNbBo1z|W?su69yA?zS%0qt(rRonoX}rt?oey=jN*XoV z0cCp=q_PUTr8tAMOxO0{sKtcRJe|sM0H&RrH;_jhuU?5-v}0lW#@z30;Oczj(*;IO z8S(ja0_K4e$7g70sKO&*GC;{lTL7+x1!&SGutI~twwoSIw})E|-NXK6m$ctE$2R+n zTyx@Zu(}hfJNIN&weHa@&9CE`9um7TO&3I+D`tI~G>Fa~%GYJUBPZ3DSXrJT9Rue1 zYe-e?_O>B;zeIr~#OySQ5a|fr^QBkMN5j@pd%LH|GaNR5pu!ZcOS;na+XUz6V zZR~WN0BY6U(8LBGr@XGbsy1)%sPKo(CHy8xkl&5QSB zfsyO?_a*TDJbQA6{7iMi*f?T(8Q}E>EK;)TtL=)l8_IF0Weh~MI9ju&`9It#=s{di=G{Mheb%* zq=E{WIO){zqcgEoCbO{0oisIDZDC=+_GOIHc>CVabon0x;4FVtTY!q}5W|4bZxw4S znwo?(_C)*x9Hp;-Sj4=(GS4N{vY=MY=UBZjtxh^`-{EXzmF;ppGqHaNYo5v&T1nj< zsjD0dfU`yEXuU6XxMFgYRBCjv+sf2^=VO%KeQP@t7YcX(3Pl129`?-rfj%K`s7Qvd zE+QL_E+Txy1UVLnN0C1gdKR=dwg>%s!{VQMlhVrksWMgHKrkAn%VJoq#PWlQOn5xW z&9&FG2tGakCK0udc!RW=uDO$@WPnQg$`m<8X9`%GIEN{zS-qw!Ll~poF3M&Zeld75 zf!{a6Mh#!-qkN5hDzCLxtgi*|?&W3q7M)9XzW1wJT*Q~fv&oJg)qKFFk1!TzmTL)FEL1@ScXNr56}7)zjU}T}xBHXP&XPWQ~#li??ke?dim?mX-||Jd>EhAD&JI6I(34+n#)x9$Wy(1ZMt?#1*Sb zw!G+7cXCzZ#++WORqck9(}9z-oBfHg)q*BzGbGrv3iw*U%ISes#w`P?&6gaYxqmNOc+io$S08+ZcjwjPvXolZVBr|5->80qfkcM{^wEAC|E#)k`uue9tUP=dMbu5$lBaN|crF)uGZ*O|6?xqt z^L2r|0n|W~&To}lE$!vz<(uMdH(U`f2AF$6O9(5HL?>(KI9V6WtbJ_hT(i{poyf2e zI5^R_n*Z&nfnT}55gLiuHjq$JEOnYqZESqGynOX8L8xG-SR+;FbS>j^-)e?KxjLPK zAn>&u`5S1F!Ix-^XTKP_u`piJ-mklTtgEf%Q?pGZQDXoq2D9g1Z+`9dN_Q8V8%7Og zEq5Bu0ljP`s}nMoHM6Ei%hG{*J~P6jgjbnDn$UTc%33}m^>yE*GY5Wg(Ks3kPY+cC zt&@?vjfWQdB-!D!;~iXIIy(vd=iw6jqDNn1{asD!Hi&Bgy_t{;;gnk$MeF9|b;2nJ zF6FCN>C_Zgy;*6RY*0R+z2<5y-;uE~hR$u^e|vScrA_>afv&@RM05#4Ssy~Xy6v4g zJ7_tQddRPIZ5pgg)NFO2Iy`k=w*33W@ytbZ&JNnjahCN8-A2nbM1|k@#ZCM_})C~W{Bi@52BZ0lx-+bO|v*!83-y&4(!F; zj=Ty=ZDk+-hg3v((x60xC^?k_Knca>cFQ4X~?^=a=_DmPs_v0@jE-xD0{a!Ya z_&bI2X`VgHduoXCWWDOVE!lx<`W??}HxHmo8W;8oUB!*=o!0Z$6Z7TZz{96=NlWWl 
zG_y93dr7$cjCu$NaRakWA#E&4jAc0(5HvG3H2sM((*=3#zWg#Z`h0;HvA?c$GHxQ63W&S4EtIN$fNJRE={fkpvoOE8yecoy(TZ~9}r39yQ z#soK3MCtK0K{DT%stS??lyny7DjY{c1D80;N-J%cHoo?Y_|#wYSgi3QJ;|EBrnWaSItS!EG9B24&2x=!er+9 zGDjSN_C~(=?9oX_SNQZqV>z@cX(F#O>7G!#TEF3<+w$Ez=Q86l8ojuUFRdPI&I?dg zauX{wIY8e-FwXF@i z_nNuwm=~5`GSDCEv|Ke^%U;b+5TVPYypkrTUo+D0T*)!)+Z@9FQ;;W?G3L*AI9Uqr zYW@Or&AckSym;!qJX3tqetA;7RO`al-!ez}o$lVP4YySe_e;^pwx#c>5cmt5p?W)T za|8x~HHT3#aoi=7h82Jrv zibe1Xi$YZy?bRA>F+h^%KrbfjS0q=Q*@dxa;Gq+pDpR^fMV*p*+UjTU#EN;)tCzg_ zw0P0A4wF(+o9cw4<<*r5b5&U;3?e3^lFDYf^nIF=abwrq>SjW0pvWFN7~Pstbk+ai7b!``U$1v!R2x8wNDjdO6I!@qg2LL~6I&A2Cw&J>tw7to;x{4V}Tv0M~ zwkT5gXn8f8Bf~?7#BO+)ticO}FMux=Ye?T+Y;E|uY=jN!LN6WM0gt0icIy+R@FBw9 z83Bv1IWJ1P)e4E+6ZXkX25n0hAlJj}uxo{m^?YRJ^;s_COc)p2K;Y7r5(j$~kBf{- zghy2=HUx7j*i%w}dazG9h^-S^0Xx}PMCa<1F@hws$Aq#0GH~EjaOr?8iWWKf%#X*I zp!-f*N;cwRq~`^r=qJ79a6|y!H-+3EDRxd|zM#HFU39usm9cWebeAs+Fj!XBK}WQ7PJk30$exie;kAnY-8^lo;WxiQ{!t*)sH*0l+4XyV8)knw6X z4#LG`ip9t4H|pz6UoytSXmKd~jpjJ;P<4GqYueFf-=4w}aY?|IL5n z`8r;b`9lXB$*FNzQql^|3Znr&w#zJAk_Y$=_ zhS?^kjDF^dR|n3JJwK~`b$m89b(IreKLu$KX89l~>CWLE;kA`)7AW79%5DAq*hbg z8zloTil>6RFZsn!7uYuv1AnCi6(*AG-uK@9Y_gZ0gyzu(q!W|riVh}lR38UTVW}IW zN$OoR*+FP^+q%!UT4@=53vD^5uLb(Er_2hFknqW|-9>sQtt0^77p{!aCf7|ai--vfJ9DL}yJZf3ROXC}&nw-{5@|as2m=2>aCE$)QCXP^_ z90$`Koinuz{jO>Pg+*Dbvw>Xk_-2C+D~#hcY7X!8h?kIGCxlgDt0WY!C(_!o-aE^l z-0eOuKu1TpLW4%EL_BJ9n^9J1by8gsOLL_hqL|7Y*?}2IZ~Lnk3OVm*VJ{ep|nM8>uO5~ zd9Y>Wc`KLjD=hR1i+?1$m0dySENWE&RMTG5sgPUw09gd%wr4r84C21Jr}g*+ z__^z~XWn{KQIRFHf=Efx#MrJyjfmH@C>Ms30|HX0=0>Zh^|WQp{Pg%3@XyBV{Vzm# z_+(^YQy>VMTUDl{U8@N#=Ps{$RX6jr*4RWNb}S6of9P77$Hg4{+V$GH+Ect9kxYqD zB5L{HXn)jQ)!s34(kI{Pm0BIvaRl0GU#&y)?cm)%+qMbxkh=OsmdeUvXf6mi6sns8 zhc^{NonDS(cfelj;yGHcm1_0#{lOS3J`kw9c{!V-%c(tMM@e6(F;!pcN2{J>IT%^R zQVa(8sf*fie8Crp+@X%ZBfYQXcBVe^*$CQE^Wo6b&M;Z_usSavLrEOe5_&bYxje2< zbOHe@cj^n1d;BdFrS?(12CzGuD`DZzcL8(e`|>X)i!Tn!e#f!-q!-f1RhsDLk zu_g2bSD;_Ppl|{0lK3ewkfL zXnidM7Qf61-q6Wc=POxFL&m=dP#5y@67o{yf1=32E21`I&nlu8IAz;wrEp!_!v2`V@2N98c2+uG 
z(~}4UzkonfS8+WFC_@vqPj1yQZDf>_5m<0UL!7GrCp1ZgOa^v%fni zsgN}^M_=++=G8K&buJK3jt3kO5C7?GftjNGR1KGtmy<0oQQw|_F7G{lTcl+JscGS7 z+@{A8SA%shn+OgsAB2Tha40zVV|{!KEc6pPbFpW2v77_Q&tzz$j&DY4p+Oh7v*$NO;rnYyDT!PUYSD=|oc=%_oA6M%!lPb$ z;j>!1g(;wb+M#6xodcKM#uK@7!I zCPCq7M}n8yc7i*(mK+ra88hI5q7~tS?96g?ZEq>E`E{I?_-d&tO%rfPvQ4_u?34J8 z`btL}bK(F&_58TT42SH|M22quwD~9;rUZbVt7pMZZWisetp6pS7{Q&j?%+JS>amV_ zhedE4T>%gL9VRQA_HcVDkK4A=`oxTo(@jmdqbzZnUX6MQ@a9R#Pws5BnlWkD^jh-& zj_Hp)nF8$o+)>Ymx;DZF8Dn2dfBtRe>0w|*k7#0k-I zYHt>AtY;UUf~kar`zpv~Ck~461@bhH)d0&Ka2HuMp*l*Wgow`RP4Rc}o)ID6D#SM4 zJF7K#)an;F#vzl-r-k=>-vAW`5U-^g2yg`<>ZFbJ_WE?0vV!cG{~{<;msVTe(Xre~ z=f*4l@886Ts?S80q^*+o>p>Amx96fS(2M7hUvI?2Q&y@jB91!GEME59B0_{V8tTWA zwJ_SRW8&g;vnZ7UkTNx)^&Q2V2PxP?b7>n*;!XKFz<05fyP6M3N2JA?k=HSJN%#Nz z_)XMH7YGUEC|+ZEuxs4W^Yj1Z1KP|4uZwKI>Zy2m8m`gjtMmb#L7Xz?`C(g-!v^~l z`{%o`b4GaPEB1`z#@TTTljTyCFG#Mj?^IOD46JpY1?@SmF$oq(r=%~UNK!t*B#5si z@gL=*kv2pd-JzU^7hR%DS{~SmQzni>om%Hzjw_@}+sQ(J>3+rD=JC|Li#n{8e7P#2 z6lkSX-w9AZ4yQpt@M>&wMbYmRZfKE1Wy@ibpNsAx>fTu*jRW@;R60s@o*VWF@Zo^WH_cS_ju&NcP4m7{P=2sQtNB5*_vNefVuA(eLQ-?j=-wfM9f8dQzRx*v(YwKkm&>{OJO2o76AFaAl_6%~2& ziha}qz!Eku1-*T|ZyDHg9}2Gzs&1N1uKG@!)4CwdEt35FaysI$9kkoibYIxS3~ZqB`~k*}FjBDs6}IfOIzKUSO86ttFmXeC5ylWOx}7Unu3nR8F##KM+mK$D+ix!!T8lIz z=o9|#;GKLnl+t7y4^LiMoi=OhOiEs^gS!Q`F-f1y%9!~}i{@A_)_7mWK4CimYmCLM zL%AHE(?g%7GJa$7)&+@0pee`fBr8J@xlQk%+S{_jlCW9(nQfD3hu*dbI?>n8xCHeg zZr=XAZ~y%DeF+t46hqCiT&MYsAGLk9wOZi}HzwB_;NDGRNw}bR?d;)|<%&4JT^|T6 z3}i#-Sh;5;FthBwWds1ow`DGq4jaC7N}gIpo|x3kzO;EIz^1M ze=cku*}pL} zJqDCm_6D`}rqGA^&;ajzHH0j?#ZiZ>Li(tRbCk1pTy@@TVA{#_@iJ8Purvt~)cB0} zP7?m-nPT|z7L8Cp>PQ;fTq&YbIqE>v$%u(fBRrDGl_bWq$SI7evQ^#lJ1x8PR#$Wd zFoC%Hs=t%^FBGni-7oxa;JJ)DR`Sm~48@^n={4WC(jtx5Gd4kOObJtcMy*Lroh!}P zlA~@kT>TW5U~eb4m0qj0LWQeiP-@S!|q1{oazN=2Dx}+!^|#v7BXed{7^dl zHdU>3BHP41Mq9s|;&s+0X7o;#`*5)uukc|)KG?2x zSo+trzeeN@N%5bbGcudDt=&<%nLFv7t?TVB7KA<`B2^Hz8Iag2sFh5VB^epE#D-cPPWWoh^WgjE0R%Y-IWm!mgPZ#nz~gDIymgo zkVOqw$Kz6RPGKxDfFa^vb-4)Zii9;`ks@mxb3sfe<~4{rgS2DFz6gBmV6qQV;zQ|v 
zJ8s=)NR0~b9e7}SrnwMdxNyt$O%d!5JKWn3_TCni8B>SziV91(l#epE)XvuPG;z3$ zIJxzF{9*es0N%K{vd&-2hL8UCkLtiuHT~4LBHGXa|Bsk?ZWY8OIvbS?U0cndrCn!a zWcbd7TwkgxaH8ep1nC6=`d1LO(McKZxx$~7@92`p5pb}7I?~3Ru5%Ws+L4_sw-)4^ z5xlFd4e)?b@!?cksBu`s7kb5zI7@AfF77{WwgZ1-V0< z)eS2i2>My_l6Y~K{3CqP?`e0bcqphQ^U5wixZqt-^%;48ugnJA0Qqi}&|kH`-O?=B zH&y;O_EoA}fYaRji!fP$aq|!HQg7@GQcn7#r)l*de9 zXOg2t69$}=JQHNGcJlif_48NG`{w?(;F;&^zB&2dtUqpB*yfSKAINyI#Upjk)F-M zl`iLCe&jIh`LX1wjjz{pf1H*}kl(I5P1qVP5!-n+!Wf(R`}8GIr*tH4UHnoz)U?XBTzJe9?=wUTf+~aXUL>JPvib zo_AB_G7aQC4k%+24?x#KKQDigK2G~0&2@>OU9;I>x<|XGJ~0eG?lV0jIwXgDqHM%g zokxYe-;ZWL+(ev?^oL_GQIjhS!M8OpM`F+CEiYYuQAeKfT)FTbRAuh51HUL3zO?E2 z&MkH3(~1g7)6lQ+i1S^$ZmHix-J?K-K1)xu=tQoS@bG*+BKW^Ww{KMOHo1_vM5@DD zC?E9ArPfvTqxZS5%(y_|z86!^(fTN}^slP9sCAm*UsTMG|EyNYoftj;BObV1#Fax9 zeCo5)8TIdvCmrUSnU;v%sAu~p_TkI3?>oCYerLs5Nljg|61+)xrx3SQ0|=fzsyRgA@hAe9j*_+WRAQ^vnw5RufTF z|i=I53 z-wky-HJjQs6PJ6SKdzyJ4Y}a?5NrIhe**)5ezvxIR^xX+Z_7&!|w(R7|JL-ND zK1*c>40;;CkGPTY^ta+My+Vn<$sy5fVe z*s%&yk^vun%{r^@QMV|2a%CMP>FZ>FtTqughzDlWM(jD0Ns&(l(xaks$3fW?B_@2% z;E9$3)DF>;v^c)!*EfRlk4Mk#FFzHZgnYwDHbT~9pl8?Y8j?ddHO=d+0R}Rh`Ll?+ zK2jQ#NtGRsUUsM!OX*82mZysLHna4hV{?Xd4d&2XUiHEeVT z$}f?VSCvekP|FSTjFX~TR1+1R7Az+}LgHP{+ZAK0<`{1i6`%f>8hQKDd1Czb!ZDpl zUM?dhnTdI-6r*PhnX;qadcTg)m%9d(gLiWeyGzzDKP8L*q(<#;6^CCV7O=>sQNRE% zsrB-6KZY;W7EAEsK5c5t{kZIA&=$sKQRGF&@8%5ev?O*3KAj!(8hSp6f4gh-xT$!{ zKF5UIZndR;C>(nSShbeDvcFp*xq~u4wDIBUZ&(+1Ob|`2x9SnT^T^|?w*@7(cDG=l zvxS1pMaOOT-7k|BzCRu#UQWA<#g8J~qU-AMsmUhP-;ctiGLgr^wh6Um%GFZY^|dSS z^dDNkpJQtj{A&geNH&`KR@Z$24o}Dv`rup9Y9{>io(bGb2ld7WgmvSK=IF4SmjYLftcvBytsG>tVL*ieDF-Wf;9SWRrKsd{kbO3f}$gb+s-)eDP<;a1$?v``ePpH3| zLxD%$y(zDN{CGR@=`xI23#O!ln$|U#N)j)Cnl6)2)^oGM0~QFmT(<+e58wG~)Hl)2 za$tFrb1AR%JZfCq9pP^or>tJ_kXs4ip_@d|xCynO7}4N{;?pE)mss6K!C2yGmK9y9 zw(+8Jg%3BuPm6O3$Jp<~`y@*`Xf9IeH{k#YD^12<@gYq~kZBk0hE-`Tkt zq#}d(o7>RH6H)HiRX~6IRy{>7;79xO@43japzu^CQEL39dA%0vS}}-k&JMV^h_-#U zGnNAlBJ=CQ0on1OZ0KAYm*iyHPVFI_?4p=A04;HV->ot5em1V zdNEYVHsR>KDD|Xhs*k@{wq~E0NSrZ+ 
zDOrq+sgYQN2s@yum!YC(1SL_It*nB-dQT9Y0?x%`Xo+RnW(z(FH2?}F^cPxydai+n zutqy5^TcETD6KMD7@APXP>wu@OwusjCHO_0F}>{n2;V+Ge3i28LoY+qn&1>VktTSU zR}V-B4QtBdJTq1_&kn}pF9r|XpHE*Yyqugk$a(T~f*SJhh|r)IVnRa*uilP$PT@OW zkM4JcJ-_#K6t4FA^GRSZ>#nK56R~O2oU)xoQy{np$Ka^MkPFoofD5WCbM&@sB;U>5 zO>10SK~r~y2G`tlE51*=CqdZEL_Ix=pbo4t`~fUk0cot3txgq~*|uf7lHcQ_HxFM* zo@-z3haN3zQ z6;_+fRQG+AAmU40tc=8Q7Fi)vvbFy6`P>h{JMIOiIW%=WB-g6NZU?gv92F|GR%INc zYejehXxpib#aaz3^~U&?>oSBB$J<(iPue7(}9bocf#VPTp3!JZ^EL1x}Bo9{suXvYsnB@ zZL>m9$6=onts2}4u5)a_;GV(AV@vAEZ>g0}5fgaiuS zSauTW$~&ZQKtu0-XpsyAHwH)K zfJ8w(>62))?bD#5|Is0u{WffuJubK|uv=bg=z2%7^-o;7spv_)b^KZDk($l~~JIi+s#~ORBd8|+-gUW_Ss565W;VIu0>cD0_qW9v) zVxCnJ+Ofnk!b!rROY-K$zjYKJ(u5)-rN)E#FLRPTrN|meJ zKi_SL;6SBQCq$e>e{6v|X2tm|wG1c3O$fEb$6Ux|M+FMsT7^!Ba$s5)R)kLi9Mj62 zQVQ}5OvOya8D^AmaQqGU_26mFt3Wu6saCvMk$sxbUtT3DJmr zjNpoz7j|z>fKryAb!_LZXN#m&DnkEV1Qi?0GT`F@1wsD&)Yy5BQQVhz3CW#=Gxvna z4}`ilb_-OO*2CyCDb-Zs146WH62t;|Svvw*gZv9}1y3vxS$aG?o~i?twdQz6ihr&4 z((A4CI+YLU4_+z5VFXQ55{X*XG=ba&*{25D^v=k?O$EZ)FcwEIm^~9UPeb=bwLUdcA*g>DgtsHM`oB|5 z{cR`&CT*#z4K^68ZI2`60E$APcDA*W&D>~Qo#ndKDN_lGz-oN38;AnHBUYyCiB8mG z!2AsPvJd-l8j7D#EstZVcYj8^&jj=nD2%6dO)DU7ZXhxhnK1yDV9PK6@uKP|{A}vx zeyL>GTdX*}F<_xLec~7#0B8;~h)+l%oOT8iz{)vFDAtgYO_?|yaYAi6V%A?v3!vIA z!mKG9IuW1}7E;W^S(vZGLoH#`u) z4Z6I_nI*vLt@OI$pD#i-*Ut8g=RJ>=Rgyj$NsECJsO8c`E8%5>h2>KN2F8X~o2xH# zr;q5l$oYzrguxjf=OTx(Nh@c;HbIN=U+R?}aGj3jI+n<<<-2(-b|AeKzOmkNJW)*i z!fm<%O`B!XOithYy?LEj*{e|B*c8pBsxIQ`2ry(@e7yGf7q@&>dW8V3?D5#JEL&%{oWUZvY+Y?#q^CjeaLu~5r=P9%=G?QY zz9}OU;x6OmnWT-II9IoMs7Ss^OAfBsN{#q^JMnVpe$#YlQK=_Tog6^L$ElQ7;2ckt zZe|Slz!HscqnDA`9w+~!7*Vr)N!tXPs@k;-q-_nv3Ur4$?hQt|}S zmx;z|i7PtFIMH9ISsQ4o?9xdKeiF zQ?6_NLn}+CTxZPT`oX7}`0o@$zt%sOJX|vW8UH?vnY=DI6_#{SNvo?1QSyXJ3YO8b zDtU6_j}h{)@Ad#RvjV*@J)}QsDEkea5gDv}M(=64mC#i5FvAx6eGpc~^Y!Sm$U0)% zJ^(tM+_A`*9{_Rd*5#S9aYbr!xLU#iB|NKM_Hu6}YTQo02Qn2lf$U!heR%#9a#hi~ zBy6R|;EsBuq-7ZB#?IU1F94;f#R#bKy!DG zCAXy|4|NYak3XKrFCuR&H84Yeh$Vc~i|Fd*yN0*hQZNePHv+?tV_^|z49vgENNhWY 
zKdB!=lVn|R2+Z1DOO5HR{9vzLg>%MqNztbCu#i_UHanhLe=K-}5W|UVGcaO1EETLo z)UnO3)Rpf3@AgG#wsOTJ22hyJ&TAUfz&?YEfV~?;aEb2=xsX;O9JJ0DCPcTJoKg~5 z`F(DOflHkW9&2TIM2_Hlv-IV2TDhPqk$ZtQTVb-8is8jOtpxwVvsKmoZB0a7X!bIL z@p2+8G{1aytPY_n$$sG zeP`af5wjou5q~jo?y%bd@9d;?&fA%g`k00}p9~JaV_$m;8~!a4^?Y=W{WrdOYiqY{ zY&Yy!uSRMm^p~18sb6}zAV)!c{nu_+!WL^4kGwi* zHjux?TWh(q&wOhUmwe{oDCgAMtEdm7KklzCLeU%Ur*FqC_;)N01#WD6_gbC`Q|}gO ze0%+0cnWvY16XR^R_1{zcT-UfTM63!Fy|V1k0C+#T`4{$bICoQmYuZiG(Me)ANrrm z_8ibe{>~h_+b6mICt_}W__u8$V?X%AI?z8yvmyj;NgNXG35B=xvrr(hL%^-e!Rm8v z6|n4V9NctCBW^m%i4JL|#&yP{3=#*wv4GR5n8Hi%3$gyg+BUGvtwaG0(mkG#Xy`~| zY&}eh3UP_*e9u2Cu0qk<;eG}z>Q%deo7z(e-ZX_V5t#F?*#z@?0!V$rT8kZdi;0l2 znnu!7lXsk?TUrLtyzqfDs20{bocDcAp!eW+4|*YW_dtyK)4b5rw7DD>L3Fd*8F-Ve zf*Y19R`O0JxylXpMnx1<>B6>qGsG-V<h957*bwk zge;9+0{f^Q;kedeSGuIq=z5r8RAWd@mRWi;2QSRi3b0CX!n%1J*^vxCX+_-I$(`Bz zGh8Th%{`Y5LJD`ZRoE-YdGT-n$PEqZTP5vl9??$SQLy7zZ*6n>3w38g8trueeJC&I zX6`n~4J<(A3Ek?0l`Difw73s#wh3!KDO76w{`=n$+Ifz%6jRR%PH#(u{b*CpY-IxG zzB~Jgof7-4Pdhu0)JoR`9e+S0xKAAP4$0U#{<|SmFU(;dwp=_T*T3A|KV3Wo%#Aji zwfz%BitLGI{kt?oT&wDL94c^#@CR?U$pMIpw@mXNd?nKqf+F0^*TQCh96nWL2f0`G z&po{y6e;wJS)FZWRBO2O-Po*AB4;h~9{8qY@+7A(J_Qe6pNBEuZiews1$#%74T`F} z#y*kS?^W{?U@Fbqh;zR905Y595lUvS1X=DX?YNHLw;ArTgZTy85$a^Y<_P^7+9?zY z3z{l5%_fA9m-sEeyjXRM(k_s_!;Q%a@m$v8j9W{eJ^1)=UV>ead}ezVA^{eRa7=fU z>;uh(b$_n($v>qk^kfJv5l=WJL{!9+RLROj01V2ul~CK!!G~Ip9O$i%5ni?4-ZXM6 zm5=m&=yitndt4VJ<2aUA5%{OmIX7NE@NLa>ia7N+M!LYG9L^*+qPE8Fr;iD`lF!s@ zV$@_R%ZQcJWMsoIF5qVG2Y07TpSd8P4WA6=$;TAe@Z6_hflHM{2KGE-?XK}Y_JICX z?}Sh7`N^5E446@{c?tC?wP5gJ0*=xEuJ^R`zy(9F&YfX&~FbBQ9l=cmyv%+Ro;rzSK9Hw92_z- zz6jRl!K?&w^$9bd#MrZugUcBnQwcVwHgXZ(f+Dy$$`SmmyZ_5blLBL^SR93OkI6Cn z8aassrdBNt`2}_Ows7sMVgTKq=9*-@4JkQ4H?^>l*Zw{i|ov~W-YU=VQaWQ02wzy%tVbCCyM zIO$8$S8d1s)G`k(DLx|YMMF&@Y;tkH z5jP`ktS8*5aYSNc+Zx}$KuLLgUjtd86DFY{Zb%uJMj9-J!yem}g37KwOkl`Ngdbo* zF&rFrYk-MIRfhhmpqSkL9dPE8}visd)H`hXHonV!BG=R1^|v$R=GQKR|6lfLZzu&<%+A%Z#lx z{Ph$in`Ym~xUL5mIMb#I^=^~3WU(Z4TWBW4I0zdu6HsYD(wtcA%w>|FNOA$3L$O73 zJg#qG$s?|$zWpQfkNYqZkzigi@Xig 
z>$MQjXWsJl-L~;tRV8=x)Y3F)5jg$>mC}40C_Jir;N<@4J}(i@3tjeN@Mpne41jof z7szjT!FZi2mWh`PjbE3kP-faxBv>5RTbe9W0%J}MC~MgC#Lee^C!w3@mhS69Pi!Qi zxnd};K#eM0$<-DeqA2ey~`~I zE-cqtLk#=|3pz6PR`J-uAs|{`DhO$d-(+}BgZqX3=|Js&gO-bY-o3i?dpZcRbz)y> z)!M&&k7kLzcV2FCD$EW9F>oryKhz39V||xSJ6zH=XjX$u`kqJPuOB?~`IQ#>8a5VT znH-d5jttYT>+%s!azh-i+kM9{yiNM}GrMn%jb*+j#F)|R(u)Y2ibxPg!(!NoVefUx zMBwnjK$&fAQ)rA7y#Tznl#Z7k*iFVzgDA55vTQjHN+ym6yzMD*=5ctu*R@rZ1gO)~rVJi*ZC!PWI^WI`3rCj-sg=e_ zik?wh4aoR4AC58?eqAoQlk%!H6tt|wn`(yX*Uwve-XNHAak=e%Gn{CrjDHZ8X^=%>z!VD2jdXx}Ji8h)CF1zp-8ti9W@4%Aj)YVUyBUPN_V#cPb z^nIkbRDtC5GFnuUfrWu z@YHms$Svb{DgfTM^Xiu@k+*LkxR3^DW}T)MBM=8_XX(yRRw-4Eu%*=3LrCdG3{K*p zY_qewoHJR(T!7Niu+7myDM|XR#F#p7paIYfO-3&xtTho}LXcd$wlYtStF{f!15c!V z&t3J|yw1ePt6|8(VEsVbwIA(s*dckqnfOH>(06ZdJG$DU49E9A_7!C{pI2CR^6Bu-Wdu&>CYrAa-7)g&@~ zW2Fjet%-<;cmQ8V-9(mmEe%QoM)dT&^lj-=I+$FV$X{f0_P3wvVu0P*?{nkbh{@Ut z;(9iPs$H;}sgZ!G$tpq4UZox?o6XLC3^|@#67^@KCG20bn+e?E8-|Ok82iZ{sJD8& zVYC!2F`5H0!oiUwvp)Wh_QD`uoLfOYa?FW8W>?cA#?rHKcZ;SQf|%`z&c`8m>-&reBRyGaN==DlCq8LhQ0qtvNYtnac=Q zg`SIbOr6n$o{%0Q4owyKZ`csZDpj&R2Pu4rffdROy(B}*^(r%qvP=abyrVYRM}~_9 z0N{``G`*!zz`=b*o=~%m-Ft0P4W)eGdjir7XH!e_027E!ll5!lA}a`r72G6`11QtU zaX|{yew}!>nha1f zo6e!v!PLVK_Rm)upRk|}G)F>z zVvi|q6_yU`%oa#m@(!B2D8+-$mXZxCRD-|9T7n`~!_5Y=%Jrh9V10=!SK8aK7;tW(Sy9CSkSM*sSRknRwED_#~${bSdRf- z-uFxK=c8Ydz=x!2eUT$N@f9$P2UZjN7nz+GJc zZ`s|yaqRWytjm!9!ZhBZf%4!Bz^5HYMrO%oe$4vk`A2FK4tH}S*oZ;dI2a0?dw9#o zJ3{tCYTQHAN{H-5t;11=|8m-5cj`<$A03#!s3zRty#!rL(jHW+D)@^CAYqmF2oPKS zkJ*I}3es(5qnw>PcKXoE%~C^a8)sMRhp3h`;S7#Rc}zCl(H^aARL4fUY+Xcdb}?Su zxE#1h5stkqj$5iM;JznbSF1Pw*Skt)duK70QmE>X1J^D@}x=sVD^@I zVRp8>u6#p}22Pv?W_e|8%;e*L5~)e_-o=bQQ&eCI)ekMqD0PbZtK|a@=R%*bVYOzwCi}Or0m@4Gg=g$-1aPw*($dKaUMb8h*cS9s6~+Hq*i@X1(+| z-tL^z$*@lx!Xe#s!l;=5c^qpAtWAXjm?iJtJO%D0Ft=F}LsSkXH(qx6#Nx#{RU+re z*O!i4>>Ep)BiH29g-**TwILRpoOgzEnLwV-d}SVI$eo`myuPj>!b}D7Yj`Q0JPt+I zA{ih&*cNqg*L^^X*{D=e3jSXg0ORaTOmuO`dj_O5fj2liDbWwqG>~Rstw`3_4Bil> 
z36nBk57>g>Tp@Kx5u~@Y7gP`X`DeYbwCDQQo92r6yVg+^Q|G98;@zL!UNt`Yr@xjmSU3#p@PUcmk(tGl4lM5I7!2NoYzFvmaY04e zIdU!6@v0(Wk&%&?od8Xzr+jMH0=l5;t$0&@3N<{XM^r-$uBAud4Cc>Geo}C3 z0|jSt%A+kc18xJ`I7g+rbN?{=;C5f4II;Om&cu#!3HUxScY#N5Ct-z$45@kQP087m zdMhT&w9i}8{G@Q&Zv{jWOov#KfB=`F;Sbf3gcmvL$hKJ28SqSia7Onti>!c@Y1c4MIfLos@A4q7b2=FOH?c4%73s)t=91)@@+K6!Em?CX= z%sFr-KZ36m%Et(8Tx@lo#UK6naTB}vM>kq5hBWWSv6wf7&STJN5_m&N>FG^zzbl6V z=BC0{>q>*KOLGF@%@O(Pnwpx#HMVmuNStF?9oY{iWo{*RAEbwW+2|w#7cPznb+PXV zxT*tO95nz(O*Kbo)7tYk?ecyjy1Df<^WA2;zffB&omX#^}MAdPxXkvad11QmFC_*6kEcS75Jfy6Sx2gtZVRN|sM2@4` zfhr6j;5wpBYP>K!n11FSU6o3FO7I#We_x?2QrI-x3SgU#V7br25-cJDYb)Sqz;)uX zZ1`xC_yO)v>LjM|kBFLt8h+~ErcG~k)dMOQ{fL_C>gsbye6@|O%Dm|bCZu7_-=CWz zjHRuuE7w@;Fo69117J4B2S?M>ug_9M?3`nRB@7TX+RWHGYwj*ot?+4W{xuCQd;*Lo zj9Z8>;D3+Bj)y`dfsz2R95*r8v)ezmS?B-Wl(_}LkOyFh+hF8&z+b=x+K zfRnoZ>ks8k+Lcz<+ovaG^GjQ724Gtim=uHPmhn>kmOS(d8aE8c5Y?FDrvVtBI6~w1 zv-?7BWK#$~Q zFvdMK`rlvBo*m>1fL&)%24496XT0_5)HMW1_MCB2Kzg*&2Vr{8t8_JcIdV|qK~yT0 zAB-=9?~QZ(8GQIj#)6h#x9hd;I5EJdob9mKSO04Kc&y<7yZb zIcRBW44A2Y2OA*VFpX!?Qcl=0R~qBQ2|g(vA;itWXw2YS-l|>g)sgGRm(#O=4)F4@=2O4x@iJmq_(c#CB$<-h--bPP{;@112DpTjU4`8T-(n{`zvx&MTHM^qGKh5=Z$ihOA64GEB-ge z9Ur@+EZEGLdL13?dmUTU8uU^^R--a7KgK5hP#!!%+lJN=^_iorvAzZQ`B*8~|JhFd zl_AtfAej!6WQ;Rwu6=|9v_*Jpc7s=zBUKi(L^CzPUJAElH*NV;rdY<4KD5sX>Dpx=aXkNV--g;?2_Im*P zwKFgBB1Hu`x69D?E9L$#KpY=~()pk)oqqa$)c6n_KMJLA{L_cwZvSjX|omxH}ony*-?hBfpmbF9ojFQrHR55K#0+E>J=6+l#l9u;dp}= zPfN?Qq5}48Snv?A+LV)iB7FQ(?IApDFe#W7t_u4xDO$n+d{~#v5X_{scq~k98y?am zyzbxhj@{?=+C-UxuSl{2HH+eu;VZ#;OEv<$W@eZ}QW6Cmb(gzxDf=-wMis)jGLzh{H+zY=(Qur9Q+Ho>_po*BsXq`4x-bg5jB8O$;!>_#W~ve1_*S9aV6-1nqhm@Du1asg|Qsc)MAt6GgRhj^H!m5nvU zeGxwrSP-lkP*2L1?eYM)O}41@V1+h@ zKvHix4lauy?IsU>`XRCr*OvC$o6z%cR?sN`Q<^=uz(SRiq99xiyS!(cgi!YATYA+h z8B_{m-$)3s_k0(EE+I8}4<6t(hKa9re$HP6o(&RLi-rd~2UaJeLw;gDZ9nrY2D*PS zpZ=_4P0qLLMD2GM!+7B74lb)3fp2qd*Z-y zNV)Zo&lhTs$biV73(MtiCX|DCG-RJ068Rp7lj1H4u7>|PtbIlP>k}irT(Nyku3{2= 
z;nnr(GobhrdC5tGYh6bJ6{qv!^VY<3xzv##_VrX=gyjLK!P2+9C7idr-7${f%HW)2U!j8%G-(%O!xRMGkiTZnq9tmx+f+ zP+2}!tp4Roe&sg^LCJbiNe<UO6gMG*46Dos-tYc73p5teS>>-r@9k>(>OF`K0?wT3?8(#%%(rA$379si*`CmUIkZJC|Rgxdx zE$Q42hsIrLZCnPf&QBa?h9*SsMU9+)NF-X2kx{aVf3GAGa_;^KB_l?D9=Tm0KdMFU zebLaCzhMraeVVa~o)2gcs8g1dOw-wZrdT#a0~{ihk1LnaX?$W^$`-7H<)`XD)aR!E zJX5CT1OL>=rit!b8^*=XjvOZvcXqBy?~W7C6BGCJX7^e7LW;BpW9ojrB`v*LLZ+X8 zIJ?{JB{heVAMV6ls@zTNknU$;?hn2iG;*mH}Yx09l3JwduFIMY%2;Y z&tt~J-rBpnX4Jf-seSDSunblbeMbnvi9ecmzCTPnvcJWI-aZ<+SvrgMy&BFqNDZ7^ z4Zldjpk=2TGyiRej{ZuyXaag&DUM{o9XCLGt&He{<-f^u-;cF1wW6SPVV3x;tYlW* zoWrx>;qbtTefFxT=X`RQ^g2E9#_5ds(d|x$G}>EP8ox4qNgLAox;?gLYb~lfxZ8D^ z(QR{;h+w?S1|*-OkBf$d>?UE;(n1+?9+qMyZvepsUmZ0NDedyn#%uD?olneD zyU>A`>Ik*E7HS&ULX!sZf7A2Si?Kjaso+(;hr+_puyi0B#}6_4_R9{8;t?Q~ zhY&XWw3%1jr%HDQuIwW}J>$F0VvkL)YVCO~a|&>=g6+C&2b(4RKfpgz51$i{`ojRP~$GH20dRhlQgL-gzNuO_bH7 z7mr0d6=l~LF-Q_1BS_FpI2*{iU2MCnzRS{559~R9JT-PZxZ~*ijYbKSoiDz?Wg6HO zG||y))d(RQk@L@f2<4_bj~Cc*H*h=Uh-}nJ=j5w9m4W`F`$s14%=S@iIHmI2zxg#e z#A{-8W=BYiXTWkJ^!|sPG&H=-9ZSsx-&?gip1Y_@0C>JI zZW~NkO#U?(-0yvqQ#jFIWsL2Yw>E2kYUxpvv=VYQbQ?gz>}Ym7T53Rp$6xIRH=5AH zo_+rpO_F#yaVxlRMJv^AP%GtasJr7B-B1z|5&J$iG3O}f=o{~ppn`J+$h&2FI1 z&#{fGh~pqYh!mMV52|1Nq%VH*C$`^I5q2t)-aq_L_ld*Zjn39N-^g`v;`a>;;u-n7 zK_&TmA9gtNjX6eM`nut56C2-&@p4*)?7sR#UmJhQjAzqsm2@ zDGZ+_9)(3kO*4kNU1grVBac;%>}HXFdWUWfoW846{McABp~}8k)9AyBth1)qRdU8x zePWAMKn|W`e&Tny8?E0(=vgLd*gIMkadC~0j#AZtm=4za%N+`pvZ|Nl2B|!~$`}1e zx83)tEK+m9?D~p+fa;ijho9YIA@8lo)(CS zUOyU8BbCmQ<_<=FzPS76s*(^3H2>_}VLv{;`?-A{^HK3Mji{l=-cbUSSE`D0Exi_A zMhi2L?<6H5b6m110Isck@fiC`=3CB$%vDsw@D=I$i&_+U@4$_)CO5cG7?|xFxl4Q0 zmM6f|l=ei#l)b*s5(?3v=E_&p=VBgk*&G@vz9?>&&+*F!=v0J8s{|HDM~QOE931PX zpZ68IZeB%Uy&X2{g5}@;dYVE3`~Hu=s7Eo!0zXQeKGlEK>3HAc!InN&R}uk7X9IoS z&W-aiex>Odpz`L%T8(8 z&`*A-@zSuapasmkZxOTNQoWy~(zsI+hzkGr-^t8$X4K5fLRqy-a%d{0144aE7ISyGjS&m#mLvRMhR!V6@-D2 zU+;=sqyJWKQ|Ark;LVmuRTWf(h_#r&M3U(o7xj$+*hqDac&3Sj1YUQkppS*UiTB?% z%4t%dL2H*U5lCfTxYSs|Q*{2J?kfhmAP+rf6q6*t>kFJZRWX4-iYtS1fQ30FvgLAHZ`%t 
zbF&d*$?jF+MZHD*ZDH+j$me(yQPs}aL*=9@5kThKBwOG16;l^8J0f?k3MI9KMPjl7 zlF-=?L|TQ(Lzfv<+>!wj1E@?SJ5pM_iZewsJc$6C zi-X%OzC`ElgVgZ#(hVl+wf))S15n$VTr;4@$Wrt+#%WNb3+If}eK@{hrNZRzY`mvS z2XKKb$v^9&n`D{RpO|}O?Cd8=FCqa^rLst>s#*8b#p^ouWIl3{fIQ&B4%l0%nE4-c z^p=_bj{oQsZ-Lf2ER~o*f+=9CcDvA{@2rZbaBI_}FCEgy_8Pb4#nu>HXh@qj5#zFR zy*x8QJPsbYI8r0k?!=7xYRBWfZs)!+X0{iJ>`OoJl%9IK5#gR9S+7tN9xfu>#MNX7 ze`@I^s7#$|!XrB_WNSK(jD8It??*lL=*x$cKYZfnEFp@sVh7s4gdftoFtZ@otP!8( zi<9lBPB8rN)HkbyPlZl1`?SN?NFw>CPQs2`{K-^r;$P*&hMvv$1PUplkZs4+HpRJR zvY^4|aYM^cry_S3V97!=g+2S;`}Sdzh-`jLqDYDUQ5X;~du0}5ZlC&;FTRgXL@FE8 z=;Ho{+fop$?=|{_x-@umcDOMovIi(VJdx>C8~TCy`Q=0{?m{yv?ouaytBt(ylWK%G zxbtgDY!iL{XH=UL#@yyg(=%HEi$MwJ&kS*3`_XKoTu%IuAS*&4RsHYNPV!c|3o_~ z${6VS&;Q$yl(zdfSP{yIynldS=sEFcK5NDl$QL@At6up0&i20Tlc!>SF0F+%aL%F_ zpZG&@f77raXYRq=DPfJ783^!fS;e#G9p7*GL8L)jTY3s+mE!{yrG}XqJYj2lGV1jD z!^+5>8)@F2w0ov=P)W+OxEa5&cvRBX9E-2mgK>F=&P;DADW-+9!USBt@w4VtZ7bf^ zfgxh5Y}LtZNQ2Kmdo=9rb#t1`_M8nS`Wen_OquP08kfbT?2?6JYVbxz?ZZo=P-!XV zM@DEou_y6j?d*DQHgQQx2T)$PdpB@Zsd$ke_I!)f?;F$<;n>N94mM0}I@B^AvSuIhjqB!^2=(MsTXDkM)6+qL$7tnJ8jp#Be9b@q}OscKgHSxUfiJji7&fD z9ZjtqJ4!qkO%JnQ2)}thuZ8$cW%taxkEP>jb^yDgG4dg0$&*37xtIL15oP|W=sAM{ z!BnZ**dzvodat|VvCP&t=J1JU5}HA>5&d7cF9$U@jN*2sH1F^`antN0KlDN$_Z^;| zJ}Up18rakB7kGO1q)^E5=t)rQ`o9YmGDcO0s0!@%WI&IV_aM&x!g?Ccnwh05vvs}{ zG7mmuP4P0UW3N)XC^D19Od&@fRMkx4-(%*TEkc8za|7}7HOPEqXa}?N-c%8FRrlLc zfH(qJ%jW*?RMU7!L}OAh^VnI3r?s3;sK8PuQK&h^-$IpVENI%aRskU0F*Ywdu@9U6 zV>b4}mb`Lz)JB?2Bzg1QeQMXa9WJ*6*S!=ByB!($H377^`KURt+WY+(9voz=;Fs(*EQ ziFEQws*OJJNB`N)O5*Lo@UI^^F4{h4YRA1oLPiU31EeF&1O51(u3j$oES;m@){}aa z9HqUYqeLYrDRuSmJX{pX3|wMjVS5pBGvcsnS%AuzS*0_rhN0qF%w^SgUY^xdw2in#Zgdls7TTTAe&sHTG(r1*13x zjoDw$3=DB)ocTs`9^NaGFD$%1z1u6FSYwzss?A<}p z?Yf-X#SVGbh%_3S_+hc3t^Y;b$MYZmdCXtBuBC*puivv;-@+K^#=*xp?b67ck!q2*#hrl}{7s-~@HHBHNv51M8C)uFR- z(nOD$P&y3gnFZr9-MfuW>7rpm5xMB@1a>zanuygQm%ClU*|l*$ zzYUkF9bNx?*{=Qm(#&%^N}@YB&rb2v`;#k=Xz?(lO7Q+WdDv~*CA!)4r3N=Bl^L)GWg^PridQ!Vw# 
zv+1SM(BS;gAcRWoi-f%SqZ}n+Ejt@^$(y$et&o!lETzDr%Ay!S?4yU+L;82ztH{vqCwX0D2p~NZR%Y6G1JHvNoVZ zr)tDSI185#zqA~UoyY8N_qd;juS`5^1=JF4RP=(m4(j>E7u;uxX&}aSonxwN=org4 zh3xUfOBcDsQ`p^U_)Zk*pNg!Jh{d*q@bV#!pt%qE06sMm^X`{Pi+4 z;X=jj?q+hBJakB)vM6mWx!6;zP>c;4S^AU>3+-$iYNx}z7x424ln2-8e`5;$tZ&I4 zfX z?vHM-vqmSQR+renFV`w~i7!m-fjzk9*#3tuE1w4{D*Cqc-uZO*gEJYJ#{_F;UTyUh zl?A*xCU$MDMG$pDhi?f=%>yBueMhx&6*1>R=~=^l*Q@DcN@`aXXobh&7vqa+57f*( zI5p9E=03_Gr~>qaEg8^EIzaoet8~qS+BZ5k=&xpR(*v8s)={TsI&tv_nmd)lgmU6A zX(-n9#mQ8HlZxEip^!@(TV~B}Uml+aEWyI*09K}prM{R(&_!Q@xv`hdUQ~3gnYMat zU>O7u|3Zttyz9kslq$?P5A;Oumv~NgG{mxS`D3%1qKvs8OHkHJRfjdMiV|3X1VS9t z9Yl=X;Q`2L_7%DT!)cheZmka}K?d9Ky8C&w1%j}Q zpT+hzjL<5T7K`)Xt7}sIJRL3BeNeR4@|0n%tnENPtK!6rX+K;|&`>+J5-J_I;*I{Q zvqLD_iOKtg&Lg~JuW%RwBqQ(FVRr<>)j149>h#xr8+M(N5nYAbRpVsT1_ z$lJVh{FFgxj6=CJ{wgISKuw<|*tt<5wP$L`TUwUlH(-V70B^-=4}rXnlH*_t3&{oC zP2FDMJy`YeGtv$HtX7odj5AyqaScVL^`>Y`dv453fmF8cSg4A)m{L;P9Q;LDf}Pz_S5iQu zs$C}fk1hSjlkP#4OG~Exqy_6E156qPJWvy_8p|cU+yP3=**d8q|DW}{FBl^iOLdBe ze|-_%f>W7WxJK<^a6g5YRa#rPvmA?_*e};QW(7ESWbBepr+U&mc?NiVHD@A^qhP-5 zq1q_Ynoo#ctaQJCOK`Y+s)xr+^@Hgo11nSFXF3extt2FH+G7Ud3>J|tH$2K|q7|@Z ze_)?97k!r0XT><7Z>g_)u>Jny%yen9Z{?7HDvQSBd^&~3bJ&1^SlA znOFKOExi;1T}y(Y^RVCYLx9Z{+e1}FKWvf#Jsp>l zQN6*@W|bZ~jcGl%<}FYtV7s3KKq8;Dp7-e68i;Y`(CIYcs$)>Ra1NC6_E5kj&bjek zx+nmxJoc4w5SAX=n<)LLeYt5J0dj6E7B?uP6%?tL3j1CsE+LcZh)n%%Z) z^>*#sHD_b?@X!OV*uMeiVEWXpGdMt8xceyX}z)E5=t$2ae3rvP=DdFRQtrXww~0QiMEwIo`K9>fg4u zXKdrI`s9JvRgz*%9xJD-vojY#3`j*-O#`kog91~3a9w)jys4>%FvysB<;_goZAq+a zU;h{`rv|6%45tNZfGkuN|0!e>5pQ3xjq+Xo_2Tq&x ztJLbOG&RNq3RK7qO@x3=8T23to(T}^ay)rYojUzVOD>@+WQ*IdrX`~|JfO)_XkxB4 z0_E}V1)v)0;9+dvje3e0a&HQE=xWcXZVA~O4EqPkp2gk5$jyCgdY*@1wucN!_v$%;&t<)AJpt^xK`m9h~~J=PAmmZ+dmebb@t)y^?33C{*7!+%%Y}xPW41OpG4Au%5EWLsJx2hR> zX%7xh$D#r4%k~K+i;vGl*8=5rfpc@jOF%&Mi$0W!O2^hEz$3M(cwx@Hr`|s@R64^u z=B9BMNL||hd6S$xVs^_WW57u;9+Z>xu?rs>b_yMSHmi>K@4aWctfP9z$D{y$S zsE=I2C@X{HnQUD|!$Lw`c%ygwJ!f?$a23hPcN#EYoxoQ|-*p@He7z~{|Ft=JdZzl@ 
z*dG0bU)L9XH=bMJNJ>ohOG{!q>d{jZ1#ARMy#muD506rf*^Es>qs8`*pVp?LAk3L&(^fl%DTF?~b}=|F8M8v+%&?=oLi;7aE>cHfz(K&%8Xj3mVZ$0C}tcpcb^A2Va;Q zRV_ZBl%tn>9KLdh_HFLTvIr0hV@R=ACjh4@P^V&HIRmF|ac`_e{?xLF|JkO6No9n`-Hhyf#e`Sx zd~GC-6Qznh5M0E}cnKdUmBrhaFLxnel{`}l+6QQjM=@Kz2?X>aVYrq#P_j01Kww~=>n~l9$2Bs!AWgO|T{0d7Y-Qm1} z`P_LGWK{Zb^}GA?y}i53pzWAf*a1~7mP|gVkazE5i?XB7x3pxjiD&NlP%A5YE34nf z?K1Jxq2wFuivE7iQ*iyGt3u@E215=55V!MX|NB5bsRKdHK94KYc{ZOM(_6S~{37kd z%s+D~glhm%f=d`%u#8;x8*171O$Q#zk1IOQEkc()gRbTK$vJIh(+#cy1YYDei@l&E zkfMj{rH=QjM=RPqwgQSxetTbpOHDZKLT|oi@e)5>NKf*F$5yq#;0kH?^hk-vqYj;| z2QPX5IZD_LZRw^82KQ)Gte=jCp-=uCVEtcVGwQ=SL<%lj@;d7F4^)l1hdTHkgDwcf zU#-EjzyQwjkJ{*{(|Si3It#y`kDa^R{_$SMrHb$}<=uC+*kEe!UCTi0+Du1zNxsF^a1zijG4_>)m ztWnju+QwH#`yd)O>H)2+&RuJrjkkWdcg4jusdM+nY!nsRt0T7tV*9TMBR@HNLx%!; z-^oT6Et9t8>-4y0$H9mjY(oM{Q6Hkfdh*`u>~v`$Veerj?BK_G?!)dJ;7x5y|D#76 zqTvu+LsLBQ$IQ`4-Ym%}R%a~LB6qvgzBgfaxK29)X61+l#o-T(lhvgVRuRVRhe_2< z4zM6EB@LOO^U|H$+>PFyf=y$gRCDDD8XoH3x79YrI{4Mi)2T4+3-!v49JG-!>@xip zmn<4b)|EH>^aO2q|IQH_s$<}OIuZ_SPVY8m`*vBsE}RU6l1eT4&>B((3SxxD%7@kG zrcg1Sg_kc;fWqD4yR?WK9Q)}F(rs8QM7*8DLBB5S0UgC6VDC{x&07JUg;xyI{OuXj zZE;_L8ttvjiIAeLiZ-EPf9eG z!;YHgsIMwp=P;BctBY5s-4iM8ifH$x7XV(~f$nedokT2GA+ik4i2 za(8*n@l&tST@CdRg+>U|u?Yv2LkF>pYNf@f8Ksi4ONm4I3)saX4GV497EGV9+q1+20#PkJdE!Xd9PL6>YJZ@mXHO9csSwyr& z4a$_Uo?&)kAUu(Rkc#Oy)Ic!?K%u3YMXGeriflQA(Z&b)Yv;S8MqtDms5-h|@mH`9Mo&FH#%y{9tF~t|gR| zcel$<{)#RFrGsi@O5VP0^rjD&hoHnFS5e6|Z+rwZkqSZ1JHFf2=@AR~Du$$m>=;)C z-MN4U;hcqJWUsosfaP?pXu~Pp|f*hha zebP7tGxOD{p;)Mfvh9*7;fWLuUZtzYO+lDXPooq<7xX(6g6qA)5AZKXE~n)NxDvhk z!gAfsQ!D=*tU9;KY_QUKdvjM5ZNB$H0GW1XF9y$B1|Y`{{u7{bvtzOF#bBis|B%w2 zDTz*VM7#}zu9kdiNMq+X z2+k9 z(+EK53Rsq~4$4zDW`O8!24xNgl^l@9!Ltx;>-6QRX-$ox5F)_ip~_KRi3eAJ&yy&I z)KulV1qfV#u$mt&sm%*ubYLvGlBq+@g9mCO=l3e(FLVbM{30U|jBfZp?g2PQfJ(-1 z@GT49oBUl7{8vCyhXznS=xyYHb0`NrBxtJc*F9M?e(9@DHlD~+-|S&@wxy$#Fxv0zRuO`wHGe+msa9M=q=*zJn^Qo z<;7xTxCxiOH3hSEPyoMn0EjEa6gzLqgS?*ufd5o%jpl)wu=THF238>@bE`tbLG+uq z#`P%W)MTqEtg0~&+oGkvii&lZB~^~0qe30QRKeh$ihLXv-)_F|lfhHnY1ryQ$O6QA 
z$-(<0xlo^CVJ|wzwwf8-3&2nEu9>k^lK9twgZ#!r9fB2Lpsynd1Lm-5@hW|U&kOx0Duch?pTm5O*jT@cmIfGP3HK)WcdbW;!XN1?@!d_ zqU#WD{eq`MIk5XV<0eR~C&2~*+)$hcV0qzM-i&?rYoGn@Rp`#hxl#x)Ei+Ebr8?bS ziuP6#29OjD)InWDOp`!f?rB+pvY#ZbP9%)YD96J?C-{nQedKDc&*20j>IqkV zgKKvXgE7sfq%1>IJW5eH-=y>U%`SXkq;GGxJfdL!QAZG2*;V?<5ciP%9{&&2)!_?V zoLHqWpex3H!E!bu23kH5&Sk}g|EPrb!iFc+oLgIQ&WE(hhTi^Qw@>^P$WAWWdGRrK zCDYti391in%$nAy@#spuYW9`nKvp+3tx-$-VPL?bmvPA71DZEA8iU#lz6CeIRSjR~ zQ#P(A1|G{^d(+Y*4y@#F7BcewA}ayxyiTYi(e zJ|%jNxTTbu87mx-?R+ntk*&^Ee(zCZ9_DpTHQ?Rt8qJ;2O@5YCuzW)<(DG;JmMgw5 z(_az_NXk6`Dn360GB0S3Z`S~ehs{C!9O^$deR;5~sCqHKJ-F%+;`X`4q*azfK{to8 zKm*mkQ+gBtLRcD|o2oD`UO?*e)WK9d9Lipcm@&e3)WuB^+5YZ3<7EQoB#ve1SNd}A z4(yQ2I^Wd48$jokA%Tp`Bl{4Z-_s-wdmcU(lg@152t?SFM{D){ZY^h8UE0xJYoW4S zdR~PKg)!UIx|8~4_dqyO5;a_-xS#`o`7ICq@(aD&78;l|*;^Kg3ROsLm#%Z<5>AaS z;WrrSi1ERunZ{)XR*yDun<+X!t+n%={i?e;ATBgkW*C|P513`Ta;@-=1mH0_qmTC|7 zXvNj7#Fk_{LIU8t4`mey!EunKFTo}|Ejguz8R^}e%Dg{~-=FW^L`VEu4-VP!P}AoY zQOJKZ9lY58qDf%h24OjNffGh9DkW1Ur<$62xP0_Y+}V=54I!uBP0gaqEo#&fehm&m}Xe>YX^99C#_!E8CC$$0LW$o6fA*8vjVb zBwpNb$a$~|1m^>uZ?Rk!zFc$8<3bbSNsi0#-a|K%t=k>m1t3(q=y|k_b%iegXeRg7 z#v1MC-cL)UMZ77!X$_!btP^yHfBLhrZt%kvc8PTNiSk&#A;i2TfaW%WN=#e6OGbWs zpYOApH0lS{FGptBK=AxN4_K8{#;hI?a%kzp({oG|y!(6Jo!t=J`i~yE4Lv8~HhkJL z)5}{Qh9h`9pI%rq^NAY|p#Vo(%FqDE_d&{*9B?JkApE<67O(f_7rR0mq}MS+YCbZZ zhCC@L`~ZTLvkR*v(^TQ#48n;s>__C=aj_VIz8Gg+ThY4XpvL0VVpP`p!h*kl z<^vUZ9(tHE(>N=DkDs6mK31iI*%(=CKKyV>AB~fB;`4JHT zlrVV}-iOpleh@Xj*MgiHhfgEUX1;EaXA-ZEToX@t4zGkY%M}bGD1j6$Ybqm;p8#L3 z0$@Nk+3-|)Mm|SA?Jk$I%;0r1MJZezaw%J%Cnb1;0*pEUz^!&hkL7#fNbkOAxDjhEO>q^3z4t|M5jPf;ZtUORgyOyK)MiSDHg_rtHQjC>>MKgb@P3F zVr%31S*!EOF^de3jSW)RkGU-X%V=vVVhlXdPG?0iu8+mwV(SoiXV075Y0 zlIjp2Ed#8cW!63?j$#J5#&K4{69;{6Y`UY-DC*1_{1{rGo}R2;Qt+O&2jA0sa@BgU zeKH$oh^fg6$tZo;SqfLM^yR52nMuvFvo=$h0M&#;nnecG{WIR3Z6_Qx-?Vds(a)CbASQz|$~`PmFZButj12Fmn?$$YQ}Fp*bH&&HYi48fT`NX# zLRCvFts#k@R|uEfA}sLPRY}Xd1X*%uHiu`d6d|?thkJ;IkG$=L6ipC!pSaeHvY?q7l!%&LzZx5x*E7u)iW*?%V=x)h{-3 
zjnX;=XCQc6U>2owwgf#trB$`8#R$SPc1i0C_%aPKnHH#(PheAzwy_hSehF~gwMhct z(At@%k1U2>be+ggPg`8iT0%$mZZtzX%{`kH0`sAUI6yfe2Kw0UH})CE0?5f_14SdkAR0XX0-$EUpiT=Zp8%=k#8jq1K9@be`<2?WAa9u& zi0NdX2P4MA5&ScFBWt;)&gg!12`>DL6J;(FK_|lQcF$H(>Cj-LQ-~W)&m+!xZ+WSN z|Ajq}Q`xJU(nuG`^((JK<%tu@#yvsYwg6aNxE&TC1#hPdjvr~_ceVYd+Z?_G9SAcU zIRW-KCc`&hb*^IXZe9|6e4W_C*FL>@2`q888>s(B(tF3V-GA@npU=1ZU7MDewW_w5 zMRA+8XRV+}>{%kH`7Vke)QY|L-YeFvRuMa9jEa_uk-9bZ_`UAW&m;fk@pwHV&y({! z=UmshE+~tx0fo*UMk~5%r!dm9an(9;@@KBsBoopJ7W(TbAvi;xJLi$~=E`O!?3=Hv zQiW@>KKJ{vjrStYyVKWnVI9i>Ejc+gxSz}2QWZ7AUUa0y&XwnE@dR4ZpmCN0H7im+C`K_8Ynbt_(_ZvhrqMFgZo&eYrH8 z3@m8YGvH}rszkp?)MiW5(qJt^n9;qZjUOeHR<-~=WpN!}+zT?%3C!jQXmqJTR5KkH zdp4{#KQ3MVTAdQpfdKMUuj3dO&Z;=--wSN;F;J3w(p~^sux&e-4veUe7??H!2TZ~q z?H3;73f)$#tg_=TK8s%8xGVAbD$fg#uVoKnb@t!Q{a;;_pwkBinD(f5*o3XOd% z6sipc46Nx`VYH{rD19iju>eqJaxx*4EY%fjfKtBs(;qW&8z+dnbk=fOt-$!l#^P$e1H zp;2zCaS8;D@rVOB!Ew2L(n-7+R=tYC7&7h?7sG-m8&Kz;p^)XH?5&sB7mU6uP*&^5@MPSwZ-oD03n2KY7BPqN(yr zT8l2CAx9}Yij`+@T4($zQ@ZR9BNh2IxswcQJc6l4__sCCYC$4PN{0#9Pb z?+C@7;n#qrrMYf%@wlaWiR|n^7xNH7gTF6o1Z}%I|G0N}U!}_j^z`X&Un=6;8mw)C z_TdF*)OV8wmw7qBI+*ZCao&*NiryUL(UmS^r&NVZZP<3((oJuNA_=~~<7*$ji`d*~ zxLt^hxm^gR4iQ|s{hNEqebUEONhW95;yJz@^t@RszS82Y>F}tdw)8}%|B21!^@xq_ zU3`vw21!eX_CCNqd*?xLITw?}Nw})#GN^M8*B$}i!V>DH8n00?MU-}JLja`+RA{iX z$Bx40`HFC}P%{e;9R;CKN`)=zF)^JtUsFeriwbh$v~b40yczDY;)Wg19bq!#1va^7 zjUOH7239NgTIcdG{Ch}+u`M)73zmRd_K3zpit7|o`ys*@OHo2aEL8{RmR6Q9F%b&C zY1BBfFqQ|4Y@UyglP*6tojj4^2qoFR-w5*DFKNOtAQ;}NKk*zgG9)d{XwVy}sO}d= z^TUbVBurnIf1538J%R{)0=b&HWE#Nwwc%Sxl3f`M4)Gy^?O%xuF`YT_gV5xoL$=h>B7gvLVZFZ z&q2le2B(R~6JjF%*6)7}Px&JFC+waeTk~UsOw~=l6sJ=}AzBoA+Is^wL;PUQOf4Q# zY7>N45wEnr1ZEX#-WZba4OzZ<=ImogROfGpj(-)8_9IFUzc`LefBi~;ONB8?pHOOv zeZLyye6o{qH#D7`XHdBw1kdYk*!&~c;cRJ~XEO3z%`_sYoRgD1-kJD{lb1@)lbXFU zlJ`{|Csr;F(4H~8@AEta`y{z|L)_`TLhHu^GpYwV z*QLy&j2N#S@w1XcZ{>CigwWf;#?ISYIi%Y-UwLa(2>F0pzy09<8)!GJ9 zFS0BQj4X{X8BroXjOBS$cT^i}exn+0ioHW%=Wp+>?jd4?B_CHML57XC)vK~xqtw?| z%0+*CZCDRox9vFt^@mCI{!Ffw(eO_2nsaRdRjoi4C!Iw^Z&nYNdQap?y(es>pM9Rz 
z;*1T&Hs1_`n=roH=A^zI?_To9evD55e!e$R+pVdqVx%^4Gk4nCatl?{v;G_UQdbw+`iEDx^3 zt|i5QN=kDc{1p?%4hSvmGJX}W|Lk3I8v5?llkbV=!fLSd4; z%E%TFpE4`%DIt{5^hHdNUk8v|;|gn(tEm7ew$EoZT1@0~CVHo@2};Lshh2I%3wH8G z#@%su`02_fX1m)zwDgYStNTm?-5*`JbT9LD}GbgQNdy+B2zcA(Kf^-%Bg5}SPj@cf zFX`<_Y7dzlqc8tD*0pitr6`@PldIX66g9)oKHwW2Ii#1Bg<3wg65x(DY_0zsjiD{| z!l7PB&T3eJBEgRJ=!+ruX{BTNe*Xj8@NKKxAJh7^vU5$7&5Y|q|%-mKN(`vOTHmEilbXQPCHpd*L( z7V7j`32MxA&qr$UjE@Rd4@1aPq28=hJz6D3N)8DBQ?eQn>+lCI*i1v74-)aa>fTc|-(#%vS)#3~@RMwfux~jw~5K)5;0z*fq zAiI3~WDKUUH=htC2ijt9=t(m{8G#L*v*RPN?m8>%1vwL+^N)l&^b=C)69W>I8x18z zo&n^ZH_y_zyzV1$Ru5yUDt0e_A=m9w>x`cpA<8%f{`Ii0$Cc5AgApai?F^hE!+3&5 z4{s%@F5gl!Sqv#gsIqI_c%Gr6`lvj!9#Q!E%cr?7_Ipl~b2noZw@Mv~$J8MS2O;m) zG4S?{i_hV|hwjMc-ant5=vupJl_ft^ris#BlA|z}p1z;FXN=To>umMhfZaz3`|yS} z6GMkkX4cx)?g~<3%nG_xy9YHBa9`gcx;zvA_?10vx+OYD+>99=N(cBi+M7+M&)Qqy z+l0cHh2`>O&4+c=!u8LTwXLh&^*rHH)Hz*B-|Ao!b@Z}|I}U|%6@YhRwNFX?usj0@ z5{INf*JnoWb%l%qgx9{!+l8y!?#^EKf3e=baM|@nKs((B`9!Rt1?%FST7Xxp`eI7erX= za!Nk_&ic8WLE1Q(j9}8Cp|e9ZjvlA^Z80-h5Km^8S_`zp6Eo zS(K_mH=$V&{NwgUdlAxlYjR2(&_lDrc+a9alq!cw=ua%~lp|;#Fm6tF$ zw;VUB?9n=#-gNjL;PZLot*GIzSpEL$pgr`&_BQqBuUprpwW_#JNX*<9E|GMGV-w+x zl2{Ji>tS3OT8#!P+tt|^ZZSH&nZar=#y2tJDKS#$vd6>8shMDDJ~!8>zn{vt$hV93 zBuYD)(J0BOhx~U@6T9f&w1oKC2=S^zuR%LpR4ZOKQ!MQ`InV&)Vr85EsK+viv=NMF zs(T-uMcdx|G{4^B2%W#X5Pff6@ON_c{WAY@m+}N;q#x5dXT4;qt{jb#sf2pzpz+)n zj1j+HZDc3TqWv=k-v5K({L}r<(;^{c;28&FNOOrbd;@k>gRR*T@-#xS9)|)|w|k5v zVkf6|=7HM!pY`3Fu|c@^@&N{-JCrNsTN|BN<))hw$Aof-ne<}s>D2E(|7Sw+WG!?Q zKOhEvpicjeMhar`U~H;wY^?cwEe}SWAN+lI{^{Kx)Y8pnh4aQ6S)V3(@rgN2*qE3E z7I*MPj2X0WG$|(WtEtJfNrVM;`pxByX`6)}wrk}b-CI!e$SOmyb|gR!G$v$47v`l6l^Gfn=6D2DCT{J5eVydHzc-GXJ>^Q7xipHU zMh|Ivz*nb46Fl3j%(qXU1QpQ;UDjG=7?k$DIL*ha+;Mrz-Vd(P%6ZW$|NPr%J5Mu@;LITFW4 z&=LKe_oj6izj6%Ad_qT=rqPf=OHjD_TBS2V)y-xv7f>+-M+++8txlF_BZHeM*i?5M zDDcv}r9-Gw4zv&?~@@*NV=GSBP6TD6H{4 z>oV!^Xg%GEFQF)pAk15&ZtZSJ=hq29%)g?c7;!LqFEID&>-3vlnZBS2K+eJYYMzv9 z$>#3wP>ocloGf1%5?I&bj;?ZmE*zAqLc|d`45MB~u%-Mw&Ivbz&HUegJ!HSG&6* 
z?l!{GBWYCepDnd&k(EZRbWr<|xn6mESHJI7zHQ%CPliP9kH6dJv`aDp+lJYyDnZv@ zhO=Tje%|^48teU`gjBwU^Pz3NushUJ=>CVsBYkYKJeViGwK}f69Hnc5)0Zp#=ENpw zlkiaumgbbgFU=2HXl!pqH^IC#%nE`Kq=wMLWoKI(;ghD>J;+3%5C3v0FFlY+0{#OW z?SJSZz}7jbbt#&v6YW`y!TYHaCn2KvR^4bOU|!t8+p5?aWb27A(_j^zwFKVm?QFCn zSsZO_xg@7KYC`i;JChqJ%z@FJ(FlbjkY|Gb3;}n+KrSx${xQsOs4Y;Z*PJ$dFV0CK z>LK8|G{j&oVDCOOg{}K|b}@ZD*mG)?la!p7RQ*;bY4o@tsay1B@MeEIBS9?EuF#1| z08v$$6oFyz*0HH!K{*p4J*o=y9Iue>PwKdkf^SpFvsHr_*~_8$X44GiiiF1mu(RpXiX*&y^QK|=+skq&CiV=U3fsJ5Wl01 z{%nbKN0cZdJL%G&*L3hUv+T$TPrV7aw&_1k-@UF;k~>wp+g3UW>~=qFN=`virh2Tp zTHJ3!GDC}%YszQ&>!=`coG>)K;tUczn`|Btqu@_r&--Z=(|m*bI)A?5K`_<`5f77wi|W{00MuNKc0qlLJDA0+MIjthJ40kszj zCi*7uyGe)F)!Xn(;mtr_;=!UY7F|IIcJXP(D89_utR0$G(6%u56fq9>&%h}J&J(p*Dj za7fOx!kNk-|ChbQc&60QUshRUFLDv?%#2P$rO8MpC=w&#w~Q4GZh=BnfxoZBAN*l-MA2PQxs3?bG3BOa>yCs;jMEQ7XHe_qgabO zTLg%Z;_kZWkxZ#+k%P^UQtE;a%MKJ;E=YI&V_nr=RbWM!;t=XJ{}^_}_Hmu>sDG~` z{D!{o&&BFbWMy_{3Uk&R#HsXY@d~y!d$LU=+wG+Q6g&K@7$n_9xs#b)m9HhMo$Cj` zIw%rNSO~xDG61YHPOcV?8bQ#6FWF{$k}pe!(&GddI;F?GnhW7D-8Q)+;zs)= zVg27qXK1|(sm$nlfVP*btWu_Fm7Cq#kPE%oP$*E6JteFioI0a*^X5|?YuLnN<~qR@ zD^=AcrIk~9OOdIb@bK^*lU?V&X8Z-dA#}B2qnq^eq}M7Ya#jxmYI#T};I>R5Rhpo0 zL@QVN%tUWeZT)fe{!2%Ev262-&~|s<`9<&M?dm z>(a`4UOM5-T-7KZly5Makl&r3|Esqb7*sxc7nbwL{cP0WaO= zl&UO9#zTawGkp5+s^Fr1aBO1~JN0(u*gfXs?K#WD@l!ZH+?k>#3P2A5#O1uoh$QG! 
zGKA`UN4FHQc=jAeT`CM{1#rc(td~0ml;ZsYL%f8$8ILAr4W+Dx(r3q+B!gaOZhmw* zFz-fW*V#nhZ{fkxP2Yc_eKTWB0Q$4``clnrh0EofBX->uhaGRxh_>JlNhL-2m#{zq zpf}ic|m(M!|%xnAJzWjcioU6yf+T4)FO zrSiz{&%pNm&m%U`3jba^54X5UsLK6DFkb0+aN9q6wG z?>QD&$J}j?ao+_ox?2vT?GrU;nQcl3=d^Xo)5?^`{@3&#qJ(MWg={kdY{Fgf{^Ri) z>2@T(@4U-Z_6=Chfpwp!Y6P$GNS&y`pciV(jRl0y9^-#Nr8x3r--M~`yR3jKyiDlm zqKqIBhDy+5_QN!;WQsJ);grYiExqk%v!gZWlY86%>=mu~RZbSapOfSlE(Xb}M>UD@ z9eV+N0S9EZaO1f*^2~I4$o8*m7fHXE6y$nKyuNNjj{o3%D2(kVTF$+T*5!Au8L5F*!!!s-E`hcv99)81` z;K*dx!AVY z2FL-{3%4A|;I~a^CvN0@L)0odo*nBNdgo}X#C)2i$@uEuh_CbF=-u)Txp{DRV9VBX zJbjBD4uaSh6L!bWnEz6&=;d z0CY=Q4;}MzTvJQ(%I14spIy^+WWYp23F;GC2OPaelLv3C6WiXY@q%|~ge0 z)(1>dt@_`b_B`tX~jBYC0>NL5IVd*W}WsYxbP2vI||*ri+$ z#lpKGSjP0#B6A@=^5QpAQ86Gb_7_I$U*woU8BoDyn&5(oRd8gqr-*$Of@5fr?y9Zx z{(Tuke;sW|fBl128~HkPF%aTg&iJT1iYqxvwzr`$mYP>&fx0$~S#5{b%%jvxZSWm; zvc6T!;6GevidSbRW3~-RSz9F@4IN6y65gx|=UAonkx-CP`Up))kxyMDl@b`(I0>qT z&taaEXrlfV5`LvkLo#DP@GDyA2qG&|0^OznU}u&2=eI^hxi*C$Y%R1tN57R-dY>if z&sJ5FzN$-BDjlbfu#d22>jHWx29VV3tphM!<^-CIILw@E*}(`jppv{rzhA;v%YuMW zn0lhr;-ZJrk*iq%Wl~qyX>Vq2u4|dxlyTH^)PF7Ca51-Au2ecw5K)i0x$Z8q!fPqv7kftJ@v>4)EStz zmOh`i5EKL;`bLjPAQelHv4(@#FGAugmP1g9+KEOy@%G~LX2|cqMMZ#Eb6|wC`FAn2!YXQ3@3^yP6DV(gG*V|rx%>qQhE+Vc*ytGEH1@r4G_vSXqStsdB4O zXJrZM>rf0SuGsZR^@NBgK!pvaCS};+7#VG+vQkI=2tX&=D1-1Q^@~eQojY*qu{w3Q zvKI_33{Q|e5idk|c{ZwqpCCDs08Z-`1muV-vlEjshRH5&o8KNvlkQYGR0rQ$gxxHC zylCj@+FZZtg`e$&nGqn&uBDX)|Iu=w7_p0@WwSRlEi0qvwU0taftiY=c5#c3s;Db; zu!b2B*y3_^1>@xpti`^IkOtV2LjgQ>=5JNz#X$Z<)EJ~7b1qPqy3h=jX|KQ10pTSJ za;-K)uzvhleq;i`)KZEW4~H4D7X;4-Ql}gt8lArkcX5%yYgIc zto#%xfO?T94#Vb^B{!ocrKNR^2E{tYOJgQ|%p068Cm9RQ+JetQy+$HER}su8X>pk# zvD(b~ceoF*6gR-nf_FY0DeM7i2mjK?NHF`9z!5!%y>irjw__QhPFY9nQ|s)JC>zF6 z|7I_Ayu=xGaQ~n0^_!jH6E!E_UA#0ZDl6It(3M-`l9E2Wnl^EMnlWk6iB^$$Ki;_+ zb#)10s>I#TyP{JeKQ|QmZ-~IBZ|;QqJFQ5+jh;nTWwpPfr=>98;aN$3T^l^q%!)^S z2JJhH4dnf#v<)O9?{-MHxl8%sKU`!Tj{s!~i5w?5Y)088+o3pWq^#K3n9%z?ImHqt zJ9pB1@w>?*XZAmYO(P{AGL;Xvq#sC}00CK%LrA}WHyxRC$9E}d4oXk9JjJg(h|voP 
zm%w(K0TD7Elr+?4sn!gL!(yZ#PJAeX1Jn(Sv7bJ~bD<0m5{S!y&e{h0XPi`Wu1;1V^P7Wu!TTZmq{ z*StsP=4bY?h))$yr8C4FJ=SA^$lvqg3(&WSj(#Dm5>d$Hw+o>H6u9QaT2wP#){bOC zZzhzs$$&7#_U2W)(#OCbVgbHA@Yk#d&Gal;#dDfSW}8;9lO%7B@H{k?OP(#hOF!Uk zPiI}&&x@v4mIoTH6lF3$iTk?E6kj@Lo8bqWfcuO-0-wr)0;jtHywE8D#l>n&B8X9qFq(#_G?*^UOi!sN&@_L68EaxGT<$W{N25S)Oi!~gG{;{Jp8Jj$mWE| zqMCSj@1AO$n0TXDxg*F!wOv`-(yjElcOdyT;Cg-YBF*;tddDFo=19t9v>f@LhwFQ+ z#zR)xmB`MN%UdV)*R?y_e2eq&lG5Wof(`L)Ltj5oHvpGANS!IVYxi^Z4 z++RO{F!snXQff_mp;&sJuaR}!9t3z>)a9ixA&uG*@WAb4?jJ|OAQvqc05V`=%dE_- z_cNrYzc&EI!aM%iy)D_is2$xO*u3ND+t2a#5ISQM#}Tx@XwYOBm#9*L7kEsXXQ4CW z)@&7uvwJlG1t%=i{ohmfH^$mmoXVs${O;%M3G+KrxiLFNe;Vm7RTo=mM2)dB&vlAJ zQ0_IZPdcaor_$@t3WF)uf`;330`5(p;}P{y(&g9LsipUwr1$`XwUvT#GYNTf4XrBs z;%Fp*Z>aOjNgA27xJsK7G=zDw_p~7;QgWGyH`M4w^P_eD<=$@{{f8{OyHkltY+{3w z&-TRB%Drz5m3nwK7K8AQRVsCgxOsS(GgNB!cg8i<9MER;5Ch)h?Y3k$O&ew=BpAOEtwE;(J?tWnpMAwaCNUp-f%Z4pw{@W^wj zo)Am)_ib&Fmqj~@Jzt`#LG*X9Q;kR?(6W=3s`vSi(Lk+x`P;ZSWoC}wg4v1e;VoVN zgYW{uosXnu04ni5LY5J!MnHtOzBQe~TqBFI=5V&k(elSB*oS*TcL}ohWwGsrW~`10 zW{yq8Kf(UT*TZAVyOqtLg_4+>%`4mf{mC22k$djceuZ8`>D5LNS=q2bCk7h_jUB(* zBiZx0L;0}X3&zkOh0fmmnoJnj*tiDOrKS3u)&#esy}kM}R2&d-=jGS&vV6lv`db~wd$wAs;ni&4t1aq?lB z9e4*uiSKYl9=O?NM69;=CPzBH@kvqoSN5-`|E$9nes;duoS$5&he@uUc@dH60Hw>e zXO1m0CV{shU$`Q>vXTtg@^i=RThh`hIn=Iy4cl7V2;9@6fk2o`SdqZjIQyZg!#zRm znflNfn(Ov!N+)_Ir~)eCNTGoh8~sv}w|+eJS`KPyoio42QF|5n@%1l^Y`rsp!7M?P z*m)7afV8wS)<4^lbq(=n&3)7F_8hNI!u=Z<)a)TUe)75DFw1;-EJBu9S|<9XTS}bv z&5u?&_i`cm?(Pm$cZcjo5zqbMv8j4K|KVFZJckG4qtjr#w6KVtG07~dP%*d6N4UO? 
zj1w;raAt%&&mclnt*Z;Pk8ho7IJuDSb%Ole;96(Kpw?8!tw2s)NfX0lq&~yI%)mP% zWovEIg`uw5g`1M|hTExKn>V+rciW`!(e1-!*69^G3b0Mp3#VAu#09Kh-MzQvo*`#a||AKwIcUH-i;uXynt+kX6M8)kq&z>avEYy~3=GwoeV zg?OuO3hmPhvy6?i0S!_Rn@E{Yl>iqPTO~BHI3hvrm*4imr=*R(t)BytyNZVaY|AKN z6el`GL){ciF$OJOS@B|*!@?C$@4XEpZeIAywD%VsjoJB+=O(D{jA*{Y6@Fu{(E)6| z0dEyylpvsT;8QpB*=#z%UvyJq(bd(W!oiTDKL8=z=S%cC9$9n~01bV{NJ41y{`|a( z;N)6FvA=za)i3vaa`gNnMT-u;;&Sy1MxhzKyR1P22qA5rI zszTtmUbv097Ee(bc?xiU)_y5WS+s63$~GF%N8ZiYW;r{#FiJ9JMwp{ zrh~RR3V=eVSlL=j0R+OjmXB;Lc7EiNF?hXPsqOOSCFSter=Q>d_I*P>SiAiF&S2WH zzLkgFBq}f%NJ(@&tz2obM}pcaE}0hEgbDMcAuF?_cynA13)l*#uVwA}L=T`UlM2R} zeD~F;8R0(m>6pe&X(MKF-348Keu&;=6GWXZ2Lx06ou5pJC4ypH_z8Edr2g8#h?1@uJawX7tk*jhI<`nTn<%UF=EBQLOyLVh6jo zpOR<5nJ1m86w}kZVrGa2R5yChI8vB75SuHcO58O<1XL{*jK<6<|qd0mv56AH{D z1mA5(B`G2%;`cY6PWS&(?tp^F=P;6ke*DtXaxgaBc;_x)fY%%LkEr5yRY|z;$pcHb zNipWVuhyCI33Oc0lxM*dZI$XQPv^80=o(B9PESQQ=hxl931;;DeB@59K5-(Xw<%Cp zO;iLuwr%MZ5w*ZrC8{GfR2DS+7|Bq_ z*{r{R6SK1iN(F=1ps|%&+BA?kCb;G{#K<*4oMKN*92K_O(R21d>0;a9s)Qr-`)iI4 zpJ_3vc>el22dNfqcS}HT9o-ZFKl}U9+3xMoVRzrjuWw^VIE&NrUaYGmv|=w<+FU^`@RkbYSET++fTitq&hY zq!m^rW*6C~fo+h0pW@?)nMzUP>dInH)a>JjBMFW+tXNb|_a(X|ChUBV<#uN5gM0Yq zfVFUOSy71qb{N8VN+ol~`g8ksIiTQbB76%(3g3BPHH}XgE6sasUAg=lay%IsxcE3xk6!M3xk+5^1CkI}To8k)E=w~8s#T^`3M+V-a% zNs}xw{yEDvIb=6QG!odrQkPl%d?UER6Z zD3W*E_j?b^cd-iG^=U>1hLgNOA%9Q8&|i?6(I95yR^ud|s4C2WKa5qV*(UHq`I!OP z@9ivITrnqa>{D`Qr*YEIZhBzou_hpZ!n+X>So+?)m#c^jY*j`U4t)-?wytvbDXK5M zuV*blNR|{qHE%ZTM4C(teDZ0ur-+EcF~({4LOWCbGsmQ^`ba=u_#%qB@2;r-AD9fH z=lkwmzrpLttJ+@sIQwV8&s2pdB|N0%DhA2emJ{r#*CzQM-&_5u>4XDBRf2_6tql!= zvsR}m!1YP_Q*cY>aU8SR*91ZJ&w;uM;yb@T&VSZL3aZiw(`7j6L@$137V#s0^r>^B zB#c*@KXbKqvyRn4aEiPOZK~Em@N05ID<#DlS?rbAYN>@SufGN7`;t18_GHd>*}b0c z+f7PJAiOBz9hi?+Y72h%a_lr*Tak{7&O+Cek66}WGZrdwRL0F`kf%1*%p!*hFYDXl zUK$%W69%#_{!x8YW3HDm{(-lT;uu245cQ&pl>tK&ZXdu?(MGFBqo2f=*SD%GbxRl00C=t;7|S4UiH#~&<`F=V>KKq^Lyi zo#ZS9>gn+Ka_Ry;r4!Iqk3+0(b{kZPG5~1kKC_bxu(V)JN2hM(xzL7dyKH`N{KHhL zz7@e+TXm!2wb>X-OpGj1ooLqV_1v~|hX;N)MFyCSz1Q6XETbcJ(Y;bl9DE}?-|u8U 
z5^v5z*;x9_a@SYbHZOLj+1EmLe(tG=MaP!iV;TFzSM~(PG&;(2zs@}wlTyr{(U~?x zQ{ty@EUl_fg~H4H8EHbuA3RH6e8ulm)K_}`|HwnYV#@nx_kHa3A&}!vU^!BXm%U2V zI7O`H3K(?b-pE(fx3#`s2$G<8ch6o;cjShZT*V|x7XU{>oGiH?!x z-l?shm1O|=t;?_FU41)l;t1Js4OoIt+!h^H+JD1j!kb8j$CS1J2`Fnh2BCZ9*v0~O z>e=oufKq=`^L1(U%+FP-Pqb}sf0&K`IF~T0ZJcs_LqOR z5z0!s-kkIq?4B?3p|~Yjq^zH4N#NFl zsOaETcWhzGQCD4%_bzUIRVQ^YID=p8Qx@i$p50mZpnVCOzl#JN(V5(eV5Oln7#yAc0GkZxxvb2x$*w{{-%|JEsJotSqznyZ^yuALvd zP~AMi*>EVzeb^lch0Tb`kA2{;EAvxZkO}!nG%gcjTh4@$jJpHga0K0bkJ&g8DL6kL zJWX~ z*6NXnC}Za*#=l!i5qnb~Wwa#fVTcDM-5DM@D#jvzLt>%ViUM(;kpDr-jx7h7! z_msHB>Zm`_vsq0*9nKD*nrv?G6DJQ7475BMN&O&v2+`=9F zy5O|nu(O!A=1{bJJ**^i{ zI|0{gqJ7`XE;z3?9P7Orb`@W9Tp~%=g-U*k>$0|W3U5(E-|Wm@a$_4uWI{k(t1bde zC`p0JNsGgsKOce*mNc#po)(-=zrSQVc^*!LjfN4XSwHR^Z4CU=a{z#V_6GFLcH;i8 z*kWh7Hma+fESOybKe(M;PoP_RYk^*pn9P9i4BRKSsm!7Q3=nH(rA${+>5dStA5I4< z>%v$*p4Ov{IN9g{p^a2L?aBTT+i4#<)xGvbgWwUSGeNh`d=v9yNPx0t6=|*@X7ESIQ2g=cjXNaj!kQ)Zw@}SY~K93F}Q6h=()=ZCp+hbpW@s7 z`9?3&OCpa_TVh-VCaPN&DL^I%C=F%_CRL}QvW3(S;aU3mp-H){06zzW!A~?70;s8b z<+HNtnypz;;RYmms94>1;HDqTl(5P#q5mcae=@g^Od}cXpD}QasW|LQ9sKw zSz5r<@8qU=!qPeoli~%*n%1v~5E^r$kMa^ftW&&Lq}89#Do0)hl?qGaEzcG}rWqx< zzLEoh#HDKmPcwJEdVE`SaVT4nI~4A}BFx`*+?~i=W2f_iFSfcfW!4m|Zt$eu!=z2Q z;|)ZUf`Wubn^ZRwXe4)}aNSCM^f#rqh=8!Njm58sPso^*WzR%~0ERneSN2>cPJs=Dps~I^vM^tO2-?fWvR7bU%)YLN`Wt*$8zcqU1X) zBHRQ;gyQ`n3jhsg-fGqijHc{fxV%;f-Ezzcvi|=MUq_(4ZZyeLGeTxz3M~htZCmW^rvn{yW!Os@q%o z0l_-ayn|~blq2UXB7Mjmw+&5EK*HFQq=8nux(m6Lf^XuOQy7up5^!nmBZWqC&}65G6Z`BfqG)=@``V5)AhJuYrU z;XSAZkREj<=%p4Z;r6=C;(SH066lWvs`*a~W}Y~x6L@s%O+m~)rnkHMxIXE+B+@zj zd;faB-#<#M8?qShiy7=y_%a92v1y^`jloJN(LCwucbWG($sqint=+Jn6+w4PioTKs zaNO0yIW7Oy1+NKlSc`n&(l6^$q$p^=F9~&Z^ujrE%oN5;=%&F z672iUGEl1)vj@W)b4=0Rf$*BVCY(Qq4kH>?;a&%8^MKiKEPMLvC$VReFuOY(iDzMV zp6vz06AlLLI~KJ_Nez(yYxOa-7nZ`PY3{`aTH&NDcqjf$_=+FM{0zr&K|C9VR-U}c zV3*JhkI4aM48``ZzfVVytQ^hArT+}etuzGPVUK=ZY-V_x!3N`J+wMDetR72H%-T9Ka7+0~D|w*uy-fKWC)A-R|3( zp6jtYd5+tr-9IDQ0pYc`{}ansDSD%N#Tb5KclUjJbK_CLrTd9V|DUDNUfa9BN1hV< 
znpTkMUCaWJU30X1P%Nbl_C?LID{=1COS=2>=@bTE2R&JPcR{in+57-!gR_+fi1EJv zi-TFQ?eS&xsk14?D_Q?wOZU`)Fe&us#Z+L+08nk>H99b115>q22YADM)yhS{@{?k8 zt^lMT71feHFL9MY!sq3$GHHN#pVT$P29+K>g4@Hlmjic1%0-eVq5S}0Wt`-i>0{5# zYvUp|=EqxUzA)37(wtavIPD#LK_5ccwe>x>GUyH_)epv0B%)C^72tNKtcga0w&1Z= zti5F4)}$$qht+!BBKt32yZ)7&iLS(}_n2c#>W#A=(q*!@?>Ux#n>~wFHy2hS?Pka9 zzdZlIhj*~GuoSA@ix}RUy1bqUy2H(B7Q@G5mnz>9CQ$%;1(Wh>k=JQ??RE1SW^5~( zR^y;soNKt&h|582!V5y$?C1{IY+ih2>E3C$L>SYwCg!qBM9cm{apzQc!yE1O8IG$v z#kWmMHkzY#Yo%;E8${PyT359R+O&(E%?#>2>b$ANkzP$-dSy?mV# z8Sb-k42H(d!V^%bYU^NS@2kO=$lv|*C+GG$+%F{XB%_BE9LCpi`{R$P*Y=Y=(zWNl z^X=IB$@DET3ERSWH*zF#_s3v;Eu0H~<2(j>3b%zf4t;ikC5Y`qq)fI|#>6njl726S z+H?)HtuyJtfJx8GN|}W(DM6*NqgXKwS-B4%cFSW5f@h6S_lb5bV*6TJ1nqs}kq`VL z9uFrP*SaQ;X{&VL{mmc^>)7b~!Y%beTR*emREo`S~<$| z%r4SY$>1nVC~G7iKg_CMsA=YkV8UjwvgQV#UA1rAaPx)1FLJN?$$w*HTk|=rb{3C9 zNOs*!aA!XNga-P2Yp~zQ>O!-nM~jae$zv@b?7Z3Mj5p^@;ZJDzjy!g7n&?|_it3z_%Ww6MCh>#1tRmX#|W;Bd(K>Zf*-U_(pY*wkMNE<2J_vEs80jKO1=GF+F z1Vct`8+lp3|DIf=+zi6OLhp@+Y5-IMC=1Mq$$@JhiojMYj&Hhc|EEHc5x>|9&q9A%3Z$hnvI2zewdpq$-)*Pn9CJ5~> za8dz|lojEcn3?5&ByYpMydLBddiu%x_ETLd>Gj-ClbzQKV$DqsE>lwJ`aV^-&oA6! 
zw^jvCvz28sH_+>D`FSn+T80pf)@O{shXb-y)4z8SPfF)p_PrRZ?L|tecC6MnaR)PT zm$n3ohjEq!l@*GKNfwONVZfO%_i9|H@e)DXCO$yJ3Q~{(wsu7{vU{@ro2qNZbAkLV zh;ST9e?d;49}}wMQlTY%+0wRtb@~c5TlwkJm(S>~qGZ~o=i)|7FF9ZO)YbS6Gr)6m zZ@16#!-ySsm$xGwVL!^w4Xzr(FP%?r`-3*kt5AI3%WUuVa=JG!t-@h<*A;h{%~z9W zxgNGoZ*X%|+RZe62-k#RF+a1Rvhr+xUesFvig`u;G1;sSQThKkdhd9)*EjzE_xT;C zI5lIhB9hu8MiIq{T0!j@O6^&W+WULZG_h*$y;rDR`?Mmhn$=L-X`IqVQPq@E^1b`b z!+%H~dB5NHb>G+Px~|ug%40|F8{!R->i|czXgj5wtZ(xV41MI)&4mx`Pd;2MN38io zOBjrt)NXXxK1@5w=YRK4b(@o4AH`X+ueSnae8ex1;n z3RNKX2}mF?`x7=hXW6qaT%35xLIOP$Jr6bqjNxNdDSHtvg9KM+vv#?r^3S8EjN2bJ zMW5}wqd#vdPmJBW^1T&2H=HHB(w2BU&IYNd8SqQ+k=q)hM$-yTL@HO3x^#ykl< z2^+c;F0 z>x2(;tULel^YDS(jwfY^xBgfC#I#A4HW!E>gJD-0c&4e<5Tp^pV$IhmD{YeY3F8+I zM_nuP@UBF@8hpd(d@6di@Z-m)q=)p+&o9muuTIAI)^)y@Y5v^V8+#q}Qz!1&jb71| zbn%Y<d`N8697PB?m6`J}y2(u*c0EXeUO?&FRaffU;%aO%E~uGr0QI_*EI+PWNN%&OcJDzi|+mF}116llUH zkOEl#%&-|_6(9tE;!TUETUT9jxn7Go|l_e zy6fpQbGeoK-h`Qw&Oj49PFsMu)yrM$p}x44I%_k~KL76g%Z5~_e65Kr{_J&$J2$QM znz&!_`sBkC<05g18S0QSHbl z@%?;x+>!c?!50fdtXqdSxko7LaJD{f&hn!RxRlVtT`+d1q^S@-l{UHERlD5qCRi`%(`&n*P$<| z-~0I@aO8XCW%N6ZwnK_J2@6J}bF4572Ak+Rlon`nibiV_6(21VLAHm8Hft3`6qHpo zurSKwt94-JdK~px)RO2%LN%Pg_I=&`F_r@Y4pY?(Dj!`WX$DfGpPt?&fC!1 zp_-jK4B4lpGT2ET|3O9zFQJ8Jtm~J_8w%%Zk3RIp!H`W}3+MlQ5r@+fmQ;>^lM2&~ zxHpM<(f%Zj9bd!&v29-j2&F zWc0%K@ZQxWq3Y)(y#{ozw5?t-I$XiKj_)j-CS7jmUbxZ8w7P+-hc_+{kLXjhDn&GN z9a}QMV|xs7iJL*+^WHKoIeYO7f`%#%yVz=ZWHJz3t53mh!Q=xm zg*8G*$cJ(?Q0Lr1vv%bhnMGL&8z~i3q!bTUd^YM2fNs zOL}#t*TMo?Sl7vv^udKyRgtCyQFIO}d6mW6ClI zXPwliKf{R~1Iq^i+S|jW4iD`wsTXrs|7ayrnr;k5pNVWdZ6|GfGI{yWKmTCO9!%zQ zzhztm{%ypUzaP7^X4d6L*Lt%U5wzr238dMrVkw9yKcz*iQjlf~Li9Wty@7A74EgrF zB=lBu-Pxa;Xo@C53w7H{&__vTU7XMPWY>Prl?r3i+n1`Ly`X9uq(B(oS}hHwGWw1r zKK}tFz@$RC8GEpzE^ne6iKkXW9m|_rH*;zu@;TDTZFzsERWyA3f(Xs;erA(TtRSO^ zMg%W-7B)qbq0KP3=jS zoiEpuPPb#|QAsr2PjU3Kwsl%Si%RH@3cTy^^wveefs$eSet_Kd9&%H?RHhjt9HB55 z^R#r|v%;H@@5M3SmKXg9>K<7t?&&x;pLu|d4h2>&-1zbb4-^#IpLArf_)l^*$hcZ7 zVKp7X#$VGi%QuV6Q=>n4nBb+ah2aF*3}+OQ<+M)6qj#?YPw&ycQNI#|)am~vIBYvU 
zyBD+^=6vetM0#3UAGxVw43%%}^=bUW1m}y@im)~zR$!%)&zs4TIQ9$(PlR>yq9tQZ zfQ$_Db1$aJ@vS;43uO5?&JB<*FCIqj8nya4HAS{oXF+t$lif`u*sQNr^bN;I z4uo8iM74F3c62v`JFWtL_#W2{9Q$9?UH%B6r_e7IKOF1+oY=eQtaw0b4NGw*`x-bi zpL~7&Y~{6=T#c&KF*PFFP?*f+J^X4Sjx<#Iq_d&k&zo4z#o1~2%U&1?6C_3EoTxw) z;i2R-97p*B`BTMj=OmXvSg!GYfj?S!7Sq}Bz;!qTrh`t4mTpN&LrdySg>y(u1bMFp zZWSFAq%%%T&WD~fC_cN~{jF&HGRw$~-hV**t!zCk;>m1v3MVAY{JBp}PiyJfQ;ibw z2qlq6)qch)=P=YAK|4+!eNF_%R%Np#Mah8lw94+S@kUF5a!~yp>wA6yeLd+lvNO&*T#N$a>TVfamUkKDSKh8~y(M>1J zJo={?Ak7d4`gxyT(@F;DU#L<$%LA285VvK;$Ukj9Q3#_%gh6t(0UTEoto@9QQpTGR6 zdm|p$D|ahizI>NRyrpB1a1=6nTtUAWzetavt6!0S{9K#p*-$ z&j^?3?ag>KS+b!u5O=ir!_kkE+-6x{Kuo@*$qyGC=(VEozSfEjSS>4(iKM*VV^z`$ zv~4?9Hl&5id`_yAj&@5OIm}*7 zAM$;v%z1xyjv|4HSTLas|6yPv%zbPt49jp=0}tX#FcU_x$sCF@h4S%zf1;4tSI82z z7`v1f-S`pL_jZW5nsWVy%Ts;l7KfW`)Rr@^%sR8(*}2%TK-pR;iDve6(ci+5g6Wu< zLt;Rv=i8~B?W;vv>{U_4_>Z`R)1<3{kz?Pe#ME0l@Pwa%r#I*%Mf#`PS8Hw>+n39H zbEJmBLwWJAb2FLmZL~L@(?80TWov50!3SZXt{b*dyo{i=w>7mqB!ltvbCm1=(5gX# zXkh@9a2X%|b=K)Gix|wj>~o)-I6fCwf7!M=Y&4pg&`;u5{cNw$853RgE02DJO)FXA zPzHveZydbxt!(@0pyt44)#~?+ zj~*2tpL$8;@n;DV8EegN)|MRB>j0Yfkcujso^YxLuB4W!H53>_CF3ItYztSSItSHW z1_ax`+gZEVNm!p}*Zl!Xl%Gs>y$9*9QWV>RI#*m&IznYVsYBVX`g_;xhST_`tJ%`d zdK)TbS{qGdSTD3420owS*aP<&tyHC`^-Q6b40wg~jd8IC*Gvt3ipPieX&;cjgQ3YR z45X@+b2y^U#f869*Gp?-ihl-3)A%DrgRZKF5uWX8MB{MtsNCwr_gHd5H^(|Kof(Deic7{Kw1lE&7F9`F7%E8{gpF zQ>|&hmS(AvMthkvUrftRU&grvdE@(uk^z>A?SYvHTuQ9@C)~!cQt;`T?B+Ww&Z?|x zrpXmKl8^WsPXEzn5~{u-B40x=)aN(45w5&Wnx^ z6r@()fe5BWgk)q|A&qN$@)Lr++Y7zx_TwIM^65s*6$Q^!?QALDbKI`nI-d=Ry@(2S z*%*1BxZV3%!SQhp&V&(9ey&8VXXDN1j%YGze)92)`)oLbLCrvgL0Y<{X{|+~eu%7~ zbu2X1iHBo~i8RTCPo{24bDa-FU4%MWNm)ON+v~%{_j(<>-a4s$%1^a7 zWiU_z)`^Q^u8;E1o*}Mr^~CnH?>shi}|anhxIrhpTWD|FgX`& zzwHBY`^X2``p#)kR|0fq`C+2Z#h3r>2_1w>QjC1;6+^!5E|V6VAQ05jlcH<`yFD2X zM`3t-m)yqb_lG~`bbm&@04|$DU}7pSLB&pB(C* z-C4g}FDuUFn9*pNF9qgaaD?bR!MzaUKv<&;r5fi@56O65E+t=s=k`tikoov~*xKmn z%FUefaE1nXJ*cnqs)C$&a}kG?QBTG*l?ka;xrg)@u?Z5HlW0!G8aQBUl)vqki+ra} zI>6=8X~vQlQwLd_0aMj9JOs0Cs%<5CTn@?|$nm5noYycA 
zjG|=gZaMJ9OI_B`?QwP8Pi%m5R@G`?oY&up3bG#sHYwb}{2DS1pG;huHr?#R;;2Aq zlP8F3g-J*lem8@eF^an%W^8C24fF%=JyK+MoncIMEhrzN??`vEFg^Y0GtNINR7!;(EDTx0;60)sHpY7MdN zYBm#Gcn$y7ebqVMo%hadpb5)MKLHg_!D&Bqa&jm9Be#r{mbT2r0M{fR+&)MmjR8tjt4Kd5_<-m zJMMkxdbxkmaWZzIj#a$c*!xL7x=6mLqBHlmA-;Dfq#y8px$JdyU7bPxfr$dspyt~a@!e3FGFDT~ z^;~7_p(Fw3Y9b5J5^eD3KS~msW|{DVZkdQ)8;GK$k7-lTtu>RtgWiOLnyyujAr^3o zQoa^pSykA7NUX@N+24wd@gCe-#c6RSYi_oPsy{C@=QNTJwM@(JdL94n@-jsC*^gQJ zL8n~Om+^DA^Q%t!$;fxc%cl=U^hU4xzB!M7?F@-@ySRQrjY z=p8Ca_#8Mo(jBC}q3dbx$)dN%?WONKnqg%z8y!GKp~H!vRp;iT|8h^d7`w}FzLu7v z57B*kRt(jFii^7vG)hBl8Fu0W!xB3#WHC|QL6(?K>(`DBH{6cT6O8Z~&)(XP)FX?r z&6EGI)YbgPCC0_g*35RS=qa*!kIistie|BA$RI!H z`5RNboGCk`e=GWs$yz3Kd+~$eTyXeE-RO(T{6+mY(J}!Z;Yhq-+8f zq3(IY*d_hvyF@DcrCt04`zlFx(d|O5WBg=oR{iR@>f1rmH_7>^kf~z+;*0{eyBt+& z54@cm9UicD4Uf3@bOot*r$UX(^><;%GOAhDk_LgWwQi@3i&MFGTKh|VGYc|nf^3jq zQDWQfwq$c2lPLQFeH~xh%zqC7m^i{LNXuDrdwueZvAW00-Ft$DPPZ~t4%uTQwtl`A;5tcWgBVRN zVcQ&|v*cF0~?mgX9O7?)~iSc`nSd8C24dQE8wjX*s zIMHgl^v|D`{)kNh9i?&>HL~TM{KD$WW?!(R=C5a3*qJ`wI{bX&-daD=6wOJLEOoyd zbMU{C(q?F{t4Vy6xaWPEmBC0g5{*kK{TP|^?l6pAe%BIAOEb{A1+A-^jrC0LyjUX8 z-ZX4|{`eQSD3@+<>vZ4Db77X&R?1v06lxMYyQv<(H$YzoQ+|BvbcUo^a3|WnF13C5 zPDnaqTwag5ns7-t*mCMXm@&4HtuV9SN>rOPn_$WwpHvk)GkXZPM=aXpnVEqLaRYZ~ z{vyY`DO6%+%BO4Ocme4!x&)yT$7vsijp=_ck)hTQi-oSei*at5BUGqId zTA+7jhu2*T?YES#fy;YHPmT$GuDv1KE$(dH9-jO1Kg3OYTON450XWNpwbxPlNvfWW zQUdJC1Ipzb^*PLpBDkZHKb4$66LYoQ0VdP}Ms-rlEl)aE30}sOh5{(6g4>;S`iFpo zkF2LsMK8=14<&K|zoxz3+~y+g)c%{x&v&0|4@S*Vs_lV=byHiG58AQhi6Wus-5g7m z{3Hy{rbqSi(^e_*+>4L<0DKmY?vD5UI5`VUe09sU;6UAVIPs@*Qat@o_fn!Wq@yG* z2$;SgiKxb6ex1Oz*Zh?G%h2V^UY()dAoYhw8HHlS=9J0FH_df`9s#7KX(m4BoxY3R z!-U;@B6LkA*OL5lGK{h#Xq}&veWp4SEo01?8rrWm2kiOOGV7v44Q|`PfbbE45p{Du ztWW{kz=Db1zQGLe-@r_889dIzLuzZAO9JljebQsRSrQIcQ<4wAlVZujXvZi|@Q@M+ zAL2@hZDjin#H?e`rC|K29&WJN(wfIv+sY*1Ba8n7b*-kt+EB#ShUe~PiHKo}CF6u8 z!;-B^1M%@)qY|AUL31cB)=DKK7izAz$=tK?`P*^91Kv7XH>Cv0)Y8<5Ogi{q*wSou z!%{A>4N_m(lkvBu1rKWr9~XB|s}Dcl-q(MV4{A+}p~mQ6YEX-bW7Ch1iz7q*POHS1 
z+{gL}-DeK=8`7Qu6mvNbKUroKWo%#DLFCc%*#T_lB~@|kw9hT^?0o)biBTdB>rfp+ zuh>YuJW7gjNxHOD4sHQ#H_?09dgHMD$&LMBi`rH~U92>1 zglzRj?EmfK*3&6rW$9r8p-mE%CNLeRgzj?%y;}3+=ol3s4_Zs^hxa=D_tV9wT^?U# zPpL0%&NGV*Yf~S{uQf5dIXsYSr|V{1amJglG4K29C5n2G#)8 z!zk2N_wa?w(`GFjGsGs|n2{ClZTLNoFPQeVBK>sxa&7$G%khW1kNu)~uphpzZ~uHf z8O1IPhGfI?d^TzV_k7S9pBjj41}xwPDyNQO)(Xw zv!Fk4{q#u4*Hl|sWn@K}+f!au)9TuQ#UiKLXZ%M; zJjMgsm?c~dvvWzn6yK-?`1CiX39c*B~xQAB_MXI@A6 zB9ClEKrIXgrWs{gOIvQX)V8*=k!qb@EwJ$%DD~Wz4U-7V(X3B*_jIi9`fp}2vq8v{ z%}L_Iy+mlDLi?tS2)Gr-DY>9e<2HWHm=S4Sh2$7Y$~0ngm&h>8%#_s=eOk`ycuc zUFK03LB$T>2dC+Kdf4S;Y|EO#0`VDbu07U+y_Ef+|(#2`w ziLP?z*nf}xcHp#2H_!z-I$sra`m&>Q?8jd!op6NET`#PmiX|W@YqaFWwi*lf!N`xz zNDGFnvxY)%|1`GhN|UxXv+|ch8%tq~j`36&P=;P47q-Hb_4eofSs^T+cFIjJNB>nL zk%}F%xdZt$$Bj_EUOzN(!*+eNJ^y>edFe3Y58GmbE1V$F^9yH7eMfGQ@t}TM>|~ zxi*&(ZF7Vy3u)TKu1Z9W7FRP!Q$h06l4F@){y=km zW&3l`X}vi`)8ID95l1W~rH{o4W`$0pBVFZHD!0g|OVKjG>6{p)vVj(vWK1jIV2t3X zk~8P6YOf5aV5q4~(`wN1irxyh#^;?gz;S!TFScp1GL=lc^W3x%u`HmT1?!0r9~)RH z%EArwTB&n>4bQfmw#R#0QnD?fYq3_Wtfmq@S=WZ)U%!-}uu|umC8<)!$`m}*JQ>=) zFB6)p)Ra%Mi2x5pISEBXNvgem5D_oPrE!*;54tF7k=qn`~v1 zJ6s}c0r}z&hrez$9A6d8jwQEdWt)gha?wIVZ6!gi2wI7+yB1+v4_0BX9Jj$6mbXuD zl2%K)wBH?+C^+-n+m7hFlWp2)Cc7r-@A9k%*64?Q|Ib>&Mo>*w@%oCOzDzk91%5tHl4z zxKwK@_b#DOm5TnhfYQ}RLJTfXG0#w9^`lN#i3(U*4m~}W&n7HdGvB|37ajcMJRW7w z;p~6w6loXXV6RsC2`OprVPbNJim#r7RM5mc7fi64O*!u4s~R(F_uQO1I_1|Zg8GAa zA(ERJRD1qF(0Grp#-9P)3nxN(#v--d0ymhmX9M>lZjWng=hO&7Q7M zRrV8p#9ISL820wwn4GWkW?6SoBn+G`Uo1h^a?flzjvi^WSrE}V8fgG(*wBJ=Gbx{q zSo69sI9%cCQhf27Gyy3iaA%>gkXXchEMZvBKXaiS6;*hUoO_0*IM6p9^;1TjoOpM= z9!AGaAmKF;_9j2WLznHzDbjLXF`;mQ8}*5r$IWv$_mT4Q_&fklJimj)SKx z7jcIz=bm{^N~WCyv}_%;re2L08`rr*xC<@P+LtrQ$ws#1MROlb_B2REI!`-vZd_g- zWpq6?=d=?gT5-DZR$YJKFJGM58eNGcb4jzJ-qTh3cjP-^bP=e;hj z{KN90kfa-6)l|rallXlknF%1>=z5zfmXitwxl(HrZR63*C?8s4$k?za zq4szbG`U-kpI=|J7kPw-sR~oS>|!sL8jq`AGu2c|!XpYDms~tt?kVfVcb@+sNA=E8 z>X(*N#C32W;p&Cqm^@1D^A0@165vIA4ETOzAPnMW%gvezAZ51NGSs@8++ijXvL~Ar z6J-M%q8Yg?lFWF<2Vi21DOn6>WMC!9Y9KP{WYud95$VF-2{`24nQTZ;`k@$fMGc(Z 
z4=gKk=oT5;LfDJ6Ep7F_3%wt<;N7`eHW>D*@*q2X@_U)X!vz13QfV0-9>ECic}vg3 z2Sq<-%eJrF=#5Di>Ox7^gyuuFd*}y83a8`rQ~HPgadqVg23m8fMr)nem|OYp6UZD; zvvUhPp*%XCRjUYxbG6;isJW?^F8xX>LUl=IS`QoArxGc&*||GQh>6>KYQLKgRFh+| z<|95g%O`1PE|x|#u~)E)-B}R)5dU zzsA@2Ysc!r7cZZTt1(uZMtb*Dr=1D#i2I4yDN7lyDD=N`I6s^l(K3d5;_93zF_iWK zM7_;&=}~azLDp>Hx$AM;xR)KmxQ_=B5={HP)yQdNAY72T+{2Iu^I0h?oHiX?$&IeX zMt}T?tbVS?1&9hWF;6Kqd0XpKcd*xJGXxBj9-L|T^$`pYMecKaQo$C z>nHE+%kKg2gwWv{XsAH~6bFsql~lWptE+yqOk4@FolzRQGI?m>KoYA*eazNmuE=U+ zsdp?Xn62G!4HGPTGz(qN^d8`M&^r0PHL_iH%IG9X5C?Q^0Quy8La~yfSUI!j8V90o z?hhRO=g~Ft%)?2_PK5I~L6BOK)>XX!Tp9b>>H7?{gtX|&+E9p5MC5o; zA5@B&Ai8MD61rFms)a5G8AXaKa^cFgT=;4Nx$dl1j5c_NlEy4QjCwj2(=>2;EA!3A z@-*0uaC0;Vxd{A477~rtPBX%%_)k8J0sT2^&xX91CzYKX9)Mx?Px}7Sq-=5uQ>#a9 z7|(>`ueCLCP!SUqvJUJ6v^jNC%smqZ!oad986Hn~6qV>c5jJwkzUumO*UguHbK-e< zoEqWYtT_v(B&!JZ38$pMt?A{&ape6|hw+?E!#!;wBgT8D=7LTV_v(c_S6_8khr{!J zuTTa!($rGqUtI-7igMc{-6OH$MOvG3ySSSfXSKCAmWqBy#oHb|$}oP>p0fh$c9Rc0 zpntIsIV{eC;$eb-R^V2Vmgj)CzE*;5l1{^E+;0BjvXh^Gg01LMRCugqINs&(*@l1X zG470vSL0uOm2SUGKlYe9Q(rgq@d{G%HcM0BXSx-Rdi5;@BhT4m13bn) z-^ws}37S4$IhGe#8BW#b@d5)-5gr0NvNol$K8SZ`sqaADZFMBJW_>atK*W)keGS=i zszZP2Crm36NzzHlR&VM>)XR7IJcRzegnaYI}R=r^Q%K8{zLc zh@hl2#`jp*=67jnuDjj?Is3AsgFRat=wfdM+Nh6XV~>V_JMU@SsP3z!tg|U=isT<1 zof+#w2~oamhB;>)&@4bi#)(>q(yo4;)>ne+5x1@rU~6fLQor(;0`AGpQQ85)yMmv| zghRLUsfPkcUnV0baI5Fyb&0n^^A{E##z_Wd#&YY*z+dqLvL-Hpy~8zWVlvBWy8xmK z49Eo~T6f#w6CAtY$|WINgY@qepB(^Unl}`(t*FE0i#ERP*A^C=4R&Adeemxes4t>p zCO`aM_ULcU7oW+|9)c7b@fuuSw9?eKW+{R8JnuL9cL#^Oh9_B*&wr${ARi1JZfuWH zg3lhY&LaYEuxSavxaE7yt#up6?pL(i=<7Li!#Qm@^nb6b&oMNi!kCh)kKahO7|Dkz zUn_h5m+GpaVpRO)>*&kH=ur1fmJ72sDo!rtERQJaUr~q%QkLrpPQ1_>b@ql&x>MWx zOO+_~G~r`cQ>e$>l*=kh3{5XeNS5T2!}&m9qRGcE-idkl()BcsO4l`Y^oUA$z^06i z&{ophhmq&SbTB@Iqu|-70rKh)rf?GJprXT75BBeS`~N*fj*P1L$g@5MRsfZ_y4XTc z7I-%D^mBi!axEd_Y_fL_>LE6X1X=20THU#OD1;$2zt~+Nr|o)&qLW^2#f^G~;}gH^ z;vQ@4D zq0Me9B$aOqH`pG-?PqP(Oc*B9{;mnNJ@#g8X#&jR_fLI$Jc*V_ZNYq855(GN@*B#E z$VayFw_{Sajp!)+d*5{W!*X=k&X+^dRbq(0JQUXx-ASxF(+8m0DqIh7%6%=v_}Rr1 
z`o9$Q?N^ibB+K3_l^b`xD%pziAN9p9b@6I8a~3xz&3rscl^binF&T#v@YX8X9#c_J@M1RYg&!w? zC;l^h1{`?74Z4+QMIA+Hr&YbQ4O0o~RB*z=W(0B7HvoeRV8Vau*TD443nre&YB>6| z=(;A5H3hmvU6M|COqp2aTYAp($X3P`ztgPtKt-k98|!p(K$tk%7CdE4=SNj!TI$M? zokkp zuAKwb0KnI+Y77vpJ7kaYq#qvaK0;i+xK$}J<=EZf=@DQG6dz^n=H^`1h=7O ziP|k^NeQ1oa+Dc_Y(uNQ(Ll7Jxe6QD6+mFHH;Ae11AEfbRRt2rGQvW%+Y3`6Pblp= zR!_inax=lkIKn4ZiUcmUFP|Tly>mJA(jN|xl~^;r?JV4&AYSp6m% zjK=Ws7~oD{Go2)Mp2n?yHvZ}2!%w?3p)@l{^ec7}2BHepcCYRW{6_OkJ99S};VZm0 zad~zB@_Xj&p{$P+RD#@jW!)D$5EUII@mPAYT`s-#*9li=^4W@$LR7}Du2uoy$#EOd zTy&ox0WJi*<4}in(ok%tOus{s%jXqRc^l#u7L5moGaSYn)@ ziXC!ggszQJ^?}#{zN00ZW7`CUF}XLI@@12;#X!00V~hHgCuL?&i!iqm*eL>l@&W;c z>*pDHUF&PE_dadDc0u#IpSw5I5O9BAURDmrm9@!J_EaaD71QdJL3zJlXqZoNCS)6( zeD2zSG6q*5JuSMk_$~j&_Qqdv?jV`Qx6X5eOpoF5()(S~i0%sXtrZ7*X+U}X9CelC zBjHdacU=P6B!&l#jivmw+t0&oSuxB0Nl_s)X?0K?2XN7YKq!|}>=c+$GQhOV^Bn8n zp{lU99C2b&BT7R;W<$0^$1%!|In}U?1#9XwH3(}3gn0-%vI*N2l#yO>Oi63IzBy?` z;0@(Q`x)r_Em=WP4!UxS(K3R&{MOh2J*iTDy*CRGNiKqtYD&kTQ zXfL;X=(3q7gNqc<8GGO){N{At=*om#j6vdOz=K{`PCWE1q4 zl0Gb(*)oR_t*tG1P_?bN9&TDQT)c)>?Wgxq>|N3B=IG!SKNyxe7i;bzS;0Av#pb&0 zgAb!4CYhKGvWr2s(w|J`Vr`M6SPZmw5u<;+xLg-1(+8AWy*4-s+L~BfozZQ^27-84 z=yS*Th3xa@IS`_3$yUuhVhx0h8Uj186N-w9iSsQ{OUHQa&>Dk%R5ny6jq$jI#CW$H zBvuS42&M-P9Ob>WKeoeEx#7T_&Uy=3SEmtYQ2RE9l1(Oa97U|CRDaBdvY4` zK+sMP^1w>@wlNx%s3xBXVGqjL)Z@T!GD2LZv9*pPe66MY#+THpWTgp~=fy-PXAyUE z&et2F(Q$!cQGeM}xYVd&q4wPF9#}CrIM-RtaE96Ur@Od5b&gje{k3>bmZDnuHP>Y$ z+l*>Lc_mx&u$r%47rzhg+}>I_Y!W9sl@o1PIrH_k)Y^Yjj3n*LMy4<2b3S0L0bEu~ zRdWhg4cL81;oNpA;99pcJ)RoYI$=M0ri*Ka$e|{!5VBd+H-GwGJ={=l=y-UlC4=T86QmRsQ^+cOc)uxF9!jJFRi_!bIE?zrZe|%4&leA}UEO+Yp z|KR+1#_7qqU8>2gbbKs$YaA@`)zz%lsGBszzz*V*nR{-=G0&0aE!W#iq7}FVwKu|` zGoOCBeafMPmz3)Jwl+S^W+Kq0Z8tJmers{Nw6U@-oC3F$1ZE%fQzrrvq5_lR_MG&T zUQ5*x<3erGGn%vZg^$ujiue*+^Yh7091Nkkxga1wN&0WVezAL5S)1tcW)V=&??{$S z?w*6JO4>gY2-SZ_L*WsSdhp6XQ=2oCKzAc~6-nD9ObmpypPm-7q%&Phj#gw!{`>Fu zatzObsa8u~>&;zU#J?VaK&S#T0yvSQ!L4N))$~B*x zhDya=Lm8`067{VlMfevDcC%~S()>@rzz|(@DoN||1$OVuflj-<0W|Epk+}qnYKob1 
z#qU;dIQ(rBrKJ^%l?r>^PCIaB-_^ghY;816N}02J;g{*;nR~wadrMRDeWebC2V;p3 zVj>C>hw0Y8?aPMDQR2$f@HnmP>wGP>o1@a2}sa(wX`}z4AfqoyW zj}4B0pY{NGOK_mbm=qJ`apthyc*09}gZ3~uf+!_@+?-`XU4@d`xtWJEkH0<|B-*mC zEt*j38xn_7Q+fMTK`5uqvYs;5As~8pc3`x&r2jt+YAoQLPo4_$?s0sVqJe3Oi3=G6 zQ&*oPuTxsYZhx8hHs#;SYgM2QOmjSgv{7lYG^(dSIxog73TrAjT1r1CSocw;h z%6SaX_#RlPh8ISC;_KH(v*2DWU5^w}TLO%wVwx~3D{WAWDLJ}R3Rz!+P_73RhK@1~4eCj|uVDb(nMIf|yq}MtCEok-cGV2AANvPj_8*)l!B=8W`H!=|@Hu<p~n(iz-=y8(tFik<5 zjPR5q1LE^YR(c!^pL#{Q#z)AT72ZMq#25T)fOXvzXJr-g`!PB#Rst?X6~m=WMIv3Y3p`b9NcAf;zfrSB5_7~qdfJ1Cvqbn*YIXDASvd7t4r9hCs?Jh#(>+#@$$xa z#>otJ6MRZ6hmq!{poMwF@W($#I2n$!&JEWM6+38Ou@QpaxHwMmNo;yMdA1_6CVEOx zi~0YiTKblpP>>K9@nxV0PY-tG7U(^qVEY!ci>21ipLgOGEQu(@s8xfS@`}axwN8?wRP@u%*i?FieeoCO6u@CJ|#Ao)L5_D z8h5O~k!%_ivBaXeim9v+QmlM9<}GwhAV+`C%RYXjwuX(;b*>5Z%n2gCAij=`AgB*4 z;vo#onW~12_nwMnDNm*G^qy`-;=tFj{pdP=p{8-7fDndmzf?>)nRFC(aTBFL6Mg=qDr_6Ec8y+5qk zArL#mmDC3rl1=8iN$xR;9g^wyn))IX*)SL*R5RoAKdH`e#0%@s_7~HECrdn0ancfZ zAe=pF16hdr$^OGWC zxy>V_)bxFo3}1_IEi(fS%=V*L%O!J&M7p=`>~vso()man6E?Zd#b6DQJ~2w-y5+9>uLn`t3;9yhiK;wZg6R_B*q3~uWnaYuyixg@6h{v zT&YBseTb%(_plNNjGGWAkiuzT#h!fjf5dL2riwvd1gmw1e0IH8svcF?hgF&`_YmvN zq7p2*Ghb4=h(WVTbLK&=w^b^u!iI`d)YOcecneXo1egxS&+oSVwnEmBp|8JViF(w- zFkk7qdYV0$q+$ePp5#$dAWaO%SflD#`T5o!on%2XdN;;6(R8+>7*rLX*mwD6{Kxp` z+wTTBiNlR6rfFjQTXy6>RoPk_Ck5TOO!48-m>2@zyN$z@ z&P8L2>jX=tg+2NCUzYtsWla`dJwQ#3HTtz$SITp12DHE(9f&L8v4i6nBWzW1797&Y z)ojD%5~qs1=*>?Zx%tD+{`Ce`;EhI;IE^UW!b2xpZ_}C)dp&a5+t&1>wwUy zAm2lhdn@UGn+f28&>PO6MdHKHj68Yi<6{a+N{~VjI6{kf6I?f`lYL#CZC$?3 zMJu;8^Ln^M#Kn&dbDlY_ zU1gzDK};u4L2m7D6T``5p@aNvj+(yB;0((snP9HjV~!V{cG0Ae;ce2cH2xlf}hzB1A^Bh*o;5OGb8F06-#p1^ml~RbY|PTSn{(G zGn(E48L!@|gsIGk`E8aF`gJf?Mgn&VG|?H|AhPy&y44|w?1?lYAR=S2;{2cO6Jp#y zHvu06i=5n8S)BUr(Cf&Z$c~4RAcQ(K2gH|Z`~gb8Av4i1?~;t+(Xo;I&8=(F0(T&~ z4sUWDCpP51e#Vyvc@b?Z@(RTO+bQB4!*LV{glmBio_%h2x6!da*Ui6%2h4cg3-N%n z+VFo_`tEqP*SG)Y^_-sK*juR9h*cD^isINrYt}AmZ;ld_;whR&P&;O7*9^6bhDSwO zd&O?}Hj0XMQnlCb?(_Pk|FznF_}uq(U-$K1h^E_&2~VZ|pq2Xi+FM5^t`AzT8BIKY 
z_6db#se`%8=y3%3`4!Am!!=PM0OMQ3MadWR_+1a$eI+l7^U&d93@$;9c6~rENMGP7 z7Sm!k)97>b{AXUZ_EqGSLENr(KGy1C?UZqCku%bxyS=as7h`fG$d4gG=ncBuScHN0d#fpIZ`3p5KW+pY0EeEX+C&I{ zdMb=Ck)1WIW@E`=C{q&e9gIw_&GI$XjqPDKtn^X^XUP+nR$KJ65DV3XP-70uaBwZ6 zx;Pnr-y!^@6xg&Ie+|euu{Ze|5KyO99%|v2`aT{pcqrRC+C6WfeIn4L&!@)?f=@+8 zhZ}vP(#EzEd_gX55OG$~iE4gRbZ<3O7 z8tfg0`Q~<;CW5uEXC9pIUXLAno;)7Wb};{ZQUTsKHa@P^$TZ>4#C!HXN)tt5R`1@H zMwbUV7Lmt6r8rEH+xkRa$r9Fe-z7a|I##cSYR#rrK>9K)4e_=&$2*AWB^sBsba2c? z)GD`mVRK{{M}%%mdFO{dCIuk4LW0D5z{2>MMZOyl1e1Ym6odNF8s`9-fv1F_8HuA> znUvV-W*H#%+uSPN3sM!B^NW4nKgfM9h2u(oQv<73Fo|t}cz?9*AU{xu6GNU?XC^z=MlViRtfhdsl2N0EX9GTt(+u# z1-DriGn3#Y}uSkEPmCnX`8NE-tyST9yUug#!_m!{R zr(3R$A;HBm1HON^*z`}Mxf}@|9IBa?s~PP zSqUgAWEpHK&2wjys(pQ56stiJz2yJt{*n?_bVs}MdEB`7)D4{##~ zlCQNb?q5|`Cr86kv!Eb;biGrjoZRR#Rsk9F2tjyUQVGq}x1Wd|vmze{PU?_gIpy=CHQ*7|TsZViW zxMDbgNs*NJ($Wxeh!p_6qt2R+64;O%mDw{h)?uzA_rC>G`L}sp3p+b84i42_jh-1% z=Hdl!tR9jQpBmHYN+)_2zKh`wDEBDfUF*DV+6}o%jL+h|e%~uG5c#dO524X=Y9~>0 zxyRc>D`w$mO1dveGp(JcG*>lfFnDOoc4~CSpYiTM$X{+Xc+FXs@6EavIk+wt?R6Il zJd5*xP8;1=>Cr8FjqqsLJ z7+fUIyRu4qA32$PEjq`KE_r^72s!?lRqu3UjdpmKIC%o439ayX<>aU@+#@a~7h@gi zf~p9SqGHCH^c;S&KW5VoXT{Ek^tb8a4}e3Vaho!l65Jkl3g5F`Zq~`qj<&ed}XuTJf$fQ27NRuuv@nbhK-;89UQn1StEURuJRVAt~v~1Hu27vx12EH| zM{Pxvw6P|LYgjyU1XjRG4eJrrP4~LUPW&m8FbRqzDG_4SFA4hRAi+OHWfI5UPqdi^ zz10dmTATO|dO99MgudQd8uUtCi7QCf!=5CS3{I6?vEy>!2?}B=%3aRO+ZZuD z${os=bUb_$Z`WQ0E@FH=FrgYWmEUVr8Cq}3^nDCrFm z)^dPTHEB!Ups6uaDLnDip|>J>l00yO8HhsO`RM0kj-a1We(X>!oYfypt(QJHn-kYd zFcW0=ocmX>0MV3;crTDTBWph4*Pfe{rSRhS-3--{POYkZmidV{QY> z5U%t0+7RL?q-fC6blbV&_cu~B=;5Vgg)xEUe*@>`&ri`|(l{}_BtSm5VD z#0$@BNQlCYyZ5~GZ({2^=#fGznI=@p-hM7lPO;|kv;xKR=fUGMBb$Af=hs0l7q6RM zq$Br-LoQ>q$+9Ka$7+{D(wZ}dt<(h&>240E)1(~?dI;-0jM>)3!2)I$9ug0+hjP7> z^%~8nvU6@_VmZ|sOlw2H_X%PgN&S5MMir@9YJ$w&&fil~+@F7vGDf3i; zc7#%Ol}rFQSfWG6f??9KRdr=r&U$n=B_>J~++_v=#X_6n-oRi?ZdfZ7NOX+P@w)W3 z9q*1U0bi&!$OMw@Qlmxr%4JPcXKpWmwPpLF6#0T za-SR)?gc_Bod4miT{w$HI5qVo*oA~#bQbH+gyMv(wq<#0LE9A(Fl*Y 
zQ*b8^C}>w|@QP;Z`^c`Qt!QSBN(Abr@|sSobgr~cili5`&5CsnT*-R7TlzkDV{9EA zlajz|`^a7?sUuI!PwZ9~r7bF&b>^$*1MDqFAz+kWEZKYvB@T>t!Z^%H&^!(ot1s0KeY6a3WV|NqEaKZ0h$=7v3!3i2y9BL3Y;UT@vR z94Lw^P&IYfnr7*2$N|z`%U{SVSCrw8-QKb@I%H{n{(V2+k4>No)ly5ndfH>78^Jv= z_V3J^M2y26Y}=p^=&(%}%aV6m4Y-cN_s%k#=AJ!OS|)meT!Z?SAxiZ9 zR$q5P&l=r%km$+`EK{hUd{W>?Gm=ytN{uA7$Ffa*1hsq0siv)G@^!nIqG6STL_(v5J3zkYQ9%ONCK zR)hr}i<1mhT9b5IEF5N^XZ|I!1h=EqD}IJ)Ae3%-_3zY zXy0Mv=FjZa)A-LbcP?4Cv-=L$owX~R)PIyGzw@>o`z55+Ac^*eqpAE7%V{0AbGrEb zv;9`SF2tWsU)3{4c;4$y8BJ6B@$@dB;|eLwKDM?o5)8Ne{h1<6`~_I7iMAhY=`!`V zOS60>Out`{qCy+Z&B<7%w2E@y@|qahBT9gRh8R2Dwvspic;p&?onhKxqK87G7OV+E zVq%B3_-a%l%xKi|Ep~gz(5UUOkqbd$BjC#=0rtu6Gys~(pGz0igb}C3in5vtFsH4h z4kSe3RO4RlLv=uuly_I}?1(^wbQKO6 zjjO6r<@e4ni~HBKRux@dR#sMX%&qu1|3jCeT;pgFRxDA?&xgMZFx-yAmN4a+to;d2QC8Ckh7NZtbo>{p&wm`daP1OzAlS$)hMZfX20JjBX zwCrO@nOJjO7X%|wt+I?$r`P4=M6jjLib}y_6WfVQ%wQG))^KlTY!FjLrM~gkdUbfE zXVz9tGUQ#Nh-E`4bywtc@?V>Tt5L!oe?J!P@*$;Y#a zLWO1SbNv}C>h^SDKXzBHQt~2Pds}vniQTFXyR&o@q#?u>6f3yO?I|lODsv9%w_EDL zJh8CjBTa?3*DHNTPJIWfCjswi&fS+ogBud@*Q_D)k1^;xZT=%Xa%oy zJ2~?4i@z5l+|qA|d4iQ=4NrAl33=P zB}MY_pUjHC6=AmtmU?;_)A9rP3UkARV6ZsxA6lI4YE}86{4fQD`~gdF%_lOfmn zO9`jyiE%MnJF-rd+8kwJ#bFf0ZDO2norEmH#v%M%Ru>xc?d>4I&BjG9DJv^ECyDS_ zkAQBNxt{d~y~um}F`8O0cQqbyIpgYdHvI+bh%ruG*}`w+$4#>({_`xtJ20XlMt8H#6P2hYIB0o`lz8Il zT>bpH*oepoC+3wJB8lBQR@STZFwxbPEd*%)sve86?;9Iea(m*y-Q;96{&7} z5kzl~h+*&MVr&ajdd|t8Hx7kp?c$VD;cve$XI5amo18?#Bcr4b4K>90LYqKZHHS$< zZGn`8gv!KQKJ$|I5J>~$*v}%&aQG3`r7{tsOOec%g^M7e<@CR?9am@Sp>hxY5no*x zTvvB{MkXhpQp%D5#y~bE5E*=U%XLM`{ZrcN;GL7Vvc01Clf>*I?bCtPzVmGD3-inM z>#Jjx$awD|>>C?v_KK8nJzgSpZP1dM#)XhePDL5JH-c$s{>*e*pm~)wNB3|`hHrxL zYkFq5E%7cgMu!dbWCS_6WGFOi@@$DOao5nSvu%SsF0MDJ#$fn0g*7EHN*b!vfIZs2 zJK`RwA_BlJmo*%aEdpsy@>A$2?54+554O?Tk)6W`(CO_mLBg$5e8*>&q}W!Nb~7zr8WzY}uOrq)ol+R*j7DkZB=z=jrHaqUgZ6aOa|& z|Az2w@Nk)cci}9x_~?KNKn!?9y+JDIeWRc&?36z!lBg0(7zlk((`b&RJ?vl{NMuX@ zlji6PCbLFBd8?C=qY@fj^AW;!ZK^{Qy`yj@@xTKlM z$`DlnP%|Ke(S!THlSLUWP|DF 
z(|YvYMn8?EfKlg%{f>mF-p1V`dmrc3oj8g4__!DLGGM-wkfRC@o2J=G3s zygt0XIF5+F>bvrZJv6%hH~iR#*rJM?h9}k-pv?em!M6Vw7i_HJbzvW+=h0O z!O7GHQ+ZfeD~6=Y&UuKCqm-`j?$;(pi5}uWC4^34=J5A2TXC5_scya9?yisl{BJtI zRs7L^s^+JabXcj;D&e4&@mqSO2@1_p({b))|K577+1 zsQ=+RQI&W5B=s%PmDTh26DyhQp?5`^ zGW}|+9JgO(MOlC3^2F?p9}QXeKMN)DanY>wfQ}DmMW>j{MCcKkt?w6ti@7#&q27*CGi%F{gdvl*1-7$&o^>vv zgl_=mt-A3oLICAp_78&r{$d%GednPe-CG0~zgxLc*3?I|zv1Ze{10S@63tevKIJ(- z6l$SE^R_F!k z1h%Qk#|j*BX~K3sDt30oSa3H4Zt@!0TQ1CKlFmCx*)XB3S4_(7%?~}BgF7Qiv@(}* zUw2d0()2gWYc6hYWs|M>jv^kDl$3@btiX z8Dd!L81iczD7v$)8qHB#L~ce@592VAs3Q>QE*rV&))qG7PolckUkbkZT>#vi%bB&$<^el^0e zgv`h6@ze?(RRbJ6y{%$$DG@%G-Tv(bg>MyJ2>ki zw+c8^8+h6hOAnv(qurXIE>eI?M=}d)9ACe9*aJg~=)`(4!yR=U={(*SMVP-WmEQT}ekenxM&+#Q>hPf_8_Yb*9|d zv5)hWOSIZ!^Su}bm)hny z2}(yz*&B)w7+CFzP0eisK7FcNXCubz%kk}IrtLMKK)Z@ubySn2FWE_dIM@utv{xI7 zrkg=BOO+~>BpIl?x=!ql3G4=Ds?VN}cR%Z#o)&>p%c5#3o|yc19Pb?2)0 zse?OFnr6ECH=7(unie73tkSZ>bI6g3bO&ywrYIJTgqiV# zHNsS2)$__XWNYPkTmw}Px55qblxk3IyFmB@Qva@!lL$!h>muNyx9V(AfHutY?e*YFSCG2_l&=zuRQf&+v zYxC)CR`!Zc1uJn^xu8mMm7YG0RVkTaLtVkuht=Ao9t{sNtojjZxF(U^$Scc%f%w1F zqFT=sbz=)7Wf;SDwgzG95%OB|$yj__=fpLx=+4EwHuZ4zd?M@=WFZO)tPCD) zX~TlG&+286W8=8mqP@|> zSrNCS6J5xFj_I}wTPgq`s-~8)J)h0H@L~ho{&Kw-wd$BhqyXa6q>NVUqI$+{U}#GG z_j!U)Zx@sc0&RxP+@0>J=Cq%`kLS800$sWdBgl#~^h*hvo8D!7{5h>@C^8 zqkhb>h<&5Zd8~1Xn={)iL4hyPVc&bOGBhI#P(~dBEFH=}*?Y2B@{W97+=<&aZz{Q( zb3N4PyJ|WNxkx*C!@OUrdVK&k{r;7Ce9+g6@Yd$+6%*SHba{Bq`-hlh`?<785OP>p z^R0@YBVHgWx`b62b~J@-;oSs7R?(~slpGsDz>1RlTN7R`3vi@Gt=PNUO5SPuJMA~h0EMBZ zOjmdJmA>ijD`n+-FQgU9=#|7)Db75Jls)F|JEyVn^sqcR-E^u*+}p(ltEPT zZt}}fK&=M>7O7>jfB=p4Q_~JRyF6ZxNx$#c|I@&iaQrbJ8y-{F(Z}3 z7Z}6VoUB<%jG!ksV4soP;+lEiK8kH#E=(`!I{#Pnz#DeYJ~0g;izFA%ArK+JH=$h> znfgtGGvqIo%n|F6;}ykwiggKSt<#t-@tc|caet$baI zJ$t+tVJ6_AUQ@A@l1I%Ia;d4XDyi^v@Eub#35RqujSSXik1&FNyml$Nh~!j7aFQ7? 
z#X(1@WWi-%(7nTb2w;I4yb#NhItp(E17M38P#{0(h93C`^7M51zbG5`OBGITtnbJY zZf)Sy*}1LV4obpM>I_pwC|eRy8lmLgr~#bcoF%|LsodULO{4wewef9d`TW{oic5yN zF+^-77>PXFNEj0es}55hDQ*ZTVC)%eDK@P-+-lZcEcOwV%g2wNJRy_GwFYY^q7@_)=!-P zb!st?ml0Ii_V4OxKA&O@ia*iAx}}LpWPS4t^Byv^>sFVXGYhCDF3)NtyIPP;)yVd7 z>#?BO>&YIxWzPejvULg~kMLzpT7ls{m)J5kw6pd6hYzsd<@c^i&5EwKlpBjGEh{S; z{Lq~-lh#EDR8|7hXL|J6ErZNSz~;f|??(V*QBjCEV;m7y0+<^7ZY=9J5u@Ah5l{lXK};yxEN-|+rJ?clI6!74puQ*L5EUwCvX6= zua;9>3p1E?(KX!`+PPU&AJ)|^)CSGWa`^pZ4@ejVV5n>2i!fuKb1Zq#8hb(d;3Ui0 zBcjy`M$NCLr)@b*Bu&_`GiGV^j3Z4SCt@~1#?n(lV5~TwSey>t`D0NR8g6Tn*4&(G zT49%Ud^i+-Zgj^{`?PCh=ZTE`nxwK;!+?bo4H+8o@-G39pd5sZPSd^7k%&F2SDq_|C_n_}OWOViP&~@ABdhB08NbgH!?XwZ>D{|lA{O5&x zBOes!H|-(xm>=(X*(*}J_ynPaTUfBbLQry(CU`bwHow%2uJ*0Ei6-ak{yU4B&l@?n z2m{|?apJk1O5e}k=W*`K;2NC_&ARXV`|W>ADMop< z^mMhxT`)jKMbxW98t93Dy7ry&^;WaBb#2JKP(aPh$sz9w_T-lL#xlr`2-(}+o4`Kk z4qvu0f;OkRnMfwE@7LW%WH(drW!9Z298He$@b z_kz<4q7_C4UD$Y`N?$#UEo!M@sy zX`}M6;OxaCV_=w_ZyHE9Qo*>EX!Hcz3K;Njyrrny^&yR3jV#^9yw0z?1iDj}?_81& zShc5p=FJ=(_8Yd%a>v}k$oY|_Qg{@ZiJ3wU*P#b71zi0=!RrUr2-_Lmv9|;1({`I( z?Omr^C6~X9`pyHEeYDR%1K@{q_2st!>YXdQy!q?Z)hpKCzSHw~+s@Hh!$89ywmn{t zId_rwM&Z@r{{Dgk8Vm^*O*fkDxWwL2UgdBxl7-%gJEyVDB+s5&Etgypjt0_ydGnJ| zoDptLScR5MCRt@Rin7>Dk1^yB&L*c%oYc?zGFt#ntM5}?&UOCdCL$g{b~#<{0&oQS z6gP+M$6qyjRPJaa^LgCMz7{tEifP#!z2*dVpgsI?- z+7KIdt^f%Mf#>_v&z=Y8InLs_aEa^`>)3IXeDs?i#SO}^7qwAO zb4WcVe2HXF)dj5lM8m}1*Q+0&pIs83M~qvUN=T1*>x?1>^))k|J{59Ng{xQ1Xqeo3 zM8cC?K>M0xIPA-8-IBdfWY0X~ef%W2w9(# zSSpI?8+Ssoa&?%*l|5y=J;sFv+kkjsCQ8D@ndP67^&`SZ zG2rWpL;QtDkHo~UQ$bFMNQ3o!E#R&x=*524wQ+)kHho#|8arOyOf(f72*hnuT{v9d z)!@lW{2uGjlp+apaIdkBq1jZXs^_~AbsXvXw-`2kS{6|j`;EB-e5P@8W3M7k$g55! 
z5&T4p!DS$O5S+zX4uec-kvs%BdaSjhhuy(Y`{{`kNZ15#^@b?F?QBMpJ9f^zgnFRi z(Zip;;ACe5(F0UfwipAvjiqbpk#2dNC}t$A==gSMi;jsgTl+lx6N+eQDW^=@tf{c& z2oiI%o%5qlp0J^#S=_23SmqLZFf*K8_kEs1Y^!n=fHc#~J%q`-01*=0*5M;h` zJkI^{$7f7Wz{nAjjB9k-P@1n*L^aD8nk4gb=qC#4d`+o%Nzy3H_w42kRT_G;W9*lo z38_d$@`xS16H_G~y^ia-&2vWw`+^9U2S@8{gPD)pIWRddyd+e5bY&32eo2b7tD?kOPjEI*;q4YMoZD&siPCib&}=g9Y#)TLlHwmH?6HXYVmMW%1SYJ zQ*FgJ$(o5-TcB;>NP;D)DOcXEqvHsv!*|=ogaX7oh0#p4o9Pj7$;z0tBK(mCmIOd~ z$a|n5b@X)6YVr{orES1i*rMrSr6=Z34?Z7(k(fNeMpoh>n~^@V3-*jat2cGubLnD(!{IL3+C6l7CP{E5K8>-cTg+vf5A zPRRC{k7LBt{UqS!KV-ZBZ8VD`rcC)^_vFjc!g5Kp70I$5eiwYub#>*1b>)?kvgoX2 z^}lZ3II(2>eZGBud8kmEY+qP_wi*~|(Z^`|F=KB^5>Z#dGx2m$cnEiaR8Kx^pQ znC~m%o}(Zdsj)p1&{I7jJJ`SnZG(wGmZIyU0qu_9Vv@}^>KXJP6-qGVa zNG`Xyu(weaS>-ErWk-gOZQA(~z3i=-siiLTx|Sp-(o{H?x4-kJQ>nbo)GvI1YRTda z7Kw!sU|CIK++ih8ip)b&cS+HYK*gThrYO51$)D!Fhp+v*lm|pfW^oTO1eYJQ%u+#7InwNjfa)E* zcJSLP1MSnkt0&r*Y2TdMXf~SE!K352^VQQ_aKgKkczG{`tABysFvVLWM7e!X-9f<2 zO`1h*#1L|dKssC@>5ptvf|Uahzwo}Xjmd8-9W0t&kWYyP{+?d8u7KZDeX-Gu|I^-( zA{d?lSKqGgoEc8N0-JvQ_RM~!)^^X*nwVWMb}#q6^;ibi2|2XL_`qBFVd4Fg7PsRq zMY5Txh{t4ft-j3*Z$ago4nvOBq-#a1a+|ZxqfsZidQpZ|ddLQ+jjLj5qfBQ-?-lNbw_# zAftN1UH7J%iu?JYz%jK#XoamwIvJ|c`eb#8e=K9f8Y4-SJffDmf#D6og+0DpiBx38 zI%_G(&!42=F!#P*>-jyvrY-W~al8K)qKTk)Xz{?pNQ(LjoqyzLLf=D*Zn0nKqg*ob zpvA332zt0d;LTGdROM@oFDpyzFudsuNqOGV?EK@qryYl*K~gE87rFQfLMbjc!rn8g zX*a+4XK;`~Dh&RqA@M1X@EdqSv>yNPi)UkN0h`Mj&<(tI%%E6Of^Oj&4?a9W#EYB{>x{{t_&>p%L~uJiB2pBKc9 zt=Hp!0tf^-L2!W_Ii^f!soxbrO`be-YM)2N{NaXKsILPf^H(e~Z8?69Hx;cjSXe9R zohu10@CX-Pw?Wh!H}5Q2X`k(O^6p{Zv-4jS*EE@l>;IbX6%%ls-8H&iXf@ZqRLtwy z_0C_~!d7~60CjCf|1D4Xa%W||o7S?<`Li6tM|DSzy863g{t2bw!ibFWl}1$feMz3t zA(WR)$FfF`c}HL;rrva9i$Nl#RAi*QwhjYKK=4eCXm-GWz>DbPLCUH_aqsb$yL(#C zKZR#FG}{4mv9x;*CW2z4lk$Ld)}Q`b`ni%DcvE+zVWGb19b%GyghKh5wQUlD>zVzb z!~yaB`1ppbFy9wc446ICzEfAzT=!hl;3Sz!>EhXlL&VdV^uYPTiTzX^aX)1O7-h)) z3D6uy>WEmIR<^!Vhtt7gd!T!92~6S-NZSozzA7+=H-DF9;i~gMKFL9r{ou6yaIsyX z8WUx`Gh38!2Iqj%<48IR11gB4m0a42sNs)rxdf5kM>_1GA<)!*$2Cunu!wgPAtpBv 
za!Pw*H5HRfj5OU*(qrzC#oG0$s$%Rk2en2R!Nodg+H994Z+a7WvYWqkgmVP3msI1# zp)8KaXb}m4>U~Yv_yoP9;bZm?ZaIP_-l{3ZJ)=8siLrp{^0i+6rq?*Xb3S8{HRzoX z$6wEeCJ6{=D414t{X{E##_duOnWaZ1V2L;h2|!0?cR0c`aiEZ|LxDksew-{CV9<7lO}v_xid0{6VixJ1cptOcMSM zdtdiI^v3u8`;V8ILVZeoBHALyy+c7A`N}63lD3_=^U1LioeD4C_G~v4g`|E! zr4t;s=>neYAWwhJ-3vXVv>!&b+|dKkdo-vQS4xz`ia`BFXe+)ePc1D^b@ct+WYTfo z2Vc#Cu%AC|!6Ap8+46j?CSs7>dV(w!PWD>-#6Ph2zO%1On4ZEas+Bc;@|0+7s(L9y3OfJl5I@nij4RBrLqoBmg6oIv)LUO5CCh)qx@TEE! zZ{01!LG4>s7x5pOu_}uyk6}+5x{ZCE+wO7D0&6S0UXf?}Up$vqogaM}Jss7A3aM-T zV4U3Qo(pPRd|7qcx=8ZeXSC3`NOIK@zlcW^%=?zhSH5|=qwIT+rwIGJBqK^gFNZlM z{Z(^KYewxLI)A$V{=ZQ_Pd3bYFFtk0uSMI&eEN5}ju44{e1weFzlS7Dq`hi+vr z5|l+pTTK9-BC_qRzHhQQ?~OFn%cp5#af;_BBiEN#O;;4xUvgIz?e7!UALDLltbUej zbp3cWa=jUkA6vVV9_VgjQF5=yC&kd>56c<;0deT4WJhqJ-#VK8kBXwH?5RgqGQ}2L zx$HNWLlxIVWb2Y84$US}LP{h9AEjJdIFABhk|p+FZtI&)aL7*FPpihGVl&ulL^Kq@ zdK>TO#FLX|;kwCp97b*}4F@mayWQSv#d~N3x4@;-Qs%NfC%fq>_?)h9G|!kYiF53e zJA8n*iEo3h1o$oPe_zL4#;Nx{-#+=*cYsT@(tr@+hamiwUa#(cE8DqXXm#uX1_Fc3a*ZZGL@bp6F>58@MaoGjWgCv2rPGnkg(!sAPt}r5j$jeD zd!uASR@Hk?r^h@Svjazr*(eP)#}xb-{6Pwj2WJ_3_MQbP`~C>WV@*H%cb|J<8C(Kn zV}_Re8AqcYwMDd-V8G5A9fJXT<+!d{#G2`G$QAkepYQ{@#_MxA3A6uOJ2dU9#o$Zo zL~Cs5K(Z|tZ3~kgZjAI=U~)n#Bgu!k>~eL2+WAslC*S}{pUn51Ndw9B+hH=o|FgQ)lk+av=v_K7-F(0fwh!0c{2jWYM4 z*F6b`l8`1iTA^K_?bbfH5m%r`j_ZPAdyaUC^CH!`0D_B8f$_Tib?oi{E106#?a)t|8(y6EX~ zbT`Pe&^5O$CL1ya1?5k& zsjKT=Rr}c8+|!$N`Iep3S{LGP3>Iv&`>S(0@DyW%PYA8Srp4T4$jp3aWf z8Zi2?+X6zuamdV7#@&V5+m)4v;Gjw#i`6k50 zDL}DjC;~<>{vnD`k=JC%dR5n?zZRJJiXVqkx{Y5>V0T}9FK4r^?2G( zOi{qiM?zO*;lCP)w+JQm4PMrgVqb`Oh>m(VkmxUi=)dbz3a)1E_PUrM3%X7Xj_q z!NV1+iWHE&%U#(kTonr9#OMwydpT&iY)VNg9k_RifqLBo<432w9df;-*D>cpru>fT zkD)YXgJK)12*t#5A9}YCo$V7kC4Xf5dhOHI@Lp@l7xVZ}C@rlYA61Dt?2eB6f4)u} z@afAqH`<<$-==;0@xH_?@;Ku4y0!A5XHK|sg3u@`(JT;WtglSEZyH;*t&dcTm|M&I zT!Fa$K~tWOKMPwFysrgA!E1b}(#-ISf9iAfGup$=0%UbuR`PqWy9e`g-9Gi~R2hQ9 z6D}z!icA~Py`hTLsL||Mdn*W_BX?tmwxsG;3H5lUt*C?j+@$Ty|qp^nn{J zFAf@k+@*P5ZnPH&HwD`paTpZ;iOA%*4(FzsSw46`Ez&S661Hli_b9z$OK`8vZ7GUq 
z3V!}vPaYW*#F6F8E0TQ2#^TPFBkXlY!l+5+pxc)#A-kCK<$y4!7M|q|hn)lk@K}!u z5|h5kwB5jUWEnd*$Wzay(BPe2^?$`=)`GnxwVb5}P8)1=hd@V=gDChPzR#LA9?v!W zl~Na7?Q3$;albx==nz{JZL7eaq|S)rd)JRnO++03poPTC9!G9OC|Rk=Lafb;Zn4bK zd`HVtYC`Ic`FWFt1n;4RxW#5IrdR?r1g(m8#v_hPKCYr^&V;OcDkD*kk_&mpc9 zyXtc+zu$LauDyCZ;?{##XW01`PZ| zPt6AY6VUK01qF2iBpt8d;O?wMN&9fSZ7r zWhRC#6LTg}EqthB*>O%6dpY+Y7>Q~X889$?LTWvcrMWz)_X&^>O zizNHV0ap@3KKl`4NgEDc+#Tl^(B#?ZX?7b1L#2B#kshprOxryQmW_!Q!P5q%G;rS$ zIMle2Yr`R$S=x?|m3H#VeN6O}Wj3@hVdIs>AuB*E_TG9Li0@D}Ni*P-Hh925k)gT7 zvL%S%!iV@!9d%h{X-Vx1>Z1w{T#!Ur!zKrb^uAMp&#Fy=GGVd~++}XE5I3ynd*#%8 zo+@VwmS~{M>tQhwp|Sm~KR?Dp16Dn|1-*5&aoA23zm@US zYD%B;NKY24FgJAAcfq@IvEIkKgE60z*(x0{c#?WEb8fB3^>XXO)z}@i3-70=FC*eg z$imySgT%vVVsg?AnjXTSWoA+Vrv^HFc{+^umVMzXN3Z+a|4e0wp*cJ&Bz7yagNYCR zUBoSy^ip=GE?qArq>VhVo;GvdEG+^Job{CynSomx>h^EFg#CqUqOE8$DqH_pvLJW& zrY^~ePj+`hno_r=8^y(NYOg!y>QbYgJrLX_ve7CeDTz;2Qzu;R--J)W2{~;uennx?DfiWBB6Ag;xtH?+|+PFSK(`HBJXiE zRrA*F=gBp1oY%OZGc_@)8fBjM!QKcPz~Sasb;TcNe#oW3r3SSftr)M~?paHdq0-cD`?4}%cExKK z4utCRQzynBJ1L1#wv?E<*1z~C&^8@o5$ce;gG*1CU^kP+=Q@ULfiGYdb}ULZ|{0r^WrQdb)|=4o0c@p zCqrnxU$0yvhLo@|M?>$S+xaxmT{AO7(GLpFrJYTWR6B@E%m+uWXfq|lagJs?zL+EZ zyh>!0ZDx5l2!TdbWOeS)Ax z?wdHOA8EK3M{AWt|W|b7xB%y1H zGgc^5;lonbsonLH^pOGAOZc6=!3vyFnA1(-{iiKK_V*o^y;B1S*+h+zY)k&~niMV? 
z^z&Ho!<*ITbgb#D8*U-jKMjMM7WeN5!Q9`J;by(Yr4EM-W7$?GZ8x48ZoFUJPn81~ zgE(n-+;9gZO+lPNHb9uGes0bC!R|=3-~Buxu&3V zzh!2tMgRLwPy#E-U)r7TEwiRX{j$52{j#ur2^K=Pa4V9GcK{NKw0AZq8S?P`>#&dg|rDz z?KF}&!D3TB_jitwqEaGeJM?fQJphPPxpyo8e7!&KZQ=9U77XO%)4Q+ZXN z#Z-wFRykW){XdS*I;LzCtDqN+bP8;AhctX?Mz?g1E|C-vz7m6tfpjC?j2aB- z3pY@@yDo=FrZ$2)kr9FtsfGBbEP{^$7N`qk?8{bwFq|5ODsLKZ!|Ggrpu#(UsX zb;|osx+TNEjFRRzS|O0UZ+?b7{IulcJsmj-n;p|zVG6eLtEC)Oia-TAbKEc@ z#T1CzGO5?VQorY4Q7$iEpHE>AWHCFVbT<-Jry#hizWm`tqq5sqw!+AEyz9b6E-h!% zr6McQ$wKcorrwk2C>7$L`8D9v{qXeW9Z^hKsrYbrW{JN(MJ~njbTrfCagY_WL{MZ@ zAjkRdReg`D92yiFarvbtDC62nW5I^x-vl>xi}Q#5$%pn62m4@ zw?TAl5RdBHA<;GWiSkBFC-7#cLjh}mRsX2fQLdINLYA3N_ z?bG7g!kxB5rYM@7>#^fuRNp5LRWGoChF#@HutCQ_Gw`Ed#MHX$nskUFmWBntkd9BC z9}I?GT%(GxITEk zj|&`urhJ?sYCM-5Zidh_#|txcG_+f%En!x-+5QB%j4Cv&+}(PH`O3e$_x&R4wBp~n zBsu;yn*-1s0tM9xuKuj>1m{rZR8Wl^yD^9A!(4Qk09h>eCz8`y^Zl3Y{}?4L~I|yCOXAx_j^Z^ zY&amW?l!nsH@2^3&Fbs~_|yFaX%_$(eW3ZR}yq0WFiU z=X^>kzkX@!1W8|HnJW8BOV)POrIcB8PI0Svsqff;>Qb7=HVy&AP_ew64KZf=K3rE%&44*2$J-AM~h^)<1HUrU>M zGj~EQsR)o?fLxbn-4qeoa_Sgek?X2CtEt3lSHfWocmTP{0y#&hce)9lGcS^_F7VY7 zwsHsPv6Pgw79|Mt2Qe&kHF%!hflZ-CKti~4u6;4jBBQ6rpbaXkJ zW{AteNQP~n;i%LUR{;7_cfzOJzbw}%E!s>K#w=NT^ew}`z%Bo#-~On3GfR!SBUAUY zpjMwbZP8!aG$%42b70vcIxLzV<%E z?ime>t*z+@^@0fic`*U3mApbzSfIA-9(Up;P%O;ZHN()%4~kuby9lc;Oohuso0{S! 
zMkL84;sUIhRBUK6%Y8bV2o;qTnLdDAt$^FDc&gr?g+Oa2B#LU;jix(GjQl;Cg=~24 z+e)!>?Ywc1?2q^z*3=9LQ=nj>frwbT?rE-enV{}iL!j@nH&cPq1dDBZepcCd z7V`adi%N319?;^pZ}m*ONtDujsr_N!xM}e2^Z9FOhWeDUfm%Q)XuMb(OXkYP?7D6$ ze{~T{DoL(kVXWV-S4oa#ic@IxEZ5RiRt&h_b>vJtPhGG9-%sIv^>PwkK+8X-TDusI zOajUV!KH=3qo!ZrcIgne^g%F`##8N`n3l=38Kas8AzTQ4@$bbDDKcs)wQ>OA6sy5n(bX=A++MmgW{j4Ma4NmFocYSFZN-7NjbE~w3afpkzJPx6 zPD!FVnc>b%-j6igAk}9M3=N4 zB$?8livx_+b0cZN4vvIz}V zt>TBj3`sX+bYugn98y4rPGBru-RcY4I7SMyRm+%3h2&VVj)JgR4Naq;t&=^{ZQb5x zriBSo~tPO~+Vi&D7L7>0I#2{9L2&&GoJR!SSqLRn=py9aenP$3@a0Q&1h61u!Ga z=A!#hM7*9-V*4H$D7*w3u-oCO9}E{bRhyj=Po%iu+abm`CV`pj(jp`YMR=Z55f*QH zA*L$O*3H9ed~b>V^ke0(%XfVz*Y^#FVZEL`d{SHm(9a;3|5DI#(Y;)Srp*5nk}mf@ zl>gWEgqHTOJo&M}Q(cgP_(avbvVX7r_O$IuFnZ~F%(5AC(=x`MrxWTBY?PGZqSTLx zj=%;is)^UMJpbh} zMoRNma>#T%epX!*qK7R9LXeo4@uT>0e?(TDj%6G*wB6dv8<-)~$w~~%Bs3wt82$aa zUt-z*yzx}(Zr`wHSytQ~I_ysyeR1Z@UX>MD?Fi|snR1mA(lglqkA!qvq6Y$)u+5xN zqk`outk=U}9j=x05ElKU=Gh6Ctw74ktvSN0vAcl&>%p!7RaTzCJnddx{k{^JjykYQ zE>>-%jFcb@IF{-VSp{epUeRlL1NK|{#qHS%*!ZbQVwSI{7+Z6A?)0nmqhPm2EhxuR zZ-`|ln2U^|okj=znBI+Yp)x{1%jCwmwBX_*%sAltu3oMQ(rbnrU)2F)g4bOPJi-gR zFW3pAh*69BAP^q0BitqNYHgiT^Kqy6OOF` zhA>LE-wev~(kwuf1o4R^k3w@ZD-x0gBt`w!J=jyG1mbNd$=tet#~OJ+b}%WU{PX^O zHnmgTf-DF}X8v3tXaOOvuddF_GqFFJV=!8kq3~DST%DyN)6UXY4~{<6VoUmMJWBOG z@O~Iz1s_z!`oJg+0!6ai7yX~N1uU*BcH2V4tE$tu5!aK6fOSbuq=guLP_AZfd9WTs zE`aW$x~MyyO%3WKocWHUJtJ?rlc#|AF#%KeGT9?jPe>g;<19xe)1Td7O_g=!teB1U zc=u*9pxSc6f;LE6h>4;ISW%XQY-a<{)6bHCZNzT?r&6jkf9LTG8_2(9-OB#5zZdQ3 z#Inq2#zHV92$^}Nh8vk$R?)Z!g`T6-2)v;?>-Nw z$ssznZUUIAiDnx80MXY70)?$dot-JJAC<7ZtdvN|jimEDRae$4mIec4(#rC6-h&2* z*1}DsWLV=S>9Re5E@_4kUr)!;SUM$FF3F-!)l+jBGUhUcrH!;#KW^LhCAP^!*eO^Z zKN4CkaF}SjUy&@8-&nG5&#r^aOs85v>luYxfLY4VL>EU|o9r{r62|l$*-ULf>HLpBL!NF($xL zpmW_SN&$90*Wb*$Hz0yMEC>+3&fPWQgnfBNJUuhg zQ1ORrMi6-2Ddx<6Y#C_ z(J|vLndK#Teitj+K%WXG!08q}#3duf(EH)z*d#sHRDe(i+KTF;Kjn9P1!Z0%VBqh| zsiOM(_f|{ZKD<~5C<7)sT*SJ^MEkjD#(wul#gzQGnP%z|=6(vp&<6U|Oz6R?|A>diIt%rx0bYp9i-CL`iD<$9$er{#+b=%k`>IiM@{@|Rits%4ug}eAbc5+^T__@_#4OuU^ 
zgC%_(T(?vrRsM28N}95}(i(JA}^AL_mNtYdvcHs9y&AA3NI9a~6V^E8waD z7j28V)C>@{sSF-t>fp!shug6~AzFgVQFLyv`KhQdo?XDT@*@U~9I}Qo5Y75bJk|P5 z%;+*m9z6n#Yz?J4Q7OcWn6Jl~DfH8e+6`aT7cCYb3%&%?zC^EL8FO1PnWr{i1re>= zMO|H6+U8mc)E`LHU32A|DQ~d1wXbBi2Zo2_zPUBxJ(WSoHR_xa38uGkno2D0 z>zsaF;Uhg`)^%v^7{EU!4;XYk6?ul|ujn|GUEd3Tlb`GEZo~)ig7l@9@zXAtx_8-r zv}ZOBw06Zv0;mY#t*9>m$OI)PXj{Os3D_llFHixfb}u+A0jP#KU`36u%ZRXWopYYy zqKI2Dn{Pc5Jyf~>dc11}tL*PtHUN~Wy&4fN;Bss<`+zr`1Pmwnf!(UcfJjJ+zgm?V zfBEh#ZU)}2(yhpL+r4>lKJdT3y?2b5Q>h}=ZOZk*`#sr*+q2l?tJ~?5hn<6>OXcy0 zGtY6-*xSmhs|Rn67}2*TIl@~bx*$;%REvHA|&lq{7sN-K(!EaX%Su{W{Mus3$ z3wO<&B}lO*jSj#PQTSP=CN1$r{G+{{%DwIqU<`9CvQrk!Lk3^b+Nm3}xIVue{k$1}W31BoJkZSm&tE4dK$*$XOZAc2xU~D)>zbrV=Dv|U_I<2; z*pAa(z3RL3+Fbu|@49cT_oqLo9u7~o-u*V7Od9O?KBYU+4e4u}K+GsWT(ghvhrXTJ z9b6~4&zc4Ytl4S9GS=qvef82j5i$+MI>7(K=0 z8YZ>$z8($gSSt*cGGmUfaDjmn6acsDan1dsEzHA0M5Ksu;0lFaF4tI-si%~qAONTA z57Y-}30%qO7tg@1kM+O5>D;p4#}4(Weg&jMVHDbZM!_5C9?iL4Gy`>z>|)hU&fj!C zCxgq&nj>j~i?s38+*=gSP~C=ZxJt5QGuWtQCz@vMfC^T|sbsW;4B3R#?p) z2SI2`XaFa#*>{|24X!ILq)sw@q7;2?aKa&MCn8mWuS?_Rpx3hVS7wofp@F#ia}&3k ztR2HT)9!HMCRObAo6dQ*iR0?|*E2S1hTdM)I7YmXWPi<;324?zk9OF?P+DWG-IAPg z{(gR17`^ljRi*Y76i@kqmzSscW6ZzY(gmu`ll5`K6dHK(03;+jc7l ziL-at8jV7grM6NMeL0a_P`#x55#-h35{7&{OTU|G#M&ZM@hj7S%x3wDzDdKJp@FB( zK3(U{-G{4LDy5sVhYx#HBgK;E9OqO^p>l_^y_@Np+Dz5vd%O%Iab#~yh$(n4+6n)b z;JL9R^R$?Hj;9GT6#9l_4h~!RDD3 zd*VXnT!cVOB7=l>h!yAgoBTr(6qg4-KCz60{`AP#J2l;gh2#ACHMI`WEbiK)gPY}L z$N~`S*b56w?D#FblCA=7A$M|5xBTEaCb!no$eRt|4L*u@Z?yChzzYFg%QpFG73&@T z^F6&#+D`#uW!9hU$B{V;TMbvey-M6(6Gt^XX+q$ zk&n#K2IYSHR;wz(ah@yhF1$6yMh^TU5?-s}5=)y8(ZsvKyTHs?`|baRy1EQyW|GtM zE%LR0o#cc%J}90aB!wlFTAT3@tytnrz~(qfw`0zYhg;{anCTwJ0<@$nl6`%I$x*hy zq#~wU?m+W3q;coJ0tJ%#Bn5xc*d~~Fr{)K5y1w&GZ`}XkQ;_9p>^3;~VWg1H%Bv?B zB)zW=i?EoGOR1!FU(%gOia(m47C|ZXw;eL%M%K#!kf)RWe`kxCog@fNUbVtxjEzeD zE=8E$C>H&lrWm(VFtIaE%TMHjO6joM`l&XuJ(OkKdCA7)r1&Y?@33^c-SBjr9xlr; zJ4Zu0e>~b56gOf!)f;Ig+N}I=Iw%=%a^wMZSbYhTT~_0(FLaeFa2-#?MhH@sJ_IgvK9o#HYrvKt 
zYv3Td!fnytuP^T3+ux=CuuWO8gI9jbK?u}XIygU|jSp4d1xP?$RYwVSvucELTyJZM*Kz&6ss#qQ|HEn^^AUW&2IJ5DE0k?!pp;uOz++Dr3k_N|2!-CSHGL z(!h8>?CL(|hEPI}ABgI*F$xyOMPb;$USsZZT=p>lO;<-r7u1Prit?!@a8 zLV=TDWHut4G}+fXFHeI=SQE&SiB@jaXmFep(hWnPi=KgE%9F zRwjp0X9K&7Lht?_=fSq4)VJy66dzT??~+vKx+8&KM(SdTUC$HT81|Dvn>mV|YVN%p zvpcurSLdN(rrZR1;<=;Nz(d03m}LI*P)QThll~Mlr^U?ld{jdlInZ>(!NGPuJ!z!x z+7JOwBgg=Io1Ow*IRjYC{PXR2=##2->EX|i+!Y0iQ=!GK3b@=FZ}7^;?R}T5gxoTY zMVrqdbY_874bOaqjZ41b|)e2vXg?TEV8Sz|2`nz!?Yu^8dWo)=B^KRG~#s zh*ds4)xvJvx@mk$$$~{P1n7TFOHFCr91S~&2X2%jmHU;umA60f;z{1_O}uN~k6E?) zCH3@*vvYBbu-Ez5SE#!qRQI+NAH2o#q+dgu8O)@GuL>p<8^NqKvo~EY{%DJazckkY zB3xeL!D3)dwbs93`*!dOzaA|gFx5P)SVgE?Ihnj6CDX1*i;J*m!P$AQnjK7b{&=IK z$L*$zKX?q71#Lfrqer7-JRe4-y0?49h0$~(Lw_g3R%wB0ez64dP2LK(AnGKVohW06 z`}>Q{+kZ6XAIXc?tc4h*GR6VCjBL%FSA{MkCz^KIFNDUy!9Q!eC;@uGkE@ate_P94 zoX1o{mlSCnOnX)wwR?Sf7A5=8&v_fKdO=|;zzcJtGffd_kNJt^yo2|~o+(qtLiGFE zywklG^0KMpt!!!Bm^oDLHu_>)Z!URyi>L#=BBbA0LcFPQLXYN=m5`d1lq_BB%lt;L zh9RXnT0Fx`c0{fHHQ=gc-+lFu@t@jQ_pMF&4dt%icUEyYqovy zXm8pfq0!yNhFYu>7ICtf9l4O1eu;<(dHe16XzJa))TS{rSyC!45b%Grq4h5T1uf0# z6;M(2Y>)qVA`+CFu+&?FijWRGSqapJO=uRwNh*RR2-VyJQ10Z5 ztxyqkvbkIUk1?ZdS2wN8J=?VCiL|C$o1?+J}ISY zp2UPdi`&qX`m!1ig@Q_L9yLBRRIa|#gilMRDu%B@p%&^+9vmyK+Bn-`czTA*^u>H>YZ+qhss3ygtMNvH;QISF?jy zl;9ZE@BXsnqUSSpN^V#%{aWT8keT^483Xg-D_9daNb4RA<5RuVJwCZgI6Cg`9{$Ol z3i|$^S{<%x4lqbT9|A)!K#pal!WFE#yFDIfaoT+flHG{GgmXJmYd|Vav$>@qjD>Kia+_?qFOqqm< zlU0xzUud>mBU(<7))S7Ap0jKhciY-tj}7y@xn1f}wYJo*d*$Ka==9S?>~zEo=S%pE ziT)+rJN|Hbouqorc*Xhvg!3q2{sN}LvV5jFYu*v_fP0OzzJ#*#kj!0 z{oPQkZ(OyUoDH_eW#?E-HKS4J>Q{YGTf1FIc|C7Ur7F{to>O~@+;mT$tM)-6NjT7$oPo#qQ%Io4D6Zj&Gg6dO-D)@oTRFhdd`dy z04}hVpeFDev8b%Tj^>rmRWb%*?z8oWqs+UtUDL zTLA@`O_-1)A-u3xcwcbPl0r(QwdZ%2G3SM_t!O__U9|)#(m1Q_GbTXP|*y3zgUF8#0_Nl(4^; z%1)@r8ObzhKx8NspsdCHZ2Xwpd$R#_+dOOczzZrLsXW7lxo;qmpbo(K)2Di%%1BcE zC0~8s+zZxM#8cDDo%t(?XxPgmm*5>TI`)E~amD4^Es#mL1K1J&n z1q5DAA|=Vm6N`7319t#p$@5^5^Pvm)hy8H6hM|Hay=dr&UOr!$Lc~k2w>WP}W6pO} ze@8#;spPAME?u`{j504BKG`!Q^(wAXV#Z5;?56^j;rbn#&uEFC6(;1hwEB3bSU%tr 
z^`1$xFTTPBf{hwd%Z$dtsIk}I*V>PP0Pys7>io4hrMsq`p2(UvRCgv5vSTVP388zN@Vt&qx+4kd*%zr65yQ%FD^fw*#y7w~g4;i2>PH=8|um!e3lQ>h+btSb?yf z#3*uN?c;XAbm=D7$=f9pwjQCe;#5)VF@cUwHqBx`2lgzGhWsboJ=Vqs2;U9K_Tg9RJU+7YDCTXU*4%)u&g%SKp^z zQrUXiSOqHr@URZBR-q*2J`NC52%5;bFl^hb}Q)Nf72h3m5URz{1A7&XzK}w)q8v$PPDfdhoP6E zVkbL#wkK{#E1{9naXz)6$B%;LiZW(2wb1#@J-r=3vH*yQ_T=kfVkINP&H;WhlAxhK zyMUQoc~CBJhL(H@O>Sjo3MrZCYW0zHH0Sf*u0()B`I%zLe5SR!Z3!z+a$elOni2yG zvxeb18|7QUSHQgS(#ak6Ve0(kj&<;n?}7~tw>3ZB3kwyVPyS3>vrQaNuS zI9ijfGsbf_d+Dod-P^(CJC7~Z*Kdc2)j^>{P$a(sUmUe3h1QO~AcxkIRP-eYVC=}h zm<_;>JW@kl0*rDGayC;PRb?Isdp*keRN>k}k4ko-{Gk?z1Or1TD>PY7iS5wCy987Y zSSW9+sdWh)jA6<*>}^D?);G5RQ9Sf?$t-duWN`0I4v>MB4JeBwc}J~`7jq;U(GHqX zJJIN935|a)M&|#dJy--Es5d^%j5uk@7*VpgC~~Les|F+6W=tktRMojkeRWM6{{1cK zO|YB&u|d(KW8NdOqijFG32yw?hzB=Eb(L-jj!b~4S2C%cAcx4}N}lq5O2V2nt+IDK z)Yt1-lnb%-HVw)iDy~Wfdiw%(8%qjT>Y}^%aCPFK-Bw9 zEU*Tt+HBxmVe|yyMV%a6OaR}1@?SYUFUU`*@W?#tO_K4%D8E`Xq_2eA3yAo3cJc zbD1`g#-LFDch|U2D{YjWp6+W%uxsIZ2XaP|3i-{Do}RC=PDhJSon1BnAYZ(NjPQQ6 zWa}NuHyeH1!Lee)E>f+2U|nj1pzH{NDFQ2l*pL;PnRxmWs z3qSgwV)w##=@yiE%>x(VU)3o)VoZVQI3wXd;?;{*mtr)LV zQm^JqgTn(KmvlfY>au5b)l=m3y;4Us*`5&1(j!lL%W`l_-pCyz z1m8bM!tgIU6}3Ef+xqQS9!=l; zo(yOCc<#SAwZDgva;aGcY&&wt8kVn?Rho$Ru}%G9q3m;;M)sCR7}TATLq(Fajw$r7 z!m-bEMfD60Ry+8Odf-$`lbw`~gmqxD~x&egIFmHu4J54ZD&fQ$%6efl{x?dc`Os?m}$f0AN% zS~v{0GcA+sQnMCHKFxdSr*p}tvpU@pOW!U)8i9^VBU?zc9p+!ZFRr9ix(u`VOr|Wy zBN^a=4vzS}a6GFqu0y5EFJK$4?v*q`W~#^L-eVbS2dZ=89x$>vr~@4vSdd7DDkr<3 zzs3(|l6t&4^EufC)HT5ZsFZ<(x7JfBrZ!<0a@sf}+KZSJZ~zd#kn6<%_?8I3e5)M= z)i>d?uR5V}Y%eipv9;U-JgIM2s$@3)(!=M~E2{NZIO>G}AfR%09N^2RU?K+U2mrf~ z?}nUoS+|l0+xTV8CadXJR+7IA758_u7MnWSiYg_gf~JNHX^*{0e?0Fh-l$Ge&`=fN z9_c1QBZgak>I#US$!(U8%U3ujJo6~0MY{*u0;*&r`713!E=9mm4ghmGK0mRpUpY7b zqSm7F@V@kR@By*?kVRh@SGmsHNe8x&ca)VG@s0(*81S%zj zB~7-)*}ugJb$>=8K8?i(@AM~OJYO}d-c76x&&3a`jd%o*x^1pvCUL9S3uZ1&aL`FO zmKtq2A)Y%l+%3{M4h*JC10#(L2rFAms(*XJ61kdQ`kcx3046gX$l#wP#kUy-7p*1F z0d+NyW1W*>Ei%6xF02tIq!*{)y$M7Qp(kAq$g`v+D!J(2D@^9Q*$EPCwj%^_S+}lF 
zm;EQ>R|P8}CA^X~Aiae+*rUNLF12k7fQ-n1|%n+sN^C1EvRCfxWsg;-P`q z)a2dP98fzlw{x{&+}pl+fBLJO?YydqO!+52f&!>7#P3g)&Hp`7mWGIws@63*vf#qv zie)6dQkmVzG2Wo2#;fn}Mm}SCKR~^o!)Dt+qP2uVJtLY~!U+e9iqacnOrIn%#!KC? z*C!75mxB*or`wkdO_I;FZvBTXaAl+cF~_4$_vTbo5B3Y;Ttu^CIxTZkwZ00YFb2$M zj8w~cGeAEpK5F9ZpROeBNNzx?RoZR-HDV2~94oHly%&{oWFis?2a}JOakJ9T6F5K_ z=-H#T-m`R)N#F zQ44nbaE$iPNj1cE05}eVH=^ZR&-RyM19u!}<18u%Dv}jXWz;vlze*lXO*3&F-!D_?XQ~&0fe%Ax)$I1w-FXuhl;zCm@*oV!&Y@m!8lq zZ}(B&{_c;lXzA@`=^ymN?}x8CK-W~iTcbf2?ER$~qRcX9;~)4{*2Cy3dhj*of$p@V zPEm~}u4lf?5#X2yNV);gO$o79I$Z)2hAxwL!@ik#d1m^ zuNON&`PxYZu0A|&T=B=<*Tv{ys)tn73Za;xzHhCb0TZbbP|fGI7csI($;|>6P;sez zm9{JMsd|5Zf=w6Y4QsCz4v3K#Akket{*n1{5u*2wh)WNMbX%v#=YTH&W~uu4PLx%v zSULLVp;K(Bg!Sk95zL^(6fDbsC)&Lz#CCf6r_|$k%EzJqmi+lhO02!%vYF(}jc?`n zN0?xh6|)M6HpgGX)nfO*#lCx-z21~pwdiF&pp@xKO?Q`2ETC7bUryW)RqLbMPJ?FV zD>Dsk6yHRwPL|%0-sL@P#x9OOoY%hn#Tk`n#iyqElwd^a14Rf?#_pb#-hJSh;#=Y# zU_I^OChr)ia2yMBZXyqH6AE4f(^49XI;h-#{`Wi&MfKQ$WH{_w0bhWkcJdLy5&%$- zsBjeY2?5SobY((?mN-QhiFyHg1(|qWa6H&LY1;!~%c04YvgqB%FuQ%a=yl6x4kyV~ zs|c!16GVZw61gyUBf@yAovP$l3oU@pm#$anIzTn$3pJCjb81dr9)`@y0W#=_I(yEi z3&rNoVbrX45NUUuoD=oBi<^=D1yqmM4npnJ;1vhwu7Xwe%tQLR;iF&EfD9ML!RxN_ z7XRxzS!J=mIr-+f0`un>o}3{AnIi6hoQkJ$a?I?G^g0yyL<7FljS$ZW!2Nr_Dt<)N zSSjj!C-%?f&ROgpR`r_dK|tk#5>@2Fgts!KWkHaK2SnV8Aw5->4yL1P@u6D(Ufqfm zp`OfXFFp31#g{DiJdQ7N+pI@iqr#4M3AR&K2g8X$Qj6i^vl zeM#Pe*h5^NcLo5Y=)5%_A@D~s!Ygy!s}3#dgC31dy}B~sdwsmr8%su*LbAjqJR!pI zNHtl5$*L+cypir^L;8C?Ust&?P}YduzMeW3<2^pPb$?d7`=i=@Nx>Hc(5rhTJVnNx zh1}%4-E(PpXo%_?&xmFwKn7Qs5Fn|CMgGyo@Krthx(&+YfN4v*GlJr1q?*vKDb7T)uiK4jfX>j^Vgb&(7tczEbWqCkDbb`hEWrW2 zI#6UeFNIsHVV8Er&*FgLZQzDFZy0pPtsMD@_Rr7;^3BSFX+Zh&c>qPlDMgSwjke1c zAcJ(`eQ^*aMlt%+!ffbk49_vHe>)V_A;|#rP1FjPcbF@tz6b#*oguVpn*szYldcM| zKv1o&Hw~>VJ*_PeP?>s-mdONC{#l#c;oDv8bZ>NTd@}m?$OhAVB&MMrRpu2p+O3SxPzyeqw%|rC!EPGP1I7m`r`tq_(y=fB{e66@4AM*XnFo!O zqJ`8EBj!x1d&OpI3gvmie30laztbu~b^hwr`SP@8Rgt69=>14OP*9Rw@X9ah7M+_j z95yM*k*Kdsud5_y$)-8k^b5%(EV?BbbFflPy}kUZwcZqRfbcz`sGcv8axU`xwGjVB1WD!_Rao137}@HALsRNU67 
zp)#FzyP4MrAa3OVmU-1_1dvhO!U2#Zi}EAn1mfJu{YRZ0tiAa0TPN*YQ#FUvQT&)7A;_FAHZ(lqm z(PGOnkTGS}Q0%19sV=~onv6J;Ji_`d&In`FL;=e8gvWe!7$emWe0jGh^o?Bpt5=k- zqSlG2ZG*0{-h0jkNdKAEg`&doEfZfXTS)q3fmn3w-Jhs~V_VlyGxW(NZtIr5ykqv^6_R(0;WM2i&=Ol2n~%nz9h}TDp3^HQ6i_R2w*U_}sEhDgAuq~BuMYmA@SFaA6*x!S_U!js-xiv3 zXDHFW*lsGU-A))D=EJQulUa^NNRR|)MwCmNUL3#E#~*bJEHIp^AN1zp8rS9!;VsFP zmn5MlTe5Jm9H3Q=8weG6W{o^#EOc?|>M2YWc3_QmabHJ=0#|#--m5=tGFVz%a5ZZX zU4rmjIl94>=@C4@yepYs0fuQO*Rsq@Opyxn40E&QC_ek=e}}%o$U9}JqCRskok<<6 zGQXXk-23pu=18m*!jxRL1>89=aHgg`%SGd=3eoqY7uVDGHoy5|%})L%hArSdUnu0S0K$s^2<9!O=_N0`t^b;84Dh)+ZX0KNmQDLJh|9 zg}4FNF~Hci6z~#UMg9IK4$Z!~;nO@RF9KhC%JV3m`=@5fyI+5AP>hI+1rAQiErG8=3s!8T+-Cv;TqA=`oSAX@G8;{?x1>FOpc_=8$Qq2h!(_BW`YtjY>F+G&Ryc}(cF{1nEtbx^LC)nb1!$(NafQ%t<1gbN&RVht{4_> zynglL`lyQ-9!sqaAMaW20THcRT2yk2S0~Y50HDk8q9A|E)u#!Ja>Gc$VT z`}nlTWIVRalIUQqhvrZs$a7Nz>Q4WeupT^F{W0xuI5mMC`n8MKPs#EN&`RBq+lapQ z66JJ=h+sbXN)RF#Z4}rm=QbKwy=un+MS~jIr2tA+#dw?C^3;zdHruhrj8&3+ylT9` zZa{V++)(4HR}sI4be~RJTK97@Jye|ioav%rSy#f6dxeHYh-Wfq|6&eH!~ zqpfB?w={nca0YeX2>`cU8bW~u>Lug@Etl{BVf(fem2LooY5?4WmBvg0P@psng;uB$ z4>nKGu@LFSCjy7M&%w#p6)b+tCZ7JI9@ZE%;N|0y=_9CCo6D6uG_eqnsVC0JV$i<7 zG64(7-7MHVdH6bM{9tRlce-oKLJu(w_xTRH8vcC$`99_Sq+)Aib8?H+YHYwEr+y1_ zbsF9uNmGyopRozXMMQ8hdSdL|Yz<85SoL_)Jiu)r>9vY%g|T!sx*Xi^t8R;9t-OHqw^WY#C_F=m1o=4DJ}5`s6K`B5l{Fp|4?KS(_Z4HK#|Adj8LrBDoI}pygs7A&1F{3?&!8=3U#D1Ly58cN2 z$0D27un-*yed=)AXSCj(q#C*%q%1Z+H;^eMNGUgm$-AN3KT{t4#x#nVetIe z7Onnf;FJxRnuhXmKgBPF&}mBuv4{Np(RsIL{KF&b4k(k+XbSK|ib9#Wm4o0Wwg!>d;0X8}84T zuA6Tk?C<|P1RslpQ0qJ=npJmt*iLOfxZUu5__d(edTob@Xy*Dss1N!CGSASYC8VaJ zxRFD}?V^BiwptJt_<~z6#hbEl@{M(UrJ0VFR$ZklLctqXHA@b!gtYwdv!0szb9>>J zw|rizjA1)pvRSQR3aLhNJQcBdRg$G+i< zXiLo5hV|iD02*zh_oBg6Y}iyl-4165@iJ{;2BrkyCz=%~pf6nOiD9%|CjVOIjASvc zKgtO}ONz)qHM{DAq&sBDJ=~i%{3uJKEAnJV?wt#smse|=@&)l_Jf2lxN&X8gpvM;^ zo$n&-0gHb#pz!eR12Uj^m}!VN&d|&@XrU*E79f5oAU@H{Y!p;=Bxn4r;`Q}PO(CsCBThAuZW4}%df2iJn{EiG&-6SG2MD)T2)g-_bgilI+)WK&(;e+I2B<{&$-?|{ zGTX+Ak-Fmm=`1ARB`8P;e4nep{1vI?pVfUt^a%bW4;%sfE%L=gfFw+j7@GMh{KT@- 
z(Z{<4SK$|q=WI$8ta~JbniS9GznqtK#OoC5dw}hPI{yTA^ei!d_&g!{$KIwF3pW+obQ~p-AjZQH|h*E5t3|+Vl;UE zj=4l~OGAlaQw!z4$rT_= z#W^Q3wlxw=0s7hsc|3`mSB&SKEu2Baep-WY1!Mq(zne{Q zHn4_yby*S&!A9%Fb4&*Xem^{Vt}i{`bar8SROQx>R<^|nGUaJ;BpH-qAtpK*Y%62! z`Kha4dxCWj`4|Tucs%zSn^u}_WOLh(M$5R4=_v5k>7QhFjNCeyn5vla`QZUjasgVv zcIE&DI;K`8j{g=Yl!66D{^*VK+@fk^|6Gm?r-4A;%!Dg|B%WX1ls)1N36za9jw-g zokV;wwv$m^V-wzDs`Kc1R;UbL5_wWv`$zeQUmBh#%GC3RsnC2kmdZmaT9(2&KsqUr z`%7zM|1~h!0VJI~&*0tuBRxAlZtSpdKCO7#!g3e~Mzu8fVa1^Tk+*KPLy8Q_BRN0}&d~vjE&pLh6$W zk*qT8YhQ&?3ky>QVL9w@Awh}G( zVns!3gXG_u#VlQ)5$j7YXI`TRTRr3r9w127d=$nk&{K0SyNLlSe0Dt&hVD}7`PD#@ zI+2`DK?tQUmj*;-uAoNl>SVi&vd}pQF95-5Y8YAdx&6k;^g%V8cx9e+Ak^#r)!r+# z2v1*rJP~1-@xCHWU0=vm)^*)jcJOlHifSNG$(C7H*Q*m?#zzerc(PNHJ~|b#W8Chc z#mVYdGMh5dxRaC$VeOSJ-X_F8REEi<= zS0oB*W(6wo-I|*$1S{ms(HfIb_4duoFe&G!tM6_}p3}nXLxO%oRX-j)sRCj|2A|)) zP%S+<2|w<)bno3fJ}xleE1zfnY*)em{%?LY5cz5XJFV~)sdK8e&i}!|wEx!@N5M*` z38E%Trq=!6s)WZ{$$kIP=J z6j!Yh4Z2M1@$(D0$rUDAOg3>x_@V2J$V-J2CCr7DauS$NxP;p6)!r^}7AnUGVR`iU zoerC=h!rcyZ!>D~azptU$zZ7xn9<>AaajWZ6p3$2S`OJLiAn#C$6`?abxfc7$tU@* zll18ct}inxRwi>z^<>9ma>Hl=a}LbSz;3ZHAnj&Kg)6C=vId*kKrr$uVo5%g+OCYQ z2NRx=9o|$72p)%GGE>qP|905kDd_KzF<;ee_ip;lZ~HBBs$8v5$)GhLR8CK>OR0&g zNh`t48OZoJIPPB!_C!u3F!-p~?BZuXq?f)lp_Y;HFH`ljVQg@UgMd3jYno2nvt)6< z$Y6W^qrx65%igfQzsZv$mq(Eu`+NjKnYIlal4Tvk12y89pUFh&zyK`z!sqqzo?JCr z*RTO19i)~bN#0^V`75#9;EOEV>Jv6OEf&nh0r{hgd~?z+KHU1bWus%1nFs}6kq{{g z{sQ1!YyfxElTmW_3^VXQZ^+v6Op!-wxX1%suRys2#*Kl0?K37^_~B%jnvkGVnM!`0DVuhd<3<)B%Fi)y*V9Ip4JJxF`h9`?g%FJSLph zQ%8^QHir)zZ$_KXe@Nb2#iWle8NfYDAYR<2t=MW8+>v?q48~b$BEW8w0$^MYtz4{K zY<+AipFPdFno+06;ww{l5fVY{o+W^V>!NyM#)669zcG-1NG(Q_HK>Nci{D{ggx2Wu z>ccbRfFs#@+?aDKz6`LHRKvgl(d`~f&u_Eb&H+rDy`j|I@xT8!roAR_4l@L00U)hO z(B~J{3l^-p=EKDm^Hpl{Pp}3$0{*d1Bu`VZwjN?=_fm3REWpMLO22$Z)Nd!tzW*A~ zUa#F5>z?l3^h(bJSkCB3pMK9v+CmURn z__lNJknzP#Iz7UCPheT6NpUY1g|BgP0@BZ2?laW$w_k=tdld+yyqEr{9C12iy6pX6 zWwM_!h=pjagU=|Yla8rz!g=iq&FV0|5=-<;LXVyuJ(9ihJ*=$0H~Ty~1oP)Vv#n^PCoP@ZO6NKpmR8$mTo(u)6GID1h|?h+ 
z_5G*DFLHeoTLT1=JxK^kD7j1w0j?6F16h3g^D8;PdP4toQs?HI-&vjZeW&+3{T3Zw zeXf&~Hh{|13yF&ib)+>haMS~CwJllA>UNte{pTc;)230YccE#`E&w-5Z^tFya$#KU zu5rZi_V@W`z1r7%vzG5q8lT@}9j@~928OOCg3eTk&@G2u5qoBKHja{e^Z|rAN#S|` z;ddn->iFJv;p5wxg&x_w7Z3mv~M^Sy+9s2xx`<qY} zJUJ7=Sp$kBGMz@a!rZhq6(X9^CaV#@ZySHy@5}!bd>E?Hac@wDE;#{s6YD9N`<5t~AsYr&@<1aT5&WL)be=A^8x6x9whiL?LwJq8fVd%M$mHNswRcJ|^x zEY*in2SFsA1_DS=zPSs>)v*%$%lscXqk`)V!1Fhu~?X7 zCGSI?C3c>B&P-%6SGeD9%U1w&HJf#^JJ+>>UC+>1%>@I=2HL2|dg~*rzOD(mj8|)9 zhDqlm2nPDv^n%bFb$lh?u~)8QO6D-rfa@zM=EAw^0v!cAYWu$@Z{FN%oTYrbq;>3j zyX71G*YQF0mTk04*L%?Od77Yfz!7%_B7HOz%!cF?l)$BA>)5VF9;0nGBMbo~!F^=r zrSJ(bH0NXlG~DaT8EVpJ?g1<;_qQP>IT~A)Ovg8sQ^_CvdB=U&ZEiv4;+mFF0$IgKR?h>^f{26 znh%_16Q2QcCtX|$=+_cd_(M5co^Q8jQ$M!!WoJ`G@U)MnF#a`GkL>|KnxEQYQpu{B zu7G%~j%T|m$mm>aWNbd`l_ce?f7B1=ur0EbPPcK zpHlXt|5iB2X}s%S4c9UVo5O`h*fmd1OTJ z$osJ*l{tgh|GI1&BY>;X&)7z1r!v^psoEp2Ox6I(WB2^6+s&M7Z{uhGcT}0|k=K6X zQ`t*K!A2ll$PC9Iw6ZRCeJr5^L{pQZ^-hQRQ6C+}4{&@KqB4fjCPq@VO6BSooh#oo zi$eI#3FYrcfKjHsN*ap<`n7mM4l($lxVZT0-vQI>h3Q}^-XfUxiwkQ`S$99aVC5KN z8__|y4a|@)SSDG%_HB^(>kY-e>uf4cO)k*>jcyJhtt*=)1g% z77x7n=NE7Nd)80yPZZ8()oynN&-NXQR>KtHg;W4f-jo{b54>-9b}vPO*!9JYc>%Pp4f_d<)rJ-LTPtm11C=}sU^Qz_u&&%|`LD){ zxWvw~#nPbwtb++(8$Eb7b`kIZr zg&v*mtRjVtLpLVkr?TIDQNUGg%aAEB7m%=6WlvKyWTml>UmvRYeDcKdQ}TzblqTr$itLjX=z$xn*9al}Oc^G>tZ$tCS~>e=b4E*F8UkCZEv^@P zS(}$hhYW18!|vyF#-1z|U0>O!eb`lPqy1YLCoTC5%+A;^Y!|=!?!(`Fz4V7S&Qgi8e5U~FNIG^m}cM!8lGxA2J_(Ir*%4Yb) zBrboy3Mh`;tzSf3=jvOgu=X5SakZL)hKM>+h(kBxEVoQ>DroR0Ho3Sht4qxQ&iiWO zS@L>C_oi(&&Od)RK?$Cu8*t43{%6A=HQC)2kIw^9%FC$~*enQUp%&%#^7rND+h6U0 zx5&Vw!nczanjfy4H5U#o?^^H92zNV&$Bn&>QIltdc`XO({9lDMw#AM(s9L}QY#~@H ze&4Y6A&&p+RcFB2W)O8gY{|Y!fIHS0=Pm@#Sg_JsW(v!0zWl5+L;zgEB~VylSY=sA z2GS~6;53DRBzadOr)4Wv*##9G;JL!R=Gh;L zQ&w0pq+|Xd5P(8fC5r*ha-R}jdooDW$9)L+5jws8Vch(`S>}v?dic*F049gXw6^Bl z@~ImClREq{6fu014q%_bY6AI#pkxrnn1xq=ROLa*(Z@I%D$IriH(VS4L)Gl-7iZ-b z6R-r2+xI5yv1a?-%{{;Go>{qcjt-f5yM!F!@a|#t& zXUWxKn~)*QgiDtsJFsX*1-01OEq!w487xrM3t5nmB0F4%*Vkmm701VJY>bu(?32AD 
zf6RFE(AU#C8Vz{KrBwDTI;E{kW7}TN92VCzfu0V3ct&E5l_y!pr|tRWIXii9JIofO zkR51B@iXZ|e}D}9Iy)EEKej~#``#S|=Bb4ZcUX5wRi+i!@KXFT-Ao$A7+rHKlZMtm zr>edGer)&hb~t*ET;HE_&C|I0lWlDJ=FMW@^;~pU|Ha5%)vDZKKrU9+y?9`ZUqup| zz#B62y){33p`k8*@93(3Z+r2*_7M}nD7neJ=bE_Q<;1kk?K+aQs6dze$so!p1Q-T` zFr{qyp?TOm3Tv>=Aiv*Wpl|~4<^1Of)=y3ep2HR$Qm)KJiPhjgk)w1#(-$9P!A|{_ z!+c=j+6gL6DXo7rudnjHE_4oviq@c{(a+tg$4o{Dl@5R9VH0Xe_|FGyvTVU1pjOFN zG*mM((syNWWq{TNEBEl!vm*nY28v^u1aqH_Fj3xjx$!4g{?30b z5O6-6A2`PMy1c@Tpg5dK!$!S)0EwzIlE6-BX|W(CC>HvnX$(Z87vVpHKs2or>YwjO zOEvM{Vm&Ep=AdA0N8A>h8k+6dYE*I+@8-F3rp z#IHv;JAJo?clHgpUDM+f^Xm;w>al`m*H!1v*@3TpfZu9;TCBQ_B3wVn4B`3hQSEoO z3Oos#J@?Ainsc2=&(+#u^>ASQ)_&{8b3vhB?o-1O6M2KRQS2KuzV*Alxe5f%u8NQl z#b7dw^r7WxN{R&&b1fG-5ZLUD$^DbRJRJ=8CX)}(7QhnqVk>}Gj-{Ya!gN5AuDk-b zaGTidGnRhQk7#^dqI)NhcmR}&Jb=NH3QW4hcAWvRhHYq~U4u;=9nb)%HAYODv53zq zx%nk@EkbB~h=kPd<=Qq-Rddyme`T?YbG-yKIX_5Use zWaeAOh1^%gjwjujfZLuQTqT4Y$jJ7yOKkLV-7h z{nx{m{Ul%w{SHj~WqLRUL2@ht4boW9y!MgypCj-R<_G0GX(6Y6r2`Dv?0G z{@ts+w+o&8eWx#$bL1lM7|as@Cz$w2nu0II7n!N37@5(<7$M-9*4x7cOJ6e=^oOjY z3c`QhNl>LofTaR|<;^$G7V}P#6cB1y|Kr6dhpu)gJ(mocjwb=*HX|e^MhvD(V*rMG z>Hl}7kj0!Cx<4zjF|ukJQ$~E-=NI6B61Tp$TkYZY)S4jkp$Z892q;0O=VOb4knB3P z-$K~upH2GDv7W=KrocWZ4}1v?SsUvHs*!@N14kA!QQzEFD@&-*w!FkS9hJOsNZlTu zY`p2Y={@GT@*a|(^%oUB(zR_>-BB^z?=&$7ZYnSj)z(Ipmc|IVH784~+HKzUegdRa z?^fp3fbD4gIffh=ec~AHxxHL;y59e*YV$zzPo<(US^Fl@S-g{)nvgVB*ER*4*>ls;fV1eY zCZIAig{WHV^kh*`lno=&jI{gnFVDrK47jIm!9=Q#2Uc8oqUqxg)Jg0)P&YfmY?mwD zt-Ws4fzqsWO6x^VZp~M^T>IKor+ zdH7Ejj#ci`HZmTNJCTejPVq50M~P*2loAQmQZD)V?(5i7p@(R{AyIG1j_P>tjkSL^ zuHJkoWRjv=I#bvd`g}q2XMKL>-S6m~u)Fh| zJBQnYqWbA&3dj8$?Yp%rwauIPyWKr%=k52KtJnR_XH)5u%OCEm!Wi_H=dGoNb)Yym z@_)ejxh@Gp4i*4gpb;Y$)9wE}%CVt{C(2Yl^i6Y*6**Om?$t8-JNS@$Z>V)d-v zp@>0nB&aD1lCww)iZ27QF<`KHVY24io~2X}g0TWx4TRn@-TP2}V7%a&y<3nn%xw&I zw@ya76Rn5zM70vY-qn%11_iH*#9}T@Zcx*8-fTmrNQMZ0T=?fzUk+p z&B)Hk{qx)T#_Ivc%f^4IZYwVRn+b+%Y|!*}Jmn!nr^*yS3R zn$3i1-pd66vHDw=nmBUKB^}KQSSvmVVx69+P4}8b3!}wsdi>w*yjai`bOc&4;UU%g 
zGbDJLo}L*pUnHHo{!>;>+1!V8FqDNuzj_m~Cc{eo%hMjFxK&5Arj#EIS<0ziJGe zxGA(K5Dy|*5TT#TU*eQd9V?~j+&MXQGE{9Cw-kCxDg{{n$fqz$hFC>mZH_LhL~;h$ zSIWjlp*Dd|j>f0^^V|=;M`65(Mn`5no_OznzP8fytcKJ zL|~3vNrLQnB;^Jx4}W~7z_e<6_sj9^4%gl2-O~E;Z}a|F7LAgd*X~i5{n2+Wk;j^c zmbYJzyQ`xs7NbwcbD}Ky%L`>{>i;eElo9aDGSC7BrLZ*_vObYMjRnRch>ufh-kdlu z1gusMXFopgYkA&Ba#!m0UZWSq6r@Ct+L`P_I`h8<$@g?a zjPG*@CS)-1N+e?*se)c~UYhK?8H2I9Sa;cM_gSUfZxU!r<`pTISNs9z+uwZ014e#e zE`fLgjAGKimlxLN0tQX0(g|h7&m{F|zR2?A&ERKr>-0FcrGcUYIYu?T@?4yllxI=0 zfuF#sc_czQqj*AOmQ%h@hPcPln;-JWOD}$pbrxN(3Ep&GuW3?q-cU!F-sjgj#6JH$ z;?a1wvWbg7+(e$Yg`Mv8ujD(veYbp`^jJ*z+pxr^SO;g5T}Cg zyYw#0M}2?rdWshs8nkEV@diA)yiEq=5_HyNY*=Bejjwnfx^HYyzTq5nh7}idvp>2E z3#Y{y_srf~XSQKbsBSgl1*jHB^YOf3=KI25Q{%FM3jhl$?~rZ;;$Mh=t#jLbKV5)g zB^i?E+mM;5EyfX1zDI>T`}wkT6m3SvphjQ}#$BQT*p?bek-MF|N=1@uR>TtBcKKHT zix4-HBmrxIji|9(sn?ro#{u=vmL~VS?#s1`y=0f5_OP?@VRM0Xv=Wop0 z-=D5;Y8!#fI`W_RyY{=`z}9)+bp7a%eX8TlDj@Bz+4XYmV4MH$pNNaB&X$}1$*1FK zkqZIxR{?1}T@4KiBT0jS)}Ewau($$T)PxM3wN+h1&|VWjDBgEgn09&D`vcIN+{LIp z=6l*xk|*Q_hC$Jhy6+?@VnOUhLe!ETu19@c2w+{BS7)$?(MClKPHupB&}FyL2LrZs zN1iT%fA5^%_P3vno-G9fxqX2oG|xK%b`L9Es{P>qn3>p0!{}Mb|L37_P;3OFPvE~) zwG23f!~&R0@0Zlbnc)+(bol5P<0SnNd0UzMdDsiZvzHCWSN)oOgAS2zS65v>dHQ{l zxOxi~2R!D|!ei8VdE_5JcE$*}Zrt{^(eO-j6HiiVE~27yBq8A4=tJJnfG=`_Kp<-+MI`pgw4>p5o=xY@kXJH5V|-99m2 znbq)geCC-Ht2vpVR)h6$!OW^l-DB(y*njq@;YL)!@oM+{CbDl!@Xqn~G-2~;AL`8( z?c&B=a}i<5|GnG#;qXoGeo^0(#i#o@47DokY{6S@X(DXmre+*)?E_?bI{M-b&|g58 zS=jm-BuGgcxDJ4@=kgPgT-)okpX6uP%~O-YWwKyBByY)@wrAsKC&Xii3!v+;B?gF?qsw_ zV({}aP#R@Vj0FSWbUf8SA}G_WZVpB%7S@8r3nMQ7`WF}1w#V)bE&j)r!q=9WtY@Q6 zCAbwS9#H8%2F3~YCb5&C0LIKz^h77QjAXjm#ajb{k`Y^8NdsVa^1#&R;7{v5ByA+~ ziSf{CB7s=Z1+oF#@i65{#*|hrd5_uXzhFxwZ6Uk~a3dn5#RZ%YE7HcjVI#$X(F3cx z8M7W&Tcokr(7G-}MEmwdtcD+PdL}-zv__RhP^uMVDlG{~O(Fx4CxuujTMZe-A%uB%HfyUad4<|0)u6?CIz}N8A#JaMPPT zM=6s$9gkEv=6R|K5E@)adTFMIL0V~_OTFQsLnUcL$3VcnB29-E9{OYI1@Y!EP>DQz zD)@cb(<-damIuJB+l19J=yCcZ-IEd^2K$oQG)hpf7uF`Pqsr(ZY0Po#fRHZWvwLPw 
z2or9dO9j`Q+ZD7`K=%I3{4OLe86%eKQi@;NXMa?ISIaMyZ2;sL0y#gakdZdci5grWqNe!n?8v6M!#N`B#R?+;3mZDQYWdj(6>FCyU5P_!Wh1>QBKBa+Bh@*I z+OX~$CcD4jvA%coAvc9JNUEHe6)w5)2qMuui^M=VCx?dutVZa8#kce02V4J+ruyB* z`OWV3W!UEe+q=w1u+QYR-2*TJ1ZqY-cO1LyEzm6A+#R(3TffLt z*(yv{ySHXfh{vUIz8eNe&_WDrrOA^qP7xG7OMU+GTqL_CX87HY4(fk5k&RB=g%e4e zs&g_^9K=dAf}dDahSM{WUxD_1x&jPs%fB!eRd0(?cYQ7B_01VU?&H^d5N5-7J( zY)r15Yj1fbYPw_0P-}bi=`|K;e>bM=^q)=_M*Y)!H}_k=|1@ju(sTbF`*HKe-Dq>< z-f0mpu(JA3S9b0e`kOayhqwY#db3*<^0<)M+*<|fD0?1v>(K=Ujy)F@w+ivW3=;IS z1DtXwUg>4<5D{XM=QU2}>A67aXzva>y6*n;=OF-UA{Pg<>RUfe0Z3@ZdIkbOh|7O4 z@Qb9@@B~;?VAyj|&{ZGRu=DMo&faZeFaM|zr*?tS86P0DF;;IIC@Mu6 z)!Kr=fd`e?!y1Wfv0mG^2y_TOWfODM}<4%C+T3uNtJSKA?4VGQQ9HM+uRSKVOIdkf3opIp<$5dFtFz%}orfhvm8^5~fD9?GQNa$$Prx zT%VMs0N=kqZU}Ckym7c7L<6>GH-&fVVL6UJucGhru2UOnPhRe?_g~*A+-}~T_D7zL zZC-430FjADVf9zxB$$y8N1x1)Cx9{7a&!6rJ&jl(qZWHyBU9q#l}Z^CaZ$8vN&O(* zv4kB?-k);3NLcF<-cNB?lz8asq%TpnVZ&_nI_jPb*~{FOVbrOYvQED*)A%OZS4zr8 zAV_K>`W2jZ@8^$KpQ^(o=_qM4W)w!sB$+CmaPdFD3WeGZH&ym9?EfZTjyU{v&5#jJ zsg|G%o=F|h@qU{5yTZ~xaPw)WMm{lpRJMA^t%03~8|-WIa99Ykt7v8f7Es}1=QuXP z5qmi%RaYApe%PHhcTZMDyPnQ&Qn>=6wu|nj|Cx>cF7D__zBgBNsT_UN?s$V0EabX7 zp5)t6;Mwoi9B8Wi2l_OPn=Cn)g#V%YXMx(@J7W9>nFKl6n+%I^X-|{EJ6= z1U^qaAtR#Yg11KB!;8OyxHb1Z2vT5V6eA)@X4KNAqz$_wNI6`*%HmmIj2b>89QZz# zC6Bp2Suc7&(@@^=>2-b{{B3*L_RUi(R8?U_Yw{kCeO27kH&-3&;|ikzp3Q1iN-&>> z+8Wss=KwKCb2>=6zJqB_h^}4jb9?DybQu^M)JE#<$e;azqV*JXT+H7*Pn>>tDtP@V z`}k7gc6{x6&EGNp=}XD}t@zt7t(^haj12M3+a_)@Q5ebPClT=17Q}oG2(r`W*uJYP zDyR3{zSF?a@Jowgl}tqtD?-AY7DB8jO$d{TQU5pig*7t~&O(G15JizvLhc-@@}Zi_@5qK(K`*xdn>2YN`TNqnh2%XSj3?! 
zA*{Gi_I)H;p>d-w!BCf<{iLgh)e;lls*W!6R!I7ooi*z{wKpK6u)0Cm|J+`uVd$Hi zvJI>Us4|S+$8kh6_DC6AWdN11doc#K_c2VKw z9d2=(Yg(<_Kip{vIu34X%Isb#G*{eqnNZfpxf#OnfIIvj&1D(QCuf+5*6+2d%60Vx zIG*X}gVg;bZcqAgElWLvgC5r=s|p}~<$< z!I%-`g^!sk2hkx`C_(01hKhqkf*}$F1OgJIBCR{zu)54hLf7606)RE?Jn{SOmc07S zTt=)q0)S7`iUI9%6aA{Uz)sKl=%J-tXY@B(7oe=k^Ql~+?=j%=E*H*daMP^|7+XrC zKa}PUc9sum$y9nZAv>2m<*ZiNeRW=VXBxd{y7>>b|Lm3EU2L?f;9XSXZ*b?yzs5%T zdZT_<_5HcOydM4LWa2F6=w|qL|Mg!-$!1IFETGn35yLpb;t?UVG zOELrKV979(7LC%lwQ^VMsq>2ymX6;h$=A!Czx+hU6{nucHhbrWFsMrDNrIsUL5G@;)uraC0S|m58@xpysUt3{;MG7IQcvupwtsNqm-yuXI#!C z#>f13Pr)eVE{$1C+d>e7=kmrF7uHCblZL~i^-d3fL+qJ!8mwo<`2}q0%3zkdAbK*R z1AGV-A&DQ+A+(2QT-NA*ZjWf~LhR`jun&r&Ne=ww_cE%fxhrVTra_;W`-cm+R1E1v6yLAW%QaMYIFcQ-^%q` z@`(dr%O%$3wb>p^J?`}@6}+okqH&5ObbuIb_MKSC#L3YwWlVX%EMH++egbbW4VF419cSi_Q( z_1!*SH-UinDn;{vUChUScmEh>v`2}j?Jw4OiXj0RvUC<*jUk5v-T4aVxd`7Ai#+f{ z#uy>_p^8$xJ}=_FuE~Af=WvTmrohJX$KK4x{Shr)X1WGu?McmFLn$}3+v|}XVMn6@ zD+%x(pi>15Xe{&a!U70#i67kP{y=dMY1ax_m%_{7%`7#Ux{@zhU6xIZe+AcNR$`{D zUc{z=aGBjRj9Oz$F1kv&o^;D;0#c>+%<$u3qk8%&&aDNxtp&#Ag1zE1E!#3NI*hS7 zTBezcmsZt`nSqg4rw&!$7(JNfLhTYVrlFb$48iB^LLYxqH`o%RqY*F`k(TFo7m}t3 zr8`f#z<||(ParVxvM_|!Qe8Zahu0%R&yU7>#sX9k4_&h_{zs=bHLGq8Oeu!IGlgaq zhXy3Q;W<9B%Kj?a$}he$uAT;VpE~{iwRyWMp?OtQ7_}W7{X*^KdCQ};3-Fxd-RSr0 zoR?>Rooe28AIkLqBhhi*_cPw^dbld!@1dXtjk@L}-wyv83uDmcW{f%{4ND0m z=(DfxY`Tz`G;gJkh-Bpo5CS^`dek+7-yi1LY3T2XeO0kVyb@@yNe9fn*Gfoi_k+wL zPa^gc%2HQkwDyG({(E>x%EUlvvD~@b_}HE~W*vCkN@Ax48F0+lp&(e;A%HGGsD(sO zHH5ZQ7(F#N#?91||7NG}?NtNy_8wN&mD?9-ObZ3LM)uee;bdUWrpUq`L=vLYtpTXm zTF=v0WMCA$_z7SaIS`+Diwk(th@G;XIMC3dX9*>2@X9!yiF3^fLkhXxElf@(}%TVF5CcG}vtd!yf zB>|##vWyZ;7ieih+G~jpM2AwXtz!424_>u2s44FWAbkn&C3H14)Y|#gpC9jq`nEmg zuIZgOVfEk{ysCHMb-_r%Ua+%QoN0NXc$wyKbPvDiq$OjM6W9I>ItvPTzdwC`sdkg) z*?;=zMs2+6ZnFPSTe6RdI$yNr=Xi-djyR-`KL7Oh)Xv@R_L;)9t0!0g?E$tP8Gv(- zSI4FcRF=tG6&OuYgw~yeZAq3MV4e+vx^xT^$yKk-JQCfu(xWz2u-PTN*x>E+%rR+y zQQcM%ZmtTo_wjY(cs-+z*5@TwV08~GTwF>^@d;%prZ^gBKOFFq5ckdD$Qb~SWAWKI 
zJ}f{8-)Tvz7={DyFOAi43M0alhsS?_R4H*{LyWzVy{&|>&G5rQJ3py$ZuWgxvup?$ zQ`Q3-G2=)A!)Aq4+`33h`Ai_LZ6RbP7zUi>H5r-1aLQnnBn7dB)DmUVidE08eulxo z_Q&)zzGfbnK(@7}`Q3NceY9OFd;VI%0&lWA=+(4ZIt0awQ4z^=K_VnLbfGvM4Q2_* z(63v1R7NR2BOUxOS>~QDjtI4{A((KO-+%R>eKFjQ=dJ~n?_ z;nU+-B+DSt3}tO(HoHa3a95W2db?H`o=@g(D<%StLq=STjlREyUr+97MLHTRRcZ}1RGAsT%$R0Kcktqw%Z||M{F|$WjuqB9s3lx0#zF&NXU{JD9LTK> zNkgs3P$UP9CUne=%90Ru`BLHnbISXl1JnwOdQV@!4pwVHiT(T0RTHK$_$D|yCHks$dj0Ol-A$yR-SwE@@w&oo@M6Fn=~#eZl|T3aSu3f#lXJ(bO;Vx_Z^t8+wZk@~LVdbO3pt$uH_#*LmH`yCA=*M(cnLfcThA z1@Qh%J_;$Nc{QbZrSi#01KJZ4?}QAK`K8*7(wIyXX;qpx20!xuzQ4*w3zk54RuvB~ z8Ub#A5n>(wr*q#wHPoXQuJ#qGyqP_S`wy#Qfdn~1G6l>Gpv_C^7->qedDYlqet;t2 zqabeVqc8__90cgBM=9Fw@)Q4szvPxP<%t-_7?gQjLGu9s4POMa0T#_TsfCOSV_zfGKPIN2F8lUaWt`hieJABeN0>pt6#;nGCY3X z$92}es(Z{=pIb& zNgD7Q!4seOsC#jbWBsPAXp&DRe#?(TBQTq9wTd zQq5}KlL3-E+A|9S2429RpPZIWg3db#h@YI%#O%SKFVfYeDX_zj#qLD7q1L`!GN9ha zqXLG~@c3EwE;=Y5uXjFQ8VmKsixuMO$Us%#fMXZ1YD%qkO{X{F9sQCtCG0wDbeICK zFRKKaMP@MxA?iMgk|1alegeWpn$68Ee+=bUP4Q9SHV$T;JB_(;CiDy!-vP@t@>4&gHFowVkbm_s|02ZB+ zmMsDS(t}jiN9G(s%z*ef8`y*rgwBge87miNlv*O>$X zGSH(Vf!-@F6nF3Z=cN|oLB)etku0WfzT>?VfjTM0rO{2Amvt`U&b$>^De_A*cquG6 zwV5wkA zf+0`rLczBI?m>7hkduNqpYTpK?h)wE9p8=4QEsk=?fg@_fsaGzC@}u{tZ+P@;Nlcp zQk`NREbK#PZ^hM&Py7 zjRIC@xHx$$6JQtf^2V5Dv1c*u7YuJtbkw;j1#Gl+2Mzu+A@{;Igdn8rKGU#*Qxd^F zt|q8^&4MThw$VR;4kpA1cNVtjX>=tW(G#agGP58S2>3U{2GVBws`10w9^5z-exP<2E=H!0Sbz{KGbGc}V&CAA3 zDx>7f^2^p}C1BuqyBq6x*B!_=KB?Ig^;__IZ|(Pw5A$wLS1F@?y)cu;tlt}|IAy)hw9gAc$MS=!BmYbHXhE`Sz9D@%GN8)m~-%@grSc+MXocu9Ij2XAv?txQ@a6VRl1bO!qJ8 z(tqdwkzDM<45fqe*`%vLTS>3@dsEK+Ij2L9C+w$wzt=qG^2A~_7F|8D3(AkDqY7P- zi3`Wlx>0XSW;xUDYwxg+=oxUAr~>3!3tLu*trX9|<5j?j^F`s?VQN(a_o`H{`NBM% zt9l2>l84S;hds!YVD;P=oB_p`lvY2h%tdh0@1p_=tVgqrC{`rqpqhJwrywOI0MUPM zuokLpK7^hT7MNnFLR0}qJzQWm?wNafa|xhTf|sjAsUTDmOwl>zx-6_nktv2@Mav!? 
zU1e)dd(_)MXK$m_Y{yxv#*$2xn9;5oU<*%F^@@nGEXAzS6e*jK zQ(0bUI*afGpf2SV$Qn8h<-uyOMUV)wdLqv`Gij)LL1mKh$sTi}v$H^*y1LV{Yv(_| zIP_mTHfw-EtbAEz+WQ3wN+McS9jr7ih!G4J-kg#wDeaz%-)$~aTn%bCR*}sc zif)Qpqqav4?TaqX?4mCejvrrrEjnzU-t6D|rq>AV7|yL@%W6)Yp%j2%TKly=J?6jz z<`{_yH^zAz;4q#XNYetJ0ziYxGRTi7#LBi+C6PBt_v~!m#b3^~IeL$2D@CK*ETv)p zJLALb4`2>Ym?uc1sI67a8V1wOtn;u$=ozVc=oBR6Q^pfr z*@lWssFNB?J&0y&^%gLD^@WUqD2dPVhyYS1X%HN-UPro@%Bn$VyMoy zF#kja46?iW`E}Y_i->~5fY-?% z>e?Fy&;Gt9kaMsZDe=N3>?`XwY#fi`@u;nzT~W6XBR1z&6lQs|7-eRHIf;g*2+vY2 zRCe5ymo{yI=Id6+ZEw;^~=1MPaieb!@Hl| zZG*^s=58!N5AkLJ(ctj8A?b*b8qWPC{faE3J$BvoQsETF#x`%05s~VZy#hVdQtJ_? z{n_ELu4Qo5>G|jVMB2c+qyBvQ^J~XJwQEiJzJMJGP0iB}k1irO9ceFrzE;vy+cy1F zw7+qyks`18YcT#()ULvhpBcw%w*87v3!8JI{FA!H3m2a~ghIWd%qPB2gh&^v60DfX z6KmRvxUmpvwt2Hz5;kHgHxjg$lr)fFDw^lzTS|WR%1o8seR*@KcPrrNbB6mC3B3Fm zM1zg<$g7O|o| zofNc*P0WEfhf#yti-i%eKtDNwx^KA$;w`Jq#=|jC%U3+Y&6OU$-V8)~_Mn;ojuFh9 zpJIsLWStv0?O75N)|sJ4d$M97lf{+G8VZe3XRES_QMW)On|d=hK2e@XrBR}oVT@R9 zCPQ3YDl2(;=61pU45O&IZdW6{$+)B0>R?UW!kL>$;6yEr>d5 z1B#5kC=d0notMCpx1tHI-CyW2%c0K!Vp0@v|-05ueWlrGpOTOs$VTbgA zl%o~MVp-70ySVOFS$e?zw5GhT23&v*NQ1JdsnP{i(kCm61433%j#m{rFC<=!V4R56 z7QJ~m5Lt(8W{dPAHZ^8E8*Ir%S?BGxEcO05wSfzxe7yHGc0J86u=u`fF&!)sL<6bL zt1-}W390xNoDR+^%?a-(DhlPAr?WaAMLYi5QPP#9nss`aI=L1tVJ^y`vQ|FwO` zi{~KbgRF*HCzfD)IalY*^$=+~)*(N4>+-CH>K^lgntZZ##hIdtJ~$LVJM#j2PsW0C zp*d(NYW3|DKiBqfQ^>2HCl-cbE)wg!q3L@QVh|EKfsN{e7*x^G9r~rk(c(Gja;Vsv z%c*y;?Lc*U5}BSn<(?99+Yim3Ob0+jf#u8~mA_gQG0vzqLBAwub*A z%q2sYfwF`#muN|SQOg}kn-cmhfZ>X3dbNL_N&cYLd_h!osF;porNk{hcwnY#1Hu`= z1Z1jn#8S+4`3_^go5d@ko}xzWpmYetfmeYvMo;2wdUhb-&0){s{C-Eg2CiL&DRjQU zZ46)TR-wTIL0Z^0f3;%f1;Y^(D?qXCObo!p^Qu(-R$XU2LGmh3%&}K8 zL}8sL3)`9cN ze<)Q+P>`8zAX@dF~cdm@lq1l4!awU1QKV}+Zdd!)tD3#$B@uth( zl`0`6S8Z;IxNe??>BI$s8uhQa-iDCRCv29DnptJ*SsJKAd%k!swejTG$9GK*$_D2Dvz(*9cBDkm{?YL$9z7yKv5k14GCv^nVLLp&1@XgkBc z7Y@d1INbet)O`xqFStj-dzqkNk}n{_+usDgBNP=aogsTfo;Y{8i%Oe_o!Bhpk+n3jVwp**Gq``+sWt?r*rh zsQn00649duqa-81&JU^bVBrA2SfDUqPHNrDAA&hIywwJ 
zI*0R`W*Y*`gsYzrTU+werPb=Xm^5|;PfYtb)>dV=@KL?#8%J0(9ih7}!a-8sT!ky~gH6Ggh+4UTC`Zcj`9inTaRvbd)B6-RHz7}ym8V-#KtWrR z?(#NW0^^d_JK<7g<@`$CUCsQ;++9sLorH3JH0>iI9R(_H&P1(gpT^xEwrY`x;Q)aX z*vw8sb&j{pg);>zT?Vzab~{rlA$Rb5543uNVE^GeRkw_bobnJhk^Fa4BrLe}@^nJ( z*( z3IAEC{7hQ5yh`A^s1?TpYlY!8J)Y(7{$Bmlt3@RxEc;dv-vatHZQ;Q$wJJ}uGiqiV z)OGA=%7IkO%;hxY()pv4Cw=h@q@~^tjL}=)%)VBLr1`zi|Md7l0_~2Q?N=M`r@=^L zA_X-aJ=O=zJe-rWbWxMO3m}spxXXo(!-1bT)542$G{*j-bF2KjFNA*!2|+loiESU- zVWHr{s~bTaNz@zcbz!$>@O3^zOA@@f70hYPrV;xr%vP);#~Qd$L-HFiZuKNh^NZ3$ zF(IYWC)Mrxcz^4;S6GD0wnbawPP5gLu5gCz`BVcWB;jdJM1y6cAQ-%BSgkgE{=7qR z^9wy+?{o^=$}fbrrm&7)XCE^&%&R!snsqiiqk+cON=xsXu5|ps_AjGs!Z_QxjbA<{ zZiw9ZjRN+Y<_DBFmC)|1P0$4d6BO`|7SsZ2?I1}^tPLJ&mkDrdI~w%eIy}x_J~(TW zX%`2&di`{-m+PmK=w=Z6UR+svSE!ZIeR}->^<}6fSH&G@yI|7e1dtVoq1C zzz|3D-xc$vw!;s5CI!L$Vd%Qd?dx*v5bmZj3AX@k#GMKv+%tIumeVuv_x4i(0kZFl zqrh1e>2G|CDOFkGzby^BZeE2In{hYk@X#;rH$bOW-q5>Mn5C7-MdX21m66o zRM74`Usj}W-Ja&ir}@TPR>vQ0kWT>=&i}Nl_uRXz;8D5sTRQjW3NP#2b@QIZ{nxVlLzt9T6 z@2?eQpJV-siYszIM^KdiC^qABc>4zkdqrPega;gBw6-Lt)wNf5Y8Tn0Q_^Y=`5*+<4=3k?)F7qjG zdT}m+GljP)k6&JX%dEKl+jrjM1CDA#Y`3ph?C-)vw%|XH1NIVJeY_47P7Z@r=O?Aw z<=VR%YE)ITI}W6`&a~zavP_$o$Gv4X14RFLXSQs@)j5r9m|zQ<)-x#_)~<;SJBH`d z;$Ty8jHl6g?u0_x?NVO#MDRf&()hfvei8GtD=cYYALL*XFp6cVVZ5}65PrR#t>UUlf zd6#t;2ZSoT-qpirusGkX8M3lu6)-RZ!y<@E`JRT^3$re?{Tntt@Yq?MutlehVI!uJ zd~ucRNGOICr?+xF4ZrH%bHV&r6RUl5Gb+U1?F+tYh3<_iLXUV%FgNUS5`}(tE-wS- zZ7(#%Wm8i#x>y_>$^k{ufZ2v$lDY$xUx*16@;*Jz&En1O)@%3EQg1G$4dZ`KN>e>0Nh+m=(~QPGRD)|B#};Tgd{&MWk1&K=-YW=jd2acLENz{xCLB}C%SE|x~A}kn+1c)VripBq=#kY?riVkX$~dl zfCW~*MFRpq;u=1h)$phEE`$r!4t;786BiczL*?t--{#>xlwf+?y;EHyA@_KmJ$UFm z1#-h~dbN+k^ygMXvF#7KoEqP65+HbF_9SN~QojA5utxlP_;f6#mi5pY%?NREVMtCv-YP#3w|0$iSFXI1D{>Y7s3F=f*Utm%Dz%bm5W`S`);I2-Zv zk2&vXhRP&`5yjrm6*@g$l~g{@j;4L~j9EheA=wWHzOjNf3eUw0FEi*e`1ayvvo{EN z&ILvuZb7)WR!*9hyBz{9i&l5-pgY?s<~P2D*nQAVkH^`%YnP6z8Lc=elvfj6Vjc|LV-W1Rjlzy?d0pt_64D_R}-?xOPwurTWUu))CCh1x7=ME ze6%J0!nAve3%9?_hC7W~@qh8ATTq7540?U(8(+eMl0Iy@HEBInYqv_3)-AWC!uzax zYh}Ferk-oOjPdd0 
zcF&sL)Av6_8qrIBCHW7Q8*uHaur@GA(i0U%;tR*EhLNz{*iAyO*4f&91So^Qx`0R4 zT99eycI(-y$j)HYx~u>0$W$=e7V{4yF2RQH7TzsE8~)+T?J|+@Fnz4iqVk%=GpVh9 zUrpzog#JARxZZ4xpK~@nkf<^J&+yyQ^l*CbvTsBJMc*aGBPZ4QtbesDhe~5) zwW}0cOxOz$)z=-W%7(#*;pG^Yp92xCFa6giTXDk(R}nXz>SgQr#spQxD2rQuda!rQMDHGB-IXNvJrlI^+X^skM+<`OXS z#F2>sqL0FE#QAouOGwn=5~mhAa69l5)9PhxY4mcidBD1zVQc2~U{WU*wiUd7fbSo> znY`@^Lz~xuj@p8{u&wAt#LpvFbI36*#KCi2*HsER zCVmnrNxhjd@zwG@i94s6)PGW{s`wzP{lq1fR#}$E?9-RB-^uT6ymhI}=YjV2>S}ov z*53S_qIBL*8D)rAr1gZKR9j7Sb8R8Ae3zV2zHUPwKw#7w0@@^f9pS%&g?R0FBQKL5 zpaaMAAve4(m`nk@K3#85!J3CCTQWhI$bJ^eAdF!x?r0)GHTV;FLj;{C!6wUCYgF@F zA0`s1?pTrXR@2yuL$O~}OjngGSL|IK(}W(+nm=p74At4jw?}7I{SW4100p$lUHi75 z7)5m(nKg8zxpIq`$`l+$e+SsM{ex|4O(HM|s2{f06<3$hfjnBrH|uVgTn4uWId89m zmX?E$=TS&A%)i2y0sD#&Z_hJbO17FL!CZX&z_n=;PA;H9v-Y2}s~8*5utR@V6I#se z^2lzjEWj*(#0xJRa+Dey(*2aK2teCs*^04D}F(E zNb~EtgWg+^hzGjPtbM&3>498k8!H9*3_6?ZyDvlQvFC3NVQuK$Ep$#&(0Rf#;<~n` z0~C0B%n-bNJvSAYc6N;I>Ugt+M`6WNxqsni953pNre)P16n%PR{3D>jPkY_)6P%7{ z2JLjIQCyLUP@?|E>t3V%Xbc9|Hagb=|MAvgnHTK5NDI0J<0c2eY?q*OwBJ?~0zV7V zc9`$IJpTo|opcL(_euofEQg&21=wHwYSR)Faj}?_q;CLJ7$gLQ7MuO> z3k7rsryD8J(x~gm&UWo&Kdt>yq`n@2^|@!@SXPHj;n8&2YWMw*TLn!z+&3l`k=R~$uPc({#X?LE>Tu`v zatR->Zt+9nenS3fa+@&`$EOLY{Ndx!oY7y(D(e3K+D#Q%t99M^ znaizqjbYu3Cy5@JC2%{Ow=3pmGZBMJ+uQ0sK_mQe9ktj0OdVG)^)lk%d-Ilf|-Sfml6IA*po5^otkJ!j2%hJW#06UmPhZct^ppZ&xt zEtg0me?Q`jbx_B5&$GzHc_&1gqIPG%$lLKj8TQMC2rv@6-sQH#V+uZA$7H@Rai8jF zJx6`;JR;d^G0?;-!}gr{`u0doCOcc(-a}V)_A7%ws-WZYy5LnsklX339SUJy=XCD* zb3ds>c=MrY-RssL7UXr$9cy+)kw8@F4Hw{vft#L+18z~g=pW}X#$zC3wg zx-woec0SaUS--4NW}t+igEj_)ep(w-w}0tq^;3T^77*+^qovT9qc^_Kq-M}=C7rH( zuqa*cx$=Tx>&!p6Bd^06e8Ixz=XcWHiiLM>nPciEHbjDN%Gp>To91=hwQXA+@bhL4 z7qIfF>&-)KAN$hDc?bGve5-qPyCDe|s-gD(Ix7yLX*QQ{*V5{jYxTGth?pJtb^B>2fg&?VOx3ld{3 z=dopX+Q} zo^rnHLTE5@x24X=eR)3I4A=Hxa~-+AG=z&DlDouS4THdY*QctPHS@rLEfzb^qI{AD zTbuM+E9tr9w?kD5kE>@JBpftBDq@i_UalHsdYR&DQ?bt%MNGi(%mV zUdKy#ls<=6&X4&ziLW42+4*Ee`v^O0V)hpHW!qSMUj`gzpl=@{PZe=tGTpcAJV?l* 
zv$-!mbUG6{9lsfPgH10=Sl>F#|9@hB+yfWX2w2AuIHQHirK@Q58r!cx7U=}yS|sE`ZJjQP{RPol%_MQ z^$}%lv4yXKOc+@Y#K1BuL|w$-#r+1efP&0?eDz>ftH|u@iLoFJR!RSa zFP4)V2LZeNaoqAR^u6@O6<(Om6sWsXeK&gl__c@A;`>!%^*De$HQlz+S5BT=e(e2Q zQmJ8e?a$|5W$zn)jUbUvaWtxO8vCQE1&4EAtT+ZlZA~tLQ;KEyZ0^+fS?!8u}LuV+OXKb z;&VZd=DwfXqP=JPuWUa5!yd}rd>*WwCjMr{?WJRvJf~#sVy6UjZmPEiagwSM$iJH% zZ<^51>+(j&x}av)+Whb7=-2;F+VWjHaw#0ebV}(r?)|%$5}z-{K|~K+Ah?S{4MhS|J3XcL?v24_n|rK*r?Wka5-VSos=KD%|5u z3Mt7_a^f$&n-m0hiAEW@LK}x^LPO$(vL1!q{SbXef1%hi?8U*?fD2=TgHEZX;8Spz zAg%*i5`0av1h!QOtM+6RSQB<j~o+WHBib@3@^GCGU*z!n`MKmbcMOm9J$MzT&8nmxAAUySHzyH~wnw|D!a+N=U2(?SLx%F^ldvtaX98 zlQl0}ut^Ip0(vU>kT9_@AYMIQ*TgK74~GuM-XSCgmJhfp z5fi@38ekOgIs=YM7^r|(wR8gGap$gs=6SOHt5XFoE5WNWC;hF^>`uSuf_0$O zz^0Vr_<#3C1b!+q@>ys973Ux#X8CsK^Kwo62!M)d5w2B3=J~3Ij0(_GlbuBCi&uBl zlLu-wOhOjP!ojyuAB|d*2P<2sQ@#2 z;*O<6+Wd6aDgl|jqk|GaF@b}B!(PYvLQco0=L$hG@%?tqa7gO;(qL7GQDq_ZZsjbk z=t2#tR1X0$A&n!@q9*=hdMm54kzPZCDr`Pui2-QV#;BlRP{Xo<0kBPnJzw*E4$7&cX@-g z!Guagc2IWq{fUr}T9UQ`d0(HmKI=AAIs#9eu)F+z5a>N{#6r12X?{C;bk@qYy-AWw zy~H+~HiXy`^c~x|ZIgm)`oDi7pzcL3$>|Y>;2`LYC1`vaf5(zZ$nmMiJ#K&}!$;Id zl&xI)Agh&D$Q=S!3%T^m^l`FA`r-d{CF>EBkgYO|lxj^0MqgHDr@2nu)snAbccJeJ zEqH{RA9yol#}58@SXu||JG>#;#|}mC#!gK(j^(aEt|&JRf^Td@!03kOr(bdx=a;W0 zNcK<`z)faOS^`c*RK3XkP!T}AJTa4XO?+N5ts^mcmM*YxGNfls#52TDxKKM!XpL(W zj|~@n=K-}vgRS;6&(Sq5Vy9@L`@opQ92LTVgioTJj@CvGeJJ;)H*gsE9uT2#KE$6DAvtg%E@RTaB3T zC#GX04+ucvB^)OfI?zQ#?@PqP?>=gA=2iLR&PNWC&>jL(s_Z*TPc5HlCX>0z(4ao@eWp|*gJJ?v~TSnZJc``edO|N8XUE4P1$^8*|X;Vg#X6uTxr}V-(xh*~8?zn3Z$B`2O@B$npjvo+U%2Nmdr1kXO%}ydE z?6ZkaHl*emj{&5m#5-}as3#jyCks82Qt^ua#5e;4&cQmFwKtgyFbaPTdAc&u2`w#k ziBo2Ns$lcrwAIoe6=dOSq?q#2ht09fRma)*4l;IZ|84itpY_IE61xv;ukD3yRFLD{ ze@V6Qz+j9Xs@zm6^Gg)I(RIJ$y-CZR*;rId^&dBW1m#h<>}@y0@8d1K!kc1)4j z^Qy#sb=LGY@oa*MK(AIJHf}DlKJG!BQ{qwmeocv}omuU|^JCaxu)icKcm28Vm|D&Q zV=i_hDakYl^;+aE=8Ty17G`U z@KA%4w#M+3SZ3RFRn(pQA}7EDrI;|)2^aF?kTI)^WEryeIx0Wgq(u*$qvF5oD0a^F z%Vc!F*136cdHm5Y;IWhBG+Scso@@Sx$0*Rki`sloa1a>ykHs5wYYPcHt3EhgYU!}Q 
zFA=TDOMAM&G#ru=(8rY4%XE(jkh@PoPW|dWcUVF`0hcx-RrCiU;^h$eyD{*tB?FR03LwD8Ybk;ka#F;V}?5_U# z!R)LpCXHFL==5&P*?+8i(Bws{i1I#{DdUwR)S-iNu)n1AP!?n8PBDBOpN&Eb$w)5? zmVY?70W4j4Z*|8^FGdw4I2?O_1Wk*dVCWnB-CJL31bM!E3#v|CdVc&vTQpqyUt%)H zg&sdCa6z6J$jkL{AO?Sj3cUin58vs*JG+3l_o{>*nNk21-ba{PKheDVIifWq|7rg3 zUzID{#A*5Ky`L~!m5E!+gAeAt2 zuMVSP916CRXou>YJx@ka$D6BdgVSI!uQ1U?S#-dKqiCb6qx)^;hLu_CL=C5ex3UPS zSPDG#6Iq6yi&~SKEGJid{>~g1;7$W)&!*BO6VW>WS?;4%s#g}4aheu1*|Fmx19C z4}B`Cj;xZqK=`rutkuc9T@yumeu;?-^M(R&V)5n6lPEz#sbBgP%x8#+0QW%2_YzR5 z+hZ%wzEW6oi%Qv^^f&VO#}{p??-VJ5;< zvQR@|mDGDJ^#n0$eMUYiX(P6}lta#w>Rpai(>}B;9U_b$=hJ+z^`O55mbY@RZW6j3 z?ZQTE^&k4W%e<_iiF>}L;)IZ}uAdXivN6f-l1)*Gbn5x7OE>76#v6;IfnHbrbbYZ) zWPD#Rfn_^M$yZjmOD<%DIPAdmy3EQS-fqe$(Tw>xdGPXLY#n~q=HuB8eKHG^Zb@k8bf2^TMH?z^IiCh>zAP2 zr!rf*Vk_^4{%W43#(7beE8k6^I6(bWOvq_&Gd`f=UC&RT1o;`08rQIZ z(r3x=P7*S;?{DwkJC%3bTbq9tyEZSvM>>RTCQSLN<4|ihIdPPr*7% zG`ZYZ-1gT!4MN6*2^uw#Z5|<}m|Bs>C-G5lIrQ+p^EWajN(quO_3dfZXa9qbiib%3 ezkjFxvJ*ti&F`DFhI-z?KkCZbO0};nKL0-jk7&36 literal 0 HcmV?d00001 diff --git a/docs/assets/images/hf-transformers.png b/docs/assets/images/hf-transformers.png new file mode 100755 index 0000000000000000000000000000000000000000..70d7c48942cb60b2dc8ae2e3b06d92efed2f6538 GIT binary patch literal 7258 zcmb_h^;Z;Lutz$+w2CxK3WzA(xpYb@se*Jj(#Z*5+RAImV`0@LlH7j6d*q4SR6lrPVUhLzZ+ilB{bh}XMUPYi${YAu9On~c z8UCGzX(2l?E5afYdd+pufO0@m2d8eTXG&H_9Gr;vR`DR^I_#l|%#{EYdBW%wO8aK# zb|mJ0XT>u=;Nr-GpMx2m|9~2Xv}1;|vjqhonnzm$!o|eIZpV5lm%9N3Yq2M5NjPpm zU$=oLVqi(JEi48EBZCK4!X6o!IF^bRK@3AETU&%8*`NQ<08iuSPbuHU|>kVPvAk&U;NIP+i2$yYI_g*UBZNr@1br}_fjtE+o3sZ zoWKl%^+2PKA`G+sPPILVj&4dHfBCyI(siym`Z+bJq?i~3Wda#udaHU_w>0wDae$6# z`cYR;sk*<8nL_sAkn=g~w-0GZ)ae7G_p;Nh%^QNO^}62f_p+f%d`3K}YNV5nhP%7h-Q28! 
zFPm0*_8cb{V7@DUajq%Qtb4q%D(AKJXio#txg9UL4|_;y5^oaR|KVTqa0yJOe0tyG=CB!HszN{9d1Njs9$QH)_s z%n3YuoK-I4&n*6a-Utk7FQr8Pdb%({m0g>2m3@R&9ux>*D9Lb@ZQ>_>uuv5d(AYI`@&W$o%4`WmPBVHGeXf+-Km?8 ziriHr0qo!LuZQGp#5GvZ*~-TBD|!#$3cd4rJPz07_NVNf{n~Bdfqr3%v4y|V#;IR@ zQ}5}wURDVkFF%7iV82h&+(x}n%B(~AB$Q2KXnQRs61u{$erV)i{_H71PDSPHK3i2# zl=3fVmwSVH&h?gRPCtU>I@`8@`{5n~P37C+bZJ?~znX)|WaOmk1a44I)PK>~A1{hX z%gm%lEw5jy%m5Qrd{w<>x$*qIRnvBEh_R%!?2z)q4rWxz%WxwbI;c}~|ZR<{stl>nFn8O*Lxot760vBuN7Q;&`1(S95%%nA1^5OY#QY9 ztIh7zw<$D@z7oWh?KCXN^K$6&;vG(f^FOEOb9Ho7O~7hFU9;LRi&$#Q@L5WD52hCB zc%}YwxD={C45}+7`pv|o&*QyV#-)-fO;}qLe>8Hl7D28qbfCR?^(6TQy`L&#JSK&T zJ6&YFZ%<-GY3B1^RCzs6f@K zS^MQ}jy%@OTY9Z~OW2AY1Hl{I{$~S>%hnGY9&CgiZDn?{3_XB1#*nk@UhQUibONJf zne$2v)7M{WVB})6j!}&TpKHfyGU^ymeL6KeI|RKy=cg6- zD3Q8DPoB*}@277WWCB~@_&@j}uv%kD#%IsAhqM~nt6!g5rf$==RH+549Nn55p@jONkW)r~U;otoj{;n{n7FFH48*PY%j(Qg$p zi@C`?e{E)Wz1Est{MM>Js})ngK{!k_cJf6=#C64KVRyH(rpuvU#(sjGdLNeZL%LsK z`TTJ9=T6%7u;3V+j?bh)l{GoGbti0>Uf3ZG&hjP`Vl(2<5Umw<0ZU+XoJp0j2)Xn2 zM~#T)n(6<^$`0g>$JPc0w*7kO`$Z3o_cPzEk`INrL8Jz29BkX)*48XawMv9{2mZxmCh4Ic(nWK!B<&h~XeOuEpe{BUFFwW0q>>+Xc6 zZ{*Y5u3x`YyZ%M-mz-k|iNOY4kGdQB6NVFy^&d9y<*Qd|W5E{?8>iv*SW32%s;c3* zOrI>M?EPtCz0C-n_->x3rfeMOG==K#vcz%2V#Be`@Cs2hMrIk<1U5un{pP|E=V>rJ<@=3#e@rkh9?D4uopiwn?AGqm#<7jH+EP z2;~e>=O{CgZIkM>sQ8tU_)1@|h9^_JhGyuE@S>;=KM8tQOIQ2mzjz0>WVKzj-ge&9 z;z6=`s{E;m=EvY+uzY)atNhMRL?-9C+~Buw-x{LqE93|p+T-UTwz+udgIf6fcnQ&Q zz_9`yWxO@c#IoOJd^D~+W-dz0^WhHjbD%5e=0e#>N7o;XP|@H0Z_X)+JN4eA*~M8X z#ge`#8}P%p#(A-6$2Hf0f+?lSw0X2D3M1Z=<^OP&3C+76eqBYNfZEA*b$MD`r-5AQ z#1Z5hChD0d)5K4IbVh_-zyqWxo)Uj*-JfDy_OhFN;^b}x)5SGerX_gJ9DKRXS@`$$ zFu@c3u?Z*Q50srQL%_{K6;7|TEUGlmgJ1;(A5E_@uI5HR-O~eI=5UnYuL4S*iv8Yc zM#J$mwdIR!_`5GLPM`#MtW6nHZWQQTs1*ZgXqd8dm-|Dmb=VkB@v)aRoAQNf2$C*} z^rPcnanGF>Bgf{pRtT<~^O96;f#bkK(^Us{RhPE?G9khH##xJ0WH6?^{183DX`^rE z{`Xn*i>9WslNR^Q;ZCO<$f(4Soj6pf$M!wF6wMCjYhvr&)>fO@?N`_)d2I#C7sR+1 zBwVtHY+j^zXRGqS=WkX`pRfG9J1?^NcgW?>&uc=Yy_xa&bOHh|ika^QhB?NZ6XPZ9 
zOcoW+3XrLVx7HO*j&Y&qGt({+=QeR7wF6S%N!c-*#Q3vH%R6-+;%bg91oHwFmku`(ih6u#?Vlnel{0@SA3x2KY$n-q_T#1Wa!fd)kSvgwo}_vH zr*D*sF57nHld&!9+uB!d>_7}Lea@b=%tC3Vg~P3!Nbg`e@1zJ1J(leBZTZ|)-_;-+PnNj7MOVl>%vi`hBvPTs zAn-{@i{osy{oNaj0;}$Dyz)~wF(T)!=^`y$yd2WwSjisFsDD1DG!}kEptXR1)cFdg zdA1*`;nBD2pbiZ@2l&y?f16<`xz5-uFLx>4^77>Sf-Rrxclv45Obrux2BLi^;-4UA5y<6r!v* zpG+mQh~-Z{c&{x%6w2@@xX42xj_Z?3GhxMb2sBpOlXzq39AlQsU5&#P-dX!89R0&B2)qOhoMc&FX{9G+CIG>xp}qljC>k z@Q?Mg(Lb1~snzjGLN66P!$yxZG2XrGQuVn&PG1+J;-Q?AGb3*h8kx&~Wsce!4Z{=g zRjoVvpzA^gTgyb|>2q2Y@@9l((x|ulm}}+ddk)3P5jvp!01`HpG;iQfDR3TWsMxNK zbcEB&>fdgDGEO9(Q5yO+7o6tL!aYrP#!-f3y#Rgp|dogEapX_>K73{3_ zy^xgK=_9b%j5&WRCk?`M zolF*C1lbh%Pjeana^`3rRC3D^OS*kY9n|sC^D7aJc(f*6()+M@7H~1;U8})*BJButs1vtVLSJ^rQ|0?)NAfJH+eJFetTUct z64Y<2sq&3qy(JU}==4ydfEk$sQo^%0=vPVlrrT-{)yrG7!eP~e{295#8e5=xkNwrs z+&qu3U1mXQVO6<$oTNkQ91^8+QEJ9)KrqP^bl0&g;|qAoMJOb@C;kgM~;gc zT&(m~vGY*s=dM1%lZ|;G*PH$)Paki-|C7ZL{}QAfynx(bAOS;~fGAq}9WUKaP>MYE zg7x!luCJJrW%8?@r-y2{p?4ou<|Bv?w-Ue44Sd@pT_341uMu|rE#EdU6?i_2qAtP2 zx~i7bg_zNln;Wo>$>_2hXFP>po=AUjgv!*{j(wadx2CI*81J%$a0FdBXakK3>Y{5b8j_Fuv5gw$D4TZ_@a{DAlIq`cz+nCZDbXr_AOux^I<{L!?vp zg70;z7Qewj|KZocYg7RI8WJ$n1_^*i-*;LMjo1#JBtN5?76I?jf0>fd?WTM;#3IFs z(DBnN01;$CPAc}bRwbeb`hxYE?W@RMq4NouJ|ZuEf!9{=M)yx$v@vfjX& zLQ8+xb+n?{*uXHh#q>Y3-2RI^PYf%0iU1-eGnbPGDvCXB-ZUK6<)`zc?^JvuJ-0Nt)LLoIeKx6|$SS^9e}odkZYv&8)d4OlK_hi@kDg${@2Kex`mgZYVH zM&iL3JnECH2&~Xa+z7y1;)piFXMt`L4fl7T+WuS2f|W%lj;8!T?B01mMg_jK4k$1h zQ6;P~NX^+!B`5(IxAPA7Tt77@eK+D*I!PRFl=y2*YqT>1i!tW&iuyhKo(;B(35~P% zvRcQ>RQ9f>zq@%|y$>pOK)aAnpuRX3-PB;~2l!>K3+Z(q#B@ z%0m5}Cvi77a*arg4){}6B*ix#yIf3CE0Q_BSDAy^$fSgc2%E=SY%%tSnUHBQX*=ttg||sDnF?2(+kN_ z#lR>W#TJq(J{N3mF-Z}Hd|E*pc2oPfxY!j66aHjcoGEJ;TOmi*wt`D+6(M&opX8j` zipKOQ!ce+r_;nnmL?hobi~ws&Ta(1^yr<{Z5hgl!u59dRPt6d;>(T|-Efw`q@7Gv9 zV?cz@?3e=EV&&RC_j!A6qR6%Tqe)eNUP6zs3mKv5g-$Nm?|j%-Lcqs%S9@7O7f;BD z5#A?&mcDZ7073Wo5o=SHY%0coXPUZ^g|_FmOqaP01+s-f9aM}-;gC)Krwx2}3&;8t z#Cc2&$?=J!^v(>w8yA7{A1ffEgzpx^Kg(R5;4Q_JB~d=YB3f@($!RK_e&T9eA_DmY 
z7evYyW^PlXTmMG?;{06Q542^d`D_Rl zj}#g*^o@Mn2=vDS0QheAi<~bJ<`!?yaiH|vC)1lu2{{Wt$b#nIo&4y*2ew~Z`g5!v z`Kr8Oi-=9g)=nY@Oqhj~bPs%4To}cnU*eRYZ)^S4&Q}XBPMvIif)M}tF>$xL{X`wY ziZwRsZcsyJ*|h#pEq6iimS$$WlE%QV5`k>O!Y$%^N%qVerYA=<7fHs`^0cGFy0H2` z1!Z8(hx^)VSU!h@^jJs1wEED(6JnVv@kBELJ9iJ6qF`a#=RWYPIKEhP)hUe#$C`)pp_oLmv001LPumHV*A+i0V2&4O z8;Fv}fdv`eiMs>5g;q@9tMCcKB<$6!OvYa^q4tBjR;lj@y8kFEp#(&R?ZYGN3V*PW zSt$?-=)}deZZsI{2eX8-WYu$Ua44%HXp$Xpv36e3wQ&~>b}~@zWY29$NvcRk{E=Ak z-_5r%o8tJrFFVjDhA8P6?iH*J#-+a-8@%?Eb$%D433?cK8`?4a%rn|FdjDvILt%F% z0O_o!p?FX7y5$dAek%|1T)t-%f1WU*r6A~Ho--CMHNku*P{=T`uEY*qBUZ2hT3$6E z3X;n~ge`y)Dkf*gK(|{TQ~gC~B@3YrkE-1c0=X!i}X0+P|Yp6t+m)2~`fCvpChTqz4WjU*8tM5sD&$^fVjwA4SA-90e!u z1%B^_jf{x7QByEn9(4SfSc2g6AD_#M8w8P~ValA+Q92;+BzWh%RBDKmytg2&Y~lQ! zhmPl+9&=cNVK)k(30N#TftQifFR7T}0eTkJjjSaKnFIuvXUPpw?FOLvmCLoa!jthA z7DhJVZvAoOtlUa!&XM6MIlV$V=SJ9W2QjD*2bJImVr(ToTtb&ucpRNqA75FU)n{*h$@t0 zEc5>paO?wkgf2$9X*pVHamoVH9ngO<<@@(5aDhh{zkfip7&SxpkQ`Ox+aIDld7!hI zc{AE7%9FKU0#o+Ve2>%bg-m_}L+q7||C&v!{lR~GF8-IZ(MIMGK~5D@npJklQ}%Te zV;5lkD%GT-wHq+Swe#m&4ia~aMXDs~$4Jr??y^{o1zfbH!e}-?T*8rT;9vH=P#F{> zprK1|7x;}hJ@)#l=&eH&xpS$)29Nz5&F!5P85tQ8zy^-+8<8nDm3+@iJ(fIPE0~5p zW;a`&$DLEH{FIU_mk+Sp@QswW20N^*n?S#%Wo4+}<$sQhPBE{Ef;mdwK6u^wo)|yr z0nmqgHH0ziydeWn)0N#N0H_|;K847aL?~7RtiS=WF`^pBi~PBw13hep?T=|Sx;U-Y zN|J6uPvKgU!08eLOa5T2wp`{=zAB^PAo^eC!nvVdvvb+haaQThC7(&n_egOINfY5f z;bcE&t2|=td*iky=X*v7@wb0lCYtn$#r}td#5fO?u137z48{Pz^pT<1OKiq@^ zX_k1B?K5A>2k_4G8f+S=S_d*eLX8SF$)!i}z4ZZGrdjKmtzyA)o_-geD4!gepi-LV^fMkorBLDC3lK&ij79)T3JNWTOB@wMCG z6w~ z7xQl}{`#cfWJiRfiO$#S60h97f6FB7eJz9VS}Z5KMq}{4!aYf!15G>D{h)li;fmQ~ z3m=`^+QAJS+PqrvsKMGg+DmRlkKfD-YQ;oGh+nS{x^RwkI)8-xKq&b5Q}pOuFcS%= z^KA+X_>=zCqLuw${L`PKKOQxlxo>bva(UFpeEfh+s~lZB$kb*>-^y1y0-ye~I^auV& zH^WP5bLFLr17F^t3=X>Vr|e>k!t(!KPTljz%Xe$Kdgr@QjiSvyz7{LJxD1F)bI>8Jxf5CUJkzsL_0ZA^?^ZA*xu-^?3 z9zJ9wEm0nB^xpZ-DgNEr=1jSp`w{>t&0&ZE{XEvVee)WmFMVpxdq0SfG?!Son*s51oxmCDnDTRMv+`I>wM<{ z<8?^D+3GX$k5BphmZq)sF$wwM>CaBJ_k&yM`yw-bl{_ou$?I9Mj>jhfqbWk3;uQz- 
zuh+nr{T5{PNv-s6CAo1!!zKC`B9-I~{0wSr|;#^eU&c16y3{Ic6rF@V? z{PyHj%x{|{Vx?0i&PbtXmbo1_wYRSE`(z1QFUVXrwUD{n8gH-QWOn3mZPBw*SJ&&> zTl456B{2%Jbv3oW^hJjc#VUpuQRa3h>@l4rS=nef>A(hrg$rImeX?VZglqRlzge

auLiRmMVL>rw{~&*Tg*jSt2`>tiMhp&5i;<7*_|rp>bb zk3jq_so84w@x+)K$V6UlEqAs{qQ&N2vubv!>V8_(MdU%4{ATegnLC#s?rOUSm;fK? z7WQ~T_*~rHHQQF0Nkkj#hL7D^2ZT+Jrg)#ojE{UKK3?o2NgK>-ZX6#x<@urJlyoZI z*v5n4d`F!eK)PnUzRY5kwfCn*NZ#{1k!gQO^^j0L(yD-->sEzj33cvRyx-ru z&(PRLUEV-fYXM7s11|b05?d4#>^-_JL-JYa&Ua#uIu7qSCY`@D_2Z!!kS~T`Y*skJ{W0XHea{STUQqf5 zRDIM@OU5G;H5I4Z9XmkaOCrh%&{&1Kz%5ESb?cWVb_02q*Vuke2m{}S!Umc%OQ$P; zSo3;nya739u+e|5?uNstgLGp4Q#_o3>>muUU$ zE^rB^eh>9)VOtcw0YY32qqS`?$Z08w>n>MwyXIGemUf-yivh)d!Mws`M5ppvg! zd+KVK*kTBd46njk`!F*ow+OX!BNEx()q{fBNEosEzX!dr{adB z%r>}Qtv%IcBz@CRUUi$Mn=CRolF-@+q^CtY-FiH~8Uu2aFmATDyN|Da#9AZi{GXFw z@<7A|8QbvUw6$iickbSfzqvPdM~(|NZ8NS=g4F6DzPJpXOZUV}Jx30G(-ff=3jfj5 zFj4wh;hkO96}*&&ZJo0P$HK=qnxALh~VLIwaSbKiIes@0e z2GWr-4zP!8T@s+16%|b$4?SEWRLJE+uHjwg&W%Rn8AdbZ)hhSeV=rq)^^TiV2gYj1 zCZykazhC)|Q&c%lQj)3XyYaHG*^-Gu`gv+Fi04y=*kC6#TpPOM>^}C8P-0rnN^fee zH6aprp_JH6Y1(8huzhonWMPc($lHxj;0nE?7>~9-G-z1%QC>#}Zdd80$<;F=c0iav zEe8~KWs5G{M0#Oeclqd{ZQs5=v<;G|F8IN!a`2;abZGbSp+V%9XNE<#cUIxZL_xQQV~%_pzZ1&qiAh6c6z|)MJSA*?NFZ&diT)yIsM83q}RGa(3So!N-+7SOYx>dts zed$U^;&y9(Ri?E#G<>(&l06GfR{k2gy*~Pp8aVK8_aNPOehMUf9WgeAGn`b7Rx;L` zE&p|0zs}+$lh+lOEcq33+-DwKR?5##Cu=u}uZ3LB--b_diavNq(QFZyk8N%wegGF? z-Ej<*oHiH%7C4MvyrJLnA}L-`Gfu94#T?%oe>gcm)#hwt3C_MD2+}{YI8)kA`v~!) 
z>7V5^S$U#w@1mR_k2`MaA1|s$sHT{0KHB;DkrIbo&$NGO{sOv|6eN0!6*JtLF+1 z1&Q9XdmX4_cNAY2c6{2^BWuN@-%=QuvSlfXJ zY>cAFJD<@I{P0wExFqSP8{1ucqnATF;w7gZ#r5EvD?8nqZYCK22VAJu%1mKU$J}a!zgf&G zp_L5Q{z^E^5UgnE;nNNLLtRO+ilWH7EN~G$0i-x^s)E%ho@=HCuAXYR%@(HOvV=Qly|{+k>MjL<)&Ryum0v&n6|W?Jgth6$0u0Va2gRmM(*E5(tKSk)QAc21);E9{sl& zc(`)!`0P^H+W#A^+<(;?)W2B6(mMaJG|^?5RUw>u+A=^yiRYIX_!1z2xG}+EGA0`3 z@{=u}Q+w2>gy!$jTEN_5_-*1}Pf;FD}1*U#puD zO1UL+0Cr_yL1cL&XK!p9`LAqtX^uY?@}I-vk1TUJ|9`@kOX&Rc<|?>-Qs`goebwd5 z;-2^!oTn(^gFv=fF0<@^Q}kH^EAw*+M)u8{zW|y|+^&b{1L0J3=%M>K6swe#qsdQBl?vq0Q1Ci)da=EQEV2LcW zS0dLh4DfI9xO8)st$`9*)ajCB{+2$N9IR(3kNmk(AwuH+s6hLLW4Q#@m<^XA9`){2 z{Gre8g9{cUmR9yh&-TTNpTCZfyEIQ*TwVWfD*zFd3;zv3{;UeUGV@`=(#y@0Pi{t@J_|0cls zKProg@clZ=;(V(p zs+t$XE^)_YOVNCPd$WKEx2m49^)g(UQlw82eejlp!OND*3M!vjcH4JjmwAm>wKD6Q z<#3?Fb7g}8%Te^_VKjm!~3Lth^!;ZpItJr)ghq zkP99v67A=w&#&nGaiafty@9Xq7wT>8mRLq}x<~|%eU}0K+)U)+?3DldCP+SiG60B_ z|E;9{uz{D0Is#pijG}4x3KYPYL9an_1qYz84uKUV5KD(UaIO0OQ@pWxUJa`ELxQL2 zY(T_4GKEI)r*Z5h3^UDBIn?EvV5!B@E8pw>Eay~UgBm3G(88x#O zUJgFj&zcP|jvl>j5R%Y9kCK=ZV+ECT=V3+Vl>M2Dxa8y!9fS6XObr~~IC{a*;H+F$ zeOe&UcjLr}%E9O7Ljy!(Pm8d7nWG518b0tNt>g!oeEyjkHL);CP;HqND`;{8-X%(m z&+ZTbzzf92iFg^hsl}aV@81qDf!rfpWMyTbjpfqChZJ&Zp9CY6S?J^k_nW7Ola+i$ zvm`CrjL04z4D6@rosCZNQCt6&uzPWPjiUNNnZsC+P)u$E4;<~UpAq=V z(E-JE-MCZ~GDdoQ!f;_CJUZx&LiicS;L~8S#$>#_HCTPFeA5wvZ_R|wy4}=hoFS%o z+Xw=-`5bffKFIPlCJr(cZ!97+2r>hAaAFB{hi2rA5&U0$tNkBeqLwVNRB z$_{AAL;Y6NWp4TmB9(SRtPCmu9+MyaN=tL&&Vw@kF8tll8hmXW#%xpBu$D%|n{}6i zUtL%`S~IcrEItE(bWM={;Ss+`3RfR!TWCnUQpj=PCrGmmh26M%D*zD^f=*2|_WAm; zfqTgs!Rvqx;B?5#+yKT`rN@@Ae%f!ZoTjT8E$;)%JyE4uEmTC3f^+vbK9mqG41ChP zwRmgU=_Mk1kRVMDpW65j+=wePqAzHFPP1vFarQe&S9QKIw@bm;%^w3qU8M+5IJJCT$Z=&g7GZ>{kl#s$+4!+d;mHSwC zD~p&BGF}8(l1GfRH|PRRd?z+Cv>R_6jqI6HTj$()IKdu{cU@)(62? 
zCcW}@1}JE5C-bY)kr;tb1~3yV{luvA-gMfteX!@zijiBfY+sCK2V1uH(qQCy@Xc4@ zky?)p^gP#4!Bd5^_%EjPduX7HW=p>n&R<`PWn<&KTl%(Zg7{vgOHk%iH%F0nSaW7k z{r9FeuI*fdx0;;wyQ-brS|DkN3M3RZ`*!JV1MP3-VcR#a!iUusPu1L8s}`{Uu5nFT z{ND@;(VBQ+t|5~Vz)t?tC5yDyllf-d*@F_YdY2SGp8Fp1D(Vm03LJU2P3|UceECsu z-Rgg{cSvgBVC10`uv&Lk?yqJ07whhRV9sg`VDPMbzV@>D9VkJrdX*6j!b|<4#xSD7 zutD-MLUir#R^q0#dJ!UcEP3F5#w$?IOUAIqo&D#C1T8>hZ;#1KP=urgyzhUa&>#U{ z^Vsh%3~Pm3nYP=-%K3?_CY)%MY~%v zo-elM&vWA{fmHG8#{WtHe?<6SYGIqfG~L--ebEg5;-O{SLRyafj%q-xb`=-?1Ec~G zn}Je$mz^lysyAVA@ONwhBKM*G%pxs`l>IPS3`Ahe?Edc{?)58-viwZH4efo~8QK4T z0MJT*Zoqks7iJW51*NnU1%4sG-*fes!vDPk_)1&#ciiv=;Jz@SFL?dzK@HmEi`U6+ z85ghrjzfk+7*!KJazS5U@;pcp?|%Z1q9G4uR@1h5;63q&-_0QVZR}q-v=sY=#wc8F z{x{UR#(gC6Q16A`&$~>B-47NWf%djVUi>%s`n`#-ARKgGi_0hmBiYYB2E^H8!RVq* z{xyHBP+~vn&+rOjaZxfYPyc3uD=1$xJnZjQ@6n;e9n!-HLC^7p2Ej>1>|S;{u_SSc zoBwJdpN0LWyIj49zw!bgH;H0{ew(HE_Hy%|I91T4KNjeHMyr+r?5lpCzX(XadetuF zS=sjOe*sDn0k*%?FxUzytrP8gDdzZM)4IZ9K@6A^xIg{BEt)Tk?+f<&e;r+mN%7P2 zOaIS!z!mfO9O8UQFP}{Czi`9n9YQ`1)fBfHbp2lMEpw#c#q98Rn*GyI;}1amJ$-x$ zLH;zS@X+AqScTpXTNXObKM9x}TX@$eGye?VH~uQHwpD%r_X-)Xuu<6W;DNnA7rOAc zqIyn7eHdzhn!(F(LWU-y$HtC>%S+3L^2Tvw z-F-t(zj-VG#Yp>g+sNa&=}-CWX$6+_=s2l)dcSg3^;jUYLbgtn@Vfh!UvR#lk=i%a z>D5e&58g3wvV={t%cL}&;Mrw`A~G3e#4-yJv521&k+f(vHf(8=V^L=%I%%cDuepF{ zNuum+iK0oo0e|1SI~|+|OpsPQ=jRbmHjMeGMrBb-V9M7ZiP(1>xjjanqJ@&20GMk6 z{pJX`o8!Xnn}a}uK{fjg;r0CMKrkU{(ftJsZWl=$l{fcK!H4~Qz#i*q-lwAQPP5$c^qu>suJ*F0UGLLj!s`oM0%lbA;agkRK&j!t=t{<4OpW&I_ zY81RRb5Dxth}b)m9v@Mb<7T5JBx-}Z%KN-4^4qz>DP~#?S)#reW0LkVtv04;bON_$ zThsBV?aD{L^=_Fz*tb}N(cU(;Nw7@c?%7mJufl?W`{}`n3TSl0H{Q9u`SB0;MTjs;qcQG$u>uh29)6o-Vl0?*rV0Ga;BV8Q{x^|i*{B>K(3r2#{OVd=>pD` zc|)6s^rU>q!p&3~y#h0Xei9D`%jz%*h}3?Sth4Dx>wP=!evb?HiJ6_->%-04 z>bz)_rc;KPoN2jCD?ldG%@?H!l>>@Q_U-Uw)L&4p1?U{(kKS_inHx3dD;Z2mdJ(B5 zrfB1Jm{e(LNi!vqa87y%YWWVghjl}%v3!TTK_-qyioAf|SC5AA}Us~&B7?06}3Oio~&ct?*kDA+5b%bw++Ysa8BQ5|vk4h6r{yFac=tyKs z5igq+I7vN|@7Us>Z_#ODR{<3p43J`~atZ@Eo$*Ujz}iT{=#PpcF0QCH9ozMt+rw+_ 
z_Hy_wrkF|~mtRfQBIoiCh8PfO+f7@OFgRUkVc2MKu2kE>#KH2^N9;}dga4!j69_)~ zlt*x8UXL6tuzr732If4U9`M#aHt!k3{ndrm@vq9+($4nMISVhHpns#AzmAH?}F{Z?mS;Yl9e!eOZ;!NTIP>)*Nf@ zw~H&=4#)|CA7LY76p+r1B{9bAJ<#@;wN&lBt)dObD9rCtG_^}yP6T>)k1nTV?4tpZ zO5wYWO2pq%H0yRB$1zL6z-V{4V3Jz;C2C05ktaG)yBd!uDu#Wj+lcb`=QJ6ACAy4V7%h6>K@ zXkZlcWz?HhDozDN;>dGl5=N6G{BbNVhm}@5k0{UE-nRw7@J;+ey?3q{RL1Cje**Hz zk6Z+5CSPe_7#F*Nt|iZwJKXm$K3KPhJ)UQ0(NG|19UCtryl7kRqef&D#oL{{ zNuiM}u=B;7$VQW{8e95-@d;T#GkzjIKJV%C`q5$nLps{k7lS)mE%J_^Eb?tMAbzUO znuEjM!rk}Prq)5}^BNb6-4*83;#0Sf@#jEMP+EnA(Rryf9XB<^N1KvqnCnfv&puVh z5K~6kf;=T8sJ?mzb+I$!Z8DS8TQ2?#gMmS#NWCZAJbaAF5@h_Z)OjGp%~hdp2qip~ zuYrr7z(oOt%>mf?Kv%3~q-V>t+Yr(^N?G-watmLd=IY=MaR(LlWA%sUm!!!?j1LIi zVXsTAVSUJIieHu^qLa{Tkcghp^_PZIFvjgJuHl-DX}u`q!tkE;#&NjR_+%XIo;yar zUK5}KnvtAl8R7GKByH+r9DL-39~j+!X1_UWs-teMs>E@mm@}~UK5Kh8p|yT4kFS)q zM^%dJGUv%B<}ipVh;+h*LjR5ObV^uWxtcp%h~fH8O}J*b=X_ad?c2`*2*vbIf#gSk?0;(=Zl^= z39m3sXmSpa5Jz)%YT6MQV*oS9y_zws79UKobxy;97BqwjZlF`0f+RY4#Zv(Rg-=`2 z&anExo&(F!xogGwvD9z7{&qvmeRqb5Y7R@noDw z;w0U0;FUZ~9<0TxBG+#QG|6|QCBZJ(oc6kB??fdEncqa|FBm||M{8Fmp*8Z)=ynsR z?6|^6%`8S?xtKblGvR`Afu!U@C^`f^cQG$gm-Qg88B4arcqpJJpwC8(_mw-4Ep4>q zQzuwc7|y}cAR9lz+fZ`v=1wTm$>Y9+G8cQHieWPJujATWU(Uyz)9SzmlUI`A=1b1`IwjVjG+9utTn<@ zhC~;v2}uQG2x`4Tk^bvdmNeCg`-Y&@_NWlev;{kZiE|b|3@hys`da|@| zo0=;olWyAoLaP*Rv97Se-jaj@I^z>3!AA0moO70JI|Wra&1^sNXvT$RY^#1;$IWMK zSUX!MD~C}J8@$-S&iB(u7`mPt?}(1ZAHan2w2_=0))nCt3V`3!4hmr5WP8<3_(WPy z{m^VXs~~woe`n)J3FG_rX`7n>Q&WnQkx(!rjbjy(gOA&2Wf2!TUlDoRsKcW03sj;- zqqt3Yl1|*laZ;oYVov*R$h^M3thDoC#tFgV)YUSN%?Cb^Rr?rBw%%hS7*+2x<(7_hMWvg=*E zJ%;zhlJi(;x(7wH@oNqvjXvHBZkO20e>QR+>alN zow56U_XpQbKxFLRBUf(aeCXPE`;U zkX#pjU_ojJhh`JCe4TqcY3$R zmmEq%<3c`Rfxwt{9RlOR!Hk%WJhDA#3OP1JEdlZ)*q+WPGa)ZDDSs*i)ed^!c#D$0 z-Y$NNo-4BplmPd~y)1e=-Axi}+>|`Xl5THOa~vT%rgpLP;wg^+-*15ckB&TkQQ86w z^J+`y^!Xuq>V7vpb6x|e9GzPoyh1{r7Seav#4}WQO3Km7u^3OYDfCNU+W8hNKbP-K zfC)L$czd_eIA6yH!-0UWX9L}IG6gBzon<#!C=79eiI@OP(r(`jV~n2gCBQO*J~SM~ zy7;2QA0axGB*k9pi`64~2>4@B7m`y)_m|G@%7>e)=PG?y!l)4F=XT`TJ6jGMAuoi% 
zf*{hnmWS^K!cuy3 z?L8Z|W5N%@!;cW9M@pvQsj2^F+nZ56v4D;mJouu$=j{$%CH2D?hrqFDV62ZY4q$@a z$%w@bYe91|W?TzCq@Y3!nYKc>D^s@6O7OA0(#<#Fmhy-T_W04Uxub^5s(oL^I_zuA zJyE7)9%Z`|2mb5vn$PL5UFdSZU_YAIdGc-XfKo;Y<1q1SQpc=BMZ3hjOc+6Y;Lder1jl@D43Jd zbZAwp#wWA*bX!#UR+?~VGln#iM=|)85R>l;V2^wNFacaNOeqk87%_`Lu@fJnI`4!O-%&D|fu zKo;}FZP|7NdmPQt6YfNG=9O=x&C%%(!DhnGdhM;bjg-p@;amLN910$4s3r(>ObXz>PQGgoMeTw9SJaCV^4r?~xBu7&$ zYmuBE^hyJH5{$uBH3SH-mPyE1+m%A1i8hVlm-}s(Ydgqb?VeJZI0Y&g6vWdhFN0pwA4(u&E)RiwVt@vmYA6-7dmqXpRKZSGB)Nw0**0yRqiy_eO&pydq>x72yX%G#w3PB8F+@7DkG)6z7nTl_;lhJbC~%x^ zE+1QFqeX-Zp%bDi>{5wCYoh;2m}!h$&H%Dqyqzuky$g0y_IeXpSGdW)Vf$b)$+84U z5N#hFZU`m;8PGg?)zm`xSSHCMKiZt(YAIJ-Bn0}O@w=Nh?Ghj0E`1TutK*_eu(P2ppm=r(t$8mifjkF4Za^9VeE+=)dL~UYe z-OY`cxnTKaDj2AFM}SM50_kl2Wi-%}wWNho9+^*{K_%`dMgtVG zU~u9s&NvPeNpttlt+@8qbkglD$_{g^vy^~b2y8q*bp0*O>KFng39Wf#vTz-m^2o1K za3WL*itMPIrANF&qO$%eCC}##8z~YZ1X7qy%T}Oei_XOB_C1CB; zZbpK=!Xob-0s_b@yayH`Dc)AiTw6cd6hbfo28I{yk&(e#VLpLL^-{R;QU;;sS>kkto-AAUgy3e~d7L`o5A&XUjnO*WyD6kx` zPd6dn5kp2!95|HlfPd~7#(?OAo<|4oWTqAiaPi!9pXl@I=Xx*{OkA64zAhlR2T-l6 z?fRy0oDdiN<>b+83>{uBiIK^f?+Tsqd?`SMzreY2g_EJ*Ywl(fmGTeP?qoXRqQ2=A z1#uu)ZnhQ&uArrZ$7e`08ALzFCYp&BFRxrCpRa-OA8}w$e@>h){SYW_bcFMxi-$|X zts(|-^r1U>z7&k+bG2L}s{r=9_8H~;u9R>X=Kv@Gwc&lLT{HU0&XJnNCDRPJfoky& z$NI*{=53G#hjQ4#X6X(H@Eftb?Ts%4ltBHq7mwT;$KYID8S5Dye{UBO)j6+HL}ktG zH|Fd>(MNAB|- zofmK+`GM|a^}qONv3EvIFvU!T3LBA!c;Go6U{iSBBR zxQ;&PfBd6h!BVZb#>Hp=DhG1nMY-Vux7M_d5v(h3`}Mc);8rwJ(~G?urMRqVMwV~k zEmvQrZN3tx?M60w?rjI&^JsG3*wOs-lnV#Jl+l-F(HTB9krK#8Te_cX&~c$my@jkhrx(Fp;_xrisKp+(c%3@7BVL?b*SFPC^%F1s9*Dv zUtqBBujsivV#7T@$M%?wY=`^9y4p}CU=S6?tz14JT&3-zH(n|QseBdI_YzP{u*TBP zRc3DQs5_y1;YOz0;Ke)uFeZB%WwJ@ZaT;9_Z?OvbQggXrWIz;nClx~3B$8oC5%9Ke ze-u*gdeP(q;9!CX0OKYV&?+V84X(OIC?cJuuarP%qs{O&-HF`EmVkB*j<_bDMlS$A zAniE`lf#WAsti(@1hsZHj%Npcl0L_)1@3!rLu7cx<(y`{6}xX0SVpY_jNa!Z>Fjbi zNo-Gv&Y&`qTFZkSJ(KKtWH)Vk}ccS7@qMbtRFrw$R>^p0Q%vJ1X_rG$GcupUv?sefg<~_lF0DF>_ zkKBF}P-?+1>bF|@|KVi#T=Iv)hTJ*dos#p%HeOxT;X^$=6Zsvj+V|?vA6s7bO_=u1 
zMExL{^=&ou>J`yT%SZoR$Z2Mbm(J&SbWs|OB_Z%y?X&%#d%=DS%)s-?2N!1M797oQ zy+4B7!=bngj`iKLy+OxhpXO^38{`l}p zgV)21De@-7kze|@ddvPV&dO``hC=gLQZ=D%)7`jM8I+T`CR1}*k}*i>U<@y|34Lk@ z`{P+DyFuJ1@fXKAT4WN_y^44wruL%?2W))0tn?RW`%SSjZ`31EdJu!epFDEs5(vGw zANt64BEjR%v+Mt&VPL{Vtep7E*Ac#HDcd!{zvo=R_Tj&uB4g)fMayCvr`k>PHzI>w zF@O8~LE)}L;w*)MLz`WE!@YJ@+i7J=7O8i7UQI$uRO>rAcJ2gMcdRgki>hC!&kk7K z3--k+eXO)*tBP2;_ukA7xvL-1I9Blj z=D+TNXu^#g_Tcuz0tM;K7xDXKCfg7DXbpHhda)2HDu;H4r&DJh71lm z8idf5L`Afvo}=G`2JD{>U}!%h%e-y<$dS%+_j@R3XgGA%aOC{lO-%DU$6Vl`?I02{ zN9ctS!0H~0FWe5lcvDLZUz!f+&Ij3fsG&;x8mqQW2M@p<^=IE`1drUQCG4)68aWi5 zUG}e4*elS44d52oebOPk=1`T3PwVGf`d|qe${1Crjfe`lDUJD?gT%Q&Rs-F5l0G8d zcAsiu&iseR6NVIbs`sh?z6`M;e~l1oy|A#-!*gH2)(jNvI&>o^iR>D*6|=E@u{Z%9 z8I!!9BXw^_C&kFPFy#wgs?zJUDSq^~GM?P$`jd95bc@J)&_rBaA0}FJBh@zc2?OiK zjnh;#bIEL`oo{4tg_Oc;7aiwEO2w<+-)kj;-$&~$9PZ0D7m@Zd21j)!X;I z^Si?+hE45GieKVnF)^Jb&6mzS-aMv)t!;hnfCbv3#$5ae4mYz~>s8vKM+5bXPG?eM z^amOH`Lk+9F<&pAv4I-UxzNmtxFC}zlszAPpXf;o?{qDh-ieO*8dtCBn=U$xl!Id@ zSr`ix*{tkWDrRDUcZVD?VWv${eFylx*4G>iFwBA z`5u<#_fseCwwCWRR&4(X%vxUORw*ug^Ja#DZ6^MqVHCzZYHy}DW-@f76Lk}ls{1Nz zrMkIzUT;SM#F3cr-+;AmksmX*&u^G(dJ*!=#u4DJ)2kcL&^d5J0y&viQ z@wLnu8>%2Aza!c^eF_JbRRqZN-B}*3PYF|6xO1Gzm=zzKq&;MD4cH;Re=uk zh>yHr?^ZzW8XH1vhz{hYdP?^$+|kHskkB8$vcW2+QhBM!WjyV}@C98bj~Pjz+|G$3zy~mXlbd?g=%`;gyv-nakA72xRp@KL z`DR+|l17>Vt$w4XqBo8^(Ne4@;OafV)!&g>ceGu6h)rh^7S*x#Nv|I|LQ##@piB;X zjFbXDV)4g#l%T{$#_gp-O$S@awbuX_tgsHMWa`pd25)EV2{8!qs7pD|ly$P(%1ajk zY=V@ojCU+~bJy3>$Jn#{F!yT{*#he1Uh)nqjhN!_w;nRCsFP_`QpHem{ZFI$$tsgdWY2f*3ak z?o3bMo~AoM@J)KT;|FOusZwZE3afaoJ>;?4xTqsT)EBteiD^z{%w(xaG#c3u-7AO| z7cE24uEYXKN+6C6>&<2q7=B0Qxa;STnoe~A2Jw^w7(iz5t;(4PK=m74-*=;!a(}t$ zSBWpa4u=maXjBDOel;w=SOGtKuyaq*R(oG_Y}B4243JWw{1#FWn(_3pfB6)sZaYqHP0hrMXAvyi+B|Ct%nF zzCs;8(M~ykOK4Q42DIrMBT)%!`jM2r-!6Z4U;aVku{1Z|a83d{PK{jNCd&>c+|y9g zlp73@Xw$V)BLk=h?w$mj3P96@q}g(UD&_3>{qzqGjlEj+uHm*ieVwf4i-k7TX=66Y zlt|EQ#p@=t3eGSbG0o)$OW6V#KjE_oEPQstXgb{ZV{e{~huN(egzlun&d%NVTiK=V zd0ISZEgussl_i+>Y}CY-??F*zCk%M!)M$S*K1}M?M&nZ6y{p;OSs9P!%Lh&y 
zWQ?&=;@Vyo9*#>t$o84XfaP82b);k6#EFBY9yU~=159XAp%sl|RPCwIzz=8V06f6J zZH}M~XoP_+7Y_wNiC;8a%*s-E5lG`1VCGuaVjGvDX;Ums8R>HyrMp){VyO54vG%UO z!TVy|oH&4q6XPW38D9lT*t?5|M6N}xOB|8hJN8t?w=g1DP57o%wlZ*PeE5U$b@ISd zt5EJGUxDZpe$icnR`X2-H7`A;BbDWqFr-iy{0Ov~1_11y65rprHk#Cww)XyN(fIcs z@%s@fZ4qxM7pi4b(VIu{yI+mS#7Mdl#Rurm>TFY9_HMEUD?ISjE@Z9kFyoyh_q;D& zzGAEn&9R$zm#*3Ant9YB4{(f)Pe-;sDL=#lX3lNw)T`sm(_07J;KJYTGKL8+(kSNf z%(wP4K;+DR<4G)7oH!03@Ukv^iVs|~%E~f8U_9B2>t1eDa>^Lh7a1feo$w(YZo`h- z-rlRK9(kegRE0?^9!Ppg^4L?bMbcNET~(U;vQ@4A6b66M7$5e9sQQZwg9hK@AXNr3^OlPHJ`3v6054= z6fI)ip<#StJmx4Y1JspvJ<|J=adqOel*G!x*e09Qq@tNiJ7c%A%^}Y(61sW)5%L^K zLk%}w4zdzfBXV5Nz0>a*eKJJDG7oBkT~%yiUiI;pb_=zelA?8uhT|7!_JH zx&S3)+RzP*L2d%u2|hJ&IJWcpzy^+m+NA~#Z`n^&q?<*!?Nd}lh^%$NVVm=jHMy+q^6XwQNklpeipF&vbaTeh6Gc4>p3SY@7*RoH z_LmD7%XEe|coHEcIv3B9bM0hYI)bPow)ERcamzXT&yeR}U_TCAc!@iVQ7tSgW!Vzh z;4x6t;BYeQ(dsS^Wc4>j71ru32j{4=KS+mRVl1dq2}y{z2UnXiR*4SIOetht31F0x z?7*tX!f219@p+CWKMsLzqz*T|P6B^@(cyExu>POJx7CMl(-}>Eo!3&-=Fuaik7L~jJZNo!*EgbtC)F4` z`*ukUXP^hik}DiUeH?hOkHhI5%k!iC(DX{)6gNhYALy|z9My;CN3vdzr`0T^kTh_0 z2b6j<YprQN zY%2(8O%vwYSNO`ggkO{-uYE}h(z>2kp4%|u;taN|f&C3-&;>x#Wde?=h9-jN0XtD9Fbq{|gA!zR3x+d>NqcJZ>;8@GzkMC%v z=)hr2gH9r4S~9!WfPAqv(vO=#xDYIsKoB@Z*5O!>OtgfO`W!p(#IdMDCt)rF=a`YD z<+#!zB0PiXDgGd-kL1WsY|1PQcEt6p5-G;G5|6$pQ;;auI^OcV>b5&FC*8(q@^piv zJ-w7L8%O!9-ZCq)H?{X^$F%RU_EF88-T(*69#Iyf$FS)#YA+~SmR<@?(0oXIY~k6q zmo6auGB5FlYE0+q2*qV9b&rSntn)#~s2|XDl%n%t!*uWA^z@IG^4_#&+@Xju2<2P2SgC={Fm=Fl{F?EEyw>G>9p%$sWu+1 z)#)%uO&RNbHZ7-Vr+d@g4(?0&&}rAsa@dTRp@9q*dIAVT9FNUtP@};X&tUe!Ojt%i5%~S(==b7pi<^7E zk8*X&60}Nx2EQH5zBredW$rvuUD7;aEVAl`(2o_rrnBs>j}8xoD(Mk8V2jhjcjHL( zoZ|;aVyVkead|U6eRDR+0uzaua75Mbce5-mcjpt^E4G?5%G-6fA(F}5JJ;+|YY4E3 zGN2bP4@V8|rrvJmfITY!EM=kxmF0$|eB4{@oDF`XomON)#;Z{?G}L1kM!@te1P2D= zx><1fURd)T+WnD;JWfeS#gkLUJt>z$!9J?F%B^I~Ew%JaZwq@&p(YmC^E!qV1#M(| z@ci#*yW_b6d?(WfD=KVBv@T2!zvz46r7}KQ7&QIiDlL zazdVtu$#?r2%Y15)*q@Gk34VZ1D0f-6`7QP`V=w$o9E}vbjfQ6M~9)nht5`}-t7nW zrM|DEn5`q=BgNh>LzWRIZE~QQWIj3hAhFY~1<}_F*iu@Ok-A}{6R<%9niJ`V%}`*s 
zyk-QBMOa{i!(Ta(hM=XDj_AH&fPwgm;L_l$WsZ5XR#9%e%8zZ@ylPR~n%*LhU7Csb}rioe^7c0G2?K`vv;grDf;skdi z-QG=oqKN8btO7FJh-kvOtf(p{il&srjLf?lMmn-`sOp{PI%0}C+Qp4f;oiX7k_3?Q zqe$Hjp`phIVSP_vK!*lG8>s?(|Lls$sju09(}iviKVcc9ah?)}FO9*@Qgd z3H<@usghV)u?}Cyr~;r4PV@?$tf}q_SwUycIAB_!Xe91mnzuCRP2N$O8@aB|JLs zg;j#-*Jli7ul=y&^~nK!!-a;LdWU&>QN7*fr4x_}q|b9Xr(y%5gJ1fA3PDC!cOyu8 zL6-!bZmHgR4U(N#>QcZq^T@?QQrNw0u8SBFVIkwgrxxe1peV8HfDuKc`%++@q=cye z>NF+5{x@Vx@=Ii=%?#Krm--THokoopj}Dtxg`2wcguM6-r3|+0 z%2%LvH%51%3FYW;1t6?~h_Hs~mnlRjwx~_Lesf!~I)*;I$r@G4(r-hCvjR=p zSzvG-;OL=Qjp?1iG_V0JCivq5sQ<&)wKy`pzki+1Ir^T8)H&VUPNbWVipZ^ts1%i2 zHn(zT=F$#gSm$&K2_=_YSA=9EV`FJkkvmJ+*su`8%zYbX_Ir;`=X}rioZlb7=e_s) zc|XtdexBFs^)wBB+Q?22Ew{0$Fyh9E4-En@AiG;`4vsICN||Q#v5hCYmXFW9SDaFn zN10@R)F_e=*Qs&Kf{_{AWws zjy_Xx;;t@Ii?C~rz*+))lWr2$+qC(>T@Nh)6d=>Y_~xe$=Mm>&tP~OwmPS<*z32eH zPXlTT%H0_KoEQRlEYGLPcZD5v*MN!kFbjLk28CnS-t?lB2So)dv5Q11_Oi57@NXl4+4ZNVVf;7+un9Y?P2rVbb?K`7B1jx#39!6o&fzP&J#5D1etlOsojTXV6vKr-#AJEF%H zKaC^d?6Z$}A*g%hy;y0q5Y3b}nxNzejH@+bZDECGH!wVK&7G<8-2918>2>3eNN|5p z-0o(|xjMqaY(ix3;BkaRE@mSb_r>|ew*Vc#kF6J-Ps8szk-;gVH`ZAe@Wfm5|c72-lZ|x<> zG@XT}ESDaX*2QFewB$U|&Z5J(voCPhIZp}#AJgI!F{#X)h(KH*JE;g~&Ow#9SXQ+& z%A8(sbHcnlanyE(o293FGbmxi$5>~o+9|JqD}KXuJFPez@O09gqFYO%!;e$+&}v7)+kmfNN^+uBfs}$|2M}k0PvrbZpPJF!OoR74PF!h^+$LIsC~Ds1nH_MA z=yJ}QX80UY-ypc}KEBF5n3ae>iVFwG^MDLB)E~dnz^htcnvXhZ`n<{t5`5NG43jUkNJ{k|GRuQng7mDKv(uY>g zr^RUN4|$*>A5D^e4+`5z=1B49Cq&&A5ECZ4%^b5ZT>%5`a%7}6O_}jLKP(6az+-tS zw8sM_`dnN`-X)q_HRyavE*Cu8J}oZ?6k06pqn~34twN*cJ#BKs?ARBYa*HC2e<5q9h{`=m4S}Mk~4Y zhdA=YWZ4oMb2M~qAh;hYP50&C=l3LeBY? 
zhj!m0B|e(9^z)eyNKeuiHRlC z)Uglpta-9{c+wutH3z*=-$k@?4MwPpF0_^5_STh;RKB_NG2`WoL?F1}?MEvATv@(=et-MuvFI2RFN zvUFA4RrW}wX3BkOsM2^*q%4Z_%zBqOmFYP5xzAY(&C_%8>G>o7T~;x8gR7Id(Wu9M zTNtC8jOC@N)U2X^LEuP)ZY(0JBlq|9wa&dY7cZiPx5Z*}@wAElV$_7N_^`H<0{h&9 zn__dy&6|>Dqj{6j*LUqQVx;`1uH)l+Ej+sCI&j!7E7+96JAXOuWMk3b`%3M81987N ze6FyPul3P1a)1Ak(_^Bm-iu8_6V%*ic4fE%sHXqXm{~@2e1f<{R6N|^zZbp!7FhWH zjmr2+m@362UZq_3FT1pI6mydYmh7fWB2C1VN=u!N!TZkNnhOkdoGv*62Cc+d<(*k) zTODCCHR_+xo&A+U|1u@nF??Q9!q7eW4_YZ?Un?^Ej^X@MIrOZSXrKzsr=t&_4;_xF zoVh(=@n{?Ep5xpHwydNA7n}Xr;cKnr|C4K&pwk_Mv_7k)80B!oRaw(a70Qg(qAxgB zNAc;@u>(hz+R?&+2xU=jdYe+#W>EE?OfPwv|(9a44h#}<3 zF3oG@o!NjMMlI26!}*d!Jo8B5Rn{~ez9>M8x3qm5kuwFa6y;QLeD!`9l>Nf>I8R<$ z;QY7d6SaiM3PKBllz~KB8FqW?P$u5EDQbA4qL)_~F|`cGF>3zcP@2&^<=0Fo47xX5 zRbB~oj94nXUf$cy0nMPKZ4Fb>w8yGcur7D;P@G}Xw-;-I!Y53QhSD8{TRd-6jxUG-t?!E8dOaKlkf)n5Oz8;`ZyAI!_M zL*~QqZ!cHRCG85BbtpPq=D_dj6%6LsI>#EqC)y&yg@-&3R&PMhQN=~ecL$o<{?hHW z&@pW1`J2O0%4krn@in^1(^I*bzTA5LyY7cXEMXa1V*v!?(EIqtzqc&-c{M4X+_@I3 zoow+)1`u_ByjT;j-@vGM@;(Dvq2!51i?SMpE_!dC@cUHn+0T|z0*sE zJ{KeBI;-^DJwUagI0@Y{YFaw#I;duD=oO=nFCts9gk|DE)hIdiSdn4kE-i0Gaff*F zk>^I!&5}lQEIGpXFEAb8+y7K8d!$f|gPeG;FZ0A|#9ZNdSe517Zq!;-^(JvorRAm_HB;S1VdIl`w9o^c$YBW%@J|lyf604!?>6XG=(meV zhtH}tP!aAiq)w!934C4aqewYdubCfPy|R-YYbSnwS?U0B=gz+lgf~3|HZ>a|erIC1 zI90hoRXPuuqqK@yD*L$Z|B;iMn7?DCWXzww8fSEuGqgfXO$(kMOqQh@3%&oY%Tyf0 zIL_O^j_0|K{|bkD11MyqPLU!8_cZZ$fbZ3&09si&+&MwP-+1M_Gp#I9Ltm75eD_Dw zJ7m>`hiYc8#j1{muR*;s7JTpv6ZWfomj4<27Dd+ zvlCCtsr)3~!1nP$I9HIbiO43?$?5h3=^T4`!DUm7YL+oGT6kdS=umfv9g&==hMYC@ zg$t`lcZH>|uMV*DTHX?fVSAnS4|dWnrB(JFEjt+TK}Fw5e(>p#-jLGE+8v&ObxI}} zCZx-(x_dR9u_M%Um%(RHsTQBq-2%Yw3hoH={hpONR|h*w4&t^hJPM=TyP;s@3Ug8* zpbm#`#%OeSp*+M1@O9i|L=rc~L^(MPASXq0-N_XHn?rY~M(2#@ik2cU=N|pUj~%Yn z+uP5o6d#JP3SZ*XDpheGC=^SC9Nc`2z^t6_+~=JRN8c>|kCB(4P2b@+cTxFW)I3ZW z_QKX87N`PH`^L7j?ItqOyJk)AzwM(hI9WRTl<51uH!?h_*`X#v`hS>tjq7nI4|NwH z+5Ipk2nLzgGEn2A@3?M5WFGKieLtW z`Nyc9ShRee|--^JcOdblvrJbuGcgLw9kC;nZpoM`L9Y#Qb!uhY5QC|*^flF`}D 
zDXQ!R@-UUkGFOHJ&O{u8niTdqP-x+pQNV?(k%k*_AVij}uR2nenEUQ6J;c&Jt-J_Z zyQDdSiN2lB=1#6_%rd{PQNX;{J6}#CYMs}mS=WAiOq25n_KbqT_IbPmYj&j?UD% z$8irpIW=@s=nDsR%+>1U%D9q7`=lf?ngc9x?|kLrrFiV5b3K(9GhC|`K0FfvvLIV< zs3Dml3-4?D?RJ{4h$PU{6% zK%noHuCo3QU?%mhrh8O$lrab;c)7giY6|CK&P~IE%H5lS%7xRZ*q887me_wTMPQ`I-%68s zsd~8;ft6*Al6r^xZvL{;C0LNiSlA2AP$ME@X)lC$IyTa8l3+JsQh=&L`Z)p=m7Ktw z#@dm4uEY0zJ2@DKaLk&f7`m>58)bi@osPA!3Vxs;OS?X4!=2QXIaMn?dr=c~BoX%N z>Xl~gG6>rN{Zm{iQF~x=lV=>ZITG8$NI$RUu^*S*;o0mAF;*E(04!*CPy?qTD%>p5 zFjwHQCH2NkR_Ij6;T>7`#nar0`bV1sBTe{MGmkEIRnDt|qtXTMXVCavR=;`0{~UR9 zs_P?aO<;^xn0}na(Qg|TfKl4Ts}6NA8{#MYLO1iWSkRs|Eb<^sjE$w@2`MmHe#vJ8 z0)a6@44|&lRx8*fH*+Ae*xgkHHWQy1S3XPBy4WS2(qq5qIWjeKW2v7C{HOJTRPyN? z)kk5{`PLC_&fvkex|KB8h=-qQhMZZ4J2`6&$n!4J^GFxYWq6q`(E@A`J`+iMK?^^R za5zK=H+A7`fCa#uTU-F=y2m<8SBA#R*LocY&I!Hi{O@)Bm0}@4&uD^V+9I5INs|1q zBPJH3#lk+>FsiKL(pwYppfmNlJP3mz;w?+b*rR~CUB3+39i1O|?E-2fEcWDWtn&C^ zqnf^x0~wdTpcqSW=cy`$YFMYW0k&EPVY86*46P}HXCr2JbKJLS5Q^Odw%iZ`Y6f_o z%dTQ8l5!*QF6iC%>5IFhZ9&oH6=M|wxY>4W46QGaJ4p;YotkI_=5dvMJ^ zfHXh)6v+p*w~mc6}Rjy@sJ z3K^IzsN|hZm1?~w$zdZ*c&w$HsnsvN49^kgimQ}G7AA`a_Nw$t5@6#{plhUPF2$6* zC_YCn%Mpz-;#C_?KFCkLKY+(g;^Q3%tM6JnywXbKRgptU0IO3_mZ(FiY87MGvnpLJ zQ!FALu!GKs?G#+7Ooo7XKM5u;35-o3*hJyk?=BJCn=a!|Lu!#xwyOI5hyAyvC|;7$ zHs?+To`vKEK1ols-)J2{yUln!q_iLCZUAyPG)*o~=_p;Mvm_Ai3>s`XJp9ovLmFcu zBMm#A&$&`ji8t<`U2A{|UVe_`@TaW8d6T~AzDJ(ZC#@DUltWnq_3PAYmBj^OOqKG| zjChg>hRaAE5(8_l8T|E_ZEu+JzoG0xr9e@?vV{19qvl-AH)>Y8a1o_1 z54bW(`TV=u$*!=0rqOjsv$usx~uV>@V5`I_hnswuq&gfDi^QG5=Jc_0cd+Lr2fR&en> zb2D=@J7DydhS~tyB}!-AP~K*Vl1e2U`KZ3ZK7RsW75 zrOR#+Wk%93p=8FdH=#afnu=*}Z+I32mguTl8vWA3xL?P>^;{+BC`vq1{ZO$kn~WF! 
zQXleO56r=fsvFW#r=vvHqcT83Cr^vA&|HPATp}IryY90{5>JgB&hL9x=`hgzip8UY zvVl28pllNVquf*&TCh7d`xe4goav{z5;^q>0AM+Ci^m|F)(Z5}>p%1s^E8dnmO>E(=#g3r#nM$)?7Lm06KTPMaS_a1Mz zo(MHFB`M{#b6c4#|JkN6NYPl(1o`k!xPW||>t<^X)49{bom)19qfylYYP?))oLA0` z(7@|+u72CM9 zh1OSYknm=nQe|fyO{egRyfY}62bj3fW9x=?sRxZ9=nbb_L~97aj!01>ytL+3qIY`U z{bFx;q!N@Ic?q(MR!nr08|?lHrE45go4(O?ePV;P^v4tk3Yi_~$F$a!v9S%yCuBqs z@wzIO7Y(*z+RDx$x-DqPWzKq2VRP4I%$5Z*0WT|u)5ly&tH{@7W6=lsv|Y<}Fhqzx+w->D z9ih8KqPwId8w+?A0OH=iaN;FZF?GoLZz>d@{1i;>|Q$4!30*Iwc6GOqGA78M z(KV&TS<}dD3T~S&=-@P;Yd#5NJkq0KCN+aYy=%Kf$%(cbyI`8Fl2ZsS43jQ_{64kmP+Zlx zwTxB?fB6(T*nnWXsnvi6`lNxkdeq@s{RunoUNCrI;%aXjv6zx`M#h5U8i1d26TC|g z2F3IQ*h~iiw%hc>SvLnJ)s>e{Na`1+zg73Mex3>w*Y?FNwYVs`reHQ%nv zP3h5LD+<4)j?a-}YL4^26# z!ZbP>3jJ9K2{JM+Td-XY#{pb{p9b|%F5LSA?b;nR_?p!8Y;L3n9D5}&k`ZtMLU~=Y zw?8^-Z6rsP$`#B!?d<)?&i*%OPZt1xl`e#4?8uerJ;}Wg zI&$2XGr4#f$FqM$1PpfqsepIgt1wyOu;A9VwGKu++Zhwn(oN3yfS%C~+XAc|m@9C$ z=hx4Xjr>+t7;kbDm7Kookhh%4vga~FvqwP5ZzHIMES7-s`i%I%`FoLbluF^IrG4vL zRD^4ulzuc~kHi8!oK-dyN zSkin*P`+4Ea~XXIq(0NGBQ_A@tSfoCjsSCkv@O{5&^|lRs=ej_(5WJJh#jC68va(* zppqh{FBsG@96wIbLQGvicLq+?U>)oQamI~$k0#JiDL{HL;VfU^@nJm-!Ou?Xk+nVX zgiRM4XwP(lkPW^7O4b;gt?S;UqkD@ z9kivVsArZzSv5dshU_*{q6$>Sj?fu=8!mvTksOfC(yk`3u-+30to2M!wr(v!yGEyE z`SccnGiYh%W+I#YMi_6*e&0v=(!I(^h_@q~dF^m@nqnwz&X`8K+1bT@|$S9-Xku)O4}Fa;hpK zuXpC1zv)Yf0^OO|6@f-TJ5wn&UOUK^1LA`CrE51YZqedqol8LMWyU-w80nVw$ipdy zO^mq5!YrMvU8w8ZFUp(aLUC5NVJy7Pu*MPEc9d=sLl1^WJ*5@l?Fzh$7+dKH1d}or zju2T#Qw5eSxWYY3;M>;7bH{q?hMw%VHuKHERuY8BmTn%_7NV5}kT>&@I!%f}7BoxK zz3L^jFyIdhxDO;eB2|!;<(iuGQyND3W{CX~u?d)SLxWvai2GnJWq`m}#ly!2Cq$2A zjcJ%0ncxbYZ>Kg`#T+WI<`2S{n$4ndAOE-n`s2}=RIdTdf>N_~o`}Jq0~Z;xfQbpC zlGuhj&1VY~Wz4bu9#Df}duv*_70XHy1@Vp4hlw7a{bu-zZA7F#{kh@`*^4EISDqQf zTte_j%A-t*#t0f`)e6ZEoH&=yD_~QU0aFzUWd8l;c&zzLsV`p!Ee=x+^_HvKvf@D| zYsSW@T9Oy+JRcdk+**a4z*&Cb*M^hM~?L%KYVPuj=kWYrg z%4O0f4u+MvGH(N%MuN_eKI3`MTI<$VmJnm7v(y#7L9F&{LLl1jAkASd z?L%mkY|Y&UETC7HbxofgkrX{VW42e7)VF5+*Y*0hEu9wmHbq63jjo87(2YH 
zVbV#2lbvHvg7UFI>V>|b3&d9pN{6(v-0u^rxRC_e7*NqHt_da7*cKS*)Jmd}mvg#x zAeI<75d6GH6-R0S0Mh_3u2r)YZUP{%!I|hgEY4n)HJJz4>`gzlC{8S0Rn~t!gPzZ+ zV$Go!E2 zmlgT~Eb8T|8YyFHlXLIw=AqSlwKTK2;q)3jH&|ZI1>^(G z4Kf^q99kuqRm^wCmibtj93+IKV7s>qPSeIWXfF*9w1dkO$euNjjE)BwrBR;Ukpeo( zB$l_+{64dJoPgCDAV_3&QWImI;}8&{`%+SSW6ieWyQBf1dPdaN#+r|ZfJ3~YQRa=h zkQ6;kGHY^&1Oachu!Y{dka?;g4ydw()FkkV^0?UEOpYR2v(a=GOWzw%_8{g6#{$sp z+(z_Dj>4PK5O*r^kVM^#O|e(0x$FR!C7?W`{b@sGc4^+4DlkrFV%%^mOz=2jE_5_= zVBvJ^V~D{oN?8FnLADwW0Km7lN90b4v$c@2k5IC9c|+IFavuaoJ{9N8&0p@(q5pg? zCgAbJrFMDRv(45s_7#DFXbods*m57>Vu zRhz^L5K^=0CR2>tHh)X^;%Y+BR=P0zW%O$%x0E&1<(UlOPy)8tO;b;RPve zg2!=@nIw4t1K?Ni6jQviuLSw*PhsE4&fvLs`vexjo-RLEHqYmVa*&xDe1f0ED8TnJd8PT`5CBgBo%)C%7+JYr+ z2gd?|fX_fnegRi#%4ky|ft#9-3MfEjE5H$4`ek`e5`CQHunaAcBj?h>BKK?&ambi* zP=CrL$egA}PQ4vVU(JG44k^e`_5EV-03bj(Id&ZGoo9-`=6S^gc+NHjATpC$CN6_m zQ-A>;`1v6UWHwJHV(?YRg7QV8Ji4V}{Y9dY2hZISkX4Wg4;B0!D(CwPKvQ4sl)@e~ zC&2u^-w%J3_z1hqe`l}Wla(pJ3^|eHwB?rI zr3wfn5U1^m!EAKyV&_$N?qx+2DAsumv^+u~5_5>a^NlHi^KH_B?5TXMP|@U} z3Gq3A65Tt6)cVqJ9Mksu;SXkYDi93Xa=XidG=1vYJjj-F>^Se;eVc;&lF4-nK=ud$ z=ni3pee9yHWa(MT1H8B8c2R|paMovV0m0^miMBe z3S-=}GQqxp23UyBM$Qe!CThSKtfGn;qV5^8Ue0)`mq<^oMsNxtvK<&BxxXV2?GF(E zuo&Z1#AzSe?NmPlj~ZhP@al~*sc}H+dCD{ci_0G-SC%UPyXIyn$R#O#vQ+HU%(wPx zHn+0X?#&sHH|O38L%^}(bp1j#<#THKlB!C|s4o`=(0^rWTsOg`0x_~k4&f?zldiK~ zW{l5^@um3Bb+8tkmUnu9N)DaFNjvKEx7ytPf_w;o1C{awEe8G|VGG)t9!XWb3vX|j z^&opF)&h)K>4F`{opZ+xJk9GJK5BZRv3X1BO9>$yT7yQ6_acXwwi6-&eiN(aR zS3o)a&5Z-i_`Ij$Rc5oVP; zI~(>mVr)AZdD!n-872-rT@Zm5uEm^N=4;GXIqHS-I1(TZU1RC&AEaA5-r1h|W6ZwDC(lEvT<~op|o?uB8puA#4(nb`c$mjbYef56OL_QMsu#c#>y}AkyYj7DXe9FDV4d3I8Lj^tH1+PsGt&~Nk zhS6XX$DzBOZ~^z`ql{;}mFHsu|_Q&}G9G3lpsr!v%*N4ftDqs{VxA6u^N*50rH9EN~=PE~Bvm0LEy6 z`GGhXcK#b(cx4B>JP?w{`>2CDBM-8DAZlw)eWkIj>Ip3z09Ng&k{uUx`@-+}(}Cu1 zvc4-Eu2Zc*2p`WOwze!>HVbC8k5?T8nu8aG4sP@@5~~5sYXmBLAPTyBAxsm9lHvyH z0YOpVBwoy9!|cvmsn_M8xEMpZH6bEG z602k^m7t@f&Y5j5i1A%-fzi6n2>bCN0i-i1a*_rp*Kom~f|oGk6%gGD882cen6@uG z2EcFUbM)7}P^O#)Zr-uLk?h6dT4&j@^r>--?)LE#I0AJ6$h4z!&9OP&N937%hno68 
zZ5`Jf5QlY`SF~wUYuP&k^jA7FS63ij&ytFi!xzFZmXG8#)iQ-Vsv>1_&{qkyu8jQ^ zQsF%`nCxh5C>$$kj(ir05p`k6_dtCn$D88{H}%aJd+!K!i%f+%<~r=7jjj)KWcU=3 z5L&^W#)3h2E+~*}$t92)Kx*2)9R&#oPbQD%E1^mfje6Wv5?3a`>L>brxU48LFfTzA z`J4y%-4LHh+Ursm#Hsdm@U0?wn)3(&F3;`Tj@wg2dIls5&2YVbS_z<2W?gaQdyAqD z=kfBBp_?yq=o=&*l+>qjMgse;p$$4nhKy^)!1IoRW@|gGwp994OinrfKoU$MP7Zbv zN{URqu3!YeMASEJPA5j-T>-BJySRbC`5ha|If~@H?tG5(w79>wV3W3zFG5q*3nW~} z5ysigG&UTI(xpt5(=HGz57XLVMgFrbr+^Y*SpRmp*1YSu2trXl56vYw8%V~Vu;2LD zJEFL}Dq;9c^fLIda(+HpwsPjJc(Eu>!Vsrwk=@%8}_>C+nh z$uv>NtE(X{&bHQwPzxtSmrlcQVn+r{c!1p{*H6U>A4(LEuAyYcXaE`VM|BDSIbVy* z9Q40{LYQC4>r%Q%;7oDJ{&Fc}-?{VqeP3|(A9@lTDW=}!WBVIh5(!-)axhL1(fBqD zsGVoxI?_F2R7+{seA+?nPR9%34)kFSU2*!JyzIPCh?{2uky7LZljlM(UMw7FVhn|A z!3S#}HlOz;6*D?%9}dNmE~b?f(?WQr@D@;@Rj&jES`VDNl_}hC2dLJ9}A9JmkV(Y{(jdf_cJj2I_wm?O_ z6N8scLIx6qw?rABr?fJ)w0paBV4m;~gh;Lgo-SSzVe~^sXNnY2>jXO_HMuY2^;Tpf zC1?Q@79HDy|vYHY)qz|oP8~YYi_$d+tS$0 zhY$G1Omc3vFMxm^N_%Rou;-pmmmb@YLFvOCMM?cO5`PB_FPy z%Y2dF0Rq>OOe<2983IkoxoBOX1=ki(_MJzRf-x`cm%j1xG)0ZN0z-CBP*nNPyGQY-uFQV4Y4UwST6wE0(2MM>i zt>X<++>nWmJ#8ponZg_{#6YLj4b)l2ZBEd7;{(0^B1YKCS3qJcPGU)JW3?78lDV6j zGoL`HmxvY|l|Yl{?#PBMP39DZ&VX03Nmu2Tudv>Lwe@m@WIsZ(ch}nL{4MBKNMOeE ztALP!InLi06q&!}Hi4*`M7i9=07*7ZJZN~oW5KA5deIoJzL17{uYe;q1~wj)BRfYC z?=B@F*3m~11rmwDT+rEFlA+5l^ne$D4Y*FFr8Q+P-Wt~OT&%1d=(zSSnUSzd?V=-1 zP-v0vRaeqe@5Xr2@D=$Wt98y}qF9=DFbmfcD9M9JL!82Za(<(?+DA< zgjy@3$Ar><$@bN%Jytj?uTA6u2k_nap_w&5X-xU61*-FNwBiLg0z**c9P{AXoU@S zI)8%wDPa5e>7#;IMIvr8<;EPHiavP$G`PUB%Jcnw1mD-;pajtT=hTb;0-yp(A{3Jz zKAz~SkQv-pvXXM3#qKhgg+(|xTZ=zN4t`Y>{p)C7$q7^>hjHaspm=NxNonUjz@QI0 z$Ng&kzxtG4CIZ*X1A}-Z$#}>OpjN%2vEoZ z`pz^hfcY6o5)f82DCXobR?Fr;oopO1SDh!ZvX{Ak`I>~FSm%59@efi&=RZaopNV~} zB@NNV`C>T579sK5X;0@F2-iSH+&uL(0I(>x3Ct#M| z{m=cM@0Xma?Vm$GzVbu<0)+-nt@gmJMb4wmkL4d2=RB)LFtb z>Av&+XUHMDV(L?fRFOTzd4s(E}ty0O1WXmsGu^ z^MP)T@e{6GM4?3W@C6wE^wbw(l9j7Ngq02%xD0HSDQd*TWtW~}odBagtoB16%>luV z(Zp7iRtpy>ya(XakVaU~^IYTSVGy&f@I`&~BeArt)Dd_QIs7KeH(Yz!pd<+Gs)^T& 
ziNKQ}|1xXu?-Qqq4;5oA74IeuqDEctfhw?wDx`*fq%vI5&mA@hij=T6flSFk5w3?d@cINF1lVbM?t2cw?$*UJLO^*h^hWYV`ESsPt|Q(FD}_R;3S@`z8$ zeQTr=RTNf9g&%1BUdm*v@2WRO@8kyK-&->KJw9gPiE*WgS9%BB9|o*4Y#>%9m@zdi zT%kW&z1V|D+y7Hgb14@w5w*W4$}ks^M(lS9^YvgdS}rnmEnLZg#xG>db|*K*R;hYz zeX7YDwytvX^M2alEERAmkE9^ju&AoA>dB?wRi)m@?qW!Nk{-~IDmT~5QXn2XalwD> zr&S0Orjw6?t)t7T^8L^)Z>st?4-t%WjWAg556s}seG4JwV`(3VW#rx7>@4ywf02x@ zFT9N={V+%Q1QP;h(R@m&CzLFNj5W5*-1IWjZoj0yXjIB<`aHXQtKzR}4066N66_{LZz@qPNpzxr}HmGmmS2 zj0rI?$MuxMcmGCoG0+dE8C$rH1YKYlD=&VM?m)~e-cRoP_#r*mh4o~+*>ejc|Iqex zE&Izt)=Aa>`#C-M&JxjQz0c8Lt5#B^i$nVH+u0I@R!duYS$Ga(>*FK z5s3pWs?G2ryst_lpNNdgj6hWAy(Qf#@G$v?L4+}3%AajqU-F<0W}LVLoYg^^7n50- zt=@)IcbkaP*13NnImCl{i=?N=9MzV3R%`UFc#XN#^B;lrOx}*tm705dbFI|4T@Hr;&z90ri^{1=B(W4t;^@2Umu2*f(m%(HlXjI={vZ^iuwPp#N(uKY6C`P=hFU*8dxfHMUZIaeluWK(|;C z5HtJJ8L{&A!!fqrv5-<_C_hHs^9c1fqJkrzkO42v63KS9sD|M1dkR)g=0A>(^DKsL zumy_yaaQ#llNJ~YEzF-)<#HYqsPAZ7n8sIrS4~PPN;&DitNp+U`_bNB^`T!_zZBF; z9le+_bmQObcukP^+E(Iswg{%=U`{cI{Sj;5!A=Puy0ZMv7+k`mL98_67iZXBIwe$o znPINDunX^v8M59u{37uh1LHF z$ls0GcIXhY5qOv3%K@p4{<3dPz!B{`rT2~0;xz{T`_a|`vmYIg&EYpN?#4`y(w z^-k~pwS%qUpP}bkEEuI4rg<97fc+s#n!=T@<6jSXo@GQggq$ZJ1`4CX%plpKJ5+T>G5({=;yfKeMKl@$g;) z{(=(MJoEkM*QM~Sn!yhHZ03lF?7e?30c-&7)9ZJ52Wteg9KLZ^Z9IaY>wU!xbB9~5 zYy3{m$}#;?)tZ18&g|Xm3p|dMw0mm)>-9KozWI#nDjl2q>o=P{GTm|_$2QTZwyTL=SnXUd$T%N2Sk{iS#{; zWW$Nu*rLj4zv}LH_qxzG&Vjp{qtt11t%XM=hOB!MwZ{CHBmZCp_X#7OYtt_265m@h zPm~f7Nq4N7Q}^~$7!pf3X<*hp-?;#~&~?Ec!o;p44_$?&Y!#t-Srq3a1X`3&L_kGn znBd#1xkt}-B`C(~p@M6?J3ETL-#B_n+A=2X&if8v35S2`iySxmhs#;4YsCEK#pRoF zSC+~UeU}@|n7W>Qmt%jTYwZoDcuRi%hu9si&K2WCj?HJoQqePSJpa5)mob;rNsLBe z>Sk`(?d{SWUvMGkGKdWQhNVj%DTZ3i_dMt`t_96F`iX2Sey_TM_EC~YpaSW|UAfUq zwzBI|er=W5=V8TvUlnvu$L1p?m%C3qCx*2)yqZjfNDRVz?T6``b2fe7A#vXHr+*ws z5;BT|dOVgE$UH`azd#i5PBJae5@U!OVj}P4zI{~Tn?QT-dv{g8L_6RtmZP1m(f1!T z{^eAz#1SH>=Sr_+yw@n-b>*YVbE4J5vlYXslKWxUzxjPhQttBCKM)DzLTF3QeG0Si z;_}BcvLzV+p)~w+uyW^uv-M5|e@~AVL%i<=i9uxQ8GG$=PnLZ8{FhIsk4#J)A(P3) 
zYEL5(Orj+0)_x7U4js;j44>zDiS7?|o4AD=-{0H+$EZqP`G;Y^X&^9Xb=4WoT1~)T5g1_!@^z9OfUo!jCFQt!g zI2Ge*JK}$k`Xuw0DDu!M>CRyJd+GrM%TR4Cx$vj;Ke;5jU9=Bkx-IeC^9}V`;=QZqTw@Kz#h)&~XXAq8+F#3pIOyU?f~l*@Ex@Cgt_1VZ}}Q5c6a`Sd*SzQ?p^8)xxAR) z{|4Jt?tVd%b@DT|9y&B5i3`V!{xv8_y)i;y6)_G6D`M@hNKUQUN+zXIbw=`6PuAG3Xy!ISM_T1aC<)tz3S)szC)Y- zpK$)IRC$o=wdyew{and)mK{?%;9k^J3evNYe|NGHt1fGC$kcZ12{rqgfLlls@M9r= zn&SV4NU1k5t@Wo$2I@KplW!GeF+OW|wmiBYf9fiF`N>RWm{dL>Gzkb6T+H$q|*GB{3r~4)<#&*6eV5O4}wvB2( zz+efUYkOPIS}>2m7_Tyk_RuTe<9eQaEVOP2gbL`yAjR0z^7qz^MxaL(5re94p7t&O zfFB+xhj076Wa+~l3;z1wHXS?izy50Sw~}P{CQ-#g$FUO+_Z?F3|I}U%wSfVTyJr_d zPvyMSlhd}0K-VkVN&~AB&36B;dJo%bR&ejux69AahsQVE{Sbbw;lW#~pxakjttL2C)dw%sf_V+Yc>X$>%R{ap4OY2n=pr@+jc57M!^GHx{Ots&LP{#GY zlhP%I4|%G~aywhkj#^|?$(?z-hPl09)@a1&Ge>9T|Nnj}Rapj;Ba&Ku{vSS2Kg@YE3yCJPk}U}v6wH&Q@!?AG^SCM(9i zU?|al?^$t*&8+edOq5=EQKX*lx~t2 zf94G3vG^oHyjUZy9*K|fPNUCuV7tz>FaiWDh>qKvLVS0!U zYrmy^s;+=P`s=sJiE0Y|l7Q`SJ^w%9b=%X3fw@MSab2eL;eHy|1&}msO=e0HBLcl` zyG}b2xC1T6nGS_;s5kw}J}#kS=te8Czt_OEf7x_@Y*oJHQ7`-Z<&@PnAKX&keEVVN z-wPh!9J^AJH`KhKHGZi|7CKs#2|vlWcgT?9p=5G6>#khr%0p(W`TZ)fwB5ZtWkZU+CWqv0JY?hb{N3 zCX+=g&&^wo`0!j@#&>I(t+fP#5yF7E1ePNGWhnvXdvT7_HVnjS1|Ry{+B2p6oX;V7 zSN_O5p04>`dj9yd&gu6L!+)3FZ)YX^?vWrM+JW=rbMgMwaGShR=uiK14d0Z-kn=ij zo!q8|kk(w+lp2HjzWvy56Zku-D;b752&+r=+fV<3Fg>e8-#kOm*@Or?)yZfn8M^Fd z8|+b=WbDb-REsnBq+9PBS19EYh%+A<$qiY5M8pXwO^&=(%xk}J64jG5UCdvop*?aF6kh~4?FzWdY5eu6ls*#e%BmNtaAkkE z3fQAN!AxBuaR%F**acBy6z5p^bI!F=x`;6b(!kK)$q|=a5S4!{3_%#c@Qm%T^HC?hkDQOS0Uglx)+LT1P&dmqOplD*gA93!$h$XtZyhCiZ!S94_t5g_G_3(o#_`SQrz*z4QR zMyOZZ(EdbcQ8CF|_0(ln)VUi7p_LJyBR&(hy`9w$sr>MZi`-i()} zNA8*&QwyAp)b1>DT|Y~8U^}|d`5>XS)n{uUoUd7F*?8jU$*+`ys)~61DvWXX1!5+PN~eI7NLKzJ!eiRZfAzo>F=- z;r_nYFALA1x0VE6uRG;_4C%2Y(}#omH2+n|bG=;U1tkbWa@a70prfKg9Y>jt`dB)O zZ+$B719-ypCAhdk#?n{nYeJU@7y}qF31{Hl&t;=*#1F?;P+vF|Ph>lrq{bUnEc1%! 
zx$G7#I*D8)$=(<)mm*@?%qA+^ZmqtcuBm=Bl^mrhovTB?FS@PuqE#AVaq4Pev}6Qy zBA!~%i5fO{QDw03jA%|LoZU3K$yTl#M(Z+k3fI?8U+NTBAh?D4L`Cxzntx^%KnDWJ ztpaq10o6K9yln00_nbM-&1jS2cx#GwCrZ6=tsYAR=KxXy(QjLQWN+Se!ZW2umM?Z-bzchVP7KXj)w3T&|{oBDG zF-s-qaixqb{W0H=T}w~bPx)%kAJq`P)V=!5`SUT0RSMbU5`S?=-4Mr|%Y zDeZL}wP*;Fj0Ll9A@d94RU-?Nmr4l6{OOzvh0PP8x|0;4eU&QbXKv?a@q`l=k%kbz zDFjI4^-nz4FS0&O)V>XM3+P(+qwFZuJty+=Oy^XxFI)qZ5{L2Yqi*-4~+0s&SU*No##aTtOqS+G)y(wC5o{%-t=rW4G99Po? zi&nb~9YA(4wIYLHDV^}Q8O7EeaCYmrvZlIB%{CytXhw-}^lby691ndW`Hch*k{d>} zbOcSFsPZAn8Jxb--%cNhqgtido6t`Otyq>TilW^|3-)&0Mmj9EI`(kf)>!L^5x2Io z%(kf$m@G`q;EF$n5_qllSpPW1QGJ-c>Fo@9PzWOK;X-Ef8L9qknJ_&gYyK=b+DoPw z2C8?;VMlJ5(ov!*kOZ4<(71qLm;#93ynor4#l7j`(MMWbYO1Xvv%2t0Y z#SG=i-1Kv6o=Xbab|Ez)P5(KF=H3iuIm(L0IaXa;+n*lJ(T@6jfYAmeiQ}7(+q0S- zGsK+~IYf-}x<)q&Ri-9rJ73JU`zQk-F6jtT1Mc;WX;SN`zBm(da$;!krAML zQS!kA**8|n2O&G@4w=0*BuLwfk~RjInnhO`qT9#Uns|rIU3Q_ms>g`*d)X_2@Uf6} z${OJ{Y}lISr{24A57J_qR@&z+y8GCM=nhH^#gO8lhLD#0H*yl1ifiMLnLqsP>BrP<_t{5stDd1q< zl^K>h|GAvZCONBppigY9l1>aFm7kUK%QjvEg;r3d1|7C)@e5gHlt)^)p9}|4N;LL) z%dMN(a6o?z>bA-qH|o%d5cjpb19xtM=jSgwv9ht%k^SZ1mx*pGLavYf$l6<_6i+Q5 zSYTG9dcWS*TB9j3xtkKEY50H&gecW8OAjdIhQ__uU`HMt;#ut;*q7sVugJGgEu}U;~d=!%t z=9MhxC&M8HN|i*ER4f91DkJ@mq>T-m0?$k=SucH$EOP1G9T%*O_afIqT&uECB9gc> z_o>l}v#Pjj*1iOLAz|F-ZcsuYD9sp_KvUF|24B%yH-f(0c0SWoQj<~%SCS_!XO_wC zk835G7K=Lq^auCZ5{@H@!*@F-Zs@*x5_~7e*%A}nufrQP4Oi+!WPnI<;V+RQFOAXq zGwo2K2btD!MkH!CdBaTc)wxlJ)la!IRRSZkd6X@tN4Qi08+4Gs#K=-#b&(~lkmYw~ zKh}D2<&onM*LgPS0%!_s=ANr-)eIW_mi3gR848XqbNTB21wlZ*C60|SV#bTMSVMAK zuMUoDZhjTs*HecWRjW7VB|i4C#3tI=JaD&uqU{NIP?AzyTDpJ1{~WS%S&ry2{F6KS z5)y7>*6m$cSvgmEnLZ6%k^dNHP$22gQO0nBvRRl`tQZtRE1XpuS@5ql3X*%;Nig{M z`)Bi(m*`W6>hHGJwkT!PFWl@qYQ%Ixg%Jim+|4gD?(HHL|5yn~wwB+Jo)I$6y5 z@FPI9x(Ps>w8tKx225cs|5`f!zSG6b>B%RpOYfm;{j^{ma*})rWgzK`L44Q};|ehL zuVMN-1fD|G$4;5l&V=wdKh)584d9<&sYks`d_^zi@Qfx9ze>>A4bJF=#9mJwhd*K6>&U&xlaEMi?n?<{0DhjZ0Zt?E(hwYzZSVfZg0v0zdoq-XC8aqy5~<+znjl|txq0gNs>`##z9XWnVZQLGPDKH%-yMLhW$D|c1 
z-Nh?k(^0udEAOKTy?e!OM=;h0>NWCeH1KNn99aRogze;T_v#Pe4D5TuYz;tvH*3Wp zOLS1@O}~Ys=eyukdLyrIatap>&RVU#M;tXu2e=}xemL?eVsFm{q#;VkdEb6GloCWM z%QwG}%6Nf&wJkvd&So&y`M?zO${mlA(ILtnkQIaANv(1_UiU|qv@+_~s>{j5l(8rk zHsPJbYg=7g)81%3Cxee?>3!(u>R$UPn>2vrS~8)rvQRag>_wd)0;7Xc3-cPQnU>xG zu~PJa91xos7t%Psp;2=d%Ej`Fzn?pna5HCwrK_NhM`GZ-J^%Ss8i&utP%+0HtwtA*s~xSsHIM*hy?%;HdP$6sqbK=lG2Ib=d9aaqgL>FE zzaxI$tby{)$#AFO;~j7R{Fk4mdxBD_^CUZ9-jIb2a9 z(ne2~ET#+*vuKJeUTbn4OkSgdHeyt67^h_JE8cGtHBu`S=BbKWfPjhX3wz(%9%Xh@9JiSg z`A#Um(VF_$L!h_1`xEea=cBd2Z!6J|YRJ_$@f_#}+ngc|d%(Vhz+x8kyh@ffwo- zqzP?&h0Q6?QR&0-bN)YCoVT-r?u)g`=4#`S|U1hV=6t7G3 z>frv}m1w*DNo3W8mAwvp-O<4QF-oT9?uQM(G>`T2kVDGIIYa6 z7oTwrW0wnzefgo1bhV@yeRfr+%Rg&*C@rcFoB)he!KxT46ape=N&w0q1p=vnJCDJ~ zG?s5J9CxMajnQ`UWcfKfd(Bl7-&vmSSW;>*^HpWmXO%^S;#+sW7HnmP?^SD_qlY%G z=f}=3Qs4sQvo3;w@+LFFX|CUnubf_%?FdIIJ>f0;0W#hERY`a+GSJsl3MG=tRUZyb zAXFS}dUn>hA2b&=!C9{Q8(G=l#I`Kxsbg7wlkQ5oZeF30S*wDZIg&$-{i$ywjAiRk zS7oYrYeH>4$Wt0fcPqSuSHMEoi*45PI4N8FXkDTxT%;lSRj_a8f3D5%uWMt~gATsD zKX|n49{fvqrk@1dx*%G8FhN11p3RZ2!S(~6zV?Xx^RclI?6t*)%~E4-PXGq6E!f>w zVtl)GYr$maiz+e?xxQ1aX6zzLT0#bA+Sy28CpYkJnX?;C@}geuYe(<8wtg8H?IUUW z@}oy_r*FU7%k${S&~Io{>Eq0|knx7nhjbe(h3iSw_Ckb^s{CPc080#PCqkJ+4M+bm zuV!w?XjAf&&*&;v-yG}TRx_xY1u%WJ5RT^PNw%e;xtSR3mqlYo4EkI_)M64B>BbJj z!p0r2WQ6_Bht-|zhC-i=q;Gw#dY`3Qn|Ug36Y*VSHC9KmQy%PBJ`tCO=e0U<8|0&7 z7k@*Xz{$tL)Nm%{u7vwY&u0VjWgDrg$enj^#{}b@iyg?oF68Sn8jS$TT$Og<8;|q`VC` zW;QQJz{5}H4C!OE{~2@H@Yu-9qOMyT=!0jmKY(MBYo@f-ms7Z&RNbW4!=pEM;Bla=?Uu?&V{kr7e^utE1HC#oFQL@HnD@USO zFz~!W67r^0Kg43W4AHGjPpSpSS9@BK`5srtU+Ci!{hcF{qDDr&r(T^2RMXWRu^IJF zI*M2P`QLgtlEOciRaRQR#Lx($_-_5P-&K1u@v-fPFf&3v_)25`*Tha_2ous=2W|>c zb#1yzc&@%p0mzdCdb`O#Rt`PR1Rab(aNYw-PQNafseTuBrm;;BDmV z2oUW^9)YEGyU>Q-G-!Ik!iYXmpNsL&e>;R+*K|!DbD1(ip|*okoZqgO<+WA$eGH#! 
z1>pqZ_#%Fq_pZl1-^VFA0OH@`={^8f`vn}v zunD_xX86c!O}F^;k#-H&03)c;lrlMQba4SI>}wK>I3d5wgM-DqhJKY%zV+T_`40K@ z-$F|ynSTeF=*yP|k94?;PVggWz3*7kbx2nH)rWZHJtPQ^248bPk*y-}wSm z5Uqd%F}V`YX0x>VL)Ae6P8=unO!x@1Qs-3HUS*@~+$*7vd6>4>!O703#VM94=uWiC z)QD70|Mm%VJpKfPM)pRs_P@)E;8{Y-!h_N7=0L6Vt5{C%v{dbo6pYoae_XpK7oC#F zTKP0BbK`a)Pm(WNpAl4cOY?ZXw%rX~=j*7Y&6fkVmVnR&0xVI!Cth{Fu0#&JxX)VG z#-&=fNl|px$W#}_GqX{SzC;0*#Wlr@>R)dcApdT>zZn6~l^|urTtA2=XB*uTNZrVP z1BjJtNS?(iLVQj#WhAZVhuP9{qR0H8WpVj~427&`8*ln{AVZoQ*hXc`d@gQi>X-1? zpO} z4hg5}acz@_)pjzXzPPIC4{fdvXpOD>0RKg1-lTJ>v*rX`uYa4eC@`roC$I3Gt*hMN zT0qk??QWKyleETDuOqXHe=tJpRf+@mB7g6S7-HLOKTe)NkD7fj;)vLmf@!AL!_c9J{Ru$T_wZ zm&C!U7V9%5Rq5z!p$4OpC%OKwTGY*z)hgLzi7S(UcgL!53%_9% zj#@zBD2WC58{aeOS#R3i8(fTEvKeIltk#$BXVhOmwxY@Pn&<32Qhms^H+wqW zaG7o4M>|6)jXOIU4Lm?~8)+D6>lAEZW<-#lM5O&@v(buE=^>bi5!I#E+^O@M9YE!_)b-0Jh43!~X}8F!%={QSB_lcPb+)fCqQ0;uUDS4ZU3S*W z`63)YHSXn}RHF;r<%H^_>AA4yg8-m_YF&!foeKFYfk4PWEs*y!Dv`UO8#poJ<^DevgQGlWj)o=f@)mnvT<>H1iFCJ~}4Q z+teicmlluNK{W4;q&@1W^kqMGxFmTR*T&s%>+PCdVHsayZY@&ZgNSI+MDPM&45xC= z)>CiYTm|@-C1_4&rR&TtU(nw@D_hr?lsC9u#=v4EIOy`6QVu3}V}Q41U69TnUV+|l z&%y{*eKSKe^)5D-uLs0F_eO5LUREP)MQ@2{*$uC?Rc_gKb;N^~-p*!y>ly3ji&E3vGj(Mr>zW zfC!it6tn(jRU<)lGSJxR#Ul`uOjS6{xeHERw4MsJn7gM@73UtbGW2~FGrqr5GUgD4bECG0J)@IHA0sDwueu{0cyyzD@oEo$*J)T|dC zoPPreN880@X-DO|h}L6tvJ#`@2M5R=KFrt|juX{+*7)7;2@V+gi(h1}8J>GS5dXr! 
zUC2^k{rDEl?;2Q;Pci-_=z|;bgZ#4k!tDuWh#MCA6*R_g?z2SJ6Tb!Rd?}7>GQ#>} zHtRrfL&{rsx4yY}ggder{<-S*xT_&laKTRHO7>9k2tM2;1yd3JT*RtAyK5DQZuY!g z%Y=)sj4UgviAjK3jC<@0aaC(e=EZ$#Txa!%$EQO6j1-;?-N+|PAaucgts#`-7c9Rt zdo#TTJe`qn3whKfET45fywjF%{#-IN7C!d!tW~`KEhcC~!f9H;vUt|wB_~oM>Du{y z&|GQ?U-$a^r}AKMb7DK&76~+g>DY#1_=28+gLz4>o(lE|XG&kX!d8A|wja*gLSlK@ zUs@nV;02~Fst51Y`di|(=H<5}O3%vk|DNsuG8fnivZ?*pCXf^X&rH5W$3Zr=`2=2q ze8cdHS*UG%l{_@X7@^8XkLFiJ*USgn4}7#s756G^q6qRC(D7%eXC}m51s1K3nPch7 z1Qc;Bhm!!vU($=UUk*g3&unWlv-9si;058}RQ~SeH%S({K22S=eT5Y;(?H{$`6qIm zb8G2H&_VWcyejNFdaeq&_d=r4s;L%-OAcy%-#I-rYu4QCB#w(9QM4ljJU(&#WR@b0U`Jg@Y{ z>&9T}h6k=<^6=#0*kp=}I()5ZMo)e+e{9uD#mAkE?Ep3wXL)f==bO?e0H(Uo7+a3} z0B*O3gVx#Fh>H9b*$y(hQ9Qmb{_{TxiD{tqk33;ZoQnOtd%=1J=8uzm^?EJ?oH%-3 z&yeRmx#%tr_k1AysjG24B)whB?N)`-pi!XFV9Bje`q4Ja>huPf2fP%VPvZ+&<>yES4_a zRyu+=`1ltCikn5S;Lf4oe)k#{bd&+X@g}W+eRzr0qEySq@D>{mbJ9FFCRp_yVhRw} z3HyPzYq&SSv-ASEaL=#)qa&BB?Vk<&w@F;`S>^!zeB}? zp%kzHgsjD1GyZ^JLFmVN6i5}r8YA>qYQJM8SK$u8HCMvLg_yk=>HFU+Km6;7&2Y%f z>L#+`YUXU0&%pNP@2oEv*n~2AnuDP63xPlgn#eyN@4?W)1xitaId?i7$ZIv84oXpz zwM#>|y~e8hadD6gApFX&2t`i+UQvQzBA1t2Yy!NP>e#LbD?=J+<^&rU#gPNv{Uz14B%}-`c+ElIaujp0n&8RS06%0fTQNzQ-e_AgZ#(vcm2I(F5x<=OW&HFAcETF0%1NnQ$?9DhFrSOv^ z@$fuO@8IB&&3ua8!x$9;ie4=DMeZWa^qZ8UOTfjpp?p?u=IB{TS%0hX)i{19TPzNr zCa)7|u1$RfXp(**_gG*ZFIC>JJ6*=-!as~Qfm3YU!p;(<>o_uG^1fOU6YILEYK=$? 
z?-7%rHUXAV!1Q&M3W(i06uqz)GK~Yhb?dZI&|iHg(%-_$*3;++!q;M|98`$@QPBvP zr{oIvn$%Rsmkz+*?Efy9rja<)eaFf-M}NN^AS4dwZ7D(Gb+YAKA`q1mw%nf-)hd+i z=X)*o-fS&@TrrW3_Z4^sgcsS*c4)V7*X|(X_yKaR$djxu3xaT1N5P=JYO9yTx_@J4 z^jL#-*TQX!@X4Ke%S7@ufS527h{IcYUw`TA#asR_g85uJ?{YHS0NmqbvJU8P>{%X} zjLVF+-SuiuNNByUnq!pcPMNOCF+vfX;9uADt>MNJ1l9_ zR_zp1=vZp(DELQ}c!u_=;`stj`ez*d!{A+~SzCe?ggvS@teDE7Q9X8s&7^sc&U#|j zX>Q(q`Tuz6zXk_ITAY>(`~t>Xv_gvLR){t*LYhQ#D~e#h_yM~;THO{c*t)$o*IEh) zPuabqi+EW6kMLi&)sIjNbL>7*nMOe7)K@CH8%besJ;Rn7h~&V{<|G(=^eY?aar?aQ zG}Z_qaemtNk0Q;=<$JWQ8rZ@)LzfLgtnyR%2L~sh{E79@NqlDV2!7UspPz5D@hxxwlW_=mKC>J5+ES*8w|xM^M$XWYlE>kd{vnqW`v6%$reHslW`StAptLE=q2~lPCavu$P@86?;~D{acQk~i#@)LZ zPg1L292Fp%SilA#I$@Z{Y{XPqt-8Y`IT9&Tw5G*;66nD9+s2?FD`% z`c&2(@0L(NrEngdeS0Y8p33(i&o`rN`0ILi2CZhSxbfoqye#(j7dQJ{Irirk_X{bW zZ06}&?5n*_u=z~u)H}E~&B%R`&Stmf>71Ec+o6)n=eR}2R?dh7E>qn;pbWQ6LY;$Y7Iv!?-1_*NCOnNIOyx@4cI zNpF3-GOC;4YCgi-d;~%EhVO;LAPZX-yzS`p{a;orM#x^w2B@;_?QBBPtE70JsQ(?6 zQU4G{O+a4PI^y_MH2tx+(ArR}z4w;7MVST>-*_7Ka7XTsApCX6eQ}}3zqP0hZ`$n7 z-v$Tc7opU@roNEHih7Y8FDl=jk;)xJ>%INm_MTi&hGF(A2b}a*Jm+4LU_+6^SuTKa z{*+Bg$9a-#k#}5WW@gU&t)~NZyG?2Q@Kbiw>RIluigz&>_C5I4lq5dNA1A{g6$Fv54xdBV(#yF&@;-^bQKeLl&=8GD|I#9g zmN{PMd9z?^^ZtBQr26i6sg{7<;eC$%9f)X&T8`@!c?;#pj@*i%YhI_*s^5v0BDiiP z+r{q0 zM%SMJHIcQ=vple}f&a4v?_?pomob^veF6ACU8buyQr+=m8J$mVTkp5ISWhm4NfBap zAgSU32_2A){=p#t(T_FI59!hL54mb{NX+igyojQBxTMi_Vy1Zykiq8fs^Z-jfJEwt zMCi}LWYnjPm#6TlElmZI%=e@$A+yQ@^=#l<)yn=w`lJA9{FW~_Ec!XKFan-W1wmD1 zJ`-=dQM^8?hd=dZ?my|$-$uj~P>YHt6VTVFdRzFvX#7|Ne*3J(`yx-eASP332~!3I z8P{({J0Yt2!Aq}fKNvAaTl3<2@P&S-w5;@+1AW+^V{f8oni|RnR>$KgynOnIg*Rz{ z9iu>JZPO7V%+6+UonA|W3|)xv3t{~b4~3|O-k4jvcI7+t(05fl9-K_CtE~pN%qRUl zhA~ieY8XtZw^SO|^_=s_!hYQh0UsW!RViiBR<930a50rgv>xfLokt- zpI(99cNojaDrjI|tZ~vx)_@__nKhr~2*{$cGnrluwj+@85SuOHYZ(9s{G}WAUA4RtNvjL;n?zL`VcRecv9(zf6Kp@H~ zXyJR)iso0b1^(u5ND*g1Qu`$10=7)kG6{l?U~ef!aT9!l4HBeGJ0}A%``D>)aQzu(uT`+ z-9vDI4ov9$_~NHf$JwEccY-;NmtH>n$e%#k(38i;y+Sr7T12;s)7MZj93xiyIcY#* 
zJ7;0B$-OeZ8POLpnKE=NY*|Lo1xVxJRVv*zB-O$$Spgc^>dLtDf@>yy&Nn_~qKeE_ z&80rn_N02DtDdnbRg%}iM4qO76|FqAARJ0(q=4+pwz9lGvQg6Yrcl$hSt*5bRwQ~6 z3-Ta6(y6wu~pp8TY$?Z7I{PEDxH^iEP zsw0#|74qU+E)}ylIPBl1`8Oh^*9X$l46%9G!lIOaJwpAQ&ksE#y>VtCYeQJFJa3Ql zr6^43o)xi~DFT;y9FvRJY`zh}m))U>``Azj=veYQ#%KD3*7 z!bJ*O(S(~jc-uRTKb$l3V?OWSuu@={2!+6wceU)E@JZ;LIC_2r@Fu@Rtix@uyGcl1 z9BmetUxa?ww&LAn?QuT{ON%laxhPZ*+QlD&eOK+0TL4^I1l`hURYW*EmZxgDp}rSe z^sq~b8fF83c3Ux(9Ff#GHSru!0-d0pg<|l_% zB}~jxfIpT;b#Lx=aZ}g0W$pZK{(#++`$UXA$vh%AA7S0C48i0rkh4xLPKY|*jL{QW zPay+R>o(QvBJ<;xcZMH%q?+5Mna@|WlU zmfbI}QR=cW0dTtd`zyM-We<>=y`Rl4DwaVwU)7EFpOaY?tqRg z$#KoG->Fu3uiL*iH7P~o7_b%d)?Ppqiujhy!x0zA<-;4Dsmc*o!)ih1}L9Mv6qi7m64NF#z1Ar$7`{o#&|*Fem4@D8tye4Y;aKxEvF|i=yOh5_cfc1;((B{|!~9 zilSVpsZ>;9l?@WwWL8rKKtrfHndir%X7V(%*FRWA-&mIKiT)ZAInv)(1ExC}#zdQ} z6>}Ob3a8PFp3rnu;z7bSTt7B<7?1koZ=)_^k3eXPYl152-@ZdnEPRPnr4OHDKnHTa zc|})83Uw0MD6ySb$dH`JdR%rw<4nk&2{g3gIwiS(pmqLQB8w*97|uNJ@GaJ9Xm?Ln z__O97}skf-;>d!J=x-jlqeg7-e&DjNJm=P7p6U&zG6eA?Wu<>`vem@mpZ*`BAMRMw zMz^iggfy)b#1%5=Cqkoagl@co4zgMc<#y`4N-|Z{cb)UWb|PJq&wf|~&e31ch8|@J zU8@t*f^^EF;C?(AheTX!A*=QRSuC;5g}#ZyQg1MkrZQy8138%ZiBS zo5E+6a2_avJ^)jn^PY0pzTSrJ1lr4CbE_D59OPmN?oPFWo&eAlr9J+i)mwWTzo-QW z>UKS(YfpK+V}BYu!3zCC=^w~`cZ@5EMtwptA`^m3MM$k|rUS4(h9P*QIO?Pl{B^-U zGsgneWM~z&U>YZWMbQ)$kcN|S`7B)n3)Bt5uPg=Ok|o9MGr%4mf_@L*IU~8qIL^P3 z8C0iDFs)A+`h7KQ$I;9!r!AiZ9gBsY@w&`FXjLZ`GRYcc~YYhMY(!P zIzQA3<-c>*xa6FcQ2P(9o}7R zRKWIY#TKUnYws4<$CR7x3{uyx+v{8y$_8CYY{t&XXE`;JWskEzlN^}%8_6sK>$^Q^ zb*?)*@_SKb+}Bg~Gnjgpfs;My@8^~X-9pS-tqcqYNHWb#-DnqK>#vIt+UQ*A@{ff( ztrr|xD$?TyGx}KKeTq@3=P%i4&zsSXb(gcjoeN*Xps4F$I}g!HJ>F=}XNnre(5pkt zxvXbQVCdzLL6o#L@#nsx1TZvjP6uphvt{jWP5xHl#!!~mW#3+aq{Zq8hEWxq!KUmV zLRZZSG#fm&D3f%s?EVuJf){01@397Xg&CT$P!FY$2n{dH?^xiKCaqwfNFU+| zAN9cklwOy#z=C|38CDEd_6|roo7MEou{t1t3pT!Zw6i76WUpX&xyTHT%y&YXm$ zb!zAO7FBj(Xi?X&pN~sSIA*MkwRvj4*qW&ZekpxT4%MbMYC6e8Wo7fEt$dxesK}FC z=kk`_yDl*4t0}J_67uk-f(R#$mM31I;Q*%;(8prK?aKuT73vc=Z`g~pzhF#&5}>yY zD2ZV=JIGNs;q@8w>eRnfoktTcTu;k1yKqP4o}*ls)1}UJzbF;c9M{!XvcQ0u@Y_dM 
zQGl$TW?G!mLt&!IlZW&=;00^y3xr0f6QOstC?)%|U6Iys)lD6r49i+wS7SSfrFcf{ zeqt*$85*8E-mmf3k^xVE=)aFFEhFOA=v{1SOq&mkv#NvMHrR!YbNlI)rJcKGObd?T zA*5NQc|GDGlLd!FgYc28)`K(_n>?0`(%*-LJnIDi0rLdHZa!?cY>Kh;p9ZRn^* z34V<(D&U^-UAGs>5%6O0fGa4?^TSrf zLGzi3Yd*0G5_^Xh0P=%bKXF*mgi$(nQrA!XoDqF2G(%zyhVZoXpgfcS-WET)qx9(t zYZf{o-j5>-!{yBGuz++C*C#;)+{y{IB=LW7KqqLHfVoh#HP}3Vp&lq*fJ#Gx5=+#9 z8Gl#*tMQVOoa3H77`-Tzo>&2(^RANTGeN*^s+~bCXsdOYp!`%-Y#}Or;jHvMh$VXg z1~}|dMMEb}GR?T$9b5|zS`Y58IjN+tljsKC9B13ljSJ1N^jM`^YAHCFvNt|oi;UgD z>%dyNeiX#m=QCxDAB*xM_H9sbj6NxqWIbAgk3}eeKH&!NmrM1IjO2*OKe#9v3VBm> zaiR!rHjv;#a8@=3vEHmqySL=stA`yisI1Cz(v3eK?Akl>v5Utg-}dot*(O}0nIzCa;b=7ztw79Nas zxR9QS9WBVrV4an5Jnleq@o$lIT}))U(Yr0Kg9>?Knk&bu4>MkpgidTXMR_C}7s&sb z(?7AJIV~gfA>-{7^nuXeqsQQweBU{BWXDUh6m+bL{dSZ$*b`v052Se1AzOy4l5#Mu z?4q<_z!`4ng<@)YOeIHesQ_Ln2{vB-8<e8id2 zUZwD8Fwmiy96Rzt8(Mub4L20$#%8v*R8HjgndH9EBe?F#hzZK3{-dQijXw4~l6X7+ zEj*{rwt&6~yQ06S4BEdubR6%AqcHgK(%I^jGn|w4t~>JF7fE_>?s?|T^b>Z(?^iIK zS1W(>k%ySC(^cs7xKdg)LVa4oiD9*>N*gy9=25jewUF>*pwxz_AH~C- zLbiWjFXR*adUQ$x;jm@Kv}sD7vyEP4zE$T(_P;P0*>%XOy)G~ZBd`d2!XmII4Ege$ zX2q22ywYvL?71HqXYMrKC&FJ!M{!BAEGWL~ID{02xB?xh45e$|p`U@={=EvvE1ojR ztPr?qsIwD>LP?y>d9y2+=K>}pAV*4X6N&GJEcdtX4i~k6Sv~2a_~C0UKz_w}!p#=j zgC0!0O~50sK6zvF8xs{>SEcCFG8i2J`5!Z^wEPRf*aZb*p`=Vi`5Gb6>o}@I@aVIL z9am_$%fF^$hqi+74lz7~2j;^3ce-}K7CF2 z-B2FWoM-FucwiT~v!M2h*lH~uH=?6xQROrEkHz7Qz>=Lj5$;+=Tj6p$oNzPx23{>L zS^mK5NHKNp#eeWTB{Zf1vDCM69J-+=sYxzh&kYTexC4*(+A$bE(wxl!?SqhaeosXR@NcG@Um!6 z29aIKqH3QVyY(o}a6>?x;|&}>{$O=dORdD8)(+Eh!m)bjy>IF*M2qE#Fw0t^#o9H6&uEehq?nmO1Hp>pfpGCfm)2BX3ZB z2mW`x;l0uNR_I2o8g>%P_m$u*ougv>;2!)?ja|R8jV4*Pz#BNM2?WxTEHeMiExVxt zHJq&-=Who_cLR*Nf20MpD%0z7Q~Hniq}JZ?RO%XglYd|3!8HhM{)6BxIYb9y&JZv~QLnWt9IJfNbRK!UlVj^T$kW96na9o%u=zV(|}6 z1itZ+Q(M85cwN6D4@W*#fRbdHZtsnfo1niJWLaxsA5`(jEV!4ZS z`?~<6h8T!&@1t=OJbeC6chnH+U+lQ}H@A0&9-oAei_sk{cH1r_SYdmHt9!YSw7_B> zR)Vhihx-gkmW9s15b}a0kfykO%m)?nfs_0w?9uH%#b4?O%i4z81PZh3N(xl8+{y*b zrsza_GIc;mbOulMtptF1T*U&Sg8e4=mN??}j~{IO^FZooe- 
zE@+|e#|Vf9SD94o&7u#k4~ayQ(j2!pV4BAcR6%2t&ngdwsNk{oE8aaa^xL?wSt|dB zni||VXE(9&T*H7&L4Fe}>YqrB7+%4g7>-!Jq6b7mPqX|AK4*owFXO(Jl{Nne4jFs! z35b?tbD{<9;F>2&kNw&j;4S%8ce?5i2LrA&V+MvGo+vKD*F&F_> zZlk+D+(+Ve@Y4qpv%|17I85@FOk?>((h07>N^>@R`G+XC?>&H$1;8+-q)6HT~L0|&x~KNOo_ zT+B1ZMgu&&{Z*Me;wre&G1#f}sFwiUP=LosDxYDOk8Hi|-ShQjfP>{D*NkgoIF|hY zTLEJ>{>QBVYjEezy3!TC#?B?azx&?aXvvL2DK$MwR4f8l zt7cmBhaVgYHy{QBwx`1-h#*?zA*?GfPi^lQ19acA(Y60+u+2_xCT~x;x*BAk(B{e_ zHCogmxISrY?^|K>O5&uu`?X$6iN4bB>!LS>c`FkJ9Xjo@qnm+L4kFy9fb`K}NP~%9 zJAx{N=TRf3Tve{u2e}wIhX>UsHx;+p;CK#wFzx~8DDXK7d;I`h(m(Iiqm-c&erl0l z?p`*95~hy&Lfl$Qogae1>I@ZJ?i4rB!Y_=?YX?WFELc01>LKQ8pU-ME%kYhWc6u?lSqbjic`z-GN9QL)#mywfYac$HA)PDV~nifQ(h#1i9_W3L> z&G0D$g4UvJp?HEzCYSv;OI%GH_GUJOT+afqP02WbPE@_q`I}20XegRa-#y-&YryC88gBETsb@scpm2XM3Zr#*ebD97d9^PfzX3NAOKn|yiK#Uy?&yFH+$Zn zh`k5Qr7mFS6lTXPz*m+@z>JOCWwbOK1v8vf4{QO$|8a26@V|GXMU z9UsGGHTyANPl%>2kb{XsC$F*=$3|8@Stu>%RDBVjX?Z8I%YUDNb~CgPuRbDTuduQ# z=pQg`!!R>@)Q7A3PMp?T;YwU=!_D?T!2KW4L654fUnNczJfyRlPuQq7 z3x(yTq7y?i@MUXAqU+&b_gYr^vP$>%7BZFdkd98}ew5hn!MSW&I5#d~qkfD`Uh)O1Sk~lPj=+$jlmK+& zk?x-G$EhjZY=ZMI(^dJ#Y}juKi!Kp=sLV+O6YYbxyKT`I*1wbZckCs8iOZ*v5){3m zn-hEwn+^XRd6oK}hs0DO{i5_?n+5KnurG>~s0!G$o|2VcFgP!c=qPjU{joJyqssjg zu1e6`OlVBS^qx}nnAvK17o2&g**4$##XbwPzqXLtT$ImSsA;y!jC`nSD%Lq?W{a1- zif#TJ+bI#Ffcntp+&k?G<+ZEjo35n`RO1wzaXO;+zEu1JM|9TN4$?2CUC(k(D@UlA z-b?SseUbyFn8IkR_S88H1_PulyzoN>R&Mu#yG&z(;p$Bk_@g$KObG!pUs#_T42cBl zS3hJj7DvDX;BizTXyozuqhM9dx#b=8=Uu8%Wq2)2e(^z%F~nu(r( zyBMSDqhHNgdSp4@(85W-4`y1awD3e6k8}g2-GAmAa48=tNt8*EC9}pgOQiUSu#pzw zMcFu_gv(+2kp5mUMf0oEXW!$5FMS~|`rz$KRtVLQp?%YK%!u-ZsTS8_w6n>3x@^!& z3khw8T-Lf|5nW9Pvz9B3(SJ>N!zkdM@gf7-@Y(_u_&lh|eaSXBZ6Wdh1`t`4<- zXDFq`!ndF6c?Za>@V4w|+n8rWPPrB9HR}RK&fVDA!YHf44MQ4uWq7Yk@PA5opQC0M z(h^8{`Hx2))_If_?>s`jGl$!U>;%MBCl9!`VwZf^%Pc4z zf3|Cv0n)jvob7=)BtM#pd4WoHW`AVsZ1)O#Rr_wn2F8~{EHE9y_hfGAv9*m=y3Ji% zM6_YXShie*S&p2Tqde30Uf|x{qSx3}2D^}Tbs@u;GBE$(x|o9&5Z@>`^_K7JOTTIS zZIRBoOZ3Kr*7N|27jt9-XAZp%CH$V{TjokW&wyscKMpWDQp5%aR5$HvS0!8*7K^m) 
z{IW=&06h`^uCaZvX$Oq*AF{9my<}l2O^aL{^1L;uw{T({XI4jAM(;Bs(LkWOL5jrZmVp zMxDbs_LdGZj&Y3Nb)@_5{(L^)$K&@8aGmRYU9amko?}s`TLxH)9K(awlwO?DS-nk6 z((m=LC8dra9az5pYc$1x)>y2?s($r?&T*JL4p9_1{3uzNLe*gTsER;oWf<&&gbA5l z`c||n`QFrc^$dk8FDuuQjSbFKT+xnAGF={k`hEQPusX`X7NJfpEbeJW7l9-FwQ+Lk zqH|n(RHA8f@{lQ^EZCRk6FZBW5KEnZEx!6|P99-6I`;SWx;#IQ9v)psQBkGwL(rzHq4JyOZjHkyX3b-!=6S=WD5kpJL1|XSERIOl_ zm&%xl+n5deU)c-4nMH|MhqVy!Ri=LM_I=u~}HSRD-yJ zAgblTVTPR0o{g(r%6oJn;`#B{;E&66hQSGIj&RS&-V)FQNRdgMD?RR5mb#VgT@4KS z56;35?57Xo$FnHQKzaQhY0jRd08v7O9J#{+Cm+RH$2>B9MG-xGa`8M69}vtE)mY;- zjXzJm684sHM^?E%zm3dBL6^cx2&ihT6EE4fO|n&8LD1n5?s!?0v@&pZ(csj`cYHw) zdu!=@my?ox(Od!c;%2*yNk^G1-+J+QD{ zoshO5d0S!bMp|&kXxjk)Lxy8U&b!sRT?Y;pTmDElxMTb<(ZD5@C{TLEfW8)6pv{t- zJJ|A`5It!J9CwadAaC+aZz2%C(e(TmppCQiH=tlJ>!g^u5V; z1X)6EC9M%xLP4HIMU>`-POrITjzuZi8PD+Lz1^?YjlLJzC50vlv7$-=uoiSTkVsbs8pzY}$XGv^Bfq^we^Q1rj1I{jeO_(~0rJ@b(yVicXhz|*` z7b%ZDOC#qQ%pYAPhOa&9n2Y!9hABSN@qyh2Vtj6wooY%kbAJ4!F#8WmQVL3|tXjtD z+Ns8M*y-?8>*;)Hh^T02KXzBnCXU!cU5{yS#Go)DD|5FbKw`E$1N5cZ)k3-v`r<0Fv z^{(M2PwIVM(=o)tbtuD6>)DB+rr`Ple`Yj=8V-G|aA_1~1pCHtJI#8GNG3v*A9Cx%^}sIp2F~#t6g>Ip1?7%Cs=lfoORI-& zv@iDG(O{{bb;qKBLJjYE&Ye_a_KOAnFyO!m*@gy^1+=9HGjmJgl04U|P-78J>*hf` zaKT1e+Ai{B>_^BHhy=ffJi8WKQA*4M&#P6!IadLxLrn{s{B>nE4Pmw>jy?}A9CAN* z`bMIn?l!s}T#Djv)r`9_)TBG@wL_mnIpOf?ixo=jB@CQBk~Xx`^&NeD$DU8IU12xy z2f0Fx1kjUhzrM6$K}lYP18sFu8>FBAZDW=7Dqg!5SD*MDw?4*v zhFo$*-5lI^K+kY=|NeYo^+Ue){LvS}6H6jpy^DyCRrvJ5mSYoRO4?l}0%WAo?J^I~ z(#;yve|z4g-y<4CWK!kf^@45UpZ=?N1J>?wr`;z*lJGFN5JW^Er3`5s{XM(FqghLra z9=GTujXxhKkEbs_=$2X9s}^{{!vpKETs;HyO8!p1bmlKd+`bko&S>zjW+EtjS&G-{ zlt~Z(`Q`=L`PI9L7X0wA3KE(*)f3+2-n%he0d~t%?$EKQLq^X8|9Yn0;b9~_I2rRO zKC%O}yxpqA$EIeL9(RPrjIBkefjb{qfwmv(m*YA%yxcVHs8!~!M3-s!ogD9JJ*718 ze77DJOG*7-l`!P5LdJ-9RlfyGDYr|&p3bI3X7WlxlbKQ0314e2zMb><@+cuG*`>>Q zz*qdezul*`t`Ms)5Q3~vUh9g9>kwPZ75n;SK^HG^ON9LcZs@R!=YxB0&Bd)$AbK2- zqhy8vCsMu^e;;{S%(f-LGBSQfcTtjzYdIS^Lg zH}Cm#MM=DG!&fN_;>T#ad1{lDP)dX9yw=1MS{tc8=zJv2ybtASvZ0*}WI7(xk{|3> 
zRna1~ZrkTVuE`T#zRj|X4jx-cil(^2%Aye};Q}YuW-hf{lkDPkNi>^L-0MFeQck92T009np;Nz|2<%fOy0Rg+N z{IzGe3}`ieSMWOV2d?<0!K71O`&c|_f3k|)_ zS(?iiep^#oiM$BD5uNyBd{}jEL@PX-(NC5DI&h=y-Rj`q6#dc3lZoO0_-5ghj(?rs zeiQD`r2i=@*>`5Yw#hlu%K6x%7vGpENEh@OB@Hc}*E#eL;G#aO2?+M082=N%4Z3rE z-S=ZsF%%u z;P$KQ40nrIy?U~=qSxV=Of9W?yGvatPb5JhO`th1P%tfKNfu` zg^9EFsNs(|3wK>tTSAWWH0PH5^1e5MWY_NwEQS_?e2D;x+LMHQ?jyD7Mi*Ey~Dzlo=!8k|C8yY?l5OMvFl0-;(zyU zlfnzp7Uu6Omj+630qj8@W-6uW+#{SPhD#k*FI*LBjtAvU7X5w*;FTVrLzB)GpG;Kp zTK!{O^aXT9zrDL0|J;u0{Vs9cKXn-^91}M@*LjQA%4E`lpFAy2d;a#j!aKTW@$wO*t*pHz5H!~?@PRq3 z9{(Jj*=)wIwiz03j`2!rha`#5oec(GNr#?1#eWZLjv>AMg{eFp=XavrBht@WfV>^> zHnuT*_kD^Ud(PB5o@#4w*jQd6StOLWOYMYxkVK|Occ~sS9ZfBq}WU|(tU#Q zb$4h%xt^^&hC0*ki2hsqPaXN#zTKKfHhuFymlbr$%b-6C<>!IX;>DhSs?2}#?I|O=mRk_~ z9S{U?v-me12&fAi`{{|IC~fA4QUSKmrMSamBu zJ_(P$TQYE@lx=L+v88wMe$B9trxn$#9a{yABZ$Rm&#w?pX&E_QaNw5H&(3d!)*z`X zvQm~lZsV<#Rpd^)?9?apK1fz`PZS8HUZV~`1fX=0|CfTG^~zXX(`8@g%@{O?SA4kt z`b*>j&qaYhD)}?&RsVsn#w@Def+d+DGBkMn{cTd&tLeQ0^@l zko>b+i`kQW+2{b@)WUA7d|iRy3Z6@P3p|_tfkxrDf8_dfW5(2)w{O>eOF!uB$(Sat zq|#RKVqO2^q5iAW=OEAwL82o1=m#G1{;fStUqV5?oeG~#A&I~akj+BYb(9x&D$z(A zvSf4FmBr?{^*3x2o=fi7--jAZ$W3SK$>a}v7FE|NuT07(xN~_p9DjBDwdt{^+8vP~ zBe`G=o+H^!@X4>)xH{jlK&!{ELhkw%Fo`Zvcl}oTp;&alo?rs_{ZO7~LZQT4utXl9 znz@Y1H;!-&<#~NGFZ?r_J5}$S%!OYc+fevTtK$aei@Js!Q#~HHw6b$@52h%6Y4n3- zT88bBS8&d9KFPS5&~J$aL;$Mm3k@1|m&@L9*j9jQ&J^COe-O9`tlv0_lFJeTM#t7f zeqd5k?Vta8)%FZROgbrd?f6){WwP`o(>W+%lw@U1L+H(O&E2P)ID(Hb8@dm5$)Cm* z!2iNFbE!JCCTnvI6!N{Kxa#h5ABI0Qaz29jusNW5Uj)klT<_mnfb?asg`Kv%XM~ia z;BdzHN`OF3%tnVc96;S{Sin8?*#6{J@8sYfx`X|gYYwKFx>&+=`0bk*+jZ@Sz);hf zIFdN}bv}v^wS5yD_xp%Hp$bSetVgmNN~iu{*r{Xr@1Gq=%U_Y8_XSJ$s|_1}WG8v3 zu4FK6Z?9}=bF821ZC$FCBq4=qr5>ygKG8!N`0BPmHJh#w37AnEIlv5Ar?0e#{4#FB zmJav&qnUeOO0?Y8U|`ol z8>!R!#4BXArN^ufHZ9-5|5S3C6#@SN>v{&j2Efi6a{e5eKzdQ1<`Y~3qG*k{~b<@SY88{S(mlKqFMQRjngYqCE{ zR&~2u`s&>HST^J4!e$ai*ZiB7qcTaP=&1IPYwC<*xgQ!IWB8@T^Q%Goapth$j5AkC z*jC1pa1DOsx?u|8!9KP6p&Ni$Rz%4M0@D8B{`TbMuFjPQ75gP2J3slPjNI{c^I3Zo 
z7E?QI3r{ewW-i*i+3q2@O&M2jf7})72T<>|;!Q&KvI(=YDkCO}=FqKlMi- zdjd1;1Ov#)E{7b`8WV;6e#lKQcp0=7Yqb+0^PWppWVr_<)} z<@-$BQ|Rq4Xv+N@pu!}KbEpyfCgR>+nlv>|Sa8OccOGl8Fw_$WCak!h^rRtGmju!{}&Vmo$9yXhMuy3J1 zp?Rfa@$D-nBF^5frtE?eseBK^Ca-Y|QgPn8@4F76L`_}JkOs7rKos4~Bk5bwQoKtf z_=_Rj<*dk&V^~ajQGsxX0Jj_~MY@e?=>m8qE6T4brMvtq5JbYD})@?g!x)P=RMLaz24SPjGN?N$+xG%3?&R&7{ zSVYz75_ULaYROFo=PlmG=C(u)?2>^{@8zz7eo?Rx%alQjc7;bwmuel}>OWO>5>(NxVzJ3|;6sse^Ni2mV5iNSLE46XF2piC@Z3z}%R2PwQ7BMwg+c!I zxxA{|84$2V+*qSgI{vR6tt-fzB}o3dKpUi+t$Eog#m&u7tRvA!uI357uXb_}G?n*r z9uNPdclFJnDreur_0^BWuLmx7;mpUMm6ZdtG%W_J&Ds7Rz85M!rM8cE=^p@+8Gp^j z%8Po6Tqa8yt!nIZ+VZd3d=_v_KTbeQHsn+%T~c9sQ9Tgmgbcg#k|9*VE?aoIC~XB% zr_OdK&`tX*g=9sB z9mnX!O)qSmKhV&>+l~>+@j90cnEqUXj~p-x#C#2lQH@rNImpnu!eyO}OCEDp|BDz< zFpj>Ya|!GYTMraMS7vF}{dDInJf1 z$Vpztr}Cbu+sB>$iIO|&gyqSduzK(s7IEPTme)8Ky-J4=;dP0@@&Xi%FF$=IBTl&P zJ(FA0h8mWZbgqyjQB@ni(WHZZ0<>-R!yJrj5q`B3T8zKodr+xz#Q6Hgi_c+zDf4yK z2-HP<3iKrNmwrx!cca&i_q&|VZe>5<5%xi`blO`7lk;FvsJh1XN8mk<@i}n06brBg z_0Hd@8s)tt#WrLnj&j)m1nO!lpeW-tI-@|iF47!N;jc?(<*K|lt=9ZVVaZ}kl23r^ z!Rc&8dylj(8Q0pToF*U>!BF%E3XdZfM6G>NhtBvM9JlymAJo_zJ7$1UtpE1(_W3?& zJZY@ESe+)!QUvU4mCkUIWz@nCQ_?1pf1RErSY4eTAa2rXGW$&EbmF z-T(ka0keTn+ ztWO?!hT5%~0{;&HFIf3Ku=2;?u$9_f?43O(^5(#;qmciaP(~G@4duR*si9mFLH~eL zLl1Mf4PJ`FpRjKB5>V`taTv4L&F$YEz>!Ij2anAb-d&o{`zfeK34e}h>Hnt`6adGY z@4J#5dWaWqyy0)Gb-w|(H+M@+BQM_?#c9x42u{gGb);ARyNeOyDfI3_kB5D5my*Zl zoTE0A+vv!!Ex{pRtwT?-f$7I)q!jSySK(mXZTddYix7}f)Zn-d5v5mTM_oh@9ai+f zHqfJLxwe5B{g#X|R+5e46O7cAH7fF5;>E05mbKs9Hxl3s5!hz1QgNfCYa`UWtHHKyiW< zz~%owI{yes(DOM!8a~cf7g?>szU&KNHE#`9_jj!5&$}{>@uH+CGHUs7J>r?}fE1(3 znDlFuJ9>#y^yDt)Q4S@c=Aq=?A2;cp0a1mDX0AATD$h{)PLg@Qe%o>ZD(9Qx>yeX~ zy^4MQI@M)uH0={@eW>TucAFE4s%zGS#iD{o+8WBOriv)? 
zslgLB?AxivnkL~w?#2m1bx&^-h9060Wr^3}i&^h}*mMK_FLl5SZX_PRH{LDp$7bt8 zjs_*Lbztb9$=J!Ba9dTS&d`8?5Oyp|)kqG~tXTR{M5!#|64g$*NY(g2#C-wo-TEZQ zzewwh}UBoBn3_Ab6qq2O^bTns6yuu+h_-_izXf zcx-23gM^X<5`!C|xz@Y{6k`uMG{bZNGiVYlt(bQ$!mLxUxe&M3@Fu;$m+H7{_?NHp zf9<3y=l~J?^G$m<3y#@b*enIc@J!9=lH_Ufpp(f3LZg^YKNIupDr;+dnEyZ^eu$|nw;zjeKQ}Plb^biKHu~=&9~5($O~p6wKLa%7um1;%NzmjnR?JvCsNAPQbUup&o&L-QDt4cM15!X-(QcjfxV(e~sZA+VRZx00TD z|`U(iQhFbWA+I@Xsn1Ods5q=8E@F# zz}uvYzK|`j&|YZmXY5c?B7ocWq7m}qG?phoUyAN<^@^4U+Q9c}--p|UL%bGb0oQ79 z7%#geKF_PkRf@j=34`2$tnmqFDoI%aj`1X#EQ+&jEgg$w>io5JGG7A9$C(wDQ}*#~ zC8a395dH~Or0)|?FEdT)v1|=uTKi$0&qi$DM=`YAt(Id_@f%GIUiow^gYRup^}x!I zuv9ip^(?@!*Z1GFu(D4DFM0nHXxG`pQBv;?JXYs#sGQ}}th;8}@rCuqmjW-uMWYOZt~MXaYkc_GYI_;<&f zp)}I`y&*g&O0(iF*Be%^pvfv7BxMR$l`$$7R$pQ`di@Y4{)8GkjG+%1l>_XD4k%t(wRlMsA!@5-O3AVaXis9ty7)Ewbb07;R5K`Je+ z;#X{Ds%O4R*`A!u8gr5DUnfl!9=jiqdxQHV+b9n_fewH0*lZ=*I=7aEfJjHlS?S9K zq?=_@+;obnDDhV0I=xbEc%ON~_|QO6VU>XON%HH1k3u6?j=rF*jAU?MUJ=L1TcV8= zY-|)4K@Xz-hN0J`2H0bOl3+TMw6R#@mbddD}F(ox$LvaQA2~mZjPp zv{hLq?1SeaOuI-sQ2f75nv5ECRa=Y*nhd634k}m@K`nV*@F@jz2)wQkqHl>sJ=1~xui-JolQ#+s-bi2?> zo$xw|8WBXTBZQ}5hdO<@i%`*2RFR3+!AC8Ih_S=OC-UqT6+3SjlD*SC8s-M(njF`P zzAq=$Sh{-rAcLu!`xAbwQp~Y?fivqw)Lf&|1K0JD#W++Ixu(Z+g!Hir1n}q3xZ#zI zOVY>Xgt5=CI{6g-Z@V{tOWxXvEftp!2kBtRG47~GN*8~+7837P^AO ztEwim!tOi5F<2Mh3~{QMzZ1 zkYq*3EM4Zc^oQU`i@DZUlghgp>QP)~~aQ4pQ4IFA6&nvF5(YP3wG`$2zj`14h zk2^=S_m(j9$D<<3Lo|!+Ci%Gz5?RUjot*}H%*h4_O8G9eUBFO7rlMLBLe2THZTt0u zT*7QCZX-y*slMX##UsDIj2HD@);~hY=s6Xo(${M4Qx5>;d6Dgdn~I+7^c3|cO&ya6rUMX_8$G+hZNei1-^ zjM}jo6!u4<-sd(0ekudy!;QAV53t`lw_sCjHBVw@qFGlUPJ^}v2R44KiW@FAH+EP{ zA)X6Po}c5XyNw9rRj`Pd^m}-|7tKyS?pW|RPuMAdJ!ORF;{5W(8{~T=v<6ELL`}mg zJ|wGlkwk~8)uK1|Ac$&OVS`v~ZZ{=Ev!$Wb>Z>F*kP%t%9F?DOALn~xb?(^F>!;J6 zhaKCHTUiCr@~>PkrC%p`E_DO2X4KDWCa;{@WLBeF&jGvIVDTGNg1PR{g@!@+=l0gL zj~I~>;bASfZeD&U0GJCc!nOhb6S~lji57YCS*9?mMt&@8>f`2%e~6Davq9uDq0Lwp>K>DgpTi1A0A`vwH9Ua* zF8KDHVG&N@$Fgi0QJh)g*tJ=a3tFBT)a}#z3Iyl4a%?v+4XQfM^Q2SEb@Lz>C!n@3u_Iu?{9`Buh2y 
zSL;-?L?m}ukJ(NZH$ue)IBo%LPwTO4%Fqg_le$||F6=V_p91EA9q#MusQb-KMf zu&lPiU~< zj>6#@$=cv&e#Ag5p4WqE@)koylpHJ+sTlB(rrrg@WmNl8sU)3b=DQg0{_y{ zx#nsc@F}t6)?efWruMWOj$+V^?F0sBFl!9!W&`{LfD zC-AEZYB_YiDGw2i#`J*Z-4^R2FLiUHlfLA5@|?#VbXkeZz}VkI*LRa^#>(xKjZQKFbH!3K_mOt=+&r)Yo(pDC^F!d- z=$~`m@sf^4vbi-DN*^Sh1t_W@cR~=&T@aLX{^-!>io{O~3adxIpdljP6qmaZC%gvy zDMc%Cj2@!JxB^1tbMXDD>ff99Fo^Hxa8ryh0(Nk@6VTz(rR0UQB^FW%=ix|qBZnq^r zJ20-IBxkm_QnhZHw=1%q!t6f;A%VG)Nga4o;~P@zWjX~Xy0Mssx8jQoSjX#guG<+m z16S?A<${wZc?;496S0N4XXOna~>9JSb* zv!LfhVUKlSMb=-BUKUiMI$K0R(a}j#FE31^#8BOJS;4_qReXdTFsHRzffYMVg`y9;%9XN!O<3I$I8>tqc|a@>Rz7&XH8odnr= zX}|bC^ryw>Rk$49N;N+l?B}99!;AM+gVg%5ZSaH}l6heCG{6eaSQ8p8qF}Me#Ce%> z2NpRb^V(nf<|Ek_O&V`F2)A>Y(z`ouQzT^cSotfrUn%a%=d3sFc`LE3H)g$WQCS41 zKAe>eu8h{;{g}r4Zm847PF~vyHw9{+4@OHQKwr)r_*q+3OM^&oc5@ zM6eBjTW4uv;c7B#j9*k*=`Au&0Q(Y!W$QWB?8d{;@YkCBfm;MTzpi1z4U(ne0ATMA z8(E|LwV*Gqy&~l6T5=|a2DMBM`wf=LC5vpM=XU86%#fg?cOeW+V>e^_3qDGKaMo$y z*2jz0wMF!?kwW){Qa-E3zopThZk>%egcR(#jbA{L$ zCA|?KhwjcDw4wTSDaHo$orQ)7Yun|wBFI=IYiAp4kBcpj6_$0uoaeGlpcSQY=pIB# zPbFs_Hj3otZG;XG@~mR_;OnMw6pCAl^uo04LxoluG!AGJ)1 zsJ1RfNime~VJ;{S_sq%tq{)|y)lB8=o8km}3NQ?z*{5^&QG7XM)#fFlSX-e4PJ{`P zTulnyPlo;;eJ-c+mNu@p=K6~^tcvnr6L#Lv1;5?BaN$_vLNsa)dc~c*v6Ak1j-LKt zyib!YadxWa%R*L@YTcICT}%}7HNT(@E4-~LE_G4cU@^WyA{Q(vZrwQSLLH>ty->Jt zc1qTNWa4#!u^d^^Wo3D!Zdwd<1^snYbe#98bt@GBdb(8i_q7tlSWZ=v{j_QpmZs8I z!f*hR6*Ft;bBgR*4ARFo4-Gw>b|g1?6nO&aE8$Zsu5)pDE~$&Uk3%DvfBoP<4;{^$ zx}*SeMLTDx|Fd6Z*g(R;Hll$h?Ep_k&v+nNn|2_1RQk83;FS9SYxkw7OR&K--f>?+ zG51$E(Zl8}r|O45Wv5mz39S`(pKhb7S<(XoX-op3V2pK07zT=m_X|`ToF;VoFbYo9 zu0Xwms%|aKSs0NoA=UtpA_ziN^nwI7lyYRCT4fQ(-H6_M&B$}4%noi(CYi8DWF)Jd z5{(r#CRwaMZfphupi;VG7qup&)z$;dQWJ@{e}CkV4Dxjc`dIo zrD4@AH%QEjr%_6|YhKjeoH@6kt;kS}kkSiUXWtE7RM9ct$zSBR&wYo)^Uf7`dQr8B zA!oPM#9naFKcQ_ef{yvl42QI{-%qs+aaE{ji*9%Pl`J4w{GJ;^l` zTxi9W_|&oJc^AcNO03sjN|`>U7LI*j!Lh1UQh5`SJm(jamDnjq{qPcRgp|}SA+SWD z31@~@%&4e|c!0@LeqCUP zAP@ED?TUV?C2cAtg{Xj*%&HcMb&8R^FIC!wn)TOK{IYZ`)XBqfyucb?Ir1JB@~j9) 
z$F_V;E;Z|FK<`VFDhRp_U_3VI&O1RZcOM>0_{hW{4pAauVu)loR&Ly))wG{N?8_PT?gXN^2LEC^KOZKpE9|DNnog`wauIp%GQvH!9?g zxAo^|cYuPh>XtwNzfXh12bIa~QbwDf69JF4i2~&xq2MCvlgmT`76l)V8tZ@#AI5T{ zq(~O?%Xbr`5oA~!%7-$Tjxt^6?ztj3Ex}U1Zb)u_-S7Nma2Z-}c@&%Exs1sii}AD* zw&P>mC0xpjH*>0F7xns%Q@fi~87bM}R^l+bfk;xqg(e-T43w_+_!Gr;R~;P z?^jUzOgSEHjZxo&_{dGX4LbCQ6;X2iIyCo8vtvIn0sz3Y%8NAXw>@%~(2m)`7R2%0 zb6*@;Hxa)>{B=zq+pQ<}1_8^Yet2Jk}shuzvxsY`FGrw(Eg zGvbegl{!td8}g>Ry16w|DP*%PhIa)BUSHaU3M|c8j+|g(+XgXVvaoA7T*Oyqc;8S1 zAv2fNAiqqQFJiPtsBnQ16tuya5qPviva+V)2hA^L8rB=<)#EYuq^jg?UGo)DiXNN5 z=Q`3)BhS~-z~V+{|7Sx6#oU&r*d9@jLRqCp#tud@9*4npZVj)8AJCrh?vqqWX6^7f zcte9_bo{D|39<~4PGJ|oE1H+v4b(b&2M9?hC|m<#b24uM-)h1Bt(azo26~yZPp3rK zkW{#TwF&=*{6q_CqeeBf6CCQWwz7A45&pnC|HN5xA1g)^`CeqGbN$RQ}ts zo7f}BGeNTk0vq$%-^$AJpAR9|9E?M7hg5Z^kdGgi;>vfL?ou0m0QcIj39G^}sLqrM zA35f4(QbmQJxF8+HY7Ld6R%ZV8XZjF_oJeiHTs;NkDx-TD-_ zsU^Mo3xtLX9p7tx43gq6XyGX(oqe|S$HDQnc0$}xUGw~VUkit66R|cj@-q+SYlz}% z0Y}+}huCVUZR~rwY4Po*OFk1d6v<+{5zTB>hVlJPbRdbY7tG_dVViBzMMtarM{FMs z-gj|XUXD*r4yOZE`kBE&&+OqV7XHEf;HNPZ@tkSpZiJ%7yz~wGV*y3J2Ww%c>&cnI zR(C6&K$P8TeEUAb#Um(=7SV$qYLb8REWy0~U(gVn^Mq|P<_|YKoK2>Hi?@go+(a_F zxY)wQy<2GCzBvDCMKRB*mvNX;4o8=|Fx=4fsb17)UeE3jRIl|FUia3JZpUzr6%w)O zT}<_Z72BE~qv{b@$D)+yP0i+HrM~YR3{K3WxOFhwe3P@`di=C_4Am9DGeEnHi;}6A zXsf(64R|C;8I48VQ@YC{d_uSZN$XVJE#h;qmXHTLc(eDaSy9wa;xZOqjXV4Eg3l(n zoD;Xm6Zj+8=?YAsIeOs3CJ+H|FX$YMVr+T0>~y6CL zws7|771F*QpAiv(vxUdj*u8H`lr^^q;}+}CRxm%c_~SgA96Giz7v4O6nKdqgHYN7q zQ6Fk&&Fo>ZnPq&!+2nO>+3HWgpYcz*zP`ATr#MwKdOgWH@#NpX^O6DMZX1|*0yTbY zpV~*tYfPF({Lro6WpFW~Px0BIumf!vpQ5%fesj5K2xY*s1XTmZ^^+Y321)jSw=0MK z3)Lr{FY^|J1RK8O^CKpK6pui85wcw}f7Snj!x+I)iQc9lM!CzNheAXr>CQzZNb-yxZqH+F?Qyf5k+x zxY_N|hRXQ9y1r!^Hf2q^kBRs4LPeMgR<O3{RP>ok2 zjaA9|%2Z9?Ue6&;iP2my}QYD5D8|Y3pHB*(f{`Jq;Db2Dihh* zXyJ?|2!)3yWV||7+)A_@#V3jt8WU58VXM+Y))wY#>^|uzu0Z| zq-Nb&Y-vO^!5i;EFk1fl`m5?0E%TA44IqO1TlxX1=gVJ?XFs%UC<*U8?l+Zg#1vJBn5r1&+T%l1s~_Z1FnB!(r-d zAqNbB6LR$%9st8>E_USq7qY1EizU1&pZBw23P8u@#eZ9NDVCQ(L|?hmR2y~HV!f70 z%D`!;ZJzr|$LMUG 
zQl3b(zA*sex0SgZKG}Eo)dJoe=-(b87iBE8Z1b9WS4jYX5~ir1r~~F$?-lhqOTEDc z>NQrEZGSmGHWd~Xt%Ln8&pTj0I)vt#t^2N|=yfZ*N2A+sMLRx2%{@5*gkX?q>W39P zqSthVKf_a!{^jM1glhWeR%%!AYqsK|F2j^9GvBfab4}+VNkEE1H%dl!bN2DY2q^ zz8uShlBRzBnuj~9msC48;_$JJ$*bw*?|RZ*2S-G06B6`w-I68)2ud+}s?ztJhCUPI z4*fo*hYlDAhONi>ievqqeEv#o){|CF-=|)WPMlTz9U9yXPanhklws3^tF^}YuDJn| zAi?R>njZofLhWDg%hbOtTj+h3flc@lOp@jthGE0ZafKYuU!NkDV$NTbF2s*9N0YNd zK+f+Qt>yil;OmZ{s>->loiNg-TP%IS>1OvB%TbQK&}Tk(EQrg&8oZIhvv&yQJdP123X|E5c( z_ejs?vY6IX-lU`9hT+1)WMSFEX2R~JP^I;43(Mx&rBVZ~qZdHLvRLny5u-JUS&Y^n z6ax*Rxb7-)h|+o~2f4D#2s3j@%mJ@pF+OQ3^wQ?CU>`e6k347B5 zyv>^%C95v)lRhMj&?kkNl8{0X8Bp#P>viO3SS#;g!rosLcgHoq2O^FIf3+tN;g$$ zrhYm$Jh3N*PHFE6a(+1g*y!`2AKz!mD{Irlr5TwIT4MKrir2({-r{GCiGhz@gnv6jTl0{lw8eZ z1!?rVqGs6HzxVzz14EDBW^!|*ecRqV&FX90tb#^HMt)02+rVIL31?Q~|8kwU8dBor zncwFUdGx^@7MbqFooi*{hxcn(TYYofpaqmvYVAAzH4ErP*oxpK9?9R&5gsKCrW(X0 zNj9dXS=irwCnQhHiIa2u+@!dMjZ<{0ze*unoE!e9P6K@n)(wT z#e0jozc(tQNIEXFCF`(Do_U+)yt>cUDj}bCS|*Rc%M0SK^23F)^MISew3+>+r~4l< zidrq(mjXW!c2S@MCg9u9NU$K}>nY7UDiH}+OdPBcqNGfD6Q_X|FnzBttgVY#PuKl~ z`$Dcpr3zDa)CKXV36A?(N5&9rqW6R)Q(vZy-I&L{$)1ssT$Zo!uk>gm9!h-5$?0Xv zD_1<=4{&_H*tVV@dc)to@_Re^>izaiPmdgD=lE>SIubpXF#nZHeA>z-TK+$BiR!If zV*Ph6;jbQ)+WxJVcYOeosc0mD_UW6o89hCJ{u6*9*W=^13zQ4$8Z75aPghH9qdT?A zCQ=9V1gDjY0NEs3Ra*ukT)FLVmF(k!EfQ%cx0VaHGJS5M6th-R!*r^RpP(BoI*QdfYuVA^V2 z=(Ouhv&Gtk+cz)e!wY~EO1Ca6ztBuPZ$`l(M@gXUi;KU|h+l9itmzXns1iHd6e)z-~H2G_w~8ahSy3c5zp13@7s>6 z>UvDBny&hHEu^v=OW8islS`Hh@zKEG_`5$UmuE-ndTg)dqm0HP}_`+`MVFQ2tZM zvrSXh!^zEOJA?bc=biodYXVc&EW*-fv|0zB^zh#D-J^({Ydt1bB;8ehyywHh9XAx> zOHNZ$>`3AHVrgkBn27NOM@xKs7n?xS{J;l`?_UI(hx3dr+{uz+5Tb+B?KBMUyKvc* z7SFNT9&>erQj&1Cq&rn)MfcFRF6Zyz>u)G?7B>`lR{G?QZ$MF8YI403sRQR0;ie=X9^5T-MgBSm254vlf=c; z!sD~HGdB&hX~+JgkefTBg*QB@>pPml=BlUN?Q@@6oC^6A=NIXB|Ahd5Z6b1qdgD@gd=46dQi>AH z&dgYtCy=~%n^~1;4VU|)Y10ysT`%?lnAeUbajQ&kv3*GhhHJi7N7Xy-#)pkD6zJIP zd-d3l|3U$1NIXBrd{!_cI)&7{wOrNnv{QM$wjj((dNpR@(^)H=mX%|)q@C!w5C3|t$`u@yo?KS9J 
zqdSGlE%HD8=8Y6&X7yI;gL{akO4DwZ(WHg8aKf~eoxReZsyyh;3XM2eUf&f-?JCLEq=H=xrPKOMF+{SsILmw8*G8#zLlHFUuyUMXFBIBoEF*}kaDOr3<2VBMg&~C5gd3$yuu~24csaTx-~zX^UNXS$KrF z^FE64tG>zG#(f2N7fMf^-L)14rZt|v4-v5CgoNZ1wz@KF0(Wb{)h^1Sc7Rnd-%=Ka z2s`6sE`K&n=3quu`xGu?Sk9Ls=Vv1sp-(%8jhbxVf!(6yIeu&5SPd0+DEO%JR~3a1S*VkX(+~q^woU?a zSUV`a4=s5?^S-lkAXbK@ok8uB?8fabW&d z;gi$${5h(qRdnj}9XtfzDTTe(w)ove&-a@2te&j&!_93~y9?bhm+vu7A1q#^l7vWD ztDVORyQ?w{n@X4nt0$h)zX+N?f7ZzSq8QK8v^|~XmWvPFuIB3jru&B83;ZBLvtb+eEy7rYCiSPTCYeL3@x?^T zL~XXdG?+@u>DYTpNDEt2y@@OS8?~}|iD>1{X1jU{) zL^C|T_TgSXlt^}`qb#%Y)xU;=Jn`cN*QX-{6k~Lw;A(h8%S%(kd|D-+^BU=%__$~T zJQ@>TDHh-{<-MN6=xB_fDxC=^Uv?=kny^?8mEdlSFvlADmof7v*mDBT5n0pl#>AEY z;T*WpxjbE|ckK-V93rFcSKB{knGSCrANihYFPDA$NIpwVC(h&POD#2sTU?R0pR@w4 zkDmYeDQ;)Gtk>r)vRtXQC$s}?kKQrO`qyo$jf#nh`Lv>U%!Ec{`z-|4=0MU@3B!vM zuU4IJXw!mYdv*^mE#a*S436V{^rBWYL~`>cmh2SIkdEkc2nmn)=q?Wgqq%7&;02@Z z+rGzJ`g@RFIL~bK;h_?PvizR1aNCP`FkOY4ZC^dKCdy6j7j=AX_<_MI?TKPJVf z*pQRjP+U*wV8m~1;NVd?$;>w!bEmOq2}(@yC8_}9aQW&83&yP3CfB5Dwt;P8d39ut z`{w)UR#Qj$mj)-RPZPa=>rRB928-ptaqKH8B!cy7W91pSF%A>(P-k9m89HJYozO(z ze0L~TD}(rc4tBOms#=WK+*iV@J3?hiU67F$!G=@cGA;CMrAv+W@&xk!TNUYBg8bSW*F|DYenQh0(J?E~2zHhDErWyHzU-1ei zdS_!fO35?o7g4WPvX~hBZrvFA6GblgT#PaK3hunx#gSEXZNoj*6NyZHVSBnpgnFN) z9e-dYk%KL3^YjRsBl_vjg1IYQb2bQ41E&m?>Gn&dw<#$$Dncwq6!vVuFclz9ZhV2Jl-!cml63b?0k1^>P%U$Xj^e>s|lk1Zie3+U95+ z7?Q+Uy%@~tKon5A{cxr8B%kjjKa?6o`unW1Gu0pMWViNwq9xhZ`u?VO_qKmqv1h?Y zbI%9qTi~U)3m3cvwTO(1_H}jbAD?$~i8=g4zq$A-S3fL=rI+%oj6v=7JZgaP?yN2k z59qEInNUb_iSQL@IKMlG+hyAx@8kFEVQdR8IF6}l#Y*nL_U2$m6p`!XFlIgZN4Z;# z6B}H~Rd_Az0i*2!gUf=UZNUh$V6<5FT{(t&hRhsVHK6~~z?nbzb6RY^sarTx90 zl%AT)9xWj%!irKXV>lt11t88XBv(%*eT}_ znG(ap*IQ+Jljx&aA1)BRcrVkc5Uu~YqQdGk7XC$yr1oIJgsY3TxlPA zS6ThwuF5E0IcF+@%&$JnYmgWDjO&<{a|;jO(AR4>SLdQaL%3UhEsjaM{C=ZVQrdFg zDZiDIt7&$v%q%`pB5d|)7;qabM0_oQu;eY*hL^4{j2))zzg-1|Odq*?9GCFnZSROh+&n4jOZeV6d0&Hi-H&fWov;#VbZW~&d!lr)M-HdsfEXjkumvQl zz}<3*UO#usC%*i-`?6aqYPiprn79;z@E_t7KLlejlRDzBylkd5rLm0h#11R}q#N?J 
zPXB~Olbss32?WpFRbD&W^IF>;9zFE)n-Q@^ik;{nd+YNvX|5RCZ(4zmj>`WWj@#KO z`||TMS+38vYFuJ_{+0Hv_U*m+!Cz~2M&MH+dM3X+#`~j=TS=sAUoYp>pdpCJfj<|i#@~Y*uwIxW;WA?ar22ue%2XICHpFMVAlZ%$yuhrSb&RM3O`Y1wZM- z;a+vPV(0+ODGXJ5i(r~TlK7dYQE@YOkji+W#9SzS{;kGPfL6$7AXZTbZn^K=lRJ1wcK%Pxz#Fbaw%v$v&N4JLvM508Kb_q`Bsa% z+I-*F*I99Sajd}_3Y>$by1^w*u{ABkXEZ{7EQh$^ZL)a&t&ZgmP3k>ejmfl#+YJnA zLXy&3I3?dA~2onKSLD=qrTJcx8`ehv!3JlHZA4G6JK(+zS^7QD2?ss-O6Z7<#w( zGt;^-Py4fvkJTgRJC7Rb$8oJ%wh!2OME(`UZ)t4fbh*kkVtsDJ+N@%O!2#{w9EU19Iv_;K6F8y{4ovD*`+u= z#qY7wt{}r;)FgUJYHLUD^aOpypsk8|0nZcReEqh;ru{yDJh8h-KJl|*r7h|f$mv{^Cw$-bS5U7>d=m8 z%PE@QP5t``_$3!wKBLnW>0dlrJZ6Fwm|hMdGWrNslOBXPq-V5bK1myHE~w_dGNSBsYx`@23sm+{zM0-ht7$q zQ+>}a=n`_OnUdE~Ey7v~VR`yHUjh9DJA{urOnZ9J`y%Lq>-Tzn$~*AYcl^P$5C4pa z$_0;xm(Fu*m;l3_mmK!Yf0pST5*UElJbElv*597d+ly=5`^nt--&Vj5Y?HSwk-ejXf&&$npE}X;I2EU z4r8=R8BIW6&%vanVKAgHOGV$8W!8|A)EXAHV@sZ#>e`i}d>2g+Zi|)K&^0f@j{N>@ z<@oUGqmU5mihSMA3JO-Ev&MX?E>|V186|J#iBjJ|;n9iDlP+*cTgf+<;Ej=r9pW{A zn`D#p4%HwS+s-cdz63+QBudMwm*6_0w9uH`&T-{>j}wvJzx0n){RgcA#^wC@ z?e{+3W+#9k&G|QplR?%`mQ|7=ExXjQ^6N^(mZv@qU>?O9Wu-x3X{$*Ae@MCYxwn-GREJhI?@Jv|)lCITY;| zuae$d;+khDwNe^S_1_qKB=|F?By(=7n7Iu6*N24oJ&wjIgG9lBTEA5E#j=d`x-|7b z(~qRQc1p~o56P`+N5#+{nA0m%=}odpp>UbL55P5XQSRNflE_1Kqd_Skn{~sP*?bQg z4~07qT)0Y-t7h5uRZE<9h6fJ^ckzYxei7ZaI`QB>7~@p_$@2QFQHE&0I@nUw>eKT!f&iBrmsMOc#>=yx}#ML+`0= z$1dGsW$WXpGdZ~Bjb$9A`yOt6Dh=ntVyp`D->}YY3&l!NRpM0+>7i8p$GOMhjH!E% z-k!`#Ke@YM3A>gTaGJ%;!3u}WZzPqg-goJFJhF>l-9u@vS_w6?)<UUcVr(H&R@lBPM;JKNsYKDVq4t%7~gvOG)%7Xh>nM`}K9TJV@I2Mcj+@Y=0JlptE3-0KYU33qS zV^NDyq7#5BllB>Y%1Q1!ea)%*%(i^2Bp^{5Ul7crhIrxclQJ|jNUucGms3L08AHBp z8VawoJ*ukL=&sZ>Rvo88dLA)gm}S57=*Q?2=NC1J8-`19s%t?Tf^b@9;ks|Y!}){y zPY#2(p8h?iPy0@142R46*c`9A7Cv-Fo%|sU(axvnBU={LZe?%pY?BzrWDe^n0w(eH zHebeNH~QS~a!b98e)c=W4+Y!)K~0?|@C(+~1TS=m8`tcTa5!cpwoCnl6h*#iD*k=R zS-Xdq&k#JvZdIVrmG-H9;7A7bcV~NUz@nCVwsPPV^ytG1H*Ev=H}@d&WE+ZNMs|TQ z^Jwe5*hh449C05fwaI3Bm2E8(+tcsM^jk2kzBf21?Xr$-AR-Ox!}+7S0@(u<^#JuM z5||*T$*S?0NnC`8;-Jx;`@0a6fc&|XZM{!j7nCDaS}@{ls&QEoBdS~1v|v# 
zShB~m%*94Ev2>2@LNi$(M_yvcG2!e2Zz0y9c22n^W(VSXd4Zk2GvZ^7gz0n4#GBcE z-T`Hs*|o)CM}?@lxtc+bN5R3*F>_NeMEoF%lrPtDEHryCwlq108Tq&#<1=79L-8FjF>PZO#qCee_pL(7^zk_lW1Vi!-FdEsFNP>Te=xi-L za&mF&(-$DI0Gca3J)*PVIoAa4Y2e{cV%YgTHy>}|08f99r5-;a-7##OZf??Y7fu#^|1Ld95WUCGU$flX zp!;!G5<#URAYfr*H6_n9aI&15Z_ckotfGQ(Xq{FNxiuNceVeXK-G0!GWA*adp~HHl zc~;WE#Dzn8#-&KOn7?|SK>x^A`a z2npiSHk`m?U?=Wes&p{jcOK)l@L-=jsjpO5q$8R8`D2-F+oXLLQ_bR&B)Mj@ZVHZA zB+Br>`XXd=OKPJVs9EO;h?4YdWNCJ8I(=luvMkbHP*c1z*|<8SJlxn!CBiqOjC`dG z2ev@b_-rL{WH)A^`Y`44)~And%BTKe@VSjc)$Jan02V)Y1{v99Ld;gOE`RU|9+c;S zK8qDrNA@BXdp!>r&|RuK)hRiJEE-Xd*UT)}`se%8GrwXrsrjy|@0pkFu;QArZeL?` zUH5002xfmBipXNpJZHq=afIYkw@W!>qsqU08IGCw36PaUtEh=-plLi@30%&)^@aJt z8i@{efGixJp-Qikk>DQVr(gi6mELy3l2zOyB{lblf6AiHY`|I9@k$HI>n#n;ADkD~ zP{e9#=>(j)K#U(@H)N1h89UYySZdLoA1peHDm4-36zqAu0J-WO=`T7qp6r`_G)2 z_GWrd`%WB&LW!Kz%3Z!Ur+!zXT5%$y4^bnWOBuV5CQI33?I$V9{Rf5&`s@Y0j|bRS$seZr zTB21%^2++r__nm>{?d9AQqfMfTM^5foZl7mExVET**mp1%W4m(tmaW@;$>bnu$r!` zwHr12^pc(-CrPMF-TWVbEp<7oYuisfJYNu_YbxoJnn>M|;HppOG$oDe_ms@_O`2R@ zE$@h9X!yn+Gqm+JNT@)b_}T*Jn-meX_Ouc%bHm-Y$<+F&C^{yp+gvB4Zng4l3*Qi0 zGkD1F-cIuX7>$touKKT>IRE#YSn)`_c7ONPydnhaxBh^e`4ig(UX`Wxn{#QIV77~6ldn(*H=gO2~ zJf(~eyvb9jpE1g@oClIw%@L(SUxf4I$FBf9Do$&alHUH$9^Y@;!}$SHH_!V&PI^?T&l0tMGODhkIEuPxzLX2dmqm{Zx8>O?KzOW=rPbz zC=g&+=MB%LEM>XAt-FY?h+$_FZt#ZGjd9@?p-Zx6DN#SvifrxeF{d`}c$mRGNdzWd#R9N;($?t*yBC7hYjtBOyNr=2aLHP^43Qe|4|Qkq}; zZuwy%7x8*I$vC-Gp0~`e)AVMknMY40^N?``L$(xk(-KFV=^@Vd5SMz0t35c_8R4S^W2w599_LKkLsYS0rr3te`A_gzu1%N+W5`NOV^h+M5&%3u?PAwmjr7$9rA z7?bSuSAWu&cO?nbB@LetxRW_Qza?0_}1=iVFo0htcNc?urJ-Xg=M{y33Dj z3HSOkv*y@aMvt3vBKlTg~8ono_EqxQ&o4X;M#7kI> z%wD}wZdReyUAHx~qhzc=QsA_=RI%;|1(reIce>r>)?LYwgL8Jg4^z)hPvY%_G*7Ra zSt0Y6(>sRuQZslm7-|e$fZ@tI38c}Q@IEm?Urxs~mzGMR6OGhAlMp-1KszX zTb8B)hUNKvFfXr0qD@Lb`RKr?pBL4UuVMrC;K5H3h@hq!HIGcPNsdOH)=Xa> z3CUmp`-z4^_E%t{=$ZA{n8!vM3RGUJwhZF7Ge#&aCXa__vO@i+y-v-DSVp-Yd4k+b zjb*9%7cA)*qnWn0#KH^zW1@5k$*6}`cO{$)?+s2eqTp7o-8HPJ7$fNcLutAk+jA2r 
zmo(I2Gzy(-@PRUD&_bzd^7t6(-|_n4oc&RhB@arhIvnH5@0*vNtZ1l8p@;$XGL(in zco82Qt6KqM`{pa34@mS~Y!`vg+x9}!%iLUQQJh5g!MO!rvlfm^?5dP6-i*aXu}{r~ zeFXnuWuFAsDp>hYiJ21|ReFK2S?LK34EebetA{-&ubGx?FI6SO(Pzu}eaMB@HFI5* zOVd6W_bP{qi_dOJw4OSC<)VF+8#Q7ggPnXnaJTVHcqX&R&t`fKXjLq&YAOyfPS5v; zS7S%jmjZiKV+2w3R~wJVl$Us&2BZ+x>u(3j$1bcd&YxZyL+ZA``Tca}eds&Zo4&?Q zh>F6au>RCc!*1nF!`qsy3+3{PdPgZ;T4gUkwlR-6en5m9h2s2w=sVn1wY~KXXxf_n zF4_+oEb@3MgFnz%3)Doa$YkC`lz=F2j;d{SV0k!NUoUrJS$@9RL`S@hc{WEPPYl1L zQA(y^?nxZCr~-40$N$Uvg$n5LzCAj4RjI2)a;bYsd1(1-L>oz0;x@gjafQ?}FrMBT z{-$wdqJ#3kPhn$!m32+3ubWLk1bvi~M%|G2I$n49S@7wAjUm|4BzlY=^@S4_5gSbK zKQfRp!fwyJ?O#0NxZd@dJ}yYK(f1-vanRb||5@jWgZSCVmN^l$W>(On$uEP4Y-8S-|#Y9aM*S6H_52?;2 z$CTX5%UdWxM^?w`C8uP4#hXaY{MyR)WHY>Oe;NfLoCpc?jqtc^1?Bn^wWPu!r0BLYQu{qLmtc+X$)P8LYQA53xM(J0P#ti>WC3aU7X=6j zeyfWIMW0WiBj$;MN?7D$k^$eZGCi7MeoXa5O9rUT(&=$F>7>PmjIkQ_3`UY~>nnxV zcJ<&UPr&A=gQAHMN~b_cajV0vLjzJ}qGbOU8Ki-mb48o0C3q9cJ1HLK6Vo9=Qt?oesW&*m{-^S(RJ>`zshByVb)cIZ`*^x&^ei=(E(5+VZFTa@p9rHpA6R&J{^+gR+zMo18$pwYD^Vl<$j@ zLE54*uU355RmHBo*vuR7^upsC-I>2BEz~k=G?p&NvUEEYk(%WPCl#ECZKk*1X+!%btg>aVUo7od`?(lp7`YYZ?x3*yq+rF?rTl{U$ zrE$G)SFeKX*3%-Kwz@4$YNMJ5c_nQZ$MkhmB6h)dCN$|b#I?0TT$@V$y~IRmm-&xx zl9kTzMjU_FwT<1%iy+D8v#fo2V_LU^WGG?BVl81m9DIK0#snCU|2iKM-opO~Z*QSm zgIdR%K}HkO1KN)Ac`+ytIPqVN8m#yeJl^2HnlwD&$Jj_kj3~rN6}JfSA7m6}`zCL$ zMk>X_+kQ&x3)N@}B&;FzO{Xy&hK{KtR_1EEr6LZ3ep8baz+4{DsxlFx7 z(YpJ?UmDIKh!XtVN+)0u$|4YdT-6ezLCnd&4d`I$0*gRSzz%;`^lWUbM!GI8Snw(O zn~Ai4eAl$<88<%{9hY)4?wDY#W>HbbJAN0mw%OamiI_GB#zy5OOmr^v-X%YyI`6+M z=ztis23&&Vpre_F=#OYfeSAf-!>!yntgHcdaP5N*&e*|8jK>Tc;zzEV z{+DR_^-nYf+=mhtmOO^)a3f4skcfK7c8m%cmP|)=1P~^_nI810CT-~YpK|hF2>fky z_x|)MTF`(c_PRMUUio;JKM{#rU-*tgTfK5urRT0xuSBlo8J%R<6Pmw$)|T$Zo%)VJ zv`m>gH34P9dZYVH++&C6FDS`4z9VmYyY%7`;?IW7irpo7C7&Y?Sc6W1uSqJb;7a8I zlNn{_(~_qqC-aUu$#xt=MmtJL60W=UHLB1y2PeqmXXotWor8GTPN=kR8V^s4|N3;JY|Aa6d*W~!xR@N2 zG&bn4eg$%ly>pkHcPi4>N1@CCrSn=FfoK_DUzT2+$4Gn5elSz=x^r`N_T7j>zm@ay zBR->2$+BHPseduEz+Zt|ZxpJr`gl=;@EA>oZ*aINEiM|J$k86G?vZ+(x|HJsnpefN 
zzFQnU@U!4UaJ{V3=abT0QMOrH->rcgf!(4AN^c9j_tn++-TZkcKKQ3R*;dP@!Dl== zJP}Gd_Fx>q4Sf-ZlWH zOf)cM0)`|#BjYqsHcy0FCesJNB~;-1Kow5QQ65z36pKuHOxjdaeEMChkmW=5W==M! z(u;)t1d>BL#d9)n6JcW^l38&j$-3ove;UaFNAaBY>36qJbxgret1>O!l+I)iRaGK$ z;2=gBdIt-wEGpBE2qHh~l9xN!BdF8xa%rytFyUTZkd+p(1^3?v>)P6t`8rAOFP50i z_=K9%zO2$`KBK%ZEVOYQr^qsz4A-OFtP3C^emARPZkI*{_1wf~U%DW0dV1V1&i<-W zwiL=^R10UAa9kH7*kj}%GT4w+Ejm})F*d(vM^G$>8(MHvh&1$NeNUyi0|&FFX0KVp z7PftYlaMFzUF6|#Hs_3zk`nP+?w&oj?0`35S<`4}WS3!>fRfS7?mMbLNNxgwyLE-y zV)ai#=r+B?;lB|{ZV~PCHOQe-x2d=c9OcJB++yc7oIknhw=jRSZn@;ly5u0LM-UvT zyo0JVz4I|MAF?fT!oMouXFlXu=7-lP@Q^YSIznY=tQkr59)Kb9J!jL(`Z%31k&s|nR)So~flEkOxaG04 zz5GMG-O?Z*0G6JYVBY=>c?xKrg8%RXieO_X(;%Pena_;LXO`qM@%hZvd?rr;^HRaT z5i<(_4F?hh=DnHg!{9k?L8R>3+(Y|Flvx{-bK_>VTP!iu!4U2cE@5K3&HN9qZxA5% znnoIyl=CS)Rz(UMVthvQ77jbgGja@eAR74k3Qw1rwog?fYX;rwBLunUKc^vjx-g_E znCFV4q7Q*z^2sNI6%;l74*`sIQEkA$PL<_`{0 zIhnSBK<=%)s27XaAD)%3uujK1g-cc6F14WqrXH>8BV%+Sn|wvgxpxq40MQ54hsDza z=K6-^?uSSQs2>~pV|SAjP~NoOU$@WE^6+SK55TsDhc^Ri!q%z-7(MP3TM9WWJ?S@) zd3q;;a(hzevJ4(Jqx^QSWgZ-4zSI5r<@HMWZ%iwyOQK?W9+^jbEG>y`f(szrB5lYf%_7c!jLq|FO+`DHU)LmN-s7Zq= zf}P>uLu&bukDE(zfyF1q^oH_UKYF0{xdOAv26<6oe2Si0k^)L%LcXhw3*h3Fc^7f! zn(~?nPx-D}PkYZ_p7vkTnD!grKW$X`>tlM(}bDiok603E5;253q^Pw<5Fgr)&ATdRC2vtMd@;ctqY zWgJX5S+1pan5$!MX}9l*WA!Gi-;x0~3P6}FB+*E1Zd!(O)s>Z~Oj71VymMPlsz^eb z)oyiR7{O#He#KMU4zt>Y_Urq0>vYawmgU?Sa<8Beya84oo)Kf8D25u_znK9CJi&13 z__=N0uGVitEGzfrHo1du*`!Y#e^xm)etV50P9N3uqb%x81^*#kr+cVUaYBC#X|9EG z>9>#KGDKbWCqzIB;k=)7OU&@!w?(1fbkNYq24Pp#$0`_hZF|=ENTh;}r zqGczLPu{Ai+!7S(o-HfbLfp%p9F^IQwSWMEf&1LMF{ zaEk6!cE!`o*TK2^3qrqJl|-Vs!6doy+tI+~_4Tyfk^+|@cGMa8aZ2_WgTKH8 zyMfwO^4WKvcNuKzXG+t;Yj|$I2SdC?O1FXQ;9YCRrsM^giGbu~3Q1lO{F{`W6`<^@ z!Qf`Vze^YVX<4C}`U6Y6F|I@DmjF>NJc?j(4#ok3KtNQY4x`cynKe|L+ zA{Td|7*fw672S9G?lf}o#FhV zRGKd2<3sE$^C(HzDZFgw|4NzLxMYLCwAeo=--&iBs7jEvRw-p;e7parC+{9c-caw? 
zufwH&lv1QF7W&n`_m{Fq@In>}$y~ED$o$Gv${ngwgSyJ66C+g4 z@>7XUkktujCluHux$@p#W^=yPUax6dUwDa)Lx|lQ9A;o@(6K_m_uQ`E6iJA(&kX3v z9-fznXhB2^u=Pom`L?euj@1mhe+D)Y45@W5*A7H~7iRvG7r4GddpQ$h8cgB9ibgTY z!2qN154A7Ur-EaF1mVX(uZ zC*EbG4hhVB`im(yB4wRS zJMLwl8g91%-k&?tLmMPF*T9+9kO}&C+4xzQIYANOjm&{%f3U-DdgUMq7V&Kw(qqHr z{}a@+BihToRvtTxCceVshuak(SvS%1H?UR+DIPt{d+BXRxn`^taTv4I*vqwfvK%_L zOeY;bH@9V&*Ah%7(SDlAa9GKDMM{q$6^(7G&37x7!FZAf*93Mz=JPv; zsu*QMJ4P2k9|B!vaW^DF)$q#w^JisuZOX?szys1Bs|!@CQW-%-f2Fdjbx{0XQT%Vf zJwj#G$QiS;X`!1DYgPvma(rGNuhibH4`HBFXnVa6-2ZZ!8UgDF+b4FcuS8g-uEJ=)te-fFS4Bz{;S@po$isjML6FREK_{9|E|`-g=AvLfN9 z1~)AXs1Vu_>u;gM*Zhuon2eACZk;*MU0xsgewpU15d-H=LTRC?_ z#Rw_oq$@t>d%#h~HkxiQuWr3S;J6&}jXNS@4`}-mvk!kT11)VRcU0Yh6aRz`x4OsQfO?-1~-4Qh<3yvd?V= zLF9b}Fk}a`Z)Ux?57dNnJ^tRu=X_=_OjjtOR{s|P{2gRe<(3UDws@|KoVl05Gpd2% z&q=M_>g_0M<{iMU(?__;sqv;w=XT2rOIPx=J`}e|Vb!J9UXOCY(Q|Ow&zA+qdmNYj zEIhJ{%b1NV@~_)fDB1Y<{8k8w@<=kc9w2r6*IMr5a8mHe4=Wj2*p#BSTvt(%mXS1-I{pr$n=cE(hXDeMxjR(peE+76$ZfE@?2@OpQ8dYF`>H*^ z^Y1tsWuJ?l-&gC)$u{l0sau{g=Cx!KR+oQhfx{@xZY+dMBH!IM?MGFc=rEb&gs8wn zH>wPSgpQ{5k;~N;1pV$*2n3kVel;Eum!Dsu8VV})*C-| zL+uuj4;R>RAy+Oi=L%1Zg*<&Bng8H=`RI0GzaXC6xl7=YWQX&qW-i;#pwG)H^9*>` z>;cDfnqQ)Xf;~7evNcQH?E20b)3u5)O5@IMRf3WIjt6Z2_VF>BA3rkf|2xhb{%I-O z63giSZ6$O=FG$GGkUm-mu`-ZvZqq*pb=5hwujHGG0&iVQCny36Z6e-)n!vIlylD+X zZibRX&!EJ=nXba#v)P>_gD=k3{zcy-5!B-=e=2_r?zHo>y7fJx1ytpDu;MNjrm@Pj zT3shkbjv{t9@ez{d-$mRce2C???22Zx0))P?{#1^wsr$#!U6S1cF?z4_uxO|8{bfS zo;nMLLt#OevY~>Y&#j6iud6;WlF^V=k|V!L1hP&Jlk!_S6P%hGQ80)!FjI29Uay<< z{*~LLRd$h#mux36VYX`PymnULlC6|KxO|kYpn7?lq=!(-S}kKYe0*|-p25sD`E^qJ zL5*Ez7I;7M)Rz1|bTM#7sj~UkU%Zfi7*VL~ za18Z|ndN*vPSP?jCxfAJHs2FhP?9$>+DA>Lk#LHa&w&&wz4=3uO{IKy>Nd4z^n!F6 z_AIm!c#scNEgD-otSO&f*~-@Y=MwO}kb2rfyUKY-7xD###^{0jYVqONv8X?w^v=X< zU&EKpuT5+kh%CA*Xp3so(0^@`rB8Cc{{QL|Yhj7JnH`cybE-ORsUFe1_T`>5=v}`T z%aQ;No7hbXbd-n4wv;)U9!;D+s~He;eT{BWo?0#XXt%l;fo1}_1`aLq6DR`rIX)Ya zx@Q!M27#9AcmjZz_Cv9vM2`hS|GJN#z?N;!1fUKGC335X z!6H;8l{=J#s-!AUK%RMKPmcA2v73B-75c%5`S)E6`ow6zQ_I?`gVHXmxmq6XjqdQ| 
z*1g;<13!Wqj~|p`n=xF&n3dyn3QZks%crPn_Upn#t~?4lvanC=?D3R_E2aAD3=??h zLnNNe8Gs07ghDHqig)y z!B+Su0!xdrA2s7G!tr#T+oi4sakmMA8^ed;Nw39_K@H_(f5OLwjI2lbz}Azo{7yX` zv_)i0mQSWrCt@dNi#l#Y^(e#$fRP7EGmln=uRwG`J@DiL8)od6mH@cT&Yf1Pqc_TB zm0>hfPPVodkA|&z#WfTtk<`#8$glf!?wg{>Q5q#qdHK=rdv11F1Pa^ng+ET7icR*M zy8xX>O%u7Mnv^0?x`suBHbSl5|Lhas3zDiG*H@GI%RkaahAz-vsU!Ir5{9Pi0qe1C zuPk3@H`rmR=F@&tC#QWzcTXEv-1mX|=}A_u#;@cX3k3p?HRQDx!gw}KJ=-SjIsOt- zhd>)u6Qtn7RbD0W^Svu?(%9edgH9NYdudM=OU27PWYqO>$uV3J$WaN^wj zOuQvuaikLNRBog5^=_CDE@*qM=gh_9diUYF@Y$GHFj5jivo`%D&5CJr;2^Yxx|u>g z{(rWKa^{9!FwRMGGTyVMCZAfK2+eK#cJ+}FrAh=tz-Z4H3Cu?BrGaZj9Lr>j-s_>M-FzuNt-yc{R$ZZUHG z7~Ain>L}YDH~Fh@0%$Tp$q4x*V_@U2@`1|KJ z^JXlagV>L*fxQ90WtMCAfR3y7qTCYdMpVE<-Qs&M^nrJ4GYvx`31njjY1nlnQ#G3D zLE4m3`bxARWd%6jt!IB-;0vI_;*3=% zO1cqE(%IW{b+7GNhfgOvO8i`Q2rbQI?&OR?Z}PUeynUrRuIzn~GBU8ay8oCxu~kib zfB0iZQ1`s@2BQ${p$@BqtM}diht_g!$fH?9fXAlD5#PUqVKyzCihq)C(`NF$_ee6i zvkl<#3~;w}WM+0kmn->V6RKxwgvI*SqDyVuzf~`-FCj{m4%`LlH14``dkD zy8{&EcvJbTJ2{0{w^QzK`YX6q&u#iEKr|(5N;SRMq>oMb28uO;O-!sH!V5`~0PPs} z>va<&oA3s4Gc%EKO#e83{%BBEWu)!>CEvC4>AKm$dDMw(#>&A$?(o9xLho}1g(GKm z2TEY%d$?+SAKx;4NFv<)z&!iX;&5t=5BJmmJe8X^vD9*7MEHX73*c#)$7GB>V^3fH z+K^74&djjsm4>F|fDVXg&L_W4fNHjJA)H^j&?(tW$PjEz-c73{OGul1wH zw;nY#OyFjpOh4Y@?~=;Jwp=c4sEW95 z67dPN&HjC8^}hObx#PRW{__x?Qui6TsVTKHBd=1@Zc7+=mC&u9`bxKz12h^d=40kx z-?{epYXh{#|NXU5-hPyQ^Empmx44bMko11yrT%UQV>4t{9pYM!AbOAb6w{RSIKw+& z=%|8_Zn5Jga>}WoYBXFA_(El?{oB?zUWp7$#k{YjT6a zfLnC6b-f!DuvG9ATC7VX#W;gL=dB|z{c88qWb6}#!CFqR<^&ZaBG8VTaMQkms!8~v zX3Nc1=IprHXZep|AMf6go@>W!nqJ!GT1%@hO6A^hw-sb_27g!*MRwP?w#7O~s4zsx zjI1$WDeY^kv$_`$DIKUyW)8SjZgq^`0e|=Y_wvm|`_CrMohXh=e;78|(h+_|D8ObQ z*}j20Q8pEXp;T#!YpXp>0k5bxA9(YjT2>sxuG<`FL)c^@xBwI3{0s z;&}YmpT#B;U^c-#uf+y0J*7h5>!?>lEgNBDgdgbv!^dZE0-;TyB<~6^n^!HWhtg~n z7;+A(3Q&@tIP*~Ig;SORraZ{sX@yfUB%+k~ zn>$kIa&iG@+neasBWfle`!`qrA9HBZ$7x}omUhUHMorc|BuHD0-ti_r^|>OSx~}c9 ziLpL`F-^u5ZPP9NI~N3WinpAIr|?U5;8rDqgoegw;STtKWq4C6Yn|Mf8i24COHKN| z-;@^K*x2f2>$JtH?r$4&8^0rdp)O~k<9lwX6N)IZ6|}Owd@%y{-Wv4($2cDR)&VVM 
z&-$KQ*LU#mQm<_2TdkxyvBZs67VTTru3H)_9$fzQLN9$WBH`rDJlTj}FHO`-Gy1nL$beN?%Z6KKrihX_Xmq$CC$>5{;kfZp|#^nEYLj``SRdvG4PD(tt zLhW-QOQ!S#0v?2ex8|3{EVM^S_B1yME?HrIr`_sD`Wc<<)?KPB%%g>qI>t3Sk|~|N z9)-Tc=~^9a?^ky{b^80su!0@p;E+n=t*TvlZa5)5v6W{JC~S49G94dMf`R_A^T8NM z*sp8663jcs-{zA3Dp@Xrk_Ft-UQM?QWwtwq{^Y%X!k_oK&e!EP-ioM={AmZ}`CQXN zrnYkj{Nj{PKjWa;zkRqKT+>5=iVIY~1vfBLAir|0kh|LJVmLtnj@AAyhVT$u=1(FK6;(XW#lw#WK- zo<4?l{=yNbG17;rSp7lzrc`$@FTcg~n$7Jh_B)!;I{^QV-+%1BXw-6FoV0jMO~?+k zG>o?I>)FHcb1LPU>#SRH6y!7c&TTMVSP_omI00RhPew=Wwd=x@ukjq8HZNN|Dhmfg?yKfWX*BLl`- zZ3Wx+XSF@6i!7CwZa3&al^MF&A@n%^W%!KpQ=cD3e+${ce6ky#G7w?TFOC70XV=XK zC!rz1b;Ezo0$qk|J{uXU@E5S`8+t#siG#Cj{`Ql1;1;f5zNKe8qVDp42)pulDA)Hr z?V^$-WiO%#rw}DeAt75?$Cm8IzGfLzwvr_bC$eNIHDhEM`%+}z#Z1ju60%G~k$w9; z?`S(6=llD!k54ntyzlee_x)V=bzL`%stB}iPE+XTZ-MhVRLnN2H$9SBy}fNcGDcP!^T_sKdBm};PZ%)}%{GK_lSGm5YZ;~##X}=Q`J>GKU+6<-W18 zr+!?1s6|jzQBz|}xd2#}>Nt;`*!ky(>_oS`a~B2;u$PC=^J1r^O4y|fZSKIZNM*Ts z!mMA203}yxi6d5^%}Tn(mQO{SH2S47b6g}K=(!H_S?FX)MhM3kc&y4gyqn&;_@$I& z-ifF>3Vf(;ez8It`D2?G(>#rCRaVqelg7JBsp7I70d+MB=D%MfHi+_G=*KNBWz`sq zJgsi?Y{Y@{nRK4<)y>5Iw9?*zPgw6pGcrLA++4 zjS5Lp5`Ud|$vq^bz47|!z+ccsU!ZiZVexC_wW%$l+W;m5d6)f6s0a4}jq8;Nr|tJY z(H?p8=<(is;=R)4yAE<_ViNr+tRG)emi;%UK=k^O_egHX(C71(aHU9tSSONZ%I%l7 zDM2o^OzyrBezWMz%zjB}ybD}NlN_rOi56mhnxjZTJT{B-kOIDnMhAfaDD>k@b=}kc z8kfFOQcHjwzRLp`2<{4L`_=88H?~RnaEY9F$l~H>=>^92aB+f8ktiXsMC^${ zc#m3dBQfh9oG_e+v+GjwBY3a|^>_+069WBu=by6xx%f=7e8sv>tpxz(s`-dmv!SK3 z{XL__DK(IAh6l-+eYPNNZS5H{a?2^hN+COIr`NP%@`Rk`v=KacGH>gFUBPP2L)kgh z8)r}7a)#^8R1l|HL;pZZPL6YnRu6^($FnZ=EpI@KQpo2$k4xiKG{Sg9U&X`82z|yJ z=rLxvtE!1_C7Ao+%x+}IxLxb3J)hhY7$14vS!?-$9v^YZwJ*eFJP$vxgOE#5Uh=F5 zPkPBaR+JoA5}}Dh(g>%xmB9`j;$%MC7Cw7<;Pf$5*fCqLz6j}3v&pXG@5L9e7hF8< zLKhJ@$yK(LZ%q6Wq}k)ps5uF^y^R78 z+Lj`98`6mb{^WOC1HvNdSX1m4pB^!%xwNm@5$AvC8Qal&{%duF5kOX_X0N5>q6|-$ zEVlyKsqFw%hve=*LRrHrrl^iGWmsqk*p`;6)(GHkAw)`|lU?R#y^~?$IY69a*Ty@$9&UXZ<-~C0A8DI=@0DKP znbx)_`L2&`jC$6Mx>Uo9*@V;rE|jcKxrPC 
zSZgcZHk#$K=p@rsDN||$7Taz{WV9#vq*J8{Yn3O-3w1U%=XWX)qoBjB0r+q36*cj?(#j=0z3VVUXMZyfzI#O*{q*{Lgg zOqEJgKiDePX%OZVh^vefbDo+g!r2`}xabK8WBqi@vSaFpU5)EoZ{k=(CgTyyHUpfx z5^3e^tBc7dE42cChk7EFr6Wxb=x~1YyIhciHDjHhs>BJw^rk@+Ue3+b-F~PX#cl2U zcFr*o6uLdENXOd6}xyGS8A7#qfX z)2tWILFVgG>X4CXddRl)zJe+B?$ZyJFerhERp;!KPg7S9>2T)hL?d6~+lNGGQx}K5 zOox`jO_Dj`G?UXGo;>i#(VvmDyz-=>bh!`AnVt%L&~`O8f))n7$@{N#6xy)97Oo=@IrMiRpwFWU*73Plm*(kTRYwtm!hsOsQxKD_riuUZZjpiyWs_1B z%Ak3~ZI&OC$-NaO^G9Y3-Glkr1s5goxFp;&MkirLCPJXCWc609l5cPV9fq^RD_Ifz zCdgi2#59xVR-M^rGAxj5n2~}Pt4W;@_Vk^=uC(4}Fr{h?jX*q}L&xgLW?~u4bRVle zwGaXM*M_xY;@G#qicJ$+>_4Av_IT8a zR(7U=TwNzv=63^HN~Gfwx5%3)PpMlTNuOCHj$U^a$>fH?9R3UesetV}PnDxXZ>a=7 z7-}1X!TKqx@)`i!F;2#E>{{?hA1As3V-LsJ^B{#!Vb^qr&i8|vx?sv|dK~>!2wJ`M zKt#v=nfU?t78utl@I1ur7K0&wFzLKC70bVm=6EmkdcPOBrEDqG8aKxVd+|rK+>lbG zmGDX+jzoke>=$yKm24|TVeUETI&qa7Y6V56AZ}hLe~i1@h2(ct{a7|9Oox!Pk(M*X zBD3md7d!Z;{1@a&vm8N@@o4p)em>o0O{SiwIN!(fI?GR(Mh`1EXVb$>y^P)!dr+81 zLzv=1)(mBhDkf$uvyLL9ydCa-X7$zmB-9G7!myA3xwyAi?F=Yd0!e{_y12#>Wsq!a zvEBO_A_nEBx{3agtqeX z`>=@^bQ-ZBPJLkwXXeU(fOR}3PHDuqCbjGsFI z>l*xLlKJ)6s4H7Yc%UwMk=+tEIFXOTpfN+b_GjMHZXG`Nop%aRVTxAv{p_< zo|(M*^@(0v0I4p19q8f1&s4res?f zS+L9KrZhKSUOgp5@!_EGj35%RtM_W)Inx!YtPvXcGjN;v_qe{S_I$u6u5u)firUvo z>A;}F&RXbeW~?!|%Kzmn3#b12agopai;~Y*=>vkG0EG!Q475o9xR#v*LbKfyZIV-0 zjY_4do|T=3jE`-2y|O*JuHx$+dR!83v8@q1YA;CUCw$G1>!IjR$#o$+M{4Y?Gr?uN znusM!@D386NnmCRn(_j<4Of+yl2IjvtPr*Gtz8NjPdPAM`qH@>%V~H-t6^D);WT+D zajjvGlor~m&G8|3O?GC*mH=PC9Qa3e+r?t5S75E_utCQWC2Y5<2;q*!No5x!)kTgb zX?M%JE#P=oU*ZY;#|}Ti*pE8187(s@;@S({)7U+Iu=AP%tya>lR-JI^c6(h!GhDcJ zZi{FYcx4;5(BQ~b3`trpyw+|nv_}C>Ze|4xCi7>G=meq1ZYNvb!Hq}rLY0fp6gT9W zr$s6(e6F3nt{FqJnUmU$NYtrCjr#l@H)|}f`ni6@J95aWqJE^^d_lg9IBlw#B11TD zTG|Tuk`YgEzP*&Vk~w`eaoK8CGIE}+^(aCQUUl8tMiNM;Ed(hc$J>z)%2ISBy-@1l zAl`zyHy}b(uFWx8(i}4cr0a%zVNkCjz~s*mB`Z9s&rHlaw6WUh$9hFXrStlUf#|7` zu9T2@xphN(seu>vCIXGl`FQo~`g+T;^*)ZOuMpItl#+~`d*5}fbL!q&%K@fSC*rY* zpG5S82!$w|Z?u_lHXwpdscQM>zBJ#vFR+5=fD7I{!YN0=MihlK6&KY74B}8ujp!XAG;^R_?P7ff8Klb9u@t_OjV+P+D 
zSm2f0+`1&b51n*@w;^4R)N)PX>SSG#R%8tm$Cc~)#d9YG2y3-Wqk4+kxOPYU99p}3 zKn@TF`P&8z5EWT#H%%KQB~k&C5IBI~?$av93h;B83a02T#?DpONdgYGAt_OJJG}#O zjp7AnzQ+yNRwcVaUU}E`g*_*u-kSA+M=YEB=Z=Vu47=-X`jY6Vz6FJ8H%&(dLs==D zBcj38h3IJ_pij!d6l2`q>gY%lTUa7AuxCGNJ+9j2<+oD+O)SiIpMGIW@}P)ld=ik* z5Cm(B!QJ1dXkDV${7GB-c*RR+x8d?r_9WY#`zJ3fzA(g1=g-`w^NtTG-+F);;Y^_E z!#c?HKGT)-Y)U5t}$d#Cp+>!Afq4HZqK*;RRDFU1k=zhndlz ziz4S)T5FluX#8cI?$L6{Cz14Z=d#wjqvO;1!?}^An4Vyr zgeh6CZ;iV^xGxHBYhcVy#L`H;$(E6vdu#}0YO2@Tz0eVN$_|(KCpya%1PTB;y4seAEt&lQ~M>=x|F~#S@g2 z9<>D5cjZg5MNMDlerr_7(JM{N?~}F*KJu7pJ6E)f$B-hQ@k8vr+kjXii~O?@aGZP$ z2(nY+gW|*Bhv=>^ROc9L;lnCXIVgeFpb>!JZ%KVP&Q4R!I6CXCxJ9ktZ7v{X8FBCb zBqAXzQ7xDwJ)&Aq0tjiI=a<+Ez8)1v+^!g~M1_{$l@DH$MEEHhkH6C=0+Z=YFxhT^ z$(k8`aaDENhH7H=n-$Ua$`xNJ=~Q+#Er|m{FKf5Kdovl4b|+~0h~s)&4udic`tez= zl88lbdPU%Fk-1xR7zkNaO%hOhN9U-0kHO#z6YY^wd<4gZydzFm4Av)FWgfZ?Re+=t zuvGsihOYxOyn|;{1FCUsH;jgH(vq6Y9b^Vz-Vlz}gyllHuRDtGKQ(Us<-+FN=VooO(pGn&&(pu`y`i zt@oaaRB5fm1@-IBm-gYk_4#cRrRQ=DZEE)5fJ89sLo1!)Nb*~O(H)2G?@`pk69)V) zLAiVy3^3qXB01KD*RWb3huyJX-jV3h2mKZOxaU z;xBIsz>4^%#gjVTVcG#36P6Y3opNPVUh)WXy?W~8hiFeHuE{=5jC;Nou*%kNGcrO> zFCuMkd0#F3#(uhH_s9!viE4f@s3>&k<7au$R7*YgXSeRQv0J`SUC{B}SN1=UJ%G0g z1;tVK--A)`J0+wvV?)fso6sKc@R_q^8& zv^q#JOHYF>!89=kxESoUT0`wFC`5SnD4>BAV3O`pkC`>=6<^bDzt< z$n~TpLRjp%IewTc?He)WS@cMWG~}F)0&;Gi%ne?ZmWq7rHyuS%{>t~Ore=z zBJSg4I-oivc#*VxhUvP1^V z(`^OazH0{+7KQ8J_tQ?S_mD^i2}W|7AWH0QDLvfV0Cq`$Oh(ie|LM}ZxBE+8+t#}) zhVV37qTD!*meWv#MO))~BX`w(O$o`XpNx(~>o9mm-1@GTUpP;HC+7QAR4D02ECTG;d4rkxO9u$IEaW)=^$u=6|Ltz6*<&@x>Fc*qaSSv zEEo}C`zlW+Pvbitfkb;_@Q+VFwxi<1*pljr3fcJjAZf0Bz#JVq_W?Jd7ZqbAQ9UbF zW7`bJ8dozk;nYg|bnyU|Z&I zX-$M$C0YHH62L|&z~PLXtwL>gz`le)Kcc*vLG!j<7|yUFh+7Mn>IHXR`X#FUqOD)e z0EjCmrydlbz^6MsuPw=*byUN@2)%Fm!I5oHAJfg1E` zbl0j7=iT!DA=^CWkEQz_zLijam3V1KN1&IpssPfga^ z8wpKFhLBg<;5wwQN6FGp-&ffJ>8(qFoU!$Q<<9-Sx(QBi^5&Y6l^_v{z+}Y$4Zzi& z8*zL*eeUSEJE3@VZ{?i2K<_SauVwP&OxL3X3$(X0CNL5hVBZHoguXuSbrmjqXvv~> z$*?vX{#|ehdS;+G=kIImOK>qNE{tnU$52YABm@S|)GlX7O_i;-7Z?$hYvC<_;4-WCezH=7F*thghJmpS_BxgL;dezXC! 
z7~Ic1mwC(h)kQ&&bv|s%jeEW2 zGVNAXux}n+d{k)E2zclxniBRXckAnemGk*VPlk-3A_D>G5WF=tCw`!Q^4rc z?~fxd8C~)Me&ZFKFEWpp#c)~Aek)IlZMvZniFWB5y}$9H)x^l{PeNS)m+a>EE(~!S zOcr?mh$`IqTTz6%k zn!0FAv9M8`7oI47bB+%)>}_I1xaMOt^aG>3~tLVaXPbs}3=5xOQh4DcZEDG%P z_e{Cr%2Yf)fzDRT z=ETjJPpx1TuSCE;70ufw_iW@Vo{&Rxv>Ra~aT9Q7#~&X)G0XA&o7f{>7G|P`50~<; zxtnyoBr4A&f9j0j@I}+Q!-~Y&088L>0-WJNkD8^nB~u^51Ub%^wLv}2>?to5Z@!Q_Ya;!i8wGZd zz)e;Q@5dPX%tx_=*#arRx>E74Sv^^lq{Trg?vLc5jOFRDqA)3lEr)*j+-Fsv*YA#* zv_*MHrM?I0Mjjf}djg{7P4sp5K7f!=`p5bRh@=H|A?U7iXG-0o9|AlfFrBLGSPcW- zqrkRx;(B&91NMeow8uw`$?$SS-EY?3;8BH1tEg+-hA?*OnT5^T-gJN$&R1~*-1jWh z2>da>jKlEWSHMaDUIFaD-@pIuDazP#YBSv4y0${+v$sqQnY_$ubx!s-pLr&HW7-rI zG~YOa-HEyHm266!t^$0!iQ4+@oG;wd4oEw%z|2VaRA zF|Orc zB#?tLM|9YiZQZNsCWWp525E3Fzq*cvUt^TgXr+#JGqxO~vp)34=I<1`8s1R%-Tdh_ zFn=Pv+Wja2m_J=S|DZ_L-8;KBAr_4hWejYx6_`Q@>hqzR3I5TO={-7l7xu&p^QR|) zRr#>C*GppSHLO{VVSNgiQ9yk$UCPa3$vPx@;9)*LC2(lxuMG_QuotS*IYDuBLV0TV zC1%QNgTvwLQ1SW5h2tXepp=Z-J0z%K^jPVA0pl~T!jb6t442+DeX^w>p%D4m40jKS zs3_+LO))S3!Y0G|s!%8SWRivr-&9x>kU|%v*IzUNn5VMO1w;FZW!Bo+O6x5azXojl zoVB$Pd;=W&fGJYpXaJRa`K@;+)B_4dSHZ_`op519 zNg>&c6ZB^Lff36K(C7TL`kvo_W{@Tup1&0PNlLX#t@~?-a+57LVr2CQVEO-?mb1ZM z*kGTSY^n)ZG%Mecrk!{ggR!kzw96$n`fMfXI;Fy8vfcCI5Q&v@n$5NFvpG$7dmQ>x zvh$$JJwg(0Q<%AmZ%R+mN$DzorbOyY(I;CUe_xqX^^7Q#`fDdpDZ(4);nY=dvub3o zk8cDG6)vcs0b$~KW4M7R1i|n0!iEks+7{6KQGGsC!yh1}c5xcBdp^GgrulY8U;V^% zTIo@6&Tb{hl~2Ff-Od-3>JYG`&4KZlS+hfd8A<8D{6s>Z)FsWse~oVs($2PCOEgI!)kxc6@b|SCr4_~xiMHQA$hWh(}17gxZ<)uFw~$Fatu&BC)3ZD z<333ZL+VKbr$B;4c~5Z;3GQ!A2z=|JI(-~~NtSsc%6KIUpfk~oNyATjO&L6DM|LZ7 zBU1A63XO8=Wg?D6T6K>e-AzdZzzmRr+w{K`rN~plbRN&Yh70FRe?2EVG`QJod->O& zRE>B4;_7$7Cy>q{0*DjLQ`H`q@~Y3y@?g$R2(mpfv?J|%{i3jvyxNQ|KnDC8DFiX8H-f&bD`07Gix>7f z1haSH`3x#gj@EgPwCEL;_vY46nNg%d?TGLKRK(S@*otMSDRQU~l zEC&}!ltCwnGUj;Ao)kooX8l&u41{SWiv`So-LZfX8~n%RZ;{80@3H9K#3>c z;*mO}iG*#syF_zTD?&y}-HRS=3Bk-35mt6BR@|jeJR0bY9wiwJFIfAGXLauz68vDP zT4{qS;a!e?iB}hSf8=P7zzwIj1z4usz%GDGOq?obTmKf>t3hl&A}kos`E##13U* 
zL=#%wMxWG{j#(?46H^H)8Zu*rYo;hs?t;RwX?o3o!o(h-0pWjfV!XxuWd%bQ`zcSn ze$5ow4s#?DiP&Nhc^s%#YVMuw@cs6X`5|zsIUJx@u%fdJhE~tMXm3ubnt6OgElazi zJJ(1p(mg)n^6HK99d_8pqAzcrf-A}HQVhAC!P6wInD<^f@nzh+nAf-~Coa0+dYocP z-pj%yMg3Jz3L=iyvA+In!3!jBx-_$r0NKY8_vq7<7T)6pd(0O$P z@uy6m6265s!^K<8tzyB2vh)FB)pqr2z8bs|LWa!8uXBJFkfzQ$n!QStsXrnT6b_45 zZA}dQ2}3#@Q)N94302p<3K(bo0!evsoseE!WjQD)?6;$EOH4$B@#l5Bbp(4|pamwU zp$|sDGLQ#K$sv{yzw|q!K0YHZsnYFp z`yZ?NbwFpJCXk2-@RFCc;tI`rFMfQ1N={`kCzhdTHnMw~raKS<@F?JVK%W25#h33 z5Oh`Sp`Lp7L_Z(!#>=d5V1pKgH&2XHiEETWREWmJYECC>MUZ3=eW#dkedVBEN8XM` zt6%Ni8o>G6sY`td&=%R#;O?7HKg(KmL8jnF@5tbNxM}&JXWt1HpluF+TVYblyHnYR zbnCF=ZGjR$)^q%09}>1lG9J59W_&>U&Ng_@lFbJ2Wo_WK{jebtg2nuPJ3Wv|=| z-Ul*TE1f`h8nx&4@~BES`5c#2j9jLay&?D+D7UTBt$~HO2puu}`EaPAKY<4D)--!W z$JuFlh!eVL9!fp8(RsX8^=%)+YolYI6>kn}r}Tei7x@tN^z1i|*vjXFT^whn>kK|v z8G43yyr^S(k#%;^A^WW>I~^e@f+k zr9SYLdE{M|dQZld7M{wSxw}tkVzytdOGfM(G5^!+TXZW|arb6KwFUP?SteynCuDf; z;4xfP&9l={n(xF3!vS6gJ^b9wBXbDUYxdJKUCfRb_?9rb)UYJ{=yr$%l`F4ia*Cok zV_Lq$uP&jXEj>-InScN$Migs(z$7`&_9VxkIbI+VwhhWYhkn(turE*1Xo2D@4B9OB z-<5lbfIokEz-XU(&aiAQEHj|mF>bAD;YJ|72QBRgCK%eP3@{vdbVQhc=J9lRAa54XUWQTjlAVz8~jA%%*G&(L)+#b9}8U-mH;of@8!Bf0J6wf7_qWwDo|5|JH}(Nct`!x|(f|(0TERz~sN(DIPPTJ5 z2Ei*y<7ANS(*CdF~n`Mb`q9n%E!m<6;4E(lL1zrDoN|2*8wb1O)+rVsJdkx;D zl4c;Wi46bpZ#?)o@D<=y^v1ly%E}$oH2}k!qAlJwoma|fR@gyfSl~WbEI4i;UOw5;rS*vbT z1m^c$@bj^0q@3qnvd{*aU7Eoc2>eRh%B$*W>S((@|Cehjnf*1@x59$j#BOKjJY!UH9D3eP6bh;jYF_GevZG;MlW^ zW0nE`Ar`lK;Vun!UQMNlQ8yWRr{T$<>vRdyvr!xUz?D`JXa(9JwXhCMROGj7Atb%q zz|3*(9vG+WQS2Iggf z9&V0-r-9QvoffDgp-<(Vze+kZGq3=uM>>2r|2zq4q7dZFXS$Pf)@K*zS-z7{N0xtb z4}*wo?(=yk8+uJ}DTeT@#Fn>!kG+l*M+6O3p<^Ak$-0{Ul!a^!5H~V1>gwt`p#P6x z{3_>%f!(VQWgAnK_GEfK;5z(}iX_zQu?{4^zCL}MsmfwaGR12AteXKWVVBvj>{Jd7 zw4ka#U?1rDhyk2{p?=pu$>2%QF%{0-ubq3V5NbMxnoF#yr~T=^)y0uUdT%+}s53Bc zESk0b-RBHn-?y9z>ut*?`hes~*7dva0L)DXtA^gbA82U|lfQg9=QXjyYN_!g&Yg_q zYyxyu#8thOr(Y9{plt~5UkWzr4-fwM+@c66#F~MBEqCFy_`cvjKUep6#Ya@+T{$ZY z!0roqIl z1K^>!lVB{rsbhYnY_7!bfMx4nz#4aA64`a`G}oUuY$zSRep4Gl=sUEsF+{Rq*T%0f 
z=p*#|`5HSt$pXIO*1o@J^{^~3bCimdv0hhE3G4M5O>zM?cR(`|_6b#q&1}v9eDaeWdtHh};Sx7LbzvZohCa!NdHX&GuVM7e9fsLM z5qbkNao^PTq2^$?Hyd02GO5$w*U#Ft*0IfB5b%ib(lBegO@G(joqI4a*{=Jv{8~+t z1s`awM3p6O+B#hJ*!wtYR~rT?}h1_QXt#nd*VU>8smZNz~BM|K|% zW4(>p-6PsFr2qUbvWfBQP?y@>R~Bdl^+?(CPH*1y7T8VsD;`SGM=E^b;y?UEyTq;& z_k8~4o~z+;b3jeBu@A!M9wiMVGD8z#>0vCaX1|Wb54?i16z2&HUwXD4sJujH3i9*e z#H83by87Y%x9Cqn^e=XB7gUoEYC3+a5R`jsmMJAG1rjdQHA+W*49|IV8=RtLSZeNO z0Ss&6e20Mr=G6D_BBzCEcKV2Si^3y;g8fV{I#RUhBr|rO(xYgR{o(2PjCr%w3?<9o zltlu*K(Ah84}%cGh%%Lkd zRMc@Af4R(~j~U3c?+a131=Q8)QKJ|bxk6=RQp@c!KLsF!3f|LI_>h-P^AOX-6-L_z zbkYg$`W+QBcbH^5?z7%M5#Mt;8Gxc4k_Kb!LLCy|`+1zdP9OG%PT6Xma3W(1wrj*) z%EuU%#(cSp7mW1n@lxE4PC7OF>Jcsv0VWT$0#P9> zJoM`!S+UVNpAirTTX&BXWKxoV;@tkpB6zgRZ&*!4WcU=~=C;V1ZFcO8{HMzi^7@OD z3@qLqT^?yfTfCF}Po%V%pUt;J_6?^)vE;&|DW^9zqxfbJxzD)T6oMaX4t{K!9dN+{ z%#zl}SKHu3Y+=>@zreS)v`sxqdH6qenT(fo^s#IVs43duCITnX;TeOWlR%0{a(%Uj zOAFo9akpPEZQ2Sj2P2b7T+2HLQ0FS6#hB_0U_va0FL@uPlL;1`E@XF4!N7p+X7nubBFaHVvj;!2@HSfjrdW zVdT>rJ%gm2`l2@{fhxN0mqrXgJCO0ty_l@;chs^qJyV$A>U{1Ar5c72|El!K!%t9_ zoEQ(o!BF7+ISn6m5DEB@o)^rWe11K6!<;06eMm6iC8RXri@FPR+a34yOo~XD!wB>; zkrbJj5(b9Q?M?mqTi|$=_`|Z9nyF{&X}4Kx(;b33>}gHzd?B_+LqwV8H4mFQ6T1uo ztB56U_i!y>3I;>1h=mOgAL6e~tLB%VyQhW{FmDY4XpqBWy)hUw;BcqJj*Mt1W*xc?^6VNy6=7= z$M|g|KE5x^cCpJ49Fsi60d^7!=<&KA%eGapU8L*t69G|S)Sy0DOolnkxS9KJGoUEm z-of@;{q&)yrRAzQ!$yzC$lvYb0PS{B7sz`H+*H%kzWLNXTb}$rJb~HA2 zMsy|LcmnW!jY1HF&)qfz*RTW-hkSj6s@6I6z8ZY8;%@JeXa0GiLX9%bl&1oiU4dq`h_62jV z^@Lb-4y+~u-iIzoU>Y8PApUnugF^gV`uo4W*SWxl$c6+T5e9TwlMJb))N;NKT%zwdgKt?Sf2n5jgdkrd0)p(e}SV+qny+mLM@7N5o zm8G9rS8QRX{Z?zW&0}}}@w79s8rTBQNb6XqH$ZwkJ-pbeyM2H4;ErtX-HIz=1FH{C zElJ9N%7^YH^+7?R3H1}g^s6NSUAsR+U)CS=oMEwPu)d{TIlAj?fgr3kY_olU?Am|n zR&|A;&DI0691*t=V-GHLmNxF;(%TXYqGve*Tp!2(9`g*=gVBLgdTPh0p5Gh?t>-&}2Z@(Y4(nE3#>Ns4`@nf;(?Jz+xF{ z`z&rSR3(1B$bYQh0=~3!4_7QYxtf7$JuwWdCwiwCz$J2rhNX1njgnEPKxZr3in|K= zrkK|D-b|Iea@Tm@udgbmpWFNMa+1F7nw~*RGsmtKRX&w@YvHC3qx<~=@T<~ca-R=% zu?aoOrq>};R=sx+@&TOddJNt1=~IjxckZv&rXLJ-m)ezK<&ssfO9nv4O4R3U+ARKQ 
z2hcQ8UnsSv_@T@EbzQ?c0t%dd6O+dmhKRqD6H$G4`9@ynBU(3TZtpsLGyt5T;`0|L z5R3yXEcJ7ok<4E;wBL8&KTIBe70=|9oB(;Pj-%~PZ-#pdKC?+2olns>+qwURHXsG& z%yUGqJ@4$|C;aA?|7)ENdsvj6RNwFk64FKaM;IawrzAz&Ge9D@RF#$Y)D?`sOiu}@7$>B?Rr zod({dq}BubFn@~Uf8D1tdSt}-!Hc1~wt#oVExAjrVYt@~#I~E@Gil1m-S@p-2h9s0 zWRKlM_BrI=+(g7qy{U6QuY7L$zQd=3+77#Anf;kV;^bS`eBjbxd-v<10BVW$i$xAK z(e#NfuK)dc*bSHq6h**8>>top_aShzY6k7V5{Ewf-}v01C#PcI@I6}n`|4n?^;YaO z7s7jvDiN-Si#M%3dFwJhy&m8VP>EL-*QZQHT0UU7U|-2ICOOwilPtfN82>&z!7BCg zRhWr>D~SJ*Y~9D>xHkzH9c9TtWudjx(dm5$jwq9iXkOFQZq)Su{)o$ZKTs*aXqW|& z)5UrL^n9tE6OC}`>5nmj1ESmZ*^r?t{%1XP8@kr-iGPz#k%#|k{BM(n)HU!5L&>03 zGn^o1@>k71s{ivLmjBwdXTKFGnE*Tp`cZK}_LI&%3Jj)Lz}-23N2pDWN?Ha@2D@NR z!^~!~cj?-tKinI(yaG)?AsZVTXoey6HqDF$N|EVn^&V75KAkd!8N9G?&W7r2(0|OG%V8{mbkPq*f{OuL6t(4H3J6TNMD!{NVMUR8<$Y( z|M>NVzn4CJE1=2Sm~U@DZPYijOAfc_d4iua16qVX1kwL6;@YSvTFDJp&==ko0?!5W z=Q&lYfxZB?9R@nDE0D0!1+*P^wM(r-ylj6wVRp3iF|HCF2i(^oiaan)3)yU4KXE+q zl$x5+;pa1sz#6XLA7IPRX1)Q%HhYZ7JzBpEyhjqgBSrs_7aTZ#3jB|%^nu2nOHWgK z!e9vXI)|cQsZ2EAwUm-A;^XZ5QU?4{>}`D1 z4JQ#)&vO8iZlD-HKM9)d`YWBVzU>{kK8w6N)s#MZ7EE=*K!{x&V6OK=5F;7i#|sEj zfUKwoU3q_aplwn{nmsBJ6@Z*P<*W#gAaj|!3_4jyOC3-O&Dn%L1=+21-yy&MdM0~- z>J_?nUU0I^K4RT~{-(*Y(K!x<8&7dLJCqj{gvic-lJkFTf?50D$XzviyajT9>V^|#+wm@7c>|=xdpsF zT)+UYHXmUykLpwEH!Fn?@@0iPpI)X0=a(AhM){vSKZP86P-mX@F7^?0Lwfih(y%Wfr!D)O`{_>Kb?bL_oU7cGJ7(r_J zVLw8BdSF1$M$t5*A2aI3bU)qm!OQ+_==-nkdE^3`p*gbM0BG2CX2;nFPJ{A@0H{8w zhywu4{;wVRcYDHxcC-c?dBLF8Oe{g=yB}*XqpcC=z&FLqdV}KOgcUrX3u&B(%esDt zuYPWCX!bXim~ulJu~jPPk~3x~C(|=F>)=|x@MG)c9)O2}nF5cV1bRP@#H>d{+DPC8 z6W*n{?SK5X#_^<`AFr0C7h>kKvdZN3Bs%8+UUmo2P7%Qj6WLHpZl;E|SPh}Jd-zXzQ-JT-1u`L=;Fs=p@b;(g zNaoMI`ZET|x&(4haDoqUcPQ62ru0%1Q?d=|7zBw|lG#~7ksIf;Pw}wlQ%H|}cMTYa z|JMc$7zP-YJdUBi4ro@{&gJHi$J^aYnr8V4Irz)R5uEO;XTT_9yYy?1*U0T?b~8j@ z){9=g_^is~?+2#uc@*!>p7ygbK;?vx=-123uj-fN;Ll+F;N-Dscuz!;N`jvr)_Q1a zRaO667-wAM>erkdPwvX}I(dE>2y-Y|TP+=Vn6on{WE(9m3o`G_eOTC)Ts|5sIPf>K zR+fT|On_xw(B6ONEb4tRI-eJbi&o<)BX;aICtJbsZ(O>kb!s1h2`+X5M&c_X=#OlX 
z#rE1dTI<;(=`YIF5A_9;D+*r_owE-`bj)U_*q{dHv*wShnbaU35G_kU9@n&&;TKRV zdo8`XiZ<^QHk$W?UE0RL5|Fh=A+VWN;Gi}nqy5@B*TrEt{^FQ_h#fe zN8oYmep6g)n+^{uV(>ghd~pJ+w2(mx1KmAf+Xp5q+6yvkV|Oo|Lb_61wItHuC{oK1 zveN=r!9yPNKr8vQ7}k^DScmwo-Ih69L$9e_nJ?ct3v86md6l$l{=6yBr#^Bv-S$%p z{WDEN=`di}mMMZpU88J?p~&zx427e(DA?D)rGW>s0npX=TqY{5HRVau+0-zgrhJU2 zR1)q-&;G6LZIz*#jPMbmVH-0$gMrCw1k~1SHF5Z~3?qzGxUU&qIK2?^th>8f= zQ90fJkVsLh>8QtuJ48>3v^*riyg+R(Wcf*sezR?fR$?F_Mc09E{H!4*grvQcfJUx9 zs&)4@Y}U=gzg70Ot7(3^S5d2Q=)A)SEoW8M8cM)nS+ibfV)fqJO055e+|?UE<~9;( z4P}?hyK=9$Ma;&|ovI{@`+51@F!dU1_d<1I9XPE${@2Ud?s)4=ISV@6MW692$`I+~ ztZH0VOeDC>3fSJc&|*6P$3_dKlR8EU=r!vV{SP^qpowQOem3JCMusHX97aC2CuDg_ zJlY?QoyAx*TV%(4J6t?B(39fTyk0}!nC3sjuvoL#vz?Ah3zC@BkQcB(!;;|+@}hX- zpO+gjQ=)q3e+1da65dFonAbu&fZPRE>gGdaHZoROCmpv*F=|GX6ytN3GFQX%4dFAq zAIuzT;EHG#gnt2lq$AEr{_(t2>+;75gYMjyah|j7JDOj+v;G4Fh~Hn@Co7a2>h zIY=XLbufR!RWsjTzw`%Z=);}Z>r6$RIhq|jVm zpKmx@MiZ|*-oR+J2s{fgIyy%%cj~HI2aA@jdzgXr5ablAvo&xgIIF%J#Z4(KUHb&* za{yCJ1-9+zsb+94jQNCAa>Zni2+U_j7ZQ=tyd zj!H#)J!r?a&~M zMtzdJa6Yl4DQr%$xyM9MH|6c;^tBdvJqvUClGeUb1$%S?5Avkz>`5Vkz$hLzh7fe5 z<}#2<4W4fsEEG0c8vvK2X+3fsms6@I^5rG^!5eo3*fie^iQ>O~j>k5>?&27pg3$M0 zSGza|dc6~1>=M+G{bx`=UBY`Kttp|Q@zU~#1ap4f?(N!A0J7y7O-PMMuq%YbqnRW^ zr8es`{^p)h(d8_ee7tjbnDg@UW*b(l$0?=dG{e<Qjf7i(=E}to)2w^RLhS9@r|8OJO;0X z*Pgo!mWbJZF&g-enV?VcvoUxA%4@vsYMx%R0n*KHPWp>$-WccleBRinTR`s;Bu5Y4 zwuJmcMf)+S6Ugf|nNk9f;g;p!uk|m&J`zT&j5FONUtOx+EIEX}PI%Wz;ODz*$SCT} z_c%?0&;P;wgj;AuC=zyE)iA)Ejeel7RL84Zvh>)JbJhxfzNbzB@gVZLCgtgP7ZNk(q{tjR8^D=&TL21JUZv^$JF9Vh^YI zbxAf$ABi4@J$T0nr#HJ~xWc68iYz*`Z)pa3d49;#-zo)9a-aT>x|nyxI>Y0IrtoJW=3wP8VEFB|ZYpxcMj$ zXFN_6qA&pWtIan1{?}rI2amU&2Xy4;XTRz={t66l9`SgFyTVasQ9`2=h!Sda!1@K% z*JiDe*FuV(_(u@wReCD{si>g95k)4SHGI7-`-`8bxx_~~&RB)@m8qS#1GCC5>Ul=K z#CK}Q^;+PfgSj~_=RP)o50$=uvvwdwS z+50;ExTP>~G=hNvfSZP)4+*LFqb-vPFqAisZlU>p+stawWp$-ldAyBAB$fO~taM48 z0wDw0+mtVCY7^mDVz_D>=p;{g6Mz}-cied(t(pO@zx4oC@**fA0Lrd8N1qf2K=)lR zi_J=#02*xL^Ak3BrDmd9?>%u7!0x_dmWp#HA6l%gnL4zUi5YexA$3oKzNvDmS&>kKIJ15dt=9e7eU+wq6Htk?Si#SF#?C5 
zjL<5sjFagbH|$#j1pdEwpv2cd?J@URUu0qbv&)K#C-2|a99Zt3Z>d^mJ!|(@yeF!s zPF1|mT!>Yh$Y~_p8&}COdHvR-X%|$pHF!%v?HJMHSKPFGgAx)~=|G7@=Y6Yv;j&&$ zIXw@iU1TAmg4frTEg=8_3r z&V{=-yads}B`#if{heV^o?D3ke!W`;Y`8JGT$x=56$D%OBa48bJGSa32I$78O&;LD zzt+%2MMa`z!D*n9IJQ6HijWFmI(|@>%8W9o_ptyn%^8Qi;8K_3FW1lEAP6LbXYR{&r;eN)J?W_9HBl$Fr^`nKpR9a$I?Y+6@ zVM+h6Y<=Jn3zW*^n}(wo9c9uyySV~EoGDXUxM3d#$fvKa4d0vl+Ueo-(^xfEipB*d z%kdw1!1GQ=o?c{kXc;E>ujt?t`~+cLsUU8si;cX`4`HVdj;R$yZiY3&qUbk`<$H8! zF4pWpxup%j+n97-#_cWA5AV4QJ`_?t(8y>B04=_>dO_rGqeIb2k`Q?eh+LzDH~&n0 zy4c4!p!kTZWhxIfz)?k)A(4!fUx=eOhH&1qdBd#q)#fX^3c7YIwobWmguG%73PX{t zm-sy}r=$T?eyI;BpqT|+?|6$oFWv5aIl>BY0vpo^esWl&<=PMJOQmau@o3xy^#Xd( zy#qV}DrVa>Setq$^wwvX|n+XfK~Y3nE*)8h{ra2-4P~%vO9O-;?9M zJNung;t8{EtMG;)`Q=v~HeThk?o`WOz=?2E+XKD-+7FxUMxKl(yCAdUoqLaJf!-#H z^xSX&hXu2c47=`=p24}GpY^ABsk1BDy4^@xyBD(k^ma)9-S!h0I@EsL?2D+>O6%>= z5O1i$ekn&>0+NuCUOh(}y0zm{oyotUn$Vn$St&=QsbRoigaO~9&FYU0{y0KG3v{71 zrXWj7fT3Cf1FyJ3G==%-< zGeH_$=1XsV{nC_4`@ zm)+p<^bUt-5tedwF!>7^Wl_fKSV zNPuDjF#evGJc6u;%r=d;)Qy)V4g`VID;;;7<3kLataXc0Liz#md|(E@{NygM`nli+ z3?Y9ANx_=#{r8%ZLCYE%z5qz~89ji|g7DebLdV&Skoef?%4z{flxONso($l9z1dm* z#j}NQ&luotEkf-=b3bppw?)6NqJ0mSb&tVsyrnIvUYmyP-#SM)DxHFg=`iRnzuBSx zzJdU_S2`7e+(3384alpY?-lR~43hi}k{gwnJ_&=JA}h&X)B=3MpKMyu1fcY&*=S?i zw*6mw&l%QKw(S8dC9`!3G?-uK-f@6CPZhmmBTwO84Duf6tO zC5m5jlVsoky?g@ll>Fgc1nru0g@sCfOB?a2h76bOF(l;ZkFq2`m9yQp@SIA){ZC#A z91lFSo?x!2&z^jWPy=bk|Ag8sI-=9Uzdoe9X`ipl59+);sB^?%B2f!m!g7`L{RXf0?yi^Fyp^vUxqZm&fp5+xJl{6HUghE0(>*AsdCvWm=!)wN zn#}wKU@xZ5{ciAZl@8DVs3ZL4$98xRUm55H{x7B|8hG3K)3qz?mc6BR3l#EBF}lOf z==s!w>~ODdOF^-7cKiP5nKSVP1qGszf1W+({kK|Z)Wg)^#`^sS<7&4k=K_BY;K+YB zQVCMKHP@&YhZ>5M?xTgGStN>SFhI27Ae!uYd(=2mOexNAO^)$~9)->g0$m3WfwDoXmzY0>SHoCZq60ux;QBz<#@aCet-Zl+_|rK56jAP=va=A1B5|CA8_OZ3Nz+$%Di zGf2pxiV2X>?g2<&nf@X4_w$z95VUSiY6hlz@$Tzpml(r*;}g?Qio$p>KFH;afMAOJ)M2)K4li4U%o)Pg}m zQ1qN<1+yc){SE$|tk#|_()-s14^(m11iX{g?6vInsOT##-?=9+w*b_^E17du5s%Tm zOTK@`Q5(LOF=MV0*8F;}@z)Bsnm1qn-G_uB0gcRQJc*dmEDg?dt0Och_av?t@PLQ^ 
zmf-2gE)(ZiHZbikyiH8msi$?K4P+Am#oxbh{tiX-PSULJJUtW4klkBBd}gX+lCl78)~lw&LAM~2uNFf8jVr}zPy}-?uBRiN1msW zRONQ`T8W%QCZg&@P9iA4hjRif=?CSRK$3OQE-y8r;aF`bn*ZPco9%A+4I0)b03mmG z&>N}%r?;#tY84=8O?B2=ADQr@gK{~wLoZ`?2BPk^h9F!Q1gp<=xAUh}o%b=z3SX~w z(BN&kBON_$02O^mZ`cA9N!z)R6s1-xh_K!+wTK7mQn_O52yP2M`i}@5baw?5zIkf@W$Z)~&3G6s znvrw-kil=YH)1aRt5-aI2sByEaYBYGmie(-a8f=GE!GAqsr+%hEeh=U{jPmg4RC2} zSZc)J&3b4!KNZXt-1zb<{kd*h_0eZUP8XoK8<83QFNoiThVlHo#PU-mxLWyS>*y?l z#-V^R)nB|ekLUy59Ux)g58HePqFYuJn`%r}esLs-X5y}gs9#5pQHMXs3k%9DXpk+? zd`xJulz+BN#TzDtHL9*UM)GezksCX`f=aUA9LfD_27jJ$=S=&op^=0Tz5Q+eTf2D& zQq`1XuRxK5J%%fmRTPEcdcq5>3hT7K-*Bwlm|`V;ET`kTqISN{cPGz44aWYz9ANW1qpM9>a-vFaf|d}{)|$(f6? zh7G&pKVe+?|-2Y}F{QnaChqn5^xS+{$ z%CF03bGpic1NlRp{0%|N%j)6;ZyXc_6atdi0)_4Zk8!1Wp>AT9YA`_*^OmC*#^1QYtVzg%#D28p@*N=H&141 zAPL!uxHml%Br$C@(0Kd9=UQn(UvHMVbu!ZFFWV@rziMG=QHC=Yjjk$pdIG6~Rej8`w6`1F*csB&vZm6F|`|LIDzo*s#J zPa~c9PWOtaajk+w5~!5Eupw&L7JTo?^F~>t4a%qfr~U<*L)4>!?%Qv>D;p4{l8N-BI{^EJwr1Rsd&^ z(p5SYtA5U5kZv73HTFZ~d(Q6bpjr^M0hdS)o@hy91l1}8O>@(Nizr>7c$64wd0>N9 zCtc~&CX?;4Ks<+^CV!runxu5O=~!=Zs3Arf8tNG9Ub!Kk#C=B5?V#(o1V)^Q+!H5% zcjOtxucKYRRzX+T(C6vS9e<%2<#k9USk!9BRfQR*zt5?$htcO@%{Aj|hw9ZT$X>@e+&KwsHZF6F7I+t;qQ);9~jwW z(mU*nwgOO3L{|6w+^0X}D#zDux@2wlC|&vG@ekM)g|YMdXjK&y-GS3n<3#RV^@M*D z?1xItn`H|MrVodcX~v5lp0=&_7p5YDS_QL!=461WSa`R*DslrJ#e%=ZIChIfL|f?N z7pp!pUW~Q^yg~^%fCE$k`8Yn$-tp!{q|F}Uulr+HYVvL_qS_u{zY`?IXw%#Rg{kVu zo_n90{(;t5^+={s2B00~i`OB!;CxgP`*P|4w`g4`M*#Cobk|1xS!xeJZl-eQsg~t% zR+hJmnQ0ZUn7)Qtsf%X*K~BcHAI|PORSU2epBRdwp4iFKaEaPQf&XE`pyePd7~(%XtuDxY$W3E^^OF;_)y+#Cn!ech>l?0)kbTW(xJ;|UMe$x zwm9+4Vk!EOMKR7PbzFKfzPG2TscR>!%98z6RV~e(2|el#z9t{tG;>l}Eo~-a&;HVH z9nldgk$hxasux&>j~*yXL<93rEe)7|zyG+o)Hrw*`IqfOG~V&t|TOWl#;+YDSo!^VoJ~c*TxB~t>WiJGtfQaC*%RJ+GZakfWqt( znh8=Ei67ace;>e?Ppa+KHp^)sVpy(gLbOx0;f;w+sy2DP!7EmJH>8s3B)on(DrX64A{*GcBLjnD zgifpuSFijw=O|QffI}QueD#w6{;S0la*P7O^Srq}qs=?}23ZUpBgpkp(S^&Cbauuq zupwec`Y@V>jh0%bD@(`x>9Oi=MO7Irx$XQ-mXl7|iy$0P*>bO`U;bDbpVd-Vh2+AH 
z{dzsVaJHgNwnZi(#iXfC^j(qRPNwwbui}5&9o)lf+fcwac1`vj9M2#}Nwd?*Br^Zt z{O!JpYntN7RAC8EPoi(_naqJOvqTn+$-%a?)aBOk@G#9&gHB?4X>#+jC=8*>Xr51l z9dxy>q=-cbjAhUJSX3WXeb@B1E>S1ot^AERDT=8ru9fF zJ68}opTBfBh%A{Oxw|RR86%*KJ*$ZeI6rxr?&VQucdU;_gOnfN)!Oma65kMz;yg5{ zCxOA}w$vr1kKHmYqUSU9HWF$5M5$=!UIKN4T>~C0S^=I--Cf0n=@n#0lKl3S+Ax;I zQ=xcoeenuoxvZr&lT150*2~Qh6z$0EyYq=YFGL%oy9_69iFYn-+{O1vfi#H8{TCV% z@l_i1qF+#A9P8RlSHcV}l%vwmEg<9VjpRtzHNMVGx-sYXsAiDI+F+@7wr>c7DR{xV z-0m{w1cclyNpx)3g`?WV59qQ^vO8eQceSn6Z`SWzQGJPH3G!Hhq2@f;Z;P3HZZKH} z=)sb3hn2f93)NckFAQePGVi-BLR-v}a>K}bKonQ&$r61Szhx~WPL}QM-_u5sNE{T2 z)FoS}qz)fAi}pcf?ZDEk@ya3OAp>}06c}y>#9KF<9|x&+zYd6SF8svYy+6}psG{Ij zG&vr_uDk_~_{NJFnfd$&r@hDdB#t!E8{f40{qm-0GLgn_^vk8dn`qvw0nVw*u3 z8SUdf>qOqz3DUCrECq&%G#*GJnU0ZHE6;autv<_D*aYS|`_f@PIJ)+rzB?MuaN0N)%2v>XI_;+;k}T~9|#sI|M2%98i^#j&Q1 zaqrg_I~oVirfsDjUgB|d+t0QnrYT6xCML+$!u-RyMIWsxlhrXXMV^#sZmBCxfLrr6 z>hP17sh~OogMXGVF3=|8vIn)mcWy#2M>DDskN=wMBQ0qZK3l)urg!Q`Q`g!w1r zGn|s9IQ@;?4;>he<*%xbhPPU_A-xA~411`mlSf>i9%|OPL(ntb#IipIN@F44eKT@Txiq zSHs|MfRM|5ga#lnbY<4vEF5z=Nl^*z|9I3B)^I-$nfMhFTvKp^enYwoZAiSngY`(D z6&#VFvm4`AbV=AD>>#qTJ=xllX6PEs)0R*}g=AvIx?|8JVH4rl*mg@>6&%j%gDQpD z6bo~9rET5nuT9);;^E3B2{+y?%XU3h8?F;4_aY;G-#i&2Ic_YJYiCzzf0ivw=RYLU zqVr!^MBO2ylN0C#;?^6jNgJtTe+)-zG!GOZJ@@)~rjkgBI}ilbvFh&++h4VYiHy|? 
zuwCa8_Id$1Y+Uf(0_YPWE>4Bwn+72lnO0_;hHG$PFUWm~=mndew{CfEi03T%Tr#Uj zF#ft7N}+=4)DlS8xti+R+VGgl-aOZEDqPV+)2__r_VoBt+2J?3BnWToV(@is zCPl|WQmf-E;yUtt!<8}l4xC)I%y%0Yg60h62ucWmXpF3c-gZLkqQTD@)($IgdQrmw-k@WbL4C z0{Qx|Pf<63eGnKJiS$-IpE~Fqx8CxephFcXJpG>rppFsArF5^O>=KQZWlayfOLVq{ zs8_cI@@*Q(lIJd7s6i~-W_V+_xHo!8gX->qt_hC4ar7%nljqR=j!Vi-FhcXaH-oi^ ziBN6LddTH;0?iL8bFpixw z;_P!N+82^)QX>*xTy>Nm$t9&a6ZXf`#w~?IN%d?OD|(wHw?iUT7k-*LnGVjT|K7&J zEOmclyJ^q6>+-7SyQp~n%u%q)@;O~P^viyJetS_%@~qgPd~b8Y#z}FtEfTjH!>R(O zXb|+mM{G$h!c>MPXm_c+n(VtVzIze)#b&R_+zgpV0mqi?WTmcEs1-RWdJ9_Ttf*`o zS8DJp#TW%LEVe=KYz@R8GLKcm&?XVoRDB2RU+|%JB72~DVk}I(RD11_uZiBa5gG1; zvpb1Jmjdy0x1xaLbov4adao}C5xm$g$mJZ3Op}735UiN4ub){2IZe4eE?Md(@VV~n&kSeq+!9j- zp0J7x0Ov#{e_(NO@sX~Hs;YD)xRGu(QFT2P$&FtI zAIqf8{}Xh}&6^5>3fC=`;^Igtbi6Rmd8+Q=LGx_2S9>Ym1G``qP)r6Y1oy7s+4OqL zu7bWc%KFj8i|CDT3J(6}0l%a$PMxH95d3VjO-$WEs=NEMWbO4oX>>_?KK5>;#)1ZW z|DnO!mrTO9iwYXsu}{wa5E%yF>_KApX0~F`ZbG(-M>(PrbZP_R0RZR5b3M`HE@S=%hH$7P+XXRG3^Qe({x9s>%0H zFAcNYa&sY(8?|X=L9GJS8($fwBx2yuH4zan7GdW9l%RoO(GI=3f7&c;t&y3~ZS$Fe zNcgV0t)`4@iypBj)0wyiL)(l_n`m&006xStFQ?Zof-_Hpbd;TQz$E&2jpX?FXIw>c zvEZiezq2F?yZ9@rq>(UxC9wy)EUHWI{6Drq46Qs(57@XLSbJoN0lfID`1xT1jz3Hx zdH3s~7+8pDm%5R6Fjn6G!i{l1z_#};;ufIsf9zQipdEew#>+oS p-rw|~4~mT;B0SE^fdw*h_q@OQ@!kq+8i`7cF}(@g*X literal 0 HcmV?d00001 diff --git a/docs/assets/images/lightning-dark.png b/docs/assets/images/lightning-dark.png new file mode 100755 index 0000000000000000000000000000000000000000..d1c929b971a5a17bf5afa300d49858d280d7cac7 GIT binary patch literal 11515 zcmZ8{WmFtZ*DdZEf;$9BaCaHpCAhmMxVuXjAUwD`1b27$!GZ=G+}-sX-nH(ZJ8QhU ztB+Jy?Xzn~sVGUKqY$A$K|!I*%1C^Nf`YDveD_5{fV}U|#5zDekR4@oT%e#(aXwzq zQ0bWjkUybazDtWiRe?wjAwS@(0Ez%8sMKp1bjj;M@BXe`SM%nuu$Oe8B|(zq`t9w=1RAgb(V4c+?8>*+9iKl0xB(2 z17gDgu>Sw22~jeW9^NQYFa;7-<}@eM_gAGpAO%t|l2-#KldgY`pqJEn3AwIU){Mi) zso3nf#YNUe@%x~xXD>4|Gy9(wY6c4;(T2}cGsFoyjb+JFAD74qSMsb~U0pr2u&@x| zB;DO7`gQ-a^l7)nY1f&H_yLQF?%kC2t}0o{Eq15 zuXY|A<$@tY;~&Rv=Gm%5<1rl>sj1bIfHW9>rD#+~A%pj@EEUs}Qw$QGW`1Ro6AC3j 
z?522$wKtVAcXGPyTUl8V)XA~EM@BZoyPPZK90(Q=t~`dHDdcZ~X=Fnje8A$U7`< zTm1WXVj;$dt9)AA0@1zq2W?PLnDVltZ2>5 z(7&hjc0Q3OYwYdqAOyrZ_rwl~V^y^Otc1J!UCpoPq+_?0N~c#PW9u=orihc#kJq`Gv-Oqjg=q1zVwR}8n?7XOP7D~C5- zr*-xmlP1PaA$kB9jhZ=m=#t&7mea-p;w~-hiVyxAxYlgAIOM{a|8PFzqz?*T5qo&N zYn=BbQa;n>BO4MA1z#<>N$%@(E6@M^;O0BCm=l|GJwR2OJTcXIw1;$t{9!z+q#7rS zennobtthZL9%Yi?XDk#HW*mub-K~r-F;N3<+7&J%CJo};MDPBGTm)E%SPt-W2^>8 z7h7W;6!N?)fH@qiWNcd6|2v%4%d|1A=UxP)cS?f7AZ7+f7H3!>t}JHm?d^%niC5Yh z7KSwr(I`TO>C5A_a%qOs><%WxWvE>4Djmf7|LpASy1x_cGxO~2B4(x&+7G2AC%adx z>f%6Rd^#{9%M$BjQzaG+r~Mos-N{!}RFq?e2q%+k<2is)|;+qbzN|Uh))Zd#x zj+XcbgFF#Axm8X`_c^!b*PGT*Smol2&us8P*F5!rS4Vi_E%5(E< z0{{SCdV*jbu(~DL)IBFk+&;kpQh!w~$^09{j-i#nGQQs&U0PZKU}Iw!402g=W_+~A zRxPOf8?-W4QaZC`r4*)}A{Fv>1)dQ>Jfa#nl-rZQhjyRW=zk}E?+shUo})@HR?6~@ zIMbaS=;)b4lYL-1APrVQjT2#WS?$9eOTdz{1g&@akmGQ)_b(JBw&mcT)l|e$n>PQe z-`UkY=<9E?7*E!Cx`5=(RkBTt+Byyh>{$+vU-tR+&bl`Yo710A29+w{an`NFm<#}3 zL_ua1aKrlmckFNExHfUo7Rrvs79ok%C<>}EoLyNuGg}v`BO{8Q3p)nr@G{}_Pbsw) zcZ)^=LVV23T`4Uw4H?|OX_va+ZmZQGbF~p7Q`5?3-9hTeSy)JfK^H3(Rla&LA1V~P zUvZ=wH$gBoM?T>bp~=F++BvZp2{ zR&hsMX-P&n*IQlsT~wOREr*Wf3Pc_@L$oNBg^L9bzbF_hxq9O}S&97(@-Q6`K z1aqjWs@5U?%9i-i1`9xEvKCUAZu%hwhE|OC;fKz%vj9h#W<#Yp#;z{KvEG$@G`j~;e|+@M_Jj8d;| ztoPjspIl~gbSCnKs3N@-Uw-0s7JvsEfs72xUUfo{cbg_w25Z)d5l*z88HMGu?srEp zw^&kAQg`4r@mzqDoM(Ka`Yhk!$>^!=%RtgTF6?v$8ZsYh2xK)f>`X(95%IrH8>c?Hp#$S zPqjOg7-qvRJ3bL3UEOpvF(7s^%&T9SF4vnf#~=(6DEsBIw0J$Oq=$te|y}2OuXL) z2=cWkLet91%O4Zs;J9=?q*!8w@#R9R_eg&wxt-t=sbxDwx5&@LhK8Q)Y9~PKi$ebc zpV1^d8bvg1^DV@aIEg1y;xpn;`Tq@tKHbPDHdf{ycL(Nx&u~lL4sfX?bmJo_?w38y4y*kBP$0?5H>}GQspE()1fJjKwspFhHb6<$W3vm&*UcUKuOrnhuH+)rf za&n?Mx|J|S+F!`9Gj`LMyxk~P7veJmvs#MweM;6UDRXA&zVUy{UcVn;tlqa8b6}BP)&`mb;>pCv}mkCU(r@vER6d0YG zj9ezUQj9uO{F8VU;Xz+Uxa6cFx+nQNu=^WTkFZT+Qd9|GDJOc>|BbW}XlDy1@hreQ zqBt9nB3VYTRS-F}VpK48*OI|6Rco%*DLzeJ-w|5N4NPqh$Hkq_HQzffm|jt|bAhqh zl8H)R9ER)mo#|sz-kyx}xT&N})Wf{2!fNY($5at+Gj0#TL<+xV2J34fs!z#eE!3iN zZA6(0;8vE^ovji_ceSGThr<9LBRdeu+)Az<94}D`zjxt)udS%`xeFX&BpuPP7sG!R 
zP&N|YuI5WdKUEub`-fjhU4ABWb8|zzTrG$b=GX3Q653VA6bxu*{+ZR|b1I+Am{t_9 z7z~f&Ynqj7u5lG_x3;$&OA?tB{{KL)#%>35&5vj@=3*&p4QJKXeq^wA_?ZJ z|AQ!lrtz03hyW?E33@>$(uC&$0p(sNUA~L=|N0{^yZd}4(0P2xJvT-7SdNdG1oCDz zzgi3M4pJ@G&tl8^2C9SHt=Xuo5r=+`dZ9IOt=z4%Ngbc$1EWA38k)1;7Qh-dqbqMzSu#8JEND6utX zN&h{n6!^b$tY>(87J7PZw}a`tb~cu*smt2l6qC=f{V`+zY|7`;*?g(J_;XW%0;9E<-n>)ZtEk><+}ZPI9rGMA-$z?mk9Vx+3l(FXfMxz&-dzrc@f+6u}=xK+UnBXzVn~FKb@fJ zQtEO2tywAad6*TDOxX8tmZv~-I_1L*4o8Rf-=Z=GYIi5ii`s-`ULz ziLbA2ra(IGZ)j-fKcP_Zt^XEgW@fOpne$?VSbr?l{97A{NP#WF@KNMNW+PlEzPuk$ zes#3CD|zQ5Vn>445%3}Uj--^q{Zk%NP8cXEDjr-y=ST)Kc0{QQ`<=m$18h!8l~Y+w zIJmDWih};OH(a^$3P`H(&8*VB@|7*Rrna=SEU}plF-iq}UB@5w#Wceaw0fH~M}f(avi|x{prBS;*$>&AW-cmmt9`;CAA{$%vWlcv5t-4d>D7H;mlqtsM_!#$St$FH3* z@k>DxQmLqQRz{D9RZ&jz;aAyG(r|>U8JU@x-i*gfq!c5LmsG+G=66<9!4aR0t;dJ6 z+jwL41zW#g6~;?2EW*_VscqzK#WZ~({;X$ex@Ew-&@WiP_HJWJbWBnRwZ8&mi^9_V zaz)F+HKQUu6m7T6v+lXwL@MA8ywm(WN+VDIBC4hciyT$;_*W@YKrktSAdjg@L&=wx zghNj5zL>4qi2W){+5g%i0zR$9W8#7B`R-(yLk)?Yoynce9Dew+y36lb7^M5HKmgoN zEV|GpZDam~jKGfOPJmG>vxOkNI!M)1~IB0@t1d;hBgmtx5kM9N>wB zb41e;t3hbNCrm8Q`K{$U?mqRyy&kK)@XdN7`Hxfh=N!sh>aL#*6ZS7^DCn7R?LP!$+utd+@yOq;emQ8Hb z^BMkeS&hx2vn` zyH|Mjwd3j-$>EptB*eD>12Hj}98#M?=w7}Xn0RZ)o4z&-lNj9d&PXv0vI$CaI||Z@ z2@P>pB|+(k5PLFV**)Ii%O*Di3!LN2B6Y@Pxj5A##3mV@V9%pSev6 zq+(d1>na!xJ0AE>YT55$fH@(c$rT7ALUX-8<+1;A%SvJgFn-Cmn#yJ{3GAdp^?d|S zRWnvUjpG}=ZZBT%O>#Jg`jW(+=Gy##jxnRQ>zqnQBj*7C#G_(nO}OWsK2?1F?&ECo zbaTj-K&Nc)j8D5~jTR@`*}zWzJK@JrENQfz_+syjv7?UI``fEXxRo?`K~7Rqk{O+5 zfU>G<-F?^Ze$54S;ILa%6y2mM3!AR4A9{*NN9Fe=g>M%s3{4v~hjh%e?n?(o(7931 zJ>^piIuR%R8D@ZvFM@x&&&$K5m7&jRBb?tVC|gKiXD`Dco7eX5pJxig=EymzU7%j& z13j489R=Xu|)<9Faf z)F|fwm+l7@vbuT>ocrJa)YtzNUu%(&R996o&lJg*?{94_-BsZv92#k9-Iq3-^h<)s zvWMr6muk(9Z)|q27*(bBAXJ}&{nj7vlY_y28sB3ZLtd-o{U-ahr@+0mZnkroP(gob zz0t7rIRyC-Esa}w>HlUt^bbdpz z2#>1`rgg`3xoIfk8_%)CwO10uQInK}Z$=zlw#e>tf80%}mQ=&Cn@Ln}(W!(W(U2lD zXl$#Lea4N()f7)hmiyUeLxC&~pG@q-(z!A|e*Ssm?|M)+w}=bhQw)wcIp1FhvF=Cl zqAhA#cvNo5<|LTRo;;V6E{MYcIHZ9l{oz01aL8D!$P|`5nX4))PUxPa^ 
z?4j?{1te24li1qXd4wPz)zDg|c(nm;oKKCn9+xI*1bzLUZU~Vqx$K?M^1kldu6xFe zp!#FF%H_aLUXo831JU52j@W)%X$3xBqWH55S-W)!UDicQuB=sc-mRR{DP^jZa1$lZ z{B!6g9j3}MGp-Zd=6r#a`SEhvy3nLKxXtU{c1?RlX&SiZ2^*|tA;92@ zyp5#VwR1z(JO2l=)(X-&3kwIl*9wDx*V}*)r&@_xP!KD!^920xcLWTQz-Z`Me~`=WpXrcllB1xRMFmv@ z2U!zx`K4MsFY`~-8{}B9%sZTznV3$;`{4N2Add3n;{Yax*Ykgu3~F0E3gCzU`YGSiR3>@qZEx zbZZ@anHAX*IPXxK@AO&y>a+rhS=zl(z^hxa@_iILda;-rDrIBm_91NFIe}fo_a1E0 zpqG~ylFa21YlR6RXD1s;FSlM))J3vEmvwGI=Nd9A`xqjhQ`$Y1Q8rlFz_iqpi4!uZ z{rT(@NT$DO3&fjY%F4(?Fv#jOKd7z%Cu3O1z+gs zM8;vZYpL}kO(p*Hjc=Fa_@|?1@o`oXSF&v{Dx`BB%D&tTRxsnMg?x3oiNvPfi<bf;=}%xFjbO9p2zUxmUdCA3o&d1Dq~xc_*CR_yY=O+X&g;X0AZ7C^#duMgyD zk3!vWnsIKG`tf5`f8qA`?bldr%IK5SJciPv0;Ixe$3+?#t zGy7fuK7utZ;%%Tbc8fDBI%WR^QgZURy-#PQdWO`)L(`f6rKdk}`wDNZw%dQ}9$+rP zy8bC&Tqgqt^v3ELp^*uCHf~LUTz4m;Z!P`eNce&=0vZ1D%Y>2{r+Ie=yuZ!5+P5V1 zyA?=Rfku{$kSu)hvp?HUr2mD0>P)6J)$W?@`(-P=_2Y%gf1mei+%Ny7<{Smvgc6i> z|K^>sc#OHdn;nlk4p>C30fZpLL!k<$HZ?vJQG-%pW0rmmnl|c+d=i`ZBY43T0$C@# zS|E$qSVcvJ)n@jOSIP0z0^rY;hOgKPk;cZF-$rFjKO5;Y;Pm#X1fHwAo{si5-rw%K-Rfzuc_IolsQA2A7bz~X zbNF2L^rZAoMqVUvsBDeD2Y#KpB(|I#;I-IiP<}fX$?jQ`%)*tNA5qHUf5qcubUyY9 zh11@Q-01KckMUS_lD_;+DQ*goc^Cvbo@j%+@6HsNto+4LmdIZZVtKBvyI=Xv=myb3 zW6RcLBpEYk7<|HIH#k4zkKc6;N%gD$OSk*7@Aq9=3MhVE}yJ(Y=l1|>U z;8_0A>^uSYM~EaPBqR{BQJFGOi+!3=(#2yjeDRqca{Nc-;ggU@5#67c3gKy|5TI`d zOVvs+#C-H3@f9-M_4@k4;`Ft24BgcFiq^;>9-NTAay|YkDZ?SZ~tVwiguVN;XdL!SJ|reHaFQ1c7gaJ5FUHifVN*A&=+H z)0_F-3La-e4+}IcGc+XH$z%|q^)`k`mKxJJLqNbG6Nu1BOyXI|DWBey35F6$k7B|b z?~-S3HSf4eSg{7*%TpS>BOJcCtu#5zxuC|T+#Sq_$4X~saae(rq+Gw%&50QI1m4TZ z%by=ER+FEd0b{*y6Iji?>|xM}J)FCs8!C$%?G{ak*=fIhLpk<2&+g2Mb%a@o;)up_ z`GOT$JGQ#i|Kh{HQJhsul!|3s7zB^F?s*F-Z?nUo>-srN2O=wT**ehB&purysKO^2 zZg>sLo~%{`+;! 
zu{$?0FNEFPoM)$U9pACF(4gDt%vq>ow&23@JIpwK`Px1DggpqOzc=|k&lA3szFB)c zfFlI8cDFsb9jOMJIvjE%2p=UncasFIVo2M%QH&UVkS+)d-<5z3LN82Gj|M_DTT%>* zt#e0xWNDTyk5+zuLT5EVC(ad1aYgx*<($AIY|5a_QeG?2+T+D5^xo1t>bP@#hF-po zyp>{3wYJD#_UgAio(YxG6K6$n@vPInlrZJ1UiUxx9TC4%Qtru+Ym{habEI<@FGs)j z8TpYFY_l-=ZsOxlJ`{u(FxgiD5YS4 z7oTd8vb7o*s#;936Db)*>P)0+7?)G`Q7R4l8JIaQT22ar(y*2fAna4tH)iIcwZFCZ z%)~-mm65D~sxS%k=ke;UmfE~L6KWS+mDC)?2^UgpQzoBAu+X4pIakP|&hNA`Rgr)y z;FRw-diux+scoE3#sFBcX>*YAJAe|ek7%NGqRh*7Kj&$5H36C`{xKijE zig}!#9+7g`>7=}^m)Df(3nDsE1HAB=`IjD;7*1IDN$QZ)+kK2yvr2i- z-1s=``zQ0JE!ee|@E@iFUJv&?5Us=0T!OOTt>}4|PzjGVQN;b>KOZE!pZ5+hyu>Yl@`GBdN+2FlXf2mZ>&NSTp)=3u^pqfF-je9A)`e5mx{XJ1 z7q@Y7!cPaATy_ik&@nDlNU8FgWGWAm2(ed&ioGjPdQD(&w>>?MF z7uvOXcS`6S#I)0$1bLuO52J$ydsLAQ)ymsT6sf2i$GB%1qTjb^JAu9-zg+YWdSCTF z;0>w3oAW%^qCQ6?ADkDDXBX*_7*lAw`PI&jnzN@g%7=I+n^Aitl7#p7qx@u$Br1Sn zvZf?=*J817VNuV0Y;KE{SP--6$8sIY^^lhGtiRMS0gx$&c zA6;vOWIsbc{wVN_3NJ9QnO(m*8gbF<6i;D}T*dgRFJSg_SC)5BJ{*_xsnzj^qZT44 zOQ2c3Q+o8Drkkj+ntxxaZUC=Qp4Xmm$AS?lMKK%|KL>{k<)7QfvGEGs0~2oOz0Zaa zl@AHAjMR!vk*6Y3jRwoB7yXVAvT%Pc?8rDnD$0gx zzZh1pM6!UZc4Z@UY7_oeV|q$@eg&ejoMBb6mmg_h?M^gXgnjXi+vLk*!H4>zz9y^d8s#w0P)kZ z%Ke{fZMV8b0PDMp4ysAE_`pzt85`^T7{mndlw@l|8$Wt~54k#!dhUK_G=b&lT0G)}OA}0S z+w}7vS$`}-CQ%e(>gsUt=)ECtv(DnRYsS8{Wv_=_p$Se_Bx3+EkG(?KpFh8|_?j1B z2ke`zz^sT$?Zb5}$IxTpkTQ&AXd~uqE~g*-Ut;1^)xbSsQRN&@8GUP#%y?jYRPE0j zq93Y;hU=!8ZAS`r0qBmO$YvzqhhF1)##E*_hF{@mjmDbV+j`&$l>YWYJ8+@F&FEO0 zzM$|MSC{TKXyo1fbj~Rm4Sab7%=MvBo%m9M&R}k@cz&%Mpi!7ae(PZ5eU|37d}^Qx z4mn8$Gcu;Jq+12iz$Yp`+Pk)_7pfnKM_{bQoEN^(ib3lj4*XW_q%YQFITF>2zNJyf)d)!9y0kZZjk+0 z{=zT7V%+wE+oenmEftDYM`nzOI_WFddaEVWW90}P&f#A~SUYzd$I!f{?5ZXC`559+T2%a+L@L?t0g3?Ax4fehAGP zuk+)Peg)8OKlP%%ghvq@9KBcIqLp_4 z*Nbu@_{enZpHG7$zM=PDlo&HOUd>z=W)dY@DSWz4$VOj@_4tF$mkx;9Z6b=F`%4z8 z9fn@nmqbON?NSA=@!?is%1|eyu?-uzbv5x;e@Z@u3ulBXdQoecTLTg2-Q|>QDOD;d2V}tRGUx z;NO3Oq3Pr{sRRb&QlgEKDU>ELo9BBp_L!BtZC0Z{1eu}V=pCuWGDWfB_WPKp;Oh5E zn9C<@I`KawV@yw%>^DLsMvQCF!xi4(UH$X0ZNrvi#V)M^S>g}rna_-Jw&;Ez0$H*j 
zu5M)mpMHI~;ZRYIfJA{43*_>B86$getFI=F8gzPi67RDDR^bJRyDf$QXdW6v=&KNOW)r<2CQ~J{&DP0WfArjkptwBpoA&4b flL5~hzL00Ak7a&6sc?m8m7!!Ml_aXfjD!CVvcZlv literal 0 HcmV?d00001 diff --git a/docs/assets/images/lightning-dark.svg b/docs/assets/images/lightning-dark.svg new file mode 100755 index 0000000..23f34ec --- /dev/null +++ b/docs/assets/images/lightning-dark.svg @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/docs/assets/images/lightning-light.svg b/docs/assets/images/lightning-light.svg new file mode 100755 index 0000000..9c89331 --- /dev/null +++ b/docs/assets/images/lightning-light.svg @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/docs/assets/images/lightning.png b/docs/assets/images/lightning.png new file mode 100755 index 0000000000000000000000000000000000000000..2d789ef09bc2c80a3dad16e2764cb4635fd0c5ac GIT binary patch literal 28443 zcmYKF1z1&G*ES4qq#L9gB&EB%8%4UiyE~DML=4*OS-%Nx$o$*cAs5sD1 z7)a(9LI?y!%~ne4%^MqM4`+89XBSEZDJe=9H)ktb2TKUVXFmI#wZ^+$e39$r3rVHu zkk3la>Uc<$>XLCGxJh)3RLEG0(Ny_!cyGJ0Wn|z;e&aK_eW|N$W4&2Fw?6p#106I&m<}mq)R2Mi=8y;8FNoE!vH4aJxrHJ62B5s z2rfk2FJAH+L|hgoBs-n%9V8D9VlrfIwgGv=0x_Wv{x=B;$-c?(hk@vSrow~COM+11 zSVl`jY=j`?qgt^t5FK_1u9Z@s0A!X0!mglgB@d}?gmjHyq1Hl>&>-w@qa$8J;QS#b zzp1HxAn!9ExUy&3!lz7?Slf(Xr!s1Vn`s1OLiLeYToJXkS;-kj6bP7bc}-uKW{R`+ z_+}7shH_)Bp8tVB@{;ht)1E*2j9^rbjPS-cU>LLPbs{{!GBewF+8Ql$5r;t5Jc7ob znAz%xLxd1Q9G||@9>Lidq2_sB$63{4iq}K(cIUOvU7&j-lb_f;H@CI9Ijh(wsc$-{ z9r$G3W89_v=x`e({BV7_+ObX(%4rxX2Y0jD^XEdjh;;N5@_Vzze+hCAji}ELWRtJ@ z6wI1+7;!h=zI2V3&iruBS@bGK^3&^|v=h%3>nqInu%atGkZLNScOKW3W!F9|;1H&I@I% zmLR$4CS>kHAnk@<>_&Ys;e9Vj)7K-3C5d7YLh5Gnx;#vhA#%8yjM0ScuQ(ZZ*IS#& zWETw99?d2U5f{umQ>2`3hPF^-IEmj#IHpwd(J*GQI@Iw<1Q`)GRNeA$FXLz^2cmJ` zzEVo!k>^m4)u7arCp!~ydm#{IAls58^c%t%_7K@7$MHF|_N~ZYw0f!L?|j7XWxt}1 zS$J?{CgA5~jk*0G7Wu%NzdcrFgE^ldD%H>4wL^%=)#t#@`m0y6hMyTZ*?g$xRXJMO zFXC#ga@f|GOY_khZ@8DCGF`~LaNd%XrsNXxI?6h#v-005NolyT=aKo4Vc);)p?{V9 zUFip13-)avp&2Jfl#V85sP8!3o|yu};rI$lMHOFDCW&nbY_4sqc05|J 
zmr}@8)5f$XhIZw4(sp1kywT7j@xuF~tr>{22oVXbiM$D`(v`mvO~w)RYq0Zi?5A&M zU}iXL>M%Ai2H+(PqNFIM(4~+tvT2l*ek)xrMb$jl9MVWAF?ttRDXYo#ZleVGZ)%xl zY4)4_cO>tuON72hm0Evy)(|TU(tV6U(Ct<3Rg)@wn^$AJ(e{fH)tso#hndPAy)4o* zl&6obW_T{Zi?KERk*qaNq_iYoK#EI|OS?|}{Iiu9;zqjb%@+?<#s?^aVs}-SQ^-urlzIpl*yO9EtA`C=dC;}&i>Xe)2e$PiXqXjNN+$dXPqztpS}HV z?%h%`N-@5Qh~Q-EDQL9^lb z%kftM9ReK{os!C)%2B4eO7pkZ--MKfl>3MO41XP-&fv)s;h5pv&f3nJ$y#YN(eu$` zZus8NWT{ZQsrOCysG-d%LQg=qNJmr8uuirjyMlJQwW75cySQ03S+y)@+Pb&;v|*s3 zq>-q4YZhUlvca(-(A3(Dz`F5gW$#gPdvah);KxT1MD(aRj50DkvLN>#Zn%P4g3FmB zUyj5Ae_o{SvRUhlVvWvnZ8YEvz3#KkewQ^VXH8(;>>hp|9T%e%HpH4H%9=&;Yw36U zrlQBCtKpj8#AS9?T~^QG_l2q>-Xq(isKwkr+_|rFbh+HedVfo;zTFxZL(NTLw_hm_ zQj<~(wXWZ$W;16?(yo}PZY=UHy*5225WNz0%iR#&5awIvT-G)I!`IdLyKzXZ*Ez>H zVnr&L`q|>y?|B?j96}3Y4I2|G5vu><{>CqWsxw|paKZAbcJ5H(Lte84o+STBq!C+;h&Mqy(_@IH|4ie0gV5+Bq9U{-#o}F}b%@6~(s;bb7p2@+ zr0=9trI)j;In)HISQL2}MN3)jgy;EMxLd@2v0CZ=nbOJDQE*Tq6<}qow?r3@I!RnO!ZRs233B(?GQ(){EqV%}@9;nhu-p zO1(nmNzMH$($Sc?6=z{ZJ zAN?AYKGG(*gW!~$X>9?r;5gni2TlRa9TcE)F!Y!=pt9Y(4U^~AX zHkoXFHu@nuB|9*P^>)!@;cJ8cw)x149j5bf3zo;)gv@i?woseUh{0Kt$64*d7LIn4 zc4n2JvE!b{p72xhq*{OLm)6Y6W&``?Bfs`~f2-?Vk{rH~rHgjoQ~c-YsT={3rS@|_ z=j)%>mn-;7L2mSqBSWoo-tm5(7HeD_FnWRJ1$tkRND-o3}$$?xvjZ; zH&%R~Oq87b3iwy?3q9yg6R+l zf%7NhpRy2$g{*?Kgoe-jLAG}Q?$q707?-}){WnBONpz7P@8KpCkPNFrzJ{Pgqs+W8 zLN(GjdG&LM0M)2`wH+xo4DTTycQ>CJW>-@WqClN>==txu|KEq!EVxft@?qPLeuqkb zjz&fV+y}RXw*^Lfk-|b+VR8`rAtpNf;`whFa8V*8Ps42wz9wvU*N950VRcF5ttUwS z?>9dQ8fuoCC_`}GyLHot2sQM#Jg+17zlaPCH(gux4aI?Txxjw+zdPh}ODg4RA0V6P z{(=#2a+{7p&n2q4R}s=M8N0^8<0K!GAYg=nL~g^!TM~xkr;33$1@EA7!Gq#sv>H9j zh3}tuAC65zueTN&lBkSNPV%NULC(h;8DVrf&pIAKJ^nR8@Y6yUbg}=v&Y%V{%`ked zb3DNI(1R;B=$w)}2A&Dtz!E|YU#Qom`{HxX>bPw=S~%VA7^}oC3)At^QPES-e>eEP zV=s=CEBr0M{%W(TLB*bcuuhHHovIKA)fZ`%7iJZPeNzcS4O?i}^+PFSn3^)M;bkDj zI8|)~8pFz&C82B^>;IO4i-H$;NpltOaOHB?<~|rn`^&f0YCd0`bL_%ml^3=R)ufa^ z_LWat4<=`x2R)_$A(}y{?B2?cH;A`4_bJZ^|L-Q=SjL?q?j?vqS{c=*ZSbl{NUG-D zLNLpbVl4iA8VSpz3OphG@Sh(B3v|E1g&R%25>Uq(-%?P7#z 
zA*VN>x=4uUHfsJRE|Rgqy}T~?FQQ~t9N+k_`sDifN8GYH-1S70Rn1W%!Wy>BIIFqL zgwB>JKa@aZUwl6Jmp@fUcu_*!j$oVrxBBZ%!T;EJmu zm_C1++HWPJJ9F_}^g};|3+rK|n*C&U399H|8)I7}YrhCT+}=r|=lCbURp+%Oo6;PPgj+#q`%I z!Ir@+`m`^Jcq0_`=8yEUqz_}rf%XW4R%nZKA>K1*d9sF!1&E2?A*rx~UPxGwi$LRu z57>Hea*8sia_?H+{@;DVKAo)$vfTE5WHBbHxuhj2gm~zt_GQ-;PRPe|ca<-a3MPx&z+xlaC}@C61?e8u;LGXm>dexJv@PQY-bh`Km? zRe_vEuiM?T-`Kw}^scz(W7D?)^_EkoJPV41-xkOSos}{Hsl@8>`TzIei`idCXPie( zw`!cTiR_JedryQC5{;M%Ax=t<*}vPf*Hc49c}O`oQ5%lPZr+-BnhO)36cb2G4H?L-&Pz8M?ndtK+F3 zPf8Tgzh1J5Ciw5kKZ;+#$BLN8AU2B;)|iIjLGskhyM!PK^`Vg_&9FU~Cr-D14&nna zX{QC|5tbwVF>~|g8W&dLLNzmb?#e;WDtdsRLr`ojcVjhW5s0G zhqvupnez_XLdZvdbDY?U=y|<*z0}2l_VHNme}%g!G`d54f4St5x7^UA@085|X}3#~ z9HHqWmRh~K{Kpqzt-^hADC~{|t6?Dr*BU$Li5OHWPs+QpkB|R_-QTPoR9yd=5uut> zcWSSZYJlkTcDuWlAn%HK_y>PhOCyKCS04Y%ckD46yC=RNFN7(!0wKR~|6j;A7knL+ z3qks)j_Fm-y_VL`0*ZtSBkJ<*rEV*URP~}048s%*E49lbQo{7L0*6H`hU)-hNYVd0 zd(&}5h*L3^t4myw$ss9DrPDPfcT$A3R*ql=w)0l4TZs4c2Kotv`udwk-Tx9e?^+D+ zH&K<0vQXuNZb(7Te+_KVksxq8em3plA;2|xF^6I-0N~m@~ zUX+C)_orSN#Dm1(vmdOF`?tJIOK5)6)(`88gAmplbc_n2v8$pKshRpnFFKk3+(d~a zjLTh)`oD7hxkdJ~#|t)>d4BxND`s%3wz^YPQC)Ty-!#PQfNv7hIxMk4C*t@cf2)c> ztR!(Qv$%J|ONYTTSw02)f8BY@D@HWZbk!B*B>nv_Heon%e3XZzp^atlDMEHuq+kP& zueib%et)Y{sy_L9rah(s*D}xl_iPz06-|PSC?Gy2$$eA3( zLc*xqd*#(fh8sxNn}c5|mQF4egeR0|kK<5uMc#Pk7Ybvmw(&x8Jz_Ab3i!jb!0*>2Ex|M2Pe&SMK*u9*uv? 
zD3MGEBd@0X%AbX%zLo6QGav;!?cSk?<{;|C%(xy;xYE-=;6hP4?g*oObGC`^^5hX6 z=Z|c4ZL%j$L#M^?0?|ozv0OY!67qvlL}jtC2G}9JtC{_@tLqX3{Onu(7OzuG*$LI- zDa1P7Fs4ikl6v2k4pmOsori^mdD(Jj7TDF_G4EERYd+xN;h_r#-dzYc8yGPlg?;Tf zjxr@|o!tEp9UpJ>_p40Ywe5aeph1(1>0k)QRV zkLGmtQnr1aMsq~&hppVdXfWap_12azt-0)&o0+Bl`uX!?LHq8NNxh!A_wjPOF%}$r zFT2;7aipn9tw3Mk4Q-}U%CI+~C&VB!05 zi^yUAhwXvb(TITKMwiOV<^1!#;S_Vf(4lDSeMG zZCNrDsPgrisG?P@xLqLFmMc;>J)Ez9 zQ=;+1bh(nMFu;w{Q`luyT!$IIni(}l`bFK`hwtAR*wHpl9s{K0s;);qN=2e!gTrGc zjO=c9-*#b2eaQSx@sSR@v#KnplkIa$?`|5={`0)C!C{gIol&6BUcr7Yj8QPtQliq)Id>*p%DP^P6P8cu@QhE@fST0GQ>`j3tf4Po}s>$e4G4SI5eYX1N+>mTBUCB?r+-F$cUOSxfcCjRU3WG|ci z@v3ItuUF20HF`GsaH*5RqPmm8nPQ{bv|7r-14?67@$ zG9!55`{ufM)%*dT;x=nw_4Z*MpHG8K*ZGarJa4I^WvMcnV$pnPD<(Oy8jP3v3R*Mj9tA0)88mi&6dvsM>5q9esavY` zMO}_EARtRLP$Eq!ljOTx4Z^XN>MMpK|MD(6H9nnf4dGlJF7O@qQv~;E8wYL)#eZI6 zaZLX~y#lYS(yys#{xXo6a{R$|tu<+ti6GE~FmETeEO@{OGwhv?&xgDWekao+`^E)O zOPvHwhAcRDF8yrk^_mP=bt<$vRrfpu*G_#Blas^2{Yi&2d2kTW2)q6kZhaF|o zo`GjuX~x9AK9V3pY?j*uIJvl}QWR@!C%?&VXSz@ycnN`zO}A!#(LsyaR;N%|8HzU7 z%jgK_iy1+SCFzqCOaTbt!)Yr{<(d7MM{7Mn0Bag5Ha2#2TpaA^c?t@lnM$=@1;5V; z<=NR8SX#~4Cv2Jhq@97?^L(z>Gk^LF`EOBBdzPm

LFJDMhhZgR$O*JJX%-K3=v!fPjqb(cI6k zC1ABH^garWXo6N$lw7?;W3iS-tNX_f?my`qu+*|2bY@H56~|nf9zqu^Z8C*hp_PS5 zNjG+=g7U7iU+0tZtW+fAGCwNpWgfqT0~^)FamIM~n@*(FZ$1i7R8l`J%Ax18;* zmoa?;J}0Y{LS6?qXI-JNcnwUxIRuz24s*4Zl7W@erGjg}&V{${$!{C`H`X9shhF=P z#sNr$%d7XddtVAsA5>6^l2Be&mJ(Eup*SjOT=;4;zYNiYnep<0H{N?g2A8Mc375+@ z!`fmJ0f~8mWkZ;&AI!SNG_Y6BpquLXiNypgqNJoGA>eXKdn8-PTe4vM255-9B!XTC z(YZl)$aKji%e7`2w7jse*`I<}%0*wST%WqSX>1$Wy$X6p38#9U+`mzh#)HetL*L} z8&}TLlSQ2_Mp}ITl0z%-pI~R09)5nwN4? zL}f?c8fLKOy&R81Hw8zLg-<>YJK;@_N^7R0q3i%MFXvv(Ygzjmsu?&tA}HW~L5ox$PRV zYWrv+8iK?KT5T5~jYrDa7LdT=KNA68A|#W7>sBz^QirYLUNbTh+i?@)2$Ls-2~(G7 z=;0F+xA4-?Si~vQF`ZYWwFEhJ%rJqAG&D50|2Aeb;>Ie0@>TXfBsFqx!_1#Og0w#I z#?3d@*yw1TxgWNZRe}2}8>cR+ z#av%Y_uWagb+Wd%-1$`kmO=YQ)~+S`So6;&4Xt5;eP;2srD zLawb#erZERsMX7^Zq04K*IW`;@GJZ*NUP@@H~`oOplL$N0_4G4661eaQ|7*(5#&q) zryImhgh#RZ3}~%XMbCnc2zu!^Fp{h@28WK zlVJc4_P>7pn)*`A)$JI%V9qip6EYscar(EgkxRl8PZiSh?AbH?{n~$iymcI!*yvt5 zkX;-5^;(O2mpZiUJxZq^E|VLml!ptA&)8joq1|Q!A(|Q*xTp}Ty5{qPxjOIpLod+g z#Uy^!GAq;JEcD7?78jCsd}(+?yu4UHqn!g@!45LkT(} zg1MN^M$XkZ`RepT(ZmNZ){vVJP9ngYGx*c{ilcX(fi2A zNT-guu5EBxe*%SZ6QB!Gyw1xX&`Ef^OxQJURA(v;=`Z%CP>G0$c9vSN4`yd)cRe|< zaMsl3b9&dJXFG$RzvTu$2{=98UGf9)8gXo4X{n;1q$JfD^f*FCOI!4mf_YqS&|GWs z>jTlwctO;x6+n23N=oB3=5l|sg&KW5)lTjjP|K!>{ddOl@v=Y%oNaQY22Dvzq7}yH zJLr6vU^xyySG&Vead2?%?EtTWF4pI`{W@MnK_PV5>U^lf?Qota;PG;11A3~{9?Dcf zt)P9d8nyB7Zs!IYojhKzn4kYc85YJDcQ7+se=iY^kRSZ~B$xc@(*(ffx|W>^PpN5X zjsMo7C{z`cmEo{`3qRr$5EOtd0e$@IjypdoSUw`ya9;Tetp(W0&mbGW@VmZve%Kb{ z@Vl`2hOoikUt7L$u{-H3@^I3dJm27`Z~F%doFXJcrz`Xe-W99vfHrwwQ&Te;v-92d zkr*xVZwwB-)2rg-fnz{_4`yp%a|5q*byqq9!|j9!z@498>omL3>NPm*YIhdKTXGU* za@&RhuvK8*A0GkIaHFTE$BsKytJnx;b#ovY)y>_V-f^}XW+m|2(2`;lA@OmiAYr8S zdf7iICnugxC9}(ccO!;ACuGVFzTb7-r%b)1+I2&=T&LzMI};PrCs2mEqeYhyY+CF1 zFEU`s-ZAfF=0|oxu&`o?b?yie;KM@XwZeKa+pu21bV1XhsHbZAZHdOM^yaPJp*%gE zfM9rdxOctnB*LihIU2M%oNtdxF{bpax#WSpj@?&{Otui|77aV=eKukMLKSy~Sqt$$x;H z+MllfgoM0*4_8=Rd>_#dPz=NC*WKm1b?o-j<+F0+zJ;r|+quRh6cl1NRkJgorK;u# 
zBWJ5defsp09Ss&x4A4}l85q9LFIaSpLd$|2KC=$i^)TNGRyUhyRM)$5-Yj(I&PU$m zp!>ta-WQ^uZsSP0$sHGH<-Jn<7femJj?Th;0Qsb@?ca5>vlKqKPZOqG^0L=i2XBsvABj4OP%QveBBZubYg9OO~g zu1Akuy%sUQUWvvl78aEs)ynDYFweK6!C-uB*+2fv7g8C|p>2|&$OnuJ8zGOK5Fq22 z?ENkIJv1bxrIniQogvHx%2L{p=GHm=wF{`-2Wq0X7L)c>?Fb;;{`^sfEZaP=rj<=- z@(h`6kvb-b?Q;cQ43Q{*C1@>SAbAD)qOVX)1BN(4Qe_J`5u2E3Fl zz}D5(1)&ajShWr_p1}+LcqunF3k)b9_}|oWSylhy$HCJj52%seU1bBP z^?S=L04(?BW+TDcajC^q(#p!}oFqo6LQ9_tgQ8|fgLfKW_|7}}VEURzJCezSCVRk3 z8B!EGSFQu`hqL&ilzP(~VDfg=0rObiEQR z28oU%%o;{$U1HTMgx%+3WJCd-gMooT+{=si=`zaO9;bcoy1E(s zREgS_3;!&5MqNokFMtI_I_UAv6&0Tui%#_m;p?;^RA;T=jy3B5$uaQ~jrF~$GD3C} zh@kh;2OuP#cD(33_G)F)feRS&JzbYsJME|F2W>eqB?Z2C61i^fK$~ttHQ-scp^g*- z5kMH18ugWVt z+G|z=ftZ<_k2kwpe<0>T5gtY5nUl5u12@f8lZO*mb!?U=I5RLhN{|zDXZvt5srGwh zBy!0qYQW;GE&%Rs^V7Cj-JD6?Tw-b}0!W+)RD!R8_g7!VT=Tw{mZXe2H5Py>c5@v3 z9LJX16SeY$RqIvK|mj#3WfK~ex*MR0$0*gH|A6tT0jV|T68xWJN1%TB6g zNJPz8#%8P}s!%-ON(%V;8!fF!u&c9G9e0X^Ja!r%8wbcR67Ih}XLUdfY1Ls%sXSw-matGVIs z4-pXqbEr+S4UYC6&2DRmA_^65yD1iYY3ZhnbE8 zP#`^tR8oBdJ>H4bD~63uh7OMJ=fJb`0ica3At4d8o?N~ObOOK6I?;RX>&%RdFF`>H z2Ra!wJv}1GaI3mG=Qot$r3S1~>Lqafu89K{LHTBck)aER_>(`u45-Q3<^X8@R-r(B z|L*jti61h*=0f@UHL_ySOHhV?bR>Q0>tL%z$JZHAN@GO^nlS|ixp4Hsfdgnjiu9@g zM#OLQx!vU$&~&6*)e&ix;XU48_e1-+|CIxfM*>wAkZcJ6S2iJ(#dQZd0yApAYtX`8 z$F*di0!UqGBpxg@I@7;CH+Dt)uJD_Fg0<41`4t+4C}|d-vn;p-;A5Hn%wtagl|L)e z{gZyg+Uv%ee#F zj-5i#fHf^>HU`n2ooW}SzG5?FmOdFAD3fLVQor=_8RbDaMHxG;W&iN(uJUveNZ z$~`|lasUVdz!$D^j&MeEgg;(|5A%>@k$U%<%j(~fcML2n?c=4H$?`Pu0L*@dDw}{@ z?@Sb-RvLF2nJy@RY8%9}J(^4KybO4Rw5)6_PHPn*10$m(xc4b293+kl4LrhEv*z`T z1xrqdqvv3#0t7o0Th^HO_g(AP+kxb_8SfnM?xNLC1o_hsXz9CDHe+(&8H`T6UHf;q zVph|qnk|TTe|;?H<>gg$2o@N+?F)P;7{J$0{j2O}Dv>ZS@C_-WWtu8wV0EFoh3n9` zQ=|Fy(Na*ZJrGU-H$g6*+_ko~AVk23g7s@BkBEr4*(9Av_pJb8~Z$ z5IOiQ@H}uH)dG{`X@EGIx3Ucj-!yF1ntxKc=^T6Z!_Vvy@eT(ijIzZWZaGbZQO4F5 z+FjxSbua~qu2XFyA?;X4HRVASSir#y_MS1NR8r(+oTbSE)ke;^jWjgD{C22**iG5J zGcz+=+4~~{2x_zGbh+*huxkJmX3`16LxBhJ8yg$zvfhW>dE5ZZ5uhudlepVWG>}*k 
zH@L0!pNmgw;42+5h2QJLWMJ-8hL-pe5=36=Arm1eU)i)n5OhT6-+%KcyN*W^h1?vq z9(!$VZBbEE!_p-yEUs%ZrT}g%M(%&W!EHOSY0tTHE)2Sz_RVTI=6c2qAtg#sQqy$Vp zX;oF#qA<0BaVY0wU}lz9SAUt~eBmK&Jp^ju41OjE}P6M@y}s;oeNuP1(uR zSq)eFUD_Qix8K{Ss>ZKZkq#EM1U%htTc@(S+kYpAY%(+ohq!$Jc(P$*?xT&h>xn+;Pbi_~Kl7Y#rTtrw*kL766~tADJx;z71?jV`IKuCA}@e7buN%Efq; z8e_^jusw9z{dj<2_Rw%D|2}*mnZ5wn2Y3z4x~UN$5&@|gbfRf1yCcE21p>zRTG3C* z$=$A`oJ#aNZ6^QrTpVqt!0?#)N03#qr zVpK{EZw|eLKsk<@F%A~F2ZcALjPa53^+pa5+r>`%aDERCzJ$*`_9>XMbG^H;h4Pzl zM9g0RMMqaxvl9%}9!*TBZvl!3atle)|#rf?lF zoN_yG-*|fxPO*auE`VW2Xy|mx<=^a*VD(5C{`{HIYl)iOET( zXv}5r&Y&(x#o$Gz_W+UOgDf>DD>2}tT`w6v38Y$*A!3pn+ya;dB>^cYRGe!ciMS9z zW4oEj7WDMvqoLWP9ICB!8w&9Qh|(4jgY@S5;r86$VY!VcBO`-7LpcaGb-+SWQt|~9 zIq&c}FAFmW3kNu=AR2SRAcuu4wR)Qme4^c1Xgu!w{rh)SS!}jUkU5aQuiOuMAP#8%K(`Dk?up^K z27W8q{fN-9uy*oIyzr=0g_b#};X9MXn4*stlMhZyE$EZ`muC8PKLZ2iduAs!8ZXjwI5_A3wkwSzcSF3 z27p*m=Y2RI5l<#`ryDuo#SDSs6p1)|mSuX>0?;T_0>DGkAw2!-4oB4PI3E#`;^O3F zY|;Y2`sGUS^W#_OHRUS4;06t2cJ-k3YXVuiM8gi?G~yo~yAz6P=xHe_Yd5DG&`JpD za&|pr5(E@K5nvUtN&tGfT1(kgJta&{*T&qpM^vC*OXxKd(u=gupWg%8czc_f{I2%) z_I9=-P$=kji=eN+Un>924YF5lbzFS>FQC9w`=0-!2C&Vz=vC^$>YrROv3!t8c+3@H zlpp|2!GM4d2-#B*OwU5BtWHU)#jUIN0ziNi@Z9^Gq?F3+>kM`$W!iNEH?X5;6pmZP z{B$ealj*^NG@SN2jp}P1T=^H2&Qh&v2xamB^Hy?tUCYYw=xFCLbcEq@-o6TqK1?}J z$^Cf*rn9_?iVQf1mBC6N_qN#siO=Vtrav(w%E$%5#C}I0up?e{Ef4~KZ z3S;^Pztk!Kjv2I!jCO$<8h`J8{P-~fxE23hRq!KfM>1?p`rf4&hM@daOQpxY{W4(5Km3D1>&gj zJal>_E-sE_2B13Ai=A;oeixI7cO76(0qh{^$*g$-%Py$n)q6<#W)Y(PSIi(lVZez5 zyXffXM7-fsEaDUcN3hs>+#zIGHf5B_=S&XDh-r<^!(KChcsh+vWZ#rCI2m5OGQ+FC z(rjl3fcE&?4J&ux0&y=pR34wMG`znBhFHIrXT{jqSTexk!NQ`VPwgO_$f4@uqZiTM z%X+;r3S zYm0ZsKYBj4>oMEn$rJ=6X%%qCa(DaO<0DlqEv=O9LN>z|ZVo!SZ-XbsvIQKbpupc7 z`>eqmbOw>Vefu`3M-4D8vzj~*v^RJARpUToPwyi9+=03T-W355!q9P>3!F#dj|d=? 
zdxehM!i#hK7?n#jQZ>B9036DbOCW#V1wykNC~Dr&;iR6~Wgi&|>>yz}Bg!NubpMgW`X}0MyQb6%x69E-x=1fZ!({8X8XuLI5wKqZ1Ly zZhiY?00Dt|A9i+#JdW7I2XO`#LF%+sV)+ov-dMiW5YRxVfq0Gx+@c^Y49?~Upv>GX ztUMt=5EA;SnT*$3UsTcq-4U?Beo&M#fKv7P-M1TE;oporz_0}}bAS8wvJf{Xr<<%@#m<(2K9tI)-%u_o5QDT*V~z-H4ukZR6g=Mo`%lO&f4{#~wzVxe z`NK7;;af!!{BSx3noD|_>KFD`EvP6cP^mVpx2IoCc4{1O;@9I0ZU%ru) z?4fiko#lJdD^|@u0YY_;1?SIOz0XYg(JQK%Job^ZvpVCLH36aFm-{pKWWP(xDHMPY z2DNU0n3KNT5$F%iRlG&EXf_&%!%_<#2+%f1RLA&@GZ<+!Vs3fdM~k**x2@P6F5M~j z$z2a$kkF=ro_pL|z&S`y%gs#yU_wkx5G)u7FJ`xATj{)j13`=bMFokQ&j#4{Nu?t7 zP8RQ1H^7nL@IEqVJLy4%yjef>fm(Q)5hrO1G%sfWQA2=G_EZVR5!-GM;XxASm>xQO zXW^Eq^PYPpmOqfyBds2sA|<^<4kypBJBY4Z-LvAumYo$HA~e~L1kMFMI=AL zndwW5`5`4(t1%3`k-i8bY?2Pt956x_h9sJ^&4d@Qb@)?7I$yEK4D_A)&wy(k{+M$B z17V){N8uOah!g=Qk0i*56RHqOAiAH3J>ROWJwF{kKa}_M1z!MG2?lOBAZL5~0-!Y+ zwEN)!IUWxK`6hC)IfKUynp+YQp(ns_)UKBG6()xY-kpzEBxndygJ4iiP4W+*=wQ_6 z?F9!KnB;qL>T#jcv*H49jNd+B>=?&m`MsYL6R|q;R{Jg?5LSaG_HJ+eCHjZ4LLghi zy?*_A9fssn)RPL*9%swEa@c6TsH^pPyeV_EHT4k!%_hAk!g1a4B8oMxCtx@mRPN}Z~hhnqP zaHwJ~#DNA2wDZOV^s*^EDmwhR)Fse;@2-y%z~s}^Xj&B`YM03MVx1kpp;8Lgv@@>| z1%W;$T2}l0N;JsLe!9xoxVyaa4TDLI4%E7+orgQCP-2SeHj&q@vqEq2*j?8KlW=GV z;V@vNtOvLx6R2GsXBfvVbM;_GXlUc~H!h0I8W?~~AOINPxkD--`1dr&90qh5NA1@} z8eIRVa8ck@b45Rb8*1*Kp!%-ZaMMANFqxnSmiB#Li&c#dD7c6p2szAjJS!?UAAuWh zs3v?WUGH}?Tpl2e3!q1^Ve&vHP(tcplu=mfm5j+tE# zKD>YqRDX)Nl^gG6Uu%xj`;KugbCRVUN*}eWy)Nc38KPbs` zs=$o_+R*ydiyCQwkk*FSI;DXdB564-nb6-`VbIJO23L8(aSvkqNgOmm@d?YR;@l}P z7)1}Iu7cSk&Rtvl7(i(Mm8>Qj1DK~Bs;Ds_+Ti?ro`F}`5BNKnb?X!K5X|0LZWrmdX5>wc+9mx{?@nmV!g+Nb)Sj`mFL}08AyGKrZrTNx0`daGMWaZJ1fc!Ghu2vg;D9ruu69 zzK^fCxjoLov&XLl-K(J0NmZ&buR1mA&*%(7jhq;ZMKrVk!{vA;GLn%T5hfIs75G41 zT_WJ7$KBlAG&)VhQIxElH~FOL!w9EZgwY$(YG5=8v>@89hlY0rd52e!_7 zu&HJF!Ax+(mtxSEp|c2JfR8t6^#17@=ym`|>x2{*7NQ{X1D7_2!~7R_v~Vks?xYj~ zZ*x-VuAw&WKEBo=XN>f^qN-|#0E zzq(Zh%AK9bB5SH8aDlsPnYF5nDz1k+#GgPw-yw&ReJbPvj2ZCsbOZgQw`qhQWdms~ zqlT@dfOv$3kKb^t6-+HCct>vxb9^tfmHNG5iWCCeU7!@B6shYLg!F=Hf~;wXdGd?4 
z?ArC8TCn1gR+VdA=UZ`RN&|4}5tM%lvk~Com()${0eeNo#FSG|P{__%t#HIx=zIx9 zxf?ER`j+K29D+Lz~H*Un&M&`j|*=O1;q%Dy}5k54XC^@^HuCD}SxOFWd% zm3w2d=n=*djshq}-wx||C37uUS`1O6eq|>W42BY#2y|hKwtrW`}mt2Ye@D>c(rjw}jNpr&m{nPqMerp6G zHx&SA&jF^Yw)Hg29;-U24{-xP6Ngz}v;+uBdlr19gEZry7=X3AoEqbLB!M>yW|9cS zf`oxLDqPu7p;+hyMv(uu^FF9zIjsA)Wje2AZy9gn-#%T66XeH zzy85SBRkmBPBc2dbEA<0g5l8{P8_Dn?fUiBQ;_xt-jujl!% zUaxN5_jO;_=RD8jcps;>#z8{^gSOd{KJSCTb=Gx5Pm7lD;EbcCquYETDysDEh&$>r zlgNpLTXmK}-^Ld>ZDbW+llq~;Nox6&ws1J4cAmSO=G#QN^-hb0Pf~nZ*X)e8jGwC4 zRP5R1W95U9dIhbD^-dPXZOM9>AAuvTXPH&L`MC7f%Ji<7a}E?miJhTwMr0rjkPgTL z6S9u!*p@^KX;5VjF0K&L9@1_1wS^CAY0(ivgCh{czDh633NxSkwA=e)pp(KU+!0iv|aV1?W`dB)sf@%yP z>$-!zQZ;O-A%t5EOWIp&1UwRCnJ}0r+)Q_b1`RrO`_J<#ZWu+q5UH`BQzv z&a*<|Opt$EC%fTYypA);>cM4gIUzKy zf_+wUenU!mU*Uv9Q>21>RZlz@IPj8CLQ<*)^7@(hRqr_sz4P0#^1|V~y0@B~-0V2v zRYJ6fD2vbzK@$+CBBpHdMv?u-n_f(b@#`=0sd{l)4GLMBC7kjsNQ9g<41&fVH=5!5 zW-rBoaURcXr$x~l%9BNQT`nGQdf*OkNOYh)KrnQhYgQye3xTcM`u6jMW7hc&;cL=o zwn0RB0dK)%#kJ#IYf{C)yk%!oQ&%C&=bD=mUb*+zOVd5$O_nE%nb^>)0`qFW`7~qKiHbg`@aKjG>va zo4A0{q_7hvyunL;&$H*U#=Uzm-ZlVdItVQ%QQ_l+%fWQL-16DRHi+d6dYRwe z&6O8s8Lr&uK|JA+H#o^ny-ce{JiFt zEnCEXv;d77On#so>Div~MCfes)m=Z|>_9RKv^JrMfXG=C4yoI6-1&jQPB2+l9`)FV zyN~CTJODh;jmPNF(WA5gX<4>y3x$PDs>VotLqGZCkH4+DRBNgZVMFv<@jVg|H*T2S z=*eC1xY-Lq-wAxNwft76$RqhvIIK1!cvky1h}G%m2X4Q@nPX=5(9EC>EGVK!+0D9T z%X;ybN$U5>+0`mNRv((0n?uDpX%OoYTI8N}*caWsQr89$go8>|#PXhnF$V!DLiqAT zP28w+@_x2LJOwv+GGfMs$E-X`jEYw4fwMBe>6%s$5RAiety zCpuA$o6D0nu-o6&y?rYo8`sPp^^Arubg8*R+3Gz@6nSQi&L51h$58>Qel&_zEwndmx4ugbe&9z8L8@R~jUa}w!j1ttci_b* z+;A6A8#Ej6O`HP8dprhI1u_R|T3TnoqI>(Q98SCkh#~j=FS}>xRaEFeW`Nfsp$IeQ{Kc zu&7Su7;u8hy;Hl{8G!5`oWox+JO^WzO$K$~{intC&L}JC2vfTbxyjomfg!Fb1L*+& zMrOOC{Yg&CldDXC*^B1+|1Q@@5!4;vIn=8&Ok>baFoagWj}&S_6^`8GG%Mn7yF5K` z6EJLfW>OMqU%W2c%N<8uu3wi�$!eT7RS_->z*cZZC<XjL_=BVj}`(pngkoiL`M33nJ<( zh5zGw)!&UN0w~Nw9S1{^nW*d!`xDl8N^0s1RBqCA!aYVhI=`mj17-=|&CR4@g#es! 
zgrqk{4~BAMu84i~CWefeqD7Q zsb50WHJl*f3^9&Hcul97?~T_*0s`ZT1wP?JW!K4T9u z7QBS}wVfnQX(lZt_2P?G_}f4DZZr@*kh$_DEz355sqSGFj1|yn=@k9>xYcfqd+3(4LUP2z1>Fb|hgGzM}Ti?dchRo<#nj#7n zMrCSZVnT?y>TZi2mtpAmuyJyRRB7st6-b0&yaptl-7049fP&2c3Jqe&y&?A!I@K?R z1^M|h5YNG?s`(s_+7OVG+(cMUy^_^px!L)cSI7npSjf)#_L_ zZna%>sFp008s{RS+i!wX!_8o+Pf^wrmsP42E&NYB7cSGXWaDJkJzhqJX%}@WYMrwl zI?LqJ>Vg+7cF!&MG1MuoWEQAMFzDng8s;w+IrMqY0v}O4k85*OODpqRuExJs6}m3? zoZH$WM0}jhacweDR^z9uDVVn-X9}opzUMp_HQY|T4RAIG(YV&hUt+8UBJgIyrFOva z?ygTq;}I@oNvuICbr9aV^_R4U@2(!xOYz-ma5e|>>iGz!46RQc**)5Ga_$XQa(Dr* zT+kuG+XYVh;=Qv?XK#Wffm3AxAioT$4(|B=lOoB--WQbifRGj#ML%>~vCoTj@7}#K zTSF~hvCFkY9@LU(3a+U@n66+dLQ7vN7Fh*@uBpttdp_{+?e1*iaS@i5X2$c&S+a6; zqy6e0oPEASN-en%;(Tr+eQiqF+IQ`^uI}Ut0q%n0(6q4;170M-5i zER;l+^{|TZt&Xd$$9uY_!<8bw*{+a3W{TdP7G#(~O<#F(t6Ql(rHhaWI~pyxBlVbR zd<kN|)J!a+l3 z1;-%-C!&d04cqbl8cI5pbKmlE6<#Gr0{TBcc9t=eM={L!r3Kj@`)*n!BTyuIcRzZY zV;~N~4}=U(e4rTkGuRmOPAL*lV;Gx+VJ+kH#|yzA@uH`!-93-vVn$Z>dG<_JN=7#{ z64>y2A{9euXUN3Qd+Y}!Vg7iJvwH2&%-s<3M6U{3dinzhs>5ssA?@=0$)#cUgwxrr zmr*)O=OLvakDl^yPBOK2z_-SNWsTW*J<@i)b+vjz?1E0N!qs+}pLiIZexpm$4CB?n zM?+HAyi53ITZvtCrRJA}e2EaWF?UttL(S*OEQWn29{c{Ru`S+xGm648TCdK@J+9;*mY%MQ&T9 zEBT3L!m~oTCtPeTzM6Hi@{{y*MWgDqRNL=q6Z$0%(K1J(MNV7-T;FfOMU5^gu;VI6 zUbc%Hx()ZIy-Li4^ zLGOJUFvx5UL{tkjFG@atRA5Fx{J@b&+y!g!r%qDLyk{UQoUc&V~ z#V?jR;4t-o!I0T^l&LqzG&{0>kTb+-wo3Qb7j2#!MaP_2Y`1jKZ?7~zq@wVx^##Ja z`@GlPo|_cWqowfgv0pmQUvi6fZb9hbtbhk z_qccpvI31P;soptXMA0u`tjmp$_uZWr{E%bk}#3GH$#Re@>?@2H%32e6#j|K=>Z-{ zB4Kix`IV742)dyTli4XfJ%4;)1V;?Y9O(T?L5KX`zdtI=r4mHvk=-WPHy*i~)$nkj z%+&3lrV|ZtiA4=LNgeH7GBOL#HLX;Z8g8AwvId6A`umG`C4LgihLY{q+gjT)TWwBV zUn@?HYZWM^LE0gV>`YNj))+E!Yo%c%b&UKu35n2Tpl>A^WY^{42nn84Po__ z7U`<_#k-q@=z&%Ef$P0HgYc;>9nqzdXN4GB$n?)=7WzHjpK z@`a5xTSaa^*47Pj!E}f>+0jMN2=%zibX)q z*^DW5o!iT?Y0)t^9sR8yFt`L!+AdY#$e^o#H40g#hJzDV^Q!g^e-uhEF8)=!(tdV7(ENRdxk5Y;+?*T zRJb+buFFDsD=;tzC!oYNRN{2R6fJ8o8gJ2kqT~l*6dAyfN>4oO%Ps3d0I?WnMg!1Pwy zkQS&IMT@!udU{g_L8TDsvR@+&bD3GD=IHcr4!*-TDV`Hz@%#6v7+A=2{6z24%^+Pa 
zvjWd*4x&{+N{Zn92M>ZD6u?gf2}|gEnkWiPX2hvP$%-O6n}aPs5=3O^z!I54kcyz(5iWvkn!=!S1AAfK!e2TW%WdvLKXhOTywyk5r9NXM)D_gh=d zdl_!GE*|gSLqFX<{?~$1i0n5fRUxlffcD4!Q4ywFh;cpGGkB0^_|u^jG9zwB!SBR2 zvDCjx`ziV8Z&U3*bwt}USPazpmoqQc|hmG-_)EB{v&)(TCT{!9R%ZWUP zRiTKnH8#c>cK7K)(>j(6^*oGCg*S zy}PQsaI{?ZeT3mo`H`9HO5FP6pGV@#lo5Rp-qKp< zlxS!GOvsb}46!&5foI?KE``y&JWu{o>21}B@ zSl+b}-SU;6ac;vghNDUvrFIOm;JWc)_C-B@y!AVNUW=k03nd~9ynCG~31=RJ(H~}s zCOO3|-d0V9*q{O)Ksg&+-&c>NNy%W9>hWHyJ#pX`{IH`&KtWXWeOc&)kAPqF`-k~& z|7Y06pI*-QeLLO8lHYOVS`WsDo}8xZjg_aJZuhnAa+jYFue;`X zK<3gVt`w6dZofPJ^p%}Cg}^-vgk|bgCI6lNqFLg;FzL* zX;@k9fUgQ6&=Kd#1&p^;eg^CbBc;`nrX=Ls>K_rYBGhSbnk!x>`eCr`Bd&mHL@{Ai zp2uz(U+6ICkcjxPyj>?70us>4-Jt8t5aOR)%}7@zdIR*7n%Dw&3)rK!ZA(gEVoQM4 z4#@xdu8|!xFg{OxC)cJqt z)bWvmwpz5R&oJ@n`PDo;lap`PpY0)QMw%%KwtUTRj{mwZw_CqpqN;rG-ZIc za4iIV87S4*vo%m!-2nSuzB?!nu-Ml`G`Ls$Dz~BZ>Smn&ATtD}4!Aaa+PlR4f+1j? z4LYAj<%t*Ap6YMQR&Hlucn!}ilPNKE>}<*VR(l+Y1o zB!xAz9UB_!BHjbOk-K0BiL5*;7hUb4K?m|`h~zEl$2rlO`JLy2kU&uLP{|LX)6zSA z8l=O99LKauI7m0_xYJS@J#3rkEYi7{emqvVuD%{yTayn4I;w$U2TcOI0d@)Th@sVo zQBHx21RN8~4#1R=p#?jL)&oW=BB<+MP^69_2XG!GnVpVf2w!#+3E*cp~p=_;-QV+eCu*=hA0b zP8qnPS_Y!(uf)H@<`vjFvMsADd~bgc;+F%JrzR7_w^(n)(xoY<_`qY!|NDXGt% zy_epuh^$+tK1>0iWtK@!<{(pVO;0-#0!tmEG21SD*q=WKakZyhfdu5|4g_TCMto4u zPRwf$=$;B~Uv?FzB(2I*m$ObAO@IGz@09(-{V`H3-7D|AnsLc`j0q_R&*jrwl;4rx zPLk(6-lI*AIVi3TMx`;@DsQcCMJAF&*~AxP38m@!f^M?B9wY&mOsMp-=GWy;lD>6UE4|kn8OP<- zwO?;`eAv$#AUmwBe|7 z(=+8u5!CEnB}`Y!(GJE_xgC^RKC)fp=Pb;m*r20nMc6l{ieo%(3N<^YYvLUmQ3xMe zHY#~l(d(H=zjA~e_UgD=Oiwqua|<2Tk-UU>zzZ7{A)F-ta6ReU$d8zmSJ||RX+Kfg z%%f;e*_8izd?DqP&>cBw%Y?Ml1-cNc1)*qYG!(cy zT9jL|kl(t1O}%UUth%|50)^Pr%j6hGnto-wu|Zkc?GR3(txWlf49l&8o}b!xOMd06 zLpZkh+Fd?x+(vBbNhcJJ?_1AG*_C9K=5klbqOeAYq)pLv8o&0{1gjrs6G2 zBcTku9kVw&-F~mt-BZ9h`eb19mk=t7sRz>SWusT@kHy)7z4ob|+^g+h7bJXtj8o+R+QNnQKFjP+m4CYkS^lvzntLiBB-%>OKWK@dH(r3nN7)8B1Qw2e zj@g(D>mgX{dYd~Jb3#;8g2yvh9-{SmN)Y9j`f%McK_E&x986Ij_YT-z#M#5lX9%rv*I=na(#`4O`;b 
zfYQ_$|75%0V=BF;mf_ANTNP|N`Nz)9Z@3Hjyjv=Gyw8P1T9~cj5JgfyH3el=O9GC0fp~jy(J1%`{-to+(gV+)ZsCZD=jK<=gQmYJN0n zHl!`pFGKF|GEz&N5gK+dvG(6Vn(4Zremdj7%hsoa-vm=I$oK{+x5%XNc6tu!qdL-oDus@2R)HBIaC}G8?yhg$bS|jv)@#Br&yU-H(E2< zip}%i$m6Polwzny@{^=7^IV$eWzAe<*#= zc+|e+a+!|s)u{TL6{)JtU`IRCm3p@Hzf17T?3Qo4IVn@|cH7kazv1B`<-<-|wHi#q)q` zWWuACDJIJAsR%_0cfaWwd}8Q_@pN&C`ENFn2|SW`LfyYAz?XC97?zh$TA$0$ z65&$%zyD$6D23S5>t+A>O&H@k$_LKAe}A7svb;x&CT-!57B|m-UnA<8OuT45;y=Gd g;@I%|Uoz6qOZh4bdox_{+eb)RhjkCVIAG!bKQGaOhX4Qo literal 0 HcmV?d00001 diff --git a/docs/assets/images/mii/azure-cost.png b/docs/assets/images/mii/azure-cost.png new file mode 100755 index 0000000000000000000000000000000000000000..942e1c4d902bf935e503e317a197741bb9468873 GIT binary patch literal 43734 zcmd43c|4TsA3xlpOjMc@Nui-64HZ!dS%xG_DP*!$)@;X`b;3wB$r>U1o_)_UCR9lF zeJ5MUzR!&DyYJgp=hXN2{PR4o=e*9TqvpD<&-%VT+chr~<)!!09HrT|ZQDMX>yo#( zZQG03wr!_9Y$xz1$4Ae+0e;(Vaa&q^TT1n@5#R?ZJux}4ZQIg4Y028VfS(f;Z{9%x zzxVX?^!N9F`}VD_u1?adRLSnkU577XVq&tgvT|~A_wL=3H2>VX@lRV@8}K9WtBm#M zYuBzxNJyYiC^a=Tl*K2>e?Cc>e**qcTwEMj4lI-~{{(yzSPJ}_S=0#p8XFs%fp5TK zu^!RYuVZR}AA$dW@ZfIh%h|T{ZZUo?TXszM#S|I8y^mt z|3Cao6fqUWIHhsP*cnzY`9$NM6njOh^u6Mh$-kbjRhYT?=Y{sbUob*{@tkq5jNM`I z)9R*^tfHBo9GyV@nMdhVDzxza=lj0MZwy;>@)Wh)7_{^1m<#^ph-P(O) z$^#$y?4{?z$J0DAFCU)kCo~mB9QoE4_HG4#`g%S4W8LW_$85d)M_Io17Kdg0*;g8# zT=t57D#b@RQ*_bYQ+HXc6X(JqoRu+z!C>qY1p|W_v_pG?^YEX(_L@I(+VlRk9Q6k! 
zIaMF}a{Vv7hin4J#)4gMr11p}QvU-IQai(6)VY(Quc7q5!H%*Ti@MI}X&`x7WZW;^ zD)au4AbANN`ch!!KRJ~SPFLKAe>9&JeNI>66J6$|dCpMnDN4jEg=4J0NXp}a?I4SVv#IhKP;PKFa&ycQik zU-yZW@tqxd)$_SvaODwyCEfR^KJb836XOhh$<-5jYS=^HW)nHe zBV%`9xx~z5sM`QOD$(LvKhahs;WM`n5axvtPBh<=zCY_HQ0%UihnDkLw1w*gXCjCf zyj6C}*ljCiB8(i*7ozdq;72+jbvGyGIVL^y2lKM?d@b>%&tjV&huis}=~=Y%SDc7W zzhymZ)r{+s{s)ecTaC1(X|hLhVaqTJirs>y=5Y=g_+CBokV2o4Tu&6p#J&Iy;n!Df z7}0ViA5i6xJi?ot-$rs{=o5Q~-oSIrA1ry4+ zOXlbY6`UgH^gMD%4Cm#})Aebcwc;#3r`h)kj)Xe9_DvbU2k$vpF>4o~RQKK2+*h9h z@JEvzSUwvqtnmGEQD_OMr3rwB(61x>MQsRvMt=mI;iSkr5f2Wox3=#!s`_kS_I!va zyag@zjT7SLU}RTN=|LN<+Z2FYD*Bw3CP# zX+=Cqyob9AMJpMfa^25y!~^!uRJF7+pm}OqCsSn8;N87=_p(k-n#$x!@wa#$ORV!O zyyZGCULW6Ae>s}-hI2;KeK!u7^MKLhE1jCN!|oj1Ze_9IEz*jq2WeAw$=G(q-HkL7 zyz#cO_>nDx%x!wWeuVoJsDl`^FRhEs+bZ*kJ@*qwu7t8y6U#;T8BHFa=;zP)oG_0> zYI9(vfGsXYvdPemui)heSm#g84h-;!Uv5`Ny}a-tz9!S|sLbtSu1un~tCvDbEpW9- zlN)+tLFdz4a+9r>UHbGYtYt~#ILFA{n8c@y+hy$b1k@}on`D?C^ob4_9Fz2!JHrz; zt;E+y?QwF%Du#A9+9!J0diqtEDPn2Hn*+~-rUDR^v1VzqsOq|pM!Wl>&&d(g{VV;$ z!7!s6;59)QlmCBwTj0G2l=)ObIv04sFmpE*vuTxg>A@M}#z_ zqabj$Pqe8XF8rKRMbleyv)~>AdC8dl%JOu_mpVZaz;0L-k2kOO?F;Lf6BgQgy%Hh3fz* zAsHyey(>qZ0H@sF_eiIwo2lH;ax5@M8eq^Mxv0#3#eaN6xFb}K0Y0lS5s^R~PSuahmdKN*_g^iA~Q9GG$Q`n3b~qmXB&Z2B$BctTz#S&hQacbVU%!?kc zZU%)u|JBC;PaM^77SZ(99#8=~)h<6g>Fp@?>-U>U%uEe zA#|^aB@$2tr<9Tkf+&DSH3_Z2AyYQ(jyg5_aN?P z(`S`ExwQGuxG$(1-YX4D`blCqAwBdp$l4H|TRhS;4_bk7Ppl~)a7uznO~5egs#3Sn19iF+ zfbAOVRQRdrtK-huO%qbmR86LPS)_Yk-ekOf@pgBg0Dd`{LAC8Hf$8xGv;UU}6ThAp zYJ#%k%~i+lvt$b2_K9wpTbLcD4z5?cCo_0aDL05U8&*k^blM(I5z`@e-nt%qW02o4 zi=iV)S$SWqhgzzKxx?XPM{1!=j}m_XkXsQy4qD(8TMUvyzgl~3-=D~xa8*k~ zscF8=TtzE;;p?T_$Q5_~=G3tO|BQrGF4Y@p3H25C{i-ag(m^o9N7|sN_6;0XvVFXe zWgQfCv0C|sKyX7M{^#6R@BG)K!Ccnm?SwQ{w6a6}N6#KZp(%87Wd-lWPAXYr`2tbH z7>crJ`MJIpgZcC-a61+>l}F=Xy^GEv``*v18C6e*{7YY$$MbX%<80(&aRU#uXJ6e` zGLrR)Rbli2E93yN< zDc3CG?d^JWqbu*;FKOvXrbl0R=C~dKBCNyrXIUl}bSSaX zZy6u!q?$CBy6W7&tlVN^oY-e5A5d{Ovj;H{G*jfqVIbk;RpEf1onwO7}^=f(8PI 
z0SDn~MQqN18)+sb%yYcvY=D>%Z;+eQJ&FR|@lVv+!p|y|(aR1U7?QgsqczMY8ji*u zUp|g1m|U*lC7E_hz9l`I`4XLf8Hhl~&JXa7M5-7I_i)JEw#G9Qbce8~yCbP)`xu?b3Vh-@{sn2 zAt>&jY$3?_2&{aSJFdrM~c5;9FJRs&zK_G9d_31SAi)728@69 zwIZ(9+s^U|8&k5(N%yekjM^sf_dT(mf`s@&BhC4pgQrS{ZaZ!8MW&?$au^(Hh~_Ye zUS@_V`6>oGck=mZVuQ}!DqI-H7Me_&vhOuTyh~bcn9#*tTbD#|tn^Z{z@9|WIR63t z7DV9_PqPfO8fR-Af~p{0g9K>5pvIiqUxzQi3*zWkNTDifdMHImx^n4~XH6cfRYXXv##sO>$>Fc2m*(<}h(=G2G(OuphKQyjlHP1KB^DKrqsVNm zXKTlOF=XLwdDVo9`ts6qsGKb+?g1|RtJp z)UOPEF@xDFGhf0zV^HzA2Sl0~pC3#LjzMaX*^)RP0=(rgQhweotcqt{_Bk%Dv8egs zXH7$1X5L)JL_2~kI_=e#_%+vCke6W)8E-n)9}}fja4a!QhXp%-Czc?E?x}G;N>0X| z985l*nn$kB4ZoB)QY9g0#`A@kPF2`h@ajFkgk~!uTDQc$q<$;|^~9ib&_V6TFs}SP z>WAr>0sIV213*Z9O45+q-PxB$xvdWjzJyv9YU*oaNXc9%5wqH7=4iBZ3aP!tTUS5F zdVmqbV|0o`_yk}T{wdo!7;(9F+N#;Gl_`g`xgD-)q0)=(V{@S%6K^^OLyJ2vrSW#5 zcnTTuyi=FE9316?Dnq`;FzVu_>WQCM5{gi&`14kFm^!wk;W`^w7EIm!+~Phm@om!X zXi_<{q_Uf7>6Irf=Z~vzGPy<$w;>9%$g$xTD!3xlkHw+g4-YiY_J%MSBoCDd*ns7F6k7&ZkccJ*$btQwWZO##D1f9Lzc)^CXy_KU63vr8Q;UYOQ&Vk zNdb$8nsOPNShNj}IW;aGed9PHZ;tyI8?Bz?f;hp4(#B1-&lqFFSW6d!7ZO(~4=b#~ zFajlZEIOYFcFsAS_ri@&myKC3w=jq;2E4ECi9D(P>aHTkquA3%a1FFmu7SXhgYsx9 z0a`m(z#yZNmQ;}9%-l#ZWB83v3}QYH9v-IO6Uaq_#;OD`&V5$))xw1*0Ut`O^;hjc z@Ru3GIk*Ir#k{=&Bhye%Z0G5?^x{}=e6YYx(XDEncyoAFbCrn6KxcWcX8vp(|GApm z9$?A1`tkmm@dxW0Q^m+X<>`Bf&maJc#(L=MF}$*r0m6z60z>LpRbLoez;IKKS?6>E z3keqjZWO5QLW%3X1aJdoe6V>DZUn#M=M)|kgl*qF5ZXs*kwa6B6tp3JJlK#Ao8Kwz zd1q_`Lxei1fq8F<=5!5xDTbW|!2Z z9z=9kGJ|tk=nWFJ|Cqa9fN0dKgGlm+435uV&YV7YL5V&G78H^)4#dur*||(dL}6nU zysPAgtrl`Kg#30R%6Axk7MU8of2eMmu*6tS9+WhbB`Fo zIYugt;P<^zfal#)>j*Q3&wMA?a@IE<-CJTOHDA3QbH`R$&E=Fna36;~#Pz;BdlXGo zRo_ne$?T!Lb4KIxCD<6#o6oDGsqsx;iUIu(WsD20P<%#eqKSk-P*!Y z_4|_0SSx9*t({)NsXWEulH$6&qj=*z=0a&s%a|s_z~E{Yg|}nAl`Tmxs|n%vI^3fT zxB-V-5v>SGiSsVKw@$lUFaL=e+~3>3vV!E=C*E*=K-I%(U|;02lbD?(jbF0C65ztq z4~Y&la|H^d8bo93RgQF&S9DD}Dh>7g*#oNLC`Br7uQuF!$KnUJu8Mg~wFnKhrxh0* z4m4Yob5mGjcEvwl&0ks_mwqu_pjg063zR12F6Y2WG{d5kh+wlE>Kwogz^D_ z+hRzTl!mH(t?4xN!nG&JX8&}Ve@ATvpeX|Bx=&UGq#1g47>gU?Irf`|+M)o$06jrX 
ze&oz?y!79HX$Wuid1j8Qr94oQsUQFMIhm;1T{A+@LHjw914}87j5s1Tb(&s`w9kU4Sgvb2PCy_l-IbFcI1pV!N8 z-r7{4FNfR5fn)pHWT>@|?{=Y%Ok)S&O>D!!Zu-#g@U`H7#CaTRbp*!pyyP{2n7jTz zJb015+lfNfQqoS_zq5GftyU27!Z)ORN`}U{;_}f9dzu^=JpF%uAOZD6@Bg;RWDacm z|K8>QcFdEt_DaJkX9?CxQ!3@|+mF5Xhg~%X(x?`J|D_wxl(QXmm`BSV`Z?L}OB@5@&Qx_5l_@+hriy_y9Z~M=T z)EeLv1IQ!Zg-kD|P6J1t|MQXTn4yEWRziP>y?qyTqQKpsaiX%osa-z&>_`p1x##R$ zW$Q?UD*!lrm+;r7uF+eASKVo;16OHK2lfQY_(&PSL;D|ii#|{5q8`v8=k1T+XEbW5 z;cKh{Lqt6@yFVGGM=RoL+2;ozlvx+|mzy%G?H<6}X=;J8WO}FZCp%})fhlw&_R`%X zZqJK~2XmJL&6ofB(V8yAz~ZhD_0rVc-zM3Q0_n2V{y4@z7Lju=@ON<_a`U073Zp4kHoo?vk-cpu?$D{Ri2(eJy_!T6BQe2mm8AFSw}y z@O=6F*-w^kv_>jprTf8Aiv7LMEjG+=rXFOir+BQCm9gHr9sluJh8EFtw{8fE|7)wz zVtb7Y&ROP{Gu4GyUQ;PGM;7gkty&29{qnpAt{!kZ4hBV=IQkEmL3Y7nSogJYaikmo zZlp5^01OV=))jT0)fez}VATe?IqSRBF!Wvemsx(+!G$wN%|)FMa{F|62XG6cBqgjB z*m*nRf_E7tj_*+}sIc=8o!UN;?rZFMLB$Zxd-%-;XnF_#_oS}kEndS zVw-4mCytxg6eNm?%=@|jcUz$OzDWOYUN3U(>exK~;O6l@3=>(f6(5n*A1YxeRkxvO z5LT-DHg)NehVs(byLK)Qd_(F8gTCr2m%?Y7{&Q${$~%B#>}kSW4pnq4;nrK{CjruS!Z9}TWZf~_or13|ANCSoP#w{a6^p5_NK zyU+E`)n;52g)sZbme*pDE^`mo_55PPb=VUtS9c|@CKWb{25Zcks$uhggzl zr%hworHy05AGslVG(+k7eZ;k?(|08KA1&7ATiLG;g#iVJw{%%ZaZ>L@>_-Ulw=5k7 z#MB)tfrrwF!gWz=4j(B11Q^iz-tL~U`j!5bfrjn#*KOExVO2z@hmz-kr1vrN?^KT) z!4CpQH=f$?T1~Y@&3yfu#}153izUC2=_rK8Vt>*f%-*wZi)8`LD~vp(*{E?Ic?nnY({XfSLFqu)fVwfw{ahvUppKs z&SmPN`xCX2D{dtWed+3*h(eM;RKfS(jISsJDK2jP^2Z=AJyV--ktc1Bu%9U+EkWqR zN3;^TR>8KC1)uIQkuDS2; ztcF_vTJhii>2K6pRx6;f_Pykt#ifujA~I0d%jK^;L%MK+sm_%$hCZIZo-EZZDstO> zYIc}3+5r0`D&HThJ>LJft=boqNv`kj%@YoN?S7$CH6@TFYesSI)rQ;;-iF`+;!z+} z_XX21V?$;v_Qw^9nB`&3l6=@OD~#n?z2~!^^uKGY);~9+9-Kh2=g!>+Yq5t7@`&Lp z&vj{>7GgDcg0JYc_YiTPrK6&g057JOKRg?C=cNIR1ORzU!TPcqGc}WpH&%9_Pl_b3 z26smo9pvp2?`m}WE8jWMcPY&UZZlPGCyVBG7!NC!I34R#`s>F91`2kd6UibXg1a4u z6}zK9jfjCJOb^1(qiHHj4>!g24#W0j4QT#l+RWA2ur6kg(dtg_J6#>G zEw}&G2KEi&#Q_2Yiss-3Q7Z!_@x`_OL#CZHM)NSa$=c3asyRThqUQTwHF{gkIczdc zPTc8GU5zJw4fEWtl@8C*B4ml)-!*$C`I^RPDp0Y;vANBz${82upH$7=+g|!tfh2cJ 
zU!Os`Halp&?m*nfZK)FkfnU@Z-OXE0Jf;1z246a^hQ-Vi2&c9L61ggAti1Z-2ZU)n zIhfD)-dG!rI~ zK8&5*FZx}*4iI^L4!R2%nfVe}ypS&3Alzid&B5jc#7$_2bi+ScaHpBTXX1HtV7xE4 z$Sn8d=G^e0pn$ODiko_8WhtHXejbr?Fxx1)4ur$ef6=yye6=adJjFz7F_V2W<8EYA z5BlY<>qeu9Unu(>R11c38Iw)v1~h;hs)2CbJ_i=|FN5wf1ebmQQe4XV zC4~8LUPDHryddfYID>N0UaqVdQS%8P7xwz0od>hy9KtnpXPO%s_)D#r(AZZ$>FdI{ zs)5REAHD8wf8zA!KnecL1M6i9=H_3ri+Ao~{Bir;>Srk{a^U3z7JSaK{`N zQyW)g5o!s%r@@d~DGdcHXDF_`2H;}5V)L@cgq&pZ$hC3!sY)bFy@cTeyAX60bd?5c zEG_E?W1S3HyQouAw>w&TegpeL#v5D;%yCl|%K}@MQ5bRtEclq66w= zPnezq_c@RaELPUc_ z(MONspnKR}-*%*^jXgRISt6LZ5sGyEB7htR=Dy)CtRKGhJ(z0YC3&*8tc>({fYJA6 zz?FQ6uiZfbb!hjs)ge~=?L0;y{q?Uj-Zsz zm~%>5WomN1w<6eM?bGC0O4BOk0_D)K$~=H@YlWasj*or7BHDMN%1u8zjV-x^Zuj^& zgns$eya?T+7j+RdGKwvMGOj}g*DFY_Wu-LVi!RE5l}|8{OBmMK(v{2riqtIh(#Ju| z@xKWi5K%^EqMkVYa-5V#jwvO!fy^x1f-n6gXH($qXT3~#kKwReD~cUhOqKzwmN=Zj zZG~@;8-n4h_F5t`U`1*+#wHWo3N>cqx~dqQLCH_`e+6)9y8mmvO(v?mTpoTa7`SAi zL=)laMpDLkO`grNITq^0a6k4dJivW3{VuEGzg9WnoC zg=psFai0TI4WP~qxlp~$7#drex_2$;+#)Zdkh}`aZp+@_JeR%WVi9&YfL!UEA2>uO>BxTtVNb!rf?2QbIJ{OroB~jzUwe?pq(Lthsas!bqmOb0pEsh zi**|(=8KX?c(b2TW&%Xv`QF5=L+kvyHz#)l3$9vD4+)DI<7)Shn$Uoyk)yx+Q$q{6 zPoc49AXa@x&4#3?7bzM66ReXfaXY(3Z#W#TMv7+^rFyE1S{?xkB&XI9^LkGeXhn{n zWn8^h2I-FBS!!VGM}w(T08jhu(bdn4OBiCMrRju1u26&k$nJ+2(VH>$sKIt1i(1>D zm^D5mI~Zy3L}Wv=MOhOBv$ik{V;wg^M&E9@`=@1cHp}_tk~}>CTY3)-bz2qeQpFq2 zeGdK#WXNmQ874S-z@&wJ%5uX-9Rhlou2+4c#ndiEieCzh<5{(AHQpxjEZ@hQ7qyfR-l$oBZI=*hLkm;5wZ}Y9qJ_?pszI zZ{c0sx%jWV_xAy5(xfiv|00t&ab{5+u&c~K91qo)ddl1;S-uLwTOGvcNzWFJGv~l^ zQM&EvcI>xAYv_7xiOY*ef!4^n99fVhVHB^_ZI~a7NUKlTPcEz(vh4Sb2u$*SuK-dh&=TWXuBDqA zXuQfseL!AlupMJAQ_n_TyhphtPhw6Nb-WB_oNMb{n!>K>r?Te21SPEP_GLGZP5P20 zR+~MBqL1dlx|nR4z{HUD7JFt6O4ZQnr9hMHz=KT%3SQ&V(R^~=VyD9HHQ5W{pWPpc z(s-(R!L1Gd63DgMbYkBJ<1#{q1YIg|QeorwjW=Xx5`M3`Ga86O6qHv2e?XunpW3I8 zC}egABX$}{{Q$!6`IT!9b?hAHSBN*J_V0j&;S-HElxDdz&Qv5sVL#!?2f{<2aXauV}6b}CQ4!@!YCV@E*72IKk84Gh|OscG}wdluZmTdm=- zVRx-D&g{sA1}3GG6rF^M*yC^Y<`NngghwUO5gV!5?(72_mqK-)b-H-mzHGe zkE=NLIQ3*OvI+|{XbJlAmo@LnJFr^ND;xpj!MgHquLsn6;f 
zzMJZ6LGx^@vc$}k!0m^X1rLGfI~Np``QEn5nt*DxtMFfGDg|o@3vz>gID?dm7{st( zpa3f98%nj5u_b!b2j~5l367#?yrX|s2l&v65Fp>sXmBXW@>q08NiTSDVvqAUqmeQE z;Dv_rJ3=}M7+YN0{>*^Jm0MfPs2hj=BvpRO_{4Q9M@7ZQ7`!k?E?)Ey^#19?&^K@r zz0m9l#F-o&7Y{3~w4N8LIxE767ER~P#B>p|PHmc1iqr|6uH!C)@4ZT9PxiY61O2~U zZ6hc59i1GB4q9Dl2$Qcl{raSijU^x+_uxsES!)afs(6fKbZxr( z;NCzqEe$%cC1^LKU0JoGKTQrdVHsC!TS9#XEgzBwI)UJbql2#r-Bl2Y`jGYuC5?sc zm(};3sx;Rn9&Qcjl1xLL2JAE5ps?U>qb|*}s^G$x+S4iS1I|%mC8Uzd< zo(XxZhJC&ks?;y0gFew>;D2^(zxHF{Qs?84Sa)62lPAm_&4wj@ZocsI+nS}$#$E`z zF&xBj;(^?4pwk*S=LoTUH|TI>1+DlVcil}E^c>VKN-O7Ej72g(0jo)Ra(DS#dha5| zKL$mE9ljA#D`?G+e_Fz-+#(T$kh9?5>72JRS4*ymms))J(}26!8LY*$-BW^By|KEB zJCJsY3+NHwB#J;3(%2s_A-*>=N85e`Jq;feD{)C!hcXo>y5ihBK_Zyy(e>T$uZME~eR%vbraE9(Tm0V9h}Tn#;_)+bH+zF>fAhYTAz;4`#$OnNb{ zyO|I4u#oe}AHnzRqfl=vf4VT7xc{5@RMa#449Xs17a@@Z)Q4UO$ZD)wszV^zThjr{ z5ptb(ATiWiAjO4J5{h_^CI2P{9M8xB`EG}HO{r((tee2;C&vw}+S)SC77!UCFrTdx zJHUFNP!w zAh-!cy_ottZ@t-Z4XuR!96FeaFW3=2<;V)5!AV9d*ci9Cd0WPs4!vq@U{&{;gW z`bR)$s1E0;{-K6-Dkiic4g@*c|IYjU#uH~%Jckxj?prb!LwbD8YYB5@=<``O97RUG z0Ux!msUkF=@-uM1pMAJdUO;13kkPZ4I|PWJ05s5Xh>_5*P9ECTaq^OoqZEjaeDOFw zKokWB4?JdefBKu%Yu29C`EtDE!WoZyEa~%1*%u*>&J;DC;7_d*h-*d(PC*>#fBh$X z@56$ywqfF9n#)?haWH&Q@Kuc^Y!)Xqc&6C;_Wn?yHVnv7(n8HKQbJ$Zyt3j$p;m`c z=+y+!mYJASiHn9}NFB6wV&3V1qJMeP@U6=oLo#8G$~=|gLoOWvGJy!k&z%9BP*wVz zW46t|8b2Kfn&iS|i}Z_!^Rr-8V<^eU77nN{xpwI|s%4&TD~T4>y!;_34S>+@$pf9w_|H z)*6EDRsh5PXI)hr&hiRe3h{@_PgkPV`)@kcqzg+Ijy;D6Jm`;Vh`sV$ z#D%Ai4r9ogmDJ9IcV6Qhz&H!xC0U+tv1g)w=e+$SvgCs91hs#FOvhMdQtH&hMCFb%Wq4qyHyFiub^jhSgv3IiLJv3 zJ^>lEBg-2;FFo>LO%L^f&SU*8RMWP3->xy$D>+D;X#pm(GUj20$1>&7HID!tppFVTGm@p z5D{9qw6UMYSX=zOyRY(hUSn6UsjG~gAHf>Il*}DX&HJC<^N?5&hHH)_kH_3?#b%@3b&RojA=$37qJ}x zbMgBC?2okEpbYY2)aI$f9)QYt7TZ&7fA?-XLcarze|cx+JwaGJo>pXWzIPkwR$Q;~ z9WZL($`z-(>py;Je3zjE$ttv7--m5ZY=RPo2+zeg2!xj$0+VMt?IR^2;*G7(sk&!;*{|S4~s#H z9(RSbye_H!h}m7P7T5fQO6c;q9$#VnDo+^Fq<#$bTjr;pRyZ%+Nzbdnrr9vt2YREt zm8)xm4d*v%WR({RQU@x_M!Y*2zA+x=r4C!GTa3tN+O*11anD_fn#a6pnHiLy`r=i8 
zKTTbe{(UgmLH)9xl!x!LmFm$L%$=nq6&&>w1~$+JEdsGDjLLI6K^+cSguAtqxjc1` zw7TTt6~(s107$|QHzfQ_8<*y&byyB8rb27QM;>QdbwY>haSmVgZdvWohZCiFEGWQB zz%if^wN~9LH50Gl-JRa1iXydmW>tc2>vo~Ys|K z1)b};%mNM}0CB}y5%q+^kKjb|_zS1Q^M|7pGQdzMeeQGKrKwXchwa{xiB&_|JhJ34G1IH}AE>yS{>p^iZ0W#SUvy^R>JRt~ zt%Pm0X6i)Z`alBH2JimnK#7b*6!#up$yyp_R9A3s5%it~`Usq6MCbMGisxFMS9lA; zlWKRKx5;b?iX9(k&|X=nD->NRfP>Q`8=J*mjnZ=rP*lT?DBkAfAM@Ahx}Z-T2Xv6! zp$^iw1!JoHEUr)%igujWi;^w(B^9Q3A=2*M6o#6$e2`AT`7~{C9Pj4ihBwj6LPGQ3 z>$pOmB1_&}TDdlL395OK<#26HsVpgp>nLV;untQw<3* ztZ(BMj=oP7#Sh95j^(T4rWRKxPX4pWTpd}OCUbgquvQFoCY~IE9*=O8eAhibIbQo> zH7sNPX|kSt>Zs^J;k9uI7%(nDCjnL2)X(*^#*#y-j10HUP|WXEWI7e??Sz^bye}H) zs63(87tJu?b|4P(y?lLt%|O|j;BKBPJ?th#7cFE7tvWsN&^YG6AGR0hXFbKc7*OG8 zHQfu0%>2T=8HM`IWuXreY2vA7)4SMt+lOD|hm;z_i*S%(frCPO!3+8Gd=;{!k)v_E z>9cFsdfy|Az#$KGEcZhL2cZ}V19^(~(pqyyK(}6YKu3trsvwdz7iMf}2M$e9b~!xc z4HvDU=tZtgv+V+>*&zJ@!x|N)ckHlNI)-u5nM-;$U5F$p1G+7Z*Ng@nJx;neh=uMg zX^q@~z=E57rscCX-CPp7V;3>)K#2shY0;FQKz(;pwpSkBn=#FWjg9m$^!aXS{V6dU zhb+l_t2=R3C+x@Y)O=E?^Z*8YMzCHW{$+za*)%)aI!h*1xmA@;B`3LePHaz(K~n?Q zx{R@&>;U>{T3X1ECuH{1FOq3g-tnbpsPkP`5%)^&MdlS;WpL! 
zGG4v#7{PLFz{DqR^n%5(NA`lF_&6=<^V-vcE3Sw3Qh7q-LNwsG(E6tOv5Q$v_b<;uT&*NI}bqEHo5?icKxiXxn8Jq3w>Eroe$o?gjj>|%n&))v{+Rj{m+WBe}fe@hiR7H8U@_4`b-R0}iHRMvDTuzc%zwx)*l=D5y#5&VnVTo~{kBC~pFIbG+m6L|BcoRB|%;OaqW zkZH>^M+dm@O@-U;#FF;3yiSmQyJS~4yai|H|6%T90fk)JSfq8nrq|;Z?%Z3FJJ{-I zR5B8ux(5|d8ri8ZuhV8ak zJqOU{Z2qk3zZy#}#J98ri-^ddTC{T~4t!)wa@=BEaZsQ{-2|Z;A#FEY6~#K~+G0La zDXfmGy*W}ym>U2_0`)%#yjjLb{f9~eiJ%Uk*R(}h{KV~dyP%pjSoyT93vF5AnVO9L zw#xTkrN?yxks@}}a9)mjE@%=aVL1x+ zoVE^4o5(P%fTEK1;S~+4iD%r~{wsx$8%UQ_)~;P!SkcTnX$j)FDljA5UgvmLOuCO;1)9|_f87ys!iaG#N*60oAazx~>O zgelXrr>>@|@_0X1lf(v)_WI zXuv_+b5QacvPfO+JxV8O>dLvRxF0IgOM=#3fBwUcr@h{5ExpZK%(t3!X!yDg&dmN1 zCm4wW!!UnRMsy|-EGAL23H{HbFFNkuPMrh8oLYlJUnqOJvOC~UtX)nFhUFT=ooalQ zb6|E>Ae3+(ca{BvLPw6tdj%p(#=S!}7@_(TBR@nF-?bt-S=wa&$6f|TKv|pof(ni8 zVBh3r&mY9-Atwl@XyMG*G8$}%(=EWOf5sdF9cU9WWxWUgU_D2JOb}$9WxNzn^p>_J z%ICoJKxy)9|F3CBMB!})IE!QktIxj6P*Fpmp$xw9J1tC#qm#%_Gb`E86wz(968ZgK42c`&x-jt zcWvZ~`z=O>4fy?dO}>>vLxL3b2(*9ZY8 zHycwe@QQ>P!D%uY1easxw%^TN2-{EoLTQwFROXNc<(+^j|49eN!D>(I>5YIw0GDKM zrXcW=UFFhiirOvXt@SELXn4lBI;6}$H(r$IG~CJaKhl| zUYkN5X3v8ClzaUH{=Z}&J8`N+7gzgELdS;qQ(SQ9EE?5vT)%QFJ0v>2SiZiW4E@BLv z5^tFtajIMu1g0Cl{ABka(ou_1V_L#tjYB-05sEr)IpK?KpqX60HpVpj=ID-V z7>)jF%@5L0Y7`)g_jbiluO7ot*F_Mf|BP zxY(Nmdvq%KrXak1kn#a|>c@Vd&2-)kZ}-Bga+;>c-!~EIvli}} zZvEGp$1kM>g5AF>u>voMKsFO7-dP+yQg4BWv^g+$YnXDp#=$LI-i@ZZ;H%us%6qAa zY?WmtDz*f?iv(H+!BvxNXB@>mlF`_QKv;W+_r(@KyCQc4%5aAPy_s(!)46Yq;n%E* zCJi%~{D|7#C~6182-19f?iymBm~cE{hKd!2SOTh5myVMH>CSe5#y_cS0tbph^F_sV z$6XG;R{V!e0EOcXHcRPAOOL6i9-8b@TlSDDvjN!#rww&q7Be^lN~HwQ z8^6te8WX55APU58o>!wYP_-+ALc2x3z7MmfK6!<^n8?E}^4{T*I}nvljt|$+2^_|5 zZ9r?Zs*V-NpF13|`dtZvhU3kqXTDoh_{O4CJ@3=ucWsoyrdxqo$XMV3TlvlWkGeP@ zT|X_jzg7Ngu`GlElWA&87NuI&DZn!P_O(UOLl|KC=GS%Cp7%1Eu&=?!tsW7qFEdch z0Y+=xY|vOUV1o355@a{(T8XH)qKV()=vrk*DIySM!cNP@JrX42{t$^l_OIEDmzED? 
zB1_maP|tz7&k4RY!`UUEh8^7b^^?D*zugF@zcGl~xfO!2{Y#IEhu2G0&{rTN1z~d= z1*3nIF_V~MPmC=*03RbT00X(-=pn;kY}JPk)sl-43%)}drNC1(z?wP+82V~^?pSg` z{8{R>y&ht(lzTEKHYE6(Q=Ju>hz1~SLp!v9rE&@a5VNlTV6r?r8hbv&-l!^HI9Q`+ zva~~3TvYeo9QY+)z?{H_O;`fm&Az!)CBvrcg7O94b`cn%crIs#B?=f4%OzSzC7@0( z9=JV+@4g8X{ziR7J-KSm(=mbB3owebZi6E`I01-NXrqAE`b5qt73@a9{r zohfm&ZK;v&K->)0LDg6%-un?SxXKTHi5M_&wY3K(Tw2wH>A;e#4WXA;iMvl7tFRtO zv8WZvq|JwhEykFhrX@C1H*|PYJFE*MT!lm_qSV0Gkpl+GUWDjNZu|RJM7f~ODwXa@6U%DEd#@O zo_t}n<&%3xl^2_cUDCD~q7D1sNkETm^BL#q8%L%N3SPnbZakqmnz^a}Ib$%`fP&A7 zy1^#9I>9|klV0D^k2l`T{Ds`qZm#ln&trZHT#q^y88?TiC_VU>tK}!di8YdIIrq!H z>WyJ2;X^T@z*ixNS+8Xau8W0_6#NMZot>y3XzEwZWI#Pj!z;vGm1)rrRCiY2{8)>> z&v)iQ_3yJOnt`Y1v$n_ur>PA1rle-gGTv=`UqdG9(taol1wk5D$t) zS1x!EZ10GR#?C@L`7Gz5ST_E$>AVr_w`^9`#FE$hu* zaiH$C6zg%k7Lg|djYENhr)!wk?yqZ|V|irD!yZi<%X$e>sX6DK(zt>(0K(XL#XXtT zydC`T-`y=%X_9r>E!@(%HK?O9f^uyoSco;a)Jm!cK@R<~RxgEkllCug@dQvOnjfa5 zm-a~ob0zs$d~gGqvY?zZ6bd0HFf6(bds)iER_~ICV}mhxtIoZP52hgOgmRk2^3hND zx3N=J4x!V*3=tvy1r^X6u%}y#>*XfnCI#Nttt4W3ru3=_Um3(wUqfY4HDFvIb;WEp zoT&fZHeh_AxNGbh$W9J8q`!7C;fYf9cKw%44~9nPi7KonGQOIT8zdSwR;W{3Ijxp4 z#IaJANDOozxWHQ1ntv7J8}=pj93*}bk|jL}tqq=E`Oy^(ZM$`E;jd=aFg|PKO>^tK zHn3WS6gSxWVXq$!7{ZgF#UdP*8}o03N&m1@m>&nyrHVTHIHS$$A0CjE$E&5K|?$k6y@zEjvmX=>p^Y zStCHVZEw+eP=F2dr^EEiO<33J3{FKN?CP*DyoX1&U^-UbHnmpkjVM0Nj}oYc)#Vey z(20LOnG4^4pHm=1&qGwfQjZhg;;5<26)WzI*PG$0-o8! 
zpj|^T#zs|3uTvksLA{Lo*5#`_GrT9mm*+%TtVb+LAMIjQ8IC07!jC+$fQsv5x2^&0 zE)no1^WFc?@Sv(V^ym?c#@xX$yi3fb@*!<;k3tLs2H4pg~#+-#C3VqeB`jo}{mQz5m`F=u@s6bs=BF-21Vn8LscHkG*J7(p+pqJwfgLkX{G3e zp>6BJk=PIp&La?fk+6YLN4bT?^E?zOMl;cpzQE;65FP=l)sHVP%6c3zr&`lTe8ivh zQT;Z6oX34h>?$jef5dztH2x^J@Lpr@%ZE?`s=hOR#{aej7l}w48Fy03NGp*7%kBE3?!E1E+=lckd9Dy?SPeqzF3!l)P@9n_iy=sh_^qP*=ARel-BY!i=6zpaElx;w)=6^ z@7AB}A|#Bp^5`m(cUf?PCadmh^h(E`ZpjagL&jKF1<7?`Mg2)wS8PA8QbI8?Blh=J zOdYe>P{Z=!ol;AItcs#3*HlSBV+c6(Oyi#q&8Rjt!keCU3kcqo4HSk2qAHC$r7jV# zMuYDm)jh>uuKQbvC2TXe0t>*oJpzra`Yd&YGBPB2h^;I1xBtWzx9Q|OQi7E=O4MZ` zef0RK{i`qi+2aEOCETIUfME)|JAYiotStQWb$SsHzJ_z(lKG=_?J`jxa81V8{`INg zv0FF({JC7z2TzIs@a-!pC@z$QHEFGKvXu{6p2>zerS!7ogLV9KQ0*~A8u`ah0>3 z`^2--g*Git`r;oqnf%1`Bp?{Z^D&gA=}*M?79HaJ4ph)TY_qq^4k}s4UC!gcD~FBj zDfhh|n-KLHz@{oIU0S~(e#I=J^5D(F8aXOPErryZ!_%8sxMl1k_JfDKs-mFlDMclA z_R0Dk@hi&C;p-Eg#H|r~$nKB)>e>{%=-u&i5q&cMw z`+wSd@2IA-sBd`2h6Q011(l{$=_*aSf`E|{5do1BlqMbNq1uo^q&Mjz2+}(Y-A)Oi z_mF@>f&l}P&;lWOZxR${M(26|de--S>zg%ext8YMv(G-e{?6X}a%~!x%`%VeIX1nQ zy^d>ZpXqPw#Z=G#FhH?$6D`0Logv6DS^0lj5GV`jJPByy)~=Z6aT}Ai%VqY5QP;+! 
zUd*~WNLl^SJaJrlE1~dUXnx6hAMUa#s#}C`nHg7S5nJ)>nNySD!|iukPX@k`%-?pD z|6VS#$hgLqFuT6=@vW7+`nZb6@pIJbe4x^Q2w`jP15%Ozd_8qq|Tui0~ZN z%))6cDHlhwYJc4n*m7n)4s(hcZ)yCn-~yaDP3J#ixN*6M?7!7;f7K0a`!qgWEB&tn z+e}|u#(1(jEk+Ag znGclwAWDcUZxnNif#G)-Z&+@)b+BF>@1lY~yaU!jfe(8mU;rw{v$(5aD2+(Dj27 zGy_WX4vulqZIbqv?*`TH4lrEM&tYkdC-rIDn`JddyP8T`TCo-O32^2Wt|o%n3;Q2i zI3Kzu0*o#dIrBq{=Q86Jb}1;Sa=GJ+PXe7BTj9I>HRA=yAWxhg)$eZOg9&Z;;MeIL z_&>d%kZsckf&Q!Sp+EImW=iM9As%2_)8C&aU%wzTcep)#Pc_?*ffd`bZIA5o&)g`@ z8_FGuKuh4!Z3f{B-oEje)IMT2);^Fv+F@IchP@0-2*i;q@!4FrbAcg1+aLUf!uvK3 zuU%Db3N1bKJq=#34_X%{OHPB9DE~@oXXl@tUg}O(iS_E`OKs|EWxY6Lp$dd>^!QfN zr%P*jYSL=N9Q9{q*|Q%n1Uf=LH@l@@r#$>=T0Wf2^{J)_G{LmfFCOrCueWTJU^I2~ z;Q1vdb)Z%i2(9Rt#}pZY_h4ht`=- zf@#GmqXMgeP_!r7`^V4`6KW!EFi~z4SI<>{J~1Y|ZzM zsO@LUT~Kq>$(tI$?b#Bf^wyawvLxqQUwDQ}#Z@nAZNxSSs*5dO&G6l!{QS%+zvkDm z6aNx?VC)2{a__YKqw-c4rcNFAiClj8yFyB=y)12f z%immpG422>Azv__Z#;Okry`j-h#rezWds7zW|}uHe>v~!3vkVb8YZc!2-~A zGrPVvOwJsk%A44hBW7BHMUL{5mJy`RQ_4jRKWSVg3Zix3&jen>p&17(`0Y>jlQur&1oxZDU@KTUaS^Hf-+SE~yDN&j^f7 zWIiz-wsmw_XzxEpc%J@!e?NMrn&4l|xA{>qpeHs^;$MDqRowXyDj=nYbDQ}brNhGi z-e`d7>RhHj%Y7ZrPb)!=ca!g(j4)26-Pt$6@)^4(ey_Pit@!Hw9&QOKeh;@wCRv(^ zmmv#&olnhzrhb>zpk~TzGI3+Yyql%^-y zf48w|9+GUpD8PI*;Fd=$4r$!dPp{gRcu0>FY!<#p3ZNSIHYY&Gk$nJ;am?3}ROF`X*8m(y0Pu&!MA6>~Jh4TfT0KyU zN_!vsOIeTWo0ab0cfz&xLoGkrhFyR^j(NHNoxYYW`g%K^9_>^BW&oUczPg8r-g?ki z%}m)rA<$|x%pya#Xw*q-rY2zP3PJ4mFV<*MPka=Zq4{W2M|rkZ*SB@y7itU-{QF^h999u zBbJ!HSrQIF6RMly8vi3hs4H90s%AXDmx?!I>jSj5moa~u?phhhJ%rL)wdCabc$7Am z1naAf{vO%HIy5O(?Oz0w7xj7z0_LH~jhttKboBI{t+OI=aqNAMu5n2OJ z&3Xi#xT4B~s;Z0LE? 
zahZq4x?*=JyH!^2A|QjyQ<&p=$S?Z4H&;KmwYob(V#Ez7oS_aun_oFd7qS^NKZO*S z2>_N$_5BQ`q>>eundSK_map&kiuLORZ)O3p&>u>dg79x!{*nTW74-NhOPBn-L_=cA zJgYC{#cZ`jlY7);AjhdQo0bH=B}Aw}t!fT=I=p{xonG*t7n?}&D05EvQFY#BZ`mDmvvy9!J?u61Ff~=A=cZKnl@{ZrdHhM{H+c|3D>?k&I=v}I zam~(AiEHn~GdaXeXrP-SyKW*?E9mT`;-+*AH(Sj^VgxlQ0N6v^U+#3NADjtzWVL2u zCjz5ZR~-A`m!fc<%m0hV{CF7Jih z?8s270$DKd%hVj)1Sy++cBP)BO5MMn|EQfG@;=E8Kp~1-D>=A z%lc`ABCe`rNd-#&x%t=#e2Z_+7?#HGNh+YEF{nk3O+PhXku<{Ne*i#T>De3=8{Y&R z&txd-7t=@NWsPb41s=LWwW9Kto{1BS#i~rDNoZ!_Qes>?9~bk#EG-g> z_2TqY&OJDs>DEzGV6uv`sL#*j#BV>4!(K!#P+4U*)wME1XEGh)blNcLl^LW!TaP%0 zs0gwu{_LETcAGqUMfPc?nWyM+1kbBjBNAXG+W?HSOZHIveU6NNS1d>SeUf(ZMx|6 z+s%#*0Q*cu+4uT=3|io#vF7wv--{oa&vVH;&0!tmMZy(TxlQF%=xsKNeGx@YI7MM=6Z!-8YiSbrflmB6PJoDZ$;9-O4s5O5huFjMDM zT;9HocGg?>e!+|)NNontLr<+KShAqk6p-i;o2}l?&AG?Rz3`Cs_`r#M!QW2H{#9p?Vy_@(7xvsr8a7`O8K{Cv0 zCL2IV*Rg?mpv^ z2Y`X*;_`BC^0^ml4o?lmRGkJAd2?t#?{Am#>)<-9#8+qbn`4Eo> zbB%4hz%Oh8tqcTl-&2J;nC){DFtSuQ+t@Tev|7~ilYIm;E=IuP}_F$AV%;JVVyfadg0*t)!b(II|LNu9x zsW5AmBjCIfR>s7`%K0>t6dN^|+p? 
zM(7IFDaVR`{J92d8m>S1fS=32UkNIB43v>jC-v}Z0$+w18ffA|h~>=)J1nKYIBfE` z%KK2Yp$p_9)mY2lED{91@fLqQf-ECJ8A?tzO%ASuro_-k9{lQ9?jY+L0|;Gp&0NIQI>OqgM8_>5E?<=2|#f@%K%_|F659(u!J7paP ze7Ooj^XS>X(xNxOjQW@Bz>IqAJzz$?7WG4dvzA88I>6APZ)`Ff>1`PDYnzZWKK!!H zf>wmriL`Jdq8iCVDvm?HdY|C4^5wAk3~@g#+5eS8p+Qh)mDAEq2S|wg0upq0^~fpL zJ_}2vdgN=AC(SWxva{Z}#*|jQw`#FHN9=*r+n*o7_gSrtyT|!F9IQdoMI|%12TS`M zR=r%goWh>Z_OsraR93wfk;)2T5l6n2dV#2=&tCbJ>QB~_IxpA@ke)BV>>Y@9=NpF+vjTMy$ham z0R^$1ryCsuL^5}Q2TAuFgo zpnQ>~Hz!;k8B)i5gqQx@=`Z|1&CG-(N(18zpNm(`oM;v;mYLfd=Ze^q-H3l2g=OPG zT3y6E`TAi1FH*<8kH1{J?3~XDn_s`y)Bm$9w;hAly0Nid(ZfFZg4(WohwuE+L^lh{ z3HufEst1yrOMpQrzhYAFbWz>?HT$oiu4$T~>#w8bV`^FC3puu}}8GYcIxDA8&_+eA(sG7Y$ zcYfI=tHDDKK_-DPtM&Um4}kUq({Z8I1rxI;17NAKT{J5iVZS~SXD~89M5^){`E<%G z%r!RuKeaWs>9jC50ZW>9w z74y(!tK3_~e?xWYlxIE1>^SR+!TjsSX&LidT@vP@rYwbo863HF8ZkRP4XZ znF2NpfBOmi54!qky}Y4vL|=v2TeJY)F`KeipKqmJ@ZiUzw3gqxW53|0#Fg26M8l~` z4f;z&4y_x#*T#HQ1^eWws7*gz>laop&C8H8h4o2#BZ zgJ?tIEJol>k+ndi7VbhYT}b*riWsto$K*S!wrwKc-0T{s;+j zAoqTp*-)$8{qpYlM*XXl?iReQaxo2I{8gYRsI-q33!6>xE2o^IHKhJeB(kN*GBh2c z4dVEP1xj1wqj7H~YTVE&%$4hxQOed2fzSH{#!UGBw_wE5@=T<<{UO8V+%JB##N~gW zb}{C^k6V!D(dg;NFXN$J`oi=C_gB7D0Snrex&A6H`@Um(TKX#|rt5q7|02l$es=$z z$9$JcdYPI^L?{>WX8KPvD0E`$kKQ_R* zKM%JZk|+kg%6aAh6`}tprja`WHlem$@@xV|ec~pM_WfcDz@^%C@6*}UAynJAsmivE zjX~LAZ-Ch}GW&k1WW>b>-YKp0(sxb5}1rG1GkQ?5q2iTOHY`_R+4;^x$T# zj^m+MYd?J+BD402Ew}-+Dy@gij2h*lS>qwSoxm5K#Hqfn?`qQt{|@gM72p#?_c$K{ zm8e@tqOb%zob7=}u*K{z9}bLfRf+6-F+;`H|B1ZnlXWx#Pdlk@@d%=M5WrFEX(m$* zYd;}TJ$rTOXU8to>^Oy-+P8J=0i4pnhH-SXd4#7>@28Kov1V=)Z|kI77Bdo>y@C`BHf#RbG|nd-4^ITtlc)vz9e^dw`t?bt+PPLb$oVlPmM$z)uC# z&0&P=_ibX}*?)o0syd9C(MlPT#ovak#M}Oy*3VsbO$a`#Up*_q;I&UOU->6ch{8gh z-t9!7#n{9urzC3+MCsw>QK&zZf?ttawx*@@GZ;E)b7BpCdHKgKTZv94c34hL?e z@L>xAt;Lm0iBUK2I>t_@UyRu^Co?(kldE-pQJuGBPt2mHcY{TPj@jEx@5IK_mwPY7 zv7XA)*|Bm6v%bHkp=@H{LQKTpxR=-RvPo902(F8dDWFDuh73cZrz$ zV)ywP8~h0q!w`#8Am1av-vk9hV$98(!l++cYsZ@b7sMc6Xsx=1e$3)$|Ey@fwu1Y7 zd7ogw*W!{IIy{H|osfpmlOTygQ-&GoOfuf# 
zb4<3;we<(VC2XMH1Cuw;5I&akHD+U6viZ#!`LOZQLA+uM2Eg;i^J6+>pPGwN*+whN z<}MOJ{;MDLLB38{UkksJp!|lQlOW-F8DNGc-%%If^J>|6q7*>owFPH@WMQ!#z-Q-; zfZ3}mAtyo9-{=6J8UwY3tiV_E21W! zwF*DH(KtRd5b(smlqffA+YWBr&~`!Be`yZAJ^6Z2HrJN0OfDlzqSmkyWvmQ-`bufq zUs;pWFDTiSN3JaO*Ec?cQYJ3Ve(cf+(ejurJk8K)E`jQe!bC3-O*XRHCISLm@%Juv z$`5{F_~xsgpcx(+@$EyQIcIUPgMd~K%LTFb&R@P(UItGRqa6cvl@E`q zG1Wh|PVu&&(4-$n+i2)>jk;7Bx4}1$RqpvuR6P;pCVdUg44COsHe8EqiipjN8A*%N zM_akxy}2%pDt?aV9U*0tv|+|q2X8sn)S)d>^eSAnIs@E&g=cR%<=o+k?TYh)=sF>9 z-0|H%D|t(q0e`=5>GF7};6{{WNUp^Dxd$Psss1cn!>QWR-8Y$*1#Q9)Hfth7)Ma2H zy3Lci?N#hOLG|X>Jx)HsZQyXYv>IzyK9`(3PljCcdJbQ9JFSv)`key$cQfPZzF;_5 zSnveG>VsESY9>0Rm0d8`S^ipc81@26yq3UKkfwi~Ipl?yv|deOosa@LmxApTN5{b0 zRQuf9gV*#Ns?)jN!|unf6?szhz?J1lG2>278;X^3S7aLmYMCx!Z-^Wc!gjWMzm876 z7!V6lzIuCdUT@_r>G!FfB?ea5GMsD$zsjK$V`BOYn+RBOjVrq#e!mG4V=2L%X95fd zY@O8Aol@sYFH4zXC?y&b!@z=<<;q5!U*=}?3e|SjqB*&)>RK{Q!Rx9P;Mj$t{q@@U z1m;>K*~+fuK)BNZovs&3zmHx+DN45AGGJ z3p$uP6$E)G-#k`a|T#{*5iDiVgfm9usVT`v-#`Vbje(iNhvHN+4@iHl% z)A-&%$Y{iz18};Bk^7>&bLvp?lDdN$8~o_6pGf}b55og;(LCsM#ByE?)0Aj0#Tz_! 
zUXBgS;5FX9{0FDq>dr4r6X>wQAjg%LTKSGwWCz9E2J3oF*nEsOa@)Ve3R)2Q5`-+K zVTQP;(KU%KXXXb&a>M!4_zMN2gPNQg2YZtOr{I`et@@t9@ZcQ5p0wZ4huA(?w#xu2?MJz+X151@{OUJ^Of6|Du4{~ z9Ms5&3!C6JuxzW#%WXxde7!EL6de&5H=1^!J~ajs&t=m20H#|(dYPlQu`{8tezj9J zE2P5UG&~i>xnX4|J6dpw!pEA-We9nx^Seu&OU>2%Q393(FvF!lQV_Jx&4GAJh}T1m zsy)R|+`V-*r9CIo`H7Wuxu7*KA%jBM859Im0lLmvU(cEq7F}cVkh- zcr-kRLrh93jJ)fa2$axqIeK|a$nB}HO3+{FOtZ1K6?}O(Gw?UDy&R&?>(1JyYcU)r z|F*VM{fVM`w3u;rkz$khgB%r~DCVyAJ~5-jLPWH4p2Vjr(`d)Re8;W_1m>5Hgy(s_ z1ep{Nv@Tz0UZ5_2&A+puL^nJW1H-_*m@d~#he3HA5`2C46I=Z7EM|cim?6Hc*LjLT zhVWN#j%|0^Z@5%HUz@4cwxpZVzv19c~LAI*RP3 z9@c5Msi~F<;Fqpnc}&VLn{fM&%BMUGTVsSewAZim^y*Z_dkJ$}1Bd6mKemS`w7P@% zyQfH7mrP1Vby#(?O{kgzu`_uLZ?paVoYbVp+J`dv_kCjOt?xQoWF zSzb~)+Fl~l+0*n*i%*UqZ42cP3=BM$qK=YahiZ>p9(=g?rFt5Uo*pAFs+f#cTr1J1 z+EZ5CzV0+ouI)z~b&1Fkxzr2^&d&R2J306BGBPT#QU4k1Y{kYP&%lqLR$i?iBmXwK zbD)B4Bt{+8KH1Cj$~oaq%tJ@kC6Sj5=K%sy$VR=KhQ0v{0gc zRdTOOQ+xxItc6_$lmhb`{+0|I__~!AtMw7^m{}s`OLl}Pr8()j$IjZ#sR-*WWsU%E+_Mau&CRNl9rYN<^CmvbtSQ74N}6oWss@bao|n{fQkHbpcb- zl}vgU<|@-fZJ`MnIqxjjRxvEt%Ya=~{&o{May_nY0l9$izK|}X%nG8`&>aWY;=dkU zMLAM@eT>3zma^GdRoMhJm|@_B(84aPA`c}c3!d2eX<1}&m&AE)JX8VtKlVV5fiM7hPDD zhkup5Fy38yVXd*huefnRmPEJvbo!!eFw;)Wm)TnurwYdR2_bcjqN-5)?IRjNkWgZo zZGr;!P5_4ZzJvL6^kI9h_WR3>or93)sZhb`wmiI|dofYZkrmp^(UEm?9bDK_W|pQ^ zNor-w5XRlS^$hk=wXdR8@Pe(p3cx%6xJ~j(6j%JKMIpD9Wnv;+(zJ+q$sB|}(RcVa zXf5x~Kr!UlH-^n!N4`> z5j&ygwmL?FWy54K_3u^Jo)mn%6+cFL4sBf%c`^S-J8;xQ1nk`v zzJQnb>&S{PWkpXI2+Wjx0NmSi36*CuK&rw&J6zVicr2Oa9$4qvRDmR~9g{eo!?eJE z+BLAr%_oU8O?aj7crLG(JeU{PQkDb<>!Y)*YlwzxbZ;-k(9L10=o($rJdFQF4h3lb zkOF^U3hLD#${oF;SBG*tA%=OWQ?!^*j3Q^}kqaY4ayD}62$BHJ7u+9fpGS2v7EHCj zA_aZT@K>KArG!Y2>z)NOAt&2eY9OYOVr+fbhNPhOs$+P=C1S66T=^#$Ro^%EMTsQs zH}C4f2@TxnY)X3vb&xxi8@_fFV+q}f9MWCS&Gl)kJ}3cs@g?1p;MX6(81&o5eNk=& zeRZnwQc7jeWgj=Tnw6+lYzTU>cn@2U?X$i+WmkKw)6{9?g<^3T!P?c+z2RA5O=}KB zBWslng?J~fq-uhS*7L-W;&fNwqxD(vq`UZ|1zwj1{S`ha`lC*_>VaQ42-s@5624g( zgd&0sr9=XOK;^dQj;a0ThG%dbX0WD6K3FN0S}3L=p8M(!SeaXl0aE|vM!2L9vt`AN zrW19Rjb1t6#1w3y&2EIo)gO 
z>*??=e{HC@r}i;{(BJH7zEZ!QD==wla+2`r231NKJD&a=~i zLoE07sn#DYdWizU;V=U+iF&|#Vrt7x4aLu!<`Qfk*pkg74vMO=6ZNLWe-}X zp>EKT6qQs#;c|`=V^I_1HLe_q*w1hyZ9lkrc5N}JCSJtNbI+817>|+(G|NqM{rytG z6s2UnHXF=c7x>n>KhA*c9q&53_-)2GQBa2+Cp+Yi@ZCY2t}QOhVKr3QK-C@YYE^T1 zV+a!TE~WBO zihh8jULYYU{Zjen6bnJG1KsW(=S%WKCbFYuPi{(gk-9^y3WsF(N)xu}{E#VY;L@Z~F3nOIPv?mg>vQ ziB%)W&@;-Z87}htFk(_Ef)f!JQ3FnON;4xuF?V6Qs8PD71?X~MM98HyZE@BwoU9x=_FpWz#R08vac9U8E~2yt_E{eC}wd>$I&da!g**t&n)2 z#JWbVrjx7FuP@X0oS43kpQFCQF%yT9w7p=xiDwjg|c9l;)ivdsU`U|b`!I1t3c=>#rv^Ahbc^dkD9Va69dIxT(I z$VXcJW-DZbn1zB!2j8dIikbAWpmFk1CDjY5z7|!bd9zEA=jY3y!pvL=UdpYp3BET4 zoB0Avhj5GaE9l|%)WlE%$_(0 zlr^D$q*uJcJuC0h0Uhuyvq|^p5+>J}LWz?wSnY=-K)*4K_$k;(59e%QhO4p>;eL3Z z1EGNbBp)C# zQQ@E;S!y*Hj%K`US+>0^PM#ZCjX@fjzR)@ol&Ku9_<2ez zF~swH)ZG=AB{&=f7T{1~XwRec2nwyCd&x+Ie|759^6tge(IrhI81ij)+NazVJ^y}L z)xvP5>1j(zkRKd0GG8?+DV*{cZHoxJ91sRs89=yLPhJ}A0RqM43-}$>u$%j8P5I&@ zSfVhP(leKMgn@GQe-kPC|H4|xzOD=vmEkx(C23oKP_KIL?nyzQFeMlBR06B#Kecam zn%%F_%@O*BcfNCHeVUWY#yjroMDcG6y5An-3FXB4l}O&E2Yd#e%82xY*)r(Ou6n|2 zXWQqdXS|B{FUU=5T{94Pi-R_LF~c$!I;+=KHJfiYB+lPnzB@ZFs3Met9-TU>)~old ztJ-kAFnn4K`em8=t43uWMSz}sJV$&P;+ai5uJ}1(Bq2nRyz8$zi?N33HgKB_@tXM= zK2Gc8E*Z@62>)93iM$cN8+}2UNs>+3f_O-$MXP9+-UwEzRp-$~qi&pIvdv8|HDVC( z_t~Z=#t)FgTqiV=Z5JP$!V;d1)ttF4qXWx@ZD0?|K%5V?XFcq+(4I3ncGmr4;$p{N z=N)gi7Ix^R5U%eJoD#wph^hKICf(|aT;nq3kU4AByU`*v-rX6DkHt%t8}J%CH0Ik731E!k)1iD z>|42#0xM!qp8*W_wV{z@h$$ci%`OGXxvGa<6+e~&*Zhr6D&!14osAK6|4gmRt@ni; zwkES1ZWB*fGaCf1=WHHT@A0?+*bS2qoi2c?sjR3c-D_e^hip?N_!xau2r)#Ryf(2L`V?+!FsarHlKYtycWDr^uExNG=U&6PDb2pgt5 zZ0}Op4WeY&ENwI_C`c#|HY?V1-PkbW&{%oHaVwVc#bC5^5``a9**Kt_0_MjaJxk@% zQ=(`W*QI><*}z8i*2s;Nk+U!i2bc!*`x}wuh(SFSrEmu&Y>rLY% z3<5t6>82F{K{zILF0gkzk9tyb_yJ~GG{_9ebW^b($AHo}CRT6Nu{I4J` z1R>>6>b|r`9oIx=XJ&@{6A4lo+WV@*b$T~KtlS$ndIENTZu^qQ*T_RV%(EHjpwZe` z*Yvv>S6Q2ldsQBP>FPl(lg_~A9@OTKbl}BPpnmBqGeh-l9xb%}ko8{!gt>pB6E>M0 zgs&Xd2K9+1+2%OgBT#bj+&5&}Q)?Lg3xZ+<+IXL78yRJIJrNJVQ}7a>PJ?54?N)F_ zs3@lrjl@Zsq{^iD@>|r~;LXnoEf7skj}tfqpIK_TLKEB(9~K$>6DX?I-jhEF)*%?D 
zpHHrhh4O*T2$y0*oaa;9nG3;*)7dsB`vPJ<_r*V_Q(fO-4;?K4$F3Bo&(Uqj2Agnu zq7?$5o`t3(k^-p%R|1p2;G5km*?V!RS2U9*@UJn~q=X<*xPgJ(v=^}KG3ASCPZ4R7 z#*B3F1c~c(LIXP?#YA7bUYps)E9W?qPzuQm9W@a!CIoKh5CRl%d1dikZp`8n`l|5a zjrY;Ey&P~#XV1B%GUR;fO-w_BbeBOmX?Jd@0&|j0X?DORlrF?h&Nl@c(H3xk*9NWN z37EEQnrU-P4#o4y>P*0b0U;%1KcnvWPI^&Cxo1Uu26W9AdRY=%l^3Te5xNvP8xpvO zEkuVgsh`m%tF0C;5^3pvE%|tLtWAkhDayR~05l|*`-f;&K-<3xMA*HcxP?VU=yoN@ zf(6meqwNUzfMvj|v>suHmQ73?(&*^OZ!fRG=`ZLO(qm)0W5CGoRY3IUtJYd+5(G0i@SjGCbs0p0I187zruQHC?$WB7l38pWx zJ$3e3N3Y`w`TB>SrAW{Xh?xu#<^(xA2=X=_5|I#MgH=qGc{()cfTdjyreEn5xlP^n z$T>Z#CoO+T!{l=d{hm>;Lv z>{$Vx9Gs$9Lb%q=N9Po7%O`WofSV#2+$d7z9Z2O5m;?RP1|6lYJ|ZhjJs>#BKvZDR$^aSJ3_n5}Psc^X|zG;Qmsb?$$6B z*1B!%o~-aPI!X9D+M0yrbD#vFCph7*5FB4TgH)fFLemzH9ElRwJt4h-SU;qTwc>bx z5mdLmf>Y8Am70t9Cwb&5GNSjgJ;Gnn(z5|G9Yvo<7^m+{+31 z56|7IL4?^1dpwG)E84?K%I!!24zSVt&9flk7;|PC*YKpCjyb4%l4(0eF%y%?u3)WP z;`0K(i#W8LC3ijUf&!>$nIN800G+qlbWq6@RdrA%(p>6Y4sYjb%L4CN{ zTo&sBjczHG8#9NT+4IAH7AAG&gxRo2|dB>!AaHXaO0>_dH{b7V~mke@qWnyd-p~9UE)!x3=SLDw! 
zi-)W`sFUAPJLlY`=aG(djUVw9&LGS;M=zDWJr23%LDTs`T@~nhWBm~ieW-FnV;?M} zuC}7%uD;dewTk4Oh+1zYW|vM_Mhh)um@T;x10RPfms&NZ4U^J5EfsLlJzgsruT)NA z2WZB&FzwaMkpzlMayvm$va6H$ZCSTOSw{pSTbJbMnb+EqGUBF`48Pq-p-cbYnNrmU zPTEmlJRu_X7P~u(FTi#O*g!x(Y_*7N`otB-^P*eZFIk)0$W<01m0+l+4|72G!ra#%>eU5qh z;Se~H#>w*b=h;Tf^#?v4o1-q)(x*ayEKa2AWmF_-{$gpIuzrbfiQPdB8Xu(q3P!eF>BlHX1rF0<@*P z#fY|GjF+eGC@>7pen?eS?uEaz#+Zs5%yUDM6>6zDMRwP7f8rWHOaRjG`1o@5)2?wH zq|o?K>I`C?q%?It<(*U0Z-^~NKiFjp$kcEFp#z{U6QPh6D<|JpwpIwW45-wYtr*C2 zmzw9VNury^!5|fB6{W~cc|83}&EQC^i#E$f9Gsm*hK z8&!9SlpNmN#Ef%%2Lt>jONJe7${x}pz4jHgIyB;0^KBg&epR0g?dEAF7;Lt-mE(#+8i`Rb)|Cm9D;9gS!uDKv0YnylYiWE@ zg{ctJI3=@bklMYkG||bWE!ly8^v)(`Wt1q`6-xM;5ed+XS4+@hYUfA{nGRy_du`Va zSH?eIemyfXGhlsjRi9=HvUQ4*n{e`XvxwE}nvM~=IrG9U6>?*9634dl@ReurO#!@O zRRnby`!5ip*%j&%B=a=-Q(aF)xGAsDDly~r1@n+1ZGl4Np&*q81o1i#!T&CJu3 zyz<0En{J{^a=!vD&*0@gTaVz&z2tHPr^&(8qKx15|4PBuVJo#exUD;N*!#v(xV?Qh zK0Th(PA1!|hzdzjR@y_TCeZ6f_0shi55si% zb!@K5-JA_*4SSvM*zFC*lc|E=G$ca?ju&=7h_w!-$`SC7O1TPvWc}|I<@`SrYRNu8 dn)9Xv9X$Jc<~3KY&%Xixfz)*`6{}i2_&{XSm%)K%vH$o(T5 z8{7QdyOa*Fv2k2vW1E-t!)*Fa<$-eiOV)1ZAv;YgQ#;3_HYRLpN9|6Zw6Z&Cer$z< ziH)tf)hTfiNfGf4D^A$iowk(|6}9~Q10q&7W}?r8vn%i-b5HNmwPj;laFqT#D?XVqgHN=_`| zcu;(||7O^UW2$#v9=dbn-@U=~>0 zos*kr8#XRJ++i4Y%dFJ2IXH0F>P2|dufMjPEwg&}fB8P!$Zwu9U;i*0o9FVCE7-sN z_6n6BF3$Y++m9E1*ZS>u*=+vz(qG<{|J{%;@5ldE$XEB{e>dcRH{_o?@V}z^&z<=D z-~YQI{~vFNQtEj=Hn!Vxi@cPchHjBI^q6EGN;|RNb>#JhWQU&my8B0WoP3qCMlaoB z$;QJOKF>q9s3aNY-Er(|PVP>8pgcV_DWGVyR^+R9V|yWk=I^<->(K&5pUh@w;bzx? 
zXHEmpq}zHjTQ2+ZNJr~?OivD%+}?h+%lcv0EqND*tz+*Z#7s*459K-A`&zZcYQ@#L z^yRhPf5h%Ba9L*6?4OohIVbcDDA_C^dUe-oZs#A^ZlAg#YBZ($bK_yFyd_q7Ol^OE z|EjwxiL(mxK0KUf-4W_o++bA@`Z9mK_kq>ckB^jk3V*O0Aw$NWlGo`|#I#x@!bKW<Y^nXgvF+YG7E~*YYln4 zlvefR4D~dW>{sDkGKbf0-}{s@k(UuFekYo8>?IgA8)^~_co?;bzU^g!C5_p(#=VW% zVzzIe9!fdBTYhr!*B8NNF9`5!KyeM<1P!WG>yPS6J>bH6YVuh{r1TYMS=GeLokB z6p6Mz7VwL2`Td(0;=T#+eBw7{U+(G`0##2+1BBZ$3nnc~$4mkZ*8X1}{MvTuK{mEO zGCC3;^j>0i*9s=|WOsyF|2d1ZYC!3mSI^$QZ8_V%q&<7~v?iDE?bR*#SSlQG`}S=< z!SBBQ*~h$CrJl}AT&I$`xp_r>vr8ZEh2=K86W;e)ZjX`FnsXa&A9{5wLv4OYGSbN*Y@>kXisD{XhnOKIq4S8Yt*S2ip z%6P=SMMLdOP2K48T%Ev4L5Pl;}lVKeo#?Sv2No=Uq8hunP%ti1gD`_ zW&*43n3acFk9EeBaG8ZHv~0+D*0|uCk8s6gbiO+0&p-TLPBx`OHH7`hI=R1spJ zR7|{|o*D?BUXD$8DtCHn+`ieir#{`fY-48OnT}A-qM{=E-fzmq6-(Bdyee`#!M?j@ z;llnhqp8iO@|ZPL7?g*@#Eg|g+{ZfmUti!iO%wh4rfrL!3#!?*GsiRQ&Ye3>=v{5n z1*2*KZkH}yy5ln3`!d_MW8J!ScZa6-28#IFK9?2Ljz6@&r{U|@VXMkC{rlb&xVx3s zS48b?yK(r`ZoRUyGH)!Wq7Sbh8Grce&tyDe5AB8&lf6{GqE&A#(omeV%N>0iY;JZa z-OHW*FLo^0dGzSf9`~QWejCpXRIRGKKYsk^pT0y`d$oK%ZdNg~)`%Z1&#}VU z-QE3tR@-Ik<})2dbk97d$6MMiZ|S{=edX_xhxf6rHE=A73N+M9R>pWImO)L*(|NzPwsr(lh1dSz#K*t_AKW*K7SlJ6 zuO{P>mC&2+y|H0;c5j$VuUC?5yD)Va?q>%BaLqTjIG$2=`Faa&6ImxJ>W9WDJ-Dpl zdldg|c~BRs4P73uyil0r&Wu~PVMBbsLY_18Qo$56Nz9_^?xA!GtqMuBi2jC_DXCN0 z?KilYVG`yCO6%_*eYEQ8uEJ9fdbA_LzP^1gmMvbggm(5%tamw46R(R}q?4Vf7A$4m zose0xetEB^L5`gl^)r#Y!3uvy7}m~6^Kd>VR$?4e;V?Zb?n{LQ7@+}!hg zM}x1nyg*HAINvop-vE#EWy0Z1xjx6IVkL!t{drciM%uD|@$^WEVpr~huU}%@+E1$y z^(#)l^3G0gnRb88%^4lj;~vwZmXk))V>-n{U*A5q$Di)miG~cz0LEp7v3DnG6Ag-o zmTx}3%dWFxjfopRCRf8ZkYfc0t8m@fae-YckxSh`e*=1Ikocv4+JmShwT)|64*;z_9r@;dv!$;=3%tt-d=Y1(26P~vZ>1$4 zxbg6f{%>E{VlV53ota$$8HNWBF2;H&AIV24QafyfzF$22O^dm^k@XngH9c^wch=f| zDqTyf1;s7?{WqU=9}jEgiXHjG^E}@-A8AF$r`t~O)NOo7s2d>YS!uo&)ACh{iT2Q} zuOHLh`KRR!9GcEaXW_RHK^M+ZC4fpYH>CG}O9g9oK4|+L? 
z?Yt<}^w}QWzUDj^yRIssh;McrTh%#M<@H6)HnD6i^h(BE_vw)Yh+ZcmqOPkr^4D4M z49>5wZrfVY<+`fhy^M&^O*P%kFflRd2NKz6>>`ifFz;63ceDF{FVHSr&vK86exYv1 z7B|NY203ZZ$G48ZPwjs=Pc$;#5li*5OzTCtzHb`(mEEkTH~h@XUHr!%f4o*Bb+Ybf z9RquYG`S0byq03wY3PQ_ z*;}|W0^a>F>y_Z(^O1qAH$ZEYLjK5)6)~9)ePfv7M*XWw<#8-u+Rl%e~h(o=( z*vY#4%fyZ;{lvFbM$X~=h^{=!Uu%)K@y9n$`l~cGioj~L+VjC;43}FME=&*mhE1#> z1hY*b9c$oF*6E|gW1wsC%H7`H-rcz!3ghoN6vtlrwZ_IJUQ$|h)lO~I)r(#ng2jbi z96$|c2Hw8Brx`;9ygE*&1Vub9uLZrXZMmRr&S&+cHMeXRlK z;f#~_Y}|klXhjr{hk(&oH&|xwL@To_{&&xJZO0pA-aO;SS5{gB?3Jv%u>4xo?$2e=vmWi1 zDggHW<%x-jcUAW^(<~dL@Ugj_B|csKU1>C1qV+_w>0Ze>_Acj@v->c|e?-*~S1B;ybIBPz4enuNn8FMi)){O-l?^X18&`Si7K4juuR zKOa$EK6z&)^iLk=)t~T zsTU!I2Mg3m#`+sP;{ri<-+FlxvY*KdP9hmwKzrR6&tmOq>hl`*^hq7%< zcKH1=ljF-v-HAPP?bXeHUaGWtBLK!8=LzdhInEz{&YtxPW%7$M)+&cMvHf1pI+(qej$)ZeDgft z*{?4uOKH_YkfQyE4%NMg`1Ir3A`Tonv<%;5vBt&R7g0g}{aVo`W!1cV;b*JL;$3A+ zvfJ1e=?wWVFWrZ5y^_+Kj`6O)pGQ?;Oo8$EpyR2NE)zGF{QHt@kLfBFE~JXJ^=~mr zLL_|Za!+MTc5=_cFQ1z4KbLcttH5{%P|`PF+b(~(Cjb6{D*^;3zWI9d1B&psU$TVk zzWMt9>r0=gAr$__*Ngkbmw9;ZUJvE1|e8r55tFp{AM;!m7AHc%+U%Wc~;e!{81^(Cf zUpV143N8V<*V^}LeuA}rRPd>GgL5ZC+bE$XOX4E-$>y1r2ly2xK0I2c;ObOSZ|o<( z)TJj~=aazVyH`!YsuQw-55J;98ONj!)(`c4Xg9IxQ>47@70He)9zV#m+T<}c!96@t zU?76p8LCp9`*=?TD(cH5qk=kMn$|yNF7OF;Y?j5= z*(@Q(Ci!{MfBHx#PNNN=Kk{Mkdav1gqwxK!RK>U@OP3ad+8BcaiUG5LFQ52h_q|f6 zE7$n?z=YpJePV8%kC%|J1iC3`^ObJC2h4xufK%}0usN@n%4@F zIu_py0xr{=)z%6q<2yAuF8x;|TSFL2nECfEuec7-Tk%5RMPasIbpiU;&8=r_^c`NA zSQG22)KF}CA#hbW)-&t(9T$`#+R1C|&^%qOrBj5vb8Tw4R5(du&{6ojR-3}Gys4D%^3y{9IRWt1eF2gwlb?U_v`5e*;t|vv_)Q5 zzx43oLnjC^#?OKzg=&}L)>@bFa#Gtg3p?{R_0@rA9?(ljNjMI)z!*ZLOpsK*^OF+~xYbt2(X?g7RKH z9_Tyr8rWvX_iT6Hu0Iku&v@$6V7UA?wfS{Nw@pmFZvmzEZ3YNw>xxUd45c8xS5seq zd1Z`7c^BWCXw*Px@~txJVslyFa5U#jr?l7iM#e&*p^LDCVH61vNs8tXQ% zWyfA41@5|_M&jlVmJo1#$zcHHSW>=qK(#&{GVflxrO-w5oZcswRGfb#d2WDkgn#!v zBYbQ>O?BBdy-@JpVU_Rjdo(OZda9-KMaG1l?N7Ga~NXS%nMCxT)yjJ^@96z}tDgxwoH?Nc8WQ#BMxj zztnPtEu>u~Y)XL;Ee(tiyF}TcH@ia_{ksj`Mx6GtSL|Ef|LieZW`w`WC=+>baLe*u 
z&zbCZ9b_5-pyGJh9X~!1)WJ8?D%Im0Wb`!^_#tpuKzL}5)=Y!sRXyC<@lq=zddqWrSp7Fr~GiE zt)yN7MB|&13~kk$n^s1vhjIh(^^5_16txE$@eQ|lc)V$EuQoZ@br%FaS|dCx4aD-k za~QFfplGN$k>P>4uz(1j)g8USo08HOB5O;yRx-%zyg1Rbo{WarynHOfw@+_zh?_n4 z!9v_h<$Jx=095iqs_aIir8-@k*Ru(Iecc@DGAjWbxC z6q_$Un{D4Y+z0Wc8xH*(6%}P9W2mdkMT=Sg^q;eIUpc<7OWy4_+*K{SURb!&{jGAZ zFfh~11zR_ric4UKnq? z?*COoJYEkUF6j8OTc@)$-jt567)Lb@9QjAx4Pbb{tx-b#lULoj{`y+AJWmWz+*a% zQ5&zz;W5$1-)+9x)6)AN+{D@ox1Vq+y1QPVC&0lz?4>WuUloqu9(ldMW0_#c>0j#Y zQGdPXa;&YYJa}>=C^X!h+V^w$ebG7(2z5sd{foI1F!_ckX-f!#2UG?Q2Gw$+!0l2nZ*WADVYD&$r&(D26 ziN*ImmfbCvAJ&&wdsIWq33~T##-sD|<2uZ71-H$&Kx2u3G_jkZv9({Rc$jC4!^w&^ zGV%uBzY=d7zq$q7hp*Pi!>wMDI1Q(fYjW7y@9G@BTz45Z?XCnKEJNR$ zgQSFbOpTh?lGOna+-1H>!Kxy1_in!h%QpSNS%j^+;t_BIwfNl(R;??4V_`A+v0dm< zL9q|l^<=ll1_H`H7tx_(3l_HL!p2vHy_zUF`hTxfjNjL_+@cP~~t558NdW8`Kp z0UxJkJl-%*&d`ln?bMTA+lrRrN2mx(|4v$(1knZBg@B+X4GJ;z4_TEc3l2-c?6`~Hx<1w)L z)S3Xj4d8)mCr>o z8pRiyfgDIeAO!=z%?UCXZct7N4cgJvd*_-Mjc|$;&_gV(FNo{5wBdIjIZ9r3GgLlu z#DG#fhMIo|@OT-uH?X{S7*EUBFwa@?aR@BsvG?hZAqBRcU7&CgA5sN^7qQ%Whz=m$ z1Z_*uVwMtS15TQKYl&yH3EebWFovK2qSd%9oF#lNCzXe|fJ!Uc=zr5o2)JAsXIB{c zD~(CAVfhN*DqJUp5WHCV3HWcM2E^mB0@W+H7-iyK7D5ABwB!6wUMx)a*jE`M6S+|R zW8Fz5C|uq;-tF>033@Vi&39h{dp!iek58*W|(3}R+9 z)L0)gtxcc4Biuv5eY~NC!V9f1Grcu6=A4+- zI&B0YFJ6SvC#o*n?!Qdd<~Xz#Q~Mg)m;{HM-rh26ZSzx9e=t;eMnGZi$6L^jIlBnr zllh|Tvuq))94X8C$R0P zuxIbyHhiSZ3lv6H7&>=Gm0M5;T<-ktcOp8)S61#Ikv76sD!L<{w08U~OEo2Ft*2hY z%y;C?l)TH(d(o;)?D{&~Dr3AWu}Porr6k@kGxDnj^*9$&%qI(u=P{HQq4693{z|aQ zpN1ORzhi=O)voNlX5Lh=kIsZOnpOTK{aTb9S-BWc}F z^6{x-5@H@0Rabp%FCzrvMZ1iv`_@x11Ww21GiDU#O7Dez&n24s;TIMbM#U`#e_!^X zH-|AhMs;H~rv+r$pa7QdL?o?^vtVRr{fEed2+J&%ZalpMXmTbg`b8*X0q2CKyQu^Q zpUM#ea;=GJyqg>Sb@Pm&kpzoOsf zauDq$QzZ+^2`v(?&b*Dcq%8IHQ6#m_A6csjyihPb*~i+4{8Jw;h}pb(yg~J5GYVKM zC1Ow=rNP5fQdEqc3leNMUyJZf?q4j zsvk7c>aHQtf(aW!S^WV?u4`~PIgrSAo)?@*LRVZj)i*uWm--?yo04nT;Q#PQM{vv#(7t=*7W}XwmK;&honoIpxUVX0SdlHkJt7gWN0x!@A`E z$X!eI*MG*;-;&tIh;h7+=qWT(Csgh3#yD(zqE!T{;4(?7Z|yH6-{a2RyX$6N|E0-9 z>f?XA5AG{|cqs$nhNb)-6V}?! 
z(k2iBtjH}&|M9|jE_8EN5P+~>-ua*KC^kHY)t3d~E_Eq;?M*($VN!_($`ut!T=e8u1x;4iR0!?=c==C#sje#_*Y?GhfnR_MbX zX3=%vkih>u$tQM7cLU)X9t-+}bO-ueJGNK@(5bAt`uN3fY$qASoXLN!s0X9&s(OH5 zLQxF=hc3lwdT@u2jp-*QyNtF6wn0rx(2{BFfQ4EJj>rLR$uwR+d-IM;Nkaf?)^-Kj z6JNxhnLeLKst}m@Ku7uVmDj%%l-1N9IP?VB{loHZyK5~eq`fKEN0nM;dSW0VGBWac z-ho?4e&87zdBH3+jhco}*jA!AW!(tqq&t84D76&eO(VHxROe+9C-x~1Q`4tHju=U{ z4>SJm-HS+#ieNh{9*;Luf)jZW@J%+cU}`MhP1bp0q@U7DDiGa5T|flH^N|!8PqAhn zo9y%8R~&t!v3X3!9g0kA#a_{bz$;k<)2R_c{Cu`&x_IbNLp|6Wi^t8+C_7&~7{Wvn zi{%OPyMEYyKDul}ZNnRskfr3LDjT^xyCp7|-|uU6T;iq;01~C^jt{2qQYz#}xFf5- zm`A*x!_%@1Z2TIZ2aLcrwCY&<7BI|0WNixUw!&CMqED+kU&4^tpRI2!jXs|B*RS(7 zb8Bx!kW6OasW^l8K+|Fpzo<8Pj8%rGEKQrAz3VG=j2u%nsedTlzUkN>+0D`5LdVK+ ziL^C*&U>wHXaj=S?NZRBhh!0@0-T55>qdn4!zpAm+LUcf=StOs_vSx1f?iDcg;q`&Uj&8B@Oiu zrl#6iX($?(0u+)p(&|lMvN~T4yPTZ7mk%8K-eH-a%xrSF8s67^>6R4A@kpnJN00;S zXH@+7UZOz|g@|o(0g|%@&#JL6Oe21Q!s{M?D0vmbMIySSzJ_WrEja=ri9?nEyLXIB zF4M0QZ%KomK&rNs=4RhcSq4ai^~;@{MbI+i+=B7Blw#CF@8~2MvI4OP(7=tYaz1aT zZ*|>;8a$Sb+IYP%K#ZK-yM1lrRah;pfdvyLYm0qQ3JM+b2SV|zmD8>80fWmEUs7=8 z!4Og(m!KOLBpQEr-&gK4fww=Cv;~S+WXS@+6p{iH#w%n3pl|?lRVLo+?y&J~f`{LB zZLi4g@%oJW_c!JI9RdVi*M2Yz>An>tJAQLyl8_$axglrX-s*oFY*q6P&n(lxuCH01 z9vL2kl}%q%os1uGs0n{-^^5DHuHW z66B0%@-dVuF?{;8qcQ|mj{z-Nqvb|7rAFg*ThDPC?$YCdTcJD*AQ^b=eDs@+4pwG4 zLfQAye`d~cU-`kDOj9(@n@C?!BAe2oX)gM<&m<}0Q)ijk)FshPS}^eFk$`TB$i#%$ z?4hu(1KOsj>+W$rB1$(!QHmF7 zxutXXT~818G)Q-H-sJF7qLK*(jGt&|F0a zH40YGLu9Mje{2B6ZbL5Amjpl3=0uxXghAt>`O`uwCT>i6HB{09^#QloWO=hm=*}9> z;p4gaZsWxl7YeijoA|m*;wu(yid{usqcA!R>4-pMg$TxGmt zlf6#qk;ZapCy$OnhO&nKuY$Zo{yvC7S`a)1D5`f;!n`7|*|7>yrN$Og#?W%)n^07Uagj#bp7NH8P?eGhAeKe30*6NIF?M&+9@ zcoZzhFkhq(g^HE90Yj+62Q2syW`k=BcRN3g;8FDaK+sY{eei~UZ=;D-Jc_@lug6Cw zJiro!Ylk4Uk{2X{G5}~lG;&~*gGy9UyH?p^=l+GTG}G z0VutD+zL`hJ~P3p2>FEr&tG84vI;Z=9`k~O$h{B9yt%TY^~w%*FM{U%7ZfK};=zQm z0{2YH1W}~ler)(v{a(?$#SnWYD*AEpMd006ligOWqBSnc+kkDz;ug63@|~7sK0;|D z!;xA;8x=@wF#+=*)CvV(-_g;x-U$@Ru^;K6IN27&laXfKQjutmiaded?}O(^idTgs 
zB_xROP}qo;9V0P&ZI69twT_W5*|yd|IkhL6A;d(_f8B+m-iVO3H8d=(SHkGLa`s(| zL*?wpPP&osft}NjSid+aEI{;eDdl8G;aZ9~FB{lbYf-g|?1oe{MA#(2jVGAyqS4rJ z7i^Kn(~ss`16f4pHj#ZoMQVS9auMAVf{Mg&+sDEY53)v{wMtgToWatZZi=*SpLg2@# z4`-0zxrmBd4E8V3dVR%zNHAKf9Sm5ibRg~AT+XK!ri1| z5ra?6ej5a3{ka(vH)Z5ixx03^?2Nh$UwDz`+Zs_^j7$B6#6S+YnJ$nNuDPq%f!oWx zy=iPAFX&qzZGjB6`^a!S3cuDOjEe~QrWcVaxgZB4Wd2C5U zRzGdJhD3K-bd)Ssy&O(v@*?U20o(tgV5L4-5GfI9eZ%y;C}D$2?mkqqkH7qUyDi0VaS`UKWwCN7#b`pg+?PRZMt&pg9rTw`|FqSGr}E#CZgwa3PUTQ z#P-x+jG`77dtfORA2)3v)v&FythYtw_H~iS=!nMafOKW zR0|Kx=2o5ON)~~~My%?ZG^7+D36_H4uAANAvlW6v={8ue6#lZKH24%RQ(n8g zmm}Lo!D5m2cGCbD=)_*q80@lduISAvIGLakuFwx-j;!&`6@8T3K+l#jnS`csnRFD? z(SzOcRIum~m3room;dyZIHPHL07){{TbtK>;_0sh0;tf?@D%r~MQ#=V{ND5eP}r~9 zJVS5bg=Ljs-AC8lj3@2SBv%*Ww^7{qK~R=Pv;sQZtJ$|hR0?0Kf z)ud>>%u~Fi-YvJU+RxRUo$wB^xJK-Z^dCnpkalatd%c8&k3c`#q7!gs*QTS+P(?0* zUJBy+L$12}_9qYBBLzz+DRg0YZgVyWXKS3%q(#D*RRZ7^MS=^#-nAkcB#S%Z?P^o` zC;pnl<)Yv>@@6a5DTvBgSXs^5J#_a;;X-QA!nFY&KoPooNQxmYodj@(WbK%L0!96dralIm*Xr9mS!0c1 z%GxgRhY4hyL96D)AdnN1lNrw^S7?5^4;I;H!BQ zyj2{3RZi(hu1?N{{gvTU5_t5VhQmlieA7`o*=;yy`RQ%YELwK!P6L?j>yTfiFn7Uv zq|HBJ{h?i;O;Y`-;gcJ1Cm2lxkS?OAZ#Hp*&ToG9>b2dg7xh1AnH1>>BD?@Tq!*DN z9*BL5rF57WPkRs)wy62f>+7k)MwX1DE22;x@ikk$g+@f=njFb(QJfwvuQW7H87=xK zoS3!BxIEmKnY?r%A!hm<6^YrsIo+F=Mh>5moUdW&Q?7e2(F}^}Wt71D;#8BMZcAtK zzFdbNv?OTyt=w_GkN{7L0zj4?Q;l<a-Kum1h~FOz~V-bmJL+Wh)-wxa@^?5hsE zw=y9@lWN8y2M-dl|5$qa`%%2vMw(9a8N76n%x;n^cQX)(rBSRZp+mOk-FqI+Y0}HnPMK)QJ1p&Y|UJ-dr`G=@-kOxn=F}3Kdz8WIJ1PMaN*|x#VYeoH2 z*W0^fEF+zIE&@*1Ye>v#`*4(-ZflV=q@nfGCmw-!g>fW3u-%ThU)$l9G40TCvg|A` z+Q7J=;RpL>K~9vR^*xetcmUQISgmeTdU5*8eW<2q%ntkz=+Z$o!@TyIZePK)yK4`_ z$eS9aR^lX~oO9>S**B-cO|XWAT$5eiArEGZ$x9I5!M+y7i1Bvt=vkclSJJex);ZRy z-W}kBv99jy3L(|LH+0W%Zxg9RPj)2%#z#$F>p^ypv3T{so9i0U@d){8b4h4%5MEnu z_hP$HHz6NTdJ86`I#uV!z#Q8hK<2J}E-ht;@Dy97)p*`(Y5i4jpd-cr#(2X>eQmzB#`X4mGa_* zkQm+rHG&ArCnzA%1O+}QMdA44D+I25JN9+}t!U^XCt~n4L^f5=4c`qMbc!9=J6Vr@ z$nvib6%hIcchRhga|usMOf+~<3}Zel+1+M zW6a#~OMGC0p*9@+mMfKjIJWsHrS5p1>P2M+eV5=yU6Hp`l4L 
zk3W=e)7nNV?|7P7?2D3%7j+EJtU*#C{N<$0pV)$$HKgsNg+*{TA0tGI`aEx`gQq3_RxOcYlmZhFAj z{rR3HyXX_L0;M4K_FT!&p=dJDWR{=)V+L0bVVt#jqC1ta&2%1=1CY02U#jYs4>F21 zT&8**3(A~~*fKVvT#}8Jg&)s9=t9n=OUt-n=?5l?UDQuq<;C5WDGV5M)z0z;067%s zp^(F>t9M`WQDCij>I5RUZ?3ACN+`*+UN6_SLPL%WzXi9Y7;v!-Ud_{YVe{`nR3z=l zpDA9xRopmy<__a4*%~$f3YTnbHcDup!JjRM6?q5_X^*x*5j}>92ubz}-5^s)5qNk3 zK$@LwrV&NsJOUeg+Aj)Re*Dhx@LpzWJW4K^FWvd~zH6nFS9h3JCv;ftU%e=F=AVvI z5}n8H-UZ0JOBV#=X=$Jxn8?+ksq3er%Rz%D57Z>)ek`Tnhp(A{ZA$-5?yXLzF<-hS zK}%aZ`X{YeUob*xyfr-=|71#KqwHX6 zo^j)47=}=H>+a>;oSja!>L&+GW|r@{+X8nkWb^x<*(@mGx7#lXpMtjkM+cJQE6UkP z3QJIA+jdG9Q^@HGfrp_D#~;p2EPj2O>K;xT5ZhZRt&Ymp0?wD#=V)3U7TPb*Qvs%2 z(afmzpHaCsk|NC|Oi7Ya9K_@@EJg=9#kG!TZh6+Xu#cNVYq(`6hVR~P5LQmDK zNm=MLbOw0_Rb7fVSybCXlM*U$3VP<5b@H5GPfmB@RRA|u8=^)QAW!7kKb^v3Keffi zk=AOmu}L*t8r9A}osOe`wB8Cm&^+|JS@%Mb-c`yx)wH=e6F!9l){8&0tJyJDpvf&w zihIC#W0URn=(B@ecf=IUcyXmd3=$V__icbV8K@Qu)`uTD%5Y^PZ_VyCRR zq_#KyC*9lwdL@r(g?md&bv6eM!S|2wSob~m%&*#pCPq;H&3?{@&B_O6=U9kk5G_Ec3Wq_KQ? z=fPd9q8uQPR_mW~!IzBKUpr{XZm&OBFvLVUo{TMZj z(H=M;{wk+jr}}~1@MsrRtNIdcONqB7dO5f6QF>q7_R})Pvm>ins2dI*g>Xv5N|tk& zg}XVBik?_|Uk1k6X5Obf3(X%fD)P!-)n8k)_drd`nP*&xq}Mplyl=?`=d+>>vbEY6 zR};#CX1>CnYH!POIRC7`s_SI8k=48XHtLAr4R+Ux3g=bkO+Bp|AJM*ze1_uCi)lcfN$Ko}@oFXe z?|iS_!e~N88@V6wN^*SswWZE_u@>9PO~TsocRvnw))1ym4~tu`HH$kDJ`_OW6$dqF za-x&8j+`OY1QU&YU5(G7O%GJK96fB0JAs&L2I_Y`h{cccRInB@YJ!-uMGj8w&C z%DW-Za*N4D`9Ghb!^y{X`u@7Bi)=Lu*I@FH0B;e^ozJ6Pd$qy>Fs{ZqW=kJ=_5qn1 znQM9el&>#+1W)1x{GuF)!{sD!t5VQ%&5qVd0H<4Zk`JuCE7Qsqg(2G$sjs)w@I zf6g-twj0?W*M_(-B76PEyU)^qH42CGhx2@x-N56dX>fJx@S-6@BVSDMQm`Kd8pr%6 zMmS^6@}DQihaLIvo#ARIxt&s%@&<|5Ym@WesBZ@A#5mHAOKkk8qpzr&K;2>6-0}s2 zqu=>?{ISp7^5s&*JV5ERw94&d%q=AYI^xBj%Lo8>ww~uXMQKm&zI0>&Dab`}G&*#I z=qGE4`0tSpi$*I(Vy3T3&bY>oJdqimK`g%DnFVP=5Z>MNs}6lSj)5?hksj#@sKd|4 zLVR`cX(-8RJZUj-Ms58+AsFMyT}dmA1mFlHKl#2eYP@MjVy?&eu}}H;E3#f21AOlY zer}!;|D~X$@cz22}0Gr`?riZN|~VL0e7!AEi~qs$Z>5AH3Cs+PUb znfUuF_A|SsUDOH=PWf873iP6Gz_XL@lGDyb?1XhR6;ON|8mc$CWNmgw#@fKXf$FQB 
z-R4SdZt+n+-WWfakrZBC?H`s3b&A7JVeABj^6Z;ko?KgF4JcbeM&8FVQ9sI#X2*0} zw(VnO(-9qx?+Nbw;}~0pg)60z8TavssMllW~mRY?LkqdJ&c(WwqQgwOb;^5j1dvg-+ zYZkndP~N=t^Ih{bGM@i8-=$zOK5C4?yzqJ)=(5A_iWqSo)mzv89@aR2Quf{Lv#pew zp=M!M+WE@=uS(_mpm15THcYY_O3&77k6Cyr@PO$MzyJYrMo?B3lIQTgt$ zK+^4X+7qLVb-7(<{TTy49zJfY^!aYw)%l#I#g3bMF`^#pzoNzk%>*-IPjzxBkr2VO z%I#MMiWf|nkMpHC1ftSm&Cc#@f{(Y}$_S+bQBcX=(7C8kdv)WwrULhX>i7W3kby%A zAq^N28A-|?v!D~uVmpWpP;d<;N1Bpqdd#|-Hy_>df0qD__g>mr5KP?S9lVN!x22KM zh|HWJ9g^f68YjPr2?qxrjl-fsLifVA zZ(gCbjpoF%lRodj4+ z=WZ-qNaHu4{jwA&LUzWb~kjGS=kdsKC<(4>1$+Q(gYn8*92ix}iYCItHX7s4Go z)Rw(DK}gtUgHb(p24VfzbQ~3R`iRu*=C|j09qLw^ zP(n|)K{_}OPzZ+r$iZr)gDtM^ip-s_#RIdq^ol&Uw%eRO+}Vun&Q_2gDc=^~W6(ka z5p-G#OQpgT1!a;(!sL_%`+X|AcXJW6a|ca>OBiES8JDz z%OJIZrBl#kGbJxr;tNzD!}cC`C}?ILS*Pb;s;kh<3r{=kSY*89K~Ggk?h(k*>o#n;1;7ku%fTA%1a+sw zFfvo2MHHsAYCHWRp7p3tv&$aO)uTQu0=V|#}|F1o04D7-OlwkqNV4a(1F{^O?vP~;YdT{Z3T_zK} zS(go#%{+=3EHZ^+;_-m8Up7wjlv0)lK8Z&0INa7qx63^(1s=u2PyLH04mALO9H_DB zoY$-NT2{CcXCE$N`TMgYa}n=j%Dm*vZZ<8avGemcG1pp5hZ6P6!!(^7fnp%u%*nQD z6&Ym-_8>lDSVbE&9%bBxj3mT(^~1$7(<@Hv`*H`=wc+mRQZ6a}{i}Tyd7=g>$GvLH zPTG<%F0udN>(AS&?uII(x&@C1HE}C(VP|N2gVmj)dK+xXI5zB41V41v{E&b=Hbo#Zw^x8?j&Z z@LbZXX4IZ6d&9gjZ9$~^^d<5<+gwZ|1le4>D9nYaDxGW^yuAo1tM-x7Nd{3NkR~YN zU_-?%R!@T@g(D_oskU+T=^i+6ZOH5uB&p5$W;^8m@|+1#(0+9d2(W=)<+evOne*cN zxfebkhj8Suj=p}#-}8s8I)2xS=9m#S!`TA-4jBwInA5roAK$l!CsZy7rBlxG=l?y;^TXtz=^&fHf zN{a)-!f`5(e>w>~q_+3Vi${qm*09SDzDOHx1I)b7lEyrqmqx=~g;G)5vO?&1mvb5C zd<-hGPO36*QLL^))vPd#m~lJt@BEBbA`cV*P%`CY6U7C#W2g0B15unIxrE`~5h!KO!poqVP@TNx}a* zXY;@b7MGn8=6p^HSY^#o2BkSD)n+mYpj6r^Dya@?)7k&$**PkBqrB%IL7Jl`emQAL zECsj_ej!q17R)DQWqWy^iX$wY_GeUjKWwGw_;j3NW0t?X5h%ewJ;?QF4$@jej9Wi- zJdp$jxeUG2G`jN)w+{pokTRZ|z9nj)lGn|ksI zbCf%d#*IxZX?hNHSOI5tVKRZHV0@S}J&$8cUe9O->Sw_$bMB0tpp$C4E~pFQ8Rc-R zdETT~<4p(`KRZ#51VeQEe9RKYoNZ=wBcruf7wYJK(V*YcUj(xlB)#=n~|6ewVS@BuD1mT1QU^&b==;1v z-RMadArlnTPbyztek(;Aqch|)`@Y?42DsLbs4mtax(Ji6Ij|HUR5_Mk)U@X*)A>(7kL!~!R;kHZ;R%+Hz4i=7J!b{gz6^C#iH-5g4 
zkUFF1{|eeVy^P=W%GmhfYAuYMkayhGIEO8^0%<%}01J)aNX0ZCfX7-cshz#=uwP2+ z;ZumGKo`HLIPrP`SEm*a4RdW?YDy8pVmb{rrFmI%No_GkiRjEJp>FABr-&6NGq`A& zC+KGGL+$^%qep~&B|qi9J6-8K$sV~~baWi7a|o!D#0qg=V)z7O!>GZV|D_6hsTCsl z2~V$8tY8R^lI?EnK-0fW7~f>lNBsFka2bo?EvTW#;54Y~dLV3d46Y*?wB&XAVMZv8 z@qFvRQ@OtDLY=*}cp7&d!cntg;-7^p7AT8LN@4c!-pA2+Ie-98qMW5;H1v;fHQdey zD72O|y9($R9j}4;NwV0unNQCbkA(zSA{4Hs8~22{;qYery!JAR@yEndG8!WyTv!3= zy#=U3tg&t6Thn`Iut_GvF)6`t+s(lOyCj{J95lY}pU!FMM8oxl9rASCf={k!d;)yx zLg?=X9SV5l=F?f*0TJ5Jl#&J%vhPwf4rX&yJk7tcG7|szuo7Z%;n_mAuTPK8(!Kfb zk(^yRpG{fhhs|q$*^Gu}P4jy3fe6yDcq82G&}v!jBtu?W4C%G>aXb6ZvdgU_Kz1+L z;y5}0>x>QDDuf;|<7HlOqyhVk4w18{5(i$$AAM#O9WS`Zi%v4@T6_xs*Bh;Lng%8q zBK2*n_N!=(&zq#eK%p$GT86x)93l=LG$)042|`WjE(i#z3o*(Sr(Nqi@}vKnqd2lX zG=DXwV#h|FJLYp>(+Sm5UNpVeD4e^^xJMHor9ZUKRj?!etc2o^;=S zS=Iy>2%#`z#$jmd8a&N9)y#M?1FO*#h%rUzrvRInVW6{@ubuDGHi0?Yf0ln@MLR=; z%`@5*xluyVk?A0^bc8}zl?PT>zXxIy_FY%!Qf?)*rNIE#Zmwvii1r?VXKr6F5a>o{ zJR>F$6|V?GjE*uTw|o-|6o`?Spc|(vqn~<0@%_gQsz2a7p6O)Ame;z~j?;fA&fV?# zv-Y-qx~G1>eCfMiX9-?xJ#sgpMt48MfY*Xwr%A8DBE7ofrA0!*p`!IW77CtNvi$DF zJ3q)=nV&Lu<+fk0weU}xYuQ}z+2S}msn-6CLCHC?%ksL_cX7Awc;0++Li#$0Ey!5e^+L!D&Le z)pyz0>n7wa713Wt z!)PHm0)(a~M!@rQPYKmKx*=LHciu^hh!u(G3@u~RG?N*E2odr79}SGqgalMB`;dpB zov457LoOYGPGZD*6IBfjKibHf#l<PSZ|PL}sM@<4MydI?%(1d)@Pm%a zZAF4nCE(I`1)DKaM^&_RGK(Dl2*A@NdPxed`B5h!DE4W4FQp5kUK5AT`uaF!`eCt^EqnL>rW;nGaWE&l?y^w)H}6@bMJ zm*+HY3_#@#@sSQ$j~b>ONJ1T@EGfxML%CYM^CVYAWB}6d8;Cu?UTc@>p6vXv=c-Fp zz`c3@o^iOl;zikG|3{NL22LK^c1l{-OVF$qSzu)wlP-VkoRttz{24Ma*nU5*XmcC9#FJK zKG3zh4o65GgU3jRvMhrwP6w0#gN-tCF*5oVN57MjwG}Ceex$epAE=m`nv#KfH6TE% zJp?V$o055_ayp}b>ma9|Rj7dY_z9)a^bPEeiG*Ps$;@a&L-)l2L6o;otG;lDR8dM% z2iU48o&4v73mV`eA(QwWWsH-i8&GxNUYQ=&4@3}}P97jSOySB?`}Zp;5ikHCOSH=; z&};|`>9>I=*9A*}g9Ta@7npRsd7}i%vnb31kinAq`(XVbi7-8~%UZr~# z@@fn8@SzjjX6Jm4_HeYgkWf8%H0Iu9+qASaN{n;S^EKEysLZf!2f101q28YG&djAL z6`<$t5-J0T1-_-km{l#V(Xi82O}(olsMQonj$k1K>g!mR=%j2sxwD7qH2M2vQHpKFVtT;4oaD4&K6Y z5FN!fjSqB6?TH@JG%PX}DAue=sD+hrTIP5C?%{q4lS=NKU8p(HdwJ{79%F{j 
zE*?%0a0hgzsf-+bb0S)5N>?gNsOzA390{grj0gEY_)gl)YYwSU8D9^2}BT zM`wzXkrAXUYuCY^OxkjPs*{t`1+>lqZloCRY&j*g!3zU299xyv1W4K6o&7-n@BN5)&aOmO#-@3*L$miS>~R=85_1jE&LK z6FTAh8+PLy$}D8tJz?=V{ipX{@iGAQo;PlFX#O6MT1aDj3;mi{R^tW#} zq>vg(9}kK%po__>*j@*LAv$&DZ2iKA1>5h|?c41gN{6rm2|zEi(+qlDh8$GwZq#a( zirh5?J{%eGhS8Uz!DnqIowva)%+?sYSP)C1B80h0l&;ZQ^u(gC_iTf-jGSlxgoJIxjN%sE z$t0c&LLm~u5PTP$&2E;4Telh==w9Cm&zRvXrNT>QA_L>vhg0N6C{IcF6q9x9)Hdx2 z($_~^Gh=4Ct9v?o>LV6UVYivV88%h5M9)qm`+{{UN!HUQIR&z@bS zg52furrW-!0BVHiSHYn>f^v~*)2B}#zqY}jjUDzRS(k;!o8b7oYBo?uWch(GLMH#y z75Uq_@0XP4uz|91u-uei)g2>$eQ`)SO!p^Kn)MDs7rD0c_cz(zzmQ{aFHks%ae#Gd zFLn=H$?Ywu8o=03Qo=RW;E*{&(?4rljdu|X{tB{<7bYCQ+%mW*IZF|NkYxbQ4MI_{hDlo>D?x9d96A}$uS`6Poc zauY2*wkf5OOEqvWaK6>=1*f<8c zmjF(Y>5{PcUIM6V6&arl5cA|FJO~!h^w!SDqy`>DJ9oPCT>{mH^cT^Ra}pG-^xgqc zW4is_j*F=1$yt00gO?34P3D?PpTqTb=rN}c&4v_bG{h8knN{Zhog`h^w0Uzb?IU0Z zwBc(2<_{j)K6OmEbLS2TnUs-G3>bH|2R-wXdmyG7wq|FNc0qo=C+lHeqTw|OOVl+- zDK8?qPg44E0BuA0fy`a3ZFk}UH=;;p9E#Cj`4Ef{X0G(eT4|ipM8hIdrkk-%A+lQN zdu1~uTo$xAKuIU4ZT*xQ(n)B8s3Ysb2S9s4y1sS4c4MoKXMxs6K*IunT`04}+IJOe&Zn|h|uYtD{Nj6kw`=_U5 z9?T5i&!m?&iUfYP0Hz3!x5us%N<5^Gc^)7`^?{cQu*CIV&nE(rAbfXvekh-_<$)iUL#~0N;3O4Ih+l z1JQ@;KJI?$fRHO+pHpRkg|~_n5Qot~#ociWu62Nv z6}^G5(PxYVZZuiWUWyGtyV?;kVHu81Jfhi1W&OY1j{hTm;PrhT6wGo!dvUG17!g6f zODO1Wger5;8_VxXjNI1bgM4RUm7NKebOLin8&!>r6xsvRpaTn4&@i(yLBNhI38Hor z%`SEFu&AiTxiHxDDr`>4Hw#QeopixC&{uNd!iB7&mQUKVi5rF7bV@}ICsZQ2tiCh* z-Vvs!($crkeqsFE^f@kl%t@liP~jxf+izFdu^A`HL$3-3th-i2VQ|@sz z_v(0(Tub3Gj0FS+2aObGnrB|WQJMnv1S!!J56iww3t1E$Gud{u66o&)u1S$TO7$(X zHyi@`#xZ!zg4mdUOBSb1Vft&O23e{JSY%ML%7YC7M^?B4!U?UHbptpl0-0+n%NMjc zqO@&`>h4|jO8^GSqBVzRS-V+SfG>RnfJ>Xt(sjR0(-zg zT{sfqZ~_J1#0DsMF}Su#{t>%my8&8S0AOlH|0oxJzq0#Z80I`;wa7NR(y1#4%aF_p zEEyI$+*V|!K%=Q=9Y@^_m74UBVtE~&Fi>q-7!u-ulAT`-IMmS?T4dvh&3!eVS(sFQ|5>~j3d@7+p1I5!AtOJhNSq2#+X&4^#Xr$b$cEoA$!w3YY z6k2cB&n zDi3jT_=pVMM`%Df6La~hg)MCl>6?eVkPp!UP1(yR5Ge`a-cAD^bIR|f(mDO_KP=BImL}4j7Ik1P16JUbC^DgOl&M=8H zJL1kGCPXAE1SW#rk_Vcv1&)JR*i)=vHll7qr7#@u9`k>SGbk9R*%>SNIXIf6D*fh1 
zE!rRJZ-sS2DJ!=0uxccHUT7TU;=H7_qNkTvR`>j4Cr*rn2;P$qn5iMgRtywCf9sO6 zjv#$ecw8T0>dq<=cQp&`;q`zIOV8b-M3)Z}Z-Iva-sjVXdsfV8o~K#e9aB=2eDA79 z_oU&Xg2zhESo7ebuHPSU1{MOVWg!v&-|@zDCw!H8J~d2PEPP({i*a*w8WpBYC8MSp z_;xP5^@Q+%%ckNV9(o#l0pO|8UXhF)o~Ce{F%3=7mwbr#Rl|Ufe!~pFRf!2bPbQ1r zNBRdCVyR$Mh#@Pv)J--jct+WB^cIEGfr)sVx!`&mgm!L&n{I>eOYjHjKhDVwc-Ajl zGg^wFBH@rJq!b`CY~?_T8~@3!J0OL(i{6m+;tU&d(GsNuDmryCPbrcTHUW zJK%xuf9V?X4lIyQ>@AhpEnj<8i=f^n@ zMz>5XT_zwqPHdN{-Knl7z3{(zUjqi%+~+w37WWJRa*q%$s|I%8IUieIz_g4l0ZXXhuNoc^tzZi!HQo>Q2>?Ko4ef-3U zbuU^OdT0f~ykOA1+jyP?^vm|)z3TLYmF~o6*=Q;ieBdFdGq-Y6G&Dsj~raANY#Q3!s5&T>)#$@wt(o^mN!For*Z9k|u`91ldg5eDs@jmwLy5kQeS&yMKH}L(ME~=fQZPaBXG zTi51SIi(L=)vHU~(r)AsEa^r~QVC+Nxim$LzryQKKsmP_-yrSvkzc zy0l?Tp7@DwJ$5P`HB(b82IwqbNBt#fYy@?YhP@B zVcYf}7qnNK7c?;Ohc{UTXACcj$3HvkCZ&MQ|Ic5S^cjUxzVVlKvrVZywq?)Dc4_T@ zO~g{~TLUTL@@=UTE`0ygbyRr2_t5*1!qc-G38eqz;#QAar{8_=IpSYWP*ezd*P{{i z4!#L+pRsC{=07h8OT70rZ24%T;KxD^3P*z4f81`za^ZIW>-k3<5pXA`+8vX}+}(FJ z_CH<`uyI0DPXG16D|(+}D8t>AODCN?`bLQKU44a@5J=|n7XU5uvYxXa$GFDzA63c z+4mQ=*s_Rcgl`PinMT1+_Y6OPMDM@7RcEg~$){-_g--mvT35IbT%}K`EcuVGT_=3) zsq43gXb@QM0-qsqu7A)t^k*9IyBc5u(%av_ zJZUb~MbDHX{*@#?QxXolE#ri6_AS<=Pf3@8{(roXW5P{bbnYqGm~EBNfAvqXWD>O- zL{i@?1^|m0tXcP{sHh-c7)csA&Mgfb%k+EKdPGDpBtE_G0;7IQf48@;FDCps?!|IS|+fhp`Hw2zYf zw08rEojN+?;y*4joU9e|{msj?-;~k79h{9>6rZDGqkCmKE=kQ)2^0^Zx*$BD{e}|} z4i{8KnlQajg(Y78!a6#}5C~i5F<{1;z9m|+^7?G+T8Hs+yCOZ@`(7u0)3KxqfeysN zNJErB)VL&1WSSXbKytR_KDw+}?mNB!*&u<8xU-;&G*KmByI|M=lL%pVN=vU+j@DDz zu=V7Y19y^@j(^zbHhR`7Jpm%Vzb-V`MZ-9h=Sd{hu&PR)h{qiAD-22EUVJs@`^JQN zlw)=1d|b+HGeQFvQl+%qwr|>D;4BWOlepi~qGpPJHsx|}0JPCd5s!a7ewc>!+9m2q zTBaWYujY?QuQe`jd_8(?(#F;H%g=w3Z+C4)(!`x6_IZCy0P#a;0upf{<_}0G@&*6x z(AA+D)?jDM^trc4Hu}YkIp1D;5iow&<789&yvQ~>BNL8-$gKFUMO3$p8rxYrsCBKS zb|qQ*rb=AK$vxL;&4{uE9ijN+2cj|oRq1yGkw}7amLw;oV_zNe)J$N6Dmqdh&@Z)C zGkpNmh#tVzSS~(j`JLR?h@SYl3wpehD1U?wsQwhT&&9?Ndz6Tt-C6Xz6|JaXO-f1< zdvvlMgJKM@@I&tyq5bq6O+w2;@@=49jQVxc0K20?$kOb8NDS2nNvs;Ms%%>@a_D); zzx^%<*ddN0McLD3QFyzy|BsaM}&E$lE 
z$jK*3maEi1<8lyw9kMC7d-o_-N}fRSg5~0(XZzwYqUj0Q`0rZovDZg03odl(A7Rom z8AZl@l$Too<8wd&DGS6}o`Xc`2n0m~@XU#`QBye#2B@e|kANRy0a_bdaJOK!N#lq2 z$J!k05rp%owV{O$F>8`jQEF%1x_ft(^;3*LpF{OO6`ZsI!~z=yr5y)5Xo9qa3)cYf zFM#D4O$QX^v{XXeGc5A46%Zv2jQ1!VHZ5$e|RP+&4l% z$rB$PK-z5gFn!0f5%79-xsLP0(lf}xnhB8?8x8Nvxc6WeN_z?qETAOtq2U;Hq7s?OI&DHu_Pw^( zT&ZCFXz_TX)3+ABDGPG%mzQa=DZu#{fr*<&slkd?80~=%_Jq>$CSF@;YHDhQEqePG z01ARMCQy6`qN=MSLQpB%jFycuoJH2Xup2#V&#OPj;R&%1G7O+0}~06A6&=W#ku*h%m1YI0ewu8yEvO&?l1 zWD>>*tBV;%*H@UkSOz9~$*0eto4TBL7LoxH!EbWT4X7=aW+uJN2_fQQE>r; z%@So2#;yVgzy=(TY?uzjH44nk^eOQ;AV@752c6aT&&11t7J+?^K!ujrYwlm0L~#IhC&AjP6njz zEiCp7LIqOLH4D&JF_F6>dk-0GH5$ge;#?{hX|pCw3BSF5q;A)t=3Q|{iBelLT1z-y zA$ms_K8hCBM>qf&`DF?R?K%xSn3uBm9B#$04t+pxldWlz7+LT#t_H8jo5PjKM6`)p z3^9(P4+fac0xdy5E;-7`v@eE=jUw4ISvxD!nk;D~z|%*#l7{|A7Y2Pfjm1!E9PV+W#lQKFByO55vQ)m$;BBrtlR!R;ui#mM?~bITYFXc2^8(b z0Rv0->nm$!&!u5M7bjAv?=Z|P(k*m+E zsxI%bzuR?a(uWloO|RD4!ZzeWqa>q(a9X9-2AFVzt|<#5eS^OLXq5RSDcL?C_l-GH zH8=&a$6Fz<8ctH?(SX|G<>Y>{*F}Htus9VoAl}|yb8rM2WGl*o%pHZ50vqEWEGP9~ zud=5U{VJV$9}M=hcU&MRCx_LbTk9OP2RFM~xdDP$w8Ia2)sPPYzb!-SRrOi4ks`dt zzUAeGBsvPdU5M05SGvK=uc1Pc{-Pw;ZyMIeBUqoDUuAH2MgPh)G8nEWbF08ik)unf z-=AC1k>gBix--z1o&6cz(#2@Q(kI1Gm3+b)u+x@?$6EswWT&1d6Uq^<=g*(p)I~-D zf}M>QzE~ZD4Gt&V24#iBGV(ys0veru5n-FQRXOMr#fT*@2?Xf`XyNsWsaKKmkyk^7 z7CbiBRwZX4w@-zMnXhPzZR1EJJaK#+Ak&Fh7NTk}sddgnYX7tu5kUiNw&?5fN_5Gu z&D=T{b)ReEE6<)qP{#0}cD}H7Iis2&;{HqF4F8N2Vjg;f%})ddjt7^65hF->!d*>T~@%TiqlQu!Xu{`q|+*uXV0DqjbLQ%8+n$( z9_nQK_m+6DAT`Yp3J+8y{5-~Xq(S=m1_2}paqx(J>vd$Mc}!%Pe&Cp~^;+D3ggpnu zI#Gh;r0c@dFR%e=@i!bX!f_saqte@!9?(Nyn*ouTqsbli3y`Y&pyX3#^Y+%Qw@smL z+_yZI+2O}d_CWleULmkP9%xNphAw~fnNnsiI8{pggkQP->vKpc+5f*`dkw1#FP)=k{8c970-4rn-A==(ha7rK`6Z7q z0%;0!=A$&)BAXCC6HjZ_4=jLkb}hgDrfdixK23WjUz!gh~g+Zl+zdVpfZi$xg=OP1e^$1 zZC*m3Y(YwLbyVz3l(KS(-=hbk3h&nSwZTQ#QG-qorV1EL%7To8xT--zs!5(@vc=$J zJbv0#iXBp4KUfiyv3e?d7^ET%ssmpl_HstU2_Em51d$~)cM1V5jl-Pkz@`RlRBUx(bFq0x24eklH6EytO_8i;xL0$zX;ju|Mt=Mi6*& z?;~;sJFJRq=xs9G_P_7+Wq*Kljxv!Tki1A~x4<~cLHAJq)m%WWRa_QB&t4v5I9SAq 
zD{;E+XnD8Jc*I7w%NHP=5o>a-qqKreX9H^=W*Jq_NwwzQA1&jyLV{25>v6+5f#5nG zj3XMr%aKIOWUNM?b_4w3Ef;-1N%Z*fca3AbBE5W(5X^7FW-yUCoOtv{EwIPmHEN;UXy)>XHLHqb{LMiLH8&O zKk@3fO6=kZG+0H=8DD05ARILkZK-ae-*xUKq-ibZ{LQX{N1o|8U<2f$I4c*j`;^Nf@LTabc16XY3?Ru&oIk? zO+QkkDU`N4JV9P347$d7s{e#QZ|?>5GaxyyofM~Td zP80{erHX@7un}E;$5h4eH_XMTebD!KD+dxs)EhBNEj%a0$3;)zvaLAe5@9?RZHVl8 zyYFI&N$&r{85VDBXV+f8oy4gk7-coGkieK>{UYgmV8V{(xDXW3cC$`n*b{3+T1NY2 z)Q8e+6c%O1an--D_&*3|^IW6^#!ufNN%x{~<3U6E8l;cWjeS|arB~@7nmiHfAXo9b z29ygI@6@2Lt{1KE25OSjekk|Fk;orEd)DlHGN*}94i%WltU5s6RE#-A#GLA~Nc|cGYd}7N&0w#kOhna~Y z`62yGGWevST8dmMVA2z>E;~U7ClP!AuLKB=`7@=*#OL)-zDEQN%p8B8M`jlK8)<(7 zf--fXn^%GD4!zkaA$kl>C=0E=~CG)NEFOyd}v0u>Etu4Ltj@sxI9IG}Pl~&EABXlH?U$LaE~oBADSN z(b7aq1n|!so_ks@zg)-rxeUJbNfnx*N9&oD45z?@?7OhoWNcd=N>zo6tS|%|I>;a(D=w%j}D9gX3FS)t5MGeM3o(G#*;&2Z`fLH+H?~c%?Ssy z(!tfJ41}Rc^c1U7p)z6vP7CRk`Yt$Cm62Ip4~uVy(s+9C8OY`UuJ4m@&U*d&wV)0O zCNP6t;8)Z&MPKMtPD7`%w6UmjYN&I>D5-vco~IFb@q)Rja9AP$lw>Orny?QOfH%+|GC)N(LG8J$hi4Xnw=?tiG^WOLnhxdURo7)|57wH_Mo~7uHK>Ag z7fv=EQP_r$qk8lL&}}{fvZ!hEpVZ1N4Z+tM<;6I8Po$L2qk{m5zxlzd&|cAa;~I2P zXd&wOlU}$J?ZGF({o8{&jvReQsB|?rKLOp zXDx;^r|UKpyVB0P$_}T$Qpz>}$0G{?D;ijImRJgtebUWP@Q*B{SUf7ZcR8Fqd-m+5 zuQ~yY)BMr3R31Z8h1h7NFFkIWGl6xI0s|9~_As2tW<13}meEM2_uXg*UL=#TdDU*z zmoR%Duw1h~`ySE`g|^ISjGGF|q0gV96+KapJ|CpC zN@$N87CWU{`UH<%hCJG5#fQxjGM9P6YCK9l?2UM>$rB^KLtvhGV4apCbCcda7YV9Z`PKM(ob{|*qHO7r zFvyB#5=F*BCi161Ow2QkGuGd#(uSXd9A}eP4QM=$IBF#2Cf}Y1WX3fSLPn|m6K!lq z_z%@kUx1Np$eMM+oln$~pvOwO>N;~X5n6RLuO~3@F@q-|V=G(-;XnPs81hH*wnX4L1b{FGA5NR7!V&o-*+BJw!h5 zA85FM-mcVvyLj7@!HCJ1Z#1NG%lSs&?*;Epl=4?kvC#?pSM#UNC0cKjY#6rc5=A%kqoV5=|AHsFX%k&`ILiPSwu7e{)v zK^AhPZy*-%FH*9?8~_L6p3|KZnq(5bb_A(NtEYC5POsoir&z9EZ{|>$ufP9j2op(W zXOEgy$<(Pt;K!k2;~W{fPb(%+7rFtJ5yjohkP_*>@`JRF^TbGj$5u?PaJsq)8A)VO zI~kZn5C`?F#3P<06l7?y_&`x7)Y>#l#hmp8;~fiV#qF915?Yjo*So1xDR%%TzOwo) zAZ|_|wrU(WAcvSKh03;f)Xu6D@XY`jK=oZfHpx0fQQP)eeMszuS8d)IpK-K<%b+{9~VWIFf(s&Z;=v@_9w-B$6 z;s=7K#&KOdQ5b%hszWeq5|=E)(TV=^v$#%RZ3%c8+CBv^o7wV3t#=gXiNK15jF<4! 
z@5rn}p@Av(FujPz|McJkXw_W?GAMQ6o+o=|?9(8bk9%%Df#LbJ(gYnj`NqwUWv>&~KCUgu>*k~15uHqs zapahr8CYI)!Xs5V`Ay^z?oFg)hK&jiSe^$(IsYMFvDk{vd-Z#f>(pj$;~d$rM#J$4 z_VLLX=c9YM7SC3hXq2DN>d&=M`zaZ+{+m<=Q?LM%mOqfcD@me>L zK$)J2)YnB*`I#*clkVx*f}?3Qb+{1zbspGB%8P)1QtCvF?*Wc$7Kqg_Ilzv)_&_DI zf$`{#n35%Mv2wj;!<{>K+Ar6|8UnGX*JYCryk1Lz583SXxhn&4*1at-?;b(0 zfMB}5F&{2jh=lZc+T${PXxK;1%Y8Vt$;73h@Ru_vP-w4mqZ8*0QLG|5fO&_65=pQ%(fa8hqiJPULI31rL&>mSVQcnTUr3erTV?M^&a^sL=Lg( zo-I3IfV6)=U|j!Zt^EzE+UTf!qa99r1^a(VTfoBs)Xvfvr_=)r&(vwU-$=r)|KLKGado^KF5Y5Pm-6A#IcpY6KK^lZ9Vx!odcw33IMm9L+3gtLLKPfsU-V=p(Vi^rF zWq`wYk3&13EFS;#oybp$pPc*op0HW=V=D|=af1y(c6P{10^J{vLdyJFNC)u<1Sph6 zPdYPY2Y?i&N?CCPU(T*yuciKH_S*4(#;#$OpNu11ILEdIdnIviq zS+@drgXQn8^e42by%L850_VuNwjdQyAXqgxt`)gC(C^v#3#~Yf6e>k^2=BdP(Z`Jo$yqUQAQ^{rD`3PR-TZd$x-+;*z^Q+U=AhN3&49O zM*B&QhXseN{kF`z0+c%&u-zD3M$?!)Lv&wRDp8My;D7a$m6a8zp89*b>#f}rZU@lU z5sV}$=d4!;&)-0~K-D<*pI2Dpc+qCK8|0zPIqg6R3iYP+(a@?qoiE#@bk^M+u);D8 zjTd@m@PbS?cyqY^L;tXR5kOt&8sXZ4d(6d( z-%zzNDb5JRLXnEmC&f*ij!Ig-jHHB=9zn0KzpH@CZ-q8|?%Hl;M;w>mRoOh}EhxV}B_A zMx!_IF1;cS=?JcSBCFzeos~V_LRq-q}hpJ)~b9T(J6GCIq zY@}z3L#p9KF&s_qK?y%Ju#fI4xlhR&{v6Vk+if{_ujt2AXkY|fgT3B+X}|$OIghD4 zQJ$IAi?50w;B7h8nbrL-U#pi+6boXooJ;lC%hugVMW26QOdgVL+`J-Hhb_Gr9=;64k+7zbqc?G?pum_zWSH5O&V7q2J+rvAz$79?`&Hz=-q$EJ zZ}I(G9T%)s+2AHJv6?igXY2LWVv^gD3G?DpEbM;3;$rJw(lU+{Q*aP7+h4i6kWEKg z+3CKKU9)#@_1=rXufm!PE6u;WefO?hOUqru`p)t%qgW}1d=euF-lvDl&-n1l^zH+o zE;s`<&*kqL_P<_R?P9Bnr>bpP7K8)t6u>Fn(v<;}qq3JMnCKwj)Sfjt?sj|^`Wmi@ zOi~aBWzHP`IZxQ9+u?#hH7w5Qmw)_vJRrc}Cyz*P5G6_i%@@2h1q>PeMvRe+m?F)7 zUF(8(JkX+rf!@9ZPasX2%H?GZzWVov4=&2zcg6}&TzciFt>*;Bm3Du|Rr!x`3x}5@ zU<{6%jl9V<5fHha)B^)T)S!+97b%^PLlEb$(jBCh+Uo2^P`Lts&b2`Vu5ZcsWYRJF3Z!nPU;G_$As(HIfRNLl$-KcT6yxwY}J zDhWtbIi%H1gq87qNbN_3U%0}u!?jX%xjNx@G9ya1{7l^mMzO7ZYR3u603mYFt}_cy zrqpB};*7t;6F|FtM`l#aO1f{}cm$?}Z)PQmF5YD;d}_#EMGcOL&cMr;PY7OOA?}i+Dgm}GXdTV&5VS70=5^!vTur+J;pjNojQAV z;vFOKE!oSEUj130)lu4m%u=?7IZCA)PJB4gaC-O~&l?ZCh%8OZLTzpBjp&JJl{avD zrcKobdp6N23aomEBlRd)qNAjK?C2F4oVcHUqCfX9Wzf9^Ei2gz>}}c&jw^-`ThV9i 
z=+QX@1|My)GzNW(j%iv(vi|w|zEx6H-4|0z2WWZ#O9l7PF&>&4L+J@a-Fnxn&`bfq zCs=kpb)g$qaBy&$x7MeCspQ(6FTK((_kBT(o6Av!nHRG7^c%J;&6w0LgIYAO1&KE8 zBw)1rCQ`R#)9 zcxde1`^YCz&OCDgo7=9#K23bl7hb4|L}tGBsSCUAFLwJ`;G`O41%g6tK>TzQr2ZTg zIH<1Hvg;x{=0xMk;MiTrWTMe5>NmtB*F$kMmdARw{bhEv`Yxjc|Qbl`zsW~%aChAJ-thBl_u&+BFU~t(yr~)OW zYa+jZ2KCyO@8Vf~k>_P#d-Tc?9<1LQF8B; zf-FcV#Ni1^>7gLHiF|Le_O?@M$xrd#&$o818#Cx~&2! zVvck~ux?c{j)Bt->>skC@?Q~!mw%R>>zExcN|>F>qxe(Cp;LoY#t>8iX=_{bwd#0| z^h!EW|I1u|0yp7FB)!~d2c5JUkJua7nx0Hu3E?1&MH_*ozn3m@WI;+eAMh>UQ9GdW z4U(gqIKf927RcJhNOPDj>V)aRoS{%fpnHlP5Gi^+SuT$vPk#t7<>oWChbeT#|2Q8=5nMHwrjtv1Okh)HyAn@BdE~YW@}Q zTZBr7j3B;Lp2BDon5LC(LOXye;>M-Xl>#U+Y2WkXy{iNu`xvC6F)xGY<%lblqxpy2 zX28@@FXxYDg?n=w>{w|=wE@J6Ss+r&oXlZYfHR?K6iaUew6@`qE1L{7aV!4F#T_vO zHi%e8B!j%gz=^RGCKPp4>am-IR>}L3=Q+UKC-V>dOQ-cBJ1^~Vu4yZCT?6u3aM-$= z{xtMqc5NYBuu8#%`jmV*y#{PRvbBnjp3I7LVH0bqmm{*^rS6P zVv`k5HxCau_+>Dgs^n6$CKZP(0}oSVzU`$gNHE`maUlrpVROK;7;=BR%Y`stn2P7Z z#(UI}(U-AT8ETQh%2TgH0x@ILWnfU8N460xMlXnH<>Kkmy&oZ6D9cDV1dnpYu5LVes| z*0TSZX=rE=X>kM{+pxnvDkf;Iy%^??tI;8{8Nonm0eQ8oAb6m0QUh;Om?lqDIUwLo z+;%T7FALx)v%o?R24`zpI z*{yifVVlmu{YKD9z|b=GK`U!Vkg}%}L2<2vSUiWx6;(4ZIRvCHM@s>A<=4Z*G4QZM zWf5XNExxLEWp$eRD;fxVmxHOVP6vU_PL!B+Fba2$UIcX-=A$?ZCey;yfBP9^{p=gC z$DZHS0uaNK^g<4!NbrGu@iKT!P6MBW^m(M8CJR=s$o@k(F5K}nmSe@<*dyavG(t$! 
zu=MbA1&n;@cYaKbBQ?u8RMHdzU*SoEhjOyBn?oaN0D=0r_xOd@K7Fb|t1Xxa#AJO{ zLmm<7R)LorTuB^!??{)@k$DDWH-c}1ADuh7_MqOYBTj{4s2(iVWLoD-747Pk$cpLS z?VmeEu9?5{`8aZ2$x3x1&JV8+nH^hk`LU+gNTi~;{&yRd4dCwAz~j;u^Xb!#$H$OR zcp3O4&9R`{iPasZp8}BqFAH*jID?;s=m-_{7GQPZ@#NHV`X7o54tLbf5>qA_V?g;8 z=*R^WY!B(r-X*AQGm@V&r;xnJl4ffFOY>i4Gt;nn>g3Gz|$E8hVI^np#yau_tmWSH@C zU`6fDK1D}W<_~D@`2gZxKps<(^SUUcUCcIe<*z;W^=l>ZgbR^FTC|K3E-*)U&=onU z_a>vSKG`dad1b*42hnAsvC!a=_Rqi&TBR(D)SPY5!`3!WBu{P-j?xDuWV;CP0<42& ztZhs5-%q(e2z$SauQOZ4hP4JVb^2K<<9PoUDwc$^WEtli(k+kOQgY}SLmregfqcYV z9hkJvi)54|HE?336%mbAP8@42=S=_;+`&H0Gh3H%r0FEXeM6Ds76de1bh4w?EkMH1 z3|paD&%*at7NG~m*7rtwT7jWe{QIJ!uX}HrFtWd@LPD^dKCC?^g7>mYnMB3G55sa% z4vb`d%08Pdr$F8$KLX~eYjm%qv)?9gHE^`ER0m^VcB>(hPRmtw5VtJ!(x#*UV3X>?(SV>WXq(qRQpyr@O=S?usM{;HWlpsZa zbp!Qx1mW=}X`nRegMVPSZs|O9{dv~VLSQ5Qx?&@(^<6D(LEjktL;!pl;z49!4_9hc z&?o>>X4E7?$iF`7P9-{0q09%!aN!M5j^ke%?M^o;a>j`i@~!Zn;&9bBQ|sT)3iSQ#6BED0_K_-pc2s19+EsAU>Wf7rNAk6!Rb$3U411SLjq2y`)!r?tX=bN zfpo0{+Pz*A3p)*u{`of*BIJj1bT>fK7|ZrH*?79?yR5Yw!@}ojKRat@Qejbkk*!kH zG^6S0!yBU4A*v-0#16WgGThN(h!e%>xq{_Sy+U#Tr-~BaWwu6#B2swh1cX)w6+O%} zrcUh>>7!q50YT>w}&rxSb7Cx2K zTX7uS+l7%xSgNJFJ-&A)H-`^b;FujHqMYMVf6l%Nr$wZ!r`L+mwV_F6-b)77cK;}M z0QooV;-rFi;X<%gkKIuBxC`4}kUP&gJOSp)buoBkxB=^aU?+m|2nfwa)E$_`DxmOS zCorOM+R}XhCCUH|=$M5(^LpkxQwl3djwakBgm08sln7%e>JcCwSfFqNZzdm@6n09K zIUSRuuKvRIx6c}+Wq5kAt?U}p`qC|bG0&D*V%jm}d32r2g!XNVjSI(*mUZ(YEZz4@ zt+=?@&7k-Hgv7_hEVhkV(Iqz2v`<7l_wG4^!lde22T@U#$8M5J{a8f3|3+sK=|<&8 z^8K@u|Jr3Xr)iIq&4my7W8b%kQW*%%db?Aqft#s#!;|rWLH*oxdVfl-*Q4PJy*Tl$ zcV%}i?@qcO<(Z7b6DB{jMj*=_h%j>_r_!B?(= zJ9D&RN$Jg}#X~k0Z@;QxeLweBpX^ws`eSnhyWVd9x9|Y+L80iMZC~SQaL409_-q)> z-frx=;M}vq0uuHd!Sdiy@XPeriEW^6C3s%2Ja_Yo%h}1{hd+<<{QB=}?fnlm^*&=3 zcpX$0ZE8Dsnbr%iR?v@xRNA_2`TbY-`X8sg|Eg$&KoYxCR@%5uWV!l`eguTxABgmM zuX+5)eM7o9b$i>E$31Wi{pd;tw65%3G|n>XL`vg3$?t!nwzkXU! 
z?%S(d7F0R;T>ED5=GB^O=lj~%{AdkSykJY((p5MA4nuwa_77E|a2)s#e-2+eTd603 z=)qOU-|skOYdcly<8P|$m8H^7qTXtohx>yfSBZ9Q&yn5&{p-k=p-D5ERf7EUWWyL;cJ5cP`UvxGJS;87EOa8}BR4{^j95S?`ov~s{=;aN@ zss(!4Y+5YsCToCfK2(r93Wu7Tsw*#4&;XV<+JKzgB!DH(c_=poLnD}egCCiviR2aky-B5 z&a%4wTSmf1rYzw#PZC4Basi%J^2E>*=Im>Aj-~!#V-g|my-imnZSA>>oa4wMko{X03y|tCMny-sAo`obat7PX()xL!QPNd!1=Q z9QR_Jvl-%5ZJ{jIK6Z#i#58@Fd?;Z-pzUD3t{<4)`^a0D3Y#S|l5ikO%;cPA;E~3f z>lW+7>83Xx6zGoi4$BwR)R`Ci^#OW6q25O@MUa4%lrcr-tdE7P2?@onSjdlw%$0c? zSW<{e5Xh?rU*pMw)QI$6u4Sq;F3sIxJxgLG`gBG_)_uVJAr+ z3wWg7ziT3VbI~+D^H0D5&%T=f2Z@g~N-k01Y=`h2#fD(Tp;j>sOe9(Xj&M-5dg~hf z{HnS8=Sr8X?zWw?uiTRfR=;>~|r9)Gm8 z4c{5nE8~TWL>F8nj5qIGbyMPVZdhy(t&J)hj33PYcvJMy)}{VD7y?B*UHt+=cTWwp78qF&+ z`^4FPW^2Noy;VRURy(5)jV)mu;vutCnX=;v#vRscb#qeAjG;rzq)C&kjUGv_)K(n* z-c2s-!Q3|( zP5b*B9?A8=Xd(#y#DIBC?vS|2tXwWEda?1O{h4?M7sb8h^m_4#b^asz?)1Y(zJ{<^(ISVjoId9OxyLFl9K4LQFL8doBjiw7P^`6J9ZuF{bQX^(66UD46zK3EkzfE;5}TF*)_4Dz(g80 z(`Iwjber|a6bzHzC9ZF+h?8Ear5LyOY3`r?*_M{e5owCmSL%ioi)QJGc_gcM&STaRpO_9sH7-8;~nAbDHVk zN4042cNcyR{fE1fZ`7vjGS@_e-DK6)%v)O?M@en;{dCoQXG4*^(hpf$6p1X9O~B2s z%)2ijwSET`6yR^#qjITeg0-yEK&k}0pasDQfzEW4z-=o>>x&q!KXxN5OdDYr$B>-C z!hPEZuk&3AH@>rYIXGf{gM5mt)9cRv9cvHyVTgnmPNE#tTo)oEBU5U6pSao?45u8$ ziVlBCrm212`7w0guqj5vfWzD5wzGRoZ(F}=^`Rd^mWqOBfXi8gz=s#xALa)deDL2p z!Ec9A+lOyA=HPO|V;r!(b5!qFM?M59Fox}GG+T>yjif&GOhMn+1wNvB?5+@*5Q^IJdSJ#!JhT*S} z)y7zFP&Mc5EVm!~>{kkPThTUlbhv3lVj>#j<2P5GlPq-K@ghS|v!ho4ZBd7ryHLA$$HWr6qvo%-# zRKRPeRG7J7v`+QLZ|c1h8h>&Rl6Kp!P&B#n`q)gzIdGz%C`?>9z)evO>a%aE1 zckdqZBL@iEhv9?#0yIKH0WuJOMoJWqa6A9epa;XbjCHrdbeA*QPpV9S$g-Ee?I(`- zHw(zUaXI?}%``>%V5kL?7|J*bT}J-uvfp`f*gX63_HNyCqfjQ}rv8kOCT^ffq>DCvjM~=xgA%!N=TpsrTeQ^u>A5qR_L)7M_8_ zg+9MDeJ0BQhzvsU49$uzos;zW1wdk}nmcz1BFQnQEX=>#h(9qACltBC;n&ZTk40{j zS{;1KVJ9ZVa7GflvWGn~Uo^OUWvGi*kG6ky?M{!9ybTjXMTx5*PD?2%DAmMtPHFqy zc3Rw81KonEsTD(7Z(r?kv~%kHZ>(dnAHYiGzJJPkex&wv2aEvKKv z-gU`eHo-FIO4d1}^9hrWI;D7h)%HFu(>KKYEkJQR0`Q9P+soFa-}Wwz-;UuyEXNtX z2^|1H#8c4$0Fr8}Y7mY;Dc&Uy|9Xwan4J$Jd^G`AAGV?FH&4SW39UDzVl|<2URxU_e}Dl=4BmB9VX`e 
zdb9lnA*rmvbMcDb`{wBu)z?=W-xI*1C8tlXYv?hB?<)yRzP=-}VtZ@(LtlI&C-U{PI?ikt5R|C-TKyd_V>GAf2n)QZ?OG91ZSF%h7XI+(hpWd1-jkuVY(~JO|3qOp&4z+UkXxM7BzkE64(WSYFuov&z zo|J$FN<99B$JOO0RzJ3#A{iC(q2h|KNSNIBxqZK>`3w848WGf6pldUhPNIy+@q+zQ z7(J9Ez=BXW9NT5pu7tI0pcFc#gt@qP)<_R@f zNL2x(DG3H-QvH#Q#|jHId!i4vQO9M}-znYjn&OJm(c;h?WY zRx#>M@UaU!B+6lk=?7xc--lk`V+o08zpW^e8X8O`D+nG6BU+q4S8nKTyM<2ow{Qbhz_C5^w*9Uv|elQ0glk2Mdge^BTX>7h(n|)IC zFEL#1Div|dsQ1W-+~SCJYy3t{7l+Z2{M8ju7?%ZUY7PdcJ?`TgII>0nh<>}|uP-uI zl%Opg{B_@~M@5|0LSv$;nP{w?MOQNH&q!Ipr_0-2M|Pk}Jd^4?&nQwdF6h(8-;Zu;^c*L*LafB|vr+4U#Kc_Fe?7D@Y019w zIHhBjaZ=jfoW3$gZPTMKuH~kW-eVjkx}{;|6nXF29gCq}wiL~g8ccnAmjff)-$9W~OAZ@L(NS@MnD+f6$iQ@=Yf-r#+t z>3utD_9Jmd$ZW(YiOXPYN;K_lEjC=f;53@ozC)#*7wz1&8UJj%t%{VdnyJMJW$|qH z>{)tRj;W!K@3+4#Q*IeqxjNM^cE`eWschv9OII2>toZM*h*TVyv0P(;tc=LKsv}#n z_B!(uJ1_X(|F%}#QTYoGd}@?q>b!aKUL9HwN6JgCIPj&}d>Sa@U~?e6EgBz%;1vCgwk2B2KV*S(H?c%#a5Ru-Pw&fQXCR(DpX21sbTXRq4dlU^E9PhpEcD=~g_^*76SV!e(9S_IU!pSbuB3YN@inSF> zGAvpm>vC>Z1<9u^C>!%6p!qvp>ntaKW$~YGR`uO>-)~d0K10 z9nW2=Gj~k8tygEU%W909?3}9s7E=cmDb4lhXKBdqS1TNZUVvU;N{oS5cJuUVhhTNQ zr~^GaP1JjucPzbKq}Fs?^q!|@_A2FRWeH}c-X0RaWo`-o*L7@I8jh!PEbf0#COIm1 ztBHG`7-u3LukcvMS9-3-oz9ob?DvFfutetpYao-P5C;YRi z5mJH&`ipgROpGk;>F%6ugEnBSvkTf=ff=XgcplAO)$=xe%a+rD4&NBr1U(YnH#yA+ zrSNb!i`2_Et2*Ypj66Df^YjfXMo8YQlFDo=Q+`>Le(F{CY2UjaKP{c!<(J^6yrCw^ zD|CB-=^#Vj@c6j4&0=n+f4?m8@IctZ1-6;W;?u|3ihIlGZ4{4h$usxNR-R`^yXa@}ABadAs6QtCHmgj;W4T_dMRn8v6erDV=1RY%K1* zSoEV##i0rtKAn&`AVRn!U((yFA^&#UtYB<1|4DADZ(sj(U)f@3P@vxuME7T<3&$NW zy=GByWS76i49mH{m1DV0cK>**#E+I)2x6h#=pYk>gN=6B_3NY^QxT_@VVh0oqI9_; zm8iVI)M6ci^mpgYKE`I9RwI84$xkxRz%^eL#VgmXaoCrcwcv}rJ`x+W13HFH9@iWj zsa%m7u*E#-iug#a@AH2&`B%WG6YIT@Vx^RuXNR5}hRm2%*$oGR~De2Pp?Zq-GX z{ibu0?ygKdHdE>DbN39NikTlH%_;_co2~V)py$;NhdUH!N({vl=Shf|k2mC9D7Q-z zca@&<dGJYxZ=aO&3V1Z6FItD^ zR=u%HpY=tD&ve`-&HxL)ht)L~PCS*ryW^u|aQIr`x*p6x0Bj9y(|CBtd&j`re#+w8 zruT*Yj;=rE80;6{nh$%}-`==Aywh9T_3eI)ikOPDNoSz8N4Apr{yjVFGv+RB zRC1l0`R{Y<_snuk9n_R2Wn3qB&tal;vfTAm;@(RTs0{atH=fD-=$_Q9g%JPy`f7*e 
zX8Y$OD$2|iDb0La_}8OkrTg=*?A+Amja@M`&dT3HYS1WQeSP=w;(4XXtHE74Dk8{Z zZm@*NqQph_zq^vsGJc}@HHmG6<{gE`E_f^}zVk|{1a z&|1Scqq}C>q|p=HPLp8w*Ti;_c7xL2ry?Vp{;v;^;s;nFFyE{iry=frUfVN!;tTJ> zbFo^fzB8SX0bEnwa6a?vAzDlSb@f6}9&pRx|DU>~zR!oo_}=)lAYfU+^TX92wF!?| z9M%S*c8bYR``I57g4V3oPS+J$;U99J2SX*+{p;Vl2-(I(#LMxyRrQLM%je_0Nn&x~ zu?8t*F5r6KDiJszffq03ZsJp`c^~YGg;j<+|L5NC&)8=^sr}L|r06hh?xvG=9{HLY*hc#eag;Y5?72&E!~qLKzgBq<^^QHWC7Qfbau(LkA^NlBSX z(mbI_QHf|SO`1#dT<`U)IH$dH-jDBx|NrbSr*qEUYdz~(_jBLZJzftKT~k_YYfNcF zACrhJt>lbywbDMN`O2)kV^#s$`09c)v$@>w^0V(jo$2JI<`li~12ot~Yq=DMSH) zEc534e>DdN>#sojO?U?Zrmqa!-1*`y9&&GgG!SR5KYt9L2#n#AH7*mtg*!1$ zk*to@{E+?yb`1@Mqp7wH+dLlUWVGh2`WqJ?r*JVKjNAm_+d+=bdt&BR6O<(4FsXcuy)dA7F)s z z(_)<464x_tj9gVD|I+NlUexT!D1FFdYm^}roR(GzVb46v-5{2G zPlP#RI^^I{s)S~pdonvzFBG5xg`5n&a*dM#yF(op(c zF^iM;P96<4D?c-ax;}7KnY48cf9u+JipRJ(vNz*KS>WZ9^hd^F&;Nj3^Dk9Zo9qoe zz~UGs>wSEoR})m~X9^Kzz3^b4e<&AnBTsNGOm&Kg^nyXZ7?HB=^lSO)fP}r0C>;^x zb%LOOR^m6E=68_yATXzov|w?V=DsVp9)2|cVgPsxm&Q|Q9doZ^TLv&P{i6K0kwvLR zeyTQ^=4ZD$TRw|FUZS+CX`u@sd@a|GKj2?>J=j=m z6(+&MvUq}}C?K8~(@{eJbU1ID$6@LF#xCbas)>ge7}mm7`zl7FG}UdXmocBu_k*2* zlmsxWB78IDT&$!?lq=)Zq_a9o^~C$&3d%qnnRhCDGlHs8{g)$IOy#l-}EDXlI_X-C!to0i|T{X0D|mZhOAIJX|tceyzn1 zukIWBc#KPnOaq3q_8mJwYGucLB6pGFbA_GF@%|5A!vGio`SwVc(!o_P?P2mOQ26uN@~2XmU)_Kw)bRZsKHWvx+g}#g+oT3!m16-FzKX>ajq^2L zx~6b6zJK8%hQ%2x(PC65;G_mhX75VyY`MkmC{?_`NA{p6kus~ZyRQ5Km(M70$>v!7 z=LGGxn|fi2W12NFmUgg=T$|E}lZjc;*X25-C# zA(dO>V;~+%3(gt%{9k1rNEr(4F?DsWCx~j4E68wW%x;0nP+DMjq+~2GK#(XGL85$! 
z4=iUqK)>KQa7hVtbpAN#C;*EmT_E3-Q0sjAMr73G=m~QF)2WB)pZmAmd%_X_H^@0o zNbzUB81+6s{|aGHmSDr4liubxfE(sGRv-Ppiw*tNR6dV+D`&4lacG^fe6=!RWtqg< zFLqxV>zjsl0iKJ@FV5v|*WS`}Fwgu;BUgQkb52IVw&8~jsG7=3`Avs26gC1!i(_*BnvW%HEc!Q(2+`j=e-(L>`Ko{05UGIv3%x+r=b-CXjdbjzU$30F`q8R` z_lr8W2ixXlow$3(?H>ubj|c;vc={d2DB~PN8AHwx{|TriPh!{u*HvpvQV-~L)w<=o zi5Xs=i*!Z&VhVY1?@$=ik4^SXz2q#6|ENr*PRFSgw3zy~o#O05_u=0iL#V)}=p~7* zK?$!p*}4as+I1bHG~QI8@QcuFn#7_dn1=qIk9Y1oN5l9Iw-a88t26Mk7 z9i@I3(HHE z-t(Ma4=$R!m9`R(F%}8~#Swr!KZ9vr^ea&LOszn9{XY>7y+zmWLGo1%cv@#y zxK}q^@pyc`oR7~J?wVBRK(}f^;U2f&Ud2E@{O`a9+p0ga0#ty}R_k3$aUCM4Y7cfX zwH|8q?Jb5iaFaC{$c+zdOuv}|-0GA<`vX{WIf)TfzpR(~Vf8lXcfWn2`Ir_XU7@oO z;F@*Sl&l9Qk2{VDx&d^4P6fZ_KRrX#?;``pgv;+|1Y^d{odsX5MG+}$KH$+H^Xa7T zI$8MHoQ`_X%Jg@v**i`yiG#jPawQys{tj-vPo+U#t$YY&12ZtVZyh^{A)W!@X?xZu zQF;^Uv6kgn{3}VWdDgtQtI{P}_E&ya+>J$)hbu6#Q$307;tVOPLFXhxll~sLpdeB8PXE z-Z{t<`k?#S872Pnn@yiiI9c$b5!rKHwvM?Vvi3d8n(N2;i<)F*s#H~JF+H6DEl+>kLS@od+T6(jmZ^63tTS*KFi3Io{9C%|23EY0R2F*rt$?j7ZLfct#qwOaKzt!#TAIYCxVVafuTsZ&EhF|3rKlht!Gt>mawLjt4 z@abXgMXh4{)I;2D?+znA^e>5c%1r)Gpuxsmc)gDYs!hDh zt$Gt#6Hhlk>0&6?Kg>>9>wTx7wP^6aX?LH@j@OHV3qQQBeL@(sN_vCmG;FAX{rbd?V=~S<%W|SuVDbp^L#?n#v-7C$;Do z%|z)b#2CMDH0o7+7ug1!vDW%{b^c7fMvq7@Y4@}6gEwjiZA)Ggu z9;%qTXc|I0*BB51llRmaBw!4J5uNdwDTZZ`I2I^PkNEe#bJE|6ID z_rqjx2$oJ1iCZ(*mD4%tC4eb^E)XDuQFUDMh`}vW(HJ|oOxweLCLoP9cQPJsFhEld zEtA|KsG4ph5G(i4ug0O6Z*1A}jOJ!h1*LNQw=u#UrWze8ZN|h@OPse?D-SK3xUp1J z5Lw1BoVW?^J9yIA<4r!Ca{ zfq>8`k%`Z4V7OUE7`i0R)~oPrJhz&eSBA&c)itlQln@!XVj77snXYzMR9~4)`)O=! 
zTrSN}%E82R_ycgRzarrv3&*@vYK%6S^qmi*mi&K=s~NsIeUwbtTDlxgVlR-A!RL7W-xydq83#&Hdg z?#kNRk&%3BP@DZSal-W$8Dh02^9SC{wwr^w1kbu$0-B2>g~|960qY9PTCGaq9WlWK zrpbAEc{iF9$4dzHw;omd<8Y=Z-r}?ke&vQyfKi|JPWYbk4(jbDl#$6=WHNMe-?d~X z5R$DXlxJjaJQ+ue2_@`0Gfx^g>aT&JOtMvsh{kO>P87L@7q%1y=eEgmvvEx>-l z!#I2*aG66K(?rSqOPe1Td0o6j_1(^eJ%7Fuuda%W+{Ngkw5(gg)zf7AHrcco>RbyA z4P_bCw6L%!J@bAg8SqIaR!NThY;PR*AZv+}yDua(bXAV*=nx_NqNv5l&#iA-1a)Xzby zdD386Te_fZe`a5QuDwiEVw+uWJZ-qUVD{i?`oa_3<2onx=B=>A1*F>!Q%eO2k=Y4D z!>3&jE!2eAtN3{Px08uzUY9G&Lt|ql@iI8U!|jH7H9lQA7`Ls59;fzjciR=6y%Vl~ zbQ9t!%3BIFO?L$evbVI2w6#V008%V7RqI#|~f5;`$MdM0tZ=s&Uhs5-#)>@!Y| zL{|s$4xe}x8R>d%B*Jfag_X6n=cx(55F)2SWnW`cQ#B}ZN1f^%XY;yYe21VWebf~9 zF=>Px zS9d~Km`EN&c_yk3sU*|BI!yHs^aZ&j{~?Az2ckYa@a6p*$#{`KUrcesJdFI}6kZe>z_tg>&UTW56Lz`($^jma1~!Wuep zOWnx#){z5=+4Ef8Ibd`6>@x9d=c%tHq2p+qx_W_A-GT)RXbnMk1SanM*uyTT#y?^6 z?dNsp>=G}#d4j{9p3r&(0Np*w+loT}me7rt z;?JKyAM*6+(=L~LBx&y*HBv1*!qY>5t}O9<44K7k8AKtu-<-B8-c- zYm1U`gah!?6}bW$wL=T!`&N1@56$i4tC+TA$YsB-*@OBVXHIxkO9Iqa1LZ83W*jVa8A{SZIz#Rw6b=S2k=2hpy+xGD{J%t zZ_+B6(f(Y)Llczdb+V_|g(-?_LMBok3;7`QtB|nSDNG@xXO$40$Ts>2 zhsG>P8u${n&DPfTp6Z0}bC5%-mcM1TG&@$G8)7lib9!`r638_b^z`(8v+cJC5>Wd< z_}ya2Fs;N+3{n*#N-!O5+pNzgyx0&*(Jvtyhj1Ounl%ggz`#I0Vo`VP+VvaJQ6u{I zdMQC*oqnS>JmBU^l_KLOPumZcka?$<-cNXr~#&k`k#Z9}MV9Bl#R0 z%irC`m)zz9_y!3Oxr)YQw{vyiYH}l+he70@{+f7prV}A`9_{xVb+#YMuy+CT7fahO zqQ(=aPw$RlNR=Q_FG~gNg|g8ZPTE+5ZANj90)5HY^}PK2vst<08azg`)RR~-0)KvY z4siwi`cegz`@Mpen@cf1vD|WkVea7+QX}{x>WK2*T2=ups@gj^IJmpPD@`VveOdVg z0aUq46{s;@km<)XTyf$@axA~W@IS7z{r<~&R7=yz{m%}tpqC+}%Vlp5nvTOhF_;Nl z?M&EMY@Ol4Ohd%1PWb%4cgg;XN{@R9zmFjN_fp1f(SNbF@tg7AZv1~^H-==*TczWg z268(4202-`Z;s)2Si-LM@l5;LJ&~*SsVq+vDi4loeHG~w8R;t-*?6Ml`92<>@~@Hm zqP5%;O^$LsH-7GG6u~^rv)h)D`0qanw=etKAmGmUdVz_0he$Ou_E<2RXXmIZS%? 
z?B_C$cJnifpULTGET^4L6=^a#OnO9XaGm0svTv7Us7W>5RhXutv!47HuEY1VZDL9~ zeN!v1EHppla@SUS5<|cLtW#s*J~>BUbOS3Vt&Mtq$m8P%y6ZW7qRP3MnB?W{XfZ2y zmoIFSiCgsGGToP$f=E~$pLbHs1uy#IS#In)&QJG^*GwcNjn8C^KcV|Sa#lAvt>Ff$ zAJWjM28*8y=+RbagFK#$M-*GXJ{=+Dz-P}^fEIB+iRNZ{3T9xMCp%$LBMI>pJ*SwM z>Mc?$>8{H3X)9Kwm~R-CI2Ys_ni~h3BM3VJVJhMUBhCB%5t272Tph9yZc6&0ZG{R^5y`OnL4@T5d4p*)t_$F(#(2 z3F93aNH$>UH}FC>{_hQ_KTJvL-;7&*XCQKz6t57{-1X~^7WfdNmC(4xCkH^)s0q?% zLVNT_*q@jj#MsV!T^TRy~l$d>qa{C1=s${7Ixx+rhVE zHi*=E#fSze93dRc>4@_eusB|GBD+fnTnHmxpZjQ^y98!h$0s&n;*z`UaKr8?MK~Bi z&w^<&YxK1_vklex#jjt;jEd}j<0CcGjy@bRON@G4c5MMqiXD}ZZApNxvkKImZkXts z0Tw=u=94Te?ceX8PX9~1^ka*4XhKI8p_nNzumuZ_yJ2Xo0NHNw@^gB3+D;l7xuOKf z-j$S}#9#i79DZ`9AalX1fg2}mx;Rz4gETz36*nEt%Bu2-ugh=~*Lm0P+rcs@-;&UO z!>DL(cds^w(JoiX3aN8L57rozAG2LUUqv#N9m+;%5)#@KE2mn39Y$f-u59DBgBYEE z(Ec8 za+pMp|G){g5+1CL0coQi-}ca5NQjIqkkcikV&3{& zuFP^qVtH=!LC}i5w~RstoFr*y_N>4$JpJSm4+bh3GwAL`(qk(b>8qB^b(5%a3VHl^ zSz!d>Ibsu3{$My0tvhin5xH!RffpH!$wOj#uRB5d`M!o~-S4#d{=sXC{c#J65zrH@ z=<7r_KDjW;V8^Cv6AnFr-ZzRKDl)pfG|FK9{Se#dk$fNvl+3B1`Ny#VgZ+m7ue*Sn( z-9dLe^3%Z9OFIX3m(X2v7FA)igd8kyedvR>h&pqNXaa$_WF5EK37J%b?p61tb|Z1zG-cGjN21dO~2Aaum_wtaiZQce%Z66AxWnn5g+~f4*L64 z%$D!d|HFRozI`0vS8(4oTU!HDxXO}LrB2()%M)TCfIoaVb`5mF1+tq77M42(oJ-Q2sI3NpZB9f%sJ>cGgzbx)o= zF(2d-J+4H`HHqmY^^U0H5KnhopwWEV`A?3Q8-`$jzi*U+LgKrho&M_N=NUZ-zRQJ$ zn_;u`5Z{jt4J{fN>bMoKFPw^h^BL!LhUOP z;)MD+GFip?jCj9Pkz5~I510Wu(Q z7s~Dk+`4ru%~wv5yac}~ONMxUh}+FoRM4iyv}gp3jPRzG`;A(v?c29B=Jw_jF>O!Y zrd4K5-74S5APK5v>$HScK(kymhOTc1MNc}0bywd|SmKYcl=20;``a= zgJ#{`-Qi~&J`qbNFF$L(m=Ix}z5A1p^3iTT-#UxUtJ*k@Scf@joK8@;3B8UhGu7u3 zcmV6qC3Y(8&pUgzKX+|1_3d84tiF)XJvn6dTsocb`ZaW?oT`|(c*f+}8|Q$8Kz*pr zenej(_36_W8oS?zx77{D%LGFRepWU(Vdl2XHur@K1bxo#@wRGwyvD3Cf1&jK(7)!b zeOS`@{rh*E@4b8XM%T4PqRMaT1!#rhs3g8Q>KCklV22+Y2Ap~8QZtoq6>`(ji@Zpc zuuNB@hW2X3aI3|F%1|vmqu45At;^LNw=$1zkk&641Y;oAOwLs+nFDBlIUT$=QD_k>CjugaP<6DI(xV<~r5h z%+pK7=?jk(Z$cg08fUmIpn<_9EqS;kIgOAovy>@R&X{+xDa*Ilg44yizxGbG{YdWU zJVLkq;l4Pg$)|~=gTQU@tj@Ce;0BDY!<6vW2z?7qC+jdRg4t3l3->0`VqBwjxTM6B 
zeM;$P>k1u#X6W*Q5C|H_*4~Z**?Yi)2aCgmN`2deANJ^*bGp1g9&sc4cwkV_(pE5k z#-J#xDkRYDs)$|tH5%v}x{IRr?A6dHY3Y}0>(V+<)e>zwZAQ5vrtaTdOOxoyzb~QL z`QBYVt?*=~E7biz9|u-U6_9(g4sPUb`+?7WwIcTOpbm29U1?hW;p;8U_86N;+2E&j zk<%*Uit|SXz|g1*{yS1zPG(V80r{`2PuioVp}~y;z;Gjtf;Y~sON$1;_GqdNwf7vC-*cQO`2vnd z-NyXKYwU*l3c-z}*IXDL4kMpI*o0seR1Y4Uhts(R?^;Mi#2o^7!r0gij0p;+rlw+I zVualAOFQKbpa8l0i@J_0&|sM_5h&nl}l+7OBlyMe%iK(*+JXT(D2&* z(@B!XAlf}~>QwXkS<(9J#s!N?omlv&B72+qExcEk?R`Ybu5`g>DP6orOKSo4ae*IU z3e!po!}}&`462Qjkh>CESmJNT&3fAsYg(Y9x&FQ@p*YO}-)7%C1sPWKPMlQo-59HI zf7(^hz0}M~9mX4N@YHWD4Cf;a3f^@PHKti=&;K%<2}|~Lejcaj4!K7Gdl(Gq9_k4W zf>C*>DS5<@)GN11%_ef`@>mJMo zbw~hX_TrI@(LUaAP?J0L8S)$DIu(Z=iR=kE1h!$)Ln71eygSuf(sU1pD!1gBqc+WY zyw}`qoAoF@Zgecn>8-H0A8NHoz6L%udHJ*b(k%m)c)dhiNsVvcck$$TL?zOayx8xp zH#(FPacVucuzIKqY4Vt2`yD|Qzg`T<5IR<16x^=C`*M0IWKT>A9lY`n0xV{mV`KoA~<0ECC`J^+3v{n$dNffk( zRS`uBwC3cr>f*T;`*K1AI^3k=4%aPR7PZN9kJ%R!V_4=Z+xbCs%Is9>;jYh;xdKXe z=YVr*MLp#RyccE`e<(1&HGX-z&G!LV6QS;F$$MW*Xg<0x`i~01U*Cq83j@VbN2P5X{kW#^9eb!Cf+LzEb80V-*4%% zZR@W?L90O0y5|IEpwj-GOTl4n6Q#j)*4e}NujTfX1QMB3)-kjPtI5lhG#+bs+fqe9)yrG(bC^m4juLr?b zfA~R6uL;_Q>NXM(yyD=?hlg_yq01#F0(@qvAb6NTIf=QcmG%7Y5}MhS`rE2MakJg0 zZ}}g)dFJs5!YbaEx!aDF)a!Z^vw7m9zizH#{xq2%=le)^L?2oyc>RW(^{18gRq3=9lrbPyMnx_hpK=CHK7{O4=41sR4`E8fHY zTXKV(SkvswKEgegUK(q9O**)q@Oa$Ov9nyEoghKZpFF^H&%^v>(BgZQC70Iqf8H&- z0MUk~Zc~Jle9v`ei<|oT(bDH7+My!6%c9EU?5ArIrC)>XJ@e@Y7zkNH$JNxnohjmi zaMP$0IWMaFzZYbNqN9e3kRuN@swZD<_#Dh>t=#AhDmqndZQsrz(jU}Z8Y|j17=5aU zl^FM&PQaqoqSq-A?J++FC+euOQ%= zX^7K*(__FoY&o2B6SI6Dlao3h_$L8iZf7(jRN}y4i1ZZE@CK9Hdd0=XEv!efPuq#Y zKd9)nrJ9;he{5ILR`D5)VQlWQBj+}f&_+eX;WrohT29+@`wf=X>9&@}hC`Q_dq+k_ zrh(;>ha45Eg9{u##(oxWrf{Y{a9nt=7s(5ur6eks3(Q&46d}INvgJ||hHx$y{NMx# zwumu+Kt@pyt8Lk`h4@fY;!k5%owU^`Iv=dN6JEZ2NlN#tct|_ z4K&8!+j<~UR0a5fZ-JoDsBd1px!+KYVD)>QkuzaEe=xn4yS|$zRQJOq)#N^@H3MC3 zf-yHYpFD8Jl`$@(bTAxBV{!q(J0Mafc&E2}1h+~D50J#^R!O zPro64DrPNC-s|6vJb$o0Ev7Oe$MuMU@2wArZZ&Z!vbi4FvZ6Vptr^Q?19-oe~z6TUic-urhdZhzP~?oxPL)Ww#QKpIXu 
zH!-$0{qJ?$K%m~!=LVf4La4GyA6m7$Y|;-tTrm~S|3TzicFi7^K&)ESi+&c0N>$c1 zbe6o!B~4UdtFa5$XD8aj5Kb-)%*BhiynAf ziNchq_rIvy(_BRS9NpY$Ob&J!NuJZ;o{~8DE;m58`PI?m^uCNUe3`?qi*Tilu&9Kx ziUiq#TKS+db>Ci^PW!lN+Ey(st#CnB77h*$(uj0d()4(EFj733%bw0>3EVvjI)M0k zErzosM1oRNT@c6=vBU19qY02r%A&QoL)uG8fJDOkKIqG^0S+1vZ?Te$jI#}Eeb|3v zp4b5%x6$1Um-|GpQ2?yrOQ^O(z-6m$8-&bXWvZ!VT)C_xq{$Piq!~ZTN@x%MxH@xlDpDI!R(8&o52@x}*N$E&d4sr(lu> z;VL9K6sJCoyU(R_Zf72?!XD|C_act7ZdFoW)rpfQ8)C%a)!Wp(tg|joZp{)7bAj`^ zp$#(1G`vZM7V~8C@vSKyh92BIHmw6s6~nut(0BlV^v;b)o-9y}aY~+A45Ptg*H=lT zG@)$-?LYIOC7NQabA?JnH^oWInwNbVDJP-ts;j~QXoqdvg`1tceHv`KEu85+1^fBM zZ(cC4YOgfTwPtmEdhd*8C+Q&TFm!5ygQ7AoEvxW7}p;OPID@%Tl$K=Nkmb@ z5-M-C$r$pK8OU@0Zrdhe-~XO?a7qFZ8sCfxRuf-Iw>-hJ{lRQoAp=9hK={SJ_9OjN zPQ=HeyrIJ|qUaPfuB>ye&393trf&&^|KOps1dmq3NuJQw8ezSXCrj6y9mrri#DI*@ zJmU~xiI~xbkaT7W~(tQ_ZOE!E7vx0hubBd8=M^9l#z7KJ~ z#(h+nNM!Z(>uw}XEu*h%SUa`&Uu4c2%mI^P2AsztnoepfEe6VZ$w=?$NQF<8NefBz z%3`~8!R8u&xzgMW^mlcMWYYdhKtV;s8Lp&NxH_F$3q?8Z*@7(VOiskUr~A84A+RoY^=5&xkO|MK&)M@%GV&xe!0OAXo+^o2Sy%Mp zNJ>Xsr?a4MVN0jd_tWPuAnr#(ZSJl2$J*4BK5MbCz@PhI$cM|$&led{MusHCT{8F^ z_l|AZE~1RPag5pxxI^Ch{Zp%a5h+s+I~GxdTp3aLgl)^c%^SY%(FmW>%e~1%wIY^A z$c}Blc-u9sMyB})PiRl-X@%}ru~RCxuSHeGZAQG{0$jZnGZDNyPXU5hmc{C(UG>(F zIG&x=l!F#H`{-OzFD2;E!URiGZR$iET1bnzmy+v}9waWX{>IzlgrW7n1BF+05&yFE zsYRscHX-_6;+c8Y(vBp}`f{9{V3NPvAQd3DBU6-F-9DEiud6C$$Mc7;|4tZRQSw&( zW;Raln3z_trX&yH8p+&y_wHHQRA7+7JU}KE76i2{W~L$Jkw)^x)lz&*K*|$uV~YT( zWf=g`b;U%d6eHUK@g2|kk4QVG>{}=yY*44|QA!VIsR$bTR%Cv1BH;BdtN?<4sT>yR zSS8c-V0L$3k~vJRR#N$=PaeHKSleTqN=_1yH2rRtmtem2%w+nba*0|nsAAt+_TXg3 zX7mnMj1JnH`YwRRl}-vPLtkgSP2$U!Yulvo%RK-Et7_OB4G#@(KV|i!xMmwYC?Ek& zf1zz|JYJor;69?W0F@6{A*NdeJR3TO-toCN=h?;=<5k@WQ&(57vlYY+F9*cAY*TG= zdUU|UDat4FuipgjREfGm7+}eg4VTMEui?V7$fKDa?~acSh&Z3QR=G*FI-zIQ z%x0H*jzqPKcFQ1YQto!1LKHPI4!N^MK2{4Ixh3m{3ZVgR#Ml|YJpT5XzYuRM86F@~ zLBvIcHYr;|BkCGVgd}xgF&Nh)q&ZmdYBaX2uf4ju#&dV;mnb;DA=NorZ41H2s&r56 zvAP8w{o(GPVEvx9o&2IuKdaunDXJk7nrU}4ZC}ix(-u>GU+T_gI()bU;ofRPoxwv2 
zHP6d@C&ft17EeB-Wvhh^n7JxuQP%s;Hh&pXI)pJC%~3(attre>E847a96v1^l_-sO zA6nt%+;!t^YUNaSDTzkqgl8i3d56~yB>R!)0~d%^c*N1{&%)xC`OYPsdIg<5y{|^= znu{XF9_dlV(06gkT&Q%U7NI$)_+59b-&Vtv;y>vta^xCNf#zS2;1M;bV~3}JnS7PG z>+9Nq6VXl9Q0id&Ajji3JL&I@Knk$3HlccrzRgtH@f?q>d1*S!ukXP-s-t zdSsj{vZ)OO((Uj;;-k>P7D z{q*^BS4=XI0B0{ds?A!v-H?qjm#;K;O*yvKix|x?LN_KPLzCFh1fGSAs6X$}E5nn* zBoAHqd7i#E3(4RRU!MvPqtrzRcWJ837!RvXNeR`Y!3qdAOS5N+pauk8H=I5cFOCm z>&FrD{5Bt#NWz5TP{!YC;kSo5r}5gD@Ss{F%@?oZV=zh{4}3%->1Oo|aX0zDyd?Nw zkr=7FmhB;xUiiqp{7sqRjfYhJn(pn&W&P~)S^&fJRH{FV)M0FH5_*NKGvm4lyuio7 z>7593bZuryHL5N6UtQl^()BHJ%C(eR?uEf30(9F>2|M{s%>qZm14Cs_?*v3W-Wry8 zb%rQl-n4C?dHGP%0eXNunRKM%Gs^^s>@ggn#2oE8MYl`a=+@0tE=w|xD5!i*(p=hO zS<&*RteW&QT!TojIzCf{kdeB1ny2;o);f8Oy?PgR(p}D<2sys?nh6%f*NVdtWlW4; zd3B3aMB;P(*}ZxCsW14zA2YGkMS%*b;9Hp(9?e*acYPIAO4gP^I^{7xA>820lXGAP z)hmlL%lr6o4&nL5unhucbv2;;o@vIe4X?w(*o(i{Mst9y>1dV9A0qtAP$r{LHXUSA zbWcG!pVtq;BfCbXQnbIGJUbQC?B-JMkDEG@eW@?M&0%3~UG6ENS$jSrz`S+o6z%PM zpruwETA#rGn+TO$BjGzIGlpVNWElvDPJh-a&rJRVrMuWxAzS1WTqQf>PH4w3dD1g# za)xi&-LF?7f2Y_VW?>2oJ2ISF2utWibCp=9ee?KVGzZ^y0=OR9RGI90%Ilo2V9&Bo zCvIKr19qk1)5+>DBs33xaZ0-T8xeW@0&zqt2SP$O3e83Zi!@j#K_(o|uc(kHElEz| zU*5A}HLj!veM8mA{7T&{A#d^T4^<<>uBD-&p$Q}2K3Fj-v!q;VR&*1|q$d_2i}@;2 zK>TJZDU<0fnz>}Js?WF0fB%~NAv9L0LIrt8dXwFi1U|FaBY7I1R#}|evvKzTY>|Jy z6Y5nGkUugAs7#tDr%y=@30EO0C?Ipg!ZrGg)U^^x@d_DQMGzP9z<^m1i!EzMw2`SC z6;ec`jZ*?lmx#t}_?=1>dO{{?n75XaFiQ#8h8jLsUt&{sa!1~q#5L#YVR(n!4pPsS z2n+Cabm{VR<((!g=^pPU;mWvr905vF8{jraGhQpjgw$i4wyEZBM{xfo7a1O`xChEf zm%Ih?OYRRFy7&+0B0hq;f`w#ES^!35y(8yR@ft!= zjPabaB%X8PwunwrNgW=hIv0HIS4mR-M`enqFJP)l2MVS4Ku1#M-Gl$8d!eAg?ie)& zr*6+fDtHnDvg*$bk;s@~1xf9$r>POlT{kRvwdP#Dn)25|3zK}Ga|d~!@lTKJ{LV10 zn$nsoVy6?k_m3f@Q5PpJU|xjW!wy8?!KXT)9HVETG&dmfh>?y9|D>YZMpY)0HKUC5 zlmDirr#9&DA|Ya#riLO6)zV{$+$xMl29K!{;%On<$*7d6p*S2Xxz!aAs?lqfU8QKU z?D8J_wtY~!$CfWg#h2}hz3YX_N3w&~bhRm8%bG4qN)5t?BRR@!x6YFN?mfO@zF`4J zpkP|PsNF~X`*gZPkqm6wx>r?|g@pII22!2buDw9cWIPaJ%D=)>#tZ)@&_fWUR05l6?Wb=aShu188 
z1T$R(o5OYYX9dMy;(pvKxpj76<6n_&_3l~0EN=5Pc1`oUh<~}zZol%{COALa&q42^ z;)PfhHpi0i*4RJqczF9}RBp?WlwB{9ywVHF#c`jdio^?&1jaBQ+=%jN$OB@OqbqV2 zVTUH^zDu1)#VHUCzLsKSRi6=lEX^lV>7oBiBO`_e3ZY}f*1UcliQy+{D2}SS%&A@2 z(oG7j$WXYwkCc{uT8oQ*+Ar1XapF*-8bvi5DBKUODcc%-)ivga)`~6*Q95`q!M4uc z{dRQ0SxXt1pAaAG9b#cxEcX{CtPe2K&q1D6$t{jeF+Vil1`PyR_15ol9cr0aSdrn#Bwc(ZH1zIi{<@0i? zK6V2Q;n)E)8(*SxveS*C%jOz>eYUK8HJ+U{s^N{{OistACE^Bk)0w)}%>=VSCc+7< zy5I8KGcMw&qbl1sW2?qJHhOZBASyP5Y|=FnCtmCI!8j~BDk`1O(*tD-QCrLUHA=Kj zbvFnkd=eMETp3%jCGObrCux;^qX?NjQ>Vok+u4ubE2c8yvey&fsWZ}X2UTs`;_nfF_zoR9bTt7bPySt9%bBCQeIWlg z(b%%hzVTj&_iDb_^XzJfIAO=Kt5 zF*Zrlyy#dvsc4r$njk{~A$2{^nm-quYks@;Z9%2_Z28iEvliY9>}{vG3LT(A;4& zj$5jT7OJ9rqLI^H@X-1sRu-0@I`w+K&A8X;;DP2ExkhS!6!8*C(DqzQWC3DZ^779mkV#L6i)I(|H8ry*`E-CX2W2RbL5AAJEGx)Tm-HFf&KGo10$w`8j5TJtB|{L#hk-I;`kxBP_&YrlZ8w~ z#k3)zEE&9a)NOCdV18pox1g1?t<_T47B3|u-oWi z=pv;j``1;4(P9>_3rEd+n=o^468$mwkwgka6`$^Ze)Ya_+%l5oD0^!Xf2FEeXJ>IJ z@?lK%?qf;^448piPFI`Ud;}Az;hP{Gwb3kCPE$5=+_BDXyXM93|0^XmzADv!J8{x( zp}3hUg23z0hgd03iqVp-5S@GHlJ1`k*p?a3TT?+)n6h(>Lq!Y6gfvq@KN@TnUKWE= zQgC@qdnG_uciF8fCHjEwl`3sHZ|wqI}?aX zf`2PF2u}U0MEG}V;2%N3T0vXr5MYgXZPid`t~<;*-cwO?>*${F6M%nmM~-UTLmlYf zxVem!eIYGQ#Ez9BQ>uRkn;-SO?SHAa{OOf~Qq=R6YmXhXHEGLvck{31eI#!7>!V_T z7bo-tYK$7bhxkUz|6ZGMUMcBBvd7_V=D$=BdB3o>2W^h86}Mwx7XqhGHyqwCwjx%b zQKtWS&6PcKyk5R69I9yvhie;es{@drTX;i14UShD@tQ`<5yn2^=y&j)*ROTppDHh@ zrIZ-QuOy+wV?4&W1*iyJn6bb@lSDwtxI(l?xF7<$4Ivbys|95d$@afgGgeOYZdQ-I zXSX`gMW$_GibUxTy_{uzG9Gygds!R)BCV~ToamaOdZEK9!Xzg z9dl@XIehc*o)|NNi22Hu1X9#wSS!T~v6+AnZOA zsA@!=YUmocyGHLS<&}I3K_3l#tf}fG4$=dwvbv-18tS}{P)I^=cYk{2`V|`l*Is84`|lw-Ncfe6p!0%YdmY-~}aKjsD;D84s;OpX2Q zlCfRm`KOxlT|ljAKo_LW*&ALI0d&_d#*e4otQCr7exTf@`S zOCOL^JD@B>i@CaHn*4B88$SVaRGAKFa|w()>!H9C`u&G!AR3FEcRhRohklt=jU@i2h=~$>2c{)NsgVF$Ujdb_xPvJO0o^4cw9Lw8PzOX4&hQgo}eIM==w~fMHC)rDplSf_7CggIhSP*6FIz1%)Kz`hT2`L^; z$#i|Vh@ksgAK45ki(L0pJ5d0I@S>nWUA8+a%*eYxVQBE7zx+-0Y**FJRZZ7o|5+W= zQVN=Ttux$z^Uj?Nw4d@VA(#t@i{!V_`4|uv7xxY&LQIM}1Uv-%_qbm 
zk*);!<;bHuGzrDMcwLXaxXV`uCOeI0ln*;9?5nUGwa+~&ka>yUbKds8%BcpM)_mdc#mjHCzD%8{WM(H#OVi@M=PvCo-}e{i$>dhY zHC1%~b$KoC&=cjHS7J*OTYo4^S&#lN%CBHJuy1el{WWakc7^(L8;5li4sUws+qav^ z+|;E!ZZaF^&bVBIlBL{F)F4+bDW{ahzTpy@gXIbj<4?x>I;(_seiKmTaD4A`#_QZu zb*~q!8_v!B%Ho*fh;L2`q{T$^x=*s*@<2@@|9;oK6scWVw~Z9rMk;G2ZDPE?$tn^< zK8Qi_7HPICc$nWOk2ED`efxHJE;YRMbs-4a+g&m$H_SejFUlgI%IPQ^Zx+}!?a&9V z>3_X_urP=wabL8;#p~l6-z_}Mw6)h#mL}W3j+>QM*)e13iwNy%`l3wS{)GT^tHZN6 zj8Q+?ghbctnxBd`-4^#Z440Ex-SntnzUF%lOjN$~ov=teWSXa=F;e*}nIX ztMCI4dxgX}2~Fek=Qy8<=?RDlc_pJ+K46aHmKAvPui5K9RnYcS?+IaD$g{fAu;Yag z!!s|Mr$Dy+M`W1oE=2dHSfU(NzUCDFDm=YKQs;vHdIj0#$TMViY$m_t(n`42n0niD zCl9kd>Ji#q(kl-|=U>IH;vQmL&D*WOk*is>k~_6>nK+j7PNa8-M#>%Mluh7FU97#0 zCNWx4<*oLBwo)y;$h0QE$NuKW$Kur@#ikFBj-1KFxBY28T|OriyKg-_r|inErskZT zBl;H(!}yQG(Y}%>Y@6`%{una z4lf(ci@NhrQY2&MeyP~aP;5VHfD8DUAix#M%+AigavUs-Z%NY0WXQ1F}xZU$1Q)-BvP~ZneTLr zOq#hI5M;`hN=kPJNQ&_Q!5xm@GVU8(B!i!GnMiea>mFAk2b$;@+n%C#m z9Cv-tizuqW2;YPK<>beQ=i&zv*Vu1C{0H&gx_}HKbN*iq<{mp8CAw(Ln0n zZd4u1ln~`>`fn4U#gvOPJDKij>=S>s%mVS&$|&xnErAMq@eo{_RH0qXQfqK z8C9$^H5>W^|JNb!iUbeq8dk?9nbJi*)5K36`rnu0-eTPd$M%vxdpV!4g@{73lAQIaiOvIHFlC6paL{NaUUCyoR&&dyqP$EK#aqhY+aouj26(Uz%A zZ+@oE80aqn>mO!cB&Xdq=+S@y4H2g&p+pcAyk6;zMR_>4c zYaoUa(v>)4ImdHv>@68pg;r&Ew0;Tuyh9?NZ)8FDT9}Lhdwq-8wD=I^Q#ltRMo02! 
zYtLj$RtBy;(w?&6JG~Sio=zEPPG(alDuHvhW7UCWZXPqSj0;V z)w9Asfs8rkq)*MAL8SxJ7dWLHk$^(FwS%YiLwuOC>7L8!kstXIB{bVhtskEFx<~r6 zKMze})^s1My|59f5r=~=U2N$X`96Q%#^Ft$a--e3Mc9mo9u=NB6V&ZovUh^`kP~(r z{=RLIFSXfvZYg_LJ>Oj(w3=CeZxYJ@-q$lFG{XnWE}A1g19m+2_=j6^)f?LF1`G$T zZ1#dJar$W~AJHUSw*8~Oma^)rN@u&`jOBc4Ag_SAwvH2H4|AY9ULFj_+K*<66)Hj;8{=#95W#yq-aoyWUPp&OhtnsN~21MMwLos z-i^|XB2gl#lr(A3ZWKj^L`k!xK}wq6_5JN#?Qrfr_rCZ2y!#JlknQ*Tj?c56wbsLC z9oao*(`ri~O8dm+9j|A(c|M=V^;u;$Emrc|vrEJ8x|0Tj`^nVrl!QjbbcGPT{?SLL z1*;YZ-^;9+haTrsaCfqA6=#EA`uzGs#;+c*g||OkA&e`c)EnP7>5Hg1_5z6+aK{8c z)T=B{QAMOsU9J)(%I3ds-iJ{l|KhLJkzlKHJP`MnN7KtVb{9+Wqe?bwuS#fq*cDW1 z`8-Q)HP91sar5H4%V={#aQJVZ5Uk#Pt^ISQ>4!s=YI_Z{w;lYbxeHMVo!l2Bnp+&l z&enbyalq&^bE*4SB-!AMgjCIepZa2_$1rIVBSz?Hjy6lM$Qq%eiHoRp$rX`n;nTaa zHwc@wFT|+g<>U~;6)lzK`g*D=91OCZglp9BTBiXe{M!3Hz4g*@1?P{%Z6?fAq>&No= zI)r)m@!lWGHTcdvW^-2v-r<;&7g9X@v>3lIoO!O0#c9)@?lgm&D>}1ySha8j^CTq? zaB>mG^Dlg_V(*D_68WM<;vDklWK$TCQ^FDNw}GR`CB&wb_+Xule}+Wh_RdY8$Pg@Z zpXu-9h)hK;XndZq3kZ(8f= z*grPa?Bx2uQ6*o#^9QF-zMA3~aq)xSfg!f3gt=PlUVUq!dEn_Vvge{R z3?dGq=WL&++M>{5;c6%G=(5=~qcRP?hoyg#iM`!;yw&wkcE$PqNVbL!n+E%;lM;fv zKh2qN6p!ql+!I?F8S(nT#BiH%)h#V=9T)??g+C=oF#Ch_4~x^5S+qZ;eH>quO6P1W zMEa;1y`!twM1jOVS0;F$9Wy5DK>WViVFT=TjNDe_b8_i|v{J#NleA&fLSiYBh2T$H zT(VoSYzj@}(SGM+U&`kR>?yl@biu3o)en3nxa7_vnlPA6qkr--<|?r3l`VQ%DufwT zeJOMM)lb3CvWjdIb{bvma#n@d>d#ud=;-tnL7Y;Pf+N|H)R7p39pbd9_)5 z5sg1S^R@FcwRxwmwj()Q)>&*(oBw0Z=Pb!Sz6BAiW6aq(bc}36URETb5o~h5$o_fG zN{ed=B1VW7cwZvsntujq-SWFNPwh63%T(KC{qnm{@k?hq7J#tU7|p=Yms!a=kc5-4 zci_0|4sHCaa#6T_p{=EvMDvQTiz1dIj%yJOpSDj?)J&ph7Upp|na3R>Y|Sh15=ED- zwLFoKjf*PDahIMm^I4wn8$n~~sP4^*x_8ue>fUJic>V1ct1NAc^$3(hwOO!*lb%e@ z7FjAJw=`SXOAo{*Sy(p4o*5F59HlK^$rT~N!#mFfaZvYdw8+{SCsQsVRC7;DAoxu& zGlyM@gE&3X#L1#a;EJkD;^R{x{x~*#7r*DPQ1)Nh@hSQrn1bVmC zo%MKfHfyp-Im_*qzJ)a1(#b`>_IA2-jm3&eM?td2-t7M(W{As84&xe@=*d_??73o$ z(0Sk-P7n>eXTtA;&>b;BiKa1$UIUf~;UV>z4>`*PZNgpqiznx2zUO|4&Hqzw8k{g0 zF%lN>-c41e_ms-`Dmcv4-Hxn{YV^bWY>m9uYg38Hdc3`>Z!=5umOfqxRrQH#38uj? 
z=sRGS?bQBw3{NiU2$ZMiVRngR11EQLy3*sC?d6gJ9yVMaTeEtyG5c;3*B+ZTLuL%k z4!#X?$m{-hNBmGgC|c7ya)w`Dw~AB<3!6)f3Zgh$-|REVgMwg*_K%F2)hMDDd=H$; zhfENz2)yX+=H3J02EYfO#;s%~Ou3ONNjyDddAQOvai{1s+_lR`%Zx;>P-*&OikSqz z&I&f`?|nc&<3W%7fA8e@I`5u}BHC~gx}MSyGY$Ty$I zUmC)nkPL@3%Xtqa!#z?jaCRi=K3`}O&V(+cs+u<=JStv3mHFiO2lL_eDopo6Xvhbd zqaR3q3Zz5T5v8JAH*d<4)@{3?I6d6?CO%alO!2Ch>7|hay#ZmtKkbpe<{?zT9%>0r#BmYteR?N#Ri}Rpm5q_+5 zQH)B`CRB>CRZ`3N(zG`Q{A_iLuFmh%G+GgPR{m7BB~qGmsaf(J8+eMJ+F%#~dR20| z(L00G7C*k(eeB-Z{bk$V@6=KfWb+q)9d>-~F*83!K{lFDX2nG(PYI2ViqZ8_30rX~ zzDYF%5*lcI1=;20rPF5PmIDYZ_K1h&N?X5xi)16ww}}3|wL!ao^+LB*9P)Fz3bE1@ zudWtA_fH>~gl=2cnWyvwn%#Nd>I(fcGBS=7DCI+6y@*i%YtSCe_pd+ym@3)bJFjrNK?QoBXk+|TUz@Od};VSA(vBr;&f*Lev+ z%^QaVd=$QZyM7E+`TSKRujZ}V&N)7|BydzwS7sxk-|t+&6p?7LEKhwht?3t=?VIgd zVG58-dZy-sD1Uhchj?2#y&{@8dw3Jqp-NMU>L@f+tbxCSZ+Q5^ug9fIx{mU5u+eHj zc{xM8k;2GgE_(NFcIS_aa=H=$&lX@bx!7o>whi~lv7@%|EWu)Rz90~+z;UGt%xQUZ z>ZUe>UCXR4#Wp-w));oj{S-i-hNWeyUB_*W{<p|-tuvS zT8eEPEM9iP`(QD2dw%FCw$;q+{+sg65?p?82+1X`nR#HZ38C?(UsF@l?Pt7SRT6?d zCY`I0lRNP2iU3ON<}WtRJk&FnOvQTJ)!$8&Z8=Fam1IUvL?x-+li6vqa#Zr44Oycd zkLSlZW5yz=s(ZZs#zL}nj5B0DeKb{?QFz)nv!Z1_c69m{F45y(P9Ebrp`l^jn~cLY zJI(H15|F&Fm72R4X=L;cE`gai0ho)?&e6iXn#1=HS&VBG7o_+Za~WrBZ{SZWHO!Fz zgeu1>y9WYL$lTUu;zLY0!r=F+BMk{7Kw!#iT5XUYyo3^I?+w{ssRk{kQ zE2Rpih?Gf`W*P)!)-Do5>}q=aNoadsGDmjB5#dPP5WM(a1o@+t9M{h?<{&Bavr8g4 z?kaoRirY{X3+GY&%V<1+CUchsJ)OE6>pC2KkeiN+OD*VDp+=DUWgf{OTp+;D15>^l z`V~K-LXCk7V}e*|OM?0loukLaU}U6c-t)r@NAR9%3v+YxkOvQTLCz%vE1w;zstZPq z8s+8hFWA}H=@k^T#41@67R4b^QA%hd8yX+K=iU4FH3)9}VIigT!Li@IzTUZif-L=v zscg5GrbpRYgC`t`AY)ge+RXV4l zpuf&}*qP5mHo5ZQcMlG=mNh%6hANGAZ$#F( zRS`Uzm*CL@DH(3GLhR31R@?q`Z!jpj8bH0?T!Qi>j;hp*E<&~NI#v22b-O`4wRZOX zni=G-kU5&;t-WnITiu<$B$Ql)F&;xfYh;J}NK#ERS@d9QN3Rgp_bmhEX9Me-CHOu= zfpDBx!QB)lA;y;vDf@_8PQk&!fj@E;$2kbPJvmETaF;dQPunluf5I`e>k6-zc}`0M zF(c830wl(;kQ>GHeGCc*=Srmhh;hv*fZFcT%h>1ww{y?alWzmfJ5N$vk{#?jEWq{o zbppr2Nu$?zkuZ0URsLwR<4*_z)S7RP!9nnJf0;8qG|4eNV`65-_^7?SbGyY428@kE 
zKwDGiOu-)h1giA*?6_%rWzNcbXAa{uLf9Y`F%+q%??qL8gig4}iE6%9I9=y`NU^Jd z;YbXTCLEfm$iME@svZkHY3ZxlvPGR;vc+VVgon#Rg)H0&LBo7_SbGHqa&7OQKph&L zxTgGq>4B_}`!|be4FH&qGovicO+VES!@w2(v@hZqPn+4ZK)`k?_>K#YB?9{|zh;+8 zQYw}gdQ-n=d}qd3^k&{=$W!UDsfHb_eSJAWx{rv>s_i36~T8 z-iR2utnB}HDIUs?JCs#6GUD2N?6>wicKlKj*HH=hyyk^Z<>4-~7ymAH>9pDZcPaG! zZ67?$6_ey%$_x2d)@{hsspQE&W%>dZhucsJNCv68StGAAf6?6?5r*=@`XCnvBvNQO})=ndhOgrY8juK zB`&vJ@uUYLZbn=*W(X5n^R3B9PJ2^mTa~H6p7JFmZg!5neXSM~O_t%7d31msVTiLr z6;cJU1}l?HAEtUL-})8Tq<5=0WwFlS#~z|~)pyUY@gklKt(OZ=Y(jV98hCzrjU2<4 zi+-m2ByHPnPn;+xy>^+}3emauPbI>wE*JjN!k`x~vu@H5UcC6(rv|dCPwqD597DFv zq-%XA*OvPesIlu zlC$pn(nPf7u7*@M0ixNhUx|)-#E4C)GUfYGWcHy;{O&>+&t2xGk+=-0$J^LPAStP7k^z_gkTYt9@em-u@duMvZA`(<#&Acky;}rXacYr&h zjvLK6e-)rJ9Rkg__$V}ZTMN6<4Ze%1=+DG;R;jy@JRsw9k$ClAXS`ZfcfW#*S+Q4Y=4w7qxhobsLcP&j;0*+JeSChAjiZ%(N6 zbKXR6(H9Bq;b(uY(z1d+HPhvF3{!j@`nr*0P6tMXBNl}|ACt4(IBD-6mmWXxY7pg8 z5R@=fR^G0nNn1pdoWeFy3sVJ~X5m^lZ}HduZL>G6B9Wr#-aK6{Iemg;Vx1PF6r7~} zPH|qr4N^p%L9<8@3|KmK_FGNk%cfQ0&U9T)T=a(gY%>q9S#T=5eOZ0MGwOLR04Yo; z{S#kz2&1pO%rUXrUmLY?l6sRP%F@F9t@#8}_X~blmAB;9EZVnSj+(&m$qQswh@r4I z^!#GkRIQo%A8ntF-(90t%-Y=|981oMBGF=`ZT$OJu;u6~y3XO+Vk=xm54?8H^1!0A zqd#o4#P>}cLMSVgqq0Ml4~f0?Y$IzVX5T6&`c7j!vb7h|*8T@|ow$F77lv@iQ z=K^QE4G_-))i(qMgk6)s(J^_izo$fC?6LE}>@nvHN6juqNo#u73=yE6U?QOUObRM` zD6l+!23@Uy@e{26H_bW=8%JUjI8*Zp4R6fWxS%kzkjE~spp;Da~)`J0xdwYaX5}3V5 zMqt?H2RefR8Q>zVl$vKRcm)~E4xlqTw3Ng}KSxIFPGDP&x(PGCT+U5R8sF6`L4vi6 z`tVcEv6E0AcA{6v0=HRzpn3D8PjkkHW(0o-DL%fJ1^5e`Gc;m|wJ3-=z$hfGDg@Id z{K`tIbK^*w|B_kER zLc>pRQ-`7Q#}t5YA~z*8T^|S@3Xprq*@5ajW3YZh z7-2o@Fya){npuGWKNHiF?W>%J8mK0hX#Dy|6I8DNz(Q3E=bH)+-uuJHNP1w&r*jep z816w_ZDtd*D|!ke{64<0Pg0e+-*l@{I4TS~0NKD-nNOAXv>4&sMW+`MrHM^EoE4+T zcxlOtu+?2%@PBcxWw9o!)RjjqFFaUeM2#35uY}tf0%}P-m9(8D`LqOeE0cLXKys!q z=7k(*NTW4MVz#rh3G{Qh9a&{~>YYU3+MKh#&k5I&1^t#(Fm!S(o(<094pNH>5}d&} z(=ayszdAs|HSh`X7&x*}?g8g(ty>j=P2>q|QY*%G11I302(a#l?$3Qo)u8MRI!1OoE0vA>X7d6$Y};Aj>kW$y52g8L2jw zJMWr2af`I5IyhY@l+kF@3FBno$qrz(`;0ZSX)`OYWa>;`h9J%!5$1m))>&`Ve|8 
z4D>pBD7O84c}&eN>4>p!urP)m(2aDCpgwMr$WaaAjeYvoyUyYZ-XQUcuFDmOA0DSa zgH|P0cToS6SXk`(Dw*5WEj}U_=q0#l*OB=o+oXF;ME(}8f|Yuaf6Xqg$8GU*K#F@2 z5bl)1ySwqgu+^NHxho!BEoOcXB{_|V_q(EFO^T@{yD5M&bMdHXWzNm z_X!1L>|CmfO{ylya2W9nwXclaQMyE>yk<`D=3tYh7!u}HaCFk-E#=M&NQBq&DHT;q zch@8lqv?@n6mi?&@u)qDYX$s>En!W%8xa~Dn0R(HHVmu##JrIMb>0!27k;IG(E}-_ z$k<8pwM!Vt^+3SVW*ztn$uZcxf@zlya24$AwL~$02chXaQAkQS%*-3f6u`B*&7neyMZDoGo7l)DF$k>#PdaW9;Nor@?J{ z9$RwJ&jhxeARG`L8RCHh1HBiZGeY_~&Sxt9z?VhfHz}MxvykiFli_4OsSh49 zi?~%;2qty3!6zAwqk)1-q@IqOu$+OLul;si0}xyh3HXw*;m^?5UdFlfzdPYt960t5 z3uz=A2i7_`4XEKUE}yS%@RI0Q$BR-;`?j;@U`aZM480JYTvyGT18%MCpCC?nl}m#og-}rRJwGm?{~$tZt8*rN zIrlTFzt5AX)09)scHb2r?rbxX?o1=;4qMf{(IkgJjvY658hA=nC_2P0TO=$w5%0Xrq@14DQwv2 zOKQRl^cK}00*UXyKpSA79~DdUgVK2A&4!h)CySi69A@xbH57V1L(M_qJe$RNiDF`&WXCCb?OFk0v?ynz=gwXoo~plnH`55KLeCh4_b*7MwX|GiC{?#$TIn7nA(T>djrG zLC_b>r}PDQG;;H)t(L`19mIqIl|iIQ=nyFNVWHpwae`(p<)nR+j#Wo-0zCaJ5FWxF zT{G5r^|#>~$GV7IR*aP~edJe`6pe0{&>>!&s}n5?_VFu_d+OtkO}6kOL5s}43R=j4 z=HWWynR+-uFZaA*hShoVDAHvF3DFolzHn(f9g^v=#Bce}yH(~p)E>m#Aaomk^<@9{ z>Up}PgJiWo^XpEY3+17SBz8G~0JXbNN~ri{>1|{`gR$EJHrhdC4<}LeZF&0#VTgh8 zUjtHRN#V3c(fC=@f=@lW44}u1Y#DO(togi>)R?cfkN*iQeoMe&>cdGVKa7O=ze&3O zq2@?k-7LYZ3Iyk$Qqfr{Jk!5C$Nk@K9k|Wboi=@_GMSz@l${BYP!#mXUv@V=5QXD& z4cEQH4v%6)c~Vv2dX`Cx72Sa`LRpduaDxMGXGd2kgRwIZxhXghA`+w=3_{}UnKA;j zFby-Vf?J^Wk+A3y&$+$BZ$qLSpguYiQOajfbuUd*J;12{+_F@@ha&AEu7V0|oNbJa zGyi{ewEcP`vPAb13SkzUV+hVZmCr1pi9V?atYjcz_kQJ-A%M`u&s_uGxxr5zr6|;4 z1w>ecxOvD={svPT%04T5SyWY9hDOT`RZSLf|g@`{Z+U`vk1;%%oX{?Z&GE8j}hoMEEN(LdT7G$DkQtRzGXhT zvkuYEgvlZ@i#~V^W%wuChSty}#PCIQ2`m5iC^x0oKCJ^I199Ub*`E$FR|-ucC8qh@ zIHOpZzA~X8s)w2-0DKMYzV2aE3o^;y=ACn9w(q*9{Afy2OEm99pcahfLwd4MRD2}z#H>6>maoIiB=R}f+|rr=@wA5;IV&43;x%&3B>T!x);%G zU0Z*2YrElOH+@osMM-C;;c~)poIis(n~Wkfh|zaI4#KrWNt4D9X%c>jTuaMm(2UBv zosLn2tS6S-rTpQ8BsNkl`7Mc!6k*96;)x-&`873CL+hi(`{Ip#vs`E9!)T3v(xgf3 z6Gz9crBqkhTsK*jicA(3P12Q=f5(vWFUyuRI=&tvIFvX-8D)K3nUD&Y&OO%p@1+7b z-D2^Yr=C`t<{=jrm2KWE3B>rUJQ&%PKiymp$<5_97DN_U2CTeu_pZ03MQtt`AJxE| 
zOz)u~8S(T>7ge|}&;xIJpP~l>H+)yS;U3Cjcv>Px8B{dPp?%ZpzX&Aau=<$SBnr%~ z;A+Yv%q=Vb0Q?}8!2PUR`u)H(aFOA zEsM=xZWqt1%yK3FUi8wtXC8&Mr4tp5-Y_WoHP$DRWC8-8`ya^@a6K}FR|xDG!Yi`1 zeP48|u$-x_tazz}AXPJ&&&B5Y?0{2d`_X;T*U1_{SHiXz= z3>_DPd@OMMSWu*|azyeWJ813zmN3pA2!avc10p0?J|!SYU}fx1nLH|HVL&Vl68hX& z&?X|dVkj$cJ%;v}!^)13mvOwxa1Bq-&XG~i7~tVL zZcYJ&Aq?@bCRf2VJ%n)tlfWt_LCbKIAI$nedNxjPIRa=_u$H7uKad!9)7iK4N}ops zd%QpLFgFJ+Cu_(tLRL4Na;s5q%xkqJqZ>T~gj>L#C>ReE0*8?QaMyvXjbse}GHY8h zpy&8k2lw9<0FZx@XJLFo$Y{?qA|wWG4?%W^Y0hj}&RdF%ut`xvL+j|Yi|9u3t+k}k zLDiw_WstnJb$nkrMH1NSW~e(CnZtZ!U6dxACOKvhTSIcOdVqP070Nu=36qja2pP43 zuF*1gz|CgRZTs?*? z)%vB?+G)WWgmw10&Y4MKCg(q>bXZkGFO@!pY}p9&)mPD{ZLyNn*R!N+x;Yf|d0<5H zRS-%7>m;@Mr}=83L6-lyy+c{{QHPSl>=} z(a|0Da$?kZgq|%LXpb?!`+K7|F`QavSil86u-VdtiVX?D+VrSN_FvkIwf)fuEf<}f zYGBZO&-U44*4-!)ofo zg8cRwCvJ>P>$A0K%@YibjosOALUpXzB=Iv`S0%xi1MX0VGl(ZRz84~%U+O9*r0=>LOg#?ahRptJVh~R?s0`a_Xp;~D%;XnM9kfFQk5W8Z*>b$hiiX&#{4IhtUeBG7*&kpbf%B;n0xX2+J<)bvpas1dEi@^ zHBKTS9(_jY`Ll2aH&%DpqawPky*&@U%KA7CV{{(>X;bMe z7?P5r+1*UV49D;n9%piaQPi_tAd?8FN^UoosPz*Al=TcqVM}c;fZdnF0R>LivG-N}mXGdnsQ)>I5+_colbd{^$NFl-NXkIWUIWn)}`}o_B=;x6%D;w7 z3Du_g4EKLrZLb;@VvM2QtESzSc-{LEZ&n}DdyU-9iebxjCn{Fcl{LP!2Yd9Qxs=C= zx;5pVWQC%UTi*s^?i|tkft)$>rZG`q=Nbl;M>g)6e#^vp5t(vU;*cNqbZM3BB_JD3hc$A#Cq zG3<3XiQZQ1mXD85F4{B+!^xO9{K%6~C@(+Xb$rGoWDsxnmOjrxU$CQ{P+*Ac(X{mK zp-cCiL2LE;zQrV*SpN=L;B@4WJHUY=MP3v(5!I69ohV<4%p`6uM7s;yuWo6#C}?A| zpbF)q|Cap2(59nU0J{&8{OwC&Iy+27UQXb|j7e<#DNvzC9w}Kcj`rj7&E3L|d#5f! 
z#zfW%dL_@4M6KgbNN*R2Vv>>$E08_CAgguL!#yK~UZVJ}V8|O9>=&8jW_T`v4GvMn!ve-rlAR1y(c!OO6T8RZivqGu(n3@&B!eL@Z zf0awj!@mD~8C8Euz?o1g?C%_J>q`ohhPqebsgYS>3Pg(1L4K|i$yBrQBoaZc&)bQ8 z)UTq70aFZ+q+arDQB=7M+m6S%-6|+vll+-tISkEX?#Z?9K}LKYic6Fj8QqDD^n(ND z_G>n^v+5fg-Tx{(Kj!0kSNF6@{d9kutsXO~T~w|&wnBq1V66U&fBp06~uImZrj1Im*| z{boy@vvdE4(GjNzPDGfIdTqV8fv+;9o$`*?a#`8KZY#;ky!ipihn-E!<1wKzF*{P* z@0sKwlbSWtf{^!r1Z=4s5%{v+heYKSiCphqhd5=$ zR0oxhGbcDXsInZIyQ|^op&cL$4^}5}w^HPXe?U+oU<85HYiHLtOWXkyWqjORC5NTY z7?g|d>nxOu%a3?oTi@6^U^QYn3{1Hr8AG^^KojVuq zm`_dTDzJJ74(zlCniOf)SNYDbrGmeepzB2Qp`M2Fef*IrgP9zwsvmH0j|tI`V}0^I zlL!p!Y5@jUKw(NhWC7rPxFP_ejW+$ z>zkXU4SN=UuaYf>p04Lvd5`-d&7@uH+V3c3*#|*@mWLipAxO&}nv{zI;`0_SKe7Cf z5RNH9rE5M!-4}EC^+LZ7qr zjB(j&B8}i5kN4**j|aQls`sQd-DGCc47*$RLoIDr#P-S-H8c`U~kmJhxpHKqQooQKd1V`F7R)7<*7=Ola z)}Y=f1QIdTM1w{ol)fYZJbLsV8cd~m_Vx8`IeGMxzmr_X5ngu7qo1m@qV_bUb{pG# zFEOmSpTq5qpPqApJ}LfK{SE>s9Z&g?+i{`2NMVUV5B19FLa1+k)eZdN*nG;8shbF0 z%?$sB)k6lZnV-CuenIvik^si9ejXN0ioXyE5%e>Qy{NP?c*nz0z#u+yJ4yIIe!PJs zh2)rnp`5;{sThT5Mo;QL9%F>2g^k`s%GC*C2$9&`MD!^6RWol}HM9^4v`he_%Oo7{VCrH))YN|KJR_Ji9h;rP3;tG z^%F{3#jnB6K4#xoOGmjQNapKgTA4YH6kte$iq^+t4ExzXDpxOF73sH#Q~cPo8uPr2 za}#Q615Erp?7!$OSfX?k3(B(}cPpQrY_Rgf1@2oM(zHpxo!m&xWU?o3M!haCUsAZX zBan#PcW7ye-rW=BmuOM9P@UY}Zu=Q1VE*Nf3+zs@w;8l+3r<0D=dEVF^T^%H@DLnQ zjN+cP33EnBOgU4jT!pb5xU_#igc88CbwtULj> zABDQ2*x1mKwivpm*~KywMmBFoA^K3jm^ddo6~$AFY5f6G{)t(@F+R_taIGK-7to_z zC8NT$=*RmO55Z(B=;rd!!?0#~P^TNmj2TxR_4T;<%;hT~(Q)~YY;i0Zq5gM^7cZvp zeRuH>LZm!_d$^{YsDC)8lYGpf^AOuX_Z^HH+;ed@d7Ha?w%%^HWQypX9^?{@ZkSUU zc+B~hud*>bo-8;N;KH&#>4>=x4Z>gFG7!ele5e|A;tJotp^N16)8SFO0d( zVCMf?CPf9E;HNh|)-B}a+4SK&t^B;BRLM$c&rr6wN&>9NN3HVSGiBqNfshb1A)wEm z^cP2u>PNf*tB!i*c?$;g^^#Fl$Rh?jd5mJ7nWn@(aFSl~93-xE$)%*myUK7KQX>|n z^MCBDxJ6=P;0(Tf<<5kq)y*jKQ9v})gNYK#v^czCU4p~*#+GM9?R4_~(c_%PLL*3I zkmjY??V9#ug4+H4=SYK25;rJhR_q^Xtyf0+gFdJ-g=l;kl+T^y$8dJeNbc?F7qWoL zJP~KMz?88eYo?PaPr+gx&R+PZ$t#ndR%JTPSY7ZJEhhZdzz6I2mRXtDvE@ZXsO_x{ za9<}@ft9EUvWs=GZhlS$%GM+OV(`4cEhn}u=3)bYd_lv${dEkY0#Z0`vQhp76KQ{* 
zz7|?t&m$=85@94@fG*nRFb;DqZg5X>sC_!i2HoyzfGtyjSKU?X9S$jIEUJ+DJuWKm z{G>U{-jUSFZ9I^P-`#u@=hhyZaS5mxEcuGMsx=AiOziO!j1XZY4rQ)`#CYe9m)bAI zIiA4$3)vWn21#XCtII6G>u$3@maj9MAAaKZs!2^yh1VAivXF6{_R`^1AFC=Ur|lt| zlaq@GiJx;SvQ9ePRF)?|!tBN(wHP@PKBL>i(aqMxaCV)dn3+WdyL6?Zz2% zQswbA=mQuFT}<%bSjY*rE%swDB`5?M7zz$I9)y7$TPk@|i_aWJy1*{=5Z+vrrgJGbtwAchrbu00?undEKJsD>(%BHo3A}#VZg(SOL-nR~~b#gaR zO0IQ-fg)4FPinM(SLXxa>lX<>c&Y!^jR?5q?^xBS3L2NpW1H+qdm4`W3M+#J%A-iM z`rCDNgGoLUYP5*cwaT%%D=R<^hn`pXSBE2q1B%;GxhqHl`Yne9oDd__R@01 z$>w{4sQ%Mkx)ejYVr{xWsv4A7A`K?&c{tw@3ez4arlEgj8VxbvRcFH0Z|FNUb=WQK z(_ZFVH+dSkeV ztvIz~YQ6EhNJCzq28P-T?p5Nt^N*P(2%?6tV#vLRu<$|4X#G`R&(A10GEEF%m^?@s zCRiz>cvlEEA+W@O6NN!dN#n_qbs_$Pco^nKpW^*LuLX#(HOkk=$EhsxyT<~pDDnEZ zQ-53&Bc=TlxyQ+ZPz|dUQygZ6GhD#YVj*$&f9zeyG-w}&hcFbh*SC`-O;8TFw4ohI zc*SIZ!%CrA&^>Z7p=g=j-V8EJ?NhWII2K)p-wPb%`2EC*8FVql6@-(AlyZrB90_v$ zninC&kdN{YSj{XUauV|dQbHsf_eZzNkicaeD}XhEp;tI6#jR9i)Y#a#OhV#zTX$z~ zUq~@}*+elH1mrJYQLOy8iciDxqfGzoYSqM=buFQ zjWLnYm1#2%e=i=26*jR`{U1)=ukun5-D-Q()KnR@=mkg&?#DEDP$%bi01X-?9b`>M zIFsrv$mmI-s$3rq{oafYeG*{QNSW}%GPurhU!eCw7~9SmJ?KgoQXOs=_w4NIzjdY? 
zax@smD>p#QkL{p~*ypQTD$n$T8cV#JN+XapN0&6VJ}y{kUVC+qm&1j!ni-&@!cLJt zI(A(ReF4}~vm9kpByt=x&&Dyg0#3zTg1C-2_>Htf*dPDiKrGLDPDgWj)S@ZH@7^tZ z_Uzf#sKRS&_vH<}I+cd#DzZrIS-{X?{x5{@}tqn&5Q4xR=rFa8TqQKMu zl1_j%gO+vr!I zT;WA4jTR2Rtk?|_qv&gb64z7g*t;jgXye9>?_{->r2e=8kVN3oD&+im&z`+bGF%~y zwi=`={Jp6xprwnLY^gmwa@nijy}z-%Z+k{X<`|Ds<9JeOoyCX~R5 zGR}-73xr_`?|3N|Dj+b&Aw?jJaH@2NRMAW^{Q0WsT_MwG`Dq6ukB6&%nlmO8ymJOQ+O;IvdAl zz-I}P1j@Psnr~+m3NKpZgC-=&uu}8?egIRivjRHQpS@;Ki9gN4aKW->%Db0BeEPR- ziGHz``L}xN0p<2EUlxWqQ?0$!qHtrMg#=fxzk_r~VqyKYX?(mQW1{!i-%na%%m&CJXeZ;oH7_dyFo9^quf(|%g`~zN!7CBx81&Xt{xTNYj zl89GPa(QngEXa6NI`>iIGMA5`OX@AMnq^%d2gm48P|zS)r*xo$wl|2yDRmN3h}(^q z0BPlitPw}UpWV=~K_X*5Av0Mg6zS3iG~n<88<@#QN7{@IPpk>%zfzGm&l!^`c}J#v z??G?aso;M4xy8#+&cUf$h_2u1Z!L)-9-8iKl;hQv@Qe*k08nt`nTu1I7>COm^J*tp z)f(!)_+$Y%`U%0M6Z`wRLvP>y6c|+g@+BXsLHha4IAmfXZOGMhdV=Uh$F49(FAOB3 zdwRj+gCx|P!RS1I`~$q1y<;cgmcz;NH)<`0a{YhgAD5d%^>`pNMSapoP-JGC`cQu_ zE#xDw5~@q*oFDTA879}d6B2X0H-(m#YEWi~;%S}kq<5~T(uvAxo}58b6~lJ+eK+Y> z#qOY-%82K3<+0V%MJWuO2W!;elbGQE?{kokge~&7EnNPo0v>_iQ$kH*W2Gj2>Q))n ztfoqLqhB^F9HPhACCL|fq`5l_>1TA{54lV)+wI6Umt`m+nk8x!y?;Mv<|4wq1m{#* zA%9aCpk9ixjvC{Gz}1%W#LY@pTM&ryw?gS9l9G~@=85`n!F{soR#*QwLOTfA>Kzg$ zXh+j(v`Vh7s|#q2v$L}^Fg1Pd6E!LB>4~{cE$Y$iS`#=kdjH_OCBB#AQTChke#Sk(DqWY|GZN^)oLLjHX^2Z znWnUi+hg;(t`{CxTH|2Wt2yqkKX08ldfTA3VLchC7WMQgO_Qfo1daP~5$<&gOHx*} zmhaAnqwNRVhQ%-*3^%0zbwBFde3Sx-vr^!VWm79Gbp$X$q-&UT4xn+=D#$!zkWRzc zf2!65)nz?>*O##gk^%ARm7MP_QF_^0+f`2c_~N~rV~R6qL!L_uF+Hv7(HDb@Wf(~4 z*#2$NUKRb@l;_f>RmABCjBB1e3+)}qpx1T9%CLr55&^}h&dE8Xaxn3D-VEt!5&ECv z;PE2jL3bvPB-b%!HwYQ4qc2$CIbM<}h`xa5RGTbPIg~0~oJ)r5(W6I43OT1y6H?n4 zKds-uW7hHm2erIP-DWMXZmFP~@i?LvbxibuHe&Hs)8AL#FG6FRyR6X;^u0X7`HL?# zcO?+(vrm&RW3G^J_4RZjR13XvBcr9b(5K8;ywOPM^0JK!wqMbIS4>-dAeBuHgEB5^ z)WnLu5mWmvpPW3&L;LOpy0eo8_?l9|WxVX?N|sYrfb-a$ur=n^jc=Pnx^N7(7J4*e z5nlTFeaADPCqcM5wIT>J(59Zt@CAHQY0CSEErS0=?|k%AHGMQAg0DSjI@8hNu{axn z-rGA=#-6}=wtI%J`Yf@rW(C~^HEE=989$i7YG$!bVny`xuh18fxW{yEQ$9!LDN 
zKM44x#JQZBj$5&@%PX}1S|}|1LVAJo+r{u*$DvKGCxgO+lzDUNiA2k|9@IEw#{Y|5y zMm4~RmFZbr&{wuhYr>ll7}8-WD7_|w?#6w2U4&CH!CR=$daDqJDJyry7@~8S&X?7= zBQiXa%Z#5Xcqu#-D6t9*nyGbsLlZD_AG3tC&VV#e%ANE?>()e|%ETL;R8K+3@K}d;OoleUk=G z-kr%78BY)gd|~a`^f+l(4l)r~6HyIQBEP(GFXB1)B4NrxW@|dZ$=uP4>@KuQa78R3 zs5NmY`bjSfP34f~Fg(3hBkh8`d29Ri6sWrDiP5)kWwDLEvYE)DFTdMx5bhGPj zjITjodf(L4)U(A`6~iLbf+e}UK)Vl&q!rZ^_8kPy|-@{rv@ zN=;(X9MZq?O^)X~(m1k8y66d-? z_gQ^#;2&}FHIsG>Tg5?E6eD>e&F_967Q*?Ma))F9P#M8mq%q8IS$RMh2=|34b6E6r zJy&Fli$D>Mz=zJt3Pk9oiQk6GVQ^AUej}ci7gvh}yD zV`B$Su$Q=)csa6dS``qx{wO8ql&9pJgpdw-e-gmKz|iHHX-4iu^NE(0XsbaweXe8e z^m&GAw!_rBZUanBtmmT63|P;xzET^7`Ji&3o8gg$O2`#|e*Ys18#bJ(6}U+!2tY`& zi>qr*YIoDl?TPEI%Yjc40BH$4TjNqlp` zPk6%8+ie&p4rk&J-~(X#Z#)7Vu6-J8O)%io0=+scUW?&l*=ZfMdnWDI41_U(s74S9 zj^hvil+mL{hupuv6fel^k1~(w5CqwBd%F7Fu?uKyx*#&`Lc6oF!~-=-S~S$71&w-wi=@U-Qm^A0rEOLT-+D` zW(K`RJw`Ngq$FnJw=2h`d+U|FwUT>U^V=uQ5`f`;Z%FZiZI|XLhQ0J@UUR4@gc60J zzsSziC3qzrVy?_IxMlT=8!9FLD|@99$1PLb>W?0R$pzKJ6*H)X2$^Zj5AQw>*2?jB zVtdH+uFmFWC1-OIg6_qYK|eADfqgnVIYHWn{RVU;tSbe?yw;v?g@#Apr`mT{zuu$m z{$7CB%iEg|^ns0U9;)p|%2#B>?UD1K*)B3_I|1EC=n=5^|Er_si?^`F!jmvRdW<_A zcFja~U18d$-8I`K=5=u~u~LX@{@m2$2kYwFLV-?lGG=?UY(f*tj2n|9BBP>~)W^X& z{$*-Z*|jc@HNncc51I+r*licgo(M#W`D5~dffus))=>plrr|Jt26X%31=k0IjUZ+I zc_T>Hc0h<;B%~q^ZbRFdD=jO%`yUOm&2?5#@(`UP`=Aabf^E3K+24JNaK@y6Ic zOaOFvLp72FQlR$$KWaW>J!WD(s*?3cdQN_x>iHGUB(rciss|-#LP}-);Y^4Qc>BMp zFSF3UhS?iKaCHMDfl7S_Axb`PcgNv?@hj11joHuSKvChWBuBV$$V1%j^|?>QwB%1S z$Ipt>{00MEH!ccAcRNa2du>B9CdbcyYK}`L$P*>;CP5K<@5^*`KkHY}M$3p+->4HC z!Hk(Xoi>L%?3-ab0@bwV49114IB zjCY~;%b*j?Q?L#4DJ~hKoeN7rcoCy%=KrG3T`hIVKi$FY}wNka@n&x zXABaD;EoIYy`_OS#_f>mesiSXYG9v?*ha-kyMaLTH8uB%vg^%RCdD3aR4m$ATDMLR zJly3+bG}yPQ0+3lyo(d5Xb)bS@3+_+q0~z&w*#{C)8$}%PRjhZPLu<$7f7i+wmLK- z;;9cM*K^aavn{d$8zE(|tt>XtWSAfRY8?nBE@^NhhwTylTY=PXIA*keXX0?jVeeF` zYahikYjxOVq7W`dnJon!$j;z{OT}9_J+kAn`Gp+Z1ny>pD zapF7?(_9AE8`4mUln~H)?NtX?YR-Y!tu{7^r8ad}pQLu|wLlIO_Nb3E2J7tj(%9%L z-BU4nYm_$nob8YQmNOrJq1(nGHt~=RYnCn-|3x*5x#3Cq5e$(0-=SBSGK0jLsNt6@eM|FGzv 
zWTrCMA_$~Ps_F4p3Cboe(G|Ja^7XP<^P!zjC3k**?^yoYke)AHfNe9z>kyk`gx^NgPEYcB+5#B+9Jf zSC?9r`oixzmj3Ej%WqvI0Mkn}O9y-70&Th)Cp^slWNF@GYD`j2zHDF$A|@&Aep26_ zE(f*g#{Q$S!4KbF+((MpeI053<^6pfb-niI9m^_~Ch7A;LGahzoF?5{KII08eYrO_ zx>n~225sw-Bt7d0@qZbxH@DP1PzI3dE{NiJ{rgL*^Sl{Uu=InTTdA$(jV>YZhJqKU zFr$4d?4#w*A|VG4AhNuF)eHX@xkH(ykA`K`!^_HH=w}9>G=?jiE4I<`nGql@I1zU8 zRb+Tclf}1mD7+_~Qn8($Aaed~(;(pYKes<13Bn;}*_W~4MvVB_^EMWR=#v%P4nK5> z45;$7DX-y3W7(Q8g;O_J5N`&2~<*w>VIUdZkrwJH9+Nt!0w;CN|G zJ|{!kwBD2$kr}y9y$?j-gMpLeVQAqEk7_TaL9jeP%snz?VuxFW1*E69<1vN*> zY3uzcp4D}H`*!yT?KG_~R>uC}KK}msi{neZS}g$mQLVKs8{TY1sH(2rPpHDwS)O9o z0mzW*Ds*niiTU@Us5oa6GbAT%ab8(z5U)+^8^T$ynSL4!4^5{02ByHor*sJ^knY>j zTQZVQ&G9;%CMZrP0R;xg3{F9k>b}p^uCHPms?haEsx_C7geU?{-SaJ|#%KVI zg{2Ka^TDyOloEWje}3KiCAQF|yxF7sQD2_}B;0$dzvl4x7ot;*IiwG{?@hwio%nog zb?_q#-|Mz@zx;``0EJ0c9P*@V1!q*eFh?G?kePmK?gtDah~Yc71lkzX-;f5oKu-SY(0_L{bj zt}%71kaF4YH~sa3d>76GYK#^J>91C7vmP1(Q<~z?fdudF)_>ULsOEi-IJxAi49sHVcYg8q;RcG ztG*OihF?FPsT-YN_w@3#d-v`wMJeJ_LxU?wxk{b=qni?{E*!6#Q+{a$SgcRSTZn#k zd$^ltl^cC*jj4~kDw}oNoFJz@LF-vh@9fC2lAdyCQq>BrA6E`)TWlV+!|U9`xP3RC zJMNO?suQB=ac#}7NJ@Jk+^V2;Ra1G#Zcd4n6>-A8^k*sa-kE&Z;q0V;-M+o{;NBnd zczu&XWiG!x$vpE*~3Xl*fk)$V*h=cZ?#zgo9)Wy%+x%lvM}*-X=_b>RaD>S zs7f$d+|iOTu%qMZI{VdZJnI5K8NMkl&f)2K%#+#GzK#QE!O^>`HumpaGpTCI)F&03 zO0DJzddcey*53^e-NP1Ntr_GJn=yE5Ini=2U74a+&)-a?Nn0Da)R^WK+Ff>dQX!tJtNk?Uiyk;8&}qj0{*5#Pxo^z!?|gF=b>G;~bVvUPV-Qzi z5O0sq_g~^6-RUjuOwO;bZ}mK~syw8*%e&sh--t4)99fZv`bbV%UvdBHzGrEj;$KD5 zT18%zmIj*d*D9F4M`J2fc^;^R(7!gCI40kaHu_qb^_8cO?&zf5SEC*I39geT51W1d z_+b~#Y__`pZr%m6ZW|f9Z;UTJ%1Kjs@_#sc6L2i^_I>=B>1}#vDov%Rlo?UBLK{jn z6UkOtA`wZJls%y=HBGiGB|^5cg^I}*Wofcyi4-FeAw(e|JOA@@-;brHnfLqqJC5)7 zc;9&&J*h(FZ6PQV0b zo#$KMb3+ruS;}vvPV029U4Or&>v5H6L1TG#VG z4xHosoCO8s8Q8T388v0JgKX)21SPm}YWzWzO6(R^!`5Kp(c z{6G51m$Yc(MI@e%pWuxNni{zbpJ{s$0X^L~K zE?!5UR@)|b*QPw%Gy%6;Dsk%tOb)4&pqG%bdFd;wdsbq*ELX|IMYSns9U91gHegnQ z2iuNYdAh~oD(&7ZT*3#LrQz=`E-nWioHE5)=PkrmXApiOg@g42*=&0yhmMXqWB7y( z{h~W!cp=7 
zrz~FAzPO^d|4h@8?(vi4Ms=^&dcL9{?pETCUz5lKBJVzmV|~tG(%TwKBSD;Wa-ZrG z&X8|jow2^k;pokwkMAP;YrPQnl+DCBlJ`BbN)JnP+*HOc;-Vcc2-;?AMl__BSQxEj3!BH-Wk^3xX{I|X z%XNMARGyto>(2N-QOPFIno7Jslf>&{ta-Osm}RwJuCm}U@w&$H9HAes%$v0_In>Fp zn_J_iWU07w%dlGS<(2f=US!~xoAu#UqO?=HM`?s_C}Qq9r;+gdxbtTshkQj1*nBHP zI=9njr*N>C&o%gc|M?F8w-P3=k4kG_I<#BmO3A%EI!cfmi1#nTJnz@lUY(>BQ!yIi zEfy}~MmM>0LzGsA*39PKj|clim?fY5!oL3V{8vu|_TuOzyUTX$>3S!hinn8DPqL%S zUk3Q2w@MMyZgRI+jP338ATa=cZ&mNxLvdN*2de81;t2i}V6MU<`+bWAtHSA3RlOhn zny2a;Q!kqqbAZK2Q+c!2bV*9FtQ>NRYgLuVK}! zS91y;ET5ZD|KmCVb(w~;kx1u><}3Bbb=vL+=2$sqemI}wm7y_iBJ+{@_KIw?sU2ba zBALeq;ez#i;e+J4^6@%dxs|FbTTf)2()Vb8tnxBmsbZpXPp+6WvQoj7OpdM>34514 zjW=oc&s}F8R#qToe2pPKyif zqAt#kPx$<|L#)z^T*c!IE|h ziVWqK%x@NUA&)PLqbqOmg8aL$7991vl(N$CeRf}QW37-kica!LaT4-{St8jBZm2!8 zO3&4O!G7VK>asOFN;}6K7wZIf2pi#y_`B`?ZfU(mFn%Ai;&i=d>Xz(SZV>3V2`Bw@ z(?wN5*W+5fb1IUK?C)4sR6y4Wq~Z*b6vX5=E8%xe}7 zLq*{teyeUW_prZ+Q_pa?sK=p$ZR9Ej{v_x4Q#r@xXyBzyNILwk`lNF4-_Igi^lr|6 z!C%DdGDS1D51*V#; z>s$B*Vm|Q(7vv;w3Sa5^o_wn3(R%mEG&O@!v-RZ};VQillzZ}d+_N$quIO>d^SVYJ zh_Fh3zBZ!m-9Vl|{a8B{-4O~zJykVy&;5QB-zb_KbUATtryhff3mj`gHt&-K#kpF3O+Y^Zu1&dw<;3 zi)nXi{j7dXdWM_ME;hQl#e%vW8fErsPVGyUMRVKmAysNUh+gzPuJscwE{z66R5o#+ zc$$!>q5OeVA<%|tb1Yx+RGRfW5*6bfD0k!#z?rFv`Mmtbt1CA{>` zgS8jml5cK}gH3wmiKN}3JGC0--YypsH&HzftHME+fn?Ja5l-X2 zq$tA4P_i?{P$H_KJKug^^ys%+q9{@JyI4|j1}TQcdE8Ms@Mz%0pw7OHXVzwH?bj=VK|>;3blYGoA0S}aV>k?5{zT^2oJzFOt#-)EX*$dfmA3G_cJslXqZub z8&Uy|vT*iO`Ekxe&r=tl8p>hryMM1`Pu@%5Sr!U_g@Ekd&y#FpiA!YjXf z(2nXCytk=x6cj~=`MgJ!E<&AQZReEH@pmd*9hQcq}{#(;V&h5U%kE{sPq zFgg)iz1NEsH%Gxwx#vXLW;3cz@+m+%=*IxUe+Hj zm$bx;vEgf;P}?IOqF1KL|EI7K?qX2>>O;u;Xwj$&X|(^m$+*~v*QpK*AQRu%+Jbre>)VxGvVJ=}b* z+j{bLTAmn~wTy?`RF?CB5MtP;i;ZAU({W}!io0X4A3tBLn{qog=74&hF@6g(y=Xnt zq2ErjJNZ7pkBtlznvb`d7r`nE_es5@Gn6UWFy}hR$ z5jKH}M2B3u{Xa9Km6%QxhxcS=B(3@z(QY4{n|X_UV`9AbAT}aji?k{G{c$_C6(3Ug zD>*Rkk4M~XmpArglR&6x5{UEk0ZM{G2k;h9t;A@pW+Efx@R2X{q?bQbx73B{ixF{j+qohPP5!zak==0ZcQIhj}dFA~q zSJ}+Slc-Y7aoVU*oq;!SJ%aFl=$DVkLF|TB$hAZq~)m?Ur 
zJz;P;{<&)P)BA$8>gK@t6p97u-nDw7GU4sZu^{8@{);uUc{Y=(Pe;72Zf?YwA!lZS zI!=>p?jw!m1$q?Xvd(p+CT+g5l(QT;by2xusvND_IB83E?_Ro8JL~6z>bFtiq2D7J z)^@PB`{E<++=rznPz7!)s)`FBRjMRen$h7 z<03acI2$ETf0YA>hDKpy*#Jb2b+lN_op{on-?M!IzxyqfwF5TM2U!AiGc&*CEBehq z#?N9^z8DhSb^;cd-I12waH&L;JdIvn51#El9Ye$0dpvBNsdjAO@*PztDa&7Js36&m zyxFRolKsybU%jh8c_DcUO2_aN08B#t za}H%rq+X){ekZ9A5OtGcV^4>6o-uoLoPGeQx6M%J?t6f#%kcb((eMfDsCi}wfEoRI z<3~99LJhpcM{?MB6hk#>Bt)O}>yyabj2K#Q6`)N9-srH_i;FYodSIVccMYFh8|;;uu+AsDVfj`N zc;uk3UKzqC$qfJzP`UuiIYKucp^N5Or`#+r1`oL0lSQk(@#60L-re@3uMuani9xD)!zLt3y+!uD2*Zo_ zO0a^bxTO5+vr~O{N1h3|9?uc0{mU#4-{`4R?^5KI2;?2Aj@Q%2-5`BnbjI!2X%G~3 z1=X@@wkgkS6G7In(!ekpL&9fwtqX1y?rKAJIUCUiDDAQiS>Uggx0?oafQ zGOdb1=jZ=vvn@L!t{z7LO1&~+BmWi4Lq4dWdOznLo?CjgIR}F+flwEjnI2eG#4%i=lxtr#fe?K#akfZU5eDHr^p6 z7YcKw?yS zhS~ylSP)H}VlXBJ^h&F7%dIlBM^PSIRaTEYh1zk)Qe0C08sRpCTf@aZm7#Uj{vE=5 zfYTC$2>=H=^{dD&m!mimCgKiR;((y(il5ma(#S$${fX%tU!G>_c4IvIpSnXdYV!y* z(+1G*F{u!gZiWY)&^wE2H!q0`1wFYUpXD|@-Loo$r`iJif7w7!_((>0$rc3b9bQ1` z;+P>b;MM!mQaWG??`gh}=#HbH2L8}k>p{LSi;#$CEb**c3{)xtt^@;NcO_xx2?&xxM~U!qqY%(`*VAZ(%Z)^7KGzjuAJzIKPHVXE z$*F={dX9K;!X3#+005I3-a5I zJ{h8h=Pe#OUgw&88)upkxQ-4qFza!uD-3t8pYtc8 zCqG|+xtuNgO_b4mMJ@oQC?HIDZI!?@?nHsq31^tl1cJq)?}0UDHFj2@T`%SGO=v!| zihwI<*ZJ0R3jjGJ#{-Y@e<6oxCk`Mc8fYq;%3EtHe-0B`tdhB#R2Dfr<4vy>a?~h)*^3NiVV?jk=`|yGi5;9WBL0sQ}mTwVm`qfyDu0D4{NRz{Izr>?M z;L@cN$cBE*V+7)f{4eI!SpY$er;7=a#rPDH9Ij`Muz)8yDO_h7U8 zd;};#K3ie$0QXI-DC!wZ)g4JarUTnhJuE=N(Pt zq=}c{rqaeSk+w(GYP=$NL~1&OX}X^F3StO-;P)BYfL8{~Cx)a7FLe@kryxQMBWPun zqEac+F;neObD^=Y!xR>VViMp%w6A~e%CUmAO?ma5HPYNRLIZjoaV*wpi5Zc&!oR+( zUFTwH=T5RK>YloHu6kW2v;ssrH)M#yED+R~1qn!4rBB1Z^vheVs;aCAICOdSd>$i3dz}_7 zkQ_Xm*&Es&Qg9M~H1!795!Jn2Nv76UuhGERM)_T}Gk`Gy^}nHjvErP?@dv@&D%AJK ziJ|ye9-yP_eqHT=-s_{l5i%T95ic*?N3re43+Su~^o|B_ck$i`^l^6wS8|pkkF?{Q zL-LU!mmu;ST_3Ul)%A7W7p*lP-hJv9IVDuQR3ME#{Ujr{H{S}%p%8S&Dn;dy!{K#H zdNmTt*Q|*{sjc@pe};ysSjgUn{0a#2CP}ZX`lt8b*S#y87K7L&f*nYIz0P)5?r$1u z{OeJJ$bf=ATst=2J>m4eE@LCaFB}UOW@xWRi96aJK7LFa0nb@UG#L6R&%C`DVnuci9Jq>rDx!La;bsDtCEs 
zZ4ywa|M9`)Wkg)F%?#8X**Bx$Dk;Lwt=_xW!O{Z|eM0@E(~?OdAS6m>B(mPvKbv0x z!Pq}G!WrIoA0xYvH%6nNZZ6qvRRImkO68k-NO;$= zdA^dtXe|%t#C@4)Gd3|A0j`@y17A7{@R*XDWQkIlCFUxfEJFM zPS;Ez-g;HOo$q?)CAr_Kj4$ptD4s7h`@Sn!mC6hMi`r(+jQ&_r1 z)qaiq@Ufb38GZfc$NG4~-mA7RH+LR+*{q;D{A^PUDyTrmuW(z6qa225+Sxp>KrIcU zuwFrUgQ@o8|FMl5Slp&v83cC7S2t6eYP^m}pu7O@Mr3jN;yw{jH>L7DF_8Hj&7`nc zCYm|*m53G~ANvp63Ce?xxjqG+%@qv|oX&%;&g@WnjWlsi^l;u3=fOGehlpri^F9KM zgEk12T)2I~%QVe95D{HFBCfp?#a|lf8c2Yi?gfZq0F)@9DNYUtWnIw@tracEYJ(tPf25#5lS~|9 zo(Dnzl>o(kfCxM$z(B`aW&*KZg(kUj1G=A5Uh~t>gPTfp$E~z&s`l&4H{oKEL5cdY zF5ANUHt(818XP4{e|Cm!H7Hq(PEReB?q~3A34(!sCh%nP#*HtJ2CH15NFH_4R?x}_ z{27#jM50zz=_=lqs2#4fNu~O9=PnvGaNj_+6FP)FUKw&b@M_R|ux}uMI<9Gvk{Oki zU5Ko5DUiG?T}cs>jUI}|e{1wmmH)MwNFPmJnveZu?lod38P3_;Zy~2GowooBwqD#b z*Oiv8tge6U)faPHSwG9ivb;Qwq=Muz}Q81q~)R)JgK z;@Tn!yZ7Ap0zif*0f!cY$lNue#94&Csr{vW4&+fNZ3yYf>>VeeI>2T$RQK!7cS*)t zSvBapD0PBgZogEj!0;*OHP_6?j*uu@9%+j%NOa&gqO7B&S6>2HR!jG1p-D+WXydWLD?d# z#p>d+W?WeNkR!RP#vN}xAyU%+U^l?c8B*eP;#ADm8N_Afl?M(VPX9wzpQgM1k6E3N zyiC!?-?O^h8O?=N$OeFklm*-E)lkJ&I6aq5nk#QY*nJc65CfU2k|ZtvNhSv|0LSt88?7Re(hJfF{MG`%{1z(I=;O;OPC>6TD zi4gj!BbP&If{8y@3?;6j_)5(pA)Qso2rDQyVyfYD?>?dNqeis1xV5ztoDZY8pkOGBF0MI45Eeey5}|BCU~0=Qnq_PwLaux7bZE^5bxH*P zS=g9-JNC4ux-?if&qBN_eJFMCRs})ACEbqQGKG>xJ;H&!eCwf+BEJn}*RiZ5E&cbx zZ(cqK!osTDs0AS@+NAq2=KZg}*C2yNEf{wk^oAm#HBOs>vsS04Yp)SG zgD6t)oLvZMxn#H8B$c#bO02=8l3G%xsHFMUW$&~DOtvHu`B?>070n&G|K4J;U3bJd zL_9^8u=Z3EUMCR0^6C4bBUH zCRdEuPBa>nN4iLrKLzOU7E0mY%O}re|7|ptawI&d3uq*SbP=@UAuk}T;qclejt`_3 zKe^{5Le$!A6%PPJQ{uG~0=B6px>@4Omu5ek79tXead=396oZr+AeEbv0+I)S*mO;g z!^2;d#kRGkTNXk@Br-3Um=OLf*f+q4fld|}33>`q) z2_p)@Tsd#5NP!K#)nXlmO{GER6ukbG?291z_Cni|yeGd-BYQJM7fv&x*9kvb<>;Tc zKkmxNeU)`M$2vM-h;AT&rQE5%&EXy3r->+cbf)@^`g(*oIylWe5=x1nI96qmIH)ut z3(|2vAq->ZQw2S~%-MDgh+U3yW=H%Tl|(^9ILv;}qxw2+TT%K@I*`5K2Ujm}d}|R(Y@aFvt1A(QGsi1>u~?bboCd-5@HlJ32$$3>4P8qi^+R=x7#_6wpjk8CGjRA&+)KxE8o1MJ)z+eO*zUnEs^7#n>ez=P(-dAKtIu|plSvA+AO!B)ZR*0qcrPdf#)LMm@kze 
z$024m!^@-yAX?h^cO|yhI>M=w3hshv-BP8MlL-)cjlcQ@5f=NkCPh#JM@}Me1XVYA z*uL|zCA=|>`J#sLwl48F#qYYBlGA)Fi*(3df{}+P%D{ke{X5ir`aN1wTR!&J37AWC z|KW!;(9jtkaNSGRlJazoZf24)& z(DI9ay}DAI6}So@pz<#9y15Nw9t(8JRa7CNg;xB5MN~XfD^5T;#PKh*;x6B<&Ga)4 zv?l86edn&L9bDhA$W>}2tk1=gwM;_b?V?XoT;3{!T_<&x>k&)ToL96=z zG5O&Pucx(yjLjz3M|RBPs1)_zJJskMul9R z)ElOvXSPXD*u0|V7=4!_@j9Tfm0s4SY{b$@3Lr{)X@imkgbta}Aj;91eda8{fmKRq zxX91p;Y}&)L4+RwI46t81`Sx~%lC@=JD{lqech__W2flLVoEm&y~re8ZuD|X-acnt zBL1k!Zlei@xX5IpYnVJ8RI8}2%GnwMRrQvN-J$gVKfZH}hK(&RyseRBMJfShxeBTgRXpo7x zGxhd?XmG1%uicz_*8h_S6K;p*D~-b8L}*DKHDslc03sdu&(*J03ApqekXBV+h8detKue(Bp!ysR!f7$9(pleueY2kA3;= zcvgF_RRf-9OuUs0Y#P(^oUHbg}5Q^NpP!3IwXBS`Pk*zNDor!KuAI7%V` z!(zm>wSXL|RDpzauI$kkp}j+dF=13i;X`dKchYSyx&3)nK97=#v&#li;~@kDoYJ(n z5+Kmkk_iO9`KSa~SeaDVM3o}_RT`9o?+l3 z#&t535@<^6PuU2Bf7uwSFBCb?z078zcS0*`Pe2 zp1uK?O}TxlecseuH<@+Ch|Rp_4~tMNuLfeAQSIX{x@}|f-xQIu#XwpKdxfT+s0*%S zLIgg|vIdhuw0pfy-D?+-&`+cTMP-phz7=d2)kRMZ;7#M`lQ{sAs6H|4NXU^9MZaOB z{)$#-V=?TqpA33640;h2D|E<9*#7idZ!WOjQlR>{?nrHVNBpeB67bprp8JvpAq?z=n4sGFbw*CXJ4VDL#jK}PPw3WxU0rXFq&FtY2H=Z&DLhE&#MX^!*>Q>CdXnoLz-1`(8prC{qp|JlJIG)Mndc zKJm$1zpX5^hjXIY*^>;1-_^@?p*$E&VxX=fIcZx=l-Y}W*wL=$jENn_!;{4=TF(HY zVMte=8q$TZQ|V))K)l9un?nigFroEX>PPpFlRK%^-vvZASzfM~Ihi#xU|<4Kd8PpoLERa-tjxAoddap(xQ0g*1XBSWS0_)l(C6jD5FA9_pkC18{6snY6P8Jh<1Sk2fl6?7EeoDrKskEwpvAVCp1 zc^@Gx4|V{VRSUt@9xI!U+z@c$dpC2|9LqX|MwiH*2s8#BHIaxFFo||ZOreoveVgON1^@X8bBE>?xX0QD*%n%2XzN9e zvvZ6l`KJ6WxQ&3*P_b4v&e~KJZ;%hTdrGS7NY3$R%?dLYLmO0Z`E$t)K>vT0+%Ow2 zTAoX3pem+0Oi=VBghhDv#-c_w^7r+l=eZHNSf;;e|LgM$3gFYWx+&T!y-}*Vsw#i` zK;7b;Psek%tgCTqWOMo%OCht=SlX7Kf zsT^dAtvC1g#sVi}3S2RKPULXRbOc!AKa9#GN@8R>5)4~v-;b${5$SNZ9U~&_WAaPk zc)^i)y6RTb>p#!)RMUN6vP|ym06&SVC89E9Oc}W7_HA9BkDeZW6Oh0@@tsZoT}s~& zxsi;Z{mJI7zD0ORuT`)Lp@?Ya=DO$ z5=Lwy7kyK-1D!EkDNNX1t-;lMN3~eIf;eV~Cy_W?{7n@~&H7>BwM5CvD+thWelfr6 znZ}{m@EmR+Y&XD6F&#!GClraXNZipjjmnZf88Y4(bSNaHj2-L93|or* zaQn?>*k^fauNPJMK@={g20h*5uX2J5PPUPC1U*BO@(;y%!u-S9zp4{h^eV4!dKcaV=p0ENRo7B>bGV~4QL?d)C(iI0&3`M%PQPil{t 
z?Re7Wr8&*blnP~A7m*X9si%^lO~OT-3;2*oXHZjvC5_$^eK*)8>WR6HKTv?_p4=$q z&hQ^J3ZwQB_hzjv9~>ZvU(VGf3-vS?%Ake76XSpDSb8?L098GTU;w zidX8%_Ggrqob9ZS}sdUB4m+hgi*qI(mWbbw0jUmZ>7h5O0qayw6`Gp8!cW|F6fDbMjr{~ zL8yF_uj;_!7I=QJZP-I_3`PwQO#-G6*z<5xn{aZ=$93&2|4RH#iRp92V0;se!Q@T4 zPFV=8y!SY~HF>`(PWkB61bMaU9$9sa^5V=Rzk051x3Bzs_#U;833KugrQZI(>?imvDmENNMM_o+q#?sViJUp=H9sKno^^(WeIl zzGwFM?+H^u{jVyh*GP z4nKsgB?Jt4Nt9fLIe11_{idch)s?FBV`ea87<($rntOC4r$|ifO(8 zL4T!Z#>o&45jzeK|FT43OQ}7^`fKu5+d#6pjG`@sF-hsUx=6S!?);S{&o^8N?c5%S z3iOcmaZ<@@XQR~SVSd2RP7$Sd-$Wpu8fp@syfNyr6KPjUdv`Y{Q>m|bJJEc!F~6Xo zfKu%>;Q5_6F97|3qtc)m-TeC&^mpY8L zGp~pL<}b0+rzIy&8LoLpdpdoVP!ZdR&gWk{Uh+EVj`vS8yZz{4-1mt*%Uern1s^r2 zwLakVuB1qs{`#WZfq=-qa>nqOKKrW;}7kk>gP1Ug>}TE2*A_^;4LTGI9{7K?Mp z6!%xB=%F?zVvmi!MC@^dM(mjCrTMgv7SYnf72+nFry|s<8*9}a51^XD@kKkVHgV+v z`Il<)xc4*jVoy_21euu!_6)-4?6K#ywaelED`4-iW=Y-{Fi!Sf8wk_UKEeMtn63>j zWs@qZqeCCRs%=a!Jwu~@Fln|_?m+bYX)278IT3x!+AurbJq|_>$mAhreN|f8sS4s$SZ+>>o;mq~}b>4W=JW0N8um z!f@&OIi|O&iBm}oktEKvUcz{n5YfS-iX{5fnf6Pc&lg%kE>7on3cyU>E+taBeZ`DE zTffnU^ReKXb|O(?icetZaR}O_1EYWWWWDc=NqS$#^`PSU&FMQC#KvxF*(HUR0T zQ6fGqI;)g?Z*m@?2?5y=4}^Sj8YKzBs8|E4B1&}ZJl>o*F`T%XtAOd-SKjT7T zPq@b@a}e85BpOq72QTP87N&e(cAO$(Y#1>lb}tx$`d@Z0|C@7UUtapPOpEV<2}zz} zUNjC5blpc)6vB)m<1)iv^mQDE3_s@mqs#FaRm*$W$jL`>-`8D=SR-KfgoNZ!b zsjv)dujDa9t&Cgy@pOR;5rYG?^J6Dox=^?fJuOTRz6rpj1Mu4>nw&Cw208wLB)u{W z5S6Z36}K%T5g;qkU0G5JlTENN$~T`#7PGNPI&#o3iAb2pI1^Dv{U^jHufzk@GX~g> zLLdd5$S>0#M|63@*kMm_4^!+|dzL=fba8c)_BUEL_OTC*g^#y~AHi1O=T|$QhmW6} z#}f8ZQk9o=ms5F*6;UtCIA(SJOiuC+Q#qomnH&$cXJ0E?wApYg88p1bVdhK52~hvH zmL#}C^q7nxMj|c5tRoeFy-EB9+eHRU)skrQl8_e-0sv>5eX7;R3kJQ zjF)JdjQJLcQHdoK)>AtEVYLz&t;AP>N46$arkTLXh(J4PABj}CYJDiiy65UH?mXTV zE4^dG)3j5tQqh6T!;*WQ>$ZVmk-$kiQhXUw_&qe9&!)X-C|Y2bQ|joWCwJex3U^PI zbzTlRh|JEBeEkHU@hXR*#5~`I@msv}!ra3W8y5HX21ui=R~*oF_rpOx1E&n>egsRZ z+`V&*9x}1bY+l*(aMpsn*RNmi7OArk!4$s4VoH8K(JnQ1v-IOA;2|$~Cp*4*S7Fb^ z1@qvh`|lD;1Q(FE{_rH3AEb1a%ZuaGzkwHb0?ESDn!mYIiw<9;xX9|1AxzEN>5h|9E9r&5sHSkg|2raUKsja 
z!MASh{rTtTcS5EnF312sGzopbU5>+a_O6%l3s9KC%z$xyDG_@fJcP!E8eZ?v$l0or zw?(^{B)1A#PA_D6^t^lD^bUC#4z=%fUY^stEoZT#qvL8R=NA?+#Q!nDm^8$|feAZ> zjH5if;U6*%5u`(o#T+Y;z)%UwPB&s^5ITu(_Jnp44vA2irnE$s%XfxER))w#{c>Rt z#t)1p_}68OjdEY1#b*=+1Q-T0bNIlt$(+gqBd3b!yT-4!$G!4tNB!RXPfaqiodU^r zUFSJR50AcT7(0aZm?lwyrYGgpD=V`B`?mA3M{1$0&#IMt@fY1L z&%^A-`iwWe2XdSoWt=3bgQTcRI@7NkGty$4|7pxf#5S9N?lDbgxGB?_|BHUee`u7~ zzq+zw8I%myt;C4;C{XkWSbXfeG9y11fbx;ET4iIU8$eVzwD|yT7N)-uT{Owi+F6b4 zkN%XY#Sa(m&KZqVX!`WNW-->Cxs$caUR_;Xl}?}T<&1T{_(N~!XnpsX4#)P>DQ}Dx zz$Bpu`*&THHQJ&v0)fh445;2`W9tJ%{eXUpxHqE7oN{m6jNs3c)4zlDyERTM8?|82 z+axR?}gqzzS4!|o+tPHviVhO;=WtDWdyF8Y&X3gr+9WQZq$JU}857F!%uO70% zsOMUB=9;TQx=uP$`tSh_bNo~@*to!%eXm51^yt9EctdlLQP{FxYnKU$6N5binQhy9 zN??t`H*tqjtQdyHgljDWZ`>`7pwI^!E`kNZk$Krb2F!5UBpv+C-2uVxLa__p-c#Eg z8ot{{R3Yb_6bs2Tbd^i@-q9sv=`#x!L@jVT}oXNi5yC*(Ztt z9UQX^1k8$4%@adS8f=PjA$mRABqbZSTTAZS4_;s7Yw&C>(Gr-a`9C;_F&&Qib9JF2 zCEtvAl0amoctG(=N-rzOeIHbv7bED5%|yxS~FI|b|n@!*r2 z7?7aH;v{tjb)8pOCk|H?{~jHI9ug^wn9G5yW7u7uArYqNR#J0sEO>Nbu0D3a_axY6 z31ADhn3i-uU?H!!y+dC&c9Je1Xq(?~YtSjuxnRG>p(S7~$DCz%o)1tY58?2q zl4%(~|3C>-#_xYdWIlnYp;U(>!5()P@!rWL=7Oo?7vjzL&c8a?ul+ZF~B*M>^MIFua+}UJ-ktiU#U9Twgv7 zRX)Fy0MCzw3o9)CoHL&I0^7R+e@(BDv5Ztw|5miN#Q*VpU0tN46ZM(muqVDUx`Z4K#`SnI`%JoN zy^!S^^xHa6)NGxYDFT5}v;8aLtSf_fBA8*0TiHZ?8K=_`h>mtHTCB!#EQP1YnVWk0wt8yDU<@Z8NxvC$v`440>mRwCy!v4$%zx5 z54!FPnB8xW8H|(~jvPekvNeWx{gD$jFQxO%uN)Fs?+-t!yP!P=F_M=%=&w8EzhQzZ zWElIhm?GOvo~q4o_o~Pm`^5fM@>1<~rXjmrr<0-&t_$Tzt>Awxc9WHaYE87N*C?A0|bRaS3W#n;eF#)RTCC4L!|-+NOHe z=8fS)iexd^!i78rmXTf>{~fR@H$8s*B9(t&WGxEn^NPx8>OjQqLD@Y`TbI$Um*)5N zh0ptGvB>tTEH5%mYWgi z`=1tz-ue%p-ok`LFtck`P(JteV;EpNO$OLVN&6Rd3yvEbyV+*X$TQYr~P2Vt>lA+$?*j748eSOX;-*jQCQTzc$Gg zo10*-i*aUnqdn}~$Kv{IcUd7BN5~l~ZzAe=hazCwawc$Apv8)M4#1a*qxq!e`3xJ; zC=C`3c)I#MJjwUZvS?IWuQPdQ3yNl1TBUZa#Dl7zx0113X8ru%Nql)n)nhcPPc~QQ zZLZc{jGWl>s@q9)95OR;%xZGx5v2!HvgpHiZpB|OUa$keyQl%k$`ZRl3WUa+gbENW zBU4jtmXEKY&cMDqF*+>wJ~WK#a5VH@_Z5q^nu^WTDgD~Gh~Kts`vxUH+b%( 
z%&pZ}Y=@;u5%YzY=;ZPZ;gzTY$qX#z{7uh>;C5T+OraUimITT0Y*XFtWAeEkordgS zGy#Bp$NusZ0eLvuzT`G^q@F8LY)gpz+hHgz4 zy(VO@dg^DnCkTpYFQ@8q0#F+IgQE)+HP*S~3A7q(Cl=McVu z*mdOXiBrE6mEs7~`!39fbW<_j?3Nq;U~@iGtPKX>-!lHCc!0iwPVWkLWt(NcQL1ow z$5#IC%!&Y=g$fN1Oh22J*phVnzh~J{L^28K*35~K^Fh({02U3lk;KlF^Fuuqfmz7I zr-&7a3EdELefu1&5E%*oij(ZuFz}h9LA=xm2*8f$U#q=5aN1T9(K9iZTj-o^Ix`&q{ ztq_ZFCEIz140JJ8t7bnGpMNo+@R9QaJ|rQWiFy#*+l!nRglTOdPT0hs6n6f)AL|Iw z;9$&O#+)R>r>CJ=>a8=Hg*|6wX3}FL%hvpdAL1-x%DU}k@8W2oWaSxdr%QDP&XpeV z4f~sEPoyI$!ASMSa<(RN=CTBQNCFr;l)PH4dk;?Yz2o($=MFy_V4X__5N`=>!^V&G z1INEsOPxUgTLwcu%B$aez=+JO>38M7KTIK6OqiAa4!lR$=h)dJXMVgWbP?jef(Yf@ zl{@;Ett!B<_a6i{xUgu--r9*IR9K+EI@ct_7>4h}dWc-DbGcGZQ{_e5m&{g|g5whO1B`vM2%vZe-S5V( z<^XLgIl+S|yv>{RZES4|$wo5E#5>v<8=5y!U+7XO#*+vWF|V1iSRQ~9<-Ym!u#4O| z`M-@`*?(p9$}+qfMXJAt{C9NIoX(a#vSijl$ezENi0B!iS=ZGjr*VoK{TzrH5yNl! z)VYh%tTj02@vYGu%F-3je<3wE+3hs&fMSTp$&QPjw0l{m%NJ$6!KSEPsF|YQ^Z*!= z8vJuX%+JKTh`~4td{8)u)I<^^j?qHl*n1CNMyPPX}%Ts55>1Kc)qlNAV>a+^3un|CzpB|JITvf&H96o z<6HC0N9pYww!)>MzTf-9HsU^z!n+w2xJWbH3zc=jQ6Ktg@8SEqQZUo%NWKr~5}teDoq1EkYM~c6lV|w`F?itY&MLCV8#D9qc|teVw07 zDF>BdxB<0hcU6;(AAB*hlGLYacCu`zH@h%Gt80CR5gc*fpLp0eBhs9G^iVEWEJUB( z)zVLug>9!1KB^(glvb?`io35Zuie`8q0J25S!S(*oZtQAhzt0IEjJ)D21AGpurudB z#fVz=5;Z0N>kZ);H`AD)cIIalKnXW#TqZJ53?a&Yqt;=F_ti^tpL1Qa;_4=yan-tJ zKkOsgSkzhs0p()a^f29iDHX8RO|?BwN{RP9d2ZAsD5G>92IgicfFST7AC7jA-A@@| z<55O4r)cnaeVO|UsG;;Z`^~0C#~6WnltBSiDYG{oo0gz17oo|-o zS;=36<;eD){QpqCZP8&1>Cx3`{C64ZRfd}zFT_r%M32i)OgH!CcEoI>-Xs3sMqQK{ z0Ro^;4u1+V5wG)Y$dtkhAnyU;pB@gO%(St;g#YUF=9_XsE=CRMt84fs#UMX2&fcb- z0*EzsElgC2XL%)YW~6@>mmxD>f*E`^cFOYd7j#moqSJak(3#aIkoa9+BP=$0*p#ElR?Nk;4d zI(sost5Q_l=p$RhOF=Kj6#Fq3V?oWs6lymp|i zBwz?%E&S>m#^IRLAdewkNlo3{%uW3*m+mlGd-Cw?G+ivoEN+CX)m=En&&ci)FX{3D@3gv4P0b=p+7dqZxzs*kmAGtPyXlXJ5+2XXXUeF;TbGd7hlct5+$N8t!XJ|r0c6ms2`_D=Q6`Yqow z0*@iEKNNEs%;hXl-Mf7W<$CpKw=z+2dwY8y+e0tx@uvoISm=SN-{FDvA84W|U_4|w z*}B%%d9M&nY-LxF?#W|<2W|J74CtSRp~JS(3tkmZoh+wq+QGk*VPmA8sbOP8Y>M`2 
zVl8Q`{cH4>6d3nHZ*qgo=#hx6f^ouc7-}nU-S$Q0kKU9BzWtjcMmNvzq&}qy*>*G8 z)z-}Z596ne#7WVdvV{EpTszo1;s(kr$*A2jr(QvwUq@l=vkreJN6$r1YT5~D@VW&% zVL;tL^EiuPqM8c^;qa%yI}O8kD!cj#E2oh&Tvgd68b(C=aPVrnz^3D76z;gCENuJ0 z&v{KM>68GGtWtc3#h4zglmhQ;5I@b0Mxcz082U@dkd2jbRxyM!qsB72<8Jgg=);rD z0Zvz`iEQ3^^9?cmhP_fr7rtSRz{U?T0)PIZ)0D$H49Y0uTgE=i4DsH`f#V>UZG!HXkz29X%#sW!1fhjd_Jmht!K?x zP2~KEmAiBSKw*_aQ%ta#hl8YUk2dxEg`a~PeK-Cr<}u|H_YEWP6ahi{(#}LuwgWrz ziP8lC8q><0kG9q2cb^q#N?FFE(_)-;b3<#kDMio|`!)vcoy$=QNG6V8e7(fIU=wrU z7D)&DuHa@}y5A#T9n-w3?RcijgT`>F;e+7!G6%A{p9s?RQz0nhvbsB4>*YN)qT)h3+22H+0 zxZ#9j^lm8$Fbe!H^@L)=sYs@Q%n8>AW@{Jg_PK_Ksm%6(b5Tv+f3ANdLSGX0S(%oF zq~khEszM9~Qrod+F=B;rccaXbB2oprmAxxw+h97xEVHLz5R+#TUhd30OeZ-!4{Em_ zTj1=^hf~1KkW~XnqVkY!F-p0}J|;IeH*{;uhOC*pd78loi-GC9tb02D_Xhg$rSg$I zG{l_*8u9fR*orOuppSN`wU@H&dgrtqzL=VLmBX)2=vK$=7c7r64D%y)pY!+eJ!qzR*ripm~H88#;{`yb!JHf#$d zwhg`aWqWzdJzBML4p=q7*@=G{wnTOaBguXeolu5ngiZZK=Qj-DSJZKGmv2bQmK9(B z;C<48tkX851?Ho>u#|G|aWXW6?FR>^b`&9AC1Fx~HN|O85qxB~zzl*2+EpBo>jz+Z z%3=TZd~u_R_KZG$2AK=4K2Uu5`t+l9g>AG-ybz+lU68nO<($m_eSm#jq7GQIrB#Eo z#*cFtYV*kpHw5;lIv%$D%qM9aHk7Ze0~dv|#V`e>ea=rPE-}%^L6Q>-&b=@Lbm0EU zW0W}lA{H1dnW&+@j_?&e9N*UtVAYBn&j$F@CaPbo2!NKX3Z6he*X(1Nvm@@at$jw* z1t{a}vdL#Jht=LK!^qKN_OHyl{6!03MnR1#WKPUusC`2|c`I>%@yluR5J+Gj=+QFI z+DtYs`mdmLM~vOC=|Mhv1rQcc-64$)wd9q;T8SQqnlAv4M@_f{qv4|M^+3U3j*6!J zW*zoqmT)C9<@d-JKHK5<^<;`G#Es8I|9FxWDVNp7wWlqz$!CBS0ND`%_#_>0#ER36 z_Siu4l%yQG3o2dzQyiH-hHQotrU^c{zl$Z!nH%7MK_14kpW}$P+7@!r5#I{0R?Ols zYs?wSLRo~hNBFCgr*kM^q`6a{hgEKRvYpgw+~Zk-e-FM`g^%7z{s`ZbhxU%;ph0!hUKijckXPVbyj5I4rl~c z5J6tq5dQ#&+dJqubG-c2D>h892h`!NpRI}eY=b4^;ZfrHfS-p$_`>8r;6IW~NI*(B ziB5r(t1yP*24+QlDwr_>?E&3?VT#Pc2t3RD&SJWFck~ZC5U#J)lB9`iB}6-T8p@nw za@xsylb}O1sSNUdqnDOJTT6akOMt1Mf9GLO^ui(*#tkP6@ei(hkO_LMJ8Q8>9K17! 
zD`2LN6PC*{u9MYg#BE#5+SnO`9)Tb1nX7asCCZB>?*Vbw1V#wxt*ynr5|bhZ!^F0Pn%(ftNe;Jhxrh849#f5@z2QhV!jFnuP!N2 z&&G+_eK<@k9o*K2J8U5P7z!G5U7O!lL|P8_wToG|Dq{J!x>EC1r2KWBL&-Do`sQiXLpV2i=R9{UIBj($NaH|} z4sqM8W2K=|AUZWQs8br5P{D6cd3OxiCITGW7FCEuOw4f-6Zaq_p>om8l0}U>Xaobe z?)VxNz?Dd8)=76mOjB4c5|wlAJTw30F8q)bvwD-sgZBQwPrxWu-#LJ0ap3$1_Mw zL5M5ltfd+!ZoGoxm0*vZntELOzYoHIl4RbB{EMV-UH%S`s50BB&kY@iUtovYv& zaQ=yAPd?|(WBpBX{cZz!Q~g0?AW0~2tkfZSIEIh4mAAhsTlKGY6&@_skDu|ybMPT? z^5Njp{4Yx%Pb+-upzja;)LwNFdK#~%E z9kwuKBO@b2Lec6$wEc1r<<~Fu)^QBDcFM)(<+-=*D$sYIrt*`~vGruT^%=VS`tnL+9j-r@%7g{-aUgx4Fx{7!N2 z+`4r|K65Ic-(>t1m&nlYyf-~s-?Cu8YB*!dp~cMs%JPz6t806V_U1w&bAYJ;5@&`*eagJ9+M10tp!M?f zkq?5zvi1w`pUzq92?0lE-vL$e8U~3zOXNJpE86|JQKMv>+sBBd|2cl zl=p7VBuO57`aj%AT{8FCOb-EV_TXX^>reoQ#fInwEafB+R}r`t*k(dOciTVMcy=k5 z#6ee+?+yC)!%f!tYZ`>zjkX*ONbvwpqth*_Hg7DA(|8J%ArUiFwra^!(2P>#<&b*d zftLOvF2g{;>~pWJ9ITgoE1B=wR$j2kih~1{c7AxrxjScISUrD4eIp6X6cvaGM z1Ek&`L?mQn$S96J*76UjS?yA4X}rDNg~e6PzV(Z@tUZ_$&*6NbV>yllWo0#v1|(G( z&MOD`g&AXdLXfuBXr}|5`T-|beF(V_so=8HI-fq))!!fBmE3M$z^Dd=qfVCa?Ax+_ zj$mh*Jvdz6WL<_wUox3bZop;7jva;F)$!!UhuWL{LW=sDbLA8vbyu8hMm8QZR!~r2 zZT$ANaUZ^I_NGIHs}8Copt={1%@gw;@W~Y!i{tc4NqXhYBv18^EMX=LOxctovi<6O zJVq;NEsO@DFFH6BjU$;spQ6GrnBAcvjv3Ar&C47t=0TNzYBKR=#4o_s!tuoVWZ z<~`3gZym~(r1f>jCjnWhH#80=g^EsIdt#J5meX%nv|6t)pPs?o$><+oSl?R(5-+vJ z>w>DRa_g=DXJCUn*U~j&UNAm5=aI9^mM{00#|soUnqAC;U$Hd3DU7mt58is6wHx@n zLAsTGPQQH;&lWSDK9C!}3h5t3>R(h}fWu6^>6$v_ntBUZN4Rm(`&d3nqG0i&}u{nu7%B1|f;e}^bTwVM3AHTP&I#3e|bqCb#tECT?9yAhfxc@{!Xk+;G zBkLb=FDxNlKY6B~zbVNi=+@-{2gR21&h75}Bs=+P8a=YSQ@S&!taIU=4=ly?_cvwJ z0PpcG(xv}7@k_q%<#cJwn!@@tOZF*QI6%SI5)Ps%&1^6bBoviTPIkT+;B)i`3kF!2 zRo_089`yZ2-+r*={+)s;X@t|`ke7Ym)I{&VbL6w9xlOz^k?1+jWoWw_YHj4#?r*$Y zi`UjNU$U55?6;OPquJRKoK+ZoYMyI)(R!`~#Aw`7rpm}SCp!!@Unvv$GsOtbx{$iq zRgpPbV-TC2JrxbNWPS|Ja3{h+#DgRNuTtl#9;L7f@qB9?>;E1X{s6vn>4%3}S|iTG zo#iAr<(3S0=E?^QB1oaNW-O%IxP69ow)@KBqQ>WT)SKhdDu92?|7 zmPbW(@=&cANE4aTCr6W=hCAwVT^uf6yr`_Bqx0<9v%>K~s#*Or3#pT@{&}K0&!$JK 
z{8_j_dwY0zI0a6?@P}lHbPuqebFOGrd;R%n7h>fB`1YL7!av*oQDW7#{rN9Sn(+ER z{|f!9O?hw4LX47e6OlAu8RovAleGe1Ay^8IA7*?hJ^G#XO zc@w2J#S7WxR{&Vt^AT^Nr^khq^*WlnHZr z)37zx4;{f>4oAZx0~eE;y8V93Jev@G0Y*efWxxM{zPe<=j?&w;=2%n}Q6j4=25s7; zMh>`|lUmlwUOc;uS9V8n056ZuLrs5FunR=|H8pFr6emsr5^3>9`4Auvbp1`{S(NA& zu(|sW0t4gxPoa17eJX+%Fr{eD?r*j{Eab}jauqk(6)T$MuCA``+9t^FO@jX>6R5?g zl-(LtEJ67`bzUbQ@ds<>&7l*!(uCfvhpOxJj=Gjb>_Wkm>fXKEh*oQ%Z(KXJN2#?P z|Ea6dKh!vZ4cwLr0ZwN74FhKd{6NkyWjKoI3UtD%hw~m*62N-cU<`!jwM5>&zCHzz zEHx4cA2eh>iq5@z^n)s7A+>M2Glrwcd)XDLsQM7VyAJxA_<^gtFF!*R&ia~C`nnW8 zjK^i0cQ>Wwkyye`WUJmhCYv81i3_Kg(y7H z{a2q?H3Cn8N$b44gwLz2tch^G=(d_YjoOgd)a;)?pYsU{pb~U@-YH6xX zOWrtCo^9xv0PoK}X;m67O(63+6e3O$22p{5HY1usjfO#ruOv$$!I4iYb_3D3pB6BB zQm5a9oN}LMYdl2a-vYRbzLW#2>ix%$AA2TBt_!wI56E5W^@>l*lV$N4R1&_~5}PRF zGS=%oH2}H>*i}Ui)iYk3lrwao$Zp=sy0;H3*#1&jw^Q;^KEvP33!Q}n0W~MBg-8^j zxpm$3GzGonVD5mMcXan8dJ`k-I#p%Yg zP0gp@DyxvYQR+kBE2O+PTNqmDZ*D(~G?aJj@HXby(&*wWq8(<0yS+|K-oajq<% zOK&owx?*Evse!axID%=!#Y(ky=E}L_iqRE9Gpoa2rnzg_^}_>=C2l+6Rmaf=z)T@h>i`A6)97k>ct^kSg9`Q-;9plpo8#aS9f>Zn>4kf(zNxjB$f$Xx-2lTL_nT%bMn$v zPWTnAx0Jk%qD!VO|55(?f!^QtHM1)_Zqhd8+C1dG{?+!&E3=iZ(-v~v`7@+$kxvf4 z*O!CQg%a9rbftgHf7>|gR+vB&pgVv|Na2g!#9E5Gj;!xbfNrOj$qo2z(RqL!D@UEy zpEF!`?Ah5vjV=SZx$ZUv*On)n7Wv!+E1|!A^fX|>!A86CjhAvSqygS7^yZAe&{uj} zMirQH-9(L)xaUhaE}dNezqRS#djaDyT@Qy^ErZQyeCS4B{wWf^0omBs{}0$s8)m_`%mxOfRX$ z!&RR~hIj{tD6xK2P4oZn!ML(``5SQZ2+iJmaJTgfvkTZ-y(6nUMMYrelj9&|9G$c% znLmAj)bbIgFsl|Z+P|AHu4t3n8MtBZ%I8O$VaLR2h2M6a;nje=g|KY-# zF%m0IvZFclagjjp0yGt}mHtsclj9VZ*V83vy7d0WG3J3LkkR^Ad}vDjxXA*Hh#s0N zch4M=xwRBy>rs$^;Ya=p7~YX7cU3;8Q^-?(9+pFfBY$9?r3qYxK$o;9k5@LV8BWo~ zQT8?ZQWx4vjLV~(I&Ph{v{Y|1p11Vq8&Q9pfsM(Y(`C`Sub9?6(f4jjy?b{9*r*RZ zPOxYoUfYyBKNubHWK9Q10`U*YA;?XYPkFp)4P_-Fc*IuehOuPgLhMwnQBDzAlt|F^%d>%4az1_GbgqGcJeNGwy=c+|PSl zVD$uaLGYw}3(Pj%{)Sq6Pj-Ie(D@8mh&u8i)fNvU*8o{tJIrO`DmCbmo|aYwIcH}D ze(;_Y+C1R+2!*MEaWyqHxYmC-ofU|HF<0hy0;Mq}AGP)5o`3yH7COr=_YSxmiA#O; 
zp}T1yvvR{x?Fb>)jL&EYHz|{Lo+?EjOul7}b*jkQ!Q&3k={uA(UIK>6w^nVKNXT__z6cih`!6K;;8VsXBOUUmng{@0^jzn01RnR$DBjY~v^`l>JvvRk4l`%GP*{

wsHCgqL0%@(DVVBMbJIx3}@!EF3rE}j*+kOUvr7v2#rZ8OE(iEx`E8H1? zqZh5gMvp^m!-ob7(s#w+Odub?C>_XQO!g!@k9IqbyjP{Rdh&ycG(d4+m;3biG1xsD zlRt0Nl~zK);Bj#Ix*ePIM(#@%fkKg6GI!)~Qp*%-DcVQ}cy}N40FX2+t_Ef+h>F;z z+{MMkS!nI)$$}v6Z@JCyFR}g*i;LL`tNvKB={`SRxv{5tEs*krQ5c{A)DaP7lR>krySXrW4A-A3LM26AaI!rS96&_8I(i9=QHJ2!V&9M(;oHg5 zbyF5(p2tZD_L(7M01@L?2CNV&xl&66Nq&e^`{z$iQ$&M7(0LPpR!#oA_(pw`%LQm}ob=HR(U)M}#a_&b)?u|BH{nwthAU3G{dr$YZmJ6@(jJM=F>d&!Y zJ>eIjeA{6MRhK05(?zWT+#Cs=Mh5@(!DXA12GlsEY@|- zn_)a-D{+lAE{tumCPeYv-3y}mf7yDr`*=d;n2BO&b2O$x>sJdj9EZiEDMKD$&gId# z5^P4zFSKWY>Q{?5p&r%zwaGbN{(RHAYoyixgUPvjIs#|t-_zfX)vjH%f6uS)lkPTk z?RTo})xyr$$-nGJ2kYIU{9jU=IaTirB|Fl|EnY^Jz*w7ylkkGqBHb-|o%)mOCa$z| zyr(be^dGp%J#cgQs)*+D3jD8L`!Ko2WYqoG8EpR}TDgX+V&x&fg7ZX*dh+n&hNx9fd8CA_^|ahYBa#ERm_OLb_gX}ltyb33 z8;9Nc39bhPZK=FHk+6wlCVWh|2ZM^ zI#=VmKAzfpGV7HV{bIqF$PjSO+G<$qfysxaQ`EPWJS(Gr`_*m3a9D*$JJ@LTxl0AI zWIyn%`t`?jjB;_h%W!7k!DPvYFFOn2~#Z$p&9Pk-p>9FO-TSZT>BZpQ+DsaE%qgokFra zpDh#_e%zNP?^A4*N*^Je#@uVOmJ=Z}R&gsnK)&7Vv!4nDwec4Y`UU2&JJx!f$c!4C zB;{W(a}=^Jwy_CY7ZL05a-2T(7ITSl!bxbgC(n+KLWJU0KlpvWCW}=+(%l|r5&*k{@?SfnlOktd zhYvYntvhux9&Q)V4Kl=9SMAFnGt&L5xmT{2f`NVEkp>BBMEuv7&HftvtZh%R#Xbqb z8Io&8lflzJ#BEfXfH~u_;B!L@;}Y8Vq)(bNy_KrH3kXjSP;HYEb9Q9O)$2s8igsl^ zCe>|f45`sm`Q|u1k2p?G>Iu3}L0>~K?oAEN& z4(`&y2FQTjPK##w_ctiblnI-&*d}vekI)s1xnBd5#U{kIr0Op)x+h+owXq>V4ltdZ z3ew;E@^iPCRHsv?m|{>V7@Tn$Qrd8Dwa)HKN9rJ#icXL4TIJh?Q#dr*|G<7aw_XZ+Y4I*9Lqh; z>OO}(*s)g4a+lp@99CNg^?qYRK9MM9n7LI}PL5p`>JJ(tsmGPab6&UP0Ic>JeRWfq zgZFpvs#&&VZhDV1EO_dmYj)715SjJo4zQ=HILp3=ZF0KKjO71TB3tZ=_q_XTBxD*f z?X`NRUk-(A60(3;;)WqMarEZdiHfvpZcf%TQc9#luH75t`uPKcR)drxDaS9LLWJkN z7a+6xJU27tix0O8z3%Dm9evb@YkZ`BL)Afbpk+bHLg=LRwQIzWud;!95WAyb&z9Vo zcQ>!URG!nSQ@gPN8F8E0XvK+jW1lKlJ^X!h6u9?515`tQx=R%g9Sr&ba;GBg`La!O zo3WH)o1^;^B4VvD;|%6-E45of$Rdt;E|C0^gfB^FvCCWvLr$3iCh+`OiZ^aeqPKeGuls zQjm^itu-Ik_EpG4Hc>xPihRw!e0n**r;I~k5jfM9#6TqXdAVxL@p7ZhUl+o9E_6;a zQhW9HUtzui9aZm5DKnMDI&PLG@paKfb@eYvc1DIn)6Y>b&+9Wq&?KYbAEpYa-(Ui11V~h?d|I7!gX1} 
zPG$n^^D^H$SN`erAs^Ss7+JhgdUa9jVs`&@&l?Yrp=AN>N9RS4KJkM6Js|FfqN-y0 zDMGLTW_~vbf1A*H@BCjyZvcR;t%%yN{r1lDJ`L8{3FK6?r4_a~>fCu`17fM(Q7g!# zOF_E34vt~;;iTUMp%n-rK)lw_&=3m}Dv{bv`qhnB{>a`M_&=-BzDv6PSA}gyHm`~S zxv@YHsh|%w_GV?fadGOL_;&k$Hg|9QQXRQiYrI<5S3rqpe?i>2l1u{=XHtaiBL$`QNEGT($bIt6pA(R9g-X z&P@luFzxx)`?zR_oWe@Hl2CfSc=@`J(Lbd3A|+81$c9*@;z{PIXG(pbvXR>stq?!h z&b0U1ve(XjagS7Cx73dRq;P- zlP``{j<;UpFAiD~R>)9foPHhhUWamn)Vxx1z;)LqQn+CP;b(j0k>MSQKgOp9EJIwPTkIIo05fe-XP_fG3mEdM;oN+RG6a;{1*er56h= z0&;6-7R{Oq{D#Vm2;7X;--I-YGa_)147+D7d@pGpX?yg^c$<&o#im%#)t*xw_gnlr zjol^6nD|ceod5+Cvj24DG==bZ^4P!cOU^%U=EE6uuRlNmVwk`DHNHcFb2r2d)&M?H zoWVLE_hDh3n3Z(zf<9+6J)ht?e%VGQw6Ik+J&|;tov2u}MXfS@N!TVXcpo8%C?Bjd z`%Op%O|l}SUqLDzk~yjt^g3Ahp*%+@xW=Cvh)FqYf$gsj4O0OE$|pGJ9#C)23ZgP> zGTl@rcDXq9y9bHu>FV-A8*26RYImEHv33Uy-6z`D9^ALFu=VF#LfuE8=xns$b~hkM z2e(L0w`mEagkaiPOw^~ofrogy1u8+Ha7WxY2e+7=FJegf2zX$OhuFnS_9|{hZa?Gf zKq&&QISxYkc&02qD_iy(U{WbAw5>U{d=`LI=c86WY&vxo^j@NJLt2{@0- zN-+8!dnZ43L_ts`=RE!%rW(e>uax;>`ljrebB05q>TIHHH1D-hD}I^`(OTl>5?;AI z`I2Ap{0xXG$#00tQb@)%-n$-5sbLZ-A2Pt9m%TJ2oqjI)YfV!&XJMoX71X5JIdF_c&v1l z@FL9m<(4ML8+uUapFjD@q$r10uI{XScx(D;tLcvx#5K&6NfXnT`Ql#v{moQcWCp}; zl+YF`~(kRTIy_P{g8^Tj^0(^V(iI%(|j(t=Utu8LEsw0EvI z$z};psJpKVA=zrV)Q*K53P7r$>@oX_|}qU=g0J>Q|QwcvSDL)DShZ^L9% z%sXEnd-h0hYyf|KxY1W)9hTY2{Cs)#MzxSwWkO~~vsW(3U7`5h^ao?ZEfya$PV4&+ zGP}8@P8%R)Sqq=-3|zg+Uf>|zT_#NZul z8Rqn`m~(Yqs1350KABwL6Wz(?P`q3;W^5rBt6PqYjly7V=0~PtRa_Mg+un2P9+zi> z>)VdA_zI}%;K|;Z*PltKhW%jrgf8uqgiL=Pva*}oz6IH{Rt9D=7`j%txwoq*w9+7` zu(ez9qEoMn>%IZ;mFpJ(4#xZ?057X_x3Co#Dn!VHT6+>lZHyvmS!xv9f@`|Plp?TK+zf4x-yzt*biL0kCa_woj1AC1W7v9IM!7aQF`{#m ze}P66`=_)S+=p7&VC`vIiSL6Wz!b zHqc3em&kVD7qytJ=~fzeyvr^Jht1!OW#j%MZpsj}&7gXaD50JP!M3O_t#%yCr7gKyIAK*E_^#Bx;I--nFjl*u=+BeK9bkiksNT zF2JYEYP|(6eITlc#kdJ*-YHQ~ui{TydjC;>5151sraf=Z8>c6AxJFvuWG=U?r##husQTCtS!DyRi%s%TDhNi)18IEggk(LjA|Sg9=yVf@48=o{lSgb;*ZI#0s=QA zZC3r{&YE?tu-5OM;7LB=lg5S{EaLbkE{9NUS3gd=-n@N!=%W-x$L_Cy0{_W(hY7eh zn)Wk~)q#b6wbs~+4^TUz 
zrz&QM+qOv~&cCK67a4%TyX@74bXwG{ZsP@*%|>9b_Y#@fPl(QLhP31!XN%B#r#s)u zp;@_h=O#GTQBLc@I?wp{;T5w5(QkBPaE+htCfmGC!3`Hz=Vl&;`JhU>w+@aIdo6iC zPsN%1ZScJc2=xeRXf8#H^l2)dVP+y}rnFg_jRvLqh&#Z&2y$>po!Y-oex%|it}`Nh ze7e)b8@31TFYBqPQhuUucgA&^nnrjlo>a`Kh1E2-gA!p97Fg?-4qSzW54>}q9jJWP z|JoiVV{z0j#Wg!5p?c^(U)jUH1Mon910tGu6bs z*MU#hlMI*~PCV%5Kf*@9E%tenXAIwK7c9j!y5w?t-rjmr`(3#1F>Jq&Xm{-WeSprQ z@vz(-hVI?&Ywxb}ZakT8O0{iacGF^q_nRv#_qGrPI-eI6xAHlkJ?O`4p!33dRy#Zx z_d;-u$+=7u$;1%j;Pr4w7&Xd&VFO=p+Qy*7z04jW>Pq>sm=L``KF=&uZ9ZfXHebVG zn>KnE@b=1b6DQlEiru10>Cz!WY%(WbMITGKCwMebKK0&D+g@^2vDPQzt+6x)@tFQ< z!3Xbr>)8!gP4Sstm=K%8_=GU^MGIZO%58#K>$3FfZz%Os z5nCo03AXTDyd~^O?X5#|^aG%RBdeZ5@y2a-rK(BZ7F-%YfE&?~KmKurkgnH?c?Wya zU^d#SnlK;YCg0e(iTmkQ4R1Iff*m##XPE*w=E5l8ZY;n^@lv?;7C5D zrW!}cym6y-QSV}?SPwC#F?#Xum(n^ga9~+3h-_rz624HrX}1YlZ_%ntve??>k6K~- zuy>&9CX35J^pnH+*5MKG7YUnelKgmI-Zvwc9ERg)jZuQ{Ep z9L)}fBMZIA`s>hB7VMh7O}yE5cW#5#CG+yWuemX0h`~%AO(tZHnjGWT8gS3z(%XL= z{Bf8wOp%zhv_r5)e_W1w@X7bv`TV-C^LG`k{!O35(`-F!HlMv;*ve?VPb_h=-a^Q< zA<8*8{)X|$3l(Ck9kVhJqqkNX8*a3)z*pQkhfTbjzCl2syGQh}e}S$Z7aIKUSG382 zm7ejthW93eB^;rMTo$Wx8~+Pz3EDS;@nInssi5N4yiA6#)VY#JICpv4gxYC>2BWk) zAzsI~z@KlYY=_A%ynw(O$^QM~YG1_}^;pRKnZDwB{Yy)qi z^wmZvvTTG3yhkjVq_Wp&?-5|M-CW>Z;^CH(}Gs5rx6Y z9x`=$c**_L-99&9!sT3Yd~()+<1Nv~smw08ur)C=M)9}-t+>fS=EgJOAAN?2riHBY zT&T&ugxOy%n1E}HPQ|Z_P0ZVVMR(8VZ$N5!+VWe4t#R>aW@@ThfQF^#)zT$JlcVNE ziAGn8T6bE@i?Z=fX5g@H!azvWFG=)Q@EmKCm$ASm-fyq2%cCa*jm=InjPMa~pIQJ0 z^Ux>k305t5^W^=T5qE@92q(5r16<+YSjrWEFafUAtKhv6jG8N+2tD|gHay#eROBD2 zk%mrFfcYgiSwEZ%$SXJVz-E>}NgtjFN|4RCDB=}qi?{06C6cFgovb2Hth(oY(WoiA*lV;nzq1^OOp!tQ;#oibBU_l{C zCaW^tlk=B0FetdDTi1S{u}S(Z;j8899avMKKW}QR=z+#WIYjCP!h*TD#tws>s#BN8 zyu5I(xJX+)EzK6l-Ceaboa-JR#E6hJ3<1r?kLyb!a(|6a1y|z_vjU=p`@gP5O`etQgKu6w6(G_K+fwh5AUCLoDxc=L7UVb-NGR<%33Mb)#BcGF$U0nL@)X0|;8(U(zcs!@d^zt}0mTQ1B zwAAZQih>t~nub1)bv#!+<(c(L(fBPJ*=k4Qgn2! 
z!>HL?rac(3DDGnGIs|asz^1U7U!tc-cTX!T#66_5R|kND&m(}t&{fFlMyN=f%~~If zGt|)u#TnlEIifot-5s|N#1qEQZkHP37ENr;)@b5PtK!oW4@k1<9pXR%DhG#UO|Ok!MP3fU&H~Qg4~bqeMPTQ5Q|a~i`+S45;7>0tEDCy zU!z(yH{{w@95TfAr$+Ivv+M9HaRC?pv3_Ex3*k{ zTqA+q@{`eS-w0yfT1tc~~tcCjifOmny&t+kUI*n8hEB!w z0Rl^gseQ$0iCH^Ubl5lpg3`V0{@%%EmMaIYysBc4gYnZd(&y#i2;NkUrtgo-MjgqT z2Fr^tpJyMdUwPT8Dq@#0HekVSG}~iloHs0K1ozo4wlnT*1-tO-pWVr)ym`j;4c?~< z09e$})p(cZdI|7CdcTG=n*-HT8MRvN_rzHM7~w_H&x z_lg;6B!nF~Eg!^cRqsveKwj3m=7}EVSig?i>pOd0SR53ME0rOz>5hVwfIzwpQIr*K zF}|?#n0CBtRp_1Vi@O{ScopPMFc}(E5%$JO;;T!FliipRV6RdVH~603^Lhpage+k_ z^*uk;76lSPW6Bo5LWSciv6dMLr7lg-r42UxK$v3>KARm5N0UOh5x#ICHijExt@oSx zO*U-=F6ID>%OJ&xvQWk5n<#ekFDRX?c~bX2PmpCHXiXWCJI}u%O3dyY67({&yX}2q z1lb1y@mr&vhYsmPeY0XCXUE!8x5M0aq-sNZJwAY&Xo9@STjsFp5YA8wPn>lx41NFU zP#m@b@re*pfbaYP9l{;Q3Ry5@yEz+VmH;>SSz^OrI19ZFfZyHt(-aIjR~X{-)jXgG z8Y}lj65{@^DCBtd`SW0GEESzKl$EXOY_|evxTkhy*mx<%!>SZK?kEl%x}{|-&T#F| zW~Wkn>ay=w`_M;cIuq_|M|87-Z+c6>81F6;Sjp_%iLslP-aqs5s(ZGDSaSC!uJNIw zE|>2b|2Hn64NIwZk5&Mbx%d2DW#y_`y~0jKVKzNYa>&JC?xJJFaZP#&7 zW_9h$kLs@X$b&WdnT2?j0rmG!yjMtW?=%>?1ECTvIO<|?F0EfZ1ylrwEA7P`o}v|s z=(k7wNBB87`ufNaryay5KGO}Rowm$*7PYON|Tjsl7S~k^( z?~1&?0POm#(5BxqTXF#TBD;>9HROPe9|5rIL+r+5EGK{TSwaa5H(1t0tOY}`sd>p& zi*siWfNkJd4Wx{H_lln%WwM&zlV|YpEk zqiQZO=Gk(WI#KLvT{J3cI`lkPP#`9u&$bz#0aM2uFSn!P-P-H4+1M)@e}KSS0RI4d5^h`jKQwj}8AyXz z2EvV1AQw6KY&HoUEeVFfMvj1^Ej#ohoGmfErM(Tsn9e(>L2Dt13BTQg=Dy~3itRu=r1=dU^WKQrsK=>18;kyCS`hZytOlcle?F*X3i+U-)%RuPjWqF z`zjJ-nQ^~@(8sZfF)<$g+CklK&Y{*l94=SHKQP*n@M+3>DL!{A-{h31DPlg=_`AM$ z&(`(4402SPf-zW({sO&w8C#>ZK);z4--`%-?L2T|=zGq7ib9aGueTRM(NEBMSf)V> zjh!ICMZ65F;>`^`TQduK+EHix(`d`}6)yw~1beC1sj%M_{&UE-Wf+ZGIjF z3b5~wZ##StYh6Nimt420rBYpVVG4v1Gw4JB?9S4>?Dd)|{U^)7BHXrV=JaIeS-A+) zMS@0^$@SzMOS9K)?pXu2H{I&U!AM!5LB-AQ3R8t2u%_QGyG)pD5sQDKsJ=f~>DK0* zpaa;h9y|P;>|$ZBbQIDNg#aR0gK%7pkSSjf0pPjWRcCYo5Vi+?g69<5uV_Ckh4-(g zmm2C}PS(1PgoBrP+Ij-KnOgAT&fGPaK>x2~7L5~6zph12u?)MPtNgtY(aWGya|fBO za%_yIC|c!Tm!BZ_|J_g_uGnJ__~$9lFvfF7-Rkej*Z$HVda*LgP3V%s@RO00cjv`@n=i;(#y 
zZrRa4@3Tn26Radx93BRk-Zy<&Dg|gQht6t;;tunjGnm-2HXxv{A={_vH%#DNu+!=0 z4W^&j!^S8g-$9{A-X^vsy$va1MSzI?Zf=uAya}s|fepl9u6`weUED8fSxrpb$m!hV zT|jRWj)dAOQ$bdXFeOT06}W1NQUQ^;8eh>L!O&z74!eSbFcW4&C;0IkRqk6N@#aLR zHmbgfalQz|DlpA57L_MpF?>flJ)b*CFq?MS8T*dt!;5o=#pf`e(!HY*}Qhi zsXJK)*(VMTG!T#u@#oRwh4gbr3rHbv0dK@#8h;yjM_s)}gne6VvPQ;@ z@&MwnSWo`|Dp^ZKS9f2xWhu(7eZ7(W>qsyjGo(!zll z_NqjEyW%BOzZRzRx!NnwP_)TnhZ<7P4;Dn?{g#-^-(*3Oy`GwdmG@n!lFXqeh`Up| z;KJ7Tnw1GPc{k~M6W}q}0wDGxI>A1H)a% z=Xu79PkjfIh(adxU9Fvoi+~xGI&)B04a$P8=YhOwDZfQPT0>270&}8<)$*wVyp`9j z?ViE_?_LYQv`U z%a2iR%A7V)qojt>j=@Wz>UhYWaWV8MW7C*f_a!i>GX33 z%F18}MtjPA!i?Y8t;AIHfJ40F(hjG&*xNVvKvym2Il9)D%R#95M=C2YvXWv;cDA{T z(H1}k@@GASCxda^(vGnAKUQBLNuIjhuW4dg> z0&(x3u^uoYZ^7!G+~W_{?}dfRj7GxXOVc94w(^4d?~c4PPXbfn0z;lVc(i}uPjvtn z&3#-izTY%DA5im&=*M4m22B9Z4wDM@DFZCy&YuRnhWIvq{z3csLQpVfbJVHlR~Dqyq1?y4_aoI&VQEVX5E^XSii1M~t}!>o*-ZMf{X z1Ch@HY-kxa>g8Vx*1MsANC+|_v_{eLB8Xatjen4}f`gFA-rUq5(8LO*4G46fo38zB zUS4M{p;wkh|#Ny+dGG=J%yqwvoG);rgg{_Mt>F>Gi zM!l>B+bbU+a>s2_0^P8S?pqiTBu8W9M&}3I~|k)_Efxj^m#_U2-l- z5i-l&FP()GV`A#sb?(jb#b`Wm%X?j&%wcIr6%;EgD}z)DV41!BYLxx_c6F%(VpSo! z8(|x0

V#XPL$rM1sh?(?5WfXxS=^!LSlFH5wVkEZs;wLb;65VyIvz!`XM`)p_?b zcJP>`;SxZQ0s?}Rp0u=XJ2=NcZ+opo^*(pX_;xZGViVw$l|@EVSPLfq&xSvxC+^Y`P3Ig&oIQH)eJp z%gd1hz~FHZ`*!%P9%?nSdO)@FdKoyQe5P`lov(j^!brtUZn^Xa^Gr~+sKJU*#vFKzzRrmta`YS(9Hvo`LbdwUtgj|`49os?R1b7$1 z4-xQ1=gZTJ*c}*K`632SuyS;4ys{kXVg3G4xHt7cJtrnG0Ora(=|D4193GY1b4Q$3 zh!Pw$Vn4%?njmA!$mk?Dm*ws%=hwxH9Cg2RdKs_$4(qA-21@M|kk@-J{B(7CAfOOr~StXWwI z7t?n);}9&k7YlJ`yr(hz0mc&vQzD3r=%6vcu}aQt{0dm2E1h4Je4AH+A&&zg)_8}X zP4TDhmxSu@cnU~$K+OiBDqMjZkzEM?Y8Oo2w%w>gpaCEmVqhawSUjO)6v)TG25x3) z$=D%xIa15AS@gFs6&KNG9?`6q&xQ0+Y3zou1)X(w+jVQ@#{^iG=UWtC7Ulrr9iHu0 z2bGmCUb=G-cpW{xLo+tA_pLd3!aOR<8QWjX<5~RfRzcBECh}^xeB~`buXzs?eNTUF z|K00)5?%%6HZY}5C_QEcmle!}U(@J&uN?t!6+2_&Zhd7MH?ZkFC~PfxHAww9?HO5Z zzW^r<4F;k&d1x>ctm-v3zI8NXq74$Dp=0a8p64&Tw~k2U7&h8$pZ+86R285IU`Aim)CM0HlKpfwFF$s;U8M51`}a>dmHjA zazFx9Zj?2xO>8v5S68EKEXrxerrFyd9!Xi*^(#ahdo#Jfi@O#lakTmAwozQ87e^@U z_gKp^$`9{C2pz?NJUdEL@mo!wz~bGv#l?vnHtK|<6k=1hHCfK|-&kmCos#x_mf#`kBL)Oqtj0}kPT177nN1a>VeiL_Bm;7$hCl7L`39=k7W%SCgq|4%~0tOR0m;7s2HaT`L6y=sT(1S z{XzhevX&(JzAev{X92R*Gr23t7oz^cDDgu&GJCjN&I}@`7XQL_;1s?^z#BjQwyy-? z)?_PvGdL9igyab~Gt1>>NW~j#KvSMqGmioeGsY&?2g4zY9`53Rca+27_cP?;_>4MV8L^aqXu-f5e*8og(Dx*K+tnb`-P!QA!e+Kcxvs zF#@zr1tn-*V?(?)yuL0bOzon~-_5g@3 z&|;K&)i-!S7fGkI%aC}ro?iIAK!uEAL*+tB5%1$BttXGYk7ZV9L#9uOs0$eoe&b0` z0P=SDK}aNzd@i5_$jQbelzv5T9?dsk|6~nMGR^ZXaCl($|AZwqqc^z=*sQ7cl1Vd? 
zN5EdcvP@dDn_BuFj(^LCZ!de~Q*+d2N<>dREC86x&1OdA3rk$!wcG~cTYM*Xk3yQR z`OZ{oKA_@oY4hTHYH2Lb?9p5USrgb!+}{#8(}@s^d`l!MRzTG6(Z&i7AqfJMK{x4Z z`2IY|Eg!ycYUBU0qYXG{h#8+XzDdh)#*L_@PC2aK!5Ai1?xNr`DHTJOj2EvI$=Jd^K-25a@dyY&5)TqPUewZ zqKPIPK-iOC4M_Nv#Xk4Mm#rmeHZq@2QM(3X31eQVsmCSO{=`?%mu`Y|1=y}{10Hha zkY3ALRg)%2TD|ZPxC@#O5?Wn=s79W?M~PJcagGGlbA-(B@Qjw;Wf-NFuc7ei1K5D( z!?4U6THFQ8@I-CN#2e4Bc!GSKNc^PpTBSXxSClpwH%^)Mj*kY==);KFpfMP^gTBOx zeY+*Ir>H$loNRNiSJHXLaVzVO5$!>*OMXRj%+Tgaw`SDX7;anEARa=xb23B-HSBQ^ zApmIJ;3-rP0WZi3a#MyNfBg_7>Q>XIA=c@NwVCl~pkeuVLF*YkMO;8YrTYKk?aSk- z&il8`G)*%tB#E-MC`v`EQ?{BGX;C7QZEUF|PO^uV$-b0AWor|a7+Ykmj%6Z5A+p37 z5g}yXp6l~HhiK-$f4@JT=kZ4`?)z2ae7~RNdSCDBy57DX{$V=vw@u^nem~Ouq_2BK zuW_ii*|eEi)6(uu_hwR@F@aLt+y$ezH?^gGEUcv^L8_n5OxK&y9_i*ms_e5LFBD~^ z`&J7mjAZ+E=^B1?4xT?wi`Pe_IxlEB8l8Xo=2s4Y!Wb>z-Ev={)nVr$<3}M#>C%v zgt0@t0gXg@Lg{XCF;_X-7*b5fQ>sD)*s=FP&=je{Ej#LE6<6}k*C+%F5I6{ zFL5;!+62PE!Seu3-OjQ1$S-&D%vsAEX+w+d56LGA4%so~+h1#mB?L<1mV+zE@Mj+|lVjazOiQe%MfFYM2OLu;tdWJwLBaxMjEX zGo()B_9WP+KG^aE_9`HzI;)R8JuAcowGg|Z?5#E0^>|a!svf6$krDlEuYxzpcF41C z(58x)cXd#&Ata)NA9^h%NH5R5Wi?a8_hVm`%UhV$NXv+MGZGWhZuS-wLc8@CmzMKs zW>^+9RGw}S5ZXXgXHcw5r%Zzl_C}g64=e3T|$0 z2@$;;xHxZLbj?An4>j3KKOdBdRL26q|26z>#H>+eSR@}nRilG#@A-l@KJ8_hz+be7$@BjydiK1t#y zv8~?XnGEL{p}#10(I%ra(N$# z=y$Lh4y=E7-uV+MiY=#EXLFt_P4J5k>0y~=o|I{b{l063bsgxFx;{WBbtR_DOV;0s z>OX7>JWSip}>mmEx!I6Hi zJx8-%?3Y|A-q!WW_w42X-PHBM4n47??Q}9EX+!?ftLOFF2d%WzKQ;@y(ntCR6nuQ7 zW$cHB_)%e^@^QR}<_V=neW~k5t9l-Nu_I#t$nmP)iYmQrsBn3f%|w91W`l;jb$cp# z`X3+Dx`GdS|KYNudh#-$pR{^``dHlYA7k8c9J^DRFfAfGd0h?=KX5TXU44a#FQKc0 zclw0Ugb(>2f>7LJy?I_5Hik0g{P8*2cb59_=Q>qS;ONbBrv#-7P~xF?k+PXiqlL`n z>FxijC@<8K`KNqksF|l^^r%5_uU1X6iVp=ir3#z$?;19kXrQ@GWY$#k*@{b>FEGr3 z2x)WbjuzN2^=XjzzeY~8##mfNJVl}W+}!VzGC}U(M+ipR^~J93W;bOO(!O$SnnFjX z4P7SbdPsEr)1)~#{GOWR%N6(JKT#DS^lf3PHqZI zMwA}oB#FxNX7<|iy35#dJ4Yu#Fho%JxMh4LZLyEO>vJp3Oqpe)XHD^pI4w`Tvp68S z+K@|O*aH6zvg-;OesGL@OVeH*o>OL%=}gDbvpGxG zkq0qvNcP01#68Sd6A{ltS{m79c3Sb;Yv*^wEv{K(lUK!$7fSu=(EU>~jO4W)HASwZ 
z`jd3-%%7^@y8`DmwGx^#9||~0?9@%%h4{1|NW{%KVJqQJq}bQe5#Z)F%q=5TAF3fV zmJ3jH!mfc2gjvbVCz&a$g!>PUSp4!jQ>Uxg)BqhwQa#>WPs8|M6$tGjGjr;iWN_ic zXKavgDdo(k$PvfhR!^}|+RE91vFuxzd51Ko$rFH~Zsj-hG`B8Pq?*U9D6v7-=~MC^ z!-~ZG`cKvSb4x^4dmDpM~}%rRvBhs-pB82dcA8mJn<=ep+j{BxyY*`xH5@z&rr}O}*Y+wq6GhUx zZ;43ROtAc#;|I;4z9Yp-o*g1J%Ca6FGC3Nny^yQuZWFfG&!Jd$(6c^@bR%v$H#f*$ zA&CH0Lai$PWR|Ps_h&4=eOk)t(F>+%?cH|)&`M*_z`;Z)^Uk|aW5n;;Mp^)0DreB2 zZ{qt}f8zYglZ0Dk(D>G;5BI+4Y1N@5n?7y85Df=U2>N9BpSf;1{HA4U{VlHzZ#RN< zeiS{=RYDraw#-a*6%}q$5rjScu#VnxUyFGq>?4^$^pIIDML zh~CT9<|9&jQ$gylXQvxOgfRRyXp?VdmuxdrB*t6o)TjO~<|DoCxW!Fop^C*JfxNT! z{+ljnRj-Njc6u*QP*6*!aKsrR0eGdW&LW_`I#05{jAJl8ocxU;R=QD;GPPTwxW2z% zWFC>lrtsd{1(nMyDbCE>K-U)b?>f6y!A2$Xs$%WarNHdzHhO@l_wPR(3c6*)T##!F zDb|`h2dP#9C{wmx!bM+M++=cTX517)V-y6|Xj=$Izb@j+Bl>+(RBraQ?y29sd>Ve` z1_B&peeaO{9VD5)iKuRbV|4JeCJ^qTV{iDKvS8uxtNSoU-Wni1=5Le~+i{aWdbIHm zDCF9jnmP&!x7eQ=$lB*Zw++-6yfquI-v&g{;>A72i zAA94)??U$o-5rzbJbAi60)T0y{-qF?9Y6ROeGbz1t%)IQN|?ee8A*u?Bm3E(;T}Et zo1m7cz}fmLNH3=+_1`FXP_sYnOjY=CNp99LI0Yp3&LUf>hl)9Mhw_Gmy!cJi^xy41yANt1 z@-uf3Wv4`?`aK(k*opVNy#mrmTzjD8-GHWIy~WVa2;uV#%(|4Ct9d0i1KXfJPdYA6*UFecL`|9|Gi~ly4A} zxVe!&-@T1qmZV<=jT_|X^TG3a;ngT&4e4o*X49+}+x)T;Nc;6|9@4*18oX`t%`Hsu zDGH{v+Z{XacK#9yx~O4-%PGd+&g%8xZu%rAl!!`p6-Z2nO+v8IdBy8w3-v1 zQu_=gwf*DZK6x8}nsz8bl5B;iHefqcK?DR7b5k_nQ^3j!fm3?jk41~_hSm(9k&K3$ z7hk)yu!MAu(uH9`8YE!7}-4BP15n`&CmPKodMUI_2M2*HD5-#XwB1702~NVMhEP98R5D|7aaL12jODke0i=dV@&%G6=_r zaBeyH&$6VW*?(5*gDeLeZO6#KEFM=wygm2c?L}-6oww)ep|3B9*PApR`||#dabvJ= zvx<3ln-!^hn$%pxl|5uX8e=&k)4ida-rn7tgGOxZhtwJMPc=dvdJlIwAM=rkdD>My zTH7TqE;*r>PfF;O0ZNjGS$22w&D!<7o5rsH0s=Icr5cX= zjLd9u+bFU{84?cY(-Rv;D%RZT(=Xg)x$|#1*OG7{crs3&8K>|~=9T|sPs(=!)LJAn zMy@pm02t;-b7u87a^XEqGJLavz~uze)5~=1F^A`#V80ka+oc(sm_cij?JcwuKR&z= zsyIBxGz`is6bd=ErH;Z{b}j7c6$r??Rs|X2_aryGXSm-QNr`$PoSC4#_p2A(rMAww z5m1lk&Qsi`a6#s2Cp9%SScYLLO13Z}c4<7W^#^tf7D}1)Z+`9Xu;mU4b5eGaE(WTa z^u0J`*|#zgrOLr-o!j;dEsjzy4& zi{zngfr=34e12EVyC>kYw_)ySJ)0MQ+&ugwYK^Y_r(;B~K|9;`wtV=t>xuf2!J?JN 
z6t>uZg8*n(zpho7+Y+WLIa|Yp>N*U%H+s>;JdI0cq(%LLletmFX7oc_U&mvGHn2+b z$a9E?O}{bB(Fuo?fVtoTO1Vy|YwD>;eSuRhYyO)N$!`!)O(9bLriOHcw3ljsPtt8x z@yVZa{=4WnbF`TZ^2F(6cWzBN|n5(>>r~?j38RXt<33-Vn6d0F~TrIjm8$ zMH}Zvj<+k(?EO@~_6gB<3rkvGk2gNOhd83*q%l#MPnRa%mXW%~sXsqdK>@S=?B7kj zypc{*UW#4$y@$Pezo7HSK2nC8{JGoyVj%Aia?62%rxgwTK8AsbEU9lONgl4h)r$dA z;UUwcIn~>$9ZK0x#Ynvl^M?LVMPK~!jX46dzHUQ1k4niP9$P~( zAD98puQcK&Yw1bUpUJ*7?a{@oqKGsN=)>9Njj{^()!{YixZ*o=cbeGlK%?uH2n}G? z$MWUI%GS#3+Sc7VoH-J8fq4hb&49RBw|0?pl68E4HScem22uauh(@M}d0O|*;7y-j ztqjNYE4ay;oYVZi< z5OQMiVl4VdO1W`273S7{)Q{_4RV3leAF*iU>f*EnhQXB|)%5vmHb;6mOZGwnN&wbi z)M1(iWzGxI+SpsaTM3xqho`o}J8X2-*GsjsT7SJf=H?l4IM6}&!Z0%0yM0YBkJ!Ei z!M#VWwZ(M=8u!F4&dc|7(DM??tl?%B35txhrKigI4oqlCcRD#ykKnaJDmU2U)k&;LAGoK8SX`ZBi5aGAPqtZDKCaOf*Ym<3se20SxoK8>a2Q;B|DRs-b{pa zgJAkcJgyQ*eWKbQME=<>WrO{4dz%<>TKV-%nR^NjcS`sTkq&AP7*&abF177^iveCH zIpI6w7JpC!iYUaofN6o@`_XaJW2n%BWx0 zVLA!R>4@oC!ST9wqHD915fpMYyRI$OT+i3s`sb22YoBIIakj7Znv%x+KDQ=<$5n>u z_3)UP*~;I=t{H$;*y}kMIBL&CHGO_ucfUrSl1@ zB>-Mt6`1B!BRIKT$x%f#`KxP-M6O8!?w;TC4eovu&=NT`%ZlP}BrsQ>qzVCCro|WL z8{GJMA&Om{Y_VJMEr_SP=Eomy{lw-3_hYLO(OYz8-+*eU7OpRWTH5T!Fono_jz3&_ zPiR(GI_a{^%fH9RpYy}?D=5eqZv^TEM!N+-NyC*#XI_tti4i|8`85iG7AWQtOA~?G z(Plq8()9@fsI$wB;5t46?batW^Pm(IHq24;Pmr{?E(&nC(;8^jKG;Z(^S05d;f@@* z6Nk);eA7c5EOialh9gN@%^H|T`yP!p#?(7!);s&YKjpA0)OloBPXKZs6ZhYcrV~au5}h zD55OqO{1_LPf&4X^cSpI+Uh<;#Y5-^|C9Otc_4lxEZg*vibgm$2;qplE(;u+Z=q%W z>NCbbNsm+zRg;L`Xl`-GQN^u{;M(Ah0)A~dTJ#aiLuWrv5rMP-3uQrOU$(iE{LJ|W zxw+)lFDu*>@g=zlf@OiEs?HN>1I_gF$zH8(??Nb10&jVY+sj5}A2+urPpZO(xwCV3 zG*Ske-C|S}5vjeOKPy84JknE<$?LiV_m*uYIV7RGDY(&fWbgG4gMUPYc%i8nWL_ai znt@1m0UzWQJ{)&v;N}2YqvYXk$-z$39qciq54~WD03;SsHHgT{{2c z`ae}ek)%*NUNg+1YKF`?t*yUp(Uf`ZKj`ok6~lkB6z^Ws@!6f6ODut3vCbNLn1Q@P z4ce$sLLDtwfzb_rHBPG_2ND}AwRKFCmwDDq$*vfB^$}a2aPO^4OZKqNu-{3v4q9OP zZ!Xd6w{MjGIWA?jc~{)xl$IDoHas0oxx**wDBPXvJeW%#vfb$HYhtBS6VQ|I(Uv|^ zGxiKzfZMEwn_t#wiE~_wOrc~6c2Ztb<}Usu)#zx}n|<+3`3?ZHhb!%?zw7(-!NV*a(~i><$c%Y<=`<)a#=Q}_RZo{tyms}IjnG?? 
zq5}8NN!s-j|5bNFHGmP)(+2t7PV_hXUbLTG)Yy-IS3p|WV_#5>+oKrao3{JEo@7bk zPHLY4HMsUE-?08_CWSQ1@VTtS-6>`}G7(UP;kP@4P&ZK7|oqAKQ;N z5F{XDu-zCv@STVhTd7-hrPr7TXGn*l^ILwAp<9FAnj~={GBR>Dwe)C-2Z6r>yGHk@ zjmKk*294?xQJ7v0Kh)cXxS%MlU5oYTP~@XWUW4rl>k{QUosMTlHC^-;dG)z>&rl7e zf!i`b+VQ*g>P~^Dxw82PgSN$&lphWsQj&+N@-0W|tzsE8s3|;PzBQ;$nX<+?qiIh7 ztarQ2_#@CKXd9?bMfv`E+JGsTF~Cdd*5}99w0Y54r5e-62Vvt&ex%QTPduZ9jDNHG z<~(lhpp6Zq6|s=`vpgc<`I8hf4G`@TfKR-^gu5)Vb1I=#`7L!|E)n4#pahg<&ial+ zLk(fM*@lwx4a4C!1n%YsY{(%lrPQ`c0g~Ew3J+zdSR{md$xb!@G?CLm=n_Mq83Cem z5vQVcoZIPN9F1CFZ|?2%ll6sWZ2l5gFE5i&I|&3ksg6cj_IsJ1z9=GG*dvpHR&7?78 zqw8n_9<(dBDUWW3H^6)Nj&4$D&5o5#@=@mH8>_=UhnIHg;$aSdy6U{3*KwoG!iU!f-P?!zG1A)S z@Y9m=-vUaUG>e@FzdDl$!^25!Th5LSntzn-O{a%}C05kZ8VM89IhO-ZJF3;HwvA5ptgGYTyRJwZCy_RGp9?4Yy+|s- z;M@v38)@Pdt_wBW3&^oMdDGG8bb3y2wvS+hbm-5UlU&Gs>L)+kdZ>TL$D^iyf{W8^x2a?>8L2n#INC` zjl=58C7)jUz09=j5+A1rX@2XZPWNP_cl87F)b!^B%&T@@qF_szh}gB>3x zofey)b7PaGZIumzz3bf>KU=Qt85XMbMU8T$tFN^nDVOy1F1XN-YOh^#)X_k_a?icM zG4e!JD97E6sG)mW*LkU)B4i?%HFCps^XY|AeSKm9`;}+rG2_;Wz`fB&UYb)x8QKP% zKE;)lhuuf~4P}98T>y;wU3|nAg{|X&rrx@}A;6GkQzgvzMRtox%ZXN@@F6 zwLhOT?!?6{Zk+MU;u;^%)jFG3CW4S9gY`8is+hz3WqV`Q1A z4tcg{n(%T>F$~(*5L7k3a)XBpoA#9G2iwRi%skfp*uaHeXh%15Zx!E zS97CTsu1xxkcGu}-O~?VYe>35X!9VZ5ZqJk?#-{B)_t1Q;XOLw4gb&lk5MsvXI~zH zm>t;Yf2=50&n|a`UdE-I)0fvw!l1u>l|loa`v|1@n;Pei^iu-X&ISi&Tk2Hf_`h-d znL!KNkr?AW2bC5f+ZRYmZ1%@ikn70O$4M^N{X!MZm_gZQ5Y%%KWCDjZ{xO#6Xt&MQ zH&Z+OU+O_BQ;wuo+}UVK;h$kW^_33JXZCy0X_->fx7ZP`dLdEnjrS5g1(!{dLN>?AI!D?9pM?408v{~>Y(=pPJ@3E+ zda^d2U~v*YIpy}H8fAHJQ^d-{H+robdDRgMoe}7zBX0?%qZ8t4t4TD1M*I;e`1vVH zVKo4dQ+*$2b?6n2N&-fp#k4VMEmFnY%$+RvDmg69{5R_{B$naXTb#>?aG_9PkR(*W z^t&F}OsgU28$%~l=4Gz@msm=X8avpS`zU7M=3h4hd9Ne_iDdKaN92?MDtW+K<)tsX zBF!=@=V_en^B`s}!V5jwdw^!T>Z(?~dD!rKtO7~^pOfKOZvo5#F&PKP?;keZY}B=Y zhJ-lFn_H%wTggtCCDNy)Qz?nLkUmX}SQ&Fve3oitxpVDb5@n%CN=N2K{< zgHNs1nJvb6ZTxhz_2S;zYS23nlB1t7GAxEPD*SayvJTTTk?#N=EJhT|asG&z#Lj@; zZ>OUZDi&5gtY?N%>lTf+_F}yAjqG&2rXkz_-#b@*z{A~S|6ZcHq8cd_8fzFF 
zqICUT7eD)V#@z`Ob=2f;I6m03dVGhY1sJB-{nhg5xG?JUn>D7?MjOsr!>Do6XIo-) z)vw7(N>IY`57Kkzy9(HK*GDR}=jmz*b6oq4%4$LDy5&>gNeDV-TK1&laA2q8-K^wF zh}i7)=D_>e+}z#WyjXQ;94Uw2qXlz)eRB|;50=L(#GNpLAs!HgYw!U=#@eKmjjgtC zM^nX=KjM`dVgcIA(Xjyg@fQ)s5FsaGg7T=8luvJ5vu|z~yh@}}q;ME?pJ1$>Ok2Tm zBz1cy)&3-9;+6X>BMedOUF7Z5ozx+<6V85M)E##R`%Wj-CkqjF`%-wPQ{kU1;*@k1 z@Xl5>?`n0r6Uf`mIrfBfeyz9IrZ7@L`?K}AuGV6XYj@an6F1o{S#?Aw!}rHStK+Uts23bk+&$)Z7(`28MiN zKbkEAe<}XFw1(%>e^%)DOSf5$DK^4%2P68pGP_0U4~Lo$BW!`~B%YTh;&+2p5D>?f zn_F2~4*&YUpxuFTBEqkYI6Y8|%jD}z>S*v7`MQ3WzX!@ngk&;bX^W6H`E05&xaMFM%a9H6Hix<-eCYK|=BU?rJd;LoX3Lk$D}?!B{1g+6K{ z%L&>BoM$)Hf^0?R-hXg=d9{8pJ5P*1XF1_ACmaD2;`a!fNTT<))_spekj$8{p?m9yV8ZY5fj_7~^Ijgweirf4(~fV?nr8C!Bl&qmq(FHKnSV|B<#Rgd(T>-IN!tG6$mYa2z@$bn{l3x-cOH; zbjIaH9@4oVpl?3+LzekEQr#nxgF#C}A^_Q95tQ|mZr@MhVfTTyI;BJ;}bpPpn*!y1L4lJB&BFXF(WfTVO;LqFyg`EK`80q-DoUMCb6R^ z1gsRQSURRk4;$81f2%-bSWD3lVkv4^(1?Q+3sS6Nfn-z7&v^>|5!VWOQ&cjQ^s5`J zd>&|)uad65r7$F;o5oV#oT;74?!fL#J%&-1VB@z@xp&S^qP~pzA3r;?>s9{IgM+&J35d8)yxzfY#`MM4z0(>}r9);ye5*_^aZ4%ZyIr;omb zU*3c!qZ+igt7{3tHz0cgftYFKxN}DG^F3@8)g>iv5`(PvjZRsQHgv4WD*2rwG74Uq zF{u=^lGjm9ZqI_JZWi+z#0DuXV93uhf{i3mKz<7i5}W_=EX=6mY@sWC^*3Y~_>w|} ztcMr@OD5@-zGW8O0JV^6E##isMvi(yiXnw@5-5cHG+McPsp+>wQYnbEhxh$Q-8I8QINJ`jWp6@XrO`5Tq=}{PrMRbA?`9OF?p%H%m zoqI_r{2FM6_LGbn!L6=ccl&P~1 zl#!r2;dFKj{hM-om^f)!u{8wnO~*4yXA!+r6H}};kas)@1P!C3tpATXSkB+Y@B|`r z_`s4mkduJg6RmfS5qM0LEVLJm2od-hr(#t$}a zX6ak5y!O>oM+c2qty{Y&x4(WciRvWk0CL*DB&CmjFpE}kXC%tKpT`p6DBYZ(avt&T zf;*t2-Ul*~hmU5w`(^2af50~)ZIljy+J+UOPNkco+(ckXsak0ke6osIXe~9k#Ze9= z5Fh}-BrI9TzeLO&!4f2mp7;xiwuB6jsHQ6_CL})Xy}n^=myD(5l5-OfKUA^xtQ}I~ z;qZR!j6N<2I&R1p;Aubqg2dC31vxn^kKeY&{?iRvMNdS%r8o7e>IiTcQBvQrY&p4% zicM^GiU4RMiMe%Dt4gX6?5+)2aSYwB$1n-@cn5ovrTYfBK$kYQ~38J#WC+9P%tp&Tq3NQXh+&tzVk&!CM78Lq|LV+2c7cIF!W z-@r)iV!8-zhOiGe0<>pv@LugL3O|GC7R37U-~oPFgiWwitRT*hO$@2<=C9Lei`xiF z8&WAND^${XUE<119|iJa`wDG7Lnd7_?#{-npta)@84RY!sV3HO8X^(r|Ljm8$u!q zG3NqF0VtKmQ_LnqrdWcpNf3+k|k$z-&ymlPiuVcdGAFJK;kR8!cX3`QFmG#{pFh 
z7%6H6rmeyhU@}((Gf+}&;QU@n#p?b(Y{joA7;O$TS^cFz;7->u6C&~vYRVg)5e>K# zwBRP_R?3~8&1v~0VWuRJAp9%9>g~VsF9C=F(B-hh4_A#+JDC-#8yF&Q zFown6#GT{vk5tx;0j_zL`}%4p5s>O||iL8Pk?f?La63{uv|uP?F;1Sl(TYX!3-1+Xf`1R{n(ySi*vp~I@(WAbEn4Ms%w5BNg!99mI>c?)o5S0+v) zY^E6rcgqE>eDX%s9QYh+QY8}+!Gp0*XYC9}ybr|LseTM6v3=m&aLH431yIz5_SJ4z zuMD>Lqt+&5fGgJ{d81ae44rL=teeQdaL(eIrm`FmNF$A5FzEh0JQs4nqhF(NKjmDs zsM`gS45DH%K(Z@v;ZhMsI=_gU;{JB_4gK3rZ9KlPB^L-)Y!Vt{m2(JFhia1Lks&!= z%NR6ttR@sH0#kPJyQZS&DqQ<{?GVPONn;5H-Pw3-g`~tCgrcU9<`>C1Dk9}t z0lQo~@+3-yI=72M8dbKLZTIATJ#Pzg;IVFLa7hF+`pL#h`Y36`i_69@zZ}NMK)oOAr>5b z=k!nfkFM*hSvy!NMsa^&c8rfX(@3p}|A@25V>);K?ns&h#$m8+>cy<-TY!TT%+~Pu zK;9a#%JkKsiE0YM1|d?;m*ONvKd{^qY}3Nv^7`vf(_c*#_n5@)wckp5K(6!??1?%H z7&PM?DbBIt3kNRh8wm(nJe~}#sQ@B{aQENhiOG?mXfvT&A>Cn8a#HCKNwxB8iCE>B z$Xp;os(i<6WI{KM8GUxF>U{y6}%kua^}_h%jNQTAUG#ks(8 zT5sS?avIh|3YneqVqSJn4C>UE1aTAjIw?Is1U&kll<2FzgfQ+B%A_`nNs^2{D&K%( z4-_)29jYoS&vuF{Ss5ql3)UdizKy6#zC}@tk2?uS5V3!P@(kK3Ufwk@o}hd@S}=lg z1xK5muWAYj#l3kd8L; zTSTlZUW~pf+WFKti%0D|aZk{TqPS*A65JWYS;1O6jb9W-L{YQ@k0Smy{n8IxT&M&X zVo0HGQN1|Gr`{*F?5eT~)NhOWrzFn&`yNbpxxOGhIm4fA0w}y2LgtR0xh2tiZaZjS z%9!>pQKGkeKi25P@LCt54~Rs)Gil$zIKKdf)}0fj1vk=Y zy6qDY-BKI@#HMVHo+OQp_Fs|ZEPn%@h$xnqqSzlFnZ`RGP*nnl1lUa-=pZ%L zsOYuN+MwAoMm|+y_A_LTrL^EcUgIKm6TT_>I;Pe64(qPpn-LzU{xe57G2u~zkkG)o z47)!LBWX;@X%7lf%8liDI!zyKr;jqOBad->;F2GASdGyDOF0|k>%Lz5|BsroiPI2^ zhh!tLdY1o2Pc^Ebs4BYqpXQ99Mf=@6ESjW5UML$W&fHt-&Afw&Lp8mbS2R}naEIOG zaUCfibz@}OcV25u>&mPfeXw+=xB_7?D0Jaya4e zq2_~8VO02G-SLtSY2|m$jQb37(Km{8Pi;TN!b}tm&UP4_t+UCv+%?38VMNnZs=>7{ zP0XHcHQ2bi@|&x0oJQy}|HJQJlbi?x1<5kRZzIU4r!*qNgU)(a&&bRb$MS`1*z3x+ zojI&#D9Lob>;a}gO+RQ+EV;IkfPqtmnp))L>s&Rt%|r732JG8E7gkbZTf~5Lk(S6! 
z#t@DSKnFB(9CLq|U9|&gW3O$Y7{nv2S#Ts%*G)fjc-mB*n6`_7IWKu~@7^iNH~OKe zmI@8>U~}%h&#dU`3{A4{6aKAP<0%B@dFPa2&2dSRD;;U*28F@g0C@<2Bo`Bv!)&sU zs1wIM_r@nlKq(1fQBFN}1ZgFe*GAjjK{>KIYNYeSO{8&>&dWJ9#*`>a6dXl~!aBGS zIUK9t_K@$qvK4-+PadqWW)f$SZ2LKzzS1T)yooqPOm>(=IQV?;F08i-{W{q{!f#1tO)+u&1e*}^$^*0~A`8UFsFcuNzepbE* z&3|0fnc*bo;DmiT0h?0=vg^U;edXMW~rw_5a6 z4$aP#%}5nJ%Chv1H-b<(AP5l0Ba}RMjxq85$7{2WO@f>!OW@4*^nXRefCrMT~uE?y}m?>ntmH9s1eOpkg!O2w1^y@tSXa3 z6NNZ!NrIr~JS)VaUef%xt`73mdh*g=QDb>Js!-11Bz$9L)+Top=#t!X96#S53+O#F z5_s?!Cr8}J@*9||>83B3vS$fruQEsE!-p74V{D~!nhXa*c*>e6>%-{lPEHVD(8zJX zj>dPT<3M;h*my97gj2@s9odb(6FXWsKhP#iBco(?$V8E<55LNMm~B<`^hjT8yTjn% zG#mM^w{Iqoj;yl4$pDz^oCiKqGbIkAjXWWhLK_@b7~o4U z)ej3(^p-KKT3)oO+wLu~>Dif!s)FU|l=Jl1H4=BHwqK~|4)y*xSg9dR#ca3aDilNj zO1b438}pz8+-=I=7{c+IJwY$w7DDI#F=ci5J>O-H1MTbyT|}DZoJT%CK~=Jl9bMcs zpB*-AeZA|O?8uD2k<6EiGe&pP6!o<8;OAQ04m{i){xR#bG8`|>Vn!XY+KB`{?y$jN z{a|FUQ=p&B^Jwh-mHQ=Yq6g11{?~bxFLz0DCiMpt6a%8cOn6FAJ{UGa3T;TP3`zXw z@JgUN%z*obwuyh^pK7Z~qyF|(&X6-bF)@Fk*BVEU5|Am$s=-u^#RDa-gag8HL1{DX z2w9uv6-QN!v9 zL^+KjRu^Xu(H?Pw4n8$J5MLi2I23X@5z*v(t@uq44~!(c3cIzX`oSrif4CJzatKh% z>VUe`_Ma@o{SUFa;;-^@HLyPfCQ+Zrn5ujK$E!mc>Yj`ad61@xwdhTy_MA60EV?a6 zb>33zJqDl!(JFCjszA|xFfHuxguzA&lej#B+$67(mV;5@*C} zFreGQqA2xh#97g6(V6nTn?J@-0(Csu?X1=;$)?8z32!fnHBXnoM<+Q10=KCgg0)`j z47P_;C>eREAbG#`a&`CZsr*O3(Tv|@=myge^(_qK7-OEOAG~NAHF|jJXP1M7SZ-#} zD3m~iR|73lJ~ZuGwz1{A8QU{{F#b0Ul?N!M zfU!>y%NED~>nyRsc!fKC$xUn!&psPuitf^QB_$Rd&Le7RR@MM@FJfMb3F|Sj90{ML zOc2oO=0*||i4=&+&UcWoDf_R@$5FX3lqX#U4)tI38Nr$||DrNOk#C6!vOijHTTYs9 z@PCiZ;aZThe4Pt@%u@y>2HM_QO0JtUSkvi9;D(SNgcwJGq@Wtwu^p0U3?~F9U92-M z_Q0eLJ$p-8%1@Gm6#7!aP9ll^Zf=HlAHRSOvbMhYx!?$<8=`<8&jiG3YHt2<<2EIf^s|^> zPlHCHCNq*DbENl3SYV2BaODia^y%$-aZd>6 zq(6Om%=9^jfok98POz$ujvqRYY ziL*05v!!Hwl$31k{G8*bqM?%}JrcG&=?^Nr!|lmN2uoNtQoPBr^w>`49;A{yJ$41v z_rzrOu_FWur~Ope0;DCEAvOHAWmG$$eHeIy4xP_qr8KVTs_1$^;s;Ec>1a`M?+6|^ zlim3MIB{li8?Ds{IMmrb|OqLP@=lFa82oIC-!@#*`H|tFA3=pzs}>S;8?P zc$$MfRlV}Ns91*yGe?V2PAFBrvWMmt@#N8q5{5P`Bv+Ha{5T_HJ2_eMG 
zIYe3mxyaWxG4+Xk<90D9+(6{BvgbJX#fQ{oI5>8Yq1EF-vc&JfPJ8$+*Ywhhq0x%e z(Wc0@!NSp)D?5Li(=_duC3&<;@wvukGx_hNh|cw&`$)e)+LrfC=c+$MtR4Q0a9{ZQ zB`eXGKl>Iq^31zC`=HU)QzxI@4_AIDI%~E2t4fK$m}|62ZGLU)s_nZ0VM2WdVNc9tOir)Z z^OU2gk*&naTV~Z3QP2 z(-k#A!M62s#Ur55H{{Lpi+`LOvi{n7H#$ZP8`8e*gwjD(RbO1In(=EBjb{||W$wto zTgJ5TORzFsL{H{ejNb$QLPwhjew&96d)GhnonEUG04sObl!9hj?INWVi|4w6ZO_gx zZut9;IWz&=Vw>0^AR~$3rAjMS7cS)Flgg@-np<{vwuJvii4_w0Yvs8_m-&gr4$7)% zI3;N%99zl$&qOIM3ixENrfPb%WiU=trP&~!joJu$zB`rldx=FMO6AZ9aYZvGI{yKGVneJN$q z67<;52sEmBtx?G4hSRg7X!L=-Th-r_*x7HhfSeV&h(l9v3b%v`uW9p%Y zIdxTSB45HlD0;6mKh}69B)Z)cw@tx5RWq}|j;9OP$b{DFW91~TVJ0}$*()8?(Fsg! zbYJAO?~4v?73Rm4>&s1#J!lqx$HgT+v~XI-2I1ogeX?ur@!CXP7HzF8HS4xp9K_r2 zTi`Lkjj5sAiwJKPm6V9EDFHcUH#mOo|$I4_LW`Vm%*Kl=d(6s zR-OdzJZf}b^ugoDwarq>qJC{oi@!wO`!fscui->s~a|8hpzt=*_xDYtxY+1$*M|=t z@=+ss8vOOLB?zOdT#lhsV$?v(;vC`FqW!2b`iZ>P5+$uCbJu$DY?;YisJe9nKLk<*>`5 zvyxxc1>utGrmyLH%slG~d^HqxR_UO!@?TgywBTnipIwczn8%akKpMe8k+F{(UkEe0OS z>pr-uBPK+S;8l8|k91N~(t|nYynb70)NNN^4~3OqPkVdpfNX5kNv{@%=Q0~5ep`w4 zk)Ln>wm8W*B||!hH+=O*i5G`Xs{dii?aKIWEx57P!U@OtQhWp@eRt7MZy#_y=48&D z`=#Y^t;cE4v-5La79Qmni*#e?i=$}~P_C8G7%oSBl9pB>wSBX28n(*Dy{UwVuu7@Y zwsEJ@L2P#%mR>W-+W$3pR@&iAi*&L|Ja%^R=lm*OZ{cQ)3e2ImdUI|2P;CNsXX(Rr zC(I)_8qN89`^bN`S9%<%d!a8TCHoSVaD{jD1bv5qFOO zmx7X8otW_DbFq1@jE_u4z~`>4X%fO}4WD-_l{~-L);nUvyt82gk9I9rP3)wdaqP^m zc}I7A5y5)9)Gbae3F;5&71I~s8@HAl8lkap`9#?IY+PBjP^mJlKMJ4Co###I>u1;i zkD?=~Wz*fwgO9e*g)TK0cdz4M+Z(L+50~)KkQ8cV!)3o&EUR4Mb$j1UoUY18eMBGJ zW%SpWuYIMd9^n71;+f|zr>r(9pWpoM?eNTDFTj96TJU?ajC}M9uNZHBRq?j+?|}>~ zM+uFO^FyZ0(U5;!WiP1g_weC#Y#B@j&DxEDaA||nhLB7mI&{Las#kdRjbmq~zc{)hXruT34|_jk4_s;?2l(;@ z1xv8nqFAmoYH=hNllBkCxiwd5RRSh8uM3yR=+u^ras~zk-LE`i-O%&ir_tsxG+5F{ zgUckfC6yjNYKcP_)Xpw)*u`3p9MMB&$tr zvBiq9`CkK>xp@2J%FlGZo?R#G@Hv=MSoEjAlkS>}SW$;}sinpqzneGhqp}euoA)+g z)pot1d8>Sn0H>9dJ-)=cp$zQY8QH1M*K_I~KCF4ep|Ij??$I5|E5w6%pF97r@ChbU zu)ls^PbO}0a4Z2L3g5rds=A_LJ!jI{{`uu0A^N%fPK}buH)mSz>|Ek^ugAKvB^Tho z_)aWYP1n~LyO6`;$kVmH`oSCqxWd!22(kdenrZ2?@{N9BdTvjSV>`};M-Qj_4fpLj 
zh<)C|p!un(t~-yvLg3f~egb^{zJW1M85Jv()R`88J^RWYsIH#&+=C}#|6y+iZ^Jw# zzkz|(THXv}6CRFEEhqI1T(TC|Y1#+EAy{l3`C&9xv$%=pT7k!h7_4TRU^K&iu0eR^ z(H*m9naG83)tkze9P6+%*d@_+c#%?N;-mM?UE4QGbc#&8VE9)NIS|LfiZEFe%g?@QBx>K^Xz*ArIL|hIo!@Ofz8NWj+?cG zGx`m6uqUC}UB_q_2$?pgeCR|x?xGu4Zus4M^27rYDLHB3zelO?iDgoWPN4f;o#g zrQ@(<>Z=t18vZv(=13E8-$}-cSp?I+eSKgTg(CFDv38r=)m!3%oeP5?xr_Sr_R@@* zmIG$LzwS4&iK8cr3^j1QwTa#D_qJND$@V1vmx$062kG>kKblODJiXR3DyQznaebwO znwps7cn;Od?LSGWlb~6AXNRG8(G>R^WUV*4`z4$<7N)c62o}J9TVv?Jq;BtJ8(E%0 zYF|kmVWE{5{5e}!B2%rL{~XfB8No~P&s>l1Fa4O@9s-(dU%?+n3i5slC|t;{NHE^B z2&G{bO2cpi>Mk(>3a<-Pn>)0=UL@oVr* z*_YEzfE%o2v+J~}EqJPztFH*l=S}Thp`vyRN4={w?c>MqH#etv6-{-oz|mgQmGyPz zR z42GXu=W<1yB=S~u8uN?E1nCFg!WnVXQJ{0y&LWGpt;R8_uvcbO+6>4(vDvHg{ex}&kK>cx zbR-&K4vTKL*@TMrrTl*FCnvRo_uzD#+q#W!puSJhyclUHJjdB z5thH59iCG7To>p*_?4j2K}FZD^jb)#qET6bVuxcU7vh ztvlmswZaDltfB13yeCeWop!7>3KK(8Nk5MNq9(!1$(_d=%EH8UV_MV?lXi$&*P{YD zwLW+zLz)Rs^6t+*AHv!w@j);ecsByRJJiRnU-4(7Wl5O^kiK6?Fc=0x>5;_ zd8`^29CiO7YU(YxtizSDhF#M3xV*3gqnN@kWT@DT)SKsXcKvb&t= z)_-*$#)*)xk7GQ;( zJ@QoaL3p;~Z0Q^Ng&rO!%ggJkQPzM{kdP4A#>Q`W%!9*>m7Klr3>(jUSy&`I%qC|9 z_;loKPGI|O)WTEWd_u=G+Y_H|(qPUp5O7i^e*t|;vRpNg6?^F#T z)b@S8G8!>*LMWIspc9(tTWZt{9RLR&_6TkJ|T{D><>qrD?4D@I&gU>wKAxJG(y*DMO*DUR1*|6)1`U!&6YXr zPfr+-aqjUya4F+Yf}^U5S`E*x<0^>AR0LU7M}3l#lD(y+u@XtOPm)e1ZS=Z&CG>V; zSNU$g1q%v~0H~33eBF8K9M6|h-mzmNJ|7#c`TJ@#6MDvQAJc+M?rnI$yx+Ds$<)xR zIyT^_%&YMCyLWL~b^9>L$$!H#q)^7%z>C($IBknt8vFLi?QFb|C1l@vxuxYLr{&K4 zyMFh~-WO^@M|kazL@U$V1oq9EA$^#vmTlYE+~jQn=rgq*=bs+gWRBY@{=O>T>I05n z1Xu^>i$fEz{gH%YQ5|***Q~q$>dz5b6+iXpSHN^z5UDl(<@GaZLUH__g~nQMTdIzY z^+xbKN33$JOibPtqlh6$tr5#y6g8&^O&FZp)6+|3EC(NBw-aQKfPB`4!oo0vuK#Nc z11^~?{amTWzt+wFC<{(f^!T#|4QD*f4EfbRXQDj)6RqN+yg*S9tptA6{iOVJsL`c( zH%8s&(0Ys2)rsBq1$zoGu*8CmDfI8=4y_WiPD<-Hq(K2`zRG|660M#X})a$~Gm zom|}tJGQA%?=%+2gqhj!vWWAd2^ke9fLreUx!vzf^IKQ!pXt+WW!sYOR4oRkuc*kX zyTFF@rgrH&9CRz0)LN~n*;@K`wqwV8l%$X4_6J{+Q4(mc5?8{(3sApi$+pUb<7cVE zT2k9wj#Esj_!xlaIZ`p5RA0Y&Du{ZpjMz~SLQ@1jaVLWP7lg1fi+UR+!h 
zX>DxpT!IU4(IrtO9Ou}Y*ccQQy*s?X16yU?=4qnQWR(NcBg$^eBsP-8#vH|?%9{MB z)P+zWUX}vgk&jg*_w(<sUK`|)!kycwx09gsFY z?Pe)GW1HWZx(k@vcYewap!uaBr0=vFgaL+)AsKWZ*eng|TISdd1WfkK8K-wzl&qO~ zMH{uIrp`DGAYay}effIM67P9oVlLw>ja?`|8W*f50BjE~a=gtHCRP}QenUh}Afmf09rRmIt{M zA)d6$?`^qF1+bX)#!^122LV{exz3A6?ZE;9E2)nlU6^(|MA15hK>O9w()$thMQq_ol7Z!LGCyQCoLft=>Bcc{f8-cj({6^V+^6lcV#^RBKP+(v6gPB2~mYJNQOOO zhaZldccQMX{J=03#c7KcW`?xej0jc#7feHX9SwQ1td8F|%7$GpCVjkWnn2O?@4C0-|#pdY_M=9!ypJa~$ zA#3Kt&TUjHkZyK3`8g?8HmqmU_rHsi-BEemin7|W>0X*jCG)}Bj+gDC~({6 z9NpoTZ9mt=AVrwBz5NE-pDB|8&4iYp^kSls;c$EOUmFt2Xk}EnY^o!L)_!MSYuQW& zMZkuK-+1`QP~%i4Hv1_vez7HT+e{l{{P7fn6E}OspR7oxnfw@o2J;J)2!Q9Tju@Mx zc}tj`+?zRd!uFASR;~I2JyX(+BYjuA)1^w&xKe5dug?FtQbK?^p!?-i&09NMD4=O@ z6tn3{Pn%IG_>yR^A7K;RwcJ`y)id27>x(-hMaeOb&T_ywv{}d6Bl_Z@Wp{CgZp{We z350LK`%m_s^;AsJgv9Z=tUN>SG2i3IGsYNO zj8JGXL}*fkN}FXYjU`)AT5L&0i}pQRjWtsUr5KUYLP^mksZdJm(Y{G4ZR)h2&hPzw zo^$Gix%bZh|9kzsUiZH4y>rLtoagy|KcDxi{lBpibtT;{jh~ySeZ?~E@^k{SO%(QA zHvOZ{((;_B12v~mHLaU(#mU+*M+_3F~by&ap>>qc4PVmP};GC#jK(AxEJ7EZ){S*$+CKVSA7 z`AS%T=foWxG#M*!g02s6+}UE4a0de~o);jp(agpj0dJNoi-_OC;_|>E7D%O9ZtxB< zZNRcq9wuF-n5Q!Ym-QI8u-aVcEJpHL<}_C8ODN#Zrc-xSmyfnkZ*cK0J9^t>!7pJDD()LgFewE?CdZ7rIKg4F^F8lk$ZI>je#G{XB zHhzKR%sUZI20vhhxEx|3e_j|{C3GAM32Nv>bArS$$e9Z<`3-~h}x$9B0>(|Y|p%LwJV?%o3C zeOI}viu&ezaW&!VS*jZc#8g%0@$H?mac2EQ{0(kNjXe!)Y zA%jDOL%byNF*IbMx$;ltJ-A+I}y~TUI3% zj>II+CwYE}X?qs#Ps@!Td)pR|;JYJ4t3tk6RKj{b%xI!yA)vKIRr+0W>)bvp9VUwi z*yra)iztAGr}(wGJAs>3zj;jIXH7qO=NA~k1{ERu{5~HEk#@oI)rG;BA8&5R&I??4 z?^~LzcoBid$V~+3#^OMl5$grhEAZK~MK}%)FO?>^A+93kT>h9NMe;Z!$x4kQk7DSi zPAv+LNQq00k?I5-pt;NVuN zhA!(A6h24P{Fv`sn*j!)F;+&)OS!fOJJ5{KZ-Ebi-W!csvi;Cs5mU8 zWVmdqsJP?W<1?{?8BuYtwx5qL`Nb&^v3!#eR7pn8>Is&ZM}~_wj)eHZIanC>`At&{ zfkw#sLB-s@#1E3B)2NSfbG@jD>c3@cJq)V2fQ#%Z47mzp}AT$rz3 zXFi(YF)m2AsQq5(d3l|8r*L~S+K0W<*ixmu&9>G2wGJS_pmJlg`lvC0xOs7fR;9&z zIdYXj%A@3mEcMgKapaI+(KE?oc7~=rDrz0rJH9^Z*x2&jf zza^hr&PhrbGWi81CrsJ82@*oA6POvHMyiNBv?2oQ}G275(h!~3va1BSgEEd 
ze-5O-1a?x+-2WeVB5mPCyz?f2DIO?okP@Lma-G_mD=UhubE#W!%|z?evg-mgE2tfr zEg%+Zx0Pq)wD;ewxjc=TP+1lq-0lPu?A1e)hICUP!Ov%nI-}T8{40aut?POA>iW5A zB~NZfbt?V!_N+S*NN5v4Oucd2{;0oi4GHYr#C9W4E08;I&yRV9q4+aq;u9Qc}zh;w=>~KuN2$Hb5YS=?Dk}g>ixY;*Nf(XTM@D&QG5}m<;wmDqELUWmIqf=75 zYaSj9r88R(qJ_u@z zPsNk?vR=R|aDn`P4yhk|5Fc@Ra<;;^lLneg4smZx?-ZZ zDH%U&%(+*i3Qe}|rVsKV9V@_{AtB6qLJ|f@T%B@tgV&auz3&jVd&@0?(S3my7miQ0 zQZ~-s=%`cSLsO7k)yyOw$l@MpyWc*&VlU+RD|MU&`$G9^4xWxU<#6_#EdJvusAI+} zjR`tEJuTT9uw~uy%rQg__Rlntwod=gMyVo_;%ad=)_Zi(knqaJS)`l{h|ua87#!d* zE42n~{gk#h;WvLVr42v7GW!-z113B)4su2W9ZGKUk5HPwMAicTJ2)gacr7{gA#Tj<5M`&+?0yH8>l?f5KcafP8Ph0~q<3aD3RXXd%|5 zb2kfQrr^OUl-o1+HME(9p_`5tt?o!Q!;MW|6ijwB(GR)8kmAKws}p@o6PJ>OTD<~To zt0q#nmB~Z+iK}n!|E7829Hc`43Wq3B3mEB@H(dHLw07OT#&4O{=Xei5eDFVMUUoRQ zeNXd(2ee$i-Q7mCsGfrVN=ug(6xvJ&BMF3;tlJdahog~FbZhX4s^xXGiiGG(iSubZ ze?VkyT_x7Fj`uNX)b^q5OLCB9Y5w07Fu=awyh)2q6pSZguR+Z!fe^3-gek^up&tHn zY+NHpI~3&SAFx`qp_D+3iT+SSJXTt$R{W&N530Ql;$O-0l=$$mk5%&mZcWp-Y{W~WOJ-N>g_f>hc-6-nskdPBZsIgNaE|>f+ z#>VsLlbic2%>;9--S6VaBQWYJDS{Gyoiv4S$Uss2z$rf0+0ZaWzLdHai>7}omYMj> z3u{pDzH23B->O_8GK6H%!{{qvNPFqp+!Vh(G1F!QjGP>Wg^UtJC9x*HJIs_(b#-iY_En|eqn&7DIt9{G)Cb;gYcdbOF(tCjaL{29HTh$?j?C0YPBa~8gJ%(C<8Zm z{iNfRg|EtNGCGOT`S zwQT7_Fvua2Az^^ri8BUgH9YAYk^4RjzlrfX%F0gqXN_qi27$ywOERx}U!SAa@X0)} z3;X%iD+c7coDEY5){h0^YG7))rVwBmxFv>&_y@7kt3kR`B5lXk-W<+aBNcsEO}$dJjFo^N1DJD{x1U%HtUxxb&cdq!cH&uZ6E~vN!Vl+Ir%q zPff-%5^tlV+wGLh`IEar#Bhx#5QkHXgXOW+Y4BZi=zfk`ao&4s;^c+}i?SZVVNF9L znbo7iz2QLNhzT+>-sE*&ZK8gLJr8HChvf1C1ZGB%TuSo`jvbnkuu*^K4&pHI5BzoR z)`$?m5IV8W_X;9Pub`|-Em^RvDE1RT*tu$WdkDt-ol%MUU}8n1dEW%+ad^o_o|R#I z&EEMC-?9^*l!)0IHP= zDJRqez|=}N1*#Rv+O*6&wH+|ESK!rYU8_nD3T8t&tK)gx_FM9Wi;Gb@avx-#&CE!= zO^r~z5}R-RPuTaDec$$Uk+W4ncK%}PZqL=f4XM_y&E_A?w?TvcWEgTbcwiy?*xZr3b^>6-avhXvdJ`VnE%ip19|tj~H} zbWtQ;sJb(QN~#+0@jLbVd1#wxCetCH~yGNBBR$~E41#anTDf-Z+Jy@iiY z%eW=PTB%SRnr!=UXfg|XnKjBc1xktfShs&SSADE0sLC^YX=dvlbruSGymSpPZ7wP* zeXfo*lJ%@>N7o8bvuAcLt(6g04BeXZ*<;$iCsbKGyS`asL^i~kj}O%IH&Q?bFfw}W 
zB<2YnrtJgf)1m-N@~0yzW&OTl1?9faE!X_{CqIPugqRHzj6!gh&c(E^TUc#=Al-eZ zzUr?8GlbgV{?_>)z%2)K|ISt2+G%h3_iX7-UXwzjuIP)a(sJ z0%EW0`HVFuW}DjhN~kCmiC)Ak36PEuY`V3)Nk| z2r!wi{*^ZJ#SZL+Z*MTYUiC%swt3@Qo503Iz`Z6Jw!|oxU)!kUMRw zDq8`E%NhcSHHX?{`#B1Gc17B`G=_~od_f!w5pKD|TiS#t#P&A?P|(kq8N=DXD8vmIVPS(e>Ia&i)`_xw*kANJ)Y%>C>+lozwx&#EzLAXhq{^;ihpM~w z<`qA+3@UeJJ7)wqb&vy*C=I{M+(}~6^a2~XGlaip?*}4AEmu`~N$^80EE66r4!dzQ zq&u78Gkfx6;Gp^lHLg>t>WcXX@Kul<$UumEl_Bpp(?)aLqP%@sSQ#uJivFG;={+>n zMS2;w|Ldek_O0O)A;;Q95w8wfp2kL1MRzKQg9nQjqw9n%5Ri5t;@US>K_Ex&Wcrug zvIB3%H&&SW0IKBMJAu#vGNGa_-TU79%Bi~!)VYSYIMpdOG&esPFzo2)INa4-_N*~H zuA_q8=NdCG&^-ZL5m%q9kDrk+5`ZU52qUr2&wfXpnis#A3$aid8M!wGjB7cx?ocm2 zn=^qp$Nv{QF(7X|kl^rFc|Epo*D@j7nbKtD9zy1&$QKo19w!{{F!9dU5ZnJ1!8Xtz z;u9nY;GnJtYngao+h2jO(BJ`8%e^YavCD8i-RJI5dp)Qy(;qxElPa=s`^z{io`8~6 z_kyTwmBx)zOy?VB`Q@;>Dk5$w+VW=v$Q47D2V>L1tgNggG3|T}VJN1^3rO61aQk?L0HN`4gs=C4OH~GUECD*Hxe*KC zzi_HSst`!i#heM7T+(H#G?d;7|H}oHRoGJp=70zQ4XYV)R5QQuWB=!oHbmFPwBJ8y zHB75|ANtC5NYe2z)wx^&)B+JlPO&Y&PR@ce`$s|8Uixm4eqv}@oNeP@>qV#6dmdXu zh&0a;*V4IwKSwZ9n&|9p-7rN!k-n(+`9U+Mwq3u%fCG>G0p^UhOjy?a()B)y81lBf z>Q=gR4c1hE4UsQ)tyTuwkN>z@XKdoQpwnj{9)zKYt1S8-#Zh4@>Y+UNokc7x=lHlF z+uKi8-6^Nup8cL<4HHu6P90AIS-cLOH6}a$%lYWKGs70J?=E)6F3}3jj!}?)&zeqd zs+DhEzkOTq;`F#^i<)&!MN#XsmAm`~(%Ic&4o%UF4jiVxvzwAwa>}fZJnb0Mo2E&u z&Qs;>W&o5%52#>BvGP`&+^iH@w-rR@{-zmr%4t6)h9RTDx#Ovn1fxDtvhX2 z5dZYKbrl+WN=B27>E`DZ_1)S~N;GvH*+npb?HkYa%`%O&$N@#gI8doe3}S@24!%C5 z$rJh`(;HeRFQX+m#3~8hDpJQ z9I5PqKBk9jx5teY)*)ZMe7XGBU$-9}NMAVl%%?7wmadM|(oU;JPZZ6s=zm*5{V&x6 z(<{2u2IJhC#T0*7%*=?Rp%?57;}mfhCzYA<>Y9!~YCWicp8v33T&<}oi{T0I5QYt+qg+hx4}5;a zlgHVZ=~IIOa7Gf<>kyetrh!l>dHvw+0zROy8uBAY(eX815MHZpi&(JLSH(eK!srgl`dfMmI|p3Rdj`c~5NPmt*X$JU&YLNC zVTh|*OtfNS>@>IUt_D|jYZe+mNRYvC3WIU0LE9aeqEbD%oWPrONo!(|UE}2KqxaZgb(C-bI~iUeDr2i+GYv4{#Ad$Eg&L zZ#bf<^vq@Yw00UAi7N^do%@#BHtRIDxV9v88Q16D4NvcHsu&1ks&=i5K{f`t;-_5r zxESM8u>9~fSTk$|wFZCsPm?XBNdu`!G-*R58jmI|wVeZjR@?JHpm;R5@1Cu;!ny%j z@gv9v@wM%rjQ7uvzit>rLbCcYRJm_armU3n&MFr^Q?QD_Km9Ex)*UE)j>P`KD(YK 
z0T3o)=kh6fO?di^MgC^AJDf7efjAmAnqp!7;c8~T=H)#f5SXI~*WOl=tAK}U$Qg&g z2glJGJJx_}QuO@%7;f6_&EiMnE$@g2KY5Y?9A(H$xHoDxVa^`mEhGwCvm0O=xxNe{ z2j*dxcKk|u_yU*T)5RM4@Q;n{Y=-9;fq93irzif)8_7i1i!;?AIsj$0m4`AQrj#d* zjjg6|GdNFAgT^dn$ag-J5>$hRS(`1RCw@saNfL4HyH7bx!u{j4z4Ae+9Bn&5@n_pV z^%|zc4cA63CV1bosA6Im|7P~Ih^4afAMrURW=Z$o`cD7YSpu&ud>+?%ZzSk(4Y^b#2N$KVH#`*1`-92TK+;cYKi(tn zPF~NfM8YN`zSMq;SJ!oKH^!fcG2R7^W$$be42`T+&OqFnBS&wD}yWZ08a%s>@ zuc5E@hRS69`tZ24=>Y&~X*G(-$Mf799>H|mCl?lmIkgp=5l}0p+;O94f6AQR_nXBm zm6|`E@MSVpSoZMA0rRJN3FBZHcdQ$n=m71x@IA$eG{5_d|GQ zQ4d5Ny_@4X1p#5Rpq-V_t1%Z+;M&ZFxOki2%JwM}SLeO7v@@KG!OPfps&kV`UB~CB zqyr8sHasT>hoiqduS547IN+RCT%eq<=()KJATj4Cjd3c8&#CW^Z?6?;R^c<PfjGw4Cr;E+4OQ1X_}MZTCy@3!Ul+c>dZ%=^Gb zW~kc4^NzdRS0m|q`1*u?Gij6LbU}Gh>=Ar!2Ny4bPTO>Yh=>TH?|RN@d!sKbP2I9) z7QaXJnA98g?U@z5w(NbquJ7)tmIu1Z4@@k!JrZ!}_e1gnY}d#S-o=pz&t1H!bPtZR z-jrNbiqryC5p?!!uD7a!c&&Z~YnKT7bo`k5>{*TZ1I?Qj zB0sr{&>@5<8oqX{wr&kT2FkY(u5(?mZd2tIg0qub72h^+FV=^Ab>Wc~;LWmC`Xo08 zZ0RrvM6!fvV>U%=I^J@biX;aPwww?C}xm&Sx*I>(3`7tGtd{)s=v82&jMxiK7A) z`UCR#d`5a|Wz5BP5sSQqGAnbFDuEnQ8qq^Pu{ulu37yfRf|OT z_rv#NbU3%5?`@~{zlG}SEt_%Zt7uxN|e&qwekapse(MVMm40t;ltXX*YoJlhkWLvvJ2xy2jk2+MGRH9Gvn9qT_s>N#Td6?q61d(ocB>04mJk*p*{Z#|Am| zgw_|MezJ7pWHQ9rB@PB=%9n;}T`!J2T``@7tf_|e5p3#5e_|??|I`u=W)gydr$f2$ z%eCg~mY&pV4;jTy?LT(EH}f%&=4bS;j9QUpd$>5z2**fP`%B(kLetPJ2!RzOpXA+J zIYUUNc-X28hbc3s_C%0q1}cArozWsGA-moyKc~uxvMWz{?fEGxPSr$;UvT>dLIO=8 z378>`+i*c0;}NHig0#So(=`HFj9|jVFIOlnFs+*8 z-Wt-Bc$NZ2%u|evjdSmA%sQ!STW0N(o7a=;vZViP;v|n0vI9e+Mh3iDy~j>2f$Jac zNtiHV_w}B7SkGjrK77{w%b8ZiTNPYNnUeMyAMqmuwf%(@g*NisF40wT?Wxc!JmD#A zu2g8%)^DP(7-Sg05=n_#HJeyvvFG!;E~a^8WZi=c2nmn+)#g<)a5dmPB}ocbRUYjO zxIQqQ@^3tSY6NS2u7LU!`xO$gR6L4B?ALCV%g0@kNFGvN&?cnbLgnrUtM~!sg>KyI zous6gvR^e_Oh+`9F(ud_4{b?HQ8-7mQRkQhd@1;qCwboUH@zDtjfrh3A*qHCJH1H+ z9*BJW*8us5m$UloR2jBOeY)w4yxiQ5o@`eaA=O_>g;#V)VlIc4_&3+p<9({DAw=nx z!e|z|7_$(+@F$HOJF;b*SHFK^YZeim>gnb65btY8)sAXyf{77{EtU&R^Kg`nPtB`- z+OMUQWPUu=GHJs273|N;p^}SxFQL}ZEJ~iUeOXk3${0F91@|WM2b5@=?`=B#2fzvK 
zF($Dti4{Gz9r(#Bp0X9sa&c&N0CiG(=AqbnC?5_V0{|+V!yKb===xDUYQm;(B^ZaH zav5K1A{h+7d5Pno_!gG;C!J;=19-VS!Do@l5h*2s%*F2@05X$e?OsvnHTPIy7 zt^Ig|Sl|0}_qH9$Z#`W<58UnH5bpdThnEKI6YfV-n$THFKe^YkCpzme8SLTJ6sc2$ z2K)o(0w@1){Qt%(K0to2OdWU+WKK~5EAAw21J+E8(p8TiiU)`Ob`JJo%2fjK#=pas z!5EK_C2?rg<_LD?%qW;h3q^E-D8rx`i3;Lamn?Dy6J4_f=KN*Fu`a=*i#47ch(n_0 z2x9SJyTp-auX0et>ozk@F0pMwLR;F*ixGwf2K5;@CuCe^+wB7vQCO(zBsNL&rh@EV zDt{}f;m3k&i@BKaJd0Q=s={hQO_1#8x!)X&_(jEAR4Pss-|N93NJ%8lG$s zMS|4Gvpb|0q$Tu)cTPy+C{1+isvq=8z)@2CEc#uy@TF^C2eFVmMDpw5ZSr7Nm<3F~ zz5p4gjWN9}8{GMBIp>Tqp~e0h-(7vKw=gEL)*(|cseA3_>q`u-sr(WTr!*&qTh2*r zvZnGtw>>rxn%R}s8(lDR;s@FDCo70&2VXgz%~dJmlA9Ck`dr~XQdoxcge2D5D6jj+ ztRMA4^<&Ek$OT~;o>6X$BozV)r1?&#O~cmz!Tt|CNR+~#j#1Ps-r-A|F*x-AlqMkXF>GBm}TuaC|lBTdw zTl9)J3HB`JfHHDdZ;jwvS$R{*e!K7TqsLeqndnREsDt3af57kbIKdT(okU4h2;QQA z!KkaxZk;V9wOLC`Yffif36WNZIH2DdUFGZX*;S9bzcUl?52#24pN}7g!JSOG{ zqmUh2(6$>Ik;q8NI@KEyBVSjT{OTo^V`%GIST1uhyx%ki);C0;tZR>!5mpsAF~y@s zTgbm}4SWhNy?N(usN6IhoBOIid@sUqX|1Pr*ZM~I{;0&6^)T>VXfR1`Y1}*97?X&l z0}7469F}Eri^)EYP1u3t4(xJfi37mTaT3{(V(a?zYGKo*`s3!bIi4^QrovMdbzANq zv$=~At#RAt&2`L#76VJEZaXO|f~DGg7(IOsCT^V~Z|kM{qrH+CtGR=-EBZ!zw1vX^ z)@-dd*BO`67e{{cMCUG}*P(NqVQ(U-!X%-%DD)M=1;a?B+g{3C)=4lujyZ#AH>0)|oovVvPe2?L%%h#;4mbJuj_{R1b#N$L?ch_K)ULfoC&(q$Q_} zz0i>^G?k(!Rw}v<#EaThj<~7X(m(e>e7w@pk|&#ujEoXpuAUQ=Znz*<-pb52mv!m4 zbN+llY~~}?il1;yn0P3t`zs*>dtF^~liUkyb1AT3rlvO#MbPn5V6D!$5%JS*+{gkK z_Ow@LA3n0}<`JVxEaZ)>^vA7A;191|#&0WyhbLhgZpw`|(m73HPs{M|pvHHUg7IPb z2kS)_2-6?fJG;OfjBZnM5tF>Z3!=U!WRU+$3O~Ecz$T*UBPJQZGPKM51cv9Ac)q$k z08i_h;2)ja$`0QP4wCs{VeO$>iez{E{K(Rw>hw4;zh*n}vFnfKIE^fFzKN9GWmED& zyR}98&#UX1cNnh7kC#w`gP@^Q!IAoT@cWmFotWg|y{;=-rZh={U}b}&KenD~@;Arw zWZ2}*+njfL97>J2@)dE{h|>*cjiCTQQUFyjQ08Rmj-O-cvxw7Gv8q4L<(IIK8ei8k zmLrL7+_p<@;f%M`^Yo66Uv}6z7{A;C-Y&;Us+%SI>tWxi7E;}|QhqAV+I3ks+nZSX zJOda%Fjkqj%8uA&Ayv7sdE%w+>uwd5+Rrsg#b&k1jz~(5c6+dH|FkqK8w*C5x z!WK?Pt6rRb=F}5Oh{9~wiwlxQ+POqc1w7;B<+V-sdD1pAfJ&?0@$-AkZWq-Vx5~2N zayx_$DSkPJ{R4BHMK&bIPaEj=j3V0zy!zBr6we=SAj@xCD}=`gj7wLiqnYAQ)p(q8 
zKE*k7l}R#Q9!}&ZAMMv6dAHI_fz7u{(uHzDr_+L;)2hV7Mov3G679)G|@`K-ylZp^C0rPL`_ z5@9w_B!^{G%M(FItN?aJj-$wi0ljg=j+a)&74!7=*EQkrFx6aoBVtsN>%3EWFOHu{ zQNuAb7iQI@{@T0xqpR1oThF?X*)kSq9Dq8LM%U5?>I=m&T-jwk>G9jMvD5lx4c<)I z?5|YBP#tOWKOHl%3^A~KG*AF)uPw7kwSUvk{NbnbFI~T`kSn$?pNdNU!N}ZvEAFXW zRd%~-oG2P>9h`gf0z|K7PZ@jqvbVSFZ?1(G(hlJph>VJ2^*t1thL2f7oxk7Vny*P! z8fLDixHQysMz8vSuc=!5=Y=>$&Y0|r1>C6sxYH4DalgL|K~zjso~{FvQIHVujKNHR zr+1>5i@t5yvWxg0hKr$ryRV(SYg)$BY5hC4@BUk4qb;K4rcMbAeq`}?G>#8z8J+mF zDnI-qbkEV>ioZk8f*~*vf_6gwL^wD3`8#*o#!ho0>G7D>c9`7=!JCXs!_7X?D3h@H z#O~J*ADM)CnJScMLL?9#*GZ?SGpdxFeMlwB-o!pnT|qF>I#i#T9X?X5iR3xeo%t$~ za+e%AO|l^^Smyl33MylcfxNMCo6fj_jRo?h(XssQK0eI(K^M-7D^f4N1bqfbNJ^Rn zs{XK@k1-;=8ZW!fxJ$A8)P-@@WW;2~2$z#44zt-nDkn2B_`^zyFJ;T95UZrk(_#AokzVu^5rIZ;S<>=!u)cggBlUXZWZy+6;_{i zL2w4K_3nXFLW@#U_hI53?}?Ln-v_+&Ljyeiw&NocKtC*AAt>I=nSI)nv|Vajx`~S! zfYpLACXI!wy9ACK@~9Ya7>xIv1-)NCKFe@bCI)>KN^b3Dqq&xS?R5sizlT)7ew7AA5}{l^_pq`(fF zY{rq8KoW8`E=qPggY0NYx;PP+EgNq+T4U*r?CSM=grdglLdq>_NQ$@;{1Ag?A2}_h zmO(v(MJ%V(;ZG{WiR*Nu?~`H4#}!yq)s-~@tEU?93iiOkwDq!Q|vV_0g>Iy6fI-Wh5hAMZ$ z_QWilGIl~<(1~DnA3MuHXmk>ay9uziIMcoch{+L?MzO?+KR?Lwn+~vK+t%lIWRj46 z-m~Pq_szFZ=~4Oxn5k)pKatPSSeD)JA&58+A$3*OW1G+Ks~k4j+dq9`RuK+`^S<7lt=0Ij77?yq>g_}JuZw>J ze|7=e{4_dlh~`1*t$B{83;&SS45C zmn^g8IVC33Qqo^ya;D~y-n8sO6Mw3LfStxSjS!Hu z^}P-bxPNf}>r3elx}L-c(*890Jf6p@I8ja5Dw8h0R9IXV`e{eNeyKO=(9j~Na>0W5 zFP2rP0wF<__@ksc}}fF0xO;quLPZ#~$5t@U=v= z!Ru)viuRja+uu~&H(Ki*AU2);N>-jF*_X(=fhYabU+W3JTB%!1aprEn_p>V438Ky& z+ZYF$V{7=EilaAPzwR_faeiX^&91NsHJLZA1WWDtGx5g)!Ng11GW;$V=rSGbXP*0- z`TCyQUoAO&O;$qULrT!gXribPNkeS8n%CKLpzkN}z%I0*j4ItQa1!0HNnCY5jOe*n zDw=?I2;e#7QS(tTo|;|1n&QMgIq@O`S^T6n2!S~a^j9Lx%qtLzB3QlLuJe{>0ZH4k z+jH!qJo(Z)*~%fLJx93^SpvL~mEZX6_pchE|GJZsyK^W81bdV#^j*BjuI@Ht$wGd4 zw6|un18Rt@S`rrbn=Mllzg89j}rb1QQF@v0 z&jlq~@z(~;I!??%W!H~MB)x(}*$h>ewV7~}!Jc1uK+?*x1vYYn<(Yj=M9OwY&=w!} zrCfj40ek134E@5&%3C#yW6B*KBr?Oyh#VxP`*o!R_CCs5o+)Fyl~tgB6IsHXWS{=- zYeGLg*j2X#@XczRXSktxa~DM~$qdPI*{cBFOefh1^S+avKwndN-zHW5htve3bU_Ii 
za2Vp1#kKLYLN#+pBgvWCuaLk!E0Q5@5lai|1IjpoRaX#I#<}B$ioYU7uTYr@jdAY{ zA^6wqezPu+i0Zjnpf9$P$CCOb)0QcjP1yY)-jbb`7@}oAZ%S*~9qRA;bljD8RvWo! zkCeLa^37~!(!j3T+F$Hr$xZ}iiWRclgTrDNJ+6vCR&XifkakZFi@BJCgWkU&u0<~w z3XwGfmL*58_kNUOp4@WWzA{rpvw*1wFgCK6}ju*JR08{FAszke|5lvB6H@x#P)oVnW|_Q@8UhH?mx zKnj|}(rXqZ;!r#|JUnAi&O&^R^r!9?oDnJLVd8R23rIB-u(a3W_0OIi1g8{wL)MT+ z=)wM0vFcggv|hD$n{v+w?;A4vUdXsC>3&V;9h38bQ)p%LIo1daV*Cwc4*M+9)m*o3 zgQt>Lx9PM-h9?8=LJ|EWtNO^?Jp1nHOXVtr3w;tVM?jrVqRjY3`>&WLEX!-_hD8Dz z{adIpS+?xvE~9!gv5mjAra`kGi+2KX+93z`<6Z@N{J>&cjN|Bd6XYo&s1kIOS0G$T z0NiTuhay(!4U4)~mbwo`YelMg(SpP@YqND$AN{jXRNj-^> z`UJ2S&oPNZxSyFhiB`08L$lU+V&p)~&&9C9 z_IWlj6CaTac1zFF5OFza$V@qbKIYIW=FyJ_2a;rH1N4Z%THd7FQ{ zWjcjpwJcdyyw2pG>LO-jb*k6z>AB-KJrd>;fVO;uh1ngiUjiq4-^|`1i7>E_p8(Bold}L zK=+E~dn0>$?LK~@)Dt!4+>C~S4s{FyV+dhI$>=r+tlLIdUGoMrp?-o{htnIBOVdAF%%Yb*H7MJwFx6!Ltj6jW-*+-L|^YxXX|1p zq&mHyw6zfYxFblgr`p;rfm!I$Ta+#IN+hiR=TU#W{NW@zoXshA6PtZ!k$#)wi=T&( z_8!Jx@)FQ{f%WRm4|3-}_u?Wf+@vCFY{c(#)VXm-smG{209Sez3e|REbTK7T7*2e} ze-7M?L$tS6=rGgVRK%jL1hpK@KX2pc_ngif=%MeV_`@g6I_zX8!RJ4vU*o6bXPcO}K- zGUV>tmiKIh>7n5CxU80$X9CIk3m=Ilum$LqA{5{(vp45FVX{(u8M9pFWi7oj%)RpggI(?W!UD_m+8|(VY)?dcH)Q~H?Oa(6+3-ixO z=QO0=47b%8X@OHjQ2+Ce9eMW7Esj4{_jiyR?OX~G)(pleN%6}KOD6xiAN6B4Ek^nz+&`RVmk3tpC1r?X0 znjKk+Vhu3hZYr%i!(@6}HS6^EagH9hy|i&8cU><@=G%~4VwM!v*Bqx_R#tW~eae(6 zpI%D@a->OZak!j0SJWPo9`Fgblg+ZSAvIDc2= z$(m?1&*j+9vY25aCmTFzD5XsW;p;h-bEjq<(cHQ9`76#XJn2Vz zvoDwjPj2-9gWaqDtgGCyprD}EZj+v#eCCZ3N%ThL)rw z)@^eXefcg`e2Mx3VgDEp{EN!mWD!4-u2Kcv-?GS`QMf|Y(@libui_!(jp2g&rJC_$ z#p%2kLvQR*XUqSCVV#V9ROIRcD^^sM?qelm;!cI)fP`j3`@xMvCno1LBa|&#?%zqD}2wb2O1)>_U!2wqMKe^oHpqkKt|uxv8OReP{HWTGwKJSqL$%iQD`Z# zIY9z#N)jFkR{fAHD8%kxk~#gg=LWChlA93$PzT(}iKnR5uQeZZlLkroZ*J-;F9eh0 z{EBpgP&mA9_?V>c`54<1^#4#kK^q_ zA(oL#ve(NCGIUrWZRb6d;1OL}GY*J&XOnLzl3nsK882265U(b~jE4n?G;uX&XB#=m z6P!RZp|kZ4`2z>~m~U14?qTL?+=e*Ots}?#xYy0-7@aq;68W+r&7-XydOl=aOn0A> ziwcI0qU!+#PsGgfR0lqbMJRsch-JHb{-Q?xm1D0>zaHF|(E=IFx!-d&^i=_o@jaI% 
z_u<}`{a<{N@*4^s6y)So*=@41dD8j0=#Ey@p$Rn-Ddt6SF^pnVl&bCC6BAi-XsdJ8 zxE=(dj<4b4bJ=JXLHA9epvlwojLC_7$5Q-fyPCJeT;rSBhh9HG^L8}h9`gjOqPPWGs9qWl0Nyv zO5ViXg~&0wyt5?q{*xyg)s?n=sQ2R&Ka_Ly+){wxVNS;kR%Z*SDz-$RVY)NpayJ;)zIbHV9`5QNG?9|j; z?j$xorNJvB^Da;=eKGHZRadSUAmw=8=?Q#tOSwM3XAsR?-14pF8RyicOTccKTmck9 z%(hF!Z2{wm_Ju^HT}VH4R6sg7avm`kz_&!Vuv|#F!c+Oj)wnFwAiDB6zo>^jQtN zdfQdmmeDaWilW|d2xL-qTs*HAk79M41J z?Mt!X_SUFo6=Fg^s2gvm<=EM3(0{AaL9pWHnBcp(~w`Q=$LZLGp? z3wwM&Q!QSDA?fZG0A=u<@pLP~N$CEq4O7OD$A-yCCHXftOf2_Fy(Cu>o#{hegpcBA zZ`Yc(DIS(jc{}5VstgM6Aae6O8Wmq7p0`(Nv7e1WS8`W}B$2>Iz9VLukH2*7 zDjxXr=Z70lhDwkV>G=hc@%cTE>})=vGCC3Z`KM!5FGhvw#%Zb|*hScLj7LguFC!^j zwLe8wI8EKXYUu)7uSL4gLZw~p;}iQ0>Mk(|=zv7$EiYR7KHU+puFo~N8?@ua#2Zkm zmtQHNit#c$J;(3~?IY_EtA6d~0Dpz^0n-=+bud1f`%G24&JOikb#T0p$gIoik}*Vh zheR_XyrZkyW1(K0$~loZk#?>kXDo3SL3;(E`N9$u+}`4j{hTU97(BN=*E6A_+vf87 z+}td@h^N`PBSkZ?R%lI+G59}c+{DpV$fN6P- zMYTEKhE6%jUZM&w5&I?OASh9`dB0e`UEnsKTBkK^9*mi(8p?QH`A#)XRi(bBIO>?E zz8LU+Oi?fV22|!>4_nXVdXt)zWNA}EtMxMc^&^CYmv*We*X!fjBba_5H!m+53{#nX z-A#=AGNoUl+OPPF=K3Ud_?J6ml*iP#blJ)xD!JXDWab%&43kd?qaULH#uok>iBWvr zZ?PYcbs(nUwZ@ybZ;+~FbpXD zc$`^wAaI?*?qK|0{R7DZFNyVisk%v8J$^07 zf}&!NwDU$!)|qx$BO-?vKS!u9q2(mQ;-aW>gTtnE>)dUcOM{W6(mP)KdhVCX^k8DNiSVa|Q-7Owa)#Ju~B%oc3+sBnWirLGK% zda?y`U@JUGFnpsr43GC+Kfk5vjwec32O4(n-y(+mKyW$dET~)}^D@lNu>ewYLAO*yI$nOyXr+(cjwcR^a>Qn9*#2W2 zd3K$5kJa*q6*<|gmMP@uwf*L*EnJlKu1*!vqZ^35jRt?AIr;~=wh7e5_@RR=S2>}Q zQzfo13Pk*`x;!Yfr#Jb>JSu)aIilq$IadCw&3m#?}FB=47X zTQK1 z5lDFrvsT*ER%y}on-fIsKm75jCiiZ}AL@pe1o=+9Y8q&TJZel*_SUY6%@b47O@y<~ z(4(#Yh_GLfj!3+>)0~`+gUhFgi+qn~%)-U)h9`dKHWM^oaa@Vm7f67Mib@6hNiYfA zKEmHZ9nA!`WmhyDr)q&TWRr7@dr-fZGScOu880BwI+*HApJr8erPT!4b@3M zfax)sV^!rdjSpY{Hs1cDjPv&Qha98+hF)jcoF$lf$CB4(!zvPp@!NR5^Lhj`2M>B} zICLj>niZGidpp8ax$V9%->hhJ*Pjy!uyPRR%zu<^jL-h*%zK2OLT>ui0Q%^_8nl62 zBHda43}a&4KuuuBfdM+>rUsu!iIE)Yo{MT#}ud6foZkqPcsfW42;72cTSxu!pa12NC@i1 zK1V^rt3ju?URhtTqi%&FWE;DZwC{u5Wa!#nkcG_T9?>gX#18ckpl_s%^*fcMOx&BW zdEA<{`N9PO42iSoUSe4Cv9a4MA2EM#Y>~F-4C9j-G@ADb-3G{Lu7TGU)(~WC-46@X 
zhj1nDIlE_D@=rc*FwLp!%+RV?t-~TGnU7HEdaJ~rqzfe_JwGIJItQkI^c&I} zpj79(?bW2J0h9%!&x5XR&(B?rT%mNui&UDemO|WiFRRvDeF>^Xv4=NLNGWyMgZ5q~ ztQ<#+wsVr$2h*r2={Qm#Vp__bS8#hi*?74}^Ka6`q<69czA&gaUUPKe`12F@7fEKn z@`vx#sZ?ywx~#ja{Kx(Hi|^EZ^OqOiAvIukH+Hr(xx`D54`7x(Vs5#1hY@_C9O~;% zdYwv_f^)NFz|4A2YDLCOxI@p_w3|&b%3f{f0R^5s4seII5U+s5os2WRQsyONN#Y;E58cQ(f z-&bQ%NIhUS-q9V~V5VH8Gayr(Q;npRp6fsit#{a|tLpfQR+hG&5GcH&Y-#d2_*v7A}sdpV$NW5ea#WIOCYhl-R zT_I`ENf%|u&%IuEX8Lqe+xV?~2rCuogxP<$+999MQ`EVuf$gA??rP!miV?1&-ft@~ zaI@L;nd0rf^o|92tls;r)5(C2qoo5SF(DCVTWK_Zf40EPnGy?SWdl`NJLqP)SsJ8V z7pt|Zq`uc>Zvl48Nuqt&@MB3aOe^Hq!|Lz!Z%t1NzQ6ZYg&AqXL@FF|D)AZ&b-KzU zO+`z%`B8dSedQ`rj!Z(f8RUQ&EcTYkqD`cq&Q*}?L*m1E7R_aIagU)D76-XTf#Z0~ zi6?m}Z8gC~@6VxJa4TZkx01AeZq`baxLRr|jh%85Xwbd=O}HPX5}T%Hr0|-o;3$5z z36N-b(_(|a5vV1VV+4{FnHe+F)W`Tj(X@&^+B4}K|=w)FaNIMA?m*kyBxLUU_l4&P1=Xhh&Vregb$s|7FaV_bPor6 zB;92Sjo#|ta5P8FV-YMODg>|BD&u|3?1s zbA1RA+n0sWL=Lqt;lurXG`>${ujE)CUo&M&Aj!{qiHJ-5DR)S#PL=D0VdlSt(VQ2V z3H1-I+@JK0Ah8M8la)cxPPo+(pFraGNGJ{ITBRg97OXFes61*#2lAGdvPFl9&2TZJ}hwS0V;Fa}&U9#)mzC zb8?u@LzC(r&gxJg~5@5r$Slk`<&*iBW@;d1| zgtKY~s;HGE#cZA(6hz2a2r{4#nK z#Q(6HN-Y4h5MTZ`C!|6Bw1$BJ2slBHgK{1|L*G`O~Uz;)U zfIut~9kavZi*L!6U@g;1r92O|Sq-zmZB-_ttjW{FuO&tZE>u;Tr zNX;}cz7hCjd7Yuy!#m%Z{aQ72hN-qmW)<2@KM5d5mekz;$gR2O1n%zKmEMB*VF(kA zX{UX~9~8QVM`NwLRHh6+)%v|WvmOsNRZNB4JF4;<5&8dAwg7G&_xB_cnFWg}8LP;2 zPw)=9W+Ew_ya=T19(npa_1`cfhr2F{2kco71ZJ|D1l9aWY8Hpuxj=ex?5&2!7H)V! 
zpiYEnATFsG!Q|pb?H{8^@H?fyenC#>ox_X9AnrN+_UyVK(CcVM^bRzZg#{+ zN%!rJ;2 zRP!8>Y3->va)tOyo%`lB<}dI$xi^(WzNS@iXlEbLrIAt7CwKjSm{$BHF7j4nIQk3m zC>ju$E?KZ#5_|*id_tZmF)@J@H@eJj9Cmplg$!8klF($L@>_Za6m``hah^+EnnW`iHxDEL@0w4^8UfVOp-+S69D{Hg3r+tJ;aP)jUkU1Ka#$1sDos3 z0f1=KQ-+#f6%PjW-oUY4)7@0$gA0`>2l_HhY$VP*@Z{(+H1bjjl!Jxf<4I{TK2V;R z(^Qzxxk(hBIZtOfO|bGwgo(W1^On$HvWDu84zA-wGM% z<&LSeJSp$l)ezx_M|l6U8r zf@AXdtIwm)oj^kX^(%471j{E01JYo(MW7@6+hvb4)&_5kWQE`EBoYonIdRB$*jMuhx9--1?A+mgRcm_D6*~ z-;6#M`}p!wse}W;;=xaQeylYg;U)g2%6#sds!6XO!;xXxZgs!!5|k>n2sJur(*ALW z-qn*rkF6f>P6*g-zvixZa5VFLLEoA2l3&D9&Q3bRn*aNkNHJ!+t&7U}mMO~@hyHw0 z=>O369`IPN|NnSrbeiZiRiZ&OjE;m#+PXvM#V9& zH8}NcJ5NT$SzO>fnjdCtsOKBK8du-0S{43>PTrfyhdS*1+P-eGl3F5)>`ptd$hy|s zxHP0D;_t4^USMf=a?S~^bvAF>VoqjN*gyC>!P~yLK(+i_NfIaVdTka4fAs4Bm_C0A-uO3sx#tHQq)Pj}O!0uxWC9ZukhF)Q3&*!~x+ zKGTG!n=;qaINOR~F5`P5AvDR-hN!M1O~tNDQ|bS=hXJ-|z^hkw+NK)qKW?{UuMjkfst=xw`nNpBlx+JnVgA3fx8_qloB&5c*8_p1|;V3^nW7ZixngYbvL z_l9=^MbULZfBYfzRbRSK9g*5?g|I+v2@c|A%Npcq&EQl$-ZbgOgMhp|&e>Gg!nsPP zGmzbq$9%m9dB`&w*AC$>LVspecnJ2rwR|Mn7`M=-+SG_6HnicmCvP2`_tmDtDO&e- zUvz3=6j&r@7(fQnv0qU^QPDeJWY=__@#VowLzLK^R^S0xP>%tBK6Ajbken;2QJ9=V zt!Q?+3zrtQd%L9`(&snKUyjvhlxjFX{DRq|?T11SpK#_4O-?Q`kkqoY z+%@(7nK^PT>h*GxrTsm7-7pwoWggUE7w;;HUwfj{3zFw*)BOC5{Gy)F5HYzjPT53`2P=Y#uR%m_tXeo2c?qO_RlfppKJ%^ z3k$0R=V;qD?8o&6S#u4$W|$PEz1wltbB8Bc6+#&g0zU7To_u5aQFRc(j%{nHM>3!%4I=BJ!0@OnAK+*v;52_P ze)}#d2mgkH`X9{W#!tDHuidt4$G=@U|kg6xRhLFX;>SNS$?f zjx62UT1Z{cDeFzpxK}3ebK_BFm6S=D?NhwysJE@q@AlLcXH%41m6tiC&id%GIcV#A zRkD&lSW_RpQ;~2>Dl>6s1NHS_G3~sWDIYm%^>#7;?9M&jZd(=U99b(?w!S>6$E{9j zTA;_Z*{JiG^Oy6{A|muBBHKxiYij@(`;DDh72HJw7pb>p;d5Tns<}Vy$byU7C=X=O zrv}r5a&mO=VJb{7_ig^X*KE^6{FqaxUb9vYj)vw7x=%&EpdZXc8ka;={{2Z7eMVXH zHN(pkd{-OO4OSTN(JU>?aCBeezJ+}HeoocjgjGx9Uyur^8p*YV)qhM~k+5dGp1LAg zs4IS4sCYbX7yA@%-SW5AU;-yXnH9Kcdkt-^-VG;*h6~h@$nafokRV=Xq>c0K z=X5fUyHQPwe}fZ>f0nTS9xUR(n1v_Ee($wT(#xs;Wso;qu6XO&ffBR~GM zdE7WWyk*4n!8;!?F=G7eM; zlJJAg0M&# ziHAHCiN+~wygd@HEHu=OxGHZCBb2kSuOrAk8#o_WU6EI~8l5yc*Ki8!2Ss*|Zn;s??6kw$eQRyH 
zW+=ap{(cQ>u%>24slwFjyx49l9`an+acNEXk^j&ykv+0;C0T2|?M?J(^J-2%lL_P0 zN4laFPnwGi#IA~3yx&QCpL5H98d$j(5*Nuadz0BOMOQT~h|KUlFS%}=jl9PhiQ`Fy zdSX|VwRW$ht+nkI#jZ+P|3rbI>ZScvj+0;174~%el1FeZ7k9-PVjF1X8lp?=Y&>0G zFJRYJa#!0Y+&$Gfmpf{fRpfQv2#tKB2rePPrpnaBRyFjsRBh3|wM9~E zaU=8%N8Tweka)&+G-bg*^-MO>W&{1JV{^SCEYC3%(!a`fZPzW66kT_F_#x0&P<=Js%7DPGOHvQ28+wxg=45;WVH zww~U;vXWeppMwEQ_o9!*p8%)bQmYSsIfDH~bq(3xvy?2_CsoBPTr=OwC|Zr((=F&i z>`FIYTq%9oJnr-z91^6@!Gqr>trbLiY!|U*PKa4rWnd(AfNa&jH%rWee9?4N?JtL< z4KhmBX}KIySFFS{`U7JNc?i>74(Pi`CLeypBZk^VP8kL0u<#5Qau1(wt?oy?Ebci) zqak+UNaXr`MdyMdeQZ82Z2?++UHm@GW;baKSIW&r&Fv;t1>KzvJIPi-(iTz;VamGR zb!0-JI>Mr&*BUWlC8Q&CEL?-ddT`7f8CP$>efL|*`*j|@N@VDm&FXAW1+wiCe#e-zTJ4*%< zd|m&zvnxga?R6e3nwOW?si}iS0fTV>$I9-pU6gg7HQqFUfCr!NB_s&^ z`mF>r=8IXbBKi{IsC}l-dfE?6ukjVtKTg`p4^ckGPa#joo)JJA1nI5EbW%#}v0t9> z6*If35M0t){_6dsL#Nu3?b42Z%5D$$goj;Qdjsq2;@dA-y1S60JNKY;7)DguBMo-6 zf)&r1+iWpTfFsnnk%=dPuZ@+Mi(_I9qpBWGh4#{GzoIH zn7}6D7jKj-UxIsM44y$gC!(*Ys6jr(o$uGnM0S~nkxP1l0Mt`eH8gWsV90c4i{16AQ|3E)<$mivna8!*hHPACWxqd~ zJuR{+V$J=o8FK&LX#p0wm!khyxPTdR@~2h`c@9RCF?QA0`*JeTV$Mm8a7#>Q8$T?Y zkxK(yK{Q7{gDdox$CekwL_MCJPLrd3zy^?|d;2J;BgMksUo1`bbN&m>*}c=jiofDT zIVX$xsAOqUZB5G z$1C;&PIs&g0p{8u@7I--unqS;JVdt}ZfZP{^fa}j<7C#h`&qc=*s#zq`h@huIEg*- z^5dJQoYBwH7mfv%K+Db^kKS_{`q!$X(y~)mOkR$WVH%mdFyW@!&s@y+{y_lB{QM}2 zN&C#x8Hhgd;z*x3>R4q?3frzQCxwh0ZVOz!*>2lw`GJpyRulG)N2hf5ygX=}LJk1} zAsx#4hOI(=`Zp!^T5Cq@Zgv?ji-R{EB}+aT%>P!3z@CQsZ&h}E7e8QMYYKz>0qXR_ z{H>dW;b6en2jvXGvlcz|Jg6mE!V0?;IC%a@vAorc_3r6&V+C#U1v=)2;JfJ$I8V}V zcyHpIptcX=(9qiNUO{t9t&nY$S)>%ewt_4}&uX$C#@TL^1Yw}4NMg9j*Ry$OYREbk zd{amLiRsbUFZOtJoL}Mb_9Vbe@77LBjIep4f!97X^qx2S)lr0fG3#+DFdCONowRXk zc!;JCBZ2Foh3pGjEHTTh5EAgwEGVhf`ZymeBxRt>VUK_mQnTyLlRGms*<(+&O8|CFBS35pTM1j00FAyHoj zql2Xy<<;T${t*wtXiZj!h#S?Lj~h41d1RNoNj&A(ftXeJI%hy6iS?6qP;4tHoF}8U z&;c(PkD28$%6WTFW^E0EGabs>pu$7$SK8ExR+!sZ8L%qteY^$!Nyk2Z_nqBJq~J=` z+F*en3es6wziheeV=VhLnderauQ9AtOva-vjpSke+3HV|)Y`TE8Y-0qRd>VpE<$T1 
zLZDW_c2~M)xJI{nT%ZSK`zV-6uc81RV9CqgInIiC;-r^=0t1?GlP8H}iuB{2P?R+CX`oO{F&9OMF^V>M-W^DPA$rUWu!wBvT&*oYqtSQ%DBI z1z!tQ0jZSrT}tTYst2BO>W3%xv8|IG01%sns;8P9o$L(3t25X`;)H+on>A-<-}Lm? zS#nE<2ZS$Zw1#J7U<%L9v}-)=Jx{O;L6c(-XQ$sB4JEvg2$bOuy|YM~3<;Q{9+1-P zz9IwT8)-e9aYEVUmWO;wCa_*s#q5<5VUa?YL1!iu_N+?)*6KSSB}?LSf6uB(7CARX z*OKp3S13;%FhJ_a0fTe{PiaWZQ&?R`ylNcbm(gJ^%X5=R=l zb*>_{fR5mE724vCC%Hy!6)qwq2O;^qH8rd;;oZsQ@R`{IWf0tJzOy#x&zkR=<)_ zffRwAbyH~kDucQs4>j1;So{RAh+np#*04?qmtziFO_B%iHpBzN=*zL1OwiNq4I3Ma zKN&o>mVU_EKQW}AquN{QG2QJ`+`^D@#RPhV2}YAAv~fMbj&UAKhp}30J7F?LomL+? z*3^JjV0oCKB6vLxaXJ`Pp0LY`uo#EG$kI38;l=hNaP)5qe<_Jhtol3|NL}p-1~f65 z6*=Gi-eC~H0SIyq z;{|gJkn(@yZ=$cb-l~Z5E)pZCj|#vDaKEe>(8lkn3=q{ppEW2d(O>VebKPE$X~OKB zc;85ljB?-zRrgd+eK;K%Z}FuB>lM`WkYORF_`F(CJlvTT6D_W-L>FjM-eDBOWbIHC zzj=!8QS;Q|& zYP~^sj5~dDWckb?YyO2V6GyPP)i`w>8G3ZV3Oc^uLJx`*s$Cd~g%!SX38@dJ#vhG2 zWtbQO>qz`tf!MP%Vz->k6@Xc(z7Y}YcUH%erb^JsNAl=h-lMfS3TS{iQK87m@fT3O zdVPhX)O75O!0q5eNo&{LYO#BE?ONAbl!KLH8>^J>e`~BT5>Nd z#C~&ZtRS<{Ky(0BQt9>%oha|4^0eiyCs_<$Kdm?&S`z ztR)fQ7~tp#`eCRj8{73+m#wQ}ayB{+Ph|uJLptj3_iMjb5N7o9foRa5nf>-#Q5GNV zzMB67Rd(?c$1Z#O#K(S#1)@ZdGaSK?W`8lPOk0iK9Bpnu8-R$ebV#nPi(K{#XF6b) z-3FOzC2fPfv=Hx0i!c;-<>k7fg}gu#e!Y=mi;Hwz83LWRPgk_(T?_f%7Q%Di%s8-< zt2ofne+{U<9{ny-M4LZZuFbRCvBDuqNy@*Y{~hwBQgk%KN~dI?OP!`gO>2KHS;>O* z)!pw^6MJ~IsfGWohP6dS%o81>%11Lo7aM}hAe0QQJZ|1pdQv>sQ!vXy>W@!7iWrV8 zE%B5TG02OUC%L7@>q@`LB71B-PJ#@Z1#~o}&FS+$wW|oyOouHVU69V zfSnw`+d5X{R9{8JJFj#kwMtTr>erU>(bih!=#VL8&hnYEm(hm=0Ix>J&w`vzfTrvg z=oS)_bjQg244wF?xvF2%0abLb5Jc$&g~3-G08oKDaooW{ez$0Y5TOi_uFCnS;q;=b=XRj@%=;ix8#T}s zwYs5qFLk1F@C@u!4!QQ1)};MZ=T~hC>YN_r=QjoH$zP&J?b%+0z6XVwWU8x zqvVw@VBFH^imtzzVwa9(WFFVpco1N4M6JK@_voc@C9TDOU}V@E_((-GaXSJ#%Z*~t z*{$U#l?cFMessg#y&Pml4-O0CLc^a%9(fV+hOW?)nq(G8=jgvM!Jwp~c%u%!z57Xb z-h1k81Jm*Siz{a4#YAV(V4wF?wAtA@+>>#yEHO7U>~e}+Z(ZmDg-}LU)3qoy#x{5E z{`cp42OMm5BL<7=ts|r4`@B`{4+Ya*U5!%Yr=9}=!q`739L}-g*PtuoH!gdU;t5d8yDGMko+EPwk#YjyJpOQ(F@2uyd1d zE(<+K_06xw_3km0+~s`g`k>las2hypOwM(P?9$PBw8oeVefVi|eqx6ay-T6VKmw(~ 
zn85Cf>t}3~qLRKzeyCpfhvuh``vtFx9xdg~SIv{9*ViY2=W!lGVQr?-Xn4`QMDKtU zgxuRqYMFd%LPRF7ZMu(TLQQijcc$IhmvXDwa^M;_W;qw`-WxCC^=R+U{FdYI59!0a z`R!u*IbR7k23d@D*I=vteT=@?&V-~T1U;d9vOKD9uKdNUH4so+ef_r>8}Vw~lZXC` zuik!U`HcRq=a)F5yEtqDlRMR49TG9j7Mxn)(wVI9`Lc-JJ zmFOP1I+2NTEd|B8k@>mhJ@E}K&Z0)&(~)jXsA{NsoYBy8A=gj^l!sA! zp<}6wb&ZLyljI({F{j#><>y^(!xlC~qdYG? zH0h2KJ+Ii>5pd#eki|n_4JOb(X7z;s4gDLLNw*$=B*iFq&E(2oGdmvygrxLDbF@rP zt?+Rekmuv$d#Tdg9_VBrB%M8|T=u~{_c|m(E6!85V>Blk1&W{@Za4%#U z7;iOXy*)lXRDlM~_JskFB%JSMJi!p3ObHE?0G(gbV0XHK6G^-!{uGW-P`Mw>B0E4jSUf~!3MmOifM zIfa$Wam?mfM};P;K(1GSto!r2_@g1SK%0Kg4^p>8ck3|QrMS+D1L9wCdLGXbZsFxEK=DSGs z*QiW>PYmdLpR9CY4YQ<~(2-v6Mz!84I}y}p62o??`D8P{eNWcSu=`mqT}?}N0Vp^| zNPv0sr?5x>a^xQXHV;&UrsSWc<^Vy~Qsx*u%~`ikaiY)%#=Til;hgDKlYEYGtk7Vn z-DjNiY28bnp2%v5Ye3>8>e+@o2!_O|o{=^rVHlp4cWfC_6}>Q03d7cB>4?_p`93_L zoLBlr#W)4^TZ8Eq*??Qv7hgShgnS-tX$(bx2JWH?tdE$qrJo%-6ZN?GX!M#lbLE-m2QMJbF)GK%{RS5(e#UTK%`s480QZdvHHHhl z__N)3n@W2ojUARpAXO;0Vxq6A>4&sG<@ornO~(nUzn9|yKc+fK0~GsVR>r|pX|01N zDa3k`WSYQ`V;NAJCX%fD@t}?RyNH*uy=2RS$_wIRKfNNQGw}My&J#`ega@Tbo6bus ziA@|V!-~g5{qQ$7X3O-9Ow{s;YvTPN5}Z4kZ*J6Y59=h$4}fXAVg2x4$UOm|VEmU{ z&bC0JGHL8YZ4SF7RF4#qe>i(`X#7p5c*c|eJum=H#r5vnM#kwY_HjopI$33BRun`@ zOMU?<-u!o_c?GTvpjj|1DzuNryL1^WuN|tsvw$-D9q7a=u<4SW}ATx zc9z9z6nDx3`m6_fHY*sOO=OEQdN4cv^*r}rOx$!_Hg61QGCyCE2mn_?JbG7KJlmR6 zf1rOQMkW@*Mf9W1k1Z@e_?LbmLzR|ghZ(cyqqxb#({Vq%7Vd{PvEryqpAYx*%3RzJ zFFt}&=_Xo%2Vc$PCgXIQ2ZNh$nVIArcYRzQ@1u4s{{m?H(QC2@5*kGRZ!1l(KKBi+{L9ZKHeY zs_-KxyCuWF4)o1coyj|O#pczQ5_nkCXDuk=54z%#FfpjV^A;=BY*l6|!7?j&mx+t2 zEn7yj{{D``-P1EXA;H%2M5h7NnDrL%mMThgT*XY1d2gylZ7_RTE}h){ay70*mZ%b2 zykSjepwQq-uJtF|61SiO^!AxDb21;1sur5Rd-H~jf%oqT$OHdbYx=oTvU}y#sVF-# zxm*j3DpMkT+KCwF((fC94g z+LEAsovpJFwQwr=AArn}{wy8ne)!ehDdl5iU}Pe$j3IGJ(C^-8a+%uG8VS<(U&LS+ zQkOMfIjxThj#j3+nh!gT5T``E0-nD9tgl0kj~k4>raFKpM6@UGkFItWgg$Q9Xr{hv zhK$QD7#!^hPqLy}t3r-bA7cjIC#5IOvR5(D1>p^iZ9+m^*H>|z1#iDM+$EGFr|L`< z{5+=oLjB$D$}$&?Co6?N#kxqaP=X$_as@Q7U%({a=r4BJ{NQGz4C7#nf@59TFRfP~ 
zUquIhKc#%YN?JPS($|kJ2sBzexsv%{Va&t8aq4Dx4Zw*9T+NsG6P$b-r?#+8mv-Bs z(`ABLRoVKe`%B(5#r|D^&#KgdM8*6KlDxiajjp^M@wOeH-iq>6_9gw1cKGie1wR0X z!%cm>g{@-izaia(KV?bh(35!a{+a&V9nd_l<1B3(BCQw*!~$XoE&mm)rp88q)n&@# zu6Ltd$;^JUM^={J+1!-h(>^EquzV~6IzRQYyq)y3>YKHME-q5(PFviSS}y*PPPVH%XG)=nGNlgL&!8Sdie{v6uve z9ssgYtUshWJvY$~Nkk(LQ4AmL&TcwX(*_1pubknZ8#L&XXhez~8|5r>|hgQyE z)@d7?qW?KVvLfe$y<2zJ_}_2ord}Rs%-vpB;pBpp0S&A5z5YcF2-lR@#>Q5~LM6W(YVSYS%po-1 zB4Ab7dXuA)5o)^GtF@=^Z+#!7`v`oK;_5A!EU)bfQto@2NLf=sYZ_+sV z?@eEIb&F)_D^Z}h`$~TEwy8q)5hRQ`ehYSQNuBhxyYhE(xTBZa3uC}&xu4)wSC6HX z!W&Qyj{N0sgrYTo-Qmdh<$cFH?8` zvzt8nJx#AI;tB-?9yoBue$XZp7hR*Hl+9z@3svs%6QMJq_7F5sS}hY+g$u0e!3pkD zeo$TU`bTb6H>PVqPDN0HO>ZVUuxmx!rTkpJ>sS`ybnT**p>U+~#X5ne90q zBZS~YR;yt6;_Z*{zG2E3cpqmSoz8n{q?g2uz?>8)$nJ9JERbJ>Y?6dS(pk~4VQ!%1 z!V{WZ$6~Cd&1!;h@#d{{(oiah3>ROP_D!tF1tGu5UB9fuHWphGR5=b{FKz246-Wag z#(my@^b2v{2s#~5zcGk2lz;^D&}TBKEtoa5wPV{frp-r*-wjCwa6!-cE1hgAFBzl9 zIf}daPGvYG_gC*ke|Ut^>lzj%ZT7NZ0*86kQ3-&e8z-kf2*A5=R3D#2*hXDHlc;-u zFUSSL>!z}vd-zA-D<~C^Zt#YN)c(qw7I=Mf!A1Fh6dXdM zvV-W2nD!_Zoj{3T#SK+=WM}gO^Zj_NH0B+!WhFW^s=MjY08P;I&YE>IGAcE3Sfc@> z#t`i^B0U7{1RKQ@liE*#LmmS*3!!Aohp@O-Uj;=rN~-|~fjtDg5mAmPEk@Ju|1=O> zL)qR!0|*C#c)Q`oMs)w0DgFX9U~+A?>&88sJaRxjB82ggqe%a&9S`Icns96 zwp}T4)<_V*L=4D3j6mc*Gn;ARkn2Kp7n$)a5zT8m-&N%i69YQU(kPyqYSZ(QGJe<< zlUe&`w)8UslUXK z{#xBG`4%Qp)25Nrah9>G5(J=7ir~%k)ng*dI>8WO>J}-|1f2gb zYIqF)BwLiCf=VR;9A1RniTKLK*ox$gH3RTlvY!H-j*B+^)1Lm37D8Y<+YsQqvQ}}37QghIdtO|bxeu-+6ox#{b z5>7;&Kc(;#M--*-JH!zMcN1lH6EAo80JU6KAShCGdft0;xJQ8Jjfe+UpwVX%+|WU* zq~-CZK+%07RpNChJ?PP+sld}+d0$Ax5k7T@lUOzb4LIc-Sw`{zHDQ+e3*Bkn`*-`H zx*c{Tl2n{w9vcJvGTM?6W#Ez$p1I#B%!-os&0Y#K2w^yhq-yUllTBv!mv(=)hR5Ch zh{u&5YY?5`7XWr$BcegC$Dx-(kxB;fdjazb_;Ccx>VtM7F*wy1M&KXup z5x-YsvkG84491ZJgc#DPlevHl z3O;8=!taG2b8CqIT=s@03iTE6CwKvFQ*%S93$#%OqGzWWGuOG5bQoCR-abaWxP-_v zsf^ooJfAgq$Urtvp-++61fB_TE3#0_ecIT6jLz6vF(ZwH5XlF3b4R=uVNRJmkGP2}G4HL2AD-bT#ird`#`EBPyH+o)HjPU86zJjc z!MY=V=Lfz;e^5lZp6?h??1I_1Z(i}Wu~4t+zL2$VJo8zU2*hcXf%9zYXvXuzN)3>3MaWSLQc5fpe`lI^tI6-YM&yr#RaHh 
zS8Dxz0n0)O8_LBmmeOG-kC_3KYL;m$&3x7NSNJOu3Rl6-(Za$iz zp#HFua(tx2nj~023WPfcUl=euU?6LV-lZT8x6}yC`!@Qe4U~w2iLyr=B3}^GA4E$oN`1l2A%1J z3(J@WnvS)}Kn|w{JMq%AwQ~yXacJ({yPe<=E~U$zksy`}W!_ntSP z3N%!pa5`4k>vkmd^4fJamVHfAmw!`+?0~>YbF@}EuP1(~(W*;QLOMR!N#|Qk@kic> zf88ys$bL!FD(5~CMe>w; zcL+*yQj29to}T`E7Q{6F!D4e#dKC?Jb^#g@Yy;?@ zH4D6#Do$4j4IVvGMA;`y)v%)h01vacn%p65)f%5iVIo|AK zU7_&aM1P6EBaUEdg=j=YiLFV5V=#gIKx~2dr<_#y8M|)H!%m)>4We_53%zSiTV93mc@*q zW=j_ zr?2UFRJ+yj|6iNm5nkJS>pa!}mzEc+aKN;lBei>%9x=iH#B4sg!QHoQOi;&Gqz94C zXX$+yRJ<%;fyc}V8f3YwDEzTAWI2l1s(6Pjv^(jLgUXJA!tU<$3UE$blR)I42+ThQ z2qO~~Dn*>dn2M{}+^HA@^m#us3;#|03LHqsz9$^6NY04y4@>^0gl@9mFX3C6Jn^Ke z*6v2cTIlHf#e~ZOXwiDx{wFB>U%K6%7@iBCS#pt=Bmf1|-s1t=L?Y&I|6x>Zt19nT z*X8UQ@4joLb8oJ;cxkY|a9{j}ral#gCsLu4;%9KV+}iV(++b_I1JkYw{L0(FuIS=* zrXl4v(?w2k%uZHACf;1ot`Ua1`ULD6<%FDS9D8R=h&>N39vB6n@R;NgfALm#nA_${=H{f0f z4g^YkFM?a<3AqE7>(m&UWEZm!DJ)#Syc>_JFAHZ__`StnoZq_5imLKd%VvLw@0-wT z`H4?=h6%~xUA2l4FQNw77qnzpaYTU|2tyMzr?Tn6#L}y3DvY z?B6GBFr@b-AJ4M1gP&>U+pj6nm7}9ALqACkM8>r1FojS7zP?}XMZxQymHAirQ|Ya9 za@ZQ0^;@3INmr!9ojzwbiLyYviG5#5yIbJsNVQ>vMNN(!6p0k7Ag1M?)$CqtOUM$p>B(!T-MR2z01d& z*;|vte@fLGG`!W1J>}?NL-nu&J?AiVW;HGw0LqAQ^4^+m z7EYFU{=xgRF>EO$vSBoG9l>=C&Dc3Kyeze0!hvrKu4Q13d^pOIxC78e5RL*QWDs!q z`6zjpq}JPyE1l;1OX)BebKL0kgABV~OC6@Nc2~8{iyeOzxfC;nUZziZAbNufU>MQZ z_cAe~JpYrlgu>qF*QnA3;n!hQJd!8(dok`AoRT|D$~ApX`*&hnH^Mf zW&Q27EoMcxA&D*I20M!87EvNSl3fCdb%|HiV-~oAn{-*$mji-VEzp)49Q(2^kO*fx zt}s$S$J_{lSNqm_lrT!RhKuC>3q+>ni?+QpsU$3NCPl zHl-d~9HJtGp+$EF|6Q!2|67~R)YMQRM@j+BGK#%_nGZVz@$k1&2`WbFiu(Fd#woXI z=P~kz6ee@zmO$(UH8fZ1<$iq|bw%UDkxEMR?;L}zH)adxs3LTyz9G>Pc~Z6fVjYza zE&(`#>*}J$F2+mYyLELT88-!|n?Z5^&Z}95F}(dL8@EnB8%xo=@iqpHJ!(8wL}AY| zb+jjq_@-NGdYGu>I;eyJmR>WDTS!p+?bl$@hi&>46~>}JWIgjpH7Z%N$%(`DV;dE$ z;%GLR1+9^CM4Xu#?6v752`-pTp-Fk+3ofi}?KB_6nBignr6fGNW9F>z^C&FL_bM08uUcyzVW^jd3G$AG1UeTNdJ$ZjAOjd#Z4!Ma@Io^BUvF9E$S`VQiGmR_-+%CwBe#8b+!JG|`5kfRCKLs6vy&E_4A8&jlrx?- z$}p3q#-08=jG}JbN8*xfx+f2yM=z&b*lUW`m&doe#uc9Kj((bd7y#eU$;DQMnV48^ 
z34whcm;jxHWetbDY&iiyf#_xH^T|p4pV4lNo2O3t76?SML>4$W#OQ{Y&<`Ba?TWK&d|F3Dm#>mdPpem1bazVf*tQ&((^ z*}c}0k2Zl_!oePtInpiS4~ z(jo^g1|Q-P{a>y%aBT=?pecd310FwGB1+EQd8~CcWT^iMKuzXySD+i@SWTFjcI2la zM`Rz1girK@uHfBCV9R8k|FAwsv;P}6&li)$+6WRz*%3TLZqM3>DoB<4D#O9pp?kA; zR>VMT{#xsrufc_j2jk5uoqH<+cnn?}$wha18WL3{y7oPa_KJDZNGtmM7xwnI^x4Su zF!n3VUuE>c#OH(&xa_$&aG@YsL?a0pAfgb+bAbxvLCyOWeG(ixR=JP6;-~-#*l!|x zt&{n?jSvxnwLB`q@O(Zsac>F#K?gweAS5Wm#@LU_dxHs;s6z5T)vjm52n# z$Yz`F3N7aaKC+zr&-q-0cH>U5`V|d(boV;EY$)&Pwc5X7s7a6eo`T+7%vdWkco4$R zxTIr#xlhd^p}Sd=J7tP8m&=6(OnwT{5v&kBN`Mef}*o!+VWjS$pw5 zQl(3*>@+ctXfMb8dN?;58}plD^#Kp90@aZU#4GM|W$p!tO9>>1!-IrWU`yAB^x22i z&fY-o^0QYN4kNIZUM0RnjSkgZoliLuVpww2cMlN61RKv_f4a0GL)=$-3pA-lA4PJ| zJ)%$KB&}F-qYTFQM?A9#1xxp22!uQpcnxLO0Hbz~a_VDedlFp%&?~a`tQAzU-|^5;`pndK;EPmO%w7`W2kLbHO4tp|2>h>4F(D6;aM-qFo?v_40wv z^})hK)X!4!et)IAf!uE4^*E+uDjzP!OVGcM)erQak4BO^a9sXiQzL;#id#~VCpOBc z0w5~HzSC{P*MVm-3v=*dItp#T)oDZ}%Wm@ZrxfAM{5e0i>z-V|c((nJqOcTJ_J4)k z4=_co6yF6nrP<5YiDaV49%BZnQavIv6s9e^;W!ImfX|SZ1d8qNedQ3dj%&R(4D(Hj zfU+@0WS0`1r)8R_O;>kfao>d6qXs$JmaCT!#IT8f(l6-36U_utddsl@TYPe;6OU>`8+cI-rogij*W9S)*pZ0O# z(z>TT13oasLd`myC^!_JR6du1Q$9ksaqt3>Pa;+D>lfM!3CbCRouXNutkabfhcO55 zg9H=MCjBCqGCEKR*m*&`#R<$iRD>t0A0j+eVQYo@nxDqT)`ND;nMw1$B7-QCPa~E8KY-)w3%`a8`8?y)=bh?q)mQa5(I<0gVjPPdf=|YYW1V#O zGzfPs#W)nl=c^^t31Gq!9}%*<6DdiQ{+FcNR8fY1oc*I5f~mexD8zIB*3vJ4G?Ik1 zkoO1-iA(=Z`9rTt2|{=7&oF@|?=duIZ-m@>hns66Xc-AJW9BdHc=4n8`y z0WK>&Tc-!1kN&#CY=?rjt{l#c`ggX!5CV9vU1quy*CY6(6KXI|&z(#Ni?D3M{q+|X z@w{KN%xRtJ=Ooprr4T>fg0Y#N%%@0(%D<21sKVY`=c?xYg8}q^Dms22 zR|qc>t0Mn?t*)jAsRfQL@-M;`tlRJe>tx0JLp=)?zb6bSGFIGz{!E`lh5fLkvyw$c zD1gg~N#GTb_#HJqj&1n*WzCLk$c_M~mQ;Qc4HrGli8Z0w0iCUzJ87 zKt{yJu9U3lZ53+#I^T4l2mX0Lk2Q&5EzXw7P*kg24L2!@&A_;Qhu=wzf+AnA_S#%Pl(`2m1 zX-Ul^n2?!aWF(?0Z6vY!Oy!|R#k*Z^-H5aAk?V4>Y9qbd-%?MIo`BNktu*ZYGV}VE>D_1B2N-B?Z>r zonD-FDHP43w)iKn$T>O!d7dd;?*|zU{%_l=82I;6_0r;{*CX})=H6#H8VTSE@u=Z@ zUT*KWVlQLAC~X__?oVID>PgZO{Agp?MvgutZ!*)M+ST_q16XHXR1E_m-C$!b@o znm!2i$gJ{D*RFa(_6=aVBeC7~a}aQnsA^1(pd}aG#BYnxerSIH3L~ 
zl{NwIA2R_SFHeivXptR(i6c#rm-Wmi9YsibN0;u~Di@b`RW5^Ot5Uti9?n?{uc)GY zwgY2eU|@!z%NnovP56@Fa^{J130w&@`@HM%*JaD{Ie`W9)$pF-}+x|(CBI1>10&ZGL zW#D>;$S!7V0~uJD{47bksDtL3gyjHnZI(gaL&5#1!`?!ZR!zhlkd*WV5q82r^8lS8 zAsJVF17=%|K9-45qkC|4oMGMea*|)c?^a~AJt1v2CVQTc3-h?e9viA4juqJhKsZ>T z1C|+I)^S0I0q!^Wb+ya${+4XGeln`cq~HK~kB=Xi5apNIE*&%KLgD z!G_qHNh!L#Y1;tPLwQho`EZy$i$NPnwG0@e7(r0s++pq5^kSw6Wa8(_6v%e9Fk`1s z7JBQV^&A)IQZQ+*LkT{8e{_P=C88_L-lAf!X`P`YpJ?peL4g#kNN-4NS$Y?n!PKq= zQz}t^!tT2?benCMVZZKwC}de!v=EGPm`%L5vQ_Q-0t~gUdO&xrM>77-rsn2{-=nvC zF}1SU3HBaH!!RmYiu+=7S6kK#q`EJhJ=8!aONaz8Mb(s+2Z~EQ>E&@G)m$L${JfM7 z-xfqFd9oaqfPGD(2-89p7+M}fU~=X5YtV@f&$D=@KeJjbA2vm*aJWk!d`e>K$wW%j z8-6#ehh~c3ft!8a|7hM~gcgc77k`C0Tcj3F3g^NzA1UuLI;If=T2_TGfsR>`s{gS) zUK&?NVizFvKvFmgKqh8}8BvHe**nxC5BeP)$tpcK_hM>p`9S<#TjR@cdTnRu+ZWj~ z3_DWl2YtJxHQHUpdSAo|OK{s1vRT#fB=>SKvrI8){Ftyo#SIQM55OiyBg;RzI;6*B zOqd!WN}wQRYDh4EFw@2ziIDW0g*q6;=Ok)@q>t`&!dc)cO+bwZxQfve{I+u2$NiK~WcJEe1?Znultt5lmqfzdjM;b?o}|bs z7AW$!91N1}hmJ(*5#vvYU8w|cF`p`@i#e1mUm%>SX)<3IeE`r%G*t7aW^}dO(1E=Ikzm3{s74beSQI+jf|M%A8jQ$(` z8ONIk&e#VBOL``}oL_V7$Saq++zastX4?%m3E^injd=5`1O2(80N_ddkvzosBbLa4 zYsNr`a@geP>|CY;{m8*xC69Bwd9VWQ&{?cSQ@*G^MOJ_dM5jLSyf>J%? 
zv?Y<(aN0&ywcV{Y?dafNZMpoHh%xyVFv&x&qrh=y0hI(gFa34)D|g-tn`yl@;^1K> z0kMLljTz{CO(Z0RE^6*Z_YDY+kn3n}vx>OY)!SBRD3?FG`pZ_~CdAv#o~1bRYwT|8 z9kYCvBY8Y{sjltS)y>JMvR1<2g{JDGb6 z9Q)R&3p-_H$rYqK9{|y{kZ=bGA?|!=MY25oI00r-QOMrO{kl(qfN~_$5#SQ}?#>jBOI-w1n4F?5UDl;6 z727KL-<@EYGpvSkGj_9E84ZDK-%j};qmCJeAHg<1oh=K24@6JXV!!{#)QsPmsTs@t zwTz!~MFuQZ2HXP@*$o;MN7twQtPLFau>pC)Y zT%Rlo*kc|LekL4 z%{(yJ6003H4?Qox+O~IN9IOPN=|AO`{v>3Am*VfPb z!*Ri=V7x2U9n47K={4bDnREB^wPq4!1i`(SQkBfmg*fz|za)BEqMFUBxI9H@Kf$z7t4LLrCQyiYj!ZM{{zJSm1jWj>W1EnBD>y_BpaX+1+h_9w=zk( z_OT+T<;xb`uAY+;#Zg!H`dS8bLt7Tf9vr^IM{EmSMaAuIUki#MFM%b<&JhX0?sVBu zAF|N+cXT6#F;U7?000s0t;lMPo;_92cYZ-x?k6MRtASU_n{a$AeIdaqgdhFi1U6X} zuyYURy^dJJ>hFAWLgqNI3WFn@B!ZV@<&xCAr$50F5mlq(*PcAPQ2+`WT>c{p+0AW= zVlIAwV7f6A39{zKWV4V9%7`eGf=F0A+t+Katt4PkSv>Caq+vj9+@zMz&vU=$tjpne zxv(wCreC{>k*d>xi%Xs|bMAk+K04=qIl3uhrVMPnO@G1$3COlaawT5q+4vU*{5@C# zbRKW=(xH^L>q9CS1kK&|8Tdk&lFSM!x})cOLHM%PN4!(@{!hZ1xHLmauI<+dt;Hjx zbYlAx2%eycDfG{4E~^!|*j~iSlpU4#H)Jt;WwrgEb0rAoW;rTISe{#YZ*5LdyuSn$ z>_?E8p9lM~2tx?bb(x#FPmvIHq4%?TidmtNT0}6wbd4bnWZbivRZCej`?!t!Y+GzX zA}*M1r=0l7JSHCZMo2&C=!%bfcDxLAGXQ`@9#ddzjuxr(px-9=4xYo+ zL3_33aiKT`(o!)V=qucZ`wAuvNb);~mlsv1_)hZ{Dw!vX9}2GGlCpDg5Cr@f@>jte zkbY*`8$$5k$b)<(0(VI*z0Q|s{*;nB;e>mY2;B#Wn6QLaG7cfkuop~hGxu+Hra%RF zWtfu))a^q6CSg-=!a(^y6fvYsk25tI&Ttzy?)UVf$2lAuypH+JnHAjAw#Mw{ZtGV- z&k^TGS5fsqV+_UaJ_!&s)J1vmkO{jU~qLq&eDke?^TLE+?LD?h8duu$z2I6 zoP0*73}avzmaHQb3|BHuS&lYmY`Sn)ATYzuXzr=hty^UZZ^q`hjEeIj2|bOCtEi-m zk4#Z133B~y9B)|O4SUwL&scgoh-NP=v@%R}G=mg6N=kUJ_mDHUmN<>m(73Vfv24(yFdZqc1ClDC(+G^l4$e zSC&h42T$l=40b0MI~3Z#e7LP=o9BQDb>oeeDT~6toG}|W-=-&POg032d#@sR zi*#?c%kl4T6f+DrLuO~&`OPbeXUag-_))@WN1dtbZzAcGN^K=KDajia%w#6kL>l2o zsr4r*$nVOpPtl0bykfG4k&>Tb$&e^-8F-t7w}McO{LUtxhRDQ6m6}|YkC^$l^J~I| zqC9wsbyowtHS0QnO)q*2V$I#%+=%R(b&8LK`Jv`8L*H^wW4I=lEMbX2UGvWC-GSrv zd8ex$B0&VXFBwlbIW@7Lk)}qqK1h)Mnmg#Lw~Y{oLqHlvlgWHG$YE!OM}zPiHj={n zlC~O=w+VwzU83O=ky(}Mjw}*Fhi;Q9K)hn~(K;Q^KL{_*C~I=4wKo2sx2dcmjl`1n>8))WvMMsD0lq3dJM5`5mnqPLY{q^UL3KNqaO-7TT!~72| 
zmpXdiVeoSpwCb83GBn{31vbeQJW}FkzcG{&&Ccen1|sNPKLjH=}Jj+QjroacrzkYJ=c%f(k(;@9Zyo#DOK{j^7Ufctp% z-s0$}qt$Av=~yb=6`SOHN@o`KZ9-S($NlTNP3|{t1lhE}Og$6pDjQgsTZ@+rn{+^0nXjS=u6#ack)a+yd>%)QvC!Wh!cU0N|Q z6_;JF=#tP=L$2bQ(%9$7vt{K_B1Zk0=tu*wao?$1;pyiJ2WUC36lKjXdpr@!hvGHn z#HWS~-6+95nr^N$?81;<4q10babGG=d!Gf!3H0+?B56wf-`+b|aOh|?e=~pgs&Wo^ zQfRtYmCvwUtyDgP_pyjRrT&!qP~4a1Ft`8ab&QNQ#tAZArN*i76ZqgUmIlw+!q(cfyRpnt%M35ZKRoLOB2B6z~0*jRjCWE=FQvn_2nG? zwC~*4)swrPegK^py8cF!PnN?{EujLv8FY*IBjh!^D7-80hf{A8OE*NQ6{eCHm@py& zN#!FI2Nx;rB`(5Iw~{a;5=dwONxJYP7ZMua{<6-%Uj-Cd^5_~VafO!$u8_-w6!-r_ z7B@Nit1zFF{A{8+LL)^QzvC@eal4Vex}xbzP*Q(ZtmZ z>hdSjIBZ;a3Oa_?lPtN!1Y;x2oviDU2yWir%hew>Xqf}J2X3xjo3vI=aiN>Ia&D?x z+m%9diK#9X{eBB?x>FZQXlU$e$XCODJXbC~BYBW9d_UGrSpRgU+V587}I*05sS?b>}hS-?SJ9ChI>dC;ey5ik|Q7LwvZ zkzf1goBxl!_l~E!5C6xNBq<7|5Gq21hP{(AvRAf3LiWlyXh<26gv^F5BYRU4C6sl_ zIQAYVaqRKC-bbqY?#}1)_&y%L?_Xb!+kH4W@9}zHuj_hV&uft+F{}(z0?6GMsU?9+ zvmO|I5eVqvery#Nk|qo4JUf1a`o>#-zIrP$Z$|JG-ii*J+!1;pD7BV)A0Xjag9&) zAHP5p1`khF%uNYa?n}_&PUPP(_@D|EH-+ z>&!i5px?xN8>>B2`p#X}WT9%siaz<&4BFp?^C{yeR(pN|HzzffWQG_?a!UU2fdjHr z@2p1-Q`1G$`>Gc%Zfo&e9ZFnJ0)v)@y~UDeV8B8mcCC7@WwqF;8hbliifTsAy+aU{kAk{6De zdU7X*z8I?q}xszrsr#+Ya zq4g^GrF$AO7SjIFRNbs-&11b_%D~+2IIKESpn#fD79+AnQWZH_j$Qy8nLLsaNw+>a zPyc7{3IeY?VZWw13*PvIdGyd=`AS5LhoGx19R(!BA|t7hQKUfcH6c$gCEGNbAqvW5 z_wSq1+L4m1dST?54{X#U>!YLA+a8PBky@RnR1CN0E;twfIlbVhom~4W%>j+Y>_EFB z&DKE^Pl@y#H~HC=u$8;C(xSH8`5mNf;@uB`r~V^zo%T&4MMDX0x`87=*I`PL{zs%f zBvy;e0!J@ECvokzA~h_%C%VlggTb>jPt8&p;>q~zwHhRv1fktJaA9Hg>{EjktbrLk z4P+6Nd)pub+G4=wAH8>e^e~)Sa(feg^8F9W4H?M|nO04jiKrIH3|vWq@l$;?p}zCT zIMQ6Zg5Sfq>pfVJQSSI|ocQ_45MhKU-cNe+S3geB_5=jSelgqxH*LRlo4O7idcy<# zk%!k=8}GM#wpYxXxjp{Tk`jU<51y_3>Pp!l=BXVh!ut-JznfvXn>dlAPaH{jha5b* zm{CaeY0`pLM*6o*w2LJ$*0Xqcnz}CxP(nS} z{`EPxZEoir;NfVxt!1JYUCME3y5K_rXT0Z!npM)4w!rG8LJSZY>VqOgt*bm3Vrz~w zNNa`ev?;4yqmDBlKg(6`AD~|O{0dB7B;+wp1X|#5k>!&&5R=*N=zVl9eF9`9KnntMnCnQPc*ZulVsOR}MSAX9L|m($=J zH36EV$c~H9>A(B1Z)AO;SHw!XkP#6`dHp&&G`H%5725w6cyj{7Uxo0%anz&c0$L%t 
z8tVU9zK$jR{1%kq8&Yq?kK#^(_M!1U^iUcly?SO%5mzZC*4!EIY%#%w zHU|*>fH^o=4M(kxbfh0$TAM=UuBET|8gX$w0zUQ9c0X_C5oj&b%%899MmB+DoAC_T ztI~PKsuo@|ewEpe!grc5f&^m_1tjv5I;lN?7ZFXQK-;chqb)RIh&M(Da3Nh%Ru_29E)NF+_xGd9^U;k^Ml|%Fp1?Ax!}uifVEX zo3{0A!_${&7Q~LPRmU5+U8$#Zh3}IqtHk9qPmxIu$_8VgYSHENjls-?u;ILPZeD}- zI}_Sv>u}sNXY$m6J`|rbD+_N6ZIp=nMlV8 zMUcQ|9nXtI;5q(l5C7VCF4G@c%VAv$4iP8XSRxUe^%1g?GP>d`zZ{~~$hVnzhlp(T z9(Y5#(QvE3M!ZnHCHidN5DjTH2Zea0psEUKR6)4Pc^@gpMg~)^M{iE~W2tyi^F3bB z9{#R?W;zqJpoMCI)KX6yvf0QYY}scJ3dl|EMmInS1HLV~Izv7^p~afV(AqqtS6o`= z3;+YmfA@uW2o%-;JvqB z^lI5gXz{lmRr^QI3jx87sI}z=MVL9r0_E_pCDkN&9L*y_Io>n-6IMKgTS7+0;=9Le z!M$KW!AOerO4PU+=A<%Eyr`MIU=i>o*c(-+dp_+4k3UOndlfgv>` z!!JB`_NJc&`UEct8I52d<*t zOJJsJ!Os2)-N6F(%FydQg?xd>sIrS0VzaIn%M;i62cCXg0A5I26fnP#4Hk}J_y}!- zU{+Sx6B&dpA^XLa5ZI_g=1<{uEzGx)lb7Zy2n!eIJX!n+83kc;HVE5CcxiZjVB8B(H)LJG+s>;?mmSr2UY zd-}7$h--y!?nq`W#EA>V7Q66BbMlIeFcPnZY4blv8zG5Pe^UwZi9(QRQOwZN@$oMu z#0V$xvycNu;qX9B0MxBKFP{8Bli<7~au&PRj89~r>e1VJfCOsGZ5+XQN0QddmO3A~ z4m@ty-9_uo)3t;#17B=?XfPI;7;>qEU&nLS^abDv3M5%e)Jo#ao>N-VzgP=b@(v&DL*;~3OOVNa`Wx6dHh%lhGV|~C|tNYr>p!h z8OFvU&4rPZ%SY3!e35xHUw^>(2V#EaktGk$8X8X0H2!#CduX$^moop(P$r1&YL7(bqZig zE-PyXrqa<-VOQ!F!%9`57?mY#JMMQs2?8(8L;?n%tRdmyM}7s*a*hOwa1Gb#nR)2| zx0D2aN4?br;|kI3e~DT(GX-shPi;Y8!7G*8POi5 zAWKYw^uk!w;CCi^FL8jt`0)$g0uKxR0hZ>&4+jPgT^_|}!XAD~c}8shwb>i~pmWes z6iT{@84I?TpH7HF>R2zRtG^;jg=tL|HMbbbPH$KJfpK9mnDL@~1#tw{+pd7p`_P<` zv{9XVcb`PCn(Cv}xVSGq@>KicVRdv+D9}{0-nRpxz7uI@)dX=X^ePOuctDSuTD)nf z{X^u}vzNG|AV&;6N#43wJL<9^vRsOY-4t56p#Z*~r%viX*kn$uD{%a_UUC z?8vL~%Xa7sIEd)SS*#dV`+%TYA?ud3?xGUuyniLl*sfmWf9&^NYe!7SN8R{1rjgGV z#F}5ZNX)nQ7{K8BJbk3g)gsqU3Va!ZhM`M+rPx9M!yFaGW$&uw@})Pc82D znB+I;*s0q-Uw==!rKPZ~JQg%tNrsXJuX&AKtVD7$GCLe%dCvw1DFr=B)*4^Ao0b&8 zK59Y7+N>-QkF$Q@QW<&Mn3RFCY(wXKXJ^MfDi!;jC(?dKM7VQGIDcPP!n?u%!5_pv zzW+Bc6a!%_kmhicO5$_@mw;7ARqV*ruE;N$uNT}7y9K{KV|yjVs^G95WQ9kZHq9AY zVJ&0V6@iGFG|bT`{dV@5ukZYmJ!&wQt5K31OfH_wudE*P2x)hgpRWaF{^1T=8_eJQ zb8yct5vINOO$-gi-@m64*~aPk_Sf@U*ir#?jRxeJ*@u$hnZ`!fOJcQ 
zw;D*;FQB|@3m-3?)6P!l_BH4e>T7w8ggOoql*p~yRzUTsWmy>9m!Bm6P&D8Wr2R~R zEt|3dALx20g4n&AWv17U@`@|D^saG5Njr>Z<|~FOXYX#LhV2)~M0be=@q2u`-2_P~ zPz7EN&3n|4zK!^vy-4Rqkcu}RYX6KNYUA7Zw3V+36WNNonOCctuc6$U(ZL~cP}Ms& z*;t$lHO#naXOBtO^px_XMu=Dit+!sjfA=@jwTwdtJoXuGQ-79i`4XN0h+(Y9{ zn9*?#e|>(Rx7B6+x$v=jh?GG~98KtT3YC(n0PGbOET7K32~nUQgn3UOo&;L&z4L&`7AR1CmjNUB$iZbj zN+)-lUP8$Ez_5FWKuguJNoeCPhkdZHIE94s=GOX_5a($T=hX88DH{x}gfaA0f%*3> zcKqV*KtFeMd_#1axnLT7TMRk9OS^a3}_(*jY$Pe%j$N1KyL&9Cu4d-H~>`qo|O zyZR4$_r-Ru9pAQAwDdG7M&rj|o|O?Tk=sRF#@7q<4pB9PqXKBUb0MtOae)D+x`%ih zT^A)cB)oukJ3LigjeP4a;@Yhnnjx?Mox=l!MS^TV=cv|1kW{RF8#=hdw1ft*lC5QDb_+D=`&nuW!GpLvv7{1^7}brm_Q2X;WBD9V(F9xs+qwq24st(|5*$8S&NUMNOw3?;D`y1f?lVJq=&N3kh8THa-J zx!dGveNbQ_Uoh@#@-Q?RN>-s!_lcift?dfdBN}s|w?=ikKD1n50JXM}Qi_`xgRTJ% z;TX2-8zWanpbS)=Tyd^1t(QjPH`9%*LU+n(HSyGhtN!NZ>C3olkXym&ZM);zme=RJ zs0sGeDIs^KSU8+tu2(U4p0T3r`{Rr18dsrV5Remu1iF*!SxQ3}7A+08E!bu}T=Lf> zUx(anj+uLxsiE~TBp|Rr*youRx&bEH|8u~Ie;hz?kfk3(Nys!=U^~OCxo9MJ(_^)! z!DlME*Zg-KyMED<;-undw%yo55cWcxO8m@k2rKlo+Ikf5M-J&F)x`5+PK|MA$}z|9 z!e~pFZALOjvlrpa&Liz)A^St3e6g#%@#UB)zG~UIyTS!ESImO@z#e;AclY_mxiUnJ zz}yo;*7eEV0=nivzasHUBt5zqyRhUiMs0;NN!oMUH~r8r(iJad1nJt_a^NR!s|hhTy~glLJEnc)`ro zGYbu>y!2|O4T5W(Cu8giHo&;fTN7jm*k5K4f^rdEoiVDuEY)M<3#sYQ;8McTu~X2| zyke>Z((<8Ouo7!X95_6WpFH{a^!dN^$oj1i5Tu(aNQh)0qWi{m%R2MK268~)B?MG@ z{Cd+0t`+PL1+LV!7Klw}g7U_{S$MPe!HkGxUb@UYy`dMAQi7=puiY@O& zAFV~5eb%2;Eu(UEu*#0#ck2qZEP3)3Rc~+voBREEL}603>-=RnC6kO(Z4W2I`OCDG ztak&^b_KP1qA!ymF9b$AIDYT{G)GzeK>jl%B|nadiL>^YzEYUfgd@~s`L`~>-=DZA zU2|pb#PDa))Bzh<6JQhIegCIo3y;)b4ALd2iPDB9*o}}Ap+_nN6DNMt5}I^EW)vn$ z9zUGfb%VR=i)H|#1wVMvA^nH#ZHQm%1fRsVPla>E`a@5;{B0AZKrzwt@cj<=1vHEQ zLJ5_`f(6oVW5C9Y%}g6AS!7gzlZC7Lt*Rq3+bv#jh#xrVP{jdk6AW2iTt zHoSJ9`rV3he(SS@!V$x5yY@@8w61EBsL9tIXlX78JG&eBZ)J6Lg=RGC?e7=<-Y(70 z`x8)C#oKG2A~R492@ikg5+#G%#o{|&{25^8#n=^;uPy0TTWlYHrA6jk2%`&TzSo+n z8+g9SrJJlZsBealp2ghcvkV5Lv4=F%zxUtz&DcDF1&3Mz*Jd;%fk8`vq{N|HkfM;3 z$t?vPZ!elmGz>QFM&^E3 zHSf%I$k~|w`x_Ej@96d`;q+1Xhn_2?9#98exLWuv{fsWYPqRKUk|62XWpMZJ(f-45 
z|9k|}H0%A*X6E~hkng*~t4LOb`y}2mdd5ly&&XV2FV%MW=!gVnIXl>n>01#J>(5?h zFBt1he~e6mp;0ipWusaAyNz*PuPSg2cb)$uc1hQJ;a`U1#{~xegDWkHv+JAr(F;sV zhynh_>+RG!8IwpywgVw!@B@8?`Y=h=uHb@0)fCdFq^YP|zGT2=?aW#%(v1wM)Yxdu zyB;*|k=^6Bo{|njSMdD@z(YU(Ha;0udCX65~SV84El z$Z;Igf<26{Vn6;3s1-n;c$ozyWB4-?W)Hl~T%fJAXQQw5uQUDl>A*17+2aK4m@tEwr!n+9y}Wvm6Hcj- zIll&1?Z*%f=Y%=`^$OLW4pO5(`eygDjcH|n9G4&y;)_U=Z)n|1-dDUWUGK;ApdU0V zbj5$sR9e3#4|A-baHF64~oK{;I<;`iYvh#(Cf zprEw@viNSXIL3AT`ryV-6>G7jLV?-I9?SfzW!W7kNsU;R>cfI%DWp*Qq;@1H-IHb0 zI!+wuq{aax6w+zdvMY@7@K!+5uUmt}^fDeCxycW;_<# z%?3Y?rMqaUCWJUF71usy^z`&m%%c?lP8KE5?Wk>D*@Ll|i=k1sWRiibLcuCr!ZEsZ z)Dm~;{FT>KLlTQ7=lG+Tb{|l>#ICW2EUak{na}tgFaC{n{`V{6zBjik2e4QrEG99> z*?Q-Dv1GM)*{ARP*#@%*wn#_k+^H0CS$Y(ab2+%^%y4jTmMSe7pPljE6WUbCgLRdS zIa~$2*=ykwO*RE>@fLEyyp~1d@N;AQiYjJroKDq|eLRcLK8uq3uwVM5$l$INV#6Ot zv&Ht`m$C7?IL&w`ZEE@IkrJYs-HkM4X#4YudJl@Du88Yuzv)SkFOEBS`pAxZwT9FA zb&9sG=5Kl;TCb-}1{BE!V^DKZ4R3mmYwY^;tw%})IeY9eR1R)f1D=vhcFTU=#qvL;MR6IgQ_FhJg zzaw};?o7`R2^%Nhi5{CU&MJ!vuhV82YTZckFw}6iO#w*97)*=`X4*}6Br3X* zACP<}C&#aQ)17Dr6FjG)YXy=jx>^uE+FBPjxFTM^>54S$_(sY|M<%m}j93w_-GV3m zv4XB(k0S=;55W4p^Zxvi=_?ZT6hSSt|Gwj2zl&cTaWds=Ij&wi)MFDUOp(A6!z#|N zOLmT*PB@;0&Z;XJm7u|d;&{%#!zYEFyltt2Z}_Gs&Noee(Y@?8pqu_cW*q&F+bV>dhB z#~_;m^Omh7KDEz4G@T?ilbVJvcE*1AC+D_+32PZl;0;E0gGv-1Pmhgr*P)kN2zMqH z2*()=Ek>j0p=*-lU8=)$oWvk(UrGMH ziQ*bZMN&~TWSXy}rDPhPNxlL(XTKDbl;p2*#Pj|x5{Q={19`8^p#Sw8{`FH(VmuF> z)t6hBBg!p3e5Vtb>8u7mu$C@O7UqoaRJlBwp^Dm1=4@XW*^(&3Ak(O09gWlC*Zur_ zmFDF08lLgpjeU>Z3_!7hTBFwu2L-bE-LS*6!E(8XRk<|Lkj?e_B~np`g_cT~Gu1fv z*ks2>rX8hzARYYt1&W%C&r+Mj`woBlW$Pe%<&(;AYqiutiot5>x{N(G(v({_n(h4S zhLB%fRGfDE_~Bxt?p_QEwJA7M?L1rwv~fWPNbEoZHt{B5L!66&jpcc+Lldx*GoNzf z%p2u`X9gop-S?9XF|u$g9{#&wAP5=)#oZsrLpS+sihRVb!z2SQv_{o^f_870O1=e< zhH>494sEk3z{ad7#8=A61v_D9^*vN*$4H!Z$U_V3^JcB3Hh&+GqA^`<8%YUWO* zll9O4}BKo}fJympNI?b*66VW|Yt0EeU~m zOwS%zHD4S48?vBsA{*LptW{o&*Gfe3ud{J2EUHYFXeNR`O#P((EEN|`5da>1Zn%2tcw$PUR~J~sch^@R?yS&w*<$f&5P-GYC-b^8!H zlTSMEn|see3P=zV=cjADyXi%Tfjr+IV=;$~sTze?ref#n;k5kv1QWiXos=Fi-EjFI 
zs*aDru6zhc!v?WT&z_ON=A5UQ-99qaD8jWM4EC;T zAcpXOW8A;rdH$l5i@IztHC812L~f6bnDE-|=U1LH$qY4AyG8TCJCrP;N`-be@%OdI z^d8*h*5$2CGLB<|M?n{DwswpSvYPz5XWxO~pQj)TdkoUY5+7-x;9TD z#(nx?#2gWNu#_3Qz(L3PNldA8a{QPK&BKnmgydm&Mw zI@is4C(#a3+ulZ!j3(Hnb0&#gj*`i=Ofr$YdQ*bdU@hrgn8uI0-R#(W!6es>*<^qG zBRDwhjlsH;BW)~kiU5!%(_AvZ;F$T=%3Jd52GaMq_TJxW)<)ca@qf|<-S>ZN+r$h( zv)YFz@Y@zDf~Xt&-%~f0aMieMzHI^iK7@_oIJW(#nI!R9GFwu_FF?lKmcb4qMC|R?lXO<&Ur5a`4FAJz_~&M0*3bGm9^@aThX< zmAWJNp|hA8FxlqYNfAMp=#1jyoEWz~mtg-2Itf3U9_nk#RV*l~wGAs(khA3P=olJ0 z{`TMz8L(Xgj+o}Vm8fOglPzhHSVUha3zp=-4yLn{4hA!WI0*l(Hl&fUsf8ni46XY{&iDsmP}mXA!2vxv2EB(A6XCWt5}Y_6MPKnia#(+Mq#=w4S< zN-}3`>1#HH3zM6xA#f66z|+XbHjCo##Hz|Sgz#O^E5!WvPbiu<-j&hRa{>paEuXvz zjM&AA!K1`SH$x1G2zb6z`C1j8f@}S7a_&-VLcusJb^x8$Ilui<31(tJr-_U=b$+yH zG^~>9aY9SNpb2xuchjid0V{mZhChT|S0T5yId7G|oBC-!{S7%Bdn-CptR%UQII9)u+pWluZ!QJp#wlUX*?|p`Ni+Q(eY&X{<2luW6*U$CQ zfTPa9x24B3seWJQk-GiXqwsx9sG%nBXoic+3U(}Gbq*KCRM@L`T;pSxr7PSwsP32M ztq%IrPK-mFE|%#_OmdKoHU+ncR`5uW`FmIm;PP(6H$8Sc%iW3k zu|pYFkBZIE@AX7=tZ=<8JU#bGjw>-vIY-6V6i}tG_p5BGUj38H6#&t8)D2i^fJ@3G zV_oXl3N};9Z_(oIy(4%lra6g{LIllh9pzp!`LGSl)9rD=LOGq)ufdmVS?I`;r8WTt zj1AI3*f%{_#rSkCaMCHa^s4VWG*N6*VAdqd%JkOOQs~YZaMwn2zh)#G-;jB{i|fg_ zu(#w+3x}o8J4tt{Vy)fS&#UxGLRW!ruV;$wp)siWaP8#_x7;Dw6ZhAC?6UdI^qazb&$CSQW z9LorQiP_aJzUh+s17$S!^?bu3Xd4GJPXLfGB0TBh69Tv#HE}O6NQc2lC>}i@)$;xk zC%k!aR*1}}xzue-r?Wwl=<0k)o^{}>LFg{ZeVUI!oL79){V|M5D&B8x0Q3~U42@{X zz53{*bevr5G35oYVz>z%Y1GnwyGY(^N)Ijc6*Ba_n^VJ<=JeJVGb@@POKfwe;lGeK z31O|-@BCWE-1o=Y>M7gIKZais)uHzq58U*@p9F45l+-6x_hpuT;oLr%Mz1Z1^GHoI z`liL?T7) zWiWamgzeIKBYJux@49F)1}xr6%ST;0JMe)>zmI+kfgSB;w_g}zaBVLrV%O}dNaUNX z-c53ZHYaoFjfH+5J0|{0)$*h0Q1C0vTcTX2>rT^%6n){bCL~~-+k_n@2pap_9#*=! 
z)k7u$XoYmu#6I6?QFa2xu+8=Th9>P9lUc{iDeARPdfImo)Wigzf3VS-?N3M9>x`T= zKOF(1HEGT0-{TGhjEU^3$Ha5Z1=KU2#-8_QeIB&OXFHkji=aJX>vH@0HR2#$g5?~o z<7;5$RA?=E%ualDaFCE7Ywu=DEB)f%9C2TDSi#_cKfNBuds9x$TgxklMlk0I#OpT$ z90X=m^mLp+CA-dv#+SMoq4GM8t8eAGda>~njKrI~u*uFL7y)AV+ zQ?gJUJed6D%O&qScap?!ZcR~ob}L0AEv4PMTn{&f)>Kkb(y->mU*>=PFYX}1Ml$N$ z_w#F7UYO=FYh+r%p$+thif>Fk=hQDr@?2fGVA1;Wv!($mJ}78Uu36LhXd!2fh3^BW zZGSeo`s2tIe9P-Doxgk}F93DkQFMlZZ;uV;9E>i3v@7$^&%H68YbdD;7fdTO{<`DE z-{E;VV^Ydv7e7%d3P0;Cc20EDdiCm61o!1z^e+8-4~e>eA=de8NRZ&kZA7aF#ZypF z%zz;rTbb+9bayXN3@f!jbLLuiVOXBo|8eyjZv-90n^~>ViTtkCk55i#m|?uU2==jS zTr{Z-ReybpC}Ttjrl6~$)AbyOKRt7}&b;x-KVFR=c)i5HyFt#7MseX0NcxGINlFrZ?*dfI+!s`GJ7}#6B#hqq zKdkJLb}FhJXsmg7R5kbF#>M>cT7xH4ye{%xezPs3iH^+Yd;m@Sr%#{GoLH(C!ZsTP ziVQj~F5<>=;h~*5@%NC$DSRx9QFWrDP<5;|ZCY^UH8F$QL|blC?t>T+w-nf1`iY)X zRn#NeqJLb(#v8^|i2V>xh~TjZMVpcNNHz#hUo1lJrKeB#Vet4eHUk$i^2VEVM4x=) zBY(WkuU|-0fiCBXOPLj8Gp@MNWV?Y6qFS0hWQR%=`~m`6o=f?;XZ@Y#|Hiz28DFzZ z{1l=E2OJj($%VVK9Urr)o_8E=o)SdCzliEk|8ee(H|EzLdDBase9o$o09wA;kwq14 z%)0>EeUy}aO}M{GqPQy_`JAe|yPLK|Ayv09OE!eHrwPY%?Nh~APErypXs)WnvA@zi zUNc7!$J*HU&Yf*|O_bPmqc1?zz_c!cXWxMX!;Xs}!R}=W8yEV=t2Fugt@@Kq$nozo zt-XHRRj}Jsqx3XeIS^T1UM?HT@#OH>3B95m zwAsUl4+Whkb??6X`$KBP6W&2=4;aKI9kqMjcn=JPoeX1^^4l+OyZFke`jm0$#)cye`^Kb_irN`BJe3UmiQ5@$!;ylWH61AAh~^M$L5uF2JW&mt8WNz7I9TULM;` zN-Af&=uJv}>U!tK-~I6#r2XTAQ@v6dO=l$~PhT(9)Nzu1?fA!^ZoCon6|a4t^nQ5E zuAfLmOli6L5u(^%q&LgJ3A&xCy^SJwx z@7S^P_%}zm%9pc>9LJpGYt0+u?xW4tmZt-kS60-sjNhwSzu*#}BdfX)qkh&s$~qL{ zP#&*|90YrFuIRlI)kn@K24Ue}D7+_Ve2 zH%(|F@mwK@mGrzm7iq}tb5RVeq!f;~&S`Ofch!1I$JDzlt`(F-$@JmYLv-!d0mi*U z#2;>*i)K{b?_Z+7pOyGNznIc~rqONpvkd$H7nfk{V`sBGZ@aLxWC03KD{r?85nj=& zE)R*VDZ9J7XEo8gf0HPkt`|C(r5{!s4_eJN8k23RSo-Y^DLUt6$kyHmbOJ|UYh9`G zjd!KB_cN+~AQPNkJXyqnp)lt8uyxmmtxO-bP$=&=RNBv|Ocr!6i{N$^!F@Zgb0I_r zLWrb82$`O9R6OMne@@4^*JR6flPwej1ov4`*hK32YN<@vU4 zyU`FI;OAGHiPN|Idh_PZgLzhe$m+%$(kIrnjx^i7x&b>A5cB|1b8_UMi}7 z$F_mmF#LYv-6XV}g%6>MC$K6#^+KEN-MjZ-9wzlfvEl1m|I168XpEObK11}@+sDWB 
zOVM9V0Aud@#(l2z?5uubw1EAw%2qc3eGq)h(c8G^KVE~r(viTIRw*W-+eh(ag$I#Q5~X{lC3|69{8s*u2ca| z9IFt;52hLbY5iCZfc=(qz4rfwsgBsbni%Qh%J2?$%^43555$*1kCZQe z_k$ZBdgKV$xpOQBz;&l{7fMuX6P!h{uI<35c{i2=cmuV%Ff^iH4W?`c5I<EdWATRr8014+D=&|EH6 zBSo(`@7AqbFU=AaV_U)YoUv>>V~Hsds2XTaQkLxUSW0W3%YFk&UaidC1yM>PP8%n% z%bTCSv9K_uz)x_$Y-v(RKAc;kuR2f@JeFjZjX%P_8q>UE-{OVE!?&#FZ#|Vb*fs`D zC0n*^Ij$0S56ktNTK_RN0d?*lTDNA8{{;dQ zg1e<07o!E$v1Lo?0PT3}`lWfyn_2zx$q4Lt>!JLE5tST$!-vB>aFr~oa(LO==?!f7+X(GO|{1M_4YEY(30IPsn*|rB-pIHT411!VY{Z@Okd{G?j zPfnX=-LMFH9PBn?dGVrueT+zIKE_oJy!7|IC$@P-;e#VVCxquZ>?4F-vcT5}oiriZ zzI}8gF}4+UWNMa4?V{mWqZKd(aP2PAOC(qwWWB%r{V`sh(@A!AyYBfFzPxqxU=u4k`h`7Z(hNF*J&lNK4#o{z%7Hm$e9SHSTNSmz@DtOR!`nClWvz6LjE&>sJjeW+5*01L#snWY z8XIPNh$N02uXo#%bn?@$HpHYkV_YtPM9AXiB+s601{G2lD`tRNnpL;4vDtooAh$io z>=6(dEF)bu{7s(@GB7Bv%V2lJSD~{9&plcy)a3*WD;F%f{7tehMnuRje}`6;eZ^bQ zP&|&KYIWf!ZwD0!zoqK~9WNy%HS!`*RAp^-g$K4G5K!!_tkSf6R!mHszpnLGLL)Z9LfBhEFm>L2nO}y--CY^?&oktTQt%CdY6{YXsbGxLC z$x&akO#|aR89wyJ>=l>A5m@dhjCC2wox%F;V7hha`w_Rb8E{H7z#(TlbxH*eC!DjJ zK>E#qofbMMisk$ErQA3D?;j!j&%5}3<=NF?V1vf$__~_!kWeCeL#9KnpJic^Z8g4_c1jSgZ(vOp#YfG^}dq@=<(Hmw^CMZOwO!ZY;EX_{1;#xFG?$;Wr(4eoq zefQ2BQ{qzOzL4dpce4X;2ByXBjI;#|2+;KpE4V=Q8>lK|3p{00pKQ0 zc#OThy;Kww&$kloOgihm69~o#tMV16PN#Ou4`Eye8rFFOfDRX7%izFrn#1DS@zcS7 zzJY;Z5HN$?2J$5v#+f!0Fu6!ck}GN&d)+*nXSz8ohDCQPIWWqrgQd1fOx8xyECS5oYJx{p2G&>F|?A zRD1UxmN{-vR)WxiB8$T>c9M|vEC=~LF`}iUe1T9+KznJ!CSuaE3}`GTh_<~q&;Ucd9?18=j>P07VvWNr+3;BH&~Rzc4Oz|)j()a&;kx%Q;D{1 zO9JJ5TstQT$g?>2h3{;htIJviEj6sOOLyv{g~q}RJYMNvGG2Th_m(%oyd?!MblyAn z6$I9Js|Y60VM_w~$O*W|QIBFQw-27Kr!9O`iZc8delkK7RfGU4kQ4)o6-Aep++|EM zrComi1M73=?zANAp`bV-V>RIya=)VrlLrAf=QOSC^VTI($RPB~GH~EG9?{=`y1KSJ zKNTDYu9zGsyfV>NU{Otqx*hVivn1nH(qD)M0DarqoGo zm$Tx~ad5~1Cwd<>b)xS+_R(l(j4}{t**D}Keoi;heNowy`QaUVM#=NU7+2rvbDaHO zLk~d7E6DQ-kh4H{L_IhplmNe~r>Mygk?vRPEOG>kB&$WBaueZky>=N|=^j0DWCmzl zi?$py!5iP6;<^(Gw;cr~`aPj}*V7lbIp$<0=VOIkQ~(-+;#b$zjq=@pBBi^#o2s0U zaZY-YkN(i1G(ZQ;OiY)0#9|1o1TOT`$5emsD75#zckdH$0uJB%Wf1PEwH=30rpMJ& 
zl3;VCXlBH0B_zx?&+ajLzq^djx@Bf=4xOa@0#U;yyTdbmNC9SP!JPW?7}rs@{IT=> z^a*t5E?ltq-v3@Z=i0+O>n_fbu07n-$E@sExvje`TVFl_+f~VPmeVfnc9K76C-r0% zDX{EnHa6U7|H9_hG@V@FsS#WXaIkr#De+Ns#*L}o6riMFwO#_r-|70EY+_d-@nKt+ zu;i{ssY4mMh0ozgV0pfOv!Z|4s>Cq5g>mkMB85t$UAJAK1K0@*?XzcZA+%iUyK^oz zBEfD!I>B(r=ky}3!vK~Zy*=5GdgaO$1oupQ&90u1a*CIItn%cPNkMFlw{QbVqC#|I z8EPp96xAUSS2YL`BqI8OiVE2X$>32Uko>q>(o3n_j1cgM0^tRf>Y`*fv=|MQc#2u-=NL!#bUX_YMFg1pMjoB1w-RH{;C>m`@_BCeP7BJ25Axd14n%n;y@QE z!j$fe#f-M5(GK0wJr@7!`s}GaK<3IkI+hGFp;b~UP(9%egySYi(2mH~lqqcLoU zSifkoGq!cIvzP~1X{mGPE}??&y6`@~S{H$+D1G-SsmH9!XP-TLrj=7AUXPEliEls5 z#A*Hf@>X#>;jz0Qpn63PGN8hno=N&p(bA@Zxn*Ny6o1UFsd4F2xK%;VU8dWtLG~_W zPWtnDq_;m3!v(PK!=X?10Z z$7%c$Kw^Ec4s(m8-Q9+W;3|qP#35$_lKEIR@40ymv#3b-owy6OG4S8F7&g!hRE@nR{p7OO-9r!NhwZ8MzYe;JC)ia76e4 z?;R`NMNoKq2)hW#TF`#bA0#}@^%-Du#Nde5#0xh}6O+?71Js+Tr#6bZAzbp>#fosj@%Do!_>f|_0gdz)j`DF{J11wfOwkz&Ha zx)5Y&(R1#M2MH}C5>ZmH^QnvELn1wkGfj$)BaO^s%sdGJ@9&gLujR4ygEMnNC$AZH z<8y#^A4VMP6rbGw+8$iwj7^7NSAobU&rV15$XC-?W+q@#l0Dn*xWBEgrlcV5KOBQw z0np<(`0-%crB`Pm7}f-efeMxuoW-#hb;f>%)rEJFcwobilb;b4IA&h~!4XK}C=(1D?z z5EzP3Q$xFhOZ<7T?-b?2ICJ%{gm>`s0?-A1N90=tVTx9E0vL%$k0Z$$ibG#r2X6;4 z2oSf+z4rOYK8{P=etG*3vO(ftLbOCMtW`V+;MKOgBYj^g-*7?fGox%#mXw;a`QgzE zFHIT7rw?+5)tU!#YGuVm@L8v8q-ks24DQg}`W~O{z(}Zff~j6OTvA$h(h$4t{(}b^ zpv7++edlgakpFNNW5PuW?jgD@*CO{y)y|cvY6isKwGfzFHPq4aTbJTAfC*6swWtb2 z{yqwd;K#*V7_@TDAA78=IHRWP_DnO{z8lqj8+ht`6OAiGTZKmEuFhM zFnBatxMBBfe&$guUMK*(%14}$0j95&mT%%_<7~Y>P`C&lKnQS9hY1xG6)!v7<8GT) z!+@|kF$gw^yUmWjswY>uav)(*z>vBKsC`T63*zGp` zEnwmFVcWv?++E?^gvo#cp3v4tIu5%}O|Cl&=?JTU=J2={!LLah zndP%%(LDB1crcGd()Nf&u@wpPx#G;noUKNQWZsK*bmf?- zB3J?mSzo)xN`9^1FlJ~DK%6;*8(|Jx{PPzt5UvbY440R6)(Z>*0$jA4k|^!y8;sta z{8&6*Eehrb*5xy7=o#R25Tz;Tv0TuS$GSqh+GoQLWW-T*u>65ys|7$8Q0>`MA##p0 z3$6npE-6_Q)Bw1tEA;mDrLB5{6E;S3grENs=zaviTNMuMM@#|5#4Z74*@^l=(bo+P zfv~@?0%O_+;k&{5XbaR;k2R291W*SAIF3EC;iq#Qi4SWour)$XOfkTeIJN8X>Ueeo zxYZYdaeZg5n3>P@6)RGA8{10v~5Z^OsA%#pm^zVpMqI-I8?{#bLQnra2?JH)I3XfN;70AhH6iBgaHBX 
znH>a3$^2c`;}bd>r>}lg0?A*P7&P(;o)mC_lJm@7*5E5)ki}x7qRfg zr3L<{9ty=dxsT0^G^rQZT%HuNduD+eNekiksM8fH(&a=H01fZ;a8M!>@&e%~-9C7H zXTW7h(=BX_u<|_YT`K&gfn+w!STj{a9(W05fU#*$eYP7u9Gii>olDf2m<-h@Aw}AcIYMSt>wnX6l8QrhM z`k|b9+CW3*!?sUcgRsgOn*qkbOF;3Bfq#imHQ>Od1K`b<$eShR*}Y__)AWQMU2O_IVY2DP_7Te*k{NP zNE>-y>RvG|q0o>w0@41_^0k!=eUAkb;5qHRRkc@JuzVN9w)VjB5dhP zU>{&0^?WNmJw>*f}T?x>^T{p_h7ig=_e+P01*y^(+ zxkwyb7xO{tfbm+oMEJnOmsgTN+n~q4X2XR9T>d^Dm@_9Oi`a5V+Gj`wU>S0o?wv;o zLcTSKE?o>>%DvUUdP@@Sm*rj7YtaY5E$cS{DbN{nGq=1y2aJCT@ICsjBg_E*PrW}C zG_A~1QP64|a23=4j_2;3JHwIIWy-(wbh(0>y66h7R?i^ke2h$tcQ6UQu03ellh1V7k<&(4TY7y2!$Z7nO z99OAv>ArxhAii+dHH5Gj%P2o%)q(duW$5cqO+z!QVUtz+LfxIacBz4%3zR-1vO%=+ z3<{iPU$-J?pu}Z5$7w=aW^#8pMzExW$4OQtRU@rAQIP>4m*c_mqu)qi#{yLv2RKG1 zSahME5`mEvhn~9JM15O~(-kZz8NPWmd6#g(l&z?PnF5ffq{F%DxJu!nK46 z^2U0)F3^7RF~V8^OHG72&dI8P$!h`ajcD?nWMm0xhr?v#qY-BYO!OFB zmes_^t2R!M*TZw=?GEp|ckT3oD#Mp*;Or>|Z#j?5uj_*C^EENpAV2M~8zSv|uzfwF|oufqLEPz+WRp?-sz=)czujuTpT zq2}};*rb4j-sAd-+hpGZ;n*Iwb?a6tK!Q#aZOIT$KCYVJ4@%tRaAY<^>`9|{*WL#+ zk2WO+P8JT)%j5_Q?HlB;29E*&WW*^GUxW|SqfVGnp;#)ESj_hz*}YpG)D(}yu#&#+ zAkq}Tfe|#`zjP@N7|d&}FSQU>i>FgGv~F4` z#xrluq|0?IO{4pUxHtite_owfh@SNiy2W0da)R}sb zgW0?MerQTLi|`Rg*%o+-*lhiUz&Mi|eKlmL=Dq%f8ypBl(5mlamcZyGfp$y=xaV(F z8=pjJ7?;3t2fE0B7Z<#Qd;SCiLbjXcjP&c*ueGwcq=g!61AsbvRZkK#_i#2-ykYf2 z|KvWv2uRch5mCpn*19;6t#)KcP>|r%`_~wgB#HJg~%hzd(_jkhaHj~F|l8X(NtXe z9j=5H0D?|=65G~|58VqCU6l6Qf1(jGHkp_w_M9ff)@`q|J>1AYIwjP>`au>e5{mrHLAwq_+jDmzsWa>5PcXW$_53Tx+~!AkxY zu{F{UgezwL^;Th-WpH2J3)|=X)nkpKTMp)qwO!J@qQJt;+(>#xFr+3O%-l7@UK?Nd zEa$QvG z!w)OV3%vFmmGRZ~vS3i)ZF>WPWC%u2n|`_l&yGS{bDz`Sl5#rUnZNMTx!!KCN;kH? 
zzO;jXj;+|C$&%I_7 zGK{3CHI7gJNi)~Nc}{~F(EU$zAQmD$*e@&`;&IpNi+2r)%mW7RwZO?dp;+EakhYo5 z-cz?+(bM|6M%)Yv;S{8VjoZUYRa8{o;_~(KT#^8fz)2L;9!K3My&$pu(4j-bS<81S zDiNhlO+zL)3Gig_TTCFVX#L&KQ}osexR5Pfu2`@rO%*i$Gllw#Pk|<^)I(Ao{(0$aPi-9Y_b!Tar(27HZ6m@&c%E) zMjVki@(O2vGqgrd&^SKdhKlelM%D7;jrEXe2V$lf11A!7-1jDOSJgBOsIYcd6e~ze?2Q-FD=@E&AiX4XowCxq^!B3{gp}(tKktZ%8ca^ zfi%rT!va0h9zz6do8ohzvo&rZ3eU!kF9YaVpoti^<~o||*zk#jFX>$dD91~#-fd;> zpUacz_ac>7(W7x!l$RDEvT=}NYzyc3)H(Um>FXTEq2lNx!e3rkkdkop+P-aDeqmHOkYUE=V*d zUCh%#A^(eg5%FjEnRbY%5LV|4le5?8;pa4avP>@n6~Iv82PN`(kDK~H=e!BL`vf;q z27@C%IvR9?0c?-*NvBr_ri_Pjp|djdugSm0#C`~w1#rq|aA+fTIj!i!AESM^sT z>r?G1UmfAH2#K#gaU6sXaeff52r^BkjnfJ9JGl`8Fr!6zj7kHxX|CO{x zmFun2md-*!b>OA>F`tzLKU(^Cc3mH1Tl)`3k<~Vdb{?Xy1U9eC)%kqKy_TQ-6+m@; zW@ckNI3w>d{RiCiSA35|Kc*<#?_sR;eeNBP+v-+DHH@%a4~AGuFp67-p~Mx%iXu2{ zNbF86NESISly2#BWfQ!hg`C?kH_`9h`(=e7I)i)-XJb0}M zn!tMWM426lk|;C#*>86`k6upMX6P{@PCA))6(dvr1>R4Vy=dwp6_X8euXx!MnX8Rv zXusAN_;LQyIh=#Nsx9{l1SiKjFQMPa365F6ChNK(k%C}IC%mUKx;lYNg`TZNK1I0Q zk4%b>It0uBa=%619zx0Oc`6#%3_h2`XVG3=J)kSLOr~_%C}lu%=Kpj!(wS9v+Qse& zFgI$hYas?-AP6CWfw^czKT*OntYjai)$8$ty&eK=I1LRU3&+ibv-~Y~#*>T+k_c^2 zO-srSFn;ksrny;?Z2|oR^Wr}zApLD%U~tqwiPPXR+8z&U7Qt#ZwgX~K+st&q(LI&0 zetNAb`FXDFUCqlyHhjt(Mw4-`BI-_24mf{nZO}oK@g(& zcmz|56NqU?uMnn|s*j*W(cJALZYS-3UK?S`1)3mq5#dCEmC+D>c(g;ka{4%^YhngU z(#v{vvXYyqCxIO9YN`)7oMjE8Mrx;x>?isG0Tcd*(ZI@8XDBX&KUzggcNNT}&1Lqu z1I6q!6bjE*r`q+Ye+~s7Z z5h2Nc>}@)*lM?Us2e9S2Z$2MOmS}95c$!9{=5)+0o4gSTBDz31|Bh&NCYB(uVvW*e zVQUv;DDl9o#PWx8;y5bg81nyN>6EXz?5qCp^ByhnI0eM#hsb(#=%n;I;})*mCjrDW zh00E)44FFhU9XeMZhrs}s8{7rAqEiy-4B>eewM!uA1X&7!aVJveV|JF#q?hOCUK333WzjxGm=t zNZkfPoe2Cbk*O=r!TXd+!o=A)x0Gcq5V?j?CQRi-n)P+bjT-AB0w2J&D zq^8O{7jMy@p@Z{G+~o7dB|vVSBX9cKsOVr)z;q-Kib6j-krY+1c7%^6x(@M5fWjOk zHju|-(sm!cYj;T5y#%fa((EvfE%QziMjj=sG9k{EYKDRTPfojc5x0;3T84JI(`}`@ z0gp(JEN;_&0rOP_7}9tJX$$(P1sYhACXu~glGxh)Cd#x@FaFPa zRk!KJwt#jg{S0~3+P{x%M`QmLKgvRCq;T77|IWP`@;(v}Q>~VafNk1yOpQ8aCg;d9 zjuyu9a|k!}s_N(a_4@o}_}0IQ{RqZwD~JhmRdAahc!MR=DJG-A+~=yrjqv~seU_Zb 
z#M6kfrsxBT{9I3{JDS6DcowjC!GY8Gp>IO&{JkBb$3Rjw;9KzK1}a6&5l#_4lt=Tt zPoM^wr_bz}9{h0}4#ugS+gl)!5CRXzr(B?-&~{Z0=(a>8M=yqTw#q{a^~=IV{S5mx zi?8CYPJKPX@-5}jr{R$ec&XJL48(Nla148C;j*?`%(`&)Sa&FqRC1U6S3{vr^DtH&~Ym)8PdobD(I`~FWbHF8dMV=>59o-UFAWWf@ zy=z(ml!9@I0GigUD!r)wXJzLkAi}5~;ole0IFa%<)(Sgx%WCzmu`ptZAZGL`;$VyO)`9JZOkPo0) z9b^@yo}VOwSs)AN;~sgHu#ordC2Q^}{w>=)5A@&OimjpoF}rN@PZPuovjpxq?{LK! zJb(4??Yk+wod;D&(kRlwSeay|UDx3h;})0{1N3cjkHT!D6Mgt5mQxZiL_!`;?gqe* zq-_T7*$0}jW@|K8b&qQB*$Wr8_uscFc1)bW*DnY*CWCHB`$^j_d7eIcXUl1f-BNoG z=%%ZX1^{Jt%kJGH)2mGv3f4q_l&*uXZ|YCX1L8NZ>3`T}lXrqR2~12(sNL4fa7*-G z7ThIAID2>4ig_zX=A_i!dmgHap@&p&-`uD3eZ6qKJ{zHNaN{jd0&TonwSP-Jg7@gs z0lm!6i_c5NGdhouhls*>a(eA%D&!`9V%8#G4qlrdhCMCQp3}p+AoMlh6w%BcX$jju zkx%hDTU_*?8~rm6Qj#bB(|6p=Xgsi7XB(KOV)0YM>GGQ(%Pi5e>U8HUSzo0t(CXvo z2m7QrWt`N|7Z4#fNyfl|^Qt~<&ObdTY7iBnA3uIf49vK{4H?iw}ytkUj`oVIxK+--22?3Eh|)=2WbLm#<<;$V@yc*9ImgXZ_%Q zrCcJ?lSge^w8ST1u`%9_w)=$S&scoF1oWLd3KG;_Lo6b>r%~y4kH5)oW_YD?L!@24 zefvi2tKbh`!)L3mK6H}+Dwt8e<+uPUm;Pm}(C4kLrj~En<8o z;yyL=5XovgMNDqPo+FdY;ut;IxIRvkUBUALk)bij@o@OaQ{vf%H4@s(Yt2qogFK=H z0S3=QF-WApHoUYJfEDj_*fs^_*w9&bTH+R}g8>pvJm{65t$S<4RSVpw#31tyJ~0q{ z9U`^h%*$=xavXN6V-Glz;?KO{8}JdC2|6V|6$a-EFVa{-($~dHRDJz!z}ifw0qvUS zvgc7>9T)qhsFGOi-_B}kYPUzxW?(@-;&pK_Hz$YkP>y8y`h`vX^?uuyvX4EL4;dS~HFZG!0_N!Z@Q zl@^ru1Fz~YA>2nYVRFx*9~pK&49eIEA&)mzj%5|vg18?!0S^v2V&Ex53$6=0e?(mU z0HhndVO6YP(;L8|@HJ64BR^Q*0tJY}WBAaIMBOtd-%PxVK8c$61L{JO_r5dC!_l0m z|L4$`i+ZbeZT=tm-|gZ}My7U3+9U!+{y}o1B7xRak2U3UwJr0s;eHJewkOk~fqchL z<>h>BX3*Vg#e9}Mr^Y%vN7j}{ai5%?pu-^x?4NxN7E55?J`L@9%bP@mB`XP_lx*SP zh_c!}=8&5S<(4>yAXA0*czGsJkj#$5idOgMXW6;tsuI{M#V}!!()$aI^J#opoEonj zd#mZR;Xb`9=-&vS_|F6#S=_D|?ywPP8(}#J)~POiNp&z%-a}`%j8kfqedzLmBjkt@ zkX!)H1a=yrg>6R2v`Zi&h@S#Qe44VP1r_hfE#}=MKECOMABRjieops7fqNE6YQ9P@ zP5(I?=kqs~u@ib9iwzhwyqiR7L(n&Um1ea$Ep zmp~+>1{V~_g|9s8OrNw}*Bq-JO75-tE5RN+_-FV}Mt=RDjllyei|b$CC8z_a?jkS_ zq4a)!37oWbH6j9`+dmU*JWxg-srirYe|#2*UvJlH|Em?h|h%L1)> z9;IgMmMtbsA)(6!I$|?{W-l+|763r=6aK}fX-k|SAgur#lL*SH;fu$1hIi)fSTo>O 
zX#4%`Lt>r7+5aFP%g-C7TCEfbo{~73bW^vzXJd@=lO%p3Fc1O)e>-aH>P|tTJOlK3 z?v=`V^($Hz4|d;`DQOqI5g0gBzRNbV(72Vg+Rb=o^k=)InQ@;K;l2R-uQLQ@(9;!7 z)*siAYX8ydH{6LA3YdS7K0=?MIXgF}OFOZ&xafo;O}Tso_IValVh79l>GBK?#`aP1 z*fNhVIB5x>&*x!XvB;}k9T#N?QGldDVpG~4-kFW4o1Y#@z}JT{OK;}E^hz6N`3ZO= zkr3fq+%+^(73J)I*=Kx#_*o=vqX=^iT&&HJ?mIVwd4M9q3BUmRo+nSz8wED}3qoZgbMCjGqDg!7Litz*oqSD6mI8U(8- zt}Hs5jVMslkbQ_Nj>=lv#psxSLdpPZJ&2D}sBqRF?8V0$h4(;y;igW24#XfIGS4Y; z0!h6%mty#HA1CTCazAkzesCIi1uEw)fC7;v+N6sOqbLIi!LRVGv@0BG6kaMvzW)XQ zNorb3mmS%8@lrV;aohLi(pS8?Gmr6M1W%Vg;IzG26tS>Yw5X~#y*yuyXAR#oQD-t7 z+1uaW?_&K2m-&%Fhzvb_0aHuKy}0eEVg>jpV*W(Wlm$ep9;uqXU(~EO^pwIkz?wd6 z9)A7Akc_M(*-6zIS$OOs))tbYAcfVBbd)3tp^F8cAue;u;Emzp$y0St#g7v%RK}(^ zt%9QgpALn3xAgOqIST5h`er&(FCRg=z62W5Zr2JQ@8cJkw^RQu!<{EuLA)B&cAq>j z8KnOr;6P7EPQ1u#A-oS(oZX7v1{kHw89i|ot4OQeUTH>%P?vXKG8$Y!K1)FN;|}Ed z1HT}lra0rlm#`qHY6&ni!NTsu5O8=p?6xAyFV$c~#2_R=q4;({iM53uD!KXK zRDs6~T7Zkx9-TK&;BF%H(nFr3iFF})S|}l=y6=HdreNG?`fw_>^Ss(hp}`zDGn*IO z3&Pgk7{6_T8Gy`kotR*6y85vz+1bDtPGlll{zMjQ?1y)1FYHPz7-2ds;k%-0i(yYC zw)DUZs<1xlULpy7X<|40ljC9r07ZO5j~j2%C<# zQSFPf1!Ym zv=Lq?LTD1aj_B7$o<2iFLu1>q@4}GTK?-JBwP+KSe|d54CtXwX5hWaAv$bWXg-ntV zzDZDJfHoTb)8P*=0I7W=VkRt(LmmRVCC>=;)V;VwAVadyuxW`?^3w^{bjbQT=2f7LQ_10{qAH#0KDD&H(d}NPP6gT!*6|o3f@}-VAF#rj>T(C_%ak9>pTB zB^Z*WMPaj9gQ){DLG}dkn-g=(+-;d!lza37R3BqcKC&l!8%F@|XyS zwQ3Q0G0LA@1AH>oa7u9i1@B?33G(#>4F}*M-nK!Se^_N+QKCP_ophc2ruyIPD4Ho~ zA?kxxby{Q;ZRpW?!~ZO57!b{?vGo!71bt52^vf3YNp&;v25%|2I?-yES?anj{;eQZ zpL2LS?V!h`%kZ!@l?)b)=1+iPFE`lpG=1Z)UB~6AH4zx^mT^7{iFE(uClp|i&p)7K z5*HU)afP%P(!;UEst=BLbK?3Vt{<4bQjB%zSU3_vO~hYFyFzvPAye{w6BnNUJ=d@zNp9-<8Fz3EVM3P_hGd)S`307Cu}Z?FsqSETO4zg) zj3M>?$pIJuusUJk z=_X8nIZ(NDJ*2{xJUKzUkhATBc~RC&{#IX}#IqbUcyCm|_-r94aWf}p3h^7^RKZah z7TZ&ihK{EB!}+lMOQNlcLQux^vaR9?nL+i?&y3gC+$CwswU7~536~*eTXicC^VWo* z=fG=%3#MBwb<`qvJTXe%1*gOE_A0tlz@QivC%)s*d&b8B762Vc!q#*a3b&x2k6}>Gd&<0ei=RZPKXE) zUtVGHR}M3yc~cHeXa-fGSz@`ANOz!{%#pJvVBP$aKg0P0EJ_qAt(b{ zop5Po@vPB;uWf{ts~GZVuP(exgLeMmPNyh%uJVv~ZuOn5xLt??0?SfpG#0T=NAV5Oy 
zDS%U)I-0$@F2rHMyw9=Dm#dXu*2l$9K{Bop|R^2$9>{B1$JDM;&q# zS3s6Y?de}TI1lvyhZ1UhUP;=C1DMhxD;{*U>z4;GYhi%$!zgH2s zPp0+(X^(5TCt<(EHj=XpFL@8$7uZjn>rvkG7V1Gl86u{YnB*tUDcJduRx7F!(|rZ# zF^u(%IES83$XBE}W6Rcw?&fm{P=3w&;U#+1I-Ol-faqcG%k)|C z21qBXOKiC9E2j>?)-8YNF;fQ^Fj5h0X6}Ig>FVlI;H-E|?HQE~v4;V3_Jd$}rwy5| zX*eP2=5^2khgYx2yP)lQh!?=BKAz;`FsJG-;D+7;yJ~1Gy+1bbuZ<93Af6jDoQ|O{ z!x=#W@XF*@s;yXe30#DxiH&FV{XvW$W&&Um+yn~YE3VKPeHRhr&tfE$+CGVgo`to0 zh`HBflplQvoC0BZmDWGLz6uTxO^9e|bc(!9x*8zfQ!y4p7D`dWD1u+*oG#lor%Gz0 zsAaTphVKF$i_~e%bTze!o=kucSG~FggI6UwB{IB%+ow*Q%08JP2fT<$!3!lOPEXAb z!uAT&n!fRyFxOgJkY3_OJh_tbab*h4gM>9g-2dblPY?WrNYf#wMx05vw9D=ftgmo5 zgn;Ty3gJ+kuE?Skow`r##FSVzm~THPk?D{r;P07KKo z>*@|d3}zfX>>xb~oV`O>Q#hXp>XNx#_^t=>Jx)qcY9mio(^w-BJs6g7y)_gx+z(LJ zh)%qHaGd->?E8d4g{tlSQjvFho&qL<7}5AN!(ZZio^b1>@LD1JWEVXlUbMff#h^9@M zQTp>;*fbf}O)^27ml`}^ktfZmt8_lMXiDoM;tJ;n_Yt$09fQdi)z-j za|t$SovK)vVEPEE4iPc|r**#GUAo)~wq)>|DrZ*Ti7Ih_uLvL+;;N_SxAWzu3gCXj zf1H3pT>RB^;H@E~JDU*&B4VLm2k&rDz`D0&`I!)AFG&G_ z+4}mm_d$Fv=@ZeR9oMD?N_Tg6|2G?Tc0)uRdlOdDZMkFsm9#?a$y!J23vac!NykQI z<(w4uVPo%gDxVv;z znKVtcfE6k>X6`th53{9R;=c#?7MlHsVXl#y z(iV79ZSNmtcF000!z9ory__zGT*3?Do{w&Jyp4NDMl7lAZ8yl&u%(;>n59srpp3xV z$aH%Po*L8WKy+SU(lStw2l2`<_--pa^dq^WagsdvD)_!fNOa_Q+=+)F?}z#G?wMv36;n5+CYmpg5Tie8D*uiEcw_9h9yDkkndeCwf`79C zst#eVi9L&?e?zaSbA%ouelpaMM1YGykm`+65_QX#vnO>L*G~@}#rtmOMMaJsgO%a} znV!_E5+1HQij_^Z^UP?vJ@LSED8}KqJ7|m(iJ?Wi)zD6W_c49Ls8!LnAWWWGSqU}; z`!b~^!tlDInq$6eY%B$L_$n03R%{Ldu6u((|{@l&pa+& zs)l8fYl5_bmpuR8j4jKtXe%wjoCO(vA)`K6x0+UHZrd(G`$|(Sa?;kJa4s@E#4EyB z6g0;Y5V;LhwV&hFqIDN+FHbZ2eqignoPtjL8e}!%z6U4Eo1pNJwWzNU=AB0}qf~A%K2-4rT@&d*6U#?47voMy<3W(LFa@J7k*jTJEvsMVfjXSOni8Ue zB0_wQ#8;ZdwKBg1W@i32WZx{ zG$EykdCfZ!rT_?u?4@}aHf={AgO-OVHe}eBsXa?Eb}NnsZt75@zHRRZp3oLq)i;_E zJOYO_n;B!Ru!qcjX4JaoS!1_x?IAE(O*lS3>CTZYUBuk>bGHfWgzN!iq1ZA@8g&`_ z-#5<9(8ErI>wZk}0-Dp&N?W?jAgUw_*Az;@Bg%d;nuz@gbaj;TW6$cU^}CQvLK90F ztB>`i(|>49nIaoS;6saZcZ=#e=(G4Ykj$o$@(Yh_PSaQf{pxGq-$oOO`;^?@8^nM_ zhB|pYDSL+2oN*!jB6#^fU@z?~&q!GRH@1jdkx1h(dLTUtIS4oP4Z7mm56L-!iU1A& 
z4XwpaGWlZeiEK0>>!V>w)*AwHB6(h%rQa{IYB8=3dNaxjDoJ5Idgb={r~hZ$fQR)@ zW3sg^oEOH;M}Gf*ciY5aIw*Jz2+bsXpZ9AW!?R!G(k_(94@lEZKWS&&EpwI>9Y~IX zl?BOvt?rUP4*ujdsb6FH6T#@*PPlzm;u9W+5Uhor6>YW>0HSap)MHwQPr$3U1PY)Q z{+;lC1t7x7Bp+e7ITj^ho*_dE#03g5<^#x-`T}oC9qK-^F3gP&oUp?*$HB-aBoEU~ zMDm5}J+dcyw`gH8x>as_>Q%>D?&`D%EOWvXqrSR&XEgx%3}G^<^&DJJv4{&#=^dQb zgE0AtmzQKpMx@Vx=O9K=GGMh_r!pNlO6I?E0_R6eqw!-KAzt;NLzoL70a&;^Z<$d0 zFf3MRb<4B(&f{TwFJsV`tio$Tf}$*W7)nO_6aN5$GwGl4JbU$v6XbCIEOnz;HgjeY z@B$KaxRK*(G7IF_*T`M%^M-m4LK_CoqIi-ec6z<#2B9Fe)qvgyT6V_+E8xEcLvgC5%WJ}{|hsZ?CtFl3p-1Uq;CjF;x1ErILhtN-CTTc zSF-bNJ66xAuVkCQz_{_yT*-|x^?Ec!Uz#h}KSmxWW6fB_)HdR`2npr{$fvB2-AFAI zu%ah{l`DgTvIEP=st*YHuYXG4J$mdI*-K3}y0l%mzVHG&q!Wpq9E9)>;GSe6n(Y|t z*}*ykJxQ;Zw&h9u1j+EQQXJQid7K2s)*T~Zl-y2_I8%AFdFNp%%_6>(rnRtL63T)& z#N=lm-0PqFy-GChTPA?NHQevf7L5-?79u>^0VHcUS{m`i3%ql{h>6>@<@z#?%q8EgE3j!0j~j6EhU(9Ka74DYF}L=y}V=3o+|&+E-myE z#Aa6$DSZYJbv^Hr@KQ6GvBtW%v*-^Q+fKy>=*pn>%hBX%TM^Pkb_S_3984fO6q*qvy8nd_R>wbPh=`H=c{7F z`g#oHV;Hy9dZUeAxPw-_HZS2}$wbjq@dK}rTF_}&ns2f84p(j+5^Q`90%Su0#~C)wpSHC-%zV#*PH!k+Heo))4~vEh zJ|bO4;si}8I<#{I?#}80fn{#YUomVnWKiWB1h@~ietxm$j#&X=J3+?A6UqVPE+OT> zaq+;uv3Bj+c(TI;kpRBO9s76!Y9|EK!33zQr!ZB!@rYZ~PAhLDRS7eIuOWJ4J&5ff z+eAcep%|?zA-?eBuYr|E$$9J!4>c5<*(>KKGVgI{nX|B2LmbVv`MD3%>ZfoliK7)1M5T2S+cBZ4q0}kN z%}BEN;#vow6*Zy3B~C!g59SwM%BzioFKPmNEe5wId}jXH?-+?*jN1pcU0eH#Y-q08 zTXgsJfZQpm%p@milZAGo` zV6*yCLi|m*`xdy{Hl`mlC9&1^#*nX|QShfoX~%ZME9NXTB0_KNWu8;(n}jxknE`R0 z0FQ8^+vi9h;024Ui}jDg3L+dF(TNVAV*Maz$-^JHmXKKJvLAtpfD7&nV^GG*9JE86 z;6oD&9FjJX-tzHd`J)*g^?1SV3`|I5CzI1o4YW$AtYLT_`lg2 ztW;8W$Xj_5is#JgfV@YkC{c9uHw>7wGcDol7+6 ze6p+yB1k-@U&-w0^SGDD~@AJSn73%BsgfoOp z#p2+FX0AP6Pi*agEboxt$<^ z?x)V$txw?}m;#H=9kEZP=^#I6G}&9>8*9Tv>M66_{++4MoeSIHsEJQ>xXp!^nBib) z01CdAr3@yFcnnBjo{iJoK}h2*Etdt`AeKzxXMfe{g2NKBYSIl}{XLu>L)JaTwXj_Q1+`1`! 
z)tQ5sMvg%=zrWZ9-ar7HFUU@9m?1yZHN@HO;o~cp;S9H|Lx#an@7_w6mbzUA{9Hm^ z`_^GeXh5^AAW14v@X1myM~}Cqnv%PFK|?g2xU+QZ6UP>Fuwl}}v|-57@}M3MsPD;; zRv@)Nex}{nYm96gu}mq518*k_9x$v~3yf3k<(=WoFzZ6!{{z3a4Q-UBu@kP#8CY&V z#!aZpUi(uhIU!Qm5gGZ(FlU9+qknhf_m*3KWdoF&xsKQcY~H$Qrt!o1p9y-ZNjl#f zzcv2&pr6%v?P8Lic~+8M&I^6ZpLu%bKY5syRFoxlbaEQhRe4ZTLuuxhlM~J#x)B*V zF>2mk2OM0Z@|-4huDG$04Qz&_{v+931Lj8>H-SxzK-f`xY~ICg+O_NM?c3|PH=6VA z4Ie>E4BwHs*iI(>(#r9R5EPxpI-aks>HX2H<`IA=VT9wF$T@z6S>}1Lw91;sfX@Ab zdYS7m{`WYOX({o?wZFJk(HQBap+Qd)%46@DWxC=0Rv!DcoQBPpag(h&EAA$(S+nLP z;?B%o=Wof=v$_poU#r07V!nCp_3PKuUZnDX4JyHl4gUW5wuh#C^(&Ykl<{1vMQ$3i zG;BsRz<}aL5bYrafz(=ytA=GcaISDs_D0sbq{CNHoK^2lSGw2ZPq6= zYJrg0yHTCgPMmmDJs>tVp@_WNJu~8t2Ko}EfLy|ika@k=Yy6i9L zcC&8|u+yx+Z-H*>k37eIW^|*MA6srnuS4xlbso~mw(V{jLT?<6N?3g<K|RJTV(?`X`;c^5TM>oxQ!E#_Pji5>+<-#g*gd3cZVt4gV3Xd&fQdWdcg+H*tAV7as+ zolM%i_n1j8ORl(Eg%=EZ414zX_TB_5zDk%ah_-%n_DwZ|E?*%v0;WRKtPo{5CI_lT zPBrLVF*gsyJ#8=Yd2umtl%585y}BE|W%EY6K~CF4=$CW*x5hW~S5iX4el!KP60$6u z3`f2$k{9N%e@Y9IS6MzXqRwNt*b)4@Wp-x$!}tD7Bj0>8FJz+>n{2P@>+cT)^y$-G zcSm6Y-K6XCqBLAy_c1Eo4K*lY`MJhxfAZPc)^}mgib30ynwGXHo1fvxQa3;gO{~3@ zLvG|&)w+X=1}iwL#;C9FUH|gXSg`L3;ovM?sor+v$5pXe*#7K>8V`W|qeTY6ofrSr z(-YQ~vR}#lS<1ycHo{PUi=>Io3Vid1|E_7wl|61<2r_cImuHGr4n1Qlxl6~#&wnK0 z$8S7#8ebsP(vp%@uyY(XGU7svmyVvRX3DK-wZD%CVikFYnLX>WhQ@}*vS=ytezmH+ki_71G;)~xnZ;!35- zw~m~>`X7sVNHIgmex#ND1Zh?nzG<9SO_75ZiK~^MnMKfS3|a_h z4eAa}`!5#yI&pd3NHS|fY{J&mm;2x{zXIUW^c|9DFo&m{5d=5Vf$91STbyFPUA3DU zJh#cpnAb#P2jb!o+P2uZIFX9Yyo&Gh@{BQAjfKg=L$f~yoc0>#OTk?o3_s0wUNb&E zzTcQz8=slkHgRifbbmg&7It~}@3A3?ItfQXwF{&7qA@a-+W%0Bt0s*P;jSPO!H5~~ zaB)kvgpebZjy>#4;C+m2ZEdsvs#|wL4I6GwqeWeKI4tCsx#lN4&)j`ic;8yDAW=Sk zYrdBYbEc-A=h~ojv~V-<22p`<^WB*}Hcgn4>ZX*1TT*(3xQYk2hhiRHFndI#T78ZNF5|B-8p!$hC^v4u<2g9x-l(ue?D=z(9SU#3;}wHq!a(y24-D@)3xI9!GNf~}4DovN zOxsQl*b?@0$hj7m%W{Uny5a@wbo6b_sYoe5ROsWaPlvrx(~bh^*X(;A-7r8||8q-s z*+R^mLPZO7TqTu0!XZzur@A0D0jQ{IBzW0#^2K<%YG^DM5<{Q4b zdX>;@I;O-!k22A&UQHH3G`F@sb+!Wa!WI-1WOeG@x2sy1&f{ZS>Z#nUa_ksMeXeu= 
zc!Aq)_Vkzlo)P3juu4u&PVA|5i4SxVZUzLm4PTZ1-xte;S_p2j%DFYpI~gW6!$3F=#*gXA_H0y<{6MTh%dX#+~=+P^~_}GG4v2>R5LkZ2iqJIH(NY?7NNd z+#bZjAV|wukqtdR)c(KP1~W%BM}-TTLKhY%@4aW@829_bL&TD#a~Eb$ph9*D2?^X} zM2sqw{H~s!HM@2_llys>YfQ@r`$;(DS8ip5Y5jpz*tVzLD9?6B8 zxd>Oy?FSDe?{L0a0d2`BBqT)P=Pzw+lu&Itbk9`Qv5yg?Ul6u}Xw-6Oywp&()yZr{77f)?;A_Jn7!skv^;^_&y&UY;|@mi4FA`i($woV?*>!2Wb!5&%y;@jM@f@oC`=J}qyZdfgi`#cEhiz56}{gwMSWTlVWP zj*gCEBl`OM{Cvs`WmwZcXOsQ4|CLH%(Z!O>n7K;dGn^7JMZ9uaf^tsqO69#jd`XqyqU-TaW_Z^z_<`fWz<&`C_pKO5HZspu7~e2 z8}$nWP_^q?wa{yLxFxst{T94b$d1|&vOUhNqyh-R7|s})`uRX$I#D}wpxZGtF*mW0 zZ`~qh2WhxA(KX;hk?hE}7@C#@yT`lVKENR)2Zx12E<{S%^)$U-Z?sE+*MbCxqIb0+ zZGGD-%*5`+h-b2jg2<7BPhvp3u3NXR7)hOC$@=q%i3!CcrPv*Fj;ryYMczGK6MI*T z7-NS!#U%esxpTA5uqj!)WA`mvv69DiB+%E|LBM+@uygYa<;KM9@tySD$MsV^u3-Qy zjBVr4dXZxXpLBy4A`wRo4ZUn^yk~v#Eb`+bQow-=#y&KngWxXU# zI8y*?=B)H;wNt}=DLW992wliETad+7(KM1R#2NA|eikCvY&G6jZ}9e!h8xogmH{3{ z6>FP)si7-5+}~BhGxK8)1Yk?CuH5ZA$>)I-vgx~M zC8ylwwY9a!A!EG6q>`Ppb4}Z}Ej{be1tG=!LKM)%nyb%WzjB3VK{~~}2s4RYIAFt_ zsB?ds)Am5h7Xqca79C5VCf^ZlZE9svCysA>d%ISOq5S0F4+iKe5^WtiA&!4?JCWuA zeLlihbkC~MNdhwH9;h-w7#~rJKdaHY|IndL2;fr8rQ5;16A6d2L)_XMYLQt8U$v7b z%W*SSsr9d6wW+aSJ?8Rq$q2O`69UHU7%={QS1g}pxgQUWz$EghV1l^${LAXEbHR$I zLQdVe)LFCCZowI^ldy@Ebv5D!ToVZlRv)&ty_RA-;SWM>ZxznZyC#Q0Tn3SZ zG=jwyHbxp6r0#NP!glD_DZn29YbY7Gh$C>;e}#Axc0ivL;w_H;JGe@ofR0w9T2ylW z>gw`?i2u~<1&tC9M_kiT*U->iG&Nl$2YD~zOf~xFZ`rUG|A5i($1tHneWegO?Yftg*(7q>L% z#GUz42$A35&rfUIMg#JEeD<$#%qtqeCO8vr2M3!SrFUSM!c1&!7fSB1GYbm~h9b6- z{sccv#Y-L#-X<5+{(`g$1hfO74@3vB2MlUY>ADV$8%mK%TpLNt8WDY@5;RGKRAhWC zI|2`c$14bgJ9M;J)~_!??MlUX4Q#-hPesW+l)Ac(o7G%*w6o{|4&v&NJIVu7?qG{v z!M;i$*+PK;dWl1LM*(bt#7e#eRTU_DX&cEjlqT8%x(X2H1aA+EUz1b_X$(_iH;N45 zMvKVt{TAv?IsR~kVN8$QyW%Q(t!E3ZpvFJqk~I1L{LFXT>}wIN;Wziv1;s@k!Wb{} zevK$BBo-$*$=mvx<%W>n=Y~DPqP&ZHQUw8qr2yHEr%M`e8Wy+GIZG0h& zoi#UsoT?+GHv=P*bVS*?T@4M!iVi>z5$z}E$K z+HJL4nDJe#b7kspzNOBv@!Q8@O0c@^22}KDNP@s;L9aO;`UZ?`1MSP(YU)xRBfUF> z?neZxPj`UDtjd99@gvXnTzs)_65+cYJ(}OK7-bA{S5MY-dPgaE-68YKJX{Rnins5) 
zffEeHGbXl`Z|V^05K;%EmYZ9pH}(fDDo1M-fBUw{#l?k&E8>axP9`wXuoc}yliHhm zpKw!HF+($8dDOu{1RGCo!(e81itDfNQ~p0uZ;J}Omv-VSe}eZ|3oD5UfB^5l*`>-; z>EM$Lqw`1XrMRqZEsTu=piI0%HBWphvvvZ+9#{=flBJ!UoM3zZbQH*v--)djJYr&D zy!7L*R1Q;W9rkV^-Q;mkRZo#Jc<}Ae`H`VV6-Le1+R+e~rR;wBmQ8C8v33^CQeFZR z7~_|}H7t;kExlc6q;PiMR*&Rc>N4978Wdw!ZyD=C4b#ZBk>O!s=zbW|v-~MVzk#zFqIL;2vV?t znl6J8MC$Id6S4K~xuDm7+U2aKR+b&?N z!)OUkr<g*!SrhP_&lxxLme;VnVkR)7$e5Dm%! zD^Pk^5)>cg_yN3BKbPG`(+s|DgazL{5Q&i8*KNbj`j$;<`;#@AR@EaP z1>Ajub!ED%fN4>XRR7tsUa<#hm-pbod<&|d-P}J@8Vj%5@^U($<4?#)B(4|1xDML_ zPJ_w8|8IWk`mbj_Q8B7s z?0G1aE~;Hrm8`}-vNhST;11%-2$|g#ZSH~JyWI4n%~A54Vq;@df7{Poi)L1&}%XerLr5bB(|f%thxM_mv8L@&&9;cf`51x-_YQ# zvg#_zXM0et>>$eHsa^rG`O5X&K>-k7xU#hnDbOElTru!-D(c3_fUYcg*e@iBmzAWh zkyVU!TW^_Awyg8rYW=1N6F69y3*VqU}J&s}Wf_28IUqP?~zjnNTV<)xP@} zz9kP1x>9Ro8N(&o0gsTduq}jZLnlL45ROBH>mtZxuGv3_`CI5(LOgF=Cs>R+c|n-X z_C41d4bYLKQ}To(pxUp7e_cb7pO|qKr1wKutU$n=os*NvEqi}P`+T)_L<;8EEbpf* zG&Z0}>ii9h65VZ61NlD>cX&KAKph)A?_Xw-by>T4_Qo;rU2SSjm0ze#B{IrMJ;b|+u zX`nIjEbqRli+y0lWnFu@BZ=;md%jaAKAMt5ZsP{Hq1f5@GSl8XCcnTIz2*F&{X@*llcVj`iyd8pAM3&HR5v zy$3wj?b|>8aoKy85m}d!lvSd_MPwGLyJ*M?m4-CP7Rt;hAv4-4(ooqW+!R`9*l8d{ zcKW~1?(g&eJ+J5WJm2qqmvMbQ=Xo5*dmTkGLFyv@vL2<`RSFTku^r(HKk&&}$;9~! 
zqKWgFpVkmQn;Y1`y=Cm8&&(6qRjWjx!l^_~UIe;w&S(4dM=>#woOJR>Ex*%VCH+Pw z8G!u7*(eK5$qc56RcbARZ%z_83ey0@-sdsD zKp&rr$E7H|%qK@$C{kGaODNOBZ`d9qAR(cm+x7ZMISS%p^OHFT6F9=0dW_+B`7ObTX{3uZ5$N~#qGq}3C zx|(pF+FxG#650bDnq%+L{(JPfB=q##Z}0Qt5|WLMjjcs4mj?9fL+>AQ2e+TuDJ+o% znN$cm@w`$1q3utCkMIYUV6j9t^u5Yk@;IoQu}Rin6%ur|E~x1{U>j`dG&rqM)OsfX zx=4U3tlBCtis+Rk_ zKYk7nzblEH*G*cqpU2eH6eN@elTI0jCbjL(&PBwKnNV6OAQZU-od_nIlzW%!Z0K=Z zYTQ4wQ0IFfDfisX{LSSFZaxk^zDM;ZzDWSQ0WD*+cDW+<6!yQTEyK6~3cv*8fxV|} zY!5!dqR^uQ555`rj0SW5`+8?$S{eafiK;`$Nh~Thdn%qc{bdH`&tl(k#pTzmW|oA` zhYZC(C}$7GuU2&8lL%dCjaRQR+UO;<8*ECJs|lS#N1F8H*n=H^KN(0?oFIs3FZFvo zYS>TdFC~gIuv5|IW*^u$QJhyq297N5!N{2`5h909}2;uVM9={d}DXYv1ce!W@p8=?>zQgO-d zSNoo=QR{l9_{r4<;wlPo1XbT2R~=oA#334H1~)${&3anDGV7mdT_*Lo%Xy`=+A^vF z9is+vaojEo^~Zo>BPnXHGU+Y*ZWeD;vH9}#tFrj(Rh^%Y=?Y6OepEHDuxy$6m1u@d zLL@*djUcDP-7Z&sms|h<36LKSaQVn+*>zeSS-gi@-k2Ww^fVlUj5DT%#1qCcK1&2j z?f)C=E1C$F4RW$M=eWs=SsQcP9OqrJRh6Gy;EBc6_R;%h52bam^X&E|sa znN)#DEd!AYBnbx={wP-Ucrmgx)&>xkr?e}2sW>DI$RTF~+>jBzVhnED3Xopl z$W>dmTz{Q|0paVXXKT2NwM@1HtILi71(dS;>2Xd#pv+W*q-s(FP!n%ZQFcw#&9*`N z7CxEPcWoashnygd$;Im)dfAiNbRNsz{(idG{mVx5>0OtATRVXW9i5n9iO<}2Z<~q+ z)|3pWzBo4&Rny?ANoILjuNL&~+DbOz)xa5;3S!Pg4KS6_)=M*(f?1o+Ejaml@UKAw zKz?VUDmp|AEk*YPPnRx5UQq=;ON!AO+}A8V|K!3#oamXHuJI9rqums^`SXyFr~Jxf z?WnG+V?XdPE!O3joy3m~yo5yRy6p<+NttgAGV7sZzqH2>H=7-N#GlJ1c5wuq9PBDD zhhiyy;bYNqp*r@Ds(YS_019=#*!AtV2)&xUam<^>T0a_+OvMcFoc;|u$gaqZcXFib zNRQIw55vCu_&hf@XaX?944EIGbt{=OPhm50+Pr!`;RgzgkeE*CJ2Ii z8L>|gFb3k8T2MGPQ1YT+m?Z{$n|-5(`5Wy|EJnQ^Gz5!-Hit_|oCSMV2^tk2Sn)2o zb6jTpz4x|W=kpH;uvcGMKFRF(R-1K(eYkSE|4`sZz(0iaLi6UXU7sFU29=hvF$=l6 zU@$EReBM0gUnPA<{O-7-(@5Loob$Gvm#}R}O93s+5x$kw+wfJGA zV8Wu}%i>&J)iim+K!dK*#|}*BVVME8y9NA1NM+g8Ig3^ zTW-?JZ)2K6uTjHgXWP4Yq%pq16`3nCMO!(Wm?QP4vDJB&IsTmz0IX(z)8sJgcVhbf zI!HF)eYo>QV6)OWP_4!$CQdK}p$xrzJF8$OFC#+%(FrRpEv->9|4m>(&oXrr40_XE zssy%z5Wp>DG=ertZc{YAOm;7S?7|8Zx4wZd(y3O@qqaxlwfic59*q*LhV1cW?(vPE z;o;`Bdb4);u9=mG)ZzQr+3ly)U+Chsn!kB%tFV8Gcxx!0Dp{tGC9 
zz!WSJ9u7G*=G9~A7tPX_3Ce7R6z{%tM~1ZC=`al1p2)g~oFNgAtRol$>ebK-^kjhH z;rUH+&2r<*T<=@O@+IWBpNWwX&7s7nvPRFWIQ_Zy5rb^suy=)ER2}iFq-gHr!k?5I z2;0r*BRiU=AzA4L_6;b3s=$gIds#W}V@Q3J)IDb0YkkG9Cuo4%A??2TtF?v!iYh8x zK+;^w1O*43^(Id15mAOzaL^?53TsaDs%M$)K5*bxP2CtWYRPpD-9Y;j@9P4QjqD|* zr9DGKO`g%mGfTYs*eDl4a>ND2BWs`w5R0sC)&C6k1`?Yzfj{-KZ&o6Qi4);s9DDcf zrM`vsq45{faApAtfB%;+{*w61L4t!Pq@{6Di5fR;lp!au#W|pob=$UfWz+UW1S(SJ zA|suq6bn>=Wr(f!QHO!2{+M*|1F^*_VlFGmIZ4;4I`ZN-Y~@DNo$5M&NwdT}Jeu6nw3W=-%!G4p|-z|GkUj$%c| zkN!oM2dGdGYrUL8x6;NL&CkFyQZX((D;-MC6%-WkOz}^Y5#SHQ$3`#DRe$h;E=3%l zc;N3}JIGBy{xdmKC`1&DdX;F?iHkZ$OoIm6)g`EI?#9ePh^G30S=*KQ>lW?dJ{Gqu z0qqClq)G3zt`piXQb~k;KOf@SMvom}!bl4S0ivO5(4(N?hj^-^8*ryO2%UX)q)VUB z>R0&t1rD6Y?eX+1y<*Xf8AG ztV_p$VYq#0xOr4fx(#ry6^3JrS#jw$vw*N4l{UMRJFRCjCb}@fj$+{pN7tk7J4PHe zoPWhw0x6tpMGX9>1+dN4=^xMFi3&4OQPFvfP0EN^Hl|dxrl)YAkfgGda0qYxr-LPc zZwJ;>S8bHl(Lo@LKTT>eWNqicx|{K(bc7i9k^+izFAg~n?#){GR?k36UBIw}TA(Ry zH8)OKqNCDQS5rD)uxp$Ew6SN=TLGSvzy#&Uckq}6ToBB7M;B``{&m$v2U7XzlT`47 z#9N!MZvb%t%F=kX>T;ssm>B*bioupCKJy&W=fJDIbAA@%g^=kwX&t9Hi|){L!;QT# zJUGH%JUTY!G*oZb91)I?^MYA#H6_MsB^FgfQnxk+HD?K8KEw>Py+Akf z-UIVWHHut6(3tkZ8&-u;Mjf3M>FGE)IStV-+6$ED9O8`Meyrs5FBMok`N6aP`|A8q z27H|+a{cUidVk;-D(r8JnG}RyMCbH`vYeb8eb69aBZiYgXh0^0I)uSJ#5?dZ|8M{+&PYH6ir`{?`oT(@mZcrKsCnl>d&uhQ+UEISIXHTLp$5%MRH&#Q)tH2lWJ$Ld1>d~SMp*3g}!wsp;bf|q#52s`P7jsjP{Am&^-?EL~Nj^|w1cB2#=R2-0&q@YWCfN$=Cc5i*)yq%no zX+ZUbuZ;kqN5O47L$tPd?#B`NHz)B5>?5cWI=>4m_q-n(oCGph2u&`vB`dKfouO@f zbxVWL$(e`D@li|9U;sHu*g30T@a0zjNV%5t?H-Iy%N>y5q2i zkxrjSVigAr75l=^neV;|cE{>?r;s-5`;li)o-hR)Pn?}~a(2GbUrKA-f_N(O9z(8h z7624VQ3S3tNPI4`BGbO}uX;>X5cC-C`XV_$+8|@x7>!?w zqri(f#?EKVspr4Mpf6i+vY@Y2P*8aNx7Z@gU@2Pn2cuhkp4FoWzhD2i&Mx89{RzxD zGu|_H<;s;FkP9M^AsAi}8o}O3*Nn?w0!oE_S&h+FV{;bJaIWPTA)tyY1j zZ`VZ&1#&fkuXTpRu)RFUfaoj91BNz{)NMGoLopYaa|w;Q3kKe8e|Aa@xL0$a(#v@J z4el&5E0p#lv1{>)(~W)5X`q18g6Zs8QLpwt+&K>qoSB7%(4o}xcnN%be5m*z!wrWg zaE5z!>R8tCR4HI#x09WHcpc_V9Cb;X>*+ZPWddj) z7nBrL&gQPJE)|#8kW*o3)V1=t{x9C474O+upO02X!uk^8aIcOa4#chj0ojnA?u@Ds 
z1pc&LP@z&X0r%L0(f3}v=8Pm$f+ZuA(}}LLynqj=($}_o(jalL(5*?51M|nemt2-y zI=9{)pwzj5JcM2|$tvJOVTIHe7>JX5VaX$A-JN7eKy(ogW)IhZm`U34lRd)IP&IXT zcLxJ?mse1zHRFijf8RZoP2dm=cw>+cXFGQ&0S-BiyzK1kgo*;QtO1BL41i!-$vxajCH^x)aHdw_c#)<% zQkjZh%0N%!1pprJX*O=7e7W?B^*}_J%gV|?OSm4?{S}K_ItOPNiksClj|qT-ISXc< zk6W@3Zz2WHLfve{P5OF$b($IX1lED_V*)%wcqND?hIszHCoJOm41gs0su1>y95hxO zb$p-*T52w-Q|IMgz{goPw!tPK@^nue#Qic-MzaTRf>pG za7EN&7$#KS;u}ZA=q7$pw>Wg;8P!3Yjs|J@%d3k+GY3(_k0MATSLG?yg1y6v+YZN_ zBorypu*Sy=AVWIjg}r~3nwk(C;&P~7JM+1p1ix>5{d()qwOM(;aoX=keM|N!gY?5` z$|%4xx0O(MNZPC*^$Dp&W|Q|BPn%sZDtf%VwFr(?k4Zm2(Ht)kNy!)(HtzSc%LD`! zg3$uvpWY$6f-`6U`zY5!sT_gQZbmyX^NTD_szguNid9sFiv2?2Mj?QCDYJ25$kt+( z>Pet~X`Ga7o}=Sk;|JSd1t z9yj0evp6<3wskfBDdtb7g9n#KEw<-G*MLO(jL(QXpeok?4jzbv*3B8)ywDOPv}gFP zqbYa@l}M$jRn*_(oqsY7{xxOQejx-3Fhed`c;o@?4I6GLsv_~`A}&@)f&@iH>xBGy zdfF18e_*1}pn~wUi&U2!*+Jw)FzIrXI-Cc9)q|I2;}td6dwks7)p)x@@Q&o7o7(QD z7%m>TnW*k5l81))8`>OBLUR``cFP}{>B7@qn*yl#mz)#!B~t04XQ)?UbtD~$aOyy^ z7Wu_VRz=s`0riMi#PyM@_YA$ds)~?ZxTVb}7fR0M0q)Moka}}qHNpTmE?U&iHsUP; zMTx=q--XN(?vnEF*qIyx7%_hoTOwEq8KXHnEBVZ?tv>(!Kidm?NgFf=WXAs0pJh> z4QPbp$JFniA%g-zu!)$LJdCI1!N$E<;8`E#1Z6u^v&iz}e6sr`OAsn1)@iBe3fplB z@n2mq>)U(#?ftP81xtw(9qG$}1u$l@qd}#7yR^0#oF46FBCQaZyN1k!Nfy@+*T!kX zQjqB5aVs?h^-iT|rdsu$Cz@_aDJg5P#D=LkjI}qHfQUc z5Q+rn+C`j-C-3HJcNI7>NAK%vo?kU``|SY;)SR&$M|p4AdYGc!ewKl;IEu793}GBl zR6TYrXUfakp}g`tIc=naf)p4yW}RH~_dTKS$&%KBq9T{V$dW_uzyNM+yD5a~yD9KF z1|Jd(+wi5eEEj-c3KF?%6d5T(%oiG5Z|r=i0?asy2)ZyF<;SNweT)K54V^PA`BC83 za{w7t{YbYoqw`U4R5cFiO2kIISR!I+f%{9nPc}1V)4QQGAn^Fo^lz4cRZC*lBKXbBM7z&f#_H zR2`aW9zYZ1wMLHlkRBTJ`B5WoY^$pP9s5%7wS zqz9RxPb5A-cf$0|hzDqwBl*R_3L4&P>%%?%9QRxI=g+LTy{Kg2ZrKfwo;_O!*Mfr4 zN*pho@5LHrTzM_tj|&T(BJJryZEzqdzH&9^sav1Lk)D_+cQZ`BfX~0e1#D1HZ!hi6 z_RGu9dIQ>@m)sy%L%v`4_wP|iX``qDkOq+CEG{|xO~W4BIm|Dve0mL5wsZ66^v;}6 zU*yT%uV;IHl7_BPY?X%q8NZp-{aY?tr!q!*-c)nqD*w7>OW%vWYzrF(K-tLk95l7Q-|4zvc97aeY6V+;o|3220fV2g86 zx73g0(vy>u=xAzCwqYD#CNw;LmJ2M7;%q%XfPWJY+85bY{-!hdSkp5y#^9RJ-so$4 
z1M<4cwzeJjlLvpD1_aTALO{i5W}6$vgIlcRH<*vV-5m(eHXsgXV#|f;a|}`z2rOFX z1*N6WlL-N@xr_O8+UwDA-4}hyTvvL>#>Y26HldEtP3^XpoghLIE?fx1dqOH0IM2sl zOF3e4d=W_&L8g4r&(06r@pdEJr~;KXr`1CpSCx;SpQX?WUITuL$T@Kz$gf(pO-`sW znj1kDIZ!ly?J76G?-D!k3-wwK%qcP~E77tMsfcSe+bh<6prg(Z`U(A>oIsb6&5}=L z0hj;TyTJJb1mxJ@>V(iR=-6Dzg}jN#Al#__)Xa$KFAkfaqPGvgBJiq^F|+^;u@s}rIXzG>;`ODf4Llq zXJSx^4xRE186cz~=|wOWsxPCAwb?*J!%2~YvX9&KOi_^*PTbgN&xVg>0ZZ_un+{iq z;({7ScR?C^l$t|}x^BtDBKXA#Zxm|6zF}hEhhW_qniKIKT}ARjrhZ1Vx_B6b&mrnu zIHYoo4+~vq4hA#M-2V|f)NOvKFSJBfP# z_;Ij=EQhw0RIL|VdkGG}h}f$onRUKdt`OiU z=$n^GQ~RBHP~+;5r*)1vw9&{we~_O6Z*BxGcSpDsiXq?*o}i_an59FY0_h`JUpYNJ zosCoO8>ACzm7|T72^#20Iqz(5d^wBkAtKsE+?M8$HK2?>)(|uLVbE#CK&FUDQIXHI zWfq!@Q0Q7JK7HDgz9HWXV;p7Xa+RqYs(KDCu73+w99u4Q3d^9o1bH#{gd~jsV)42RE`pC0SAP|07BF=JWOCJ z*s!IcjhKc_Y78=N2(!(4xAB;S6KxKVVXZNVkk2_@5+WjDpaiwbi!cvDH;HhBT+c;m zO=k3$HuIJ_Hr>El$W#m}BGVu!bD!#EnJ>VYH!CLXQx-BEK7lkOqA`W&Fxg)}_7iy; zh~&V=K4z@f1#&l3$!+-l!-rh-I*r$BhlTj2ueF`aBdWWP(x;}a~%z4Ch6Z38igQ5#y4PxbnUWtH=keX)=>u8rr!FPjU zHVhz*3(oXLuh$cS5ZU2#;YaG8JiSkFdc_F|@pdJsQt4;~*C!`+Fk zb4xL?5tEC3A%~31u4H6Fz^a90s8CV|P=s8@ww7chb}i3`3mhy0kuj{8P485kUL6v( z&0dpKe<AL^_y+=?W0LHzmZ?KiiuqM%ffLXFw9y`#hY#|ZLoi6Nx`_ZV0g_9CNOJ)A|e+C!%Xqw`jCoX zX2t<10CP@(D9g8Tio?NdNdgK zAxLGCzcAEUx+LiLFHtfto3}8@cqO89W+ zSS|B!Q%w(`4^`UtpJsu7#NmTh4<_%6Sas74lfJaw1Rv(u83 zqhU)=CCVT@M3Uyh(`I8CU2Pk*AMVIUN4nzBfmpueeAA{J($(4N(4O*>X=n6V-X#$P zgg_JOnLh5afD<*{^=IKMx(X+Jvcz!#<3r5sThx9c{zi-s7eJk+qs{HG{ZIV(v;>#) zrFP|+xh={f#ORH68>G8(0@_C0D8%z{C!K%bmB%U_ze!bW&W#2O7hSWIo8dBn4~EK< zYX$s|%Xb3;P&&$A6rQ@CeF*IeDn%azsI_=Rsb-QHtHY6!jmG2b<;zR3XgJ1L1UMLJ z4hZZ5-HF@yI>_(|bOTJDjXJpkvoN#>7SE3%l=UcJoS{$8cJ#i!@?-#~djqz?ne}Q5 z0|>+oZ;avcC?vGnmtH^Fj9wPCvKSd5x3dp_dvfVV<%<`*@UgAX#}*${GBesC-_{x6 zJWiN}uBi1h0ZbtCHBx|p{Ji+(?y4Zfo;SiowDhug{#%@`27UAx1QXrFKD zERH?2*8O=+4aVNZz}T)B*a-&mAk&_M(a9R@^UNFZx-p4^(^?P@Dv+0oXNfisdR4{n z-sb;Q>rUw8hDLZ;uQACWk^q7M6Wkmv5X6#;Dq~g8g9^1JbWBZd&1bi|h-n8Gp;XE5fTpJSwWRYGQ>C-wxAY}-x3C3n|Rdcok 
zrM4TD1k)MuN->E|Y2Eh*Z!ZMss0IMO+N{w^^^1(uT~;%6t?%pzVlu`J+Is7s}WBmc4 zvyJAGyMTGO!mtOrE~)nMcsQ>fIAdg}5MGmvmB1ssa3bWr*!3SF5wI*ID@&S$WL~P1 ztyg^GK&P2%!zRgzD-d!CKwoQj{Z;|YEHs^?6$}vzOo^21(;WiZ;Ux)D02e@P0JSwm zR|fK%d#I~i8bCA6ac%M;2$C>$m)Hh!2Swy%-IV(D>60zoDf4)t#MYfuh1H~KmIDXz z7glDw;G-sPXyMC30kTu|uA1nHwgW4+ZVFm$)uGJc^}if2&b$UF*4j$5{Med5*x9UL=g zVY(54KwL~zpKcCjbqHAFj8zk#AKW-+RF;K+d=az+;h0NZ*vfHm<$d`K9!?6f4Y>t+7GiG5b%X)r492p86eo#O zu3fQ_kwhIKh`0>m^#F6NdN#am?cZ1zJRF#^cMA!#nl%fM;s?;n_Nv9A?H|*AoPEdo za3>@=v5-UNq8o01bZ7|%K9yz8CSPoxE}$Y4`36YsH)-c>Wz?5fCCdHvvN0UlzQ+HT zD)roA$BrE*kE&l%XO=phk)BR=-9XC5wR!XA#OCW8c%V(h$N?=u+UKR6COo{H1L~?Q z?CdU8VVs13G|Exev}gzZEMA#G?+JT4vt z7mhP!rXtPc_&Yegi#F(TfUkbI#1VZwerV_e#Q(8r9sJca!$%PZBzh8sEZKsU#H9@1 z4#_D1Qg=I_7a$o)huXF^FKhoHfsofT(NE5NpFRQ0M_UsTyMH=&Stga_F}k{0T>koj$m+MAiWL%f>~X=o+w7 z{>GotbKQQ86*h!0bd!k421ihLs`jZy`xq zH7BwA%uQk;!XKjvM2gKY#r7b75!sLYKt2*Ddx8KDKzF*B7*sV}o|Awq!j?f}bQaPB zN0eHycN8$il$$yqu_Y=L6(bivZqw33J`UYeVHDZTggvy2(cJ8-wVcjddU^ziX5qfZ z#SQKpO=mIqAS42I0iFsUYY${*y*W7$JR1phGO)==6lC+e2sHbMIWKH@t*DQ$uM@^S zgb)Z0)O{yl9fGjZUhqL;Lv+jt^stChU}AvyZ1(>?9g>-tcR{FkSXh)>cV_B2y?Ds4=irq&3wFE@{T3noNL+z=N%!z3;`;=~5EF*e zLE=*&utZ0b9OFV2`;1*_sB(>HSqqk)sI%d6y%@^Vt> zMB^m-L|J9!dNawD^3x~Z9C!%%?qBv7CMNTjDvZ;2^S0cq!6&M5@SUo8(?yw6HXmL; z$q4#ULVW-#);@Cc4@g&nsl@lM?Z5zwu^Eleg?_`&B5O~vv9S>?hNr&z#T{2+!@3xha7{1NR({xh6Y5rkBN1e;iL|B6_%3=Fn<6(jA<8OsQPQ-p50d0~1nuZhC` zW^f4J{>TZ%)X#qT)O4{#FYft#<=YG!J3$BGzy2;3nij|!pm!v zg4MCJT3N~my@x{8stfrWrv7Gb!AqHfCkMe=Q{oyQ_a_38e$ND9$QFe{%PK`Zg!Ny0 zAJp26G{*GP;qfs?a9qPzUZty>j)I`4ssm=uaTpj8?g>t&TC66G2Yf?b^D|0I_w9 zmn=K(3q@+}yROc2Bln@HPW$*~KnecOe#&{lqvrg@xU=U@s|Vyx1QD%jY;07OW5VlY zivB_Y$nlejOjv>nElPVO4=jZZqxlwneAtZhB1C?5z%uE5>VP{+&o12Cew*}{xfS&v z8NXX`BVmOG108?Nt&**dbr^tB1B`46vKKTw+ttBT@78pi(W=F`)gd8289T$wX(Z0J z;pag$g*Qa=g91iOf`r~Bs!Le7@E*>GU{obG9LDbV!ml!&A;!SP6=TfMd^YHZ??fm} z(v%~lE0JYs*?|e*|0K+$|5pNsCMJu>f85z#F&FOB6i503FWQ~x-Irz~(dUvQkN-(B 
z-whgJJqyrWhz~c`hWWI1;^!t1zHi#&{XwGQ;*n^)2q4hg1+|(Zr}n(=eYB$)AyZy2GnUTZG0bqE{uUI*Vyo(+qA{OW|Swj-2G5S)tjY zr3PyIL$&QQ+(7cUId?tEgLPi~(K5B!`&fdSP`@X*-a;~NfCYgT7^n#}g-*{gsFB-r z$xBY$qG(dcg$as~*dTI!!-hC9*wmtdRVWv>VGf>{$(oGWmah3MT#%LZY)o(a^rQa1c5FjGhY`_z zc^t1<^x2!B1rX2~0<@{1xv$pf!Oj(Eoi=B#j|8(z6IeKOkWu&(khhSmfXtlp{q@%n zJ2H2+Co{~G3qo1JLQgkElI<5zj=2aDmP&}3xGIjTet-zk2EMNKwf7;RAt8b&#vEAQ z#1Ye4meiYqq$!1(>?SJqI+@DRey9$xm>E1~-d~#oQ5v&M0AJ8pFRP#Q!}H*viA73g zgmwA1kM=0rO_$f4udN(4s-5@)VOgY|yB0)VahbXX=zrHoV&q3j7xuW*0=S2Vr$()O zUQST_#j^R`Ban-twYUO~jFMTVV_4=5Z)ViNs1sKl`+y;1lqgyqc3w|*-8jkQ%g>hy zB2Tb0=ug0fE*3&}fyxRZ_`EfApF`T6a_>!{=euAaVRmHZg7h2(7q4yHFV`b z1m8ap-I6dT?+05JE$XZ%xft5y8pl2=D8RBD*;97VH|y-+<|C&n@1KX{R8?THfJ$4A zE~1#2)lWJ=Hliq@Sg`nX32?E5PJ9uWsgI2KSLBmqJ*>rIhBJJ zTFA+-?DW!R0+cZ_Zgz1WvnGBa4)r@@$jocnzo}9SP5X1G#c;+N9hz6nryfTpvArMBRVt`%cnc0j-sPC z#-%~N-d$^L^IpuDZl%{M*0+eAIB{ZMIK$uEFP)lPcV#~sM_Ea`)=8}8nMN1v0VrvLqjH>MV+2+3{L3);_yn}dE} zV)BBDXR)ODy&boAl!c_0@OM0&s5?)}xzcE|YKx@Sd)ZMilGyVP?db zVgx`p#X50>9k)3K{gkyQRD^|^8g#Qfw`0&FtrfBQ;BrrLHlhgbSg38sbo%+W2cgfe zA@>F1A;J}oH1cdnD2CWRO(c?8#(p^%7ieY(MvF-e*oeuw>d*3c44#Jv!mBaiUL0;# z7l~W~k^&HQCb*?1EVOs#?Dzb6~rZWWR2yyn#fI_+;_#OWRyGp(T<$ri|K)nWKI#p$j7JPL|V`Q zaWvF^Jr-J8sz*|G@e(7~He{xfsjv524a5M@sB%v(ZJivD$3-o1W?Q`~ph)~rV#%8S zQu`e7Cp-h|Etj;4!ywCyPcHP{y?c4n8eMxKO+E_ltYI+pBf=IO5vT$N6O4lSOZN#+ zLhXS`k(ZMT{rv;)SmG4Lfd0$AlFOTMs0+?`%K79$^zDm=A{9X=c; z$CJ)f(=}5U=x(my>?4~%8k$2H-y;qfX1SohrO-`28p-?PkqP@%GKVM^mm(e|;&KRT zw%pk0ka^!62FQ)I7g*n&d2Wp#nwZI|+S)GFa{hElQ!_F#S>}_a)m8Q;u0&@yI2i9m zZT0uoVJg@Fyw1*QNzfmE!(}41TRrRqRMoz-2DA3=x)r6V zzybC_g22mo{OcsgI3b(9+r2@~3qKF)UhuZB^3V_Xn~+`4S$dA` z2C)}hE9u%6Oj|`%A?&C#4*nUc9w^F(F-n1Nvu5)5<4@NwSvFJtIIQ^2Um`C~G+yJ9 zndC_NmyF`j&~OClf-7Am5t|pgf5ml;j7u^Hdgkl*g3AxZ5vq9pJc84h>06{2QQ49p zAZp{wN9oLqwRkC-nXsS=qGlU@ z19mjEv$Km!G-R=VjqpkY($p${i+fI}STfR4`(2VV4b%h(bUf+j4L@fX3I@X2U)+#W zBX~wh;P0L3%ea-{n;}W;R50uI*K4h{2T?o7*O>9WJhC1y0T71@=p}-oEMQ#SQutd+ 
zx``w2LpVB@%k0P3W*D<1ce3Yknr)F|i+|iNjqL@vkYZT3OZ;u^&~ViPWIqqp#YQ&sNlJUuc1h89W(jUFWj<@jY6k%;P{?C;g=^ILQA#0Z zGX|)n@a5RT4B|eV!KM&o<8787d_n5)+fc7c*?t2U2*<+}`&1htUSBJ%Yh3er&`vQ$9)gypSqXynE5s)jO#eX=eA_lCp|C_4|;q^MG`a@Aur zAjEL**|qVDzR-vI!$Tagr|$bEF(iZZ$_!eX@d4Lhp@o2`s5^ZvT{*=SAzLn=(uwu% z^5{RJyOr-fSDy+IW$ONuwZyo|Z^7$pi|jzO=At@nwEY8@URp}-A`1qvt`z!#OQmjN zb&65#_R=aU#_ohr#Yl%Fy)WO#@guep5^k`q*=!=ZE4szuGeTYu(^m{L z$uxoG;cr89)>3cK(Cni`G!5!OwR>xPIkDNruiZ4RB4qko7MVGuU&xXT{mHE;qGQUG z$kLgBc3-4E6Q z1=P$y=>vT5JgO1X3rQjZeta-hO{re^%>r61DkdZ4$<3;U>Zx$d{Ly(a{@H+GNW%g7 zpj`zwm3$75ji2)6zLoogh^a`{$F5z$6JLLV(b26R=ME)?n{+j4ci%gf`X<(uJkYmn57TxXF7kBPFL<|XH- z9h9xL3;%-Hcu71Rf*x)o2f@Jd(? z8BF+_gQkO6Bn~pwUV#4aRtM#}ADaFH+F5Rzgs%FQfs3X9^)zQ}?YopN1v^isP42l&sVw%1Ee(EOL_QNr7AxI3b zMhU=76hQ*U+E`f6KpMgHZPF00rtOwuXm{|59{VLu8|?Z1os023=|N#(NJhq<%xL+a z=D#oKU@$}S3z2#M7iVkl_mjr(eYoy*@nK-Ez-d@z^n5RfszSGb0VJ{i$bX1J%ft*!J9#dCz6fH3CNwP-OI5kEdVb-r}3qn^iJvZ&$;Ue-kKOx=LQ{=sdx-;!`< zJVAwq-zA3{B((VqC4dV88KCk8s{vzq&)fUIQtr^^+}gMEUYk6A2=s#0(DZibam*#0 z$Qk=FJj_cRe@AjBb7^k6=IK{;7chtr#uF0dm>L=0wnhKz`XQkqkv8eOSXnO-MHV^i zW0qb1`6k45%klrauFE+V%#aRZ6s5mv4r$iul{A8V6Ou zI<~xDx!}nOYel`{emjcr3G7IbA3ngwW&^Qm_VM0`pjeh zrVBlQ9T9AH7_wO^=BIkr81PCMDF2m?;v2 z;Cmvp1gWY-ev2ERDAcC^`T%z-LJc^821jhU>B<^CqYswerO?l_y+D?rO$-$a(?eRu1+?&a zpKE%bF+`A@=E<~*r=wr)fKMkAgb8{XD)^3|W@=2wqWvS!>sG(-=%|F+h)jva8?lau zAlQ+C11I?_=Pu);sqM|+G5Lm96C$PX_6ON5awnhupMDt=90}mVjN)`}Y|#Zw4P*H1 z?LRxfuHOpyno_@YvSNuNfh6!-5Y#!ee-o=o|3-S;sTktY#0qmm$PMw5D`42<01hEI zAP||mAOIi|wh5=oP*#EtQi3tNY`l&gqsj2zN^bn@dv`Y@0LEAvLbA}*D*SML|q!m0MINc zeBr;fclVNE+ba$GsB^naMdxP(b2u!17i;n0g!yZ6snh-Z#iSfi*BcVW_SwaH^)$u0Y<0 zx7-M>sTc3=9zVZGWm|m7^LvZ1&9nETqb#5!8=~?KULJok&<{a^K=NG1{Id0JkZG!; zqmv?XMh<$LAK3wa)5Fl75@5|WfeLH}r@Nm7dys1y(oB4sm7Yu!YapY0j^{Q8=G_@} zU&r?+{ez1Q6BkAD*kO6B6`Ho{Kt}QkV)Mbk*|55PqlsLC^Ck`cH%erO0lpTHmgmYX z8J*sc4k2viLPwZt>E7!Lg23gF`qS{s=kjMzn=4LBhkkV01PpdpAC!OU=ny0WA}XHR 
z7>V*{*lI1;k6#p_e-1cjxP)xczAw?MA^J)L8?w^h}}Y26lx@jac+CEIHh9Q;k$q$PTNa8Pv>Z)a%`8V*g}?ZtvkLx*gAZD?u3hGl9hAUKs|)xyQV%^c9|PF zArJ(z1s8rT!J;C_K0&Zk9AnufMty}C-^e;f+x%r}LTgTZ-zJI3Tfob!XZ=);A2@s? z0PG2QF57n}K=MMQXe1zIHZ*k^U}KmUoaj)PiE6XF>Xk(GVZrDh{gJ&x2ptZD!~Z&H z?Y1N_9q3P5`MK2JxS^uqxiM3@DRzkPgnqmi5sgEow884E$As@7*CN&k{Z;y9WY{+e za^AQ7ab{yKkuwpoBk=27NF1nF0W|FPLFawO{{2OLVYB5&G1SMt;{eu!fv4a%c*J%A zt?d7kq2_S?8}{%_XG3r+mhO@rnAKA@@mbi_p>U9ci|s$a847!vU4@X8rMeI-U zDR-&kb4w>#asNGZCRwz}$2|H?=y9TF=ODHxVRJxW5f;5wPDm*xhn3r+xI3vF_!&yQ zUDb}wJ#$s)W4*?DO-Q)^Im1N^j}oA9OE}LaZk1odccl846K)wQ7y>S0na10B2?Kx( zdi=P_uqUv!4{_t;ciH?A6jL9z@(GU{G1U~@(kx{Qe!;1GA}u9N6yJ=9iID+csA5HH ztQz@3uzZqjLSS5EST#v(Gi{?$2vr-nq4mp8cI9CyP#-u;P*6~L21G&uPajlB{FZYe&>lp z2y{3xq>+F#(gv6Li!{

}Y2 zknL-fox=Y6n6Pfx7hOx(RJE;&EwUz7Tq`2725Z6IAs=a#XS)CfC9-DE>V44kff5uy zG^J2}X_16&|M{`HX#zyx>=K@rFuN_+t)*dO+mo2mxTEPI7PLV22|eUGN_^zkf7$q` zf0WrA90GM15=SAiv(!sFwbPy4IZ>szA%eU$HiD>FhicGaK=T zd=mci2OohfL|QSwX=WI#l~dAQ-ot7dtp}?ywJo6v5OfLYa&S*T;{4O33l}a7j%PV- z`GYc1fEB-dl8jp}NQf+zF zQlz#e?_B?Cl>?H(!b!u;Igj2Kx$~2-Pq-6v`#r1FCrRRyf$P?6qm4W7Br8mAG%@J` zYTq%NJ~VxO(=a#svK}Rz*t9-x6(M((#L zfAXaH#G5j7V)@f6_+LnsINqu7C$01sjeyAxcjD8YE=4^d+dzi8-b^XK)YR4O7@AZK z*$1SW^!2y{QaF@l7Ri4?KUwel(tX-ajl6_t!|``&A!3Yy_k)AvodVn!fF}>-!U@Lw zX{_}sfZU6`@mS{J=Jf?PJDTjT9YsnI_;kGE`mP3oHw;+S@(S*}59^uwZ;GWc;_&7JD%x*AUkb6d%h2b{P`xNe)3z$~WQ$Xs18D znN1K(eZh?&GHCPbaD6w1DX}DncQwNb$B9Gpz)mg#7G-eQuw_@=J>>I9!O2T7=7>GK z+gSLBjcP8Btl3{Z|ANR2Fk)9#4dick`{_fQQCFwTk#$$exHo?T*u&%6JV=5$dbFTc zSP>&L`gOE@9E`N$4O2#b8@$|BNfS63(r7e7Obj8NW+kfw4|hg7Dm`o_lu}}%bAoox z$x~zi`DFd?=VNHtK&@W>{S-&_@eXizR35wWs}Yn8*txu5SEigjMMXS@Sh4mL9Sjkl zmHk=p17pn(K}r=_5P=^ZI*A^#e-*)5lzNUi3bW?Ee+$~00KX=&%>SG`@O>TL7q@4b zGQ96|u?RU?&#!akjkdu;WVNExsm~ui#AU&Eb)hy(>Z`*{1%vDUGbk2`0uwh;LU+a{ zFbl0Y8`#M%A4so-=J+c`#pDF9dyn}+iey3b>{~jgu@DBxF)RyR-8#oJp1anuwloYNAjl@|Ol8*cR zyGr4XmS*r=g!gqR4^BkGWoS69D;U|BA1v;uDx{>0L#IQh zpT(tzRWttC(2)`KIGo@S|h8s3)z(vQD2T6(@GEhjy0{W=5-{bo?L!yHDdzEe*6ggbd z+w`mE;>f>7e^2{KUSGcdiPjyu*D=&I`Za;;Hi`gQId@ z(g72K!~+DYHk10FOk2ey7)wRLiqwzlRb@KrnbqVPg>wOTeaF-d=@`+~9-IH{dXJ)CTo2(w$o;YaZ!=l&u~@I#)?n<{FYJIj6){(ZD?9pA^)t4I?#Qw2SZ^I3#-Z;< zU%&pNnaCtqK1x+M2m>#%-9d=X96zhU(=19GcOiK?Pf&eH%UnAW^8Vx7w~K-&@-60U z3)NKD#|s<+?M4qYD-&}d*@m5^jh$-s(mXD)fDDL)n}kQhAfj!&?eYILv)N}13AYk8b)4g3gR9T) z+11|hyQw)FZV zE-uI&zS!p)6EjQ8N*RYn%MqR#P$n1c%g@T3YO&L{nx#M3c_?ny&~1=ex7D1!a_!o* z7yO~~q#y_0i%&~SyK1(3yB}-W&M0h}1a@7au&7WqAKP zH$V&=HiFd=hrd<;x8e5D!9cWq2sF`66W(zB(y}k2|C|=NqVW!k2+we;Qt>QrC~@|e zZc5p4FM|m{ht`qo5y;hJ+grN}iJFk0GHnsbc)xTQxHn1LrW*g*d-aOQdh{HT@3^>DZgYADdlK@I%_81v zuYsElQCvy}NB*fu$Z#}CE6Q5$OG6mLP$XC}Fn)^*e{FHj?&*l{dd#sTbS6h*7oiFB zm+ill=mn6BMA)BP{6GZ76#q-^19;7ZW4u95kld%zw3nLwAE;o2v;d8&zA`nXO`x~0 
zU(bPJbb*QhU9M))pV5Yk6B6-<;HsIFXWA`9w>fO@6wDU}i|J4qjEi9WlfR>z&Tw)*+G_ z=bu>1)Oag6)#55G9zy9MURHig8PxN>)W`q0UCQTrgL3E$W1HQRV^__-H?xE9b}YO? zpRk9PAaR@~vm!>JU@85L)7Kr>dTmlWq`aL^9e$TD{{M0H9^hR6ZTtAgN)gITh)N_x zDN(Wug_M$!nUct?kWGk|C=D_qDXWZ7R@r4|WMqdz$ja=0zWP4TbNr6~aXj~P+~4nA z#^?QdU$5&ruX7wN?CpNz39a|ZM_2hN<57K4`*GCmIGFl3q6o1VuIIXV# zQCMS4Yl`;oeA9~MP0lI5b8$ms{rxl$)>_28yiJ%q0%yA&u9o&*@C5#S2s*yU#y7fM zbQIXOIL^~&8-OJP{?>6=gH^4J!L5#x@r$voO_(^VKT`0P|Jjnti1tsluWNVpHr9uy zmRRRS5U9umA2NI_N4@tdF$*jTh89NX@&m-DGK zT7o`O)%2rDF<)WyvWI;rUe^$GPuy@Y*b)@yW43+mk%`GWdsd-xGU9AAtjCl@;=1xT zI6wPGZxnn}o26a9d?WX;lKG)(um1&96EOWH!CF^J@;7rTpdd(p%D1mC@2vmu367AE zi&Sf%wD=Q#6*_I!-gN=WA9f7EAmh=g%j?<2{wHY(n)d^jui69H0TU7Lse_+hNlctf z(xJlc)a|{auYrKdOW=?0x4CHg11@H6InWn(@=_{Fy;!dfZ8s-xhA69Wc-3x>;Gj9+Ku!9w+0XsEk@q7Q9ErN7|L(E~e!@=+^mKFg zSjZSiDdi0}c3nIM0E6UX!L-k3{)%gyOo=CTQ4W2=a=_dHNPWzqHn1JnnDrf1bY3^k z#=+qtPTyTi9bbpygAb4rKAvEV6b|X;U*k7v=H&Y;|}{x z;VURxT3RN`YY4daIglomz@lWWG5dX)KmSG&ut@%4?(jO8che~}cKydpkr~-%> z{Wv`5x3n!YlftpJ4InRwM3yY*fDG0o4BG7UZm=S@}ply`>^1WqF_(xE(QEptn?RNyp zLxg7x@+1#2q6xAFp#)nMxQ&9B+r}Cc2_Xb!>U03-0G~6?=Qd_F=Z{>b9=?FsXx1Ih z^z)08VdS?|t8I>A@IUuLf^>%}fotz|`V0Jxi(OSB9akEaQmMU;qe$KZRgR*X{mYNw zJ;R>p4HML(&>Nu1db{uuJQB){LY>f9MMfvR4Iven;9g%}N|6^9o}yku7m``hL6>cX zhLmDmoCDjLpdfnu$PZ(3{?rn08z7Qz_r({uUA~+p80uSzB82lw)UqfRm}7bh=DHxZ zzk#dBb0;uj90D}gN^1}G5j!rxuK%FrE43H0>~x$W;&N#ydcg)?S;SN@Io1Pi)!Tfx z+nf7W$@a&aC_oPaaGO0|_5ucpw{ik|qS=k!LRVZ|U^;lWEnQSnVvh+%VuKzzes%Hi zD!jUWWFN6($B|p(%G6`6Xp#(!!dxrRD--+In`h?m-~dbCX(&`mhFw7gJ^px3<(2L$ z!zpUMvw_c;0oNg#>wJKjn8EAS6tHlJFZUR+6u|{qffcvT`>U7U-sOg7R+{3 zSt2(jPzE|_NC4I zD@8&X-poLX4%sm+Md<6fBZ79&epN@DefdUh`4*R>q~;2Hf1yJtWVQmvog#Rl%x%P$ zEbmvZvFw(B=ojre3H>B#C3{+Is&xU=5 z>Fp0>wS=>L)DgpJeFkawyp#R-mdg>)sLC1fT*yDHrnXf`s64@Ri*TO<$QcLzEe46r zIHA=7n!e>ZaHtP!x$0lKW(2W-MzI0QDN5+qNG!B!Lq1Oov?!#4?161D5&D2Z2rUW> z6nW5QBC9#$miK!0wFI*XjZ;6PZOia)%wGRlFErLM8Qknycc;9lGKuqh(}(s%+#YB2 z%b~g0P23vF)X~zAYKS*;-Zf}^6JApEw}2fVtX8f1?gUt?s> 
z_y<~+HVi>ELk2LsrEOMgsu{=k5r@oomw!-$iK$mvxzSmoh6+Ew^gE$%k>;CE`nrJ| zM0yr*lmpR}P=%JW{@(I>IuoxE_9%E!)^FNmH#-#tmI3&S07!0oAs(p*Z5mb~nI=*S zyQZr)lv4(Tq0j>FE$dQ8i=}qjZ-{Th z#cbjm3h@+0K+RFyZ|M*+X@rCVgUhI_$F#1hId6UV89v3nKw!9oAENLR`$)VC`xdXz zQQHWJ4g7D{<#)>n*S)zfD=oi{3)X{6fV()0_@AGsit0JZt&!eJ6lR2p*fTyZu|8<) z=I!_H>%Q(i)OY{L^W%!N5DtSIOX?4jH9lgH3XthEQUvUzxZrViJ8KRWUleO4qkMr4 zHKzjm90*k&yi0SX6_x%DK%~o&F0m7V8P!A&7hm{K*T0Tx4;|eT8g<$`EU_;p?d{fY ze_xjd#AP9^vIQ}8hiiuL)CXQVmIL|}rqEfd0K#uVO>mtWylpV92HoV|y&O;zgt;fIfTBtdAJz2^rs^bk4;wQo z#f_@!!7>Q8AknFmd#cCvS2HrI0L2{Gdn9qMivY*%zt~S=c-q+ZfC9&#gB@>oDAmT! z7lGB8q?fvpr(}Bn*f;3IKZV8p@eIh*fFTrZ2el&XM<_jn9Rk6|qn?{GN1xC`!hixr zcFt1gtIekJU`w{T5(5f8M4>LVkvx1}hN*!d$&qUe5E8M_p*Rh(U{g&7olAn8h?gv} z)v@QH6QL*|83JEg85NL{@QtU)G$EUh0;)R~vh%;pRtTQr^OPs9 zD%mV!t174wnSTi`^9L2|kDHOyKLprS(FXq5iv&V(z$-Alni`qylD*KC4(C6g=@})E z_2uQG=V|7ze~JnznV+A>Ws;G-TTrl+c(2g@cdzSbU09E9+3JDSW_ADTdz0)j_keLA zRA}~<%V{KLl@IXFv*_B%>GG1qVhz)SIq*Bqr9o ztJH0hvGSO8K72!8nkuyAA#vdW^v?Y>9dV&RjmqXfe8SU$ae~w?1A&7s-yv#(#Z>gb za^5%5`XvGz0X&Oj4~`!JYDjkN7@si7r`)SMV0 zwP3g*lPEzyVI-)ERHfbOCMUb*lh;UL4qH;NzU zaG40Ek?-D-BLQQo+t*cmvwAihsb5A$1mPT_!tH9>H0`aTMvVWEX)hJUQ<^UzaN+Je zjMHFIuuFJ@gd_pftK8`#0-{OK4i$nh>#y;HKGGH0;j_O)fDhjT6c*Nd^gkJ=W$`F2 zW?6jC_4wZ>z%G!AWZ8t5@8F8K!l>AO8c6h!2(e(G#D?EZx(?y(uo)ps9%QxbW7Xf4 zCHwpAWzZgE{!IXG1^g6njf9C#q?z0LE@=yK>fb1Ar`7ov;SAbu+clNhLwM^EM1)41 z+UE+j8=YHF>DNC4tN-&5$M~Qa-|*-XQ`}^c-M9V3xCT?QHfgZ%Kp39CRx|fu9V8ay zP#;`}S%g6kFo8KEvztXhVq!TsL=U>(T~x6jwZY(w z5X33*YuN<5QZVEl!9v!ImYYw_*;_c~>C5$NGp5P|Qj|)EqPeuTr`@xKp|enFs9OAO;erTra~oAu6-%0Uceg(i zn}O+`4G`X3DS@wqoE$(9Gdc3GCPr!conp9|W2YEQfB7{rI84 zO8X*H*<|U<(k2neuo4jN1$vyeQjP}A=Eb};pfRio?j$(a4xV4mLJqBLIjK3xp|Df5053T6LSy_;46gQf( zVcm%K-0)9FQ3vGr#B381RJ}}h*zF=O^?utO*N(kq8K+VWn|I9pPE&Gs0sKf>%?oWA z+X!PmVSIdkwr6e;TLprKur2OsU8WR+WlWe?gS5yh8hr|$>)UrakGJ()ss&f~BjzHq z$Uko{q5js2pP#=)BE=UAtJ0jwAw~glPH@oQ>b>Xpb&K7b2--r}N-5VyDjx{ThCVN^ zaS8%YMKAe5Ry=}D5aNq+#8m7dul!^VYRc1Op<@yBZ0|FiC+PXbrfshGd{cegz3;zu 
zsN%%iIKAXuu$v(kw$OG#D}Nnfea)Fz%pe()5$&W0mbxiUetEGfpCOUmy*AA8>ea(o zI1t1l-jYhG7e>HjLk{Ala(sh2>tto+Ll|YkA6cEb>f6bG?@L!+G6@yb!M=*3pW}=|pZ57S3!<60g!ZT&|-V_y)dXWWH37efy6}NW`jPx+hE+e_rcS23)QVB?zJs8kw{It`DnSZ=(|TypXw}O69bVag zy8~Dye;sFHd!sIP7&kq<9!R8&Mobu4Jg-TpRG zCo6g;;^ic^qTn%Y*LW9S=!9F#w&N`UC~E3DI4s#riXXN=GI@^+D3bs8e)})^zy=e|~zCme$_Ex3+EqM% z0VV!`Z8b5=&uRa)v~!v+LzwDBj9{o6Rj^A9`4<75%9l1mA9lT>(pzc7A1FB9RK{hg z9h%ENvlroDj~&Y!Rml}ls^O(g{JSw*4mi0*kJc{j-wjGIr5I-8uw?zbKtJC4^Xr?f z?lC=M{tJiUV0E-dDvAwv+hgkn)ejw1sdE4c4ugQ(!J`AeA|#OAKHk9Yd#>TfWvrQq zJt?7SQBeb+!w|K3@7`0a4?}qzW&CoE|No4?4XsFq0{rDVLXN4xmEO+Mp0<&Jj`HK} z%lw5G^9Nah{uEJLy17+ZU=(3c6 zBAJ@^v@`RNvlWAn4KnRvD>cd=a9-=*UNS;S+Nxlu8>#{(HsBcmDUX&P4yJ(@2e$?) zP?+7KKM#-Fcjo${wjH^?)%rHkfPivs8!^6Z-2lDCL(%^+#5#}fNnHQ%sk1_OP_3k0 z6i*2zYkc$$=0O{sHv?3{mK$|{cf2g0-^-9_lk^~1oUaQK9SBuF1FX{7VS@g&F1iV|peXL9NBP1|W|ze3jOz;(c$pnx zaH6q}B~|v|wP}(pex z4oq5@V^{SNV;T|=O>|FaW8pT%TP*x~Cy3Ah&!7{yo|&0x=_$O63xY2Qm?=K`e;KK8 z7sJ}d^1(f~$Oh=?UIRR(GzvyVDSOhWAEgvPP3|+D=Qqm;8W8(~ywvN^ii0Q%X?@wC zw|o@?1>3nmQUPHEX_anesZzc3@@?Ft{TLx^Yk|7_UuM&`nVX*e=VBdSLb7_|r4BX` zS@zwVh${gich|A9`jLeZ3A_0H+YdwNv~MTv=@SzPxI8V)vfhS@Kg0hvT3}BTNcD#Z zn-=H5IwN8+N=tfU?IImPc8Hk;5l$3`JZ$K?H@HUN(xw>DGjM(QVOgZP%-H5zNV1;@ zuRT~iq;?&4EoUvH0v`CavxqT`hr@PvKYFEeyQ_k@f^XirhmN&QRvSN{MvoxseKVO7FNnL#io(5ry`O3ANhXaba}U}SRDuK!Xkah^pw+WjNv zqRV>!j2!O`2gnyknun`bR|$Ok49d;oA25#Fih%9mG2x9Orpmxax0R)%#|Hx!v^ClX zM@|WphZJWJTJJu)&d`*8w4GpCv597+{-fw$$+@2S-TfX(m8RM(+FId8dX+&bF z4L}()fijoHSKpR}1FR+l7*L@Q{11tO>)SgTyqmPKejy=hM8_~PGUaGX2$m3bGAVAX zI9%2Y=G4@Z=MKu3Wflcrm z1X7e?aev9nU-bf&83foQ?`-y&vJYly5R@LjmQ*msZccso!|$9Wxx|(JNMDNe?F;p> zzBjILvR^FKKK(y*xa!d3#5kK+RVAApKD=H2j~7P0e~+}UuiEL-ZVNId1GQGXHlaW{ z|5ft49LJ-?Se{A%@s&yDk7z4}nHVdEJ5-3$^2>+g%Ii`lEVh4CWM%?E@&0{wN@L#9 z8zlyTONRVw!#$WpMK-P)q}t+89u=ze#A{~~;T&_Zu6GZJ(hXgpmSA6_KCboR%jTUs zkutXC@F|zFe6vjqCrm5FFaL!B2?;QPU3*<2S9VTnsyG4M6i=Rz3fQ)Y1{e%46@WNY zieakrnB0hf_h2lVw_IefkcBJC3RVNaJB9L@oT(iFNupW$GCYdPOdKt-{1N9LPwP2b 
zTUZ>kTmgecJlc$&{a*GJH>cpz0E~mgXJTBn;?NBqjk5aV6AUh^1*9w#ftMdsT1SZL z0hA!CMd$8q48I}#*1$!D$g|q(UVo$#Az4EQ0p~1^H!DNyZKg>^01OB+Y$l&I;5~vW z%4~6^lQ!_TdkM57;dC)!>HdS)3ABURe{`=edx=2K=*rH_x?~^-M94}B1>%0`DyY!N z{-#C=qZi;1sHt&@UAY1+E!1Ymww!nct^Mxw;yn>h*qk!<)s4tUFBGF1IuOBn@!mB? zz)HHrT6e5wpQd=ozbwSd{|Bj!$Kjt7?+yx}$nqjFP0x4T<+U zh@2;RZcv%S-FaXpc~F$DfM5R!;zPL^^l9v4QYVdnG6q&%UdkiAo{x3Rz zoEepX#0P^dOI2LS)eNg+Sc-1+nK*Rr;ZpQ5Cx)j+&*J0XAPN(6+DWIYlw#Jr=8IRn zx_%y5Gb60>{?5PI<`8D2f7vf?oIzVPgG(_8zma*1i~B_Bo9`6onl;l=q9>DzXUFrG zkHMr9M<13{l9Ar+_tno3Rw4!8o@?>CW~_y_;xF6kmU2Xb53W&(^t*BHvn*Yz1kOtM zWHq>tP{bZqW*&c{_2C42mRBgxa5TTX2eJP~Wm2yt3nJWQ;D4W&@3#I>9_EQbx&)ct zMEFzsl2VKr1c@;h^&+WRBv^-#83mvNyKsZ*HIFBxs~Jm?B<8TZa3%hcHHFwVpd5ek$U~Xvv-Hb(VGPju00@`&u@-}{T&5FVg?}*W-)%9@>YV`}$eAzg;!WN5Ze645#ubEffr|!iqtp&nPoiAf07T3k?m; z@n@=QH16=0l>-JOFbwDhZaX4&y!J4KZsRpXnQ+eNNN(^r`#!HqQ;%9l`&pi@Ktku7reC=<-7%UUW8yGpR8unHzmN)= z3VrCWY5{t85F&8XwJjd%(d?2OWn>Rs^+?-JTz%v^c1Zx|3E7L1$oJYJJybRzPQsF1 zUuz_=7jtJo=kW~-%Gy1P<)j7>1m;Kg&6dabhi91Q=G~I%7;I+AHH@X`9&inZHvu{h zMtft?n4ocx;Gq->U;aU4DD*OH@N<=%Fy`w zvT0A<3Sn$RSVlx+S1%cM%-3maX$8Q21!lJhSqsLK7l4kNqVN91kENQX(eEe8?-4jy zf2(U~a4--*+3pYTKANdzg4L)^$n>jNHmwyXXcwYz`ouR}Ai^UE@VRa*ii_#Lm#Cj1 zOZ>;0LXX)l{PJ2^l3iJvI>vk8>RQFYDW8{hL!!2|SO)k^MiG)ac;p{zYwsBM8&}+S zVD^^oXNz4+w=t8Zpa|#y^l((6J6BYP_WyK7>SihKW|E3VQ~77@9*+d zKSZ`ON^cyO@9up8j-Hik-xUGNk2>JO>@(>y+LFpcy7U?MOxl2Ll$c|$#sT`lcC^^9 zi;3;`{P<-oXoymTupr@uHsINo=UrmC_Q_cc0!B6%IFPY*1D)05q0fUf0nbTggU4C9 zVg9AJFy9OE(Kii0F2|@xsLcEYY zH@!;~edY^npJW@$JJs#(7*(xa=$-@WZ-5rRr%(uQtTS~q zPNwsaHVOKigh9-;1evIh+3KB)68S!Dw&xMy@?_jFnx)N#tts#1r1#8$Q@h5=d=H+@ z^q+++n|rxvx~FD4?4_(51yhYz(r6sI9h5nSf(AUR^BN*vZE=JH&I_{-eulkDl0Oew zw4D&nvH4)}CLz}Z$!D5oX0a_M-cus3VV!CI(|tefyFm5r>0Ed8Hh1J+o5R?EIKO_L zcXzbOugVe~%&qt1g&jKg*M#+*Bq2#yoMnndXmJGdz;hP(T62hMu$KBqt}$xCOZpke zQo~ik6cBq7(9&No22Qa@{^B*pmf6hHhx+g;Xu{KyycMBK_Q%`i0Opee-{m=HOjgst zH?!uxIBBb2lP8XhKJL{=0Qx{}7;w5GW%wqqR*d|&9%ntC?3Z%Aox|?67ti!RTv=JP 
zr)Zgek|%b{$*qZ*)b<|o=t*uP^Fu$9ad_sEbRMGAfw>4{Qmp6WgAFx2iyuKM^b;&n6hx$_V zL_u)v2dX3YcWDYNE5+pV(Ji!mt$fy4>oRSixqy*fIZipCbnYmH)VC2}L3PcPsSYhT5FH6~ z*86;4BvO9f>vQ-~5gE-Q@>H$JJ!qqidXMJ9aYLOsbFgkfy9&W+bH+=7n-EnfP9t0h zoHddlb@4-J-j9_ZH%nojLWhqT%o8Lg?fuHWCg~`*eZAlQV?cq-G@G*Y#h`UiW9*6W zd3URy$xvNlLD3I_&`HF6()?ny!0=-MyI7wKn}x6FB&e@1=3X1`HA0m4n4%0=bzT5n z`~KJ}8ir%)HvS4D6pwBp*brwRO{19GHGRb$PEOg>d-5*XMX_1F;GA~;i?y9jlFnul z!v;Tu*KM{>VvgT^!iH?ar;vS4VA(T;sH3UMt#<`^9MW4q;6@>|Sm{kVXV)xugKOE_ zW);&s>R;41CLM=|axz8#9_ZQ^Jd>Bc_(%)G5;LNEnXghrJZbzJwE8l<%&CfxR`w-_ z$qdy*1~p%Nj^%Bjtg_*qB1BBf|LLItn!{8PlrX7=(qj$xxQOk;3uu2%brvLoYSPz1D^1^O@G&IUQrF zYHCuK+V`N1DMLJ}7KH5CZaq)EIu`1%iHU}7(K^L1>DTU@ge1CR%+T&bM+{3+v?bQ&DAhIc%6ffH-PE^iQ0@A@fjx=&`Z=Y1Ag;nF4H@{Eh1u(ZwYx z0wiDCJtdT{MQ?E1%ValJfLH-PM!4JNU&{QC#*Y>r_HI(v(Frwj4;N)0viGvWKVGY! z8v!4;0l3DcXF zcE)dr-4VA#%mEY}EhF!Bkv)R%FDZk?^TsgMZiERR1U%&;Fmqo^k%>}Ph#A8N#&U3a z9S#tcDx<79Thp>#eJm9n1ABdlbW-;SX4|m{&C!=L*2^1SLc)dmKv~D1({3- zpgXX@%nGt_FcPzYhJK`Bkt_b@(yfJoDVusFZ?X;PtH&Yig-a>%SJ<{|S3~IaUpDe| z$NIDk@lfcZA~{9==6JDjkowPH->78RaF-heY*_EQv1cMqa{gpFkIyj0=`iEZLgMG= zFt|18>wV-bIpHc4yM69+xG)PQ8$aAa+|818M^cr zHvwT%0{2J_nKt0((IEim%V5$cujCDar>_+ytOqb*_%|@`u8m{ z!NH84^(?fi>!yDy@zeM&y7cak4&6u$7fq6qS^C0Y18C6CLBI%KaQ5rkp#lM3B?1GorExpsu(q8EkKJuKX+@cfVc>=w6U6_At+g)IShUn1QHqg}F2Dr=4@t)o?1cbyjqCm{+D-4hV` zQX*RF7(>M+Sw3yHLVNbycmI_a1iK!*m^@%%)f&5ed(O9YubQt{pA^#IFXiZ-uJG?V zt_vC)mb!;QoRzw)MJS+<&$O!^R_KLotRN=j+ImONHO*FF^6(QgDp%9#DJ z#R48y3bB|C@_&C4C3PC5B$rS41;}Xv+qd5U)EMplOSUBK{iooqbRRk=rNyCzehjFH z+cy7N1djV-n5i0e-0sYliRy|IHk9lWucJRV2VL}MXs5Fs1AcY@H$Y)khjfKX&_ReM z$=v}9Pc+8ms3&bek6(3LPsBY7uP#mZgZt#RPJf8T^<@s0b-VkHHkGVUxvsJTeu!kXq7-EF*_ZB2V6p$AY>2lW=r}hFYkK5O#gQR{2^hKwX)n_y z;TKmX@X3(`A1s5Cmdj_#d{rLY&0o9lEYM;8YK75CLx)JE(#umUIN91s<8 zuXH?+w!Z~Wz6}4}8n~I7N0|^D_#@CX@=(8=uIk17-j8!yd+)6qW8NV~f?A`T__+IW z&K!V3d_Nwq)Sula7n}2DoT~;H4*$GsgFD_JQwYZ@9*qh2#Tz5Xa%)&l70L24L&T^EAlY>|E-1^Q zw5QYHl(o*b%iAG2`-ES(62DH?&^?C6G^3oI&V$v+(H3mI(LeiqiQR@G>hM9RVfXjqr$qb;&W=xY_oRH?T 
z+zTLF2>K`=bWzT*jVpef+xHn)LgPo|iyT5}A=TIB7GBcl7!unTvJ@7cE^9DaAM)11 z;-mMeOBYq=G6%?b`BSwAI`QC5(&_A5l>jypMZffEhi|ch$xF3qWf${}=^vMMSm-r| z6vkBC=BXgo8a zmLKpu7)+l%?bIkF!>bWMyaTGya&)4bNho(NS(NORuuFq&<*3jl`%11 zs?bG=1V2d)uX7%pF?)DLU!6VQs;^YeE1MpWqS&2|?&2(rz9R4zxVHCz>o?!)Aoh)LMV@DGMjfQ8fCJd5X!kg{Im=;GrCyJc5mb0U}0c* z1~emoelW5Vy;=L*8PC9LMaoWes>b~rN19%ipcmbic6?$@f@0LacHhgl>m_myKO^Xf zq%7kWCi#PVWuCBK3`+tRaI5#q(&f>w_ptA?2{?U%@k0HvYXAkR00C0D&|jAq*^cP| zzxHnMhp_8XLVu9mzfDL8`y#Wi=gQ7!-oyS7qP%J-nyA*YZS{fQuB2n1YEgP%+Co3( zEmAm8o1B4K<#@FU?|tc>x(E0Mn~CSlDV)J0u=sGA_WG1&z^&G8PpqKHF$c2{-{Y{e z{-c`nHOF7&&_fPHR2^0^o_fwv?>0#Z>s*uZUXcS1{~aMIyJfs*K2)AqR~@|XqB-sw zkoPKHOFoO<3J@r7J1mK-_pM&g3q%esF61C4G!(?b-y41o<;9M(jIIZm(#Mv=o$!f$r0OxG-$iX|#9v9*|$+c;dHgba@EUE6{n`Uw{q>s)I z1i07m-o+kW^|=XQk#2QJ?%}Za>ceSqnJO*Auepv}(X`7W*s*D$uGyPb!*g*ov#t>y z6gYi{Ah}_xIDY92Fk&bWtug|(H-C)>ubY%<4BQ8n_tjPC7!s3bs80SRc6AjPjWBv{ z7MJ|xkg{O(tsAFh3WLfQaQ0!aujSz25aPV2)j|hM)v8%;v-(45^D!B0Aa4X7;B#D` zA|@SZz^hOhj3YdgyEYnbnsVcNq^8C&d!0*6=Eo*GiRbY9!;jb|78I_ zDtS@l`*kN;C?3o6@L{h4lbY#$weabb6C8o3nqHl{i7brYoz8W~0h?i^uYn^8i`?>y z*%^3QXUDtK+jy2`PV!$k0LvP@1NWLAMm={;X3k*v?xL-!X$e6brrgqrLW#wV%UX=y zOOy22txDc9ZPHhcvF*K!4*$Qq9#Tr&^+}u4)`X?yDEd;9ycJxrk6*uj9cAm3I7!Dx z-fKWqwGLjbvp2eBZ6?bfJ;;i^`(g!uW$n~v^(TM94v6G3as4n7!}Pc;vJWMG zB-0!}#u?B=YJ&JM#$Zn8miNf*UgUSaspX1nYXhW^h1Ah!M~qhWIr!bfP1tlepoh16 zq~gGC?aFH>75EjrqL2WPfxeP79P^70>I~urM~={g3ng&(;Oqu0xMJ}`%dQ+~whxg9 zzr5&)+IddrDrs|1;c4-Ail=j4{X1B!a;|C?dpMg+M2Lm9YKUC1Ozt$H6EKQgY+A;E}o)r)K zzn)Y8=OzX?xQ%S-!m~(+q1(@$Z;rR=CJdN58Be{{7h|1D+xlY6PgrN+TY>vNbT}mE zpiQog)IOAQkY5T+#toG4!~M)eZ!d+M82CMyJraJ`qKxlHl!N%A-~&Bizxy7hC}eqp z3oYHVZif3fLfQz${MWmYw9DBB>zqVS2xyazoHj;nyqYAMpqV^}?+Hhlwsm{r{O+D% z>pAInr^_0op(}tTA-7fe*ft;bJo~!WhC>L*k0;!nq@B zh~j`Cc!X{9@BOS0KasE+_7%_ZAFqQ|r{=Dr$dN+?q`X*Pv#wbwvDJ4)23>{RWGNd~ zws%;!9T}*!h-dexl39=kJNqCc^7kUD``%rM)tS>Z)n|+a;zL@=CtGD-{7P^1w^5P| zV5dF0$^MJq#c}vlsNz)Y>3Hsa^(D3Avw*Dr)Vd&`OS_#0DlI$SVkZp|mypuQAFAK{ z@e>B&8f4|*pc71|n|ZfT)U^}Qsl<(klwFi$r_pPcDWQ(}T8y7o!7}t7mLxn3YO82z 
zLxqoRf_M18vYQw%oq?}Q3NwbwFg^)bFWm*4b3DVcir4mBZ8*>6Sg#?BDnp+oiU!ap z*uBq67F-+Z^Dbq}z6+a04PYKYN*+*|Ro!8~gAYQc-~sFfQU|#pqbE}XoZ`dY-9WUK zP`BX66Nr01cN^W0qIj0k`Z}AL?L7V0_^!TxiXFD3%da!3X19yNj4t11Aa-U^_3{01@iSRY{N7^iPjnbJBpU{*Z!uD&eKnHJO>?Q2<;e_ zq%sou6V$jsMOL-{h+#wzH-H*_7CpL;JM{}KPWtsAUlrjZJlvk9@)WN0!RhXbv5VXEvv%tH=HnL0Wm}$+gKRh zDfN+y;l{!fW;x#?w7;kRtEuj@O$#TzJ>jUglWgO}{{(aXy1udzE2SEGevh6A-jrWz z7%;dI5*NM(_bMV}Sb9kZs~UBjQSJ-~16?#s-BRc^|H#`@y%X!I5Dp##%>}(Yn;=O6bM~ zt8(<7gl;oAIcfI<69eguz^kqt@Bb>`VogkQV9dwvAhCLMH#;mI>oM{RodtZusk0?hCM z)rV|(h+KM=wD}Y?Q$7W}?pYdCfa5X@g3XuzSL6 z+zT?9^G$Cj8D8XwO~C8}O1Lpz>$7%3Io7@SuD5t)!}z`ae7H24hSD0T~C&t`2i05KSRy`4=3tfBMrl|D9n=!8@g!=X4Gjo0=~* z-Rq9AC|Tz~H1kgAURAExa;V1(nPW^ikGs~3PyP9?znEQz+p|Tn^?f;8B!5AfUp>wx z`g={}L)`UJ=;Da>G4jT5Gx^!)T_;MckVSIR(W`1I3WG1n|(Vd zq^u!8POD7go@6HMDLOUWt$R&}$_?vrWc<+oEeZ7AFh}r%_6#gDPGgR?_1N43UZA;SKnE$$5(fn`qDw) zt_r**0%!;>P4H~XS(RXG<8*an)QUWgDEe7~PLQD+=!8gZ+jO^r>x2NoPXIa7skaP0 zuZ`~((8z8$RrKm$RSiF(!=ZE2fCfO$Nul6D=Xti>H%Da5@r4W>-wZ6(Vyck9xv8Bj zU4N|S^M7P8AQ8RjV0+QK+Z{-L&pBGpwGmMk^(`^RG27DWR-3NQnmNa3$&Q`bVkT!3 zFFaX4^oXLLCzX|KT=JJ^JCMlIK5?)?nYAz>AS(l&mT0b+1Qkq@5b{9aZ1e!K&83?C zHv7pugFdteAFUmo&$fF9Y$gZMP5#CWNyNv+7ucE}y#bVuzYzf6&UEj?n5}OaX)vAU z*@X+N{4PI>V3Cl)|I@nITASK!%W1oge)=J#ha}XI9$tDF*5Oq|c z%A|+x`fc#B+diZ`C1{a<`0s*cXnRtxZnTmS8SXboi>3$a{9UsQ+t#0?RFvZw(e@)FDM zjE=m~>*00(nSS)gGh|GAAR>_zHQxU#YKO+x>Svy30#re`sgnCwH^-WC=;(%1hgV$( z$))3<@wk3Qn=Nd~gF#Fr6P0eRLoa(ryy+uD5cB}q3hvnPNpg*&ahA)sxP!81*bdHg z+rSzb8^zxviaVO^Q7d1dmBS9bR?_V{Q?hd8Sa zmY_~^Bh=Ez==V*VXO>REyMDUwh<$~I0a0pCT20)UDhmaUqJ+8SAOF{9Jb!4id-R(5 z?#IaEBU%JB`a#&b;@@`dNycUq@8v5cTkag$pF5KF9)7e5ah|_D)VZ%5yUQ!9 z40?sE_ey>wC;KPP?JqLDmJ-$Mr9d?-MGvQ`qoeco%-vfYyYJ-Km?I2}#5UT^e#SJM zZgLQL4H7vv>%;}`VtdT7>^ux8BIQh87>QI>_sU5?lqUqV1^a_L*ar0Yc;#F}$KY7Mt4EACh7AIr)19EAzeabl)&&j0b)OORmXKSc;x>2Q=ml|Z%_tp5-QT(@MH4k zz$x3^j5~Uts3xQ-)p#~p@iNnczL$VBAsC0 zFDc)*kCkfTFr*&Cfy;yCVup>*Lag!W>8X95J&}bv!X(S|585%9R;fGYzwlnh&w&wB z`)KJ^EoQBq)+bFVNB-uBUQ8hv0*33Mxw8SC7uj})cvVCDXF|Fw)N!AIid*R42eFm< 
z{nE$A6aZ<=6h2Zin2PDY_5u6-Vv%>NUtn5rWYzKkS))^h9&W@3J?1rIVWNL^&p4*w z=8s_P{J_LM=Xw30gFE-tQhFSVvdF!`|z~WjIBv(1qenyjt8S&|6ia z+~H&vEwbwBBKWCUXh&`Wp&`4b?ZuL1VFS;

Vff14tlb8&P0@YoXOA2O}r?WPTzlj0OPaUEy3MR@7f zF77l+e!Z9d=5o$y}m(w}61%MeA~H=DgrF?A9N_ zI|A_YUXXSrOQ(_we|F|c5zZrK1lWefp!f;$&^((PyVk$)x>w3;&&8`?;gkSI@g1mO zD17bs_(f&S)yTL*wW;J8Gb!q!eJ&nDWi;1=dd0PxjtK zM`!h;AmWfy-2@KhZm=$}@pxkKI!*KwOb9R6cih8Lsvc^ynl8(kaqWC^>_@MKL*FP5PM#Mhnk|ooA1?waX3dE}CGlkU z7;QGpmFh|fxUZkX3l4>2_e3Ohh(Ti#JB(WJeef%D?9@V-_{k~scqNn zw~?{)-Ibf*LT6xWXL<8a?j5c(t$fo{^c)wAVil=ilJyxM%97VUJBQjhYz2$SZW}jx zm}jE$iQS-e(E^iT!GG@Rt?R?f9HrnLjg!ss{+r$F&)2-eQ37oNk)5@67!GM1U;xV2DH$W_lA~3nxk{346&l ze4{UB&?0SL9NDiIR?_Q0UIfs86<*u4?i6t2gTd2vY&$+J_}}TYWD65zeG$z<=Gv-D z(n<;ue@Pt#>Wf!~I^UUhM2mdBs8*5qd0xB<0J8$oOo>zh$uYbTIk%WOpHrWE_6#6l^mF-+o$TOf%D_TUongD@-7~J7v zw3mq)X&2nke`?y%7CXcQoMyOmU7whkxSS)J6IcEjY)VVa{ZK8)v}LzJ#7mTsBz_5N zI|C4BBF7C(dqnpAiI(~dKL6J&Yc}`UQL56S`+829#+7UF05QY=Mx6Bq>7IMJ<@c*n z4Rx-L<0nEMQ#g8LoADo0v1gVI7h0$57@M^*ZvfdOJ|7tSe+NI}P*P|9YjT{8z=0ox z8xDvPY@beAFvt0NnME9=j&ieK&te@Dgr{h+`*9^W3|I-B!-{IOs~{4G`vFA##ks4W zuiOa*82gi^A)6MyV_EgE zapaNCC3uW4%ZDD(+K^-0_7T@3)zHC{+R8roVRX?>ZSr1n9KCwL+}xaeQ=Gb8P(nil z1WHu1-{xsHt(#?-Gq?z67f-e>!)%9v;M`iWZNdQc9m8;-;`Z;n8Tw*mYY&bkD#B)X ztf9%?haDj#ErwriE!4ccei3^$EZ2!&DJ~-zg~->-hjS?D;0R7(W(7+N4Rzqqb*JG9 z@Wf)I9&0`eV4nyW^1t@>{hhr{)Q=a8P&@k_WN`D7F2~4Lvv!i`-*vH1j&|)^gAXIu zV?INA<$%|c416;JEqHup?US27;n5-?spM11iQ9&-m%f9ELqpM+^BzxjXxHq>&HZ?4U?r8{L4#(%gAc{0K?r0& zaPH7|oyUD^!M4H=baf-o)t^nlz*_#L5RP?J;8>95E1LK#?#$cq2~h~=lZ5_B6ilB8 z?C7;|jbv_gza$hMOd@HmzX-jx`^Jz=L=+8Gd2>zJ+$~g&{AoJpWhhF^!hFwc_lw z;X*@WJAeWl*!Q%jsT{zE>sWRx$#4Rg<^806zFvi85;Qar!2xf7C4$E6l`G9Oxi%lJ zsQm&#=mdaof>{9kk-Ea~>Dy_?*7zBXPa6RZF!JF!HJh}iA9}SbKAmID$+LK$w8;yv z(!k}gau}Zpp@NFwi9yOrT~9snA@{(W=8MfOe3)Tg(Dl4H=^-xaXsosa(mq08L7`ki z6(nBaK_%uBQm@8y7jEBw+k6Gna)I3P^cM_)a%v%+*H}$km99+=?Y-DQ4Ipa+!{?uj zXAT`YWF0d#Gqb(H@4yj)7?LFP0nCs-?tT&A%b|1dSe|J$Q81Y}h1%VmVh}E(n<$AY zgU3jxzQskazh;-HS=RXZ%*T*F+yx+X9b*!){8#BH=B^+?9Owea=62ogS#}gnvk61} z?ZYrdF8tB#Ly9fl ztvk3CozNj#_A7%r3U1$7m;pdt6AmMf^GC%Zi{{zo0&;AWQV)zAZ$&HJcxT^8@s)3r zm@&XOk_4r11@EOL=usVlcWRz?CenF!w+zWtr$`Kqz~4l_^DR{7%QsL!@JO;{LUL!)?aV-70Oxv$Q@ 
zH7>QFMLQ?Xq%IPmoDf-}pRuyG_@%Q%jnD(=G4_~uaZn~&H5lolmy&FKiZ^=wZE3ND#uFEOd~ZPXRoXQQ+n z4HvC4@^=o5=a=B6$n#o$cb@B%uVa8s_bYwrE0=I_z|0#A|0C!tuL5=BdTBlHMK+qClzdW0~d*;r>|8 z^97}%_m4|@W3fAjJzcGX-LbgQV%#dOBdIyC(vM^hI!a#{8-S(%BnDb1h@eVF((Y(B ze0(}tKIET4MP?q5X7-Nuq@^jDD(Zoz5-$g&D!3g)UA-rcK2ov^0{se!*aD`VBd{BNM zgM`8bq&=@DX!%>=pD^YVXyr)Q93J{1RK6dbd6~4w^Ln%l>lXzGQy34Y7>#|})1%&6 z3q2=h3cI>z@cK>tOip*gfCj8G`f_&3L*kjS0SgqU(K5}4PM#Ley4W0owvJNr?)Jz= zZMXoSKuLfU-XKlq$P{*3z6hInMC-Dnia#2h?ZRm!b`jW3 zDTa-<$f}+>04bOZth)=o>>Tqk=NjqEIQl+WVHmf{5L$VXWRB6ry2`C8ZVzsuvlm-Z zG+=Nc8Chdu%3I7&mM71M4YnojmB2aAw$|;2>V4qM5UM<;cIxtkV+Wtnb+`E0C`1F8 zz`m43CH=;iw@DWsIvaB1Mh~E#$%RQj1p-9?@*z$y5CBerf`8uqz!H`HT?5C1WE)4g zL1>h|qX}pqSr`y2i@z)X2?LQ}Vgns;)&@4*_V}1faJYzUgHSEPmd>;a!jbSL7Iy?( zg-m#pY*9{l$0fgOtH4SK!=y9`#s3MI`~qeRz2l=6>3`XTOjGxQBqg z@O7bYl#_CMy&0k;)qu z(`dD8)b_1@8BF5^bDKbik}Tb|5PocPrt5p=Gi~q@Ba0$b;2vD>BIPMqQh~?w3CgoP zAb4_J%=Ut)`U;!yIOwsxY)Uik7(2LmYT&AP!b7s_#rsZpT_vlm3P-J`X}eNrT8Bs& z3|>fLV08we&p?w{`S`S;3Wo3OW)K3&42j#d1+OVFjU} zhtQCfqS*mdpVXVXa5j01BiYiU=XbG9-oJuISmx^J2K+F4M-$hS8-Ja?In&e09gjb= z$EM{L=mzJjYGb-W3UyY=u>F5@eRn+8`}_Zm5F$mS5M?B3s6-(nStS*UNJH5xg+lH$ zX^B!sNQA5kA!Q^?rjU@vGEl_r>c6w^1ilIlhSg&*)34wIvE)ygdz+yM#4 za~xaIE!^vU0IH!)5PW}YSLF%JgfGC?eII#(jK#dnp_-51G{@-t`DcIIv=?|xJm2Id>31>JaeAp z9hvpu%lF{15Xbj(tjGU&g+NFq7Y6NERG3L(E0(@IGx_^8*Ovja6Ko4!o$GJyTA?@I z=b%@HVdnyF&3vmZ^EU>q*XkM3Ja_?s^c1< zgtqrR<_mx%e{^}^4~Ooh7uW1YXkP$ot;R5&x+PG$(aqkuu!rWt`bVqhE3O*_K;in_-2>6>oH0^HiNTYhK!5&N8mC}$^6GMvZNEeb1ziO+j!B?&Cjz zWnOn?4HgI-Lvf&8xp+J?xwns$s8$PjrwEzMlv#VZ2uG-C=PQ#@F!Uu`_s6!qqHiBD zptKwDj)38U zaaMHMqf+exp+EJH_JEvDjppcg^hlMO4ST%qpGQLQG4TE;!CZZ2hn9$_W z;D{dc*NIInK1O_y1EFa(M`kviA-ZMWcnlPOD&r~g( z^KIU(>`-6tD9N5^bWexJq5Bf32>gsgP)rWt@}9JRTK^%Vu^C6vtzr5jy$HShA_bcR zTAUx(8jiQGLxSjVflbhS2k}aFT~YX=P4I4kanyqUatBNKe)p}bSRe_2j{dZef`u^{ zCp>1_^2*r7IL3Wp6qE~=`yPl8lBwzA5u96uc;G+i7g-(M+a}N(f%RB2h$=<)Ue^p7 z?r{e})_M&;6?P#SrFiq%@oki}7iKwDuwn?$GjdO&8GHngI>BUB4Fx2I;Ts>d{g8|^ z71siaBEab_ 
z&$AhJ@P3^HUhAY3xnuc+{pz^dmF|rV$DZwp*mpHum({lV_-mIAbnp#@0~YAcvf77g zWxeN`{7fEgK!6nbgB zP<_kg@Hr)fk<1ahy&?$l1sH1Kv+j&ne|i4ADQ_W@xQcIpx<3GAlVa+GF=Xkv_|87n z2(q_M%K=KYa7^`I(8xMwB8ujvZ(@%sGUggx1%BLvBTPf$#ym-w>8cgHl~9s=Y;Q z9~bZ?j@$xS2?;_NU@Z^y)8KFDhJ}mMG~HQ{-2!`?bizX(5NaaI_$1**Kb`&R8{n{#yC#$v_`e z+Bp(4lyQ!zd1VZ$>zd-8Sx=tqk=Q|b1mvi)bwlW|;0;ECZHDdh$`~ z2apCAZ|Clzdj*gh6lIHT_xrWUA!c(edWmGy zlrYcv;fCBl0t8R~!j5f?j|@?pk5*`U(9WcyYXrb!hRTTzTnP@I47An-zU1~qZb-i0 z8kJc{+Qgu^1pJASN8`&01`0QISFa|p2XAuVO9xVe(R&W+RKe~Ve!f>-p7>9Aofn0# zRSX~Yr=JhM<#4=Ds!S$nykKKxsqUU=9Jt)tw~zfVIgW2-tDBAedF6zRGI5`^@>9Jq zxHHVB-)FlK^X+pzo3g(xdN_b-QZx152mKhpPu=@;NhQCy_%p|h`(bl*+USkO7A?qp zp?;YGy@`GC<}5VvN$lmh9A*aYxmm3|l_x;h!O#@+(qWlkKuA$|zdQVMgmg~NUIe-+ z`e3JA%QFjOp7r=+sH`E_nRJ@?39b#edrI{+ba|5O66_KKE|5&2(+VKY3S!x_=caQT zq#U#I*RTF62qF!@Mm22l&{#br9XXf7yjL5~%p(3CdCQcb#_M4q$>mymxWyboF4X*M zKzc!wys3~$8a{HH3lB7r8hDqc%cx$qAAAc?gf)9bf5fsa)4kZ{mi*S)!p&J1JJ{r` z?k>ge=4=6C*5EN4?!x3d=59ZXh*n2h)K0ydCKjl4zQ=<~;H1Kr#2}$J%%S^kANCFw zG$G)M&1*qZysq^t-Qf_E#YuSYn)^m~3d0!}87$h_1S#7i8^y)YsX;P3Y1d=;6@yTq zHe6pT0&srG{(`2Hg`6$&mVS*R*_>+W&J`a$eYl?=UWvwVH}-v!9*Nf1G+Z|Yv6<+1 zChnM2f90%2Z#ez?R*vQi0TzUa=N%>kizz$&0>T_FeOdexL$}4UY3U6vCX$n z2MP+TRAUDj0;}kGXxzX(nW^%JDCPM8Yt9A7?YDP9xX8TVh4T&V#Trx*5Xf8*TAiyctH1cl8c~<9SMm~WLdb{2Ehqb8;jGw( z)R8ULA4+EVa-%Bkr>qV7Y0zYq;a15W4!A4iC%P4G5#qiv&Ok$>DzYLb1ByC+)l|4(;0eeG4T%3YVaI?{+RzuhaR8HVLb94FS0@vaIIAm+;{3 zYnQT}x9};9AdROaJ(Q<-fJPuA1!m0X3~P~B&fEVfEUYDdBo4j+Re-`8Bcd$xVtKb? 
zUZfkgTxPR;VmNmcA_#8^tqv9GKdWLY#TOEgWV2)Ge$->q3`fm;V(xHj3uVe6eAaOA zzK60XGF46@9)vsr5De@Qu-s2bAExH(S5{_P7-MLslaDVbLB)|QS#lG@xC`Rgr`=18 zp3zAW4%Yp<-h zhXNwoNcN@lh!y@O9Dp}VQ+jE7(S9Exc3lI5Gz6P4I)U*tF zu@8Q0(WGogmowgZnTqnQ<_zfQJ$Csb2pzlr%fV!gLe^JpxNrjsg+zH`s?#Y)#X}SeySB0CGwWZqW4mJbIJ*YjpM79>3(cz z>hHFD=m%zvM6Z+Cl>SvM#X|{i1=cIORjLhcSBm6~oJ_PIi3icdRwGA z8S}^U-`rrY8;-rrhBKak!-|y)>B}Gv4p=1_EimpV_hns{L`qqV{!1rBCg~qHh<~i- zt_(65C@-}Z;MSMUkJYYzI2Y<2oPq;c66b!5jszMQWbo+El||m}38IiaCo|(;v*UW= zjQ{!Opb*!CFGp}=^bGGul!%M(oRKkOr?VobU6%l&vsz&F8`U5wvi&%OU5w0p0QZ7Ti zt*2)!GsqNr_no5*cl z9ZgybI+FzDtXQI@n=@6 zp_1m6t=3XNrrg(FUUV3LK8ogDM!~9c{T&S7;=m6Q2Vii5lR*@_Ti=08qU)XMIywXI zAYe2OKGBI%YCcc*WLnGYg*G|FuxjJX%gNavA;nw-FEK>)dH`9nzm3lLk;+!=F=X)6 z0HFlap{T$GSlccQtql6DD)deGt&gGQFaT23+5{Cde@QFCqA57Tc$ji;x@<>)abM#rj&k6XuX;irfdB#N#I8 z@rftXX8-xz&D#zf!`UL|R@V%_{o)_Fd^LH1;;%yFn<9ot#jN@mIMhmo_r(Tjs(pDp zhXHVjQB~mQLq!de9rnlVrd|((vrtU&?gFkbQ=J;3w3b1-4Irs|a5r6;Z@%%X%Ea!W z0KN$wnHhLV=%JxMbeNfia}fkwE%D=U(x4iuYy_BILa?>ZZ`&W$^vp;{I_u$@x0o@fuPg|tM#k6GQd+w z95m#?qz0aI#z-t$RLlj7OUg`+&BbuoDP;WqMqnyZH9_Q{!{{`G9xJBhWC3%N}gam2O2hn#A{UyCuq~HY0@&c4j zm&IzH?x6_2qo?0GT9A<&aTC{Z830S zQ!Ivm#iHq_$r**bA++j-b&EZ=x+_a|4aKdi5)O34b%16w9w1>u_RtsA>SngyV^XU* zlLgR&0pcUCMN4DBn&$yu#SsfTo;{;?@LQ7YvBFf{;e2{6NKXJ3A7Bph*2)0_`MxQ8 z*RukRUrt%-T!)&ZV)%GSs}Icu^yb`vj;`RGegTHdTHpv1!NStCq}lEn_r7q`NyS$^ z8*M~?SL5cd=Y~cs6*Gr|*6}Nz#XC{#0d&OPzp={`u^^P>8LVP`&7d7#vP{WvWX0HS z-RY0qT5kaEEX{NUfzgQ=CHEyLfvH&LQ?aLHPp1C-tt)yx#pdGAn6B(5K|L{GpWTp~ z&p zYwcE7@=Qk`8nIX4T#;6(yFZm)esB+D_#YOsEZcg+Soz((p8EZ~J0Xpwa!0m4uQvxy zK)5*rYDNqGe2KZZXj&Ria_am#`b91|9&aDa(v)%zt@V?K{V9WQZ_`zXC!>RZ0dNzw^z&Wc^%atJ8EfsYB7pi-5h zz#gb`tI|!gf)()&r~yiYoLimzQ-D9JoKqU-9@t2W%G)z_oG<5`cr>KI@}z zX9?GR!)09)5PS&jk5-lhERjZk)Ts&1fAlRy@+Mi{);z%q_EO9s2md!6$o zaD4%0~=LDY^aYSe$ zE^WE9v_*bJ)h0wSQ!9vU0)71bynY~|FZAgsOcW^*&ZcIDcCA1|QA|HN?jA;;T>SBc zokh*hM-Rqe_E-Pjt4*!Gb6eQ?T?y?zW+RU=JdLT$0v$pFHA#qplc6Q>l6|;fBKPwE zi6PFgC>Q2<;7=2mdHVcmL_TV!Pip`_L( 
zS}og})Y5^uZRXieZpuxbb>MQ;@sPtNq&q#dQJf+qbp2z|o=p7TM|h>!Y}BiYmc`xx z`bP5CUjbFG8z=*efvVaO_Vl9p!;j%Ba8llH*`(7()-A9Lq$rlCjLT|~ujKSQpNm@o zeQl~$jGcrtn!1U4cQ4`IML#-KQ1HUXD`ho3H8zUR(YI4}dj*=2EcH9}P7Rk|0Qv@A ztZ`0UZSJ;v-t7z4>MaNGF^xG3V z4+fBRcN8GPSca#ey^mhp9~H2;<{lilv7_R#OHL?wEcoVG;wviWmbUtG-r_1a@*{fH zK+Pt>xyFphiuUsiUnYYp$@6&>GBz%zo+j=&XAr@~&`Q$n3&`VkDKvp!Y@*R>9xa2UQK!uMNq4bPEW%8x z&B#XAUD+?r)w8Gf^Qn{TdS~?yem~49-U7CnL-E_ny}U(8ow6zvS?kh}VD;;dSQFy` zF)8I7W+{z^(!&;Q%oRHERTvdew)f)YX?1Tk+6A8aPVo=iIz{p9!iEWwyC2F-92fJk zeHxt&42JoFFR3F%HaauCyop){yuBJBJ?2+RmWBHOGr)tT&(UcKjVoFk_V1dUS`GXP zR0$YyZ2oid@~qc%97!II9&qOy(P|C;T41~X{zm=d3)|LB8Q1XIDubE=Af|BO zI)*0F2FTgXUWry5$0W#ERuOD8Oxc>nrH9OQc7j*x=rt}IG=YQ$Kbd*j6w z_4q)|m}l4K0oRMU(=ZpRNIrP*06a)W*rNAb8@ZFJh0{#*G)!w-TrDs&xIA<9=Zom= z=s2>C^R`4=`Z3^PM>cKsKSwWKtn`Z;>R)R7?roZyk-{b_6<8NKlpU15)oTUpFovk6{!3_ArT9Q$-1V%_5^7Z0gI zmbbge{DS$gsM`LYR-t+M`##d1w=550(Aedr!yCWiXOenWf3mPs!%a7f3JNRERAoq1 zPab-vhU-%E+KX)z1AA12=Xh$;Iny-uc6qNc-Y7_(*bpjMr$Yv>wK6Znj>z1;s~lzU z#vBq3c;JEj8btEzFTS+$T6gH(x}T%XKe2pDl*l4@dtNyyQSuRNwLo#i5p+kdF!9tV zH|S^ApeO+Z5-)6?H&pDTM4CI6n-HRxbhD2z*G*GK7>X~Tim3L73yM!xA4Qkpq~rln z0;N~#=}~wFxRs5!ZS;Y$kQ$ptR)BxFzTrnG#b`=dh`NpMW*7q{mBqi)K}#ZZ?^+#D zWO)Ycxlx+BS|j`lL(9MT@(ZS0Ao5cJjPYB*7Kmawk0uL_7GOd9sl^}%Du%D?5Q$MH zFB5tV)*2W0rFDfF_BZ-i}$bG^%W=nOuS;T_`4)l^iGr7qZ6DvP4nEV37Sp;EkSo} zNkR@CO%yzM5L1zC9B%>aB&--z;~}gC{dx>Y8BD>vML;oFDqZw?BA)Utf?OBYe{#lLr1^+f8 zAp%w0EhaUoZj=1GFuVsvDpR&*J4^%j+>V!lhWDvSmpE$tur75r^k=&_!yDgnOL29j++7 zIU|Si4}6sY8@dA>IXUHkU87KN!8IJxNlXy5gkTCz-MMxQbPW_hpPLI~z_L7j>wIsM z#zk?iq$kRFr{4`rC~vTU?p+br=41-E0?!7`$Lp?4h#J^pk&z7??>MHGuKN(O2!Ma| z_RiI9^h%W4mLQ(1ze*w<3cbk=3VOprU7q=OG8K!!BQbVOQC6$p zAZkSU&6)eLuM>}p5wB-CgFjNjM*8n+@n@-d6Z9~3A~#C52A=k6r+`>v3ra=81NQ}$ zL3U9{!S4Dg0fWXddV$R?7@;&#t)T>;{Hvd5^>&)Wt+Thh3HQybd1|MBd&);s zbq98A*|H_5z&8JNw|jHup*P;G57?h;t@v}ykdQi2-deu*+%MIw3UjaG=~=6yVS}0B zxqiR0XcN5aq*O&tkIj`S^y&P*0fyfWEe9dN)OivEP_lpGnwtA7{}Us%ODQsY(8T!Y zp$;8C)=5(9_H}u#ge-q5WIlh~{?>VA3*;qtB;8n}*e6|lF&{TNt8DejAIoopcBQ;x 
zfYlHwaYP-KL(3e&rl~R$gBrM0ybf9(cKtogF_;VzKIKG-oA@K3zlz`Z;^oVBb{Ti? zfQne+(xhDCb?hCUm_#LKabRZ;357w}Q+zX+@1-a9LgeF_Z}ox^J5acdr?ZD9Y}AbT zx3~_DX5-;$PtB-~bIf1i)x8!%!BUoJ4$Nd2IDS?9e_a>B+f16lSdIc{uDp3RzBUe> zxF)uYHhN{IC`VP3x#F_o?x1#82ZYu`FZ&+G?x$7Rqij0`j(*k}Xhx%tTOmP+>571S z$_pvEc{Y}{cc4s9ekwBtt>3?DgbZqgv0hUU1GkDS-7K9wWcY#qhKz|w*vJ^j1i#yz zF+Szl6>6$IF~j5BCg0v-e}Nb918LU**4%=-wa9#Jm5rjkMFn&=2&leb)4g=|Z^vBH zCl8HU%!w=J&6yDk!HyiCJj)0Ql3%mY>`x1-bpJX<@kcv0Pa4<%@!uACZ%HMdX(nN` zZUHo3V3C(7&li-NcH)KXJDWSoQ4RvMTneZCd4LK9B*p2Rnp7vLykT${839)zlLFs* z4Ea3T);9_ukpcB8%6CG^-K5-nUM%ZW;hx8#q8RGD@gJ_il{3Kw_y3}Ak($Wf$3|_vVTwY0fq`RvR(HmiGola=k9Ab&q&dMvITvUncf70>R+_yHJ(1f(ipfNCwHg7^HIqu5{RqlY8FL~U=}GsjQer$LCHXc|f8G2up7pl!Y z6H&1z8lV8ZH0Wv=*UmNU$=q>k3#6IjDhI%8Q?1{Mjo)|tQGb8@;IXFi!PI>haLN#d zh=t!Y%i>I&JbilUP;{nN;+wOt#%u5d$z^|K`>7wfF+DA0VU3LpCR2<)ju~&{PUwZCv7qT4$})bhUH-fqm50CPWK52C zSBoy)Njf5MDS%gkcRR0Fh@GdhE|@QezpZB@HwcnZ^A4-NE#{KqMy+^M5 z!qcu!M-+~gbAB%z9eSsF%DX_g?iA|cVSa)uD%9P$lSmf$_E6o!&x$1l#Z+*~j z|NT(4N$!NV@XpQjm|h&wRSg}Ye7ZM4#>ISH?XJ8md<6e45Tr@S8WV;ta{H3-kglh{ zt^>%~z!wN@!F!-^^tNy76mT;MF+t0>!%L^`sMvl>hyg+PK7<=3vwueQRAm-@qfFfT zNGUYJxg_<~!{-UwJ*lL@0{RN3%yFgDT@K^qA9enle~O^VkVb+qFjCUmGUqowUoCZsMhFqiMG);qCd&^NM)4qiLbdm3m*gY;4_gG3h{_ zRQ~hF;3|?a1SY~BLB?ZzF#U!aNfZQqDZ;+~)%Wd{pPD&)u{?Di`M5_O$|z5XMCHJH z4&^8~e~PPv(a>cqWkgM7H1_AD*!pQqAbuVoljl1Y-Zo5@_K>%_VbhI1BokpOear4j z{GZ29_r^!QD(+(?SNtl|)jI(HIAqBCAvG=-wjRqM%{+-erx!h*zdOyV0IvYWKj-Ja zDzgM|Wv%EGhpFeqFnT@w3=$ZaTgc_hN!jH)Z-Kod@ITwC`KT1qtKelMY6m(){;O}t@hBFc09AOW?IIS1N3l1P(w`>N zERFh9Nc{%=kvxAMl4^%Ci<;umAN<&rNzpkXO}t~hlC(r@ti4)DP%VtT~N#OcNf84fA=5E zdzWH(5~7KK=$1GsfnLd;R|ilJV3{#QI#{<5qEOl@VZV{$RiUeWfbK2JgMA!Lp!iJS zqL#&HduBns6SG0Vv>j!qBT_CaJcGbwKStAiJp8AeHAHoP-{5cGC3KfXeQY%92|3? 
zs3k2NUI*9_*FboVJMTrn{EJ}-&IUv)IDbnpP9$V0(b9ESMO$P-^+(gt<3{3Vvk#o^ znJ%0ZXjr1i}?}?CH*&MlQs>;_nw# zcGHj-F!=ew`A4i}pzP9)MlkmKKF&KSb9c^O% zGcF(froMe*(fa>P%OXpI5n-D0avsZ2-o)pJ>Dn3xr;aq8#1w^g#a*6GHn5;|Flp%lf z+5q?kI^ZgXYhiBvTR3A+Vbnf`{6BYWVAEce;*nJDj)(|+F%nQhnhPnjJbIT-@dbEl z_A@Rr1K({J+=)^Ic*2V3I=XHPV^f%raL2~s5yD8gd9)jSR^ZvQ6ok_TzsE~YC9~pG z?y*9=poR$5JbYUAZ36rL&!C7MSWL0u5HAyp{$Uer8Cl-M<_|E-0ym_c#?|f;98X>7 zg_M!37%9YfbD%?vO!h!}+C5K0J^B^i!P_V?=%}y1cHORL}{*}={&z{1<`=DaPJjUhR!xu@00 z>=H}}vJf=G;o$&deiOZ(|GzIlt9y1@fvw&bQ>hLgP#+-N6-{F@?TW>2;XGpu$( zQIq>27vTWGj&8j3410>+w7OxZSQ%mrD)8bav2VrI(A9$wD1JdfBH}Z%l%q!8z`ssu zP6*eWuxNfQj6E^1CV>B#izpribZOVISzrbej*S=K=l0ENo<#+4JjRYifB`dZJ4`kl z)dhxKFx_}V&98jud#;jZZD{CW0q6aOlrw~m1N-~3e}nfH{SoPD_fW}LEU5T#&Q z*+YD<+*lL^n>eYF@b)C%DYHhC{?6?G^zz)8))jJb`~5icYQpmXd0;|J9Mfu8W6PtnQ9YMK_+WEK+GC-0K@?gBSOzC!meKwV z>C)<6Gti0-my&hpaz%t8)BT8QXt~X+*mn?HQ=pKdsG;vOd)+_Bx?r=v#yXXt2yfg4 zeYi6yrUJ!wGnuUDR3gVNf^%eq>luvx<`}$M_N~6W_L%*ZT558hMuOc)Z{7{Q)u;V?t8ai?MxAu3i)5vJdfnrM)CZQOL6IQhb;M9@&7;S_O0J!2n7}1YL0;I(q z!O~(T%C~5K#EGLvtB}TVPUu9W>Vey4qR9219#jI7=%lcj#qw^b>V;0} z`%%aYaLt!YV&BL@k>9w8*^v1Bp;q`wJw>z7FP1fBL5Ulb=3r?gf5#dFJV10W zEuw`0Kl${ygG?>FA-|&?ci-}|>_CY*Hus=!C*=})1(v8n&Cv3A_LZaqCTrM&& z$}-lUw`|nmS@S=zI)W}#Vtq+3aGeEJHaz)k_|d6thU6wGbn`c zDqlyb!MHtTQMwq=MCbbxwnK<`{4EZT*PImuaQ_0Ef;asJ4n3*!M%qTvheT)m$$6;3}mEykl^$}b_5M7;3u`*CGJaN37cSxB6rSecb=+lrWwZ<hd$+R6hgv?~bX1O}z+R~3Ta zeZ33%0!nIjd}xss=N1As>is`g&251|^FGLEb@cddas4<2WcYk_PZt@(mK3kRq>3Iv z?<-rPE{{VhIyd!(JI= zKjj1y!*!x>jPp@8Ge-dwijODQxGygEfP1{D1lZnoC!Bl5&(EIkJ$@QseSYwKys*G33;C1W;g zxVX>f#gOKQwtC3%E^~d;#BZG7x$apxa~}F?;t+NMz36)h^ba{(bt7cgW^Up^ow$k) zS46$;N{IK%fQ!|MQhj?C{`KDdMmI8SHPgXQ((VNce1Himj{QpK5z~w3dH`MSf%Xw8 zTGxkF4IJ^Z)2j_u^AZ2rMB`7Is2*>jaNmQ9=u(il((2r7c!uCmI}XL|)LFA!Cl;9c z*I~mCEGSa|3bZ@+nW9q2NwN%)^x*Hh-_nCZz`(3cfZ;ej@TQJpQpZE7Pt6mYsK4tm z`mZWtb7Bn7ye8lSZCz7H-pkx|A$cAg$bCJ*3(i? 
zV1f4Pm&$D(U_B4cA<45L70%|uh^#pO!q)i+gvl)eL^=c+5xHtSPp`b%0MRO~pQdeO zL<*mNHe;G)9c)_kdMWhHxu0J$%ss*kgzJA1zJGKOm)bhM!hUeop(DLX&P{8~npO6; zaKZ^}-Dt>htwgoY8bo%+(y}9nu>(v`9$aRA=2FzJB?cO%-Ixoh5`4(6LUt0^lBX#3 zSyImLznjI#ZM~DK^y81Y_WEt?iF{Il%YFXGjKQkhGl~Z|9ihY!Qp{PgqCw&C#^F&! za+D&{@F@UOzy)HsCD#qAAXnu8Quz;b=XD^)%q~c$4YDWT2W^O;+zEh76fr73>$7=I zv&$9by~#qbAfVV%&^Amdg_KIUpW_?R(2}eM;r_x;tl&6a|8o;s3>oSt`D$#(tT+J> z#Z1HGN&mnT$A&o(5N8}`fW0gYenR3M!!d?dLW-Eei zxF4Mf3>CwB2lZa=}so? zZ;m75ZQOlS7aUI70*kl&7HDloi3+}VV3?fZ-c0n5bUk%N{Wy6@u@TLWl1tM`giawp zjSK)!feK^NhS%yKxyBHHi8vg`mWTLTo}qm3uIvc#X`uiWKj z6A1(kE56hR6FwMzAy|}P z!ExjdIM2TA1hD`6U9jX~N6vBv1ChEsCrZZC<Z)%VqAjpm#+if~2+xxqt;dxhPU8eNO*qpg8T8-S z&^$9IWZK*c28RE#wCxv$qBi|CtF`Dqd@DcRzK7vpAg?(LatKJKV%g=AB$lSU461j! z4z&>}=m|N7f|tE^_k_&))5rz}H41p+3L!7x_VT9%ZaRAca}imPDXkn3G#M4}W0isp z;}StMBtUJP;4(lel%qThYYGxz?syvQce?0=g{%sg8(IfTTTLWbmDyx!D8P#J0NCLd zO3C{KKZ%Js4;3UvZMEl$GA?D4cANdFs^@kxYcc?cJV?Q5aWg=0q{0BGS`E`bQ^@I4 zpn4Uji5i5rjN{CCge05RV{AuHA6wjQ-(^cOAc4cyX15Uzxy)+)au(UzQmtde#2kmP>NIcSNw(zc<9W%Xj`9sZd3( z1!Adf(O}S-fvmb3CKdDmHocSIO?Z;i;-*YKC@{&(vUDm8OFN=e$=qXydwl(zH`~T- zbhjssy`vR62ptl16LL8cUv7X~9K3xNV#z?Gcwn!B$k|YlGUkX2j4>o!_y|}49czY} zlu`EjY*T42ux~S%K=cIgmSXnIdUqbKUFwijcGWy6-0PCGtKLZoeupBQeC($DHd^Yr zKZdAI9L_9%7)BsU#3>_j3|fA7^z&UllorEksahXa!zz1Wft1^9M{vv*5~iddKz_rw z$Vot?@$-@I3120p1bH9}8Eho_(!mdYh0vgVz8gO9=iIn@zz>Ct-1Y54AjxW+N-@i8 z038K!^6;0Aq%S&As2CT1$A4sI|44}g?iNxjj80C5lFc^+7bsi8$aw$-nG#Q#WxfZW z6mEedfvgg^)Xnc7Tm}*gYEk>KjM|Gej(GKe%Mma^PLiTz>HC!%3ys<%zQaLRjFt$} zLapl=CBir+Ogc@6rjFm_|7L%tpGRh|q`N#T>SPqv${RhD1yAXS&P@s3V{oYi@jTUSSC zd=|&$^BlpJ@XCfb`aVO;xAfjZ^^TQ)$zOYyeuT`D}4hx4FTL zgi-?>iS7ja1sP^guRRLwqHl|quOO|Y!RTPR=D~SftH->6{G&gAghxsdu9UkBOsZjU zAaKRJSF94Sa00XqffvglsN^H*X0b4qv8ZX`G)sHMa6pvGVC<@+w7}g)2s{nV@DYJEh#lY29%s#zfp4G_B+N@NL-OM`?I9WjkJrh7Pr3 zj*4EGdF0?cmK2_yXVZ;aF&Z&n-l<|ge%DEU7r9@VhoKsDgS}34%_wt6deV>D(W{6Q zqWTM>>>BtvL|!f$UC7a=;xqNuxEgR`fb99iv~iz|c|e3Ot%M*#fpI)u^kVOyTP4bn zI&BE(GP}p8K@PTOXc#Xnu!?{^pRRRyPg$-BXsx^5&ow7KPx$*kFZpVS 
zd%CbJwbVxOz&_4xxgJw93ammNegYQw54`;^5#&##b1NgO4W2kkK6d$`0uC7~+yLia z$JOuBToLpQecKo0V`I4+O%`?yS;^N8r>#!(y^_x2`J_4i2k*1l-XoPJ8QZSVH4lp@ zQ5WZ|Pvp!|D)xs>hO3B^4gsXMf6;srEA&MEd%aChhIpNT0Jsl00BL4?k?b$5hn9rZ zJe21=iqN4+bIY5e!A1T@$N3}ZZVagVt7t)CLNeU?m9t~bVHOq^h#J-jce?te->yar z^vEUWE53U&OG9jPyX)Y?j?dhD5g}Lp-5BQ8;`;5+RDSEGTQWd#LLYn?5)b4EFMnEm@#mH?;px-u=6*|W zKYe3We+D1~269cFB0Mq=YvsbMPYp$>o%>UuSRDFz(`KUikIu&%&qL@ph7HzdHr(%9 zNVyMKFaf%U`p!wl9hJ!S!dkf&VDZtah6>g7abXAD)!8Qhz*m(Own`E50}L{Kx@K8n{ttE0@Ya)+&XEQJnR?G(ahO| zdJiP&Wa4#V?bf^;?IbJ}^YGJ*?Q;6}!vArm{HGBflI?s$)E4SEfTfJro8DKVzp){b z(z!v=WHrnYQZz1DD}cC0peyL8n^HU7GBO-3%NtNe3%;-U`OEr|t}sFfA0P zXw`{)e9vkf`-J}W{EfQ?yu_Gxn%VH@TF^fz(Bvg2iBljB#uNw^;oS$}0n)@&YKIJ} zt%^C@c=&XyA4x!13-J{rdZ8#+$kQvK`0&5%9lvT@Q8QXSn8Ru1IjGdh%6{d0GR-4> z)X+BF>w7d4V#X-&nAqaVO2Qus+C&Z=pR!*8_p%ta%gY>vC<00t$-}oDpN}qqtNwgy zrTK6r`d{_G%f*9tz5U!r$8U~9XZCV`@A3cn7V&;_u=+WYe=eskjt^yJX5Lq}bRcH~ z5Iu_H#o?KT^Es(U_R=JZ!o>uz2HG9W@srR*|M5)5XW5PB$>7BB%n^r_bzQ)Xqx!?C z0Sp{xsH{SQyvIEM!l4Yyya#_l30)iKQtDhd01ZH91(w%#yK4DY!fl$&zA#xs(61_J zuO}jFmVkQ3baQEUSG6oRpleN@Z`_k6?c)jFU~~?kL3NI+b&|ttmOQ`5C z1>E~T{#T2l*^~JRtxfUzYm>8(0;~!mV(+Mt(H-FyfU=nL$CY(4tI!mD*G4cfia8uk zPy_>SZ>2?us;W^iTt|R6-dktoWnaec|KiG&$QEndQh+X~JtSo{KJW3k^COJ!s8PKS z1=b@ys0eQl&~(~1L9p4@Sy-TZTw?3kVw$tjcw{sE*1w&>ECl0KGCGfed|@yuzc8_X zI%#qv2cwlf2T~5hn;QK>`|FFFfQERmjrqeO#VRuLZ~|ku+Eom|=It?NWoOQuA=3-} zK3W`+xo%#o<#6;Lh`z&g;oTM}OK4D8daulre8!(hKND6kdSBJ1?#UN}CX z2M_}M$C`ld889Bwnh(tO6t06I3i6?bK@Y|9$Bnt0T8@gN`GFux1j|#vv;0x*nXt@l zHVPH(L_$6R?c9aSEbPu7oAuXgTN z=YB4y)S7Trv_HXjq_!2^9C`_Z4bjK&%ZBCfgsg8feQUhz8>IWRD;UPG>5oopHT;js z>__RJDe1+sNwqoIN$QcDAIv8OA1)HhA!R|vm0J7R3QmkqJNi9yA8Ew=>imC0Z%SO#P>FiDa z$|FW0RB7|s21N(_GqMQud~vM$IPSY6e+lxJJQmn8E^9k?Rc4In7NFwjniMWSbI9I_ zP%7;F)(t92e%mQK{on}$3)z(;O27KR^+ZhqpvLXej>x?oke(6b8NS4pf=liK$wi~6 zLjqo2-evQz8zSlMT8Jg3ZR4O&OCQ!Br)!F6`tQoPREddU92YyF7Kktj<%AOdx4*_*J)cbe+nnOQ`O@|V%F5n&n z^t@dG@r)EaFtW(gGO~Z2nU)$&v{F!YUIV-dqJ^WVs~|7>-Cs*AEsZLA| 
zSX5hBs7S#B?sM~}Repy9`=jOn=Y;JO_mB~uKE=sk*d5kBc3Kjr%Xmx~Ju9xVj5L9i zbx-MeW0=)KpPCi7aHRPnx`JN}-brXvalyG#*kXyxW0}zMn`Ts=@t480q7`QO0)Sop z%^gQ1C&Ez{e-`NGxZ>__J?KYmV4Z5)F^-FfJ=KPvS6?nQa{pvp0&2;aC8i^V6klC#(RB3cjd~zkb}pQ zz6ioUi`7HBl1jVqBw3yXo{Si(OWS%0is2dC3>S_BxG?~sP&WJ8h^2vG7im8d{-2L< z?~*;fHQ$Rnw`zSzf2f?_;=AvJIuqtvKGFwISWx*BTNsb?iE%5-U$tdVW;a4yBhI)$ zZf3#J3;GJNPoNk$Xq<1&An+8BQ9h1eypS=uo!R+P7!l~-H-U{fm-EYuN`T% z+tyw3SMXv2-F=9@l{Q@xOhN4_X`r#Ike&@?BqBfSCLW!HpQH7SisZD#$qWt2w=l9# z{#~1NwEy>3tUBbWvtl?UkAsn;4?x(Vxesh4&bPK8hzh01;*L`}>`Av%6@DE5jWqVX zLV9_qJ|#ZBxuMbwd3qKv@5P@=(*1{y;A1}rgMw!ETE*83{wq@aDnZ@CFTEIlM^(D`)@72tA%Fb7J*mZkpIyG(EeU^g$;uvPyR^YeLDI3({xr^~x#G!=N$Q z0;-8NheIyx60?Et#HDQq196p-Ba?CD$m+*x$nwI|5Zz|24w9W}@Ats27Iz-ra!6wg5iFdwGA?O=kyMOaO@u zrEYolyh*z+E+~i{tkMz^8ZRz!hd4q|hcy+nuWMtCf-vs0oO+<)nETnI0-py9GMkZj z)bfmFX#2e~96UBqX}i(0My%Q*RtHYtP9ks@yu(LOTYM-j5X>ndb^uEWdzr9$vX|Ds z&bLhXE5Qj4``4(1TIsuIUEdX8z%8J0vsEcd%ZkuWU-~kMe zB#1<|%gLvRg`DXB7(_g9slep~a38gJG0JwD!jr|b--%b>&X4X~o>~8t zOiT!R;g~LF8U1L#*L}cElX2&RCEcwU$^M~h&E9;MdHu)4fqeY|fn`%ckN(Dn{hQb~ z@`|f&VNF&A@?{M2nUTl`et}|C@F}h#!-^3hj`kR_c)$q&Ljo&P2UaGfuCD$^BYPD9 zfo%srO5-wPrCgNNJmwg^{)J7CE1f9|q1XA)XxH*ovy|FPPd1z29s&M}^DQ#3+7l5c z6tKQx-v>$;0YD2P0vnG2_j~$OpG`t%NGS*%wBHl0DQ$T(Zx5ZSc=pwVX7H^XdKL@K z1~aiL6oOY#J*4d{`d0Q&@Z`2T-PP$0v7Yw+SoOp5T-)N`##w|3{nkgxZHSwI>K2Pb zO5i|ChS}S-q2&*WlY-)`7zF@7w^mrT?iC!td*~&OQ^Xi11^uaBO21>+*4h;%==@X0 zwpMV?TxzG`?X9dq=S8P1a;d!*0J^<3gQ~tmZeW|HG3yL>zt6e4B zBlsNeF&-*qZGLQ(bV%Rc3wGD5DtGqo^tz8$I}_qYOw`VH87=1hgD>~jt(zusO;i*$ z62S?JC<*kA)5r0u(*zSwG=w0F-C8uBYYTx2`c-PI|66L99vch_A?kN< ziFx$3=}^R9&ftVfs#3q}is6~?4F!>Z8#2)4KaJ7Ggs=2+iTJLWKK^R0jZ``Vp%}~< zDQO;Z&DXwg%52uqDiq%j4rnpD_HGQwlcpsL+!jKN&an|Sij|_JpJ+ILiFF`Srclb= zow>nZ^Rui`bHUbDq5713LbAs{wh^}CB_wLo1zL2AD?l8-qCqH~gKYEmzKT$*$ONB9ngw_H7NMyx#qhm?6%&T^;%clk4lw%z=LGG#y7o7fh4f)Z8;kqF8kkF%x zn@aY_y&cI>4`*w^7M2~4gC!fhq*Y`G&o{EkS~kkr@FugHBsZ^$WjDc2FA91kk5W#L z&(!;`l@EN8g2vM;EP6eD0wchFMOQ+-FN*I~3|yXF_rISSXTF+!Ni01&oWq=fI8?^l zI8VP;Q$k2>riXG(XL 
z*NpwaTVN~cGvzP&Ap0=;uZw;If>|fTs?5QBiF%9*yqAQG<~0f`{rVzcDR;lRxr5FV z$b2Xr5PJp3!VK=BWRIl6mp@HHcF1qg~LBXYK;zNLI~@we!r_xqNpnbi~(3}=o*b)ZO#0Wp5M*- zFz@`!{|^?o571HQ?;VL{#Ebi48U48nu|D1Rw}s6ZR23IHq~ zCsX#f66MPg-=~g%g_NnJB1>k(Q9A996n=@z7XJ$9N(VTV%y`_9i6CDFkkk z9=lQLTL1qD`|^0I*S32^kp^WdDME&l3<;TwqL64(QZl6^_AVJSL?xLb88#wmj;PEO zGDoFCNXFejhFv7nZ{2my^Bm{t_j&(#-q)G3_xF3>*RZa&)|H{VbvRPvxfg1^rIG!b z;72-5+9DMI7+?3z&i!SxABe6mK!8E&y}|bW4)qgvuBfPJjJn4<0)@RkDOEZ`TlY_` zIZoTZ!MZ4Eee^hlk&L_e_TkkwA8;**TR|aqk3IH1ni6!dqay1F85V5%{%e?St`lFqi(O=4oNeS~*1v+;20@UXHm zYwKQM^InQ%R=24NkGfj%NHJ^E zx;kgqMVETN_SWQ#*xoPr>H+8=MJu{vql3fUiQ{zE3!noifJZHyzo$KawY3*l0UkV2 z_>leq=rMC|azM_3w1y??ILiA+nz?;zSeGh;x9Mq1-Vf$B$qs2S z?iQReJ1?nf)SFjHWSbe+#{T&%&rU5&;}eqctnyn_<>x~YUjE-7HhiMI(wAUve15#= zs1KGKWkBCPc1l4(!S@AyE9S@t(Tost55g;kN{rd;k(j9}#lpWLLmKAd1ehgk1_aaV zD0Y~J*@9_B#|0=7zZbrUxQyqC>ENT2AGTaND0?`uDDM3N>izhzFeZN)<4Xk% zT{<_1&4&-0lYb|?bNibEc&M{?;7r_t@?a0uS85&EQBJOy)(;NfQLw?@5Pw{AtWrY_ z^Gs%ovP5@wtD}duvX4To@);7lYgPM+3qJ9;1=Y@>t&B&VrW;MREFjw}T*}j)t>q{n zVUT1P%zeG#fxvBAkQSfI#>+e!HrjIvG}*fx7p9phXUj?qZfQ5=D>de0jhw-;{l7nK z3B~x|6gJ2nlqv@8&^k;UL-?uDW%q`B|$l)~ZxS2fE=*YRJH zAq|7@(SWWJ|5czm6@(gqN)wbUjytNcWBOaR9K!buNLLaoS}PtYedG!MKb~WX(@i^F z+UqP40q#$K4F?Fn9TJU<*9{9n&Y zl~jV5Ga&WN08X3V>a>8JHC?*;eF)jdgv}s4X$^N)b@N&^Es|bP6{oyba+;aQj>zG`?_(1#;U!JRY^dhz(wnzdrb?er8kyxw zi*RKoqX&o&`Z|T?%%mE0uo2EWKAdjOsEJP`n_MItbGO!p|NangsV+mBtZ6~0Yb^B2 zYh_w8XJ%h+9Y&bZ2yKK%kHX&C+BzMT1GbVJThn`aV)mV|%kveM+{${qxEegCSx-QxDgkQWE3JHBg!GRnt!8}ia0D-Mq4MO%qHeYCeBy|hrI4N^ic9}ESFbM z7<5$|p8eC7GDi#LA>^iI8PdR}H<_3$)M`BPwx`XO5@KTpoXiQEF)2{&I75c?V8%cT_XQn+vt9HVj&7)G1Qx zeY}4RSl%xf-kZlf#Vm*<#zS$2bYhfKV;h#^mvsPKzALwzchqvWKRGgg4-hN?>fNeLg0WKd=E- zz^mF4B5LH__=NpWwXp4Ym%FC7&E?&Am)^xI@z! 
zpqQ(DxWhk2Y(~U8IrM)+HNs##5bhfiodHJU4Gm1h>^ zabt~2GeBBbY}V>cV4^>p8TxSi0`>GmYT;TfAPb{)-jm!eQ*57H`6W4Kp4k(1{T)>* z`FCWRB;I0)D>u5mJ6lt}?AqC}~?LY&&(U&n7 zFFBTCP31H0&!I0P3krqptS(AX#3UpbKo1$9@^@tyL^@P$XhH&G4jFS-R2dlcZ1H3~ zM+tcj^U3BcMekEHS(x)J){Kqx(?JjHfw{r*)vNExCiwCEBNF{5HYJ^4jCw-Ycm)ff zSSoIFuGics|7dxA>Cwq!N3=2j_+?xU9zEIggs3Fd>yl#=Yabulhj;}Gc--+f9Zt{A zm-%;8Hu-qd<4+MeVFpmWbX|TFm>jGud!ho``saHT#Jx*Hc0169fA*+o_lv|C%9?HK z5(AO2np`^Z5T15Z0=#w&!k3K!SG07rU@WasuXI#M_4lhtOmQ`>+tQR=$g8p}n8PNh zjMcZZK>!xVN1R$uHaLHOvl}GkgLY4)7ugqDV9g&{Rz_C+End8M!FzWpz3E3nFea9P zdGAL^?g5=Y`tn#Sgly0l@ zD+utT_|p+fd#FW2*5Z-0dq9>UsKG!Vsc&{#!%_Z4vKvF*T+B}y&nJ?TspIR`(4 z3#K_2n>m?w>QsJ^woSQlgZ_D4`O%y5Dkd9uRwhfIk9aRs`U9k4eIujVH($QHMz}t2 zqs5l)lb8CPv;UL4$qzQ0L9@v2j@swi8LZS>7a?7pp(!DsxGLsdSRMTuqIC;mI?=Ea z(z4d!4Yn51%~%LuHJLzNkc)?KUBG-X1~tSGzz`T6mYn=AK6vN^vR zJF^JqyPLT90{0;+pYRsW{_!o;<960!a*>Vnu8gcRIO^%5{vtyfDbx@EE|F^xMQ#oR z#bj%zR-+%q%~Ie9|LVQ0{tfX=&>76=IzFf(r7XaGVU6MkPRUsdaT1cG*(GP0{CJ%y z(sb+ib%*+B;U;?nE-O%giAy4^821HX`-dXGO#lZUmk@9 zBe5=;^Nt$7{Dg0~marB|_`2>E$5V|RZ9OBqVa$fKQUQ{z=ug3rQ@#tS&b+1`Mq9U% z24O+7xQa$FIej7&k$l|(X7Xy*#~kvj?oI~qPSgX4D$)88;tv2F;TMwW3U)J+JwjNO z9EXh~rl^a_IO-aQh-PSFbaVyyU&XPCYRl%IE=oiP#mj!YE36m_J2m1nN7g%{Ea~ff z0fk=Po^A)f3K@^i(v9=gOChN0?bZJHWBrm}6<(fmG5UnV$_KoWb~msSz4?;Y!Zp^xYe-}X^_E>!BNxb@!KWM&7$_AA z$579RlmCHUstP#D7Og)h{+%v)_Ajn7gY8~SXEYTZ|7;*(!YzmeC1|i7#Rcq_xuWiz zi73oeUK6hqg&W!DI`>DDgtqEPPENsSp*=;@WX7bvM7$0L)S||r8UD?9a=E<0ghCEZDThO}(LKRM#Y~j22SL*_k_(xlMA@AYCwWQZV^k5FzLxoCE zowy=mSpUOQgFHbLpQ9*Ui2IVhzJ9j({dBhX&viLw_%eY~&P$eHz_t(-!U~bPPS;0v z)CZSw9Pxw}%Uho>h`+7K^Y>Xx-m!uYb|dGV`Q}JhC*ydnwSUFx)752--e_6qIlP~1 z@7ERSp3@S3f*=ou+_n&X6%6T7{>)&%*R>|#D6YyJLFF)1;Z1Yrc#jX_EG#K%+lFR~ zar}~xbBJ4MQ1{WB{Eln(C5Jd(5Yo?{mZi} zGq)6G7_a>+K-5ZSo6DG(n7|2iHa-%elB4hv8qpY14Q%M9yv(86p_CH;V=2|X=On*r zmP+{9*9T?wKxIOP*8JK@A8|IQcfjuk@^PhAvNyj7&$AxIo@Z+j&m1(5i2?&}d{F#T zxRRwMInQQXmG+@r735j=uW0RL`&2Lsh%kR>c?n*1{EZ-g!K3n7sF@XFn(@i776uh7 zKG{Fo;;YIE^c|$wATXYfv${&s2j;*Yy4}^F`w+S 
za+pNZ-Nz^+%Zr7CK&bY@JT~mwn*>BS9a~%Ru$>2F74718^UeJoMohTOqi%@@0f&gO zF&YSMFyF=X+^#$~)Gync!8{7Ffm>JjX3~a-?ffQd=m+U}7Xb0VJ=8?OeZw3oTt`l) zO9z3R6oyX_k7}^zr-tEKP~F^0%Q)&k*k5n~YYZQ`@O%X$qEppG^bG=1vBeEUgwEVA z_d78_eE$9Dn;etLe{_aSd2Y9{F_WAZ1T6mXh)8pwLHhmkpY68Xrd5%^#@MTv*1qa~ zwjTz?QVncQ5oZu+;LvbvTL)Y5(802JASMH7;p-vs2bE7`SZ%AwfBVJ%RB755dks^g zY{Fw>tMEm!-cA-Mg{#^!b?5ecsyvP|F&5Zfb&zdoHKgpRdHuSIj(y8}oQ^n6XzvKqTS@K;^n zmwbwfh#+5MzW)vF!|`_y16OQ~BFob?CVyCwUq}4nfW7D*IdZ!-(zeSkzRAjPQu5}r zb|(Buwrl3mUGn<;=4yep^-$S-fr?29tN*ry9AHa~*Rqz5{rR~77QdrEvwW^#gc+~q zQ#+B-lXo|*tZ-ktdGlr<2FpOq-C|&1aKFE~bFh)^4H@b{^~v<4@N;TrATcdXNf9Hk zAlPD@LsDC0q&PcLM$UCqH(hi`({^6-eR#$}Gd{Eull#72SarXm{>^rSd7c_Vz-<&T zY=0mTAEy>woYnA!EP%vEI6CCG?(a7LA26`rqSW>|2Z5vzS7+kmLk=d!$5G^eShRLY z!|UgZv;&&UuuleA-EK{Ow_3t8-^>S?)(#GLIHkJPS|h5$tij>PC|7@0AzFqJyomH9;Th>Ra@4h;CoQE0{jm6 zxeNX(8DRiMN;iwxykwA-fO5<=sQt*uRJia zAA83zJz0Oh@aGZm>w2Z5i6jgUi#&BT#sGq`7vB>3cSSmUz^`%>_138 z`GNh-E<^(4aZ9X`9Y=ppV28=2^hxT4&``3Mgdp@M`rcgzQE!Islv^ZY&>J3W@a8hW z9o&D^|1Nz$HXf22ft6V|1-J#cyFL;hE4Xo0!h@kCwwoDh{Cnv$$FD7wpnsvfzPa5K z&x-iwup2h63x1?~^71gTyu*_wbNcQ^+wK1yHvCWY5`Fb(lNE4)i%V6bNJ)>A+g-5% zT^Y{ueAj<`Uv5A!(`hKT&P<7gWY1yW>MYOEjx5FPo$J_m}XNG zYHnBT%pQ8Bw-d7n#Ne%HXiCIGyGI6Tb9W+jq+GwwJODir6}V~753fLJ8*SRRNn=ly z15{!jNoFCW@&Se%TG7e7iMm1%2Kf&xt@uRaG{VE=;wNZ|3_wQ7`F#5h)6jlVwe7XJuW{__ zK(?ttzU>WIQPYo!D+P~*f(9PKfde0#%~}o{yoh%C*Q`S<<6lYWnrtSMiUf5P=d*La z22g)bq9S>aD%s@=Bw1*`&N}A>pojPcnqa^a#FdmWjsEsmfH|TrJ^)YtnltIRowmz=MfEo)8oW zMFvv}9oa>t-IS(?gR{rnV){%raXFa9)c z^%zA%8p`ALds**0B2A45I1MmAzJ@Qn*fGU#!1n!r5{P4pJE&!^^>(uUsx3DTc6&Z= zMfW}qxCp#=9S6A&#j*LENrEV|$)UZU(<1<4+zhhgC^EnS@9wEFUnHCHJS`RSBB%WY z^M7NVnQY9g&bnO5ChjIWZYt-^m!;rWBqnbsMQa;zn@3#{OrVf-jdY<7T`YMna=O-K)z7sk4@Q%%<~uTVH&>hz2_9}6%~GKq;Vljku?(2UKsJzQ00T& zG4fJFH(EXDVLJwXVa?K0EtcCF17EX7cF2qknqkKVSk2dPN*1`~7t_Kzt!E#Dj?Wea z8d;Tw7GmLxN(akT7dEXd>3Y)VLWSWi*PNLoXt<&(&hC5MOmRrmiOMj}N9(#+He=iS z_;tqw(Ak8RpRqAxWnuPke(z_@W9DSk(FX^<%F0T73k95FtzS2M+QFFi7dRyc@zIDB 
z$pH>VcL(3BEsw2pnUi@uK5<)2-8H|b!YDI4Auq9oDZw1L{sH>p{mft{$PlPJOQ(Ro zGWFKI72Z*=*7va96`SF1qM%B0IN+Qrr)}j||1#v%@J!F@45Mu3Hhi-;J9jfT?zUHI zQ)*J!Z7;OWVUf@MR~CBz7<0|O;=b#G=J$J&vjX%nBDo=_ts+a~9Akl1_IZ##bR1-~ zw%aggh*J+>KQ^Sc(U?6SP}Vi8AidF5K_ALNNzSTY@A%hWDAzUfOYb4_Y|LPph1J!U zR=nGd2m*C`o-Iu%=a%}cGuj?;J3M5}1^Cy>knkvM5}SRucu$WBPRE6d3>_P?!I@E` zkdl(Z)M)6)wO8-Cib~0s?`I8P%SN06{>I781aeY}$H>5d zR=y}-RTW_-IyUjy`ms}YYIs#w($yumLLM*03={4=d@6-&!mms79C!CNo;inpnqcJ7 z>rdXEIQ45h2nIj&Ei2Q@S6#T1tQx)S@1H5b_-;+;PbD?F66#F4C`Va0HMtPI0pYd6U15zE_TmQ!IGIZxHqH= zgp(yVFRw1zS1UeEtr+0LTS;BCOh3qwMs7iCDiO zp|cfo#W8X~b|7y5n05X@EP{FcvBt;RZ~d18zQEgdMfq1f-r?j^aPU21bj8KR3a~|< z#Y6y09Hn-Qedy?~)`XR>oHv(SD`f0Vr9*qAq3GrE&dQg1Hhedv^=d24?BY-IIelBT zed|+Gz6+WKnja^gR%p02jH`07bMf--yqZ?o5yr+)da!s#Og zYia-;k)9goD59b=dOqmpVmq4iOn8(fqijPFa49OP3@2p|uzUh`EjjeOgVHmJQl2dD zA--zFj~G24n?-{{qVDw@H>eQ562>8VmvR6gz5~zN4GhuMAHhI$Z&1mEw*dEA@kQlF zpD2}A2&Taiq%lalXXMc_jS-i;P_N4_jL|dqUoCU{$BZ;!tNN-HXMXMi61E3yi#fmk zfS&KNV3Lz}j{yP`fo};;%XwnMN#{zIgWEcG-ILQnx2&aNJP+f=zmsa@8R^;`BBk({$ z)ZlaoS7xZyP@8g3t|G3vs6Pn-AHc*|PITplRH=a$YF#%R)`Zuu&wY~iez!^V#Ffa% zQgGf!+omSV@&EfKK7Ds5Fa#KysBWAQ$m5+lb)477Tuu&e-n0@Fg_n06*2g@ka5y7y zYxcPeV^8Fr5L=`I>Aerx5yGmwGo}iMMm?=ESa3@(up0l<*^!1zF zTUP!72TAIcS0r*vCp>T=s&h6>8Ytj>))#qCiP{n;GjJsb(9L9Ek;eGxqKVNkYIruN z;6vap5wI41SKALJZq$A|Xb$gEcq4V8_#_G%%ac0PaFOZ^yc4o!H(Ye{5$9|N0G|Q_ zZ2zG65L{I1J@_>NhR7_)PjeyyfGuJvRS5wDr=GJD@7TS21qg^sOvzADR5n&#kDu}-;xTsdQm8{`J^?VlNu+G7BaKtR& zr~zOIt7cfzL$7?Yl00v9x%pk@EjbW$2G+{>M}BwKHe;9iV#XyET=jmX6kv86|Iy9~ z;%N?VV&;&1Wg4bBa4$7?ddfz_Ky0}0ImZ^reTd+SBnn)A8#GYZ8C6%f#x9&VT|nQ$ zQJq2D#1L8}Jc|#BhoEDSgBD91oG*Zt`i6#&;+A7J9Ay(=Sn~;9m*|_sQP0%K%(EMw zz-uA9ZDQ2iYFsQZiF3&Q$&Yf$D zCNXL}yF@SZ%(Y4S@oH>iz%TGz)d6u~Rh-h^%M^>d|-+ z-2>TL43YuZRv8L5l&(QnBK#nJo*3R~wCi>lIHEt=j8^s;x{3>J{pA^V(}TFpqHLys zD5}IN9j^t_s0GZOG0~_CCYH{H|LY)SrorgzIq8KE(n*Bxoj7jB2iO2E6yY&3`beZC zu*0y}FhQtVC2*ga2Nb`tDQxY;>k3kYl=$A}unK2Xh`k;|ME+6$p})DZZHS+6Cpb zi_sAw#2*drOh%d$Cx~$cS)Ml)VxIs+f()zCm$J|XM4VB{im9l?5Ml<5%jK~aaLmru 
zcfG=cQnf5HhYOO)Jr5tf4~)^`SkCH&nRO_(>xdE=h4MxdlS^KLvf`{1eN9{FG&SPM2fI011Vjk?{<2m_Pak}03!(2u$1ywwuyAaeD zp}My+_Ux!<&zs!`u4CXkJo3Ew=-JVC-4ybQQ2Ud`#)LMr2h4r1xZa@BdwU*q`11!= zS5zFt&XkL2Lm|#=Ota?$?KKJf`{n- z@_X?10Vj`mU1A3UXLSb15@^twk|NCLZx&s5iZ)PIAnUw5SfCE*CeCDBu~*r;#uOhC zw|$sJB7;u2+P+E@xOD-o)8kJQN!CjsFDHw79ydF7;-dvNioFM16n^Q_p|&(LYpfO= zdJ?Gkes9z>%oVFk1G)Pe@?*y5uJoiw{mqO08h4&`~~_#qKeR-D76}4u{@C7bKP$|kA}Q|2nQXyJ7YDF!adm=q_C9YBRFul!cWtRum?deeq`U5)sK6{y6qlEJeCwHI@* zK$@9D%ePI@r@1l@yK29o1!G^ap`rOB`gr|sUXo3K#v`r;3#0;G)z`;pc&!IW{x~h= zh`?fiGiZdm$aV+lH*(hJIIY`}KUB2}d|ozoMZHBmV&aSDT%yrxFbifHD$p~BPdh0P zfwH>oDOqZ5C_9^#Ww>Yz4mN1A?Xm5$V6Kgub2c6w>50KAks153@vZ)iCOfi_1f&+V zZ!b2&-oKMEI^>mMZ^nm!*Mhzox1QuWFj6ZjDxUntM88-+J|FFs5FSo8zm7r+iCujw zP*@lDk3>XA>p_f3>{dZTCXfy(BY>g4OH0Z(;<|%f1yI`Jyw-A3;Ai^{FH&7*>b8j6krI2@mvWrVXU3; zi&rsjSIzpvn2;@yhhbJnk}yi4M!+nfwjfuNB{=vgq|9v&QFRl6`;j9@_N1c!>@R=) zaQ30_Z0MSB`$y3Q5a+JbMM8iFP^mHa;3;KuWq;YVP>n5cui4T?-zLDFB72MZz&{9% zlGOm@KJQx~5zGOG>72|oq{2#2NvnU2$2af`INL6Vp*5pcH58=gFc5c);a#JuFnY^5 z!R4L(9D%jxiE$vLLqg6;S~f`o(~wtR!F@o|0F6T1DP4UCR6U2*T*bR(UuaXkfZkly z?n11DlFJvWem>Va9mk_}xmx;@e{q;IbP~tdB@Du z?B&N!Lu%b}qTUes59m+dS$zdL&Z}tXN$kR74QSnbdL&L|7=2=kbNNzqY8n&YOfdz1 zExVnS#)fjM5#=~pd4Vi4ypwPFx1Y}|Du|4iumLf$+>Z1MBjsw44!5EPzz3!zgdPAh ze6j37CPFL1kD}`8`-6ArYMF7p7~7~1$x$0AddJ$}y!1G9znJ)7td%9Dy@rF<$ZGM6 z5+CQ7%aPUm99S(g*(r_UA~?fI@K2SL&V`&s3!Y%UOZHP1OvTSV32%;eG@*}=M>ITZ z2Y;u;v2$FyDrdFRp`g;3V^kSyCFFb65+?rrCl2O_MflvHKH2vfUR_Q&L)Y_z;N2n8iyMaOhk4iy6nDqCzf zfR8-{+Pnfh+tLGAsy}=BCtoETnE#L~D4wZhUl^m=7}$Y(;NdcxF#Mi@qzXQko$rW& za@H3x3ie+W>;DV!HM_QL4S<_1912nrii(%{EnIyuy=oSU_E1}fh8!#BNON=*b8EH? zWm--FW8)~u2(pVFa*A%=D<-29+9!RNT;?m)&u~r-C5AmKFQ;OuOg&~>Xhtu0KDG&N zx`8mzL0UK4l2WTsb&Q173GCH)Y+Iz1Ir$HO%r8VD?!0z3u((rN?>_tp!G>f2Kqm z%s3$5o?YR-KX$$khLcC8jI_6Ps5^b|nRt|QVavobVlwgcR6~Ug04%+-G;B(@gO)ZU zQ_tMpeLZrJIQTbM)_W1*(&XBvV8P~nCWj5eic@EL{*z~LcYEE1MF=l1qJ@`(StdK? 
z4&1Q?V<0R_8n|gjsxH_HpPPuS>wET@ig`8_v;z?GLI-d>s6ayBMPyfZ{Lk^YahffjXege2Vbfh*5W=ex%ePLHqFR7MhMq+)vE3wD6@#=LrC;2ZOd7 zsMdyEBD=Z8723(&CrB!Ku{FKQT89t4#XPhx(5$wZkB3?Vu%r&}7nR&gf7*sF&!zB; z4I;c+xLwi@C(F6XrpcA`kDR;bUW5g(gb#%d1_T*)P+6`cT$H!YqaPhX(PD|AGT~UE z(oi(vU5F`s|NIrLun+F7%=^vAs8;y7hgqu&#=w{?LA9U?))b^ckh43yW4!h@Yv>vxM%G(Y+-nK z<*4H;{W-W7X`c4f;SUT7479`)4vJnH5Kr~q(JK#YPzom}|LYt;S2X<`K(tJ!L zSllm~bSF34r!&F4%f>mIa88JGG;y)Reh%OgUi;{Q7tO@JqKjAoUcci9)n#^>#2T=g z*jy1=%(aguyZFzYxl1nuiGpANXu!@j_svs{c_bYK&WJ3fOE1r|xVK(U(U8SuUe(Y8 zDTCTgx8b`(Kzs0PHc}`V6YkVyr+~Gf^p`jjleT{TX)QzD z*48$B1+qt0a;PuUw+3JFBWm5C;Na`XU`*)!o6;6DPVJRlWR2+&Sw#l(L#~P@u{fbr zcE>UnJKNcNQT;fL7a5YF;Q#0zv_&da!bea5%_)1^--fcjCsFV~USnpqwCd84u@dy|>jO zhR7$^d*}WjrUPh0wcalJfWtdQOI%FGxoA5#dFBZp-Chm94nX#u>3x1LQhdNXz*Bxo zR%HT}5PiJ=`eh0s%>PoZMY>F*mE!~*!c!vp1BZ7Hr8e_nq`|v#2cgqjMr1#)6Mfp} z!VTwq@6`dnXv+VMi{4PWtTRZ!ZlzNA!t`>s>r(PQR6Meakf_c8pd-S?PAn`|&$|-* zS7UYOoBmnSSRFK4MB)pfN%hA>Tt>6~5`B#M{(EcJ1eKip_`(-QsVQ_2GXPo4#n9dM z@n#u@i-4*bRLV?X=;-L^^m0`J#?vAke?X*KPY=w!FSd2-Rx3;*U@FDKuBa1TX^LWm z>})^rwWVn?M_}<5e!5&d%9I$BG%>sT{)Z{n_d{ZUAp)g; zPNZ6JE5s0Lp2x9e`osPF-#-7m6plf|HHHD9ae$)&ub}yP0tKUEQe5n^CqM5P zTZig){nsyphilp#2uGkzX@noZv2fkip+B(+!t@A$LnN*~1;T!nhB&;3+X^R~`%9Wy z(0DX<%FI*3;&G^Nw?JWpqP{)p+_FO$-q$2*YW5z|5ieO5T5cicIPl>q{24@cf7hei z8P)1PU6Eq}{O-bD$NR4wKfi5SG5|AK1CYH$bN83$ZON-VW^0=&Ft80x_7H5*_`790EQU2=$fY+GxlM4jV5*g|Oa^ip4 zw|;M5)3*hko-T0Mds0~}vAqT%=S zYHRNR=xK780GY3zXBWGg)QQ9u3Q&p>0#xJY@2Q`Pr5Dg>$2~vq+Bf~e;qZgLm3$K!Xb=WK_eip+6egi2eTVTQbcG)Tdt)tI2%W{nO zYC}RtZ(3A_`gs(chMvBDJ5ucpgxk>sNpZH{y3`ekpzAMD>Va5=l|8QXC|~>P3jO?^ z{>NyWnHxKCyF@^MiZEJ#izzXw>yKS>fF&}0@B0E(2g}ExpFk0n1BHH{w@@F8W^K?E z0aYL^S+zT;WOU**j$D)t@$OIX8a5*{WjugA6ZLLFqh@0F*P^_97BCHpJM(Ij`l^nU zLSFnMG{&W}-d6a=7g1A1#Ky*oa^g#pwL=lvTUDmf)x!M32@X3VzG8(Og7L~7H?^=t z0UEfFk1jT8>gY?~X((YY?v4Faja^fQW`F5Ps@AXOk^rAzi$n9{V4&4ZNr#xV08o(K ze@?y0J`6kT{`>$s#GIpr%I`azn2?aoA!Gw=ex5K(duN|+;tw1{;lMofP><%XFm@E+ zEg{%wY?8TBEi+pkaf;}9w4TP-#9G`&iB}3PRq6?*6o(vW;FD-*!RBy 
zQpYT;IE+R!pitzwZfm1?4gm}8RqdOXM>fM=N8MGI~8tj2y}Lud8brGJziS3664~cV6pP zbPgz3W*=U7%06{k^I+mcVmRk(j{*xqK!Og{-^L8yHUWsR=N+%yQpfUh(#RnMR1!Wv z>NVy!wD`a4gx?akErHOm!9-IPGb3nWU{N#sw|bKzJroshV>Vo;G1|pV>`;)5`V!-s zXR$CtOjj%lFr9YdmA*N|?&HAI@g$x&ewH0c((RaykwYd>qY$rg1JM~;*h<F$89QZM@S!lJinL`iCndNuvwLTR?EnnwrT8 z$fa$Y?ZUJe*7yef{M11gd>*a0&TFSo4J|D?wBZjVu`s>tpP%oL5faL^2S-Bz(A+>o zU@9NKSZ;j;Cy+GTR^ ztd6k|s|C`|qd2{}yzcA1t4j|9JQviGqPXOQPVxhhv3R&Brqm040R@#Q6I%q^QhEn; zo&TV%r+nzTF-#u(44%Dxqaa=iuUHX)R*(V0?u_+z>I?q@&!c|Rx=cDyTepLHD1wC` zBz^!A2B+iGn>!88+cwk$*5Wx5l6zm^lG-~D*;ts#dKgG|=L@YYYE&3Ej4%tT0eB+FcW_T`w~x$#EHy&`SRix7t9wB7UKY}KES952Q;*f z-if{<6@}2Z-X?{+ZW|311FVqMh=2urpu7R5q&us2L3@pQI25Fv`odu9c0ER(MOuT* zUP8%3q<;X8B&Lyv+h*dW(|&VNU)=ztOW+MA6`%+WR>E8dAwU^EHY6X|Fg}5jWV!I` z6P#w83wDE}zi`{uA?-iCb3f}N8vvU~kV0iSk*%Q%QQaNW^w6H0c;%tN$vPTV`>Y*Y z%Sy6g2CTKdnJw3#s}!Ioc*vk@dbnnX^$@)#9X*hU^Hd^k`jP1h_$@R3#cBV6C+vII ztKYLV3-(M~WpYX<3&sWc6(DL_rsf(3#N`2cXSAMY8@>SAS{b?KKmx(-IxqIfUzSFoB1AYy-vZV3KS^soTmg7=< zd~38STKIybJs@KyA1k9OISFmjpkbEO+xVcwu~>FWX~g@XZpV%aWeuQ0*y`I_(~)1C zY-2`L)eu;~0<*+Cz?m!y1O_>d!-x*}#c4q+!~`bd28vf7f%fwfc6M~`VMlowqwFk# zhdcl14!P$>H{x9c0CjaDyv|o$wt1D4@_}pQ#v`q69^bE!8hv&pkP9HdSI^!SNg-PK zbFWrkf}%{W^0eUdz7WOb`(`Kp3Cw1D26pqw_=GzOpSaZ#|X`G83y z+T`XDyU#&j71BX%OUC?qcqFvUy7%^-`^3Wj;NB4envj1l$2Q*e8N*Xpj*i4~eyn^qKE)#rMOCBO!8k2`mWML9nPT_vBIX+(~;z zd>G}D!=(PHYxmXJ96F4KhU7ay$;P@{jJ!g{eg0)T9SV9?4fh0!NG|(>3S{spw|AKPxL|4&h1QHAIUV zr6{O48?`6|@G9=$g{wtI{n*A&{I~S;eYKd1+KvmE;$fNrYBcn;(BskLR`dm6k#>id;B6LJCINT{1R2gGqhB`&G*Q2&g zF7)_UDvD)->jU)F3o1oq7D<`j<);Ac5e)pft4b!AgCd5yOiE7f<)o?%n(Q<>iG<*( z`*;=^K_P_cmle@AgZ+S`)vT!yYS*(F5KT{AVjcVoM{y9`z}8j<0gJOof6M*pu(FPk z^F8%DM$Vpp7u=>J*GUi7?qC6*=+%U)jjNfH+lT6_masp4(&v|+c`8iC6lkI!^{-MA z@`(!+!wke2}bf4-9`L(%PBdk~)biwH(`X?4xCK zzk#MNXD1YQ+B&^*;a1qT!7yow8$Cl9}bah-M!1RtTft@;jVAp@wJiL!&3 zgS4Hybt+amdMd6+qSaAp$vTG*xcsCPx{5O6hi9RmN5`cz(|E#q?G=W7O|)%%wtZ$H zf5mk@zTf5g1?4z?N}h5H8LHqBG8co(S?_NI-qzkxCa zyO8%-O;6UNq;0y5xx0w8X3sEcWekuWK!~EcB=w{HG7n5)@#6NM%nXy{q^w25z9eK_ 
zU_<{1{pi9paD2afe>a6r%15nx(}lmA-~amhU~I{@!sFiH;YXWI704hKO!SPRS&fg# zdZ$;|%7#`b?8Y%!x4S!TU5Zy{O03(A8)}9j9ZGD%9}ada1J<-N6(r(6#Q249unE!i zT4fVj#;U>>%yDSg9)PJ<2*(?gh~+pvsH%4CcrDkzYHSgLT9GWg^!{Qy#k0{n7xnyD z@BQ_=dsE)2HQUs`^Q_4qVA-F0;^iUV1u4@jdsi?vRel%h=dEA<(hgrYaMX-mC$Shz zRV%cdTDAHrD9gGpZIyvvt*J4<-NYxNL#y5eRF=?7T)5AVdZjgx z);sxGJLMK0`tTu7TT4yq$vmh_UZP}A=Y5%}An`Qpq=>55&l6>#Li4R{o=Ec?R_@nC zWSd%7{$kXFiLl}@H|s<^sAH7MsK_&N_v1MeBcb%r0F}3>-PWLPwP+kvk8N3Jf=ZHD z6W~Ks002j>Pxjh}cMIVHMH=r`mAqT&;y6_1fLmNlQ%4btiO!st_j-eq9C5>$_F%~1 zq&_@koAxpVQz}uKg2nN$zu`3(Gf~x$$vS;(T{E^Mk~@w=E)3$IkWd|A_M^}wSrNZx zxO3(iZLA!vgAT3u>ow^=+-c`j-05Yw)2nf(U2&%uOT2pZ%E#EN(ctEL@DlN-fW^=k zJ<=&CL_#?c)ZqJ}KPs9ZpLYC8wT)e=+wui;KR=7KE2wRWSd8p-f<43S%9aJm)4r5R z_Al3&t&iw`t98VJ_|9YLuvDsu7YjU#Dr;)22@9&NsI8EaKx}vp7r{rA%w!4$2-69@ z`a}NnoHNfLbCWSGSqBLZLJMMkvtvhOf&2(S>*tCM<}-9AN3YBTL&+kbW$oQ)?pfmTB|>EyTs#Ev*HYC=~XAbYh=p1W0-XON=yu~ z`6F(i=yyUw?3rc9j>EzRr)Z||-A1sv_~BV|BfVe9&_duAF<${r$VOJ=0nZ`WJ1Zjo zS#lCENd0vZ_Wi!)=M)QjY!?E#zT~CL`@}H{z5Z>BtQ9_gs_9afS%tg=_Ke5V1-1O@ z0fG1E8_i(lxhKBjbqK*%fO`1kuX$^GS_$0|NQ8MtmoW@SDV>ey$3#S~P>0T9-zR9K zc#A8T!{qh!)|_WITJz5BZE$vw(+=5eT}pJ&G2Vi2$ZiX_>t2*G65t>>LPGN| z@8sCW)(Vk63Pj?7SF1u7Tw2dEgc@uJDWAEq{OwxBlbALG7Xd6t3to%P+7gq$^8Bh) z`xF-Ckrt|o>wqw{cGg&7Zy78PI$iJWI3I!AB6?Da5bz?VH{cMUJxby zhdJuGODjs4Fjo%P}>Bbt3awI4n*Dd;>wo6Wp_p?;hPLJI6lV(oISL9D>2W^nHIvj7`$j7)l^x z7Xw_W!FBwBK+XJ1XYnaDZuW`rY)}|7(M0>CbBk2Ova`@$%xcBBSA<9hVk2jedX0&E zdQ8*ZAm7Qxe+6Z!2I)KjNM!=0&cl+7Cm%2xBbX{OI@uEg^?Lx06a#fAF|QyyL{agV zI35+SstLRYoFDxZL8dU%4@4h}3Z(bg;iilg;7HLCoUK2-x<35oJIEe!ihR<0o&fv3 ziHc-<-)$3M{CsOHerfNl*QPbyxu1Ih=dNY4;2Q6IgA(p1M5L);YtXo5c=OQ|6eB3- zko=HUEpd+TVlWN1;Fw(eNZQ%k6()RLd!Qs+xAfS;h{0P?BiXc(wDhLAJc zpYQmYyRlY4u#~|EXCC^#Nc>vQWIX@IEnBzFxC#4CkyOG-^n2M;fVzka>rv14l$q3nZzNWl6acFZNq5wLLc2+uM##u9T{t-~fEF?Y8o zZD89Ac)ov3DME+9IEjKWn}VQwX>5Lh&J_o2Gp+-ZVO1q(W*V8%US)1;SPLu$ zRJvqn$9=msrWx+-%unts#QaRk42c%9)g}RAM$%dJCkf+=A(*x($mKg%JtLT7q5Z$W ztcBn2Ry??Q{CY2N%W`7d*U{0T+<8%959p=)+Z3&QZhgnrlyDT9FRyCPKs!Ka%Ggvp 
zZwbF}{xa~QdZmdvmG}Q2EBf0}z44fMY&cplvR{5RfFId1&+#`=4f;ZGCNz^=svMBf-P=*Az@xLfSZ3l>M* z;q~YTk*z2FXU|G~X^BTWJOWZLF_F3b)~$QcmApU#?4`{-{8d#(ruMX&R!~WIMAL3m z%-tyP?J(&g!1wz!@qIG8-)`_hH$&(+68Sx>jpO(2t01(4tDi3nVJ?$gj7GBVw1FMN z$OXiy8{p{Xe}B7A>u~awm>synkJI}ujl)Q=2PP)4UmU_G1DkNp6KIWUNxE{XX$Wr` zA!1OXO+%`DSk&)uH3w#hA&q)AryJI`sv(Vn5c$+G%6Z+6m^Qh&JD~*w7y&p1{#qfM z`sZJ2)6e%&U)<2=a&CX%DCrGg*h^;U0SvSaOk-zcA|mj^*o7CkCE4a?L2Nf}+<@+< zVD+Y9Ing{E)WbuL8h`Q(x*m6{m8`!dlKXL#K?sej@1FiRGS-*-0+U2HC}#DxZOf7v zOkg)Pt zX48Rcp;ydWaPnOnYU*WGk}$_b;Y_gTkde0iwOS0n z-3FiW+hgQT`Z$cLZ>wK1{teRT{IcEB3wJ*4ss>klGCd(nS;(!9n9E4Y0v{iZB6ymB zGaw^1Kh=5?aiROmgXbO8T5ewYFe&hGJgC6znwrBJ=-*MdG;?39v7TF<0fqkd5c>Ss z$cX)p{53%^wM3dkjeG%W{>$IKeM>LnpdP^3GGm+T>go)M!RWJr2>2A}5?>+#fFHr_ zLJN7Mu8|l@ECV5!X-F6+Z+C90{br+!fWffahzKCSL4a!QY+L^gP`?34n0NOewG)#{ zknk6?FWnvCi>5!3*s(m3*=uWeK?2VJLg9A_K6c{PF`g<*Ze#V_90W=h*av~<9@bJ( zEoMQzNO+zAWqb!#$)dagA_MRvJv1)UcimV+7^7d!R7Xig!CPoO=Vx0=_UI|uMO_$e zd;k|ky~AtE*FO<@u17STBD=M5OEAJxOi4|BH}*Uyls1-+L4)O^YuuI^K@<%O6vn7t z$xZ~U4(a-&tiq7R+rd*{^^ZNtj03t`xY;5KV4vT=!9EihyQLQLRvKA>4=bllsTRzG z`i&R>kfsWyO0OqYW*gnYAGPszcFG1B(}ZpuHu_7ff-&Y7B9e!SD5Q6LM?Gp9zHT$UX=&{ z2rOH06VO7!-dD53Uq*3H&&Z-HP*&v z|A!mc`C#t;={hVju+Q+E{KzLtPVr~W+VYts3Ze3>{60u|KTL$NxG!3VS<+Ge0x1QJ zXvF{V{tR>!epjah%3{Pk@G8#OYM2L}K~&2q(x6bs=Q2ESqC26I=z#{(5+$9Bmy(CN z>{zFYgn1n2E<(qnf&Yu*T%#dve`bX>-c|2j5$mO=irPXU#6=OG<8EXVGaX8NN%+&C zUy@DifZD)8_+PiDeI713=HT9#J)&QG@tYTa$@}*hDNJSCLP9^s#J*3Q)l^tkLZD}a z_>7@W<5#LyEWM~!R& z-chQ{%2IzD&gF;BIE^A=KK#M_XfjMHP48Pb74NyreBd zo6RH(zD)|Vbsvs!o#Ct1SX^2(KEw%<&wT__Ri@tYcNcYS zhrW_Iyy5%Rl7DJpKOG!?N~|BDbH@VI$0PjyO8N01pwnQF1U`W}urV}GA<7FB(rWrh zjDeVMtGl~<$fkVK@j_V&6qhbIlC9!B2|Z^sr#Hsjd3?|g;Hy+N3|ECC#BnE0KYnYC z>$ZF%A#PPL$MrZIKU%pPx+k>fXCT5JY_tk(f-N81*3qVM;`NfMWgKr|*xE7k<5lWZ zTz`x-f`2T?Z-mQww)MD0XiUtDK1kEZF43L0cU0DUT~?eP>y`(2SzIpYaj4y>ATR%; zC+;}pjrri-<=kG~l{)NjgtPV?6|y???i%f!@=@#l%C1Hie?wzBwZ&ybkOmCT$Ji7; z0mJ8NfVClU6POpr(Xwr`#ja8ffy}v>hKPa2Q&Uruj*U~WGAAQP(srW}E6>-2JC&fy 
zCDewmPFwhRb({dua+KxlnI=s_cn4dNU|K$SxN|Uh1o9boqbv~e+yQ9xUftfgig2vZ zpwc=|t*(Z2f^576!&V!VX|l2gb-vc*kj?CUxc?Y^)xWpBvg+tF2AL6ouIn0P80T?2nAyr8A|VC>2{MAue(~U{$wIwC6hUy@JWc*EM|BmpM5Yf zCe9r)1>a22fN^%2)9wf#S%+a4DRW|Dj~qW|W9E`d`ZJn+f+3nOGw^^h!a`95ec56j4AFneM|4Nv&Hr zi(gW1Vhzv)EL}BMU$yz_?>3~X^zbiRoedohC!*EdG76Mi!o|B`e?lB?>cKD=(<$MZ zXLQ0@nLhj)*c?tLc7^z-AA_!td@;aL+p#^D=*EoJe-axzCHzghAcrDAaE~9bP3ol| zGK=y$L^!ALD;W|TOTH^tH-WfMcE1uH1#|?>nj@}inBESeL}FM(lF8{>NPaJ@E*CvMHd=Y5=|0+}*xw4wM&9o@ z$hO;lycDDPMNBB3c-Rox)X&K$eGqz3%ZQsbU%tVo{V%WfZu=!QUI;2n*m)I*izqNB z>@N1(KOh^CThmA6VW4|+;q1N`JM&(MmN9iNl16jzmW)a)x)!1o*kEvxsW)|V#Y%%X zXO?tL!5PqxJ}g={(oXw$R6r3P)F8KsuUymG$M&6u zpl(vJPIOEy1QqN;j9a9wul=$Zf&Lq&2*d+|MRHb^wzA#x(djl^qHk!+zx+SKzB``l z{eN3U85KpSlu=g0OnkDHrbLrc*)lRqWQ&TXQCV3H4V7^U5fa%njVRd-5>6!RzMeYg z{7&cBecyk4ACGg+xA=VC@7L=&uIF`KPku{tb8wlz;}kRSbov{YI_11c@1N{Wmb0KE zEd7wQ%k&>JiIY46KP%StI=zIQrHP9PSZ~!OQYs!c<8yL)S3?w=YDiry;C%8SXb_jb zE?m{xOHNc^Uz8A|_xY7+(0r^lm{k}PaS*@)sSDu7|EB4P)XZIURslQod))E7K1VWy?#6`oT=8lwPy7Gg2Yw(Vc$%^pWCG{sdGP)s zKq>y|v-$Xn3G;*Qj@PweeV2Mz2$Fw4$k(Yrh{5{3JU{F$>g#6+w={^t!CQ+VBvvK> zT64rg0Cf4q5K=!u!|4|jch6mAA#~NA_}T}3?qMJoFZ+1IFH?3u9;t81JPx1sgcMwt zO>o1=4p{(6wH=}aa|jgC1DAPMt{Q9wj}4#;0U_>3yQ6knVj4234h#D5J%Cj*QQY@U zDH7oj;Q$4~67P4Qkxl;G#6}4wk!|wQH{pr_&#-nOvdQ>qDyhL>?M!B7}d5m+jPoTe~z{wG7qAbjfZ0SX(jAVYUptVz6W|M^wE&jvw)gf+rJ~Ksw7_k)DL1{8YYY8>; zaRnWLo6W{x=V6OiC@P1xAE{|^N^94xJ29#}%x34^-(Ep#BRi{nyVWxMSJ6)qeQD^S z55uX$pc<+kOq~NaN6FtCge$v0oDyR6wr#^|Jg%~U^7fX~@>TH|hTx8Pqq4@m07V$2 z|7Z4Bw?3y?U(uy^f{T^q-AVsv0m=!wNWqe$UaH?-ZJQNYX!~?ZNtq^o7|t|_fY8vl z4`(T5$f5%t8-CW=AL#X+Z^43tJ_~{ZLpH&L<8O<%|GX}J_=aJn(=l4lg8fX#mls;z z)!Th-;~cV8L)AQejd$AZtR7&DVL$^?uqFhpZ@0?i0n-^EDxjo}1^{B6bQ!ceQ49`Y z5RBT&qA5wp-f8g6w^derRZ_UA;A@*pv zA43hoZ<%)xyJXSD$eKN!j_)7Vni<)I!L1J3R!S@|1gz(CR$92Nd(GFeL&7gbc~#ER)Sy~_HM zebm(vB^@ks+JFV1mHozbrg?lsd!8lf_Orp`0tpA9ByW67`4pe@gDuRM%ZT5L0e^&+ z)<8R)q8B{LQhyc1nFz8~zz8DHPy&m8x3a$<&kB$`8iBrhF0 
zjODDrmrBMA4VO7bbmAo2#A4+wqW(IJ>Xj2#3@_)O4r3)RxK!v8dj*b^K$ch@sNt_R=Ci2&>;n`1*IjTg*!@}p}g^7_4q$|R4)X69hNRPEa3$6D0lG0h}^wL zh#Htwl#m<@8%s>a8{vK8+P#}j5;2uMW;=Kbpv)eMuHw0dty3D{i$f`Q1>7u+8?zKM zVj3bwKP{T}Ox1fjz@@QLy>Yt;mBX#TFTU=Crf-`umf*PCeNHD++u`+Dma#{ZfmLng7#( zu2;VMAE^7tAgJY&p&q&AYA}3Xgn^A3(3E#{XP>Uu(rdRJw9T}>kJ{>#z^U$0Bmqp4H{k_b5FCgRy=B&6)#Tc6Xz~$T|EVi7QAR+q|EQ<$jku#qprOmknERcu;0*)i9-JDOqA+dW z#&m{b9v}G>X|55u^4(|hVu%w8BEuqae7^#*#h5eH%qbHbE51)Hj>p|&;7sxY3N4^| zg(zaNE@QxazWm%`#t?)MEcxCuEkn<`q@4vY0v@s@$~7RHlcn3jc{<$gf}n>5s+TY= z57=xh%kgp2c3{ys+T^1e0t5@|EFVE&ow(CpN&W{W6Lq_|T=$hePlNX1z+<2@*5lck zc0T(od^Sr#4MCEA2x&S$+MLjaMIsDx5uv9Ph)f4P1RQqV#U@K4f?yuJ=eBkK4<3f6qX3>Dv4-SulP5zjlKCvn6b? zvP;*xpUloKpXh2^COX@AoKt!1H$Pp`k1RwsE%acKv7bD6zJghK{NRJ?`Zb+7Ti|~K zpnQl*L4>K0TIyBH7CBEj)&%P_pLM2 zDqYCU9$N1JsZ$$P6d2=yF|Di60s@Tt-id(%`G1KE3n!F37&eG0CRDr=r*N9HB{?OO z`5*hsIDEpri%byX1tg|5qZHZe;!VDBD-TBPHMUq*w5W6!%_RT#$*SsK^QR2- zFZry%@(jjS!j$#bEO!2h;Rm3!g9(H?^wIT?JTgYtiPq>WpPOCaiWJrQTFdgDFznp_5RhMty%YCCJDJ|6yx2vjC8OCtL z-9T9kdw*b&zTjHq8thkihVeffsQI>5*w*NzKqkr3xBujyM;{SM6Dao^+V%lF*Y)MO z@^*IHx+OR1h}D&C{r3?&2oXQIwGjLP~Z^;927CC0tu z-;8@?{tITI4|q^F{coLRn*1=#Sh7IQ-~{a8V`=tj^DZMN7Bmbqher#{KkWs418*xi zEOZ^-c&lD|t+`m(Di^aT*UHZA=M@ayK%=$rc2YXS7J7N!*Ybf*&f~C&r%vv#UPaH@ zB|i3o8Jehf*e+o{c;|qQ(#kVz?n$x<=I7X0O1m;pCOILBI$=xr70U!*?6ejR+RHiZ zVuHH{mI)k8$Ok0!91#S}C!0%XfwXvkJ-*?`Vk&YyaGVOHc==hGQA;m*#wgNu(hTj= z=?T8c;5&3Md-03E9nxk6C8uR2`*N6mr-QId^Ko2A9@iJ0 zjaI0q1XYjz84pSr6N|UPJAZSzRVXXT=M^Wo`})Qwn8*Ved5Y~w(=hDIO| zYL{*IVUm55 za??*q8!Szo3bod5SblT~NAb>!X^x2)kFRE@FFTX$do2N)g!0@D-r*(W7w#!hi_#VH;9e6 z9UCbO0puY2PCpHyuF0E@4=thZMnep8we;VE~ zE0v5tGowT)L$A6l^`@1m1C%*^!#h`q37~w-c^;wHR~+DPnnv{^(Fc)e&qW0iL`RI2 zeFsuAEgugJHX#AJ<0hlffZ5}T%`H&fbpQvUB}Klx?As?SZNraY9{W%6`u+n6;$e76 z_fne47u)yb<+(E&F5ml+o}LshA-z6y??CRwH1K|(PJGBUi~Gvn*(`^jT42eDusR4w z*=$dAEM*1dGB8a?qsmjiGU}k5xCr@MkRQmEb>}+JtHAJ);37Fo-2*XUt15!(;l;ff zeOVSfSm8f35D=U8H0C>)0cU9lh!H`+<%)+!QXX+LV7rAS0w>-+$h=%(85iFV=LQ*^ 
zMWI(Z*10p`OLy>1I1RXwb@@sMVkP=Lx#9H)?K!wP)5#!l(2sEu0R-8ukG7oyY2z(F zehY&d8=EDVG`lXkz}Uw`YrN4afB5i0(l#8S$tm}8Y4KVBxCAA1%_Vn_5Lzf{h zt*v4LJEfSMe1T_`d7gpBOW@S663);ys!NNJ?{c;w-*r>I^F+SGFtKz2H+xiR0e*lx z0*bRvi)+n1@^=9c%I(_F62%gBf?OXk++$O6=ABS2!`2fU9`~C!7#aPI&-_mfscBuJ zxw!hosJA+%a|7g%<2xKhRjVbOW zb{zEIo*1h$nTUV1WEn*DA%_%+QQ{8s22T;4S&o>c!YwljxIq}zX>+Dlx(pnB1R@Op z_zG|=icZd2bl_RMrvAYn`XtySv>XnCDHjHyZN|u_8LzGlcu&c<&#xncsLcasZAwC* z7$7x(74W0$$oAbMAi+_*YiF9?dBiX0CPDVlx%)3l?JWc1jopr+s?Bo??gi6oObVs2 zw0g7X-7d}c7L0(fsm3c=At>Yh@8}WED(KbWB|4Y&-X74834H_AfMl#Yk>gi7LKI4^_vG zt4Ob<^SIA$G1D5u_S;A^t;ImSRyw?^0DBc)KDYOl%VWAlqZ|l)j9lw281{Q&l+tMf z0gd)hTX707IFRnGu(O+%W@PU|A5 zvjRx5tFBkSGV6o2kwjLjmgY9Zzgdz36P@N_e{h#^@yzjxHEysy@jST?tzY&B{g`0R z>-?mm!5rr?Xk2u7DS03`fDN?`jXahoknfJt$E#Sov_81?*4x03K)tEACE;?N_HvgO zyKu_Lo&q_>d^{)fa>=|7C1{a`2}UKSPo=(6J5YhYL(I7eooKL)%a)U+qk!7@q3Q)Q zQGC&&C*B8Fo!bU)rwJF81MnIHNodiMB}+saE_l7zw=eiY#~`>~g+S0Dp3R6m#_*=K zZWMM=_f@`pK=_U)obWVmoc}yHEV=8h0H#sQ!TbHhV2))WycK6mj?S3-Ta>1ukf#M= z>a@TqZW5WtkdB_{Fd=b;UdbDB9vg!J%MtNADs-{wKr6b_$Jl$`rCor2-PRW)*J&pY z@b)e7*qaUn?+@GBvv{p>gk|)+JA=X%#Rs~&ZFoq-N6L@7!a~;bSBBT(Q%85cN}s&} z$H|?J(+aUQp-%%s6~)$+;r9^qYxwA>g0Gh_yL^){5N0wX!T@rL=pqi9C_q+1^<#cW zx*lE_7NA}>I-_6__?tuYN`RJrfN<36fOVM6LbVJ?s%pCM^qos<%>HVlOVIYAIz$E!K!d_R0Fc*>8nr7!Oo z2iVX8DJU6|V)va;*OD_;x0x}1Lxd{Aqusyk-pa`N9p4^eXV{m#ac}fyb85JtUFt4? 
zE}Q1`0Qw@xKm|pnJ`UbVyGxQrVn}lEsYf+frlN2#(##$EJ80??5)&ivK+giMCU}Mu zwKz;7K;%5}R!Vu>ds5cW3+V2Ft!GmYB_Q-mTRyn+iK4DJ$qe|Saqxt{`P1{LIQuZF z?1;#*9hjSs09XOLn>nrx1rZZxN~}1l(4*xyEJ7B>!t`-qy3>rMXHGLboQLi9RPu{7 z$_1ASF>X$YuVq4gPH2byVJ{VNhhI9Z|% z7(x98EV!-2AF^N?7+eg?Ot)&rY@D1Pq1Tha?leqbm>dh|*#Ll`_7dE4?^$ZcT7K4P zzc?6gbe;*wv>Gh;NihF;((bI?l|F=6!OhCtHN0Z{9u9k5UjG#!t|aXRJc%%89M|t> zQ>Crlid4+U9)*K>a((kpON&{ix?3x5)mCw17q81z?V8|IT`Y4ugD%y6uIyFCK8YRI zpSO_;2FKw($0qv>WjifhNNaulm7#K1cYoB>#W4)zFcl@M+V}TU2tY#l)ZJ;x%ef+B z$^mTgBNj0cZJIl`@>G0w!epHk%ozEh+UV+#w0g1$5G?~OeHKWALFdki0|I@=$+A~x z#hLAddq>&cP34%jVAY|LZ@mh6waJF^BuqyA)$Jb;X~)OP%nvh2OpNz9Ji35P`ld;7 z__4VX>U|P;pogm;bO7(`9%XZQ1I*;PXS&%Qj=x%0Ka4hqb9+l&^ZUzM21qkFUyOC! zreM&)42y1gxm5PD;)5LJ&&7Hz1*OYAEq>?!+p4`cVl)HFF5i@($5?R##8e+~HnFkL z$faic<^6n56D>sp5m9~>)cKi1UU_t<2$n%7#e8*`6bkoC9z0T_ zieGQ$zbo-$p)tpx3poqdtNQU+Ol#ln;R}7wF;eD*PRmD&T_D9CII2;*Pp6;R$1|BApWi!F~mAF#G#@uS33^GG#t>i z-&o+v_kfLCj06-Og1E$S6|^i1q=PLn;FnSrCBkND)NdMVM8eHMJnIxuNite~J^r+vgqwuP32C7zOM zK{T75&6&Pm|^7LcFPg5bLx5tdqY+*?#0lS2#8xb6lBxZui!(w&Ui z&>6gVaBvT8FTzP%XJ@y}+S(e1VCP`}EsIheE~&Qa`@>k^AOYdfx zCExrU#+KpXYcMQ8^{}+8GJJBPY(m@CUra@X3}2-u#$G6hM-*C3SZG1KtY(Gx4-C%1 z)iSATWC{x;yT=JHxUfL8Gqc=B_sF8R#pIOgAbiQW@c*h)a)(bB|KK#c$UT>?*R~UqT1tDS5`)@}9@ZqomGV5-V{P3oq((}E_p}7BtiCy+_k-&9` zNCJE!n*TXUJh=BWhI1n$fbW2sx$B9hWIA#UjV@Yk#+smD9=17J)zcYR8lH6!&xJU! 
zkgd~b5!y>8SnzEG_ZyzR_GcRwR)T5Z-V!8U2LmI0eSId^Z6T$}hTV(Z=WuTX*_=^XgVv8Xz*f^{H|IjOhhsA^h3# z5abN2RqjyQ{Cp*r%zz;>yz!Z+Q{P3z=`~UB1i?f(hi!a?D0?(_?~d|H)_Hli_H#SZ ziZpN7kuzqSTMq9_%}Um3SOQ0Qilp{xRBlf?8-2 zFz{9QPD$|IW!N8CfqR5Y{}mJNbkbHi><(jbF_~e-3xH%D@<(R#@%dEJ;<;~B{>9!w z!Wz05d(xcMeMQvG06Wd-A?W=}B&WxW;h)m)&!kA;a`{mDJK>iAa_ zk{8ra-Xc+9eg@q1m*@aQ>ZRsrvcE%V*?qfdmAJ9tML;9V4`4u;+taXOBX%@{SMvk} z;HR(#qv0uHm`UDz?2f?5Sn=&4ZZf>U|L%Ip9sR@X9oYR4l2?EwvD(s742Ow~_3E@W zT;x2^COLvDLH+~q2+i_TPiheY!vuaEsTln*4Y$3`S<5|pwi`HC$bg&bxa7<`_>1P*0*>^QRewe_D4ckoNYs|j7R zjhww2YM=UF=^pEqz9pk!djX6Il+b4pT-H#MkzFw6ZrP&_ou4xRP=fX--86Xc>=g?` z64cY$FpLOff&Ef}sshm%5zdIE0}cBYST&tfJ_}|sWp$e2jxWFLC0hxg!YTpn2yN0v zQ5v|J{}Rxrf1Bf46SogUAoa2Di|A^)r!%C_ZinyJ@`z%`FP;9pe0jZLNdjeZoK^1kC4`$2-sR`aEpz#XmySa1Gz-&3%1hF^`kx9piDSr3$Q;QTHMp&LCR#Wzu zhvoJ$SVkxH;x#Di0MC07(>u(1c%(a&;Jtg{r$v@c-1q3U}|O1)0r+^paBC;kjE&i%;lIpU~ek7NQsDclg;*qv)W9qo$v22D<8{kcDC4i<_}zy z9JG*x=GU*k2K*#Yvxn2-(%hvm0FnnD?;vQ_|8NqaPq5bEb2M!|mDSyw7G9gYz;?F7 z04Mt(jGD+x5o5Ked!2ewzk>=w`8w)=Yt3c#8e>kF+6c4rXQ<*uZr4_Vy$*!j`Q>Is zS`E!pSHNrWf32U`p-|j7m3T(%q?5RJ1tSZpEo;x-oKXWdEpYF^QTu%uaSVT#GE55~ z$XOlx)=lp9koo^>B5dzLLKz04?HY5O#EBh6@UhGsE=Ve%eI z^yVP(SfQ0Gclumf==aN5I(+-z+k@5OdNSiG=ZkOkSlM|N)xf_j_JXJV^eht5UOmB> zSaXu$BwJ~(d%YwTD2_;7+Or3vh@8Za$NN78QP%7Q3-&H<*!CjQv<3@fFe5L*-Xeu~ zAVRQ+@EcK3y$fjsDSwCvfYj75*lagb;fzd&`I{BYqz5I3K?rz!G5V{ue_EQmz|x}u zfFAy}n8Fz}wWIy>?KA!0Y~u^8ohh)^fJd@pC=W|S;ob6X-{yCWgj*8WZTQ*X5UdZ0 zcSLhZqCLh9Ip6_OK}R-OS@wmp)iiQY8T;Pq2^t8k9vomF>|l?oYEo=48|$`Xo&2FO zOCFClAIp@rTSM0({fofGY_j1nX<`TRE)NeOh+RPyFi_o>-czBbnDK3R6tObRz2B0y zxlvDvIRe_t6^NF}tg#Kn$~BAk<&QnYf4XMbXOBMl=V~$x$^Q9=&^;Xy+wbh2Q*ui- z@<*)oLu|!Zh*FZ+vD#^8D8kZjzp$rL#A)cV`EFjg{Ac|ql=1#1l*xRn2ltTruQ&1a zd6+(`Ce%Jd%8A4lTp~jOV9CgHNP|0?wH2lUFV(8hvqMlqBUneAa5BU+Q`1G!;D+%T z?NrAh5LHlky-<<3rm$D!cQu(&`s>>EjUp~%FX3%tpVPZmXe9lo_wll4Z_*i3fVron zR1agEfehuoZ_U17#ea@loGbjd*BBsYV9~k3#S2#qn{hZg;h56~()IFJ2jv2zXrzn8 z4$1^Ph#R49gkO$e+WtE-{L_zbzJ3!rbydb^x0o5kAVKu$r>KVBC;OwQ$IElg%L*A= 
zlHtsPq`Hy`(ZRu>^;w9069}NR%>x7B)oAx9_EPd7+TK+NJdm(P$&8@WFsB5%{Xc)U~hSk4OtC0Re51Nv-DH#y||v? zm08%G0qOJ!%0I9bI)y0c1f-ZIORbmApU)@yA>0L4ql&;fJDXTBQ$g$54q&Ri&4<^; zPuEC}b-!CJzTQ64s|K|l?YqSVGw<27Eo^t1IU70{I>&*zaYwr99z&OU#=?-)ykO42 z{}-_v2#Ean+od&0qPrh5CL1-oU@=+PR(oMT9*KlKau^Q?4NB0Yi1(E_@{gXlTzK1j4Zm#>s#WgFyEVL-d-xOw3F=`Lw}`s+I8Uq znMy^RRJw2|K3yPs75o{e$(ipgq#-@AzmPIY9^%t8)emv+GRDitT^$PSmG=Y>(<$Vf138BqYJhbMvm;542j9)>p5_G@l zliM>jf*4XzQ4DzPv#tI~5UuAElP#`lcW(Z6bW>61*zCVNKd&D6u#FSjby#U23&jC( z5r|bA$o(<~TN+6!@t1pXmi4e4;2mYk}YHKsyKArwE z)!{hn~1yD%o8`s?3%VEsdRIwpBva#JR(ed4`frf|S3UG@9wWr5k{wem!BIVU_eC!p%`a2ST4XI)_~ zW|d#@hfucHs+^f$HS+p~{xLi%N5YFS9wh}P8Ym5j-m&5jlrDf}aO&X!q{0A8ED@1{g<$JNTBSf9X)29n_-no%nAp65&Y>PwPQ-!0ed@yvB)bF9kG&5}3)q`l zk5LzF54=ae?ZQ_q2kurxJL9y%+@N_T?@hlK^6nY&zb*Lo)4mVvARztuK&PIsmkyZn z<#>3Ra9muJ`}LUN)tT=xO%;RB*uKVPknq80p$dg| z4!H9i-;_6t$WsETEhNp0V=cZOx2`_-9pJY0?SK~dXeqFt<9{NjDwspm;gG&+lG&{_R`$T?$DBgyj*p{r)2il-aO9D_RiNoQj z_Kb&k8m+k`n<=pCA&9}}!d-+h4?v77V(o1qjD7e%NmzOhwL^WAJa;UT>js2s!cm`y zWkj@I2D^^X2GEHn2ACc$o;uqCCG*h%qXoDDFGr-|C!PJNkVw+F41>-mJCzmBnHcgS zx0wReLTnLgF=}^1MgC zBXeL;&LCMIgZ)ChvVs1W29D;xg+xxuS1C=2tF{u{;xdBoM~)C67qE4qLR1@al@9O|Mtk@+rY$x)019LLc$Hh23om=d+@91dfn6WI4GCWfv_R$%M)c<>0JQ8D$`XfNfGh^-8&&e(l;d zeHe+7o5q%+OP;qtDMOYrBZ=UKgD^+hBG!}HI&+`T=wXO#t>Z?>1OdHQU)=b}2^>$P zj{|X;7y)~HJM>azk{;%7)W1_Q1><)?OeSNE z(d_~w>H}5EDz|Kgn|i`m*!XIE@aNJ0$ohe$(4Sp`eF^K5c;Cdt+RwZ6-|v=^NHo=) z#q%g}eaO-m+wFuzmIxVW1poPN(|V!6#oN4n+A^wRHcY;{S}tzT-E(?r1;eV!+v+mI zwr1Zjh_P4}GwtvRMYIMNaBSnATq)0wU5|OUEtwX={YXP^Yb%(lG#uDpwEa43U7e!C=V@hH^X4-inVdjBISiK+7=4%}MWy z?90Yvluww*WoiuW*{Y8r`3WO|*)0cL$vt`9(YFQL$iMHXn0I~j zRLly=;FFf!p&P+4IOf}ThD!&{8*yL#VWJdO(e0?D`Mo`z8wWhCZO)!_~Wv>F6=DBk4TwFNU;?g?la!k#TO*nun@MM-DD+dQL zkx3n~F(M+G?OyFBXe#%6eVc=Z>>8@C%(q`A=he%EsS{v{;>PR_tL*@q`j1fC0JoG5 z-=LYG0i8}3N&>LhR0i< zHq?G-6%vyb=Ctr#R`}qMXdLp(l@kprC{b0uOa}hR?qvf_mzM8YS^aHF%g9CII2ybOHp8eO%200 zDFAPiIS4`qP8u>@J_6@Ppffv0(lBFKRxPiqVO((b zNyu{z87O^`DdeEe8|yviW6)~$z~M-t${-9j&5_$N6I8!(qu}EqqAt>Twar-_^7X5u 
zt=2r|;B8E3m>KSUi-C5nl73ha?UW0Yo4hYEQ%-MWQUW6k@M!0s{HNrdHF@UXcrCP4 zLBm^wn!=xb@q(Gt0Kw9#_3KEsSkr{%kysO`8I}&;gL({FXgMEp8|#|~ULFwd9`y$3 zO`H()l~-Vo1qshZ%-P|U?XVzhuE(|rc>ZieZigM4Mb#;{T`$ga6bSJ#|Lbt9VagKf z3!a+<8O}3U2m>&;bvNmY!zONko@m?Vs(C7zJw2V8Q{o`c+7Gi1Vs0Uho)2lje%p(- zXslJS|EjF%)C^%B?u{##^kh&F+tHZF!h}w2J>Iv?pWy4kIvO7Ft%h_op|C0%Ep-djLSNs zeiD&?hqy%T87%<_bt3tnzX1cR%o`tdpjDrrkBjuLIfE12j}*nt%exj~6zu3Bsg4xI z6Z2J%t^mJ|g)3sa>1=5T4BiWle_yAz|E9#=hbyp8XA$u`ps`|I5+lQ%+7+nWVI@Cv z;HfWY$;47c1B^NPS`4t0J2eN+jGSJI@&}gP1f1>}mKkSUu5#HYbVy1zESh}e5Vw+h zWwk}|g|V?V2=M1|Z27$0@4rO83Lh{D!*G;xh8y6vUjzy+ZNnqfxOym+SFz1sV&fq% z*1bd$1D%TU0ha-z7K{FXO7-VY;grEr5#-u61_u00_yxFFr_i$zg_q8bXtun=liY$} zC+df5mQ>&&6Q=^7h%ALHBMUztT7`aIoXCt|%U+xkc+!aLpCE?o%j7OJxr8NTSK^f` zw9JB5doY~aPVUgL661>&cmPo`h zue49V$rWcx>OW_Sh0fGGoT(-{Q)^{4BYHn0Frec%#zft49=Fv1(`2cL#(Q%YwQ8ZT zvc{=_M-Cs_H1;?9J7Q8sQ90k;OGU`BD3*DYBk{z)+^AP___gH+UMpKQl?CL+-QzO7%v^&??2?6jhcg)SJ*TYfL7da z|M_U?8ZZRO+W|3FK-raS7I3;Qb_U4}bU0$qfEAo|m}<-gx4l0CliU{wQW{~-?Ed&{ zE`NT|N8Bnx@8r)e&QzXM!JXC><~Ttcg@l{Ng9zAFPFfYGO;Air?6}2)-%-<4ky|ur7Vuq>k#qf3UK3y~; zODK5WUO}xefHQm5=9lyE!7JbU`qi==?eF zIewOSKg*v(I6hS@xAri6^mVv8Cl>`gdpZ`G3N;t1p%|QZ7O#!~V@|m(DG#zSf44?) 
zinG{^%Iq4Fjiyvg6p$p00py*EnI_VBJm5ad46T8lNPIR;kbBz0LzI*X6@Vi(;vxHdA~eAA0ET` z;D{6gh5HVa;Mh*XbiOa6XgSf9&f?=+2Rl5hO=q(}d?x|e2{>_r@7*cs1PQT#slUb|iP9UE+B^z4OnEp=+GkAtee(@2NNzh0 z4;b9N19yC;cbOK<;&02BT_wzU=!FP6Wf{?D8#2r@+ykdmPZ}_wsgQN{;lUY^JzqoiT-B9y+ zVNivSJZw+Ck9s~Fj2{|>uzW2>KL(GE-v8#|3;6;TMv$j-nu@scpZlscq)GyR9?G0S zj0$u?85sh0#DHHP*GN`Hv!g*=Q`gcf5kUpEVK8 zUD}cYf$Xz47e`Y&nz+6m0%aXsQD08A@A*F-xu2FCI|xPVIhDTk&jyV|I&mat{^v;E z#F123s(t3u&MF5UZXWL7c}2`2WXaJFNMb8+hown&YDc2@;R2Lg9opR7j55;^s$nPC zJ3oWhx{!lZL^oOgXb7E+=-vU+#ru6Xl$QP{zBxZ^Ot1Z{YWt;LWhHzKA0}lq)vkjt zW%p;Dtm83l!i7UYEz}32fcM-UDH*^eVxB6_LGnN3G!pj`4GXx0YIi@G#nSp+nwO$Z zW##0|$X#6hQ4MjT8mOl`5Ll|7&py#9lPMBFeQefT5D3s95;N3u(4RX#*XCmpRRNr5 zVq#LGhPq!|H|9MKi#fPUBVZ8J^mLcbH=|H zs~ccsM3Di1RS3?Xh;B|05^%#Hfr2eo1%ljG?S~v@PsV@6y=_S|abzEOcVeOeRX6%q z&9NBCxwqC>qweMlo{g-13EXA-+#yTwU-JP`m!fp@)kqcPS`k?@#(1ss#rJdsk=_=1 zbjeb1ICSXHGqdV}*qNN1Nf^y}q8Kw2;}91Y2iCOS&dx3&W=e?PZ%e%WDDjB+f%XtB zZoSQHD>1{UovCDfQSqZ-&53-54dh%VhS5>SPA(I!9Mi zs)&zuiWVgUd*>NtlQ8*B(4`YT_IK0;s$#>#y~a{t<}~=mwGc-Ual8d}s+h3wB!~)z z=**+mEhPN~@{V;)a^$y>iqyqP5k-=ySy-9HoaghAn9g=M>a*U8|Fjb8_raIv{$iBC zOf`v%JH~QB)&@aFH&!JG%dTJ;lk_s^Ni%jQvG}H>$e>cwhTY1nS+igxjP4nFEKr5< zhppQ;;r+rT4+(R!FZ%fRsLW2Dq!q|`+xk!^5eg0KT_8|9y0vB{WKsvkD$LHOaFSb# zbyjFy*bE3;iOanVq#!hTOBp~}?gY&^?<9}SBvqJ*lf3*f%uzPtj(n_77l(qS>N(#t zOaFi;&F?|rBo`4tX9$E$%sV^ERp4NGcQ5H-(7qB!2G5TSedq!P{|Vn0pV4o$po8`S zL(Vs4r+-oM;sXN+>a;opzvp4Diu7VRzS~4xz|B4nC;-?H-5?PWLh{VBQY-HA%V#VD zIRrHX3&TQeoZKpfeHPhzI$*L_YveKm^S%`Jc&6L>S0s%9P@46&6aj(bXVp0b`)aVq zp4xokpEb)w!Du=M$08LKfs*wG58vA3Z6k8k*qrE4wHw%VU1zS)BFWtk4_QO)qICEZ z4~CDxV7J0CI)3WK0jIsdo6*RnvuENRNFpQQ@6sk~=Pc^nV01z<2CRj_E1>v{F3O>V z*PxUoOYhZ(A6!uYk}%{e0q^bP>C(3F9P9!xoPa{#W1WfXfo6^*1_&PYSlpvI}*;?k^z3xSu+##N(jFsPbngl?-|uW$lBo*kh4U&Hb1Y`JzxiHBQj+JDO4 zHQUn@qb|7kAua5uv{1zo%vNxA?zYy)2=&!KkLqB0icUgo*|O8 z8$4oK_qU@?N&>h%82jIeSqIMLj%hIX3U2D*+|9!|+2xNd>u_>cuk7@c`n$mjJT=i^ zu`;??>`4+ru3YQR3OvShDrGIoH@aUwqqu1Mw1KOqIOj zp~nrMJ0d`X)%Wz%T26zW`P|%Hw?*e6Ss|iW=`7)$I$$Qt_6k_upUKtbQj>o8j1T%O 
zxvC}QYoxT`q1s9R{L>crWCVBKM1>vaH4u6KXc{69t^x)vMS_5_Fos0HA#*>fQ80hy z7upinRB`DL_`8!P@6V4sUV`2uDt{02ZwqA>ZJp-fl(=!$99Qv8ckj&~bHgVF3lyKo>aEYdy)wmaBj24+x|$rU)rD3NPun{7f#8x~ z!tj(3W#=%Dl|wBcDS@IojhXl2v3!9JG!N!kNflqLNkiDli~j3YnRXx%pz8goQ{})pD}M@8YWt-E&nE01SgHT?f4OS+Avc zjMsc*d_;*D8a>?3irQhi`b-7)ejUvUK-(uF#86R*0iC+w$rI~WH=Y|hxVdO`barN} zFF33ESOKOkLjoNAYZV`h%5Eqd48@&zzxQNxMP$(lBCKuO$9El?LIldeq3bQ*qC;L?&%_{Qj z!0^1uSK~zk_yD*fKnR{~luZ2k+c@W3{xP^8T$c)P0clDK^23eS{2*G%Wj7|yHH)j} z3*RQ^E*ciLVn!JuRzwYlMaZ5N(yIdWbXb>f_m%#4N3>PHGT3J4^jmvo#Ye&zA6{bp zp9ALO9OW?vz79ezoc^kAZId}wUhl$ryvr!PGj_&+N~+iVfw=CW17Wo?gJGC+q+f8^ z`BrOcFku&Tu(+cZm;g}>SUkY|7&pdR;e>o;osQOH9{mXESCtz*1`?-+ZO0EsIe@~j z3V6#9;I0ig@u2^{z${|1Fp}D?y@%(}`eh7LpfXa7QF1C+B{kgIHKVIyr5s;NeVBly z)>I9=u3h`R6yVao7+>aU^pi<7-%0&PQY}DjE?sr9XBVo@`%?+RP=xK9@Z1HhBMC(< zJV|i-C9ei+zf!4uEOAjGv>V&Z~YuQ{&ywDgVzFtz40|P=D(1Ba9-j(BB&XEH>&$@<+ zE4YI&Gu8w=>jrKP=~PT|+s?0*75+nSQtnTz@-Ms(os;vV?HP#=FE9nO1+&osJMY&d zcpy0gXg@48Dv41CYc)8*#i+Q?#`DGS0|IMFOvBnAG}vE3s@7Z>Bi;FO>?lgLCm?aK zm}eTdAjc*B)!o&&RGlzWuPq7e@2sm9?R!7M7;rgbq~vpU)Myc3Rh*gnrIKU#zfLJ9 zm8)Vgt8LZ&+RsFw442v)g||3ORXw}$*l1@()+CRr{Hl+AH8_?W3*X_mP>%C;W}jL# z@uL--n@1R~!T}|m2@P}BvI?&$F;UB*bv-K3>S#E+D0zek_yU~RnUXHPKyFJYti-8Q zjZu0Gg`p-)h%y?T$*YyIO3cL!+nwZT2{lHdG*$-q;`>c$xXfq3tAXP~#piTBg9x^{ zm-f2OMd`jAXq_UpVzMXZb*NxC5Dd6Q#(Tu=qV)bMc+T7@<`=K1jQ_bFjcEk3JeoqO zdHq1R&0f}^%2&s|lLm~m-0J*&<3#k{80FI>=WRvoaV)6~C);P(ZhZwQf#lio5ylR0 zA{Hbq4OQhHNG(y3iVV9vP;B8{5gD8{UF@mmTH)8I-IzpG(^pFcEPSZ;;fLcYxqAh+ zGI4o*KVnmTrk*}|@;#=oRRd0u>aFc8tvEiD;W+@VsKNmc-J~$c`lE0@JFSNk{D0R4 z>4#H6MhHFFBm~xi>bIb6qvcQU#1@`Yyej=?~Dx z1r$pvD74(J74e~A-~YdZ-o^uo59a{(o{5BuNs8=a<0?PYR=8N0Jsbnb0yCf8pJgLE zS3yw4=}U4eOqOIpHzBhx42td?yj(uG%aV^aDVZNm0DpTrzfAs*45RZL9Zd;c{wn9E z0V3r?-y!9Te@+rt?_8Y35snV7DXQPjyYtX06jGX590=Ge;_53JR#wXn+4U;|33<|- z@&Fy{v8Y$kwN}pTWsGT8GUGFi&vT%K(NAN2R3!tXQ!9!{48V;;LPKYVA3#@24GEg2 z3HAb{KqSGVGyz#2dFO*GDhexP@cqrR3}x7YA?JDLHMkeWBe_k0Ci=RLOywQiI17l)4pgT4vtRaNza6W4mXH?8YB1~Z&qsH_d#o`vH(la8;kcy{kb 
z49`KH0GqV+&??$TiD429%j1jNSDM6x6?Z63Iv8g@g1OeYd$~W@OHQhb`N1!WEGU2t zR@UZjocG({nq`V=lOhDMbrEj*p#o%zN!<`rYF+g7sWa%V&X8F5;w|=K^hb-n46;^- zal2$0?bN(Gsw~l;iOr0mkawPj22wKb&vD?QNvnS?4@H*VGGtpm5oOa8BDi)-h)Ia? zR~Rf%JsM6cfM6*?Kbs}XSVactqIL%-%raq>GK5Bj-aM^VhB9#5GtXm*Ph)Lkz&58{ zfIxT3TD0&aGCQJdQAOVT_E2js)i?`FjsZ=T0-S1uSkLKS8e^rqabqH8qV;3T#bcHa z+I?9q+&U8N;kj+U>H1BZ(!F)DX2N@ALrSNl&X_RzAzZ(fkNzQJ}h| zf&hvl>C(aHYb{ZskzO*Z^{g^DoTcvRKN_+MPmkjwx#9B>zJeiNO=bL}Cs_;R{mMqB zKQ59-@!WPj+CC9v1}1G+vuJk#JV6^#_?#-Zm^CD7bL{izR=9=j5Fd(n&Mr|oOIHzs zWDrvbp(m>KoXQDPWFR$Mvu0_cqk5V44iT%aW`DbWMe2J5ZL=B!Us1^ehhDG5>W>;D{(NEA0yfw@)5zIyi zq9TBVZP;K-SOHNp`(jX~Qy3!58L*c%ny6g%;7xMDY$E+x>veQ028ZeO(o(c6m(Iz4 z8=ARtZl$Z9xIDkBNqx=QwJjJUZQd*@C%2X5yZf4jB~y+eBvUBXSm>)#!iL*2W8${N zBt~$}o475fIsO`Vf|wJGI?!MSZ}$xMEuGBFDvnA5+u`-KQ1$3nBzwMZ<)tkJ(6;a^ z*ld568WrcICW!KsL0-m4?%LkTo%8>S_aH7A0kO?u>eE=rY?fCI{8f-3QHqR2K0zFt z%C0tO*C^M3<(IPwydnE7Wcr<}vw$~#UI^+$S(zwir20T)5T|>GN5er&K@*O{0nr2w zG-AI$re`2=qG1k+jnze7VnZfiFc=3+a`mbueOvrnw5Wm%;1cEjiG9gY09O8zc?|Jr z&juEkV_UIv31i81z|512qpv$ehRp%1f~46b(?PX4#qYunj$0dyI_6)b;UsD|(j(%F z6ye&@Ce%{4ks=XFws76Bcmy z`NQPm!;kpS#~$YtzkKQJeFkBd>u$-UPC*=FjvHclxV>WeF zHdhQOa@%8sgT}9gh%NB>v3gtc@u>`u7I@|uFp%-6iiU5-AANC*dfginRrlOE?@tU2 za!y6Wrh?>W^k(W4%D7>9t!<(ZYGWa;lc~R{|Kro&a2)2YIwXNxk)rmpUfe#qH)GuH z+>{O{%d^Lt!BlDoPvJVo#o9lPGSL)*yspV;vTHASvS1<;kFG%5*|QV8oQdk^-?t$q zmdFu^xA-$G`PzJY3-LJiN@Q8xtDXPKxa{L0XOgHQH-cptrjj>xSa#l)GlF85E)Q9~ zULqkSAq8d%RYK1C$CSVp5MAgZPf{B>Z1n!TISy_uSjDK<|#l+9*JP0AYqeJ^SR~is~5_6%1l- zq4NG?r`ie@u8!-klhv%mFQ(&A50P=UvD=Wyjt+jQBx-r0;a&&$MlCbSJWuvF z^-UFkWJ0ikb1T*uYN=k-qiuL3TB^yb^hg8tn1kty#@Zb4TWh{I%-_t*gY#|V(S5&W zNsMI!(0fzJ`>ecjO;?YFa1>1hZf-JDN@ZRF@-*r^T8J5yJU}e_b?zKG47+pqVFSNOGs`4v5*2l=WlS`(!PaHyu zn?jszniM~(L;;LWN$$yEQXBcYdctJ?pnB5K?Zgt7IeZfcXuz*Lxi#-V*eumJycB#* z3Ghy)QU^Fyz6u?5D-3bPrKF@jJhBeRZ+OAMnKH3-y(om^Z+_Hml)5vr=cc^1N z2+hpqJz9Xc@cOPH3v*%u*UP#;v&1abu~7|8Lcz^ViYXI`f7k z(9+}Ow7H5&%Xj|~kH+;6o7cf;z{)NzFGtNmmDq$$9_0M@=Oxx3->@&T`x|;ROkUAFM 
z_$v{s4&VB=5H;6J)I`KD1vEeH&Yz87pnzf>+Ro3z&Au>8FX#C;&Dv?loJ^2z$+8JH zDwlC6S?>6T4jDxcX1hyAHqCXQW@33pV*xZjI*8|Yx_OE`r2D-|&;+T6;GvujE5uT~ z-?_@JcDd?t7eW8|pA`6?!xiYV!9MDVB%i?w%D{E{zaR&G5(eATw~pw)4gR!~eRfem z%ejfTlu3SAG+J?X?z7&@1Z%NLU1AW zd&V;6y+VreWO#TXdeAb|9E8$yb@?-{qtm2y7+Dl^=i>ZRv-Qti$dAcGQScRuF=eoI zkeGEA_J3rlCu4F*`?C@{Vk?yxmv!j?or#Y?jSTt5}DHJ}?r3`#)O1_o&PM$d$mpw4GBjQ0kIzjjyL- zP`cn1>zS8^tVB1-J#7SU5`)l%UVlE9=hXo<^a8aDU?NOYZEAMZLtY0{t`HvOB^X${ zBSkJdq9c2T(S%hQ;A%7v4!d_mS}o)3+p1*pz*exFC=2-S)GmO*<6X|udc{2!PzxxX zbJeGL0XE~Ny;IWr44=beopnAghG-VUsBSP^}c_|YySd8f7 zj$QR2QJlE1l{S>ksO^Y}YKmme7~-5<6pGn{2_R*=H-7}-XuDu2!!k#Mm)A9k2osvA8(Pckeu)`Nf!z&t*89=W=|V zuVAwVOyx-t%17>gdlXpe6m9e;#tO@N(AHZh#OudMxV8Bq5d4J3B;0TUbAFz{c{qU} zqaZWptOI0TcwTsQl~NFuzcA&D!M$&B&1U1|@%t3U{&>AYzkp00yHJ2#aaZuv` z2a)2AcE=2RS^LB(3(O=kI-YbTJ_8IgqwjlNDxcHlA2bIgRwvN%idxxlx0>CdeW_jr&^))t=` zC>WN6MxD@Ge3G~F1$E9K7=r`DwxRE6Yjnugs-Ij|-%%6vH6R2u( z6Q6dNl9MYyRg6IC-eAN|3o0N}E1Z&8Wic zMXy(kRg8nUGV}bs0yg|mKw_x5&`hP`Qe!p8=lD4zT^nS#Eu)1AWSBrCQo`%wwn?Nl zqd{fTT*|*nU?$CdH*83t?X?emxRb~-$9Ty2Y%$NY&Xuu2jf?n#rKzcB^1IrLU(XMl zQ;dcTgH0>*9__o=e)92`)Abjtv2s^eSJx6v9=YYf-3opt!&>87<(VsR!^zSci+?AZ ztGg@0p&#({HYq%@i8}hf{V%;5M=5=m%zn-y<|1b0U+E)UfyZoi1}om$qyKiJ=9LJ& z9DU4hd^N7*BP_|F=mTvBrI^?3LHJ$h5O}@o-a-w%chB9Z|3O7u#=>Wm5Ag|jHwpkr zFLDgH&m?~Y4ZqDD6YI@TDYsyt41i++hVxm*`|b@%BNl?kfdX7wk1U9KvNIR`?#6g< z-@q47LHzjquG;v^6ToX=fKlN_#uRi?mv-9eeHj|j_@5JV=&uvQKD^OwAUX(9k!fxi zDZZF8ftL1vD@s$9bb2V}G~rNtTg*wj0th8Bf$HH}!X>dvmfHz4WqvS!ae3~?!mH9L z`6GK6JDa1 zm5+3QMt&6_an_qlC37vOZ{AwCcs7deBD_r$q9zGnntBFlHyLaOJ%pa!m@Q`-ZMyNf z@Ly-d_^BPrCaiDmIej~k$q0xdt^C2?H;h9fY$(P?q>2;jeKU@BpojY6Y4 z90x9J*KVE&TI+trbrQeoRc`EEJ5fL#0P#qX3aIn4ya<}jsQfKKeW~eOeQap@Ai8uC z0`AEh1i8J{B1b0d{vMFvLB7i;-zdDwWG~J5o5q@(JJj%Lv4_bV4IB`(7xP?RmdGgc zkyRP=;!i+J06C}fo7yIk-+X3s<1a%*(I-aE?X!iaPds`K`%t1;O@|#V#%QQB4<1Ys za?S(R9_^vo9?KzE#8SjUWu(^0XPOfn@o@f%GaIoIh^Y4%XlKmJU!d+Sz}o1B?I_L@ zMkJeT%xf1{;k`j`9v>~>VrMgZ*LRG$y+?Z2(K>Ea(aEU_EHa?^5Zi(lrL&$M|0s~M zftl+r25E|!bO5;n=K`n%EsaeAkdVdL 
zE5b*UYijwPzCHaB9g462hEU5B^SDP?zM;I|;yR9;0<3aG!YtbVAO%N}_EZ#l& zXq3_^0(V;|aU1S**0uVLrW<>UyMUGkV(c+fKWiPkv#)yw_efC^B+MO*H$BYF8sq+$ z9sIA(;co!k&m$KTH8}w2aI*5xeVI|ags3ife;4t$t+MaS`gBFm6#uBgAx|}hprz&& zS!d6kvqC#Y&He5(&tlu=p(42GAN0~hIwC^NYn4Mz?^NWJ8W(7n^(F;g9^v^voV|Hi z&w2Mh-bxBZCDF#T5v{0DDvZgLGE77zm7+)sE!s;cQDmvKViYPZRBu{HEA`G=QnV_j zO{J1bzsCtP%oRkjyqD+eJkNP-XG2739?B8)WXO;SefkcV8i!yI&2U7N zAWUoq6XluWUq@?2;X z$JUQHHi2|(w9NHfyX(twc|yGBbIyN_^Jg@28|`nulfM2q%{`>{NX>$#j89IS0vZL-INbqOw;2h=X|dsT z7mPjbahOXbE06CjXsZ0Z>F}5%!G0_V24$ovbQu^HXbAy@SycCTPOs&E6V{}IE=CygKC;W;Q@?+6xuIJhX{s34 zRZ-REJ{(iA50vM=Y~zuJw4DJ1>a*iflL__!Ln*VaZYug=w7+--UwZPgyU%!Nr8Nl4 zCBHLky~me=D(D!EYM*7ztc~YfA3Xi5oO?#pmqFg*1e7Z{Uy#If=a1NYSJ1CXkq`A+2pPcbj#! zfF+QxK+MNDO`zQ}MKZ;=YYS|M1~rRP{}@Re;I(wnK|pd^L1&?J+AluiW3G0b#iPv! zmeb**VCfO~MEZs>2ognOyjGtx&=FI85V#1(*0LEa*Bu_badO0y@drF5`WzL9QBnap zrt-3)-J;F5u#J?q^m6Ez;9-Ig;S|K?yL3I{xc0K=`8?XNm8$OKZYrZ4Hu7<1TT4kCk&Ho6t5XS&by|4P%c>Xo5Y0J*89*DSD?iT zOW}cDYYF5z?A)mv&A}An62j_EY?A@Djil$vUb8A5Vzp`G;YhOMVr`=`8nHy21d7X{ z{B4THdDweo`OlgbJVq}vZFrHjK9!!elDngocr1aaqj>CE+LpB{J4wy7yLQp$#Cqp@ z@&E9w{~n>I?<65B1%8kYKNmz6DGDk2taam`hgRX%-}`DW3nISJS4N}oP zX^4j9Mfn`~VR33^FUgW_dX8n^&=0qXh_18emc~$1To}khEiq(5Xx;N38P#~8fg=wU zx|`{k-~0wRoH9rQ&oyi@bKm86Lw;L5Msgk+$z}B_+?T`Z0bqXtl4Tw!yVtxspzPFI z#fbCJgED_ zC!pQo2g!#8hlVv+S=9urheO$Z4ekVlJWwJd#LR3v{GP|2Q8Nu!11|1_y%nM>e$~^{ zOOV>@{jq5>mMii|C^FG!;I5(z6cSUOXhxE*aLp5ZG?L0vtG4L= z`mKcEPDDxiVhQuT8-WvwyhTcjB2*rD+wzuv3VLC=vLR+YJc_~yjHeO{?EPkz>lLnjo?LS?vRd`ZXf& zfc378T}>2^fPxUf9C~=Z0XwoQE7Z4P&Zp2vaK{4tcQs^ig_O?r%WL*w{= z(Hk9sCD-Q6f_15L4-~Zq8Uq55c@=fJ*t~Pw=Y1q(?o@zZI5LQ0vGh|ts`4UyCfSNG zm7rZ0;gUaHRxfD}HpFPgZ@SG$K)i&PMnU}5j0L~ZI!zdpSs}Nt9t77!yd;E28K1qD zcdBNp*f3iGDXO(pc7g~^!l(<$H$>f(QA>*@>~zBRY~J0FxFDKhL{qwU$)}c`EbO~^ z8D<4!43IC7;sSBwGy*o`;kn;4qyi`eilFm7WB54`c|%h+hi|3)mmQJFVP1?0F442A zRC47+U4R~Cf7ksYMPGpD+t*sOxh9-6k@c2;`zg#Xl{YXz_!9OL^wG4{^438udCCoB z_{lyx>#E;$S@$xs60S!)?YyrUJMmMlW^G z&M6KkTtmg|Cj-sI<=CJE^ZqgFylGt-+AUhT$tv8kE8o=#@*d6{c)t^$mcyY+i(msL 
z^R}%T9}YbmzH()c*T>Qx;G`w`|hHpc#J9!uIDLM@wdt zvCA;bD!?rUW=w;nLnT7^K54O_nQBh1OS|(PlE3JKGqYa>l8xHubAKG*kcLCi_cW`T zP%W*2cu=@VY0qSj&$kt&3x$^c zC;0%wrl28CYG@^#$?^5hj_dxxmvjUsT@#@*ovftFNkSiF?BCp1R}CMGJx8bNba?b{ zH_h$5SgJy=|IaATfJp_GNiBFy~2~i6czAp^I=e(_4;_iCT^v{mrA`{@!`}@K4 za5eS_q#f?h8<+)d+9o&hL{Jd4?|idnS!;CT4vNRqp7Cy;rNgNy)s z@Y}g&`we?-wFNfAZv!RA22gn{$<4F51P&lgr58hCI)GN_)plv9%H&@tjo6nD3{EdX z@!sDT_bWsM2L(03HIjK+Hi|F8&qUS?=anxcm6!AQyrKEKSXfzDbk}Ha?tP2T3Jwnr zpm}4==o;t_wywR(spJl~Irx1h;enVs3UM*}H7eY!$mh10xgTWAFx+U6U(XJg)+e)Y z^}<3I*VUr`M<;hLq!vucY;`9|u(h;))aVzrrvepv5jWf4VN5f-Q8R}(ldTWnH2qkj z48bc+oAb1j7CB;4X(i+hsc0-9>5VZIpyLO<_oGYVm^oqSwniWUBLRTkqcR!>6LE0> znp`D3^Rx{F&;2@>>iv2wF{7fD;)vM-$OA`K80J=oHtFLoLXa#LLuuw-ty}LRM7h}y zFf=FUW)F+(eqA>lSz-FeH*Ea7kulyu$#MyjT!3eu?b2~UPu_HHyFF{)dFN#<2eYJm z82i|KuGD8nZt3bD{s^V=Y#=P)yUU*T#u*UFOb`nq4GJ&1d3DT(6xpiq^BWJqt-uu` z6;irFm3COO6Tge4o^lC>5v8tIq#fHLI`y0BL}J|IeiIRVn$l#Dv&Mq9u@+PlG`(y5 zgf>7+Kmj@uVTw)+6WV!(K|Su^%;OA4awWTCcNnZYJfH3`B2yrQeN>se;F%H>no;zy zqSfQ$l6Ocis5X?3mR&^P4Ys&p_y+6!s_(i4&$Wu#W`JUD+P|8^CAq${rv>H$+Q&{I z#d6wvPRckZD~&JfU5F+FNOCxZ`hfFcy(bnKcLNn1Fo|W#Ygsm+6ey;*H3gPgcT$=_ z{FIC%R2FDo1}f#Q;qBH=%iU`v^Ko5S;_bP2rkCbGFy%XSyi@L?z%$9$fbVE^8`_YF zt_~rT3c@@m7>w_}C5Rlj_6b!)>_dhf+T2VmXdt6sj8MO6C=^Uidee`x!xDzj3GZcY zd9UMrAhIZ>B)&zax6j;UIQ2&?uj2Qzietyq;Px_$hW*T)K2A#l*aAG(&X6<^g%)cNTIWkt9IO z`8G|l6XEHoIefbn-wjQk_1ly;<@GK{HNT0(;`1q5nnGfDOao)<5+LcxEbqsBoq;hza*9ykA=d_2&^ z4MtzpjW*U@?0gZkZl>3W3ptECtP|7h7cr|PnVog@5>hfUDp=EET@iuV?T-YJO&f{H z>}Zn*H4Nbx{Do-1sBjWjj#5I2HLUQ#Z9pdjZ?YT&FzV|M(CMT|8w}~Jenn?-JdHD@ z;Z<1CRhdj#x|GDE6|YqiX4+JgwDD5%*qIN7L4}vGM~?y7sRgz=l(XRMz`wEvNB4Q*Tgv z0y7CI=P=GPbGBI}%@<<&OsM3OUgUYG0(_t6yHvg{m-ep#zl=LLcd-0U9MlEIGCfRe zjm9SmB8hc2BoxA>8U^8Qm7j|vf0mrU7Zstj9Ze?0Qa13eY7c9U3@>eH2OcfQ`?lB3 z|FeC)Qn7to!|&%7?L=M%eT(Ebbx_OL_qq{01(rntw~B{K)Fx*3LT->mJAYKk`*Hoc zRIZr)c}kM~UrDfEBHIf3P3x-IeGK$RXqbRkl$;dZ-Wbjh_k6ES+sHdIFConVvj*AmO&(!IywHQg4I*cG9WNXz6fC^Il0a^I4e=Nztf zAFnaxxlz}Sh{VK1th6H?HcwXscQ~l)%5C}za7w~q-ys4`8Qm?kw*A?`l?uR1DXV@p 
zuLg&eUIreex!9Vr^HI@-r<8D~IX>zP9pXNX*FdvXGx4al?LZnB;%wTAdF}O4vNk?hDmZb)Oc$BlG0g$}4z15G-T71=ULnM$zZ4OnOunasdOt z?f0%duQjShM8QTPPJRUrOM2ACsDcZJ*Fnlh27F|2k+m1clI960PJn@21jk9)o_O5S zk;jf8KJjM)PsD`2L^0vsq(>U1_sxUxbu>>*9DiBLHGAMLID1?+#=T`Bu5G%x2q|hpcp~?qkg`=j zQ#R}Hs?Z{H3JyS9`NYt#M20W8}=5clZOMNDP)XTZdD2KLGA5>auBCr6f#Nk z;l<^&94=m6;7E9LP_HgLzR7y3imD|4S|~jf1zcG3f&Ad$VTOVLsEBMLn2gZ0X}f=b zI@b}o3$&1cwo+pxKI!yo(k5SCVg5#a)_jPHI|iNrY^Ey6G8{gwPYYK$1}$XBJ)4Rb@e)Ll4f%0d|#CVQ|~%A;^sR3jGGmMxMBPaFlS| zwa~9Sx7K35iohx~fO|uGOo2=F8Jn`uybK2*JYB#ng&e?B<%Q27(iu}S}*HH!zgeDoPK5c;d6<7ZxrD0!+K=9&s$#)Ose)f8o*lJG%hQyuOHN8QN84q>yt@?LT+W|GHU&N^j4s8!*n7ig?D*lsho3Gx+5uxI`rH;w0uM7{ zNFSPLAWmE}jyOp=2=Rv~-DpF3$H2DXc(4i7x8bV7zE;C5{pO-!KMVgKFA(jdOHpT= zjWgLMR0d5Ga?iPNBpZ-0lja4Yu%W|e9v#!zW*E-sYPSc6jX@h=3F!-1qTTQDwG^I~ z`kmU~jDrclCv)+c095TrmXA>C1}BeHoN4HNGfv*q{5-hUaQ!DM`=3bgf+sm|5K=G` zd1m!Gx(7Wi3ZnIxd^U|wq$CE)dj`PQ_5#(R&uyO@fL)*Hg%JS(kV>=+594Rf1b1F`-YvJ_}0f^7{7PqL#^rHvwo7?LVb4s`} zG*HJoKZ=|Tk#+>zbP4kYi;;7VOstzfEBhptt#Tg)W%L}&K!N;<&!@!&0Jc{h$>dr0Wg(loSH_@nx^wzgpugomB3ipZR{3voKV7ql?oq{rCO zuxRb7$Z)4gnD@U(#0BmO?~ht7n)p-~Wga$H8)RS7Ppx+>6`lBnMR8NbRz?6vB+mg- zg|r?qvRsqJF>+x7dN`mIVYLQC1re06k=}|joU^V%Q(xzq$Q`5yqXvYkj4vC9MC{y! 
zL5mC5Q*U3}D)fioB&URrj0vh>tv)0{l=y*|b#AfH$r!AV*+P*y4sDJqmZZU@q*XND z0kM{Gxhrr1Ki%*;Fht=Y(dN{-|0VlEGZGl_OsEgZWQn-E7La5vlF^d#=fEIxoS$h`)_HC^YN8h5!23!vCI3)MblN_{HfJAg3W?&S34Tp@^ZR$O??Mdyy{Q ze`}S8{3p5-P+BkHv$yd)>9`YPGCdi%B8=tLVuUM$adnrGmK%S_FY}Ih$%`(RP56Wt zRi*os+$vZ_(f!nifG99&b_)IJaL`sCRlGvazNS8rvO4h8w9B?%7|tDsfRz+uv=x;e z8iuv={yjG|%1lDd`#2!MN^oE6pOpN9942PYUm@!5l>BMdU7xvAR5jzwdCGMLFhmZ zx@SY(lTs)=(js~?25-yqt}pA#kBtH3v!u2e@&<}|{yf@m zKvEA1;UcvOYF|LsF6OPg*$Y;2{)tfv=AAx(505f-{VpeLThAO_9>UvZlbL}PT)a^S zcDtry;;%=GO5ap*R@k-pFrMND&I$HoVoMt3)?iqrM}lRj-kBE)f{IfH;D)TRaEbgv zea}L|VL8D3O4;+!rA!j+urB#R8WFPxw0BWl#rZu^McY4qqb9!(zvWsChK?3!CI8BE z#o>ADD8*es9iLLdb)=%jI^RyYJr^T4sjQ$E+;*b4qM@_c{)%or?f~wtrNN^K5fVT8 z75Qu-)BjkL41#rY_8$&3A;?OfNschrlo{x(4Tsw42@Jp;Lv7J>s(9>_T@jHGa8e2y z!^wbvEfJoFNUN=-?846T*PoZyhX@k#mwl-x??Y9Ls0NIF{&P5&$85kXD8=N)vU?2bc-p-Mx zO>5eM7IK=Z?Nr;O!`+B?gHX>|cNf|!@n>QV0O)OsI9rR@LDOt|(`%cr`os91VTMOI z2T7yJbK5I8m2l)JaR(tq?R$KrW{gyhlVOSE7$mO3P_O}Vrm$18itR5#;vB0+HCOO* zTRhj#tU^ie1y=bqWxFam;K>}P1qZ!sG-r7@rmCBPCy_EDaW2hJ6qSiSzDi9!`SLG9 z>re8KfYI<7;#!V?H#tcV4d$Lr;DH-IOxL;_fLsxZyGNIkJb#;tyXvxK=Wt!ozQOpQ zSG7AepFYRp0F2j@#+XH_`pt_yf9B3Q<5}Sb=~XoRjKQGk>^v*oSXbhCphy`l=B#BI z25(b5c@iGS@*~tqHVF5&71A5&?!o@v@3Tk!{hn z{XA&(fX@Ly;I?Yuda$w~WSCY7VLb)$PQJHh2f0W0mOoRm`mEsm8mKjRQ-bM$m@&^c z6lLW?P(o6W0h#rOVtBipM3m!_9suk6 zQ_QFM4MpT7m1}Oi7SDaH!i54LV%?gj*eS;u8$Xpa=qfe^s+L{1FHcBMdp~APKm|?M zVS%U{Gx9H(956PFQf5K3()T|@`VtUH> z1vtyyI(qynfjc0lw~wS-y4HbfLC2{b?zZ;x&k`*RGnCqW>T?`1>)v}%Q|qjS_jGaT z;GoEx+BBc`4}hckh<;e{uBK<`De`^Lb+dDD_aYO`Lg(#8HqY;~Y7MoQ%1Pa@=A7W} zaxVdJj*l>OvMj8XcD^vQey3g=b4p1uKtBf*#rT$G^3;XVQn#AAWjF3%1|m&EZvvzR zWX>6F5g(VpaQeH+9aIBw2?Al6oJPSgA3@K_^1VkOZ1P$^;Zj^YtXnN@y6ZwkSCoJT zLbrJB&1FX7(#lxChQ{d@oXDM%K-AN63LMMY*C1xT)M+6ylWy5QZ-#la`Q(c3Mzp|m z^=RwIO!fBp^_OA9PSFTLJK;08Bn{T%U@6Y7a=j<|Rwp;i6@#nBD{&@JTW zlar2`zi7qa&}U5DEN>wKNIhB8?T)uZU=R77Xd0n3Ey!-G^E)G2r)XiM3Oaex8nQ45 z#g9VpI!R;M<{0=~YrdV}^9ly&sQpypeuwrP1snGSSexQto-1xykn_}|6E|@z7G%6E 
z==}>Hf9>aKWiu~;0xAs^Z^yG?fkOv2Y}J+#f!d!X5++z#FgwT*iMwIe8Fpp&cpyd> zkXNzvZLj*h*CO4x2yBqKUE3DHLJnjiN?GI#q$yNThu?$sGu#fz=@llv4QOj91(fZO z$Mlu>Vqx;9g(=sK<7yF7ZGG|9g1ZX;kA&*HdGUEH1!RkjTFtxmUoV5UY<$$&)7!re zh}{Hx0B0Olx~JFX0vrF|&v|^IIzjU>3mruNSubT(t5%PPxP6jJhk&=HBlP7&3nhwQa^(I9ZE!p={0Amk^NNfdSi^q^|rD zljo~05Sp+J2+kzQ2ogqE0$)nX_n&~EH%RVX^i_w#H%`Xoe_fQ1o^WWe0W+IMXy+e9I4IEWuAR?LBp}|h$i#*a!$GvjFusFva`g?(d;?;72hlh~U zfB!z6V%&6)I;4_|fnv!A4#AUZiN1`2FDP~fw>y^Baa)C4&66t_%Zmd5;@pWqsR{2T zppJIvAd``R0LtG1Vi|yJ`iQN<2S+p!T6sf;(=OUTiH+Wd^JSiS;Ys+`0#`)Dbelx= zqXU)3Cp(FQd#8RbDG2aQYk-D$_%#2*QoCPqE5HLxVO|c%+33fNW2{ux`80Pj4Tk!! z7n}dIrC3?vZVH?Z<4L-=&F4Ff{ycB-Jf16!J5j&vs0TKwvn)L-XOF(&KOxkz$RPZy z^=p57+L^RIcr0Y3TdkC{BehJQg)?aD7-_P#Hq;@)-{lS^FN9_P>etpQV-R~YYp2Oy3hg07_~TZJxc?d@!t$)wK)a&B2tuob-cHCwIkB~ z_e}(Liae9pfM8;GB&SOw3lq(H2Ec^t4hX$>!gq%-B}5-40=WyUmYnW?d?BF!6Ae8! z*o|sC^Sl8w>-Ot++%jMQeh1*eeG{_w4vg3=5K(OpQUJ1m_6Xi_{YRG00q!vniUJzs z&@VzYoTO}Tq5$-c7T&=8licvmdRZ7BkZEz+9n9*_oM)A501+Xrkbw}({gLFtxT>QF z%Ro&=1*CoQm}7F#fl_uJg@9m(!i`%m6RO%DK9O(&l8H$W$Kl*c(L23l21dn@-cpyo zLg;VJvcN|Ua1@$~Gi55=V<8cyjcz3`K1;nouAV+7?!G+UO`9WO#G~b#=;Dd=41n}( z?DNcmzV6{*lm87s4{Sb)e3mZ?N!EM(?hvLB%nbXzdurCzHitsG>3{SQYU?6_C}HE* z%EzR-hXkyY2#0+Awq+v(L9!-$>6EBdV>=Zd1~zv)f!!vGC%C#&FO*sTF(C zZZ;pq{FBi9<8ca`9C%!PKZs?rP10F;(En0s66FOGkKE@uG8QxD{kT;I>^6Pkn7I~1 zDmj_Wa_^0uN(Hc^3Gl9b&w_^Q*2#OV^3bxBf;L_crtOOrgu&oTlYlm##0Tj=zfgr>nW|HqgU0V z%R(%4%Kwv{A|?7f`SA1!;B*5{hA7n?iFRP5&eOT=l(W^_F;lT|*>0F39JipqC3=ru zE<%AM+@gO!yAVNCh6UbU$a{@@qa;OAi~m}~xOtsllTPGo7;?%SA&>6b`qD^`HT|la zCR=`Pb5Mg4m!g`%BM_ZIX>E2sSFEK~AR5A#1VbG~T)((|@cNruW3!dpXXprjHyh8E zb>g0PSWN*x#02dSc8nhT@Zm$eBnRI!$!=6^DOjuxM$%=pL4#G|tQ+7{2cKx<7^8bA zqP2+pm5MvraCYS)gyt5$-~s0&yc%$5vc;P4C2Sc28*sxY0}c&f7%v^qZg>RM9SQ>I zRoDE%UO0aHab*~5MqYSM*pcy;jn69gPP*dz2FQU0Ou*U&D&5cUT;UJJ(81|3F3Z1L zZ1xQ1EmA;JvK5pkjnHJ2pAI|R$d;HGdng#Y8`?(>)`s4X{jizs!v~a+30)ym$OZKu zPe;ZH9_#47ZDsE!_Mv2d-Y2rcsg;WbV%!3(7V=?^*GN@}7T?O62_5_Cnc!?UC#Bz<`xht|ktio+!a7u9y(*}2haxmTOT*{0CuUY#uj&uOo+Rxi 
z(MQ>wDpK1XKFxzb5fZ0-6W$8QLyJvx4>1(@8M`icEIZLqSv5&z%gZGpc1UCl zcvia|))^Z>jrl#=zELSLq8Cn{3GeeVL zBiQkxVPtU9Db7C)rJRnMaOo4-*<@l-Vuh=m1(kwxOaoR;sIOSv=gNeZvfo8H@@=$o zLHfr)KL!amBrH+|jW55`&i0*}bb=Tp@Vb~+WjdPKNoi3^5e49YLjk$8%MNW4K<0)E zw}sACSx=l1%^z(KvU~7tADH;IGgd3jJ1}AIjK<}1!iUF0IU6jPrfQTDwm6e*fr#Zp z9&HD)u#CI~$ulX|`lOuC3&I(1VRigDq z#b73%vGJC_ChaIKd{Nqwvx;G2S|xb}w0ZQd9c=|z=XMlG4F|(jy26nS>~{cA4C5P5 za}GdZ@EBr`mEx|trP1x6mx)rZw|6zX2xpIqRT&>3fvBExy^=h*O%aU9i{((9cB@Jh z1UJX5z>RRJBS39SL~jE$`e8~<)nXn~2cNq+2L%06FgAGMmVV0-UAII~vRnBBm%0zt zBgp2T9bsE#zL37my~RHkt`t)&)#z}OP>h9#hFI4!_im+39^~l#`6!DO zD){vP>-)V%rKbFl+AG05xkVpMH08_EcQVmdSxBNUuhzJ9bur=@C6+Ib@)_&cK}Z{| zMm1u;R&UW)b7gvAH(bfTNK60f-u^WHYg+Xh?S%D)K%CkI6YXZ&#Et&(3>ec|LD&l% zqEj6T^}-fb6i%TXhWuh;Cm}PCwjUnzMwS-Mtdug1#?I!|d9Sl25I3EMJ%n{@v6ggG zH59=>dixGmNC`Vn2X4z{CCRVn^aP1(kfg&8;+JW_Trt;WhX4pLJ6#Tv{)zb9hYw_< za^)r_u-Q7VINbr4S=ChKL`y}4^}3y^N7<80 zX~}w>{hbpei%!~V7%f_Scq&pWX8kBNJR`X|b3Sy2^L;J+SrGVlj{u+9=4~PZt#FQX z0jOG*wDB)t1DG-(2Q4CVrKg6d%S28lG7vd9W+f>mZ8XO+yYJ8>_Kj*09(f3trFG9R z*6+Y-7rI3KLe{qG+2^3xK*$gUDTyC&9pA}fZg;OqflGA1_Gg*>ycJx}Zow@&>HNdL z4xRh%D{a&qSxvi=E*8V2=!Uizq&W{#o^MZ4UUfm*G0bt(X0&o9Jr5;yf#3ftiP>?* zraNE5hAF~A90d6i8U>Haw;g%>#z_h5hgcvxv`a}P@LefGXaiAjkdbY*JTHVgR!UZK z@MbAvTJgu)(a9%5!bdzB>WZ~-)8o#%QF}}PurACg%ZXg7T8NT^e$*LU{A3LS4iGsI zi_S%0ItN>iez*e>h?rpI)5aoOFM%sST1qnpQc}HCM@02z77WE<=4GMTfNjO}Q-XL# zYK|I~dW%(X9042)mjin1?BKoAhD`%&^H0E60x@#@f}{^4V;BV}Aoq=}PIhf7rXYMc z6gg2h$IpEMWz()mEbhNt4oc#><7yXywa#kyNqK@W5EcaYSb=-Jx%=@& zt@>%4NVwm5iosHV%kBS8=)Dg})OIXIk^8-dm;q>ai?vPP39i?3Uo)OD2^GXpkB?P= zpMyi%89Uf8J8|B{*h?On0S}}*eh`D!eNosc1^Ed%$&O3EPLq?_-5i)_+=#BM3g>YR z06=Dh&k}hGwaf)|q-7*Tc~Ch{lYVr-$%<6uMI!JEAPX6FYxHkLts8~pD_6G7_ zF;7?ImH`l249FGn=%;V-mN_(oHu1y+L(+Y63=*}VQPb%Rz=C!gATegTL=sy7t>S`uR*-KZ#y_yNwKcx{K%1u#R+W_1t-1YlXI7UJ!|m^@iE4<=g5ol(`FQWW z*X}zw{|pR8Q;%&6JkHZWEYkli?)6~?Wl#n0tt_em3qfv5^2dUStOPk_?`8C`%4$-7 zF8ZTKFK1bWBq9nA10rl!5fm`$)_mBJ-yD{VikKBb61xW)p+eaRh4@NQXWg|wvzC;} 
zK`&nR2TRAqa_jlng826B+eO1s7xzw|Oxtq6K2gpl-WF5@TlJgI{Qf|!m}BGSAFbB$ zlJQZ3JJ%hQ!>#fHfsGFKGR~^VWCcv7gbney^9A|!R=&7pCxkA_y16Jwo8}(;>syNJ zkwgZ6#Z76)3!_pPFfy(6$-LUZ_%s@!@gv_Jt!ZE$#cUt&tZmnVj*DO#n-R4U4O6q8 zu8JIK;AhYVO^~NZ8fC!DOqUU&z-_-SZR0v6{uP*bIQ&AAX|_25w|;3PnmD)B)rCPc z@1X>=GiDRoi|$g@6Sc~yxiUY6@@MU;7m?Hgin=wBLNOq8rxY|OS4hPM%rXOs*Ah8T z84F@}zq-8?=8-2S_D)yB(z3I;ir42xr6=tu&M-^*_$e2Ty)UH+A4rcC# z4l}-E^=|VLXMny$4MWJ|2dHbSk1NCE91plgjz)aPER9Iqq4)s;Zng7U&!W<8Uv;6` zOr_QXAe$#VBLFuau0I^dRPdPT0rXj0t|#hGBMD2LjPLo{f1!p}ip3=pxTPvYTeu{k z##;c>Q@{GY=}_)Y_$DOo^E{PIg?q{q{Du*`@(lQgqNn_L8y)LJsCL?=+k#+nJ7uN0 z%9cmgH962;QBnfz4rf%lfCV^2mLXg>#mFCW z>R-#WwWbeOZhGO|f}p!`+HmK4SI4ET0C@&~Ak@gSOYY3KMFI0~d@<1WNA;)ZM(6#K z1xxHA8@XtVn$@!-3lXT~Hoq#6-+IURaFq}3p25J!voP9sstls|L8Z5masuQVuzfr; z4$EUGdi!PUn056;z&U;Up&El*V?C#?g^!=b|1?`$J9NnL4yW0y2Yv-4OcGOQgQ$jb zUt`X5(th!HAi>JA1U%=7kl9N9sHh0Jw9t=Q8ka{VzgDjPkp-KsAZcWk12Q`MKRh)+ zQxtong+Z1w8lqt^zGcwQX@o5)s5)0@)g>D6-TwQnJHuTougSUy;a-B5Cmvtz@bcHE z$#07e^M9uL2|x4>Fehv9;viE=mmZ9&Z$wGVfp|8>ap8kD2kSWS%D4LTu#M@Z0S2C{ zsHd#aey0QBj@7aLk=mDVI$~?I#(H!1%$L}|SAoOA6!HIu4dyZfaVH(UgC*zNW@DKS`t6mpr>V~) zSm#^6{ACpu-}(q=T!0%$Z$ax;Q82NZFRhOW!butsAejx&y+y#e)Pl_@BW0tDEVhKb z=rIP2^bi`|=^P4H?FiC9FMxtjf$f6UUw5$`F5|J~lR3=SoZAjRwH{kS(uJbh{mFpW zAq%vCr3a8lrn%XHV@_8Jr)+x9cJF$}n_CWr0;?%sfyi#Wty7S7aq7?Z*K!Puls$ln zf+nT-dCvFNAPJ5PJh(9zmTlU65lN+NY+_pG2McfBTp}@I@yh@1-b|blstFjnikDBn zvju(YoL2ubm~Eu^`!&rV3dSpzoHuXY?u)6yTSnYfS6xVlh^q5--UVR1q*sW=!Xe0? 
zegilkd$JU&LY0Ec$wv}{{0Vo!*v<}cCZnNAs{s+QJNEV1Ucptr_m1wqCpFOG~ zmc}Fw^caro=zT|jP?p~J?rK>s`J6^JLaN|kO6lJnytN%aM%1-l8bu4^1fHdM@05#%$DMN?bt?De7F;xi6_f{a-zzF(VWyK+&=pVNgq~6u}iJ7 zydF|!wld|&x{!6+dTq3=4%M5o}yVn|Est>&yMJ zm6FLwJ6L%mbux-aAuM!NU@x)P-s9Za1DO}pth|{afa3A@0M{oE{EM(J!0rR z=!SP7??G-F&wMy(gb~;%xW0n1thX5w-^Nqx#F_vwSM&$}g2CbvL|tG&Ik756Xr!d5 z=n*`tq&9}d1uB*KpgJImR)Ru}VsnAYOy=;JXYJXAg;8)8abWUyUtaQm7A`}+*`ef` z(1e2~el&J?7_K*SaPts3u?04WFk`b1;j@dt#QvwDjkE~*3Pp4**p_j+VUwW+q33Dg zPuV;t1iqv{?D6u2hBao0^COtHC0GGdJFAfwsssRmYU3X@bB@#B=N&WQ zoy9VR)LA++>Oa(#vYk=bkEEhEbt(j zFNp?n@lefAvX}tPS#uwp*7oM%soJd8ggI=V(B`H!h&e;Y*$i>36&`5qR~41xnfE1o z^w@fkC;0|VEL6D5-MzaK9^e6^bYwoY8*SrY?TL;IH2ZDW%!Md5R_GFq&U-oQ@kOXz z@xNBMTK>%5171nt?KNh9D7Ikb<8%y#g%s2?bjoXyiqP*-K6S_EPD8%RHowbwnkTl| zATbT|Q2Vd`H z0(6uTHpK4m(Ea63Z`r%qZ@?C+4^gN{tPies`b5xET8vfZYXyp`e2xsA91jqLg1Sh6 z&RBl(^v_gnevb9|UMtXMw4gq;147Yuj_zWxkGS7z5dSph=U$Hcu??to6Ra{8o-`7q<1{mS(Qc7jeo=&dnX$2HR&y$du9q@3l*Av z7ARUN;HIoE+EB9J_87bs6b3>a_@V70Ob{b)ecQAbD1KI;^EdQG3s;FR(M%ofLn~0mLvr zNh8zvg@j7+Hw+`hC}8IO!L~bOK*79(g`TWm;sK4Z0o71vg>!SQ8qv!*{<>(-Y5hlP zsKVmKCn(ATd&?h26?F_8TI^Ht(OOlhUh+td+klMo4?N{2T?Lot|HRrc$$FG1HQ2V9 z_-c}SMk_TU)Z?t9^%rZk9a%rgxnV07*3rxjS1$u0>Ar=i~cU0avyi#FaqVUM>z7&**ga<$3F@no&&1T9B6hK=$&u=ZLbO05Q6eB zb$$Fr*4>;rObt5TkeOuhEkL9gZF}*!Y;V~Yt+MJuQHobaEjpn!cn3TKFLYQLhJ8Q{ zkuWQqP1?n{@ljfaU?VK&i@zb1c6RtcYo%Sa0}?6P-ib>VD9L+Xb2t|@*tww-x`k4OM|c#gI?CEK}DZ$gJ+FU-o=g1>)}B9nKj>0B!KsSELyAE zqM3L&lPzxNdamlnlT3C*dhx-Lc-Dia%^_`>-n4MeDTyHFq~WU$_)ZXe@wu4`6# zy|pK|g>tr|JTgh(FQw__J>KdzAJsVi%J%IZC-0yQrWS)jZro#cY{bU-|FzJvtJq`w zvZNiZrKQ{3r}+zsmgoK!BdSs|=;7uZ_txdkRnt4m5Q0GIN?MN>87hCDaT(`)63%pr z6hL6)qGy&1;C=qb{d{rFmlQ3owWs!$RlJKrAMei%loxjuafgt>XzU`3JpXFrI=3&xF?78>vXhG{BKWKTGLf z%MZNO0{;VUK|ggzI9Exo55?Y^`HUTWD06q)Jw^EYgjK9*?e45{rDmWKS~WP5=1aVOrmZs-o+dIXEt;P_tksW?lMTUl4o01YVp~Nv@tL3Zft}c zsXi`w=1BlpKN14)Q9mk)y+H6O`s^0C*eUfe5%-}MvmjBaT@BwnDaD|Lv#N{c;c_Z7 zBC=l;eNBTm8rH#7kV}#kKxd%uDT^b9xzf7X?{?}qQH0|EI{wH})Zaal!OmI!tvUN`tCocE 
zOKidu^BHwS!Qt+jt*7S!ZqXgTQd+H{7XZf0Nd-((^YhJ8F3g@>h{su1b9CwfFJ$*N z(A04$RMS@Kub2(wB*HcnS`7jP#DS8o2&GWGv~UB>CJGj0&XoIKF_8cJLg=4}B>wMINd1(KHAa$BOf_h| zr?G$(o&hq-AoW6#unZ-xLj`aj7n*;cW}=;dE8f=j%6jgL>h<*uXJHIed?vd)F!p0@ zGPV3*93!LdkNJ>rLW}d`!n;*aL43Dxqb$IGH59q9H&Z6bC?ar`;l0sPn&zbEDIV3FPQoKIg}=&x?jEF|{n( zcJmmXlOwny#kiRLFNci5QPSstfFr>JBJT70y&7@6BY~&m)$Kz4Ft!z}@L|CLw}I(< zTN#4q0{#(-ANtbX+zp<>0>YDm<~5*o8>H)tdSM{xE4wn`Z}?vVcis{u%P45U(529=f>^}x0oU*y zwpb;Bl6T{|q`c~IBxZ;{lv-5W9ah^9*bSmG=0f2%t!4k1mv(!*+*qgWRmm-L*L? z5QTuzD}phXB5*TWqbeO?;v_>4ZcEBIKgHxat0G#RALAhkYu__T`8#tX z4EImZhH&% zGBTvnY;US|>Xq{WmF;d&4WGdT&M7rhq9xA-C?EV3dcZ-hnlLg1k8;mc$s&D757=sI zyumI^owJ0iprHy%L;Q=R0W+|zl*DkA7O41!ElOG_i(0#egohiBf(!a`t8sJ19`0=n z;B9m3ljqolpWImRAdcE2pz@u(mfLm1!3$IeiI9y63%C;JMGqQS%<@Ql-T@K%4uq*t2vM zUZI&=6DLtS!AB~+k@UP4efDz^d8~y^IFZNVgEG+x!0Dlyuy}^tU8trJj*CqvUxQ@# z2sU0wPg~J?V7WT3cvwoZx!YmCCI%!OA>xGVc6rCj2Ck3H7;1INb>5~dD5e`g`Sm|DCqFE4G z0f(R=Hf%$3fC-K1QeVW?8a@Y1gsJPRVsv%}h`It$hTkBf-=3=;iJAMZfbb3fdq$r+RDU?7A*%jL|sHi#LF`mf)q_UYuiCqK!$+ zj}%4PKZjV2GN*g>JGOiYXnlNjlT2f$dQ(NqC{W+9hzKqY)nh6Go4egn^~khJ6#GQf z*p0JtX*Fw5S+`~F)y3#b*}^2xA1+^*@QuNB)HF={oqx)_!(75&R6mJ`8R>j^iw7r` zvKsgaSoNL1ZzipD$1;Qxtm}d+;>hzq>tfSD%Hg*5ja0E=OVTwFQ&c^JA@UL;Cvcc1 zr|h;-m=D5O9fVlp=aC*6h>m3=f)=&g9@NLP3X_Mp*111by)@$nA`DFrW=fFm&QyHO2Fg7^ zBL9mL>3&kb@UY5UtI=zZX85^C_f#@E;OV23FcN+j2WAS=!HvpQvJ!Zl3J&zGy9>ANSr_n~RGKg$ahan! 
zZ^d%GJ)bVJjVAy1{JzKeb;GuI)nv+ZwC~oh{PcT zt2{Uq*M4wig~y|J2S{Pe0#WmDUPDhQ(j|J3h*`?ETiKQM7a@)^TG;n3ueVLJ8<#)v z=e5g@;$FmftZY!(L=iyvnxP8$6()#D@X^KL)3-Gj8zfM#bF+;U2-< z_lEZFfP>3O1t5!289$vTz;^KuaBq=t)0*|KUse`sGoNe-$8;8Lw!aA($dl-Y$|5o zfgc2W2GFUbv#;#I|C6ihL(*YzR64KJrWy6GB;SN9y5j)3BOX8>Acd&50 zc?=%_c3o(t4mnG2={gyt^3#6#K-!J0PdKF+=p^k9{>hb`#Z5aS$=ME}hCBL|Jq`JJ zXbG~1MZ@|3M(H5aAw*^m-bFUAe0qindJfhTn)W~r6hqd07qjE;eWk4&mAzs}0-{J~ zDvHpy&8us3fZYk~Jc2nXay19Go<1Mt-xbkr9Cg1TV9oj&!>XQz#l->5Y@xSvl!Y1< z5MU^=#eJ6tt2aiw#i5mUO4G47fOefKAwvxjM;EUs;Bf!2WvnRyF(ya!-qBhLKQCK$J~Q1=MQ?l`*+18pXf=AZ zXg3;Y?6E&Jx3i^d;1CkbZ%S(j9p`(yob*R5H5*>&DJ zooR4Lj>q{mHjjlY{Ep94jQve)Eaj2Hqpn zdvE#NUD{K3X=|y6vHbg+c`IMs$b#2x2P$A@-b-6bpn^FJmGD7de&41VC5RkiT(J|> z9rcU|^}kJog@UkQm-G_o$=IRvA$SXO5G5JG*Meu#37r(Fb1?0~&&a*4ex7AlG9DFb zF7P9*q9GVJ>RkfLU>pe;)jgq+*8iTEI;#!B;z0vu< z19}oWv}Mm+F zVVf4fGcru@MJ;*}cL^E3tmZ%__BQ0d`jlVQ&U5Hw{@_bH7U~_UvvglC;9+~<$05sO zAxpopvS#Li>fMCF+FI0dgwcwcMuH7P#iceAJ;EGVdQqD=MR$xaEVQ4+qJlgiO(F;* zT60gN6y?sym(9M`-+X#ai~sBg{>P39s5Xts>#teUSDY;G118=9I#KFB+5LXy;Ify1 zjdK#d1q;OG&1+buqJD|)a-_IMC0RDOWR7jHF+Y_dWe9c^d(eVu;B@LWz6ziOpfJMMeQ_v-x!*-U2olyy(LNE3WKjGv0CVX^2LX)ALG0 z!&&Pm*g{0)VkvBMp44kuCRFDIUE@Bs~`D6jn=KpK;SCd=^s886tojM6zHoSe2OW&SQ7N4R6vhDX~eZG zKq!@=Wy)9toMf~?h>jJBO~|)gZ}aH&Zv)_r-eY*%K4cda0JR+$1WvAnMX^^Cu)}m) zW$9nH5*9u?6WFYOgZH$c$GtjDB`m7l&hG+QUkY!LiToBnp@OAB*=~ljiVSCQoME5x z-ERud*WQ?L5!zlKEEYxbROKcEja|>%Ax^xao5OiaAHAmv$z#u(I8A~7$7nF)Q*QZZ z4Wv5kdfNJ|y?<9^A|TQySd8%6yI6 zIQ>jO;icHK%?4g_e?%;i7Vw$FU(}5{S8+i#6n11bKg)fC2XXDX;UOn(=26w^#j4?q zF>jwoE2m<(U-94XoG}2%KH%@$CD2c=hXfg+!8%vscAcq<%0#jb9t2NpVL=Qo1=Bu; z2tdNYZ|0?V+bSec(Edtt+CMB<_1%{)$~J%gX$6~YlFTLZ1d8W7THis*uR?m6`BKFf z*{|})tWV`2*5N(49&9r>6=uIf*D`fZ1f&d5lmom8`}4h0psX)N(ZftIP0iVImu!;b z#ScnrE4JG+0&zmIl0fj;8IG;|9dP~ADqj$0=2%ROc}h+;Kv*WMc6`5>4W0!I1XJ!V zJRG5i@B0L-Z&r*L8{4D@e_y)*N8?HyNX-kc_>J(r>)znCu&Q4NXou!=^u~id4g1IH zv6vC#7zW%amsi<<)x@Z_?YFGo!`*n|G9->IE+Ql;LcHA1%DOMJ19Yd2ht>i^KU>jbHBHpI?)-@fY|P 
zs^Ep94o;`f^^D{ken~kx4w|x)A87k##4o}#o5AwH&&e?7S>_*}SNE?C_Afcy{JW+9 zZ0qsvhJrW($>9lKl!;X1_wyg>7(vSFp*b@0G+EkNMoLQXrOUYkKc0o!x;w54I5O;M z%=49w1-&J68TeJ@CO+7r0_aVpI@*RAtqa&o-Rsj57MN)%NLK5RQCGnhzFRi^A7|=) zyL7g6t2985Cuw)Z0f8~Q0T2x2QDWusBKN%4ddG`c>yhn3n?S7UIQc+e1K$%1EnVJ( z8rcTkyL!Ikm3b0qzbRfU7q~Jde89CVC0S=QGcx=h4SvwPPcrqZj;;W3qxRPJ(Aww6 zb}P3#F=$~O3s!6V^;~Udu!Znk8LH9~3L1UI)6H*C2U>v>Qw|qEB2r6Q9QX1!#gK}B zw1E(baAGo(rK6D!+H<<|@CYG8fj`*P$aVow@liAJB9Tp@rK=G*a*OXh?j2twbKn66 zI0{kpIyB*X-~%M|2^(n5@o58%MZYwRE>#_pm4M(!oW*+0oU~d6C;&T~ex|TYug<5D zXOs}~*5!=Lw{reZUB_>%&H%HAl#_5kzLis|aqXmAtfg87yNT^suA=RdHn$q&aUhT{ znqYVkIM8$yAnqV#8$XA1+{+95Dy=qTrd&B<43QziJej_%>`5@4BD8Vd`1W9x6H}a| zh3CHi{}}rcsGjq$|ENd}p_CFOw2)|{&`MM)p`w(e6rt5pQ7W>Ok`~$*+O$zgN}Eth zn-D3|jtWVojsNR5!%uVOJm-JTGrwoXnAG?4{@nMyukH2(yjoxHW`f^CuL~lHvD1PD zH{+h>Vo!uZYqD*4J?{(3h8DUG4~0$D7kIxoY?7n<0{^EoBX)nHWt`*ICNEAIpJ|W0 zOPC}oMiTJ2NzuQIEq=zdm3tqamr^*VQu%|g2jL71AeAM6$2|GQm$jv13Pl)PI2p;c zYGUWveHfNLT4Hcyin#TD@s-~Zp0?{}RoaUC;iZ$XJ>eYwgtnxP?p{Zvi2miLAUuq# zq7%&>J~5$0;YY4%!A-9-&9#0XZ$km?4I8R7^t~U##YGDbaIqnBK3=s2(9K4`3hOhB zfhOP;u!Tl116p9*IcQfgy%D?s(d))=?Sy_4QWMdFdFolpRt1?;QVN0G+7>+l;p8Ah zA9UEIFwt9W=s-B_$HWlc4rXO}?cuB4s;a6_k*tgoNP*b2hN6U1Ah8*ds{Vd{Meu=O zKVTgCIru@{Xdps13B79wUx2=m+$$7HYFWKNKwhzfHPYfFUQ-?%yR4%>>&vv(2R&&Q z!~w?LAO%_BoNZr~4McW-8=hI9c?Ajr8;W`bEcpcVWNK6cck39zM>})umtg*coCG2M z7vCb?mNuDwDZ97v7*d-DCWSscfh$ZRka<$O(&Rt>CSB@iC}7w=5}V+lYVO!|RF;pH zXL%OLhMO)(Mtri7OaBY11T+pIsUrYQObar#mdQ47-;s*)<{^w9ia^lHb`&^^?n`B4 z9-T`6c~(W-Ix`k4{dvN^(+OLG6LtwFtaJduUhK5s3sZMebI6+A(4~F=WLDGTtpPts zL#JsT`gY*mDB?S>Mcs^vAD;=L{}~Sar4UmC7zLOZpfn73xZ^1)9*{Ijrj|Z^RA98R zm~em1Va>`M^g_z$>DM~uT*8MIMQv9#9?3cs5vO}2*@ z&ObJIVs%+o;F+M^Kv4Ype6?^K-SmLcmwqW)*y%5?dZIb*!9m+x{j;?*F+xYbpx-= z>Z+@x#oM0i?Blwgf@K8M1>q{5Ug^uqSgz6=^YYvm^Nf7F+Z!*=y$?wwg63GAcjIez zjm60Q06&0XxGv3@jY**qYi4KRV~PVe3z(x@s25Yh!Cf*JR)7BEApJgCWsi*xqd~bb z3KYA}Ri}8@LhOKzj=QVDl25Sh&Ig#2+R17>3`|cn*_4a5Lv2Vw=H!-PF|v zL!VFmB+|3K@Kj;{+5y*KI2*pAa#`+jIQuQ!c$G4KJbit~6 
z=(-&ZK^r!BWq#?A<(hSQ@;=t74ETN5k^fHl1WJ2qFFt5yS#yErLrQo*8g$R&?E*JE z4guRavH(Fb0#+eYPX$Vo+p>*3Fhv1s+zE75N#}l9XWvs8S1E2>;Je0&NA3rF-M*=t zqW}XQ+~vbDQ+9~?E&%rzE4OZ_kdS>gC@^;SEuM{LltKi4nf+2J$B{U#?eC?1BrJkl zuRc*lA}a_*bY{CQnMQV5EICzV6`T7pDcKRKexTJ5F?Kj(17;EsadHv8uEUCOWVINy z%)6nYY9JyDUN6D3!e*C|7Ac zh>Iv;Dr0-*W|C?sgU(|2&1@y!FYYI0-o3=im-1UVr$z)<4on%vy|SxeYvaeZB|i4l zoBrLN-rruWQFg6h&NSSoOK_j2Hy}hBL=MyBqsEzAZsvW}z%?^)tuarm{*oQoa*~Hp zgX5N^7Ry8FR)`}u13D1;&}GWMy2WI?&}dQKA#y8&i{~@al8G)$s}cq@@FdTv?-kBK zO|fQt2OkzBgFoGZUMAJ4&H&4E0#}$s!j?A^;iD0&LsgS>Pl!A6a$~gGup#y?Yg3xp ztXZPA;}e7+XNjO43IXACXr?B)vyW_vSfh$^o84&72JV;Au-5^LHslkZr=1W$u9_2r z)&eetp~bU`AV9eIvMY;shgVoQuW-eS(=H19xSh=~8Yt}-o}q3r(AkJ2{9uShW*P-P z{&*bgby#Wj9=@n5eY@$xX4g}Psuf}?%!{!+REt$w5%yNPj5Nbm`QS!<*Xt;$^TubYQIwejZ*sGa z-Wz>>RkEWKO%Qo=X#gOf6;=hUL@+gyVU$!y(@w>7S*=#y%0 zIAhCX-mwAe`gN~uyM@8xYod*+iaJ2A>k`V{fY_idML&vG%(?+FS^{6fmuXi)|8Kq! zs$ysc2ys=Cc^^IwY5m5w+ES1R*$8%gUH|j#v^*Gd?j*UvwBT0q9ul z!S#4mFh?5TJPNT8aP*iF%WY+y->)C96;^_BwfD_N?+47mt#`r19Q5!t9e4qz{mhg0 za);~crsZjN#o$_|bD+s$4s<+DzMz!^rLjai4S0!gI-P2W?)c*hP=NPPLY(?*iTj6+{yC%=cq06p2BUzq({u=4T0WrB}cc~Bx3O^@^SR& zngQ!eREeC@6Yzd0hlu2eHIJ|#!eWkY#kmg5GfKl{hPe6@vEDJ7{YmCt?QRM;7O+*7 zfAHbRHmpq+tiO|2{z{e@Vi`@&^ou^agv3D_iylhqK<#4MQX$ZJb}95Y0Qz$f%Z2P@ zcG_l37Du;l9-jF(%HJC8OM84kKOgmkftJKlyF?!cpq36t=J$;mHm87dSA8tdShg>w z?P=F~M|)X5TK#~wI}8l?&K)Z5MXu6v^lie$Oa2D(N>KB}S_e6DdXlQjUNXz{W4>wocy!nukv-) z*AT*SNyKpp89y$5etry0D~hp%@CGIZ&~~>5?CkcP^2K{12ig-8UXZ8JMC7e3jHJJ~ zW8C;gHUXH%P(y7x#V7a0Ega*MS+gdTXg|FA9UpqN6euoeG^FWsweBL@93wt?=RmZQ z_#u4P$?v{f`|Fszsdrfe;?IBjvscJ`8b>b1&;XSI&Fbp6FkC$29kdg180m+cjU1ZB z=e3t<%(oUm5$WMAp8NQoIWJEV|&GfH`GGDL8f>kkBa`UB5we$agmxm+Pg51Y=ynOnJG znj6d}Ux_lrWU{jm4v-3*W<6l7FC5f8`c_$^qP9gx= z8K?l^SL%B89ro4z518y85oi=;EBA;I6Jd+ydo`$hcRN?0XLs-zW!D1@IIML>hRscR z*!Aq_az6^`rzAgcE#Ma}9p&%Xt1>X{nuVzZOl8pT0A-j10T-=vMT|jd()N$Ck@7y% zcsW>Az}bW=NIg!P)U>X%I(|}9v#oi98#|Nn-M>8MeIbMj-&^0eVE>Rm+Mplk&D*wP z=v$07C<;SgzllS-^T?U~ct!D+VWsoQt=4JL7PSbLlxCe7L) 
z@GPBp5$>S=3r#pajwYxGtp=KKvOtWR^070sf_GU9TL+IYIfAf(?Q!kQ;GNqj&3s<( z;)522{Me@Oz9}9n5fSN%RH$D44j<8!oHy1on&$TlR&-Y<=*XYVzg%wq1QCo#N=9N2 zZ6k!@BOXUa?rnlg3O!fK$wCW9;SV%a7ycObhO2BQH}}*cxz6zQ=(0$1jGyKj7zE`< zJel*GH4W4f#LVejm3E)Q*LA?cp)Qw@5Rvgcz0fj2IGfT0!r%ey4u*R}gl$cX`QNuk_exWDyNw=ol`@u(g=jAv0I+-U6DYP;c3)&Z7)?u@%u$uI(ya zIVJ-{>>a6N(eVSL7EK)J9vmSF1whqFfu4;uzsgYr*LjXvm;I>C*?n_0#(w;Vq`H!xy#p9WcQ)W{ZL@+wHxI^&05i zXeOk;EE~mHq^Ed}Y*5^%Y#_^#uW%*-eGwI>gN4DJ`*CT`m9qE?CyFIEU!LDD@-l6v zBo3c=B1}A2(8li?v{bYPYh{XU!wjBlF1$qA0-g&+DQLZ|bpJKD{E4f}m3@4L36riD zSlpqg)%Gp4`DyeX13^1sH)v_x`yHOOSwzhY)vK%?ttilf|CZQ0lKX))P(4~a`I>|> z>uKIE%>N^APW!9KFT;bL-k*Agoob8ydNYq#Id48i51JoC3T7MbGJJtlkyXpLBWbr^ zBSGr>WFbQ_$-#*T7VBGD8G7&7VV$%Jr!la*mi;Pi36^kY+hLnIx(n>uAT%2XA7I84 z;EEK$fdzQCtQwQ#Ixm$FB!-EnO4`HwyxQY>;g?j;QKStkr&~-$I17v6Vrx1n@Uqi- z4yAV+7B3r^lZ}NE^J!QS_|kk=)RvwPZteF>LYLtGoWT?-QqilarA0DTrH+6%#d~~P zNqp&YQ{>L|#E_HoIy4?Q<+G#OqR!I-B~q=q^`8`D$vF@!Io$|$IZ_qrLNVKPDav?Z zN!4gY0%F)@5>~Rq4Ppx>mpwuA0|H=HAkiw@*>4K0CG( zy(&_YjxoKdt5V3}s8!?!)*T3{wxMT6uA;+QhoBvq`@>zDWwRS1M3>>crwkMd?0etD zUxkjKGSlpPk;nx z^FnQhtnarUH@O$t(egp}(d9Fye&`GYDqL5n<??A{t5QU2K_jNBvDdBuGI_z8KKo3^$o+4>{%O215Py z__WDhSWh915QzsVT4etV1&8tt2gxItfZq`{YTr%o-3gIG0xQ8to$pPL_SP!p`dX4k_Y=qxR)jmU~0rz`D6(r#O5y zxFYPIX|0}9w(19zC6rBrxq-0;3HZ^6S5$WU=%TYi!#tnFs0-Jh)j>aEG^Y>0PHs0k z@i=q%Y{;!bS~ZN5sTb^q>Jx?z$4WK`S;mX0nLg`Uj|p}se7bP0-OXZH5xd3jZu^y8 zDGlvM3-yz9FQBzX+dw-X?Ck7d^aYDv<7g_>JDFrNfM7Yx?R2&O`^@YDceV&^7wT-I zb$0)dUqep^!~w+UVUz@;x4!E*`5LfriePeT-myQFCCMc;C1lN_*wv5jAiIS=100?0 zpvSrf5MN}DEFxmS_2<`O=|tZUkn(T^mr#;Xg8P_iJV)Pti!U1k!xC)mu)fy{oIg$!sEwWB4l`yZ zayNjeiu21BLWFq(z*P6FTVKf5BweV&dgT|`_z$kPvUHKS_zbn12ZCG8tFMGCi`WrA zr5BRV$se3#yuXC0)m zh$5G|Qm8q7R+wyw$s#c`HZY>hTSC=c*J3*m&%V9lE-}LxYQ5gwTwY%>%@o5VP}sk6 z%Tq-~Aiy9&xqxkZLov}%Sgw_F3e|Sap|mtho7j)YzW_afl9LYaA|-~x(!?uWwlrbn z*6MBOU!e|yQ3N~+#b&I>638~2sHDqFzXdXjvO2qn- z3`0#fp^hT!7DS%g0n(#jY9~m&&yU7pTIhfx*1qIjd&D!KX4`-0Hx?IyC|D1S)BPJ? 
zh-PrfD}*?O0k4b#>FZBYJ`gfgNszDj1Pra5?K`)A7(SZT6yPH(-v`iiE;8fbdFr5d z5V}AFe;D-FJ`ulnaXdZ}L7E|V9y9`bG19rS#FCdIR%G8xH}?BJ_l8mpEMK_UDBx#* z$7ofwLZ#dt7vcC_ICJSogUi>hjn^O#&N)8Q_683gr^_F-(k*cz)23?b=L>(1*<3zk z0#pYm;v5EAwt%)`4Ww*DK|ZDD9&4w-(kclElv`+^J;bp$DIWQw+Yx=y?!jxDriSn1MZWlwe!|# zKE+b_sWN7GRq^TovReC{XbDW+@OIt)MDqtYBZvv&I9w7L{xXSLRcAx zgUFyPoaI``k9!tmEHKZ^G$oz64NRnaMe;5q6Yp{`#+3*`Rw zX|;ty_UY{d+XWmYBoM}qMUSn$w+_`y;!8gu^t-_2UeMZ+7Tb>Ckug^cIS{9|BT;=H ztm!zYvspApr41y|qOJ#AegIyInmd4^;0>VG7{BCsYlnsc9(%%+(Id@o-EQ*5vJLXN zjZZ@!vpyN<%%~0e#+QgX5{f#)jXLsNT8M?|ZL9H-r)c^&?Br&W=$oZ!?1bA16jVyh zohKK~u-1-JGW2H<{)*nj0j2{+ch#2efD;0?f@ZESM;pahEu(JJj!2kT z;qMZZl&lT?OVG(|pdL$Z828M%%#ghEDEK?5=d>YfmZ=xg9d#}2|4y6I;XbX~>lPo3 zeC9Ri!slch7O?{0O-p-nj~>J^(Y!Ue>XlAV@6s?dO{`Y29^b}{p!XraFyuEjE-SL1 ztLe$2^k|^;7?u4>?j_7) z8q^7uRvd_-L`S-yb`9>c2Llc~K9^yu_;*FTqxaG& zEG+Vl_Vxswjc*^mHF$_9E=$1X^VrvW^6{(W%}V}42bikf&On&~m>IVDNHYDamPm|1 zis}iReI?ZT6txQFD|GDgXwqXbo?%ypC)9xye?)ur1Z)>Lt?IXQ;Qu?-Iz)_e1#pA1 z2)y(TYN!@QQiWN;=-16%gFdtz2R~uQiF-?=eCWDF*$MyfZ0W37a?1wdFTeux6)oyl z>`$hx(wJSNT0g($c0Sm6SAsH|VV<&tIa1$2O`pHGf!jlB&{QZ;X;@tkXNMFs*vql? z^u|eKyr5hwP@XUrYkChH{u?rX27Mb|Nsf%3m6fu6oz=8iLRt7#{MKu5$Lcex9yp5i zK49D3i~8=nX03mw!~6w*;&v#Lx>!R8HYQ!%Q7=8X9eKp$YrqIJjY+Y(`r+>Lx{LJQ zi?VHd{%u(2Ykiah^7Q7yu7aIzBx5qH9-;s@Xw0wT5}8by%UIIK`fbPxT@RrH@4fN| zrZthQn>I3FiEd5xlAr&@dW*7j%%x;;FvXIfENAd5T!id+jc>=DfhIdyKK;Y9Y7cIG zX?%P>h4_3gRs3-|!K}OA*)GS33MCw*བྷZp2TNLl!rE$%eQZ(KFi)tk{pQDQ8m zu0dMG>;?pIGUWb@5Z5aKo4tWOx>)Ej&8n#S^ELOJ!$mfYchiG>q~!Irr+>Fd(P4C5 z*{%e_d0Y@KlKMPT@R2*J7G~~keET(LZdbI%;~S4n>=eX3LOmL^M-{;2HDR&HZLG71|#$TXZ1zVEnLPnIuels>q>7_i>^wDW$l z9(_I>l=8bf@9tjlNK8=BbXn`1ej&}^A`pB$=mqej2*}9|T^ux3>;;KDg+ixjbDX%mGCA2gG+E$m(q-1k)s>Kj zpFxJd55OMWgnqaQ0pBeN!#bQu2XdZcw9HaqVT2czkwkFF^CeXj`?c;t>has>TA8c{P!d+he~Wx+*=ON%0Z z305=|kkxI-fOLfty1IJDTnlBYzEV0iQ?`X548*jW2hxR2#xC1wsdxzpmiAw36Y4wzR8_YR_5S(ygQ z&CN~ttSOdn&)eH_33(sl!|q}TtF}7vN7+2!E{nP7DdK8Tn`%iIhC5nUhtt{sy? 
z6(rX@Z(tK5fDRc$ezahU} zVN01z$dt*C642LCST1aPy6?2u21?3dV<}$h6>=?NC|5l|$Y9mcz>PTiW{6)fLU)p5 zmFV$pnl<7ZtJ5!A!38s99V>~qcIRE&g$Q9_551MJF{ws}D>m_ce@vUyNGnuK`_RUq zt6^|gf90oeVG#AQ1=D5S=X&AI+|p7P29f!bF4X&4(JymLON5+5yihOK%>PjQE3^7P z`*7Qy7C93xfFGE7kIO1f9-ag9*Pnaf4jVegM-&A3laRExh!*)N6>xd4EJWm%<#JX_ z$8`$`&&N*dQ+G~T-H1ah-t_NQMOza1M5dnot;toEZBxQo3VfcuZ4?*twO9#d0iByz z#!9LvEF;?Wd12*XKw&g05C4yJ(oo9PrQ0# zrH;B!pSKD_`3mw$0t`)Y2}vI~ST#~%CsXQ-F$OE+!Z6}y;sTC#J)38J=}CJFVv}IH zoBHBbrPDjK9(3GHPWGzS3`*50!7Wz>vHjy{<iYnU=l>0UkSkEfWU+mxN>G(f5UHs_3X7{jAF@u)1j) zx{3kSxpJ3HJ~LBV?m^b$L}bGBddpRqk6gecv)?gYk185~5QSE~RVNVd-Bx&+^&QO_ z_;V@Isje3j6u5a7+{c`_kJkVvoIP*eW_Vc0apW5{_TVP8hynioK$;$YIFe!0q=3Z) z{NLOSl)d2m2P0N3jrJ}L-8um2Wb70FPZr+8+{W)9)(C-5F~@sKSB`FyZdp<<+A#X4 zXgL>!B~gEe8d(Gj8&m8{m#=*}e%KfNd64Q?a=du~yDeZppiitR@p`<#qHO*{1gK8Z&k0Fc;*&onE1s)M~IJVO2mVP>hy|q>C$JT6>rz#UaZHp0q4wLV0@`h zaC$3*ObQ?`6!?sXufP1^p_@a_=(O?x8xZ6PBpAqm8}y|}&4bJVLr4jPkkxppS!aGb z9&$Jz^=PP5@#g`TEv%8hz;RzbSC=FV4^Dp1T5QkeT-%KYMs8koebq0`>chquAOu%~ zQVhsHq?i{-TA7$A0xBdtb2x1AApY(6qNLHGKFHUP=v=N)P)@=cZ2(*H zM@%Uqq?B;RN;&O_?ee2qG3*1jXT_u~!18o|=YCXcXB<>e?2$e&aQXQn=*t3F4vlz73_ zbUFUXQ4(E8)Cmj|NH3i@~xrSB+a{&Sn zgRvW&j2YhCE2gjv-{bTBwoLN)`xbp&^XB;caeO`mNHzgE_LBq@<0@=6fXuRQ7}Sz z3AKA6xPGFRM1wbz9RjOL2-Z`a$L(Ah#HCHv>S*d{aW_~l2pFBZFc|VFycvD?YLf=s z*OIh|wd>@{);l4gYJe7jrjaj8Gv;Q0)F;D)&l$kP6Z08#-~d4?3 zlle&Oe0k9(fB0iRNPvhc$EFZ9U;_wJUK*^J!Bn0UQ~vmq++N-rzvc~2&A$&KYz0N= zJn^RA&e{{3mce6K4iqV0A@C4*Epxb@NlT3_uTlk_lKi?aP(5)`dyX>3rwH|ivUf;d zSS|e@1BU09Amg5zlZ65U`?LqQ&K$J4(+@tBGQN?B$KXC?k6M(#FlDTvmS;Mr*v3dK z%uAePGW~lr?njV2zqs7yNXGsYwXNHlG!=m|GFt&WWPHUg|1Dn!20OalI?+qTKpa(f<}Zhe z%9sCa+eAFjOXo|Wc6kAfTmJ(?E#6ep8szV<0zDh$Vu6_52Pf?)kib@0{*|H?0=B1i zYT6HXCkSvwN%dDFi>AU58OA1{3jpL?f7JX{$xOu&Ln_2B6h8{wkK=Z*my7>VEX$-_ zNfgPl`pppzL^*l(j|}8W?2#XY^#NzV4Dc=Zfmp9>yo9Ij*)|64{bLd-3l0kgV`dr-x(0Vq)xP)vBm4Lf!mnVX3oEAS} zYYf&gTD zZZ<%N^75R24^9TT7Ey^VfyJ`SwVK70=0}7xMP8pYd9#@I2=2@x2-Tcel@jn5fk^u`p(Kn#1Xd4DCkVL+CFnVnamqg{E#3rEV zJx|?rl@_I-#_oN2(W1H?*zH`53%x=Ombd1^zu&Fkq!j 
z74CZakjM}Ibu6L`kx632i2~;_t`YppLNjAa(L^R#+<0i>RMmbdpr(#1pzH&i z9yNFG*b!9w$h9^H^bp~&0(b2uPZmxT;zwdS41?ZKxjloz1*;rM7h&}dYzzYkf3gmc zg9wDkif1lZJPaO*0C!rgg`gv`*uuQ;n>TMJl^!-f2SE}ER{x+(=&MVBK?qJlV<&m! zy$;1h#)W9G0=HFF23@d8JHT;La`V&H-kP}0+_VW=aM2=CQaTwytGn+b%0e4vs~Poo z&anN*yczRNA}bOqFytqaS79d>?&8hJv^L|?S@|vwA%4ADnYE&w~`c z^D$o`YQ_7m%v5<{Vc|YNm=ah%j|+BEN?-X!Z;g-w<|0F*B*cA>V#NWgVV;rK*;Gz)Y9BnG%FZWSgbT5f zNH$7J%hfDV4~(zc|4}Ah9|lxWtO%z^3`;5ItCCz7(-pZErz+s-qdjorwy-Qc$Q0qF zeBYQOMW1?BzP}ZP9r3-FBLC!;YBwiI^A zW6kA48pNy*L5IH$OesFQM~JrfebD?4)J-^?U&!gqIQ7#Nh)ha@rObtU_)I9qzg$J) z+60lyqZc@36k?OgoLBi{9&k?_A;zjk@4Z%kk-iOMLo*;RQ#1k#ljRwa@E=J@mH0Vl zqIZs1KN+}y-<2y|M@Fzp@-n5}0v5E#z}D6!5%=!bg>@~Kxdl$*&4zs1RS#WjbAiz! zF|7Hx47a6Khc!hv9 zoHsN4a zDN5?N!%e1V!8*78UYY-rXUx`wM|Qw?8_DKGPU1x$|N}A zq7y^PU=ql2?G#S38ysqFlUM(qY{gYUyk$=U=R@-4No^3A*en4iLN2V7k<#Z&x=Jlf1yQ* z`++@Po-Ll-@~B=rw5G+@U+-*&RYrP~H9jfL$aNurhR{NtZ|J$BuUP%*c*W03mj;`I zLQ_M|ULXh@S`B*27>{<=oU*@gH8@xe-=Lg*8w*}QWi3&QLJOnIyQk~L1!M@skOV29 zK|K#?5pwiyLvJejTs@B}C{m0Ws5^qjZE|SdJl6kEnyHlS8@P}6(g}Ocz5{b&kX&BO z;9eq4+u*YD-@YFlIUVYNSxo~MEN$5KSg=C3VuQIG=JGzgIG~g`1-g<`5r}*aq9Dqa z_dC2@wl-KN7o*RgJ_%?_FY9b?f*ngjSw+jtqiqLx`zDvc`7n#L;#}xg))v%6jeT$U z8DXcae8RXflJ>-*x!_rC%}2wFR|k8L)V2xGa!VUtTGBm`uyTY_m1!D73an&xK6A$a zTyrT{J&(zFSC-`o8%@Nu&!cQlRO;7src*JiHtwHZGs6MQ%9JB z^YHM#VneYVn6EC@0!yp#R}d3}PB8;`gcME#juVK-sF{{FW?uL9xtZc+8O6boF>)d_ zV)xBDh#$hy(|Y!Ia_<^&3C5NXh>FoAk_?`(ewh^DVW7L9z+MLjUe#^EeBLAt__ql( zDc+3hw^D+MOqg(uh}>4sL&&Ju?z~`3#iUfCges+gIje-Wnq^9+3EymBCd5#hkn(x=&VvB7Urr=ZvuU`1( zifx=k%#p2(1#bUKOPOg68cS6jFDN|3q8doype=FWQ}H^4_fi-twfE?vcX9i<6%_~h z4UO$C5mVZulI=3v5pw1Ih?%4iXy`1N`#aSaOmIvot8;R5Zvw>w_uZo9%crCK1UmVDR$Qz4s{=&u`&+-~`e$*To38w%86CyJ>r6gYEf(o0i1g6d;SXl`ULu z6OI$#I!8ZZSKwerLR)}0{mN?OX z;k)+JJO9n%rs&A2`XKkTDR;I5W>%+;5KBnYGaa7&`moN~&W`VxS4>B!k8A*cS>uZ5O1pD7F z)0#T)VGPNu4t#phXo8-Z{ww#X3s{^-J@$O1MMn5Bc@$mO(cB1cN!_ZDo|Qg%RAtdE zB>-1t&Y12GUOn#IL{YKrwSQq*CCD17z^nt2$23^f;$b_<8O{d@BY|8H5(J}orhINV 
zSjNLWRoY~Iw5zr2G2Dc;;G(VXHcvs$k*?pVYi<4$=SSU!Y@1GCthCyvFF_dO3vn+ zv1sixb1&S{Uob@9!hW)YrfbIIrwmaC7F?Q|-C!JjDuFW}G_WTmfOM0m>PI$|Obb|! zeXWp386X3I<_s`7!ZI>4@9zx6FCZ%b#EVF?A}aw%wS~jWHZK_I8vwf4264k)aLCl$ zOPH0=(M02VY9;7F957mULvIP~Gp>z#NpuuCw_3f`n`Qqg5xeXn9v<8uzI5GxmG)lC zfqs0tG^KxsqASywFHO~(i=vF-I9Smzf-98=1$!^9Ri!;pVR%euKr`NsV?e9rlzv=N zEs2+NLrpjhrvrSQyl9F%#68S~j)XQr6}bTT&^!L0-#10_@A|53rt%?rg3(F9i1a!B zV7f_X8VI8k?UE+NKqfv|1{z;cv8!Z58lWt+&!LwP{c;B_vO{{Y-skl zHfl5EPNW|0Nz3Wr#e-k7!1?_`Oe1V@k)k0Jvc#0t@4%RX!QbqK$CIFCrSRC$)U^MiRlPe2O z1@Lc=TZFRl;id+L&W#EOpG>*c`y8;UWJrOijD}y@p;71)vRZz>pO~5M5|H~V>EWW5i>xGx768XrpS)~$G9)QQr7;| z7(cpNUkb5Rv&qk4ZcMVb#Ce(C9a)pw+IRERm^T`mC%JRCmQY`8SPdm!`!wc>R)Bz z-;noNc{Yn$nLYX3c~~oBKRR~tXes(pGCh+Ylb#ANvGp1n8u3lZ;Kl(+w|Xs0dq;>J z{(m%}cZ0VHoHHi&ejgyO)WGM0UG7$GuPWux^0>_c^(7N|$Jvh%`#Zv`GPYqPxWVQc0 zm+I8d!BccOm2WR`PH@u1-HV>}0CEa(Lqvlzc5)UIos02n)kM2=^9E3GuRy`&fr2ZJ zmx%eE@%c${V*9=zW+|J=(Wv+28V3Cv)mtFxD{7JU5L!vPsA*Qh?M91HKv)sr236Ro zW~-l7@Q1&Fp9v_C&Wwz`QOSpzs?lrU2g5h?2*VHqK4D&T=}v0H@^I7h@^VXy6Q&8q ztllANNC+~j;BK3@+~9#oT7!NVVc2zNSVs2R&N#;i#tt@lZ6-}KPtkS37gnOq(6C5r zoybWJ^v<$@&s3<~=ve(e@~G0vYCI)l?*~D9>t!?{*9E-@Wi)m6xC1(e73fKyM2;w> z*DpK86XtYea#$AZk6*_>-8c7M4o5 zfzZ}chUYqZe}S(J{x&SYg91%Xa|UzF=BKK25vvNQZBn#)UR>aRi0yRN#l^+-LYR9F zW9<}h*Sv3Vp+2M{g54i86RXXSuj@Yg08{lDpbL}ldq1eR>FgVl#Vb!qSac244fbt2 zbqHbzQj9@Uh`N@nZDmsh#MT%{3e8Hy?n0a1=8~Z9UV}u`_UCT=^XF6i5j|kPAADGb z%lspUQep_lc7qb-zPBczX#hjtaUV>2?q*UH$Os$I-w7sYEjCOW~`8U=ils3kZ3Gj9R~X6TaI-!$Tf8}p%dS6Up_YmL76U&xSEiQvhCDx?haoS;0TN2 z!GnA6xTxF>Jo%qJg{hSTk!Toy>WZyqo}BX>HOt(P5Bv@Hxf4*bf`&zKCC|JM-Py2o z<|9yp!6`{GR8<5U6Dq*=*tl49ApS7NsBVmMFaS(Q+O=$;-OXQ2hjs16(kI3JMB~8)k!2{e6BsU@%|} zL*ua;jmImbpKdJaRC0YKY*&bTmZ~Cfo+PEn>jq^9Vw3_r!4~>q@>PJ*>&3|nDIfjg z4(#^K@XNA&;$ct!I|*LPtz2I>8Hfx8o=)-HHG|coUJ*7LdYm9CXe(na)?d<(K*60D zmfs_Zk5q5e2@>MzyaPiBrjb$Hm<$Wcw6&2Jw>ohk#ctu+W?6BOaZ4F==KFKJof(1l zMC7}h-uGbs3z_@^@0U8pA1LPJNXs8Ew0fR+A3}*U51J92dCPUo8T}X_n1rK+10yR3 
zKlD0MvKN`_AOhKNC;54UV%w_1WpSIWwJo4h+K4y$Xx1aaJwnO=LVTcX#U>NZWat?c-WiY73=)ufZbyp+f$)vIidd2*ew7XAGS;n-gvvU5*N&&l_x*ac|H!@ayk@CjGa34_)rd;DuJ_Vz zO++2jD>T71Sozd-yvo8(wrI_rj@};bb)X^;ipPLiHof3fd}x5dA#q@zL>3Xd0xC#! zf6TqLpYEOpkg=;ipk=+8%2TNkz}9}Dq2-kOhGai7O98SZ2BhEl2D(>NQpz==G!t-C z9vE$4TfrSNwD889&LcqsDO{%}8OO=r{MW(y)46iytb7;j5^E{)5d74WK^KZK5HrpV zB>e^UK7h^Ohs$VJbm`)31I)bYHG(skW29d$3i8YoO@)m8Yy9mFhCt7}ZCv)J_{&ww zV)@#%4pb^DuPptJl^+%Wl}}=?5k|JB!#OZ>I4f05wq3R(?I9NZf_IOaW!-z zD#t!uHSVcD#6Pnx%hHF2Hg@ zn>~4>|MA=Z{SPW4l@V*N0tgGi2haFn`hJT-cC20_mmzbuY&dE?7J1RKE`h`!`)#5r zo{hX5I9Fvye#%K?uXXym7z}YcV6+58fY6*yv=&|gIj7vh7Bw?uOnO|oI_2ad2=T3o zeuBVGDI22y0^Ihr4+HV>@fjpsnuIMJ12^%Ui1p&opZ?u)z1;7Rzr_cEgcZj^nm+EF z@|hN6TJ4C7)c`;Ni48#}0@+_v4ehpFnD;3F)wS>=9h5w-{CG2$?%E3Z2|cc#hgsDN z8fl?#A!#LIIFvhW2HM3d>?HEVgrqFAk6^mTgA&J9MwSo9EO(q()xhgZXJlMnF6g;A z>Lh}a7ZG3r9#VMu^5y$8PUr9*#Oz5A6Y|3>u5*2O1AAaCII8j-xt$c)3xHA`qU>Drmal>W#nkPXZ!8Q^}-0dF+irrw3es`Ic`b zSfp2|e`?wFMsYMcj9+)2PrDne3N=Ph%CH@Y{lluDBmeVC355Lnx;yK#;65A zz&Igr6A}Uitkfx&UFt#u@Ce+%vV54aFF~y8f#27(M&EMLO@o@npCEAfGvQZMw!`S~ z#OSbtKSxd$MDu!}gDtR(T0PzUEg9Qs66_jWzvSNQ%ny1j*{?JIVX-b3Cu~tf*4hR1 zxnFRY(lPuC=mf9aQJ1_cTbu$cNs^C~QTJIcLOZ$lK6zwQk^5gm2hI7zlC*+hE1*{% ztU?1EiNG%;&@Motbd+|ADk_|NEzmN;?`V;JP0#JeH&kg6J4(4=N%&G(2a9YKAqi zqYkY;@js2k#0c8)GzQOhJbO#%ba6T&$lii{DA3-2wXL&Tebf5^>{|h-Jx`I)bMU^@ ziLs{zjt}a^XYYV~ltxLQu^z6wYk3Eyh4yQ*%$Yb^`9l${W=e7)1TnA%l$@yn_k$JQ zm}U^(RNQkc6Zg51R!5cf?ChLV4tXGM&Xu+<6clVpxnp3!GX3B6VxS+ILn3^DI71e@ zt5hU;*d288N5++ktDBHF9~&S46{(hTS z8n#J)`?y$e+e1yMMzj#bx{{7mdqKsEPR!X=nCuAL=)3*(Hdoe~wI za^b)ccqe0_x54(_n0G(bMD>qI?y^Vg42Xo*4Y?P2T!v{0g5GENFR=(RYu`(qeJk$7 z$Hso4u?CN62;pP(GTZivok=suJc|P|=W&ycA-a>fptx)czPGSVVPEi9?B_zF^bxB( zP2??Lb-SNPefQ!Kso*&5%c1$qZ)MG(}=v8fQ`HhVXQK$5q}BgQoCupbzz$0D>@EXH(^2s*?iD`dZwlK1fosL{u5_rzY;o*f4w@|9g5oE}!MI9%DHi(Zt5SQxH7 z^DG5cbBjQ%Lj?^~NhXS_-{H-FK48`*c)(B_0oA|AuNVoN4)D5^DLJ=qOQX4Y0lYa|D#mw1>uI#bSy`4vU-`aDzBW0F{It zfkQlxH?Ok~vQ!Uo^Sz&5HddqJ3?Y#Vi|O`Xei5nV(%3cv0cR|l;e`#p*JA&H&11iO 
z(9_ru%Mub0N$@X<0o3X;)8O;O2ABKfy%o>@R=Y0BD3~J-qmV7QymTb}HCi9nxeQY* zQpz8hU6Lmgwc9&&+1Z9XVG9A1$ijhv9tBO|Sxgh(>a zF{o@)a)Z$JEU8?gUujz`JA}d=jU#ao-Sl5Pl1y$Gmob^ke3<0*bMae(@?mi%u7?4X z*YZ;#P8|_9G~ZHWWwURjcaC3&!LwzcIJoo^Y}|z0^MZ z;Tzx}yJ2n+&^cF?j1K$Y?Bfe3*u)s+)^<1|O1qlYuP`XrON4yHLKnjF(WiBwZx~-8 zj9!P{V_a%YxvBXkk6bVN1H0zL-Li8e7AQi9koQ&Ha+UG0`{r=wb^(EK2ZnNw2r}CI@z2J0En{v;AE-S; zT6Hv}>A(f>9)UKC!Ch#%Zg+=6Dvm)Pmh{cy=V#yZB2%;SGX+Z`YO3m(o$U6h2jN_$ z?jJcUcX-FTcLd3>tqxXg0hEs$D67DP_n;mx$7C#95bFys(_;LA%%LsFBQ_mNLs6vQ zRy<*HPeNArDXjB0)b#i)jzIqbj4L|+yN3l<&k*>7t|%ouWXdSeR~#1G_zA8dcNhLM zYx`5q9Is|Y=BCF1bj;DHjff0citDW8^I#u(NBjUbl_m(SRUp!cEU*GDvj;I^tY3pw z%2l-ft2Rj{w9`@F;PHoR!uJ14q_QmS{`^m7SMrQQ;!6pg#Tav7-xL9`%t*L;(tZse zJkqnz;wF;BI}0nu3KT65F5%TCP%ZZ1=yRa>LB(a9nAJ(~qWIJqY7l8giCuwjL;u(H zc7EylS|%u{`jJF=G-b|g}wdgU$~>upXt?VxSM$KFv6^`~%VF({g=6-kB+W z5W^%5RZdG%a#{XOFe6XMkVURixHf)uwuE(2cN97mq3>I)! z;5sZtaK(g7t4$l(RABP!lLYNg3Vxkd8!@AH-J^7_uwdy8Ukf)KZaZ zIhk#u$IB=GeYl-z?Fmimu=4Eu-1FCo0kCwgb?^-C!v2dlyG3;Boh;*LCGHm6_ zw`?l`xbb*8aH&*2_kmE3FbfQ23xUiC6R1YuR@z4m@`{nZl3Xy@lLtj5v;-7Oi`%U; zcza(a;C9J9MGTY|c=&pR!Cy0K!`L9_HvZUkftLdZOk^h*4e~%kQV5A&>6DWreck9M z%bvU0m^^=a5~Sc^?5lgL0_+hbXdeI`Q)DQa{BQD_`RRG)H8C95Zfe^x;wb>o4wgT0 ztj64$#dQYA9jYT}=qOlHy7_l?H6E1bNAC<`DNAkw~ z*FQTv7UPdK!-iUA?nPx@a#0{Ng;7LX@Y=P*X7(s)AP>YP?w?1LvWL~lZeJ2b|6w?1 ziLN67VeEO(ikDFJ3<9_-rnci_>41FzQ38@LAF%*)%?laJ!@;4gTSUj>K&H6Z@GhGk z!2Hau<=8#qB|$#1qkFy^bRmy3`B?X>`(y%2H#^rqm$JvQYMn&9cl(FrUrxg3i!CJH zz}E5La=_%_p5umJ<{hd}R_FofOTPpBrbP~HJ&uQ)3L-$MqS9B1%}c7#Q>uJvA(pwN zF|(-y?>Z&7yOr>-H(>cJ1}Ef^Tb~n9W+joNI$Pqx>U(API+8?y4B#EysTDS42U$-? 
zbyPKq=_U$7a1Bm=fI*PxA&ekwEnAk_m#Yj=ThEe!>${#U0cjF`PF(i z-nO_gLnk=HaB&*!a*an{8q58NykY8scZkyaXu`D50db_i5$tyrK5WD?Y)$KOf#<3X zr1>65JCu)ZIB}=DKfN|jExhDRH%{5yi8<-T5zhL7%>Ak75o=AMYsE;Jb+`&3oXAEY z?j&`r62rO-8egd|B+A?4j1XJ%52NYK69>6b9z<$Vq&(&J%Z=}fL!vv^7quy*^}ID z*InWq5D_aLMFAll==1d#1-Ym^gCvCq*C66ogdq9km8zICi_O8O386{7=tFfNfnRp% zY+2e^(?H?Vwr3YrL39H)no_RfXi2K%b1tWv02>3@ISVq&{PnrrH@UMLrWYYS#R;IO z+qVn(JYjn4do%-%Pa?k=uucANqgBb^?VqAjBglsAj;VvGC{EZ!f=?*5>|UJd^b`9Q zj?egK5ZrmX+jBb_zQ-5FZ`(lJz9*Uk8FjeqXImSHXbK&N?V#~k!(kZux?wO5dBg`P zy7h~tZ{X)_`0BV0UdIcRn*))#o6!sGL?Hn;LecMsMj!l{BG%ff}S&DToZ{2!TeT(5*1*TQ|-UH<@h5LwO8Pdg^X1yGQ z=ffIS{;anVVt-1C#C5kOUiCfhwQrx^gn?BK81MXTYI<>ufvb|vci9R&86wlrT5`QU z7J7IMC`&LxD@haKYGKoo)wE!8z*9`W7?0Z1OBY;6xvFc57u0aem>FqM)3yGCd6NyG zUNSCWs#%;ks}5w~j;&K5uRGyBC&OXc>VV@tl|jtC>*NX4?yM#S6$R1!mS)D;tOI3< zy2Q2-x*oa5yJg$K2_Qur!-qCVf(&kLx+8^~uXq**rVlv88`hwQa>M{icOxXB`do<1 zB_TIScd*1@9I$?S1-~MJ8xRRA(kv&fEqMDhdr}~0rWqdFpxTCdy)c2|*!d}L9*RR%XzA(IP7fXTg)j-_^W!((ULx*e4+wocCeJx)` zdnS#r@xk<`(Q+&m{6lRhU}@p|6W z{q;_)Vhah2^umcd_sg3m7!$W+1gtqXEbBhvrYTNA%6f?OKx`poowneInC~oM} zx#UO1N`Oh|#1Ri)>1K2N3)P$&j=?l3NB^jsTCZ^&q&h=W{v%Y36lgnS$TuSzV>em)vu;uvb*AN(uJnq*g#37Y7XMFJ4Pd-4doriReeLTe3Ow#HWRre+|v=i?`ZQgzkFD3-f9Z-p2?9y5<~u z%^=8?tICtk{3|=o!otp$<+_by{XuYPFq<3Dk3DihOrJpQ61*X z>udqC1Drrlg*}~5qg&0=boTt9%@IUC0uP32f@!P*rXm@Q(AMmOFU1u`x}`A8j;&j* z9o!Ret$A=n4$YU-r<_g-DMc#C-WoklsRf&!Ni*)O)b%vDxRRrsFgQ}RP{79F>^D>P z7%$sH-yM(j{{D|_6@{E6DHD z?HK<)F}B_I&lZ<)amZWKf57exByYcj8V9mA%y;}5eD>lD(hYC{Kz|KcrY!s?_`xUC z+^?Ml>cS$&>ULN0&&V#Rea#N{;e88PTb33yGK30*^!gR%GHPe6MJBVzjJq9yJCY|F zH;bsx2PF_u7vyigAtxxI#c*8Sl76rKmFxT*&)d+c1Bi&g_=QNzt*U9`%q~>Y5T9CP z-`=vdR%PY5WfW*EDJjPW2bMxqC^Wd+Oxhp#Z+iCl3$Bu9 z&>jHk+jyyIm#`n04@6feBNxI3WCl5!zF~f(9~Ig1nDb~O0!pk9JXZGyMlH84bwXG5 z6qlGo@-^i7C6p}Q@mL(2fVRz?&zW}-4oM95`~l#%pLiz=*#_RbB-1MlG~pt8HEmCo zMme5FG)ru=l5VDo*v?OKWqPP<{O=|^*7=dH3oR9-9En?T!Id2#MfE^gGxZJRMk59f z40tWk2~<7Y1YQSb#Onk1&TH_D=TJ*HVaXawZZ#(GBZ$9sEc5^- 
zEc~g~UMzv#HvoplDMCVr9=*BgSSKxifm!V7HI|wD{AX(u^|XH>V13?OaBX?NDLMmO z%NAjNR+irBXVcP#(I+AwW}V5AsX* zYoZpqlnQR3?P4Ij)A4rmsb4#M_%H(l@dR#rmyslBlHH76B~@taGGezbpRmb z%rjQv=MHdZQw_lVD&D>R8qanY_0jM;926#+ov~x4 zZU^Zw;NrA96R#RC>oiV^SwZ<=qIDxst%gEFiI||u)GMFk%fb;k4uJ$mYFYTy5&?Cn zNuuraPK>GX<>;0{71{Uhn4U;q)mrmWhA)!v*d_da&dwSo#J4pZGJAoon|S+a3uA<8_5 zL>LEur&2V~;f4qeWLCuEna+nDf$UK}Wh4)Q#+!BKsl&0k)usCQO3MHxq5)9Z+hP@) zyPEm;nTpA$dIUY*c6=AmxzW0TZjoM4VV&qr-e7)V0!uVllVL3BBgnmV1%53kfc=fWVrN8 z6Dm3orFHbm&ZIbam@tOk5|%sX`KZCf9g9P-;;=MzVnckT6S3AQU&5H30&V4EK9zMid=t zL}=sh?hUIRC6%#qolg?}6O z?PPYli{-*!l$F%2=kfBAYxND47_hY7=9WNCtz*qVlJ-qBH5wknI&q+rgywdd7KZ^O!$fF~%0^=S0YxIZw^;lIf6<-}O;M{*0gbjYVE z;Q+|b1IMI*wvLRl@R&IZC#{#BFKtF32&g;ANOENtZsv}=Y4R0P6$S|K`JQOuhEYin zmb)v^_%*_b=HeMlN+{`eR$pNBdM5njQ3b2=@Uv2w9hyc!8oM=(&0{^3jipc8{D%}X zC?uxxLA>K4w)Oo=?z7#JsvVXsN817Tb;*_52n3})q)c1UN05pk#)oycUfFKBIe1i; z{`IK7nbsP3(xW*E;57Vh|L1KI}Ik0$~U5fd{hp)eB<4!*-*Xvmd! zQQ!a?r9+rcfR)CX{J}iX5TuG;bQ%k@phzf(sXlY$8QVN;0t9%eY1Y$deeciqc*Ac` z(7=4?R@llp`>-Snacmh^(pc&aeW1$%6%2?flpx%8F6vpF%}};@!6-y}Q`PViLlKhD z(w?PM`S*6ENyo2l%APF#b|cmxhC^x2;;<*K!T@oKx#(_zYiu+b-_5>=apw&8(@0VL zfUm)a*aK{Z_+i?c3#*yUrWwefD$Yev7#jtXKRs4SfjgSs2#y~!emFo!6r&zKvlPhHy+-S*{L?dy4Y0^j8`Om8@&qPXSH z;0Y)F>j_gN4Jd{uf60aS)i`(yRe7DuulWWgJlj}CsvQZ-&b)Nn@e_UKn&rGF-qKpq6nl*x#zJP|7NClt)#{$3wf)Vn8VIq*Qc0Jakc zS|@2elDROxqZycBx6g&Gbt;3f0yU8u@#3@iE44>Hj6iO<2K2$5frSY@82V@9>{iyR z2?-(EK=I!%E$ZvL)cb;SWMReNK)kK3ul38;EwpifJ_V#xH3*2oQu;!$1k5DN1UP*b zKOvTGHkC6Obt-859;8(Rn#b`L@Cmb?=@skdE4HQp4*+Xbi0afM`<*a^gzUTLfm5Cmt4%4UmKbr)R7=YWisZ_cuxM79%PDa=7Kqxd6| ziJb=kDO4r4r5NizC{G`@iA_tf`9~S%*fQYIL!&e$i_-Vl9{B{ULPfC>pSR(c2xh6ZlPv@C^F2BrvSldVnT z5*)nZOyqnp-@4@5F?2oF6d8h*CBrxoFgk`G9hv@Lo~xb5pQYYUO z$5!^i$hC6_a4I^Tv%oE}$P=pxIZ;IV+$TbUx&iA%zo(W5e7S3j9ZOV<=^aM7di0`| z_0x+DTWHQ_K^|RZ0kjv&7~(`d+X9#D-cB_k;vul;cyLfizZ0d5$;*?Pgsha}DO8D4 zrPb8b&NZC?gcdV-Zzwtdqt)#K>rHJDI&0o_WaN(=ge_`_KO$5YPwi@)Z<1fPhJGQ!-GyK0lDa_&p+MWea1S zkD6iT5Wvi>KvbHMB{cFewjvV_&rU1c`gwm#(Dr=^4-Z?Z6(25OeQhY?nzP4`v7u+Q 
z!F*JylpU+QKq|Ub)utHiz!!_<`2#UPn*Fe800Ax1piwnhL9YqtG+WAj|v_ne+TY-J@j`7 zIyKj6f&%i*PGI7YFR+N$9@~TmK{<(#){ZBow@O)<{YMDZxzu>!8c1P?TEUFa2C@Ei zt;7XJHwP_`YEI}>&%~WUYiG3lzEZ>#hWwao+dYxbKvC|@MH{@?k$=e``z5YP1ZA+k zD!X1kpP5)Sh_9G~5`qCT181S6IE$7wtI*X&82lNlr3d`3NoYf$_Pnof`kMFLO@05& zqVm05Pv>h}>1%fle2Pr}!HFxE;zlyWjb!p9&SU`=%sfaArHy)E@@##Ys1U^F8ANIK1pfR{KL7Rpog>^WUlb8S@_|Fwea^B8(BN5e>PHEXm~2m1n8W zk-3Jz0NfpvolMBOMRHYKagg7@w;*eg8bfJe=(EuDF<*~R>fGxC>tHSxdh`u=oK%~> z#$HIlssi^}Jk%hRzJ zAXjauV0-ywV$<-g!SGaZT(G}4B!m_8l_s#h61*gt1#C}K=69X;5XBX6>-(LU1P${2 z{!9AyJ>B8FE%uLxB$S23fG7B4aPAVw8!6QQ3@G^Xn&EpEb`tk0n>#g=MivRg<|@}I zpNP);U6ou}&}@~8MH7hhrQ@)@>@Y(5RT*8rxw+buz6o>7HVoDK53~NH^`(by^7&I93*|WC6}{46_&T)B7n)&Zbe7fI z4L<(YWzyb_ceM?9{0x+TJ3uK8>nEtFCL#xlTDfsGRG5vheb0x;!bYJFH(Oi2_k&&d zjPXek381v$^wk>Urd|GZ@TlNw+%-?j!QcMl0o>gHzQQjidq)81%fb+g$yXf(9k9~) z+Gbq3Zvd5XaNb>zVezdUo76ilB<{I+e?ts~ub%te1Dry~}Q%RgyqBE8naR zsgJW)pWOIEE3xuLqfw_?xd6BSf^5H*2M|O+8+d#{49D*SxkIm^kZfRKnju;MNn(qP&fS2+ReCveMbgO3DTMJeUq8nn9a{HYw2 z0`Un7S%9L!#&HuzS;g_t2tEf~z?5@?-C{-ivnj`#!mCGHII5kIE^A2yY zQ0ueISz@NQzAWvK@Z|^&JBe?kZO8@tl?5Yz@}p3V@cMB=hzBWK45&T9gZw&afpqjI zN%KLl@rBesI9Ys!vb_zP*eoSE&L#JPY85b9&4|2gkzQv7ogUO_wKh5ER-At&Hh%SHxcY zvL*LiThcq0*%zkW0F66s?#4m{u4P0DOxq9c=74v;^~>zNwFqJ2NWo3i+P$j;&6>LaM|cqXj>(;h+sd>21dm zrHt_SWpjPD7h}NKiHm+^wmi(NXbngah8PJkmCA=L&SuEHM$fKY;6Db)dGg(_$&90O zIOF`Xq47a!q$m*}eto!LK{6Z2pB_qv|M^3iX>1ah<;iM-lhx3N)7GJ?<_ z^*bztPUsD!)HYC^#7{HLI$*sHVk^+750EO#iy?w)4KAQvt@mFc1mayB8>6agwY|i3 zw8SV?hWLImN(F1acs;{Grp6YmTPHM077Bb@-`m*Vt4^CdcyLSq;?7p|8{R{i)B*ZF0BJvv5;wV1MA8 zB?2X|RbnD=z;{m`PAssSz8IE&Tmqy(BD{7WUs+J+LpHMXRA`KYNR&P38s`ozn*z|Q z$m9_~XS67H7kS+elp4J%~oZR^#eZ-iYa3(q=*h&=L= zLSD2H!HGw+kA#I4_Uv~<^FY~G#5~yA+9r#c;cg_T33T_|rI732Yh7Uc+7BoNpwyKY zxatSP+Y?GQdH$EV^yzLmXqN!w(Du%+1!_9)@X3^AuQ3!-s0}pVvvIs&Z!FKmT~Bcp zh~2<#KxuSQ-@T4un-8E)q;4L?OAa4}A%Z6!cUcUEHdeV@!_-78ObCm2?yOBfBVA1G zB)nZ^I9ZvU8g4r0vHU_*xkK&mzn(rHzgV zwww!a^q^;DNvU<1xe5L-*aPACunAmZd!FQ} z=)QGiW_SJ=yk-YE25<$tnUv`9Mm>USB7 
zEU|iOcTn7%kuOLS4^|#Qx51-#*Gj>uAW@Ry73d&9*Ln^gmNY={Hq8~Awb*{@W{9Fl z_?833#5n0Lwb)Gr>JJ3KIRN=ZUuj&P9e_oRGf_mXiByH?Y8uY{qx}W6-3Mql$%k>M zPV|o1CpR&XE(=}i88GrXu^AGYo9vUQJEUkA?Z=1g)_(fWGu(~af&%aATvSGDtT`;P za*>)=qm@>F^Q; zk?UqetSvdZk6AEhL*dc2a~ouXRM^hxi3dg*j(o51u1#7s%Wmod;n3Y^qZFBO!jdIbCI!CpIXf2mI2bU;0JJAI|MJyzUe2I#5r&9;2qjn zjIa1JI*S{SrohqrNp6>1%C>%R*p~Y; z?Y>fV7o3tSZhTNnO!8E*+UIGVmRVcwK~8WZ!E^O=SDil?K}=raEEQ~9qj*>tft=z( zlCdySQTmoMz90zn<5L~fG$u{WM#Acn$hN9c%K3#+!!CDgG7pX(FhQ?SEI8zt#O+Yy zR9Dm}-mmd&v&Spiz8P%4T6Myb2F))czL(N^FLw&D}#)&Fd2=~fFXb1G2E0N0w_GdbcDU3EAmLS!}HZo0y! z%Lw#SadHb@idi=dMY?i^JHOP; z*bVI=8W%c${Hcbz?mghx-5aTF%El(2M93Pk(GYPMINB9iue^=xh64I2_zL|;2KyWz z5zs;e89P6;S~|!$_j&woz-gn62Z;ZkCTpIQk@|4gVpi_UByGpTNDI2GD=DP!|D;Qg z5wn5+S~#yoq9;$2VPdPa^|A7^(k*5IR($Na;@YBNO#d9K(28?7H-sPI=IV5c$^m@! zCcb|JUk{l}gV_%$-zUu}3djUj&DzTFj0p$UgW;z`MelT-g@eVl#H90;Y+m`+@B)>i z+p2}U514F~%_Kb54hY%KU;@&luAZKwc6g6Dv%+@%a^kg=UH#% zHVHQxkAa$idz89QUmA`H1zI-4kEiV%d~nRZtg6N1bqN=wCJPCl1akBap9x`gBo}y% zCV}DxF|S)>O-Nvwl2r{T9?HbybmVZ;rVfNBRc1ODYh6nI%zS<7`luiAf|0t`q5L3i z<-Soq8VYwirqp?ZySZgPgsuabor*3U(z?fs_Fo8pcvx!h)md2iwRyvZhKG6OB0l|D zSVP&>uY~YC+AZVssq9-+N|2n-Q$4ozp(_-^g3@XaJ*C~nx;K-W6Fen8y;8a;-BK(S z-v6w`iG6rI|K9J>^bP&?CgN-COG6pl*W02g6NMIw#pgt zZqxbsAJ8HjFnP6XLRL(y!rBsw6+y_;AgczrMVe1WEDs?~jxxnzOrzbZ2t#y1^RqU4 z&t}@f3N-Z+L|ViF;0MivQ;wh*oZYo&bVGf6SMStO($Q!6{V>T^U$S2&!2TjrUm=1k zO;J`_e}1Po;m&-+#&%aLp`9n`Vbp>8hnewBT8d|$y|V?alZQnKG{XWI+%y?xYN8}S zxF}%enR?65emWi%$uyAv>$K(U$l{L5pY{3A`j<95rzh7TdS)pVZe30Z{dYy}_1W7c zLMozx;t`?e|6|O9me^uY%w(S)0KKs@qhbmzsj;+77=S*>ZEK7Gx~$iDRoc&k4k|FhYAbfNy~I)*N=HR=e&=d-M*a0MOUSzxU_)OoBTpSCIm=(Wynv zb;C2Kg~Uk6v@EZ--o;~rWB{T%$t>^d>wBm8;kRwa^k5>o2&*I*GjZ7)lZuq6aiy4` z3hT$yt1H-YZK8NwnHhmgaFH^CbQCJsqB#1*1FWKWM(mK&Ods#s{jU+!j+8g3tVNTiC--ccAz+M1`)Bj(}ik$lEu# zy4s-ry>WH7+f{rj3>6ntooy~rS`?;Jdk`m3wk&OwR#3{_S5XzZAB%SY!P8a(l7FFD z$m~(cHS^&g!gvyVQpWh-<#NrQT!)GerRE=Vzx;jest@c4P?rc1&)G4)Y!5k0q=$#P zjw64*n%cB$^^x`6)oKlv#uILax<;vEuLtln5-)znVnLyj-_1QtKF>3&8izrDCNe!n9NTP{V{xNU_r 
zjmSah3q^R+(a@2gfmQ>73|Myb5hb^gVu3DiG}&3!wTrf2oO9_erYzD(^Dqu^UycyV z73YrB5;LBM>*FHuLdxDFi5(vqt?@q31fpDw4%`zIGq@JYU}d^u6c@wqIvIX^;YfM^8s=wSFQ zXU&>W==};qE?Cz2KP+Z!sk-zO*%TD(hdKE(YP=;Izsv)FWyZcmgy?!-qd01F=T-A{G>eb1-j-ljNCh%m)32vF<7H9E`=kp`j^h!}R3HK@_~N%XCs5nZA@7`k4nnEE5g{-J3*f zljGG<@NV=uuikwx2cha8$KPIGDD@q_@x#KZ5i~r@zLRiWt`L@%T6@T>94ZTK1WhE7~U(X+!DTRO~Q5?*SAS>C46s`+g5gxdcNF`^Tx!vbu zFFD%L>^Y$L;dCw$MQ+TVv8Jwm(~HJ4_uL<`7te{yyxy=m2QXbOCS;JV)3r@HE>;?e zLX!ZBh^|gwp^uHHANF{(hiwbmrYnWyzLltMgpDJzB6%?5FL6t=l%6Wj z#MlkDGm)MM1b%e)eoyO6qvV&O{Dy`s8SHxii39n_kp75P__rRVGJ4ZT4JTvW+%B_Y zuY(jT*d#*u()OjUnJb~cZo8QNZYb(74;{i13?e74I2TuWJF@#IFHQVWm0{c2StOu` zp|zVmO*Xj1?o~@x&7w%?b8v3gVX(KUe%J{e)%CWXoFkn}FFvc>dGT}^`Nsi| zQp<-E&Lw<%lR$Da8nv@A_pl5dTNNSZ1S-XAysEtfb%6FOA*uCo^)3Zkq>EL?H^C?} z$xjDF4mG~Yj>iFLd{fY6<1U}x-CGrKc(D8N_aV^m7Xfg2=#x*fkIH+ zM0uSUF>W&xRiK@y`e!d(T@uA3De$)c3?m-)M=VQY635Oyd|v!G@$lt_t9*W0;4p0i ze=kaZx_RcyRf(R5Ce2{i-^+gubq(`_khW+=FeV5ER0J!Gro5O*Lgr#a)Y<7xC zDa@J`eE9H0P-09OB8J$efwWtlN3wGJ%Lxd>aC!*=6?cZ>`|}}rMGrKTTAwzll(jTu zhxYFi_N5jd2R#11%`* zqsSXbO*V~GWOO|(nc&kJ+}HxqIBd$+K=imOLjTph8pua`Z^_^?D4y_7r>31^sT+2w zv}N55n<5tPp^rwct~cGyY`7Z@U8n(tfBC1XJ8rH!HZ^1BRa<=wROh;C&)>&MK+MWK51t}-i@y};H&{~xqo~@WQvq{YH zHevyA=#p-QdqAyW+EgO{uO92y;}a6p%pU$|Q9#gisXbs*jw2ufrIabq_tkWVBpoX& ztBRDSKZKA9NlU$GDUkN#0NYMK(mBGnQP+ER{LmzYJs&e?_Af#gSNLm3_l0glj?0_4 zwg~X+yP&bsgfbE$1uP+CVc|a!*S!aq3J~;-xrMLOaHvQ-Lk|&cQESBR!TUV1YYtTn z+qXPJNDM+lcfQ&6Mz{U<4*z^Oxl^l@v0-a$m)12dgX2C5GiAr&3H1909A6nzb@k_U zuKo_&-Mjj0Vo@WZty&9im3Kg*l}zL@;H1;A(m_Nz^!V0JGjd`Bnk2sb5wvpj*J{{1 zXtx1;`*nq`_L{kqrfz7EI+E)}qz!(#Cc05OEJB=%oj_Q-m9^uEB++~*E98tOewIdU zr1G4FNJ3}Qth>=%nz@3;(ABIpQ+I*Lo#}3Tbp${_$XAGnfla4kkppgQoFTAjv~CYk z+FJvUozDG;7uplVALX=|JY`<6eHqg4D6Jm+Se+AFyDi&#d-19-U-mH()Cm+Lux`qv z3N}*_)S6)^(=p_lVuoYF?VI6L)Kan-nblguZ4ELp-OxhX+=hsUNl{=TIPg@iXimSwNhT2LIR^HT4AlqpX3o0Xm6m~scU}!A; zd_qaQU4qCu5Mk;t(`QOrjxxDI+YClH2_w7~qsio@siBGC^|LI#f^ETn-H|_kdx80v zEvA%)zBe!C2mWA%Zi-`kBl9vO3uGLafo+X&&tehtZT&mv@nTcyrPNgF&=J`x%IVpK 
zNZSj0j+IX+Bs3HWKRHlAGNVrZ2IuMQ98WP7(@LU*Rm8+HonJbXWackk!2fE_kkwt& zal{62#PP{r-pvjPb0pcGyu~)LNx}>EavCok+XvpH38jtPTRv3fIgix`PfRIW^ah;$=N^5{6QH_44Vi4zSI{sW~Mpg%xF<7rjd^L}HX zSkV~-Q^b&XYnGS&>}x@C%1*L{W@|J{!GJktZn2dVV(7X6aClt`uyy3ksb^SOjBRRD zpXWIFA3bu!4*e;0S~SYpcGskb*V`O*r~?|Ib^XyARGw{<^|$<=mlB_&>kKIcrzzdk zAEL`cHPi_X9K$OPjjOzekYiQqHe62Fne9fJB7(@DM6F#vv;XDK5r)u70fiJ6IMmA=lp zsZEnX8lv6w0;>E6xyD=l76LAs{c|)rk6x8xRe-&auNc8mYrFTp(R~vA7wY@xtamX6 z_d5CB#ItI>Flb^qtcdKZT!&aO+TA-I*5>u&LbDqq^(PvtemiH)06V8dN&0IX`%avd z4m>*ZVkS6epd^NpzaO)pL!xPIOW4*Q*}W0k$7?1qoopH`OQfNJLjMCM)w$y zc5Hdd?NshITsv6_F94CIr>4y-Jw)9{fRZ`AmEai z`P4m8x&@Ncn^2#23ooocS=*JB;K}?KUT`LFLH!*B#wK{;WIw`1mwC|6jBgkBPPlBC z3n{~wBXrq7uF9kboEUMW%tB&HOda7JFD#z8!NfY{IFOq6pk0r7_bTL;+0jom34 zhaktLJ=1oONYp7WIP_ra*b=P!C21p^X0BIA;M zY3f};<_N9`E@oVZrhxG847{qESZ8y!5j=BPF=ehO9ki%47IyYcp6d(RVGOn~ZX zTg#mS$T$evm;4DNC&@wjD0bXU4cql*ratdVh62|IUoU<_AIMSK(U5HTs1aoHvx+5I zekn2JoZ=og;~06qK%cT!+d3p{rsan@E*&=~O+F(UI0aQ4@U+TxoS=}F)1_MWNB_Zsbsp7M*fW3WSvwgjC&GD0s1NzcLv*R`A;B>6R zue5TloDHh!toEbD>qFPE`{=xYPgEDcJrg1xv$@lagRH`|zKrmFZ>Tkb&k!P?vpB72 zno7QYw6mY#KiKF%r7iGrt=g^xPxy3$?G(AH!2+$ry5fdpu`uno5>uD&;?Pz<|{K}o5lTD;_;8;#nn zpO@$CLyqQY{4X-YnoSJDjByGsX`pwRv*muC;8JluGe{eG7$3}BqTEd)X$cU#B`$~% zdx8Kdw1&EXR94$=N&`{#3w_`$N2w<$b{v=?%7g`~87ZrMK6*haLXvZ|Cd@!bG6D$atO4BjP?`!x7eq17^c+a5ksC!fLr!aVmKa|uFxNGC3<)w z(*YC#zhOiF#%-X6UT)G<5AN%=+yt54AL*RY;@fW|>cyBs;0o2?lBgeU2QKrZPn2%- zdb#uBxEv6J9zX}nx!n;`4BdTaY7c#~gLnczX6;w&6Px&ZaTRVCTz(XoH4q|Zfla|~ z#z2PZH#{}U8{`OuJ#eD8$;RgD&e?%~pM`)W)NK3-M>1DTps9WKDr*c4T%qEMxlw2n z0m$MtnVH()wjV4t^0uBL;M1vaQLMb%y;l?>1eWji#_aZWXMr5rtY90C*Y{z*zobIy zr6$w(Z(PLJqfilSnD#0-Mpsq-$)We=`qpS=#3HzgZKSVMMC97@xca}^vk;dc)5%%( z8tAQ=ZZ6!IVvwde*>>4)uX@O0Tvq2n64v#52K%vOKTYrfsiTUA?|?>{W|PzC{1v_W zY*THzWV4%8!V{3yfpKev!{z7Y>@B&$c9vK-Z`RjU`eKF5^@10_<0D_o7Sxwt>aao{Z z*ERswb+>mGn6Fvfb1SthC)U)GJAI;k6SBuv0fLTn-`sxGDog?OqJ@^zBD0ez3Su3| z!!kSUV;)`z$F27n+AcSEfm|?=vXu7T-}$-+7yzJ`Np}aUJ(fKy;dzWZ9s#bwca|13 
zbj0AlLj(jHXLf{&1YazKLP%1QfgA)3h{T8O0aoK^wM4>ZAUSD`6QX$bWA;|m&*JG5 z*H-aiaxF5xb-(y;%R$^H<_Z3OagYW=97=C#s}I2`Y2ycEeXmWgT{6coT)> zyWS0r&Kp7#ZkEyHEN5!SnHm=rKBsUg@Peak!)Orj3@0$&K1wgK4(P>JzwmGg%=dWI zQNZ0mwpm?b@eP0^0G;KOM>;-}n*`b>P!~MqbJ8YJ)-ZcsZG{E`NcpO95}BTS&~Z7N zaN@0=yS(IlbWOaatXs>q#7lq#Bej{f?c&{!iHfqB^fXa$bmY#Dq~pi+A;)VdI8ZQP z&2I)od@YNQ6nErr^j#P$3g{#3v@52Y9}xr@4*i2%%?8_n_|!~a;AaSjIAkI;2~)t( z!_ACS-x$e|>KA?gR-kQ~UV$))lusfBsyu zwOaKD&b~whcbx((QljgLDd?tNd#sbJ}8l@ukZ3#3-ybxOT7{oHM5jeu+3q%CC{ff zI*L!;GgpLWP*n^w7h=#q|9M<=-Q{1~qHqbJa2?GLA4TYQCvXpzR1EGo%r0fq9h{Mb zz81n4(vneLz}mG_(QjSf#>#qiPF5bi{@48@a}SC4zodOKup@QedS<+s%}2gZ_qQ^q z&!VB3nBA2exHjIWW=_|{_@PYnLfHAK40RNIf2Nn-BsOO~Xtx0uNwK!-@z$6KkGs_b z{e5ow9#`lAJt`JR9YYn+9z660FJ3zW|GC~RsygBk4q`$g5g%|Oa_=hG58dh2=hlx-NaZS++z76 zaLZn@5o9r@@w37@>iCJ*$#%~W*#mnHL<~AORhafP-;|a_hgpUBQuS&`^gxyirZ-e? z^l?!1Ie-joym}U0^%bxB-l49>yixSgp8*lWATETQ(TKc!Iwd`_mD1K(T2S2q<|V)9 zoiSb+hVIAlCnt?3_fbEVLtF^oEuEcQab8!Mge6S5Q9@5Ti_@i+f)TM#mc_OyxQ^mU z1w2bh=|%&)gYqM&#`Z+<4EB&!8@swZ1o-hQw+C^5a+7|^jT2I%pNQYrRgZq(TlD)% zzg!|aXI<9RnV0q~V+a8v1XS2?W1-6sMo;i~+=zk1^)P#OCkLMBT0TknKfnyl3jF=>Dx{-}!;C||1OP-#tbV6d6_)8H&tLmH{1Y8CCfb8GWpZS* zo3(~RM-`UT>Od75xvqN@;G>;5b zmeKFa?(+1tG4zA!yigA`I2O52h*4A-&W@{tacGCj8I`DdaToUSfIR!5kFStm8Uxe} z-q+A=@~LR=QFl9rM#&e}@<=5CZPGOHZGA^0xMXB3$B&Lo0v(#$x=`yoDgh9rHi#zX!$(ta%dp7iY*joc!!9 zDAN-h3bcUAO+%_3aU(5PdfjIKDOLKZGYM1aOY~9e8e7+y+#l-(QAl=VlABtCTP+A$ zY0&2Eq*FDo(OBy!7!mRJNpXFWZ7rey96?8DmqCRy74cS)UYO}hl;rWa85an;0d#~W z#6~qKE>YGk*iKC9BSfTgPxgfGGj#2O*uDcUBr?E&Kc=LQ}bg6-f(5Pq*Wdskfd z6PucBXgHwVsPl16%i&4?(4DNdTNsdT2d-l~G;Bn{ac!O=%+O^3?fGO+TuX1On0S*m z%vC9v?Gr(blH#m;$?3yM2_DSYN%s&IBJdGv)H78MVvYwh2Da->^4QmfMtNunu&pjr z`(wimnRiC~dvUvDNFPSS!_q#gZK%KieJeCE`U5VhrO_X7rvwo+ED?kDX+$Ol&9Moe z*8!$ z_nHN4!5^5~Ef5sx2w~|s`DUQ9;=pC@PhwcP2qpq%nuDqvQ}daYPL1&T|C~K8l&wZO zdjdFn%jxW;WKkFk-ge92B@Kx=)gv51{%T#%CyKruZ@iGjM%5fZ(=a%M;Fj$_?vaQV z8}PKg4{rzHQbel1-H95l7C*{%1#`|LAr&8(Kg=1-ed22?*L-YjcTRfR?6#^aX%ciB 
z`#v;86FwJ!I0LzDPJ-OOG(RrG1L<=@!WPip6fB{Rg!6oPvf2@T0-XV&`reS?AXPUt zT}YYCILOa`w0cloBtGjduHCS|Afm#%8vI_{Yg*A<6a2Z#*m$y>BSp)l`k)n|HU#at z3&v|?w4nc^VwQnz#K=**Zue-KQ{!N~&|y6$h({<5tsUeDe7JYCCqrpQx*0q-$U{SK z9~q2(l5ag9+RC$#Z6s{I_6Oi*vc2zW$!nBLQwmk2y?h|ZG`??MC9t+}MwR!Mx0EhB z9vyUW#vYrt!a($$?D=a1M05G#-}X!7ji9iU(&v&mzVx^s9oy-WFBm zmjGsHa|4pZmSgx?1t7fgK8%M4|H7J_wtNWRKfccg_&J#h(dhGNUWwJ5=8Hf=qG%w3 zpD=z>{|~md2mbJI9K=3w0-<3b|5@+K9t47;Lw~cA`@s5%1@X zrUVe8^to`K@SV#TmBP*ud;-yi??Miu%cTYs=D+aw-8{+TV z5TZ=HleX>Gf9`5x_|hQji(34Iq2rR4aH3+FyZpF%3Mwkeh2(oiV=+Y~Dg|>pdx!D_ zCf)Cem)!2MSKr?0QIhefdtfb+1Mxi>1KJ=69^cbBe2W;x_%1$M1uoik%-X4W!30ZL zc7e&i0SkQT44I*cMMIIZqkXhaXgEB;7rU{*UlTaM1oT>5e=~KqY0R%f_)N=D^eHqw zuGc$Bby8i+VW&$sT1ovza~aq2=S-;m6GrlfS0;=&WOXBrje8~B6V zX@>hgHpjy#;tbs5&D}g;S&5{Fg6Y;7<=@F%&uzjkb|KbF(7!^K!h;S>yMoBvt5=-U zY)L&%qRHT^%h1zXIGiqpv1%30Wbjn(*`_P)%3EygziYuQf&Ih>QLSZ{hw$a{Shu{ioHWlZ=(=$(!Lz43TPd``-tTacx{t0%eKhLg+op@#=w)rhrz$@i0N`?`=7qxwEN+RoNo6NM()Q z;PSnr-Gf%w#~dy`+gUnAS@Ha7bd{h@NP?gn%AJIj&NQw<9`yta8>QgEw13mDb-kNM z(ZBjEV}I9L{{xjqCToRVYb;D_G#3AZC(OR}ERsh;{|^(Wd=V^HhQ=T>aayeS2+%*o zYVtyzDPI?F$xPNRz^5hf-)C@i@a1e>un;@BehO2Rb~Px{O3iJrt~!^uJ732l2jJr6 zD5rud!=g5`?~@ks?r92R&43+DcIU@HfXHx=)0@$!bP%|B}2l~q7;<*W{ceUjf`BMFai ze6Iv32vLv=WNYO3!R34B=*69VNrkl>|dJB@lLN+OKmnOM>ET z9$;X7ba6a+W4YB=y9T_2>nLvq>~0|q6{uL;d7{yaNpcLHsBwAbkaxZnEcw`pQDb0R zR#I|&rNoJmL`apMKpu5P=W$*#@-v_+!KU2v+w?f2d7?wX;qbb>%7p*p$G5Jfv{TnW zIykpNF>!Ur@Zd%QoN@7XoTp#J!&^6MBNgU0e-1V33n*OZtkxd*J?9 zf^mlJ`Zv{Xe|VidzkM#(%RW1MT;_D?`j+_@T~7`^wG0rO_m7lmf~oH%``LXauOei* zBexi?fU>v{x$UtW4KW&4@VmgNnAt7Gyg|enwNm@>=ZSV7Ei}45 z>HU=LO^MH0^L-ku5%~>!MXAZ{tU3J%<~Dr@Al-I_De>O!exCjSLuin&qoG6#7&-&M z8ex%<^6T`B{1v&zMd|3k2S2`Y%6uf{|DjrPoDMHVXNUMnMGUy?uwqoG@mr~EKl%(| zu3!dSjQ3W-Ct<)1NQHJWO7<0JAIeYfKkyM>poggf0ZrUO_CEn*?f zZubJ0Zd99#@H7P27je#Zm00k&OK%u22Z$mYM7Poz>wDE+k<=p#6$TEyP_CDdIQ|wL z6)SFT;8oOV1h{{!s>L(ejw_T&!s+$ymOKsAfZDner!5CCEAt2`u!cSSMV)Z{+R_9< z-M18AhIQ2uZKxNk23&@J{cDF}gAbv4>9x>eEIRwiMtG$a)WfemBKgXBpj9 
zdjOPct*}Uq)|u$mUeZI8Ew=Hyv=x!e*UgTz`1%AoT4&vA>K{0he10?A@EILZ^1^`S z0V(q|+|gW;1N@EZ}L(qZ3hYBm^my-IePYj$BGHOKg)RGjLnyw`YpF9Hf+gLj)s z8~)TRblX@0QHVB*mQ~_{slDSnDt=&aw_U=J@83o1!}si#ZBf_m1V^cgK*J&{H<+ zIqp*Q*XaN>A9q8LFnL^Qce<&$4YMvm+@c4*xri&LuT5(D!#Ga=Da8N->%k~Z)5_!c z#A3vQ(zYCCHYZ!;(wi^%fVF3xP-;!SyGj-BB()7-qL{e-_87M7gB2wyG2f2w3)s?` z$N&NiVD9rp`X32FrtW(Ot51JF`Ry=6gIEDDm(jqJF0RZD@@ahg3>f@W9VuLM^iWX8 z5SS^ksfvG@o`OF6S6|M(H2vb&2Uhv1?pb`=SQH~=g4*tcYff?(ZC%arv+e|#k7lt0 zxBShk9;1d*gVHT%cCtuG`r+cv*T^E8*!S@M-V5O#KrAT@V$wblXmS4k+V}Ru7=>Y| ztt;Fs-OA`6$skR@(tar1m9`eM0{IYA;cTqD-c>MdzBTPm;b2|r);Bsvs%U)D*r+Hb zhF0RPoj_2Om>2MTN9=n1a_ z%;>WuB$$C0)pqebOi!>zQ21^yC$U?_{`pPWb2K$)LWlu+koIn5^a$W<|Bcdiq?1QH z0v56r7-u7xt>fHb08Ke1km1eawY>W1 z%{J6H3gv;kquH@R_Naa+P=B<|&*7v5s@~?TPJIdPdcZ-S$iIsor@K9`kr$*f-k9kK zA;2}GP`7)cP}TPzfT0O%t~+ujH=LC;?zynG1h=G!ec-x@sH5xPJyYK_Q6`TkX7NDn zXuYkKJNIjvU4oDC9r_OxRyOV}=Vsm9e)P?cvDP0qV1boaVh%j_7GN}?rEopWAJmoW zcn|Wq41;w>oZUC%($d~wXp5GQd`H!=I?bcICnc#-MtPot*+ zglFL*>$1}t__+o6B{@hK`?~n!D&z8o9L9k6Dgy&@T4f+cvZ)n*xB$cwVyEXY&LJf- zp`EwK(E+D^9dGll;MH+Tl@T)F{B6=USsRz6Z2NHtVU{8F-SeJi zdErt54GT=C^IUEhwB}3#azi3QLM!m?HeyFKe)k7G`#bh6TD4{k$8oo#QUc>H6sW1& z1KGp)bwg%%9weI)ZG(Y}sNDNi&wg4PKvHOcMcb+k>`C!MMq*42a&J3Jn+<*8r6ju# zf?Do0MDSM`kpW6|I5A%18oGAWcR+G0A;G;pX|VVxE!jk)f_38Mm*%G`juzNg3nLnF zXTH5P{%-~FsI`ONV@)3ig_E3a+59*~AeQL=>F$ALiyj+h4d@;xsdjF(c7L&+!rZCa z;kuBIwwZb!+G=LmI-KTc?Ct`E{032p*Y7-eSCuqx0UoXV)tn%_VnXiq%clK(sA-Ww zfO~a(3i_$lpm758A|-mtzrljA701dKu5tvR zGeNBJKhylwirrD_g^t%@&c&V{%AO*jAJK!@8kFICYOFD4&bi1UanUR}eV?q{vaO-@PJy=OGHxpL}_f=?5-0G^udsZYxeN)(V)VjhIZP z4CaH?Lk&0u(NR&_s7yQRGU5k@rca*^I^?>0w>iQ>%g{eJf=L5q?Eq8;2rs6zskaRK zv*6xCy};vt>20KtwCH!aqbx|!z!Ppq$64fAM8yL5p6C2=j5UXKZ=IV?0tUwj%CtD?J3wR~ zJ1*cqvqB+cDcXSph7;zq-tQjq)xvHUi`$NZxJlfv9&lRPi6+?=I|!+Vqs8ucR|W2d z*ZXdn`%c>0=joDlyxb4Q$yyT2{nzk*=N!gc z^Bc{;K}spQgM9gT_DmV^=FC^Zv+*Cv;e{c)e_fCFN>jtImbg?F-(THBQ$#TfEx;_q zDTrehxGhL_9+?FTzkVdyf!Hw-4Hnj##M-V|==tN~{;1I!-GyiX;J>#{o`{LjMWc$m 
zrmcX5(YB}HV<3T10Oqgd0Jxr)Y>xO{8_7no$dKglax#+iGy^wnb*%*qQ_}7%;20F6 zT%#U5PB&uCL}t!u3Lt{KUUpY)wCtk4uWYm6KWiAcm`<4d+VLxj5KFWQWDu62NZ?bV zhQ%Bh>)z5Rys>JMHePplB!q!2-;NRG%b8;V)ipfv6x_pUkQ&00l|e&EsBovwH*}d8 zLB!|G38yO;c}x%;*zG>$Hv@9~hqOo$>c{D~gCFR0Y8YXa8Wuljr#>!$;Q^#J*-piO zEDSh*-lo`wTPFFycQ0X;9ZYZS@2D%455Q!37Cj|Z(YoC1Ee?Y@Hr(mZLPbYSTMM`? zLc^_-DPA9brR$oWEe;djqGFzWBHyJZZv zl#FwI;78uVze#EojIZ_EPj2GBmRq-G8UOsK9-T7}+G+HM0dmEcfFgnoe{qET>a*09 ziUx(px8tk(qSJLud!f!_q7)o%;gvNu&1e+rGOpNbI3&5S<3;vH zidLWF=5f@GkzMg~uy%2LEOUlWGDDF!BO7oMuF>(FzZuN2w}{XHpS}d>S2K1~Xh_dR z8Gr zMCX5!?aqC~i}QIN-g)FN%PYhk%*?m}@!mn&K8$D0vT>3ZTj|Fq$h`5yaSGHvo7{K| zrJfdOKnBRdsO^Clq=IenSKqLS7)l;uRNCXW##LRUPRkgJ1(8g^!m7Hwt1i#UUr0!( z98S`mSe=P(`(b}vymSi^1D>2)#ov85TjpAiQ64acZNTFs8a@kzD#fs36Y zKw+>EpEYZiWP*nq(fvdX|1?$plr++f8KzPt?t;QhDtw);XIhWr(9`)Y{LwguE!}mk z(+^emg8qIexWTFjAlNKNXLTGNuz1{`BuvyDYf}r0oau?^$O zezgDe%l=336i_xi6Pxw5IfW@;BPk}LP;$;3u8NVe!sdy@yrGa=PPiE_6*^Wl!ZSb5=E!le zhf>P{Z2)XDGBG=fcN6|IEu}j*zMSzAmL$N%;0y4l+i&i+;AfLr z{JnE%!)A9uWFoKzjVbmjaRH~E=v3m9QoG?2@}3?s;rx{YCEK(6-m3!G7M-z36bK-(}fWP8E0i@$A_%x7{tOy-HcV#GT>(9ytf+ zM}ntOo>I9ugdynl(JArAxSurt7oGB}sW;SFvLH7#k3BqVetFBpUCFZ=XJP^~Pkw@c z%^|`aXVa!k3cZJ5w#3KtJ#NFA|G5pl*JXi6Yep!_KEl4EW{?o`7;mTVamEe2LohT( zBJhkY-~Rh3Dp^3#!c$E{ItmQl-&%IQt4V0AHH_Q#Uv{nK^ZlU?^Qta1wu?&B|H1q6 z|HAubqI*X33&4p%V=B8l-U>9601?O#f!$ky1J)Izj}D;i%jryTqTvi^^Ft-(*PlJf zrGQ}vHl%wMqg(HI0nhivo=0>3u#s!{m~}qYT7-QQp#}Pbb+$7$!2LhQ-aMY`yn7!< zL{yZmq@sjU)~rQH3n8UVyRsy*y(>gQRJ2&jnypPmiXuWNp=@pThTnh$RtVNnyAUT?gU2RE&@GVizBZzsQK1I=CjtsI0 zb7O^zcF3DIWjeO#LhPZ7Rld(sL5^Z5d9mCE0)@~Cb^L%*M9%v;D!+L7QXN&4_{wBH zL!tqc>6pXyHr4lH8`R}Hpg4w6&t{;L&w#)rv!}dK=>G@^@D3P1l;4MKec$%HnWkaw zV>sYjaB3_uKvo~FJ?(`p4ZdIpC}kPrgTt2N7Hc~TSXvk$0X9*mJtJ)<2e1&oa#6*#(7FK)PerVNY1%S&4VgU@QSvlMGOOQRaR~wm z?oasRNFcGOL+smNKV-73W06B131XdR!}>Qg{sw+e**bQ_L!sJy4>tmZ7NYI(ngs%D zY@l!t^5UZA3Q!2k;hqeMiLuLtI`0;2L)+EW0h{dle=ERJVX-H8pUa$CD_&p=Iq#+ zn|Nfva`W}|epnUDL%`9Qd({aeG0-*C2~;0F 
zpkH>?x=9tgLBPt7z;L1DvW;r%Sh{K+W5rzyBmy8aDKDy%1NhXcF_hqt^^qaUFLufAml~CO%Cl082DJN*dSlUm*)~EVbLwz_%ezqvFuuz~ftw zTBz8O;Uy`}Xkk~+`!3!85QewCpy&Wc(mOPD6UI>=f@z9kQo+EcP>iv-5!;e4Fo?nm zk?fElrll1-{{8un%s0tO^E+19>~B5$lNd~nD4-6F{189Wx(s7i# z#t4E|AP9Qx8(?Ykxwy|IVR1UFk)8l?24QYBc2SGu`Wdna>sn2Cd7r4nlsVt`OAHukq*xg=s3#uY6f{VjRF`#@I$hGFumu0DbSi-L^f5C_KfdW@o? z;k{MMd4b3E#kZZUBcf+8fhgKTv-Qb8hwNAV6&W<@KEcw{dE`Nebc8%nY=~$CqpkIK z_N;(%qC81DhHFJ)RnHK*yZ-WU3dyzlR(&(4eW4*O|1J_~QxSi9X@!_#$F!#0qK_z*|gA1sQ zX3tbcBM-oMMjdBT7CUnDXpi$e&Mg~o{7Ojg_3rw$Sn?)U*AioBTqNKV<25Bi`R2IQ z7-K0&xi?UK?STfo`b_B)92;;Fvk(D@1(_1YZE^L<==GH-7Kr%1Xn@FXPTNA|BlE=e z5k%kSc3D*7J=(U2w=dA$c%D2ML*ij%1$8zu{mkr6i4wXj(tJdafNXw$U|DoQa0 zAOwJxhgMz+p+DH}BWe>od0rf>%0p%e3)vQyOG{czO;Iq1V8Bi2?YOW-+b*Dm$y?3w zjnBjAD9~?pp|{pYM;k$_g~V&3FVifbWcoiaAfFNB%gQp>xL>D)L8~LcZ%p&^S$`g| z4BOBekc^LZVAUVX)$hg$4>{c{_I)P%;0#_rM{F|JA*Uf|*IrD&7AP1xniH_B12VkL zA-4#|OMGdER?Pjgs76T}_GI1uy2Ff7NlC?=lSR3nYL3&%;%bjw!~55C%oQvwB4*4r zWfRTMpkHf9PlwNk8<_9t@sB24uzAQTqIqO7sgAWxT8oAo8pSBk5BMzuCCO@AtY$)p z^89pI8pOJ`P9XQAPEBTu2PCR!O*8;IR47D($T=PRM6jAB?EkF5F%$h{D^A?PR|==t zN#aAJukB+e#&T>JtQ8qdLx9F{DW%sXcqCbfmPq#~BCI1x5>2Knv^c~)FpMHi-d>$08j25r zi(yE|tiA*jt6+9Z*9P44h1d0ntAF%Ivs=f^m4UM+vGX!*9b(Im7lmmW^ zCWTZ($IU+Hp<9iZa2g2`#YNB`)cLO@j;t?6qZ9iM>ddl`62S^Gi2wQjbZz#M3Ja4a zyfWA#XH_SyJ5G5;+w8NhvbYrEGHy}K&!I5UfoUd$Ld=yM7(jw`c z)w~)((Z(3Ym+lEs;8-+Gfn$RDo40v*`)SpYCHqifKvjkH%hAFO#iMf*(g)hsvFpGwGdacmfnRC5;(&}=au?LU%BwF8&! 
zb9m5W@#Xa5Ks``T@=^sHrx2uRJk5|baHeTl=$9`sBF`s=?>sUexBLEDq4lL4S$z93 z938EFcn$VbwzN>vuBeF_IU;w9xnsFKq-oPkw;Sc0*k{wET;kU_Cety3OsCOblFpUi z*txV!7W7pQDayj3#(Wp`jsl76Av4ya~Ey4bGJ4A>SQfUvH7LGWp9bKVZvQ z{0IKep$ecwwO$nZX%AB1OHUTyXJ>b|!&c}m@B~_5;7M9M`x?ox&)Zx7?)ac4pMVkz zEDxUIPJ%VKASxeEsR~wF%|s?Z9M$+QD>2oOVFiLSVJDzB<6re{{#boG_*C{h(VFv_`S@7~L`TaZOhm;YpLym?8GI3<(n=qJUfVX;m7LD_JbeUBNS zaDkaJaku*V5DtDTH!`y#1kygwsKqd4>pfVWZ}q-lRu5suV!B*eC{P=v2x{1s&v*}4 z3NPAfA-F!0VeRR#GDEeeVO5OkL#3V5JhbA@tLX!J#GOUVgM~Y?KVNQm4Y-D^DUp@K zD)#bPa?NFbeXF6e@7+e0Oc%tV8u-*&^BNf#w4Phivm1F5=G#`!gRDOZE|drN9Q*VI zR+oK%62VKeG2?$T{$cYjyYdGt=(cEH`eh)=A=noc2kT%en`n&5102Q@CoJBaSpBVO zGvEI|VTrDBgy?qrACr~MI8PootbvZB6mTmbJ1qN!_t#+zyNsszu5HaZU4A%Uj&bR#;diG^X6vRS-dhwpvYY+#(%B2IgVYJ(--&;NOVeXtQ&3BJ|xp}nmm%=NFpXy z^>_qCASayTbfv7vX1*@s+Kl$?>nRZJKwc?E-zxsgTiodCZ-<$&-Cv`hYQ~BD4H(UNs5boFS5P-> zj-hi4-Y7yRL6wdKbn__=?m0Bj6Ri_SF-v3-mwje5c@DskH^|``RNAia>`FP-5WOm` z$)s+%4c_uDRXIP+A2($cL@gSc&xl?|TKsp`A(mhi?Z$aPI7s+Uk(zhN{0R7>V!i1I zFX7uDTiAX+l%9;Q(~onuvTPy`1qT6tU}e)U08jqKbyKNWk3RkQx5a&U%uKTJ2W>@1 z$ueR}`y^noRKakIJ3+%ife!eCvDVCN+8g)!g=b1#YB8BlLc%#!SlH#ofgzk@`r+$9 zoW?Rty&@;_<>p0wIwpMNoxEZN@t)N9rX_<}Hk)!zD5eR=M;p>FI9Cl$8 z8-fP~Blnb!Rlh7Ou?#oq-c)-jLd-e(%A3)N84}humEV(>r{4{q*r$!w_?Ljdq#Tty zVBG5zUpWFgi?Gk}%D=M>xI4yJUa&`XaJguVS9^CxiN<=2P7pbgofdtz=qn*+OwZ{% zlB?6atlSy)GSsM}Kau@>dG0`W9y)ir%a1z;10p2fen<^+_i;)+{gV#dQbC;fLvpdR zovB6r>V}?mKqJy8zLD`^4`n9AD*QXMl4HdNrrYUEA9M+zwoe_f3J^-VqRe)c9zN4fW z7T_r}(!pkLw{=v@U%;)PRE2z?n+4aKJeHBXltY3|O;AdNxC}mQ@~Fa0 zJ%$=A#YPY92s|9jQ8r014}gDM(Jp!OuVhe8`2Gb><}k9*ssJf^omw3=X}Y2>SYfP@ zE7mY0B&DQ?W6VOwyK}hB8ZU1DTI!I=G;(o&n!fgvCr_NoR0RxZ*#}*U__HlWDmwb_ zK*mn!1JKfMxNVX6dH>MS+R&&cnt{cSFn2uz{!Ubjj5UO^j`9LMy^Ps!({0qygoj_S@P6uwjSfK%!Ez2gd5nbd{jS&jf9b3?44@ zLzV@5o@Mf2@2c3Q3g+2e6XJ zy%RAeuhc6!|LkrWDWZwy4oDfTSI3rjTyR(Ux+|+amWPoV-rSg8)48!H%lGf?`6-{N zZ(cX+o?3r=y3;72qk|t__+k3mTFQaO0@E74QEm`PBxPk~lRnAXrmtM5P&&zr2)YzB zjiu=+Krt{0YPRn`29{Ox$4vX|GFz`NOBl!ZUT8)i3!1W4r)HLN7{F&>w`QPiRUJm+ 
zF`!n;I!Qb&o3@j8RTl?8g&<&siV&DBs@||Ti$gtK*XRVWv zq4wLQ#1X-$D8lt^PsVjz<}})I>^y8^0mj89@r@oHIUCC?D$dSNIs#UhcfMgvmU%@f zu7NE`PRVF?Upi~ShNOH_EJ3hMBZFG^-Iv(Xf$=>5I7W8gc4aB-Eiy!3|6lAIdnk<~ZH6 zFb1eji9XaF-B~@FZqs`4mm+TPupUUJKN&-RSi_HV_e{b-LsSDOUO6YlZVaNI<~RbS z$g~MPzam<{y6AS|8wfN!k>D+AX> za_3CNKo>wh_kqwwC|OS?02h!8r(&hI`VXe)wJTS8!94AU>&!NzW{x5|YhYA-r#P*m zNw5k^QPll@|38caxU50oSWC|kK@+%ZEom8PW^bHj&3G8T?Pveg7AyWuYjajd>)$(H zac+apS{M0s!xjK@2f|K{+vJ{)x5Cy$xx_;{k8-r&t_@PfagjN?uR#JB4FWAawF3p0 ziEr^5&~Ds3whs)aKAAXbHBdC%oYV7lD5Z`QW*k%{fyn9{f7HH7-hAK-3?xB2pItpP z0(;VZbZ?Od%Ak|<{H_%Xf5nL0<2J?zgi`x#vyML6=#;_O|T zZK-)Mcnm{q1P(o*<4b6sSafPEt1s)K3%>t8@ic{Fk|zSiqQK#xCe|%Z^E#>N6kU26 zw|}kcu>eK_e*`O>djdnKq>!gQx^(Kdb8FOg;1G_PydL~eh!(qqA~UgtA=I0bJ)pGE zDSs6-P`dtFvwaIZ%W!{|V&_Ba?fEsMBiP5?0XDJo$cK5#y(_u?AMZ%)E!;>RpP#Q9 zMlB}NA9(rFyWs3OQ`8=0^iAhVf#-u_Os0`u@%ld8&#q4st*An*2l(z!&_h(B%X^CE zpJmcy&VUIm=eR#@^y2MfY4iTae^q-{?K}Y{SBE?Gd|CtV;BN za{wM@E3hfDeoVwl2UkeFREm5B9Lj)WO_Sfv@?)^_7@D|yU$T22e-;OxbO)O=AdpY+ z9C>oOpM>LT*p?dE4M{R+SkQ136pCI*())we>MMg1IcD5w)kLSL{r1qn^mq0TM-JQG?RI<|lSy4YJ{b0yfyonY<-oC0l&r$B%;z@Hj@)}6v_G#s ziz042AQG1WU{1eEE9y`N#yfm8Lau{XBGFDLViiHk^!@v9DDTfzm(+@@T#)2bRyV2K zEp4_fs@)!4lk&xiU{%+;-+$5@tz z%p*m3=Tiw~Fu18!L*UX5qV%K%M@nrp(YhjJ2kTXGJ!c8j`Yw&}2~=FNN5S(1?mveG z|G}iKCywqa%w4w@3m21wN>0`-HIn%tnUfhT9cAfiiJRptg_2CWH`(X$p-w1Sm{z8d z?~o)9xyzW<*_VNjz5`&&f?Hl~)BP&C1JjUT6NXH=HZ9|(u!gi<} z@IqgQ$Nnw1IYWxG+`AOe#FmuxqcKBAN^U3Q5(m=j{1+;5PYYTmVtBT|;8e|+J8AN@ z2)>@bd~dwjJn+S^*0d5V5aKZPYNk}ho;T6&lqSKGtm>EY^}I}m``({RMCjOjVvQQ& z){`ln@CpP2R7uxx_W%_m}%i-$=|a&d|FgOJ$!OAFiC7#RUM%ydM; zV|mNzGvhwv(e{39i*b#t$5SK_j#4~t9ZDa?dBks3$6e2zE61#dztpBO`Lpk6nueR5 zeM0z5P~(3itsQr?8XX>l3Ln0>sWs1j`r@i+-@%IA?eow`(SalPvaSj?ahYLc)N-GbSr_0cnqCr6yDq-IU~j>PmXRYQygAUG&N z#SPKwBxD$S+6Jlivov8V2}krefMO;1p)1ftQv9OL$i<_Qg)#C&x#o|?lgHkHlY>h@ z`w`^!T|z;1D41MPSEj_HE)OnwE+&$_>G9 z#LU#b)vRzjV5~cuL5mJu*`}|DR>@Ji5wW2ImkeXC}=Sr5M8j<#E@%On$TOw zGH$Xwl(4eN6fPC2S=fyq`wAYgPNh^A$G0H$HA9Hr@_HY0Wyim|pB(f)@;A)pi@=!n 
zm;k@eIzwp@a6m-+!5m1(y>&CTd9l-Sri7qb^!@(hBOdrhHEUwxusa~!d8Wpzt*dn4 zvwHoc zyPG1>;4gXUi|W-W=o|V`v7%AHh2F)6PXzY@gIl#y#Jb08PdsWt1{x>=l@JH*o_D?M z;~6D&m>j;$hTQmX&9w72qA^$%$>;u2^$tP#wG@ z*2Fh{uyp5S1<_t554MM$OM*t~VLU*%CM+CYK=bHj$6Hd?40768lyH5c9P=}OiM7D_ z{;PTSE7^@!h}QQap9-4fBSDlz$p(6CmH`;2-viwCNR&-Bfu#7?Jvuhfhn;gago|S# zui)F8EVJAGCCxns!mSrjniK!qGzZjgmVgN>mMiG6EZlP2i<|-9N2wY$g3zu;2~H-r z_geq(tifUduIv`d{?!7dtRA08m>ynkw*L3*FsC`5eE~)$X@I zldxvZB_O#h!0fktwH#>dhu;`oWVN^rR#!(iu+QVWuUZ-9uA=Vm6M}KH)WyrWGT}F@ zW{h+jCSn*Oo~qN@^&_%|jP}z_L?=yCH?Mx@oP`c`K9PQagW??s!~QcC&iwK~V`FJR_h*8|h>R}$h&12vs=~R~1y&_C~WSLx9a|TwE zm}N_{6F7WAoCkcXEyH&_f$OJNS=$bCZpkkH+t?9qBB*a@~{naW!%CVvJ!nEufS0&19!!PFqCLS3DeQvmcBF8i|jfgF|VLko&Vy< zr72l_Gokyp#%e~R0zNJT;efO6i@m_P6q}}tAZD?UybCp&-6ymG;x0_*7oJKsfn9l` zOUGPd?tj*`_b|}IG?Y2sEPdH*pBPU6HGncJ-t7(4ywLI15q#$*WGcnow819B9Lqs+ z(Ys>rs5S+@Cawwp>47{EA_3kT+e)XuPGQI1Pg7d54Zrz4p>!!a0o@%rpptG!2&VS? zsQc*}hn_9U6mW~zF8NFjaVSr$-qWly+(6!D(eVrb_2Z8__R$}fzc3ab1Sgi-O_D3) zyz6H4G)f}WSfSF!C#3bM=JMMPSl&$jr&XPJLrWMVSfA4Bm=W=8{WMAf2SSxlD;)+o)JJ^)L65uFYSpuHDxUDx|f2asmUWd7+}R2Om`5f5b1 zfObz%x`{1uwVo_AksfO55CBYK)`D6b1qwp_{?;$9OR>VJ*xyu+o$AZSOF+e-?7^gSGEz{sK_2>Bsus*(blV)svjDJ?DR?N`In8hLPR#^$3HuUO z$~A}8)S?S$`5q$QoMNir89e!j@uh+m1DLP1ocuZH{IuWxiyvv?2q&AYB>VO@Z1k3Z zr*L{tVJVyhiAlHz=H%J~^&>=?M}7fI$&dUaZpu#97nf>nFlw}Fc-Pvh2EsscH?hDq zW?*gm-f|V7jo{uUqI6^McAoGW45fNcOvtmk(dTkUQWfanLjP2JYRX(d!JYG?uP^L8!jnF8;KVu1#nhy+794*R z*}MxPPO`;8{%oS%;aDo0FmhIa`P)wSZe_90`l5^&jPtTlD_uU^`GKbMU;i z9sAGaN*A^`euC6V5#g(&NMR!OncE@7u8*jZ?})VxtCs4C`Bs|D4$QuRWC9P^L}=f# z!i$aEH@a?FFG^BO9UO^F-x2loBE%!w`*=lR0j;NcK~kD9FOnm0C6uC9URU2=SZzB%KOy(9~2s{s)XK!eL4m=5|v9SAg zsB&NXB3{&8So!0!ev=YMJDY-^Z{$B^oc`lTf)7883(8BjshQh58?q5M zls5jNNNwCV+o+FRYkmRr!VN0WO9$c~utxHZf`YUW3@*OAGZXT=hOjbBv5c-c{X)JS zlwc4H$xuz(WKY>{tGaE_loMGAzz&(s=%{8gufDRomZhNbkw_(AcUYhq)3)M2=a#B& zOi~Dp(Bw<$S0C;ubqxt^1yd3Ahb{zoE2J0`@(otAkVEm2MPao^dte^AXzN|B{kPJD zH>99I;f9}P;+!a7;#Z;_$Lrlrc^@V9kCy;`jc4u%tx{=w_1LyT$L1$*4?eR%Nfp6m 
zMAFTSzC)`-{%KEsi=8|T7J}sriHevgVvCj#^2N(Ne^y|YS4Ye4vO&A<05O&|I zq$yf)a>WHLT^x5?UjaqbN@Cf?@$KL2{T0h0=Q#eqO>ED=E@=T@T2z1^zc!TKS=PHX z``~;mWvajItJt__Pom(OzEEDR*C-grup~K&i?X@_Fapwtc;Xum15W+pYS~lp)0sCr zvtrR#53Jj^D2vTqc_yC7TfN;bJNd&RzMMbUQwL$stcn=*mm+PA?ar3{CKmr7d;tNZ z0}9*Pw`6sU+ecC)pg*NGI(nmPhu`sdjYwJpTBG;(cw0STZeAHH+o zC1?QCf!+j}vG3* zKtI?Py7v96>-&xCs|b${?vW41R7< z?X!UK(n0PF>^`hedvmZrK(dD)L|Xh1o6LNPX_$p0&Rk*HG&xkBl@!Nxc&|+z8YgYK+xA#(nBdFDL;2%)S+1n}*DU_%30@6qx>#H1 zng7Nfgpf+g>Aiijh04C3rrd__E>4psTazFG>~M0}CIK^<_c^LzO%w1!bb)T*9^DTs zIKIK1%8ljsJP$EmUDz8G&VHSODgrpbcNr}p=lS<`Vx7yPGpZwB6^0zoD4ao;HH5d(-|!+-~pBEVLLfg__@8ydZUS-jS50^0{mt-~=p*QtT6ui|4%PUMgz)`K= z3`gy?9aiuYLA!zkLm@DwV0KO`*-0Xc#gLx{di_aoOncGcFaS(pWw__!GWR*|smbdHE)FE1?B8uafZa4jR{>Hck;#O~?;fh>4bJ3vC=WI_-8f*N z7T^RQw+dJ3_=Y{#WlY8pTPAPG5ElvJX#r^adC)|pEhbtD;$^CY>I+B5F zp~cV4JAOjr2^8SiL;)Pjy87U$v$r$K?x+qvZk;7su^Uuc z*QJ!k@wvMp95>?4s+6$A2$Sy-st&QQ%P#)(YEEyDFk;PvK@z|fCf1iSy==n(I1v@l z6n}qj1zGG_u{ zdgcD2J14AE2HE3(n>J*w7sZJkTba*0tKNL|WP!&rr628?)(Q8c@1v^xo7UNl~^Csh28EUQ9R_lmkdynQ9><%a|@6Jvx~iM=sN@`L(48yIZZ zaKqS6$ZZMZnPx-3;Jirpimg9_qN2989X%uTHz6c3iIV&Du1h_H`*<91`cibT-VNPX z0dig0A9O!iRRx@VE5Kb8QHB*O$rr_tdDzb~3SM#trY-3K&QDeM|p_=@L#R)kFes2H|(jQmOthh?yv?saA5nG7$L3!MincJP_o<}(k zBSjl)1wWMdeOeyJi|%=IqkX3G^59&5)y{EgA$F~lphnzLx}~sbnf#$|MbB^9%0E*! 
z02oQXfiFLTe@bzHQW$&tZA@eZ7-{vuW5h=lJ_3a+0cp!P&@5UR3LrSKhlqr|0i3=+|IA5 zIZI`FB<<5R(dmLj@DkShV73Ka`vJ#VE0Dq{1;7ittUr5u>F!!a#enydj(?nd2V}&> z3!bVEjf^g3RFJ?J@-vwo*$2UUyk61}jmFt-l|J}EZm00D5XS!AZb;7gpSe6BK+7D$ zGM#Fj)}H(CCIic}JbUiIv8wKa8g+R`mry$cj1WtqnAQ~D-S&3mr$sgwNO)NR(8UaQ zIB0e3w*#h$%P(+6KjVgpXN|?-4Eoc6%s&mAzqeeCGqNLJU;~R`gnYq**xQxTn@!4^ zVU}Keq`buuxGn9wzyp$g+QaSy;TJ9UpJNXLFflpWh`~QGvbeGH3+#Q8f8aqM>i?f} zmQTQg3Hjx2Mzsc^Xs1bs|8qx%#@{;oji(O@g#c|g01d|zIcNPQO!@`cUgXbpUU%8K^Ta8+!#lV;aPt8=SCJn z#!c@N-z&3kMOGgw?DJAJ;pds7?K{6De~W=`;FpA z9T(sZy@bzArzYq6plVI?b-PxTvWrW?NkReTi2@#@?E~k&hgc@S2#EcU3pV~PTFT-0 z3Hp$_9+EpB99%#Ga!kJc+gDb5S9Pg(0AZn581ah-VMt{=sdeWDkb0JXax0ntHRg5r zaM0$Sy3cAZ@N5U{+Bh%1j z$9xDVf)a!kfp??7-PYA$WzUXGh^>g4SqY+i+B{p`9ccVzArQ1F*@TX$ z=ubJbU3z4!c@EYE&Dtx<_0q)ue40E%y|kf$Jo9d!VwMxoM6Hp}rkJyeKz-oe!6n0v z&VfzvZAaW{+5GUo&5OsxDrpd_{Lo9|oOc$TF-Uny_E#=rJ;6IB7vW$oiT3AVMnq2| z$xKj8Ow6LqJJuUD27Rai1F#S{{qgVfn~roYvd#K#zvbYcpsr0DE@BcsYdcm zqbMd`j;MWeuf;I1zsI5tt$_mix~b1JqYsoKsw_fOr@FBp;}I=B;pNC0#*mf%n`*so z!{_-6eWg{pRE3$6^8{k-AV6r@ZUe^nFG}b*MTaUtP`yLr($jY!t_KN7UPKE(F+Bf1 zqb^hpsddm81dYWYqB}w-wro~l@zy+aDe$2Xzqql|81Fxk!)a=&)EZhyh$n>CSZdlb zmol`vRtRdv@y1oI!ktB)4LIXgNM)pD0jc_^O?o!4a)BAojYfNy42cu-*Rv9rfz+@r zh42Bt%N!hmfZM%QW zpLQ8n&f@Or18Wo@Rm>vJXw!q@qeDZRq zS_?u}6a8mt9@$cQX}D1jgi8{*uURvWt+@D?*#mA@yCItlOzh_f#=~pu1TR&3sebv} zkq8wi2t=Q7SIEcCXAKcoFvnlVY97{ccP?C$*xVw|u@UD(Qd#-6GdJ6vM=>{Fz1jer z;|Ht)&hE)Qr=+NeAe00kgQUwNA)%Pp0z z(yPmKSv{mIHfP*_7J`$2ywjjq$2*W>wTi2aZv+(YEQOBziBqjFK+a{^en2k)*ofM^ z9bR$4!K{26uwTz&SVOvgI%nBcmWS4rvA_pZ_leD5j2f|Okr|eMV4&RRh2>A8iA;;X zM7b&()0h643Uy`N>ttWVWCDWG2U0U@%r@FDxntC#xM=$jr62^=T|Th0f6xOZ<0PC! 
zD5w&Gats#L+8#x39v>KGf=vfW;fo{X!w(_LCh5RnedWG?5m1zV^QdK?RM^(*s@9pD z9uUTWIvF^dF93>AY|5a?oN}8QUWF<;wVeJBU~7gBh(=aaXEyICcV|vsbsb@~ zJA*!<_dqO&Yxlhx1c9ki)(&<{S|;D#tru-DYxLvCZXT`_@jj~&US9mJ0A~5)M*Kw5 z4_NIY5;qIUV6h_uc8}E;-|X2ANe@ZCmz0!k+W{#O7=_*#i(?K3-3NsoBFuS$xBPW4MFbt& z;FZee@mJ*iU!=Sje*gCh;XdBW`WE**WrF=*S&H{@Cf1&yM%?;NJ;e~B^;3fi$e$Qhr6l64ihEH-oboaDG z>ujp(vcSpcOUV`Cs$7aRZESOViFVdTs&m6Nog&&WRa z%cikOr#jgmtk$U-1D693PWyAn@$Hir*#Cf)*fJ1&t)Voq{U8R^X7*=y-r9@H5_PS@ zU|QtYT8Dj7Co3EnJ39fuWR{a0GqE4v5^n+4vh>j1<XyrvVb(x3S3-Y{rX_EtjI`B7Pz_mpDzgSL`wMFv_Rk6CxiWjKaa<2p z5aq$}t$q>xF~ls|HGI*w@$9?C%~AZ%d&oIr`%AKPg1GhrfMeS9;H@^z>WcCti4|!gcem+tWo644~(@OG6XiNn*#LMXEtTQZ`ksfHq^T~V9{QQrQzk-Od zsj4^ghaLAPl%sO&rk1A#US(MZshD(6ZGZb!mSb3TgTW@)pmg>{z`sk8@V`ru-daO> zekB&ayo#<_OJQP8RyxEV(8~U=l}EJgN5_CF*L>d z&NBH?loLq*uD?FGH%bIm(>}8yYEn6TBzR*e4;@Ukm00aTtO%3PF6s4(?H0sAbbGjX z^3-s2ncXf|{6eH-+`z@+Ble<{kD#1+QnEINFrcQxei?ReaOh=5}=Vj12RA%^Alq$@q2M{ z{^K_guffu5B0RQG&IRcLMx)@2?PNcea859H!99E*gpg^97F;a&S629hpwF&c;OqCc z)D3n#0NWvro&$H2gysJU<{BvdyPG*R9I<AxOWv98$dZl z@)P^a86axC$;T`Xsoj^jqk6XBJ>w@Vg%Jel)=3EDGu~EapOT&@W*sSp!Amixfgjdb}J@GP8U4je{G)>Momq1^QqL2gWxR>EMDw~k{UA{wFQ*tNdxVu#-Ft*!k1xtyYQZrmZ}9p#Wb5zzY@30uuL|So(K- zj%7ELL3r22-z&dtCgtWhso8HEuqXNmbIl~>x30zl5?`0P57+_M-YZ;mE=@<+9B3P$W^WjCCj z3brNb`#?q}Dl}l0qVGGBI5FmOoxiHnTId%FNyLvX)kGq5QK(9Qaqty(jqpYZ+)D2d}4xNXVc-VBGj` zj+6_;laD*vjXR*`B2B>q4Lyg2oa-~V45lxog5b4BxA#ApCSjg{o@N1#2onG*3P7SQ z_?|yUvPO6dnIS4LyB zUh@3CcMG`pEIMDi!Qmw}VU8O^?BmgUv->IDCfYvRAKN@Yl#JZsN{1dd;4t$pxS!DW z)D&S<2JqDRSKo(NpFc$wWr45d94!>vc!uH~NVZMGCm7ji8`+gtM_l&Hx_|5tcH0W&4And>y7DV z*4QrV?O$}dRTp362@Zhs&SMBqM1s2>8<}S~t?%;pg53WT=El(2SbM?ib*I}5ePCGa zjrv403aQcqAjG2!A|Z6yAJbq8#S6qDV53{J_cR{0i{ zUp~Tka}n^tx^yd}ct`b$7bmuJNp3}@DA2o!#xZTor0cK0KWsSDe&!X_~xwbQWYRv70gF%rm!a?bk8iG2XE{v-~%|?ANPamBH5M0r_%Mu z$X8M^(F+N|R_-Cw-64{a2|#Tr_yB=I;K$W1us|8U5;!bioL_2Pqx6jSt5s>`#&?gDM(s>VAr}|3J zm1+1xsb^itsrLcmG7FRc!qN(P{(XiJuE>W?IyUu4!>o2u8Bz{0RdP9UB>T|L+3sC; 
zwnyJbR{a=*a{MH=w6&wb{7hFDz)Ro~KFOBsZtm==x!(M-1DANZ=ivu${4}hs9tbVm zk`Jv5^yIuEWmO-)LOk~fBbFiB%zotp12u7Bcw*t6`I+B)Zp>@ru+CUy?nEdZ+EE~Q zZ%j}qwnQI_jf=3DbVrnFMV%UKDd2Fw7uOe#a?=f|(n)wYa9X)C=d0aAUM?O`I&2=RITNNzd4L5*smN7%1r(yooFYmvo}9#$fGCT17Ce-eZ;n zohTl_D#Z<&fapeuIsOgwNM0MyK-HVf`H6V~L6e_AA?OEa{|PD{vxaq5GcFV&V+&qo zV9?*tY3`xkSShiLz?{*KKSp^=w0JH{ zPyg90Ki98jGF$BrSJdb5M8*Om4uNMF_WoFZ$-U6%+uy33cM%_d|YJkH*75gsYEz}m1_e0@aA)kRI47q zqGm>AWK{{6-c7}cch=HrT%)%lg$H!bpc5^Dfq^$f`gzOhD^F%^IyrGVr0M+MFvX@V znk$!<*MXOw+$bPoTU;DLP$1?2p|Dkf0@z4^7XIh=bQKLaWFCTXmDaBvKrZTx{Yb)g zD5$iV3M%6ybOvJ5-2$)gB6AavQ~LBX6YgTJ%DtmL9$;bEqHQYgwNzf1Y&RJTIUBn} zxhEepKXFP>S5J_P1Yr)f2j@YD!-YAXLdA%VHQc%D2X}U9>DJxWJ3#{>V|OxmM{hj0 z;d_~pANB>XpFh}L|9ZjB+furHrw5}kEK*J~CJS?A_lTn~n1CVxx@AgAqGbX|Gty-( z0UBK$7S9t``~;-K#5NnUl}bFq8$*awvLyx3s5X1N6gki+agkPn+qaA2{@paC94!OD z;)3qO{m*K0UM}9&B6|8!u z-d(8Fmsk8Q0TqT6&b76*qI+Yc&3>Nt7VkGLg&vhYZ_N%8vk_3&9lO+%A#k`9PcNun zypldh1c7E(4OtX-e(3`~)CbZ3)=r3mDMgob#@Mf}dYmln+|q`X<6PmwW)3$;=fnae zmJh;Mb9NTrD6p4M{z;T1&HKDnHwlI_?c^AQw7dxm9*;xj;%pcX&o*)WI1y~%HvFsI zV9HfrUTr2NWB}?I?H@oqg^#QHE;nr``Qa118-%}VPt4U@GwsWNq`Ne?Yz!M5DsH~U z!4_d67?gNd&}H4hnpB?u-H6}A;VsB6$gxiQ!yIKVg9!m_KI<2rN)DJh#d5`!Q}-84 zv0FSv$LFAcgs+gH@!&{svy!zWM@W#bM2F{&@bDn#iWnu;%1Ae6zXuaD#JIGF=Qgdk zF;vT4J)58KN4y>-*eZ+m7ceXF&RPQPqSB!KB+;hd(_*uiL!S^AJE<8UrZ{{n<6T>6 zD3J3IT#spx3>ObC^G!V(hkAmPa&JXXLulvSE@-ueBT4934sS-tLggo)xlL+L?~Ss$S*r}%Mnvgepb z358l?kM9DKoJ%185lsq;6`}WY?3eT}!PVM|a@m3N^;9vl+gaVg_eI`O_pKRr`WP~k zi%UN=r;c)gNQ+D+ZyZv_xI6Zf1n~wJ!c@7xjwc zxDGv8pi=K#*BZ#2p-{D#_a{>+gHm2*ZQ$$=T|;dI8Q_VTaZ9lj`zPf( zCKB8MMqc&a2>$<-@$Zy$(Y{bvwOfAQ#oP5?t(mR*`3v&zI0WL|oHlVPo-52OK#Dzh zp+>T3`C*QYd=*@3QrkucG9OaH;b?E*u}|R-hyD86pAl?)35r2iQ*JaiAFNw= zz8MxDHH9}%XDZyrBR!0&SGSl51uQ$NCEzf?k#SX9A+;!Rpje*28LgN#9;;d2EwQGY5!2M`4XW3nd|Vv5q?x5U}93dZds2)bJe) zwMTiU(ouvUSbBdZ^Wpufh#>c14ap>yWeY=iKAk^)dvyh?G|yoi$K86)LSgjxI{H_m z&WGq6l3BlDv1Hgp5q^WbMp^US@nwuFP5dZOKi@Z^&!`F zVoq5+i%BN91{!`p+Dx+DSAUzjIdvB3#aDoSWJOK99oJ?NyZFx9eG#W$Svg;dkA8dw 
z%6X$!gk&BtvS%T$lBEzxRlD;|;1}g+H{_P0*Bgw`3;1(n9oqgH!PU?>+*`QJMY5v0 zb;n#jF08aF(N$5v8%~^@;AxlhemSN>&R?fJ)h)d&+P1@{49|KnVPRX3LYQBOzIw@n z7ZKx3g!(SswMvMObc?}-7z&51Ej;sMWs$#Mx^N+)aiW#P9noYUuMa{erIyY-IXWK9f?Jsx79 zEHe)r^2S>_37GH@En&Kqepi6+dLvl!j`XwMJFO3EWCuD2PA zy9xd^*uo?%BO$VP4lhpo6%?H=gk9i$fXb2g{r2_mbS=*D#4nWQnKNh3yl|1n{;P$U zZJuBpk#7t3>1LcGR^EEpBjF`zhqua82IGELPb;@sbY7j*cJ7<{jpRsoXD??w(~(15 z)sO#bQjvf$v81TW#tyDo;m-M&#ilwQm7~_~1-{%eYc9la-%u1Tjc|Ir^Y&iPCaraQ zJgbKdX;F@4y&FoRe?Y)`@r?>s7YZEv?jgOiYx*ou-RVBS^SlI9hVlx~(q0@mWR>Y! zgT3ic9RwpFVfCT7374qEbr(HYDL<_Zlj;@V^H=ut-bxEP_O$T&?4K2BSjGqysl`y5 z=bGn4%LI${llxK^x3h%#DXptdGvQMdeiE5=3G(sueb49E?qWZ#Lrx9g1acqPmS#KE zx>=S5S|1|+VP>He)$`$H3fuR$3q{({|v?R9HCU*=x*tnfy*ZRZ4){EYK{|eg|EYxP5s(9|{u~ zyScrc`L5!RqpRG@nXgp+_Qx75+exC$vdU;Rvp>7+&@2;2M#Yo5823>$T?B}4Ja&np zf0id$Mb?7;TRI-WbUZTPIHm)(@*sMplu};PQzp1?DDZJ!`1bW;&+B|RN>Wp*;z7x(K$s0S;*sTbed zSN^V7Ex-arC&8p7DgwsE=lsl`YL_~pJk_V~jeGC)$s%R?F2#;sJngZxrvnYu3gZoW zulzhOv(mQZ*fuo-p`ow?JT~L3Q#GU0;A3x&?N9YkRF>Ky>}qko+s|ZV-Cx+W=s*~U zLKp{YyLRDkznH&zU?Dui*WvS&;gOtSWlw3W)T1Kk;hM=;Q~RjLtk~QIfkJAMx>HkT zGe`>*z=@If6Ery1^F?IOo>ozoT)p!;%d(rR7v^uCoEGqy?#7vTwdLn1@I2Mo6dZB< z1>$A@EHYLKU*QS;3=4;pqKe2L7bBNJl?7{OyP4L4%!dgup(6p9)?gdI3=do{w8etj z?^kOH?>}cg&fDvrLpM)ecWN(mV`Db?Q1KyRP~ho*mKU)Q#_naijsQa=n<4EBQGAY8 z@wZV>V!+$&i11VXYByFQE}!FxoX_*r=hUgy`j^H#%?Vi{EkHULQbl62zR4xd$AuIS zw2c$dsatz5KAFBYinN=^XNv>fpC^CaG;>K+?ri<-xFG=lwT7&bWnEmb&ct}Lnz@{- z{OskDdk1{NmCTLp%Joyx^FTCYBCU7u;9^`+ckeC+PFl0WWhUjAgn5TZKk!+QuTW<` zI$Kb11;8gj#X;{BAZY-Vm!y2{*qL8Kn}H!u$VAd$s%65rmmSM{$Dn=J-aVl5Ub-!> zq47QJv}-s26GF@JMa~sHI8pqr1)ARs$-MqGr(JmB(!@@riN%Yqj`-~t)=y*oFr@T? 
zDNee+-kOJ_f>1!9q{EZ;E^~lZ6BHYZt@JN_-#rd_X}ewBmUEMr2*}#FJw&d)D5a9*+m1Yv{#_A>bUsNO0b!cwqhNQ)dwk-^CHW5j(x?+K_ zBLTs)6XqR9nQ#C~L*x%YN6~FGW2uqLvqJIP~#ElQ{oU9!=oz7X5emEVAz@W?r ziM9O+MGySen(-|+<1?8oVffE4f|M*Pbx}cy!p=gTHi!vmp~({jBZ9-L?bo;tyswkPCf?Wm?oV*7f{*KgIz!v$T8Z>4(6VPx`*TmNr0HUP93Wk#x$A;NG)$P4jc{05dSM?rz z`UoLd^lVQ@j8JdAoZXiXFHhnn75vt^;{k06L88RBbOjG}^*_(@@24B2WZryDlIN<; zaX_^W$A)GA_mRTh1^VBuS=rdXyCVM~;NZ4n$c#ll-ch2VcDf@?<|#)U1t~fABF_x`4U?yr(uTNVz z_a~09aT8319-9cUhmD)B`R`WH{8%7f*k>=UtVzFUdpEtZS*U$aPZ}Gd8u7uM zbjTYdhuq+(sQ*Pgf{zfClU^9EoWXB<36Eu>yLCD(z80x7hLy0i=bQWuoZxS6*NFZa zQ`HoM20a!DaJL+OkoDGw2P_OR?Dmg;IaID0Zuk4Jx_<1rlj;F{`*GTTg(E;wwxK9QkyBY z6CG`aY;v^!B&Fwxl1?1YMloqT?0fJ{@>;Rh>o;Gzisb%3!oCEo=Cy0TB0~|8GK6%} zK$=rq6HXl>nW-eDl%!N!lafS4qzTO#8aOgENGTzuqJhw$k_Nj&6KzT=zI!Ep!_!>lXj z+1?kWea}meksLiK{@4Fm?nx4DLOaLQ-4uxQ6K!LnPWu9w>x4C+(Az#pdq5C&h5s9J zaQ7tEyu2P!b*mOr3KJZw7Z(7=l#f5V!hK29!;yva5kfcQw_o=Kcl>LN0V_tF9Lv4T z+mHb;G>f#qtC;5eHHcIo#`HhgRER%pS?b3F=%frcAozfcG9= zItO_eeesu(?G6~j#)i@rt!gL|L zjoLpeb+3%s(|>8IUO)KF-+c)Yf~xo0cNzxm~5AB&9VVv z-H8Aud5psFU|G6hg8-n5#jYRhthX8OU-+l<95kB(JZN`-tp$#LOxJXOjMyM6B1b+3 z=UnKm)9m1Uyp_O-1hT)pn-3`(kA>2-6rqtvm(FRayN9UzT<9Q*VB5zN@|9(O1W$Is zZ%=mLWy`q-Hrx`8JN@u*-_uZ$svguzO38u*8|DXdAFt9S;R(v_GGx)3kM}lo=df~- zzg1{x$C6Y&<+kgQx0j=c*&}bDMVR5ih?Q<4bFBR8Nj|oQMAEH6H$U%f`>J0(Yb}x}88-))^F;p%sRUw%_P3U7pK?G9N4! 
zAvG34Foe7=mfwG_jMb4$`fILHe8QJ9n%6CDq_@G>*Ed9I*F=eRMdSqqVBZl8p9>c* z;72Mgv%Y;JJh#7D>sviuw`NcPiB8GE(7YB_S&6W4s4N=*Ogr{v;eZ~12_}7KJX7X1 zz2o|f7X+Sd%M9Qxfhp4z8kO?_ZnDqC+CR2-z$U^9b$>VPh)FhT7#kdggYqNaw(e_} zip-slWr{CH(Fslw+He{v6F5Lz?*U}7vE1i+=RI2r6V9{dLl(DuD`Yaw|IINPIK0W= zUYQ_i>^qaol@geiq&xy>LP}5cqGX^0boCSl04ukdf&bwCJ@VWATZ>4NeQ2bYUy!f=s3gAJt{*r< z`Sy0q-DS{3*j#f!XGx_2QiO`6&>0H;iZ??u_x@I3yJ$cpE*VxYTn9O@6PzA6wIIfs4v=EpN$Bx6mavj0(X|nF6z#4pu4e6I+dP-S$3a;r!TF7^%o7Z9GU% zL>qr=#P&u485Z`hb|{wdUKTx>9H0TdUlPcZ)Yhu9iac)b>^gQ9*7>Kl1 z#`@ah+M*}NGB6IgeZu#xfRuF1gczqd6{hpqAFf~(v-?meE40$2tnSW|lKuGk|~kMK*QyYRd@Gj9aHG=fp-8+Mj&ru0Fu5Xo3TOB5U+z8JOQ}yi;2eI?@`J7 z7e~IQLT}F3=!m=9pkOB0kyudJyc##_yHo^U_=wr;89WDJH2wf5o##@(ymeB%`d>T( ziZIRSpb%AIlA0(JTa;CY0=^P?N#GP=S!~UfDX?y^C;ibkPy-<*t6w2jQX-uQMWQA& z5D7p-tjzpU6xha9#b=LUdQl#J^-Y?F{L<6!Hi-qHT&}=@t3Z=Nb9KNc4Xqd3?RpoD zTfmZ;IShakDXT1fgC`RPjU2k?`+#>Lo~BD(#Jn~Q_|U{La@J+B(lF;ACvY^wsyD-4 zZmQV4dB=ehPEaPdSOK($i~7W7J@^PBFEd!a^BXFyLFEZx={``s&HESXL3>tl+!dbEgF3 z8`|H4CxREi17&}0%cmgt+u|_vfE~c>H;xM{EWm6;30^TCon!IG-FUT)sjw?XM4|5S zwWF6{Ea=kGk5&=kfkIQ!l+oB^cS~)h;@#Fti8#B=wt>|ZG4`hDhR4Y3#Bk6{K>zi+ z$T(NKbG@|y~r7Ew~3 z;gW9&??iNsmGXYDlE{OaJhnkon|SZ`LcV&pHdDC*bT`!UJU%Nsx?rId_jz!V>BE0) ztqKO;dJCJ?Wm5q*Ec}X)(WOTPtaxqnVg?P_-%*t0OO|=MPV*jTb3v93to>=j)t3fY3U+BK+lP^6eXcXgxC4i2RkZn0?7ke>ZNQWjI=w*PLIPJ*!UrXg`-5Xj*Sy z-BI&$T3c&uDvPU$ARx@*YQbkW%?tHD1{+gHQ0~}x+bl8^4>G2~nav+luvM^P?*YK# z4!5eIlcN0u&7uJIuFCwE5L}%z*8Q)=RH-p>^RAh5T9OwYk`Bp2l4P3;Z~K=ozfreF zHp7_rf+Hi1|I3eDr!)lh{)vM#CTF04MXI|6L9@w5M~%_aD0j>JjPvqK^p_#WYsgTn zJ?ljxA(P7iO<+y7d`Y3TW-^+8vaq*#msegR(H!1si*v%uRNvL4i0u~#b4#sFZKRxW$hDPoJl;Kd< z6eS%PFx090(t|b#K;tEBjVX)(;ze>-$L+91XB?i}?UmPiGVi3%nL3Vw*DWDqLN4}Z3^Iv0>%t^oZ?)c zgo-4X$FOTLz*Cf2z6O?$Z&5maM*d$s)H- zF%r`tGo%Sn8d`J)&wEOGhS&9)*QHxtN{$AO@*PT9F1EJzxNjv+?}bzZ=n`t=&CR=9 zUq3B;Bu4RNf8B*}sSMQ7aFsp!8#Zjnj(*(Q0SEzX7NShSV8lWA%A=~`U31)87Bl-< zR0KB{C)#cXzS!{scQdROGfQ;ze!MJ^tNx7?k}|_80cZU$8JYHwRShKDg?NJFc&u&> zog%vmOSPrvCz71utg&O^v{m+CmO%s3==}}rn9C@Rr_?*g;6)%0BYY^7OOgY66#&2J 
zx)1!}ZEa?^6t6g0kdr$szHuCTE};Kh#4VteDDEEW?obq5z zg-TVOl0BF=JKDIX-l}z;0jVAc5ey!QR+kFw!YC#J4Fs-p^CZxJ* zB_3y;y?|NIdEC(9y}iAR&w%^$t}j6@j}Br!85meaCP`uV6?L9IIrdT%45ERyBjj$-;gGg)%j>X6(G`hF1AoPsz}Wx3?H%SN!3@F zyYGJgrw#XXQI@x!N=OP8+7jp zK%XBcVFS0o{u0)C2eq;zqaV_$mY<)WF$3JJ*pZ5>8qhg_=b%<%6r4_MR){7h zjT<#~)S*oWqpI61K~y2~SI<6ureiW74)D8A)pg`!9RwFe!2Ph`Dld(XkDoMi=J-nt zVT-KUgi28=0{F_8oPwiFTOf# zKR%tFD!7Kaw0%#d9rq|wlulc?+hA7RT>iiU@Ym&gakE6xwSpU55#jOBgJY%G2*pmR1%9=`kRVwPI5 zN6mHYvNon>jGsU7Lj0~>fs=}w*-t_4A-WC7d5NQTbro`Zp@X}D=7a!1bkV}ajU|UO zyzW2}ozDbhcEvEVw0T59MZ@^BkMn>SyCJ9$Mhh_QHI&dnyW!H8Xw7%Fu6f>qBF}z& z!v<-)!!DZUL~cOJ@fl+LAeT^!^)Y*g%-(dctfwJf152Irc(Ks=VY_?tTd`*+g#Vfz zp*Yh}pmnL=W@1MxCP_6{4|7@pQLtPuqcBAM$d*((^v5J#58f zZkX#!x&EkuKhQgZ@dTzQc9Z81tu%m)p~G>Kj!!M!Yzz81^q{y(ns+LR`>dn~2Ka>% zl`&{>S`-ErJ|5CWM=~FCOJP5XZ|2{(EBV*+0k-|VM+gjp>`8Qcg8Y)>hcJi`b&>{H z3hLUCoA^L3%m~S9bYY{%6kD)hyhycqK_uGgnC^XdI4FY`KcL`O-Vo#ULy_YVg`VwI zHPRisi!>frCH&}ukB85XO&J$BNfv8+gc6Yuh;Gx&bKWV6d%qW4WzfP!#Kpz)GcUc_ zh#?cBR_dbHBg%C?;(jX%kZlYy^a)WqZZNlxbfc&g`b$w@7niSc1lkj z=Lp;bTZ_Yro*JT}fv_(Pe4Ir-- zfkl+?DXXu-<3?eU9gEddBU<3%Xfwrl^6hk{@WI*U; zpmOzwikMQ^Zu}15T4@0R69a%QZ)N?BjouT!_krfKBl5?MIz--8;AHcI^7&iQZEdD< zTVBJoc$!Xx2QYnfbjCMmuNeDG%&sQg0X8-dlFsEAn;roO)eTJUDLCLFH5o6Z8+Mi_ zPQyqoI&a>*ic?nG#K@>0(u{YmMYL2O%ym?J*6V4?!f3LdPb~2C zB!vk61*A00X>kWYjn)2I1YzcvvYfKt?p@VTbW=9PcGslMdvPr~{I+067t}BRp4Wx?79_5Is@MQ5MR(Gm=a((J(b@WYRsVu%`QoE!D&_1@E`Mbg+&*8u&m}W znj`-fnRpKCsYPK^_zStoKZ27|QnBkJEzcG|7tZ{o*EsF2fZU#i zRQ`)tN=1#hj_In#DYU8Oe4}@Y!I^I^klUyuI;3m;*}jfbyqmng*O@v-nn>UruoIaO z6yAkK?jlx76q$fOu@jP~6o@NnZz?j;zUWQUk)9r?BA-AMPFZWackezEArNL;jr?OY z1&P>mRj^kExu9cJ1)D~_tI~XLOZoVFfq(Umf1b9dt^@-KxUG!E(fNN#y$TtDw&eTp zJn%x`vK@ROh!6w@mNtz72!A}d_?|=XBz7~Ni~{7Npnm3(ya;uU`X`91Vlh&ZS-XFR zKC|_*PR-n-|JUnbJSWP~H)P(h`PbU%!mZT`9B>dniED(wKRBI~d_Mg8{LxuZRb<;6 z%c(qgOe$sU($nEsxw>a+mn~&(flwhD@N|_Hh=f8fmz|uhgHDBg=W5(WY`Gy;CQ2xt zE#vkHT;g9L&twC&5`4*q@n$%#@qxj))Yegm#|PBw8pp>AaAQ`zjIR;2t31#}neuod 
zMCtI-x75+r$8{T6oHb$g{*U}*sD;EmCnff$-hK{(*{)o>eH`{iFLw30FB3cFGt!3GvLA1BDULaGUj+0mGop3x zdmC4^aBeW|@$$mj%VdUd#C!!L{@udQ4?fqr=bgzDx`)Xl3>CKDt=GoR^fK@P1dlZ1 z9m2x00g5-_u$T!xOO0~3f|WWHTWMP5%q)%P&%lar!q2K;dTQ^7(>V<-F7Qz7u-{QCSq5J1PkzJ)Cb~&%mjoW+(fD6iU z>{Wq9#RKy(41W;D+d(iV7)coNId>|E&A_>!fMwFY^~*~fCb}92kL?m*CIXvH+~#6d zRK(rsFAY${#vR)jwJT<3CKk8uIw{N-4N`-Nm>%nn8!+e3;5;TaF6`Cr1W5cAfsi*{ zg*v;wD(`ykJ4kNLcR1JIvwr=0LlD@(GMUR@FiP_yyYe#O#bOFlHt8impPM~qn_h6_ z5UR{C^31irY>_nvUnDscy(^4JDGQwRP5jdjM?9Do|KAjaS1lOulCemc6~na6g(LKz_NVa|3($oX_s>Tn_D{J^e<6D;>1DLmd2 zh8F@1g;d{O>ES)G!=sjr7#vWEp9fU7fsCHNW2$5!g<7H$qs$5I#H z*);gu0F>PH_ab{h5va=q{dd1|mn8}eRN0mQJV_itv_zcD6c`h5*aRL4o5-0C))wU- z@xEIy?=;^mb>F&<(sd!G2WIm!CjlZ`K^GowG|u||owBYJm4g-To#}nYdadKzRH^Q z;2&tOeqFwU^ZOjfu$E;cJcV3V4x27&M_x(*f}f|SsKVu#SzG=Ua>?@U@yz}6kaiv$ zD~7oz4=;BK3KjFt)(q=OR$X!7xl`1!Rap4@!M*(B;L1_@0Fbgy=z(b&gvGsA)&kZw z;Br{Tst_#*6Ir?N#rk}Ip9+?)qAz4xi9*|AiC5- z!N&qxNNPJ;t8bOQlz1(@M!C1(jU#oy1MQ?$(bvMP&}XkU$N*!b;RO@3@t(T3;ki|} zR|N^p9bu|sS->Hl@~i-WibGfB!JA}CFl^dU>F1_Vn#bt{3ER5XBTW?OTdg@4sc3- zP0Ho9a=pq^$5ANzuJ(+n&wh+Gd13xzv{z)bhUbP^+HzOHFcQavP79^>T)HILlKVh_ zRrPI;)vG8Ngdr9OOE4CjH572eZS%*tZOwJZ#6gCj zIw4)W<~VoGtWfydgmWkN$DRH|Gcz%x=u;%`|Jup z&<$5Gh1%dlnSt$LjJ+^I?uG8%4h!f%vQ&I?idCs4kXy_Jb1SOdZi_pxZhGP9jii=k zgO}{kW+r6Rk7j0Sk!U-B6a(Hw>_V}_d>v*LjZ1S*>8WWmXBLA!Rf0u3MOo7D72qIk zt#w36s~3wfMWmyA1#@6T$<68O_71;9b4X#264ZkmD3rmBzuCdNtDTWKBV=cLhHKf+ z#}*E*k+~OEvTp&Rb5Nc*P=Ru0Sl$e9 z^A?Mkt}-P&x20=xII1rp18@$-g2qA5e|Z;3iJXbStH-Psyt*~>4DL510%t5$lTz|y zVw;msKO5+N4V8Qa28&_!Btsl6j4V7m)U>s8bz>&P#oQTt14X{Y>;3i$bVvB!b-}LY z-Jw<_`U-~BNP9XAbDZPit61z4MckYK!DBm;lm?s&L>zaSY7*sZ|9X~uPsRQ8I-Pv zs9ndKcfulaIlnK91g=brs=D4K?l1^byx3l{{z>RL`TE@o+@H}xQM55!5^*ufw7hgM zVO@hs=sjqB-%WK%o`3J?iKO{V#idgFhU^2@8G+_VGg63Wn9WyS_wkEaIyC|E6+)17 zfHn?^KC~lR+fL0KGm^qtsnGz-kxV_vBaDnu@yx*x_!|T8XY76T@$9eGJ^O&iO2WB& z>Ic4mPks1mOry2RQqDokY~@#&1BL+SAo_i;&G{J|*F2*9SW4Z(V@3+M+@g2_H09V} zH=o&J_w;M2cBA*h0fjM^y<+n>FJBO^Bl-`FS8&N~3RYDt49Jk*GPqKRj>xTAtx8%y 
zEVRfmg>1b(0mt$)mHY2M`#qld#Q!>h*R6+C8hDqXeO<>)H-3q28~Q=0mS21cce_q_ zGXy{@(U0&t&CsJbdm3p$4=L_y#%gO7alD9?8SBs2Ph6Dv%JHiLQk4$_L9s2NF$hP z9o#6s6w&1lt1jUlaN9M>>VVApOE!BZS!M6(+9Nj=K1|{y?OQ36p}ndh87Q`SPL+jh zs?Uj}1h6rQHcs2*x>GYCLxznaHA;)qDjwc3k;O-j18a|FPVUZqVh4tf<4S}3M#=0} zrbW+I-`z1bk(hN!2GIG*JET_Bu0Zu6((Lko5)03CRD{%akg5UWAnOEl$m;}?g{%$N z@ZqndWtO0qO4|rB(N-NJz7_u#8Q!SKgY?873S9>XmEQDzV1~PoYJT|^%YZP<_UOt^ zD;`*z=k?I1Ri`2SoV11Pq!dEhxK~dp5#`VQg`uO4d>C-lZ@F-r@nO+-gr@=RMgy{F z{8O2Q~Boo&G6&z@j%;Vy1Ts)4(W z>!4TogE#B2Ed~W-@k1`5-l$e03?t{~>i)F`dVJW!VP}+@mM7QSi9gCGZM>*xLo6uN zk2UYtUfrSs5eSi55H$w!4S`r9joa7Zq2W!c%KpZE6;w1%AE;-;L`}aBgqdvyH?HPoCj6>w9FGo?B=WmP@yFj}*rhnWS%F)^Tv^-e z{nt~+j+u2f!DYan)f*PZkXT>y5^O6P8HjkYzf^OXg=4zueJ+S#6Vz=p-6DtKJBb%F zqI$3@v_ugjx;r)tg71V4bYllzw2+J^<*gU7xOiWrSPGxrfR6lyF4hO&4#b&kJOiLO zN+e*UhY(T|y@Ti74&7N&L)O~X7jIADPkI)*Mjh!8EgGum5?)Boxqiy*RVHc-*;$m~ zTjXe&dPOA4j$g%eLBm3o^wJ5vCv`>4HJ@5k_T5ohE9fYwi?@DXnf!Jlx^V+gf-@`s#+v}qYD|Z}7#FPpb zA-0oa)MkvI6dbfzRjA8@`oAZISA3V7yk##rQ8c*@TaWE1>4@ey2)YYlAZk279{|4t zu1apYBuA)iwraKX$Zz<*(H@9{m_)2vGc3O;$`Bx^LBY&%qi(vYjHUxA$@_>)a`gAx zaQNSZUbu;=Cm5F8gmEyY_ipuzacvlDmtH?gB=XpRSS~J((;kFp2T~-sw7F<2_aZSj z4b9=g((CmE0k`bUtcB{N{Q9<1EP>ct6E{z{)_W3an!wdpw$|%FJcQdrL=|iJv?B+n zmKCFW)$6gerL=XVu#|lIa&X6NVoCfrx?IB&lUhF=}yC?6o84`fEMzuKR#QGg` zu+PD24XJ*XR}W}Tt}51um~oESp)hNEf@PhDX5BQ6)eg|^xrmw>uMu>Vm^6U<`z%{F zPLyAgNi(XcvRO*ZrKVfDky}M3zJ?`%IYhGqW;ZZ>qn!=50JrzPT}pBv4CJH+#f}>^ z?)7kzwQ*P$8(kTC;cqWMdqw+X^*?=$C`^FGGsnGoMu$&KUWQMHZxIU7&0aYg)=H&Y z&M)N_QU72i+XVRnl#@q&ms``aw5VaT7d|xE@?jDM@dA}z-m}kjAQSgOpxgE0-Ypx( zyUsxL33R!a5q)C5mfKXBZ{g%>+%!fvqB8C&!asw@XJ>WVRlyF+3qMPYX|~{7oU;2K zt00Lfsu0EZdC-ww8}BH$*_6|cC%xxR0*JfwV%^_ePDqFJm($Ap)-#mn@~!%#0o>vz zxZ3=T1IG~@#$265&{v8O1{fSCx=~^ReJxLAHfUjP#jt52->-|C? 
z1-4%Vn+!?bxmvuB?qAkf!Kcj=^nFXa^sTAZs~Mzv_ju0gWo^n+DG8!Bg{L|F%` zGVALi_ihEwUW3{@4h(*|V?c}pXKt4ms6&@bDp6xZ5*-6jC+^B&L>>*tpO8@!1OHLN)mD`08!XSZ?8zl5nZoS%z#**fj~=JIDuvW9@YNg@wKr5bfoMp5yAPk!s`q zJa}r81GYR`+D?W9RaEg4Qkxg&uBjz&9bc$717HDAu2Vyv2Vmo1r3@&m1pAxSnW5DK zQuZW4hrj>H2Hc8w4GSwcOw}m9vRu$0BmCPN(ABSMQ7{n+PxtVA*r~5nnUgQUjt9Vi z!OdM$Q1iKYoZEYLTf%-`4%#1bI>2ka45xZ!ab(I5d@~T3`=r1zXVwR++M8HtJrj2b zxLFl(=iEK@GyQz_a|8ReiDztuX9ec)n_hX+tHIM06beabP8Qj z#4t%jZW)?BGnnu!eruSuCE!npbJ7;TD>D}E21OT53}{7AYa_mzJPt&nUU9Z;igsA#qcq4i<55p zI_P5(Q5HUe|8J-HW*DxY;s%lD%=frvm}_hI2;pgu@V)70fHNb1F@vA`aK-4*?sjA$ z;hD);rQX4Uz;*Zxtzcot^$9wY_h@Gnu&;f#@W7*XdGH2pl@IdxECo7@DJcUi6i%zZ zI{N1E83Hb)E*tXVgCO6X4QE*WK)FP9mMs$(<(Ekicd+DNzGUICloZ*IQsX6b!z41u z@Ep=1ph-ne_1expr%kR4oqeo>$pa6DDWGqG_IWL=P6kFm`8J7MRw=nR0k&sctnip; zdA}GGg~T@R6TuE}QUoZsvn=*lqr0eRT%hLcNW}KtxR$}1KA5&G%q!dKcN=2WPbT}+ zXMH#BOY*!X8QKOLJT5dX#Jg15T{*F!0J+u=2Snc3(Lo*4@`)_5XwM8fm4KuMWc+{J z3I%+a1pyEnTM@4M?HbSQTR`S%#%Nsd%sjdn3a&f4k=FlFu$=NyHEsxQA<|?gq)vfR zOwan4jvSAcdP?yo92hhYNl1M|hxd3eNbY6UdiG_Z0doMA0nmp4KhaH@y>Io+P_+c} zf9^SCy>-kWau1eQUTkqTxRwW``SMT1MGb=`wQmZy6)e%>^bw{Ar%R59EM`VB2;~6 z1olKV^u%=Kq(pM>~n%MZ!tJrV?(0jO}Ta9uM)@Ar?}pT zu44QT9%xS3Izh)lH<%JV>%xP+=Wm}*L<<+FXTEgP^vMcj^$JL$ZcGcm!aDC*3a zGmY}*KrIM*#*N3-sVT8L%?wlfiK+4z41^WWSPrYj(CmE@VHuaTIZx*C(XZ+&ET|c| z3&qbc*|ema>Mpt|$A7A~M6x1RD*if>52nofw|bx7@}#qH@56nM@p=c=vN+c^YXkN> z#uJ1I3>DtR6JY}SR6@OL@(X~+orpd%3^oQ=!0S7owp6%7CL%(=*^+-F=yF%{^b%+Q zA_4D0yPE%0t(9Ha%GFl-#%v4+0GF21vqcg(TE z?Ygk{E~B@GIgARu!>!cOUC8oO{sRo2wzWQtyBoZ=r4IoHv=J9Qk91zK^7NZ1V5B&W z9IC*O5R0p`<%)XE{O4lZvjX6b*ZSFt(QifV%rQ6;+E^~?n4+=BQhNAZJMnEpW1|}c z*B=HZ@IKGG8<%k~N;R6DOM3K@O|@1(k7Pdm32LtUOx|+BdOsmecVi5N{MJi3jY+R1!aITb1ksd{j-4S-oc#=d}DF6cAAm9Pz_;f;BJn zPP83xDz?5QV4#4e*0;S)UIc2xl#d18sh~QL)Gi8?kP&jMzoGmfc&P8O?P#UA(HO87v@_}<3S7_GW_p*Nc0<>Y|0=Vcr2ANJLjK|JIE z6YvA_^+GBxtvv+yuc!ZRoDU!|MY_i4eU#IPgB+{XKa7M7Rn;cAA}prDr1J7CR?na0 z>tjrdb=>ZlRgkC*jX&iJX((H7HlnqMEY8EmzOBYR72^a@N+977d$@IWNR 
zOq|UjoF7EXd{C-oKkC%5lCEWSuAs31SdF&a)S?feYcc-%|MqY}@@?K#7m?8nfX1M> z9_`K6|BCAu?yg*nVgR3ovyGAKm?b`|e~|r`*OwWdST1#XYf4(pLVK`;@gI0eUklHT z`Uj70Ac6y!D*BM*Q=)!*IkkJ&YzGCc)OKU>hCj6-^GF$V@- zR6tkI!9X@-tlZLO;V;|9>p!>SPiIY@sonK(TR^6bu1~EnUrui~tUpbGg29RVA1vGg zVgB=*1jqOAWs24$g1wS>(?8CB8TWf$#eqi!(P&GD&rRQIUw?lV8D=3n#~7~Zc+{vN ziRKTBR&mE+>>a{6St4eGeciRLesrSbTA~WqXZWB>rp^eOifszkyoW$%Q=O%j$*|gS z-hSsGADCZCebIJ)DWBFn2jcV)X=8Ep^z<-sN^Cg?`suDoF4J+CHF00Mr1?y~Zz{L4 zJ{M~fj{mA}?K!{7(M^3;%Li1J9aHozHOzE55C_X9@%xd=Qd;7 zH!QVX5iao=#MYTUwBjMYA&&Vv_^Ic2fH`mZ#RIeA+Co6yQuxP>VrI=bmb}LGhpy3AfN!&8nVpT;x<^$Jx8{n85pX$>QwByG+VE@A zTFga*#4x)WJjBX_1Hvc+p;yIYG5nDX;s+~td&leB!^Akw0y33;W5_EAuP>JW3qI<~ znB!u{8+Zhl%R`3>Ew%?mB#se;^EmiK%h6O=KRL&V!^KoTv!8S7y=z-D2aE=Q%UsP@ zENaAJB$7@R9%uB^mWljQ#|HAqE^HTc!s2-dD>hlrAR?y(%s7gip*4Aac5u+2jFf!! z(7Y<++x2p)9i6kPad0=1(x<<6;n71UB1Faut3LVSj<&$1#$hyCZuG2gdOnD`U_etJ z!Ycc^>hI5WPWWZ$TH%g(cR4E>WS5@Slu|++@e8SA0Tb$;q1WtV6~VlIj>MB4P@?-T z52Xz50iLz_QakjuKIkEmKuFC0!l~BF)d51uDePM~xifvMWk7v^2xJY+vu7#_&KahC z@U&bVzHtFN)W#0di24!Azjyr|&a)U{}y6nO0<1zrk#kqtDVgw6g-m3;mgs zf2r;5XquwS6LoQ~Wk4*?08=#sG&%U#NW)A6k<8?g#VU;{MOE&>Ou-?{9T#tU0a6Vz z3B#_r`@}W?c)D>vi{Q;VGZk>JVKhfslrXS1lAQdXW7iK&*id_&Kr2wJi2{g5*O0jo zVE36ZysBNg6sC%4tw|$BC;?F zaWa5ijuwquv#;aFM+Vt1-SgW~v*QAOyM5Nzo*P-1dTCIZO!|wYGHqZ!Rp;tD~FWm=k zKo2J0fDpDHyptUy&aB5wTT)irw_DGVlZg1(d4E9Zu#}3FtF~}7PpgV_MN8a+-4t>0 z(5C|fp$v?Dt*t!bsLZ((R7VPwk4~tB+TSZ7qn4I`)^n$h16Vd0EUJgtDgCRH6?fuK zJOe|R$WB1wNh#X1nam9+uvq=1V6#G@a*_g~6UPV`qHj>6LcdzGC-(FbwlOM70}8=8 zynEd5WeP6;{~m>YtI9aH*9S*a0{hjD9VSJvx4|V9k2Z?nL})kPrC$>mW~(rwkV2a9 zQSz~apQ$35q{u+3Fb!Y`;TR_w$9@w!jgxLjl$QZJ=dm|48n>~Q@~t0c)mnYfR{T#15Q~DGM5={1iA3A*TC|v((DZ%lm+dXA$c_k#_-U+mR``sB6C& za=E1-H5O)i&g@ASF z*3*|qF!`R+kg?GJU5Cq;wTdZQ^w+%~MLAhzz7WIc&(_mspl9yW`g_!?*E;uf<=?$+!BX zP35*;n=o5b54`}%=K>rY*RNUC;P_0qL@DkH{DcVZg~aC7G>yB~lT=Xbujm?sX8b7> zZdf@znRw5K?-l!TSg-EKR#GyK6c=x@InWH`?Cx@rHc>0dmM+e@BG?bMt2o20HG z_VyIO^^h_Hu#dv*j)mR|kZ&hOyDN1#k#WMoHr~vep_w0tef2QkD%T(JaZr)}uT-Hf 
zU_OFSG&N0nvqOF%1n-T{_igg_3t4CB*nk_H3@Xr5k#KyOVp@&nLjd~3Rjq`72M4~k zM$|E8pPuKNn~WN8u~gx_TjFZwPoWQhjj*4>T{5#^Cto+3XUnSWiuNla%cT z?!No?3b~uYKj_;symh=zy*k<}L`nT|zo6Yf>VH$$SaxT}9PjBFMzvpYCbJ>j*5;QY zYQb~0ei#5jPUZE>|7t)%mr9mgaKinH$JOv@-&hO`oYB^;FJMIhw#&Lp#r1mSNJo7H zh>G&9yc!t@Q-6R|c33a(_P4or0>@K{aCL2Lv7><_D;HAb&N5Zcekx6<&#NF9;*m zJA>rI15STGg`0(j-`zkjZ=7se^4Lqd*)rT+X6vuxHy%x1Qj*HiOf-`{=me;72Nd=A zlf|Js3}X~V@(FU1*m$?Dl!%DnAM!QrulEV8W7%8143SCs_;D|KvpkG66rT(Y4B5#irP+yxxWolo(E_^5JitNM?sOT^SdU@>G*Erksf2q%AC`U= zg~6tv#vf?JOL)AiZovxkB6>gADvSfArFeZ1eOOAeH9}yC?m`9dl*0hNeLhL z`hlln(wVtvuPK5?s7WN{DEM@0g8L5n&s{lhShF4w2`4eSm?wdTAu_SRQO0_gp#FK#Rl_ffd`tDN|0j1pBsv&L2D(QB_DX?&xL@I^#)gt2Q)7 z`9Y7}hk5h)wXJH8yL4^?)B1oW+eqSi)hd{}Zeo>o!BkJdDSVXe^eiD9igKw<_`Y9F-t@BPtusX9G1sr$SPF6 z6+X@9G0=Q$sW^p4Iv|QOZ+4!M>f7D=Cv%`!IizWT=!zYFJ+)N}78J{$H9yOk&;3-m zB>Fx+#fPTg)m+oU7BbDzRATQd4|gM&m5F-#>q6>!{I{nE&~QNXh>x0XhZjTrL+)fK zv-~lP$z*@t4l|1fgF4NXm>OWHphXONv|{BQd%lBb709U17L z$fl9fX7MFDDY-hLp~PX{4es&gUeBh2zzvB8YkRW~KlwpNU6+dIdiZ_L6nCa-Oevhu zfYrTuCkBheYuI!82Ty{InKogwKqp7D6xQ4(ZC6~ck9)5jyLv0|mk3BB1CC&0^4Ned z46FTVx2kOyI0Ap# zv54kPf#bKKT*6Mvb=6SBdCSHS=vtij;gpnKz0kKZD za6cJQ5^pa)0(?bu*K9l|SSPbw1O&1r_ zVGo~#VOwR=Au{)XZ!bAiW1ocg6!rZmoQwi!Pews)E9Ef^ zd&v0urG=fk$|%>}`AuaBD|$dLFoCe=a-fMq1Xv)S=xL0Brxf%w?oLl0v+@jDA6hJLm!R<2^rH;zR^}4 zVPKpVHB+S$Mwx)1U~?e6 zd$aNNfL<;xFCR!kP5iWFuyqk%9jymY9-{~BSdQTzj98QDY#V*xs~adF&7WPOBWxPh zWexG}@okD8tOK%<%%GeE?5^(Piz-emJP;HT0i*aH-arRIMZa ztJts{8~V;<#WSR&jsY6`006|GcOMw@xp{ep%u?ci0fF7bR9!zgpfT*k&P{0?_(F;_x@#W5OK0bs&JDd4U;E+q z5ryJL`BnMsP_5p6Pr6Czp<^73O6b0t3RSB!>yG}472C1*h0EWo5B(Tu$>O_3rY&ZF z*EFL;kk3fe<~!aP9?C;fjpwDp=JhyI93Y?1rRa^H!%RD6Tfw zD*h7qlw{$|EHlyy#Mv0y+3Qn01cKQ_u+I zLNJL%(Ai*L%8Onsau17b&qpkK)SYH{{EY6b6ShMc7&l8jlp<7bRkx-0hKE0xasd0? 
z3!pMAE}xvgeu{pjS{NBB#eT#+`*w$a4^UA({A*Y9(pd&iH9u@k(4R>by~G-Y;s%0o zX|ni2%k>5l7L;Lo=x9CrKW*k{F(?y`csYB3+J=@uY1ebfit2s0b+W{nEGNvQp#1$( zM*ds{gi2G_=Ya;FUP`Q)CzN+@l{u@dWx+b3H^&b1I#{21Pp_Ts8 zfD}stN||8zhG(`I1MltIMDauXycclJJTZjzfH88W!1Id9{yu_Y)A$aBdB4bSQ2_&p5<|h}|oA zh6mPNYQO38nu@S59FX7GfgrR4mq~fjnf;n>(2D`&YdaAJEVx15DiA99f-e{eZHO;0Y1l(yef-C2c^{wP;pbZ{JUY z5l3eN$@v@=t4!Nn&#Y&r17~L9DjC7OKJ^SZaL`?mLJLC^r@hkA`My5O9D$sskflfx z@)$+uM;6E^&buMNFJ*$KsW20IZkXnfSRcp4e|B;0x?w_;?rbZWogU~ILbVQPVpe^W zJf_Ao>8c#~Norr0y7G5GMok-zz2o-r{NbivTKEwISTZ0$th+fQ4Waw_)=76LNZfcI zMq60iYZ6iHb-GeiaigerHl{9cSW)Xv-#oSYmaijFmZE5l2;k%+$EZw5QMrJ8sPeuL zxR?s?e7JSD|0GHfIQF=Ja1()bz7T^bUg*?oSHl=G_%%>gjEC{>Y2Q73R%qe=9&Wzq zD=_}M)bemp8z@sSyN?0Smuq0SOzx^eX(Eu_OL0%mW`q2oE}a@^dz&E-y=8*YB>}m9 z6nN*Q;0ST(kTQ%1tBDUp&^m=NRFyCg<{FBAdo4I0B6~V^tgStl8|{NNj6HHkl@C1n zseKl;I_0mY?MSZQh#6c8UK(B=5a;tR2#tw}wWtubLwYdxOJP0*eS>QVWTVwTiCDm9 zfIErhjJT#n*f){G5~lBj?{1#?y2({{(V&t`9KzDEi%a#;is`p-#x5fZeRFwEWUFA< zG2enJc>V|FNtbU$r2{~iBz~UriiiR@oQ1&>YJwf%9afh9YQ-Ncv83=SnjNET^APh_S&(Wx;pAgalo(wmxZJ(VL=WV zQjj+SIhX|8R9!fAoRQA`_-Rpa+{P!MpB{c8k$A-3Ad|q9GdwOMa(^$#@h~hLd;Lxq zE;bbM$W~?w_OzD%tF{r{8X{x@jZChqR4_9#GQ^|5^Gv6zIZLA;_e9T6gUM2RZT0Ol zcGK6aC>hFpF>bI#CM+F<_i-_8U)QDU1vkW9BPbsChp->{G2{p9= zC@539JRs=mB_0Wdn4)YLhGjuKkKB>XK9j+S3V?=gREpdeoE^HuksGpG$czS|8ra4y zE^jQ1#LH_FV}h&&cv$B+=L*p^FZOtnXRwY1k2d1X`l}?pAJx zi89-n84B%tZ!??qDL50ajDAEMHes|+C6iE7jk95A6{_vh9;nY&-IHeFS@G4VxC2<| zAsF{Y!N!vZUc2oI~ggdvGWFY?4NK>?txK#KG_FrAxY!FTmk;`}~U7pYWMu7ZOGatkFCTbamKRMkiMm=}jU69+43j z-^W}?`dk=@5T96Bs4Q-;>c)C-h}0W%juizPlXijwi;-rugDkCYJ*UhMp7He+m;m=E~)w-KnusRjMfsVsFMcYph<0HYYbOa5_0YmtfQ;-$R^l^38jG5qtDUTAR0d06b zvYn5mJTPZV@R$0avX&ql0^4_cnL6Sr@oN#&mKsE1&-PA{k)zfttyL++CU+CVb&{%|oxDDWBb4HB>#M^0Ec zY4;nPoF`8Oc100hfN({tHIpz0*iFuvYA>RDjosoMUJr3?-7dUtHa) zt<7@;yT7}j<`Egzs3R~;Y}8JSK~8g|uKT$ZUuY-aJ#4L>ED|qH#GV5CG(mey(d+s* zeZgk{PGXI!iY83SOVGmNPfJ`$Loi))aS0zhAjMw#baPEMTv@vvij$b5bXj1_^Z0VT 
z+~Mx6P$WWk+;njCEK`8B{&}y!=7xmkB-U2R2;R&8y3j8@CBkNd3|5+ll{Ja<0}5B|aGb2U#M)u*JY#KJ&DzG%e|fp)Cx zx)&!3qy}Kq*%e`Dgqk^ee~lG@SjN9F}ZWxd+k0UAdtk3@2de)M{? zxE?n}>sxrrqPC8bqM~jWOOH0#qIuh0dE@@=$G*VoLEZkYI43GhKriq1Frf|^ldQ_5 zyKOwXK3ofQRsal{hx?EPZ<!BD18fk}n4`CT%;;&{F4RU)I|4=@U1*?CZ>ctme%vR7U;WS?T#GlP8O7jX z4xTVAumCQER0<1-133E4hb1rt>|J6yop((t<#ui*;`$YT`nvJ-7(evD;rQO|bR z6T{dV`G`+G2#fRL`-H`y!T9_XHQAHeLxQYC^UijE)DoQ(R@4xR&X{n@pznSSx3@+F zA8%nrMHlere)D<7^I~_WRJ(Tti4r7 z5RUFx+2`R!X0DRq|rp6v$-bD zcHjN#C~dxg7TAyr!Rzi`^3opxCNL$va{tkS4;2;h(4dcHQg`DKlVL}2~yp^1!{ld47i<81um>~N* z;4*<=ILVPobGh zxXsJNWt*5OXJfpEugJ#lBgB)YCJ}f!LMsH{LO>TXBCr9GJm25W8>QU%1ZVMbb5h1F zMaf{o)<=`zWk2~aW@Vh>_2<|J8HH~R%38G)t@?4DA#tEcnte9jLF9F4OSxz}4lSiI z54*qcH|~P)S|%p(%>&>F9(x5H1n86y+cG z40rf>JO?t#t=ZRdwOsvFhRwhK!`OSk^}N6R;~$YxB$YDCY?4A+v{3l8RYqw@LuqKy z(hwmT^+{1$DiSG`v_(cmLqj{IQfMKS_V4*3j?Qs^|Nr}Od>{9H?(=o@e!t$=>$;xn z%As;TCV|Sx`D#z6YN%eG|86E5(AHZBH=Da?y(f~#f2?0|?H0xD_Y@!2M~p|f*xS&| zw_>xctyEC^wa`tx@?s-gmjY22oFImx|(*w`RFpN&;aFf^R89WXC)WMUBsr546p1rI_Mu^V2nY(=0lPKi9&+htZXjEMz-y7}O}W^} zTb%j-D_Ei6NV`4NCtg&EdqRUqgrJ6!G2hdSTkB-XjCKnmrPbxdL zhOt&or6uN|iBRZzzB|oQUm8%Z@l6<_ijD$Fhez`?#3A<)p$Vf>9>iPWCbYGS<{qIb z$;X^7$9`R+H76<}3ylUr8)(5=kY`J8C;m|{lpL}})w(q)!cJHJei?Sc_Fi1hUBVXF zev{gO+C$<(-pnl$JOjVQ?KjLt?ibgH7nLHAi3l8_3P+d5g@p>cB^XP(h~1Bno}V%{ z_P#2TOz~*aJyo%-xT4}3bFh|YV#Z;0rYY}RPLf%mXR@m)S=Q%Hh&&uUli)<853GhR zAPPEO2m(6Y5DVfsn#)hNDF%xiE(fN8(I9nQyVGmSw4vN!O>xQfKm5dK#HN`%QT?%8 zQJo19hQE#$7Rh7AMJJzpo*rc<}s~B zp(oBp!$2r|0N}0tF5q`??TZjGb?&&!GM5i4!I+#uA`yGCU2Bl{Q_q*M~%$OAxd0#+1H_agHue?`8CTak?Nt4|zm*^n~WWisc3ZjGq}tgo<^pR^dS=jgHg8z#3XzxfPzJ9cz=j0fa-pah)*9 zB-fuv=5<1?l4(P?Rzo$PnW2a`g-C*RWIZU-V5V|otHOd)=m`b{-Au~(BjAoim!>cO-)7yGXkJH}(FsO`0n$TxRR#|P|A*L*1XP?5-7a`fZDR{jk5Q=*ta$$%s z`k9#T-=eZpkmXAkM|>_o93q{-6zUA-c?f=k48S76@aO<-K_n&I6dga@;22AY6%$#Z zS8uMCKCMRlYOny&5HPjhX0?Aewop-Og$e*6bwsE^gZ&X2RCvygE1Lz+5wz!c3x!s) z>!39)Ab>1G;^KN;TncSD8o+2GXgDMvF`s82=wb?{lMv;ZBoSWZJ|py`K&c~lzr@8f%~jIkB_hn5;&zKDh0Z$1F}bn&80G4n^mHZS7%^M 
z6ggy|!@6o@M2;G}lT3HQ80ii-N2IJGgA8^-iJ6H zr~i%|h~dSqJzezQLtL3O(Qj=)%=1SE_=@si z=T@R!M|BeHUq?J!02!~WJ5c4~F%4Q4t_fiVvd;x~CQy^v2&A|29SbcB6`l#pndy_a z#!}e$jqn3ln&U~QA!Fu3>^C=p~+4jQpdR&{9Tf8a-C`(qiv z%oRrd&xzd$f1gvOWuxpv>|QqgBaUx{TJUXXK>;3%6NNH5Bsj#c1z1&hH@eT7Vee(- z&Wk?^$(eN@Hkac|K?OO93L-YrxO;_EM%ZoP&=P=RS-JZ!>&O(( z79pI)T@uNReIf%hZGZq^^dKqkboBcq4mTOVd=>==*N(e~B+jCncD(m(F+T1Y zp+m)rU-OFiU<(K$cmY03@|b~O!1o37!umzedIwq2_yQ{ z^3EP#VDjt1ILdXOOVM$RJkh`hk_ zI8r=utpQy!5ko@Oh?kb#eg<{|TqGHkCijcAY|V*!Syejg=N-KB6xYQ^ttaQXAU1Qdmbs@7^H>W9J0Ic;(qK)mT3zc1m}BSh_A z;ASI!a|m}H6>!+kI<*ytX`ikwJ^4kvOhhsQ(Dmxz46_kk>z@Tl3u%W1Z}|sDcxz4dUx0dBK#6JsF zdW%>EqKn(h${(iG{(Gs?t}k#_uzW#q%TAJ+gY8@9Zdf0!&FDz}7Ex!A5TY=OvABRA z!b3*pTn>3wh_P~!nB^i!FzAW&5+J~IBp85??YrWQ_KwiOOT;V-$cO-l%+js2vtNfk z=+^p5Wjj%r$bixYLUh{X$(w;#nCWgZSC~3N?U|VMZRRb`4@Bs68#wu!AW*g4WBECH z=`69O6kO7DvTa&s^z(M;6aVpo{Y{y(?ITT(L?AVr<(Y;&gQ5`>iZTIpnjEtlLm{*w zk4*hxDccJuiHIKp{awuib5>@dZz6p=h&Ts73)vO4=KR`ixLp427W|F2@&p98lHjz5 zz8UjI>SlW4>imi^8gA`RxPZ z!9uDlCQbEAowhsmID*YNffdw5D4_6mRDgLli7+912Xr1<7UJ?##tsfhm#a$g<6*U+ zoU-y<5}%9ugHze)qO$p_l;|IqM2RJL)EN4N6R60e$UnQpnBU%GBolF&M5hYrWfC*L zcJ9@R7*FIaU|&3Ozc-u8#%;iU9xT%UWPFc)>)|{><_$&9Z3Xm|pFG1w7Qc-cm=IRr zgaw?tcI_(YGunae15iMSV3Q}&t{`@GF4)ya%|)YKAY4lTq~7zIqhjZCF4PSeiZt1y zNr=PemA5{we+&`-bcxIL=Uu+^0910u?{o43of4e4&;%4Bf{+BG_Kkb-fWt;&<{xY) z@Ke%FvTmIZ~15g>geSaDo|&H-YHH&RMD=m^vX_ueU} zt8@&6?>x_HcO;QtjkV&;v-g+cQDd#1ne8oM1Ixc&FNMFam$ZcXYu*=thLP16@W_#l z8IL)>37gE;L&=6TzFX>PNq8{C)(GttQSIdS3#Q~C#pXS^0P*uw&=swU+ORU&Sm^Pr zj>fCq_AhXXz%jwPfr7(_n@ad=yDbGSM?%8 z*Y%G>#8Rl07PVpPPp{BuLH4z!q4~5k!A1^0c_@G**YPp3EM4mQP^|G#dvt1VDNKQX zv?cP%*)>OB4>rkzV*}g7thuc;y}9bEvOoX z+*NAF-r~cjVyr1~B8C(!ls8t3n6HqZU5)xrL{?RFbFaQK;W8rYG119YCaE#Fen}Wg zgL98KYxbvEhoz?1SndOrun0@0KrM%i3Q_)AJqA%7wgAVsP#73v=i;(zEGPh5uyEmX zjPFC(zpZ^+A=Oki5z6mr(`S+e!=?f)cM)cUv+Evi9m72oy8$8UZn?i!THhDz3%gc) ztQ(tCLIdeWi|W_WZ~sGUr^xc=p0OIjG~zb#8mk#?D|=ZC5}qt@Zbv(E6Q>s=e?+m$ zRk~YKEPVc54kK?%D(R7A6M?M}nkZc<_n&JA+aDc~$_V|PcfRVmuXVrBf_%aA`?2CA 
zcYJs)zRWVt1e{^$GF>O0Wven1W_EHv0II!FlDBg_ys?DBn5_7=S&iE4R&CGTazZq| z6}fIp602X~YA&8WUy5R^du4EB%(*$7P$KK#m!z*9@iahzr;@g zr)-MZrK*>0phfR-X0zyT7oK({qfI`SqdJgmJ?6D5{`xWqS%Mq*(6UZ((2BKZR=3)W@4f~NUNdNpHnYbc}h>- zEwx)q(aL*UAzzs0@dK77yYiS{u8cindo4J%s(-}Dy|**7DrB+!dXa1W=}p#+>(5%~ zb!LX=RQJ0ZBo2;jh75qaBkm4_DizS^@x&SX?kjfE!7x*a+~E6(+Ce>q&_| zSTJpFGUb$@+^SiRYrQf9vv;R?naN-G6R$g6zQ@mkibZrsFwx_73fh~Xt9`3|xu@WY zrIMao*$?{B?`m5S^4_s$o@OFg;BUAnK0)rKp`%l0Ue{@JtG9a@Uw&M)9{yPhg;F9Y z$b9y;(dKcK53LV8gLR3j7XPDSCtBjr~z0=!g&re7S9Kq(5R}bSK)EAgyd~R(gSJ zPQa_Wz2*cIYZ~h3E6>*Hu;}FHic>Onc4Vxe_+no6}tHB#=#p6-gnv? z&@fN|s|o~qv%*k*7fN^~h6qpd7kLo0ce`ybj8Zvg&DOkq)hru7`bEamSR^SW4wd)V z<{n#nj{U(L`t8@%T^Q`vnUDAMyQIS9tP&xk2h2J(e?nc1Jn-ND=Cb%mqtX{3W4=M+ z@&T|)+VV5!l6{U(o2iYbEn#oNF>iN%vLIaY2MpZ$1Gx)3`UKb*@SkyLxHBR4;(339 zX1@D<>aI}NX_*jebRu1+0&rTN9=A~9^LIt8dy9*|;02iDf_(mUnX0&B($mPTryhX8 zy{Dg}gV*M8lH-l9;I;4Yh_v+$b;WkLgN24>{Ax^#wpOnLn=(c2!Mna`hgs}puzU12 z7#F3Nk4iQL*cXd7Q&{85UDxzy_Q=%&tA9bpE3~QFXUH|oO zkKDh0m2LamONQD(IiIWWVVE>c2vFPmK@P1EI6-$yJKYdrV?3GjZC>+w(w+F@sXY!p zpSh4?{a}uS=6lC=AD7)7aLK8=Z+MEZ?#4YE&ynxo*NZu4N-sGGfbOd z!!DE`{QDt`*j#1W4I&Lw@Af(@#^J7Iy!$*qzw^%B1PyQ`zGI^H2e?b-UP|Zi89`Jz z#$bME6y>15+S!q>+>BLEm(dXA@y^2BiKQ_nrlyKCvJB#Yb;iw_CM9$7aC0X{WRcXv zCF`yW-kN%n9cR83ihv|W-k2_mVIPaaqKC30CbO5+nsO zK@BLA`U>XR*JCWcKE#o7r5&Rxzn$m3?f47qJ-!PGt{Sb&D!8y6(@`}f!+LFlzS;gi z52xoT!N)5Y{R*aW8n1Mb7h>UX4p&YJmF$4~jt1aEFKWcN#2NVH9;CfugczB&vwgf4 z&#OBzx~=R#&;HEhv#(fa{KRn3xCgdz0jvcpY%pQg_LOI8UctD&fR5XaxtPJV2V3@{ za5dTYL{qxyP@*Uln_=5nCX)~WYZ37m7a=*O($MpFtLB4ik3z!}So5gu;jU0a>-Y!> z+igVPyv*&}?(W7c@%+>CB1&Tc#eIOq?k*NqO?cmn@5YH896?Ne95^1DLzN?bu&3}w zqF^LeuYHy501Vp={50RX&~qn3-eOkb6mwRv+Z>MRA)+UDVqCM%fPt41>F{~TQ^Y^m z&lSR|+i3I7U%WUki(RDph$)>F9&@VTtCCVUDUo=_My=HrVa;0isvN@DoAC41=ALR% z_)9@yK|vW=q*Uv?zQY3}qz<%}ae{5leM4+Zms(8JySW!nKi^=a!!P+@jvxQyonm&V z-#_1y4et$Wde5a+bR{#D+s|xeU*SJpqJTU-A5QrcFR939Zaw0bHIb+qc7Hrq7uz%@ zlb{T3_s`8+;Vv)-q%aWU19KgfATs7N$KPMtg-f*xj6^g9G3ubK z0=dUOWPohQ0)=z|X#4THO 
zufziF_3zw6!n4xz@C_l-xeT+uK%C8D8%KSCYZTA&=N;2a-$%v1_p2ke)l?(v5uu9+|o!p2$RhN(CZf$ zZP*5IIv1M1doTialDi!fbQg8e&^o4ld#ztWZ_|C`b>vAsr-PR&3ZZ!v5$X2wL5fgY z3_UA89!LVCxfi143tbzSHyBViPb6L@;(tD@=s6DvU zP#-)+=#Q`iKWR`$5DP6w)YjZf;dqIRPyUf=WXWJ8j#ZKpl z_mgU3VJl&Tf7S&!Oe!iKXa1{AM)2VuX?m0c(Ek=#mYo{oY86`oz2+E!V*XU0mZ*2% zaNw-h916el37JU#RhI`Loif%ALcnbTnmvR(@e2v5y5}0eU`4C$+@q_jdorz8s3&yu zu{#*q1*ik5v2ASnA-bSM)gfOdmyx|?bYvnA6)*cCNFO7EElcoK;m-@Sklgy+6C6=o z+AA1m;unkLidjs_a_n>Qi`!0)iPQL`;FZ&`AZY;n-HdG(ZV8R^7k+h;Vwia<6M5ME zKn)bv4Da7#t;~*x7)uT$e#?RHA+-@_m$~dBy1>yjbXIg$+srq1m;sY=K~YJytlfxM z`S%jC8pp+iVP}}PTt*lEa&sS9&UrV_a-yRBxD6-h)=@9W4IaW5?qw~mK3^!ACNuh{=fzij@d^dPAe|3AAYo3?7l zwOvCc-HwujhiBE1%Z@^c>^gK(;);N&dMobpPi4_(-A*+o3*sKd*Z4{(OPPFdNEy0` z0e=k}LmZSTE`X)A5Janyb0Y_THUv$nUl%{hw06dNuGM|Hu|rv|8h#_~6VP3M!=i@M zT&I%FU~nU+W*MrVb36#+W33m(KcR)I1d!eD0ox9b&U*AU7>$*rv3{d2*~Hha%MJEl zFCWdtTGJ_Q7fWZcT^ZRA<^i=<4MTGo?xuL0TJkWzhv5$x2T55b3ykb8?yWlrP0wNeT==zPd>n`5766JPD7c1+ zM&rw7&j5iY0{TP+=XYJD?ONXgI&HihZG4=PXqS>P3nZh?+2IB^z7g{t*Ko{z*KI*2 zq}L=LQcQ~;w<-g>cQ6;x&Fu z7g#5*&}gf2o0cCp(tFt>59Z|n?uw+NQGDFoW?x#K-`pJhq^klI_#3z}bEpLe263%y zyP>|HuV6Is6MBuMqaanQVODa$4^ZEGFJ6f0aNAx9!*oVIBr~TXkB?>Dwe^twB>?Q! 
zCG-nk9WS|sL_{=M)GDTXmgjX$|7vupAD>r0nmr3-?GJ-h>FRZrS~jwzNZDPq+Nhl> za8gbVu__ve{}f#u5pdrEj2*j0gF(@ZRZX}jPl0&u`)kFnHP*e9FD0jIHEbPQcScno4pvpH4Dc9vyL6-eMPC8QFnUR z)VU~X!Ov>%x7gV;CjqKK28EUgAX`U!?=OFM@B$Q1~m2V1IUS~+UVGa*IR+J zrCDq}Z{MZe(r}e`vOKI9zAwl55-y7YG_BegV=M8#?btH4gG?OF@V0+8HsfC|dtg1U z>P;8>`i6K}4DbjYzDM3CnGj+^*Vo$=2eZ+tg^I<<{34cLFf6Qke ziJruTho77_WZ(mSdJ})x0`y17^mu3P^5cr;zka?|_gZHRI)@Jk8!toS<u&la?RYM9}}>FQ7_e0$H`AZYy# zl?wCoC-A7vC(ZBgs={R7pcX2Dur#KRKL~)XK6-&ewj~vonNq)6`m0Gha}^ zOxDld!!&ZTX^3KFe0D>5L)bYWuV%POGi2t8Ca*Sn^cHd17da)9D+g;E9+VapRkl3$ z?FVu30Uc6t>Qf#nXwI_Lm!+a)aY6>TFPQ0YmPpj%bvMkqt$62P9@!KlGydHu2cLxt zhvEMIHfi3nJ0EDB1B!W%;hXn4R#n z=+IO2%TErfV_?-nZ&Zo?5-*p#X{j+p2QFB&K|E}VcD&u0d<@qWPGaRK>Yw1Na{Ni- z!7;;Vf+RfTpX`FkJy}o*?#KJS^2JekXH2vq`w7B%JXWRpmWR>!2}=FeRkPagXbZf^ zFL|bdlcxmzypvbh`AekKT$ME7J%E|360m{=M>qQ!SKT8hl;lU_@fKz<)ea)pG$y>e z;64w(Z&N1~fVOR#%pZL-rFQZT*fn%fG_zy#wK;VFAl<*8*4`zN=JU44HV2GSCk!-N zXw4Y2j}GZ6h1ja}$>6#dCD6)0(SQ z6LK=jaJb_kaAZ4Tu*g*=$BYB_8inHd;W?cxzdggo8s^$UEOpq76H5-T9)u+~AC>kj z?bO!RCi4%J9MLH9YuBz-!b?DJR&H`tkGQ};qJ|cK(T^$vRQ(ND)e)3lL;5S$J##kT z6jegf!?V!i$!W674^K=PA8RzmcvV)J6C z$G}{02I$TkT#=F?vuRagT7!53-Y0nWn-EKU01oleF#^DosOx}bF-p5eNnv%5W#+8r z#t~NM?ZS4dh<#n+vgdy*9G^#rI`;IO+oLGKvo z#)VGHGlB%U{{6bqhE+l7^$ocoAMkv?y5t;h+-2?ds^LWhPe*osem(#{oh9heNe{BE zo3&&7+wU)YDfz)`*D>ICO8MM6!JzvN^sn!0~36&pli))d(cN z2cd|v#=G;&iih4p=}6`UfH1!$%6(Edi`Z{#lIS~Kfl~<+78JIEmGlK ze2;*qzqxl{AHIEc0KRX~@Qn->dl}#)2bl;gn!ARV_r>@)$e4Smno;jsbasFihz4qjtx2=GuE} zo^DJJ66Bm(=>xps^52S(g4gJ9c_jN+^|+3=>$>E;y+5l!J1DR-{Xy14LwN8qp-tbD zbm%!<@6wJ(4rM}^I{-Ev#3Z0i|51KdGG76*x^c*uM1ASLLu5X<$jQJgl526Xhz6ZJvg#k(=)~~+nO2z7DT;$3$U^ryNq-tQ2_viZp zHFzR9hIo7d9giJ#bwfionMdK;y$gT{Bi74;0(Afb@P{Vilo4o!HLh5!0smzjLmq(< z-dY^ao4^V>vOGW{J$mcXxz4KQ{#N1uNL%jQ+HK70SnRr?$>PnOz@A4V)_MS6??A9~ z@=`_c89_gKRBU=5PQU51{x$o(OfFEsTJcCXq2g_v2FbGWF63QtLQ3;I?ILEJQtfae#)PKw=#ncC9JsaD>j*hH&%w&`9a-5B1DIs~K% z^P~J?AXoZ;u;Pk^`*i&~1|Yz>g;3u;B%Kk5VEOSJ^M^*uX0D==g+T-}!JQWKdZftf z>2u#dAzRO<-=Jl+x35HLsHkcK2uu 
z)}O$buKg@y*P}FR5^1FP(6^|+SQzuV*g;?U?E4o>k5&SiiX(c+O})}zTZFD5w#ZXv zsErlKY}UbzX;E{i=PzI02e7UmVE7$OhHR=7xy4UAp4JkpX9ea-9?4S$?PmhD9#FIK zZmN+*BZ|CzE@_uanckk0y^!qR#&DI1^I+e&12YQtt>3^Uj^($`ib_fWZOR{@_>$x; zR|IiQ!+jOhF=L+N3%FTg@R=V$BT|Qmjjr zQ^cPBkJ`>W&+Y)XkVc!c)!Xb0e9&=d`x}-qDygWvK_~wJy_box@vfKS6OiqpWn%Ew zLQ-&Xxby)WY{AsQ{kmqXW~_<~^xyx$Y$$E=KTCW%3Y!{4#OO-3Yv`VsUU=O*Xn~x8 zPvtj#$F^i#G|UuP{R5rAv=WQou&p3)L?@Y>*S_On^XGF5R##>_+oPTHn)13u+Out2 zcL1iUU$(uZd+TwGPJp@dyN1Ea+fujqlTEto~Oq z`RYk4-pB9VKc`#Qeh%2pCs}q4NRb>SBuBZr(2;_G!?*_IKCsBhbK>-k`v-2PC7V6EPoY zx#74>5S!{O6@H&i#Rvyz7)wD3acd6F58uDx2%mQo_fk{{D2(E2YHDcS;h*HU^p_aie^pyPk8L8Xt18jC+@h`2HAi36?6n+Viwb%UU3HzK_x6{On|#L9 zn_NQj(_Tt?8Lzy!y()*`{z%xe%#p=5;4EJ449875?&3&BOCwdnPtA-f!!>=vQ(+anA?+G9c2;WBh_Ckv=kkw z<+Q@pdfz^ONl;WA}TY5I78obj@ zlCCYuyM@DqC6OBqpCYF2l!$DLnK$q5xqAyg?;Uh5^l!xjyrK5AAoX&KV*<2`_4ldS z8S~tc_pAfN@6`rv@x<<>$Oj|BjhmR#NOhz+fE5F$0;G03D#IT9_6{JV0ZQxRfhK#1 z=T>iC`oFIB2j8*HC=kC}(6O|FIX8dc=zNwNB>E;!7p4s*;$M{sF{UWieZ7Iz#-VBd zh9BdX>eTVe#_ng)K4;w?3WW78*b+r7u;1WwfQDd)C{R@j8t-1T6MWg32@PC1hQ#i< z8EJ^JAOd_IK0c0*ZfwTOcEIl?_zJ5+E1Sj##DSFE-<7y$?^Jju4YCLP7c4O4A6Gfc z?OFnD6jAuUf!KJ&(eAGEIjLL%U{02Z)BVz`J6RPEb=6i{?kcJ2o2(Kq!u#YVzv9Oi z>06jZ8Mf5CM%p#O%%S?1JT)&iSke(<4h;;u(%OtlfBhMvll75pW*|HEEnsZKwAmGRO6I#W zOSur4*~HR3sw2c)-N@SUv~4>!g78a(%1yK#6W|$b|dcEVfU4$4pCFC z<+4>9J5S23=vsC)hI1k))SJ73QE?I2ihJZfhrm=3GSQpgR2A6?z-k*sS^mf1<7xTx z?aAq*%ZkB?eIw%t=RrvxP_1JKd)T$6K_YB%UtkPrM1kigEq7S(vD9Nc1y%#$k8Sgd zB!UCkVe;CLQvA>(n}r%>RaoOC<+FLqp>sVTyh|;*p$mSYUJ-4;)&|T>CjR|HGVc) zerp--fK5oZl7?6S4RIJY+c2mEZ&bho(^$dmSK1K(W*2w!U5HoAASxpG=a_UwlBmZr zLbbXL1e4c>&$qC%-!6PwvtVAw_FCY6TgY!h@S#$F6K%ui7vbh6_e!{P`(kRXcy`g_ zhe^1S(1JY?3f&}r7Z>N=n8GYtzygp+04L3$)FK_{Q{JAC2;2ErkO%OQ=nh_6D6re? 
zFfGMc>eFO0?_b^6pGdsCZ$bQZzF5U~9hI$LZUgE)QWoWn7W@du`{&P|eSj|xQqqgx zkSL>qf#D6&OaaomQ6$L3M+!IFe0xzI>9X;qq{3SA zSm3_abL~zwG^)gfwaM7Nr^&g4YYduC@4CUfx2y(ur0{d30u|I{bcJoS_1 zEkO4$8io6)zN1pNnp%!D=r}OL^#;N(xqIM(tq(}I9~o=8_H13)s7|?Z3vyXgq!y1K-|ooEjF}om6~d`zE2lAWLH7Z0BDp}c}ry}YJSnUo@01XO&_wg zcYk>PE@m`8ym;iBOAZ(S)T=U7rMNv3b0(oY%1p**EAOxw-+!X_f#jK>=%nmwFh4ZY z9m8@z1eG7KYUMSAq|xM}28gB(Fl-ZiC>V=00p6i#SD?S-p`tLqft=3)_$Hqia_)?{jVlg1k zJUHV(N-yaQp%hESIJ7Odq%i#Z(s|-f3%p2^zB(iYOiWqmbRWTVe<1Gi_Ur{GSBKcI z@TX5RE8V7jwR)8;Z^SLL1T^=V-&=h9(O!@nhlJ65BfP>eQ4$m*?*z?BC4dgWxMuIC zhgs`p%(B|{Zub=I{nsm$2*&LJ0l1JQXLV;k85;;`g)W+V;fZ)5JC;GDVIEX#xm2WJ ztI{I2Dv^g|&0v754Syz$PmfwM!8&^i|0ci1o^!luV#EWlB&jqI=Ck}z)Q7u2ZxN1@ zAkPE2rD1B<$*{XQ=Hg$Uj=M52G*pmkn952Y*i>VWTV&e}KMW>tA4UOxWgixen)9n| zKed)yJ}gg&1#-5{fC*13v?vA#&q#sJl5k6y_>&n|68@Vn;zYSnDA_1O;QEhXU#_&x z-6m??upcE+cmKQ7k`w6PAJ_Vnp8dcq=~GRnu23~GpXvrmJ&Mg(-wtp-S_ny`#>x!6 zkv4RKt^O0n+M00)Iq=uWVR!7vBkBP#WF~gy$Ed3&xvXD|Fp%>o1#91k#k{eTz|=R) zym!G^9f4jIVGTDRIOboua;2rK$*~)=>SnMloZ40!NB<=Hzao7i5IrR1I9jwWTV0Wv zP~$4O?tbUBIV>Q3-e9UR1~Lc41Aas*qY29(b&80{1_DfBogA0c##s*pU-gIpPYL^k z?{KU?N{BAixR7NFqyPD}jC@O7VfGzZrHHpB2H zEgyvzRSsOBVOtzOLCjNyLAE;)LlXWXwS#a$NF&`sT2p|?v@0no!Ta8wt__zbcdN?x z6X%vWs~nf9<<{>L>AAia3?-B;RmkKJ`dFE;vaun+X(#wM2+dEcv>X$bupH>ab&;m$Sr??HCEYf-R5r0S(fs!5JJ;fl>(o2Ir)+3&2_j&I!I z?)xlbiZL2k< zx$?^pv)CH^UiS>ZAN4Y%2Wj`uYJgJ$ zef0?}Pv8|-_Elk2oX3Dd6k;WY;Q zi$zv4_9S({-}n)LW;rgWSJ&*P{Q_!iT93>*%qB7|&jERN`)`1wHGr6nv=RNtPVB^x z_J%a`0Gd0AJ7?cxF)lB!1Sc!^vkEOGg6-)M z`&@1iw7g(EI5v3~SyTxAH0A!Bl|8wM2hvo5{sw4$e|~W*JNVK+W@Y?e=hz7!Pj2I6 z>mI9M`c@bpqwEYNfRVDXO|1ZvXO^J_J#86Iq&;UTp6%KV86;i`X;9VpKx^ zYbJhtF`TW0AG8t7HI*=f#^#Q`7j}VePt>@8`Nu$qodVzR*^_;iI%o&6E%pW`)jOb< zm&lw*e|w@&1%N2K%X%+7drs59UJ&cJWHnI&U#1=;IiutsgX1F}c4)eYvkh3iHh^cI z=imv|6u47_mkU#C>7xj`@(#kQ0%~?oOFsS+oM|>$5d!A%Nw}ZCzyFQ+_;{9j*_8PN zhPO&ju1n8B<=^_a#2yeW8iPYZNz8`BPrn%!PakgRUtF;C*9|A`qA78& zA&iDg`4jH&N&^0QjCC3e14iUAnSbz_>N$ZsSja#WD;Txlq9%$JV5O>rW`kia9+kO$ zc!{&2f4a!2#owHWcn`M2|b(tUX>xVs+X^ 
zh+&9;si@U#3nJ$G*{QaOPH*V(nFGCR*_PhyXlMr3NJ}?93K{`Yehi`MEKT{)Ad;y8 zK-B%xw2YEX)hXB(bBk6GGs56%^3R(A%PXcJ%vZM~pC7Qb*E z2V1m?+}5dZ;yKgF*=B3-oU-=N;G`E`p|R!ZrkjFwv7!w+m^g@uHw0&$o^s{+xevYP z0PMDD=fqvG6o7~X)pO%FB@ETCrhto6M>oKu5u6g<_dTs8K z(Z-xz4X9FThi@*zA47}m?U}GTFTi7tCW1@`JPR$~zA5bZQkw#KQeO`k!*2Vj|@(jr=IW_r+>oFzE3cuWjZ@Mq%}5vkCZ`>wT-7J+XJ zd=QqhZN&^58n)Y>Yb}+irhpcgoIbDlUwJJ)0bm2>5P#fJzu}dfoa_LhJoYqGX>1QE zI^byK-1#4${YS1zk>BzY`-xmpG~{^s3mPCUNjv!wxc>Td?rq{=dz)l*7WR#G)M3O|K(qpI5$vtT8o3^8kTi@Pt`wjIPNt&_nniE)=yG52qGWbbo z77^s&AQ~EobZo}Qpt=+`{XdG1JWeD} zq0CW@<3fZU0IDfjZIN9b`fQzv!4?9N(Gb?KVI7!bT!?9jJiDc)Ja>sP>fq2-hP{mH zOSIR!^^H{+P}lbv4_^BdON?B%%={J zJ%Jgk8rQMqw|w^uzxDfm7klZ_o;mV8UUS3)t|KjoXTWB7!4yqm6xFmlA+x>YxF=I>7zc)*dank3qO&K_ekRzHrZ z_+{CoE}Z`YCfZti<{}SX5ey8pnEQQNbph!fC>n&Bhqkoqi_;a{FR)0OLxJP%@w9+- zN>z`IFh4R)`Cc9Zt%$y zA#$0*jIGw}ZX%3NsO6wSZouTa2ckRj^taEKj)9+WHzw+|w53pQZC-(Rr?>5R+TYN{ z*R%34tjYzNImml`FlaH4@Mx#Mc?_M?Hk8V5Wex45kCw7stLNucn zLdHb6EM6slgBaQz`F)0oNa^bKo|lCQi|{Q2y>yhkFGG(zwr_op8OZG-Z=t?fO|qaH z>=oca_arSeXn&sV4uqJ;TdiDFyc0gE15qQ)R)6a#pSBqCp-*i1S1D(&yoY`U`0;Dr zPzG395aUba76=e%H7$Y z6vge`-v?m>B8B^b|980bD7r4V4Gzr$?H4Tu6Sg^a9xZ{VTYBI^i^6ZfPDQtuKKT5+(*P5}M!!vo!S@h5=Nb}L~)5j{+TSK{!y z(~`F^n@-zof2z*Gj5Izuz*>P+8QmD@H}-u-Em1j&*TktAy23}dz`b!P)48^t= zk7x%G$F>L8^Itl0&VMduqPJ!CnPQ@l+Z1W-ow@MwMR0uYwDt$x#QhQn$eNb7Ty;6z z2=ITQgM77YaP2*d7l;Wct#$%%eI+Q!R5mhi)EUR-E}(=9((J5LhNoJ5Acsq}j|=y# zqmoaL?U9qS`DMuo9Bz=zSGfz_Kj_0aM8ID_^K9euT4R1yDtRK@%mfzi;ew$X^YFp} z2Mzl^5Tz9vur-wH4^eD7o0_MsA z33n4J2Wn%pY<fDRj3(zVLexOeR#-SxMZuu?;sD+zP;c z0nV)Jmg1F+x-V1w_5&h$RCR^MnNEF~Par9F9nggWaTM-~%@P!1K1b(`fQcF~37-C+H zhdAFuNZf4@__5#K`m;{9u%5dbQn$y|VN4`c^%!0*Ei^VJcxf9rze*6u1gj8Ox$?<) zP63t4bj8zAchE?Lq=5LhYT1kp-cIB#VF@FFvM`7UewN`d@lS_7qBE=a5#!q7?M_cf zZpsfc7{OMc^Gih+jwuJHU4>?^jOK?hw1F>^itgFu_yg zaG=@;Z_9v}ATS9KZNGdN_`e8G_Be9tdkXsJJ*jgRZFmG(^)dA3?M~lMUCAlDqm{%U{EJqz1%g~8qy6Re9HGVw8@C(oLW-wxA? 
z7V^kd!ZzPOH$?C-(ltPXM#sL51+HoA453IXti$ zoEp(#5>XSLH^--AFELdh!e$4YLjo|LTG_6Yd{Q2(=p~85Zw4sZphhV3(zmBWR>k8(@kUs zeuUvPQ;i1UEl0nj?!`p;SXaZ0OmB>LsS`%@T zWFrj2?;M9QXf5;X(+gEYs1mpMaZA-sw>EvUtxp8H#eHbOx#6K&A$&pxSa~R5D~p8I zqW%ta9n0q-rdZ?nOkaDJ#0)WIB_+nC4&v}H;Iz#d(PoqV?Z5o(QzOcfe_JbGZv3fyq_eoQ z8L)2bo&K~c*P!jHMmrOZj&GZZftw@!Ph~{EDmM0haCI$k%Xr$*ov{#s{47Cpc}^e7k5OvY{0m z1L-*6bvUS2X_M;?4GYnH0(Mu|a5@{aUKO_#5OKZz;y_@e5s5v}xw;;pMfb^-6}6l4 zk$7_iW-h`{K}*-Tpb@%x++Oh8mLJp(BKA~r9T6GwqP_fvoA32_Pm^AzR?`-Ny8Qtj z!!lwL+P!`nWe7IT=K_g9Tl&zZr#gJpAdrH_7XSd&*BCj3v|L;=rBBxAjwot?DB@XC)L7PRbO`XNd(SCx&Z35A@_qi;2z0c);_)Hm2|8B`0 zzo@qqsl12!h9BoOjH$1m=t*#U&=|yhp5#dA;)-|xk`M3g^f8+T->2ke_jmXtejj$v z1k~LD%_b<2?OrmL1@7?}u845Jks?TyPnLTv|WY^B&~t>i=ygY<%a~< zL5r-oCY+jWIEx!m*{5 z-+d~|D+1qMe`hS=Hc%8UuDh<|4Vbq^lHX^;VMq2q-Kg}pv3EKVQ+Z}VF9-ehUtdnM z>N3UWgqI^K8(PZj(2tbSk7^zoV1^k|b1G6LLf`3g_aMvodIFYCQlXX7sA;ij5OhSB zN}3T2WJF>W2+7Uf8SnJhHJp%MH1`uhP!T!H6Q{N1j46v6`O45SqTUhz3Th%)oPmG| zB1dg)-2>W(NYKgj1n23YT)vGI3SWN5bVWt$0H@U{wD9l*B&i%hFbPf7iY3vN*%fn! 
zPKxQpv14cofX3i26dm^AI*!;gzumRJfiTi-J12wA4O>TTjo4^*n zc;_QnF)ATm2UdUP4d*rQN5$z!snUMWj15r?LpGY6kLeOjA9?)GN5^nx@=JLf_j&)f zXX*^>(EH#DspDd(D1(+FP#=k{uFpSt>5fM9Jb%|vBr^eYl6ZFT>%gJJJ?z(7Qz41U z0ZR!rSTJtkyppV;#lUd#yg^M6sA&zhvBo(vuP%L0B>F@ONT4sY+XN$pO6M*v@;eY1 zUmBc7DOescpPy>rK6DdiYNCW9^;Eo|C`KpXE&OgjIoTg-laA*=EV48~-24xbZYx@7um@=;%iXNCi&f9mqCE_U{bZCm(*QWzAzp z;o%l{h7^HOar*Dbx^ELG;yL|~Y3<6MN!g5yfBsQOm7%4_j(~lrlYodJY34(%$Ik$9 z!5SfEmUg1B?Qg)R{>Rjyg}w7nlB zD0=l+0KA=G`~^Ob2)z1>A4`2RaXXs>iLFDLRzioqT}cF_5y(k6j4!tR3j4`5X7{#X zCLjntV9iU9+9oa^KY5%;goz<;6C5v!R~GMg`f%gyH=MhdU_!Z0;sDoMXQ@Cj{%{u0 zS9{)dOdamhgO5@q8VcX4GJ-veX zPRPr``XqFmirzB0$iR7n4ZRo=gIbrQ!oh{x={i{o1G6Jr=g9Ua|23rlcs~wP^)Bk{ zyVK~%J+nFeGDM%R$qfr@s2kKhFN=z_uuS_>lX>NBe`H-nlAi%jSQGO=S8rI?=2G2P z1h<4n(FGPQp2F}0h8y2Mdf4?gCkspWZlKZO_^`QrYz)>~72J81gr(8XAC+dFjg&)E zjQ=_{Q76mJ|3y&i-BF^vd)N#C?A7Q7c_fW;dt`3c_V8TQjT-9fP+Ai{i$)9{Kn>o& z&|rT-l8=}74jf@*!q2JE+7zu9sYmQ~(8BPmK(?k)1`#wdLVk5hjtDY6|AttMU?fVD zJH_RUx6zZ$R^4JF_;qhG!&e9Crw;5(H>UlN6b%XUH5|D?rNG34An_$UURQ8AnFI= zfGl@wEjao2YF6?uMVTM#PdWHYF?&luMRy_c)xpl4*Z9&8do*2W4Sqg>(~ZBRIbcsd z_NkIebV2a_?Q3q$!f;K-c2YNDx<=ENv%L#1a)Vyz4)6!^xJKPQYK|8*oG-Ab#iJ#C z*lUvHceGUZZirO`oVzZ_1H&huvx+M>`Sf4j)w@+4OMB~5m4ZynF@YDx9VYgYP~4UnQb@8{bsk$S$n>1YB6$#K9AgdG5Bi9a$AG=9Lt zMi0dj?xXhId{kWjl|-Bjv z*tZjf-)BEa2#^J@zH9+ssNv19&;fhl;~*nHQgQ3zSWdwEinbnnx`Y4%%CMF?(4vtM zHu8scHd`O0fTPC*Vzes2)qqv6ykEV+WBv@10~SnlanNkDe@YKCH{&qlAbKu*%q4sA zF|#;gMr%A?Z?)N`*^@iAuLV&$MIxmiaA$A+xXt=zl0+yI%361>$S8c;Q6|69b-zr_PS6%u%Y zt%mUzF3hFYPH$>|aXt9-?kQ64b6)?)ORw;y8%^Hr>2arEnk1=6$0~@&ks+`{;CLE zYA*>LYU-U|e<}DnS8(cN;VC#~E0EF+o_rtZ=48}{eKD<$nPoz+g~&O?>%T%zUjHDl zoz92#))jcg-HE}fML1_~U@@24vW6!|BD9_m?F5Dg826eG*$^+$AZ*&|?#+7U;{}X{ z>X)SluA#NM3(~yeK{dsb_5yFe9G(yewm$c>jFvm7U|+hzx-G1~IRj z^so==W`C9Xu73dEUra0oprMV6x~Io+j%E%H!~l>(`%8%E+46HlI9<^JCCVEhf853fg(G;CgAF5iH%TCc*^e<6=cpGX~|4tp4g!JSvS8qTWGtqdI72E+k}rx^rSs zKvZ-O65Fcan&OeHcJWU%N;o|Iq}-*k4b>U#j>V!0Llu7-)nqIZ7Z?85eeS1eTQo~d z8v-a_BF6+ru1aDhuBUV@GdeyY;2kkP8nzXe@9)my*3#T_Pe-)L4GUSz;Ki`#S}Han 
zRzEm&A`^?(9N;6kgQdzxAbP2J2nO^4BxFRt^uy%`f62FR-|lyZ5Z5%+Zrb?r+qJW1 zvvt#T6DK4jt8&^;U>UT950exwlKeDnc7-7^jbMED>+B7D(PGoXR02fjp-q>q$8U7D|0O zG$Ou56%=TaoDCQ0YY1Sz!GvC8V6V2`(cr1#%l=HUw4?%ce(qPv2pV3OYTc+qFhB%2 z=tZ_3yS4H!_EM~z=F!PIevdp8YOL5mm;Q_+zw3%o#}_9%>4^+3kHR?m^+R!}cqCyQ z$zkMGN+vH5;Nelg2Ya4`zfw!~r?{S|&!Gf1^nHyAcdGZ(NvrOHMN}~W=YE7I1qKHT zP>BNKt8ovRv9PSB47J!!&uJeGEl(vx^KTLj3}6lM18WQJ1;-AYraNH`9)-W^zOg`K z;MhT^=(){_kq(-6tw$$@P7L;f!m0*!Nv!K1ND|1v>O>1Qv6J=L9DO;s)V_F)1k2Kc|9)9OWj!7-+Fy!|iF?tc)7;Jm^*`LMa zx9jx*BUEU(Cs3J9=D`9gA6!xf<5oX}lkc52e_B5Nc@_~lZSKwP>v|GUuFu&6n!)8$ zTWE!+9P2Bn-&fF_kZp9-Z@pr^zkNX$B zWE`;LJ7A>9RtAFm0Nv;_ci+js=ID#Jhc!|e)~lhUMn>CfynQ&}r4S`T zWiSYpu-lf3wFo@K$_q;MP(uB&(vyfd4uEjtF6IfsLJkbtWyx$BIhtVGI6uk^qZPS{ zNbCSs)GLG-+4BC)C;s$;q*ARmKZbmN4v&pvIk@~kV$&HRbMsy}sjxR_`?krv4Kk_i zkh0^f3yqj`zZy2ZDhAGlJ6(_r0RzaXD5xex3*QibmYO!GI-1>B&(t3uFlBVxCYwm(Gp=zugHr^l_$@JCU(tH- zJmSJ1l_c*)Xk!Ne4pqSRyV0h8P=^mjH`5#$w zj|66!rZHE1?%=_WnggIf5~fp)ms*cym}PeD|4~MK{%EaVu3|s8J?=Yvxg?GV6T_=z z|F69-kIHdx+fH`am0{CFhBkK-i4=+kY$44Ep}|xU&4VUo$WTOgNtz|4P?R*6OeqZ# z4K$FHW=bUuzT-?G&+dJ{Z>{goXRo!NXKf4J_jUb-^E}RDIPlRv4KNI81Q9bGNj?l$ z?@9;MHz8YQ9g*k;<}L9m5%m~KmXJsXkS%xPZIO9x%UdEAnY$9H-{DiYOPm%eYt>7^f@+C9)l_K9$%(UyL2Qyj03-=Hc6ueL%9odf zqioy^@_SpJ&K?s4Op_J7GD>N}-Wc_$D*H2?d^ttGegg9gd^Q^05$_rT8D?zH zNrMB@7QI{Wq?oYNUfMIt(zCqI5u6da&xt3B68gl;8Hrh!^A5%J>=do-U?6*U4NWj- z9{#cuD3JGQKekt{A53`5aa&+Nix|@d91cYHD_{-F8y@qO63Q*yNtA{sMaZpSvUC7> zDxld_ri*{kqD4&}>!HcQ<{TLJ;?f8m-Tv)q^HxIxg9ePuNOazphC?t16)M>XDQqu& z=Zm|#{*1rV6U{=T9HP$P# zLk9@_tI8(J6dOdh&(3<)mf}+m4KLjCW8hwx-S1?1llX7M&1JIN_E)q|08&b<E+4W{Volq8xSc^_vZ=;rR{yl)U^_N|ajdqSAm7s@Nd{HOkC+MSTG(wv z%K%t7-X!`8WQ#JjE8vP?3`2=z;Ec^NhI{u84D^&$R8&fBpv;S=ErJ5%f=yHDUw6NF zy_N4my`Tx+Azah^8y!4kZ2_1R;po4Hd%FQ-Yhoogwt(0pyKhnvGhSOBb!| z1`O4Kaq3CyoLpzbAe_Vt$o1N6d3z*xFl}PPLia@w4g_4J^B|W4n4-OJ?{-TwTUA|e z&teu1qS;}^crML&Yr|@UIQ3-o1O*fx{pQ zu!d`8e|D|f>IZ+A#J)LID|UYLTB*7fvpGyiz+D)?X7T3K-RYFbG~gzN_o00)MP#DL z)W!R~0g_-C01wM1iMPO1XjIdH3+hN$UV!`WI@H^B?cWiYqy(h$^WAaH9^ea 
zm`&zi2b)(h@HsoFuMKFXomopz2r0=h=_TKSz>p`xK|VFrX5qVG&$t-ywiB$JK$F@F zPnQ+}kRB~j`&UG!RxHZ2YET>Q(qlMrvJyMLtYn~WSJ6S}sG~9e`v7jg7DN_J5zX9p z9;_n;i5(Cx?Sw%!bJkf{Bm(4$LZ)wnRqNdKeE&AuS2XrtF-o58%kQRlF4c+y!u8{^ z3oI5TTrz~iF}u}#85pVHF3S$e04;>dCmIC-IG2BKEY@YL8X&ESHa6dUzGyaJNxYl^ zV@8N-w$zlI**t6(Cbb+!u9yyQ0SBT4>YY8`(x-9+$vY(@{BewuEB}?n3LyCb4-W^S zj68UEx+}b2(%B0pSOY5UdC!dBFD+P>(7N`iwB!)ACYDs|b8}bRM-3W*Z@nI9VjRck z^1$FH9vYyN3M)a$@q(Ic(WW&SjPZO6SHOcod(YcJBggS3=$py!eYRUcE^!_GlA;ZIxUD9e z8CqAnRcg+^z=jD%2d9YKi(t|F%{XY3hV1vdaFQHUWO*H>**w!H{K1NSTL|yGJMhlv z*)|UhH;9j?EweJZ-5lJLY>j386_@4=fDO0Alp%s`sLIHIN?sKH;vKqw50e%b-Vmvi zw`l7Aw@rAa!~@D;pgbm}rEz6<%ep{5Ov9ColS=V&f^)3-2czkx;e+;1W_RcH&fUJ^ zFJF%W>fK-qD0YBlPTS)I2aP$XbDVo#SGif6C)U)|+U&kk!(!HewJ)^~_J?!>0{j8(qST!q@dl5AQn8Zw z)}3Shx0aFT=D}4pJxjWFS9IAG;eK)McO>6IW^8NO8jmuQ@A7DkfAJC0+tQ>1zdb$tYj%?6Kky0F6n8>4UYN^RJ1zG`}qZtW_k%85Y;0s_Ro}J z>tKhtV9>5Ow~I|>`{!3m)#|wN{IvQj+*VC?;C5nUVI#zd1Tk1~rlrRoU7A>sM@v(B zl*5kFrXGyU=vr+qy?y0bjAd+6NuhM5%HVwY&4Dad^CuVT#g4DKJpH<|zccUEA`P$u zD*(cQPLA@41|ef5XD?p`_f5jxKmeU5>M*Hh6Yog}(o&$LQp#yrm{e5-tC(x_O;EaB zuxiUZADe5%=L}gO!S87C&ceTJdgqH}8^Ib(#0e<>SUz~c4Ur8N`4yzv0N_Nr@|d(v zKlnCoc+)p`9b}0YN%#Vb?oox;&}#U>?vn;tH*Sbkm}ku(=rk;J!HzR=ASC}MhiTxT zOP}M%@iBW)H@Xyn-Wj8ORv9yt2QHx@-wP6}9OFe@+0lZ6Wpz3h(H$_qUl4gHCZ)XO zCe(i$P=H#f)WWt6h$5h5`Uqhk0^`i){yJAaX0-XmSa094=aIQ1aiHZ615^qXtWzJW z1{t&99>5P&p6%>0{CchShg7oj*ai)&As$n{!H$l<&rH2%rojDDR&GAJQ##6)bJVI! 
z-aB8l;#hY-!QBXJ*tFrx6s^1hU;lR?IAJh#tLtgS>A50BWu9Tqh?ek^*nJ|cwI$OJ z-e%P461sF*?~0hjj7J!6G!jCIrmA`NIMLp0yOxM|G)Cc`xA~6q-ZXCgDRnJy6ark*b#-vaOddv?*4WzX$*W$byTo%!e;)KrUNm6!}}bN44<(dEYs6eiA;h9NN6354+cCYVeL%?ItXV#LA}x80$se0u?Q-;Xc|I2e7KsNqM9O& zlT?isj{XaO98Kj*m^6gL2I&EkkdE;iXeQ_{yhd+>ENX}`4Jh^-|0`Y^=zIaF zSPWa&)~J&4Ps8PtGh>&l?{!+W#w53=5_;Qyc%n1_xS+jP;GXg<7RWqo9#7K;0I5R+ zbvU^ULIXywp8NZIyTKg^KHT4J*mK%`L#N%J1KSo3zRzGOGSnP^u4jIvQ!>_U6%%X> zkRb0&pK8Uc#yMz0>&Y|i4;Lc<=>oWCG=T_sbItIw^<4UIpp1L*l1#2*A?%plPf=aV zj{y)Qeq+E>;l@3-FONQP8Ca^n!bBV$9ogrCu5+MO4@WAfyz}m55Y-cd!PPJAWVLEo zhWLjpY=b16b#{8nevkUgOPH{XT!sW)n9#71atO9ubLRZ>o!7U(vrkeRvSCk?l;$Nw zs>RPSDh)l3DJE+BulNO!ocneqojAK@coi})H|b(*R*;};#{-pFaU>K~>$}V_f!-3A zyr`U<4qB0YrKO48Nnmvo-4m7$`ngGbKh_O@;k+{zEn1gBMZHcgmox*Va8a^JK<^kt zp2c{4vh?+mKSTjs$zjsLiMjMr`&oD54f93C}oS<{Sf;J8~5k%%1sTVeVOL zC3_H0ftwye+w(b$&B{~V0eP$?*!y9~P$_kPaH-@YrqsL4$P$BWE6Anwn1Y|LMTNkL8z>O7^&-sl~y7Ok+{_C}R@Zc(4b-g9;3O3|N+)=)&=du6~ z^!xdFM#q0_z$I2k(XaXMB4>}qz-7Rp9YmJzGqg88uj9jdrmuevQq5<$8yZ2BfdOa2 zxh|+gqEW(mf02I-;1HY%>gMIP+z+tzx=E4_M0s^GOu1(MDoB}>*EY?;CfGr;Q4h}#q1u#+QrI)B2`I;)Up8Q_AcC>`$#8ozwYQ2qal`kz`<*4mML>* zoz?4_vJu3=Z~Lnso+fDu4h15bDs2E4F|FEqz<=ZRH+NKTxXMU^ z?~hee`Rg8eE8;EATe4(HR^!?!VAT4B_>5R z_tZx~YEK?B*17kN!q%_Am%j@B#)TjUe1;#D77;vfB{WxMKSki@fLYrKev|WDhv!tJ zrcZS&!*NSf6#^G0%6|#bwXYZK!}=&j8A`yS5vV|4vlRL-gE&kkmR~VSLpdO>6dPPy zTiHTy61s7ElTFs4XJvv(Xi5o>BmvrbDS*uPphnPwDEt25r3t%h#of+_k#8<$jg(~4 z+{VwLM@ikr6F<^~Ssy*hAV z7z3|e2CvDqZ3*2v?de#%6#ovpx7WIhU#IHck;v-Aojl;F)`}DNMxI->x?G1hE@hF( zH}m{@2J=j41_wN@ETb_$L^J>vO$5$_3PsKq%q&A?WJMVmA4gcg8>b zogE)?yzs*5SQGG1mm@1c4s3fG9st|C8aqx83>dUU#!7F!;#F;6ZqV%XAoA~DCHA4( zCUupc3v)k}Lx7UTmk68(XBNz>;$h>d#MZi-M-liSup(rA3phl_c~_j1vdmfvmpw=p z@S1Xeyr%Jqua+yTpy8(yok6-ed&Czq_E!0d=fX6>|0Ts;kfKjXW|^451>>T>%@#~E zOgD5ZKLbfrRe(GS0kXm%`xn{|?G-VwXflGTwV^2DTv(IGX$QSLuc}4yv=xu9w0S7zee*>fZpRH)fQm>fv06f2-!P?Jmy>>ZI=l`Pt8(-9Dn;q0B{!7B z#E_*w@t^>bH%~aO{V-4Lmcg>+wU6zwBCwxmjFg6vEt(4H`t(4!0Mac&lLK6LPg}cy 
zN4(!UPAw<>wn=AMQ;fr3G8_3m=!!%Wkp;~<&8R`CeWh;cZj;g5<_-b$jJtfnGT(?1 z0E>eI&S?wTrqq@qV`3{AZxyT!qgZodi80F5OXj@ zeuhlnx~|^6MN7*u;-ju}ZdMv#AJ`SehkZ;Bxx^pn=eNI=-;H+&Z<3svmMK||Q=s|L zTTGvtI6k9{p1ojl0w|Yhu$Eco_~jO9RU8afCV+TWf$&&@8Y>U?9Gt%+QG*Os;XPnV z5@nj3(`*1{PDoICeT;L%7nWlVA$w4zQ~hU+`Lb-+)6@j`WQxehXv^7urZ|pf*AKa! zbH?r{cV0R}yq#+?2-ff$ppFTXkYa$3#?CR8}czSSHN$mI*n?ADU zJUMzAm)1RbQ%_g+M_LPauI5prh+CS;>MWF?nkhC8EMxxQiGvT{Wh9&GqdCpnp4Y6& zk?%KXc?gzrJZ6Enk|qg0f>jl1ofCSJU&1t?uKmJ?U0H>{g-s;|Iuxyy%w@&cvmR{Y z=XhW1dlUUjOkdrJ4EuO{ujI^h1d&ayIB7JbBYC6pNaum%R#Z#$M3pvxw+^*nXI+L>rm|+%<3ZSThX6pxZ5ogC$ywgY&J@oyv-g#np4u&IDM}$m}{+vc%-F7F^@zQY+9ynezqn(vLDyECN3j;=(qk^M0cFB7U=lXR}B?oz*B_CtLAFyPh~ zKgeHduYjQv$U-lJ<{a#NghaLSq@w<1>&N`q1b^91D9{H-;u&~E5Efj+BRtNX%)QrB z8lMAZTOV+!X@U5Sosc&FX)I+75RZT=7mCeNQq%O;KAzVXkIv~j>TUuXXbMgC;+^Ad z<5Wd^*L+39^Bw)nqq0?TJqCASib&uRECR@U6=M1gh`)S+(Q4n@iO#p2ixFM~e`{jx zKyyrP2Gomze7uDoC_rgt3!W#txtk8)>V?k^;Jxz<2bU)Sjr=Bwnf>-F1O8;2a-#cN z7?ud_R+PwdPE!`mIQdya_0(0@;ey~h--lVOZgUf|%WJ>p$|>wZ+ldjQcUTkKI1az7 z_tBz}83XCfjn|Oc4yyluNXGTkoF7?&)-8FTI?&A7RbqqtCZq+iobux1Cdu7rOy5R+R3g4o*Zr|sVfKZnt*3cVkXg*ExVMD>Q^f}j^r;UP16 zuu8!?_rOxD6r7MMXBL8 zyfIrn=R3muW>dg{%$<@|nGnNaj6z*sSao3T!TfsgbWzots56#w%=j)`lW4shHhgX% z7Gt?Urrir}K*X3uy9MIZu3c`{pM)R$46brm=awB(Ulhx-seZ_+1mnd0tzw?rh3}Vp zi$9rdj*T{01ac*UdjpFQ2OQqgBn?!>q$JDk}p_g9{*vPLMBT+>E(2k~GKD`1dwY zWCD8kHS*`>fp-BEB04Lm%kg1JY8S$Y&WtLtvjmo3-(?9@OkoZlcz^jnJAYp+w+-8a zFJ+|t5J-B+`A-)^sRLlfW{K>~b8@^XiM1|Q%zm^HCZER?EK;O&B((L2_@Xo-%U(a@ z=-Xq*x*bb-u*mU?Sp^1&odTzN)+Ax-1Hkz+6iox@%tO_WMfYm%adKTca)mtFpp>@@ z^1>cjh#5Wgf67c4M2yCuj=~3qR)Vz7ZySYU(7q?JX34|{JJ$LcCqU^$F;UY%SjER; z4`dT{__WHhA`wbIl((wPCRx7{VEeZL-|TiqSl964KIyESa=!V}x;@DI@Iz@HRz1~W z$?K{l%ZMc=<$RsiXxnhgpf`FoQD|lTP(ZBNGEG-2o*ZLOXLBkU~(LKNMKFa}Xg$GuHf@8CH* z4R~#T`H0B{`~BRat;trAu*?z=b(oycV=sj>4Lh}vc!Em#s~(ET4M$p^HkYrR*+piA zkg`HOc;0IN?t&H-Dt^IHznuZKCO6VNF;+&s0p?aCSLIYS?x8B9$d#=+e}mmkse?Q_ zy#f@giMsYAR*V>G%qISFX4BC-)rWUQI_SRx8F33~{i>J8`37n;K($OS0$3t^S}RI< zJJ^(vhwybWbc+7g0hc4}0U&xKY 
zp4@Faa1>8VJ}~qt01n6GK60do^0c(WU?Ja~cC2hFONUm@MF`aiJ$mnu2;4 z#9)M#rBPH!n<<~&?BC9p_PcpoZ!vJHk?rA25SwAZqZyXIM}#Z3J$7hK-skZLy9>L1 zGpeDcn75yQoHff1iP^gv#*^tAE};Qroc&pge<_WOp{|DE&s8)^v}W$|?(@e)%LIh( z$rV}XhiUg?k$=Tfd$3GG=Mc~Sz^UYg`=hVE>Z4Ik^S1nZnFPcd#SBejY z7&2k3Akp+m`XZ=H!oP6vg&M&}NCB>iBWfxpNI6)muU^WJG8W>|R62tkO%((#jX zxI1mhfvx*n|N4C@?R8eppY1*(mWXc$Ve2&-IN&w(^@M-h|JA)kF;B1_BaveGTwp=( zG)1UpPdt@}IAhPcW)-X$nuf0D;COi9uWq^VT1*Uzrh}ae=i{ROOIWqAn$%QGPSVi` zZ@&#~9K8eFH9LF2Y=U>`XCyN=fK5>MXVldL0AX<&?0lgr;jZl!m^;%Ks&6L}c<%p?dWQTXoL))G_?!?zfY8by3ogYFQUVoXJ%Q0Jsog z24>4Pv_c7EEJseTRj)rHy1Eotc7NN&j1L1G82^^sa(MYnli0O7 zl3Qh6nRXBa(P`=i&VrF>#>q6^J@p{Vz3fjvZ`X~#IULLzY)zjtfm`}J>`L%%|N0#9 zsip>nJKOaDK9Suy$ruoTqgJ0T!8Pr->CN&kX49u#zpA)WO#O$iNd_d(B8aHX1q^m$Q>Q7U74}WHv+j3h_S#{{LOE|@7y4Y@ zp&%sJ-by@$Th;60o+mPhKL-KG+`^-Odbv{BK^P60W{5T|EFm|&OcaXc)q#!CNO#o^MXzO)85`JYIr+ycaY^(0-`~8p@FAx zyPmF!-jcRD)M+%)10GS2wnF4)!ikNm2fM`(#_bh5rh0D|US+Xt!iGb2J0a3fMvT@2 zp7Zle3|@oVK<={FL12EF{>jAT6v7=pfXhN^N%i{E2SlW8HQ&*1z$K1N(o^nxbX{C^ zV$VK@HRfC@$5;K`^JmI8%&JMe+0#*RY=^AoM3(hqe-;{&Q-{H*F0n3y^`~mLYHjBs zSx{G?^3nj^`2rl~pkJDc0k65aIoX*JF@`*>sI|hoarCSD%gt?g-PJ7@!mt=666c%f zaHj0@7S&nBy$CftsAKp30#$M_bljM1${8`_J-0$>=aIxkQ3C4!KpjfVV9)y`urjDl=ip(b0G3p)r?9oHr4fd@Q&%Gn2j7#Ii`M>|6 z(12xeEE+k#Y=y|*wgUIiRuFv>zXxMJ;z&byfpFwfZF9phhQ9+Pwk@i`QvWY%67iwC z#0}|Y7tHPEPZoCNO$>DI?*{s@88i=f$I-G124-^ruF^!KLQ`V&Of;>qbJ>8owOe2O z71xxI5Rpf0KUD0l97)`(buF?5sT~Wew)3lG3Mxz4G;N(B?GZw2Y?HP-X@)yl0yK33?!6;3`tg^SKg&id_G^h;=l+ChFl zPSBU>P7XBB#CoI>2S(7>fMKl9(Vd*ukoF9o((7LxHrSQ3Jlk zKqHfwn8<2c`wjeX41-Z^YG0FT>%4 z1zj!iPEmN|_1l_@Prmql6OhpmWI;&Fh})9n6NYkUA9_0AMf2~L$i7bE;KZB+*K!0T zu^(UvwHdQ+RQ49VY5*n4WQl4!l$Jpl0`7Kh-Bb6%N{D8pKrRPTi1!2w7i~&hewnlIwyT25=17OvV`2I zhLhb7B)w($NyNqb(=iLg-uN)E0YZ-h3o0gtnt31J_~F;E&+F2-uv=r#Q~yoz?`Zoh z{knLrp8nwxv+GErh*ya|u#8Daa)d6cf!df&vhELIrVH0mX!VIlLQWH^7IDo)4utiE zq0`uKX^)Fjzo7UVrsj2+b~9zg{XXQIZ}M%QH?n_#@y=u231#V+a}!S&#Ey-mK|&4x z;4bAQkbyP=iC<78C~Ve{Y;^Kq$pK-!0hmJDV{!ti(y`E)t5~Gp{kk(lYKE_~!m~fN 
z<{w-^J=>mRE(mu5Fjfo0l)M-+-_%2zhNER=-*wpn4ozig_Y%7uKuCl|w0dBRj-u!f zC}i$d0daOkp5!FBdsJ(rJKB*821Z&wS?_KKdu-+kKP7<5fXK$=-Jq0z)~Wp~P0+B< zEZtgLK5qrmK^i@B?S3AKL84muM1|fE@TG`{#q@r6I^HDQXm(pGAY7>IvXW42lhXm6 z*O)@rr10s%hC$cvJ+~9o(#(KWf7+U#u!|zj+W)GT4H{#FaWDX~eaJNAQehF9LO!HF zP};A<)|<-*xKj=q>9ML+;MpLsiV_=$(2ASM=OLrTXm}t@+!MRk=&qq6j}^0SDc6QT z&nL{WCrQL%sOjY5=6I+W*&B3Xe>p5PU}3c0Q!h(Lb|IuQG|=sATohJk>{|766uu!}TRL5t)yynz58cdJ1{!4juV#J`e9nLq}WBLrnm4t~-Ci za^JD-G-lZ*1y1^tZ9fl(M=oI{4PSMhu0})ikq{BAB)9kR>vq&kDT;<$0Tt(H5;2H4 za3rBsd>S|eyrj!sw_)pFE{G3B3KAJJD)iN>8|r8Viu#_9b^j9_;21ok?L>GEyN~76 zl@8XD9K2^iwA9;ru-J02lO2as6YpP6A1|kIBPN)lu|>v>3H73 zt^H}}OzazPCfq?B$qa?VvQ46PEvr1dxRrgocBPj@Y0ay^9g8vw^S){#& zt*G4L5gp#DKd;0AI_oAp$ezj z?0!sTaLQeU38V%d;uh3|gt|j|y$=lJuefpHxjI)A^Q^tvcs}B+Mo_+yH5ypn(HiPO z%cnRFN_+_!{WN#|+v8K;l))l}zz@hoUT5uK!o%eMTIEaQyMCp&u2Y7o@|LJ)OH$?~ zj(-jO+!yE^uizqiRb|Pmnuw!F2h{vcC=ziIa=8u*|5!(x-hZa2ZO&y=a0uWDntQ5K4LENr6gz_|n}J}$6~a&vQURoxbw*JR)X zLm(Q{;Ax1L@b?=YBLWfGQ8vHh1}x0>(XGuYjeumplo&tZ3Cb4(6)(b>{SptEwbftr zp)~j<6#4?cy#`1v#?K2_s5q(R{%Q1X*D>>zVP~|xO8mLHe3NFV^9>KUV>S) zK%^Plm|8=a-u%>jZlWPuL=v}JDmGa%WYX-Pw{^=cUYgLlkugeO+W-EE?|mz&>~k$O zFE(XyHpusTEM&Y(?Cy!pnqHwv8j;T9#_7a!afNAK=AEy5syKA(1(WMs3ubRSwZ!{u zq4I)FTn~BXj}jGGX|!HT#7*Gl<-d8x8?kMl7PoC_i%fe;X_bmWk*dE!ibJKU{H(3a zfIvCJ{D7t&-P{N55O)?wOI9`4Fr^kRCd}XfMv3!GjZH1OOsx_&mc7zn(X&;j#69QT znk$;NlKpV;c2583x}XLpNFGE^i>6N~v69)*R^7Omk2AUP`3W{0Btd9R7J@YH%-pk( z&+4sPFIbM?F(s_N=> zIMyC$rxn*9Io1E|WKl+y4=1|{%SG0nV|_!livIMP%{7}t3jFj{2j6(HIGcIulS}KX zU-36L=e6#6yXFhXt2h|%?va{i?P}Ki3BXtmFvVcBG={LzO+vjPDIrnF{X~b;Ct0`I zdfDqY55YeVjEdsnt_78(J$3Yy2xC2C$Y{GC3d+4&jiR7A*sHma zlUbsOsk^S%w?F;MqXo6dJI_IVmELjF^gS#Wupw1U(mIN7;No}aznGkq&J);bm1bjK zi3ghUZaabtp5T`D+HvSj$gJmyXp1axC5bP~u(=PDaL`OW_4*pNRCp_}v&A8yv%jb_`mlfSfta+i@i!pZ%Bydk!4|k7wO&0srCFj+W3`r68DfCt+dR)Pw9^}lC#u1mZ#l>x+qeRZ z^dh1gTV%=NZIf2KNbab6Nw*b-HtNC&7V}mFFCKic`Uf>9Whv9g3E!72aF1wry&u*p zYvA%YJm5xq~>&jC16(1P+7%{<`3)uSxUc^%2oScRU3!A?u6dMdlxz&AbDW0lqNx$&i1-uC^;mA2{a 
zfvIoDn?8Y&Xv&|qmX?;?uue$WRaX-90M|!5h(*#d_AjsVsOB3hyawo7gR)I2Hnpga zMO>?MsF`GWywZg2L!8jyWA|2HIo+^XrKq|gIls5pky(RpoS#zF*J2)IAd$Z*sw6nt z7M)-3=Lm&7Qbw1Rm6=#nKrRrBC3547OtfBxJYH@NJ&(6$zDT=JEjCuD(gQqyDKM~E z=+h{(oj-TOnPcVb&kTols)+wl>HO^al`aHt&pX-k{9?K{3?Jt!mFs_VR%R1^rLl@X z;1C$QZ4nB6qS>p;Dj{W33zEGAuCkyKZ`js9uM~>J*yR~pz%N4ltJw9Z95ZPZrYw3j zI~e=(s#c7CbHDTlA&PHo5;qQt!7r{MZo5*(5d32hy7wqZ3$UoY7%=>-vxK9apymWyA!XGPWuBvdYH3;P zWv?>292G@hO2)kUp_|%EV=9Krkb1>@lzGY~rxG`5isfki?h(J~OxqPQrue2ZAbrSp zB^BT&bXoYYtI;0hDpY%I>?Z_aiqTBCQE&%Fm82UmQ*rXB#!B-Qo{*s-_KgoREIV>W ze2wl)7KlFqfi7^hOnKQdIcW(#P9CR>@tSuwcBS`JwAO-C*y9$okw1rgLm*f1;XL`k z8QHYJWD!DnDL7c~ zQ~NZ|G1{mTx>^sf2#t#39CM62_~TeJ(YO%~mk`cvY5JUa0s+y{jj}+>yE7*qJ`V>( zA*Ib?vV~HrI885ZNpHIl85PB`c9YfxY)u)Nq}eeX_jZl+HqQhfmFDx3#Cuayy~EEr z2H`jBVBHYNxa8~mL8xVZL{yXoz(iRLjNOHL7pr;|8oh8`_SLJCW$V-t|IUl}JFiHx z2}eaW+~+u!DUbon7)w}C3TltrAZBP5I)2r3G#%Il7-073_H-qrzMt>z!KS}=q{q2X zp1oQ>Hy=lF1}<{!d5m%cz&u!*-8Ke4Wj#|CX`8fhSMh6U?LB;0Ua8H)Hg%5Dh7uo6 zmzTSohYoAHc3tF+Dft)w+&h#wnw@RyTxrh$zg;C`L`WE9dbr(R$wyeaQ2rZ8;}+ls zE1oN{4or@YgNDl_Y9`C|r~TjmH2%Ewlbb9~#eaPNZ zoOh1z=jka~?94oEW3S8X1+A`tR#nR<~lxj>s@Qn(2DP-NPB@u_j?)5}m^Cn{%z<7rIe6BDlaWz#t#u;y`C4qh!gV9<5F=kcgRN+T`I8)9tc8n=p$O?mM4mVdyO5^G*M zC}|6tfIWhHa1M?eL&to*-fCVuOf1`wi(^&bCH9BVm}4tz>*_2Yac`+PsnEN#^2_6; z(Y^kEya^a-#on^udbslKz+N4{{7z-(-Gui@n9NyT)t!%99h(YnD6$}+(SF10V=|^p znKJe4nhFW*3X0uGN8*ejs{8AmGe=3=j`)Q*+01W^fyt1FEz)I}MIG32Le+~^i<|

taAEgYM@4PpI?spC z)%EqrX#ktGa;HcO(wNS{q?0Y2M_fSCcl!NIMoCP$nU`kL{o8U6}R ztqxT^7QjL4bXx7V;10Wk0h-JEJtOVXHqA+6jdP#Uae`%%1oA)aMZEv_$W^h#!;1yA zL`6-_00?Xn*w1ko?KytfPmbw3STlEn~nAO+sjyQZ5!z>lvY?{ zoH=bNdMG@*`F_z`w6t)PEZx@%2~A|w{_+U+dvjw)STk>WH>nm$y_5g*&HX@motS-o zXVzT75=YLxDCWGXOexhlD@K|9Ia0;^ru@Y_`BD0IDykIV8SWpAZRr=D4KP3X-a{j) z$-XR!4@Y{3W+qYL`~1v<|G4D{4vaS@X!N+Q%<3~r|8&-rp*>FJMZDTN7v&2hy)(nL zE2r-`4xNlIu$A|Xdf)6PANiAoXQZF^u?=fRQB9xK324o$~7%`0vyB@6*5&`0v5^>oWN7 g)A;}XG(I_xdB4E1K4I1#`m2=`)mBHZ(m(ot05lKa!vFvP literal 0 HcmV?d00001 diff --git a/docs/assets/images/mii/bloom.png b/docs/assets/images/mii/bloom.png new file mode 100644 index 0000000000000000000000000000000000000000..e78664c69cde4c92f1f696e9951eb3c1c8431992 GIT binary patch literal 271878 zcmeFai9gr*`aW)EPBrZ#X|vQ2vcx1?AvGxxk$qPo*^<2o%}k4ADO;haY}qSIC^WJ} zVX|h+AjzI&-@e!Vo;l}?na}Sp_#KaPW+LzR`}KOh?&p19_jO(ObFQi#QCPWb(=sL| zrj?3^vq+xf`$@RE{Ig`q9r!zKoPBxY&wz!x( zI9l4B-YG0Dyi;h)DJQ2hj=M!fZ2$8M!gdZ8A{Bx;)p(PoXAbE(GBK?_PXGVvfn2;L z(^pJPit_t3&))8DwK%I~-Xu9F_|wJTu5H!olD>yvn5p{Qtws| zsZz^+qFR90aFMQu&JN{`C%Ol#pyb~z zPti-~^|^K}$$iB?UvL0lN7dEUTTN#KQ1KI6K1A4YGBcZ&_$GW%{Kv0f(EQTZkd~`3 zQTOtH7-Qk;{4Zt~t_RcqpBJR|AG_m1xrVy>8f>;lr(1Kk@$)AP+GD+b*WnQq6nvbX z9_=#FbR#50tF=?@AHe&9%Auo2kG}8jR@k?1-`aKSN}4jQ7rg+&-z)yAyxa%7{bgC% z)x|3}$-lU~GTOy<$FbnbJAZ2cA}(kpX%`n>T5ekG!^+HjG|f2gflivSm}T`=2S-P4 zX=&-lj~{ah3MPf~EkaLVawRY@5LbECxv#z$A7?tyl=C$Cq{Ag_H>re+U8VP=dmZhqd(;(bu1k$ee!$q6u^#$aHXAv1J9!Hm<;p)f-Fn}1cB)R+GgQf99;*Z=pafQRhyYi0obg}S) zJF)q?em=$Q`0?X5nRVJXZr;>s<)#}jzaA38S`jXO&26-|xFyHYbg(7o#}4mrzp|h4 zNj7*Qyl3`H&DrUJC!Hxz-uaXT@s*Z`2)=5;$MWU64sl-LbUVd<{?o5-?(X|V>DJiI zWy_Xzw7Ry<9-qS%%PT4>hE0pvHi?blXHp&B+$XAJ$CEEAE(nqfi4W;{J#|SrLv7-N z_RpU`pLn(XiSI7Q4@EyOT7Dm~P$#>lPADlxW~qkS{Cww_&cZUia zh1bvdZ4&qC43OJ%dBw&xqkRpk)57s>mg{%m^i=}}G1;QBeEr!Ic! 
zRePinN;m8C+0y6EQ_-m&`TDP`^*ruGU82?w_p$y{jgQU6tm~6=OX;esj}3Pe4-Y(P zJ2*Wy5Y33fN|BeV5_f;y@z%`9p^Rs^Tre$0c5SK{;>kxa?V0G*m!6tz4<{Kh(2Ce2W<`RO*THLKP$4_8RG6>YS*&HSXz(_Mc` z<<4&Lcjrec1eRX8=6!awPN(xFcYrUyMx18Z4 zU&@oz!K0oBUDWEzP&@7>((<3qZohnXcC?}M^DB`6L}}jZ`BxjB_CG!>^X0ewJDfiL z_OiTODLWOT{)4a6I%obee-8I3%!9}=rx}{tN+HC@gr%#_wHhL<-SlYFTS1(9O zoOADgq#!f#`!B;qEUHEAS2i2Bo18sqVNu#>5yjy(`N||PHA384+joG&&zyGx4zQL2ziVd=#o_}*xO--$Rgg*4YUp*R{ znrpG&7}V1-mQdUvzC#w#Y>ie^`B%}SvR+(ni`V?@@NfH97e~4ehK@%!elLc4l{4k4^N2cHi4f2fGyGfY1}`T1=7rS;Wr z#{d0v{ORrqs?h**uQ)oa%XN5#=E;%HDm))mZG* zZ(ELgln3)udJ4@MI(~&?U4yZ^ZPPF9o{4(tCx@@d&3xYze$&`3p}KNmj077YQ_IVv z7-@}c#V74G*ZcP;ac}Wg(bk#~F+mO#TD*$=mQzL1RhCyRPZH;Jr1AX*l?VwH-6)xJ z4nPRnWkL(Ce{b)sPD$-=Uvw!W-BkfVKs?EIDBN07DvcGlckQ&4 zt?6F!w_j#rdK*kj6fS0|UhRhBl63OAL(|(C1KUh3R+R;{iSfz{7o;BCou3;oQ0$|f z$*Q96a&5hc|5X+~-elMQw>vgTo;JM1vM`?Mg)_NJ@hOZ<+LCR*v=5KZ5Al+}j87nf zFY+vxkB`rqdXzoAOEv$vLME)0!0)L~2Yos^Ixdd>vU-n{6{_SVmRMmqIk|(r8*VsN z+`Gc5FaF}6Fa3@g&#@&=C1Rbb?5Jj;8*K}(nPDxdqm3=d#*fU(0xymZwmvQOmmbvp zzb+h~U4HN3!ykm7eJZ31n*HJKFROe2N^+OQE441!eEz%DdnS%mi(A}r)@D`tCx!gn z4Hv*ZhZQRYE@Y`6`TV(~%4^P%^+<3o5`6aWKQ9lw+&u7fDF4k<*CB!1#N;phx&Tb1 zb}}u0%BlqkT(MyV$8NE#wyMM0iWq-KEei&uz3L@UU<~?oSI}P2WxI8|dX^ zvI@j*EMBzU@H=Far49X$UkUFx`{{Rq+XBunuB;ANZ*qP|Pk6yhm<`qo>QFl91Y$ zTAO&yI%inb323Jp>a~w7cY4N6__wTnR*QFXYSGfi! 
zqH0qga8h%N0ScLOz^5c=9ulKu9-)A&0sKENf39N#h> zQhYq*+~-yDOB6yyn?3;7y;ETCZ{rn-+_;SnAHh`bKZ2Fzh}8FU-ClF?KD(XqSbVhW+v$)HP?#!s^>!iD4gO3t*bagkFmiqfWGA%s_ z(9@ynHKnm)!w+|JOSyj9VOkPAb*?Sn|?4Nm8ne;I0%12$M6tq?%lmB+C8{()AK zufACK+A>LqS80d!Xf5k@8t$U{X(=S>h;j>D% zop=0#T)m8MK_oI+<&@ix`mEC6ow2LT(TUHT7x z(?*csV@@{`#k8L>uz7TV+Eao=CD8$UHX(PY zWM?7&+JE|W2{q6izb@JAi+*RTp#e+Uk~+J>i%WN$`ThA>{54$=Hq(58j){jjcucS1 zbF)*U2)D5Zm#+dvmve1dr8JC2s%ZO@mOv+!p?7(maS^9|^fRrr-e@N1<+(Xi8XX=k z-TX)PA~Z$|t0lE@!9IA%uZx!y>f_xzV}g$OApI1fxZYCV&`7N4YPp#+O2^Ia>~>b# z$XAT2puX%%l4Zj$ zk9&T*_u^m!0qn=-ueeaHQRmeMWxeKRq}ngCbd>Q$`6AkM%#^Fm`rkf^^?Zl8eOXbF z(qTzAigr)p)0Z06(mEZJJ5DMAoU$D#(=W3(2O`6=%}*E1vkm1>pRRVkY2>2EZePT# zwR0gy{3%I|<;Ba3FFZsvQWeGrTZ8H+V+n{V_gPegvj*|1_~;m)<1uz0$OQSLm^;^( zR^abgxr>iBIGT;bmk%_5qZ)^&6I+kQU8k7PNr=i`_L-vPBG0}90d;uln(rOgce@7j zrw1vm7nVt+qI<5~)qVS-Ty5I;&4L*oPqfE6Z!{My{`0hbDl52IR*mN==hDVJlzZ8y z5A7h@obL(WMtO|s8J_D3*R|{B1OJC8E-P@ zXS(I0A3l7zERO32BVSIQembxY2_p56hse8S6<5b-gGqqgY>z5YA!k&GEt>e${^r5a zXo1@{oxlNv7uaSmbzJ{`|A~*k-P00dU9ec6tkr_(Ihwy4y(Ma{RGk@Pf~wPjbmUJP zwkH<}Y_~C1OCa25WeB1z+rCpNWptqViBCN4B@tEKbCl721C3M{O^ppSmwvNa<~#6D zcwp*|#jH9-HL7xVoGP$sHq&Y)ouby~zhA#M+>N_(W9gM7y@p@Pospr6PDa>4R!yL+E~HCB}tt7{9X!0gSfe+4a5rouQDT9KA_KSb#5re#>V^X zC}n54Y3QKHsnF?zxcHC>en5NtwBwWR;-KjYNop5 zO&OLu%wApx0#MD41Dx4$qR9IcHZALs(D0{&xq`G?Vysume0rWeH`T{2ZLn(Vf7cp+ zy1s`lsy0!}I_S?$lEbN)QX$bSgd`RG#jPrACtub(w?3s^^QdyB)iH|0Xvs@Jtac`) z9yr2ze}8tTKf(LAXgRn6d8@mXB!=i3AX^zT=-G;h@$^2h z0jE>cUAtDEnDX`O z6+r0{@F8Xvto1p1AH0@@C@CUXvRWKo2}HST=m4)%@*D0fEd>NHMR_LBP3R9el4y0) zq?9Zlkhuy~k+P<^$kg8=H8;{e?X681e&;?!(3yGUL&Y_;F!VA!DZxz=HXEq$QPIXz z(oA)%jj>1(6VyrdCH|H!o!DeTgzJQKzav^t+`3*K2(>oNL=GuwGriggO(Z?pkMHu_ zosiqg$8y3yR|c z`A6u=ghx3_KE#<)mySND%bAAECf{}n=%rW6p!+H~=mub1_NYC%#|Z{5FFD}0yZ z$=hcNmp(f6=Ke)%bk%F9jM^aa(w?KePkd0dIGAmd7W-0;8$m%D2Q)$|rcA`PDkU!L z&Xt~k8@~loiE0Aa=|uC(SZ?BvByxTbZ9mhNRM|(2l2oVf&cLyW%p72x4#0YaLXf{T zHE#Ln8$`ekty8Ni>-zPjMDQ|*KmZzlkg=j1CSXtAI#$Utki#C2soeVIQhmFR)`5Ss&e5TQc~2tTcARd>CZG})0Hoti9M>wjnNLdd(zD?Di-J`5Y=l5@ 
zo?t&~Z(Xup`djA)qs|N8tn$wHm_GQ+;+27^1)mC+cZOL+Cj7Qb9v2{uYoeAOT;HsJ!#h{4+J1@5VmseaZ@nh3D^&x7CK>X#sV;|pe zb9zp+o1C8mC@G$wZJRINStS!xoi~cEe&-mVxM+n|P-G^f3?Ub7Qr$CYrhMy%d&=(3 zsvzx24cBU@7E|rHCFkXN$xYnm?GFsF z*;e31s2n&%yL~dzu^>gjytR*u3)i(C_iO?nnxNd>ghyeDR6{iMP3!u{`p?eVx3~}G z4HJAq=2zd73dL!7x@~^X5t-qlQ%xB?RaOL^h&qqubVURZ$}+kaRcOQ zeCeqq)iN&H<)-F=-4K`&*`F_zgz6=cn)4xiV#sSgA}ccXTu3@CNH@Zz%3FaQYUFi{?chCqRVP&rg{QBFVq{5j;u z?ZXqD0el{=!Nq7DIzx1fuM$K=+GAm~A9xVk^So3_0yb|3khDBMQXy%+$EBw_)*e~1 z1e`B3GNU~^A|&L8;|@Tx{#5QL*e_kW6yo3?{v|o&1HCKq^De5O?Z(JiKFBS_KtrTC z*_ZRj6SNAx%x_&OZ4KODkH=j4sr^NeXTkPs$aV~loDdc1Jj6>8mM*Uo)EIC6{I8z4 z8uSgUUbB5Buh#xBtwBKH85YR?%NvDK)H3aapUhEIbcswM^7(`5O<|)=uJ6uS$c(); zwa0@f`Th#~=l!-=+@(W}k1y}|QR|T1zxd4_f=c8)vb!f{0%8D5Y64ognLd+wpFdc< z!xe}GHhNIi;?!n>W7r9$HR+VVg0T3#;Ux93*5W$=DE|nHyiH=Ku4m+9Ei!I{S9y2! z#VYA!^%w2yM2y*ytlVaX1H6*eeyhApG=dZ`O($9>WrHet%Kxb0!kP7HCIuN$b|@D1 zfZat{8I#mA*-=w7BUPPf)BQ>}d&#KO%vQg=!A%`?%9&$zsufXka+PQ;Cu*YR)}lly z8?<466z^NYv6fb+snACv1D7-bu3R8?JkADi)gH3PO+og-cqo6H)*GJLw}MR~S!w^} zd?1(&VyDzvJjPo?)~(W^&A%NA&*Igb?8vbyhoxE=zeBQ#U`sr2UOEY@U~Y6s&S3O zp+Z-&@g-9W=BB8Fskx|@NvQJ^(swapXf1eP05A~O6rKkGFA~o@u6;Jqam{|Xcs-v- zBS|BK#gtTZpo9PA>rf2uWx(BO<2b_r-~hf9jY)}hMZ{e#i!ud+OS}fjT9@2;L_xtZ zo=awfaZ>OcG=KYn%!U$H9mAsuKXDUp31~9kF#?cSinMW~-jON_P*Ep(F(rel!otFA zkdYlG^HVxaZU^P5Th!7qVuWzbI{oZ>MD}=&KW!sDU$CVwZ~Wag4OLljl=|SyWryOi zWRfHuW$Y=XTszag+Ur5!^=!f!p3}{3C7@jeW4%<|>m?xn$B-Ocy~##rqZP6)RB?M$ z;o{r79IM5Q+#2l;OG4w50?4SDYVCWmTF!T8r4)C{ufepDzu$j3X7uSH`6AE)n<>)u z-G9_#{;rhQ4m=NI*8+ZGV?cAI2g~4jT95I&k?cu~Z-QKQ+7kqK5#nS$wBoeFvcD`i z{+O@xXR{iVst~ouBQW*eXImkunEPH;GX!qYQ3lojaHG1?9?Tza-jHN)P!J0V@Id{c zJ#7};{WW~vtakO zvz>kN6J4D^PRx9FoYbplT}%nQ5q1Dlia)PR z$OuX@@r?)j>QkbDvv1tl&bhUf+Ocf>G%z&*3WRp<)gPE zfPxV1l;!J$9%K+=wB1i^)Cx`+Da7T|;D9oCKClxHG-ugHLp2~Rmvq2p<=6zpp=1(K z^(^kNVkan0-oibdLL~)ND@?eTSSqk0A?}wMTMN={=W4Hs{nbOpjg5_j%|+WZ2nQ1_ z*Rx?KiC*22?x(5Q?lMJM`t*Xc6F{l zVn%ThH=m9ZLxl828qVzfdR*y8^aB0p7#^X~C`IeJj(i`K56JgHV3o9K?CGFL)3N?W 
z>b-PP&#Ij%KBz@@`|^>Q1RKa{{T8G2LWshqGZVwZuXk2$UHRf106;WaNc-12O1H{d z=#hu+9fCvM8n{@cF9(P`MSZ^p_+3+Inb)}erMU2x!|BiCiPWdQ3%masWm-I|kT2}i z%Q_9V>;jjje@|~SxUjP*K$5wqn{A)?{q)U}gTQjsWK8M(UR51?xRdZE6+s}-=_J(~&H{;Y%<)}Tf z6snuOX6%8vjz{0~XR~^9{{R4VyG0ZwWf(=2j7;i(cs`28Q2&EzVl|?-1DG`cZ(&jk zUh4`2`GIs)Wq{R85L-nk)*A_|QXy5WVqmIO(*56+a=04mTbK-icJ+l;fE-5@8W8tJ zM6Mk*eUBPXMjSw&ME68xgX}oZV5J$x#q$i9)I?@RO#ygotXkHL*07VLrB;{b?Rc9n z*QWFEBI05$l_~jP*i%TC2(&F;c2ayQXhjOBNkxJRmJpYOTED?De)cPg# zQF$a1i6WHDG#lhR&%=1|@b%7F&A5RlVaEVkhD+t<9@P#R%es&G5?V&L7mdDGZuy`f z8Xo|jokVe-1ta6=!%v!Y}+!DFnEj{rF}1B_NGX}j?=Dc&Qu zH^6jbh(z>Zz1)*lgc6k;ol{31Sn2V^dKmnq@*F{a;?wWH?m$)s!3)WTOd}-kP^%QD z9JVGt(#@aRC~{k{sqQ*ajt0Ye;L0kFa@MzjtB6%5 ze8zk<{omXXM9$mxB^wnR=9{Sb0eb@p@Tq3H{@+ds$$Gvx zj|5k#fs7iuW-5e!mDy*9JE&hzXx~STD8jk~wd%&F36!QHDsafA%01_q+pfegpb}AU zAV8Ijf^Obi501eH1g>aoWCA&34tsFY5&&eK6hp(KJbMJF?v|AOiG)nDtay6PRqcUt zFwhZhovO&6VGnpV3~*EIC;3j->e~u84rv!(qQL>WUWrY+(GFO#No?D)<;z)8Em5!y z&kq;dW0NarYu6ui_XO8xU&eFO#>qq)>{RD%$4dU1XQUdVxXHCYrA%vIr=?^2CFgJq z!Vs)II|`};=^rkAPuVlXs|x$F{2Gfw4RtvTUd^tuZdG( zm^pq~wR>CaoL%0iHZYUWhpMQkGb-RJ=-s-O9ii6B>*dL}YL?TM+S=L#p6x-_@vP{Y zN`U#T1aywl%8?|?swP$1!u}}Xgv$a`n^F2Zv6K8bTw5N!?!JW`P1t4_U4=9r!CIqS zr>pbh^K&L)ZnYV%_25gRQHqwn*cySpoDg!SWWl70()f$Zpf);SM-bCcciPUlq`?Sed1H-S97jc_-Q+}>#1fI4Jim`>8M$=v6gL{})y@?n`6+F+C* z^25VybTAs5-M*oRF*E2|aQJN1L3wI*Q}f1}s7=-A-G%7Oejj+a17KsOv70Iu=!g)v z*JxrkdI7$uZ+^DStgvc6_6CQh^JB^Ue_2J+n#|7)&GW9jl7s?5NPgqSRpUMJQ6-k~ zcS6P3Q<1Z&G1|0#)j~$WW|NiMhOjf);m)!T&a*9lVu|2IZoJnrEBUP93A&#N02LOJ zvde#!1UV`w&MwT=fi_HeQ${-VtpxJXQY%eTP$$jwsCxOJC`l8a+#3!+Pzuy{!=XxZ z>#~-&woh8i*;hT;!M50ZfHYf&_b-DB+qv@FG(h5KpY4BCs+h&D1VcrRT(ay!?(rA8@ut(sWy9xGij9k3UYtlo+*iX-TMp)rSr(h5n44)%CWIkBS2pvmbf>Q`D>fE zNo1TT_Ms+HKvY|4YJ6xj#c*uY!q|DROJbB*C%N*Aefzo$K**2-33@yVf7dF{Mm3m6 zIw=K&Za0XFZ6jv{w}QX2E{T)CMuXn{{rwJ6v;pn0yNa*!Me164mt9=S(m{Y8RWGHb zTQ?2nE+SkQG&<{Glrm{PSJ1ksaEn8$G+`|YIwI-QEvkf_D5_jfl?Xx zFrPXLpeUl9%?IzGH6YUhxZ$m>Y+HhsdYc9X_usVXOF@(ap=m9u2{sZK5kWd7)uJa| 
zvv}8na(W@0n~%>TXcZ|`(aPDJntq3cbG8y92W`Lm(rdMAIFa%Oc>!rN_D&IWjs_J{k>!ax|xNnIF)=r z9=&QQ+&$q=6(CDO@zW>)>$6t$teUDQJP_Asax9eBPun_Re_unyRk8LGw@;!l$?dWA zxDb=n3)yC06;t%ps7QtgvY*aF_-DhB&_NgvnIiP*YR#KI1<=6%`gOs?uWjaoWP}Z6 z>CNVaqjx`}R}-fGo=gCNsn#$)5gWL@Tc#^m;{>%1hts^hNq&0gIX$3mooQ9)!?@XB zx!88&SMFm4V(W>oq7LiRt`gsk8ok}{iu$8%r1ht4FJ|}dS)m6J#2?X+6i$IDVY$p5 z@&O6s$c8xTtVNqN@nZ9wTKgU~4u%2n zuPe(BKTsp@Mzmdn3SnnZ{&rea`f426#WRT7Bf%E>jdze_@I}m7*zOg;)Ef?{t8DA2yX=h*} zIHDT`IydZRGZ4G;Ta&xqzqb)Pl_*4e=q-oF+R*p64erIv+wsXB67~}kpw0dnSG-_jp3)QWjp#2jk#eNQe>1)DHW~p4c$`ADJ5ZkkxHqSzOvjhN=Wr_(N$= zE$z#5ANMC;%(;9sqJnsmySlqe0sn5QN1E?{HlcHgYt)JeGY8&;dSzBrbGI z96z534uBOcj^5)g5fCF7}Mj z-940qSo~v0;Qh*LMH;JCLom9A*^}S|6H$-|JB~fSkTLp8qWaX}-*&Q^4-_Weq*(SOFqR=E)?0fs@{n%ozgPN*}Z z1qm3)Y8KuDk;0@_Vyxdr`TlnaSdn4?O|^UP%3`F&creyNt4u9(!YC+RzSuoTN&%o} zc#=269?^A6IA?BlnnW*_!DdGNB0x8gjxIH1_cYpvbu4^GFx=p#c~-l82xvmZy$Y*D zUI#U+0Vw2txTuUpoE#1>zbEY?)R<<3GYqhtF_rvG0o9amVVjv~_xggy851 zjfMEN)L`3Rwm-h9{Rc7aR%Z|4M zjpdU)=#e5+n&^%P(f)~pJIV*ZgryEL|8-|GF_r&GRvzUw44{_E@{S3qC9$LLPF&m0 zaN3|7h)JlY zs{x{MNHl@;6X8&$SgcvH6G2Wcm+hm{KSJs_kFJb*_xLVDu;z~XQ^`6(QJMIG$Z=cL zSARVARKt;20~Jq;0rG63J5;v@$L~xu7t+#`jMw^{W6jlQO^+OX>f+WJ8`vsX_9L&P z7@fq1@k1W z+j1&E{wFzRX6j!Le)FN+8tSL2?qMtwaFgKi0Z=-ARFO|(u!PiwS2CMfYmm(eK{ri* z`0#;u)N$N0F)aTs>keH(l*2y zhB*SqB7rUr=^DTiRqjE0`Nd4~Y$5~=;&RmMP^Of6mWLep`!tu@X@=ni`b3W#$zMIY z1xz9&Y^f3Q1!Uk`8N?re?nWX7D-x#~75T6pMIa+;C_eB~Yl!594@8BbK$x5-I~o}U z%jdVYcbkc}Tu(fe{+~dJXztJOIk*Wu@6sxo)5Ef+I< zcx)d{boGQInHR%pu^HLQrP>*415uwuZpq6tHUzdKc{XVFh5Q>7hs|;qVg#lmRkt=q zYOU*er^;Ljg&F$<3ewZ zu05LG8r-hZWaM!ypm{%QS*i&tRk=stz%GJoHOl5U2G6KGu=)GpAs0lGddNy6I^;%G zPO(A56A+Qw+f$%usN-k?N8W6cuEOyBz7l>)b4Y?7m(j+?Wwv1|t$eM#dI;PlMjyzv z!9w1XgiI8CEvJ|m7%??1^MZ`VN!O+I)7bc{WWty+h*2evRZ_oSARyfd_lC-33wbx- z%PB@q=U`^02FJ*yk%z&lf@vnhL6>oGs_4+9S)N8tw+6B98DAz706c7kPCbTHX3mkI zZnJDC8BKdJ&rL%HouV!y>RyDSH89w{61m8T*O z?@5=)o;0Lsva)iR@@R(b6Do5{Ly#~QAk48Fh8Fblg}=vCr)qIgV@m@oNJLo<1I~wU z3X|`*1U8Y*ChIgm+SIFLzx*ck`6V~a$C(c*j9ywD>rfScU-Uy-LRC{qZ_g^e?P)4q 
zcf?^qM-KNRX9b>w#s|j~Ol9#Z-};V)SJ|7?U3!$?EM7shAQ_9xTkQ6aHHULI&T1I7 z9p1uX4tL(due}wI5@=^lqmHeX;6QddVqNh0Up1N`=&VFH?_@~dZMa6VqOfw%jybHj zt}?h>Tsdorj?2r#j~icY`uc3iMvJ2Tt7X>`QGTG*hfESk8SLl-{18w=ROgnB&t#Up zci+SM=Vl1qrh7WC{GFUeCVyT!xgySzxtx4IpK?HPyvhdFW>_l79C@-T(x^QWw?bnP z$5RjhAujKnmaN-v&hp(t%77`x2AWHo8PTt^+gMVUY{g`gW-E7$3U-Sx9D=p_2$ICJ zn*?4FXG0SjM9aQGkNvVTRrh@%;99`t7qi{t;P{2?xpi;G6}R?B?a>$_nG z7lk%KceMi2BFL9X%T7)T);Z5p1r_JZ2E+Uo$!XK%nP7^f=H)L6g@2oDiAY8_Ya2*= z(=&K!!#5JVu#Q>gA+AtSN96^OdNP#zaWUiQYBNG$saWLSn2dFq(whc{K0I zm~?xcSMxm#V8?Au$4$5}KS{%MMPDsp{|=3gEzOORl5!S`zA5s3S1;nXd;&81CbCB1 z@gy6{dfJLkMy3gikLm$Z9M(61jzfc>hK{i{abGH@62MUj0u!g{0@gF$5PjcS1ooQz z8i_bO62`0vO(~FRNAOEqk!M6>;J}rTobIj7c3W7M{{5UI^)cudHV*%}clD#dNl-fB zTOb?Pff`Ln2;?xtZXt)ElAM?GC3D;UO{pdt9QU-~Yj^~o(#28yLu-Paj6ak`iQ}V* z>YkeTQrKxue12cH!@NA0ye(l?(UUiMAOuxF3k&&HTTTW}Z zS%k!4;+gcQBH~?`nK2paL#T+jX!7m>q1;SpBPlU4DHTHj*<9O48trzfVuFIDUcj+R zaV(w9jO#1=ziqPe50!T!GK^~q{~eSuR=}!QXQMEsql>0DA9HvkOq2bZgr(W7|JzSeiwc{kGmIl>CdDR02sV=V38cTqzL&I zx|uKD6kCH%>;zcYKOD|HJI^D3htx|q?g^rQ7emry&X8wKE;^Fiqe(x#Sigl(1{dE*(h z90_Yg>U`;Mbc`+q31}N4XZY4GX+Pi=D#S=mMX>Bw>O45HYhqtnZbvWYPbOcD|JB9* zJTBqS+XHqbq0>_C?Qamlz!bo~hhe#yKL`(Sz&>!T-%~nCDnaa*?a;R-N4oiv`JQ_A zkzMe3O77QYS?U?4AAm-=(C0X1kz8C7WLjh~kao%?b=B(4Cg5agiI1>{e=6`S|SAkF2YaO^A0B&MqHySnld z1u2`#BAdxo3dgEo*RznE`Tgb(j9M|{5XLR|R6BU>`r_W4Ak&^(2Hb)FI@gu)aYMzb z{H;Erudfx07_;$Qx>|6o;dIOHa83uk;I*l~#Z^Wd`?PtE{7{wJEET>oTj;SvZ*gFE zn(uvc6TV1EK9TU*4;>F$GmL~h+k3Ye-geCTc4e(Go7V}*!!}63HT$?l$;dcjDtl=04BK18#XO+g-d*7UW1R zTmRa7Q9$HI9&4Dj4W2e?zxg7$UZE^8c6ea_FQ%chUR|O%j*>^G4eM9Hv#Rff2u2r6L6*|`* zerVmQn+*v%Vy*MfbVnyACI-t!BvbnS>Jxv?8yiPv!})kM?~>bFSd z7*OW{^dRsFC;4D*65cvfj12#ga?BlH-;tJtDJpD?DN?@O6^)|p6C$_QQ4_PZ6DsH|+XWxMaORzy%s|)(7FHIt#S5!)YQr*qJWSzOWIZrZAbAfd(rV)w& zC$RE>WQQj5Xefw0fpMefdo}F_XOBxhhOf)f-b5c1ze6BK42*6e0%RsYa zyR@lkn}?vm?r{T8P3q?`;Jjw;+2^SF8NPw9>}gP_x@mRdk7RsjG7F70g@+phG^K59 zmzK+^^84D1uv}3n3;#K|@TGpFl+5E^G4DmIS03T_6^bvGc=Y>?8V$_KJXJ~4ofwjf 
zUAwB`Sc9qhsw>uy+Zr)pBQE{L2P%+0yQ4w!)XXUKGpqX1iP2g0zv7o;8ruJ!j(`zb zxrt4pHeNLfhzJcix}3CUpOsiuH6{M*#GHdj`X3qv_N#bVXvLXLHwN+?PCR}6U)7mypB>A-{vGovcfNF| zJ*JtwYS~SqA!r4D@2;Qyy#8um-81R|`pnBmPgM`yE^_{BX|oCr=LDOZ`oVDZp&0UJ z12tY`pcW;iHS;Pa&TJ@}2~p!<7|4j|6pG)86_2sUEDs&ja&eR}TrD4_8!dmR0vCM( z9jZBpNCHhGA5XzAKY~RVq0$A$J?Ua5h_B%3+9Vy3ao^zHyE%NQkgwa{WW z*Qk4bZtZ)E6sS=o2V5M5l}LEqcBPpobzrv((gief$I{CKdq(Osudq8DDr_Q2>YaN~ zw^VR3`9{gAXCU_gLj_D1f@>MHVcV;iKFZ1+;(xvI+6@s7|CR?X`0X0Z?p#w(1J2Mk z5(_QWpMaQw+d>laFYuW8#9t-IC4+{>vWHHTA@QWp4dwL&hNcvA+Q6qpKFk98&4w-I zm6(Yrh}Wx6-nj`LpCxfT%pw`NA8n6iAsp(EiK*_u=_U-4n>7jsK zj%&I(Vypm%NlwzvUah$VpzZm=x>|bpVXm^FzWMyayZ^cbRu`c}*gpO*WTl7y7C4xt zUIZ?u37DRf&fKphr%mkQN;)bBe4oZjmIE^&;e8KWTQkpbA`uM*dZX8ebT>mu!x;pz zEp~wA0R+^J&!V3A;hd6S%aZDU3&Szij5)3enV!J@{L>(D$~|Zd`(h>_?oG?M@RMla z**rGKodBgyheT{mrr1MD(Prs59 z&W4)oM*d`}ug8oV22DHx*5X=b8(>Z&Yq5|~Zbpmj-Q5J^0~5>sWD}CZ42Dgn{<=hJ zd9YsPDl$x*)K4DLg0;dBr#<{}?!o+I)cjQ#UGA%QQy(D%K+d0r`3&WRD6j{EG@9k) zr^SoCPgV@f#dH)nw%kJPCBwed@M9X$`0 zuxn5L-Md>V!3HyC?@z*d(=})kV%3qJWsLi|4RTDe!O*^#br|mP&;&aH+WQ=nm#sRa ztt)TuvWEbvrT<(T<1y8r^8E%Z#4QWDZo-JVo|x9csVkd3H<137a4(mQ4&9)SEgUjb zW1+)f*CaBy8msEjzpm@uxU+D#{xN_-y2w}Nbr=r)L+vU+ypa>u6t)UWxpB+_b&@p< zU_yl4=E45ND%p3I;HVW)e7j2p(`lp8-8slcBcK?zR?9;~d<&@2NrE|^V(M#09(5D| zuc?}-6L53H)<@t(3Nagt{RCIgu7&6uV19_ueAlhVAE>jv4fUKsFiyTD>Ym2&P#c#v zd_g2r&*|f<8*9~+KBfU``6?#pemL%dHzsFvDRS7+)HybNaIG{+F^nU2^lj;BADkwnhpN6FlPmpId+f03&M927qCT$p$#pP;>5L<}gP`#byLs_EMqinY5=3W}*u&jY0{g-c?bWd9WJwH# zZjd^2tor(GlpCIvO^s@3Z~_moR_KjQ@zyc zzG{hvncZ`D4L}q-O+b^L$goT-T9hhDu5hHmpgpd{#-KpO)u!?GKM6h!BD?9-ZyiWx z%&-#q{QAq1Qe>MGzy7?aKsOzZy0epAHyJ)%GJ+LM?BYi!Mm`xQ|5YKWf1s>L6=|9u za0|1S{p*8%8(f?5Z@KLaN_tH9B{Pznkk(De3}<#pBJR>pZo*sNhP|wL%M(X(k*K;^7!|N)n5$~};FrL`s zV5Jfiwav&ZXZw*z<=hV*#@HNrazRs{of$4Q#fDO>!rT`N9UAcd!-r7!N*uURa%sKs z1|1`3twM3?hiF)qF;YSk>fqJMUp?2~1O`F~s48y*sNQFv%LR{W22a?@aFZ988J_Iz z<1Nh0pu`!IGw{nYlTVM~Lq%UT^J$(Mc!rtL9!)Ilm?lMNh)lnHe53e_4;L4m1V@(3 zux@D@vmm=n;S>xyH2%yy?2)aH$HfI}c>5Hcc;+uRHz8VeRI3u>XA=ztbF83;X4})j 
znij+Q$T-n~T<4FlPm9(W*bJw4!fCRBL`SmyQNBL?MTW*GY#@L3a28b80X)%!JaTq1 znuaK`f7#B;wnF%o`gr%36aOyFn8X=LcSeh~14HB!nAPdT?(`u^d4l7+qv`MT*l1Re z1`g@eE28e9pAA~L>w|*5kIhmbbsC}*ov?NbZcvC2yu>4K<5!#+pCA{H+}v=0d8A0q zxPg*uy`DueEY0kMoU5JL@&jN{@+6wjWF}UO zW)gKt3hU19O-v&1NYbZd+~4`vk+Ra{u8rvyM;7e}u zUdv1dQUD9ZhTyIOd*`<{=@kqHPY#nC1>Zg(KXfOJUU(0+&C@YKpxJ1#g`nGaIg&q$ zMmA!d2NS7>1E=%-99l3M0?2&h(HmcN0=R$#2ux50SWc7FHJ`ebZlFi8K z35(}d%!**sHY95GU73U_*Yj;v`RBIJL?wcY&^eW_++xR1XGL{@P4dI+M-f;|_1(N7 z<8Rz3Z3P|K%}*2M$lbkR4SCvaiIwF%eX*2Kv$5sFA0$n*^MpAlJ*TS^R-^z_2nI4XVTzGs+v*zWDz)1M;>f{b(4D@okB%JkTh=+`$aWg!nTQ4w*%9J57eSoaqMPj zFfh}r)R=`0VUEaK4pad~>_f65A%pr-HP^}=*9;AxOGbt|O;b2%g=MSXN#7BjyrWfz zPHPkV6biJn>tvM@&@Tt-8XFtqV4itz96KO}1=G1_Owt{l@JK$w&0~`o4~*%)tc-(l zm?px&4?ew0Hjie~I0nLprOr3O>(>!8heXD9a?H(Fs-p?19zL(a6)=;aV_l6dpGni(JbXX+-g| z>}y2*rwJL_OetzVsKD^Z7|kQUlx@20HdK|%t2j21U9}&JVcF9N3jqmMz%Cttp|ur;MJMnl*1mm!~J=o;-&nsu^O z;W6XNpFi!22}}7at2hMASxGj`LhfVyU+}L2)=lyhW|!nZO0T ztN1=J`&?WMeHt+B7A*efL0>BVq6H?Cop3c&?-C@Nz|>ZZqtaPPJ7CYlG|P_+&Da8@ zG~+ZLV%q3rodu`ykpG>k#l_>_pbq#!2NZ)c-+^v{e03LhLJnPcfDq)ni`MBF>w!`i z02-heL#54JVP+yCq$dxDG()pXDSCL(#+3|~8B05(+Qu3jA3_4Fe)GESV@;sT>`Mr9bL+C~XzP_n0noj#vD;?HpTB%z7Q z3dUOEOe)4voix!w;&NRy@6eO2Pv9{s2CQR;4gCmXgc|#;m}bmy`~j6+h;^jyF%Fr) z;%0&JehWBhibqTbE33QnKD9uTXskECcH5I=Q0GOKywB%LEAJ@i=tWL<=Cg?IUGv@b zeP-&n?>=B%^i}s)i(<=atJ>7)@o6T)=@S2 zM*_byz1%9g|J$wKmkq7ur`;3gv4o)N7e z@pj*Q>kET*YIcXu_B|pvp_YWb<6(*dw;js)sp+$_ASDueNJ|?jq!Zp#m|O zIZdHNzJb+&?A`t7{~zJi#blf$4xC&ai0| zCD0Bb+K)~dBI<(XFD6DLFH(todHYXtS8OPviQcbl3-_FX}RB)nPmR0jxVKonLzN=x~?Z z61pPt&%hDFI9dfB(rdW0oTKS+eNlSibN~a*xRLFO22E)s2?)+>a$3*Q(vp^g-T5--3Uhi0KSIbJ4>HW2{6b49Ms`|tK30@+YCZdy-wEAN!+4wU($AmjZF{W$*_cKRDViLOk}Vz z-DIESj{7Nm*shdmuewT(uktR#53&I()8}uzUw9I#m}+cn>@d#R)0VbQTy&U;Nu?Mq z&k2;vx=Ny!aLrclX>Vq@JH3U7S^2`FA}@Tff&O3Txd(PxvcGZTRHnBsLg1{( ztiX30#LBOJw-#F+*~iZG@X`lkJd45QVW0nYA-kZs`+_!j)YOP!pvIRtBP@%9m6eqS z+2k?ykU0W!OsfCki%pViYt@eAl7h-(@l^QD;H@aJW>L|uGbS&CyZ@?NF)>xNQmEqO 
zy)+jWoNOfKu!-)BbZz2{q&CA!i<9UjPjE4A@cGH6l?4Kc63M4S|Hw!DRSS=z%UVQx z8_tAy>BUTYOF+ma(OJKA@(i8sI-ZXKuM!Y38)@#Sa3{e`xT))>g->m3?7>0XW}%6n z15fJxpZAY=M?R_yxY zokLVjn)}ON2EXw5UL$e=8pJz4Z%9GkHaAf!S3)NYe8f&C`sEEuFby^k6hM8HkT%iM z!r@BE-sCDGf`QT>2)xsykzjTBiAcLVwPOpab%ufd3m}1z;^5_ViIs##r5*+>kbsFn=Jn< z2#QtFqZfv@$}W5iI9TuwQWm=Umga{vSA7*lPLd|6*2eve`Sd}YHT=Okk)EOG(79yy z&i0t@zixN>vw8|_7Q;9~s|1A0oxyEt-1NaQ;SLnxuiZI|cb4$VF8|xBYzzS6T8=%1 zqTG4^ny?S@t`HsKviRu|_3eDE)FIZ@zQQvwe>d{7^`2_=!hM40kk!>KH_x#8(~K~~ z%|w$5_|Fx%4fMmBNBEqODQ-&fsQM`xx+p90E259hf?v$K+n|fsUCwbTPM7*RFe@%rons+V5( zJoNt&_T_Oo=I{5S=vjAwEGN0mMqEA-Y6o4 zN}*IUsB9Ib=xG}zsZ^3SZNGEwgk)a7=Z|?A>Ur+xzTfZbeO>1|=Ul?-yK{H4dq#)d z>gY*hmiKL}N%pSo9!~I+J1RDYD#2__PY33VxyQIpKl_eqM*OO27OSJjcCBaO=OP*u zekvdF>9xiR(e&L4ZBJ0J4M^R#$M5Ky9@A(nDtV#$*(^wOM0&L=w@ZYkqe464Ms7tat4<%Q9a6o)CJi(%nCay}W7- zMtpC-Z3QKDU#DAb6QXLqpTs9?2|syu?KUBxZ!Eo4>1!aIGN1hx{g#7-XmQ3lH+AnG z%}Y2L^H2CrFLrp=_~L|Jcbnk%H%%_<|K5F@^#SVfKMs~I`1~{YD@X1jzlwx+gR=aT zlSk~NKfTGF58;m7Q~sEDqq2^6;^gwRpZ?U792vHk%!`HiQ|qu|Y{4hu5z)JcUhgwc zIFdg(3Gs$IWGCP~VjtZXu6$hoo{!C-dfH~moj@Yz4l=rTY|LSi76YRL+*|1{|T70U!-$(NeZeC~s% zl{;9hju}Z7My>dSGU0?CjcMv6>B3R?|T!?;?ykGSUQ)#6kLYNYdu+tgv zW@zS5|N77`o$kE(`?n|{C~M5>5u9dB;i}k_(^_|7)4RL*nGd|RB~wx)y8YAeO&w#( zub&el#BOw+16ME=LKB_F-%##6$zV7CZ_n z{Y9b&PolE$z9x!3YlIEF@ot^T%+|1wSv?>4iK9HDvmOi(zUq&`0qPDKb1edA$aed5 z!=`(N?fp1?VObPPzDYc?Sk0l8i-d@z0Q#m7F$Ip{Q3CqUi=eCL>hM9$R!l`V+t^FpcJ-+!$V}h4%?NA!HGo%ErhIF}_NjGi~qsH1>mK4OQyOE{75F**B}prY1>dN)4Bc&4Rz0H6?h z$g|iZ5t0xTK_`&K$F^0|O`2`=qPA-LdJmkUYJC{{ z{vi_|4B3D`V!AtfC&1YyX!W;3M`wI%JDK8~(L*4`wo_GU(7`9{}b(4?A7^j z^%_%drd{~_7oSCk&c|zPHR<~WY$fo`%L04pjygWNklnv4h<=;S6w!ZXOlJ&qFMDVf zAEMelJu4Udph>BCbDxw#K!au>bMX#&+^1?Da@T}kzUx)?uw7qwT$}gD>q4|3kR4a5 z5#QVg#+I`H6_F^`fG7R?1bJ!3m@A{oGrGBZsX6+iL7-8F{b3bC<5l6)KHe0XSLMqy zI70aSOU+Rd@X;;2QI5ZG$0)vQdEJb4ZM$VCa^arfbFws8!y8+mIYjtAyJ)loa}U#f@}@MHnoCN zaDXbdwT|Vd&xVDyXY4wD;)I~>VC$B)d#txflBZVyCTV$Q2Cg%|j_k^N9?$cL=S$`v zckfIPy4|Ev(RkX@elaL?)15HhMS@keGE98fpjE7C)&_zmxKj#}2zYJ^aRjZ%kPVHG 
zg?2rS>acT;NBKh?PY?53xTIlJq2nISgEQi?sCd}lkiA0>j@Z>3PTWV|r9{@Pd6B?! zNvLs=b%pAjw)hLD$?xLBp@(*r1vo%9pyA2CXam%BkS!V-cq_Ve(C&iHtVF8m5W`4y zu1}L(UWLd%|NbMt#$ zAb8xMN3Lx;EB`TinG?$|x>(Zuk`$Gz0JtkwzO4^L>uLZyZ`9JD;pp$_pO!{ul(gUG zU;OVqZO;N3VhB4S^zZ-bPFR`B7oyv1gp%m8xa86%{m!iHlRV>l9tCt|JVl8K6mU%CG-Zxqgn<7`73Hpw;y)-r;9R< z^lgJBiWR}cjY?%1(Ej<^%qMSkqf$p2Z(6QUm*iF-w)NTg**t61bWNDLJ^cw$f%DJi z-ys;zsdwN2&&Bx;&>=~uC?&qF_)K1*_i zD!>ey(8Lbdchc~608pUBcNNr~uaEi&wGMOR?n{4q0@e@(n7it}?5i+}#${fG-WV0d z%?n1^bVr!~Q8kYS!~C^t3kMma10xCiSMZw-fa~i?Rn_LejV&Os423?~jgu9$a`8M$ z02dw%3%li(BKuBIu7LSl;6OKe@NTVJ_jFL%#EBD)-ZhUoeum&W$uih)Tkq=lvmHbl zhh_a55T;?cc+j0%d*n`6nqu0~(IfYJDx4fh!N6Ip6K@Q^9veDIfdoh!I+^C7KBsUH zG+|u~0jQjBM3YZ)v_?RBPVo5lo#wFTI96uxbGZrVN*QUoyjWFAk+M`!kJf`NSf5^r z)oLg;K$mI2i`@&hL?wLv9Vb5^O&6LulL-px~zXA`kJKu-iFln8S)XU ze>gwBV9=~8r}4_Z(FP}&rJkKAyZ2Z|;gHFz?e`6eP+I9EAD_LAr}8`ST7zugM37JC} zs+zGF=_fIeDU$ydE$kRT@P{|g2Cak3b7X>Jy}0564oI@cPoWb)pFwwb%DgzMg5pP7 zF=zgMDZ+syE^vIb9G}yQnPwK;cp+uvkdX2KEmI1Zt;M^CTe%}>>Nssf3Q&7Auyri+ zPeo(v3Np!%4-~$P#%7t!Nk?3V{as}N5K@`LsU!lZ20qEeXzI}M87#Yt&{Ke`jY;$|T*^0JEzka{6qil#FtUB3WMsM|c zPOk6HizjYM(u55mZXVdWX4lt)`nR7Z23oMBJqaI>7-M#csK`TV_E;vg@&80Yrb~gm zILl0mpZAlLlLM=Zf~6*Bwm`wqH`rX~mu_{V+D-Og+3@1OZ=7BJ_)oOk zYR?EOr$!D^h6jJ+`0!Szc=#ggfG%(X2vu=SbrRNLqGIr}>HBovCW=sgdh8EFEr-CsMzzG&uqbi4(Ro{DR^DSPq& z*A@#NV^jsCc>OtUIa;Qqr^iI&5;>J4q9KA_jTX=qoXftj+YXmCpIw<~@`Fg7@G3Zl z#8kEUj&{HFqXQ3HeoI>;uhxVuus9y;2P+9_5dtI{!T`1c%5tMoo%7;Bm zUu=)~9cpKYW3qobIa}uqga<_6t^4Z-2?=b=eJJ~R@HPoTB zx~k(83+DZ0n0Fpx%lIC5XXBPqbFgpvp@;0xLwg`$h*oPNZ#;E9?_f(J9E344p5&Vr^Je#xev1l3_KbD8Je7&Q51n zfiwoB>5paBhY40b#;iGA4ttm6YPl0GhzA#pXJTSA!A`zFGv_Dd>0fCwKALvl(kN!8QJt z^MXBSfq*|7Vdw!x%o8S0_CT9!*U%15OYuyHl4E&^^4F4fOQ2A@BF@DEgJdKuGe`V%42c~sGgdPEp|b;Ix|_;A7rPwFw}bdZ<=_57FYsGoE9eS{OapU@cY zHTm^^cN08FGGO!>NCVYZu3UMy@W`1UENYn|@9IUdNB;R5&BW5Kc<;2EWwEpk+8! 
zQrgP3b2$?Z*G0ibjCVKYv4w%P=Ug5*C8B8I2K3(Rw%U9W)rKr(GuIl}M?xc-hcCKJ$s z%RjR(3FM7T$sd0d(wJjJqPoA-Hqle282!bC3bHt65O6MP#dnb00u!8*$G2RKyNiWb zgfX3*nyXmDCZ96hCwYBv-y1<_7XAxUNt{s6z!ZgCJ7( z*A7`1oIeNA4IJx7*FnbW7+WlG8`7k4dQ}X+_;4V`wFBE3`Pk6S#ST>MwehSJ@CPAq z18NxweTu#RzNl%})|?Lun^+*x)ZqX`#HSNX>i&t-IM0TwbR;}B&C0-bJ_J7dtb?-Nmlld;Oz=|mg9M2tW?2lgFIup7}Ki&WDIgLkST!>wwLs`BAQvSP>{R3QlAVC z=Suyeedm+(|hBf0GUdU793 z{r-b)&6`UViQE)-CrANkZMn$tIde2R)_3wMb7Stl4V*Y>Qe|s_j|q6Eg3m0t7|8^6 zcuZW!Kmp3@)$u_5w@pFS%hQukL5Y6RTOE%?*ax53oP!8OSj#^u-bs17^9gQ~pe zL1nQCm6)btAV@VH4Jfu!a0DIQT!ls;ZnVIFXD=bN7>wZb+fTVEJ@!QnQfwMF98 z3{hgO3LjGs3M~<}Jp-yhy}6-hxvw0gT%KRLLd&rGHF%#!sM9Eun{4C+f^WZc0dCrI z)&0Iq0yA+UjWE(3NzIgiwvj=|j(12AiHp=x>K6oBXvZyleOviog2yFqF!QDTAx(;$ z??leoI6v4Rgz0$4o`Jn7O05%_{^v4*K5}r6K+WWVTAqt}Io4YZ(Q5CP4*JDll)d3w z=le+g>w*Iibu^s{u3#0;gu0On7NvQA)AOXIUzT3_a194d4H}dqud|HkPI=;P zJCbj-w6ugYkWj9tYZ4OJhbY%~)tz}`KIdlbzr>R?u5*ntOba>tFaWi_nX2~IS!=Mm zT0!?*RGz;35pEH=qy5r#p(n@|tS4^d(y?y1yv5n&@;-xwTNBb_FE%YhI40LpBV+8O znJwCA16*jR*_F3#!2J}d6ZU5kJJR9V5k=$R=2yIQ z&Ci5F4oxR3H^i9cGsCK2-E9yPh;5mQb-W_aBpb#yF0Jr8)w**bWyP{(o|~b>zLZ_V z5y=NaRC)0hQ{Wx7eG%`DP!| z|GZwB{luCGv3yq@AfFtQ3?!#*u+d3WE6m3Ir)4^CXTTlVV|S_%-gAAmQ;M@1 zJp+{R6*;SHz7nv(j$P4IB=Le*Z)}ZYDF{YRQntDJ))ZJ`(5?^%xmV3WcY(BxXLNoW zwdWi;;L*Sa9PN8H&jyx;9@V5%$Q-6jHAlmw$b<73=hOsM3p@5u&y;94z7~h%4Wirr zeH01hxhP>%K{GEHRx=w_HFgW=LQFIL7o8KP`aIn|yy8tkL^E=5VGil~gR{odPZ{Z* zW^Oh64wOpnsw0OVfy3;24JIe=1-pH`-yx8w66^eHZXqi)J7#N+Z&eIJt>6@z2(Q^x zke-fYwuKyjU-5R|FL@_fjPvM)cwDarl4+4)>K&`i;Eb1!qnkLYm z2eH1RQ2FV#8kZWU7;zr<4i(*WvFe>6P>139)nybn{+J{<(4TwLso)0c4q2huQ`Cme%4Cr_gF1 zXz)Yi7NkW2;rWq+wk?0mcdG!2kgnjh=x|4A4K-g6Vqq;5nyEODuICE#)qva2MIC4a z?3CS8)L(Li@ib6@F5j?h4f7kM(kVpULz33=GvSGxLStg|fW;{n~jh-C$WV+t% zHNv4br$I8yT_ceG} zk6DC(45QXLYd;GP=t%!cU}CWsB*b&=Pup+uDIQXOy#Vd@F{hn|XR-HV?bael8pgFy zY6A8&hQbuN%H--UM^rL>)N)ohX*UT(v+kcbs*R`-Wvp<2Ol9GR6|%6r9i?6hlp-Uk zvZNHCUd%DhMJ`IY$iV`ia+V5)u?(a~D%Y=~YvWtV5V| z2jH5+%+@2TmPr~j`V`Js8%Rb4eJt_x!%8L!d_MPkdt1Fu}{nWM_0O 
z_oMrv+fsmiKCCDUdz=-F#YaSOVfZB)I>~s>=Es^H@#B8 zfU%Omj#gvA#u95;9X4?Ojj%)(toW&_BRGwSmxRu%);goizuDYzh(Zt!%ar`jlUVL$ z2?=%%W=MY#7X7UH=&9D|?=vNLZklMnXZFL=uMeF8@Akt>?Pv`8cvPkkk02L+eo_)x z8Bt(Gy-l!Cy1qUy01IL4T1bV2v6AqiA!fmj9kM}Af8-|P5KN}GWIv+YJ^)Cb={^0m zYe!NLF3{uwJun4hhl<;PAqzR6`dkjk#xj_P3_FwZJ_mUn+8vgq5 zk3BO_{Pl+a_DE9r_CtT^5uNb2YP)aayjpMq3!DBQf-IP162zzVDA@^3tpYy)Mj!LA z8wGj@Di}^=4JOey&DwQ3DEnEGZqs?VQGmg<^yg;rU8jRfFw$l%Zy-OF-+n1fuFp(2FgYc&>jRs1LAU6D{^XL zA3cEMttcEkC|SUQ^nbMmr%^aEQ^HuBM8rvjk>~)X%7ps$W1|+*h^X3MKE+INYY+df z;1k2n4(l!cDkqnl?p>`3FgBHd7J?r;ovDn*KzX+@zgOMd2SHJWc4dP2D%3OH-5s|p z1&8i%=#Z>iUmC0K2hO6LEc0o}0FKq^AxJ9Z$utkjvAH-pt zl1a^J*2CTiLw*T{L~Xg+VTZFl70-DvjzF2B9#l(_TG|BnodeE=g$XosI#kTO-F^&% z#aIYJ)fN7*85ja7QB1;_yuI;<3}Gt4E$|t_w7XCC79M=X1Z=rc{?zWUx$0c!TSOL% zI$-%za44sQaRN$g?7dWaVc{9nV)V*%diY0FJA-N<#C}-*pZUocF@n>3gnFZl#+S6{ zVZA~F14IZ33E-#_Qm#$wnDtNvo{ZA4fAw|%&BCcD)%wrZGWWVOk9d{*;_H30U}Zr6 z5@94a%C_5g&b3?b1YE*}j#46phbO}4ewvlgtDNpao`p7OR;1GE0J2GKjproT6Lk$S zPKUm~>ER()DueXz^W`lrzl-;=J# zyXr`|!P>02F4f~Bc8Hh$pdKxUok()r@HYaGjIh}+og5kaCt_jIecEs#0T3uxJIG$WTu5dP#{it&1r#JrCDpo=&u`3!aMp6s>8+443 z{PZ@aW(qC2ke5?Fy%Ld#{POxo$RudBreKH@#lJxdjGCw6fC)i7T@jj)EgY#(ZE5NC z{QU)zU)db~&z*0I0gRd@aDs;5Mpg$Kl*f)`Ib-vhp_o10I=km@#scVA_8UC$dK)AC z469EBlsdD#)wig1Q-r;hW+#OSll?+kwR--tJQ&m!ru;PU+IH1x`735SmLW0)Za#kO z*ubKul$mA;w&pNC^|cH5sar$4^S#cYonB#zCX+@PC%i=1!`D#yX*j4F{LYpB`dQ>; zq-(Y6-#~h>+!gTKr!QaLxi=hq=g&58OyM=!utg(@&T%x<>trGweGy3$4Nn8lSIB^jIY{Q6HRO(&<+kMkUH zFZQplS_6*+mR(P;qj4G|)*XIlo9x=_SO2c=Mwk@7%0J(JQyg@V-%tF<&9(j6rR(obMX{K`v_P?Fb!>+nfbj8tlL0P~q=dfs zx!T!5PdB28gFGxta;N1j)D6K=QPT>he*Aepez+oLPPkV@Gws<04f+DlkHT0!v*5Qw z5#iX?EtneH8@G2ivouPd0ile4>W#>Li_kI#AZ$%EHXmo9Kgs6&p%sH}S!&|c#;mHIdxPEy4fN)`kk!r`{;4ua=+jC4JOm@K4Fha_`R)LJlXH;o_;Jm5KlUY|>jV z{a-9eUx-8jAM!~Nvm75tQy|1C6JU99CA_*mV<9F5tb1hX9~MZ`%MV^k!^i6bLK(`* z)QkNU<;t(I3g^_n$1RCC&_n8QFCB}H+(sPsOuuqGLLRW|DZs!RTR>}QRh9a9*;b4P z`wQF&B1qgTcB%Z=VQO?Qz`uZMe{88_AKXZO@@<^dx9!M=rn!$s2I(z=1DMP;>#iFg 
zaV+C>R1d5cn{%>^`_8eP{jeQZdNg1typ1@nTPA)wir8{zAO|w>m6d4aAGS-3vH1AO zbpH7fP{RfPuoq8v2#pNZoWMK`=b|rqx7rfGN~QyP#CNO1(yHIx!ks-8HzJU7&*o54 zNtVsg0NN}FW39k<3VBn_;Vb3Nh`urjHLBw#x#z9^%;8X>J!gb-8r|eU?9g`HL{JGZ z-WY(4%Us>C&^*(1!&e=7t<+%&T19VT{d+ybj9MFRN5{`____)XVlaS}r`|lVFgxl3 zpZ?AbgjKjh{iLy;FKz&x$0oH`{AVI2G(tN&ieM#COnegs?Jna)keF|w1JoZiY#*O- zdPDQ@XotNl+z=lRJny&0k9|cu@9rxISbmV8AiVX3y6Qeu662Jkfh;7WKpalw=u=QZ4-Iyw5GEq6F%EQwXQtkx``Y3;ZN`NjFY|A2Dj=)zD!V7cxcePG7q}f zN<~OCL+Unk=g@Voq1a;^Gk!lC5uWKqC?mXz zngLNr`)$RD7kK>pzEyqfk+H>Q)uN2d&cI@#O*P9(!_fr1liIF6a! zXisKyEozq>nVyA{=v?rqgO+#6f1oG=THdkB*dV@5NbH6XjUF>TFB9r&@69?0I>7Z8 z<{Dro&|-yqpyU%T-}t$->Lk9cfGe|?8Hp-smoeq<9JUM#-76ADSJr@Gasa5Gw4W+S zd@|QU;ZZ<2g~H$9)3lE{|9h9%D z2o?R(?Lj3Yd*ky7xJJJ6S7~YpN}TCM3#MJh0PSFqQ|*pKaHdg|50^5%PDmOzD;&kJf=NX$%UGxILN(KP3 zwF?iMF@H;ySpDjv=orJDK%9|+CRM>LqPH<7NB319VKWy*hqG&W4iHhKwGPKh*rAO@ zM3IsPPNRc!iLP4%4(MS=SiDd~{5?8JWTy2CVAn%Hi&53e8%2qm-8yPIlJU_}*k{@4 zF9z(Jb2mU6xP6mM$G`_Ybm9hHzXp$ zft~ZK0Hecnf(8_%92ur7!DlOVgzaK_DUqKQI#mYJrs1U85Bmj2m>*i*q!aEkX1R(% zsEa^52$49NU5J$Q2ihXta6lXpKo4K5IGKfu7F~(6#$iC~0m7%pUM)rsl>eUfPv7oO zLi0AxgW**d07S}(4MK`gItU!|5nvn$kqf1dAO-V@0qc9mZbp6%1U?!-aEQfkEmHh5 zf7h|NuYcQ4KMOF|_5!#at89xq%;~%RDFGu@0+2v+X*iN84hrMQz~YM#0qFQPxk02r zCA-1J``xZjt#0{JJ&DOt`bav`PTGgN1~@$T=E*ZgR`%kY8a=d^$%XIZ__a3>l8I{p zL~!sO*(dv0kro`mV$)lOy|Mz$o9yYJuvkosc!r$I=HBEhw@6JB_70gjR&nKLglq_H z94zm_K>H&x|AHJzk7}6It#Za%&|bQw+XV*AGCfi-+V$06-y1cQ0eJjllgS}1r&|S~ z`To^IG{rD(q^5=aCHWUBSQ#6TA(H(siT^xhlV)cPbrSNYoZh)NWKXwP`A~)dHoc=geP6&2cbU@{x z?bGoXfAWZ!*J$Zf>rVnVR3S*z!otkgm+K_e!6EJz=$mPr09Da{ph(p8y-!fk-dqHh5}r(?1q)<3h!-ki zF5-X^Owf27oZ8jxp@`Ie%|oswfUMZ<77gs?abB1WFsGTf6L1qE8tTxeRcDtE1=XDD z(>CO`b+osUPg4uyR8`IhCsBxf%koB7s5n!|5W<0B?H9Dc?>*G~r6?$u31(0OW8~MW zO>+dCi*JGRSHDt{73rF-*(;~o6wfW)-2Fly+ybVW3~$9Se-&r~`fM@AeqZdT022{R zxnzTTPW3nyZhN*X==MV_H^dkd1p5=Q87?yv8~`^;8vCq!r{1(nHQC(N!J+s^(2sFX zB7&yUl@I6gQV6rK31}?D78giL^FZvRS|s)kh6GFL zbR!JjWKh>cCzs*EISy4A22wL-I*4d;bfiwbf+47@VE=Q&mMt$@MnM4f*u5MZ(*b;- 
zG&RTozJs(2JsH313Egjx+L)n0_K9o4;xQ`uB{0(k4s+I=`p;D=Hc*l-a~$lw5x3?5 z8aHJ`YR4-_14<|1cLeG=kHO!8OmI$69e00cGpa}C9~iQ<$^!nOOLcUPN5#|KkgVHn z@;I2i3VwaANVjBIdulsA2c%;eG6N#v>r}^gd+iuMap|l)>aJjtx*)e}ms`|2pnb_~ ze`Dj}YuF*hlsY+X!pisXLh0*sMUc^Sdn-}J8M>*GHpro~$*7ZUIIP(#e>%3ZnW_|` z5OV8~XMLv?!QvOR<@(fcD6j0xwG;YIG~1Ii+VWwzq@v!JfM z`)kHJESL+iqpIO1vzH#1NG~g5I+z3@2!;r&yE4Z2%tmI^iiXG=(F?{HdDoaa__d^9 zPwQ22I@2Ne`vnyaO`5XwIk8@7Vh_J=NxXm)BmhCgWevJvz@M08Gt$jB-nEbNQH0;jgL49L|!w$V6biv2)z^e_Fn z{!5X?%Fl4Mw%TL*4_n-^O&ldp=d^664jiZF+peRV770Q1V<%Nj(gax#sYIidGJoOG zW+Jh~$T_mubk>@!KwZ|iHq{EPRMI%n2k7H9O^(VTq@e0CZU*2W$Rt_gi&D%e_&DMQ z<(kb}GY!E_pwqg75SGHsi5|VkqE7!j8C{6aEY)z}Hz1WQtK!nC7aJgrX4fSqn$vqw zktQ@`y}>gd=YQh(artGQ_Zv_urefO-ffxPR#{oRGkZ>45QbkecfYau2NNa@AgOt+K z13S@;TmS}h6g|rmhl_&bd$iBg`p?7`GgA$OSfjPlup_Ll5GeUq>Eo@HIAO{(Xq5R;Nq6a~r`&>Z! zteIk>u}n^-JEdZrcU2ji=Qs`?JIHE6AEQTXTm=e(CksrcWw986J&e0h4{}|BW-AEu z$U3|3g!G|yr&Q~2=f}CEJp}81J#LNQdQ{;AV%I@vex}CR*NkLQ^wMJGBuIY_Q&Qjk zrPfB9K^k_!{CL1DfDgkS!yvnTL4mnT~Peg8}S#c#W?k zxf-MGC2*z}8s*^ZRwxfSDhS(<{GkR&3~QxynO#E~CS!Ju#=TNPh4lPJ3YTQ+F7@LW z4&SB*ko`$;CV&Y|MO&P$RSp`#-H*<_jvNaE)CX}^w!l!QqLW%1-10sHr9?qmJ#YE+ zpZTc2Ee3cuP!{}AO1@&mjTk|&Lf>ZvSXh8IBW$&NE}%LZ@81Y~_K4?(Gqgy?gO+dk zi6-)J!BRke$)rzQ1I~OhCb02@fo6$QKnZv$V4_t2 z<2PLl(6=uk+oa>|#g2uJ)yRepqCQhJww%buX2hWW0d|=h^yQ{$|Iy<&$7KR<5QZe8 zNwKe^C8=XSppxhdHeZOA&F>k3`!fPJ)Uho^)=r5E&8;bu7U#kXxuT!P_-aN%G@-|; z*upMv#xkG#j{&y`6c1<6=2lL)lp;yP?84w^8#Zj4}*?OKQp^91IZm41iL zn8E~+Qme5ltp9~hkj&EON~ENMH+P|~F&YOdViWzJ#wyq0?@|C*S2(MmiAUd=e4gHA zOqodS_c-}qQBZ9U$#(Q6 zbNMwst9|IM&0lydm-fh*BG9qCMg>X>)>>ozaMmayaa>j7H0>cKs2pt%5N1X}wwUd{ z7X@ffQH9BT_+=SXmAxZz{dT?xNmIv2P%`q0>u?c6W^LKvQu1&vxzV&ke|Gx_3CAJE zd6!N5Q)xVA+&yO^x`^7RH#~+d_j)`iEd@8o7M5?(KH~s}4oGmyPg`jo4bXXp(P+_* zlpYivkH@pMG0A1o>fY&KR5W5G_*2jboyD$|5fr2HOFDQK-e?xEzq_zjvY37{R9=mIX2ivi%9a_BO__>jUr|JSCBZ+_h6o z+&vsFlkQB;B*3Dqi9Ja=s|}`g&qt;hQ)GZALT)Z7B_M6yJvrld9vwn!I)Jo$obz)5 zU9e$Vaga?nD$W(hTe#9q6^#3Z<>_cLL9Hdsw1T=0m zqdH`XCDH#2(Tv$WxE>Gg(@aME`&IVh;dngY*WKTpYj9!&!n3mK%>IVU|5Q55bXBK= 
zNd9jv>X_<69C~M!^*%-(uNn)03oo(XX|riohEvu==PMh!`>kG~fbcti@t2~*BL(-p z?)NA}2NDz`_*0(W1}e_gT}6v#&=;Q^#BsSMZ&{0A9P@r6#+~^1oeAut4clN1r_Fxp zEWmqRn{I%bJ7HHe&fWRYnGc`ZN2KQXnT)KM$=~)be+{+TIKL`{Xpg}l&Ut+1n6P`FO<{j2V?JS0 zav8Sq`EA;`L+jCvmB~=gV)s~~)N48WwEM_o?W6O1&Lhs2hC_7LsCavH(RW3I!7s-T zg_viYALiP`OB{t~h4#(XD+{~dMA4`SWWb!>&3J9LKbImNPJlSk;Q-zuU9X}3#+HLK zcJ^1C_9CZxM=|K6NDWE80{MLna?f{=FPXtj}hS zc$&W$bhz0)=;%S-$Po8QjG4beX*i5JNX;9>iQHiHHQdKZG;G>PJe2IuoD@%J`r76U z^^U+kK{`1YFqi)QZ@>A5I=4QO-}`vr#JpUUe(#?7rTF3cIEQZJ0=gMK6%y;WpIOJ@ zP=!ZAceqlCbEj8)d?fa{3}TE>_8gmCP||2Lr*ELP z5G_Bm!vzEtZ|KqA>RCF{#?y;#Q2KB?^*?+;5{}=gpcmtG6-N zNwbTG!4JA95zt7tJX~;R-$TJI5@Ca8(7U3Me1-nY%6-y{(Q$qm>RuxRk3N38EtjHH z>X$z3)TnA8^$VKQBOFlR4c}lL6}S-sZu7%RSsOtcT&$o^H*3Sh>pUJpdQzC5^y07- zie4fDu({f(^!GUIZmEeQlLHpTxi}9-B1x1 zKgf`Ga;^go%6|)dmTKH!L#7&?F9Uxu`@g17bny7ji8B^0S|re5<3mx3QY7wGb^aUy z8Bm@5GW~&jTcFvjt7MMYEAV&|WD7v4ltQ$%ztId%-T8^pz?p1iAH+5*cqNyOo+4RWW z6v>C7U^?4z#n z4uapV+oFs1QO}F3mZ3_0%I%%^QgQwCt_aDRs7_zsS+Z!lAhu-wP21ZLf=GJiJ1e@$+azH84+Vo z*cNhwO++F>j}Hd6y)=%*+wH2uRj}x<*s4BW3Eg})sGqi?LUyME*uSKjq9D?(>8|a$ zc{UOBHxJb-FSk&-L6jN@K;<0RjjLgVZqXTL&IP zIe7KOsr{Y-%^+b)k_#1#M6s|D&nKB8nN_ zsHS5zG6TyufwGc<;2XZ%u>3;jOyF3P&rXTszL zL7l*U)Ew8{6=Y`9yL&sjkV&oK$S*i|VLLUDN$r(Mf?Y97WdN#q9C0fiIC{QS;~J1f|Jzo)LRe)!Bd+HnZs@vMkFL4>V9Ij zS4{CVnYqD(ZlT;w7Oc{V)({D>h~@G}yRR5jd(KxN+n!iIWHEwoLL#ZfY(9i2IBcRZ zK|aEhFfA=K(ADS7OZ;~jA2;V~NJt2R3{W}Ij2KM;AhH$@Bu^>seMC!SKa-&T70a-` zpTMl#Y(pF(#1%0Z#v*0}n}7Rh5K-^3N*W3~cd`(WvGxy;!8YXZ7|^24SN6SkRjyIjmUXNFGcm%;dV1xl)gRWI!v)7`IoL`pi57Vb}<$%g*qD7SH@@zxYOmv{_ z{@QU_a4u$J6sU8yxi&DN5)kG+_~nlH`?o7{F{C0e4eHdT`1$z>zX7Q30DC<1%6%>% z-4*<6I2DA>Z!$$iSZ__RtE>UVe?haAXor~OapC2Y6n1yt?YmY-s)x+5Eu7wUIVK~V z?8IYW=XmD2{*sx@WkE8$Al5^!KWvs}0Z|X;J;~%&(EWv$2Tkz|n(P?cfaQ=KzhO!? zyQWYQ{dNF4GaB7cbL^4i3(`H&4f>zarMlPR+!;*e1(1*GF!V(Zl5)((%w@Vt2xH~3 z+Bo&+09M|B(62;i6T%S&!tC~lVkdLN+uXUGgA|%Np2XLQ;>*B5y*OQe+VtG1u{nmWgXl$IcA6^CAmuiwgb^qXw`)5JH! 
z-YhB2s~UOJxZUv2$GD5%Y}i^gGk9Zt)R3Q_K07ORSr+bKqBCQP!>AJTw^d4G-X3aM z5$@Fbd{u{?WlHq1c-OJLw3H{yPl=nEGv)e}(U~2u?E@deuP@|VqpV5V>NkIvns+(o zjEeqjyRsq9_vb}kYd?0@x5MAWI@hbgBKNPSOKo2!KP-%zTJhXqkfn!tX29fgBZlle z6tA$XbmdaFvW<}rX_clKXYRJ7{BvjUe7&@@XX7hho9NXyUN#O6nbAwqb?qC;80$wx zEo~-_&E;>5$7HXs4tBn+HFZPEv7z=|G?^U%zhPHLf_u-1NffW9PCsvT@FS^b3;H@7 z`a)AvlPf-j>r*geBXJew$b4H{TmCfN1gYob&>Nb0(P$6-%K?s6TEs;t3aUMk)H+VE z7({4Yk8}leZvz8Kl8{lVrEmm;RP$|hBLGTY6<)0)se(KBAoz2MyN|(de4r*)gl~@_Cy)RB_%HBR1_tUn!{`Ked;Et=>^KQ23CX4r6 zyt6&_?XOuz|J=$lYCC#mew%^r+%rE%C9eP8!$+e`z2sYP)r;2r@Gy2(Rqnp3#(jSu zl|69$nA`qiNwX)5eX~0DZrQlg-rWa}$Z7qr;aA(AndH6NB#s`JopWeqh7RBYTwE$0sVag+!We! z7tBkWT2vj`_`bFKZ1+D?wxl_IM-4pBG7j<^qsfzMduT=q+u%a!78_p@8i2pQHYkO1hl2Zo{wIO*S(UUu_P( zaeKE*wGWorFQGS{?)KkqaCv4#%?gG5Sn0Kc&&_T#Dy^5cpE%-z^NT5wuT#AJF&R3) z^uemFmDljc$}1hcRbF@R8}8H7__LEY6J-BGU+EFmQoRwQKYG15iE+w&y*5hwHQI3v z=MveEd)ob7967s=uV?d0Jinfnvwt$DE5`S;k@|9valVA`Pw)N{wCcZ|-MOHD_D$hW zDS2*@l1p9dX9H33hu01x4&=mZ$L|wUI1SBOCBp_11(*juL<)pcbTu-z5$$!Rc)Yg^^><)sq>CYNM&Ak_HaB0r^sfWmePlm;|Gx(s2#Fi!o_BJVjy+6LP4^J;>-&8(5 z{eD}eN>>hboAU8bef02T#C&*r;}5-6lD*dy{eHXf#|P|>`bAdwJ++z@_c>eD(_ZS_ zzOVzT-6RKUkrUqEjoHb<$nNdc-PS?SlXq)s)I~>OvUQIIy6;HWDn294HQ=P%?~}J! 
zb!?zx=hZpxxoe=>?@Q@`oG~|V@4A^VsrQFn&Z}K=f8jUpWETlLH(&jsZ{zp7Zg%@( zc8lNNc$% zz?&f6&6)*wUA_x&UMKE07VN{1%^A3!kMPRa^4WLI=AZ69+Tx=Te`OD ze$9)@1$8(0l*dSPf65QO>{?T@!v?jUH5ML^`n2lc87|!fU_tI(Kc^~QoppSlEO$j7 zV%pBWsTk1P3PVFY%qty5O*_KvGMf2g$k4}EkrIo&&RImSp>Imbit zQ*qE5pY0Y$27KRjQmmNDx#v!cJgbAv8a0Nde|S3h(}y2h9LXPBROrG~bhmA{pGyAq zT@d(cX;*CXSTyqX=0P9-RM7UW)l<{?aBMz#IPdrV+WI%bozl4b$;0veuu~sq2?1-I z%tuQ(xtdKST1faS#xprX-DZ3JVZ4v8FC~;7yLa!_@>fM)paj^3xMEgKW8-gV?CBTO zS#nC0c(rch57Fm# zL{83|BB$RiUX82_K&M0AYic?%UB!MS;LJsT>i+q)c;kuJO|^&i407~16^nj8`7k9X*B`u7?nk2P zYJGiM#hv+2d>&lEH%KAm0#56!e0W{HaDRVyBWyIqtKY52W!1q&iPNX$bcO33FCCu# zaU4X==f7P1B7{s}zpro63Nf_pjt9$ckRudbT!GLFlhH6NvhNjq6~)nKKuh~Ly~=cX zBh`5Bfbsjg92BZ?dYR;GHzcyOx0~gQls<#}o=Yp0j(o=%v(KMLeA^Tr5s~ x2rL zcTsi1Nh2-;grilBO=&|v94uy~ehC^c8M*m(|67M)tNUP#P-Zcu zU?!0H2-h@RG`+=VJpl{r_S_yDsWPx3bou*}=s`T?@bEKvw9XO(7iM|q=I4(fDzYaA zs=;{^kKMhqCe9kl91cX)6HQaLMqDc+#41f%?kYrFU+ub!}KI`vJr}GgARi@5`7pi?Lw_3+E30&I zI8iT5Ep6#*j+W+a@rbAzB7bplaed9{5d-^FPNE~>P$O@Qfx;+!kKtXPvfuOW|5Ox( zqt4@~&Br@4l=1E0;8Uh=bk~mA9rYxeCYldGM7s0^p4 ze<-2NAQ2M|Ok?8?5G*jRd5<1F66jq-aA8Xnde63^va$r@#t$+02tM(Y{i7Uy_~gkD zjKVw!jUg4#M(9xrf_o4%29ss7{`TP?^;%885P8n&N*9kzytwaJ(%WAS37Pt~l8?{X zH}mnBUi>`D>gkO?XfNK};D$GcftTJFhU8=sShue2OhDzBb0+6a!}`pWkP zG&R1Hw?{_~>U{+SIkJ{YcJ&`zbXG-q+!TbM)_+I<9i!;e04YL-WAM(yOc3fSrnyGfj zOq^-XQmBa18cIXRX(;b87N|0{)zna5fPgDLYJ7@4I%ttr0kUXkfs9ke>f*u z{nuZAP2WU+OA52e=d{gq!_%RPU78rLiM4iziZw>zbrrGi^4taaE*5L-tW!SZQ4g{_ z0CR$J5m2w35!CzW`cMKQ_TCDk`*+s@*yo-AeXNy71zzk4Ut4$@@~>f8Q)7&9AH1 zw=IuZnvi=edmPQYP<~)4#XaKrjf~hy#m>7zap-|H0LB?ix zZ9eGiLLa2dE(h+#ube6fHi9AxQ*V_4^euV&_U+{aD{Ss30I|47ZY%T`joSMTbUAD< z6yq!)6AP?9zAe^2qYc>i%VI`G7(8SQ`6lNG(wq7E`gwyg&~PD}qvCbIv$fc%VzGq! 
z2nieyLk(4Nj$Z!425m}X!B!LMJ3I4KcXx`6I1R+AQKO7(Jjhk1Te_f)Kg-u(u~lO8 zb1)5VIt8$|#%2qZEPLz?)~+3i_%1L`*>N*BY*{UC%mxFkc#>iBLUyF8`VundyMZ4W zP@9kqGcW}JrX>Je53=b;4d^X|(c!)`?OXk;DD~>A_#$YCild!4KqS&w@xHV&aQsC` z8a_geUT9LS`~>Z9VZ$4rZ#n(#P^F5q!KdQl9M88Lk)o5AGWbBIl|X=q2flrz^VRna$dqW$c~Z_?N9-nzJ1)a5sRg&61>bbl zT$l0guT1QV-d=66RNDkj#15}#6aJ+9rOh}T_tHGY-&K246xim% zqr6;i2*de;fGvn3~YdNCN zBjGyEYhfi-+Fiu{oe_&k-hIun!TeoSOP3?l^#e1V$--sh*)UK~K~tbq*=nUS$D`8? zI4Y-x84r%>%QtD{%ON}*-Wr~!1g0{%MlUZl@aRMnI$Hi)(WuAoZ}&#IV8;zy{`QK} zA7M8;++hxG*W^q|Jh%{zE^XE@pofK0!Jv%g=dc(L9C1TJVQLPllrdyWQ~aw}LiNO6 zsn{{|KgHs_Ji)G3854s{GuTWff%BQo`C_{EorE2&j#D9Aj7-e~n{G)Le<{_Zrc-td zJ#aW0Wv+{eiIGF!tZ3ubks`GzT^N&WMRnZuQ~}Egwfz{0+I|mB{PoBmYAzj@QG01w z{(BP1OsGsMa;%=EqaRxWm5im>QTImMw1np%nC+uI-g&3zcOg=}$$9nSw;+8Dr8NSl ziX74(c!-mmvHlOoxK5rtnQ2961Wive#NloNl@5LCJuwhdJl`-L*Vu&w$gb9DQ*nv> zuTaZp&5~$*X#6H|=!cnxYSzm-Po1hu@*qx|n3`&|h#`Ygp+jX~bA#SP#<&SuZwrf; zmvj&@@a*z+KxxiU;b;icN~B|j6#>^aQ(hg0yx{jYYOxiu*eaCwh2;6V7s) z21@3DJ?&fTk}a8?oy|eucB`B)b9l=%oCSls0Qgn&hOq4LPv3j(kdMaT4KF_+010Ur zBc94J0+!k%8y2!x5fC`&X zV9rtV-X++7EDzxp8|}GG%6F}U;(G1(#qJAB&cp?W&uvc(_|_pV26192IELaY>*ad9 zB%@ln&F*c@axGu^hCZ5t*jbQ}!7X>+sxw-kCdL#i&<=Sq; z=wKQ!bJ7&{+d$vl7*mD^aUzzyz3}w6#@;u6Fm#k@v%iQspsUdRW9cnkCeZLc-@1B5 zc4Fkb)Hm~ypx3-=20LpAVz9ol8-icvZt2sN^QLhYhNUH_z_pQwT*B`qptGMx{cvUu zE+;-eUWtVaRbd{(mD&TWCLY^Y<6Nf1mQ<-jbaCDPzIPr~sdC~cJ6ne2*Z?=G(N=9ZSw|GqE7)uu8T7vjc~CrtrmT=Q6Zw+1J^`lC^q zI+8sukaPXGVbb4#Jxb89T%;e!^c`oOc?uQPsB9#2$R62_DGi)-Sl|;WMCg zCX*wNvN7GiC1A(Zl$4aKs9Kd?PRF6R;PkRtdC^apKGuRhqhj91xZV`l(Hy81=!UXr z`feLdf5n(QB}toGLOT^D;*(Q3g@q##(zpn=IcXWApuNRHi8F1)(>q?vcJ$-fx3;0Y zH=Mi!vt8lPe4wG>P0`m=LQq}~bo@%RL#qpiSnFjuef+6UI?2Fy?_SAMNt?86>Tfsh zERAUiSylHS+{AUg#_ni|wg!KRwp5piIV!HUGiG;e{-g`8x^g)B>Bk!^K5B!f`uvgP z{(9Z|tboZaiHwV!kN9a6Msh2VP1j333VrZ^S4QZRbI@#8$0x9 z0}A_huLr(qC#MGA+;No~CUV0aPo|epchG+mria!xQ&4W8wzf?)o@#DkaR8*tz zAnt6~a^v%j3e&j|vOdzJP95F}dTE(2E>Zwrt$#$hZdIhf$8E04(=Rl!{vZ zowvVnrwd&KP0R`@nJkIFAK 
zG!z)$Ls~b4Q4gSB#7#$}B3fS-W6YAIYD;cLW)9M?7J*cQ5IZaHf9bi`9c=&96^SM| zzR!t{XD`!Lp^{pGC`&mCf3$Pr@9W4**YQng~J4pS20r5 zw2JG%xG%?$`$rE)n!o87&!0;tcAMJd!J!e2Fd(>a?YYkoNH=qgqw7KPo0g*vGt7+} z|JbX1?%X+<+iOHSBF6RL7CBzRtQWz;4vNWVgJN)~(R*{xMJ2pHnp$SzEB!TH>MKpXy?j?!(v06y+sSu=)_`Gd7wR;EZfhdVxz7SpS z>Q8j-B#2#af~Apwy@YyUCNie1#NCbdi0P;M$#rctcsPT*T>xnDJ(VJoRWFj zn4>3H>qN^>@690&F9ybK^tFt=v&!1U8rHZSAO!Ox@j^p8d4b# z#bTd&2k@isIA%BxgSGOWjzkCj$PQuULC3X-W3yQ&CipfWPBn z{Xp+g!02wsNV;R{s$#@yk>n@L@rBexM3D zq)a~#L5XXue1)X~io3eY1NF^Gt_MjgB*GAPRe;Dsm6+{!qI3EX=5ikhr~aaZj>@F= zNt|UCi=-qzm1DdnOuA>dVLobmu^0}X0rd{_ILL8&o(J&|{fV$!q4b-JECHg2=MIxZ zJBpWf6}BI4$Bz6(ok5^cWhRU+lr4VM#ffN%hbUebfvE%}GyB#240$sWg)bhnX^r=E4IBJ%oORm4P0d#&kLIoJ+QeE9E zE8BP|keZvB)6!TYMWGLAOZwR}hhwJ^qm|6TSK?5Gd*X0{rh0w+CV4^2eha$H%p4l0 zAlzh>%*UcRU9#OM$b?qAq&(i@*Awg(*)F5GIS!ijka0k|J_sI{hxjH0f{}zrqJJO^ zq-Ay}_Udj}AqpxRiX)EW-yVXQw=Mw{Aa9$p^*m!0Oktr5_n8ReZ!GV{QiEv^yLp1_ zr*hsEwa$PoUaq)E9E_GfLO>>Rz_8-{&zDj!((e{TVQAl(HYZiL_Ez@ut{BglIWx7- z%=`ycvo&gA6E9EaqQ2%d4I}gO%wliPvIvoA-_8va+}$BqY*zWvoP!Wj=2l@*%_c0O zZ1w-xd-HfK^Y?xDZknmjR8v#ap3tr|TC~Y_PbsY;T106`k}agNn`tUZs7NA|C_;&B zg_`WLwAi<@MA^o^^Bm`O0u^Pi)EstRg2FCe28-I^AqhT$fNF81`T=+t$d;vUr#F_nop8gx*@l5 zDP?Ny0o%|5s{?}rF?hoPPl^U>K0wD8p<(@>k^kpC2mN+KYM*TVZqPmd%WMOtGqfXeXW$pZ&=hEWPr z4udGHl{%d%ku}I)+Braw(`6o)LdJi6vM{Pe|n_D`HZm8Y4!{g(DAkQsc8% z=Nm1CuC1-D%_ChIIXG%fCfT8?)5DM#cqSDsaKdQo5a1ob)^f+f7rvOIOeR*(5?FI) z+SVn`F?cic;X2}EM|varx%z&~dSkDhDmvQQmnrKAFGak!yHySA18E(U(Xh+O$vNxU zj?SLK=ZjN_mxH*v-{nBoX(n}$GPHP@V0TqQi7S^Q#{i>APOu$uB%$b(7lhp81!~pZn4?cvlCX9Q_SYVbd!GFnOYFhY>CFfmBX8f7K1-wx7S( zqIj8XEirG-%>i1O$?OmZb>rnd7W1j*1Me%(JcFoQ$V4+V5a@g;na_S47`U7q_>O|y z*zj!WoqLfbVrZn1(R_gTbeeXiBtYPe;IvIbC15u77m)KT1uC{gDB3J=*%4Evi{*`S zzC<8G9MMF5OZGY_^*D>QZS^Qar&BWypH_GvWkV{wh7c5t$MUF-oDETJwtpb)TVi2Y ze(;GPQ$js?GBpo9sNHepn+3Y$skT{Mz^#Ka@6e{+ANhO?r?6Iw@BMv^kq#a#JI_hWeAr2k`z~RgJb=eA%g%dQN~YMV+{l8~1N0z%%&x`Yu9jOaCjke{5N& zLr`%|<2soL?G` z!R3vqdV^qYzjs|pD>8TqOzx-zd0FB>r%v<3o{UQfvBSwkLR}zA{N)t))HQLddz^0+ 
ztq-d;tH$l`{3cCQTPd4Pbbyyy;qXv8)iWYBh~j4IWW(av;Yhq%uH9&^o6m#`_?Ay2 z4?iW^cdAI-LePiQwlt}p%K*aIfx zeX5zSTojyK(H!cHN&+LxbIB@Fn5Aww^y6#{jDw{ZgvCc_4Mok&lO!0p9!?z}^PQw~ z`*dAx@c1&EQ+k&82v=z4JI*l)L<)BC9#6jYMW;4jYDESi0bG022rJ;ZJ}n4bRqw&| z6i-zUs+yx@_Tft|eMCbf)DpF%n%&?eq<*@YvKGh>CXGkMRqA7FMB#tk5wMHuxALJO zle`SZ7a_U}^!B}Z7mo17<)ndu!HSkCPHQMWrm~}U4G~rG!7UXl{zT$DSWC|wgiFm- zmDd&O-f&SS14%D*n+j@?{p|#d*nfPB3?Gzq(c1&Ah>VJQ2uCufzys_#$^s2qfAr~$1!s{(EVB}h* z!7f5Q>ejPi2uK#YaZ1saU{l9e=5@fnkl5ffv#WCXxP;yDTvd`2qH;Ps+`ZNN>;RyP_yva~zg#Z^5_KX_P9)*%%)uKgT`c1O3vYfI_2j4gM|AahSsDuT! z;1I&D{aP*%(Sf+%`y9-Y(2ZPN(clp_4nNwS|LZHP^xXsJk`N+{iy6wQ4(`KPxrtLw zqQI-I)7sjqd?&l0U^aCZs|bL4e1FyODME*3t-`VSHNkJE~w&;7EFC04ZU?9BceSILX5s-1)RUqnHLljUAZ61$; zqnAJk5}^$7 zp|bDWljzQ+lTesy-J5H@TLn@Zxf-b4Qx03k+#IFCob2ob+?K!rRYRmRz|`+Zp0vs{ zZhn5~ti_6q`$Ys%%%TD&81qZ5z)~>fdq&uTASdLz5?dTtF03=7=w&mUS@7J6^AJ|a z`OIf>L%=Jgu>Z5#uto~bwK^B}{%H5oHMYfB{t}>Dgr*=5!jOptgU2zFjJ78d zAp{)7I$V9apCSaxa#Nl2dDPOOcQEvi!^oW3B_ocUNkIg8Uc{j_e)@7-v=E(%Dy8bv zFJ2uoObZ;<9t;(Sjve|(^d>gn)VsvhB!x;(5{h$-;w$B0{4>dQKtydIBSWN^A*end z*GnQ^SiL&3HB^m8lIzwpQ+7KFgw&fjK4wfzJjTgAgdJHrxQc0+=#tI~(ItZylsv4c zs2KL_*%mlzVd3G^%?QaOI2M%)A2>L(=gj$~)m#$^H+YvYCr1F`ntF}kmWZK$rbTDX znl;CKsnj!r!emuA>kKI8Kx~ss;Ftv8_kbwE4^8dl^KK>}2aFwcyiK9h0Zb!f&?_#( zpx@vz=wmPmFeqU9aW6J}+9yxi;rWxmAuv8B)f^GLEE2KfVz$B`nrIhK)z*$&4CcTe zUiXGlf)3sX)s9iwJrEBdQgpTfbc|6ngDyf?L_`v~+d;F~XYh)qn*CsQjk+I60_xvy z<}^$jWv&^~Pf#vq)zGUG+lyoe8rJ2M1XVDq?jERK3)ArTGn_uIw=gb&$VBa{0v@o`z#dJ=i zq0~S`i|FLO+||1pD6A~umf7Ex3BIE0GX-Qs;-0*gzm#NE9(Wk-*l#rq_08N7t3-`juiia}kGlHe zYespVDze~RF8LjZGeQPgAoX>X5{GEmyVF(#hVowOOOH6oMd@#Wl|gxM_8^=SN8or@BI3%bWMIHUXJP(v+LKlR!U4iw zYz(C3E7PhilysOQs;Kca7CD0lL2+l#G8I(OARc|B3Ph9(N;n5G{8@Ef3?sKx2WHNbn>w*psk+~f$V$x*f0fm7z$)5vTwVY7oKt40A*Kg9kTfupTE7if2QN+R$}zd zs#&{st(f~A|Mu>>e_?QMryIibPCd9{X-ctlxSLr-SXhPbzubcX%2z)kxbm`Zh1u{( z4}SD$r%DZC^#A$t5Q9lQs0LESjN@8gB@qF6qz}J^a5*e@RElA-jN+@TX#VQrv4A?h z9o~mjamBHEP-#-uN4o}LOLB_puy^C#jHN}U<7ydxn{^|mH>$r*ZLD`Y-KZj>TtLIN 
ztMtQ*o{k3jBgCy)5Moc38>#kw9bhHK&96)e8Siwz?>AO^HgL`&=uRp2+!Y#|D z0&Bm`o6>r-YDn3f0zI8(iU&w8q=1PVVaDGSNDMozw+@~Z8&hny;SY?85<~51KCKAT z#-W(MFpKEG&{wNn1H>M+?aLy)7oi1Z3n4;X*=xb;ER&u(?4(XVgI#ijl}8F<)8pSp zVN`q%bq`ybjI%NcVM#U#8Nb7x3>b^hZ>{aH4rnDN8c@_F8=%Nz#ebJx-Nb_>FSyM@W?xYsvuzN{OLwuSKlG zR5^QKKD?+Al?|mo+jBMT zwm{;K@s*78kVn zQ%J~p;_8b)9}qb!!AGJ0p`S3DyMV?lBDL zY{-CIy1NPv>uzLun`6}`ObBHvSH!gJ{g9o#h3r%=4X=&<2s!Y*$_SW+Nmf$9?7W4it9TW@F!JPh;x#dNnN`spMjL>CyKYJbGz^LuyMlL^1055VUWY% zyLb2QJh*H3tRdC3|8v9?40-kxJ$z$AsURxDyqyaBY6pOO3af0c)xJwdKB2G54r%&F zSvtqk@s1eT1VWcG30zW5pnN14K@)L)b(9FGZy-HAo!k;oBvJm37GBE2P~Ztf${xN` zFGs=EK~&5|SDNs7Z47+DN^4;ryc2iLV3=9P&04R*|(I^?=gk+ruLGBhfn8?e> z$k1S?d4x38_X(I4Y)_m6GC-Gsb!BGlrTGKR;@tZ&dxE+x3@HaYU?aC|u!Wer3;pZG zATn&&ui2Xh;6uZH%i8vz0f6lAU;p^c#p&;Y^9G=C!*^cz*Faa-uwUE!HPC)O?6)kZ zp9i22!+$*mq0sQ(&`EZ8W;eX#W?+f|w@ZfzLEY^RA$FTSD-{CRrb^LdD41XQ} z9UjBq$3mXP#ajr-(dkH-jcVZLrkjEWX^!Z?(^~@WuPL}a$EQ9c?Ve_K>3f0YORxTR z$!iAKALg{STS?zOVzp5*_ZwoXiM){BKDyebf-KOK*KFQ{2-aL*xfqX=5fj-qu**(021G5#^lF zQN7VN63Uydb7YVCy5UgX_FLUT1Gvycsi8OFq{nqnZ0_wl3r?)Y%;t9jLQAjA^Xjl_ zncI37%tK-!Upg;|p594>Ne%(UOSTlxb&bEM_4&AmWL`chPU`htb$D{fFN4Y>r;u4bRc3zyVe4Mc2D6neDjCbXF+5bai4YFt99 zn%DHY^f`bq1G4;KSM17yIdjw#T9?QcUR;k~`j;%exT|0P{tjXF%Ez%kyVo4qfyYyr z_F76@ICacbTbK17Q_|D=uea&0D&e}*6)J0ZUBOVAGrs0tjJ736 zo5Cd?ElDaY}>FS`CAtFQ5Da*Ar$p)=Z{`06QMy4J!N&rE(I2E`3Y6W3-#=!^*$dvBR9h z{lZHuB`AIPdcDz_Nii#6=kPTKoopx45rIqrrbcX$rpOQDGN znsQFySNGNRL+_n6>_d*?G(zz5S|uY@I(S|7wVHf7$uM3k8`XghUD|7Ei09mIXr_`c z4fci5E?N6VFZSRAo7>lG-=522DQ^gNcoCo6h9c=H6NjFOy@zM83mwwozn#tD*BHjX z)VY|80Tnt#genJDgwv~R*b#3zfzQ)WmTrXEhP@+4rPItUU32b!DArbHZ%$bQW*cg} zqMyt>2KZrQie-0pam~4tFwX$v_dP+qS&QnWKmu~pP>pjQ{CDt*FZ+{9Vq}#M=bDf1 zuZ?=S^3T!bKW$la|GT|H%QvTIO*}fb!8GlrZ1a6bx5HBx?G)I&NLX}gx`T_U+{Mot zM+6?b{pOdydI4FA|0Ozhh}imAD!Mj?`-@cwXJ@9!N=OTX_)`cdS=#=#);AxUA?lPZjMnkEMRvI5k`9WKF2m zilM!d$8xz#*~&}vnb#fR=y&PC@m~F*y__bk-OjxMCt+WDqrn|**S_xaYV96+?`$ch zMWF5&*w`Y@W^`}w^0_ao;s(#XSGX8|^ENBod3Lz}FFQ=#w43*JKazSq@yf-Js{c

7|{*7cGbOVc+GU%HEL^@SsmT)!%-XnCc;g zTAy%{a_)pmp271be-!qiH+k4jQ{d_^D^fbR*^QEzmK{u^b>ZY`jW^Ipu!!Zf&0!i~}ei@w`aENuE)7@tvi+%<0VUgSJ zqWx*UR4+$@dg`gaJ*gwQ2yMcR5EheFRD8d`AGgpo69UByR*HI)(2;)g($c|E(e%K1 z;K~K-^^?zW%ZBtOrd6e{FRfknr#Tv(7=98`UzN-`(}@Zj17(*0BD1j76IG6LC^nR% zl<1%n9JN@gV zxZUi&2kAJfLP3O{q^hzPb?VHtp`KKuAimV()o};KR3F^qnd}vAe{M)EAqEheMnl)+rQcbSU`GbOi#aHourO~C!JtniZl-njntQUHhv4{=g1JJJxd)z#qJ zL!55YAsImEFkAt+#wy4Z`( z&+asEZb5;VQU9fmC3flHXb@aK#@>rhr@!%7Cyn@;y;HfIW;Wk}vwTqnk&6>OfQ}eL zt*}$AYcIjJ-h$*=KNG#~TT%CUQ6<@M8ROz~Zx2-HY(W{3K1;NkDsqqSpC^s=@8OXqDpxx?9_M-#TJ#?q#M87&3D5+KagicMLnrBdO zu!C`i*b-{@f9Rhd-hXlugRfZdAA86j$hw! zXPDp~T;6cZPqSNAXr3Yz9k^%P)iS8(^zN5vK=7v%g7O8VHHxJkgOUVAXAvntdn+oa z7)ZTL)I)q@h`gsFJ@Kbm0xn1BT5_<)edUWat%eo>mgzk~ho=#M!6Z_PJzb9!qo|d- zWt&PJ)vi9t#F-m!*BBpK)7PEg#-%-E^FtGRe*0^IzLh}b zysrzzGp`^`uJ87Myo}l4&qk4D0@xy>!=kAo1tqA%FE0?kFMBuMXhH($U6$_LP^FAeOF2!xko9Q(or=^d^{tw2P1P!EzrDa z9kuVh`c98pGH7Ikj#M6|2UIky<*vm|j~9O#Wm7NN-_uN3Lw&D(2M#2mD`~vMAnb_U z=wGaPVvXF3`@6_SnJjbQxyj$@(XwKwRBzEnOmFCLRg3(6=(&{2C%fL%=6rA7sm}L< zq5OS9J=tfrm0I_ICpS3jF6Pk-=c{%clJ+*m1c$uk>E8}|3 zxG~dIX*ti!*z{<-8SCgyvTH!|9Jh#t{=@0+W|YsUb)QTfk%J{*4)dWbZA%Qx5Xhmv z6kC!JCFm;Qo614)aDsYhl5=whubKS3E$60v=HTL#pX6}K!>v+m@XxFY2xJItqLNjj zmcP<6fF>SWcPax`R0IVFMj4GL@#-EE?FSI~>qy;*)G4Sz5x7rkOL+jq7-6!BhT%=I z>)@%k$uGT0pIv@e_d|hIQfb^OO0pwkL0iBUwxFC;4Yy#F<*N0gA>9se;&p`QwXZyt zPwK{`;l+U6!8b$9OnSEm&*Od2`6Rvnvcja)CDzWB+;y4rOjKS<)Iw?daDOvFx=4Yc z9BmgR^_i+Ls|<+?Rk{GWk1i%kK4#mtX(=P9}qR_Xr~1;2@CyHI0 zX6wE^A98Z^3ug>oQpm=Lo+J0}!4bZ z)CXZow>$oJfpAwKpIa;mqbV=AddY=_#^15nm8i23>&8RvJjpS;UfkDZ48S%s0g@PK zYkL#th4{l1ZNdt<&kx)W2)Mz2VRde-;pVVFl@&N1oBV1HSZC38zKhtPjvv%#G5qC}wAI(_XQL=KE+nx!h2G|GQFp8X*@Wc_ob?S)-eF5K2XvrNn z!3bZ0vG$pK5YmR`&fH?U4FxG3An5c5^3*iGG!}cXZj)CzKQ0Q5fh| zdbXwUZG(-XdaLN!_}gOb$GuFTd9kw1Jz=J_*rDkem$AUg z>J~XQ>eh*D_Gz@;=@X>=`||$E2%nf|i^}tFUn~}HGSx9&ZhhjUuIlsE<@o_pKfOKo z!FgMDj_mu_iavUk_w2P4pP9Vf(0Kjftd$$cI}H=D6f4i)TO`|S^^4D^kNwVK%y<%7 ziY6jQCvXm*ZKb#_Y_nh@qjho;K$4s`q}Nqrt7%0x6$doAMe)% 
zrE4D8I{hxzn2=f`x>IRwpa{`7NYpr7#C*}A8E2cv`o!Ik{yN55=w^w@5yvLkUfV{= zjPtz{H~1!*KKZ>?3_pxcRGUeDmt)h>^7x(iOj1hCoyU&F_)Ds+6Sw{q)v-D765iwl8Kb!1DD4Q05CQh(e01 zm#|n$o|Th!|N5Mwm$`^uCQn9{-82_r4*P=B*yqmX{BPQ&=(|g8mRdG25B6^C_nzDI zco`PkUt7cesUu?m0S(LN(hWo`M+PLb;{_Uh2tlpaUPY6Kwu*wpXTqXV#+?$oXR&kL zBTj-kSiq|l>x}&G6|Y1pJ;#m@am4u2vRuLJFP{&Y@X23`pSM5v^oL7EI(u4PodW%h z0?Je%32U`l?l*Yz7za z;m=J+U3a9M-#);ADY)r3zNx$D9SM+q6@4;HGT!_GLeKRoQ%22v5kIN^r^T<8n8~R=%>8AIP~)^`y51I>w!5 zgIBk?o^|fSRM*VSvtBMX*iZXayoo+gbo{7}DoO-&UndgUX_@{9Ln zy}#?|p1|^M$wmPF#Fu1sAkeOX7M@}$f*2l=YA-G+#KQ5yLS*39UWV_;&PlqiB308A zwc3WcnW+vEdHzMsN2S`|o{RTYmY##@rRE>-_28L?TpyEs;lt83qP0&>%ruux!}Oons=_E zo>v3DR!i&e{eX|KTl7JUj%hc1yIHLZ63HK$s-xDUcQbS{rlA0=)}sB=#*9gm(`$)ai63om*vQ4N;%26tQAM8N=o2J3K3eX;lhuTK3R3^7zQzYKM7 z=sre5DcT}H@I4Xzo=8O^s=U*N#b+f;zP+1GZVA*+zyw0*z*nqmx zESq;eK&yWs;jPwAHMFpLSbu&VV~qCbluqufKc6;6JK+rmy!UOu1`ja4i@`x5O%Hsg#p8fdA77sw=VbW)fNA!nFb2bp>@JypaXjkEF`mgm`c8)%Em?6JPH>B4RB(0U-~o8^^xYt;fGx@;GtJ|A+S!=9N4 zOhBOaH!-ssHz^b5z6{WJ%fu>lcx%|h8(HF&-RX%0N@ay5TmRSSFBK;aOGtlNC8UUV z7hkVc>eez7BX83&xu)%K0&G%0z!hb@((|jUs7U)z^@@Ji>6YUDbBwL?GkP|u-~Jce zd|_dx^mKz_NwJDoscGlk(~_i)$*}~zPLG`?M~PIoI`E%?@JTivnVYP-W+FBup%;?l z46iZf2YN8D@)UJk@u6j^uyrtyVny8y2yoDBw~{>M8gsox$l1T(PW$CMj&aMrtinkC ztM**7n}W3Zi?k{%!WBB`BW71Bw|=m`Bq22Ev!1{+lUZJ)Sn?ZbM3bN$qvixOscvH} zdNdrUr=n3BI>!nsD$jQ}sD+(S*mY;)+8Q^|z5jahWY6Ql_1L>3jPboP_n1+2Al1oe zgC@&;FJtF#N*w%Ry3(urgvYtDIX zDox$(kYSodCetS-ru#`g5S&Dmh7`vYr#>!V=J z1~+eyNQ;u+X0Z#_)%iRVn|~&CBRoj?S@f+AY~^46QtISbhvako_*M!oEi85>ny?HT z(+#Xd0|Lz2)QXAe1RNk_=86N1B7Gb-E(4!xNjcv3&}Op(PSRLSmUdyZBRuAGo-6F) z-=&KI@61=XM_!%SC_9M8idMwV=Q<}(mi;+i#Fl8>0Zz>*oiubtEK8}C%@b2u@4|Hi zbPgQ9gSkcvK(4#6Fn0m?aU+_R8AU_7Ev<$gP-QXH@YJO32o9mp_M!9x28_RP=Zo{J zZNeJBI3d7csW+e>C$$dm*9Omih0ks8Q0GIrL(IhYhoQ)1(wn-v-Nk&DH7=Ay0c9749b9Dlw9!Tbe zXJdihq}{1Kd@P&mnE$*txh}NJO?`xbR%momPYe|h}u~U#f;61`! 
znZkg~!%__vA**fVRGtl10Od_9UWGl)IR8T)1@Ue)`&ZNK=iPQ(#&We{|ClUo9XOvK zoC8ZyA$hpG>FA-Ts#lw82NSsRs}b_@9>>~oVemlrHU$;0R;q=#Z06tf$X4POY2XMg z;+D(kK<}}b^2`ll3Jewu*_s$!%NB9Q>w}T^W_M@s66W06VdWlNgA?l5cn0hsOf`2C zIu08cdvF%}!N3T9_I$$bVeX0;x9moTInioOkH4K_izhMgo|X6~m_E5dLo^zW8TZ`y z`BRf=)fJafr#UOeCYAS_@b&HgQweRVWBYk4z&J`?w`+p<^4W==<>cmzFyB&Dxi8Hi z?R?YGBXFkJUmlIT`*o?GHX-uh46Y9)1=kzhByR1`pk*nq(MdE^Vl2yBNr)pCG;m`! z=?DqF&?Pu=4MGX#h|-~q=#C5A8Yvx=_lC-nG{T%BEI;IEQguLSvFXa7z~-Z)+Q_rg z6dMauFk+Ocoa$7a#qMjP)Qdfal1=J@Nq!u^mQq2>5rzs)8}Z36;gg$%uC_U=n>M)K zamLo7idXlUS1{rtlCYAw?k=NVXsEP@^js4b^@JT-b_tKP)Ks3nkmSCRueo;U8ktgq zCBugkFn$+R{zf0BMTxiIj^}5uf|099Pi2){i=h1dw7Y)4w^uY@lF0KFYVxMK zUwvxefV&i5=N%dC;4y#FMTuKhA+&cm4|;&!FO3rB88*`mJB>#wsL5NRBjXCW%ULnx zF7s?Q>hH}Tzg=~-=Yoir@aXD$bcJLWED*+!`>&9ld zjo$N9&TCiR8QUD^n4MX4trgGb`y01FO?RoQUw+Zvk(2iL9VW%P=>8iTEq2_|@+NA1 zjWUb1g9n&EM^?{s&3x}X>U7>sOT?RZI^m(oE5Ly5NIt(ExpT%*qWuOpvzL|4YcyGN z!%YMbn`+{bSxOTyjQ4UrqFDe^7lZrK|Go2~EIj=tc)xBNvTCepzx}@$6UvY{#nCWv z@ebItd2+=hL_>xZ%*g6_^v$=dWHKQWnaN(4O^M5W?bnfW$Y63KN5_wtA9Fal4sYh8 zUr^GsTpm>!ior6IPLPaJ9q;|?`kisTi`h4YTz2*n8r$sPHIfqhu!b^2E&rR<$31a@ zh)JgeUMk08?JZ2*GRte=8%8)HmP_lx67-G-aza;J_dX{qi7m5ZJ?LZ@rL<=cUF`{TT%p| zOtal(EB`Lxc8Pr)6sXRfq0)D zG4%OQ4IZE;AnetnaMKv?&X;%T88Pyo4;^4Bz{zspfJJn`(t)*V_yO$coG)o%*hk4U z=Qt(3!wNK3HivM=itaa~@gQe3HYacf%X`Xbf05oc=n#>s#ryTW2gZs8V{?vnXHS^P zw^LtoC-&#$#fss<@E+jq&nZ>JS2iOk;Fv1G^+=fea-E)U7a*9nvL;hxFo0zs>m?qv zQ!S%M9*NL;)0Jx7)iJr%feqh%`$^KnBGx@e#^&Rl8B7*%9OX`Nh*$T_l~zVMu=U?s zmz&aSvT2ktF92>1H(i|4CHJj;$b0kuBmX1h4a$;m$$U=z&~h^SU|> z6gZAC`H=sZKG`{t4`FMg%EtTB8~$cS^kIXN&z7827{<*H$b>8Hq z%Lfz0FBp%TwJ&?iHR+0188c?cG6%f$`^G798F-RFW-$_hA88l;N`R~k<7DJ{3}5c6 zFAm#ZA6{hAyXP50nNW87NYBo0_dc&Nu9>BGCwOC-ge9`^IB{ES_s~dlqg*HNHq{+q zGMRvAR|?-ZmNGY%>}K*fBc(}+zgCGEIS)}e&&m9k$((-wWQ|73uU;nPUQj5&8Yjy+ z2(h?C*}FimpIB_|hu^qLyC?1W_K?!<6FP@(wKl^S`Nl-)3D3~9_{)7bG(9dm95!kmm8-t0+{ zwAu*jskFTXR4iw};)w6t!M}@?+?=NmA8Y@JFeAY!JgEC15+7QU$XO_Thg%zTW1NS7 z)@05|)9x(50F?jHZUJ&~hYdt(uvE$4{vXE7JI1jr)JWQ#Vc#{mymBx>zwW?D7sH-& 
zGkzR@C-qastKF2(;zVh@pFY9w%*Q8rUvBv;h#oz)+`1z5L}}zagv-@^r}mk&2&`WU z{E35~+k15e2(oqA8HtKFRH{c_#OGjKN5XQg1ziqOQeAUT?Fh8`>~OIAUBgq)I-X>= zPM?v%hlzZijk>@+CD}CI@X`F_aj^44y2A6CZX~w}kil=LO_^a_#>daF!Cp&e_8CW} z*qQVBU|;nBD<}0hA>F)?0%<@6{}=!{#^RZ7$Kqt+9DGVl8Y&x-&becbC+{K3i<~B`PKUX zBdfJ)yR+R93&28|&kMbfIYXkYX>sR51F9gO#GBc>FB>&xx8`;P>q^0AyQ@5W_XCJR zXw$b^d2tCMUa?X&9zc-pI4l2&Fnzq+)Ei3sOqM@yuX!6?>vCWA?yl_m7kg?gdiF7w zoO*Y!FpB4{`{6Eo$Tr4dR4=~atBize9+WH6Pj9lJ-&y3~^bF=MX2=|GUb zLpA`-JuUBrO{g{FLa8T`raui?g*v$xIs z5viO1p8EgN3rWqmcy-Qo{*j;ms5{4mx>TiJdTg&2_fHK-lC~^ceARubtmDRw!B(|j zJ1ULi-gN2gnPYs&!z5YRuvu|^Qc$Qty4jhs*k8);y?sdSF1R`yydw z^JjNelXYTI?&qx_e9>OA>xz%;+_`F<2?xrmtb0sz)XskTYkYa&<_tSwn2q4h*0su+ zo?UMhnWt6hJGFCHtd>z&gqFwx@t)C_lBOl8PIi>+KWL$!Sar{2vrb}Ycf&JlRm-02 z#-e*wW$^-DGj(!;QqG;J7TH{GzQkfhiCK5hhPFfvjg+s8W@XkF802;|`b#^Mtc}qq z`{|eRO5YV)5`__GOC!6R&nA^zHBSnWmRQ%<^i>V3c~J1xez9I_^*2td3=8(`xsv2* zy}Uf%C|@@GZ!8$YHXC)0*P`T?vHGd##Em{ln#Ww>2{@)D$M%0|XpgklO|;IiRg>!m33nezsqaMQ}v@m0yOtIrhclIZtu$r(NEg&IZ?w*Y2tc($c_(=QrSL6 zkv|ID`uRbBOA8KgsTp}~BtOX@j1!s0;7Erzi*W8zeh1@(*`ykOmEChSq+yQAO|CyN z>_J$*evIXMD3SWf`L*5-io1qwY@j)Cv3jjN{ej>1f(-1s!-ca``-*<4n&tslU_y6Z zXuqej)*!rL@U%k5$;=5Y8*n}jwiydIk9eGd1b|b7S)*wYOLNE?AXt-2f*s>yiMni{sn4WIh zx#RIKC$FmeOFQIa&<}=hC1-3g)-g^DHE$~w4HoZ=J{YRMo1P@(g{f&`=+)M%hIQL> zx0aSzmiLHfI_BhOe~KH$Ehty?k>1BMGfe?AYu7Il zcj)~T0$_y3Mf=O|t;`NNHhsi1)En|k)HsEjd5m4LvF9G362cBq^ZCvDLij6R?Jbbq z?P489?&aXd?&G+WF0x`CMv+V$!7}pK(@7mKns-~1QLTa zO*x0qkGmQ9GReZDiUTFoXWe9_L#>wJxTka+11?K=G+m3Q4F-ot?PVgTtbxK(xTb)m zd3$#W9gT&^>q3hw^$BAi%m zpj>L+7P7fv?o%si?B9bto{RH0`!jy7O)3IiDmQY%!&w&*+BEjyj;3v9(hA%Qu>C28 z!B*#7U^f|`Coxzd7b3x(#}q}l`}%GpANew};iwU=VG@ucFJhZahKeBEU7drO+^Bbx zg>H3HS?E}+9G4;yMRE4$mWxx`cQLo|kPmm}%Sk74_jkU+NkY43iWBb0 z(~_}n@C(PA}X+=Hn5 zl`%l7g^A(znL`y2^5HZsDAB}^3^hQ0^V$q+YE>w)&E0HNnWgP5C+&PmFFy%h-<$t! 
zlq zO+TBJ_0vGTCC74?>CqjkCO!KIbHQ?^9qgK+MtWx2D@GL#EObQJ zb)8@Mv3^Wz4*aB1?7ag|4KsBvyUd=Dsi}f6gPi0i_i@=#@;}Swn?ob13<}r{%Py!q zUD&+nnNbTk@rb$K74RjeYfRp9qi%!L);h6e<)&ov)_z!sQW$Cg;MBvq?34)iE9vg(?3sD%UKvJ-@mQK(+WAd!z_MHFzk4 z(yZ8I`MB%Wsow9u9h@mth!&IvQl(71v{McNDLVo-7rA!=pmORvF4}RPizlU?C!@yQ zgtP&v*k##WSW1lI293=`^5r+m#(J%G(L$a|k4Q{ZSGs*B>R~i$i<06kmmH_`C3lOW z9%QF5V6_`eGtEiKXBCMNp<9AT)kqs|iDbdLZU8D~Op^HL<#cIKMO*U$%tflHyXf&t2N=+0N)oe!uuT8&3{2{!(3j(h9F`&BRbVE`zV&)U3Ft@m58j z(dvIl)`YZM^}P;*vheNmvklb3Ct1a*VRjb5{L1%zJ^9scK61-*?Kmh6p0hYpbp6DV zh=S*oc5Dp+NIEr*(vL^i8OSUsnS$5?vqshnq{rL7Dv-k}4H zdSMBfBuzC#&BE}zU+{XEM7sBRn(%@%#;l!S=VKsoXs8;&J&UhD!F2W7VWbk*=AO2` ztB3AFkO>|{pP;wanVJZI{2S!QK!E^WjFT{edRJaJIOjy{FVxSf`}Ko=h*|5`U#-nFaX*88VaSO+%m*8Y0aCqH*bmfaP88asn~U&N|{XS@X( z>Ww6wg3&~#cVIL{4Ge+8r;ec$#e{3%v3nblql?l>h-3IwJ>}A2JfO5#@7M;hdPoxZ z7HW|=E+1gmKY0F!Wt22j?_V!%V$4)jnBl!>(qbOjaokz*A`GWZ)aKKH3D>XJ*o-!f zNa$(&ato(9rgi1tU1`8gFQ4n{tSTpwxVC)K{2*b5apFI-Dr`BcQ-v`? z3wGxt|A3`-@UPDZ-5j}a?xmIb<#Q6=qxSK)IXU48p^g?63rJ~WG0d*6G{>MYYmDyW zP-&|(wa=4c0Rza}GCUXHyA?0mhBV@+R}+tgwIZ_2P8h4%qLus3jOc9B7x^gj^0|LN zfF0WW?k9Qw$(cu+gWE|3GBBhlctZu|HCHqC2dOBaIXJoyqKJ%ReX!S6Gl`Y79pP#0~o;T@eN z<_&YH=li=~THl~_vswyIrAmzNVBR0MVe&uv107vP9M>R`U!{{it9ol$hL8!RL4iM z@lm36x$hYSST4dBmM&_NfLy>}EI+67TTUh>{Lsc2j4fZ?OM8w(wMhMn*ZMVU;^9H6 zcKpE-yi}svdwgJ(-G-irT}5ZrnVtvDp;dN9v9Ur9W<7TkoPqH&xeNH&Ur19ZA~Uut z`*9*vDX5>@dnKpx7>)?M`jl%cJjjVKiolC)s`Wu$wFah}yEy?+hR>7DD`d*e5 z3j{8s&nx65){WnX3b2hIi^hgBNV4+`l5AxPJ@wGW7J=J0pHz=QcHPKX?}dXS=c93- zmS-zwc}5|(eU)WS3)61ZwB%o%+J{Ix8R?*}llXp<;F*-PdXd@#k%gl>>2SZcj?NV+ z%dVa@YnVt^q9P`uTBaQrxi!9j`*JrK)z{11kU>WO3l0%`^n7*Lb<3pDsqf3tq(@#vW@JA7Bi zT7$`d$Vr65$aJ2fS@_nC{QP(5$t$k&WO{2cUdePer7@;+FsUHdTkXH9%~E5ij?BFQ zxZ{yiLa~6JBf^W6C0^+KsXyBYz5x@-8tqw$eiu%`cqm&Jqv^7o{=CG?0^*2}MbP;0 zP+=9M!Dot}(WS1{yhMXdXIIyeP@Q3INYdvDQ8?MatCK*GeJ1#_=a5Lc+-DYxv7!TQ z7p_JdOiwZC1MWQuZ5da#gN>!7=j(wuJ8BbV=)FNdGXGxp>X6-OrDpe*RITsz0MR$d zv*q5ni7tVpc~>5=`HZ|A^^Jo)B6gqG?#v)4ctLVX!71|)tXHBIdUA!6Cjj^peS0GV 
zoe*SFDNf_1&AYS+(Zzsyd2Z(LxH7vu;^`(qk#clZT%nfx__&+TG?$%C(-ecZSx{!Y z&axk9cUpfg6Rf%fEZd0>q+0zqe7>cj4!xI*{0Ufn#0-i|Lsa65X+~!;Ylf3>oONfr zvRB&qxrD>g5un5-wFGcCnmcxH&{tjF*WKVqo$DC2b7w?6b@{vU`2>yTb}eCha}p~u zFYP|E%S%eq0sZ*YWldDvrgtW>gm#~wJtpDpdj7g$=+nHrOe5RBWR&42s9rk~PH*lyob?p|rW8JTbP&&&mapQxDOD^uS?k#&FXD(Fu!z1`4DNGsF@YSv9k zCwjQgbjc`_0{Pn}I_j=g&yU?1=ADGQ{u%jK5VSWL%|o<{@Ym!7r5&7*dw7+~S);?( z-*$=>TOQGxr+QY@q|X8ZVWizS6Exx;P$;I(h0mv@fOq;8mXXKO@vW)wCLk1RnWF^r z#E9w}EoUTIszt7-dBc6nluer&zo>%RRA}KgB!vsG@f4ffU8=ZL)$h;n-F0E_l>oQ$>=UYO$Hw@ltQWOW8MYmo%I2iQ=EpZeAs?Y%^yUD#ZBAe;!0h1huWOz;c)5@sot(V!3SLua!qz?9bgZ^H#1L{;)Kt z#_7TlF+@W#QH#ukpxv886CzBP7WvKa0ug)}5lE1D`6}H!k~}S@kQ0_l1==n%{!E}U}DjM7$6CH{9b3tWD`M_Q7WHR3i` zug5Ur_j26OZPg z=f^YK?rgkLW`!2KBuwx*Bw9)W9(Hrj-)Mw1@yfIcB zfU#XC;ahSyCmp=oqbb)HF+CB~YGqq3w7u|!jNW_oXBk)I;0rXOMM**md;y6|Kt6MC znL-Uh{9U(J&Re|e*3Yg-%(R*J3H`oA*s|%5Uo`EIk%=hPejf*g38rs@gswffkvZm;q|ck9UmB>B3O4m64-&B z{Svpk5${NX5EjG`el^p~5edN~#sonNX{*d>72DiS=@o~g*DziI89!?5`o&FvB@`Gl zyWLK~+`;a4!0pJ|hiPRm=Z#Ht9_trhw(p9@2Tz~q-nIVQ67Cz`D63YGP0DsWJh!>z z7JB?-;1D&WpJpVSHx{s;wI|c|+WD==CCm}PsU!3% z|MJ70q!#7(p*OExow;E?Ib0O!iCF*Bl&9rtx!SWBWi_&J39vb3W!)kUfBZCBJgdZH=+ig@}BC ze4j&6LvqK5>OC&sa>ii{RO}3pEUcStM8ZGArQ{Ft3BUGp-*S!&fmxATZh`U!c zh;or1?U~<&m+%0cvdydKg*0nf{hUo;rcG{zPYln;A)9081jtABM83(-y)3n4WH{iP zokjvaZG6-AsCLNYz+AFFrMOqT_alVJLy1Aqh~^EklqKZ2E-C))f+C_Nm;0gr`PF5I z$zk2uu!$DXjJ-md9B?OG{`o5kd{2*Q{VRi3+GPj=H8i3JZ3AhhlC*Bb7DI~10t#0J zJPIhURmpUi;aU;r?3}trH}`kL_uA_jdJHG0?$1RB)@dHQGl(_f&NIQ%sqxdY{1Q$< zJgmFSy7zJaG+BPrV^HW@BB9(Dg9|zmr`Y1%x2KXSXA%4l>InRES^^|o^vLkFc38m8 z$f;86F37zX92}g~Kz(PVrYk8miB(yjuxJI9=NXC-mS*)QErE=#7as;${ss$H+&iMg z@(&-dHj?&%>Zw;htg`CqxSzB=9JVSs7Xy9ixn^5}_J$ou3IhY*s4;P?_A$K%lDB)T*cFO>Nw;)tpw-X)hsrb))4! 
zTMXbU4wQb`vz9U3j|mZubIlr@cvuGUU~G{gWIS8yTb4oWQLz`@t?YTsdnB!t?T!jl z2U;v$ym-3A~VK%h&X>*J`hg(^5U#WD;_`_;zGR>=m+p@CTk0ep4-{?_pTdRnQgL_17bf zWVy}iO_Dc!sQ&v6#Dyuay4xvHjn9GK)K z>^I2?{iXfR%o%g76eKpZzq-}fVn0*t#IeLs-$=&>uclsmuVKS5>N-i1?lJq1_fxKE z8wqNY>gJ#P+~c3(4K24fYt-u)-{8NrdhN+fg~s>`dlnLAve-#1lHMI!XBLcR`AFJ5iARVp=%GPkqvt0W9M8y@=ojVHg(8Ur4y3}?K3X4`#d+t7n5mN|!(X438ID{EUG0|m zS$#$%HpswIc&j6S%wex#LzHuNmtDW9{z!a^)FfU!Jsx+HKw^E6Q_TC^PAX-FuYQ<8 z0^J8z6+NGp0r5Op_2lWw)qt=?r3aS|KV2Vh6$ox zt&NevQMaN>8=|BiJ2up)=0;_{k=XO>>p@k^$0}z_Q7==k%?p`4c|MQ|Be$&YHKX`# z63r2E%)htp&HlMsJFhRKC4O<~S(u-A#Wt2FB|)a{T?MLR zBR6lQALFOlQcE^8VGsUt;mX5tVT-m;p>}Bv1Y!D3F$R<&1v{5cea!Kx!TL`6>5kE% zN)3lY)VI(afq+hSh1?~8EbYG(%C7IZCF`+QrcrcCa3`y_u9gglYc}+{~ihA zwD?zJ1^AeyR3$=x)!F)@bIs}NHK1}$ftp#|`rYq4pwv2-QpHfj@=F2uf8@qqI@wnE z=f|;KC)2C48^djFLAJF8#AvrqXRg^@jt=)jbynH!d)}|PzVI@!X^BKv->WWoR&&yP zqF9B~1}uGL#JPy7+32;UioKSTCqpva5njQ$g02bk+s|B(G1- zi}?C;A$)_}3Uev23pT&woq;AV@>jWzcPdpH``O-wg&PvxLE5gI=0=O#NISw+i{t#I;sODUG>hU%E=1Tt~}DQ{LT8bH8MQ^xck~K zp1955Nyuz|jzh*48H;B3Qq>3DqzYDEfbbw!Z&*;qIf;-0p7v;CS;DN`jZjd$ZypOZ z$iJb0qHengt`?5`J`^{YT_?PmZNrx7V4A^4&X0 zHI&Zm{~zYw1T4q&4Ihq~VaC|9mR5u;McUIgv`ok{Bw9qF67s6h-b^U1grc30(I&Lf zGH5}&RJ2kkMM`2P%ux-hhZG<*#oSbK`zu$j3v8nQY);8UP zad{mcuIO@gqoezuUw-$u1XpPEhgm|QMp<6mrBLrpz@?BKF1@#n&kY`HYJ6TtL|8Dm^bg4g#Jf|AUP|yuYEnz)_D+= zjI3&;clN)BB&0zt;Usq_G?+7Yu=T@e3F@HGCaPJJ zF^SM7OUDQc-*l}^IeKEaZ^7`BOKh`_9v{lmiQ7MOynx-|iX+(~XN5FWR5U9@G<-VS ziZ(K^E?UraW=f}W1{}I)8j0~kC4-*`0^Ukr6W`f3hyw&`_iu(2WnSVJgZ_4dMl18- zTMYgtJQD`t4S|vsV2YqwAGd~&ivRTp)tYV5dfcR*0;@GW!rs7YMs&CBO{JI;1iS#6 zUfARk&|X||rzvICu_j(jW4lcy&){W;f38*CfoExvdb#Hu0gwCVnHQH#{4c6&GvAqr z<{q37;`#F8PqPE6CfL<_O4lH-qI1xc z-liPbC5!dxs{}nV*ykypLy1)u z9HMqw7BV%i^J~!E(~a^OEfA2+0Hnl4s^hM%FeGXnO_fGaBJm)pTAZIeu#)D!rC+lR zzD|RP@}bOjoa*5_cK+a;UVBnmkMmC-p)bg&zCgF?JKgsbY`=Jrm&9E4E+}B_M`oy96WV|s>8saKb}7J2aY0# z^=k0!zWWGn{B^kJ$nbxC^V{D)I98qH=Z>sgWq;w!q8}|lj?X{b^vc1)`RyDMX(Z&G z4c%bqjCvEK1W|2Xi@H2I0@HEOkxQ{7V1b4Y^_MK!`Ga_1!X;(Bv*d`9E9RC4T(<=| 
ziI{lb>!x1UzxOLRO7PD9?tvl}pY~4cz4C1ZYe>l4Lkx@Qw)~|Pe8Uev_*>*sSmdKc zn$kp`;Le7^ut?^Yy<&+6PLvP1tvok)ln%}l*(2m?8tQatJQ-uWCq zc&QRfxH|PgmT8QA6o9sKLzpP|2Dh*aSSenRk#-yUd1@SG~gb~Z%QQ`UcD z#=kLSho!^cz;C-0C?+8Ak=kxD7MYV%G6aqzgp9lin!0zaIk@&)HK2j-J6)d!-yG?Q z-u(uPH2!Ms8pm^BDxE%-O%<}`M&BP|9Jx0b*SZ-=1f5d2S}-9PS`CW$Mt?T_OpgL) z${dVH<#{jU@Zk>2biQ)7kQ`k+gYOo2(4W_2xNG;wX%@jKK<|A9TkZHJ+yLq2ID(U3pshv?*hX5(Xtf-B z);y@TW;w>lB~1XTB~;E&=pSZ<=~36U(O_i4Dv8 zg1KnrO>Lxm9{u1d25Ye!DlCgM4d%E|ZZ8o05E95&vrel0LG-Ztir_ZEVE8^4Wi9pz zY5XT#+$#L|DnK8#qW>C>kh)AFt>6 z{Zn#C%*WP6AdplNhobNf9 zLjoFYzjwA;`3Rl8ZYobLDvr0*@NW3Qb4tv{MGIq%vWQ*`Yh2b2UO5BK@@oEQ@)qIG zSArA}7k$li4pj&cr0`=SzX3CN6|0qU-%N}R)91>V?n5vYdR{*o%}=;|D<$ZpDlLl! zExyk>(bL5EaXI&cm(aLQF#12shUbeZzOIL~b}qeE*isf+`eY$<8T9D=_Y>~aPPRDp z^M#o7NzxTlgH|1jPVyDfK-=!j4zvRxUDai}@ylHPml+RD{Sp_GLG%!W;;J|oIknCv z65Cc&x^l*NKFduMvcv+D;N3Xk`@2i{Vs|&f0q(8t?=$cKeI)KrJ^$-1uU!N(L8ag& z^o;b!qY-;El&HRrYHu>p0CX+n6~4dVt?MxE%C(?0#yn_eP^v$DuD)x8JtF2eKW(Br zVl_H=0f0Q;zKQ#O9zzSrf0WSVNiojdG=eq9zT2689Fyy<+74=(sl96WJ!5ac4;^P7 zyuZ7mujBMo%Z|*v_Pt4RTD{G;@%y_Yzx!8kXsWjIkK&Zj#0zhK0Gx4f-2DGMGW!2dICpy&_Hz>dWx>u7 zL~!=IQ2uwiYxX~Y@MH)#)AK%?urEKDPYE5TY7^4>4o0hg|7M*6r{OCU=TXlPbV*34 zAq2l&#yafw?Um6u2Dn!4`ICBn#oBd!*7N=zJ>-zp!qQm#gz`sJF7P3t+JOb0pjw(k z-w}nsHqf38I#$I^V#m~AiE;6dDEz=-e-o&z=L@q%JW~1Y`Y@(f43&XEv(_v+>#3V{ z>${AogsM3NYUIGXdM1(E{^vK#aNyT7+CSIa7KRh%>c66lQ5Xd^h0~y~LA#cKCOgzO zxFW83dnA39n8mxCL@1ydKD(+B3mn+LBki5z4`NzjtFf#dzSrOD?7oW-Y(XBNBtUMH zY97z`_8=)%IzjZjlB#@{8}M%l-{1+iPaNs|B;a=se-O$1*MJeea{6hqs|&x4u#eCV zk;&Q}t#F?4t~U0*lTJHL={GS@$7B!=GkvkV_*T6;(rbmJSxh>$U)gefJ!uN7sr2nm zyw|KW&(L|2lq=eWN#1|-{T@^f=EPe@YH204ChCM$t3D2_R{T`W&9n_g62VYJCOdW| zIa!{hkLj%G1ts&CWn+nmwt3jj0sIuHbOS@wJG8*>D%OkHX22z?;e(^&X?AukMGJvP z%S?Zn`S1s3#aD2u--4gUL;7IL^>lQ7<3&rdgUfa%Csu9JtBjdHkLGQoV%Hvd@Lx!b zm_nlu;Jw76^HwJO>S;Iu@)~+@uj`rt>~=z6v9{E>FV6((t{ot2_)%D|J+5H|+*$f_ zfZ?7%;HBJ~u_i*TBXusQ?bz}8EzhV8u#4~4^u3#{t*t%y733x>v=tC_t_Jw5CtA@- zaJfaKZtH7CE~6H!&W$LWu+aEbj)b`J0@PjhnX1`4iP 
z;QprrnbewoB4U+pdIKObGqY4b;B{F$jDu!^wCZ>#2Z3gLQroj6G}gHgp})?@_1wlL z-toKzbFul{FUrNOd~{4(w!7X-Z2#7}#P*UdR$b8VJQb_<{MiGA>gbk&HSRU{8sDi0 z^=ny+{50;aXGrf=TOi3kI3o{5Za-_?teyHyE>FqP8D66`5`l}BEETU#G|u1Fl5KEk zs{8ElmUAIFUdSn4V40Uj??tYbM@3b28d@}c=&TRBz}*pV9+KYKQW>TlQvxdQhvRiY zZ@D{s{nS0Ui$umvG?_hHZ^a^XOEb+(IcdiqVjn**zUrQ)Gs-JRe3ZBRn*R1bR0Q%u zCu%tph($!-Tf%?*)!*16R<--h#+5XTtE~c%ew_UEzsK|6cv*B)E8>Mon5W@6O{vr1 zAIeBURUNqy*Z70wYWI(Ryn^h?=55<%0ju=9w$6{qz7wSmhctr|gxG!7u!EmL?p55r zjioo1>~VV-l>{W}0CHGN_8Dh7|K7cOHT#Gs@AuB7Ob){+X0$kc&y3hcRU?U`uw$7<^b5D-Ozlgs59Sj z<@jo;id$NGtc+8I*EOfjSF@WYzHg7{6|4F)*g59Y0?e!AQd@&IqGO}q-iq>jR_Vps z<}(cXZSKpwznWhYn}rKZ=#v*sJ$`7t|8sfPp%JeEC;2)-QI~jN`ibZpSp#;X2fYU( zFYKF`p&H5O9$uvfrBZIJG@$gQwToAd3wNYFUp(@0mE`NA&$ewa0r~GBX|@O1h?weNkI;IH9^~ z{;Xvtdb~|v%B4?l2zpwhTfHCuw$#9MdKQ0qqD+Ni&(`!Oi=;&-Yd@}wE#4MZeY)2$zzBT_A$C7IZyZ z+=M%IzB#vXp5aF>c$C~f9WL%EkskYH@83z?R}Q~@`!*KnL^`O=*Dq45?2|%?ZxmXN!8&g*Yk(BZ6dlW#Ys&{p;sj$gwk5 zq*V^fWc&O22^?pX+!WLnPS##3kT=>=`#US^@i!pRN22hoXBvw#;Gpw$RN0!70{@bw z5vzYU%dDBV%{xhQ!JHDVNh$l#9;FssZg+qrs(|fv-?Rs01EJHI{*NuCQ?O@Um#22l zD>W)M%L|lk`SfTC(>#p$%P>x|HHF&=TgFMb2(`|^^`$We)JGw<+M!4C(OnLXu4^xh z_S77X`}`WL$`PN?B-oQ!gc9BgUtFp81>Sru9@C5R_;jFcziU#szo*0_K~+LwhIVyI z!+m3TB$S?DJ)Y~54r+G!)zYV@2$DtTk63@<3PkU|jb~Gz!{8e|Jn>r#)kHtNaA}P| zZi`%a;E}b2vw@Mwo_Er#()n%gE`Q2(Nk%WqPG+?4U_wzAWi_d% zW5#Y5zFE|d59Hm&UxD;6l-c7T_msN1?sdzIa+mzcg4WtZt`&mgrttGpBKkpkIx?2L zlyi*E+P;Wj`S4(yC1x=l)QOE#otG2b4XY1WSUYU>Ih=fjCNG+-?I$xjMmP!NjpKr! 
zUlu_5Ipk1L4ky;~{WHSx0ZH?;P~$i{y#(yrHs=JC9$r8998x`5*8>jCp(a1Hf8z08 z@Em+s)V=w0dpA5@6yo^R+74+ z$Y;TRJ4Op4;My^0IxY43J49;yx_&wZ=ZS@h4+Zs!k zBlR~6Futcq{EK}}m&*X{3mn}Jp*FkT@5Q~Uu00)2h zVq@WcTx}t-U8pD6bdXr}&o#4h#In*Uon{enGoprynBKQGSC?zy_$NXyVQ0CsPDMn}Uh8Ww&APSwQ4P7% zDYozOLj+JGuZ$Dx%1Z+QlpRly#Y6fz)?t)Ua{@=B&i-o!y{;VVeC<+abHeH<2wd@< z6_xjoc~kF4>if#dhXXEzgxidhtG&;je)P85`n!m<;IF4gP);3x`}T)kZ#eW_x%Jpg zneDXmeWkY+gZRUNqa#}dFIq8hG=eQL{%;xK-N;qAflsok)&@sWFMxu)8sutnBQE|Q zOyVD>dr%Km_Np5h)r;4VR`4$^xct#w>VFyhbIQEu69KW@deDCIcb>_>bKgWbPifkI zUTh^RQK#!L|A?iBmr?O^O=!9mGENWLXD(eiUL8$nUxxH2->NtKA9i}duc*5XKr3SB zM&!13`2P%lT%q^yyT?0JUS~W`f4%?hBWuf#4XjZK1}ERw%gZjO20ix?m%h7Ju8w>0C{Dgwk7)*bS9!ZlNU<9AEuB+oCN9&65|fBl%{&$2D4zK+@h6Ug zhZC5SS*C7OoWcPVZSvaZzzunDT;?XG5}g}epU*I3R1?b@n8#7~QMQed zv~e8-ukY|5#}WE=h6Q6|ez5fX=!y1}BCIk(9xnFC-gi?o6?;BR2k4bogUng3c|v7b z=Hs(LZY%%SqS`{q^4pC#lH8{#b_&+osH&<`{nbmJUpYcgx;{GnsCdETQEOiLGNqrD zzc7uQ;nCfCpg_r5LwAw*?aifS(7jk0o$D{T6xb%G`|RaAn@h*|Zv=~i&W2JW4vB#9 z;G4?^&m3=8R~#LG^4M(QP*yqq?q|1r;^i{P350^)SXT)Jp!ouXh+A?rq^zl@(zBXN z=MH>EFn0F@gU{)CZ=QyguF?xKq9?J-9w;Da?&m|>vz({$NIAQ|FA6t)8|Rey9{ZZk z9PkE0UxLJP<%%0@Lh)0VR+H+~hJF*Y$o9AqnbTW6o8v~!rRl;_bjaldt&+y5InK-H z2kbdlhxaBd5-j^uYMSb894d}P|~+qd<+ISL}(77y$gqu$P@{(iGI zue+C}$8)HhFy0W_Nnuc7n7X$fWNtTomN& z)I4dxCNkjV;Ec%N?x57JLtmqTdiaK*+fv#-LdavEjVt|$fk8PZb#}-+1HvfF`?J+mIi<5to9KC#I@(~x;30V$lKJy(eb9f;@ll~ z%^}kS@1^spcB)Ut6Q~u04W1uHS$TQ#%SP>hNEceocRi>sfq>&qIH2N9rYP#wvntji zuznd(_cJ^vd2!fhr?nzpUdq0N#FM95OT)BwM9|a>eBO+KSzb-PHs&mga5WEGB1sX) zjhNo<0KngQCGPz6&Yr9+ zH?2#KUkYN>(-PYruQSU0z%91#^TqAScC5^lfMb>N(Y&m^%k=K8a9HtvyqIxIpoi^! 
zZAJwX@@YI$V zn;$&f{C7DYm(eM&(>*)yFONRn=Jx0O)_l(iE%MR))>a4Zv`BGx49*S5HC6Pz%b{H2 z)pHx%fcDL=4Iaog0UlRk&y866stR{aCxb?*yFeqDo5*AAb6+?U&GoZChJ*!En6%BQkyb| zFYWlN*{qk(v*%RweAG|lqsa*fNtMkDvO9EjQm^vOP5EK!b^PDXOE@xW9J$6XZ|@~o zG_y5dsrpqaGCG-vR)T{rV-4RJd7?W#lepVokodM!*RbL4zn*SqD=nTWi1nR&k6$%q z_pZ$`Sedq2>~{aExBXWv?&q=U<6NYcsFh#^$<%bupEJ{i=b7W5ytYNLo^Fr$q(3F9 zWxdW@RiBI>(Q@54@nb@}JdHPVD0{2?elZ!}R!9P{=aevI!5MHOkA4C4t`|4#fccWt>Co)eGxihu(IEf_VY)c^o0(l1^4If$>6O`wY|8et8k@ZBb0@2n0 zw(sJW=cRDWqj}0#ht2e#$%Pu6b`BMl^GKV_F@BeGi4bi)?>~YQQ9?-NS>}iH!Ng*u zc+Oww60qQK+JABm+wg(pCwr+HrWv12mK*r(j(mURty{d7OeNeIPEH19(g>u0Bz_m( zdvIcEflD7aoKN8eS;M>gi(2ylCyy3GS4;u< z#wRV?D$IReb;`;vE1&;s;7lf9?xG^jU{e#pPjw3#@P*{@t3UY1U z#K;Uo47($6B8OmFdxO@_yQ6RVX5gkp;=y@|L*;zTBi@voFq>2IGD=3^ zqIrQgMByOFo?fnq%;y7bJD<%my+YbTltI~ErED4o5Rajm68)SG^v~5#79CqW5>5$~ zm}x`q>fvF)N>~#w&m>n+uU);pT}Wepv?d7Wnce~d&f_Mz!8=~Q6W9sw#*IcP2y|Ib z7l)^#&Oy8hg$oC3fS`F1U)_8KhtpV;Z!C2a9pT9eYi5u;K(RmD#9>a)i&7pQP(Oun znw=ts?O@LA(OXv}0o=(5%2cejLiH?y*^vISU8eWq@cMr$E)k>U@WbB-e)g~>Fp&KH zFx!qMn%>aEdX9a`1Ky(F0D!o5;}1+%lspxmtT{E2DQCI9A!yF@MH@T-S4lmSIJjck zY)-Q%RTT!`NbB!OV?9~mmk#)xYrdL?YtYfM#b(V5W>ZR^5?ymr908C|itevhS8~J3 zlI(V%>jDc3FYEzQS3~kW&b9=MhN*e(Z&L`^uWvjaQZ(!Q_U&AnPOtugovb%<6oCUf zNqEEevn7a>F0f%f%CwVwSPP2%R}uD^h;WWca;c>6k4ay6M&AYr8KQ4YuF}u;M=2V@k+7Fd(xAm#mGz?nHKyn*a9qiT;6|J3>yIXKgmJxu51>UtZg`Jw}{8M9azK_+#$g)&qO?G@B5>HLu> z8OBhCw%LCKsCwFLuCZjrT3yW{>{ zm=x*{Udqzt%e~1jB0A_rIjjAFmd`X|3)OgvPgKt{Bw7};xO|uGm`3m`Rp@vT5slKH zC}YU~%7Y$S`?d`Rng4DPZ)Vd$-16016_>CBp=25FKUYQ%t>J6xg3+#RA6|-+Tpd99 ztXdQ+ohF0SX0^4x+IY@=tLG!y31 zJ|?1BwEdo?DZ|sFA_LmTB@){R4G~E35l}jN?B~3LW!pHMu>(YUB2V^JU1mom{ z+p6!zAV>=g1}w0!0^3oG9L{^J-Dr*HOJL-W0Ph(B?iaKE{3a1+XASuG+Ji3|>njyu zE%JWjnNFr62^A~VJh(VXRl>5lN+G)$jGTGC%QV;<=bKw*kZ%~_v-}&Ye4RG0Lp?I8 zWM^Ca(s~!*I8#a|i4QmhJD2{dEzPCfxA|B~cNwLL{Pd1!e8BuO;s}%r4@?21v$jaG zG-oNZXJFje5PoRe4BHWtjOpBTkJfXSF_JFsPzMl!xpXIsRYOGgj;e_6y@`_ZSyuf~ z_qW;GgW}Zk%6UpA=onmtpA$_0dPDp`67xUbN7Jq6sORsRvS|Qt1)^0Swn{3I?IFzw 
z8MV}L_qDpTfE=x0v?-6(3G9|B@1U~VLL@~0EV^4|P>0kNe>j5#iGo5d9`SvD&OcC7M!XcM%f6G9HD9)@^cHJZ3G6p zJ!nQ+Sv8cp2g;nXr1&f|`xH80Q3N&%AC@KfCv4X`_3FmVxct3)(dZOuT3&gB`zi%_648`RG9p zv%=c=5w$HPgMa(@%bF`5Ba8+9{~O!DRC(-(X56dS2g-=?qs)>v8`1kgWbn_A=!oV% z9g&LkFlE|EpemD4WWupPv`t9Y9m5f-!nh)${dq?BKbA=(vL8xV)KBS%HRI6Ue3NbM zQFd^v^AHzP*_6{YY$v54Dqv#B{6NW9O(l~5Co=FIthZg^gaO-s|`#nOZlE16<`Mo@1)5=#vXl>|S z3IaZ7qqu20F*6CB5XEvO)`wZm!Q$8i$=btKG3r}0s_2|1CIS@g%;JRguJ6*T?8yVr zhWMyCJt8L6zfyV%hjQWKF9T=)(DvAm?SXly&0J)DVBqCqjyLJh9!Zi0oSKNzS0fN> zh-BmHTKvx;t0?YMx~<*Nzv8C)-iZQ=^2iy?;pC)rAiWWCZIJibv2>w!b)(46WSKl5 zj~7w6gc7FMr0SI%91B9XgkQbEe&}JdJ9G>dg=g0P5yn%&9wdfyeljfJ7o64e$>&CA zaRcDAGY#fA>bBan>wx*euz_gVQWCJFkqmw;DzMVQ|D{OL>-6+RnV6msu^D(nC6PN= zScREz%}edf0rUEhZ3K~h)LL7+aS#MLT49$pu4Cv(0!R{_ZU?cPA=3y)$IZ1&M%TV2 zVSoECwFGZcJjrYId~+sm{~dNbr5n5anh$R4Uo1l6MC>ZMZ?xu=oONG`o-O4~CeS~qLX0K?wDR3AsUJYpEu z^Udm#?*V%Y>zC?@$1u5pZ;#0jbWASkIx@6gmEWWM!jLH(^_N&o!lOh!PJt$T>hV z?abVdJbLVq%xz;VfpoN2F$e6=HBG_j$B{a5OK2gGrdDrXv!gd0OncMbsLOGXL0#Y_ z`|cvq;Xn(xr@F5R93}8+L@AW9d1OVpUGB-E*sT8!ZORQwDQ3=tPUy31`hHgkDj9wy zxHE{w(52lNKA)cQwO+>13Z&<5&EM? 
zURum>un_K_yMRoVu)|Plg+mQ$Yq*)#?0kA6yAzd(ipZHqISDKAVy) zE)Uejoj>CyHB;=3_8NAK_3hFlH+YOcsO-$6!}-6cV0%>Mxms0;N5tLjE_bR@US33t5E1IIR`M{_(DXzh=k6_qDb>@X| z5`iY<5*V_AIg&u=xTM$ZY}Zn7hFurL4uMc zt>(qcjnMk(m*a3jEfxhmUjUL)vhZj_TtDFd^4T;MymA;CEKPW5gFz1J6(UmO$s^ya z&Nq|n+=1Iq7Rp|V`B%=eermwM_#M$%$O8O%;GU^ z^AYt6@6wG!qH+?YHRf$lOtPRzFR{B}IZ841a(Aey@Y-8uaY^2^sIY15g^T@kT*qYY zXNt5HQCoNg0%a!)(;VNWe9M&sKxW-?A!bii&xM$~Uak4ng8TCH_-7l)OUIYpw8~6T zu#I;!_7d$NpYOKn~H!hpMOno_1(%~jF**%*+NtDBn;WeAb^@NMaf`<5|xpe0Z} zMDbF;E*rYbh#IkkFh$CyuzkvAr(Pp8ptkp9WnO?00FqMo{K;KSq0W=toF~f(Q@&`^ zD=9$Sk$RWZdcN$1z66hCcaOu@)=6_xI&#GCXIwHqYWBQpbC_J#jm<$kaUG$~a+!aO zw>t2=O5ux%Ku1-1>So)X4s)J!3lEmv0H_U(g0Mnc^Tf+7yBiEwd0$vsWKz83$^Xd* z^~0XM`Ss*2SilU@ej^Kl!ouXfM?DV!MADl+{wUx)j`yWh*^gbAGio(v9 zov+xy+on!7`{-LsAbb#D&O#`EfP}`644Vf9#we^g*LW5p;mcEcnm$7%1bQm$bJS^G zL3FbI=I<;x?P}0uGjW7{`HZ1U3D!v9`R8>X7?4@q@yx2W(H2{+1FW^pp9_Sz+|W z!^7+(7vpGetYdw>Tqk{8Y4AUjX&N#g%F|~I`dG*nNgUt!!@v4ZZpk1bhisDcam0x1 z&xmXNCnwxCfZMrgEuE8$sE7U8r1K5;KJ={HT!O;BNkE^c(uM!-5g;qy^Wjc=G-Nv# zAB77Nnb!)lnE_FX2K{o-f39c-NbLw>n_pZy^cp*U`j*|W=^iQpa;|EeGX%Y=Jvs<1 zE!p4F^(}HG+oK6jXMuBJJb*0V5>CFG7)Rk-8iKOvQe+h(1K^gap~BLB>(;I5P-6^e zxAp=FjC3-NT_?w+aX2uQNcepFGjs4#Qg-06nS)o5{7#@s^9d<+z0v(kypfmeLOIk5 zwO_(Nd9f5DFNHPfk<<+Fd3S{THu&{W*`UPI)2ck9al)ynTiFCRR;RVgKW)8~VY=%9 z?=6VzyraA9a+64q@qqV`H&_sP^u+-e#&;Ysv>OY7p9O=0hoaSaHyx~_y8WI~#UH}R zr!Vye$ym=MqAo4JvSd~VN~aZ!ErynjgiB*^V)PInqr1Jn^i|^!=Yc#>8~pDS z;w+uIJmq|sUp07Zff*X9cBk&8wNWI67&(b+c-~ZJ(wZhRMwZ7*@i}((gV$i6%7~)- z_9g+)RRkGXA9c!7NnECq_T6o#kvmpTZ)jDGR9rRq6=gBu!KHukiP4-ungaj*Z*N{0 z*u_o@nUeNA|FdjqnEL9pew{z!^@LF~2OW_tC@CeD$fNSu;hVyA69)#`Kr;A+O;{99 zAl{bn&a}?f<%SKqTWbININ3vN4f=J&RL-VNfAwrvm(i}~ziA(5iHirHZt3v$o*A)i zPeu9vRYtewk%#S49qsfJ^B1W!PqN8MI+ySHK4g7e#6=$i2-5Rt&#iY@ypRha!uOj!L(zoOyRF|C4rS99ck+W<`j7TI29H;mQeyHi z+X#f-VkgR=#?`*F86czYq9@CWmR;I6WW$7KeC z0hficV0k*aaqm0w8v97dAbbcm;5hlJ{Jz7gaptRD2!yzgc|H{#n>qZmk&h0m`y=03 z>65HW3R$rNtnp@?j&K0WGS?v9&v zmEiRs5gR%gmG5+B+4h?pt`rE-=KSeKTy!1lt?N>r!0Fh2BEjP$&OdD3wJwDAL2e=w 
z-49&*%ZH@0QqluR2a|OA@3Z%P{o{{#;*eH>DCnPbbAiU~k`IHU=g4@-zI^fdRudCk z23#8q`vNm;W_vODEcT^Qhsp(WjcS%DcA3{g?of##yDHFSXBdx@MLzGD?)#E<3F zQmRM74}PSpD-keILzMRRKs=bW@;e>-+T-52xtf3kWd{@_|K)+9Ny*I}n3TV0jOS0; z9si2O5pgk}IjEc-l4wy`{6QXM)Jk=f>~4774g&fXYf^Y6GC%I{>xVKZVAXbisH&nV zd^-aDbkLT_P7}}ws3%Y7smkjQ*cB{{?&M8c> z;g6ilrk5#8BA8jZ=kXavz~)NBcudjecp(jF*)aJJx-cqz?shICRyJiqQHA4On(R=S zG;baxF8EQNW#XPcba)m#`{r4cEspoqmeTD!ekB8e&o=0cC?lCt1L2$|9VL$&Jg04e zbHfYZr1FCJ7raz$K#2{id$B2vsFMX=9!4jw)X+02OYtW6J^0!wpU%-RjlJp3zkcP( zcMcw}`$7I`_ET|oN{1~zLV-CQkFGZb{@EJAa~doy9bQl^TF~QR;VjXd-ZiJJ=p}Vh zhX#UAfFZFLwA|4owWfUhbvBxymbxo{*+Z`$RWwLTz3tN-8{^K$D7S; z#SRS4N?en*-+o$p028bPBaqKz~Kd)LdvTH#WI)ZlB`Ca zo!S6=Kq8%$F;kmK$Y#F28RIRWqzGrf=uPR6zwvl>N3xx_lH%;2TwQ%vc{k0h6jf--D%<6h_k|tOF7C)fyi0O4@hY3{H%QEIJZ9QTi5c9KAewb6@GFb z$Os8KV(xBi*&w?BiujxzmB$RAc2hQG+08Wv%}!O!64m3%mv3DUzO%TeL(n})Hiczd z_E98={F!}a_0++;D+f0er)Ymj?DA@l=1Z*nn@-}y=D403U*kik5rv;=QT)kvibv}d z1*PCiE$j7K>Sq?37ddcud}%CPE~@bP*+ns1wcg&;kgO7(gF&q3cly%aOV#zbfAAkGC>iYbAS9bJqXVsEXJ__Ud= zJj?E{$>*ziAkL4t*;DS|jE+FNjbUHZRC-%%C-u*D&@XlD=~in_G3iwGtm;u3-@CCl ze@uVTjg+wQo?eB{YSR17=LTmDum{zev9H~R;NUrKk&IDg_U?A%;A(D0M7#YinkYzJ z_lgYo-&!;$s38{Q`6pLwsI?__1*3{RDh3o=TkpkRUQmNpE zS4Q!q`%w{+%}_ZYMi42tQiF_d#pB>FM0WYsk7zBN7+_EV~cn4(YvmCUYS9(&*)o80oMnm>@W zcK?RyA*?y*)i>%#nCNrD51N`HUC&pyK~eg;<4T2Ed)Qi|in+Ch9aZ;^9o$s6{<;ncNqE5EH`mgGz+TJz!-p2dnr`I+M2rS(e^*AxX zz}tZWs!Rk#hBAI7fsbl~R|w_G0nNbIL@wN@{X)_kU_==~rgWd%q`^(A1I;h`Kcb zhvh}J3)NAns-&kuxSg7h7y|bup+v%t$<)J@oAH6w(R z*>B6gT^deT+9K2)wb&FfYZm|6J;X#?d#>(*j%uI$#gna!F})Qu+3fIgj#~~t*Q|XE z?L315p!D7Xv0sdBom->XxnlANUaa~&M#%Wb%T=#4bs_; zJv01Oj#vFgb$O58L_f9rJ1wnh+Or*x$L^UqZQT0DV^<2kSu$q)-V33>iblH4b>q=h zN%m7^aUT*LD>r||vL!1n6}}lecI-SswJ|%EpHUW^D>U`bU)RmL9bQyv)o)_E?wFR% zzfCtAy6u9)Lz=pSw$vqOjEU;-ZvBPB}1B^*M#?phDc8vw@ID+;es7?ybc>al$cBRg*x2&`>+g$@Uqc&Kod`T zgkQRPfN=FH-Kg!nX-fH z=#;~)Re44gw^-bzy`H0BffvcOu2cCXh^>^mIanSN??f}51=-OZZ z6596U+#*uK<{@1M)J~|Q688!ye>lSLryLy~9c)litCBU?&o!TuRyesb>v~UV^4r#D 
zUhl{Aw6{6jiop6PU~xlTm)6IaBxKj-d_B~CGQxBPqrZ(JT`vmK(aiQFFMl92`Mgd>Y4^1fT{*%I@XOetm(-enJ|& z7CB;6OuTv^kRKT74A)~NY3Giz?ckiaI(aXCN6SnsLdH1LwO8fUJ}h6JvmNDP)_7Uz zP&qCA041qOj^4;7Qs5=Lm=*Z#gKw&5FT~BZ`Lz${>y_XM8U`#qUtO$e`E?Jh+n`08 zBbAr!xP3L_4tfr2_@o2)BjK{~T0?jQUGpXT*zm-$(UEh1QLQ{iCPNonr*ttJxY})X zq&&B$+F1)}Oyey4v}zt{Ft7BeLRQgMS z6u~kP`B0C2o_k^V*7)t^z%!h9gv$mk;N76~;yi64dAt`DEO8xssC-q8{!+pH$b{^= ze!+>MXKB$0@*J|RnS}#u#XMX``SLWNEoz=~iW~f5At6^6I-)Kz^iG~^FE1KNb183O zZ$Ik~aak+98pk)bu{Xz?y!qlKzp|HcEU2Ri-FAR{D)ojCczS{}lje`}Kubz4<)2;y zl~F!cIHlx09Z$hok|AjejewW`H1vh(FME-11y%(b&;xLT=eLGP!)?^;OQeA0# zczB1izCHh?cbJ&DfRuRw)gT{Q_(=`~g=>F_IezWko^VKi^3o9oMa~dA#WZVhh2|N3 zu7AQ^3dF_D+u_!Rjm_il*UIOGSU6-;Ud(OAK3>KULt9AW*}xE40Jzi+9y~Z|h_@dM zER#3#!P~o4G9}Tfu?DMb%d);*qU0y>!-(%8o*gx-%%)$?mz96NVs zTG53si%s!jdf@HlL${IY(FsTY5 zn6cP)XDp0R+<|@)`CdYk+G^o$(2D$fFJE^?;-7UFqM;IE# zey7YL551$V6xXQf|${~uh+^O;pERnb)u)cr@_E)_ZhS1-UBZ{HRsz=$s)kgZ-OBzj?O1}P~q zJs(Iv6l7FUO02!Oy?340QV_R?KMk5nd(eq+?!?f=%+=#7dTLU*v)Y}l$lcMZ6T6Wg z>H!l|pL|j- zBN=u5kzJr?_@92XDD&Sm#p02UpWJ}s*KjprrNcsw&{i13GoNc~bJ@Jcf!PtfW1U@hZNM8;`zY(pT9dW60#LayoK$A4Uad06z9exg z@!(L}nQNqBysF{ zA&sj35x4$`2G(R6aO%via@d^-<2rH&(t+{O?|n9#tT6E1O~(h!lX0F=j;PVf5}2Dd zJ3M+u39~nX|9~eV3tbrt{UNBBzItXaM%oJ0<#_msqc&A9I?nQvU)u3wsJUq5YC3}{PrUOo6pC#`K5MW{XMMkA~IBQ zC~~&g*JjXI-*h&=D%iT8WEX!749gPWo6XPK zT_N5aoY;O^*Qmzzv}xT2eg#?45E~|HPwajjK>@I7ofnUsD4EAt%i?F95JPYkyYHc~ z>KOdHTK{~5={)g=T$`)A`ncqFnM~wcakn_hxqG5p#D;rIy1z8=)a5BkFR|6S-e+h3 zv?m?A)C1ct%cI;_#p<9{S<>DGds#2Tm%BTw3-J0?*`s|&!pe^o0O*@PwbmqRWM$+i z<$822eP`TjY*+S2c*b4tjqU;bDgUYmW$nD*bm1uY0H};LRuj;H$9}1 z9vQPQ)wTM0d$?ZpyCm(v!>R#wJ1w89vI;lPjx%kdp_>YXlw40bd4yjwPI9x(UuD}`iD zD3jFN+0ycC-EzqfI4p7GG6f)L1#@6zD-Gdo3sQR2O*Kr{ryaCeW_N6NboOPJ?oZJI zUOXG_&n?}HMPhyB%)khk`PXAl;0>lJ26bFRO~)(^ABhvUi^~-MSwpT zStSTb?Vae-1FZ-FYEd2rt^3MtrRNuKsX@J<`Em4_!W%md{(acZn^5LnaxV(RWL}tJU#2?1a%w%Q|`XHy5+>RA6cuA$JbHYkpGqN*|SswhM=?HZqLtM|=tm*9N7TM0SP;(2ya zio#8ViV(L={emiGf2d15^TTtpskRV{U}vDZkZBgqYD}FVRpTCSFV+Tm 
z=hUV|n@6W6xzmf4jf~DiF^nO!5kyvhKx-xcFt@Tp_%}w`6J#Hwuf1Kg*)kye`7AFD z5?-}Pf;MDM(Cr{?O=@ca?!P|8As>@b@I?@s5w+K&OcXT-fwqZP(S}tbiXBP$)N}iO zjLE3k3te~c-zuM@k*^-Y8c06&Yb7K2%RwDVq9o2dR;QQg%^HPeedVTRRJ_LmCsml$nW-d%5>El-3eaid;*@ z$LmCsgwWA>;-Wvz--t@ zZ5M#p@JL%qoZh0G#QGcV&+^!S+uu|z@1L^a+Fww8r;;7Sn0{e5 z{nu${p;O)cB%gOgWr0qqu7%E;cp^IlC7wA)ak=w<{8nvP1jx#Dg-nHl+=Y5 zQ%}4~J&k+|?A0u6PhVEvCF5Xj5;LO)YgB6qnAg?y*|TRvtq6pUB2#k+&Ht7bcAZ1(X_{D%}VZCPML$l5wQ|u=y7>$nUxP1?>Jk-Rf z)9a=xs_6^YyTVdsp&Z3RUq?lS_)#Qymnl$&xopL-lP?pvj>l9pl+RfL9`{R%jYqIq=Mh&vv({NNo9cD{*XPZ{A3v zmja)@Z2y@TkI&Ax*t9t;-|n3_)8-jP)qibzeHfH~=+C10bvPfL;sR*y&bKp-gjB1n z5#c++MAuTsBbU+u)FI`&h`Ni6KqsWyXI_p>SO zgFVB^DUv$F8`V*>7tBmIl!<*ZNxSInHuiVkK zD|@_Y)&#MEcQBZNVuRlB!wB`yxDy{QMfF;G=bz`GI{(tq1DN;9?Hc(l*cur)42rxQ z->)-x`cx6xsw8N7jjx~Dh(U-y|LM+A6Rq4dYeN*LUUFC}jJ(F5q2)`FbazJtq0-7>Ui z=6*W1Q8EtydNJU12J1nsOD5SDeoiZcXgHydnyCPvsaGyO3InXBsi~==^2o2YyPHrn zVG)ts9S2W-fWNeH6}ciR)v5NlIgZwvH3Hq6T=tLNY@c=Ky1)Yg$vN@klk$~h++&sj z>pn`zYHyZfuR5Wkk%~f1IMk^4kmObG$6Y6jii~?v#E=U{=VZg-kCdJ_*%mhH6xrRHf8@o-t%VII zYt!}9B59E%-*LcCvT&(vpYn`@Vp0YNi9xMGJiOon5sW|c^Q7XlRTDLntDfn}j2;c9 zjdLWdlT}i@78bpiu@x{G@0&d_W{ijrPv5i*4eW>|K^WCyuJygOAQ9Jiz|Rlo#<)1} za-4U(R_1Q2R5jh;V}7M7;Zg4o2S|N8w>U8p%Xr@HJK~wuvG!LQs`ciV=O<+@Y=oEP zlHVvDrF8gy*&-1^*~)K=-`|khy*s<(MoZR0^UF5RV{ho@X50%OeagDaf%;+1vTLB& z6&18oWnwmJ#6~PPsJ~?Zvxt0u3=Sq0B_UDyc{`Pe>v|sB2Z+$%5iM!H0mT@DCYMt` zUKCS*EL^UR&@X6@_gE)F09lt`8$i7Ep39b$_9kk?QvLb4t(&`BTwT_^_`l63{&>c3 z;WM(b`CHhxr+g-wzp?7}Q~DBm@?+6fJHayGJ>`m@-Ws~QiKHBenld@34R=+6BM$sLO49A1p)h?N z)oVh**M*GnpGyq$w7`IcTxZ0sL!;lX$Rmf6cmeWJJ9>5?kCd?UvYG_aQYiz(%rNr~ zCR1=?jHX}))%sKy%5^LOCOy@_t2w$fMNSU-nY%%Cv#mY()n3pW=*5G#;nUt0XW&_J z+^3Q&Jso@I*6jS@oU!mYaxEE&dBe1uNVw%uifdSNwElquj0z;4|KYbb3f^P}k3`8O z1-3}39APLvPstH~Wy$jUwqHG#Z=-(%P;w?$7yYKfoG5`UqLZYW3AL9eB`06Y; zMB@2J)lOIK-EN5`h|)jeS>3=@&J_>~{1X(N%;*69^c0NhoT)SYW#{^9-E$2)vfLyq z;L5@ilMSeMaNX6vT)E^j$36>VhH}8K!2%%tb z+l-H6i#36MHEtKu@F0S+byl?Y)&0q2?t{^2P1u2SrMeXK| zBt=r)lG`CmaQatJQoi=yw`1hoB-{N;G5G}celc*QW4^PrIIW0am|c}-#WLglJLJx& 
z&<_dwW}D5{Ix{ zh3Y)yRqo^tVkHPMk8t>xab7UdNm|VR?aZOLQ6C^BTvza%UBRy9|ulgqNbXr1vhkS_}RvWoa{1;VB^T zh!FE$Lph{j*1_ETg*DFOTg;jGkGEDF#zb2nBP04TsR)>%niDo-i_X>oaPV_EwPdpr zn96Py4zidmBPw700NuoN$SSOfqiOCWL#iRywqsu+#{O=oZBdu>L5K)BvLX^vr2%$8-Sru380`;6^E`F6{U}QdsEY7P>g=$ggcP z@SoKu5>@D5gER2|kFs#G@fq@lu;gEs>2RD~P-)-zgBBL}k#a|g`dd;BQkA7JdNi##7zY=ks0c<%*U!uA8QvZ7~1j(ymuQG<`^ zqC#R)Jb3PN_gd->wa?nnGtt8ANZ7E11sDzQq@tj3D4x}VhEtIa>370mrz2$$r|^@7 zCg73blm57S-B+&yhq#~;BV?%=W*f7^i%uDSeQ}(W>1e$8=3o6Yd+D5{OjHC^KIv^w z;3j+sF-6?yaPu7s8QD}wMZ8b}s!3FE*EC!A7w+-)~ zHejNCZlK+xxtK2{l)}oC0L-bi;Nm^g@A^)sb|?F6(2Iw%hm&|l5aM#6pSWtK(G%;& z94ns&myi@1r>`~9T2#Nkmd@y_momganv7REJ$zefe>Tg2>}yW zz$9s0cHIz_ruI8x7MIQDB1?<`^CYSeM89sv%Qp2$7_bkd3|GPm(5}6cYDcgawhGBD zl&AY_xU(Bch7Zl2?EScZE)*5!srg8R=*$B$VN>hG_4>+;ansu*N8h`EYi#Psw{3NK zszdO9dLDgCv29PL(<*#xJNL=ip9y;4dY@Rbp6XyKb|N_;eM@j_S+l*6GyEoNQX6_l zA27+t3^3mF3IPYccI69qzf}@huv^kJt1=xS2e3QBOR7XH<55`f)nWx2LGAjlM@ZBR3^5`NE_wA{zLR%lq) zr|88uLOYu1VfwnOhB*EvpC*TE8NYLTFk9kvni2a=;im(x1H_JoiO2D-@s@gL5quA`N}ErONXBH=Ypu0sJ$cohwly^F>`L zaUoVWl2|`)whUcVls6bQ|nCod82Jd@+jVPT`Q!OEgkhQ1-NvG0e8xau^hPUUn zs24v8sbn6bdM`rOyt}%9xCbF!!HcLxAUt=0(>W>9B|dlZz|2;%IkKO|bhHcOKL#q_ zYo{IrZ?P8Mk(5j4jQa?-ZrS1Dx95-YFILC;iKUs6gf%ra;sL(u#K2@zdp$RH)_spL zhzR9=YX!P34iivTaJ(p@!4xc$uB)U)tk-r!4e6S5nzJt*s8v~zE zX?)2gclS6o*h}KAqG}#Q9t*A|#oeRi`DNq`Xx1b?N*~d6%dojep=TG@S&%nFQXlPU9nIRH19Cq#wC_AtG*eG? zN$U0&+r?9h9LrDSJ45dsWP-y?)HfumX=w?Pgb9*LvIKmf$pz8-5KVosZ3!}(kcEiV zQ-@0%H?+@v%Rfi=2)y_0fb<3N-~DCuk6c3N8_TUTxmfp9ygVl^T2x3hs8Obg%=u+lo8dgg^TwN zU%>7tr+YYZ%jiqwYTec(xwcJ2x>D^eVLb~9({4%I&usy(6nU`FZSg4G*ykZwMS}(y zFEPQG2x*Z~k!w4GN#w!ihZ&U^GcrhDUl#XyvH>6>M94{mX!zuXGf(pfOhy#?EF9U; zw}hAJwb|{SA}p>z4Qdvxi~IJIrAf>KtWGz?dLaMU$0Y>9E9!L$`YJEb3-Qn1=h&Z! 
zX{@_&H^Zp!F*kJyi}6%t_N6zb{ai_YA_H+jann{W7eSe4*XQ)@FRW*O5o-98i_O9` zk-k@8X_6=1&+k*Wcy2%UZt)8|w09I)MfO~c@kbCe4dqqw z-uPvEBB+?2o67aQ+nIadAlM^@B!T4WGiJ#ullmP*cjA8cpPwk|d%#Umr0Tws9Y#!$ z`N%ZrO?Z=`8fqDpwH6 z>S|L{)0GQPKQR;<8fvY+ZQ!o!DeczM)142W(SYd>*#QPP4G8WE_X&`hou za_dvE$|9eo19<>isfZ2TAUiBDBh&BKI4m-+?#tFcpqa_RLcx*ue7$Yoamf0(2@Nxk zByT9pv1*h?QW}nA(EUVtgbRYC5R&g>NvPeo6n=cWzJo3;J;2`_ZOG8gM=wyMS3Kgl ziBd{(axB41XxRtj$-eKswSdrA^7Cg?_UH>`Y>Bvb`-n3D2%TT&okzM{RQy$q(%+zM zQW^?wZf>zCqsn*NqC>mXl&!@`M(?7E>=H_o966xU_f5=%p*J)E zf%Zvoik$7;za0Wl9HnIfRP~_);_u=MZIFf%00)~MBjf?BzhttiQC5f{qdE>}_t_rX zzUM6}CV*ouEadlEk|WNv;QIW~+1v}jx7jfPN;=cu$b6C<_E7Nx6wC@j${8j=HH3@9cKrRVWD9cl9qqXt(z1w2Ej}5$D*4w})YNdycYl9# z>C4uBOw+nSbVe8^U-GtDt7b!1ehe~C3Cp*PW@JiW-HCY`Wj>un8Lsa)r}llunHqEg z@-WQG;7XOnt-D6r-L`FMwob_3oz`W;eMwcK8XiW$qs9H4gLy10z^PT{g!sP(3mrO0%kfBkI>_ETJ zYha_bnKU8{uBE~K+{UZxtd^$+Nap@hHW zpPw7qPr$+tR7b#tL(oA{It11IJSi#3DErxN9R-I?IiNQ%0~dZJk2jwV`(cN56pZA> z0p!flM7R<@%;zd;JUPmNmult%b+N-b2ie!5GlY1=~P4WTd_C$lXN#3>!c6*bU%+yak?vRd?xD zjg_PU5Qvnkz5TucM=;lXeR)EfI2L{GuF0xxXQ#gp6ZGNG?2?YG?#9s&cJ3An$6SVH4VUS}XI zP-zoT;5S}L-ko2?hUgq!+1jfB0T~?b?9|+iulkM<(}s;eKPs{_oB z;A1zCJ9^d8>eUf6CEKtCISk#aHrGBFK3jcZdi8l1tLafKUncV*kVX2U|Db*4@m+QyA85cyx zWea(g2Z5?d$Bg6<=sbvFW}%TC_T!p9%$eZd&+0cnUzC+71Qk*Z%=aXy9P+$3=nYRj z(+o4x#{q-+T~q3DGlGLbt4LT{GFw-#)U4W(R28}&tx1GyYOpgXE-@NO^=*Fr^}kbg zQd6hy1_;ZAq&+F-ABJdsJ^}i$)Cfcw(eq(&zanp#)F>r4_jUGIGC+!6TY@z{pLDhn zjg3Wd3?z#soI% zGB%^4ckg{LDShgL5ohm|Jo>-Et+-7|_61N0(_~~mUe&_q#+XE0weOv%9;E;l73zX) zaYEw7>93bB>SwPj*lUZ@z;AdrJpDLVc7Id#Vc+8@!t4A9mSVVg-x067%tUcxH9aO~_O6UO$cy=MQq=H$Pp?$NaWLmOlzLW(mN6A!~-%|9=bbRuPm`fTHn z5dBhi+d&-hFa+%{ul=VU2RIS`=(~NbMV0~5p3cxiDjoAgxk#YT1z1O%>BxKi9U1Ts zfdfc#{W{KmG z|M4is|G@yszo+imjs8ytqyBB{|6y%ScJ;?4C~+n~<_h$4-(8oKAYp7EIUL{;F_9{= ze_-u_*wnU;1gHVWB55cxgMdA`4Ma(JJCtgAZ>=Znq-LrbMb>Xc{zH@y+f=Ld#|O=z zeM$sKm^xJp(5R*)ul8}HMzQOnKY!nKr0+Qv64Q#^J92#x zdMM{9M0`P%QEe!D9J3n@Olj3cd;t$1o`yEU!AqyaySY>);zy2^)|HkZGVgeL7dW=@`QRlnBtf4+F+! 
zcY^DbNrml%-@o2ZOrB_9mJ417yeF=jD5VH-YkIVe)KZ-!b9mt{=oLnwB7(|I%#QYb zP*x@qp#={J9m$~YT!1Vi2@vH<1ixEiAKdr6X8ZxV?$VL{94kEo>Got48(lUAv_OS0 zC6CHX!K(uAjGc#mRK{JM$9{>FyB1c7L>6^Y{QdP;F9b&txi#qkW>!-jzkWB7H|MRp z5ItWW1c5~a)D3{Bm z+I=59c>T{u0MzyU*~#D9S{f7k%df0jP7TJ69|1WCUw5BrG{7tO1^A%dwY9OYeRI>4 z9@t4hdU3frxDMt*WyQCJLEcz0v!Pz%>h0L#=p^`NHzJVOBX|f7g{CLf}ZsAO0QDC zH$(B@U4b3o6wv@38_nKW%gs60$lw{0L^kR}32qdQhqHFFN-e0T~%tAxXHO)p38M#w@*+6F|! z@!%S!Iat5;8r6nC=SJ$=$+3AzH0zrx17?)3E7!6OVmiKG7WM6IALpYgj+I~ll`V## z6Tyn71kmzD+V<*LQhHl|W9cH&oglHa-9YTT|3UCEgpJ2hIQ5;Uj5N@+o@R*vScDbd zyT04Yy5V(+ZeJHpcmZZ(Rs$GLDeC1xgsnSY{rN8lH`xTc3mu%^C6}~5g3w1 zlx^uZdR`e_VLINBrc>`|8!JXPXHg zs+&A{@@@31)HIgevnS@2IlKY_e)Wr3!u7xcrV`-p^YZ8+d(L}~WeE!~zmgol`V0)? z9w*PS!nSd7ako`xCO1zG7Z&CzNk~surxXdn8mI(p(|O$}2&Yz`lCYW*Rdq@tmN3FX zO)mx$!>(UKkYYx(tt|uoNJ$*-+)+uuQ)LWwmSgh!H8xlUc477Rti< zb(mU;Z{`kq!ax;ym|p=;Sq$volhuhsT;*CCD_z{(-M4E`n}}6`5!>ZO{o+|8z#z6r z=hLfhZ7}LE)37M^u6_wnsZaZ=Rs?#c`(wpEHh=Nr3A!G}ad*8&Zq{0{VufD-i)C6! z93B|@|Ao7NUhRYW@g^(e`IC>^wintu=R(Kb<%YDmdv@cqv`BGi-FXyMR=TJ~y`$Qil4#SEx~vlvZLt9cctlq8kVSNqoSgY0y@HWib?$Zj>nH5 zU+ZNIU{@^i+X9eywaD|7ur5}lx)5SVAx&rW`P@nM9B$LMCm4S5i)n zAPW4P0nQew2nPh;xw)Tgyz_c_{=3Pz+KsT(jEqT;7;TwjmO4b#V-!lzkL5KZlR1eb zisqOJ_P#cYb%w591cTySrB$Ky+0y9x9ns^H?kV{eT~*UCUWJ6{bpMX`xvfg8*euvR z&hYg<)}?V4NJlFAcf1K`UH2-{gWwieQ_hwmBNfnewa5O_Sp#Gd46Zn0xy4 z>4zr$WJK~Y=7yH12Tn8gd;a|SfkMIJjQ+xL4;9gfAAyh^lQx-;&)4kWiWSASBK%T^ zZXVF>yL@3%+4#WQTg2FK|9&6loUkCaNcRgVcU`Bcsd-kP#P%GWowt6^M*P-Iqq4a8 z!5eUW_1+v|Da<;8?laxr$m|r9T5I{&IE(lCTANJTV6!>Fwoi6 z{Ysb(%!8vHExAG+t%)7WiWdN~Fdo5i+1($rX)FONc~Q1+ZrrZscl#4=7T@P+Pogya zYnhmsm}+!D2}&9R9T0!gvtwjO)yV3Q&`2@lw$w~=Nj!AeL~)OgnBjVFS3K@**JiCT zqrw2Wwj{Y%aBXgc=^i<7!LUfvlJ>Xxmy% zKGhi5HU*;-jSe1kzBETI4&W>2W=-~$W+vWRq4OzS-3N1qrK7IZ?1`ins)0~%&@A}7|G>_t5R2tYOpX=JtoTcMj zWSHPKUV8)fVH8%hG~ksdZ_r4osU%Q)dO#fPMCXTGMG?8V5WT@po2r|czKH)(FJZb6^Ncu ztgw!kUb?$F&mWS{nn<-~DV{#xRuT{bxOVo~(p0C|yjMH&MBw%DZ>nBLB5$ zWBhovWkS!*Mdg|bE?(dLvnF3>|Ney&#I0me{kWv6JS=~5J{ALqS(}Dde=wJwh@R%5 
zDrtIP3UC}(jq}dFH_q6b_zKtxmF(-MEb8i&X8r6|IY6GGW%2>*0bdgcQrzSl-Ryhx z3kAI@n$e>3b1xX-W7H{8Bru7UzUO=ai;j5->O@;wfJc+gbKj6Gq+ zCrst`%n{%)>E3F*AEZuXy-}MvE>Fzpn4-p!PphiUZ5z!QR3cOGfBeoB-lgSFlkzAO zPlaZbe%n$N!koiZ$;l~$XWCdEHMi3i-DgEj2bfz+R5xwfq(SmblwJjuoDNL|R|6!xtMKrcoVx{^!ju@2@Kwa6929aM;toNUc-1{U(rVt{$jFJV|K{;u4dx86+5c~KK1ZTXuIk8 z`0+~7Ewbs zm(V#m&ax6Q3MssFKV-|3~U((~aqtX$=k zf}=WbBl%)b;=!}#k^0;Jw_i$An(dd1;r(6DsLPb!`AbyOt1nlFc8>0+!Q(GTaL4-# z{^E2eySePaq$mRuVr|s)&ikeRT!(d?@8&NhM7WS0wlGEKgU62o2kxnhVhZe|oHs_& zEPzSrVA!r5HG8?~SYnum0q&^v_eBcS8T!X>g$I<5gW~*u{B25NvDndo95| zGj+dYsde>}F|mqU*Yomcd$#W8I+wio!W7PQkXXXZJ(%Y9D%J>a?PX`Rzk4FrO0lH3 z*ZiPIUl-yu#h54j{mtU5IJ3B_dw=s%dly>7Zpf7d_h$w5xAJ&#rpZBbw=a~r*xx?U z-TN$S%zpWwE%H?lR7U8|rrCYN_MXH4diVRVZn==wIRdbpe7gN1)|9JtgoU~;DgSG- z4z-Q3-otqK1!Ehz_G10)XPj*H%RJB4E$iGfVK8Tc8`t$V;99)qPB++Dao+#M&6U&! zCB8kUcurHUO|W3!&p*AazZkpmphmA}?_O@~CM}r!hX7t^pM5y04p;1a13W!5*rv_K z9_t#{3oH8~?f$yIeYjp2Y|leKXn;5=D*mHYrmtplXj|v1?QTK-*6SW`quT`FdK)j~ zbzgIzk!wZ1vpIm;@UxoBB7HOWZ0=s&ZQ)4`t*2g%Aq1tG&pqBo8OMhCzwkEnl7?O- z^H5S;tf!@apDcZ5v$HU%Pf5C?I&%+AYuRB>dzyd1FtYcXH^9&D)X}W0Cp6LYdAn-d zv!Ao{X~qobvlrJG@!O#=mTQo}>%CCg*{SJ2t_C0K6VL7kn_XTxJYNSXm+p_i&3est4Q7T27lP9)5S)Z@(?od4KH&5+0rx zEDIBj9B_xb%QL@#IZh%jJN&g}OSkX-NK1y_+8V!VzJ^pnPI|;o`E09NRgWVZ59j4% z1o4sG$vbJ1!}qtL@$arW%|E;^e^;ZK>7v-LJB^-(P6WN3;isG&*Cv~KE$|r{s-T2Y zAy2Evb8;q@NzPtoR{TpBPDs;C6Tm;~bQ!OvSjQ(`=1EGCdn+SWV@|gPM{(O`tV!K&8280k^NzJe#Vx;g%S`nRP$bSF8SE zQBjdkeG2yy%)UnxMA`PdL!h4_yRkxE;4H=K{+7%A_;D1#728|$B-(dJd>dh8p9brVg#&Y~1k ztu?%BUsh|_4kLkcwjGVu^QF@H%F9oIhHeKWG;hVW85*8akBSie@lMZ~?_l}_6&X`u zW1~%*VlRk%`IMVG`NWAV1U0SCS`Is9zsg}L%5@(E9!POM+Vo9Eq|*Aeh`{`LA1CRj zmN~Xm+6ETn{H)EQ$Tq0D%pYk*E{Kb}(Adxz8vEH>CeWrWC+xfXt(o=a_Z#XL#h)7= z2~~^ZVbyWvfx0R+Cv+1JI$oM%sk3L#TzBy=6Qs=?XH=Az97tF%qb%*WepUf zuAQ&|o?Rd-%G$E^rBLf~;_(WO>aLPd(2I9oVVv(IGk@>8DLFqs6y_D*zc}{OJ!P?2 zT{-Cm%F6iW@YS={giK|o7Bt>%CDr){t$Z3jeP=D$ZG4IK#VfQZytZq1C zo33dll^rjavjhK?q{PSHadDr>W1c7zgbv7#%Wn(RS3CCYP-Bktf_qB(346NlLK1gz 
z#uKbxR1rmeS)_5%;>B?px?TcA2_8|hCEy&fEqqEz8IR0opl+fkub?2rC(UAsiz7YE zSjP(38fQ_@qf>bV*0p;}aysY3JHf%Dj4XFF^!9qz$l;>8r_Y?pkF2yddVMG?BqW3& zlDl{A6h`p;Sh|dWfB?qOXu=Cp5j2E-e}Fm>00jjh>xH01EK1&J>_V3;h)Xl z0_MYY#d|phcFlwfc}KdbNOmLbjZwub)X@~54@jX$axGHD`IJD2_%3F2mNM$bz=g&o zKiDM7MO*Ubsi}UAcG0%Zh=jKwi2X0IC4up{!ZOrt0n$*SD(17LM%JY}^6v<>?O(WP z%MYP)YCb}HJf7MutGv^sJL-gSb-}W$)z-M2MsjgxB9*=qacN%j^_;qUy!`x+)Vbm_ z!D?N8U9i6SWsYAbZ-GST(rQuL8qq*xg=74YCw@iLEDE<&rQ$+4Q`pVzUH&(4G9NB* zT)s7kpQ{dzs=ZMe3v$LI>A^^~hyDCHRK`j6n{x@lt@52-N3WB_7r{Hpc5=#&$H~guvw&J$<_JUZ0BvP|kK{Ie{AvdmziDWEGjo2?t6dW z+^Jzfy**9&W(r2jKOqj)v-**dk<|<2Y8UiqlcbWAn5&e|;r4iM%Hkc4vjoy&Y#P=u zHGvpFj5;?}!|RfwgDjN&DB4?ZTeG1Fp^G;5O{u_!N?-UhEq$7CQWr z6$ejJOem!i!hXH3_w@9%w!`HK3| z<`s@WM$T;q<2Q%_jXDkgxJ+QtWzcmTE{`oI&q&4lR1F8(SL6 z=Y+SP0R7bc_E2YMXJU-k0iZ!{!q7&O8#iuLgCe4o`C$xU^H&N0+a3j1&X~j{hrW_v z-*kGm{f35OE$tv8)5^=F$uel)`*XmuJngOHnk*AoE3 z8Ok6TRJaESUs285`>%@chcbaAjSYUK%*r;WUKpBuL-sfusjzWM3HvD*R*2PgzQe=e=O_k7Z<{1M@C-q0$4A)hM z3qxWn+Yg5PodEf{NR7!Q)pbcK@t!iTD+jTPmK-muE+2w*h~Sag7Nx3Khg@teOG@=o zHfo_$`(q%irYxOA&w`PfLAsK0Z!Sg7|KpDdikA;M2uNQT_~H4p$-56dmPASlOk^QP ze_SmvI@`y7ywp0t@4+l^ELId%8acguSsrL(ZIYvKYgFL=4Z3sH=Pg3Ocl1a(jppZB zBA1WEA}WcxiUk$X(RHD*J>tGll#$XYNO>8eqRBrOX>cjwJ(SMytF!gK;0dS~bKnFx@ zdu18{V_d6&*{F86fq}uFyUgV47cE=}BqB@ITp18%f(aAU^TGYXL3U@sM)~4=vE=c& z&$#pFQ>Q8es57L^c`B;_5Y&Xpxt;)|68iLgW3=tO#RfBNY%S}3@5d@h*2i5`ac{If z#DF+_A}UXS6IBBwjinr0_vZ2jdk}$RGs?)TFbeD$A-@0*t$*0FThQIlp_n ze8c3^BTB*ruS;G|@3a6e7PRc#a4l-&zCc0NFbGDFo~V>%*xHxHn9dl#dsM`qQK5;U;53NBVvBr=7Yg1A~>9VzKf6de>##o zc-jq|ZSzRG2| zH8ORF46BC0hGNJ$hZUk0#J?#VV8l;JNf#N^-j>$&UCqXdo3wc5x|qkyQ%gIo7=`>z zUXvTFA3NhrI$dX)qy6*Ew2I=gvPW++ygi?T_~@YrhwaMMoSNwm1K01J>(h%*nV}NL z-Z#Fyz0gYNQM|?6?VE3CjJLC3Joqz&tH!)8GB5+xIa@CUfs)fRECd#6k5hKSEf&Z8 zHfz%2gqo_(=iH2TU!~V1s~%9V{&?qsJVCo~9&W;jl@-0g^tYpzugI5a%LMWdeuKHLM+OQ9zz9&xjGP8p^ZArordZ ztX{w|IcL6q{4(t6aK2<6nXUe1V#lyl(RO8S?qG+{HP-Xq*0T~oSL3={Mn;Cv|FQLO z&Ti-FwiC(y>w18M!@2sE>$}dg>C3EMy^b`(SPI`hPLUA>dGJ+g`DWQGDHg1PwS+rx 
zTMWYuBor~b(vTG9C926IM~-YRi-;GpjVNdaea*JOAm(o92j@l)BLSZ)=+%<_=z=){ z=1+=Kcbv)lq!4d;r_VC5Bar5wUaMBSPD5FodiWE`)vM!xE;I!4aerQpzyE1o{xYYA z2IAKi^E@^q+}uEaPHfG0nfbcPhqDjGgdhl0aQZ>7MXN0;HuBV;vA0*LP+ zwH`95y~2TNmcjznJ~xi1wxgB6t~3$m6VxtY+h87kdr*VSGmiB06Vs}_mbRH| zMq9VQV4k>G%x_4VTwTDYm@^5N@b7{q+SIFMf$yc_tZC_+Qn`cA9!PpxJ&9=7>0EAX zs=9H4?r9@EkM2IKVYcm9+gE+J_Zxs;Ul=WT;4EM7}_!RLS8!J!wxk%x!sDK;blM+gHe`yPalK0$HlF%(Le|f`~D6 zTYbim6H7%7M5i()4OK-AWW$TLwFU+Tl29|)Rz-{x$%u#xXnhl~o(rpwHH8^l>KOuhm1i)fsufZ9w;w#MY;23ym0YrbEFDbQ{JX zZY4msXlpjY7(@@W3_NfmjcuQV+BU#~9Ma8iVpju1)Ad~^)+9b&?oJbzomEDdu~(O% zCCg3+I>U6m%zA)L=Y|g!E~WTj6AZ|oyu7??G=vZpK_!AZq7VE_H|I#0wqfBg$BkE> z1>Q4yc6gn)>-SqK4eRf4_Z2MFo5Xr(3sfAz?YIB?8~WYClNpz$~~FCfn5@ED@`6 z{nyb8g?6QXRTyqNPU)_ore?dA9*gxgzId?1lkAaOpL1|11?SK*Bbk3FFgp=Voko zdHFKu0?c_<0hD29mbUE-E}btGCiC+ZIz=`bNQO076N`p`UNM zR7e^JF3_$PCX%0r%Q*Bob~T*?(!x){((+aTg?fJ85}Nx4gQwgA2BXB!?}WOc(?1Xz zhmmQGk*Q2x4RIhfB`C;_LA_i+!UDv}Fx7moo@iS=k!b1E3a)aVVZA4wL2uGxpF#^v z#eN0P=_)sI6-7C9U$PK>&NJM8>qpxeJl6=)DYtt=m_EDL`UVD$Z^fU_MTkrC)kG9% zg`vbU56j5as>99A&3OWqM-?K<4UHTCiQW3GyV2IWbzZR1;%R7$rS|}qa3%W3v68{3 zNSHk32pc6psIYHN04y`agcnhNVl5*Qe(7W8 zcKgssB`Ahc+hW!eUID|I>e|6Xt=5U{rL)ajdwN8i_uh12tlprQ-}S-cUQ>~RW&Md; zLan0-#3=6#1j%E}3p4RN>$<^YbTBMBu36V*mBYQ#!7JpNrZGaa>oT92E;Lr^nzgRT zMf(*jl}}y7!A8*;R(e>QDMplPCJpDiy9YNBgB~Ow1(4D5E}3=~E8dxz56~?Gj~c51 zajjQacAR83gA0`2%xQwWPDw8Yq5PQrkW6nbPPgIWPi6CwH3E+i%$ zx#l>6Yci)tY+PvI`sy1kNnQ=*idu9GGYJ6Du%4o=H8dC2+f=D1GwX5VFMDou0hw?X zyKzl<6V3UUsU-EZTxRCb*Ap?COxAWypltIIXV<2_QZ!g!6NWr@O5F7qcoVwmHw8-r zt&XYGh(NqH`%~AAcj1{5Mz-zTpvB19rf`6rxV-SI;e3vYZyzs@OyvLx!*6#DHP{$6 zTG&a5(@i&XAl7v6Zhv$x{TePv*SK44!@iQVZ}DLLZPX$y+qX|0j}df8V>@&0ON?oa zL)lvu-W>btX>8=7uqfwBG$2y8B;|+YU!w(>7Be3YiluQjJ0aclof_Luk9eX%*An{t z(Y6Sao)&$P+xLD9&wZh}kXu&W#*Gx%7|Xp@%D%huNwwFa?5j4iv}*`X<_0RXveURZ3TT(JeWkNMtriwkVT3yHrZJfjFMXS1{K1UTY- zzwTZ-pPR48SYybZ{KVD=_|uG1-u zOuZ8wz38Vm_SHbw|9*?cEVJTLALMkYKAl(9?ewk_@Uu`tR4@@YY(m!AWt+$DG@U&O z2%InQANSpH-n-Io<{;p~pflO`$t*}L*5;(|msK2+RE$1hI?CdF+^k?I->%{Ga 
z!}&U`!@IXS$b5(-Q_<$#jXC_va_ZA@)RP`P(7wkp>#uM1Io5%Naoq}&ebU=}~Aw24iQkUlx7}|2jeJ+QD?; z&**|F%#3MU5vHTgUT0V={smZt9yOjw@sI!UUS#or0=pjbB&e%S^q6f!4hkq~aHiWI ziAHn6(FUD%2pNE=+mbjoxc3l%$@iFTLmJZqeEr@zX18&`Z#C2n&M-4}qUV^AzaMiK#I{`LKVGhaQVE8M{`_oXcJ;Fxp1as#T?7Xaj~jRWlM^Ol2Ls@gV7jPb91zq#7nd7(ZKuglN?fel z-l0ckZ}z|{+`A%av~J}}!~}Tfs5Jo3pbn{0nSCD&9O3(oY&oo{If5&x|1?xGqRzuB zId!Z3_qUe2fm9FscG}RmYUlI{!vp6CM~%umokNE#4KhqnHrF#Mi>XQ_8yaz34z6iw zgIl&ViYex5WslcSt8hl1fBugTN;!yPjZwVZ5ZhdDiADBXQK@21R$3bWoUL_nKi&_{ z`*HnD-rbAW#y0M|B?xYkRvQemhDov#V}4V`7(=XZd!c96J@^q(E796}xm(-kR{nhf zApjoW6`023AM)LWvXZ`Ky4KA}3*P3moK<-@)3OqQRQJfnYx>Xfuj$(~%kb##ytx3t zzqgr|FMky<#=pJgke3KKTJ?gY$>uVkjCY8O`!hohe$Jj1ii*6l z9dduO&aq7~D#GFRKfB%qi*;bXMONK@gSoN6X$QfMzpmAN8ULvyWqs!?BMG^ac&G$t zU;k@@@d9aknlyM{D6-F2<78AeC)^)$;uxjd>#xW>4=fPqJ-ZuZ9uZz_utt!AlXkDK zEb^T-L42W=`0$+`w99+Q;7x?_!Z9iv~8M4#})b z)23b9Ae_rRt6_Kf)rL79%X|(tMXceK|Fp6MV2_P8uqNc&iw4KT1Y0(t9uDb_*DXDb zC%RI?q?p20XfF&!Z$-;>vKUMW6WhLqVKBwt76fv`Jz_ym!wm=7@z1x?nm+vI{y*xe z=<_v2Ea74BUuIAK6p5m9b2itprxP7c4j88tB8b5M=9+Na8pck1xBJ1_l|Bd$9_}@9 z$iSIPuUYV~Z@+`znEZ)RWyFlC5`9kLMdv|wC4Vq?B);y{#XU)mn# z*RV58ys{I2s~W>L;mOCeRyt`@rNCYpFvdl48JC{pRM4^VtKYMW{Z1#I8sIpV2c|Fo z94riE4{hso6J1Sr|Iycy@JY^4 zG2vrIFT#^`^@}bq1JXJ{2rnJ02VVMHcf&b9529OArm)8r@BXcJ>MqtQ*c*WSh$JGe zSF;LlhA~p$Yp!_Rh=4)I1nT*mjjOYyM})`D!24B0cdq$7GW7|<^}QjkCVy8QJ3r6v z_UqLAIKI%!5m;krB`{|Dc|B|pxq6)K1B3)%d zojd;Vrh7kmk=D}a3bI}LGbM0TgO&2f=(b)yMiy;l;bmmJ|O3J7cCn7)PXxZ<58?p*-GS50Ib0~R4 zkU`~_%ziA!Bj^|#`QM8@FAWrw%+0=jR9s%L&vQPjjM8Lq5}mcWKM%J)&CmaMD=@6$ zSq1&OV3{ZrDkf<4*zZvxXGN&24o`eP+p_Y$oJ>$zB!eh#%sH~@=ahYE8c70a8k?FE zOBJ{g<)aBzo|`mvGC1UCE2OTK3$J>UBf*a#3*$ zi-o>VSJh+ZadtYsoa(`@P(5q?wch7F--V!O5s+TZ z1e%dR;O{*@4D3;-#@|~i#npIQj|yr9P6==49q&=^Q4S@)x2GA6z2r^%E$Ed0jx;SN z^~Ib`-jut@mQXU74Z1fwN9YsxD(IXY(05kW$&VWdg>bjndaj&wwrz*QeDi}fIb7)o z_^t}t+BG>*Q9odSOD;B&g}m*uaiTYpJ;Kbn(h)yv%2EBG^UfpVXnJHHn{9fOb}t5l zmnY7Lh4xHIAj#W1B>}34=mM{5u0(pOU)IG377;%dF)J*Kb&)&vK<)6yk0!l%>bk~X 
zOux`Sg>2g{i%F&hvrFE~!C7xIrOgj2f>CGAn@qmZ{f`i%49kG8XRzVEMRnM3c7tvPf~Z{@hj+b{Ucz5Mo7=InQ; zf}%IgJex9Z&!Xf}^XHBlq=LLjG~dCthll+SOL-iQ%57YdmTz3!Y-JWDaj>W%ytc<` z&VC7N{_M5*F~1fba=hn!$NbiA`k*}aQ9XWM7}awyv;X+-i8+n*Z&Uo+6r2{|eE7Fl z{7-I*-3A#0?ed7M5WvmU%1A=Z{z&xFeleBvRGbg(n>4ZW?y}ryIY3H`++=##KFn@* zvcJbJccM(^9R(Nnf7)RL$!07AKhoYr&dE6+ZVLTkJnxOb(jzmWN4u?|6RX7eGd?K) zCC~3@qTQ02mWPD5ge9E5-ka!hVV>221bQSk%=|{A@y4X~-U-UDLdT_ey)(12@=1U( zK-Xb{*laE=Kj#3^6*%1k5|4wPbas9tsL`4r%DLt5ep;z{+Ni;IK7tN+;z+sHLoT_c zrKNX(vY35o-4I{S2{|8%T<|P^h$?%)&2e9)tE=l0w8~+^D=0sz{@q=M!P#B;H0{o} z*uWm(@u>JpWJM9>oOt;$H~9p6Z@Y^kxRC^6FwKR6wxx_d-U`5qe@*P+uroW)8-+GM zhsfVD;+U;(YP+WIe>VJZp?iPlJwD3*#D+X#Pz0X1p~;#ARw4Izso#CaU|-Os4MbVb zL#@Q9I&XO^#)prSh@qo_z6^O5O{IXVI@s*%Uq@t((+F1D%n&A+Ho6ZubD83RE z22|YyvGkK$IBmuG;4u_$`DG4NO-WgfdCTV@pLg@?D$XvP55uW}cYj?0U(G@Yq5%LNwuZ5we=(Qz^VVx zm3e3DycJ}5c%V*kOdcL+Z|%2g_{q!uxsjFeftQuK96=U-hF&CSg>&%X3T zH+c}=?-WopI%}OcfN^01boSkot4<`Lcn5g#xt70G2|*H+AC#UmFfOIYv+Qr)Koyk{GSYD)+K0s&J`oMZVx2zGI0OI1yt+Qt z2jX(+%X^&?Ux!N{ms(STVyb z@WQqf3QF~j(6w9%y)0L&e6V&~dj^2DzaqM${b8Ybwz0xH@Y}8l>HEx89T-U`i^|Fz zWh@)U)%Zd5E*9M34?Q6q|I}5W?*YO0{LA_dNERzX>3`qN;oZ@|`WmfC8dE^f4KJ0X z`UK#IeJyNI$Pfn|x-+JIS9X?6$|X+09@h=qg%0UtL*x1-i#2^p7t0&>uU${}Tg^Y_ zm!2VE_91J3|9|+zG}Vo(1QV&#M6IKk7Pz01{khfU*%L%``)pN<|Mf{b8|7d3oYOo0 zZHj-Jg3|(=5C8Uxf185S0-O*3_KJU-g3|(=5C8Uxf185S0-O*3_KJU-g3|(=5C64Z z!8nY`re4)~55oB#{OxJN4q0e6-SB^lrY-4yLO84}K{R9xW_a+0^xg!;2sU$#!8a8*Cz9+po z!%57NHBYMYXuLE4mrLeDahpnrNjk`BLE^FnaV|nAAr>$$`gkin)JWSPD@4qutyDW6 z>^RMB*X2O*kVn#VB8u+AJLG!b0n+8RW9?6j-cLc?@0Zl#;^o?_ncDTT^qh`O{s93l zcz2|So(G{kTM(u@1wt~1>bzRp8>1_0`OI^z%}LA<5uY>pUVYCmFG274Dk;E27|(f5 zK*S%YNi$z(i)zXY5{e>G!ACnn#Su5NjHDV}(nTNztohIfc=_(PKBxRTeF%b>hcfnF zeg!_Aynzt$aBYoz_Usu$5(80rV`zqLi3~--YSU{Kn=t_0h@LJWM0>BT&JRHrdD8{D z9~_Ag1GjZ%E>EM^gt_XY#Kgp`bB@)0QaIDv-+{^qj8VlPXZYksNf5M4@4{d#)EwI9$mPh0mBC-}u!ex(LV_^drq z%J=-HNZ->1BNk+@2u{t!T7+CDEa)3jzZr)PMuQTPBwG?)_?K+_I=Rnz>@txjSh=!p zl6`4e+1x{%>T1r8^5tXf2Tq-u2UW%`iRf+GU&nC%*7booNi1IO7!tQksU<0w7mK?7 
z+Wmhp!r=D0Ue+OrwZ%#IC;rntgF{WF?wBkpKmT&?KDg^sG9oBG#uUT_Nyx*Jp9QZk z=I1#`h#%@3!=!sZF&TZdk#Z|3wKR#Kl7G|Jz9D+%5f&Mw`D<2#HOZ&s7$S(I+>StQ z*M&L%``}Hg-fBWnX)tmmS~Zur_aZDW@?W7Ob9B^A#roe?11Dr1C# zGjGMq6TdLeGXvFJ*6FQys)dUeQ|NoOTSG%bQAx?{|3Xw6 znF|H)jk7FQDK5Ovt28ygJU&@r^KKJg9*^KMb!7I|BwioO`t1XLG#5q=Vcm~oJ~0t+d=-uF{HqC_3z;i+ zq~B*W&33i#CF4oBsb1N8<5^tv6=Z-Uiz;QHOt>WxRc#Z$eEBjT;(p6*2lFnC5jZ`_ z?$ds>BV`}{3n_-TVaEAyJXIGA%Dc%F?umtFkJ%A=u)H=0A^D$SdFSQ~&Di{4bKcV= zb(Dyn=4nJMnR0oZ_r`{k*QQ8}ShN%R zc-2k$E{(?u7kK?#PkE-B{p6YIRwLyQ4sl5$QOYd5p<+{W5rDqXiBJ3XAG-sXZpXD$ zm`C$L5UaBG1M$|Vhnwt0rsS`Fay+U|$aihb?PzZVEAJxfoS#)^z$Tmri1fAfO)t(y=0fRH@RUBGRP`QUvKB9g%+L`pov6 z6Z8A)jyuM^<8FpK+wzt7ectC;Yp%KGT8jqn-iwTk%rW@-%CEl(UVs$PMhv)XdPxPG zyDVCMt&-@sFLiABZ65a@7|Y9^`fVclONKOuS};BgI_Hjbm`bJ)X3#>^KaZM?{lNb% zWI44K>wAfs1ERElA-W4|k5ZBMGU~@fL)`xHJ>ld6Co#m1HAxOfF}gd1FcP=R=jIDF z&l;893x2(mufJjCIFCKE{pd~!;hP%{J#MteqOD?m6UJTr(>J|8Nv?4O{UU|Y#HREW zhUq?T4A^=0IBNzldYH6a`4;eO-;Ct%^C=9L@%Ujr`%3@-@5gqV<{#Kl8nLFybHh^j~iD>yEsAl zI8C_bpEkpGRuXa-23PT^#;v9C$dBzXtC2Nr3TgO%yrKKwx2#PQZE_N#g6wB@{V@&^ zSy@ul{^&M@hrbnmUE{C6`Nfz(pN4tn3hF?Gic>44{_7aknuQiExtM#Z{ZCKm%0*<5 zs*q&Fyjs287$}GLtPDh1Jx915^+I?W0iA);O2gkVO)PMIJ{CXP)9`QPU{lowGjrp?w`_i^oev)jR z4ag4ZVK(SL3ODxH7@%0=n;(Kb-~1&T+yAT6JO8=jzn<8h{}1No{`2tu^YH#|aQ?H8 zzplvt6H77R+S_1%OG_z0bf!O3$v5EAS|P40dzKm~f4@Zg`yXD;*?#oyW*_&pL*;i1 zf9l-3_uaLJ8fGh&-(0n>D16_K3q>v#4+w7f*B^6#WGwiNq5rtPdF+r&$1&gg`!+@% zaUU3u_VrD->63J9inbLltu3AT5?fFF__s=tFL&tt&r=7a_Wh&uV9mOJ5>$kH|IdaD zUsvwUp}*C8eEFBo4S%bC{PNc$Oa5n{g0CO)pUeKb6o38ZKdb%s6=D0&Ci(l4{AZK= zeMSDWNxoT;|DS9Ub(PWY-9mTR54G0YzB9A$ymM9XZ2A48K_c7GtG?v-ALa|5c@w^X zOMGY0x%9IKJy-jVLCjnWhO8pnbyi8z?3;CC6NkSuKb@q$O|<^}{t^@cP!D;nEOQ81 z1H`Nhsh?}T%j8Ef1ja)6Y|isJbHq?HkeI(IDEg(^w=~Hc-|okuY;jI~XxT?E9)E?w zqLt@UV<=UyQp;MAM(%c87bnwAW2B|F8dSkx>!x{|?{`TY6 zh9<3z`)1G$d>c)%PicHtR~Lwh<2)MpP_Ush8RhhTjt9IYY); zRm<4YJCLp_!D^>ejy+9EYFife`B$mIYYO^k1t&W@GhJvx9V^eFA)uyZ{ziIw=^1&D z>EEJmYNR@saQ-$|2I)ow~eMor)Rj(_uRS4tsdj}?GB9VrAzq)cC`chxp!y+)4dZv 
zCcle$^9yPQbgwMps%smyDiJff-1jYMv1beuyrp^zcw>8q)E^wr-;u|e_*`!RhX`NT zR4dQ345`!4FM-h3yPWqeo%X(%-uqZPwgZi{_xsn_L8EP&HQp{MSRDZ!HGk|j@K~!PDP={Lq(aM3Fc?ZHKN0{IUJd`m-cOsxbgtS|0>z|IoyxpO!h8^8NjNc#50W zE}D3RqiYpeLkSOVF7vZv&pJXK%NrXT?|u08LgRMLv3PT6x=p~fYpAY0$UJnkS|j#Q z-a7+-t@I2Lqmp217bz+J?OtT_KoP9-vBcUU;-lL}X;t&`sApyc#NTbBq}0yK`-gOt z5WaBUG!luWC~J6@k%dk2>GN$QItm{t-ow6onj`-0eK}ll+tqn76axDP+D*D1+Z+t) zL5AAee?;v^r|*t$@9P5QeBuq5CW1HU`^e!94;scm{`LHyU+u^Ks=Bk1$ucW(qGz7o9IlcQyWOz9&I=Z zBmWxm7wBK8KaB-#7yGZwsN<3McQ`r(de%J-{W&V~NRQjed5ek*W3z*Q82us2K|K25 z+~kEtffuWbcw;LQb7lj|Wb5jfb{Y3J9x-O?loIC2lq=3J2yfRnIEK=U1V!`mca=fW ze8%$1bA@@>_&EOkf=0XmWg7`6tnh;1=mlS1Fd~QA3prU?S>oH3Bz>#2d&oJZsz2$` z%}Podn=yJzyMWjCh`NTxiO7=C+b1Q|E<>+t*6cgUKV2*6x~vieABjCvC{COD^6j|x(M`M;9js~}iTQ^G z`uOfeQm7$u+0AI{Q9PmQ^f!Ur?V zES*b)o zAvY=6Hl#r{s>;EvUi6tAx=hD^RANxz?*cwN?ZvG!gAL;xbUpO(=-_(|b zsW_u`sphuk{}hO4%F+gg2IJ#UT~BOwqsfP?+RPeFtK$FT?%lTS4>L3`#kmOj;UzNi z(KWCBfeFW_%DlL`xPPb)>E?2LDg+|3nORTmzumoYEi9yzYZmwCTrKS&lya^1JovaO zwA9XNyv^U=!TBtPemLa8qAHKls0>xPNHKg$=Qd+`{K?^>1Da^NHQq%AZ{lvK!tMW3 zetVF^tg3FO(N+UA``>1S>Kh~35GF|1>x7PGMbuV6A2LmT~Vc5D{6B&R7QXTE&v|Ql2Y0M-?2SiuCA_c) z84w$9?{A^a9wU62cO5{@vy4xe`Cx=xs-$>hh+SiCThiW(a}7`c__1v8+b>fM#=g3T zS`+^V+Szav4%QhD8qD*!?2cx;OgmWb{`MX0XelxA2ruP_ag_J3F-MCrZjK(<@51jE z?|#!Zifm(I-m7oHN4N=H<74Hh@y{4T)iydi-85TVp&;F`K~D=nOH13t#H8?$)X+`t zRp06@+#ZlmdploDl?42?3p$IL|SNIc8 z@Qv}|4uJ~iHTJDHX~3LJVy$M&D60KvSeEuf$8Xm#PWStle`@69^MnQCsoL0213MKF zY@zgEPxW+6)CaA&m7?%0Xl8oSzed|-yp7_Ew9hRb!|yLH<{ysyb)p@ zU(ph~CpA51&z@DgEcEpb!UfRM!+aegt0-hw;=w? 
zQ0bG#c@IVsc9oFf(b)R0&$O{b{K^Y0>#_r4AD>nTTDHEX2{vk0#ljL=U$@-*k1&iG zKTIQiUVwppt`E!3jJ%aZ)F=|2DtxKbpxjpNI59211dh3R98c=EbhNsDiMthAZ|f9vS$Arbf!k zT(J{}Af^x_$h%1{@D>cKAv7$JRmK#$0Hz1Zi8ky+HF?P-R0`Crz0hsWcgmwr|NVWZ zlBz<^ryhC0#BDIf2fbZ`r$4&wDr6nCxvjxD_OaZOK@!@rM4;r(aL=dq1|sHTp>7GYERAkq?mHbTQ-82cmK zyj%vX+=H_O zyEWIC?Oe?}PVn%}N}JxOOvHnN7A-H8c5h(4GdzX5<%E}Zy?trf9$?fyJJU29M4=;H zM6S?ZPe(PO@6wB;r%&(W{xFDZ!Ih|!TiYBTR~d(5s$np&U7HC$t+A2)W7PCSO?D{F z!J`#W9xP&gi(BJ;#|3O|QU+z4{GbHmm9a#sza4I4<~v5zn|vgTmc;pxigVJkO-Em&EB6-0m-t;-kF`ib>BhQU zP1&wY{rXgkeONa7!FQ{@Xy#9AW%k0zs2PRW6qv{yk33m^W+xE?2#?ybCi^tTbpSBj zr0zv!)wxVhY?OhzP2hij`m)uYua*X>0ul;g=ma#Ho0iZ$?4gbA|5*rM6_wrg(^U!r zm<$Z2GG*RZj`Hm{7YS;s>%@e&Feqi76w$WdvnKv&Rvjc)LxDHwAWp2#d`{gWs?PrI@MD{Uk=W0a#FpcR5l zLbbYy0cuT4fFA+?ri_yUad>fr;hK(yNlD^yDVHENvaBG%YE<^!}ZI%Foxk2&gC$@ zv1G%+R>^*Qg<3rgr$uaTebfYMPuM``J@0lfHvFO`HO5J~+#|w4#g)dwU)Jg50U^k> zWUvzSEU^$~+xGMn(Hciu%8t?vS)oF{bqF_)?sjE&!ut@DFT}y!L zDz+STDieO?weG{0n_cBTT24(o0F4AJffzdQ6ciB1F>gsGq1i+4laO5&x{xZ}CK_xW zm-R8*2Adk}Oeu%6bJ)`Fl=t4w!ja)s9Na$8=-6LO(lE3qjIDThb^Z4-7v9w1w!lJ3 zjqr*nGxC9j`RMSwp^94+723Tw9aq@&KvX{L6E?+QQ8Exy3jP>=K|wKPQ&(j)Hhp37 zFYt}ilVhbspj=!mwd*REt*ZUv;%#{P<_jfPEJ}H_<-|kKj*Id~x+=)4tWH#a@(}D0 zO%-r~mfnz{bAte+d^uZ{Dnn?Yyu`a78T3VM>we9s#h&_N3hue5kCq);Q%r+ANuY%D z-?rL#$&V>#bj*fci5r8af)aliif2I#O;S<^M0}>xXt$6f6rnpEv zr&ZC8H0-S*92!5*t)2((nd)y5(Oh2Ro}To`!o$H9)Pg-Gz`6D2Axr$P9g}0_lGHyw%~FdF;jde! 
zQ5l_kS`E!oN@+sd)CcJqJ#Oa_MU!wgHd#slyD8Ds3?ey)ahtbzIN5oAVVC36`TTq( zYdzpEh5agzGn?-^>k$20+mL`PQ&_pVUUyh&^=~)Q4%5 z&(szWJ;*Sx4Ey-;BLs$4f)FNJ-ecLrFjCNwh>~a)JlKt&s<>?`6~!>c(h`z&9<_{E z<--)FNIp?baca57GxR{fc}r~ej04=5#AqhFNkp!?QJIm(RM&+)%JEkmK6r|@Lno?8 zgX=JnGj>W~F53V*CQGro`Nmur7<;pSaU0yIEMm)utH0%zN_>2J;!~6B?Df^cTY)-S zb%8%*mX11T(YNNjdyDxN0W{J~hP|4^-nKDN%C`M{jWVZ~1# z74tpDL{@w5Tk0n$L4N zqYFk6#GR^MYW3Nw^PRgM_=A8L^ zKVi(Z>o6e}qHueE`hiXL_ddJuIc$3tZx%U*fF~H7GU=ToPONngo44SX8_0l20KUpI zO~IBKY$yaJrgQGmqbg$@;1F$Z4pDv?+T?~gZgZk~p8;~dnWXg!wk&`PKbH|EWF$`t zQ%3&Yd0e-MpnNw5IA)I^#|q4{0RnLk;rL49Bu4&?b^8-QsTz)SmaUR>um*U99{px* zqLC4bA_8{k9VAM5C@a?)*j0@~bhRf9wo3zjG7O@-74S>}*$IiuM2E+9M@OGe)eW(R zIeU$mNn`E}y9L((*wQeF*9!XBs8v{Jc4lxgN@wG0z%vH*r{ zbuby=p!;;4lSUmRQC0yvK$1^p>wO>j0+bY}6t6_I@qU{LYFpAUewolJ4vWr5mQ&>v z6O`zU+dp^V>RbNW4qM1`(H1%yyeodeW!~z&9_NPv1!$BNhM$4`v>Ljm;ItmbThpLv z1zUJ*vDsCtPGg@nLrhWJp*qWR@3qIp$rnbUgu@nv- zkBX+nfrs)T>6MNviZ_Bwdkt<+{PK$xLHu(Uao(my8vm4%t#t)|v&H?OFJqOI(}ika zqF`()R_sXr;jD=XZ=TvyZvL6;Yow)`CaO~ef&Jz8;#{yrJLO)PLi4D#G-{PVU{CXT z>}p~{h=yZq>3Qxos<&icj3H2*s&l<{g&fsz;2IE(IYYl4(C;$#tgXk(If>{kDi3P| z0F_U;0CYa)jqjuEA_C#3c zsf>JJpH3nGhV2_d=Mg1Q^S}<=kk5R2;IJkW$7KhR9z??k9>t^i$eRAaTSZw%+R&Iu zE_<)+FoKXERe;-S1WdGJSBYDn!5o?*zXdoH+&B3F2Y2vDkIRs+kPqYSuGYQhGH6v6moWH>ql4zs(f}`< zdHz+%Lx`1n{rO+H9oQye6p+I0wK-z}RD}`3#sni33l>eDDZ1!och~vUmQa}l7#JfM zO<7G|Gm#*c8H1nNrhEcBWuST|mZKgAjz)8(UCYwb`3Q-6NGWE6(<2WoZeKcS?9aYb zg;R4Q%l^0RFspk?ZSV}5OyZRT86?hT5f}L!^E7Q1-0b;08Dd+vg*JmFGPZ_KlB$nn zYWFX{#gDh2$|nI>-0{(#8UffLIZ&I-j6t{93wu&=D{X{bh%Pv0E|U%~*vCWPcxqJn z`#9&3duxi99O8_JFUn&Z?7Q+3HcW`7l&$)t{6twoIKCHWxb3a^!w(NV8MP*{ndtZL zwBZn^#jjTutYWG^Nmd<}U1Dm7YM2g){%sIQEu6)oMZLkM4#5auVVJ_BI3@UnQ{sK~ zh0W7kc8%8CdR{UKaj)bWe_U^FW+n%Vf-0UHpFY{%X`y%j20CcL>YbZSV$uwmCC*7Q zL#QW_piSj>flcX<*5#Kg#+FeSoHK;M6IX{Z>C?}||A5X%OhK>S!Wk14nDn9lkQZmb zi^bBr4{_O?qzxs=i23km(KW=rZeCxO3lu*eS-?ctA0yb%wzF3NrjapV0rhCGaSiDz zD(furIfA-b7&toA-SEbd2MI54q#7sHMT+#wAFihlpcts_;q2XO4<70HJP?iNliA_T 
zAQvbF)~N$2;8HMp1vgS#TymQzqXxSrO$qAwT0qTHE9gY&c?!efM(Bq*f67O@IskP7 zX4>LtNI)Bx$Nq1qUhyT(hk<~M6#^}Xu_9@a)@UTAFV=l(=IUF znq*kcHAF?-NY!r^H?nxEyq>ky9`IJL3xmC3Y+0Omg!$c~U28Ei?bLXdL06QP>+~pj z*A61_uLTAbB1o-8tGlYvB$NhhA;nEBGz@^Cjo9q-&45y@DpWOfPWf%i`iNI z>JQ}>$P(C6!Jg|LO((L!t*SBz4{fv={{_R$*y37)G2PlY`XAG6o;L>-C&tZ8^mK$@ zTvQ;MJzgviJU1;Nl8j`Husx_7@nRTqH{W{*T*}VqqtH|gWmdyCy`32F=@jh8V%T}U zW8-irYnUQ78x$AIzpnwMMj^Ob1Z|H^ z7KGr{IAFrqfBDjy_}JvKGc3IQUT#It4H_QLyfxa2r;~^aw|Ok=Uq!*Ew{x!^IV=g> zV9#B$P>pYf8_vRAEYIDa?)v$v>uZ(li7NPdVwlOrBqCb$>sMnZ)2N9RZ##G$QDEqW zw+IJ8r=ccwqYO>3Tky;FaHDJj(!33K*I8V~V+scds_8K>gPY#?6_A-S$T%~t7wNr2 z{4I!Nk7ab|cGI_U6p0c@UHp3Zxid|CI%5eS2s?rE;cjrd5bd zbv}ko2LKMhQi$OEm{8(nNV4$mkJn_GXh~@opH-)O!WHNJ^WX;`22Y6f@q=ERwTU_u z_NHVIIn@C?g$uLpuF=vaq@0J*ii~{hY+rbg_}&NGs*iGb9$dB5pMlnC#dsJh7fkV9 zjJt$$wC391l7$wMMip|R#h}vz*X_T;HzxV)XM&C_yvxEFs+rb2jF_@Bx00Hf`&yWy zfvWBcmrxLm2c$9+!?V9`?qcp>0?x5n6UKyoi$WzOs*IEBZzxvAWlw)JCvL+KcCs^` zlGdGBFnqXfOwntjpp1EIU2CSWHZvMi;pyno*kz=n!OYn>G!b$MGS3pj<*%R#6$9)w zSoufyyb3^PgxO^chrmO8wronoxxE{k{Nc357cK(qup0L%rSFrGkP`>_aA;BGQrN(O zc87C3;OqG=dabh{UMTeSgWm!cD*@&!%SGdr-q zg`!1MNP%S7&(#ZvycJlzlsLFnSbf={SDU7EKRDxI!F4rssVk@>6i;k&nQTLm@dXh3 zATycwu_WN$qN*(6izBpS{aNu(n!38W-$Z%@sI2I;_Q3HW|LkDoK_Wm++L*)3*X)kP z?#KZ2&lIPmrLq?;o}?{_AG{qmB94E>_@+4N)e}`&ka} zP}N$Vq{kfU$5vs;cym2cy7ZC@SwuAygB8xy-j(N{umep8v4c1q;>VkKN{v6WtbW0A zp4!FmE&1-k@yp2_ipW}zZhFL2nX(cwc3XokRmPo}_e0q3smCeiT+ z5!1vL+rt3XVxVe5*+GttYQG2`t1Qgbj77|JG;n96%D!$~UZko{fm z;-fek`SS%bZZP)cQta|wuJ~!hJ1Zv}hPt;hVmg6@y|%Ip^uG8StIq??GL4TEpaOGn zijKi1z1$Oyec+ZNBykNNFQokR+JiSrhY`{v&GF^xS8FuhNTo_j3bn>*OQOY}A^cj1 zNb}egxQ!i@;3>3MimF1Khj$u1zE?>FpPGW zBG`c{$o+?*P>>-*&jmrOLMWOv5_A~pl;_xhuMWSrp206eTwv~mF$@hhGRSZtm#NQ; zd>NjQTaf<5yh%Oa432S6lVbE1%KqDfvKj^%Wr)y)l13tRXYLVvp~w+Q3y}uv3QOk% za7@3oWLkW-Tmsug3^UadCUm5z&PU1wY&VnY{fPD(Ov4JS_%ApTRkKVr0J9p&B};1t zzt8oO#si8QvJqv(JX&Z}9Ow&tvD`YhO^PX<@Ce80%j3Q3POP)w zay+{d;UYEy_h)u|2A}u(QzVt3H?h>3GoL>MfC^oe;m5bY8}vXeEa`BJT_!K`S&o$J 
zJQ^Ul(evO20{Z2SzrB~8f6YWBh_#`)xK6`z`+*H+itcOqgrVaM^GnZHU`LKSAfqCN z4xvI|3=Ozr&*;L0kAtcWr5gu8mmFZ30&R~gFS#sUE}<{=;fwwND-;i!@{r||^YqEpv% z_F-(bBWVXaNTNjf-Gs&{mMOUeU*@GGKJdA<)1>?86lt-x)?q-^7DK7V;V zqYv#oivPl5TlTlWx3G*8#eOEkbJ$dUS!PYW`?H&c%sPt$!$%TY@W6sp<)y}3r@#u7 zQu-E8U!FydZLMy&k-Gj&#^+~0o+WXfAV#B8=Z_kP{Y#FIfgRW`Qx4dKTnO?GA_)}a z%N!3arv>PK^5X&>%s#vo_8`=GqFI6$^L&_^eevhW#jno4{sxfGR!N`XR z^Yh?O-waXlMHUQIik4VsAzO~mBvzkV^^T7>9kiix5>c#EOYYYL{dS{^-~->V<1KRnnKq4 zHRK;e@^jZTlC8IRQw`4q36m#o=2Vj2JfPyJ%$8<-SlZl8*9;b3(|CHV4Th-C+PH=8 zop=IL6URz*5*xr}Z7SGx;qEC#1)Gel-Hm)OBW>bdO4pr{+3;VuA=AF@^u4mz;KYQ0 z9f4PFjT#S*MXMz`n3>WUNk+@ojaNA>4+908fl4;1Q%f{#o0;raSZd;f7g)>EbxgD- zTzR=IGBz3Zj^k4p)|0x3jF%z>ND?3aY;>E=d8eW1EeV?;)_z-05RtOrFB!)wT@N%= z53+r@+izPIrlGD*L>y*D&6td*i#nPhR<4O}4~C5)E{cT9XxQAJG>#=TWtqy4D;96{ z*?9G(O}%v!w)jSPcdV_wH=Z7!K<3Et2ymnX&ExIw4*<^?AjJSb`AYZB8Q!JK#|&{~ zI5gbHHs(J`X$+)2lmEQkf5pP6o+HQuAQk9CIkk8-@`FSm5tYuWblRQW;m2wlg7_DmZl*63Jf>5R1&l&{1a%No@oJ?p8*kN zkiSTqw!+2PQpXHBk@MtMjA|sr8ETAl+*L__wAdrG&a0*$hG42)G+UlpAjC*Rd{uZ1l0&~E56W5G*4Mo1jaJWp zrD44o^VghV=NKP*XP!-B9ksJf*kA9Tgu?-QuFBoyM$&K^xE@9sI!cez`FQt^^!OC_ z6+oMIiySO<3KGN)0_>?zHz~W3`kXS2=hV;aTn+neP=R={u+-&KqG@{9LrO3C+ZXjg zsDOXdnwzt5jnr;(g1#(VJ-JA{Go0KxQG^mz4}Fhp#4l<^eGaNDCm2?p%Zb?9@1H#U zpAiOE64EG=vu=suV_itI0F{ z)4MdwBE$g(Op|L3iRfe9+)x2;8Vu+VB^^A|Bw-*}9o zvVVsv3*vZzD5qV{iksZ$!aG}6oA4GMVqxJx@}!lc8O2o9>paf_)`Ao4AOGSGVp)8baFp zMQ7}+qZrGLM()&m2$lZY-0Dh|o?u0ge!Rh&vE`?RQ+|MapsMcSdNg1OhTjC3q*ka?zMO&83R+1mB0`?SC69&R2YMX z#j^P+=3Vi|?dr@7`HzG23WFf}Npv1)h={t!7qm=8Emsf%+L7O-oW#_P_m_1J8j2kvi^jGFf*+^y>M$P#S13zJZQ z$uN7tl&so);XK^2#<}ODTaZNKrVfMt%&`(oSZ`F?tw(0Z*biJ^bhGUOf<0o8SF$(= zP~26dxOp>gd5=5!MN$kTi10cnUR}uHM+!8wagaLh@=&kI6r5hdi$xOpln^0u4ULx= z-cl21V2Q>Qbh^&_gQ(FgT_~M61{6W@1n@PMuVHpA38u!)FNPIzOW<*<^x^K0O-tD@-%RUR%J&aeL zgHQgUK%*lf&bHi@Iz<%xCqY}0)}F+-&Vtwmb8{B62R)(?QS*e1+opNcjw(ucA@M{h zK^T#UJ(&!&N@=X<%EVe_WFk7b{>zT_v`5hWqu)2lGU2>d!jFoxWPCC@B>T6B8sdd} z>}*ynRT-(W)jX7mtWaV42y)Qh+`7wshqSI6u9@fi_GAYPwh3uI>Ho4Xw;K%bXR(&h@NCSt 
z6urp@j^gE#LoD(?;k=b|UR`Wbk}&1pD78VVNYbuIH0Y_>n7X7PV*?yIW*w-)N-z}3 zOMJXljNUA1l;&rP@+aD|7O(KRAN|B55cN{RK0FFU`BX&qkvPjh5gD<0+Xd=WG@|cq zV%eDqcirNF+3DRr5mZRI0H6bMQA0K|(!qO;lL zC4&My>fx96{`dK`BlvB-;VxbX2gyMwA6LgJ+yaWM9z1>8JoNLMW$yp>**U+pT)Mvg zZTN-FwdLh^uaVc9f52=67OX>OrcGz42{Pj^nF)~}80Nu8RWrDe%s_6NBGQKs9&ALM z1c==>4K&2Oyl~@z;!hXXOs$1eECTzQTtBR}lMw^{da<>5bKDtY0ymWYMVPdKu%0a$ zGpUbpdeavmU~Q*w^RYhn8qT^F^Gt{u^oyOeG=nfJF1<6VC-}fNd#Gz$k*zNT*Ktf6 z=FUKJ92q7sx3^3JoegHN>gIONNsXG!zani-)`4{{x;v_@=z|k>A|z%pL|pJ=Q(;5S z4Nne8WSJA?3=s&sACegqQUvSWjWB|1)oP$1Cmz8iR=)COGvk6MMuIS!DXbK&IH$t zCsl$ZKT54$=_9*e1nDQ0I%WV8h(Quv5a!|e5n(MPl@)U@@y4XC2QzF2(P(a!_Lx{JM}v@$(D<8^2-phK{*qY{B0&qc29qg zaY*7L4wb6(Q#;z>jc$61Hts5wo^?`_Ww*l0rUB1bkrD**9z|K4-;s4#*xn7|W(2%Agt$~bA!TuIFaTNm9 zol87K&eOfpuD}G7VBAUswiHpN-Ergx&AiB;nVgq{BOm0QcN<0P`j9@JPQJv9KM5?$d;T|0SXh&3i>lZNuA z;q2MT?2y%m#pf0`;QUZJfc;r%Lbv>3O-NKsa^UX~IkV0JMGSO1wHA3?kkgiTW#vwK zI4&m@CTo$ODEEvwe+l0bC<}#MS%TULl}<;%mNWGH`x&zce;ljMzhh7{f>*P`kUfW-X8~^_K?ea1fwH__RHU1X#)mDY}`pzHo`W zRP%>j1q0Q@dMK*7Q09f4fBqBc!Y!eZF^mYYJ%SwBD&QS`KyYU!U-gUh-LM0Q#iQ<< z0Wej(V~i=F!F+bU!>Gd%X>u9{v;8bsVeeo5-r*eLA{NC0d1C~8fBy5ps2?R|nS`9F zDJF~5L6o7PTOk8xIDGHL1Tew|NC1|CYAKu=bDgyVbz}cu&P)U`?I;5+p<)E!@UxeD z3Xou2(Z40DhA0V*b7>n{8wOG>LkJCvXh$&&id7*$1)WSV4Z4kdQh;VCnzM6k2GXHC z-V~}UN+73QcH78noPJkcf}~S`Q*HPVB!8%?S0d9D*;&L{_E_@}sL_oGlXaHe zqqWHGt9|Fb+KP@h@^AsrB1A+h`P_1Cg6fb6!31p0I#b#f%naY2;*DGO&7+h6r690a zI(WD_1ywsZ`;a~kJ7?5BB6Z7*K6yj&p1hrOu9`+G_(fjB4vsCr_2(yRGA z)v*p0o4W|%iOMn)3^h*6?wX&~$hdysdDu5zzI5Q1M6clH5PP~J=pWE7bj;`CMt z8&@7_dK1^^&|^TQ9k^^Ckbq~46aK;m0xp9dBx>ohK1CUaOg)=O(fOkenZf-yLV?tD z?o0G?2g||}e16|Kl*|g=HbWHI7T8SXudSrd?8$Etl7tA4P&^i<%T~sP&ya0uKpcYlKMT7tG`%oVea%8_5ghy?RxRz zH0@YWDeaI4X#j(6q?8_;Y};_e^HqJ*>C+t=!*BA)!ko<%A%yuf@ec}uXF4E9Bc?e*Op{A;UM2jG2Ut-v_>8eF3 zyM>i-GKl@g;4OaAnMEw?@}Yo?XreOpC(nrE^JK^4~AsWchsD&pCLVAQ^++wnqxG`f0bewn6K@~zA!D!IwFhw$0+z9L|elT}2 zjOa3!_5{wco$~Rh`BJHN%1jDXEiyw=iX{Jpr*B=g-Z)>h8;bR#p}WPXPK+vBv;!z{ 
zpyBI~Xu>*86EWzKx{G?$GB1zTqA>*X{sO&c02GDWNA6~i!VPS0YN`+cFr%FzlK>e? z8Lvic4HZcEhEZDUjWYp}#<{Ett{#o1BZz~^yrR0=C{1BEU!W*ucsRlS69}#~WoDG6 zCfxTN;1bQZb8=G&*4CMt3Tm3#1cIl4fE*S4XVw*!H*E4dc<&DD(flM{?MeZLAVos+ zIpmnPA{KF@=K$~WMJn}CP}z-R)S~ZIZih$pYZZ51UCy)N)6+f#1x;2m!snSA&z1Lo z4p{8mxu0-dBK)Kg<~V1vB(;sk?lvW`;7H`JrAQN)C|h%^J5_D=j}2b0QDRr<`BOf} zs9B!ek6b1hEjD?pR1PVMpMr)kHsC(dEwLv^q+?c4^V!Bgi!R zQ?b2id@g&Ypw$0f{A7x4#$odUP@};|J|A1G)<=EhaS24|nxZfUl)zy&Mm10(w4Ibl zr^d-EsTOMyYYmI-!$ADN;#feRc{{akDCAtKvNMthDKQYZ=v^HwFUObr-3zbcH6AWf zGPS#o@6O?k0ooq=vkC2LnW(2~ClhBRH5`EGYV`AWhgtrJH|9)1>eNsnE3KFRJUkit z{_4x5@v(gV6VH7(4jI4M@1N^ld;w^B8~O+@EmN=py1#;wcX*(tuePs6y!si5b)fpE zEC$LH=OaaM7F7s@(&6M;+DZVpM3|t_PA7Qt#%}Q#tLmQ7U~C`wNwLoGi+=VMF7YL} zQ-yX2b#zP<@u##iZ|(9*SBlSdTL~AZwz3fjisJu=M+2B7(CeboWV1Sfb_rUU5fyvxW(p9AEnl&kL^(5l_x zwp)h6F``z9sf#Tp*^aI@fj&QPfIhb>)x1r9)zz10yF^jjHuWxhT7$`FMG4@1oJW5m zi|o-F1Cp)fL4zLgByc3qYLE7=zJTpUL|h{;OkVZHfX#z^2;i&KvW3OGGfU)zhW~ zyk;wu%Q8%Z#-};b^~S+lHz>i|!-pCIAN7|xJjPN33gS2z zJi)b7NsjTg{{H+cH_34E@F$};0{quFNE?=r2gdEp!Rx;$x;*@xvh@*_d9jCT>zG#e zqpppn@y1y3_dlGbK$adF%56gR^Lv6x{Ym@)4TRKek@NyDMLlwf$fVe)1Ayr$Vy=^g zD};@K$$>5`0ahvV0>t1_9<|N_c$q>SqP;a?aK5m(coNpo@};2nEup-Sm@iSRBPyF3 zK%)#D^fq3y+78ipD-zw-2&!VMtsq8#E!=qJC7dcT0@XsOzo7zxQ^LZC*GbSor-K-6 zkt@qjn-+pr-aX)mVA2xDjS9&^xKpcoMo0Raq*2~#CurOIc4*8aRD|SWka-x~^AurK z%eGTYLUs*@F2(fH7H&RUQ$u^;Gb_Y#C{`3EO_TTKmk5=lc@=i?i42(NVmUiQ?^Ztp znzh1mJwPTgNeTWIwj~*Om6Fa>KIX1JlBB#08@1hX7Lm8+@X-&KTk%RO=zj^25ao z(zc=JdhTDFtHt~cU;$r=A`M5UiA3Wj@h$HkLp;)sQeSy`I73zk%v7T7l(+g4&Vu8i zBns_c!p)=$sqT6@d6&IcwcqnE#qto5OgMA>zBYNf86-ZTV`?Nlsn&cRsLtb~%ub>yRJmoNeBEuHg+@n!EU$@|mt$gYNV3o+h5~0sk2;ue#S?A0sQ;Ix$P$y$ zm!CX7hm9Jpki-yOa(w1|H@B53%b$<(JBc4+RxF-K=fIObkB0WH0cgYR7wfTXQ4M5! 
zP6!H%VV$bFta95Y4y@V4Zbh^K>5mMFzC%#tgM%poui05ybiBwO_LHb_5?ONseZtLX zHDA2}*5ow)#fQcKn+|MwRYhhq&KfI3#Xi|nh1vSp?(}q#DWk&5sHy}ok{y&irhZ1; zV)0qO2&w(zu8r)N8g@I1^PD!pqubFqS%QLyc@WoY_i~f896JV(z}p}SG!1otZwu70 z+u!28I^*rJAz}nc(_9VJq`@qy8ZL0Wg-VIg5>i~*Cp?v6YgR_ZGas1O2~>anw^CZi zG=#)PB(q3_0kNH^zuOa>TZ)reWlT)eYw8Pt5=TK|d?^+s`_;ogo}F)br*|@qAIf&dCN4cEwnV~(W5-K>I(@{e|Hq>Fl z`r4Vg^(}KgPz^keNy3yAD`ZRWKJRi22X!5+7~z@Z{p^sX{5l;_(-2W^T~5bRAUkCn69bEqdWj ze^4yAJCb(6sG)b-!}FxSqR z3?t+c`zgzwft<02Ssj8-u%;C2B>p*QJEKV3znp&IOv=x7TJYAHow z0F{;e$|6Yay4OWN@+Qwl%^M0S907l_qjdN1(X>Mpmg9LK!*&1)&;V3pt|R7z#H-*C zVh>f15G3ucyz4(o3ykbQAi`ZCJfXa;;2u7uY|5w90$fp+%By+MHc&=Jb>WKNjInJ= z^3CLf5jnH7V4gE9nnT5nRElg!!)rg=GN(QTB2I#b@(H~`3&f@N5I-0sT&=o+YIT)V zw0sdD+Zh{fX`jL?x2b5^C?&tu^&lyB;b=e3ae?bqDz?NqU#WS1FVTg)I2vCb^jTt+ z02BP6TLFvapVUHTe__xm_!?AdI4QmVV;IOrYBqSi*(~5SXYOv?E^@Tv9dL) zn9=!5lP*(!_THb+<&Tm}Z0q-baFDmV0CXaeVNg-85;0Y!GH1{y_cS|tKZjrgB+;nt z0&G)K`HWqVOYNMl42CT+Cxfk9kN7k*BkyBMQ>eG=)CU$`65re%0YlU4EAw7z>E%MU-dye)S?7;2njg>$}xbFEvU1$Uu?h)$vg`idytMJ zY#hF;6{T>b@*T#D&3)nJdu)DdWn*jj;@hjBP0MxkDEe#kDR`ZmI$wBI20ds;t6 zXjheqRQ<< ztcXbK+aD)h^55!sRC)%ifzT{OnO7Td0PoT;IzlL!-+Ns?jg^j>{2Imsg%CC?F zj>MCbea~W-EdqM-+iNHKab-`iEbAE&yT>f)2?psQN66W$YS|U!>=9S;(4ylo)JD3X zF6e(L?N6=&I*8WqE`HGSxu2>|{IgSDQ4RV*=^n7W!^FfA-Aelum0V9-jy2!i#Ah6R zH!KNlDF7SyS^y3JeH;Q3iD79c$dFMPERdaTM5%y>;=O}yL@6eLZZ`3EY^3$ zQoHxR&7&G)d$dFw&g^#z{TX6{A*Nx zRg99tt+ABg2yfC%kE1x(oGVC)XmywmeZX)^#!#1wqCjC@JhdX!8x)kTurXk1&A_nJ zkTxox#CqR^A#x7j&|+^mM&;xLFNm&Vo*}TU%HB}>RL3--)40NgiC`B5UPvN%9MWtT zG>z(qhycX(B1UE5cI0~XDU0Kv4_RkV8KqikL3$;^UyQtc3)5%q?O@gA_f*Q*6Egb? 
z&v=cqDQctIY~a#^XCLlO*+s%rFJ1as8}MNwIC%gy&I{ z7zC^7lD7XH!kA1}rmZ@%-xh_jZAQwERUl(vA3;Ti`EIstA|u%#L8v`K0(hv{H!pG;9Zu2y zvWAy)eU6}O5H>n2QTf_@@kDS4M@rwoR^}LM~t=Pk$Q!v;jj$(BRC|R;CZ?uOQf14+j zT%VZ=-Eq$@Z;VR2r5Al2LRik2TGPiG|F%-^enrk!JoEypYY+>PF{(wjM54hQ&XOv$ z3dY8+u)14JR7RkrVd_D}cdzwN@0UYeI%!;6sV>5P{RX<`CT8Fshpn7_6rz%-G$8<` z7!&6y1w|w7XmA@*naqqO@-<+MNasOuT=Ug6q10fMA^>vl*u4?xSVB?A&JHk%luRIx zal>^o#Z}|s`P2u_b}Txr(8yl&qi9eur|sL*waDKfXU4neOVPqB!Fj(6c%b=+KI%LQ zad1Ki1-Eg|hb^I?Vv*#?8<>#CD($%NobpIf9H9$B2mYuf_MVb@kk3Gx{lZ6|u8`2f1NOt9;jO7i;SQ$g@0|#ENY3Phip*jvdteo)u@;eE*7Y31 z7Rs;;3Gr@V@T|l zh@dCLLDeHXwI%KYEHd?Vut&m!szlpEKnq&_kU}~s#D2id?QxJa;qh9P3xN&YhfG|= zJYz1=T)P@5i-`pyt^Ux=0inZ7azro@iE5;&)SntRTi%-@)(uAsc`WW}ugki6u1u#P zpRghXPB-rlUo2@^*NRAA(FH(*tuftQZay7uLc*}zS-T_S-k|#t0y}dYmhn9F;f;=e z1%j$xN$c(47y?epeo=R2kTBWxJIe+cKx#KBWT4ZqG-5fHeB=j@b9K@!e{3Mr1G_Hd zl4nx_w9yVfXojlglQCACsL_cKNFVJG6Gc7JGK#%-`>sHYebgX6{{tjyTe7LJIqC|_ zs%+e19EWPGH1u$&)WTE7Wl&T`r5{;>XHE||(>)uHSncAb{EhyaOsca%if96d|MB($UCce1)_ENEQkj zW{@gR;f(rhhwM<*4XBd`*cC6OY|N+Xed@$4!~Sk_6PG#Bf0vd=>?J`CVIfke&AU$Q z0$x-Wb;mR}a~@nMQT;NmjwE;{& z^mLG(i@=gcaHM8!wKpAV1iOeYeU65|f|7R<4KH)Gi&ffE!qNCLskMcC^O|zR(SLKVwQ>NeDr9E zPb&#y;OCiq_5E+l%a4DGae?$WKR%B;sMT|A>Rox><*^iPrI8m2?H zp);Y{0PYDAGmc#9Gp*C96E|X7`b;Hozf6iAKFTEZFlf0gH77l6N^taX9H{SS9jFaf zXIK2+<-4EKscQx0uDX_Uhcb}hv!<&4ja?bD!^M384BE~_$TUGWE{5iLiIBc9WFo?o zb)#EG$s4d}xbDYM1>+;XqTolx8j@|?o(}3o_N<{eE8M&1^LYRZfudDL!PL(q0>L{- zpIIMjXd%-$GN*+}yyJxnDpOh@cuYkoDJs8smE-=hqhvWuQyOe#*h2#qi=Lc$Ov&g7 zF`VUr>Zi4aL2VRFu|$?sfj!rQmX7SBU<8|U~(XKD28rml<k!ZfK75%+Bp3A8!-Cxs>bsLz3K=BV~}?-h-zw~0@;Iw%h^ih(&39Wc5?PLK^8=LGRRsJ=CsjU!2mG% zbatr|+SvLEb6E4K%M&@Vw7kBU_1~YvddUtQ+@nPe5sX72iDsyoX5nr#Mk&$9N;MO9 zqT+fuHI&t_KwD97ebBmxnF-ass^(cB8D;zgZll=3t+4`d?3<#OXe;(%DBV1&LP${d z*W%ov#R&)fKza^@=+%+cTc1}DNsYE@Po4UQaIMtz0(Fa&R6{c-`Fw2Yu=`eLG?r{q zxjs$p&S|3{hSv7Vj&_tl=`C-T$&P9eX4Ks3)Cv=7BaeJ|`p;K+S08j=#>0)w?-bOj zCF`zoCe#-*P|US4A9Yic$Z@7kP2XMI2R#w>DujFBMMUNrapiU+b#U;HFk@`VgA#+D 
zdaNe|%Q*$2II2#c62n+2K%f9ln12}u5zd%L0;2Hgj1UeL_b`;sRc+Awb2hKh`HG6t zgZ?$yV~sp_QVt(--?`+6z5n6~?=1R5Ymci}=Uc~BhkpHS@4?e-^94oTEIBwg{@C}2 z;@9oky7}JP@SGp_?YX)7#>4NAA5JKoJuqWpCBN^d8$Pebth6*Q`S@hkL%?SlUaeH<}s^?USX7HZ_oL7rdoGj3v>p?GC%L{ zD#|RXK`lZg2;YUotjjXmHiSD6ZPb^b}n9u zai`pK+v(ET3cS0kqGSuA=6DEl@~g|s%d6_OOV^@%#1&@C8h7Y#*w}VCj9>laX;A2w zKfGJaYeUo6itm4OYFa!drgNeB2Vs-yqmm9Ie`quqUHNs9$PJw+5%{LF zNIE@Mit-b;>9#&OwsV~YbEor!bz02-Mc13hQ@!@xS^uNv32DnG%vhQAden4wW<*D;1LYx89rX`?=5e_4CJbdhWFMXSlBS zu-1C7>y{dGVOiI{uVyD(nQb`ad+AF%>_V?!qU*7GA_UeNvJAV&%XHoSz*5)Hs02qF zv+cUPgtYYcpc<`LmsxC6l{e6k_aK7YLIY$~5%Wy?&4^Y9B4#VIV_|~F|y=!g1|ifHFj?ms+06}un5<-*S*+y=JDp*yv58H=~B*Z z6|s-4aUh7-hxO$}{J6Mx@j|qLF3!V3S;AAFl?=y`R^Y@_Q=O%Lct7rGt5n5OS(dh~ z%edBA&aqiD7ZhRr?fpg4Zw6FKS|A!T3AlVYLhXR(i`ySqoEXwTX@6cxCTt9b_dctq zCLVp8IR#oy&(jNBE2|2**6R%Wz2Plq+(O#g+Qf1K2R9DDF)~`-sa4c+L{wF_u{ZPB z_kFZLk=pkArxty7{91QMl_TJ@si40iDHd;Annt4-jE|B3eK;aIyTzDHwc=X?np;~3 z@&J-WUX|SaPtR~%;C0B<=D5r$B_9Y~SaB0>lmDiT^j_Svn?e-}CI%R@ZOvTIC1siC2*>D5ra zR8uW%a)P6fs=mH{2+JXA7zStpE{pv76d=O(OHWTPm>3pl_RQIctsT1BwxE`-pCLZ& zc{&9lcl0lR=C$OcgsiqrBzL0YE9kF9b-Vcn8%KtZ(ja1emhB#&l!TQV2g(P+)~;O} z0^$VeBeJThYN65jx|Ts)ZgO{bx4p$8*Bf0pf8d-8_d~o~yX;4!j_B}G-6V0gbfisl z$FbUY^DG?>6)8cNvs>%5u}OV4G1;)utwpmjC#BBr_r~6DSqGY(aXyCQe+xp^$!DXEK+Id0r0m*lu{CJrdd|*V zSLZj-f+g-wHTFqfHu5s>woP5S{hPc_@9_dS8od%leLzr9{M%eN?}-OD^STI0blrh{ z^BvtJy}!G1?sjdkYUBY7?$jc*@IU8YTX-=Oj`;vr&Rn7-*=_WgYYEa|ABVhj9 z706TJ#9y-P-~r?H>%E{L{=@IYY_i8rs75*F1OfB&tS{eMG*v>GU*6o`Ex9{oTB?!2 z=q_Diqb0qBC&b$g3D~XoYqidH#aJi`G;kF6^i)ULCEGTwd59`zqCwtqmtQ&hyZI(y zvi!Ed~W<<*M!cn%k)VFz?$+@l?x ziH^FudWoFh&Tgd^6ica3&d)ANdj8q*NL$6`Wd<3npS+0B492GkJ64j%ND+xy^Emhjsy@NAW+1sAgq8?8yY5q)soDbbgxS9`#{ z&98Lds`eVfA>p@WRo4~WepW%rx+tO5To3naZ%?xrjDX9FrhA9VJ10^^4qUdUxOeDV zWQa??ODC+%la}qt5@>ri!+WZa4DYH^OFl#`+;S3z4TLI74l{WNHGhRMfjzppLeaxy z;{|BHJ@s64QdN$#HGCDeHd)i{Fa&C#_|C)%4B|Tih0y$Vki+a{&|WbZ>dAwJ{Uab& z#Xf!69g_P1lE|w*k?utf1@7C590IOhjRh{dhqfk4Ct=5n1-@@vjzdCBq?x)l>+*t6 
z)i?0u_TJ!@Vo_Rl^XK*~=6+`?rbiQub6uq;@@cr-3ACX6~BND3nsgrnO4;t!=?0#0rGjBBKCTXpFaeTkBv!}%dq3QkDORnSJ#hv zU(Tsji?Y~*vpchgcHb$({b^E2wdCFBYV{x|-UgP}ON= zT>;mxCj*$@#PJ?kB+%_LPp()?rX#6qh*yGGTvDIDv7@ zahT|F=Dcsrpkjq#wwZ5&>DHbf&sWSAtXWfXLR-6TPW@fmJtz>IqdRiy(@akRl1T*j zpour*e4=TJ0i{&vD**Y~c)14sY}xEXKMXT+PLg_pLU>y^<6sy13OC^q^F(-|?j$vb0 zNCBMSdsRV_8wO+IH!y$}fMEKO^9nq27xp04C8X!(a@ITg~cLSCL&Ns*Y>!ZeQ7W}N43a7)?@IPGjh9L?bXoG zTNH3su*G5Pu6j7MjO1Tj!S}Hs8J8gLSPB*Va1kx_`tr7YgmM(*&l7Dp3H+$X<-u|q!xs%pwRQ5@bh3Fwnt!wvqHE3$jy>kI& z;@}v*>vx|%+iCh)1?>NyinLFEMq=8h{N#O;FIzU4}T+Ql>I;4mE%AlSiM|s zltYSGttY-SHLS=#hM~1IxZbL%;0)EG3T0D!r;43%JB2DYT?&0WLjR+4>8uQqp|vp& z%ChYmmCR7?5n%W(lQ*lZjpauV1h{+nXmfmSLoPcPR3L*9do`f}?Ev4;uq_k4LhZXK zKv1U(%M^oM3EHuGHJ=9ZmhMhT0^Yj9Ajc_z3d%dH@4Q74-cCi-YjaJ+J3dd<**(PZ!<-_Y`;fb-|dkFoA$b*>zx|38{63GT-3-+_)YaMQUWOh0 zpDM~DX54>UBetpF34!}%mP&iNm2URU$Ss?rcnskicuWimSZhcbKGWe=3>AU@Z1Jhk zy&Wf&JZE1EEB3+sD$31R$8`4H61GF=rwir{Sqf!3C}(9YUW!)7?^vWDyEf;AlhJMz z3e!yxw^iyK-#z{tA1mN<44}xs_8Tl5+v0?Isiksqa?Ou7b*=1#gPFFgYmQZIity?% zflS~p99i3XS=(1>O> zUpyyOpvg%LcWswoFSXwg)sbWMDlKvtQnp)A42WeB1kDgnmU>=+J=~2!I?K+txxSkg zqA;x(&Qa$;Z?N39G0zAh&dV(>(mmH&T$;)#8*`k%b;Y9y66=cGX>ZY}mt~`$J>0s$ zxAlhfh~L|2x%~63H~Qejev{rlaGgYapMwWMw+!)~_%&QAfFU~H_po0@iK;PjrNQle zXV4@pXzbnJyl9dCIcfwkO2(~iAs;X3UlCr?L*%;i=T|%6FXN)AXsaKnP*w>e$Vilw zhKO|MJ6!Skd?MTe^fCKEK~q$wU86DTpIY=KQ^KU0SbnQ*>tnFeEP)3#8N82Nxxm5P zy?m=w^{w5z3C9WvN$lWC#4Fm;NAY0)~pqr!BNlz0$pYR|LN5PU2x^`t|8%=MNt~Tm^fY{ZlT;$ONNi+jaNQ zn+(?;E5wt=-a>;Hr?1%o%%V*UDlGdl1V4+wVD9n-$&`O92nZr zKdq@{>pB&#Lk7`QMy~)lVY+u?7q8-`&hy(4M5y^fap?QWQCM?0atIrFhp@cU<(>9J zejmhTWOTrRl|u(90yOfUi}P+_*Cy&Fhb-NdDBx)OlUsc6RsK(4UK)3_s!A0m4mIHR zWpOip(*m^s5m>6Ba*x z%G%31rtm7uC_-d)e-}`)C4)3bE57Fy^_Aiq*7H49&^ivP{XIX=Z1;RzPAiPlcYP|p z`YGp7NpFeV!LsmVGQbj4Hfj=DP7tAfpcoel3J46Oex)56QY~Fw-P=i&1sGa`vz8ge z;M_JnfLI(Pv-AT#5XUBT=8p`=Z+u_!@G%}zH_BYFiu1L>!9ZSR@$!(r3eI0&>m!hIVuI?w?nQBynkL2b zCA9HGT}w$xNfq?QQNT7N%~u3Q0xSXE41{#7W1AB90trDvLSpW!1Lr8s&Xm*@2ONWa 
z3t-MPHF2`fIs?BCPkY^|es8Z$y@A3@qSkrp>7$|jM=*go7kvWy)erOe4}Pt?f7A}4 zS~8AD4%Ko|P%oBG#AntI0e*a`P=JfYYO`ViMKBY^#l^mRu;zvczeD4)5vZrxF|vVl8v?I9S*$6{7w;iqCMDnX*3**M)?w6RLRG(@fvvQ@D!B2O=wpZzIji3+sItz>>& zog^v%4EJ%Ezq^v#$MeR6_^^}OlO<)C-#O4!FC!x(XIemkv4i?0e3pjI7rJOI-QP9u z!`mx+b{#oFW2gxrU@X4Bf5Q1W@m{dXp;~9^si^~Zg*%56=1@g?+_wt5VPLWY(()=C z6oTY3^JmhQGTY{lA6rrW>u%U^5^hYUh;w~ja))-T>`g+R`BgVv#}T1>U04TjI+eN& zT$uLnhWkfRO0-LUYHn7qAMq+x(2`2D&$h_4uC0OOJpz5rDg~DUg`H?Ev4>t_;9l;? zXT4p?20cNYy9LhxAQnh*C<|9qXS2Bz#b}&@2b=7*jSJam3_*%q9eYrknTzs#yt-H5{NkV{u0 z(uhgKEq4-91?N^AYLcWK>x-A@sQHJ(7IbpRS1W9hElrMpKXcg8aex2btwYz4v@wQ? zs~Ez>%0+86L%s=En|ZWwX{$^c#~wJSFoU0e5(($Uqe*?q5Z_0w8MLPvDB_nHKhOjG z47@_?(4j+y1x=cFo zrD_%H*ujJgRz>_05*OOWS?GTbj}=(sV$Glk^0sgO__#nGRu^*SC4$q1SXv11Q^!{d z7x}FHgU*+&!UW?@Q~>cC&P>_-6Bc{em=6H>V2axp%G?d5!7{3Ng8txIgqFPSC28|i zXIAy2vp=`KX?8IVo>yO%U)Bd9A-iHnbr$_|dxgl~k2%_`S)j7AQf}YlNi@TYGU8fH znrBt_<(!mTsAVM9uGPoUGCn!Z9dW3+X4Vpee$%JrZi!Ms^U8oFp?)H)FYz20veUAE{iXiow|NQ4y5;u%Mb;)(_< zEMug}2r=6kCRS}|ApI=urFmz7G09WPq4rgA>dO#Xx0np*4b+(bN0KmK!X%CzghPXPM zVN<6KRhc*54~{_uz2JmGjO_}gGYF7MkN*7>7Xhgg_mG(eF1i8=F^Su!A&AoBGJsk^!nSDt1+ifpI&%>HZItA_Bz@qn@be30bK$T z)j~0h+;=c&;uU0!TOqQR{7SE7O9~4B0a}0z+w9s|@Kl8VD9k;m@8iT!b&ib7*+gz5 zz!hhond}{kvW;`)%G9usW3mL7DZ-K$-~{ps3h4HRyQw%RseGl-@|-3vE2YK47r!J3 z3pl9W%%wXoGwB5JatN+n9v99qw`iP}?-stKAq%YNNzrq4s@G*LIk0L?wZ;E5F3y$PiPr#%Jb0ilFO^DE5KpcZ=eNL5rWpR!1v`4v{R+FQ1qMK{t7oz!N2t|pYBWh`XLGTc#p z5|r>uqF>Qbbbu$&y}9jrL1*xlMcnwQnu6*t2QqCuG9>5fPyfi;Cg@8RKW%`mt45oE(`lX6hQmQg&jggJAi`#Fp~u%w<}5S`zs|XPSq#er)}aO zFr<=_Jy-4p>%%l+nYG`J&l8=5)pP9qX6)`jDgA|P>8&fChbw zfSTNtukXaTMops}mo9TEW9BM*Jjg@K6m8(xZP<+R1x$I#H~Gb~FHmPfpUTDS0!%-H z(_;i|#I6X)7%>g+lbATjVK{xLyu*WL>ZrEyMhOF!`ebqxZx@q&*DJehBzFI3MB z=Z|NPzO{!dbK|IgAU-J1Twhm^=P0;D3kVufVinK4+j=>hyv-=@p!fu{`*FZSuOp;4 zNZz?N#mE&ftQ-&@nzCsQ32zbp5nkTpNOTP44vAE|HJ}x?V+T5c%zVN$3_V=U zCoPmkwEg<>M3g?u@O}f_3n61P>brn%OSyaXyH?O&v%A8hEPm=`SomXhLUZ&+-klS+ zv1qKimC~^)gA0zb_Kk`Gw3INcu^s1x`^T&tY&d4 
z(+l*I4nokwz=@1{@tr{6oZtbRfVgM{S>rDy%aImCbaNzS%0;=<)LYtFlx2&4min35 zMov2f$%ou4-3OD*46nogD5Q1NXWl~k&cG(XGj^2U#5aN1eM*t|)MVy0ZVDA27x%Y$ z@7Fi=*$&b00FI}I2+X>4=N%~x#OU3~&`#)3iAdXtty*Ayd6Dh7K0LpGkdV1u^(P9N z1dr1jU*ZBQ@}uR8BU@SNhy|F)yxf2oUE^h#RZ0sxx5P7`5Ee zPwEmBzh_Ksid<8pJnS-13oZkgNT*Y0K$7##xw2(mu1D9L(nnao6j#Ymc+NH-%w*C`C&hmDOlK4#$5!y6pMIxn)lCT&pDj~Kio}l?G@ni3H}NA z=F8oDs)e$lNrv~dQL@&e=lPVAV$%?4rjHuz-o1OP4nFzV++3WVls5SQie!U(hhu3U zV-{Kk70_A0(nj%ap$ojJ9p+G$gK*$^y43$6ko@%UomF@3OxP7DY76QsrzvNr%;Wj8 zoxtNMiWwq`Nh25@*+n=V(-MC0oERGL7K(`oq)g`ZaKTloA3~D_gboG(6$Pd&t+Od+z&i2>~|NTbd`$-*$)yGjd`(TBww~RE5pvp5LE{o zd1=Vh`)9Ow50w8&fNR$%?A72PcqUVAk%MX1_ajYoB@8j{kOQvS4=9~3X0)%L6rHrO zf@ulEmK-R$Fa2`u8!%Xd=9!df#Ia&SWB`KI4r-7;#(9*WzsF1MezxRBE6VXn`F1DY^BgH{RpaYYoH2j)gcAf%19sDg!&?=KG^dz+ zYJCFsjS53_%+*G)a4I+QD#ieC(O4?TXO9ts_8TLEH&Gpxd{d%(2>3hBLxasO$M{?d zMneCQ^;R3`P?EX4w|!G3bB2*f$bvIy6cd2Jx?;w$3bKJ9H(^aDHA7?w6wpqOZ!RKW zl5x2a4XW4^p>txwt`;LIh5cK)UvARY=7hU6&k`CRutW9s z5~yi3B)mpwUBvCb6eKN}G!DrEMf`uRJ}t(ok`G(q5rHy zP|-nLm#>viNj*S#^tm|#A>d-jXc@ds|7bRio&2Yu!m+eN@cZF6mY|z`r)Gn46OPbc zR5=CNvt-iv-Ux6^hF7kYNOgEjFv;J5+STFTP=nS~?M|CT=-aV&o(n-Pgs8{8jJ^`!?i444Z@lD^<{( ziFap4xfw$HoXM2kcap|oLEz4)cD8XEkUklT3yAVj`0|6-|+0&Gt)cI9$9VS zI@mzQgKZ(x^QB=ohYP$OE9_DL^jqs(7&$~O6t=R+gh!^{J*Px3cMgEDyY4m&=>%p@NG7o*+zVxnj65koSn- zoq*Z=vyCL>{h~jAwxOJNe)ftQoC)BJ)0spZSfL4jBMGKJblZV*)-F=5#g&{saJe%K zhM?Hl2%x}9k)UY8xzG-^3IG{$dml1}pt@MGlTI(e&v*OmNcXa?Dbv0*0uus{!=h|x)~{iiqkLLg>kQO|I5~jCO952^ zgYxZjaV0a^;uVstoiWn5p5lw7hD9wCkdgm~*rbzbE}x()tAR`|8o?>xn>u&TdCTBF z-O=!wy0@_Wz(b*iC0!L)GvT&%|2fMo4nnHX#$maKQ}KhELq1MlOx_+n5Xh=^(|oR2 zFu5NUZ~?HiHR3y{t3c+H)7Leo^eV0UVc1X&W37`!wA2T- z-(|s1u7|=X98*^DPNqAF5CF+}oxldH4lx^D>FJ9x?0gbZ)&*_vN*6Rcmvv>>HmoLw z0R79Q1NyqJS~rZYk_cJ(s+S&4Mv`m-i?%Bd33PRhE)NFJ>9OT;4XU^Gq)Y~Q`^m*v zGrj?ZTnbbeLfr?Wt#22Jcp6fvE4Z>{z~hyhb-PkS)~6b|k-ADhlTjTnm~e>G#-e^8~y)6>ZKx+*mn(>5XSzdKU|*%UUAQX8_ z{&HgYAeb> zjbrMVQ*C>awIVnC$_&_j4M`f67X-(>mz`^UXG+Qv@W5vduDla}g*Dy^GOh}~-ccVv z9-xQ7DUeS?6j9%{t%m+r(g=fHB!;oTIjA={Z%>YV9L0rj0lS_A%LFO(*w 
zk=IG8C!-uE|A5}Kiqn5hSRWxeDZ{kzF9h=J({xY-P-DKzr|C$J>;dm7h*5C>oxi!C zneHvqDYw88=Otd6>?7kGO>+u}u{7c~=t2X(_<8b03uIrXPVuchb85JFhrq0{Qilha z*A7HA!75vpUo=WVLT@=d^B-o}H*G+W*5nje2hDj7dln0ve`1Bs0%31{mt$YYJ@EI) zj9YPRz-f%y{&5|cK1X2wKQ=YZ10Kg%T|jHVgba0f)lzWw6Bvm`XU5w0Dl7T%<4vTV zNz{rwX;?d5WaVG$`INklyfwN7`Acu%A8;vj(HDS?qb$ zUaY~%)+@R~T3kp13BNtbwugV%kSdN&kY<~TB-u6~J7BQ3vm4;V^DNtYDQ>SagKl6B z0kmQb;pqgYaG15upKwpA^WGd?2b~}GZl|Cpmw@3q*cM$CokT*#b~=R`m@!xG9cX5W zZUh_{a2Zj$G=|m>K=0rT0DKZqr|pi7$`sx+{b>AU7R1DiY_!<$WV6D&H-~$g78Z0q zgwUu~$QWH75wx(j#Qt2u!LipXG&J=o;0AW1=PvZ9YkgaX(Y>vtoGB?I!hs4I zq#OuF-S!*RR-^h#0n&O}@lLt7xQOrM$=70_32KKC(VY}{!GFuivMoCO7!3mSxWohj zsqd~uh5arltqZqdFC<+mLF|3!fOM%iSFQ#g(;7ZF#igawMS{4sxO%vy4O>(5f=e#S zc8*ht-0R2M#4#G6#81Sl;73!~u6T_QsCR>o0J-C zHd}guO=-q9G`F$ZG6G`L5X0*=Pc?I;CU2Ls_!^6()^K z5p_YR3j20uj7w@qjw`g^nTU}=owNj1ecuo;^Wh~)jIjaK>OEJT4XAy<@3V;(Y8#kB z$%Ieb#me*0K?pKs5NB%Ig3ac`%M;x;jSl(H_z#={nQ+|c`~B`7-++WlWf*7?w}TrD zH`D)pdsD`4Z+>3qE%aUl&7+tVtjxFKQ69#7h=$}tqrOoi6m%mJjrSk+&7J7m5&?~I zV$_(2OFFE9!I1D3RR3DT)V4^dSaI9PTdpY4u3ova(QX;;&2R2Hf_aPP%$Rf(E-3Lp zew)UZwtB}9{Q-%xhRsF%H3k+WG(V_fWik_RHJqO}ob&`PE-jDR_#TB%9AsT;6$|j* zc!bnO-<#;yC~jveB$Kq_Q+4t0yZZgomdDk|&I9FoA72TFfNF=D$9;2?1R&cEP)sHMc?#Kk7*c$3PFzb{`u|?I_h=z$i zTySBzqG7mo=uHmh6^@r;sH5ovi987yekD(D>fNz@bCs!x<8Y$5>PKKxB)CD)*oIp# ze(@rZ39;@jkUp8O0TPIP;hnHz3XLfI-gmRdvc`9nv8+)`y9R=tZ56{W2aAr*a0b*46P(hV{UUX7{(rf%t zVC-p_u0hy{5WwVj9x(5uPtUK-Zu1f73-%5M2BZz?%l?ypLMfr}1}A)Atn^mQ18;x2 zSg$CvZQ`$ijl#-i^Bd6G1ffDRy^gI7$pavw1NwpZzZuRQx~LS@=ptxfXmzq)U@Y<; zX^*taYwgQT-vd-?27~t^eIl?y@dF z4!}ufBbN9Tn}+2T!D^ME^$_WT8q!Vam=*tA>bW-1FxqH<$)SNYGHpyW(}#)-i`8_6 z&5m0-H~n0Hex8$3J1E^dfW5%Y0(vc>Mw60A^PpUJ5BqF#4}DO6kx7hvpod zbrneEF}XL5D%VxQg3ksK3Wq0Aff_jA%9R;s*A?S=!CfD48bkKIB? zfO`8DHW|iyg68jWWOV@%mxG^G5Z^u2aadyrZ^aH_3-xtmE0xjQycwTj#6cYL9i-WQ zmPZ=(Nm>P(Gc>HMtbVPmkiDaT&IWqe&4ypC=N~@OUd5)9EHR9`ag_CW>_2(?Z)l8W zQ{I)XbdLPMO1E8?^!L__fg^S^>VE<@!j1;0? 
zNm}dcmei6mq$8&(7t&qGiFuU=McIGySS#tq-M9-+ITG#(75OBXMc3+8(NOZk?a+EZn3FNxlSYkghYtrLnWe<1i6K>*Woc3RXuh5nL?0?r%f#|eu)~H z>P)=YnuRxZBw;=p2PaLT(GNHH*ARPPOmk-IlVwY>#EVNxF0>8FJ#t2W&O1QF zm)D{vJV4S^Ca=|+aX#Sa(#_Cw*)-H(wvNOoGwUqp^a;nCo*@HIKw6ieUbe;{Ic8>V zpxH$sN$dlbTPwU@u9Gf-Jki?mTd$t(+a`M zCV*AmAKs3;6B1b{mW%&-eJGC%(L+lv#)i&U3!CFalPBH2O5MJYeKb{ro<;?|rs?&A zM9kKLSRnBM%AiYqeHj_D%S`eN2^U*^2*(;@I@<5U>6=rxct4Jfjcub6md=$R`0vp8 zsaV7gLX??EQJ;}^wnt$*O^0B(b7t2%`c6iG?UURxODt-zieFyc-Ux_L9>;aj-z@sQ zKV~ilp=}FXE)f!!kX2^B=*ni@%}K;scZIAG#vw<~c+b0^vgaMcd`1mMR%uofudp4C zMag(%1_!KVax;h>MHSW%@7T` zjtC(dc$Gc+)vzur8AC5z@CdvqjqA|9Y;xNg?fZdcYMYJlL)ALPezc&1fC`kGF?a^f z1lIR8Q0aY`%fIA{osEAC=4C8F2XnZi4Br6ifi2Ib z!`|p`ynbVD1yHVdyaEVdg-^gF>XcIrz9DVFnNk`=fWzuzJ$or1|G{>}%G3^;-YpF4 z#0`wQ@kNhHa~ebhTawy=0Td{w@IZ+{-F%o5tB~iYT5ii|cz^ia5)!T)+RJrO?6;4nwzDRs=?3llS)+#D~uzqoc z;VLWGctyt|3!%mU)av8x4_l<{Z^!~~5RDOE6WHbQ&fEHaLH~g# z5J8zKGkhb^P3Ga=<4-Z4;9VN?qX?At(=?Jbl}fNXdg-IzCD?g^Y3 zen2l|!~3X?xxw2GVH0>A zyOOsuO60NdEfVBoeT__4-r{d;MBq4Rbf6W9b)uY)Y-~V)}~9*$UBkE-Dqm9 zv0>1GeIB|hkY}R(E2Pf|r&4T%VqBY>pLp_Duzh@dsLV#w!DLsF!jrXFS4&`>EdFFx zLd_PoZ5zEQ;M7Zt959-^DD%)81A>`=rWf|kw>`z~p6p#-*f-H?pEU10ZUQpXadISk zLhRWiZBVxF0-)Xvx~3J1TwU~2NM?q$)v(y3VSN!t9nJ2tMofTXG{N-Plt{o0OGNtf zQc3*?Iu+{2Ipb&IdKg-NjY<5@O&rBTSb0}_>|s8#tPhK+<9m@vEjl%fW$ti#syq7T5|gH0}CU=NPHEbGSgSV9E@jUy-KL8H%P-8HrAkRSfiS?nDiDF02&+N8c;-OV>Fvs6lEN7??KEcEG?~ZmgHX9UmdQla zi2pqJXR2#Qgns^0mUKOF{8*`H823~!vVNNBStX~LTb7meCvQT z2uh~iAby!Nlg-%t&lJrP5F#pT56s_|f`StqKsu!ej;MjlRPXay z0JNdz+5^))Qb-i4ta1F7ZaQqMbknr|#MVh+Ym@4F-MLmgg}uOq(QH^j$AAgh&EGw= z@Ua2bM16Q2ey$LPGC3Bni3S=8%InQH_rQcSr7&-NTNIA?Sy@bbFBzpJR?QhGM zEW#dgJUl%hZzE0Cm zlLZ4qrSIYYhFC^ zqA{Y}*2sy!qD3zI&5_wcz8A2|4hRb~d>6TZo_WM)QhO4og$3XeK0AE5I1Cq1RYCv; zUHa%CGg(5=fflp+E*AnR{K0#e4`)N+Czf|~D$DB_*Vhf1N%9$7WV|w4`BvWvAMZY* zhsun+p{okcOf8Ja9{*>YYVDJW`%pWILZ3hgI+`?V9DmJ)W|AmkoTGEx7~)4uPGw`z z`i;lH7w{_wOMG}LhX0yX=eURL<(ojj9+ZL+*-0&0$cjtm@{irr)c@zE z66{%JT@cQeSOX7&=KcQBPF+M&GyOnfHxXV$>~o6_n>6{EQ 
z+7>?^k9AsJIepiIdZ`;HMB53;l`t~ZTp6IPZg-#x?%g=in`*GB_eYZk27cE zaNKOmk(bAzTn6(ld+gg@5`J-b8cPmGOuUAFjRNqI?0p-Iha&nU(@agys*%ZOkz}%L z(J&%glVQty=^IhUvYpv~ZbWJ|9SccLrlXSC-yw-1vD(HKo5vN1AI}_~H5dV+A>Se) zp4ql2E@(EC;2SFI&;-z+_@og9;%pHo$gwQ5ozM)3vaIpB1d5Te z_}!W!2Q*Q7Q&rI5aD+-7k(2Fx~%`%xx_1`>fWliF!$vmxZ2o@zF~9oW|DJ8IfUp4~VqQXIn#6-}Y~ z^voEacH<#1XR>yDBfSeE*6Zw4NMOKg82OBaqh>GY`@_9#9LNr8p9T;G(fIa(+JdX_LgTuqGz+x;F z6Wa_oU{4_42JXnKoR|w2E~GVPVRSpuI5&sn6+pdP(G~4NK=J$a7FYhToLnIp0rQ%h zUL)9%xC>vM@-Xc_lk|7!Kxu+@zlRbHszO9V*a3J`YY1mJKj!YGCVj8qgcJ0b^YPuX zceu}kXAb|8Cua*$r6Y#ItWyopK^l_f>|STK>U@5#`+-jby%Gk)kPKo;B>^@-TrGe-5imDk5j|j2TE=#9zKQDLQ8UM> z<@(V?xW|yJExURD+G%|%%K6r{%dtYNa7U)Bev-MO>cM2vg1FSc94g(OoFL7n)>jN7o z^005OcR;~RVUbSZ$Ub{}v-jgS$Gb}D_Y2`Cwd^FQAd&|?WN^t|(Y${>4$*q(-pNox z5^om(c~6vS-dCth$XW!kVs?Lhd89QdLy$m{xPlbH>{;NPE)B}XY8b9ND@{E zvRg6G>mReQEno4aB5!XU?$_)625x-ZjY{k0y06)bnos}L^{o?U`DtN#mMQ$8>G6ZF zF5w^~;?OS}P~Ru@e2NIOqJ-O8v4Y4lZOxsa6F$@?Vo#F)oaW^{4{F4O6__t%laGbp z#JMIqDDd|h)^Pug___bBzZpCqVWU-!qul@?B#uMuJYho<$}}pn0iW)jnG3C&E8b=| zd?!Iq@EQgRbUDj*xq{@X-Ff$r5vFi8$Q1O%0DZxy>3=^#CNk6q6Sms!trYa?631J6 zv15g@d^T&BB=93kqKJ(q(K4pEpxaer@+(=Ov&pix#yH2^m3zIQZsjvRHO&~G2ar7Y zuK+--)CH49#jy4&n($K01f%l=QH`R>35py^Ma5i|E)9hmF*Z%-lZQC0tWXvFRTEr) z7x2s&lqtRcf1{+AMVwN{>w9;|lLivPrw&4B8*9AEz4^<6q&MQw9*jFSFG!syT(KH_ zBzfAHvcf*F{^T)OoqMafvDq4Y;@g^5mBTakEnzp@wW|ebo{0MNi6M2`;+IB~!ATdd zUKA4Q8~_sqwPX*$wL9?pEU>{b{qY#rE*}>x)~{GzDuq)Iu5FUxa@0a)1hZL{Q1*o4 zA5(zmtgjS5g4@nUKeR2FqJrEh;aej;DX(Aq{&jETeNxJRlqW%d4lC8L$p<8D)WSLk zOjnTeQ9QHbQK%o-1H7iIJ7gz{hStLCvLgqgg+z2? 
z%h^r+D1*FyoaJuS<w;q1a zty12lEpUial3OzO#SLW>z*~|(FK2@lS&HG;@RkGu(jewxK9Mb_dt$uz{`Y!~gS`M} z3Z}@cRM@|jp5RZ}Qwrad3~7$EQ_)ABK6Dlg*Ubq$dp~5cv9Q11ft?V61HNP_6E@9_ za|?j&|*qdZ@KkGNW)4}5WMLG>H!;b>v(?Z*hV?(uKY?h{` z_j?QjSGtSgf@oAyaI$GNFZjnAllr6}MNfaT{;pjr5`u`65sDxK_y$qX$0*R*)P^kK z8GHx#ts)A57!F7)&B{kOxE(M4e@-Jxi_iYQx*YS~Pf_VD$?tr*+i^AZ_Wu850oEj=f$B=XY>o;DmB%3}#s{I-19Le_|8@vuBm} z=p!QZAphP%X#mW1+CG^-?3%US)0i%pPGse2r|L;#sPCZKFZUz4YMmxuw2ZADgXdVb}vX`SFS-g6ILtm&hcPJI%{mH*tg)=)~ zHAd!dic71gaBGI_C_%9iMC7zsC-? z0R-;R^r%gP-(;UIcjY9zd`u9#V>VfuqyvoN@r!}}s@!|W@&iQ(z5zU2U7EOeYuB1P zaC27$fzG>zx29wj+Eeyus#MYc+kJ}Zj_Rv`v(M)pbvXN2IQ|~^1f=5Q|4N4uX;TRG z)74aa4Y~0xWKOQ(4b;}6mY#z5CVO|hx*d%fK@sB)wV;(OHZb(N7@C-m*q|P0_9W?9 z?~wgg(w@y)Zm7r1CuK)O#%KxTdd712DVk9x4Kh>=|8ca-J~e*RnmahALlZ{1{xKU@ z-vN_!*_;cLiEoVBzCb~1qq@_VqfE?{UK{CuGH0~rKgT;dI>-1;DT`a3neNl2up(*M zP0e7gT8>jN_vPtA63Y@a4i&aw_mLRDrL|QskRUI-MEyH)F%Mj2(YZ$)E^_Dwg|>9k z$>_W#^ib;gsBKeMlnJ^4^AUt*cO;;GV85~lUkHI2J&G``g*KC!06q*1lFflSsLGf# z+s5Y>%H0thd+2R}%J#N^PH9qy+_0!EL@d8a)om-juLgM;CJO>2*-O$XZB%`4kF_l|9772n96h&QIjnVJFq~@07n1o!|-m z1uF~rDBWhl!wb%$-{i}Cq7o#~%cTIv-Udi#?R!#>V({+YZLPqy*8a~_^HAD_iN(s# z%O;&%skYoR`6#)1ki!8AHeaW$@ZXHILwrgF&pN}=)6&zc52q*53TfBG2M0{$LJ`_U z3Q6dAqH%;{6k4+lc%yL6h1dywH|HmBWec@LZp9KeEf9j5PTj}GHljsGW7a=ZE{(0` zJ(|K<0N)|F+5)fpd|Gd3Gh=NVuzKR&%G{hEa%K8OumlNEVE5x_lz0@*Lf;Lr?q^>d zYjEcMd#7g4T+CcPGJx!yLM|W#z;S6MId%nYUCjJQu32+erOX!PJpz3XdR2kGA zI?kIRz6P3TpO8Z>SzfJ94BE@m*`q!s-!J8hru#u-J913IeUdvhs144}Uj~=P?nC6?_Yot2 z`+(Z_Erpiw<3HHMIl)H&HprM9Nx;hwQZmt*Y63mmTp4psJsry7W^+cO!3V~kb$Le) z*0iNDY;+MMSb?{b`7h)8#)tdnFKiYG>s27D98ULt+3V;~^WS2edgYIHH8ur*!EYmh z^kUE)N#EcK_W6|KnhkSZGa<2x0Q7wZOvZ5i4;9VnRwv&h>|$Pm{s!l|iIDRM^U1XR zLcW&1325zLXq1Rb0Uo~gdl!0cT?-Fn@ESBGEzLcmhY|5HZFG+MH^{2O_EoZnG3^WP zID6dId0c#K|Asmy&X?=gd0p59Vi81@6B3Dr_N$3D?FZNtR3JO4CA=`eY;>VMl68rR ziD|`|54&l);@rMXyp5q(cuO0b2g*G|n>nQ4fsFWa%!cVLA{V1=&4d3-aK9>>wvC)T zbKsQ^Z_ribnRijL`J>A7bnM3hgGvYJwPtv1_nmHYHhHEJ^gObP4`+CJnf^BFrU7qKvm`auh?_M?oo6 
zV-e6S0XimBRJ5=WI_KmmyQV3RKsy^Bi6M-50OO`ADhd$iuCBq61G3ZpTv;kbZgTF_ zv4MZuX;?$o%XlU7E)a4S91^t^I?1IgS*BJm&~POJ7A0l^nntnb>11Y3W7n9M%ETcG zOQ54=JvcHPIn~m0tJSdLv2T0d;CL&}&Q|%VX&e79ekB)Lw&;^wI=n0iHUUGagUGk9 zeFuHB%J+Ylw-t+XF7^CwNb$PJf3q3}wCByH$9WiGPM1zP`sRhfxB|Xz8u;W2%L7K2 z`QRm#1_9z-DqqozSHXs~z}rFlZQ6ayeQg-GCL0XEctwN9L4c5zBlQCqu&Dxk&d<{} zKt_Nt@JGE4RI9s1c|;%dJqOUkth?s39N$9>f|59hX$g?1X}nfh7kQVZ`G|#Ym7`m+ zSHPGkeHsp4n|iu%b8Q$|7C;IpM8d&AeVsy@uO{AnN~7iOeo>zxc8_9Mr$78 z{YHJ@A~MpYB$GZ-`v+2m#^Pv7O)No=M}BqBd(x5sw}1PlRf&6q;P`9!Agx1s>xZMg za5w4?Y0e5U=j_Iw@|!at&)$iw&i(~&b)YOIqQd|z9&ao&lRDNJK0!%1sCkJvFRJcD-`d-W0ob1GX!a@y<9%i|wnZ_+6_+9(|Uv zD06xL+~zxBFaaarldO054<>V;lHY`XVZi~q zM8fPaVN-uOr$o{R#=F*k z$0dCoLsR%m;+zaJ*1bkn2AD~J*t|0)hM9{KYl(Mx&Fv}#CLY;_{ptft+p7fnPIN#9 z=D_Rr`-U~Q)wIt{nOOxtZF0=QTp=d9(y0?W!;Lptwp7(%bSIxC7?dVlnCLj}43oS` z#%^1BWls(0n8OdjB+@MWth0r}&(;Dx=YzSxWU3CarLK+W|WILdgEI0M9m@%DWxG=XOl=y4x>h{h{Ee z&05@SpcM`pdKLUaZ`@7bnYLbKNMQ_FHlv2lWnM!M#t>M$7;vM??UkiW4h*Sb z?1@AJ@3pjN7|e8TeYqY8x^lZ4YLoC^S|K5$bitEFz;af2j;t@oU<$qtBbV}cgBn)BxCa%(mCMj=zklIu1jL?QLA*T%sEkoW zfw)6UQ-kGEXzIdlhaUKsqw4c%FEmDL?T6nBs$8*5@N52ROq>2RoV?)UlY>*+nG(yL zP0)%s{?n{QlAO2&)Y>j@#k?Ln1q8WW}e5a(V5GLe@6<&lp=_j zNR`U9fG7y56B2btbS1Mmu4}xC%cIAFp!;sME)Gl>KekR-aEJO z7B+`OYjiFW$4?>t#p$;+hA`;z|9{QBLutIf@HDH>{k@3!r8D;G535%Qs5?k40w8vu zt2g!l6juq-@S;++u? z=MrfMS>EZR!R+FU^)PD#J4o9G}_3sJm_2&H}xwDk(gA8l! 
z15f!zHO6NqkxGh`sR#*)N=g!CY@p0kN+@G#Kt$^QyuW4dbB?|5|9(FA+Y3EN}sS zy2M4o4C-h?$GqMq2ArzT=bA$8ZU*n24Jfh}y0(j?#X^&3LBGdv+;mTfN=wo=RsXR+ zdz3UWR1W`X@B%cPbpeLkif2fo?~N7Powi(m>&WozFzWla-?|PqY-p|OGa01t;n~5%<}K@g(*BiVHx+mGg}P$QzDOfGiDhZ@BIRXF zhU&JCtsPD%ekG)A1-2}iBTZNDslbqWD&P`HA*)__w3?3_hLJHKhTv)FrX0;KIKLAX z6s$sbyp4CTDF>^k0T(~Ujfh{SGxI4iesGxOWlR)o%mfOJ<@5FII{rDym7Q*87{&;W ztG~lQu@6fIZ?0}_$JZcC0HdH3IIFYPp0@_al;kh|>4H_AeeJ;O=-8#*7E(!Cupv$c z0>Wwmrz8IAr3C0DOvp!pXK;G%4+bVkTWTnGGA6i8K637XJh*~iEtEV|dtrwO6s^Zi zB24^acw#U3@%9hqA%s7johz}zMaCu|>@>kcrO0ts7* zT!VcAR-$vxo9H@sd-`LO7RNh<}tgJfq(z{N=DUI6B#cwQj-xK zm+646R)?7sa!z47gfjs(F4-P*1oCN3HQ2llR4wo&&NsPW*gf=SSSoPz-(}Q+d`Ue} z9){iy>3h982XJ8mVlZT(Yg#gvyHu1su4%J} zl_)w{9L3(LJ3!Xrg7iaX_R8penIZG{Hu{}YRxwZ9K0I-jV2{usSmzsJB>p&m@f-E;vZN`ftB@P1zKTEfng56lc}eJ)yA|VApYD zx)kbkUj*dhx8|KR2O$?T>CC}+sr+n!Sm^zO!o#tB_g5BwPClB%Mx}%mEdC-`EojfG zNL39G_?kuDiP1;i1-H{DImg}b@Hf1S)JHj*k`?iP5Ozbfrl6ELxq3}GkHP0Yvr~XC zEOrV*yXY+

E6pVEV)wh#c7Q1dV?b?TE;c2*G+oU&U0Ld&7g!l=`Ze8Oe&jzxsPL zgYR$#JNZ1x4sfMSlE&Vc8iCzb%UUpRGDLiCiwRTJROtqkEbqaZocnerj21 zDTNEb7il9Sc|i{dtudP1R-$9z+ns#RcL@19>)0K=7t^n{bzLL8$&MR=zlpas@=YoLn>osF$$2ZQb zWM}BgsvT>t0r()H8Y{0ntg*MT4TEnh=(ZPPEJZ_)Crz!z0-HX0K(@MaZ8xMQ;L&OF z#$LGj@jmxoYgN5)Ee)mc7#P_MESY~#y^jX?-SOajq1x_~iN5b0Pz<{lJV$-;2~yEx zWC=aTCAXciXiFZ-J}b_#4!>swehOVKP9G>}N1F@XDBN+`x5qP4f)?IL!gLhgho}>Z zKj1vQcJS!5k-x*y&Znc$`Szlo?g$OvMzq3=bCqU~)3NJnn2D|vFM?_r>sVLp4;Q3*rYEslsQ)IL zDT@A0*p%8jzA7Bv!{oIgL&plbN?NC1qCwENfR-@n$K-yy5SyxUg>qfR2L&4*MM z9y}eJU$}?*5V#G2D8n-!)H}g2LIq}fSUv_ML2g{MTO?Bope{fsX+B3Mc8$^>z$wFM zbU?d%nIW)-qicE7ZRDYzf#ItN1QuW$>s?)Y|ERry@IzRllj&zVB5AnT7ZdkczfPl` z^gUPzu7Vm3J5Q;JSk@mN3tEf$i9lUt4s;N)6j?#Ne4{d|BN-}_WBtOhiI^wphb|!- z3$-Cd%E_OcWc^jXX{PO?JD^@Nn2}Qd1#!rBI46m3*o9^aLKAX(KoQ@7`b?D+*n?Qj z9rE4&`6Wk7M~8oCQ_N7r@~)aJfnVtv&7A24cWE40poWm47 z0EnDYgYejx3qL(8A3QL$eF%(if!!b|uPtnZR%6trIAD!1aJ95%#f;7`_0%6{?^sf(nm4mQk6hvr#_e$W46d)FF@b| z#)CLUS%LTZfCTdxVL-e>Z!X#il~ObMI(+X&4nhpn*USE~hlYMIh1rhvE3d;(PE29f zahHf4CUnb@%W#=!pp&*9xC{zC5Ig^bc0wW`nVF6l&>9#Ko>uTAL|nau;ahrX2R_Dk zOoe#3AI>wSX#1w9R;SqCf2g4W2k7D7|828aQ7Gg$u$vUa&!fqP+m8t4KayxJTm4S+ zHl>bg<=<7MzizM_NH(`1>*rp|ULgJAW!r2lcZsq^EPpCmB|}9Tw_4d_g|HsTC>2UL zt*!^zfxI#C+ZmexVUeP)LN-0)cvnr)|13-}o5NhC--Daq1F~+ZIM~b32PQ(pPkRt^ zGkc7cXg0B^sk09tK#~-ML@0xV!Nst9DtzI#BH0z3Go0oSG-@&-XMD5m8J1SScQw5l zXikXV4(OZAAL~Yqi#_~7{b+BAv|wnyP=_o2yDqZWBSR~Qi*cOwX)rNXQh0(~tvIo+x2k{sNQwa|n=d@_T_r+BG02SbWMD>X4f30k&$c7~C1w zUO8One%R^fIEFh^=On&PlqGO6>sLBxU23CE+72RWCVvg;prngQoPQy$xmsY zaT%wt&-zNnriH(!Kci+6u1k@$Gr_G{Q2rpgEeDgM4SE<->F+Qh%>s|NqBVs%Lwg=l z!Uml>wD5;@+mNHZ0Rk@{TfDUkH7guiKQ`dJgH)gB70LEM#|hTY=?e|5 zteNRL-`iy8uW9$2L#s2O(UR-O?whQ#N2f~Bbx^PS=XL1aGeD)!8G50kJE<{Qu|tmi z-xPX3EO4Aw+MOYs-x{0?_!1r=*K7$3dS@aJ(ER++ae=GeZ*&bz4wu9My;?_cV6wlo zJI}cPvYLd{w(U$R7}@lY>oPg;lJ#ie13{mGFCP{aW=s?dEHHAzw*guun@sO#Q*T%W zS4FQ|eeQczWNgV5jAgEPp%6FGv?AFFek3qKxj>F>pOBHU#j^9kH=jozz_;mH0$Uz9 
zSrM{;E=v=%paq@0o#nBdLFedK3)?%y7aU;QYnxqn{t{Z&nXU1{s?4#ZwRC2QOr`s;bXubMwl7QorXK&{-uWVQ#PLS+7@Gs}>!ZD? zRfPyIH?hv)FO+6hy6*=7K^3%2Gf)gwSL7ss5~qtTBdLGD<3=E`-HGmPx z(~CHes2HXqSy)iukq{X)Rvy0#aG-_MAOuQ3%-m$KR8kY(eKsC%Ci$)`MVntE`*=vE_crpM2>2P3?un=dLHGGx@WRV&5yWJ{C;l`eBtY zz|c`Y+>=h8d_VSJveVm#DU~VhfAYosrt%Yc_Qcybk1`!a8+0UnZ{pCI9we?8xud78 z&jP?hZUAo&b?AI(YHEWO^QdHd-K)n{`-|ZLT`LVGE2TC@@oLd&`PkL3q3v$Z=+Fhh zUm>~eK_Vr9eJFREFoI{2EE52}Pt}R` zJ$~)aKEYU-Xg1hqa(df&e333OG6s^jpw%m}A;>KYSI+~BlUC6C$P-;CGr2wn8WAEf z-$q|Sd2!tI}Gf(9y`Fzu|Dp3WcsGSWWC;sK@F*NvTXyC3sYiQ@F0# z*P4pW7`Yb1h1sNMi&3h1UIEY`@*G=1%?N<{Yl)+xHR_4wfsA8Yc3QB`I5^YB#H=Kn zRJ4cyRP;$Ah!1SYz(pWl!5_oWM?j(+6jOSnl@Vq~uJEr8*)x~UcH2e90_iVhhv$Js z?6*JxqlnuL|74RZdV4se-z8CKJf>vX&$R&EM~@yYZ<#;V4bo{|9#SHEZeuOTS{7*5m5ow%W0BPI1=p3ky~!OWecdT z`)pwR&TztMFY`*V#5!rJTT~vn7;7mNLBC5{)Xufu9L+XGq2Rcy`nH*w*$+)Jn41Z! z6cUpe>gnmVc;%qYZ6l&4?B4e4bw7Ubf?-etVx1r&l3VVjzw6UWya{ApPM0NzFKuXi zg2H?m8JO_WmH`VfAlx=K@gVWRAxj!ewcOm?p<9{1PEXPJe+8V=-nM7uw2HyI_=A8(H-V9LzH7tHgLIyGM8&{><<>Y zJn{rZ>%PS%+MU!qa6=T`Q?3~3g-uZ}S^UAf(H4htT`Igj%q-gxO&w|0-?|aIY|TP; z=31_97hXzxDX?+!-7iG=M%Cwk%_>*Vm6C{Rcdt z;N761tz&%i`$y2%i5Er3dg4jJM!8RW64?)!12VF4&;@v>*Mmu>GkR+OY?T8q;YFPM z9VqPs#YKy))Mr|TF2$}Z%jhLO6_wNKT>Zd# zCUTF0@zscP4O7GW&sF<&aYnC;hYK6nCLJn3CU4zZOrk&3{4vSvC4eiy5f}q4IWUru z3*-6KgJmsDv9v;s>6w2wZV~$p3Gf`0a>~OIYL(1YLVOx>5%JvLqKFS5qxt1tC8;~1 z0if;_tuh+RxrvkPdP8J>iTm+L#mChud5N-raY2k}wq zz@%f4sSU!75`x`L3{r+KpZ5Ar7b#n*699>B{MKkG?tFT0a{Nv0P0zi3|)hD`^wTs_T?b!gsZExC8o$D8ivfx>XvCobD(G0UXRa9;!``I7wq(^U!f*{ zZFZ|BY}|4g5IDk;U6{nETfiz<@A}u2zqX`E4MQJ@o z{7G64z@Q=Ao#9pTiwv&5CtECup*o8LdSaa=Q8OXOogZ(--pLfB-l9G3Oj;T^cfIVA z>5g#3IkS?HLl+lQXQV!JyFzQ2mEf~mz$$yce-gW-Z5-y;TN3C+rq|>;h!u}!LQNsF zq^ZEFnD&6+>?y@p7nULq4heu>I`haFDcMHXh1vY(zJQXZ3ziacKbNK7`G_7C&Xnt0 zz8-}p4`GDs*GOE!ci#d-%#m%+PgE_OH?O?(%G%T&&n9wqF>|J;_i--o^dC@_^VR2- zZzW1;j9Zs6`b9t$kzcs%_$4~O6Isi?JRB-J-BBuT%L#tp`zV{8aF_P_EX|_+&w8?P z(H}UBO^|+xd6~sAA9`{L_XgT^ru;P7&g|{S_*w>+2kxUkCu!;{mCO!WXAPHFD8Mjg 
zn2^u(uGdK=!z`WU)`HiCCyE?)eWaLWgp!xG#6~?mcvgWnhpdC@EwJ*IW5gh>3gJa` z`X-;2SZWJNlX7FQCBoFSC9K_l0ZKu-_Jj`XSka=I|KmJZ+_eohFF?*2XL7m|0Id!b zS8>I6W=(g*ZupQX95rdN3`E_lf#=T?_uClJ9Fb=i1#+>`PH;lHYG3kOk<*FcYYM}UDT3945z2X)&n`HwC#N3I|x zypnL^2At@<`V;95U>%#jXGDYgbVuO#seACPh^a)Qxe!CYQ`dNyfcxm@6%zLhyduTM zFst!pnx!`_*KoL5>uV<~IYd&GkwBjibV*M5EX|NOm01-Tm-FSD;JC8*kQy)XWg6O? z6@&)YR~4Gu7UI<7$Ruk-e+~pR08#tBq|Hs;r}vd|q?yb}3k!<_9!~X9EHv(vYBH*T zc>&{U|51P!U3MTHfjKPhyBkrZZTj6cXMo7M+I36jAyz$vSkNKUi%m+PouM%{p?nX<&q6DqdOy!g$!7ZnGbPsX@ zJ^*Oe$zyXWqdfFYbJDSm6l!V4_iJZ}Vi{%|HMA zvll0Dy)v&Hl#=X-0UP5X9)uzv4;;qv4O_Qfib=O`T*N<@Mkf|&(k|lA#EU?D?!bc* z$A73$Y5?klaLVEA@=M2#9isuhUX}U1>YoPPC3X9d$ggPlN$pEuJZWYI+7p=t3pSv5 z#C*%#AUtdVL-36G=KebUo9T4N;kuF_spz;AUyWCjw%^5FP4iG=Y{C_cDyvql!gF)FkJqeeQcVPYV1xBD7IC{87ln<0-y!Uo zZqwnPx~Fjw*Ay;CtRTg+!3AK_G$`RCD6ynphrI{}eC|G?u_L7K{JC?pWo2dC%^9+Z z=RaP{P)J9xFv0UjEOhw#tB`aG3JI0%FS(`&Vw?k&zdU5x+zw0~O|)^WFz=!v%0p zv`QPZqiZwLlCI%sZw%?L9fDdr5c(1v2Or;Tvig;gTRU`tH)X< z?Q91fe@0P+np(!jjFmISqpQbeI8XjY@WJyar$E8M-{M^)zH z85>U?o-u`Svjr{h`}5GOq7LW}itj+fw-I!1@0gxH1E=oNtbvkN^ukxVLU9ZrX}uDk znY7g8J1rBTDKFNA1uOF?6=Oh;#D*lze->}fwJ@@Y^5jI#a(!RkzVM*G{pV**YYRXfwa^*%R!yf~@ay%An4CHiYVf@?q zrt(UwckWBlA#~D^f&cY?#=KDFW0^5?hE>#V5UU7LfzRI(x9kp;2@@wW793<^Gsj(e zU2vb)=ygV8k4#gMh(^)V+-X%m{>Bb6xfIVTdYF)`&MnpVpM&l&5dI5W{9sT*!=U%rDN&!@J|7Hts!oq=5{8Vu&xDY%gOyFx~Ox%a^I6 zeFI~#AB13HeSnD5LMNdpM9e`0cKg8B`n2%9Q-{HTpb~Y`aMLCkn6kVuS8W4%A%{mV zYgHWdkXKJN2*VZArAHWfnAuI*3zJxH0BiJ8WIH}h%ju=<#Q;o_$hDnOCljM(=U#&+%7R{SGcZ~Gv=%Yhe@Cw!)Ej@v`qp24VoJ>M> zFXWw$=OipspR5|e!7P%bJQ4C5kT3W<>(+JXe zPz?ia3Yt5+MzRD629{kPU&1!hk+G|un}s`89nq9 z{mtTAe3}CuelZQ_WQVa}UJ&7|A_aK>>oD^!J-B3uUC7NcGpxu>< zVqNQ&d=TMS2r(?JRy|wWR(Hzp^dIH>?CH}|B0N{jV=Yj2_yov_p2Jz6UYAdsld;Fd zB*gveNwa3N-@O|zx~(o7mqLk>Tkb7BR9I7cYu-hq4z_|saq$fg!1GA_-(CC-HQE$n zKk@+Uc!EJrldm5E(xsU>K{L;48t)A4b_Re>lvSL;vB;8`uHw8Ty|&WpCINft4X2k1 z*l5;JzXR=pAyG~xfvyEJbyMkQPs9=&=!ST}lJ2)F!=6cb>>rB)Pc)W~;`BZ<`xOPr zw^l-!)kL{DCTQh=Ecy6@-9-Kxcm9&er=~OVfk@lFF}`DtM53VV+xLC>vD_&zPh<2f z(0}odZ4_R 
zvzzljK#H1C>*Nv9>whsVNwwC{#@?kcmT z>AJLp)KmMS=FG>}db8Pg8Amqon32xyitSM^wsagBv!ECtH|rCaaJz$LyPrINUJ32Z zwWz2lt%iit&H2H=3zkDV5oFl4x*y-He1N49`qE4@+;D4-(OBPS&z=Pu_Mbs+MUGW4 z_7QVt&eQcPC=HPRvE=KVsR=D1 zT=A=0F)&w^byf_)Pj;<)??4Zzp_SCwBCl8IunY|i$yQ(;VcO`eX%-|UfW*>$hNzSk zSsuT?xaA*KxW7hC6?|ukf2Lxbqs|AGEhmprTrti7I0btj3A#s;Vi#n#x|sL-ec#6Y z2C}Q1vAc71KUP_7C*~%N4BH$$K$2MuX9_Y$HV}pj+KUbfL5$DX_w4cGks9@x*LKT& zMk_{OE*%2s?Pl%@mkpEDCU*_SpaRAW1Rao!n)nC6v2Qf8Kl|=JCu<3>;Uug+goc-d zgqCIeA`c-eE2|zJ9<=-Bzg)((oqs&TF=z{`*u5!n$cVUllPhqGp?n<2V@WWy%9zDnOr{#Y~e4?ev;55B*$1A`%mhz&qP z;^X4to>oDiLmWF;SCj%sybgZy287xN2-Fk`*~6iX$+jd+vNW@Cg4|_mg|4;wL>+Q? zr%uZvQZy8Daaz)(0tz_)uKk(n0!xCDHB6%Toj>0h>*eB3$8;_Wna z;UOR)9Imtt9xjqzpmt5J^n%2@7#nSiw;Nyg!7~>Fymv0=Ll?XiTi@JQB;SRY1jW{FQ1u{Qw2CGv5TCdd zdz7!(lD#pY#mqAD7<%j`M6nbYhJh898!AS!q4uxq`dN%881hPpO^hvAD%80cuPE}~ z-8~aq959shIUiCJU6SLM(C!dxBoMVEe@VBddp-RDS#ZZY}!`!+!@i#)0JeuLGkkrv9%(y#jW4vo4B^L?rrMP%Rja;J$ z4s-`~x#rG8PyV=izy3vOX6(2Ic&k)EMxRtzs7)DgP)9h14?VpD0~56Z^ouo5yfL2o zW~^oVj{M52>!y}PB1+ZChG0@DUysBj;tw>knrtLh? z39P(8p500P9Odv&5NB6G3pWGwE6FN+a;*H2^)&dBc;LS`Pu`rjg4|v+Pu8MoD z=J9$X&n=y8Q67r^G+#hYsSlO;{HzJ|jH$0mbh^dE4Y7#f@SN^_p3;JgYOlGewJjSl zb0F%Gkrmv)&26Q36pM}!D>gGUkMS6V0OWgsU2%>TW~^5S*9{Hz(Rowof#5i zKp(gW9Je=bq5EZw5|t%)=@I8!^e^{UAP5Mb4JZv@sGY+M(zP9jxuzztkY3#N9Mx9< z(kah1zGK@RAi_q8ildlF1kp(ZS14&0=Dr5=kK5p z3Lo+gIpZw81{fhcE@^#-2I-uwe}|vqTP9Z$WKmQ+)4IvgpN%Sp_ghP;Kh)Ml?A9jQ zrvaFIIB)^3+6oCNTI7yI8?Kq)uDQio<0)w{e*bJzBVG=4_0`S#M*TLn>pD`VT!>%V z2Pz09I8$aJ1OSu#lZVp<5M@^`F=2MEwD^zV<=$$L3QAPg!i!FJ0}rjf)_cvxpQnwa z)jO<}rY~q%qKCinixRIqHmo4j(dV#@GA&~$yAwxvuJ(-3d|?jRnqZt#x`?9!SLFg6 zJuz-xBMSqRZKLJO4}-HLv2|&$a!J^?A~p$vmwE_8r(o?Vuvr`9Hj3r&(3wVVe|@|a4~t)zx4?y8O?E&%SZpN*=o z>hdqpOjeQ+7U*8lPwjvoA?k2F%pHn>?W>V^w9e^tL$3BuGF1#h_S*(*bF(nG*NE!$ zqIb7J31Msn5{<_@4e`F`WB(d71^h&K=-JTF1R{}9)(^UYJl=C-J?&dZ2YY{ejTTxU z3)?J|F4B9$=rJH+s?f#?J0jMCr$>>_<#^@I+>k{y`iMF_7VQtbz)3~+dWh>Pp==Ah+E_V#*thV%@gR9iu5Fq1&w`K|NtvP*&WD*XOQI!8? 
zaO_UF@LlIA9n~n5=^t-B%Wg^t4Zqe?P3jVGGOe+!$I-iA`{=3hZweQ4TMZGnw9&3C zvk3wEGM(m*Eckt0VO7C)B{zKu5?q|``gsxywJ zuffSp`O=_5#zjU>vntCJIaqHF{!tgk`8HhG>y^1?M*Itb_F`tIQD*X3y-k|{h=~xZ z>->(J>&fMg?=&AoWmT{xNSN?<@lftjE-o*w?Q+*$N?*aV$MRwz4?CmH(SUt)P3Byd z?g{=Al)M4dhmgGj_Nn>Gtlhw5m<|rEAqS45DTSme4o$4%6zcUobB;3S-r45irw51< zqZHG_@`X5L-9=81TA$;6k(@(N8<|FAWPp9)JlZDhM5 zXM=c!j#Fr>iH~YD3rF<$viGtGZ@uB0w@7Cm!)FjaIhfDz6dByC(N6lunp}wOjwDeE zC_t-y%#Y3P*Ph^U`Y+#g_mb4D_Rq(1baX0^Gej9il>8G0hpq4QrJ0*3 z7m%;`%Z>kC+NryZ$ND)viEQBDpc#+LBD?ICzKbb_UI3Nw?1_X4hY9Yx?DY3vMK2g` zRfB(h3u*u?fDUtGV=0xIN7g^Bo;VLL+HoK{GCyC!-s{L0JUmqZuNKDd)0syT>wky} zWrY^Oz6AgsiTq^<<oQLp{52Zs^2Kw^JfTADE&2)~_CGAM(kIr+?E%H{W<>D%G` zadDEp6bDmiHyv>~wb5aS5dWME%PB+GpW8zGeyow;p2LaY*pD?!IcZrw_=;fJwkh2? zenDoBg@jX~>^HXv;n7s8A|>U-rXlhhZiytEMNbUfLEP=zhWLS?w`JEhE(8~@dN@hu0nCojdkaoX9LJCVkoENB_c3C}c(a_<LChlFTdMc_A8)CVEGr2QH1-i6 zPqNw9Tbko|K4Tmfl!Oi*nSY^j!CbfFV5Gx!BqZGaHAjE z-xfG4ml3`REhgnXtOM<*(YIOD@Wk~cW?-O`E}r(1SVr7Nzb5Lx(*r}q+X))XdSX1! z$BygA)#af>|K;i;t4uAD;K#bLzMEl(^Q-f!8i&VXR|@#5=EkQUeHR;Zxa?}3s2$EN zlfMO080UkqM@ctFS9b%{pGr$6Ekt~H08zd`|Mc#@J8!OvsUzMm9QZm)tLdAlf69PS z4SD3xim&9K8tc+P$2I|1gaP2-I#{^+PlH;c#FoO($OW=}SU-Xi;K(Kpq>NQF&MH6HvkI%GGCQ)9PmtlL!!$2Pam8dK25 zja>79UTg7}w6l}nL>~u;+*}UX1azSr$+bV4ZvPtZqO4eVksC;kj*d6M%Gj{8d+8ZW z-CN1f4I27Hy!Jc>B)WN5&3vX+U6b(Vn2FXP31T|C4q~qs=~fIo&M1}1;(I4Mnr2`XAt%Kf$0B$C z3wS5LJvRTg{Zc8un&YZl?nw!V2yOh@>$8}7Rn;5D)i#{NjCB>lyL%4N?vREXg3bsU z03Oi8Xcc9p$5)n}VF<6y6o3N}0$W~63T9S(lsM5|dehrKvtKo5hc z@b?1n!7wvN0Tq=^vZOlfIn&6wCfT?LR`iX$tn7R-4FrajG_zHQqh zeuxA3Bf3Q-Ufz5aMzyeX`_%>XP(3``NKZeZ$U#*Hf%RkaUZdmyH(uH<10=mplB|Gi z*U~$m7KxL+DaBq9NzTy44>#h&hYu}yz{}uxfF1Dc_zc}sZ{LlRX#P&+A0M2ACJ;BR z|MYnqis7ZEfhw2^=oDh}mKv%-@Z@+(-{a zSlx~rMjil!sdRHx%ha79fx3oC-stTf{ADsSGNa9y7AEczQ-5wY?d{22_!72-5W}!!Zb0KAEzgun0sS>7B*wqzyN0h|SJkLDjbx z!Nb=n^qdS0QB?1UR)yoN0(V#Z`{UJBcks_APM#F3mLuUSUmq=PBiG}+A(v12z3(48 z98C}DxmH~m0I53yC)Xyp$(;%s2ktM%k3{S!n&JHMzZT*fhz)o!V>Hj>#I@ z@uScU4wR_vkA0awR!;+L%_L&~0)Y$;?bx=WzOW5b!FA-4%l~{s8l6aB3)WyoF(m6g 
zvfB<#Nw|?tli5zSz7Pt3!sd^iIHIWQ=(OXbeEUA_@wkSzPT^2e`rlh(nDq&5&9dOn zg!zH3nHYU3QL%pkGI9j%6;g#jhDHLci`J+Il3UBuM~wD8xtf{nJ=4j8GqQvOlS@JTxnTZQtGzF}`?aJc zD~;q%{zWnwqxeP)gl!CyWLXjOZJg(;w{7YmT>^^c7RnZQ@v}>$4Z$>#En(P*BU(rj zfxaXg?XXkmYC*y-H{ab|0SG0>PU$qt)+`R0;9xvd;Zz9jmg}+tWWz04HOU62FQlpt zY4;x8N_0pBGZ@w)m6Q`7U35|3p)+mBIN^gK`e zkxBr11Hqiep^Q)yc)(o88pvBXu+g>AoeXC19_1X&TMNmS{cEr(j0(3$ z-`fd--}m-6V%`kVfX`M+PA)|sLhvPaLPB>b!2xKO>{U@l8{&*`A*~bFn-X#|D(~1! zaLd<|BV$r#uPs2dwTD#-XE5Mc?jn4DL#=}i^oT&bT$!y>}c7o=APe(x%nIB628qL{jYFz~hd zE*|#wlmfDYcPf_;2T1C<`dR{6Q{Sq!I-ilD!WmIpSl^e4cY)HLuHkf*6+TenH=nex zBc>kGla6roD13lDcfz4eAV|dD)1Z&a$H(3%%UU4h>&4!InMxJ((i@I>N+{}r%Hl~g zoRZ-(X^lDEJV(3p$qB2EbPSMb2ezM;J*pULf@~d}p4Jnk9U@QTfP@M?ruFiZ_$Q#F z?q=SaO~MAadSgu3Tc53 zs(93GbvNdvS^wG$1}0iX^WZ{~s{FNhSJLOwDh0@(t+#3Zw4E(*Bh}NyUnVQ5r1Cu zO#Xs)@S^2-@=s2QShB1nqmjB~Fw=9SLPHm^dyrCtO*%SNBEeu^%F!>}&va)>i~i+( z$SLxQCw3;`YP7XP3Hp9(b$2GUd_Z6`vHOm}SWgPbV&e6#k#t!JmFxpu)gRkIfM|dp zc#Bb&%EF&+CA77(1J;S@QFSsFE=EBk=aoyqy!txEM#qlA4Sh4sHYwF zCdGz*CAt$4`S3K?tu)Jf@`RL4$R8?2^90dsZhGI1SBhuTcD|4#PMWr1=(r;=#`R9V zZePtjG4lIx8sUd=8VlIz3Li&w{bV6&4(tUtrahUFE(Omx42G2;Zm=p$i>z$d!*t4~ zl!Y_V4kX#S9vIH;FhOG>-bAkrhQ$h{@}ly0&_kd9+(%D6YdkZq@r^k8&*A8u1ITzJ z1S97==u5z<5Vl&tp)2}ar9+#x!H>AyKDkUkw3eaP%F2Aql558%0=Fhj! 
zl9mj$ni&5xHwQNiZ4W2mhp$g#>d_H`m9VQ8+ghtF)tfn0cQ_24jey5ywSBYnwmct0 zYf?Ov7{KdRd&n)J$jgEZhR&Qlz~vE&~M2HJ$-m)G{$zQZ+x(4_V);TD8Ni5cZ4ny0HFz-p;c| zxtm3Bojk}I*=0BsBr7FvE$bVz#UToKcTqGRW>#GhYN}dt({o$tHo4K zVea?&Jc~(O$6@)9qP{+&Q{USuN3RFVM`=o=i@Zj9xi6L(d4K|ZR zj9wx55;?!Nzl)96uYm8zBEvQz-7WY`8d{nK8M;)&?Y1#PG$LE6tW~_XqiRw7c$_pp zWUXa|GdS4)A+8Y4WpXE*T*x-HHW2<`x?d{ZZ+{5H-3?w)PhY$clwJr==fBuEXyg|V zp$H6y0KF~gMaRZW$`7x!)_pPZh&uJTpP9NGxB0){U4s;6XH5VQa2f*yV57x@ z+75FLb%w}`$lGx!cK1gtUl!c|xR_R@^wPH(2@7>&-l8}U;MsAGZlc!}J~IOOkAUh1 zck&K#!J_EE`oie7s&qNs0Iw8IDYfO`dV!5*yBwFqBB^hzbV*_{~NdG z`~LdNAvGsezb;z3v@a5pJpQ?s`KL;tdW0>=yk{qtgv{v8&7tKHt&JhBpRBqkY5kkN zkWr6OmbVg;G;7Y!EsZZg4eWxhPBS2(-vqxIr>EtLdKe#C*y_S*H{Ah{A7erdk}0vH z*snmKB!{dt|C=}*$drSSe66c9Lj%XdwQC3dP(_?7bLDac|9N6Qyo?t!nwSe1;kjX> ze8dp!w&31-A#jUC+Ld%CPjrVGl=1xqAu#7)aBpR~SHNxL?=8a#PFroi_P>#`jTn5* zUwrvXeJ1oEjlRJ5lZ3J-+{m(;$f@)7Db0iyt_yu6NvPGi2Hp&5{r z*jRaX4Y=l%a?km|HAP4odNlO&tT~xt<1mQ0oBQHgPh^5t{^V4-dfqO_+Z^WC>|3KU(oY!E>N_tAKniYcO1K1IU@gb}9 zs|)MXld!b+pj}fhhPY%bF98M1i~Ijr+Uo{b9Knkvq+QC+&JO19Z*&JtlYoTjoC5kP zd|z>Io`-8scH7jC`Zp`6ii<`cAe}3qfmwNX_sRK55my0wZcE8AN7SL_!zfw`(0-0@ z;23IQB2@>=S`KW&8kMJMTeM}|FdTo_XO>}CC8oMjtmO0O%IjgYWF500O06BR7W`uEQtTX&Fqif^#%*Jy?#Zvc$$q zLWoQv;<^Xg5@N#+GoTg*LX#Ta<^Bf`FK%tDz)wgLqiL@iV9}}#k~VpQgLUItGo<1z z_Ld}RC9*Foud^^Ty&DEFr38E83WGc+wcQO*jxD-M7SEv6P>|>gh8jAdCS;3`I1Nq( zTrMeE;A=y{TySq8Xp`Qu5_#f{BBY9@$Qx04>oX`J<>19)v?Y86%WCqtJTSuo+vZxk zP-9}p4Wilv0UZMMy!>3r}jYa6Fb}kRGINe-iLzNy^u)G7&%Js#5GX6N*BG6FXv=WIJ7*Q@hh{QfLl^?*ZW* z$bc3|A?{4D>`Vlc?}|JlQhA{fqo&7}+P=5lP>Ekn^LtW$iBNE;#tDEAz6oL~t~VT~ zU*0i|_G7R*PYv{#bY1?ASZO^mN^9VpV~82ZaRhE)eRqQ@SD@ux|##U_)%;e5V2jK=O|9#f7Y6+H28F<%F+)+x?2nq z!liF(P2nl^l%K2YbluS~A;HRa6|ZGS+q4nOss^+Wh&_hMxA9Gjti$i=7mX97-Mik< zK0B+k-`QcBADCH;z1zxH)x-jzH1W7WjNZx7*KW}6%s(jihmGN`y=T8)JclSl@<<{Z z5DyS`&YeHM;n^N$1Fm-g%$#f3NRcH2(H3F8ID_Lj<^+l#4lh#PYA>@@&?AF~1>MY{ zQa*nWw>ECRK?KAHS4%DRUHUg(6uqT@0ljY@QVOXo=8Av$*q4c82Dgr8-L%upZ;3K` zYl`Y7@K_$f&I$;}f$DZI|irjU6WpkDB 
zngwQObNo^h$>u-Vg*!qR&d1NIzL4q0A?8U&Wz~4Pa(Y>42&qNJsw#0wbek5c4w@+< zaWc;6mA+O-SDC{03=2hC!)QDC#mnhBk?|U>UUd1qmI(lrQ#n?Z6Hv@@dLcwBl&J;+BRVKPnlY)mUV zMKfW&+{q;&#kb~hL9(a49U0A&VH4#g&~a^J)}V#z6lN49W{kK#7WdODBh<}wRO9IA z)5==7UtfE8<~7{+_gB=%eDfOz@UIx=f0Xs*lWJ`WkSG22jT?(%E7{%qOgAF`$@l+K z7re5?d2}V`l+MTlIDl9Z0b48ZbqGix*iynf2ZPM_Vs}h6qmOv<<_@1>43#YA_U+sK zmkwAfykDc30_h7Gc7O!eRAjdfLp4#0(pzG~pa!v^&YD?86G{)(BP^MJ{|=pcQvDVks8a@cJtKxZj8(Z+0D&0Dfg9#uQK$Cim5wq&!nV>@mb5k4O zJbhF&DEm0unOuu9PzX0&w)|qIupSALRps$xsDt9}HFQ z%9!nuQs?KnN<`Mh%djvu1XqcG;{@kLT*eEaRqJ2Fpty-t(dA$pPg33e`VIs9D?>0t z$HglTb-JPt%`r67Km1JtW@8|NswEmA4W*hwaX5hek{duPxuWU9@R+9QB+IXEW}x|C zPPRH!!c*LN9yqa1MFE|YHbjM>sKmmK69Mw~lIAjrx`p09Ng|6Z36jWuzbL-mpwa8( zXIpLGDh{*a@zi+cm5a^EG4|CZzQyJ4*vU|o-|npbeQ;S2H&hC@u{|4u4K;zc0Wq2} zKNH70y+Au@2T^jJC4Cc5Z{I;@oGlD)DZQ5h3RuWB!v2)I9VL;B(+Kl%n@8mq?nFKJ zf6NT(F3La1fd1QVr2olh-5Djuu}z6z#%6b&QAnY`fKT)Sx{;F0_OUIwckGEal3wR6 zMve;<9b0FYkhAN!G4?CC3p3(X#Bp=JuyGI@xw^V)H!zEpcg2JCL>fiqhmky{k2{>* zyI$XT4SWgyR>j5t)O&y6c>77VTWah^toLZ(2ZzIOOFKV(oC&;M}XHi(}xG(-2HQQYcJ?}27uZ&Q;?jZWwY^#Y170moB zkn5ET>{A!ZC~R#(KZv~qMkYQas&}9>GA3J~7SO|$%V{c#KY^)DdT!{AB^aRHOHqjg+Z4r2S0jULGi!A*sg#{10}RV(uQNW+YOV(Z^k8@AXoI( z!_HI%0rLb1Kw??EZj@_c3sMfO3-PMAc3z^Zazl+y?9)mmS7o76rHwdde%dKOvWR7! zamz9qHh=-ps83}d+uAxO_C*#JHDzERvGNpB(vsHlGD{;Xi9|bnH$xzSjVf;E9I!T! 
zICO6Jy$=(qallw0%T6M<_!^i}?*T?e#Ml6fb<5A8_qc8LMb(FsS!||sF zJ*A>UoZfv!f5^r>2n7dQW$Y4&f87s3a8iD2$6Bln-0Cd32nlU1#!L!?;Fv#WwB1WG zv^>AVXtc!${8rBc6g=p#QZ~?*b|4UWk?f-x;DcZU;%I2 zn!5k^K!Ua1X?5O5L&cF!{$`Kd$$N@L^XNKy9!4|rW`Qj7_6)e@$LpZ4Kv0P+srb%4lijmct_%QUNBGU8ks@ui}h^n2QNWSBoP1^ zo;XU53xq7WUL7*-GvXv5g{^BER^REcT58k$SF4|c6vv0pDv8>%y6jpE~# zS#R&yqgYW{!tQ37;Z)`eiw^r2SUi6hNSWj#orye};9tfD*Ft;0{)WXW1x#xtxXrh3 zZN`=o^x@^`RBf+ptpOh0N}fhoo0~{SFuJ?UF|-xQX`}*A1X!Y|oH4vVN=fd=cML48 z#JYzo=A8J9C3)}wJ?Cw0wKu8$1#u(ZEqh^rGoGmFU>XwhfW3Qhd_qW`w&sRiR#OIM z+kq&HxJxM5WX%V~|0d^8bAzfLfRg9+C?x%5d2Zti#134?rpt(gu^3+WTB|W-yxid5 zI00ZVSlO1oqL+}|3p7jr=(AjNYV!EX(h+lAe-dk+4aAo-TFs~B+JjGbUU!y7{JOoN zn+|HvQ0Bol#_nGo>_=Th>{u5nvdvrl8E%kAUnF@C%@~ zHjH*O5`RsqZkS^&&4`F!cXV1YA#7y6i36zoD*X)tj4s?K+{WVUQ|Tudl%0`8*b1d% zfME(Wjc30T*9D+oDbDAFT|zhQ??26-*dbb0EkxtwU%v3) zZs@AK$h&B=uEC*#M4!YPAtf|U3(CBQVRIaomI^k7YY$|KC=>+2!!a&0hZNfrAm@8w!&u#(jf#GW7fwQgJM~f*) zi&H^_Aa%2BYBUQUw$dMT!z$EQ_J3>EQvcShfeAR-xQP+1adbi!&r0;rnQx!JAFo1l zM#uA$qYCH%^VE1Wy6~nBbdE3KMUhNbC?K1Y)+{^{fm%il!h!&YuTcaEMtDNerE3aD z`2-NJ_abPw>F{q)$XE?96w`YrICgYc5`e>c@_T_mISE^sF4S$tVo6#_1#xhjOr&c;f#_^t+x;?CV^Oz zm1U@k%Qth$`E(=8_ar*C53se%C0dKz;uM*e*Ikxdht+4)lR#-ZFsfdq?DPkP+EDVT zm2FnD=nV^{EuBs!)8COeBy#ytS^!*$i7}kZYKJnb97xeMsAB{f_Pv0SLX~`S{A1!N zPZhS^nIHVO@~-1uraFCyxS`+cfWSpME&;8H)|xavU0oFXL{3f?r85qfb`#_A-LLzQ zl)-RY5=dTu^Iop}O+&>o74kWl!1dwud)DfCc>JTIBfwR52zYU-0%Oa$TzrEzEH?Z? 
zh9#Z`(how5c8-r=vh%kn#%$z_VdmxE^!k72GPCiSr;4jpV3}{uFX_7yGdD&~tUSgI z!<(I063a|g)``YVH3OC-aZAg?`t*3h$$sFZhC#m zgE&kAj^SF%9rbrL|G*q~nZ6ZLfJ#xtvgE{d1CSm}?#IpW-8@Ii9z`oSN2KFR+}KlD z{=_Q7s!-}@%_8=0VyR!esKW#ac;=kOHt6B>3n3vU@o0b2D`G-+fM^RwqJMUokdE*( zoZLL3%(AY_%2Dg0l%RVe15#7Bu8JV3`LpumFP*}8M3R0##R@tTh$Rzh616Tn4aXN7 ztV>Z-hA_m(L6BaGOvSs{e1RkC8@D(f!)fHssi+;v;1GTj+QRz=&=RhXtJBm0HOWuy zW$7S`lJE!(J-^_b^WXnGv9@nWMZ)Nvf!`~FN~YI0c3}n=@@+=G7Rju;aIoIp)Bs!= z|Ip1e7f{a{Vw6V=;^aj;ZCk9Z!ZEKua)=cWJfcn~1Pnc{5|j*n%ZR=S%)54RI?WNr zhO`CqVlVmn1yAJ|=|B9yiR_OIGnQH09SXrDLPRF$ep|I}T_gr+xHWfg6F-tAc8Hj68aIEWqVyUq9a+4s0c!@z-|vBp=5y^fZiE3F+I zNj4;>UT6;stvUpt4blpm0|6^_Vt8Ex;RSF5;MsjxD70?m?qz^kzKXN>UPN&~D}d@( zX0*}t3VCPpazH#oEiO8tJTsr8U2jjj#(?Vd(x{2G82F)ij3hmwS5C+{xaY!|2(!OD zu_5VHamIeD8GFJ?j0}lzv?NL)<%!auZtBtB1>?T75Yf_MDIsUlFtE1ZP_BSu%D#ts zUz&UGU_U~F4x(rhYm$uGV!oxz_I`Qatga+1zEgEA-Ykmdvj%KU-;bh<(tq!z&-!kf z$#R{IuTZKi;RT2Au`LHey+Vw!77BlC2PYs3z@Ra&uFoBZnq6J?u{y79=!ih~xdLdX zs`p%uHEEs2Ii)Y$zssRt(uCL29eV^cDBOl<6@{rNL~ae<+@*1if#_%ZA>;+Qh8M=k z*IGkVYV=)XnSEwu5yPYaS)@tzuMXkz+ZrF9Vp)I$naq{JgOu%RsBheaa!L!&0dZYI znT#rM`o+s2THbVT?-le962dAwgU5B^`;wsy(n69qEVFAZNA4!NYjUI`-~nensCm~g zNy%WU&)@z3mDhQu(d(zT&=8RYZ>G&K+HG|Y?hiD|d+{%%Hq88eTQLJvfxn*ws|1DE;cp@mj=V>kg<)Rx_fMxkmrsMM-hWH% z=E%x^^{k?o-8088NS06i!5LncOrfLLd%^DPg#G*3;J~BYNL=Ep#yJ0_A4@^-!_j*A zKI;LgNz8VD=|X0Af$k-0$>@sV938@H*|yhMM|kbHVFUNS-aC{TnQi^K&)s->BhPe= z2loUA+&MNnKfsZ29eTnq`-Q8eIADPaD`3|$=AtPL_x(1ln9@S4f#MbK8f%^yT(d4KZfG4A^f)OH{$#teZkB1n{>+PLIT^T|So@b5Wy0 zuh-a5knY|VFE0_urN0vZ59WP=+bMu$6%=a^Kc~mkRGfg%g~80NJU)n9(-VdV&3tw~ zQPn{Jk&+?!M-zU1#L<~Ss_-gi)%IdTYP<^Y7lqO;QdOa$teZ0# zu%fVk#G!A#_$p842=P84{!@MCVBqrqwfF7eSZ!VV_O9M)@0}2mNJ-KGNpcL8Na=i( zQlTh>D3nv@M?%=8gHx%5Vy8mkQ9F_f+0Nn-3XzaRIeqt7qNntJzw7tk_g>e#{%E&L zYt1#+oMVo0k9*vsB_0IK?$*%WKiqwJO&hq>e(TdIJA^EEB3uFf<7w-X-H@C(E4L*I zc>ZF6ht+gfQStTI4Q;E_cM1bi8Z$aes)8X%DBpAniNNQmgHIZ`R(Xtfr|E??5Jx7X zn4uv&^bkd-P=+D7N`U~MZvz~JthQ|XHU0PBeq(yEv4G7j%)E$Zh;$6EE-yscG&-p7 zdsn=Qbm2+#v}m8{;quuWgU+8wVH1&pyHu7J+v6?M`ns( 
z28#lCy|Gs6Ap;#KsD5wPa?Q;oG=XeD|K&a8`)s_&;~AWN)O!~-_#!%c49{|j>&DhT zZzKK$$#y{b&ku4u)(xF(u?#;cIslMdyPnrc1A{#7Q&FBUV}d&3Zk!vr%>GvsDC2`v zLg7(b+m(g(EA+o`+Sya5?cbYc-~ryeQjBT;kVs(YhjKd$jrBJ+UZNVFsI{F1LTltH zepv17I@UE58x#BcL}f+G-tSa$(Yu}J1HP06y!}cM=6t?IfYr*Tb?~4fG(~pq%?mo( zK_$p=@lQ3Jd!AOSKQ;IYs*Z7pg*>p3p07;OK(w{`t}weFxGuJ@57J?M;G+edbyQd& z7ha|$Twv`Z zD+@eNxotCaruxmGKUjg8AzTs{4C>ew-3#-NCF>PFdT{?#f5DvZC%4AT1t zDQ=I+Md(mZUCEaC6PtuNBiSzZpp*<%Cv?G(x>0bM62NOAxeudRP}0r&n%pc%7S7aH zP`besr2|DRg%O;@)Ee{Tk0lsi^iq@rl|tS%GXgc1_e#!%%CK7sy9 zNO-@D2Ze(_u=Cq6EmVFeG8J>CR&UFKR95*zm~RYv=kz6|h_SM|9QuX(o%6Mk<)I4X}mZT2@@0=M}q7urH>>^ z6x1B@-| zu<_O|r_}`Yk$nY7k&u99dSIe)k#)Ow<(c(aBk|jn|G~;drN^`& zL8XW_eN#xkT9p0p>E~EsbBHSK{u6k}xWd&q#3^_3O%YF1Dm&jE%vOKG$uxKVngOeM z2W(jVH<#>MOf*fQ|I#6TF30@65sqe%#}iK!&~DdueMmOwVN?{FraH!j?b>ZfJfZ`2 zlc@iO30E?i(V;u=>D{;x2c~|dZ>-RRpx^Yflst~hqW(mys?vW%DY`FRjk1B@W5IpK zk|`*d4`Vsyz~3wU;ik=j>MZSr)s0vLp2z(^LhhlScs~*e)DjE{xL}8W^eZFB^$nGN z-VZMcPi z-04D%K%`5R4GCmJAD2jr%MA*THg#6&RH2dU9Qzsr{gbNO6-NBp_P*RGT+f$R;@cMT zI63IsL$d-gWcTqjVz8@qoK}HA4^+BH7XX;*$&viNyr)Q6M-#QoYM)Pan!euxF65h5 zkWJPhPqdNY3_S2-Py_x;QDa75EzX;S0(FWnsIxFp=cpQ-eND|^&2wU=A-2+g1IT$y zNM8o{KI94Hr?Dfp)AaRS7R7_c30cB`wbP{JfPYb8xPvX2r0jq8s0p%w!d3=IU%EEX3G#qcOb|+}wws-YUa)YJkv$>mHpp!f z>2oeRg-k^;-3{Hb*YfjQ~ji|BnwXyq@+=g#VjAcMdQc@!`rOUn@%tx+;)N{w?bp=_@gl-8rsSi9S} zm2eP%%X8d&x|-Ge|IS=iv=7WoBajcGL62(bJ^k$^d}Mx5Y!r=06iMADK~Mkfur^Mv zKbq%0v|jDJvI8kWWX|LwlXbm#f6+Q!SJd+%FqHxoz{ngZi^6McGhVRZFm?68S)UxE z6(vPk!3$7$MA*_~{r9umad8gFkUxIIrXhU&LXzw7h5XK+o zfcfj1ntEb40;?2g<%>)N|Og<{(N==(#=trdt3iP55t zbhm>w-QS{)L27c@4QY?+9t%gh{PP0t;WZ&e=Up%NBS-viQkTroJSGeOXpyZ^!TS$; zmd)E&q@p_dNC{LngL&?GHgK|JtyoERW#eCp^nsKdHU(;%5gfqw=Imv&(q%>^z#HHGddoNng8n- z{-#o7zTodEZ%H@_KK6`bQ0qJ7aBX=qdIc#tHE$yk(&26ANi&}MhY&_9$Xj%2;c!CN z3jjSW>O)C&Pmqfk-n=DiCL?i8(sMYw*CG;!LK4NOP&kVMCkH^sU8YPKVthy)W^V6c z(7nDf`#22x&{0rtzP>5_bYj2QA(VQtulG?iY>0fRY$FY2P~p5u=#{!K;p`@*v8GNy z{Vu4LSNO$YJ|D?&5EzMObmK05{tTty0+5{TfvD$~k7dr-QADot&<#fytVoy3FS_iV 
zh}KJDdPG*If_g#0>ok7kKdC{2YVw2#B)0l5){nBtql}2!A8#)fQAj#@)KQptewgRu z5BFem-VGn=p-2ouQr93^1^B1ea4xI^{rf(3!~2M~a=QrTI)@BV^Jchbs7WD;($%?J z8K88|M#hCa)1=n2a=Oj>ao{PUcjnY*aVEp(W&CPlKddpATU_2%#eMVVXUj?{FOgWs~ekZU-_&TS)l0DQb4j} z$21B^-uxBc?a6!oA}Z6U_Z{gk`wMcO!Is_6%|CN|lw6;X`jIQpe$MnN}S+1gVsKM-n=cV+ZI+u9cUzn?B6hLn@YgUgNeEhX+H+m z!_+|CA;L#%9z9GOD2sHR9*ZJ;s~uxg>4O==)Vj}9fie5}?v#;w&hrO1G1U6^NIDuN!Pz3_)9G;BNd zfXaJ26T_>6DU^w>2-bT%0mD2(hfQoro>cRQI-VN3%k&U05){p_U+rRwtvK- zi1Um2*ua7z!*?R*{I-Pp{%J@*o^~&0DI=A@lfQ<{|4%Qz;S3+261ha;pt5P`9BR36 zzREK^N_DL$0(=5X4ts!@n94Y!-Ftf|C$n3S*BmLR_sUY)Wf%MO2Ci$}m^k9_qj}%# z_4kjh|2?ObcREY z`k)Bpe?6h}k20_NU=oocetL!F9)wA61FLccmC8yL$Vp`%eMJ0q3JTkx2e@SVXjk~R z?4}I|d`GgPJ8oOQgx8LgF^Wh4Yl7KH6-bMEWR041EaqTcdII-<-zKng;IPhj??efQJE8Nn zP*y?$!!`A(?c6a!v-+uGynCK;3sK{~)FVFqXu&sQ&-#3dM&Qxm$wz~7;0i{r8Fy9!2n;yaNF1}Ldpr-?z2V3vn0LN`=Htb&L(SWE!& zC~roBdXOS51Z{Z=!Ei;_MV_8TtPbjHIp`T*;A}Z$DHY+!jVuiXIs5y`%yuzmw93b` zr-DQO^e?86D&)4HNdd^@AZg@5-)YL36FCfEv%_$`LD)+hx7P8X*ja!ZKz+0B zrXE4jfpdsn-)0xn{kNN4G#FI4S=+LjU3)0qNYM zzJX{LB;c7glnt|95A~y5)!LwN%b0t*+*BcdDYR|xbaUF$Iu2!WlTi2sWnBD&^_>Tc zp=#D}A1eczAv+9_$DG<%DPHyDx5E=v%8vYW=8)Z8d62Mna5dNV(Zm_mEhGp1NlKVX zJMM#GB()1&`}|25fz-Vb4~{^e3M3i|(A1tzH#O)MM9rH!MlAunZX!>EQ$@02m_Zfx z#1UqO>zyR@NdsaInw~V@tN(fqMSy;X%FHn#<~W0)nYxDbBS1td`>JuXQjsCY{Cwyq z9udt8?7Tk5?J1#bLeMm)`Vso(P$k&1+G^vbJ3zB`qi=Yn()8o;=WP=wj)+p4J7#tf zSlL?;@ew2XUuZ8faRf4+D16JPcMd^vNs6+@2XO2@s*R%P7rQ+V{0u03Z7v!W`<78J zVIW-coT6QsYA5Y1a)oepxHpy`3=(n(?&xh-HTS5T?QyMa$^7QSiCc!Q_T0ZtaKrO$ z*RodMiHnYjoOfl}?M+#`4HeU64&Es|Yq|c4boky9TmE~INhddnwtGpRl-#g`=lnC( zRIQHeE}q(;d%=qKr)yhVULDHrR1D4OY_1D+)@Wwpj<56dU{V8;IPQ8TWnHjF)QaBUKAJ6dWgf=&>`Djt6V8Z=p)n>|*u&!2k4t3DH6v_NP_+EmGa z%sFN2+;>)ly`<(h6q0#yPO?OxBd{5h*2GGi5|DX3r}LJt>r8g@G5I@n#(4TB-5eu*H$LOsMU)9iw{C!Gf9BQi&)R8kT!=$-W$mAq?v3t?KT5&T)R(h8c!w)E;% zQEHyL&Q#%q*wdoE@c18|o@1uIAL~h~Im;tVp{Q>M@q$Ggt+RSm_PEhSL_ zUgzPuRjVV{n$14HI3iDVvfbUgclr5mnWUdQ-r9x&7h#AEuvivr#A14Gepy_m47c%| 
zid~gk=M-eIr36NqBiAPiHk{Y}`}d!i&6Yi2iT)Z2c2#MKkYkoSZ=h>!ZT)9pU$yvbwA7tnb=Ly@!1B_8G zbvuD@T@Kx7yXs1A*Sh_ZfX|83#}wc*KR*z_P2I*uY;d_e&ho=I|L*e8#beL|aMy?j z1sTPt5a0*@P#eTy-F55MVdbz`#i(qAZgco6b+i_H3|iS`aE53y&elGX{cj%OLtSWw)i}WJxH;>1^4uMMvibv~A;UDmZ`1<0R}sKm=Vr?Sb3zTwleZEDqYQw!ee6O@Wzki@F2+3R5b6yh)!M{pbIJ?Uf|tn;LH5Z-Z1^lNyKW(U7c)&L>)=~$hA?Eq;VE#w+IYcZo<1~ z3OI(D%^nAr+?%Qiz=>^Ru?`=rqS=36rh0#WqbR0y($GNkeu0qzXfmIJxNc(a&>v%{ zFdB2b-Y`y4qe37Sixwn|R&o<8Zxu4Mr;u~ikq#)a@pVuu=ld(Vmz=8f zrGwVzrfEh}axl+FOJO!OFx@QxYUfFQ(yjh5&NJGVodRAOZR|5kl%Ci3I>RaI493vH zoJRf_^JtHUi74n@5|QU;AZ?1B+JaIrEy)k#$%6CquQD_ohy69`(0xo7W0qSkjdqoh zl=Rm>PhZufcJTTL$NJ#s$01hPj{QG+-p7??5D4P;3x&z-M#j_^wZiY(+c34TMLwX; ziKsbPT;$+(VcjDJjc>vnVH!$jaAZ`}Nq{Qp3GB9;EDO9p9e!FU-giQU+IV(|f%N3y z<$7wtJ}8IO>+R`&*_?^Vvsm#f6RL78;?q+Hj+^5aMmjqC_U$u(HWK>#J}tg&zT(R2 zNa56gfAWqm&@1=PG<%M< z25l98C&UWXHxoF!Rr18zZrua4mVuKL+g2VvEjpjAB?sA( zVSBepVYqVjy^6hNvrG8M=;xxMtK;bgwgAbQAwe^*AsAo8lXe{R3fadGN`87=i8Y#E zSooM8ZDIHiXQWpg` z%LIPnE$BD)?H}wETj}8C8Fc&JwynKFD2T91awSFL4|;=k&Tm%0gzXU3yc_Eo-J8=r zhB+8;y29@fb=wiJ*Ue4DFD?+eaN8YU!KoN?lyW z{xs_GA|0uKOgJT->91hZA6ZqXlzIFzr0UN#%RPa(%?3rL{ch86oNW_h2b+mt9C6V% zZ(5J|TU>z7d;FGF5%%a*aGe^+$bf-oMx{7+V;TX z7X<_}i7?LL_wTDg>YWeZ>8kbXh57k$(px0PYR-fueeiI_pea|=#wY#q#K`rxFeC-- zkiJQWw@RlZHY*lVGj(Zd(sV2R@xDnUbFC+0A#TQfg_X-f%NBrwATat<@Nuu-yy?wKQAoteg?$0M zy$V?hZxC)Hk0g+dd?D2*h5GF#VMC~##txB^9$cvK`1rYG4*JHA8^YymL8uHTu&!<$ zteC}cPXpa?=%+gAk3bmcfXp3nq*? 
zY?eFwt_~yl?($&K?jpyZoW#=7Qn({nvL+~{A7zJkktHr(vs@qsbKb}J2N#W}mtb^0V|~jEq;32lSc>H1N^DBAK#CsGySlPz#!{Of4peHpaqCO4O=|u zWT77x9QkhsJ;?ashyTwE+SA{M;<&HZ#0r$P_Q-2?mgMXV?yXTy^+d7gU2)~<#tMx$se+%ya9vo!j=x+tTd#1<2BIJOpzItAI=kYD!g(Gx&L~3)F55If9-}137tu(-S$G& z`rXc?{=elBJHWL>R4kII)f^hd?00uWqJPzzHF@uw23EOk9hYK129i>+dHSvgmzHt=@Dg~D8Ynew{A3Jsmz45t zTXHQvF$c8Ye7N5^0+s=SQI-3h8IBrrLrJc;kTJnMyxm-ZD765O7~9niRirdPm!iQt zUc%e{dX;jmV2{gGb$8=7%+%r3&<5|#NiZ;dm$oI>ms)uTg$aC_X4UQm)^OrhVc35q zpzFDCYi*8=0|++45G&sQkTc*?ym~R^+Zxx-B@vQSqDoP@CD1Q41uit~;6c!?X`bt> zPyh`4ZPQ-~1J71*;C^;K*vN6&sHOP*)BBHa^KNJFZ5;UMJl2!Ap0f&Sb5TO z6P+N4zhJ6La~qV4(Y1Ew=EPY5x!!I>iv;*%llr1g<`BU_OhGbQjcg7=d7}cl3D-db zRxh@ECg?ey&cx)!x-}rkLawKsYI;@sKp`Kvzmp_jX73z`z6~R3wbzt#&!g zrLkZ=7;|=c!GR}WW%vZAiRj-bf+*fF2UOh|#K5BY02eA9{1{2e%LtR~;D~2LXm!68 zQQd2H@MaJ1(z7!?Ndi#;{2(AZ-XSFj(XA@j^AN^KK;y=Ts)s3#Lfb=@JUZgqfi#fN zT9|bkt(2jZe+o8%-gUGx>`Hb7E<%;$Ov5;-oQnhID}6Dy$ytCOkP7hby_4b|(p~V4W`j^< znjNHS&5nj;LSj3+BR@h@h-}}4c*_*Pxbx&vyFSnim_REA_rR}qnNtGOmMNF7U8|tc zzl^>MX;owpCCH;Jt)R||*tPh!b5m*7^zfG9w^1>+2=NRC=}qqXu(6x@K%SB-9uFW0 zom$>^l+|jE#nO>0R>z0b7c+{6+!t-a8K zQdSc@KxK7}^_w<{PS*1|g~Lgc@Hy~KZ^7$zf-(Vpn-=(}N;9(2tK)tLk`ofB?DK-? z$DKA)9i24Tjq0sEZ?*!_Qf}x%xM2dF1_h_)R{+_}b##&e2He6wbOImF1bB?MTLmz; zYNLuT>o=p;*105Un&QsqEabQ1OG2aZP%^?Ib1YVVd|$3k(E{RrdK0o}8jzT6LH+w& z=NtgmRD8&)=&Di9MFCyB8&i8qGZV(*^0sVWpwp{2mPIe#U^vRp8t&1eM{;xmWhhNRB~WeJ zj52ta2BV6TpZ2;z1j9;jZm(v2t(H4Qv$11 z1wM&Y8#{}NY96Ehqryxq29yZKD$qhiqV?q3>1SHIp!ZarklJls=HA<_dc-Y9v(t*) zsg~H@FR?dE2Y5m<%QLwhvrXudEZJ}ayLO#pVSavE{~5zOGOgS$L%aoM>$OGzfS;vX zzpQ?KL1~62fb^PEj~3~CL<4i0AE(!TdR0*Fb_!6(Kc3oy67P76rpE@*3jm|+YLGb! 
zRfLF0S`Cd!=K#Az77JO@$F!%IXudADTAVnV?Kih17-H>+qTS-UY!4%yG|=WG$grdE zGGn-A^C9VKTYi2*@5kFDE1)92;B{zvlUsWdt(i@C2DaMDGA?&U<@z5Ini^0p&9Adc zKPhy}sw7UhLKWtq7!=U(+j;0T4W4C1mHJB)@_48La_xD*om6;TJ-(tt7bx1jMeXf% zxs&&xeQhEbD;G36p@l1hp_X3mY`=?voSSRCDO(Yct(gIYRN<23Rdwbd_tXKHO5%k0 z{$jVSDY2piSJ)Ub=K?H4u&YW&2-R=K{N4pQ2#<>OmrUj90@3K06={ zTLO`-^26=}ZuED^lkE#=O*6-ZRf{WgTo@hufX|?^d-v|WA)m7y*<)~ES>&ms#nld% z_x7RBoOi+!wuJV64!!ACouSoZ2fSlva3El4`W7Oy%@#*jpf+Gk6{=;d>||o^hCtx) z9NgbRXF!sWg_}od&bg`3gbPfBDvJq}whVJ_KpKs{MNI;2D!M$XCVum#9?XwGO4L8| zaqn-`L;K>X=tfKI3X0ft%?W)+KahxZ>hYA{HN{k`=5$qS^_Ut2sVK-dp@q>t!ia** zK`ayoz0Rhk+`aQ<1#=WS?}YZ0U6tq5_>T8OT$_(boWiz@%4(mJ;fq!tL9IRS8#I%!2f*D1?$6IPWc=qg;C&2|~tYuYhlqoZk6q9~cc8h*E<0vEjpQWV<+X zDh7P|(onAObNE%n$&BN@vm&6uDn|LYOV_SR<8z#8eICgEcvS>t7RD9gaQa+O8=B)> zgQ|F|++bkTsFeYz9D@MlNv8YfPD|oR7XH? zI~y*DQPQSpDpOKrZaLXU6Wk1>vC68<2N+NGWViWx~Xr%@T=fi?r(RK`F5LI5?Cqx_uxj& zRa*+6Lx5!zXQrS18OCqxi}nm(iBS6<)celqi~-R=tv30z!rcYks;GREX_$FtNjlol zZ8`1Oz7OE7-t;rO9nJ!7ft?#xF%SmH%;a*fc{f3|!52mE2yU>hvNsl@R96_TilqY_ z{D?(>q+4&9c%f(D>{=I4`-Zu?hmG$&R(?yMj8{|P_&&} z-pCu8(}G6W^rky&2)sdAUiEi>-Dthm^Nsc>sKq^!-4Fpcq-e*`9Of%W4MkTr&z9Vb zLxv9k&{MT0L7&X z5kVcSRrW#Dw!R5!{nRlN1}-tpsJ|0$@j-8t$o$r5xf%If)2l=?VoU9{*tbqtzcr$=)|^~p2924!Aemr2FI5sJ=L zL}q>A5W6O61P!&x@X3Z%T!KzrYY= z1shH>Wrf4dm!o+xS?o$-s|9FizBfBMDynpc?={Y@-8n24-nllY383sE^}D+4-sB5k z7&avHjL-fRy$8SjZ=E*w7D2(v6i%MPZhX_l@j>bR1$&P1p+qPfybe;nYp!FL_Wnb+ zF5*f1Y>2;pNUXVF`QeK+b4UDQYq4bi$AS2d=d?W7IDQ6mdp1;)e~ZX?k3(Vqj<~le zfcAS(D^$JWpG{actcq8wt6929EyNJONIPb!Nh{$;t?Q5{oEpnINq#gswoT60s-k?_GCSSJ=Z1e8` zqz6e$5UC}(tYD{LyJo3wg#mo%NMJL%#F+;Jw^Q{|sbWbtkrR;p8Ff`g$wo_|{VW;V zPJNM7Ye|Lg-;O`}6uO820=wYa1KAiK__)Hh;Wck7Qr)vt5^O3az+%YM@q(-A3*dd9 zrrDFzNa!r%*aJ<_gB0%V>{f9bOJ8NmuaPHNNCU9%DH*Wia{{?GPt$b7UJ5|nc81F1 zDw{3q0F8myL0SkKhqH zXpTUTGVO1=2Pz$2y_n49z$GomtliEAjb&%3^=O2D&s9TJ?RUu4=VVl3jEQOZRYcH-cX!sEGu$8mu{V$CcW%sMa1kRW4Nj9{LyRwOx?&U^%F5 zIX!^=c`Q7^Jy}bz9h_D3LXMA?m_VG8LTAKx6ekUH?u3uMCHpgKum$05*nDy&uqGUv 
z*|3&EG3Xr4>15YXO9|pLI;Sd2UIvG@rE;{pN<$YwfN2&T1E~X*Ie$8EZJ3oKAewM$ zw9Bg@syTqZE(gc&3PC2Oqc}rTm{N>|ioGooz5Li!xmX{`DV=~@^#HzRE6RGm#krVX z(c4{-fzH#S!4Y|HDOTzNl4F&d0#%P4OOQfI=G=>2DtAwc(7PM0FZU$Lq7~3{97i*X z2gdXu$>=+fX#s`XF0Us`WPXkYD*EdXK6%U3r;k7^R+iA z)CK}2T+zLv_u~rHT1q!l;gBp_`6B8w(vY-k-M`fo)HRz1tE%ThF8-B#hnI!h^fqK&_E!B zjV=PjaaT+Ea2#?Q${g~RS+gUifinZin9?9~pzUJ-f0-hCEaVc(N^$o=lU#2kUFDhv z_RldcESYCfjpf$PO~g;BJ&%ITNz`lh?%Uw4T~%XSqPKQWuRTu`Hh9qTck6rL=s~Ax zQMMu`%o|)(_A-ULIKev}E3^4g9lpCwT-FXa>| z=w%BakEBKIO_oTj*+G5ajfNtsrO9}kz?93fQ=_7$6Hnlry5kBy2DiGvF$d6+>Iq`5G7bsOYhLn@adzG@Yji}yx#fsN*F->~f3}tZ z+92owfNsT&gNo-3h=xGv0blq7>NVu?Sr%Hy6MPJ*pWBow&bB9(3kCelY_EWHC~8Q? zZW8+lYH3kbYHw$1_3`n0SfIiyQlv_;v?*L$>(2w6c5T1u+-{$TV?9*th&(~4#$yDT zdqb$%Pc?|DUP!i1qvG(XLADFTV9)@Wi-Kt9Lx?2OA;JW(pt;kWhVC)TRVMW`R`ke9 zNK7Fn;MUI4qAINCEa&$K^iDpLOPa}G@5l-SAu_N-_d*?bAA}a7|A!DX)V)w_I~`#g z{oe9p2&B`~irAm-CPIa$HO_r5VT#Cvzh}Ak)Vlki1PTvSo3lqdX*0l9S4S4~n6m4d7j0&1@x@ zin=MGwEpGC_fQ_JLiycEV03rihPb*SV)X$x8(@dx1&t)Y^cw+lMRVRlQ@jhxZ70DP zTJP$cf-~UlpFEeus`p-%{C_h8%=-(ofP{A^J?1}?rgHhJG7M(p!P$Q326Ml z0lT6KQVpK%$+h#GQxAeq3%Y^%>u!!KN%wluS^Lp@2*lB5Qw~K1GCQShTLx}k&Xro@ zIa!AGN7ZPL>x)n>cAE{D5TwvnjejV&SWHgxGAsw;2uDhJx4Jo^$pnL?d9s_CLgU^Fh6~R(7a?!&AHbuXSmNT&~ zQ|?1n)I-)Nf=)(aX0g#6h_QOU8Xp$Zxrwmqr9%^uUm}9{Z$b`lI=b(N+s0>6r?lI0QlZw6lao4AFef!+Ihjt<@9@8Zh;DO22oYv z1hNoiA^gjBFmg~eU*mn4T0Pn6Fpa1{> literal 0 HcmV?d00001 diff --git a/docs/assets/images/mii/gpt.png b/docs/assets/images/mii/gpt.png new file mode 100644 index 0000000000000000000000000000000000000000..740b94f8ff5554c81fa44ca30276b12a411398e8 GIT binary patch literal 438479 zcmeEvc|4bC+rFl!X*8oo(jv>G5K74!p(Z6tn_ZTXvSus0&`g^`DzsS3mVHUzk}M@l zDGXV%L{VgkB>VO|&hN}T?<>#y-|x@I=kvZ#Bku2gU-xyL=W!m#ah_e+zgLBA0oMW+ z78W+uUCIYpSU7xGSbog<;d}bdtAiEzpUsXtbR9M9%p6@zjvr-FH*vH-X6Ja!^6)Rt zM~|Pdw6m2ElM<6y_lt$2qx}iF4I6C!{R3il$IUmCi9W2tMOf{3>78I0Hz!`{ zjwQ<+78X_IZJMrOJxwmI*FKCYO-Zn_ngp_3{B8mNceYFH#g{0jM9CgX&g>1~vof@K zlWCR0(6>p_N*Ir3q}uW5t`Lb@yQ%4iKY9|+gm3(H@t5(Zkv|S>IkH7H;%&DR_f%y^ 
zPVa!+JFnjEy|S0`BuZbrcrh0@^3B&N_v?Bs|Ka;ApE>6^eDkyPpO(LF<(~cRtsy`E zJb(7LFAM&w5KsB-R}AZaFFj*T{<|PEmV<@mzYFr;1^Igg{u@KS*$V$%kpC{o-z)Hc zrd_U@VV1`M`}&%5O((|&D^m?0jq%R=X4zO+;?Ml?#~;t0KfmxiP^fNrZK%Q=hppkF zVq%x>-n~1(QThw`Aq1 zj8r~L?)wigfMw+3DcM=S#d1n=)h`QYfBP5pAADxt63Z@*?^-6;W zSX9NR-m__xzTaS#^szSOP*2W4d1bE0@Uv&n;_K>XuV4I)Q?g!5u8A3PY24ZCpP+H4 z_-p4Yx_*XZXO-3PidwR0%+3Enl+~MX&O9f}|m|NMKN|ORN&SNXga%gKE&Y$*t87h-j zJ4Bo4=|cH+V%Z;~Eb3AX)jT#Fy1&lIYxIL_XYBRXaF^x_*g8Q&ef62<6JM_9`#m{5 z|IWR>xRbdpuQN=$9gSzb!dvb~=GTP39OGrNdNnaRU@gTh#(x-lx&Ng^Md9u9yj%fF zQ?4=mCmhDUcJ})kPwl3!t z{NQtXi5|mB%fNH6#-V(3M`4JRwNU9mmXZl|7}-o?1xy)`<<{?V7@MB>y#wS~rK zzlZ(ea~v)oPRkp!3iBH4%&>ivJ;ocWwqU`6)_bq^`d;7=svTZC>%&=;^#9sipBf(o zym4+^pY7QBrOeyS&8;%WZNQ>A$4yyz$6$D>$$os^ckonSwk0mb23`I(FBlXq(T*?{ClltFS-%N@OJ8cwfGE52Mbg@{vp5 z$JZHcf@zko;?(b1z7mvk>E)i!>-wADa&i9lve0ENEuJo0CcnN?P2-WW()@`_a_uGA zRv*ntc7FaS^NI+Ek$2}yw$0~R5-#VYmL;}f!-g+kzC_`K^#?{ytPy+EbJw*i;ZAFX z_tc4X#(7)bIqlf7!{N!iWreTyDe-LHyjeJ$XVzMtdjGG&486v~zUy0;<&W&(c76MM zMhUNnvYDBgzX#UizQZeZnWA))r-uuD*bnIH-mw1^Ira69?EA~xt+?-93k>AIg86go z+am7VV0p)CSaxcpt?ks@g+4g!N_gwn>_6R!^x_(R<6<1<(fhE5XT|JWu!?unUmBCK zeUXBs*u(c%KfZb7qE_%O&$=&n_!1URDMZ>P#J;Q}(wo<`*e_v-_u<2b4qu)NmD~_F zZH)`HO>~xRa$(rDDE8)0PrbHz@aAgjqv@&f==k{f;{UYe+`enQ^}Tcb(d)9qO(z0V z-TI4`B{j)NNJw1T+@xBd5Ds6a%+3c5n zYwTZz+t|Y0yAtriBhwWvt71fkx3v1KZY^GEx5sT`lgo#WjuJ$U>yBl+H{@zskI#i!sVpY)YQ6;ESA!8`+54|qzxKIJ@Rx-LZ>fh$ zt9B!trbMeI+CF&Wyj$Nm%ie@X{kvJeaO#xouHO-KV+OHl>nhi1)!nygj2VoIijoVN zeR&p~@b7kCTc@gD@!~}iE}p8FX{Op88yo8+$vW%WEZ?>MGS93mBsl#Vj?N+5hevt# zYDgt%r>N=MHQclOKX1pOtymy=u=CZOg+dA02$Vu8b{R!W|Bp|&ba+WJFF z0|nL9bo&|@)<_eH3x#Gc*oXti&+}^}?V59B`wKZt`&)}RYq8(jT=K^aj~qEt-G3x_ zxR7I?vZbYEfqTB!WS2eH?0Xt)1`jv!y1l$tr}-Z@G9H2W&-zz2&(L`8j(Q&kQeIIvDP{g=3Xk-ULI`z|2^(AK(Y z;6eW6`fD4EBhs6bbkbfQy>8fL&p-RZ6z!amwu_CYzFZF)w600i zGB{Bt%i6ZJ7`(EiGM~T8}(y`uNlwCt1$f*I>sP+KC**@$?*<@2c(Rlo6e~J;Kx; zwq6uH+}re!wHw>qUoz||+^`uXh{24R|p2X=gbCJFtDGj3|@@edz99MINYx>EX>52E!L zF9i=T%}tv&@nZ)7EAp>i9i3N_a0P$){j|BoS_&jVK|zIw>~cBnatF1W97=e^PmX*Z 
ziJTsdq}=8iFf_NV*xy%asy_fP8UdUBTJEMf61=$Q*jKswA7|WxS28{|HkI@0GQNCgq*;Y!gG06OMzp=Q_dwM+R(`M(M*Jl_& zDJn?GUwi5$j?N6jj{^+x0hFSE)RwWc(<>?VU#a5k?0m%BTt!>^8WVrUoBq9a0s;aD z4jx>L-JXHS;N$bGtjr&P>wAuDV2kNVBm07Y#hZ2t<@?BN}KoEssLy5gCR3>1jKhC8Ees=3TwcWd;fl6^y=wMc6J9WQC zTIClNP0CZ*t>(P{4g1gYwKO#~msBW@ADbAh@V=y#q%B}j7k?mnWFT^yi3E7|4tpAV zjUJtt=uiqc`{Ob}gI6)CfmwZd<54bs%}&zSxAV@puY=La$?MuHBKf-O(gG0UcibnlTM`Jme!b+&hZoVeZvArn`0>M`HuqJ#FU!_%a}vtTn}6zDoZCRVL+b^hVkB!m zY@gj(srT+lisgQ-B8c5(pKZ4uFKMsRdD6Wic2>b8ML%r{+>=1_ErhN^a}Z&^z8*wX}LrCCnx$# zoTOjjO|^!dD8H~sX#)4+=jWHzU%ayI?de6SnGflT|Nc5vt@p^;CRsOwAHiOTn+h%I zxj-XSBkE=x-jQth=zd5|nYIbj8 zQtHLk0bB0wHJcfS4yNH{ammVUi1aSTlF}z^R zIxSbbTO#P%wc>fp41VSi+_NjY7a`k*wouxj+rMv~{&1?{fWAH#!uTb;8mf)@h^9WZ zgkBa-ly0uO6E0LcEL*)AS)2d#_iSpqhwdNKdl{kdsCKA1-DLYjyG;HCans^8hQlw6 zr#9`C|8!43bS9XI_dI*{EIT`4arX$q1G^YY*Xt`6KFFUQXj=oQaRh6xrXRWagjq_@ zAtNK+>&9MgIiCxDRILklC|)5Bq^W;%=BtjEM+&F}WVWb|Kj0ZaD;A?0z0*GmNIJuP zuxpI>tUL3{sHu198{<`^^mu$?)D;8yhC7yB5QV!A&7TjnBxbW6_kM5855zdy{m(`5r(0flDLdWV9x}t?YrhZg-z(#K%r~{RnuFdH6JHTPDjmx1 zPJPshc)d5f9b5QPs9oOXO%e+2Gp?Ba@HO70@|o%BU-127#d%Y?)7{?l-q^{XUdh{==?486||;)w0dmf)A8N9-w$Qti`v|2{y!A_5vyN~W{zc+;tPgDmK z1lJgzcz;Dj`_Xh#S{@heEDFQ#LU6J-_oDo_s7=;m49~vEttnO?)69Fv>hHSZ{05#& zj4i<4Um+BE@;KqTcOg8^uU8GdacQv{jFfda9OlyOhQxQ(UVXUtjp_SmS1Q4gSbds( z-K}Ft-)5F&AyWN~EmP9x7V>ay$oc&1vDfLVK6p?~?=>|LS%T#cOnroNwpt>yD*$`t zRV$#Fe`{;&8SDEkUgJO#^P`-)Yqfg+L%l!7j!*cMS)u6r-SVmp4XwGAl$F9hc;Lm)9A2*f832N83XwJ$Hr>o1`ifu9-5WC4S`D<*biU zzE++yw;yva0@1GiTC~i#5DEG);!ewW4z?hZ9Khls)=OW!c=7P32F3v--46=RuT9Qp zbwqeXp}eUHalk!=8ffg_|1%hanl58br;OQUBa1QO!E`>}K+u`nY|A(Pg34M6G}!us z8~+2gRT4zt-=Tm1*b0=wUSakHtbgT)83zZ#tEQRfA{NGNdA*5h!Hpz5)wk$&jma|$@R5as7(}V(~N;~`I2(^K5l>!hV<~$ zMM?(Nx&L7Y8GqM0a)6Q=es?D*wQi3kv+r|i zs~#e$`s7~q+0lWHlFO^MizF+6ugZ8K_NcmF85quHwm2M`fs9fDJVE=J)(>>jc-*z{ zaGuHW;Q*7T=ZbJ0!kxTAM>UEs@f>>BL4`DNqE1_ovi2XEr7Q>E#jibk`7*gr{I%cc zrJCHUSFbLYu{H2pDI+H5)U||m+)q4m_wq{ke^;L40AO$tI7u$zT-g(W>jsM}whZh! z%K*Awi-4Ra7x4qjsUN;w+^qsf&CH4uwUSx^UHlo=b%F+Uz_zm33#;Yjk? 
z6fp2^nS|Dtfq}NO%Zz-$v&W3{A!1O?wOqm7@nxivQfCf5D{-{IKT9RdUhc}ikN8me zz^>(nq~$?yed{|Ldfz-MdHnlaQz9kDy3>RVAC*Zr7GiM@Klx)GMG4A+1q>WvUm{?s zmItV~h-W`zeTZSjNz@kp!xdzxwC^~*J=w93(Sbv^oY$jlCcf6S{ib;S$S;{um9)l0{oV2lq>hH;hd{dyf<*Bk7wEyqGQ%Fa@{n&k`R*Y_R66)<{^s8hbYj5ZtBf#}+)hkDAF9Jf|q< zP85d^)Xrb|^spl$c;We#k{0`8pS~kPRPFrJNd(FiN7*LC@g>A{Y05SU-wC4K{&gTwxKJlJcdDv$0qoqN#F>|o{kqjan zCyA_FW?;XKu>X}SR}KUoNVttKPc-yds*LOUCFyK~7MdA4s&{SwX*I{f;b6yXKF{$X z_3U#Y-Bq!>QI!kOyQiY44FJyJ5` z4bs?K|Gt6X4N%Cz`5ITAA$EyQX`t@U_lO*R#Q6vN%Be&RshFjZCcQKEP*sRTxn0)*E71s0C zy2Ga%oSw4DD1*npn(Btt@3P>M9-12OQ`xp{+y7ct!&xCy^Lab(PC@Qo(az&a@t%md z7qIsgIzt{byNklx(`~_@XWjEUgVPJt#xO-_o_(lks^`s$VX;z{RYx)x{`HC;@>PeU zgxkcd5X8;P!?IVL>r-9wvkX`vIjVf(MG48w{JQmy8Sz9#tuz0A)3{8Ev`vHZ1KT%O zoBL3b7old>5gkuM{TjFD=HkBmX|I>z@-kDP{EEA3l1$%ZJ7K4CP*r$)`}wSiFE642 zOlq3VN56dNM|Ld4c{CmBsn4)~SD=gPji3s4KcKA)p$8s$XFr6pVqT9P_VG^-j7c{; z>)lWqDr08^y(FnX-|%0BDLCrQu5JD|fI~?{q52(V9Vs+_(=Q*fFMIC35EK9H@u}B9 z)T=>c)pn>!_sRF=a1i-Ivfm%`e#~?64wbZAy#COAzlA(<%GbpVqX@_WmF+mckf-Zp zEyOJfTy1RuN|!+3mLPTcY!oQoc&`^G)NcxSeItp15QgyF3^f< zmpze+3WED0xFbD>^oSBAWQXW@`;Y}3{-c7Xskc=)wmtXAkcvA+J>UT z7azA{+qNe%`5uzUD)*a?zp%?2rcAvzT(0It7?{xUcJnLy6i3&P)85&qpRtS_+PnejnIOztJc`(Pvt)sxX{jSt1ha4 z=JTE0Xyo=GSQp$-a9SVA3vQ2|3~^Ln&&tbF&$8xAK_+&EV8ui;pd72?bpXnHkk=gk z+%~@mp)U%jUbW8n_;UgJylRVz(p_J3>SB*{bg0X`S%Uceby!hUMC2m=B*_bEpO~Pp zidg$gB*uX>F;|8}VkJ4?vL2AykzwnfR1>SAM<+t86k4UI?=}z;Wv?kVA zDx>8-Lq+#LO(tvHaZEa(zfhTL!B& z`a$%{gVfQCHlpJ}5aZlWD;94*_Y(mjt_q#V4jaRSF1=;!{iUL5h1i`d0Y4I|jOcZc z+6hFak)oGmRe$%WLxsXHQ=t^qNlpIh#C)jfi4IcuRE&(8hpqZ@2KI(QUF0;9!J3Y| z>GRTwzNxXE$7?#xr{`9-hKrK=Oah99tr=S?U+>Hn&+GPCX)!M*Z%y5^eIsY7P(OS( zF%R)F>)pAPtxD6Q`%2xl-(#yWIYxvy<4?RGDZ&2P6|S_)VaF197=TBrj8+L)viIg| zCQvx}=dEpwyDjI~R<`~=pVtRhsi;}7F9c!|=^g!W02C4pPC|*5NvR!3;ti>fdb4S5 z{l_RHqGDR=J>J;TdUdzhh0_b=*6!f&BDtjhj?$#?6nJ4lPZ!N16l{l)CB@ypezX9U z*n}WL)oRe!De)CbUMCH(;UHWZYmPQ>ifR`s3^zf8?N=tok#?T`jg1dc$`_&b;#{qA z$qy)2zdjo8n{x$l3iD#fS6svT*^ZW5AcOXU2`JJ&WW$+1QnY~*zv`19wvKQH!acDf 
zL_Vl`=cQaMC@6599IR^_tTPTkBoZUNR<<)HFtJ9|u%fZ+U!8GPM6qOzbtNS&**I=U z3P#Rg?IDMk;w6Q|@Fx9!G(cfUccMSQ+Yf@Fi1DMtMHffFS}dh+(^EO1j`q}OBxK%q zOzY|L+hQ@&e|%HEy-rR6Ut*H7>KuTMqD5nB5rRug3af*PlQ|SK!Yzh zgB1I$KHw>BZtscye#q()J6=uxi8RGao)G`JNqN>tDHNbKl`Bn8Q2AOqw)f^HfrXpj zUnZXG;8N{-8GwDdcaNy1d~FW%=ia=DZpP~DQ_nhb7Rcm}svE%Q;l#D+>u5)0$>(=Z z+fWmxGVHU?eNDDW1{F-~`*QKa%233mhv;X3TnyV3**e3eJcfFJhF7;@d#UNkHWees zw?U*#u4R-rUR00RviXu+SG?7Ed=^Lw_3JgX-)j@;ngoQ68rDlte>=ZQU$0QEA z)AAKl^TxV@&Mvh*20ErcGjehroFZ0D)?eN~cYyn#2#7>u6q{#C*}1>e;b8;_09?-LSkW{9fjHA3?A4Th9U3@PwK7VZ{?dFQK3K~{E5|R$E?y} z^*^OUrx(c`om^Heaer*5jTJpbmo%_{GEbO|Bf%m7RmtYgVJ}mZz?IO{35fQKNlm2LMB?N6YaS>-#~1 z)!G->8G26+Qee___r%{FW7KG8pb#c)RMhhgP*NKmc=u-8($*qhQ{-Gid1}V5h;Ikf zAbEv(T@X|J5*k}}CHnI!$PrZ~U`RhhP|UICwYdI0ivyj_rX`pBMbmQb#;q1)9RE;C zq!S6=jQc9v-D{tkH#EJCHQ?$Vvw3mmkB;jiUdZQl^ z5O3Npil+MDG^Xdukr@iP zl64%~kUWZx94Z;34n!xaI0AXD5B*64Co=pnf@;Qavxf@srPpWAzfN-O?(6t%iNnB+ z{QxP&XIMF;S9E1%kba4u%Ny21J-39DQ!6Vk(d3jd)as-!Qcjkr=p6>iWFV!CF0#T0 zo|wQE^e4b7=iD=r+knm6hN$gF>(q>N8RJ1-Dkf!!BYnB5KA%{~BGBO8pWVR7nUcR~ za=1l_yg(LY$wDDSB}CChPO=%g;=|eyarZUk-&U}08APF}Be(V#wpzgUg`3yFbHRg3 zZ2ZY^F3A6->vVtSJNbDV!Fh3$f-^T1Je>X8$Mv^+n7=tup}^_Yk(UJ92ovzZp|cVz zYDl#VXX+tPMkmKQ?~s|0h;q}3k^Tew_wWC3Gf(tQd81=GB}mF%?<$OtoP6<`cfcbO zg@e}eV=tjXnU<3hMy~J9l3nm>uLGF)k^?F{ob(a?X1Peyi@$I4s)LBLj9d`_WAaoS z^wj`7lXvAJ$O=W!gyW}M8j1VOLRDuaDOAny`*P7se@^_4)AJWy@+rJ9IOAw+Eh6zA z_k2OjFk4eJU4U~6p{N8Wft{UA9@yVt>`j2MMqDAdu5ic6fHG{H72cySj2G9Mzr1nf zZZ5e%sr;;x0$HKP#q#Cfupjn1Go_JK8Jh;l@+|`|DTGjg8ye;SxJdl_f~TipP2eF< z)JX^_tZ8;Qgp@rf;i-LE)~~gz6cy<*Y&<)tWZGGRD19SfKO-Jlq$YEd5lo8X{B1NB(crZV3ubeR4Cz>WHpx|M38_BJ3hWmoBq6lY076?+Nb_KRCVdI&0Yj{)q z%_BXfQRyX0j@J>VyK`Ec|@ewn5BV35ohR@I+$z1vflpj zW1?gqxndvBU9@MUmx8DP!gT1#-o<1!AWJH8IX5w@$lAqHbp{+IeBLL6#|-|i5;S!^ z;H^1~H|0s(mA#<(#Yx94mLIdZ#gg5WSj-KUa>TPHZz*8rK7WR92?44S?Pc5IOUp0`?xej)B9D z9MqE#bU?~yAWJPs+uRi&&Gh7cn4dPc^Ygg(4yJ`xeZ!1;mxW~{?;J5nsU~k0A&VE- z5TSzFH^F(@Kfjw?2NPgvF5$#KsCps;^@8)<(;dnb5LHh*ld?y`pXtcwcfrjYT9fR9 
zQKu;NA|8-pc-ERu6sc9Q9@;yj9{Yog(%H;yt0BFxR1U7LHdM!Hnz9CkI_9N>Q3x=f zHPJ8*MW`Xy*I%NOLyeY>09$ecIHb0OI}uEeLDifch*Z8Ryl!jggv503yu`gi!n{1W z>Luxf1uiPINcibQXWawJ$`E+T5-XNJ@x>J)dMnu>hdwpxJZcGeUfeQT9xh)ZQL#m% z(>}|`2WUtr0@mHvPIZRxM*TNKzxN#*J&96Dqh1giB zIbTZuHkDA}k1%WuwC&@=tKeQpJ_f{RU+n)aWat(dwy@|+22UserkaRbVyYPh@A&f; z$Tp$OYW?F!F(3MC+w9Ibk$IwWcB1$D6~^l;yp|%DS%5X>=ckP7it}=H$_=`2@YmrW z^-)xk*Cn}oy9e&yeP_?kPDyooJb&638Fs=WQfR^X(~bh)y6NMumMIXi@u2FR*Zy%L z#)(n5nu;jrnaDG7H-lCOq~K!m=&I)9={NAAdN?x(m472GJCV*qyd++r7M3yD`UctW zFaT^#@3bKu3)IV+x+}N1+N%xiim_|Sb3lyL5x)GThLo2Z~ zm+Jcx@B%^U%^>yY5fcZ)OUuhIkyUY_;>g=G*6{R^Mv-u^gc`k*ZOcM7;hzx3DQh1ft z8}cWP7i3DbCPiNF1vgC&;G`OF(|=sLC_%MiySZBBMeE(1k)h&xJbC+TJmsLIv_WY| z{VG-7s4fpufk|w`79nEXi0GF!HP+2hyBoI-$uhOpfHnl&9spq1I+m?E&wcf3E*P98;13TSL&g*C1W`fN zH@INY~_+L=o}^Vxg0%m{Qx-Y|;%MS#yFkE#H?4K~Q%?TwKS6S4+JhCr zm|6|0FWnyo5z%|1jW6)~3R^WG=_bcl>f~gz8a_1!gUt*Cz!HBi5)nPrX8BF~EW6mJ zledNlW7_78w!2P^b_AwcgN6EO#ilg^9#b%v`|!9li3{ZN^Ebo5NgWg-$-s|AY%)#z zqkp^NKS@7=!=t0S605P!C?~7f!;eS)enACn^Sr1Kk{za%%MrvaPzCspQ%PP4uIS7l zvf>sK*@HU>@|}67#LiC90fmpER!9av(VLVnRO>Vanrwj}vAmETMap)3A|Gks_qiMyx1w&NIx9%Mo9?K^ii>gG#pDrDR2#srV<0nYE7sJ zLd!zoHydzA#9A+~#zGU3#MLJbyXOKWK7<#4523ptW=fEL!>$_nb4gt^o*t`X?M9K8 zc<}N|k`vD`z?!RnudY>uBeNLRQdM0(@SaPSEz?P)di`wVo$WUkvjajf-ML36i3N#y zVidZdHJMVN%q9tqWjwEF{oeQeLCFATI!V&l#xlMy*-@#Y$}?|F5?V>nndBWOIq$s& z#lOJ%#q|vVP<%xOcMpwk1v({XUc$lrBbSt>MhGO8j%_o9Yhvb3)NFt&CB6PEu|e8$ zO^d85U_NU_oq6d*`DUSoaQxARlXg>&RP=16cl~^dxJm%vxELK}WmQ$xoUc{8S-XiN zDIFS1CDD^8UK5*wGptfw3UF@UK)HUF(eRUxJSdlgi@zITOihlLEIoAWO1dKau4;qA z(6W%c*PD34oLrgbIWAIh{Ute>&oIE6-Pq!GJa&u>;sq5PZ;ZQYN%{wbRM*P~W4avT zW`X&25MkMDi3&Uc@6O3D2RnX*Haz-}KTs;;}@X5P$ zt$qHz;Sxb$ZI^x+my@#vt`Ae(hSov{OC0AWzzWhc->~O*_^iIPOEof09fTL;5|Lz- zx$7-`RMkm+AmHqZLd1K2V|8Yx@P%Jv$RJHco4>nIVAr2PFJHc-nA8ReH9mc6sL@U* zv4Uzs98uP80F@(Bal37wU0y{3%9)O#!V4Uxh<}YoBB7LN%8pY{36(Bbty)*@#6BRt zeQp)t1&Vz)A#(*lLT9zLfafN2)z1MHioN7+lgG%L>2}%@M0(c5ey zyQ5`#YB*^4Y(r4CjKa)?{TyBMGbyW{qCCAN5UB$1l`krTEEYIj20aguS^tB57HLbw 
z=~MPCZGVI4e6UuuHRbY>5a^RtxXUN65pKAfG%$^?y-6`Ipi zldFYgF~)6dJc3bY7%h{%E~kHNF(k2)&$UI)_K&Gc*QjvO~7JI3;!8#iz2iQe`C8S7b)zI~U@ETNdo^6wod zHf)Mlty;I=1?9)GeF}q`YTL)<96$by0KZo+-^T|qZmmg*^mi3`qDo3iCN|jAHC+#} zC^w{S^qHqJDdlqvUXt?8onS)XK|uXIBEKZp}Vdw9EXznvIG9q$JxGxm)D)YW^Cgp>#^vLV;u#M{N9unK-blFH<^1X{T zD)7a%*!Ha`f-b=YnBCBqA9B1|J6R=wSGcbp;pYqk2SO`*-`zj+@J^*ehZ?t4iwWGnzcZT;O5p}NB>aBwO%*IL_jWdH_e2(2EA0S7qmBf1iJ&R^A zI)|YehxSlE3wgCUh(d3JfZ;*yXHFOLAQ~giFo&13OPT zO|7}wGaTd36s=OEE5blT)#=;Cv;7Cz+{EhNu7VpISJ92dbgSkfK~c2ihSSXf%nN!4 zv{QPh`VJZr1&548a%PZhDzW2QJC1(mFR}@t8u#a{ZmzP#2d2q(D4WA1whCtS@9aZ!f?Rv}73 z1yaEO;%FTsmXd%M=%GW($XFDE{ETwlcfbZ}{h{Nh4x1vYw&Y#T(*#%F2-V-r1naA1 z8JD=rDNC?gdKQ`pmioGJ(>wc47bSAn#B?X*ks1zBiP~C`RhAL|`sqQ7BC`n+%M}HC z2}5x^84(-oum)NS_!D-6Ed~t-hYiVNf0ZceDr$*GUtD9<%GY}u-#zkNFm2!cIO#v^ zL>fI1$Ai00ki4m99?l7cXr^lL_*kIC0Kdpf^k4}{MrvdX?%h#jH6S0E z>~b`3AebEFL0?Bx&y6M9h*;dEkAPkufTiu1u_M-2Cwj5K zut}&d57Vs7ReMjqSZMAgSYxA2|yLiiWtZgoQ#WsbwF+rO@g_{leC%4+Ss!`X=Snks$;ig*eA7;JIkff+X9@ zB2p7quGRZMyu;?{m7Z}8W2Ng`u^9stJww8Y{P~W4)HfPItPi7l<^ouBf65>pYNL7HT z%pXM(b#`!4u=V$tdL1+1lDFxgXT(m)CiC?N(wpk6#wU`K%R0lI(1B9|4U--EoSpx$ z5qb}skc^mwDG>s|J1GwP@F@G?h)%BsR@85lsrkKVf8%U3qch@+?frky%-pNO>({6B zu^%KjLqh!mYcQIrZID=Cla2Fw40>zOdX0_|SP}e_G?PJP#@DpoD3hZAgeaA&x zSsB*jR`9?9NV>l)l>2y#ioOH+Xn-mN?pRKK!F<@Iq^3sybPJuj>S->`IibrzePN8| z{VNahvCeGGVmWpn(c_*TQ^h3Q`Z@>}_LxB!Hg-hs9OOv2eEqsk>>zTw5w(Kqk_S#? zr&=O9TaDahY@1vI>pI@QFT@c}YLe#!QsbzV9-Q301{w<8ob1P2r&~dy_@Q5;5D1Do zPcA?$)=cWa325y~N@IF-NS00ERg&(U2v6IEaA}C6el=8g?C>cZpnf%}F?omPxCci%Ol%@HKQCXoOJ!E> zS6d0T&}GZ# zU7dB;>U)XkC<0k_J-r$F6IWlxhiiw5i4qeEv;UPWe}p8-2;|gNO$cEfI{4Hkd6DZa zke9-oYW1WdY))J+Zy6n{LUp2E^W%>{zT*GI_Jq5+)*4W#aE)il_GY#5^B*R6pWB!B zH^4N)*C_=7GIyvU=w(Nax)K;QolaP0jNuIIae(9)U6n z&B??6XRW9R7S+-ANL8=b>{IugP87kLMqS5F%qz!U*;V}fiqjJVRO_@79_Vi?(GjU` zM`ISL21P*GkyWG8GXK2jM$nRh^r7s4c{afvY7Cr1Euu4t(?7|v`~iH2uE>r|xgQL? 
zR#(M16p9jfTd4z6IK0d)l$UY}vj;M#b||F`+no9IelL@CzkC%qaQA(tozjbP|A|P0 zrT$;!itaA+UM4UvlSAg&%Zs(AH~2K|%{@GNLuqIKgynN*-)ob51L71G`31N2U1CTW zsmmQ2Fj7(RReT!#IlLs=qT!WNy!nGSVh!NW)F)VHP_wVOrS4IdYf(GTbn3?BzI_A- zaWqmH2}=vQ87nM;)Sxi>)@O=NO|vb+c#XYgRMl}i{<)1&C11ZecpZ29BWsws^gc98 zF#vOJ=Ef2KC`;Th%7TkzyZ%@i-G)9^KVtUYNm0WgIY1oH`C2=6lC2)t+A76GJ%3;@ z_O>F^;EE!XT+brO``|GH!g^p;aa|3=7nR^UN=SxUhb?Mdabn6_RgE=x5oJwogxLtRbKl&lmGg~qG8~%?od-r#@HM4 z!1C(5(|VOr6`ytcOT#m|20ap|&d+E``z^EP@11JcTVisa=h{8KW8|ar1uBm(Xh4$g zat-PF>$pN{RCETdA$Ri~n8k$PJlM=KV7xmsi1tw1Kn3CxH*Ss3`|gN7Gy!lRjQcJ- zprEvpjhQ^y%&s0Z>qf=I#1t@y^g|g(MQnKa>&w-(E{gnzp&hbX6X(wGv`$>5WKBT* zFV8iqpJh+aFG?FanQ(7&PUgW?FrE&zcu&)44wBW#*rAN3=d;#IU>`}MbA%7My!}LT ztv}%(W?}KS#Zi4UkKs18-G(`SWmj*veO8ZiV@az@c_NtXz)#~UPx8x&jUP}5NOF`l z=$`8(%xnn!cLjKg&*HE0=IY+z)%7jQG}q{zYCziRvcKB38zxa|AP>ELzaWytWSSz9 zel0jyE8$cYc`K4Vp$Mv0^o8tN(z@iz$+&EL^cxk!brYFxq_5xbTi*2GQF4320Th#C z2k=P7qKay7=Bx!)HO6Naw8U0LnWNt2&m$g6NW z}^z^zN?t;CY+e>$~86=1x@|o(!y=dE{~=tN#X;r~rfS zZ8%1IoRum^nt~%27S}}*Bb3#obZPR$hMG=IjEaNH3K~{l;tnlEfrlMsOJkJMe%8e z(aNPd8j(2|5i3Zy~O02n9iNQ&<|K| zfi?CyF>G=X@m5T6aqjOSGIkM?pW~%tRS`>ctlY5r2LO4b<8pw%9?-~P*ZEjv7Su_eIY*Y3IvCX;X*T|~E? 
znoLZB)t!}=Fu0ctC1)6#=LTXz!S)x0Z9Z|Z)(%g@8rkVn>fO=3d%IUHR+e|}_do5@ zMaxKsDJl)Ehz&JdWAaJTv0Yn|)^{ozZ+C);Ody%&wDdj{aw}KWqK*j)L_ve6v-v4{ zN|kn3zb$xoF>XRsTYdRW$Myd{xbz3MmUixcu*q*4du899Im005EQ!e2*I#IR`pY!O z|Fy3p1P_xD3_@v43=yNG_px^K$T|9;D2%R}gjeud`hBm-F+%7dKsOP9fKfv*d72c_ zfzNr$Ae91{s8((G`HL63bsNK;I-JkcgWuEJ_}c17KOCo~OM{`{Wi1l)kN+kaROeX&H~N%hVh+(x@yf2g&O z%Rsj2%Q8D3A3k$_ssFtOj{|m~fRbtaC#lW}u7o4~HB23gYttYykXNDGKC09WHVM(6 z!Yo01!`+n5D|m6F+jyD_)m(~0B$4w-s5jFUsc=EVv17iX=U5>jk@P8$^u>7HiIS!} zNcog$b~UY%qDQsP*_XIx+{gudZm`Hy?+CDchq$tLdFHh%S(=AtJ&z!n4+X6WiUZGR-T>HvOXnZaqqZ z4mzQ(GAFGtXEjgmpOuGJ#Wrj;I3Z&r|@q>a6VyeGnpZD2FBs0i(Xg}UM#^yjjhZ0%GM1)eph1vij-5t_ z-gd;A+h}bm0ka*EK7}f&1o7aINqlb{7}sKaYRG4EF^eAn4<#j;LjwM1x5cZ##?bJ; z%7H}kpo4cbC>SG030VorrYhXxfehk6Hl8{2xql`Xt%#F4UsA`xB3m z?*VtO-MwNG^c;6e=Df%%921Qb&i2%mX0Vs^YSL*7VKXDb#UBhC#2s0Ss2t za?y-mhN_Kg@jLtfc~AILbtaDDniQFTgY7vrPUJCR9_c)~E6k3-?3QvL1h`s@H0;x2 zsQY|mUg3s2in_}L*>*I(20FRE@o}8ucdalE_Cv>p+$MBx5g%{l$uFw0sVy)7Z-JET zfC9V(#!imZZq&J9IM6jA;t;mTQ;q6-Z&o|Cu~8ciFo;%X9?s<@617o=dkYx4pg&J= z(o5hjOjKELLQzu`(<(}3IyLJBkDs(bTM0Sc1lgXiz_F`>Nf>&W0NZag@rA~cQKw)` zU+%dhZG97v`Ik~Zl^?G{c9lXiFgjHSSDSrF-h$&NQNi5P0+dZ1*>F%9tzXR4u%YVv zQOl7cp2+GGweA%IaPpF~ie6qnY-TzS;gB@r!eR18N{;)%&FNt7#DCjZ>m-joW@3amf*2StM#FESf{QW@VuD?H zC9@HLoL6LcqW0OAi3en`1EgF+Fmhr@8ZwIZTZ$y4mm;yH}8BbqmQ42Xt?V34Jz4cdfd z4}wVskb`$$HVAF%$O8KyM#9muz%f zIXqi+p2;L5h6Mn%dD|HRrhpb%OSU!Un<1goYre8%vP*3-a*SzLbpp{RS)FmABmhk` z->ik)jY@2EQ7ZgghSZTp-Dw16$beY-f}=xA67CSM!6aiXw>(EByM)(B04w>tMoe0pY z-5i8g8v4sOuu8J-FTGSCyjO*a+R59S09}~lDiK@7w6bZ`Lk40wDP#n47)$R-xb!wr zK0V7ATj1mCL)nj_ylNeFc2L_Q)6pQ?L@18}i@M50F)T&Ar1i(vrJ*+xjZ8}hIx5l* z{-#CzE_LvOb7;_lI`8X9(@DA4G<=x&)EGrynyRyw- zoMiS~;^u-2-eI>*s9Wf7uf?e(monU6da)58)#;M-0CfInsKR>gUhB@O^2Ah>v3AWX zs3tWcH8!vtP2+0AXEo9L zdbynIH(7%=8+U=3!%!Z#$KEEYd3XLe_4^2f&k?bHvHpP{O|_v0JtDI+Ju&a+;d80K zz$)BICBVr+T~|cbgG<3=xBNnoUtxD6(4oMDVQ_8Uyc@x8e0IJe+^r5ki#tKuXf%vl z6SK7iDg-q$=yyh{Q91MvZO%1_7`K~_UnTk(mZ)EwqR$^e%H{z|5$<*)<2e}R19!~# 
z*;sz8(8n;|HfowjkdDi75s4j;PfEo~7Llo#PO@jf!P}bQ57u16>?blAw^G6dwyBNR zfo|2d$a`eF0Xj=s!UpId4WDydjT+GKs2NqiO}3M(`yTrS4$a7dzYSDg{<|xh8vCj? z-x7hW-UgY;kL>T1pl{w}aHL&E4nhZ@J>lWt1)I4c>YR;S1nsmYT1yY_gofvkJtnSr zl`u(zwAe6(M8;ZV^Kk)g5Ui!bDrm3=?v^IZh@>M_v-U8=9chk9h$K@LL#1Gg|~{0+20S%w$Z$Ox8u6 z6UjEE5bIaK=4ekHKa4M1ddck%cT#QEzU;hA{=BjJUK$6AjT)#hS~MMQWjbY!Hi*1!$(iLB4@B>zp`m#?hiIfjH<(Ve``wndF;Y6f za%`vmqP$;@%YSOVRq(EX$HgQ0(8l#4j;ekCvYCN26LB`3F-HnkUvj$b36T_5M2!;j zM1-802$0N~y`cG3>uk}YLN`XIY7gXLrp}qk4eLh+Oahdhp=AgeG$QRRmvgiry#hmB zNEl~*`aP6~#njM6^TFW9<=eb@Gpl(yY5;P#QG3civvl4ykZT1_TN2i&yjr_%-JUU* z%6#wXk7yjCUMC{t))+q8+!TbO(W7-3a;sKvCYb)BeaJG)rH|?2f$LmT$z(tn zhn<~(HvueageJ`?SvMnUXD;Zv71T_}%)U1b49V9CRE^vl#?H*`7Eg4ll|Z zIm~P`MPG?Bk_y8eNJMxHg+3{4@Bm80o``!T-)#-q<>GF!hieG-(Lfn01DSFSM4#Y@ z;ERsEO`8`gO?;-Obp)c(SK#{Mw)?nE!@Ua_u(FPDFa%f`<}7DGRJsJRH~OwsB`vGg zP=*edce(V>RHoMe+332uQxNeyFOz({DM|}84^fA7>pi|x`kx1t_L5^ULP*u_L#)7o z&uPX|@%CAaOrwBcj$YrCa@Td1GY5MmnqwJgZ~*nvqrs_mbczH8>@vBQ>wJq^#JdWh zCmhD<>8C!K9Jgsck71scqn^D1)1~LuoMGet;g27i*8b3YHcIyAbqXm-sbiU!+}*3Q zQyQ!eB-qrv(l_TxsS(Mt*R0N-w`0kUbJrA(&ijG2K3}PP*!AA~fmLlBS04|Ix{j#y zb$c4uIdwP)1&xg4ge^mN)&&}q1mY@>btMQXU-UJRL5W6R0MB!fa7x}Q`9d>*dGegw zI4Z!JL*C9aCV~m}ps!0nc3UN*uD7QCSuW!DTF%_12_rPDX)U-1=%&=S90mH1h9kpU zq|kbK`@#!gELHUN^`jL^;%JDU;NGz1{|po;IQye14F>?U)6&wSF%;CfavW zQGh14B|>|BZRlW9ny zVQFwb>_7%ERaDEYUYu8Vm8J*NG%ku_BT!!tzbiOP)9dJ_K*88eanlV!C=sJ2CbJJU zVbjW#Wm{`Z-qc!2Lk6fb1m~TMs^ah^V(ws(j8X+K8BG|Xa7DZO5|6yszyJ+M@FfPQ z(%IL-OUaI7a{82ykFQwvT1pu~<))?`VJG&Yu926jC8~jHa+$tgL;tMyp5CYI{Ac^C z5r-En)i1hOP4z(&Uc4jbn=B?T@M*@_yR9z-vV4o%c}!2dhV*NSTEP#|-=_BAgSj$_%kUQA+vyCBKDuC6W;X$!_kyj}6&_lEul zrl-sj9jAiU)U>ZyxJ-*v)ni72XL(#3588*Oy>eNvm->yBC^ROM)t2f64xvY5Nwcn1YqNR#>?HqdSb!q~WQjv*V&|C^mQNu9UTZYu5BHt9_sl zefej?ToxAn5I99sin~cZKuK4Y!-iipB@Q{jdt$Z8Bw}{6U*pzK5MM0B&pe%>y0C2B zt)ZpGNj-o53`ET15_WT0(g)*{ll=e{x1e^jPVT_8X()B~-LC(kzxe0*fAaq@v&3gP zb?qb3CNKk3>yn{WdO(_m{o7oZ%%(P|;IZzQloll!@GgYis}}HA(xxD{O(nEsnR2?h$r&rLoF@D17W- z$lTEFW*QqY!hN 
zRG17OsHv$T0)sZhNsJ@v$5^W(#2%T==2SxC2*qdc&OL~K&uOcKmi?cbuQb~2@?CJg z`*=g{feiO;N9L{VOLelqxu?M!rbV=K9d}pF9^&4WhzLYd8>*N9n2qJmKjmDqq!2FV zB@{(g+x(CKynE}W{y?q5dNFjo$DQ3gwRI+aKLGI)EzR^{zan;a6XpUEC>xaC3|Y=&TnSV(j5zQ z;w8c)zM1|N0zwN+@|&C=sM@H+!VFk$p#BVy`#7!_P~FnBEhr8u=NIxsfy=18voDg2 zujDZHg;(=_0v{`vGG6VEvkq>25-Mthuv2ASn2^h4i4*ErZbgve7Le%@I%6Is==?4i z*j)W1%YON9$P8Ml-(CpeRm}VO+v9}4Iz){CWd^%{JpbZYqfKDN%o&^X%hh{m!_~GH zyp#KSaj3*F|9Qs@sh)m@#p+r;eR$@?)avN6$9(sILB!0r{9N;)h`rK5BVg9cW2ycn zH(?peEwlBTxKr<~)y}nlB^Ws)#4|4&uSe%g{V0)Va5U+n+nY3EYU4!HzYhda%40x% z1Ye1yAbSH%_JbX?nB4A^VG;P_yw2ER3UVRhOdE|)fe>2+HqR8@NR**>?b{cCpQ4#! z)WyJ@?*m^M7kS4u(*7TN?;g+f-T#ljcWrA|+iD$EbS|2eS~-=Hx-OBVRtgbT%K4B= zMTyU9r9_J&gcgxgBqYK*U>#6$2%%UBVba?%eDC+y+VvUj`s4Te`{VZcY`Z$V56{>0 z`FK7a_s9Kyf8@!;_WbUEJ#1e#qfT8TXl~E}1g=3p=gt37^W>XYI7AR`6*RTPAo-zD zjb!v(jEvL-f0y(?ArB(Hx9}3w{Y;hNW^;f``F=s;# zL=kJJ0y?wMx0PREQds@qs|~N}E@%43UWMWHF(ja~ksJ2rsc~G$`>fBy18mZ4wpGC0 zK8}vY9qg~sz^ux)wP32pLN(oN zxE!V;RI4y=1AIJ=WLnf2O7$7@GTyEisLyDGD(&kay25lO+(JN83i{s_fGb*d$qx`8 zQfz`J!b}(C%H6uWMTZLR5lonK(K0A|R%w;$Co$Z=D$On~WDrpii~0m3b5blx4r6DE z_y=p&@W@EzR0|-Lf~4%rDNC17nPvch_|{d_cbXHSgTjq!+~SOeuov}uS_LG%wjP@D z?z~pI>q$ zL;OC?19*8`Kz%@T37*s2tr1@6ZSmX z=xUy*$d_($MTtmctoZu8sK+k;w~_R2E|s6`kraEUdi20|#s6Bi6Vn1dq9TH7Zl4ei z5^1b-Ll#E^vcqBoGojRW>BvGOAq)i8p^e-R+_%zUj>)ROz4IG4+~0RfOl~zOnFEtk zzMB%}a!9_SZ!T)|nXzjF6{4y#rohqR(xsIX+nrE!t$^Giqlej^AT$zV11_Kr&ZQ%A z%kjvymX4U^qqcL+hS*K(|LYrYMhF#Bhs7Lt0N}du=`}L4>V~?^{$?UT zsFq}6;Yjm%Sjh6;Sl7D$;akMKEN&?KV0qjFVS_$~C53MZ;Oa(|jsy7i)DjJCdL}ld)uYp2i_SrH$om?bxWGEbCa#Xx6&mycDZeHDwExEGB$s) z`q3?=8rcn>UU9kQFyrdKFRT!F6J>66D zV$z})sDvJ{DU^@f-!d+)!1ab+Z{oFjsfu&rm2#1LPh5_y*1vysWN?3Nhm>*5<>(Vh zZ%-Hwb}GU8_Z{T`4dnDQTn`wNLVD-FPb>uoHGS0Wki@JEg`ow-+cYO|q> zM(p_g+fT=ztt^Tu+N2k`^pVwXuOdu7+=_-XImN})RlM<*A+shV(M}B=$jPPSNNI9v z(3gV!jXqg6iY4^Iz-4<3z;DEXxyR1LpPlqVJ5pIZkW9r5^NaauBuc;dJwx`lmNnD& z!9A`xMni4Tfuq%3$@RJja>KM5@9z09)0wIWQ|^!1LYj{zz+v(8+eb^U*8wsaRl>Fb z>Nlt6>$1cu-oCw~Hi|@)Hvu1PRM7!{_l1`35He)L$)NGDnBB~gniJR6WeSVe2qYx9 
zr&t5sbXwsIZN$A~dwSqx0O3X7!R!3HwWOFnUb9#(6MJ5=vA({(HO92}8{|a|Pw757 z)CTBf{I0C>RV7A)or3Tx%WCi2Hhg;iBC;YKqUVAv85??ZNIEPUwsP)Ca~3fJSy?TE z79>f*`%Cl^tKflZpY(Sh=_@=puv4}ZM?vN*7~irPi~Fkv4R;IN&9gE(5DHIHL20rJ z&BEZMDuD7X;yABuA2|kG_VX>CoW_X|osJryqUhD%jfu|WJ zTNhileNe85QuZ(g6Juoao0GjxN2!_y3>k;fv~I zs5J0w}F3xA&&Z zWOf5`k%CxqSPG;}^pMnzAFyHx*>^G54m4I4A*VUG+e(8Vk6 z2VC{md_f$AQiIv(TSD7HPvEe$G#B3tY$QPi;cwz1OVv3YkD^QMc1Nv#c>o=tQ6=6$ zdoNwS?2Rk%qHUGmOjXs4DE*bksDn!>GY=XRVZQfVkEs;c!rtf3on!Uzevy2D2oYGn z`uVo|`T6z@V^buN*hfQQVnq&zv?_5BI?xajBX0mCo2^hM&svfNrXOfF^2{x0Z=%w2 z8WxdBCRnbzkmQZ))}2bqjH+yyE+;3qbJs52l9MlUvp6y3e}7hHynXwYwFuaY*cz4% zPWU}{jh~Q8;dxUMZ+@oM2eUGYX4@hP-IPHhl#6S01Pg9j(NKpkxI=T$pMWXt$Jix- zx&^}M_yyZm2EVlX=CGaaH2Yy!qJF=Q$2s6(p|W*KXCoh_)8#Aepwy|bfmjy;BL=W* z7KY`}o}Barr)G@N3Nms*btb0+V@Y}?QVw+oqvB5gz8SFVX5MBX zu)n%TaHh?XcgXBAXMHc+4bc{;+}f>%pi5%C4SKRf*z*>|AOYiE zAhnKHO3-#0ypf_du&DDFnMa@!P^k}q_Bgzx7u~baq)Y%nh8@u{yisHAtmpmj?!bS2 ze;=}$vZC1uKel-*29xE-a)*EgpknUEqN3S&pti|e$|s}9AY*TbfRdLOgo&#q8I(k$ zn*wjJ&1R~bJdFM=-IxLNSE+m;Aa;KHO6>xVv1ain-i*BpNG4_3rr>V%$WviKF2km? 
zIUo?4=!C6r?6eUkRw+^XScWIj9GtNT;NGYqcTlN*gSNk0Hn!+@F(!b!><);F^>J6j z#It3RGSq7zO)3n2=NDg_&R=}be~m+fqPvD3q7(`F&YK0B`4aTr#Ib76+=!+GDzM+tJ(NgH|Qg#jV8%w zV(UOA34P-{V55tcwNYA2;frrb7xD&jorA1qV720qIITQICTg4mt^ucpz2u|D0^rny zd-I0KgLi2TM_<)=Qirk7cL z_B*`Z4di-5ZGcSkBSqQ{nt}N6KroO?twvZIJzX7yceKsGcmAB^IpkUxV220$3h7s{ z3->Q=1O@w$8*6$;(Y+xH%!{FogjI0QMh+gZ`0&Og2!*s7(Hkp!|KfifZC3nOaO1+` zXZXVN`Kll%*0NSo1Cv>?L=CwGDO+r7^jXICH}J&B8y)cHwixq@pPZzLXAOMR+H%k0@ANBmXCWYle-*Uq9Wl zK?;A8tx+uCJb{vj9vsVb`0QolVxRFoF0-Rb_zmzVn`+!6MBZLwKXJYC(%J}gkP#%v z;r^i49EV1S&?>G>CUoeu$|As~lfY&MDh|Du0lAjP?zh(>N===UFsB7YLMP4wCUt&| zBbMbH38GsME5=%qbg3kb(r*vJZSXO&wEy8>#rd@;lqYp3!zGkd2UOqqVk^AFIB~hO zIR$@2++Ygt@T}e@htVZ_ktom&t)Unj?5{tT0nA!W-Flt8k~e&^Y$Nsv`js_cWqO0S zN@_tE@UMDR5W{KS2y@~SG$-e@iIF48eu2Uun==P+c-#){jqze=0kv)%nYEmVr)q@f z!ZOIi138SthH6?Vz!Kx68EChtsmBBaM4kK}B9h!dl!s86Z8?O!jJ!Wpuur_nTrFI4PtVU()Y~G$|MO z$v951^AaQ7nl2c6!)Z$|zK<2aEz<|;>dfU&!FDfxkk81n`v?lB6H0{AAQ%@Rhkgmn zJA}xF%DM_9#e$V87*72jBzTaQ0N7YttHq5LI3y$1fK0r0Q^BWSNZdhRS8$IbO9>1I zLiw`w#(4B}$Ks*}eVM2ofzc04KLn3qXnn2epGQ%@&m^ha6vg6eUb>Oenk!}m_fhH@xuqHHd;bO=O4y9u#=5Iw~%~mM9bA$1#)?Rj-txA{XDY?*R2O_ ztgoQh8Dpy_-!c^Ufb>*vxpH9jwOM_QSv4R)p5#igdP1^q|6Kv|aHtol9I<<^Oii!T1YJ;w3@MJPy^2}g0h*o z7F>!!R=(Yg+N3u@66l*4BFriSLQV(c#1OA~TxKT`1eFmuO1|zL2ULQEl%(REc?%^Sc>woXASen45I_#>breHyB*5WR@}=SK9v$Fu4Gq%7Mv_yI(ChN2?9AwY5FzA9nRM zuMctn0Gu-@qjJ+gaT(W=#VL~XAO&?t!SW>I6+OjNDmkSJwX!#IcP1XV5xCb5wO@*( zV2}jrqWTcL8VMl0OVJXs9(!0HQBkey?plE_3Eb^kW~2Rl=2`rbg;TEqyoIe0r}qeR zozPX+%c#So0Ks7#P5PC^X;q^|?h+UWX%x(5=1Kt3F5G(=qIS@PFv(yllRebP*j|_0 zPM{PP#;g4bfz;i6lwl<16O9)pyB5fq+*rsxm5LmCI#k?DuTgl@U6;((ICFY(wIZ}E zlpe$c5Xt4zP>56N4Ah>ve=H2jtbc&=gMBqT zR!Sth04ETa$H{z!kvMVx$|CPcAYj2en)+d{%o1K(H@J`3mIk$)z(dYbuvN(bQ{WAO zL{#Ule3Cj|%8^?K&rB&=3^53S`=b+gt|SV%K)!$8%P?kFcvD)7ZH3tb5P?%Wa^9O1 zUqNoByw8-U|2{;=eW5@PD4RIcGjZpy({L5+Opz;etO@#q)=p)^2BMS`d z=2EL1x?AuD!k*~9JScHaT1LqRWk^=z%Yi5c`=CnS_k9(0f$t1CY#36_}M^E%=0xzISeSp_VW_2=)TP&?hQf 
z_U%D}?}%C|7Y>i#wNx6V@%~2o`!-mR;nLonLy%=|%!BfRgcxCTnx_itrL|CK;{*D?*^%1MsXW5aXDT0^TYvz}pm8Yjs#VSoe@$#K zaLF0l;W+ajc3Gx!cJuE~X^%kW;30ET18}8}V;aV82#aDvj8sa;(-ABqz}ojj5i7H# zgE|$&*G=&g{t%ze&z(40*p;`QkD{L}0k?x4>DAesgGhD<4bJji$!4J6>xPXRiwz2Z zIgxBfzJ<<_gT3i&Ly?-mCcx+re3HFeq0M$`NSE&F*pZ50yM#`{vxebnqmG%1c$zrR zS|X41m2+J(I^H5FOqv7ciAdoHQ05#C_@s21mYC}-_ys}ltl=$*;Uk&rELNz~pu7+C z)m*+aDbyI!lDfQ?`EBXuT{`s1mMtGKa2+l4nR4QKJke3@rpA%stYQn>*!N-&YgkAR z_FBnokz4?Pt5gmcM8&z~d<92ftNiG+;@#RERcLeD#3sX8n%;m^K(=KbV~P&8w0==z z706061fPy3=B|)WnhqNKCwDe~* z_JqGix&*QW#SofF!bAL;NxHC|BtCMqJ;hc$a_jvY^!0P`m?SusVKoK>pzyxRBH&QZ zHCO?$V@9PNnVy8j5}cY4KC-!2h^_+DP~el%#FX)17tXNbO>&0@f^SSO3|u;qSNz;9 zYk;{icSe3r@`XwsmuLo*?n`GM)HHLwyhv45mBeJ~PprTE#f3Wo>;h844Ldf2UjUwN zk%1JxEb6h+{nZ=(96y2v5^5`{G2LQ2ZZlRQ$SG6S*&!UzJb_fRRp!a+BuFDexw!nf z1&f#im`rh!r&F<{8cniPSpGFWE#3bQ(Ze@v(EIuMNg#8z;9!=KBi8}#U|`~KZ>k7L zy`z?m&jH(j3xzF!%yb+%r&P(}GLvAa&d|L%=+1w+1->L+C8(4-yfJ$qm)vKK zpi`>=96NHN4%#VM>*7$qkfVm2ezbL^0~>EPL?<)$*nY`W3DAG3K?SQVv=~>H#R8V3 zpAv^1#9isbKkbTdnr3wu$FCnXk>hJ-ed!Sdy9}fU#m?SLfL~xHsGc+mM~=QU7msV+ zKw=FtUp4lbB1{HNVzvAklhY0?CA1g>hSV)}1Kx~BrQ5zHrD zn*6PKtD!iW6>AC<6YK3)7(k6LZ&A`aysWv98Mp4Ie1cPDbZ==(7Fe6YQ$RK*Rg6$5 z^!SkhrPI5W z0LYvMz4^$Iq=C{JS{N|Ik$B)>KP{SFtOa9cn;U)TOU0~lg%8fz1SRyAiR}k>D;}Zl zY70D9w|FV-MV$@5(TLSnN!RnrAYnOMco3mM$AI?o!7f55kO94D;Lk4_t9$^xx2Rg!cwPG|wD13V{D!X^VhhvsYrq9S@DbmF6ZeKo3$~N{Wxnu0 zE*j%BMac9%aJ?3#Y{g&p;x$fwKBee$jMI{Gc9HS&4!E)xvcBZyZ3Id=vXgB=SS3}Q zH*tcv(lD}(i5Dlb2)CMn9u9 zLmO$XhYF!Mri1e%F&4qvXGdx`P?6(mz4dEp>*mry-W|=>m_Ja9HIe(_1je-D?Y?UX zK_wB@k>Lf%6fDJO{r_cWx;q?b*}_J9cv>o`r-5SF6vxAv4Uj_LY@AwU=M-*KV1^{pQ*eN?T!O$hxm!p&(G_IYyRRQ! 
z5EIalh$%L4QUr^H#^VBQ2=Pn>;iT;RLE>5z$by!z_6})wxH&F~GZ3VQ1?H99{4e_@ zld$Q~k89wjg_kt^><%@b^HEZVThKmVXe^=;kDY50X`F?6J-Vu+M1wjrX7%r0X{sG3 zR7jqhFcJiJCr?3q2y+NH=gVV)^(4YYam}zASYyE9U!b?enWTsRD^aATH1EG0!cK2m zXj_q`l-$7HC)q@|T=>TNS=}4s4n0!Z4)|5JURMx1lYd0#JUlc(aE^DJIK}OqBdVlX zV5k{1n&3D&f_uw(g2ZWqmQ-V@4=3|o=+S6cV18e?vaWx4WmQww7cNHMt2DX*bm|cR z7IX}aj{vc4#j4M_0XaX{zWgtaZ6JGy;YhP`| za&wN3;9rVaHxnJ;RL9|BSMUL4GU#c^En%DJmbg@BtdC|}EpO((`izS2?8%zeiXjoAK&K(i(2Gr$te(o2^@TROzL`6BjRBbZmA~~eN5>tmvZa?OBN#%Z z_!<-c%hwn&awHX1f8YuTy4f)Luqk!`J5|eIKMr6+lF{^QPj_n59923)$sZAgb?z-> zU6nS)b*y8R;2ohmo#+<8?ku-CC%v$Z;ySzG-$4*Sb{5z70soMp$sW# zG@njb3e$tshUgf~9f)0OQ&KJzmFUff<)7~p)*5vELA&J1aZc%^SvCt2a;mio>>*HU z1;sg`B;zX>Rvu0!&4ltqm!s<|fqXbZo{S<{Ct#_o3rPsVKq|OIDJ>Eg+vBV(h^rG^Y>7~iqA5#XAxaP8_U9ve;Qu0r zK`EN97bULNW~c-N@Z}?jZ0d9S9%~71K^7cLa=fH#Ia_dV6uWpGZ!o;30_@{3P2UwbTaV%gh?1-1qCeV z%9_A^G+cRCQqTt4d9hV)xk1)JSF(;-%we#n3DvFwyPtE2sSRN9yk}3lMn@can*suk z`>hsz0dF*WW(Mz~10Kgj->Bx&(JhK2_q0NvBMUk~aDtwvPw+X&lDh&5-U@^TWffC! ziu&<7KvMRE_}J*IU39I}m6)Ox~>$!KPDF=G>twa@ba z#0jBl18nJq^?HwY6LqIK!1#Z5*l~^o;UEqaDY3~-<0{+$MBIvHa;tNp`Vm%hI7m|w^aKYV;`MPZd(p+^AYWRh z9@=Aji6*yr`VCGF=s|I0-f?VA1Od*B72h9N0}uZJm-K(q!fbuZZpMtCy1B^ zZoy@3g-9R-vj%{F!-gOA^k60O6qVi;ERXVm>kehG8e;(W0Ad@F)bdgRDIo{^63&&a zS4K803{wKcqN9~n{>|54KZ$L#9M9XYO;ErLd_$4WJka>a6eLlYo)|)7yuAgnn{(or z2;~6AEI)4o7=~K-9JjtYHw_S~jw`)Wqwts{AIb!7`hM(Kb5#f!@F zC1A~P#v|L*%znYo5iDF-%&^_E?p8^r93;0yf%gFesX=$>7!m3EKA@4budN|<9P-cY zVJ;v_aRjwuRS>xNjf)7N;FPNanIjCyL|l-`;6w{ZUDG=|#2bSD!Zb{Lc^&6}R%ZdM ztfh@mEBYMS&<{@Hp-2C-G~E;uNQS}ruz93H-V9I03r&wP_N#S&=XX`J!saU0$@nC} zX%Up*Gp5s^)}}@$@S1eTLA5_9Mh$apf(|}vx-l!rWdNnxU9J!tk5;_Y!h#@JY#wTA zym5k6{e&Rbxkg{LJSnk&`A;k9k{lF)Klt7t3G;$;%Ir7v1F|r5Gypm%Is5RyZDC=W_O#wW|uH$Uc zW{?wK8B|e}jX?7aFVT^wM(qSRQ<^Zd_!A+_c=Yd2q?msZN(I)5L9cL@a*=l^dSg2b zTUlYRPH#uQ`nJ#PmJ;nA`15eVKbtRHan4NvSoroB5kv413c=)F1>k}x<`qaPl*HeN zeGn&TH3g-IviKV>bCfsiJ4Bd-t*DUX9r1GR-{jYLvuAD}SfukKSjv1@5a5>;Mi1W5 z{KKNap&#H*{N=pEAEXHQRd;h}D*4+3f0oHl8$5l8f8cz;hk+Zv{BZUEee8kj_kXKB 
zXg&V_ZIVv!hUj|{CO}Bx?l;`{_wMm~>^8)jjEFHwla9WxJab7)Y>{ul2q&7HkwpAwWWmTs?$*BP%L ze}D3#S4%0z8YL@i1G{sTx(fdv32C3u(nIp zEAHw$lzubu@_nlgjjlKG)?7NZ&&$^p`BLungT~=zpB}zbfRqUK=Ag+nWY_q{H{c!bmCFrhkZAnS2wh~lSdrUUy zZ!Bs4=mX?BU=`Ku_!OU;_SkuTadfuPJLjg!ABevL=Q9f0#zrsLqV=JQSz$7NwxnE#12>vLh=HrY3Z(YB@%>()1a>0R zuSJ&c3A)%Es<4qb^}Jj}o-C{~cqvmJjQla1B>*^Dnw+nEDxf|Wylfli=Q{!(HUAp= zH`=oE*L}Xs)(az125>Cfhh&AFq;T`N=jhKWXZoBuGnV%==|`+saD96v*zH6H!sbja zX)Qz*!jMfrFGCW~j2-Q8l|K+aIAP2JzDtuI(!wwY3@D_{)z`z9-T$fB`RJ=1uZ!id zyB~MtsK+#Qmo{__-ahBAj~bCH5r<|lu`Z6~B^wAi$&+XF|N7627o=SRcNM;OVtCZ;z}4tjwMshE7=s2-)O0z((!au=3Jb< zV;}_@;9=U47a=?JN9}ER(yic2O(RG^{V4q|yp%vA`8aHdhO=)bL6BUxwWLCKISboTdU_0K!_#y3>3)_v2* zGL8e?1R+;dbqXAj*|9W3Nykz4>Hs;y@;^S`WhgF`qelF;zk?MVy7q&-$;63vyabseaSVfAKC95 zLlD=Z1-S`5A>fe2kAjAA7+mm6I#_g`!j5J~!c}j`_g{QxoH=bv-1Q>^jX`Yl%nL#$ z5ixv?+&XA`6}n1#jXM?wW&uy)4hNBnokJ4;c?{fiNr0b-b4Ly46SVn*cIZMkpn_(2 z3`v?oEe&H{PFVwFt$G0p3KmVYMg(99i&00Ka#3)~S-?tVZODR8qEo;Lu0Q!qqNlIn zkGJi>6yQ6IhrEv>58yOex~A}=CJ-$!m_RmSug)rqz8qw%vw?r8{j=}y4>Djl-zB+t z&ujB9JftxF#5~~NZ3#fc*p8`z0lF9G2XHD7Dc-vA2yI4lKz?u{dx%33={H6C-med0 zyDFxl-GzfMG;vT@rhC9bpJ>9ca&aicavTz zN-rq|?~&~{Quv6WlIFlR*vj%A(Ed&w8m{s+-l`#4O=Ki7FpO`|+o+-8qM;jqQ{utm zl5eT!reg{O)0LNYfwwc6q!wUm&SMkxWf~3S~ z44zyoS<>S+?^yoj`xHrdQ*iLc!E5;KnBhm&w-^Z@&fU#{i7b1`3Syf7K+y-}RY6!zI2FDd2A z>w0`uSo)#ys}q79BW!yhIKkZ~(Q(5@u8jz?k8U{*-R7`Q?p(=kK*vfTd|?2Q(yD$6 z8hyi(`#{0<#V(G;;{B48SW;~1kmJ_TxgorNjBn?dBIsq9ha)qk}nWB z;gfT*xF%{q3^U9G-UM1?)vIoj;YbLiS>L*sm>s0Z_f%<8f6SrQREQ-nH3b(P_n8+5 z>9Mr~r?Wnp7H3>^93Xxikm~hmF-Ul-eW1r(W;Ow{cfx+W-5U>1rUIs@j4p{>9pLlr zK06Q$0(=t4l(7hQ(#z=XOGncp8$gZU+Xra71^R3PFG~*7BA%V!W!<`Mz(URf}3!kuK!Q}`a}DiVo+c2)$f81$F$6jj23BCd7#uT;t&{T+$x6xM-m}AR=+xs z)I}{WvrgF@k>QaCDU}rZ_iQOux6hiEyiet?pW5AT9x#?8Uq*GwZ)D3l?7Ct{(5>iVl^!o{mPlwu{0u08ek0RnL?-nL@3)JJd-!MqKH?|^@js=LJ%sJghJ0+&wr z<K{Ivq|%r>Zo@0?RFMS4&8HfhPc6@c>qxNyx-a6^?x%=Rf$3FG8Aq1)206 z!0$KK*@3C+2#e|PZTYE=wbnNd;=k3=23%$S@ZB(kODok!7Qb~(;WiPRtRXwfCH0Nu z781O%SAc+JL#87|k|pJdf=M)Df1t|?32I+R4&=Q~iJ@R?3rve@Atcn(r%%VJ;Qh&z 
zfc8mn8{5iJpg<;-5f_6x!Sc;St5D$~$Q2moGjmG@I>h3J#RPHjWo=@q^QXlYJ-{RP}TF2wA$JU3xf{KfpEhbbU`&BZGv{G+UD#Y?+P&Btz$X*{2;o8 zxp0YVtn7zVz%eMp)VB1Z2&OK-C|yk?7dmK1`)%QlHVH-gf1uCVr(nO`Ep8-K+Liwa z=i+AV`<$|o6yoc9H9*NKlifZX5&Kx9Yjd}&My?5#j6An<6&P!CwyPQivPnT%nm{hw zxKUAz>&$Bv4&pB)icEtu5Dn7v(5elpFhPGPJ#&&wLIV$?z$z< zFi$&Nr*f!MwY9%IdcVcAE$f%R-jt#zg$z8X5A@8M@c5$kR$moC@2kI&Wd*PYz;Z6;mM`b_d^Su0|I~(BwA@ zb#31i+-w8&1$oqbX^LT|s~Q)5E_%3N8-SaK!ntIgb68B}MN`sa$t4!BA?Fk43?A^0 zjMJ51`$KUzyy6~G~jd=?!p(L7)ISZV#-L7 zQ4H;19($NxgTLAjhYCEEK6d-*-OT|jlJXrzDwupjZzHAX45WCZZP~t^@S6_j6Hvk- z)a2{pRjtZ!u1+OYq1K{l)9MNZHNX{)H2K-G<+(;OX#?yH(>GHvs51nvdKnBJE@g3@ur?0( zR>zebGDvqd)j5gzPZu~~9-LmJ6Oo{)j*3Yb39HsqRzPl$bo$2+7dS2K>jubZ4*;)e zc-F!a*G}bW1GuWc=-K=c`{PV9{^;VPCT=Wh;Xa$MAPciZ@DdGMRIRQ<%5pZT3Z4Qy z`){d_!H&$_B?ncmAmu~WIr^YCZZEars&5|(Gsx-bO+i|HX*avgP_M}_m~6OQJ%^V9 z`3RgnQMijeByu1b$q{O__ssT*mr2)T4Eiq|Q={ zW*vVND7XV7cfb7g)C@D;k3>c4;*p7J|W z0Z8=o>eSiCBTne_eLrBk8707xEF0Iht{Kw8CGD)4%I>Lejf16 zQ~m)~RBUTEdV7;_3r9PJ5)0(8PBS}1b@3)IiRNN`Q+f?bt8!?PZG>H57OXu|5nb5N zk1x$2p@0y<&;n|!DG)yr`{XrE0f_{JTTlzcT|NOB#jp_g$w<(Yja);Rm`OhUN_2i2 z8vvGCB=nQbMnLR&0D0;(xzwmMqjM-kLf=w5fTlNC)a&Ax!Z-am285`qQ%Erp3#n7) z%~a)9InDHFUAVUb7>sD*yTP7i2%S}-tx*(9E}MjWlRSs8t$99oEDV<|+)P&w zwE@O2hS9hR={AG2iwofC>o#y^m3mgEEY)eXSJuC~G-i{rv2kr_15NG7Gg!51Rpzb? 
zrUOWOIv<@pxFM#D0|8oHfC6nrX&vXdC$Ye(x2526T6t03MyW>EsFqBfX=SCVkM^(E zC)Y{JK6P4s0K|U`HHFS%=SV4S*Sg|%`67x#C&C_im`enm3P?alW(dWBf7Ux`t`sHY z!QC?ZkK}Zk#I=-m(>nnd@yu9|7h79>L*IpGABg|QGD;C?tOE*}Ld=)`U@h_XZ%|D6 z9JQ7oQu)!WAjv{}&d(vJ0^vjRXb%afwZw~nVV!<^?mcWEu*&%esA*g;ImUk_?kXJD zj7N1z>VNO8Q?w2XZ-%r*$G~AI+-UnpH*~l3zd;%OpP%owPr>&V&nU>%9@s|k$Cl@p z;wc1f6#YvVV|-T{z+EtU{z6$qCwj0EDpraH@-VlrR;|y>cb76qy|uj~M?z|%-6{X| zrtdbPWnXwca$ASAZfHnyaD_pJu3T9uNH~c%3uy5oFe<>c*D7Ec^n8C;mQQb2a@2m^ zxU{r1lQz7!l5+%*02;3=bvhGM;bDupoEDwa&8fhM3!W^iq8t)iVvymVb~A2hn|v8z zmz&|Rn4||p$*QfHYd<=FnYPn_xdA-FLF|AgmJD;%VL@c{DXg%*KsU40(CSnal80ZiRl#84_L}InW{m(2MPxU6~NhTcg^6tvO=604JOadM$4aak9Ak z`kB`@BSw0)UrosDQTlP4X$K;SiyH4s5Tj>vNc>LU&|UMOeg#@>;d|BR$73`1M@s!0 zMOVhl;+GahoccyEqQFp!Bc}ugJxaUSoNfYahM%e(G{WkGTldH6v&%tIbHYLDq9F-s zEE)|D>HoDyYU@bOL+{V_U-$^`pmIA&m9(m$8HV&SUF-90H zgo2YI<}i@($-_n2)QaRi7rHew@Df@fvY7cUUWWG)AkFdqn|Eq$-f{C&S+|gW*rQ6I zqVOsSjVN7z@cE}jYqUQ<>iN%p>2LnC{|Qi@0mGllCSVI7mFQ#)SNOc8y&vWKT8fsn zJCjvN7~vDm_8jN1VL#wOGh7yHRp>7l*V+xTWEg5+t-Y&$b938MyfHC;xq5BJJaOeU z*_l0Qv-YpGUJrCD)UD51aB2pymWv5`)=t$ZpxV$Zxi3>t+!1X5r8xLn)ZPU^>#6lP zz4ShXYu`?_$Mi3{$phv~ZuWmYL9%u~;Ic*(C5yJi;cZD-4{up2O4^MNPq6~ZIeWcD zbNk*^9-!|Ng@B3?wQdz0%^<{%rRhXjK}%#;WORrX!Wk_<+38sURkMZ|3};A~J)u3& zgoSo33@5!PcHv!!D68|NgO*XSPv-o*ARmj5&cCp>8i*TH6!>XEZT&(%s1Q5dH`5cWu&3rv zhltKwA!GRjIxw|tgziuWNDxP~(I_|GwNn|TW2T0T&4niF(ePiCbCf>fVUOD|*mN1Gs)?O)c$l>6RTN{{?9o9o;+1%t zS})*++6Ggw;aBP2#V#8Y2HIQ@(m#Ce#Qdy+|mD!z;Eg zst@-?=RfXIyKQ|0UID_Y?N5*cf1#~c-p8!GT`AXXetl03~cafeLgUgrqC zs{b(rP1e$<3b~Uf@={#PR!Ln!x{1G}!^g!zSV?d9g`IB1wXx5gk?2Yno_W?dF z4bM2QXHoIxyX8p}Iu98J=T(Sb{fhtRpQod0%YY4&AT0$eNUx6^O%JrLplk&^N`DUz zx^lo!%ra~jfG5%eG^(#k&qi|5}jz+*2%i4VXLPpNcF`4jUwmD9llV5Sv+TD`%$Gw z3syW%YED!1FKh_f?sILG;q{HyIquJX_%ZN!RI1JAXFs1FGxfI0?blgq-O1y39odv0 zuxrTVlZ|QdjqPnNp&gBj&FjRg{1Y?7ekJ{y z-yomVU)MlOVflL(GRsk{aET%}G&Wv4OI|fGnBBRD>~dfjCBJ7c8vf;bt|fy_Ix{#9 z6zXBVBGESQ9~P!y(jkQUP}~81@eQemj}#q_>6oZ=f8(Z2Wv`aZn&s+dE0xZa!*2gT 
zZj$@kdR!Lhou=O0gw4uY=4Kfk98f}7tsYY0w_4&3#w5tvrB@i5Rqq8=_2CgMk!ZoJ zd5=GuI-2YsthY&+TWd8sRDy#YLS4$1#GIhm8Pkyy6sGJ)Usub5Ev5>S0V^$ zU-Xox3H_=^)fEn2ImaK-R3Pc~gVUT=RR0=AO_sN3ibMfnQ@xvV=rzdsYiDNb%c$x? zP9V6X0N`-Ay)D!a!RXajrr;CVhfVpnHt~O}bT{=o?jL=;K6CWI2WYvN+qdw#`t*^s z#n~m9qKV2t%cA~kk06L!?zKD(L{2+Pu0yH)3h5?RWdX9EP291pu#B(;dLVXZhlr-i zkGXTjm_3wpP9>XIo`zfCQPhHi&G~KEOGjK^%7f+sl;da_( znZUrl#^DGRV$Tz7P2H5Q2CdzGJVOwr(u;Rt?Hmp%*v4ZkJ$lCKTVwx6fiaga&n3`) zy0UbGAjX1-RW@T6APb1HE1%5)nu+gsuFy3A8}l0J^uGBmK*BD?$2;6s7m4iFE}vMD z1e72LceNBBXQ4b6fC+mpJYmbf-Bim~Gd=tYopIP%ui<`q)L=|+YQSkJkL2mbqdpzK zH1MM@GuMF}>5(%!oW#QHP9JG{1Q0G{cRMKm7i|l$pcAO{EOXUKMoc7>qU&OT=u%h) zKSw(1TC=DXpzRvc1N|9G+bB9c%8KUE;hmZ2L|xXONZz_6HydjjIP*Jxy&N^$z;%dl>IH zF&VW%@v|GLsr!QF1Wn&h??PI@4zYlA!T?g~%f*WkiEDv3ZI5ZOiX!60k`NLaYq2pH z5|-@pXw^~H5wmw&7?Q+noU-v~6AjBJrwQ#Rj~0E=FG{m#AyQ5Pf>T2XnVYUY02N8+ z-L(ftQN4Y8o6cSes*(RZeHDPzl7skaNRk2=BU7D$SLUD%L8k%IilR&VY}2k)*eYRn z_YO!#M53}Ln1!9dmQ?6;gL>oF-Ahs&fj?@g0CBCOYf4;|BZ_AF6P7gWH*6r63Eosm z-62?WlFfYbVVh4fNL-b#7XUtCD!Pg(x)8KQ`D0(rX5^p5_NdTARP}{JFYOo542V=M zKpGy{8;H6w6|>PeGSAB|D-J_e2#gBjbAUNhqfS9QL`~0h>#zaGQ4}kTY(gyKj0Qb< zq7JRa@duO3$_8FoKv02mdEy#`ZPg2jtAO+fK8|6l!0rCBFrW3SUu z!K!rS*gLIeSCTQH`N=5>Pz%82-pfc zPVkdZY)l4BFK7f&&-#Xx!J~;Fft}rh*c7s5sW~D9?;CO*hKOi{k7ZYhw85D6;?mMw z9#V*rjNWl@9YFP@hNU9+l3(xc7M*7bWkQD1sg*MW#c6CUGhX8*^5!0G1b&A1QDsBl zJYz6}IprnhHEj)R16248ed8`gMOhWP5=vn2guRtZmo9bIwE?cydtrMSR7kgmESRVZ z_y)G3Itn%4W&ubbKt}%}ySGoTWs2jJ`a2gq=frsA@kx*5K^=7?kA+QrhP-xfoQ-T8 zVkw{_UT9KGCF9U92P4D+Zmj-Hl+w&o!-hs{aLa*N%uLc6X->$7=OZwdlLpVZ`o2d2 zO*t|)pwYT^(;4^9I~4q0RPDR0ae1vSGl1$+^!tx5vtXy*!Y>=k# z2^uava%7@Y(K^~BlGf1Q6^l%a^Ifxn^p8MA=YsoVK|7We)&;$F0gT$*$Ry|S^^D|v z7sCR9O`!VjZxdG`Lk-115`h+sE$JI#1iuGLUpXU|v^REXsScN0)x#i$XMGIkX)%{X zAR40qw@zQ*zxk_AMdvq-xwCvRiM`~PQs~99U*Y@uot^o*vYR=KNFTB>UZ!55{s*^s{xInJEooC2>+Ec>8iNpoJ&I#JAP~P zC=KQLt+7z+*#v_ajuWQ0*%hnD5ljqx_kM}u`$Q&UDD z5haNknSAUo(c%w^!Fz894gC}SvgZ}}Jl-Vn({Tw&x$N~eri%`M?GO>x?=;n>`4_RH zvkF%0ZJ(yjm)^@(*!=DHPIJ7Y?oQMFMX&})#h0q(lcr!(c7Fltl^LHpKFk+`P7OA; 
z@1V*HVg*}L7D)figqWUF-aK3CaX-+MfkU{$zcs$kx|uFZJkOarPP=N*_PnDBzbCaJ4r zO5ApDR*x}(0pTt}&o=N~IWZKR=30yc_rS!;8D6p#kp%}{$_q2QK(-dztTaVHJ!iCA zAIOSeqsVF3>og}_^!Q5j>*vS1@w57FJ>Gpif^TfdZOZho1L-UZ!dpK=`u-~Qf0RCp znw8x;AQS9f&?Jww453CIOLoW!GK2BFO=XWS?FmS#5SYLybxepT^B{Y8P<~9LhdgB7 z;cVIfYx8@4N2h~tB+z+m(e!K$o6`GrT#vFt)kOL~P%(bx`xNYlj5%@rD44+oe!Z5f+@M$Fi~4;B>C6W| zKS_`Iz|RYRXxq?F2M`$E|GiEB8oVj_8~^vx@0aJ#fBoNr{J9eE|NVap@_!5R=L-CP zj3I9pO#1V0{`v1Gg?Y-=6OrilmxflO!}?$+&CooiXj zGM84Ns0bsS3UG^zNls3-s$A}(5c$@Q+B6t2bI`BkXuRyObsv1fvi&U^F;iTByULKC z?x3YtXfi?>)pAnRS{HIWnaaw9nu55Ea&Q)r9fb&ROe8Nv;I10*Qyu_F=UE#vt+~X) zlA2rr36XMdqbHMin>OYfH|EXmoC7waE{3t4pZ|HyfWMDl3SPwOJt1L-XC1!8e85CW zO*(qxOXz=%mRosyp$i{+yA3}Ttz#q$@e*Did?``WYUIJNrH{T@hVI|v#H-j_0`b~{ zGi21KpYqE{s)E;5Ett>tJM@ssf)!?T2!AYkAJ}k@&q6j~>G8v07a_}|=nFl3)+V?c z11X9)Pz^(Rn(`PeSvzyoeEX-Vr(0$?ftTW&zqI3eK{nd*A-k6X1=y|5Mf>>!8-enl zL3h!FJEP<|9kBCW^!S`PF$q~Ht6;p8BDg1%Cu!BO`eZ*DbUCSWD1a-Nxa`gbP$PN% zZ(bE99OEIQIdm0Fi<$UV>r-q1)3gG0x_Gk;Isf{UqeVURCPN8rwF7(T8ZGt0(2OJz ze`dyvO^c?AL_babn`g92FQ3{+@(XlR>S)DMUS2L2B6JpzE1hndCPsfQaQ7Z0*6PTh z)zxVbSbrTBIO-@`qQkHcwOD)%`gyoO$ow#cl!r>roCfDK{gK`eVMfWbHvTu$YoAHs zRdm-vEYMNu?^vG-4goFWQ7<@p?jj^WTAYTLO zELt&vLJ4E9BF7v|0#>&CgCr}-jBs|sRC~y3$x_RG081Ko>^XLI`5u9`&ArATQ_c0di_bx;UEOpKcUY*pN zT~v5X1FlP9-qt&m0Enf6<{RWnl=MK|;%ciMN;J>HG{2O6eb~V{L^c>ckov|olZ+o^ zJVM{dND36jeU$i+fE?aZ>fr&=bT%Hu)~+`%gmJ{sJac{&3C0T>W3PhV;B8rAkG6>) z+9rd3mffEO_8cV0so8CTfq^tUiD{XKi2A!T1*fjL1Bn93+3zUn zA(S8GvBJ*?;b$|xsbGu)8>PSv!%IhzF2&`U3)lX$?CihTakmIy77g(^rVJJ_s)Wjq zg#Q6r5Bf|=W;D@*GIWRqqk3^A5LOO!QCtXl=jNHCj_Fmd?Phf+IO;Csj4pKV>{}1~ z`qMN%{UN5l(YKZ-R=~e?Jn?J@W0u%0Ci9PP&RLFRex0uD;k+eM`>1xl*Zhg-*D)U? 
z=MVDWqYvwerl+_|Np>2R56|JM zn*EW&xud%oMhaBa?AMAXR*6D2aQD(Zub+fp%{kr=F?&`vKwkLB_J(vy9hAf16tW zXtWyg(suoCEJM@O=(KN2D{SiKvx)dDmi$8Kk!HGbMx-PScUk8R%Lm)57BKc|t$aWh zx$G8DZmM5hhp|NinFkRMwZ5p~V3yAOy_ur{omp%p zI8Ogf;Am`uLWKO7KTc`8>)lm3-neDhE@^j7^5W_2w^rm z_S_z(Od8uyZQjlC#VFY>9~E@m6`5SGzPWm&;&Ye+S{N{ zlZ)hixp=}71eXebNCREu4w}PHYkEEsE%@-hvYo*v3?IO!HO{!PL5&N$+W`te+|4?i z;C0bwvH|{Lvd{GCR4V*>*UGS1-E4(Bp023?=I576Fe_yoMAkM-brJXBVBu{Pp2bytBzoxg0nG7>68n9) z<%^%iUa3iNyT*nfpe)IAz|H24I;QT~cFF{v$y?`aN}O4L0B1obvyp?nz_hz^lazDc z>a})J$O{Q~(#69kWD$_5TFlF}mWr@eL+k7u75MA-RFi5(tIk7L(O*l&l@Nw(OtwPk zu0cCo^6WK30ZISc(?y~}X7E#Wt!-?~==2JjSvEz|^qZwA7cD*E&g_BU%?lnegyXWA5i_`6-@U>Mfe41LH}-XD-LZpDky5B5otm2OdS@d`+d-psaO0{>V$BmgzYne10BO(x%s}zt+)uKm5xZsAR4w7e6nJ}1W6aV-?&o>$`Sj^jQPCisg9e1@ zMcD7U=f(_Nl$F>1Q#)`*7e26W<{l<}=rSuTw9&db-mZ@N1)!uuEZF#mL`bP8Ul_cA z+%>!nOOOA(8@T;vHQ-vTRcS$I?_JHfEDyK?ByWjs-;RJHo6$m}7Gm5BD-b>@Vm=^G%Q$B)$GbwDL1_Nw3n$rSr9gniOv$IGDJ3pob7~AeTBx5CgnW`Q{?o3x3mc z%^V4%V_#QYE()C&`yhGIJ=hOj!TN)<27bw(4l*PEy$io5*<1Mvtv5}OA0!Ik{5%FX z{pNdbPPqsNftn+_W3T9tnJsm~gRLy61+TrT+TaX!K`p+pj!Fx3qfenzdW5En((2+q z;0)hh>`+C#*0LbIVAbB<$Zb>ZhSg9nVC!NGsGpNn-*r;(AgAybw#{~Z~I_kM~?zj zfpu;GUFU4~0;*14o5vSr5Gj$*gS1UuCiO0!;0?ri3Oq-jSd`s_9_G%T-4H)3O* zR1Bew9)b?vXKJ0xIqKWXH(3n~-o5u)He={QVHsSUPQ7vMzkDSz9sHvRCkSVwS!Hfp z56OZVN(`F1b5=BGPcdquPtR5sjiopffejLdai}g`WA;`dJYrv+WLi+@^Z(d;^SB)I z{SUY~b8wDz7+WeOSwakw7Ai-U5Tk?^%!rB#B}Gb|F*CBXs6?Bhl&us8mEd zB}poYlD6mlzRa96-FNeQ{&;@R>-GEN+<%;dsq4P3@AX|i%X?`v@l1QN#v2&9q(Or;qMEi`L1|C960X^$FZi$W^O?U2)tBkH%sMAK6 zF3|1=x*c@}xkBtcA2Vw0)f2aImQ*3pUZ##?xhgy?EX+`YQb^nfcC}zM@360jKqk64 zPnu^72pIH9^Er($W%*_-Hyi&8^Ggr_=sRt~fgZjoh%O)J+5_pjZI+65)k1)JLhxLI zjQx*auXO*c8#AWM%o{KaRkAMK{ScZ6vxF0rynR%E3`C0ibgN4JIMJGPoY54 zY0$?OgGF*<-$u#Ld0Kq6bt94a9}APxPh4$h+lHx@Z!S@)9{#q#nzjqP9uN!2g98-eu4 zLj5li+P;(B%~Cm^2pKYdyh9TKDi27(my3GMwL=up-xO3`(To~~JURMs=PDYP(raP0 zfkDBuc>r4RYdFJv{L5{!d3r-T*!D)BQMZs1NHL|ek{3bZNUFeLsPBMEE@7IDFnIv+ zlk_E^Ig?b;1E-(Ww%nrApl#nm6W_pL6PjyrIp7RV3_+_+*gA@rQlk{fi*^16FloJL 
zLIU@7<1{NP?zTL7eR77N7)yn#g41Ru`rv@)wB%HL!M zJy4QwSMTbcethXI;8q$Ai2yn+Yp6R-39N&0t~1&L!J0d0!A3Jf-sLLB5Db|{|Fwu_ zr09{>Pm*n`d~&E@1pg%GEQbk#cbBG%Q+^w^=}`a`t-RM*wsVz!XWKtW zUs58G9AUO~lPAnRBl2YR`A~$H6fyAs&1<2(F<|60%eQ;jfJH!kPGhXndm!O%VbAf+u|jN)F13w6 z9esoZ`Cp^bz4Q0i0Cr4408CP4)m*5D6P?G3oL|GaYG6mDLMpI#rDJxQppAC~#hu7Ufuf3m#tW-=eP~iB?hHgholpUqN8GV}~wF zaVmTL9yuqc8Rb`|$|C#%BFb`?1H!N!yR{-ve*WX9))r?EywskL%=JBS^8=8`;Gd`IgsvgHEPK(+ ziZ?3tnL6>Ap2=dXk^mNC5H3>84MG12jDuD6Zm+S^xabn21aJe1G-V`(9qGiB0 z*0li2+=fu;dV_aIHiz9qt;*ZJ@aYgIj{uY@@{7nbgx=nwR*@G~5`_)u4tD?mfUVw_ zVv6&mxA8mj(ilO8z^AW4Iip*Oswl>@8EH@e^kPvqop(h2k zY4v(ZM2=sxOqaipiAqBVHfqvHkvO1wAS4bz^^Gj6&Smjhs%jJ8ZT z2nX3Ib4MT;JPWD#K{+VX(bs!rQ4G}=6_tYT`vDbs3*lU#{Q(3*to%q#N_vTD?Jv$pmAafQ^yAOWMXzaJ{SlG&{>{942C`EN&nj0Xdg2MdWAVL%WyT)#s zy#%{iIt$hqkC`)p5efSy2jkf4r5GbcqAodfz%$MhwAlF9p4N7l|CxX^%2K`lpnzv3 zK9iNEVpT~A?UwsM%39`9*R*fRgYNGEZDbYU&Hf7P#1V1S_(asy-B!W0f}}-;+7Z>e zJdA+Y6L6%oMJ4HPvDVn0t5ISW+49Jfu0%A`=oA%+{GSiYf1cBHxlroceu!-z6kTLV zH*k7+_UQB@z?>#1Z_Z*hT-So_HSmHG3|n&p=S*$DYh7D*+y><~XHaifs?!WN)}`Kx zt*wYTWaJZRt5-Kz!fg$j<4Vey(Wlh+Y|r0^)&ChsU*^&Iwiw60YgaevO~l)=-C;DfcOGaC zT0Gz}A#xwfThpHuyDNTx%-zgdE)PmHA&y|K8qw}QeZv@bHB9$BHVl0|gDZVcp|u_T z+i2)*j|S~n{&$kZX^xV#{R!KL2$hb_aDF~O@Vw#EF_?Ia?LEL7fVYQZJIV7$$#p^q zG!$p1w`K24{MeP*zY-CiAQz0&X6vO^ij@*_3lbgo%f$ogyUI{3q2=c>7VMZ|PZWi( z%Y9(ETc|4LEb$maXJW8TY3xy!vIoF|rVDY{Ew2Eg%#t#_ z-C1ni3;&f$Y*>1&0L4C>h%;$+XIri-iFo08v(bP#LOI6U8jSbs6R|8=WR?n^Fzm2E ziX}Ij^0M^ra1{;|%yZ!w7>h;8i;u~s7A;fBN+d5wv6uO~$ce0#cw-!|DB4a6Nm_K= zcor*DRKx0=--i7UDP^KL(vxTpe>)cS2ytZ%ZE?m}IyJKoI<%?PE}qzHR1lze$o~u1 z;G3r}j=rSER4U&Q;-87!H0g0{J(Yw2+-X8-Yl^%)(o6ckljF=4L(nHi+T>PWiMyi_ zBMxMaxYMQ1#G@-r1*e}XNZcj~HCj^X-DTK(OP@8<5<3RsqO9vm1;eHN7W;ejeGW!% zh1P|FlPef}%Th2T=>F{K08 zb2Fh5Elr}P@1}L@*8MJ%p19Xh&IRknd4^nd?rAi3EQ15 z%F;xD2c6hvg=NM$4?dX*xesJXMw-PJ19ZWDXTeN2^LO)+X8-#m*AAxPoOZ{d2IC-& zfefp}c}7Vnu5x*b_T32Nxe;+7D?PR^AG|h5WV%WeSu&=+wtA z3GddZ$k%Eo=hwof?Essil_YAR2R(sjoyvY$`-W0AtF9GaY8y}i2z*Tx0s94bzEeaj 
zMK;cgJ5Q%ic0V|gibt$~d}j5rWk)^;x(RC=t=`CLb`_96ganI%!sd;&B}qF4`&H7t zSd^Yehec-pJpZy&+Of5vj{-&Av&b7;u$7pXT}!jHx$#}%Q&dwnW7Xx@4-5-%9`eWZ z;qDG#8Yt_F>gPhjs|#%9l(0k3k{M_9qIyE?U-A6+QX0R+jHsOkXY57$q}ru*bc!|m zDP;!@9-`H;E4&%K(Jk}p*gItOP0aP69OP5a+4OC>U}4f;oOr_jKm!TgPZ6P1(ZUO@ zfsu`@`U`@Lz!P;e8=0M(s?-@*v3}A5lZ*<;Xpbg4K`x!PEm}m&L1GbB=vHTH=1sxx_zoF<|h1HLl9aU*3F0?wRWFZSyU#>9BsP)?R z%W@5}6{&%jI7L5|&%i)#uX_^fg%;nW5H!AW*tK1()oKGs-&{Iz5&>|qnF0(LhoMNK zKGnntlhrCchvuquDR+9DVucVv@ec5oRNzOd!px1-(ATBje|jW#_>&G#YRVa zm6~P@T={9Q*Nvz&+>PMj8$!(hR&-TX_zCb%5}NW^rI$uW4|GBsAvFZuB$M@%6e9!^hk>kh`E@nq zK;h`Q{zec0k5N#a?doi&l#UsSw_M;WT>mEFJuINBO-r(DfsMdt59)j|NvmkPS}E!X zgqkg_EeAHjIzcW5MGo66#i$7Dno+3mEgn$Wg}^H6vIwFNg$u{yJIc-8ZsRMa210;m zo&v+nxkd$N&;_OV4YIR)_I?gFX#E`{JA38)506$X`9k9E`QR1(+^j!geQYMTFhLZ5lYFOxD zQvZ-I75Ed^WHLVvrMf8R#=-_u7?WoKu_5|-JEMQ)v+lVd{}JFKi5Lg!59ov8oO5K0 z%{UGYTuFKRKzGDK9#_3T717!&XB^#sdRy*-Ngs92c%E6XocNIh)W^dT*3e`)yxD&1 z(_-2JZz~d!alqaETA_$ugFs_}#VTc8Y6E!o3nBzKzk{~W{<3(BNU4#}RkR3AodkBz z*=OMSmOmQ}B~QRCr?Q-3ehsiLDG-cwYp_MFx?dh$W`do-KTUK{BqTaQ{%1Xi6)le4 zA2xNLuwRQ~3x|$k#kGVVk~5dpiC@ZcKB~PZe;0V5%t&nj5mTV`#=OEYC^fPD5OD}sWDD#+|lt&J59H@a@3&QkoU z15LJ7Z8_0p0rztIiCyQ{7HwST1j|YUeNc7U=QJcRmkShD9Q&z*d~LP?zu*Z~pgFZ2 z5T;#Ln~u1Iz*3(aM-X6RWRe=Y_Ys3hSeQt#htL%1hUevUVMs%eUIkx^vlr1BPVPwy1GGZ1SFJKU<9t?cu}nh-n|*%B;8z2Z$_e& zJ1*&C^#d}L3DlLb*Cb|sd1K&fq5#nJnqGSyy9E=fc&A=wp$A4l#Rio+TNEWSfrJTq zzXC015*c|QsjMipX9$WCBai^Vwy==kiK5xaiVRA9F}r;gFvF#GIc)5nRLb;n?;e<& zMp!;<6SL>@ZDQ;moSv^C zFHg1`#oSTtBL;^plzkPLA~J++`ue;P{~l$$ILjp{*N#HKZ40g8IT!vKi-rtJ7R7Jo zQvjiZZsx(k!Tz_zPmQpj;1a zGIdAkaXhPEer0NKj@*ktUO;~V`|DRGz7uN~pxWbb5p%t%Ahb&vAb;|v1qOZvD7msE zOqQZo)0A4#Ih{t2$_gSCN8C%;1!7(E(Zu~bJxM^|_?lp$j*RZsZuZQMP;vr7AQ(zf4Z;`bQlko$l1jpxki=Vj954Qy&t!3|s*uSjP!VeIzYGUNKVd(y zdnEpG%MfF;@Q41tG7jTs<8Th}{pw?F&xYPIjYLfgBW8YQe^Rn4*|Ht`VH{pjvAqhKjJ;( z-|yQGpQR%T*^dVrP9E`x06Y9uKnRn$x@t_Yq@w!;}!1}pVAF~v<8F$AFts%dfFi)kyR!Z9MaCDARs6N+coYWL zCv2xUt`4+Z@NGD<_+S3>36pt^;vegSfUcY%hgDl6k-vr-EB;jnm62I8ho*^9TZ+%1 
zvC624`yw*A(JV-lvV4cHHSj#LV*rr8X2%adv4ooHNVE#B-2KbG0Fk#Q5&uAIStY4T zMC)AMkg9@S*ci%fT%VV*jBm2&JhBBe>b?SYdo_6Zcv9T-km_9>zOIjZPBAwJ3g>{^ zG2*x3SdY>%rJR{;P_zIA6!J~5nJrZq22{W-8Swe&*2_=(7fZ=X7+8A3ldx$K+j35`9I|b@4i_uextim1s>Y#LdX-C{G7o=#O z>>X@8J?5J+FGG6XyhMpNs#C8JKe6p1FOQ}_^CTR?!*BD=NFldn0*q-Z-1#8~F(R?` zP4oaT;uh+H7>QmlSZ=<2#*zI0o*%bq;>0NYgxqq`XBK|#?l95GB>eN$AOCyl9>e0S0^zrjS?thghd>V78`0iiy21W=09Af)>iIu5c_ngpzQZackegus%z zGdSy(fLA#tKXNU80nNZo?gBX0ViZIayR=0n6aK2a6p2GTsvH7zpO$_oRWdC%ak0Pw zC+NxKgh)fhVoCSXUlQj? zL)LzZp^4vjo0tW`xw{cKaPOe< zZU*ZXay#f*0`<=MXmij#wlHRc6uRt3smund+}G%{eOlXi9R%-l;`Dh+5|RN1gi;Pv@|_KMA0aB?Kv0+ zfcd<&m)GY-?i0B%{%V=RdZ>L7K*Ck%9PFX!nBvq!?T#pYtYmN4<1|@t2I-DgoJ}aO z)Q-$OG*I&DAV#5(lYSE{>0K}dl6XTKHD$)w>z1r&K=MSFQG zGKqMjk>ZyNq`2Z`z&SJ0Quox2q_tAG1)M~JTBL^#WtL2-H8K_AxOTw%^HRAdK6={s zk*Tg7gN{geGoG@w+pMwt;H9v}>rJjc-risBSg_+C7u@GQO#IuT%V}Z95;tCXeNoHt zQbK>Pqx}t+G%ee3CNaUUSYGCle*ct~qc{o2W>&)73Ara#HMk6bW$shiy2 zuZ2{|#<-k+DLu^2Zu*N4f!{CAsqt-W?cDR?!6V6v?4_Pq{B!p`;Y0y1C}(Le^Vb;j zH$I<&jdoqA_$TiBnOr^g<|bMQTDNEfWzQBLriV`GUY zXc*)lXk;htDQK*dG(HcbS& zM7N4hdma_fB+$&ijqd-LL4yYAI_988HX1D}-d%ENaxBF7Ir10`g5XN_hdGd~2m@+m<4DFPPl=nB;Wc1~6~A3%HRHjm%J)cjgdb#&D= z@YLghF!dg{aMr2Gt`TwHeG`P(XcTAx*$xB4esb;s=-^E}^aIJodWq#+)W!}dtEgmh zjHxMm4g1X#8u1l4wGwVjc%5$?TP3kU$^-iMry+Zh=K?9f?Xw;J8Ya^$BU+O5-Utw8 zk>|r5!_DymQ_y)5NG%Pfz~vK96ZN4S?xwfF{SFNF05~Zf#-x!SG)0F7c?#xW#5Z8t z!YB7YRtO;>?Aygxbe1(9m?}~^o{3umg=QI*Ivc6)s;nkO zs|`eZyW?2!B?-sA5YV7wTf{G98QVWjM&jx=bew3+2Ue+WN&#%gsR%*W zlY{qq`5mv}xAJyCTG+}KMflrb;Q2JL$~+cWZCcdLrW%gO$E{`A0_pmE?A0uSqjYt` zJ_?m_g5Aim>+37NJwH6H=pJ5X1l1mfgCOd7MrdFrc`^P4f^62PUcSX4c46X|+7oGS zNoFn)&%O;TQXK%-JQLl8kQ-ZMQ^>qRw`5y^1`KT_DfsD=(+x@0gxX%p{|f6))k}CQ z5@^6EG10on$36;S!~tX^x6tN2d83zMeLD{CXFy4!%~Af20Mm;Cis(RBb0{PH-bP5G z3t@)zdv@R`E+cR$SK{OoR2YYjU%X}dXpu3+U!9NrpgMRvmlvG^00i=BARz)cX+|?y z-^~*L6;2)Lcpwu5G6Fp#YgbZ4oVMP?u1c=xCGgTiQ*OZpY`XMg-;2%3w*jT$+j#3l zZ`|+gdzQxVRhZCOmijn~jx>@(Bv0`KmE1T4Cn_1eq(U8NoE_9RGpjn_$T}s!nvzS! 
ze@r6V0B@z2k26wA@qgC#6jv|7PkE0QtxyZU`of*d*5)~NF)83%GSe=fwz6QT7kP@$ zQ|+(0gv38?Lqv=1!q40t+G{z|uJ>}JbnlhUCB0WV|9k14ui<}jtmP~e#z*SYthK?OMBC2fo9I3ULo)!m& zHEEsOb$folMY#hSigE{t%s;s~LdWQZUy9AnMP)AyW!vNBrsSlIS0ALNl&JM#RnraG z1HSK$#caAZB_(*5?wh?1=5L&y*WjDIv}W*|Q#ZLxn=fCNBz+Sk4|2S6ov)fBy=h0qy zp{roGVvU;7;B~2!wu(HZz-Yt|zZg`#OXT;Z-+aEIQGTMeu7_hkKU0mOsBm&v+Hc<} zKG6F=4|H%Z1HJh3Kr2>cJy*Bau+P{jl54nietvy9_wcx54=uV~@ zj|_MJ`zdqkL`Ty5$mb_nS&oG%J8XW=wK$`lBD2tK4Rb$y-duWf%HV9t`KEn_gP+s{ z?LHfpY!Fv2vi>A4Fwx*oPo06K>)@GHvC^ino&WNOkK73!e)zN($8Fcemv>e_zq0z< z{0l~+nSaJ^OoqJ`E$hh{{uS;9;yG)K(Xq{QlRnhFlv*qwX5s28tk+bn>+uRj8hu+=)6~L zZn-|?P~E=g?)>{Z4e~JB+H3&bM^x;Zf z_?hh{y)2sFPL4OmEtg(BtokLQd2rcak0Wu7$8^e0KTZF5dh+XhjUy@Vci1KUa@pj) zYE91M$qmC#WQ2_AGa=+`a-;V|#{ox{hu;ZET5{;f7gt=Km*LeeO_BO(QC{aGsk8RK z{&L=Q*8DLS6Biv?ICy81iOA%5R`q%3n?1~9P+oIsqss1f?^nBv-k67$)pZBuy(v3g z(;ZfnIzK15KGrqF(%Supf1Ne&o+dIGor6Em@5KwgUu|u&%j%KM{;4IajV~wHguU8# zW4>5at9@F)^5E1eX<7ToX5)!^(cHs(#Bcky$T$6ZNVs2-f0cOgH&I}V@DplhnIj}B zD=Xh3-i=DW@NJn$v5Kb&R_orN42fDT>kzEnLM)C#iWD9-gF_sV@xU+Sf-Xtw6!=u+ zjPV)*^cu_(SSg{1Dg#lF)c~fj0dJ$*Ww zbvz*1r#4YG1;RUnL$1x2P;NmSJ?1dw%<}^gtk1OfDEUKD2QzFpB?~Lw;rN{)6lG< zT<2||Nynf%d=!M?!hT(+fG!qXNeP6R2D%bz3??q-0HAc#Wpr5y|3k(Zi)Cz{jL8@O z_J_v(D^)G^=(qtN=kZEtnQ`PXm?NNx;Eu|1Cf#TKcdv^!9s#{5Izuc*#nw1yg+ngt zYhUyE{%!S^DqdiA>zw+)LBet>9gTwgZ`NC(kkjPV{n3lLV0>^A&_IG%zKYVJk|(4( z2^=FhNR02LYWq+b)%b}hbMpRJ^iK?z_68p`6GdZ5R^bxEMQ5LcM7#+%@2eM3F`>hV zptJ~>x#oups}?~_mkb}4C6R6u6~pnS3s4h1BQh*VFQl9eg^P;kb+bW1#T(Ks$!;QP zi6sVlNCJ<;LIu4-rfo=3_Mc`n4Ao=3edQ>U&^BWx_^hPp70(k*B5=>H@PMdaNk@6| z9U;_0jfIhLxahnShWQ2xJN-fEKXD5RW>Qdrc1QhoE_6#yzw=}Xda4pJA@QpVELoGKdvClakS&_rg{=GK>|Y3zXa5+xES=OmWq;12JN zKm@TRi#< z%$N}t6_STj=3nWoh+1s+Atyr1WPPEMQ}Aw!;lmw|AiVPU6w){$BPviLrEQ`Fz9PMeZ*eg3e=8k_ImGz)J!*y`K0;`_F7 zdCeWNIty0oc5E%LUY*ey5|PTIt#vLUpR)Elo4WRyKHBlA@x}N3HFR3O7i4`J9?oCUUL@nz|T@l?U%tEzPdU-k-UHzPyo5|-JT(cP({ zc&L3Tpna7k6tc=pDg!M3(00@!F}sEhhF zgC)FWvhIBV&Tn%YwWn?u8Y6eVlgE|r#vqhp$L7J0zm 
z?cAW96=-+5t_=e#7cy+m{&z9bJK7Dphd*|8+R^PL@_;kIbXiN$9~>AFLUp`8e%~-R+N%7q>qfl2O!s6aVyw&t0khm2!jhN8FmDti0tL-TUXV#ttP? z^o#<2$lJx^v4(GsXl(EOcm58(%qIs;omrP`4FqLdFBM z`dPsOmEOJu@ug@YB99aHqWC$YvFsKfbkEL_uu|;|%mjy~Q-lMKZpL67H|+mDnD54W{=`#e*$=03vFbdoLq1*&>YG7?(EA; zpnd|%dae|@W-HA9h2Pd5eD4mX#LekTJ166>fB3xn`3F^1ZL-jTcbBid4t5y@lm|}S z9$*w_z6k3*zYX%70?@lJ75ueZM)>c!j|ujB3F!49k@d*d))(Y0L#JU5@2!E<2cB#L zBe$n(bv1dBD}AmMx)D~h{8${eTjI65fs?i&eA42_@7gZN_AD7|mt*$#dq!qCw2vo* zKuRnVUC>#l&bK)M5B6xTB)anMJAJXt)A6=x9eKMSN`5_d}f%eSoQy1IlvOr;zYrF3&CDC%!*yYKEsMCS=MA9$f zX@M{>I=$w*H2^dN&A|>PVA%*5Pt}js9?&-UFIVq5e&wx4nH7YdA2ceKr~CG+-UUd8 zVs~0knE$W;T&6G#?pWUSqwA4}T1KMrlL_LV8M;Ecj>H%<<|3+hqSa{(}07N zv8mm?36l2=AX@9Nq~x;4 z&h#f0+8T)U70PPSVe5bn=ur^Ww@h0{3LYkiS?lqA!W)x_H%mY0X8lg)ul-IsPdRkd zoqVcJxgOu^_T?JBn7`F8{vL@+Ob{CA(~RRMSLe{xkkFx@hs~*g7%UW5W8BfLolvF- z)om_vvP}n0#9r)HdJ?6P9#@r+mPW_5KZW!FCW$IlL#&BrWy9TXu^B)xoV^?ppX`}t z0OyiU7Ov1D({2#0h-fEbe^;Op8L2f{AylzXZ%u1lgvb3^e@-s6%h_>j!gx(ABbAKd z4Q1?qzd5h!U0d%2``GoYNosdt3G3~Jnt3H=yxz6h_>EH#T0+-n>tw@b>S>Xin$fM% z=6Zc2{4fM2FCZkjLNDm9OP|~4n1LVY*q3MX=%b%dc>2#Eyg-TK?M%jjcsjJ4N#e$u z12WO)%$jUYXllHo7C3~WIeYm zR&!+P#$aE&o=6+F4s1hRcj!V10Z1-)`x=0%Jg_H`q|w*6eiKj=4&zM1uoK$xK3JJu97myzj8srF}nKy~5TQ|0})3K0` zMRJ~EE16J`rM>&@6kPo7D#n$vQ$$K>a$llX+n|J8gbwwjr#e!6gwZXuB({8G)J&}c zx?J1UtuL}OGy7HcBYT(UupRSC^>p8dy$)}K{p8NtW$+)=$wf{r$Nax9Q=G%7x@Io)-NA=1n_P z_p$bS#axkt47ovg8M!p{NoDvPp#e4sdea^9m?_D^nZZ;GBuqaIXigqun1H?XXML=u zUl&CFG8sqYGOtg|)&x5kWHsZv{_wdgi`O(N(WYc1C4XqJSi6$=Qi}P|Nfy(G+aYCh zpj+8}=NZzsqyJuNJi!M~XPSeraN6*`;5_P^)d7FFKK{s^;D79Tub&Na`fdS*VQINE zr8ywic_P44GI+|*t?GWE`Cr^b2pr>7Kj2DLKxo7~@ipstu$UVm-Xv>85gN4efJT#Q zZ3Lo{%lkHB>^?$xN=8^9o@y^o(I5m&?Fj<@;n}k_*S0ue;o1mYu@|@Bi9Exo|F@QU z;k>EqVuT#ErJ*dyz6Lm%V_Z0xRI>#f!{E*PyRXl>t-S93^VbHa8A1N%qkRA}=l@gp z{p!no;MG^UcE)y(054cKWj3T{1q25&8sQ1T()2~Aq&XuX%K?1DGTK^Vm0h)VCq35h|6=X1Xe?zXM5@1Di50jOvhhL7d_p!cmkBL%)`T@ zz3jq;3$$yrfLHOHN!?gbW>9^!IX&TPNBpLCgZNFcz`)OoiDFj;RF41U<6p)8%jaI4 z#7C(%oH(U*?tUVoh~8SCi;}-?s@|k#s+*Ck;}thC>bev!_wSEeM;_X^VM?YDh9rh~ 
zi$>-b?U19bv0j3oadWs(ck<5SsBux%hH9pb%U#hcl{y>I_{>l8-&YX+aCgY>h0$yB zvUr()(m-qrN9B{hpdIrxt+MHSSx4051`UPUm49dvhkwc*rQhH$vwNAn)Ev#0%f`b} z*G+S;Oh4$`;L!2@7qn_4xuP7>KOUXj;Ibyata9TLKG_nN>YvY>zVn^@+R3tcY)|l> zcFu1s&7mkmyH5|zP)%{}sx+^6RD168%I;qivWu0pTC7$dc=IGM!NlR?-N{m|mT?Zt zKMnRwUpG&BsO-xblc#Mj%oRx?Qa_Kcw$AY3BX1}6Nm^W%m~>U<-0dU#2Ol{-s?=As zd>rx5-`=I{o|5t-DcP5!upY$ADynXM_zuEKO-K23W_xvaRP42%iiJpV< z-vQ}49{-&oBIEJj0r~HM{5}Hz-Bf=ciQoSJ|JMOw@F8b{@>?b3KX;Q|PF^|`z*e$k zkLszw^+uwN+6DRbB!^LAbtaT3*vqbp*YPITmr34@TwT_ zqlb~nkwZDqMRy2zBtXJ3p4 zhbuMPUZ|wjjYkQ~i_#|S_o9c1!ZlsYn1$J*W+-Ss<@7A{@vFkBKX*T%c3c=)y{nya zk{#%}Qr0+JuW=t#JSvepFEddqO(&i@nc^>KJJehBEbH@H59guCu7_gT14`>cYDGoO zwY{ajDSD^N)N-Vci0V-9g0PV+#Y>>)cg0AnnRGh6xn;u->ztjlx@Q>2YpQDhFEvW@ z{o_y)Bq2_))oGBy!!!*+rKKq$)jP7KS5zTLGBkt@3T-4>mL1^UdH>z3N68h3IizUDPd%(`)E*Z3x?Zm z7Mx%+x9#3MK=G*^tKUGBxu9}}F0j_jsVJ4vSd10{kncIhF1W#}`6u3AruzuRdOnuc zXg+!UhWFF+qr8d5!pMQ>+(OfHB%Q#h^TNihg3_Vo<{#faZ-N5a9jL_<+EBX@3MQzP zkOYMm^vmoZPFqDsk{pss?Uwsi?~;82kp`CE$g`}?snJiogF0^ohCf~~G{wMub3l?0WHJwnaKWaX-7PzHYe2_v4u(QuKl_&S9F!%v3Fo(g{*(7wq`=AdU^$lECX|O( zssRp?{Y6MSDr~pmjOm-x-PPFL<(AW#3|1D+Ref^U^n?cJviqVyKwKyylGHwwF;}U% zf*LdARVj7)YcB(7U<(^rnTOZ()Ih8C%|UTNiXL{o62;VbH#j|@9dp`#qoLs*?V7G9 zuAS@hiy$p}7H331`yB>o9t@}5*O1BLFb;`Y@a^*D3>TPUrXI~Lfp3T)$JRmOWK%%9 z@9PIB6RFkr0@`@1`^SEuv2!^T6aDQpf#4Y#dQIy>PAN)bZ%VJt?u};nHiH zU*_Ms0Zg_0iDK!%bp+XO1S*KWVMlYzFpBv`kb^XF-njdm zx=Ane1?>{#?BYC0CG|J}H$IO^%bTLJ&BpSNi$J5>9=Bh75STaj(l;C#YGPy*V0QqT ztMMNnyj*7ZRZ@nj_}#~&_M$3*;usn2g#P4*%+66AkIC|=G7e)HU{{V_%*fCpQ>?a? 
z>Lr^*5x2OcF{wG;8}a%)_S6;LM)A7)QysWsN6{rl%eSj878P%_VA_K=QN`vrkxY75 zP}-F}x46!TV0&)fw5p;#D@~ZN6k|4|>gPb13G3qJ(mgFW6{tpoYJbU_17gqO*0OSe z>1h)HbUK>6T#up6pu{pX&A|avQN9+mR3WhAI=AIEbnFF|wkB$hw=6&t8m;NPmq;I_ zqUOhhDZvMM<+a(6<$}Oa#TOwNh;saz(PH~jJxPtu({|~OjT?IdzS%6rJ-|y+Q13eJ z+SMjh{ZXOruA64`Md_V-lfR+VuisUQ&7z8yhS;_=<>f)Jt=t{jZc6Pz` z=?YrW6gWt4d+FzzO9&3Oh564k(Am?^(Xh8n)k*|@L9wR&eqt;RusOB#T{w!_nh6j< zpv%Vn`s>&4{dRa$PsZoxwjBDHJqB0_(*ruyV2Pei@5;LcV- zO>&(M8zYmec1e*(ss^uFu;9j|hN9LE;>oV>Yo<0e9ep$DZ~)fWj`wehX4^>GExWjMRhFLJ^fj#BsC{oe_R#(<-yV*x;K}%SG6xLRS_Y&>pue zap8oY-xiI3q@IzReuoa#blqiVs;Kx7)4nvEyM3iX*Ei1gUU%-ZMuT4 zIio+i{YAFTI>QJI^Maa>S!obUoen|9ve)fkVj1K}jTvL?vSbzxH_nYh@+tEW!(lNDM$Te~ARd6HFqq=B zIfAaQ4phO}Q2lM=0t1lIjB95CIs^hF7Rwt=%kGykIPX?%DQg`ho2~ijjmO+__ok{l zcL?{OiuTtlRnN?EZJ$*MWsPG3NJ8+Z-CxnZG5=rbUs7M)uNknKsQMWH;4<`>k#yO_ zrHT*lw_lyKMfJXkWL{)!WTz?~eXhAtJ#pta)1)0`n_*)m*tkPDz4DZq=V8jPE;6EW z8Ehy%)iomgdpofe8;}6Xg~%BW>$#gDX36Hu!~xGWPw%ccn+$zDLKq3M6Da!ZB0+z1 zys@8}0X?>;fML+Uy$LK$vFLS$I@!E$SZ}g+qR4h z05YmOut-+5hsYKs+L+`Qb=ipzIRz?aJ%iu%a%#66gQE4~TZblWlbx};Bx!wpxPAht z%YwN!+((?~f)usv5hQuXa^(pcl&}I-TN}Gk=IE#yo)w)9j!iH+U8JMj7U|*&eQsF* zZjZWlx4L>U>}Np#W}dk$TR&(KR>kfMvZJ#k>}^5I3b6ZAvM z0NdzQcLFTO`)))XfUX%hP1ow8PCfh`olT)hDWrP9e_j1LRGc!Vi287_Kr)J;(#EleM z12%Sv$h=ij=RPh9{q!iR_>Cw(42xAWLT>t(h?BHd|FyNL!V!j&1cD}itUO9d)4S%S zYc<~89bW|qkrzc^(fX1mQkm!j`O2N;oq3H~Q!?AJ)m-(ND_{BNWbPVCoB;VhB*Pgc zO0VHE6bs=0I4NYRh!7w556k^{-D-9{KQ9tR${qIbIQ-q?Gq$np*c|Ln9;t$xk{=+^;On$%?4VEu*uy-soDuA zO^0E2yDHtgA30*`S|ULhan-Iw7)uN4JmAxgRgZxXaJ~_8TUL+i9n)b6^5+5itjUmN z0IRzaahB0M$TU#>mj=IyW?LUc0jHgepHSY8;)L?2kc5zHaI9AT2J81y0s`}Q~K-JOj~Fz%W2X(>cV zAWR-pC*E7*i;B`*;;F}2uzrBV)eDzZ#Wx^(?!p8T+&FGuh zGKW!5c<#sfq0aAg$u{SfS*RDzv$|`dlfU};s!rus3 zD;7~DYq(Ppxs=wp58g{vkmATym4$4zQ-%1RuU!UL*zhFFytArpFMO*21IbFZ@YPc} z%xVQaz9I*#?MG-mBwMxwZ=w(E#*O9AnvcTXi8bQPeTXP1pE5{OTpP#j6EFjdqk$iz z3n_IFW;ZB89|w823f!?mC$3mKcfQkm?0>5ARnCwXq9&K}#|{CO4^f6=&75~W3ftbc ziI4RoB(o~U*a2o@&!MPH=6pntf*1tKWkKHo9O5=D`)PHJ=IpbEnYu8m(@% 
zu_##|Yo<0hxD9I?cZ&~;C8}4}ezYPbPvLso%OJ^CinmgypRRv+kr{9mqBGoa+tZQT zgnm6F)lrt7TDY&AXEyf`0u&?#Ln$&2bbXX3&lZ1>K?b0K{W5MI2p&zS%{)$tLWx}P zv4FhW+`Xq+?@3E{h2g?2=r~A|U6MJUHMw~JQQE>?>GpztG8A3C7)bYAPGMAsCJ7iV zDC!wEYET{>a`Z;A55&LyQ7DFS8M4f}QUgST@N%~PLAefcyfdg>@v*@%Ly8W-nJ&nj zJORF##*o7jM51`I^LCq za|pdq(4@|E@LV7?X&W!E>+|8TDCHaV)-OkZXqsJu`YCg6s&>#D8QI-p6r9blc^w8y zPmPxK_3lc`2$fWDCfJWn4@6Z@ zk|*iePRjilL7r&6Fb|fwLymK)w#~3;s~%VJ5Pm)-^HV&xQkiX zPDan$yUU}GEiz3{7&54+l-U>b4eiFVzfCdn_3)c8oLQREzb0SVLs^drG#yy)EE=_8 zI8a>HyME4`^_x9QqM7wB+NTL-e)JtC*Nap2Is#~-ocO9}N&+G0c&jJVi71Ag)rL28 zmjRmXDKeipnO?S#Ig{P*k6SgbNe3?@HsQ2_E|0xB!@;eHlL55)Q*`-%o@m(2%%WoT2z=EN+!L88owpjTmHrDj_FDKPeR^PG%!!1|CwmgYU{ zrGoZw7+27q*$@*t78eVzm_eGY^vpwCk6D=TL(=QpyKsNoq#;fTAs0~F zyP^JiR&#-}2S)qXFNX+1|B>^4q!CtMaxl+Hqo$HP73Fxz5C-;RK9TpRxIaT{6n$vthuNl92U$3R2POC^Bo^ z#=*OG>d%)N5bJx$HXV|Lm#Y_FR<3`sg`WN6g|fN zB@(qUDEc39Pgq_fFQ1N%$CJA2^f;=b9nyyZy^SL`DEjgpi$hgF`PGBNUr&l@QPcIg z*a~R52ZS;o*IkD-tR_y~D}_cwhXNC8^X+2syZJn7X!jh8`x$oAXaQSApW8M@*XNdI z`yjkbc69}&MIALB7tPhsH7RiPQ}Fi5=_-<+eW@yzW@JRM`g z;u-~AdA$M_mk#ELcPHKa7cVd!OsVvP<^dEXqWZe7&m&bDlh(PI>(E4NmY7HZ?&5L~ z#^E7aXUF333{Ih~2z<%G($R_7mSVix6y+QiQa)(TmOqPunG-W(LrVMtQDq5SNPBrW zUUmG=3e75OWKSP78^N2zSX`aF0}0;w)_o_iNm-fBm^#(!ZGgz*Z*^Q4?7ihqB+LjOuTZD6Cr5pND1YBeI2#fJ~-b?Dd~QPvwTkI{deQwKIC zm@u(7)M~PW)>S#BVtfDdtNAFp?ls0(><-!?(s(Cl>8wbVbj@!p4&)PcqZguRD#pow91h*+oY; zV;jIgoXGuvu;oV0pDgYr(lF4zQe+;q=vMcU=Tj<9f_6Ft*mN3SZtrM7oLGrabRU54 zTcaBzg>?ZnFv$|@IK6|-TT?W|#OnEK9vLp+CP%MJ$OqG-dc3ZWyZc4EBy1R@j`DQI z2_VY^hg5Z1G?2JMVlGde4%0oH}7Tiqz3+6=GN8kn4-q+vP z0-4joXT5~$Vdrq%n6cCT)F!FyV*jU83}+lRX*P7(crEH(F2#LUFOf;u2z#?!SM%WB z6tUb=P_Q!tJF$|q2Sijc7ZqZh{%#t%?%eDB`{9s);z+eqwGnm_i1m%XbmFmL5^~+k zPkK50geAKGIo@F^G164!;=81*v&nnX0zb;LBIwDwQ%ZE;(;ur=AC0)#@Uk*^*Q}8xqV*;yhx{#3b_+Ty1-F;<<`XcOj z%p!zXil))%Hf-}t4Mna&T+gf=@EU$ye65!w#khYTTd99yhN8Ag593(f#(KzfgQn=-Qnj23;$}>cOWFC#+4`jJh zZ*{4^$3N7?f8j0)QFae;?^SRSTto`F;ntmx2NVUKmFNXVZRw1kUV#Ic<;W&z6X@JZ zIr{vf2nd|t;`UMxd-*ja&sES{y^t_^N49=);E^ivJ8*l64$9yFKw3zaGVHS};K=if 
zu}O#v6Dtn1ze4+hsY)?UMx5{)F6s1gTX!)_)n3p8346yi^yYFFSS>qIUMzC&iJ2Gf{_-SzTO~4PPiz@WeH4(E zxu16mRnR31mB-{OR37|&#g8P3LFbr1fFpUyWu;HV>(1ZPS9F?G_DmBWny{hOTbU{# z=SwKW*C0;Eadl0U@x)(z`Zt7~LrLaBC?zF&8TbV-?9T88nC6wiPXt%wq#+?)> zBRN7}LhD^~bDW=e$}eHL0&ksmJRWt(Agn9N_7?g4`ZGf5C5W%mGe4ltcKxug$Pe(Z zQb#a3^RQNy7T8CB03UekxL!`ZU&irC5R^vSx}dC~7c7p!ka+qn7KS_C@#ADoR3Ij< z>}9a6C*!VfLF_g8xA1f?d@JL=?&XDA+VlCqfj1+Q*2)E;MH8~4t>b!$g&#h}H+zFo zI6Yr^gOv+1exu$*DV0??BZDeB%-@4;^m@0C!tuy$6h@)W$(ugnF|x!M9`-&bPmUV3 z!G5)Z?$zQ?z&2D;coMZ0b45w<@q9QMw{nU}HH^~Zmli0?pS?J(f%erb@S-YoRkD|} zX8jO6>_Zf9rIw(zwta%_w!ln83zZP>Jc=&kR<&`xeA8F>oo^8ku(&pKiS(7Vx}~gw z3BF~hg`kn?UmYR-d)h8|m8%h~*uw5GlLpaPF7jRLh9v>jr%I|T(@vJ2!PS-qjmv1| zK=G?wUM`%D0;LeK$H-BpyA}ZFcBu>N0%L)XcA}XRgRF-^CiEz+#u@n(=*{&2 zTYBQXy|7T6b8o)*wY}?x-?I5DsoVm=l~$V!7km_qq9;$6KZ~4)+x_Jiy&9VB376Y3 z{u`XY$;hj!phdqadUTA_YSU_CgG&c`QFZxN1CzoUsk|^8SaGu30%Hs%)^#=qZnf1L zC-swkFE@A}FZnznhp2LLeMe8(yxd2@Owq*PWWt<1lD(V()&ub?VT7}+)arasK+@l~ zT8m%#mn+!)%rqgP^fAxtz;J|@mqy$?w=Jbu^PH{tPkZm@yhqu9^)WziBQ8qBByLWg zV%geb4JhW9~mMG_6CBEWP*HKgsxhiUB3?I1gYQOC&t;*w$-r_Oc@C zP;EI>>Qon92f`zKyI{wyb_GYmq6PnS?FH-Ty4I)5RIj%s1Bs%9b^w?Y)2#aVfsn~F-YLj8zm1x98rA~&7n66aHOj~tXRGynjz?GvmFB3B$n-OZv@3#k#!u7kdL{u&!P zHJ+%hPdgAQ4^@?;5y3Y`Mxf3ulr=|?M6aV`jbo)Ld*^}K6!t-td3$fJh+8o3LU~F! 
zUM@;EK>xyE@VX(wtodO-ZD3Hj^gub<`p(5TV#CpJueb#y^pp0>^fDj@GZ|&}Du~n{ zrg(Vb)6GX0Fa5jNLu)VO9?i~;g06lQ%ZFB#%Gb7nw@Yh6l-#(JXy_m0XpBFRJGdApc3&^NmwgE;VAQMqE@;9cg@uQhuGaqs0_qHJPUi^=mZ4Ba6NwT zk1>Jzs1iA&Z0`kE*o>SC8i5j#!F_~sG?*Y`x@EFnpx!|R1}Lx}$n(A{er;uM9&%`S zz{_K!RT>)8MoSkOsAIFMTQm3xy0Ekxe}n47m4cNTiy+ppHYhh#ON-5Js*!X66NR*N z?IWjh^5~g}R6=uOLu`lmS3TMwRNGLNIB%PV8LJokW8{0Hx&bL)TXz~!MJ6(dv0hqT zdxOzDyMk6%jNxEF;PHy@uw8M|)J|7$VF9oyrn!qI=TI;xeAe4{trC)6%*Z_qU$H^j zxS)_?v@E^}dphggU-lj7{>7Jlb=djEief{^ddX3UF+~>cLmNQIws1J5Y^-qro0gv7 z^q;SULBzsL7xMp6E1b=f%qc|t*wVf!1O>p)&_^ix1E8()1Gd?jpo6h?>cKkbZXxzK zkT5?st;N_szcbfQ(O`3ypQ2<9i|Pa!f0%ygQUleYs2L8XzmIJ$X3V8txPoDa`BjK- zEzs4Gv5L+`oR6=%j~cBseO?GB=Y9aRLXJa(I^{>Ao2$IEG%@?*%3B5G74b5_BU=T# z!a|*e_oogeHc!OVNAXIxM`6vfN&(rAuLScxA;}*@u{pvH5cdXE|Cs#*8Z~h;XBO}@ zIu?gG5KhB8{Pr6s?T{gOlz?@~R#v8a%U%Mtg?R#fI4|IH5U*~qNyBDCm)L3NHZKf> zzeT1I{0=vxFJu*;3M}WEx(+lUxBp3Z)pW2L+VW3~mlr5wXU)(-J}`3Fm2pZagjZcg z2?do$pr(zw*UpsJ>NQ zW<=|>4z9n};15XFV1zt+^Z%H`!U2`t7h|ej(ww9_3}Fo?KQYiFee<5eV;15ahG;LJ zs_!KPRQds{wgA26WE!#{_%yWA^$`q|=p)PR>S(PcI3K9tnSgr|UQWXxWaT-k6-Zj7 zdh=q~j@`JQOZC)EhuLkw7q3t7SJ0*KnPOD%Fm?j@gh&6rZ6FFjW|>7e6`q`T>}2Z+ zdBw42tfl{ly*Cf*F@3{_&4L;0G!jW7JH-&KB45#jBw5m;AynFw3hl#;A{C{K(xOOO zweOmtO=&~>MoDR%O4{o^uczYkQFHv>_c-47ztZr$Rm(A0j0-21+8X7 zz4b!5fDPuLhPcPCa(s3mbsFJb2_O?b>I7kdE*eKemLb;a0pp{Vm=rS>Y+42WATPa( zNdq0(6x1?iajveX#3Y-Hpf%V<=|UucMW|UEjO4Yowdrl*u19-fW2#puOwho@$8<8) zfm#rXM&~kR2^1j%^AMRZ{&Hx(oX^Qo?7U2t#5Zft*C!x?NGBURx=e>o0$r$UFj#g+ z!_3K^z6bc2&fU3a6)L;ZGwN9V_#k8dd8YLAK-;}N`n{K3z#99Y7IV3F-?!ABM+T9= znY*sAEJ8t*%iF3?`*I#1+LutC%3V+VD_w{S{;TGGTVu_h>LQDfNB+KX{#M~bMAD+r zHNE$)*tr@?4YnZm>RMAvt*97J$o}f#t+ifZmMuE%0G1vfyEq=7P})XAG+6lXhv2Q| zJT7RIE`;JEjXiOggAJcxo9g)>Z@s`8?*hZlgZ`gdj6?BtHkw&xshxlqSg927q69jZ zyR8CLfSPn3gX@>R-8MnWBPZKG=bcR(O4`H_$oph>aTuD2++i5tMB1NkR~j z8jCG~`cr72=eI%X1ewdGKd>GcA!|4#@of&18WLgxxLtGYY2e1voo-4 zQVFkfQxR59XZR|F6QQcig>f>7>K9&t6sQ3I*3D2dAi}4D!cBNXN%qc!Hm zzvgFd`)eBy00jMFW;vV$r=XpCz-WrZDdfWlb$~tMSmN6fFQmOpCoB!&%Y0BMHhU?e{Vg#@C}DtqnWnCt 
z44%<4St=3nG+RW3L3L2-I4SD4{~CwicE~y}E9X!~n3#VZ+7nCc4BK9W)_>crHD z;KrU9KD9|GKp0}0%YdTAW066a7%khJTwFz>$XA$PIpS3I0*eViToW`PbALpQv``>D zbGHuMo33JYJ`@4f+RUk@lLlHu=k-ot@B-;hd?{7kY}1MP$76jm4l@Jos22Nit|*_d zt`Zfg3R?S#)`FYM5={ZrkEiKd&^msF_Q7U^&%gP7>KRW&dkG3{YoQjT)rh;EmShTg z9L!dZKd}yS_MCiD7a0~|@>2xHGnfl%Z?U*HJVE^?X!Y}VBj&siF>sLgfvzQb_RW;| zSPUR37Or<9Uo^Nek)j6eTJm3j*T-Yf+{-<^PT1Du%&mEX> z1)5Z0I^);%G{e@iyVWqa)Fi)Mbq-i6nWlf-ebe?~4|vi6CSJE=-ws8Fv@|1IoM}3N z#z_6ZPsQImfk8XyxF!8AI;yJ((MwIA)cqy|@@1MR0^gB8?1&JAR zTjxK#JaxHpJUjdE+tY{cVcnZ{jWzPCm{Fg0(4uo1l^(nvYtX1bUb}w z8oYvJrbOYXA?hzyTN!u%QpeW;oMSXcE z5r6Bxo&f7tqPIE<%eXWH{@vGJus-?Y0n!YT=h}si=PRiNVL6A09ekM5$X`eF-@m`B zg?N`%wqHSYX}cVaOVW?d&A86}Dxez+5qcczgqWM69JecZd@0r@dbOR-i9-8=HTE@> zPl8NjO45TNm^5V|Tybytt0@G5|02*c7z!g2O9yNZf_RXe&k74b!8qet4Ozqn(7ksh zwvl=O!U)Z~Sdk=6!6Vs~I6gD8amPu&t?o$|VZ!*p6H%M(Um_wUJb?}kC-X1$6u1qk8tvq+H;m#Qo(ZVXPWawo6?#4;L!T2KmkQ_ zpf$7_IYLFzQ&#+hsS*(XmbMmM$z2Y(elRZ5shb?s-Rg2ejR(c_L{H z4G|vf1nJFVi}g7y??F9P-B0Ebc z0^BSe!-!0`{|+3LtM{xN)rNM`1a3vq(yQ6_eY}) z5bzh0n;S|YTWYXWk*b0+`NvRGczI4kEleHz5u$IzbO% zokJj4Dc7d}xbemQZ?d(GZVQ`7SZ@DPehM>E`YYSWK9a&S!)m1OwRjYc2)l%7B&anq zDG>>=DJT4zhP9U3Q!_{M(LD=5UPSOtWHA zFOY~0dJBVm^YL3Mvo-v?4RL}aVQ%EdNXU{dt`}vZLvNK&;Zbp`)amAc_|V_f8FY|zTcENKj+ck(d8Ie-~p2x0jDS+3Rp4Y&0Q^qv~!u4c2 zU(%iWn;!A{Y2z`(u9!h9B|0x`W287>TiMw~U#s9h5nU70*v6x%cm5Nqo8&@IMIVSx zPF=3ZyFIK=QWeTpe?k1mM=}O-ulT2)HcmiQ^PbtxCd^6t&UjL(lp8jVbmWMriivyz zFip5O@uu5fE6S%)c?sN46}@VAFc)X8zZUc%*gdxb&!0GXq{O6^Yk~0*#a^mjknj<4 zPJu2KBSV1cOeDdChtQf(5{Sig7t=*N&zle1NI0^jOdz~ih^SIT?-IrpKVE+~27s2*ubu=^%6bH+gmh4D3rc1M?ZhFU@@o7ayN z86=+y7_?pxLkr#B@o@} zO7JL-sHj81OBgSa8{m#et|qjuU`pVgoWN$5fwY{~P(rz>ccm`Y6I!88e;^C-um1R) zQv4~F1LLbkjUj=Ny0onN>R0ejDc*OebWiXHk?aJC$^Sc?jxj3AjZ@k?cg8FYz8&R@ zc#Q2LgcksB8TlVUHRs#&I@^Y=6@;yK{O3vMLENMCa z%T*N?i6s`P526_x#m^T&bf}S=&ygw};8yt58x=kE;^gq&*f6`I@@6je>)TfD9`662 z;KOAFz5lAqlzYy)_UCMM&g>O~kwp>`i+&LoZ{ED-+6q;kJ;!CMP}7ENW)20=^_H zGzn*?A=s>dD?0N@_pdt*3BsJ6`H7bPSN5t=N~3tSkJwj6srkO61TmZU{8O{v3V(<< 
z57A`XQuc5A{MpRxym+IyBFkBJxl;$DiaJN`VD#?*WM|B``7?|c^wr))&o}Xoz*U3q zT9otHs0BV&ql_Rap?ous%5(4$eRp}DZ^MUe#b)c+4v9Ez^j3ezv%h(+?rdkDRHed4W#b--qJ2!Lg%)AFkSqM>dT zm3$fTQu*AM!{IxYpdtyH-_Ct{&~UB}nI^?9uCd_C4ipT~MHyWhW5Hq$8&=Wt*B z^;mx%l`CJ5H7GO8_4(ZSdaR8Q2XYV}9Dy2oT+^T5zRQj_in}D27}m*7J`NzpKn*DI z*NC4(txAY_7E{Z7>hU-4RK-+pA1mm!n_k?j%KYEhU#~rZUy_^|?2Pjun=-WX3{q#_ zzmFuE`8BQYTh8;*?C^H$?Q5`?Gi6`{7-c~QXEz@fT>|P}z1C&S z1NXV8+?QS{c4U^w`R3IM3K17P^VoJosY(pnb|hcU%MOKs@hta>xW#lm;lo4y8FYat z>F1h~b@!x6XzSF{uj46_Z4`HEdePToQU7JWBPU89Va0BZBM8Zu& zMm?5144L&tY4%sWBb3o+QcNr7e{oqy>dbICI+Vgt9QO${N`}?4ZMaw7?Gd2$ShB`HNIQxS^n`TJdoBP96B^_6{)NYhg-7EtygY<)sGk8Ym40t+BGVE4;>XwUqfdN`5L4MXth_tb^Mn zJb0NIw#N&;ep!f`7@Z?ro+=Ns>I*h%!GAfCa^3Ln zZ>FX~37<{DRYdOv=9&c$hq^wii!P1Y$8J7F8p>=#5+5S-V|uew=6)B@G}#@{Kr0cY zXJanX1-yAZAkuMDYx9}_KgVkXf@NdjT^MoVX@G`GE>k;L&bGR=>T#`K)Hz68REM=HfT_F-e1t~S* z$OM_4I_n*wvdD;pD2kdREJQ6rEG?aG-Ug!VLGja5*E;?dC?N+(+WO_@bkNh(P8aP!d&g{mICBcyX(0k@xiHL0tMzK)uqav`tPG-jLU}L-&2MWn`8bxqcOuxjlsBTg!?VZp1tC z3kX=X2t+J$)TpJ$X{QvlEVR$NP}8-EIz$18WvV-*6uY@pCY}7iP)C*<7+F{2cYeJ$ z-lFSPr$mxUeT;CE@>4>42x6vmcD1UBYm*LF`k_!ukgyo;8_fZ-sYwDlBRBXfGfl;| ze|_u+iyg&F0Rh)iE=|5ix!Z;Z8j$=P`bJgm%$jRK0`_ZB2ZBaU;0dss^Z*6F8VH*8 zo~~qOq*j$t$ij82b=6|0GEbE`3;y!i$C;|2NiHx1NfK~P9rL865B;dR!gZ&ij$7KH zP&xNatEn+SI@|%lEUYSdlgE5K|1PZzK`{k&kJSrX-i{rT`5NGYq}r#~18+GMi-sUh zqc(y3Co0pm2TvjRIHZpk91OyE;mcE7&0px39< zC~%>^eOu-H$)2%~%D-dwfU)!KUrA-_PH0Uz0c>8SiofHwp`Hs+n)^jXMFr-En0=#a zqT`lwNL?ioQntH|O=QG3gGX4vf1hXB_wo#-E8oI+Yr{hH_S@^663=5xZ&aMr-oq5R0 z3%2~}kMY4GcN;=MqFYO`5}hD4cljQ- zRP2R(#GZrIF7x*6q6~|2o3G5$_c8ITbs4?d3IRmdFJgHd(d!8P@UeHHcvejE89FQ3kDY0nxVrr zigC}XUQ>8*elfrUBCU~|4G{uOs>m{k8YYlN_Zk^Gk@lL=dMQ-6Q=NhqM?zr>;C6In z+a@}fl+f*Wbr}Qw!|Jzkyl=Lw>N*_PYZ~5bwpD`>~0Wq7*(h2+Y@`{@4eF;o?M25EVGf1|Xq!J?h`6cGbi-HtFQ9 zDHfsX1m+&;&OhFc+e8U{4xG9)`69OoL)L(>WetBI#Pwu&SwP7*>MSCY;Hhm@&uCuRyf$D?K+l zXdD9KTEwGvcEheog$EN3Kk`2iM`S=E$B=K1F1QRl;%3s)3ym_DkV6ZZb{z5_(Qu*` z=yHvU~`p8m_Mf(|Oa_Mn~wKXVPJ3-D*3u4};CZK%ux)GWQQe5@lADaB* z-;F7uzh`%&BxrAnj-h 
z%>L^Gnp1;Hl`SR}^=7(idZ`lbPAXxhig^n{3-wL`=|Lzq?h$B#|#5d%5GjVcYw ziW4CGcMVwqP1Ydc9u=O+0Skq#%1Nu)mSW~^GSRzSFc;OQplff)x|z;JQCoRU0?OF~a7nLFGXkM!D@kPD2#*9>jHJKL0htmsl$^@*(t0JJX+>Ev|`7&?mgJ7xw~TgZwvDyJel;m8hLPD&p)%R zzDhHh5B#>299N_U$;Wsddb+|1No8@CD?&v^r@(ldTh#r?bsp&e<6mZNH3*PZF1j~mrhZj zJ!vna-Ja=T9GUXi65EB&?QJReWfCnlfKo<=nTKfka6&mf5C zAucn}(97GKk64G)lu==S{^Q{&RG~9Uz^kmiE|hBrmeLZGk{)lpIeu0JkY?>Py1(7@ z|DfE|QWkAb9)Zt0@1A}7BRRRIg923KUWxgZ6R|iHc2YsE|8#-qTo!APQszaKwXYJ0~9830HEZr?gqJkQQC!Ic+`Xd{uVs2 zKGf~lO9qIv6o8r?aX1tU{YomLPc%X%#tTSn__{okA>C+44 zP*`oG6TJa#Xn~_Woxc90U~xKAJ4Y>hfhM&~LxviCbpT|LFs?@cW94Uxm&udz; zgy-hrNUUyD9pXRHWx)}o`Ou1T&FhgGj$*JW8-GRl;0Dy$ZQe7YZS;k~ug=_vQ#;^J zLsCi-*a;)O5m=UGNqmMoLYhUQ*?DU%=M3uJIk3TN@MK2vN#zlEw?7oQ|GK0 z|Ga!2(epyVGRV*psx!TsWp_?xIj@$uL+oMdMLELfKE7c5Ah_>llofY&7vW{APLyiNWp8^SLmHlRgX9+JMwT) zf8A{8T+t1x(aF0e4f#u;Xi)3A2sa8cRiZxNF5wG9g{hKD8V3TU=3~78_e*3V7cXMV zIg%NjLlf0RN6vw*UKr;P0jcSFBzPJJllcHB5fQ5$0ZnMPCv7VfzEkLcZOtOQY61rl z1`n48sgJBSXu=XRYIQ+b(Tf9jAXW}7wR3rOW~~-IJZWhnx{z;vD$4N-x>|`%!?cn^ z5aQ=G0TnKRCN6+5r4}efIXI{-pbX?l!u%orWMhPtFi_PM6LYOZ(_xkEs@M%`kaIXf z(4+5J4?6uCit4Bo3sORH?-21Z$%ldHv7RYR*2DHd{_a}~Jgd@ePkdc;l_dm2# z7jPp}dnc{QU+Zefl9SO>pNzKP;6O-d$>mb#U$%P~)-jPiKsVHD1+eU+Q#c~&&{YBgx+Ry{E{XZa{`Is%VUL6rin~Ad zweu&F7q?;weIW{)s!I2DnY&H^UgOq;iD{5wGAs$0>J9V zH3{^FosuPH<^=_Irw^CkanF@oozFGxR@VOVr{Ab>@NwVT{(gH_u#e3TJ7+DPtib)f z!i~!^j;ng?vcm0bWma(S+bQ6(_}jVM%BQvNxyIx#;e9DA_p*6-`p{Kbp-tManw#Hs z#)qoLw?!8v7oD#=)6J6^YxrVAM(SY|y~5DFO^SyO&82wCwzX947FGu&3f?By9fZjR zZvFZ^@hSQHytYfZZMkeCaB<2vM>}S)MgJW>fq`rAj9g8wFWSCcbsy>SDd{EHif8WQ zLe~g2LN);W5LsDb^AS7%3Nhps@HSnmA%Y1=Sk15ZR&ICq*E^O7jz&7Mtie%e*{X>a z9_@+MgXVZbK(lOl=TBq);1((VJH!aeAH4F`4EjqGR6OAOVt+Y z(a|(tN@Dc7Bi?a;fAYD|53CP6^D;OP!An2R$1HtE+)3<#aYL4BKdMNll0`Tkt05Nu z&8v0F4^pRO$`CLzeZC1M*dLF3IzLFHw!65lu22%R(kj7oaMlq`a$sRBiqQ(9O)&H# zJYH+TziQ2oNWoex@4b~bhEI(&G({XXi(Ux*km;=X(~aeMix=WtQk08@ey)fu{AcYO)l8AiT+V_uK^rt*2cysUM^Lx`b7WtOwe6)t#L=+%XGm zOyUg7c|DLgqw}^Lg(70N@K{!%R?=GnR7&}77v;}8Ul!5DOF(5iK6g! 
zc%in^&mvF&i-PW0gM_H+q!@XT(LYbXW-=$YxDJMtiloDIscXo^#E7oi40!~F^|K^8 z$)P!@lf;X*o+EIYF|T$@S@2TbTS@h-)nuh33Ps;&d4sIEVF+CsS3>@KNPXJ8g1BX^ z;9oi*_*g+c#{^hWh{0=Qig`5v^0__bFA9ea6*jyec|c=|6BSB3T+tCcUa|mcgA>o4 zmxqL6R~*zpQ+Zy~_gIN}iTz6+-#LUvck-HYF;5`hWZC3D!yCNz_?|9Jns4!MtY3hx zw7?xRlY{Fm9@fpC%bD&oxX$h+N8--js3wh^yYrMKmQ1mD|FJ>HY*^{jZSRamkwpfZ zVAA#EvY|0Fkd|I=u-9$fo*&yQ#3GBBAc=2%D9*Ry16G`He{H9WecC z07L4T)sgWox!3 zwOkKy7m2J(o4j+w$(k+&@tW!rZ$0aG)~U$HM4bEil&!_ezUx)n+_s$E8Enx$JLLWR z$HjLFte^fE{xO_Kqt_)x%S5l*T4GIKi`Cw*?i#%kW;q~Vq{rFwT;A${`isL4LzjiN z0v8Ud#XfPJG^Fr`;~~hDtVKyKO>y?9Fm)+E!<58wuLR33A~{jNSK@|2ix**)VwZmE zY>p?k2+a&qN6K1HsjLKDO(qkRT9oUxQqF$x;J#w8$i*Saec~>hung?0BuD7@ z4VTynF(-F#R~0FO`-3R?t5eToJ3B&h2#le~kkkS-Y4#d;k;&!U&aR;_=m@M+B*^26 zI?IvHbt|UB+R53OnrOq)gTi`A%>ZETlDCr$N#z^VNg=AO*T~jjyy2_yeYiMuDmNjM zSmT@KM-`wY_Ix6XlQn8iF7{YSYLhifA}l6Gm`V>^_2}-iXvdWqA0ChJ$!ylGu&q0r z98(%n$Ogf81T1cvB$e$5M>4K@>hgs!n%A_5G;*-j*EJkc^zJ+Oc?;Vx`Oo;ZwreYo zhpj5NHVW!zowe^k-D-I12)Ggi?lY$Pc$kJjK1@rFk|sK+lYrmP2NJyeDRJ?66!rPc zcO1;MyjLj6dZ>7-K0sz{>OcKf{Zqe>onC0z z;70wUIr1{8sbI^)QV`n^`kW(LN2QYItF>VJJP~A$WPPo2*BXmj4TO7FVVn_V-bQnT zlnEG9!B7f_sar@()yNStoi?94rgbVN$n9BSR01m%P3jm#a0>~mIsq_8Fn1eX8m7|& zXvG!pCEK9XW9qM{t{IqAg2c~wYIHss8HV$|Y_Gph}etO^pcbD$YKeX$XOj44TSB>QJGC33@2@M3Lhcih0TNchCZ* zVAPu~z$Bm+Mn!hmqeHtZi1ETamb3cI^(@Z&>Rp2TMDl--lw$u%eD=u$2ySB0deykI zetFbfj$zf%b9ountM0t}71Mc+C`xvA$CAy+XVn*BICX#Y_isu0#e}Y1B&9dUA$Nql z_3kDcg}^2lqRfS2uod)8NDyC_GSCqa5~9}Dhv$=u30d8Gjwl*Tb3z}V-*%iB*?8jp z*@kfOo-*&nnnV?fLs^0t+`LcDLuWw_{&v&{0>U5(3v1?feS+Ajc{iecgTH(H;ozY| zY1D!Q?zbe?qWv5HMQwZTYiFmNeO2%TbNYPQQOPt~9+(;!u}o0nf4t+1t|Q!tH$VXy+p}%Bepnj7(8ebt>&1=eZ@k4MDIz-p zCZI+8JO#zABD=4&R-9o4$!wCy+YzOYtHT3%!`HFWN}_c~c>rhVTFyulSLXyTqO`Tg z=UJ}9UXgc-cO+uZytBm(+7=6mtcQc5SFBF$4jNf$nDL)9iNe@@R8=6SNop|DXkX$E zVnN{jkhPqzN3JhlID4q06N{4Nh5n-on=X_eW+Q%L&5T=UM*hO|EKo5Al;Mz760zow zk?n7B%Hfpu3qhS~;S1M=C>Ne#ODJ!@zT7 zl0FukQcEC8cqK;}@V)Ho&te|RH(}3LyuNCafUSNlr#hOYER%J#V&aWJEjU 
z*pS>HUVD$Y~9HZkx_I_we-j;E~iUt2C30gygKzP|(j@0zEN5^aAi3VL(kXo{HS;W};EtZ2!B9qJwUSbdRjk zqsO=ipW&5=#N2tSA!QR683hn3_oYqmjuNk4p8fXlx?^qZ0R+;TePk0?+D^~XXta1| z_A`6n=1ETl<})d!?I<7Y8@0TXlE25c()x4)O1w(dWq0AhKRxh5oV+peBHOg?W1k$t zG&v9kl{EL%q*?tq`df|<*p9atjxt6>SIBWpfpMYds2iV6D?3YCDuxb5Wjly#y4qZp zT?NPj$t59cGh9aBWVLr83qyUn{}yIb122}xN2MX=si6#Glit7{QGloph^M!B>>n9b zV;;mzO^HWmY;Eoku4LsHGat?*Z75#d6nVysk?d9gzXgS zx4!ac54xyMSPHC>ERb*dJ6JENC+$2uS(c7H@^_VgC+B5huL!eJ7)m)3{lSWL!d!&& zp(atYON_Dwo2o~wXRr^?qoCCEpOIh93^^1gm#rpNVXL^$$^BGkxiPChoXr-ju0oWj z(zT;qVN(WJ)5yd{n}Q<=I?4TocjP!1qXr(S?`+L_>y}jP&(^T(odfP;0d@uoK9I~G z*ao*5XyG<7yDpI8jeAF45~-n8qi^EKAsg0QizW_ zd$VJmgL_B3!q%1yTDjfwcti!VW+iXITO0oFo{rz+VJ?LdYx~p1Tpt(BzdSo}i*wCZ z8sVJ>Uaq`m>!`n(EvECr%np}FM+8G`l}sPLu%wRNmJGB-JP+!P2Mw1aF`XGT6P0`5 z;MY_#a-+se03VeM{KXfO;NP7ozpG7X=%HM{>x@b<+L?Ids|8{PUJB}|@`U%c4;P*k z2vOsa-G1Dod*sujg9V3xx=a}OyllPzfZng$WVs;+2OJ*GBq6Hf;Gj&9l%l{JsN4*; z{mEBThK3_adtdsle&isJk2_y_s$`tETp?NOaFIe;$c4;L8{SPXKF!92^4Wp=y$O(W z9eS@AJ_+Sq$KO~CPSdtyii&e+(}GMrHbGuS_EnT(?#LVVTkXO1?sE&LAIlaWnfs%c zj?&ic1zi7x7|89JtfyD1p|R@9$Xd*LYc`(%-|fh84U0b5Q*<`zZ9Qk)$Wd8;Nvas@ zo2~Mi%jE~X%YT~I!1}Y1?|lX_LN_`#kN-)77k&ao!BLsApp z$W$#JV)(HHo$YsY|Az&i?SDr8@n-2~5|+*0`_Vd)j*k^OyjaN34eprwV;}su7f)Lt zq24***rp(>;9}v#ouxxAenzd+!$02SlZiU;^l(ydz4YW_adH6_7T#VM6 z$EzehcKjN2Bn`XCwX(u&WmT1Y^WSK(`t@4Bbno!7xtp`!i%6P}Jmr{n|FKizs~#*i zM*o?)x6AjaLh_qvJG)g(emMM8Y4DT!i2^m3rDt@M2UMT`8@{3Eje$&;twFkNtoK~) z!a%NKu9;QBS+j;r?+4vZpNhq$%++26=RY4g-{|A!o1AW`w!@ZZUJG0_c($Hpj2?e8 z?aQ6-Pk44Lyf<7VwK3LPR}mYl`oK=WmWgsC4e1E_wfJDvqlNm}idJ)F!>roh-g>2i z-qqLkl$&n&aw9gTUxa!m*B~!lZh7M0f2#G%>%RQXq_Rh=n#TSql~<_aQR%BYJpJ6E zS+Qx)w>kMnaOfp29{V{MgA#L3jxKwXCN;!oee%AGZMP<88`mEHW0f(}S!xSWihzJe zKWXC?glC2gqyOJn&0WfgF?#b@%8^nM@K_ zho229qZHXwuHo+Abq*Z9fpv6_mFbSGTC(gT9~mIW!gtDn%Tq%T zy>743gMHLx#ZPR!LIHBYzY(I~tVlb{QJ{OVJpItP&BptfRD` zxZr^v384p@u9&YsmLv+}x`KOS`o#QXPJ^xV4(wQQjPJoxQ=M0Xqqk3A%7_*SXlE_4 zp3v@C|KOw}nw1CQV4J*UmgL9$p2j073&;eR^xY0cdcg!df+g%;*$D(f#rE##M3?H` 
z)N^AGaFybI4Rfk0u*DQMpVk29Yf;2r0p`3FnBtpO+v@Z@Ax0@knHdm_^AY^zC)o-I z9dgtZ7kEX6K$4rY7Zeu>EFS#k#9%}?OHmLEnUq>CB&nt&gT6_#HIep#mL_s+U6|P-q;EDL&Sa}Q%@6dS8XMF>2 zWLB)#^_qKrU)>Y(wWVP18KPjeY0YJwo&@j_s7V)5M8ze`K^JdQ7kPn$&L#guu<%1) z35^a{e%FISSHWD9Le_lFM(z>jbyOv?W2OVCoqyd~Dqta9Ieh?5nbfXq)vBHbCq=bT zaqv{L_{W%aVr&3M!y^g{z@93;ZxIr@s+Ei*$ZFB=BnOH7saR5qrt71#Aj+2Y8hln_ z->9Gjpg%SQP2-Uf(Uoe z+G`TX3`yp9`Je-JD0;30lnGs?X1p9d{S6V;S9=n!JRQWsgTvyo&V9vy{`n`F44mS4 zbd`E-n5o~6gyp>bi1>5Tq<6pKZE-&Qe)LfEMfmQexVu0v(ual2-TmQSN#X|(wY(Jq z)0MMm{T$0xCP{g{3ty=Jx8^>BXE`iKC=0{35S4=xD!;7zP*W||x95xHI#CcTY>pe% zoubk1flX(XZt)JZsJcFLq{lXxu zL9y?CUwfj9$0=;sh~)@8;Nb2fv693S;Mn{2y^L_1Rzr94d`ugkX}^5-F;`4+*Y!ik z2j_j>jNsueh4m1g(@~}E;9byXNlz!S71@67%+cLWO8<4J{t(|%dp5Y?BRRDDK5&i> zJ@`{M=%B)8-sB)h$a(fEj?v4z$d=hLQ#KWTk-X?Hxd2w4r*n1dtk1MbQzu^d48a3i zdt)qF6WkchVF8!0LL47YQf64`9G$=)L(D!h9JhO&2{{X2baXTahupPh7%X}2TsC@o zqY0)gN^}o~OrCukQ&v{i1Ls@!o%eUOVx8cshf<>W>gOR_`}MA&0amPmVJj}bC3Trx zj;DkOA@5jMq+eS**)_lq_ByxFiXg4gbHDq8^^LP}0$J-S(yFa7AXeG+1lS15z3OWt zdnWXok&M)(&NFCrB+SqMa#P5#*D_i1%GGCf3{#W4T3?~zVHAXa9oHQJrH$Hj^Q;|g3uiBZX8T1#5d;wf8`@C=^&sAp1ElbC~cs0>Af#! 
zG~khXLP02IJCz{APxSGIsIwTxja=>9XSNw2y1opU9~ycKGaDoDJ!nPM?ALl*@Phs=rPazkB~ z$*O{`qp<&t(-;O75_D8JB~6TVu1U!sL?Bq!7NkCShMbbA*Jtz-&ULsERY1?=PTmeH z&lN;Yp&E+@Y^jN(F6^nGqQXPHmWlgC^&|rMsdt$}?jv)xhx!8)-9S1JdD&nT&ybR5 zgZ-HI6Q29+etd(gNZj=ols}Qn4NbzpFynzgpem7yy1d&^m$NsgiUt+Vs97K)x*{hL zflDf}3qxBiafC+rL%@<9~LlV`W36Z@l(LnqHR#{sIQwI&qO1yEWQ}I^~>M4(g#WrigfSDTD z8J~FSpor0+Zmi^9)uh%Bvr*kXsnG{1$i9>r_v&=`RuRNbRWIW2gEL$j5F8Sk8gg7g zbw`t1dZD(y-X3r53suy(0DX2GfuQQ=tBJn?)VW{%dl6I=$;*h)k0vFL*PxCS`Hw9C zHUezXHdtHcWU9pQL5PDx){C3g{Gp@7S-f9o`PrEvVbWM||CqDSj_N-ERw9-hDS5~E zYJr^9xaTc7=m#$~352_78r(Qp7l_m*tv>?T3eRl6R>P1?|v}t!bJ+Hhx)fiEQ)y zq-VlXWd9B5LDMdheKNxE6msjloicEWy4~^n${tnSku&#ch=Cp)#Td&<(ml{Nwf#dW zXKTvv5H%Nc{(LK3b7xns7M7?9Tm?P>vM(aytXd?V(3>{-&kb5B(|wIo^?~yADJ7mz z$kthUb*!Y|Hdt4$5dFsCaM7-hSS%Tq2bQ7mzL1qFYUSn+Ej8X$X-3z&vAohqh(kSy zCgrkEd^MraWMRm-gpCJf2LXn<>*Xp5n4T^Md*>7PQSe9l0JFSdvZFj z%A#*_a>j_Uq{~t&tL&Qmfyu#Cn4zp8k#j(lyn*N=l+2MzF&qtyNlEJWbDTI(P$ICB z`&2C!)Czy07`VtBpG& zBOG7qr|pPwTB05&{P}TXtfWLxF`5eUnjM%rl8hZ-En%jpnae=+^!oK{X}Bj62@$aB z#@+5+g?zuN6e70Iwe^ zoXBq5r1kXlT;hqX3cZ!<*f_9$tZWDB>vJehAp2Rl^jMsQDl$U9vS1CPACNLf{n#dk zwx!5gbZotjz}nczJ)KKXnG?RE`lkAT*FEq0*Hhtxyrpw;EnyMofNHIY5kKo$N4^&( zZ{psUx1VsLmofJ+xelTY_lv~^DNHY7cpHjv0|`F{ImC>gyB1+0{C4A+QXy!fA1cMA%*<3-j z0?76n-%W5M#@|miys?A&caW0v|L|00&}~c%Z%|&U+WE@D<_hXc4ZZ)w2j-k?Xl#eq zvp5BfpN33O`sOlB2m!($2m5pnPYLWHwV2YzpQA#SQTAE7+RkssNLmAu-gDW6SRR9< z4HPl0s3owO+^4yhjfRusJ_zRD!_s!h0ZfgYTbqax36klszG*9E=AcytpZjif>0$Y3 z{#(w}f+Ztj9n@7w*8(Pv^P&6tryw*i1gf-4PEO7xo^+e&>panXl9E{q1fsVa7M2;P z7xOYnf7eb!R2ASVuyJ3pBojSR(|TqRBo99YG4&(V;w4&Cg!&?bqm7*|aL$}M8F+4D zJwx@|@6h4HyYPM%ZLl!~zj%zNJ255p#`>8O{v}bIso@HT!BeBH)4r3WAuIhqc#)MW z5*Llgb0n&=Xx89sdMC~62XX|#cEK9PvRF&Qk$!99tU1&rHak1g@tY`@FXY>asm?YQ zj{*a);q^Vm>xWq)lL$pL!vZH33Tp6aOr{I9P}K7>Bp`y9|ESESR}_gV<{bX#uT9zq zrd%z1|21XV^RHFgfCNQ2<}V>jN_a5lOhCCF8m||*% zOl!V`ZpwSwFL0|UdTxB5<1ugZ=FQ07cE3e?QtL`E&d3!`L_4&#m4XoRQXN^Ja!g!K 
zL%jA--aA<1@sHrkpXU9h;U5<2_t`)DZh#~_*_OS=L;_NjiIz)f`_RVd5&x`Ec#w~w@&mH>zD2j$ct8}B&zMuELNs@9gmQ9;+lrRmCC{&Z`Rv$t4FYsm zk5%<7mDgO-$y?;H8@xj}Rg=l@U2sFy_!W<-!R?izkQKGy#1lcVuXOyXV5~t0I`Bb7 zvn_nX)=%M1X;j;%;GFDuUkP079_Y-2^rE7mxq?WTeElUE6YkiaFoxa*kw<=O9;MBQ zj*x&^R)b}t0sOD}`_;n_P&Ws&#}A+*sUPTlCha*jqS5b&Gim_T4No7%C?f_EP?x7EirJ zXklK$0_%^BrPgljAb-P;g>pepqarkcQn^b_WFDRz`;{+W8y>NgO1}K}q`zg5-XcMB zc)|kVjQzZF2g6B6GXlr4|6=*7%Ma+}B+{cCN2i5i5iTw)KO6hj1F7x8-ghgv-7)!I zI2`1A;ZOvha%PO=fpRZ! zJIx0E3EOEJbL>1i)h74f^6DXBtN!xpn2VNJX@WXQYD-QfIsLroeM3gXF&D>b1T0r5 z>2gWb><@rpe@U8~+P2W9U>c$hivrsT`sGS>KS0I91F0*ciUu~6MgR$!=~5<$2z?HM zpm5WcGC8l~q$nl&zsJA?BhoO7Gu1?hXkQhm!7!Srf!BK7ZYFMGZs=T+%WW0v0>{WhN$FjPEvW$V|Wn==z<1>Dk8ocH(m z?RXL|3EL!T|G-dDtI?+Hw+pzQZ5FuRv{xvSX`l*ctUo9@i3+)p$!yFQ&UU@Ma5UT8&p;H zye*1dA?qyMQs_1~{*rCF0BZ}uT!PxH$zb4v>du;I5F<4trfdz-ddcg7G|Qt4$2L$5 zp{*lgAL;4gsJwvi6LdcmE=!Dm8VB|iBrV?&J}O&~n; zQr=Fj(uf(F0i82-iFKeX7lvz1#jj7@!5kk`uIazN^W_9%-{rdyyoj(*gn}6kxom9F zSIPXu>Qkh58uc5N&EqLcaDz zmLrXR>mz~OWDa!q1{)6_;f#eIJq;P6(64jbliMuUlj?3oEv1=c5iwCDY%_q$(Hx?S z*q_7R(VV)>0cE5#AOv$z@aWPQDM&%ePnD2cpHW_x3(17-n^-$`pw}Z2Jo3qAU~b&Y z`fihKoqX4Tqu=p>8^n|ecJIaB7~xdpz6+AW)@qca3kSJo`*nvsdQ=*ahemqO9jZo{ zLxds*1P50HB#(cmd>+y;P{ZM6vI`uR%l?c?$%h+*K_Fl^cH;Z1$Ddu@y=c*HD0gE-4$MKE!weh;2LigvhKa^( zC5@Tj+*FcS=9ojZh~xrGgQ?~j9XGU{(fe4%@|Z`z&7VUR0+LWu<*i)tClP=-T=ir& zH8ZO)A4&xe-muWns{cMA)m9^GL0i|W@&hkW+40hex)*a~5a z!ziDf?K;fJ|An#-5pEP`ssIa^wnV~Y5m(gZ9VP@_A@;;iQ|d_{gd@G4m$w=1#eHU0 zjiK2rM>7L!Su9+CyQ&&TIas#7{{H?s{p52km@A2LI1kr4eyY9`&p}$+?_0d$6v~#r zv7BHEt^`$W4)y35Y)bex#?H>&-TeaE%akYOEi!K|I0;2CTo6cRsRSG8#7I{FapZm2 z_cFo6fmb4?(tLbGDkYCjpDU?RQ3-ByDZe{s_#i+tt9ZWVm8l z7YOMh5!@*s;Bd%pYP4un^gdHOMqOaJ@q9&EF+n@`EB!VG>(~7pbJXT2;H}W|-XzzQ zUE_tOJd92NM{lasf#9z_I?R$W;M;ZT+|23Ch+j3(8(Bm zvUfc;Dc_uaTE*qk7r3QLBClusD2cX)$s`h~+6o)A#f&SI>R-X>gWeE$Uf7RW_V_>KRU zBaeNTFGQO)3g!0;`V%h2P%uD%s&JbS5EIBn z+2cJEOr+Vw&L#j83iIoDzA4yT&h(?jeQ9ViV^j*fMn&MNPMr0QoE!vCJP^n-d`Tsx zl2$fa=*T6Gj=Lt_2F4vG9Qe+Pb36w!KRFm!EG4H8(v;uVE&~nWZTI`&L@NR?p#erq 
z4Bdj(G1|n~{o*TAs=o)=@fUs6ie)+Kk8=-}A-@b(`ga zCQV9MKiP0o2d2)}t?=t106FR7%RTLQ)ET5|hTvWS0R;6eLu)q}pvh5zAKdjjCKF{# zU=zP9^> zzecc|!u|14)+F>;SA}B*q(-)iW)tjmJTmzCtw-Z`rb$g}ARwOL4GAMdVY4mpra1e_QvOsf$uh3m|Ox94O3U)Q2AVH1h}S6($cUsR5GPLt~| zw&7IbZ5pHunxgO9;={p89ON!eWjUKQaRv>b~nK{e;bW$BmIg;V6K77tETvDc$`v|Y$p)ErF zcj7w!S?lK?B=ul_L@O7)?zO>$yZQF+8KYGg*HAkiGYqwuN=sPoZ+$6SfL;-M7J<{2L z^_M(Qy{G#F2;goKd>a0d%ND`TfS^{oB?ZM-Mt2UYP;3dZ5D(=J=nXEO3qC~&248%! z;HFQl&gX0Ete>b35B=@LH~+{Y!&6%Z8lM@K-FoF~re|XBU1{l2S(~t4bzayD>*Xtk zs;?@9`}?Lv`w8>S9X5Pnom%5+Uz_RSczyN}i;|i$!9D|y;qG_CJjvJ0(*6-yw6IOU zLi5?W@o$O;mi&Zd!VK^f!ut-y&EBRPza=o}0w{$wBuE5aD-7veqHNwBp!}3W8lrxj zLD~EBTwhIze`}|v-wNA%bz4eby|C-EO7o)R_OeUGWd~=SAHLtOaj~w0{#WF~lll9_ z7dys%usfXnwvV%0yF6{&T=BV#!clo59<>Ir#-kZpx5>zjx^?c8Xrqonc#*_O0#8Ae zc#Eppl_U`3nhMv4O1w{qQDdRjjUHND3HzK)=y9^=g~vx4&8lF(d;B?d zMyILY6q#5;Xgh!DuIpNcYT!PQi49piljSP$I}8yuWLOUtTknLv*4~?vtLf|F7E^Xa z)B?^UKDMIfZIQL|VMD7y@Rfldb-<$#?egX0Rz_y0yx*P- zFhMWyg)3{*uT6*0&^%+|xxIH`(8qgtM)Cf(s^nTY$y_935^9unx|nc2_)#xFwL#U^ zTRv~UqROk9yJJipmiesZ^`)xcVw%b!quWtI_UG2a-PTS}!6v0`2GVu|(bj~YK1$Qp zf7&$h`mpz?R{FuiI<1$?-x1hWu0Gnp%|_uLowQ9*yU#w1oC#%pdHYU6GC^50J;9$K z#L5sGy~@W{Xy|E64s$A=!??>*S=ol%j>EH#9Nhmp#(v!FkeZ6tK9UtcVU#_?0<_{Y zrrgnTP(?r^=%nKR<6f2>qkl>OGqGApcBap|_g z3Ew;BBpr3xDxPfs&sHwKm&hGonyKpcjQ8L6?`k^w*BeYYb4l->StQXGr5ZYygWCHU zANV=JV>%26PQUOV?W3Z;s9k2dAu!Rq14Mt6{glFmKN_hZ+?F^9K7bF#~e8p%Ei)d#Nn z^MJSLC0@D0cO@HLZS-PbeKb`vpZr;&q&HAj6GDsBeYttX)E- zliZA)(&sWg(uOT5eT88))WJPS=LxQpNzD6#u@r};#gl;pnoVfH(VfR~-}!@heUbAW z&1m@PqJVAU&|gip|EkS$vLhA)_~?3ZtBv_8>$nd>sgBJ5XvU;6xMi_c;i)!0Iy(5W zSbfAO&9Pu90+>ZL@S=8r@c&8SW^%P48&Xf_hohzUePU}3P-Qs*SSe3DeOGzJuv@%< zZGVTCx?|I#C+pUUH8@i}hhX#iS&0qW_ zvXrigJkkLL#7y5IQlRFwok|_l44n5nt(7BIUH`=vSiO2R0&gc&;S%Dh^C5M?1)6=v zFbB3eH4nE9YLo2ul@S16Jn;`voedU2frslN7Pfw>*LT7mu$-SCa-Pa79^a}dOzaZI z{xBL=Goijyf#i^wzRywWZj0Ku`c?D?kB;+4w^yd=E=hy0K5zJO=vm&d#A9;6hIl-B>O}Rt#Mx_TQhZA&vpyAY__F--O`5nSdk3aT1!!b zVtfYBhfYQ9JZMrG3NpNfV)xz*Kb+v^=q)v#11^N&Ka=OB!Um$SF~Pv9(}UdjOb)E+ 
zv18q)Y#39%{D|Or+lD+GxUU@N{z=;K8Sh+Y{?`W_hZLOl{1$)f_kiU_ub1*))7rIa zQP=#bX%T;@PTNrNtE9}i=XF~C@os-kJG172Z>F&Btn6QehUa;4xzyj${OwBp6YpM& zV`ggS)KZ!zcfXx)F%sX{RQ!;-{TnSUEjyy%0BdnGsiwx{D71d!a-X*KX1ZvF-QccG zgTIUn9LwKYlfN~*VdUW8&<>WL2syL{_mdR?BGbBm}@4~~56GZ$yh zq6Qfh6$P8`6aR1@N~Y;C`O^Ph&7JG1$ZhL!_VpieA3k`vTP+^<@u+Wq7yUK-vx`KN z7j=07i{SB}D=wu-wB>gm;I>U1_$RxzsNO)Y@Aq+^&!6^v&Ef|XH6ZcHOwfcyxgqL5 zI~5cP+_1i?>X3{*v#2%Uf>3o5A~-EOAsl-Bo8{`T13T8 zC|v8^d*pL5_3gMpSuBA;{oQtv*(wsEYw7B~hO@6y2^opmpANp@3mKW*7MMQ zF@tx#9OuQ1#Upkg1W|zvh?Cj}RZG+w2{foAw$Sy~8KzVngYq8Xnrtl!hXWPgSp9a} zgn5HyG)~z^nfM?{d($MfO?G#ixG-Cy_EW6oIo6`azYPpd=5G@ozyD#NY<1H07A_WH z{d^YT!T0yEwYB0!(L=lyS=7?p0AIMVfEe2FMedxEzbl*$E_=w-f8Ca7`isuG1Z;Uy z-P2G>uE=slg3EyRsq994kjTaF&VdZ&j4K__9S#&+5+{5)?hic+d6StUkM*!P*RICL z&V#LKr+pdjsI{Su$NA^(@6JU(iXne6`gR-W{crEEX~O;BU9IFI@A8N>=$jOSU~J(n zG42K`HHfstCkz$O#7t*U6)P?O`SWctA|W{PtHwc%#Ss<=X%N( z{Ehox8ySj+3w{RQp7i|W%#6|c2Mioy?i}acVT_c}^OP1JhW?MA*?Z_0KoSfl;@4;X z_y5Ul{P*Am1Ag+qqw(L-_)iP=zjNcibK}2rW5FE2{J%LGdn>4z_g&3zugTLJOxB3Q-q;`QK~A$&jP=Mnt1Pn8pdklq$R>2#=s{R8-C&f^raQ zT2z4Cq5-rOAiiVpuZo^<%{9u;zXEsPe-TV-LI%Q2K8;F6xTg40ffUqO2qU5 zB=!-%E!(UMG~$5<;RRk2@Ut=G>*mh~-??{f@{InP3k(;4%v7BcT$=%DP3M5r zD@bLiJJjYZYIhX0ZUfvGJ<7ds_Q);-`i4~Ujr%ss+$oHS%x#IaZ7^&+&`w!Sf|K)`fh=ocr^#OAHWSf{`cA>~#Z{{qx80gSo&h?)bU z^-$L?uRY%7uEpeGmnt+5Ctoe%tGLMq?JVP zNM;!s85w{S#XeM@!9ym=0Dv@{Rfy7^2wk2?(EsugD6^xYDR_w)pa8ERM}B7iH1N*+dCC?oNFQ(spjd?(v8o<%PN{f~Rg z<1QSgGy}2i0i`cpY8Q1eKzOSUxh>S4>m0zw!UE*1zKD8~6b=}RA{ddUwT?)C2u&w6 zT99esYE1z|9dI6x1>ga*F0&-7#T&O^R z9s)fOyP-n4<%B+*X}{A*eWt^pI)3AKsZ)#@5QrR0drIgSv^pYGI>Z@ zHdYvZha!liG@CUbGl5H;s(ZvfS!>YJEZV<2`~vVHt4KsL<=$030t48;Ur5*=v* zk}6H#el`DiAr_6@z$JzR1%t3INHi7Zg=Gqb6XE}X4Un?|(wF3lU*P8HS&ie;fYL!L z+?Rx4;#%Lrfvy}e&KydVMJ=Epj=+|=d`h|ZfmL~q=Ar**VSLuy9NQksJ&5l%G z1Z*a)`ir#>>EjXjOpbf!XH@+0HATfJczxCrE7D=UZk|2k36hmtP+fqRP*- z$p|)|D-Dt`8*e&jsH+=lo8A;!KD)&t;MJA`T0SMwp{M$J9l6qe6!H!F(888r{|kUC z+<^q>e|*epgNE`;+1c;j9U?hh_yr#Ct>bBkGJo6v3VliqyX1$kh%tqk&0qtus7b^|H=A7QN!tp5u`D}CYcln 
zgbgJCK?tP?dwRks^8*BIPrXvX>$Qn@nJDB)Jm+`mvc0r=YIn zOWDiUkh>}}VCDeQEFwi3jfO5N$HoNBvcfshqJmK~oX6eUoEgcoPyhpIR zeoX;&$N&JK?h(tP>run-K;z^aMM@}Sm<}I4d_8Op<`ZFXwNQYPo*OiedPnUoX5!_N zpV;8_A>fR^hc0d}smj06Qo&cs>UzuYunLwmlws70`_`sZJXTOrGHpW+cpaeXS6s?{ zLNb#28}>`}M?&U!RnZH*v)&svaLJ@}zm;b8S)-)tX`1-j>s<8Fl59$S(XOBaKBA_F zBAVkgPC>6{Mr?w)ug^i;h!cuhPwF-MJCmhL!m_OKY`Aj1mpuCbBBrn?ep zZl(8xfj~~v?y7%bP7QS%y7?cWRC$33e`?+Gh~tyx$xxf_Bf_M+SFX5`rwS?q?yvUu z{m7Rih4R($I1grSULrFepGbHZ3z5vr?&tSxhlK~l?$(*DNB8kQn>^`^>&l2ubR{OI zwL0~I4-MSeZk=(z(|9vm4;tDvfqI=%H4qy*~BV*M*IPQsbrO~=c%z6hbHf3 z6MO?RVt$U%ziV8NyoRxOu#!ePGXT|YBskE3kiQh44m-UP=$dLyoyC}_xcL=?IlX^< zq@-gvx9B$TewgzI3POoiBKe{sykV0j^%B|&3KWan9os>2gk6AZB|vO3)u)NvjeK7-W(ex9DnLRWS#UqhPD{a1~5QG;Jdw#kmBlr2}v70k*FZ5%ojKn6n`%tM^T8QWdx7jWQTJq*lS%K(C-fhr;d% zX3VLbOjNIW?}9B`47Hb{xcOJZgI9B zsY-YT_@<~9hJpu9Dm1JYQ{ezmb7*?;>^J@!pV&M|b#Xf%WAWDtjuzWB2v*@2@aNI5 z=KcyrFO8dAp_*Az16z1TXyQFljbboPDi`#?biB>7bN&dN1iy-Vmwq*J^sR&d;glYla92){af+yOUIs>hH{y-BVi#pH1 z%y@8Hp9HDgUEm;y8DGSl{3{hq$@e^EUr95tJKD|zpP*8>^c^>%g|;2R+>t!cs`VVz z1Y^{nMK4SPDnWC~4Ac0@zpc9mRxDYPlozX`$Hf9TPx0$50?TZPS|iLs!@0WzZZz5b z0s`WbISBCvn$&#l(_{zK2fK3jzuc!LHD{7u3Nd9^_m|x!2}E-Y^{=2ej%E+yzX6{XoN9SnN}IGj2zxa` zEMx%TV=|C!b>W;bCZfza6LR$PZm7WDUU3al#+~my(oF2f0SP7CZG4Z&i zZ<%r2pF!yo+Cf4^uYC)`+*}cxS6Teze>n6B9I8+V?t`)+H@P>?lu#*|LRlKdpgJrD zv-VOG!75DPC`@BR!*ei=fe6Jrej#5e{Te30axfw*_nBkyygaxgX;~_VTe^3vm+MN%w16>Bm_TE!8BCVx>5lGQ7s}EzKO2(IUT=J6*ffOR%y$=qmZk z-9;X~hCgdL2v*BCi|lmgd`d>MA6ryR`A#Bkr{3w#m5BRb1R_b!CT zF!V!EzkVUaL{0my=QFO{ZTt({R4w-J!rw4zRu*Fw8^D*DrnR>J@-TbGVPc2PO7Xjy zs1D1o`2qYc#v64=*y6$UN2C<#%71>N=orawfujI_(@B|?YnneAxz!j9YYy1z)5^7g zB@s(d0!V68{H&ze{E>lS^idat_2UUBm^gT&xWNxiPo*+S$P=BA+;!aG( zLqM`>ee6j?Zj0fbqU3Ax_0j*s8-4fS9fw_Ytfwb_u*?uFMO~et^wR9IJ2zvDoE%7D zuz*xmX*Y&j(=8Pg;LfvraYY?VZ)a@?{oalHz9z%a10TyZ;r`b*uC(u3zJrV*>x{vq zrtac9=%q}_KWtCVvniHtw)-NKI6aY;?9|#jR+-!G6&{8=QB^4|o|R#0*nsc)%QWgu z&i=|=QcI6o+br-w;NyOSLF~h5hm1C1G8vov{F;1m?#rT=*bf>)Ueh=V-)2e($Ky_* zI!^v5WEk;9>mn<*>sc5g*DVBZB_PwLp`;C$@eyc*oul`k@a%mhm 
zb0UDe31o?}Ky=v)IjeHq>h*gAK*w?`U=+n(sc5leQ)u6Fxk8pEZQWbepqcoGx1_Dl z#J!2e=Om_q>LbjZ!y>Jc_5rx69Pw^RqcAan?%Y}r)hb7ddWkM9mXQ&^fjfM*Rap+@ zPkT8;ULB3GQG#3Aal*Fj2IZWRdJ+8VOrm3j~W+pzreeh)g=vd<=n@t6G&!@uqS?}OcQ z;w)pUsIyeCb9dmS7K{uNB;B82JMU-|Cv{ElCptxhyXco;FX+=gjSJJr1Lw3fLa-Ej zopc}5d{^^$-HfA^AuhTH;&h&1EgQ-bu@!3?6x1EnLvan61GGV^2szp@L5|Zsod@b!S{xURdC)gzk{ zKdIl@^!P|o2dz7P#_Q~sF#Vnpc^mJ7k?&vjoqjBgPRM`ar2)h%STGA7A} zfJAIXsSLscfD_bjJq@khYb&M-3pIhh=riN^X8)t$) z@3?t$D=7H}fShgl-x&$%`@LRY$nduB_lC9_J&vJnW1-J>J!-5XMrJ(C<{E5!!czYb zGovFoOo$M&3V27g@GkBdtX{?14LaKHJbd`q>bcH~s2DlcD<24^^ag!lO z>F>|uW6=1vg7RR$v!%gzPK9-wtUWW>U25_guA#;~0z1RRp`k`}j({ZCW-s<;z z`z~AC-}zxmsX%a>n0hz^m7vT$mQGYBn5c4W^wp^7%cmFCOa!9}aSXcFRpM5zt%K74 z7~L={oytEoZ-l;ILgttMwSD9B!!)97*311&pitXt@ZQ~5~t%b0t}gTdF`&yT2HeEDFpp(5UoE+J2%w$=rWc3@r#B3+124} zk6oDjwy3HEh+0ZLv#WeVD>+eeFq|V!H)qGCd}mb!g-7OTHfDBx>MA+&BYjc;)>i4) zauLarmdx+o1w77HhCp!k4nO?KB2#)3?2wlki%um@Yw+x6!BFZmH{H*Phn&_&@V?Y> zDvHulDxP50t#CexywFx%dB-uF5>$qxwn2K=kJuE6}TCP zCDk5_jv}(F`gW>nvkSP{$Xf)==atSQkND7Kyp=q>ZR7Av$Z3{I!5~(PF|fX7S^32s zWm%N1KCUwRVq4-E8e&V0Ywt>Vo%`dJJVhF%ND|3~zzC2CPiM zExcfeS>q2Ecbw=GkNWO`Y?;|_3oMJ=CiH$+k?IX~>67|Ftc_wf*tAO7z9|mJucZvE za&_5)*v$O}eXedo@%n?_9Qi6`9d3(%&TS^Vim|*oLAT5a3l0CtO)%;vtI!+g|FFMV z9Yy`)cY;b1s@j_vFmBCL0ITh%N=T6AJ9YytkS*Ck|EupPK7_ns9A9o0;~7#PM7{Hd z+dSGgS=S@gASb%_bzSOYe;8`8+L{i|XbCQ(g1)&(@Z+4?i>Efg2 zY?$Hc^0PA>W}_-R-w4I1q%6j=GHbqQaZ()3rtJVI7OCocv~XSUr@;IT&yvutxTB$< zU{F5|qt5uqWr0H{x2iRp9Sn!V7egIT$~*dPR1V|rcD_AuoGK}IlmXYiu8}d;U+%N; zO@*TZ2bebKEvrnvekzM#RdCPFmz1c*YfyZ#d-NnGvPg&azH^|56mUX8;Rg1G_TXb> z{h7AOlETT|#VJD(6=ODv=U7pKdE4~8!f#5uOXQQJAB}{wO!|4Ee1AxI-aZQ#T75h& zpk=a@!)SomsYfe=v!CH~{CzwfMX zO7nv$tYcgA2`ImFAtw1&6A?6h1f17y%jB2SP89YXu43C`8b1ZP%3J#j`a!kkZ1T&F z6&@w1O5D0~?VwW-WJ`gFe$$>%tK;0Z)yK|hYq#{;$Oli2l?cFQ5h9JV>&tb> zMusgl_(kE9dr&NqohoJqpNgk?(!DR>_wzdzq0ZA;7bH^ne?y5lgv&uBph&2>rn>s= z6=Uzy3fkvX2=V=f+J7m+@bFQKpiNNVO$^;*5a!BaKQrXkQ>- z#*XNKeT1_1UP{#d5@js^9Q6adN+(jeh(*!_20TFp7I8~tzX~gd643L}zC*~KA;Wbr1%4;4~ 
zQ80Ub1dCVdmf8By^~k=-0{4qOb(tDqBvJhdRW=~aW#^gkDYVr!N+&c`-k*x&%Mue znxt0(N!0dld5{q0ICT9aH)KAO0b6ncDDq{~@_+*J7LZ^PirDph0Wc*1>|%it zfRa;52}vF7!RBA{J244G0-9M&v%I+noSX*)1=| zpR1v*N^sl0B`>FBQHeaYn3pp>w!VN6njXcrX-59T#(yLE`PUVS>i&tFeU z_)_jIrFhqI=MXy%m>?`uNFcmHdyLk_^ZLpJE44BEo-Fy{ z=EU|G+Lfg?^`FP|pE=6UyBwhgPr&=B z7{}EO`0VML%I$>>FyAGq%!&NW?q@yX?KmvCzfoB!o@5+mcPWKvH4}?E=n(J<#3Dvw zw_^kg!1e`FpkO{2d7?XtZQ4Sh^=8^?@V==@A@duD=8G7HmTh37GqbgV?*Q>&#<}B9SK<< zV~bH~0Ay;=83_?Ok{FKI4S&a`o&x^pFa$_}9Q06ClE0Rv&c02nF>AKL?e8nv5*GCW zzw<=v(fw_ghfm~{+(?&p>SGc2Eu{r*L`yh5{(9k)!j4Ekx?_VD3i!DQ($m`+lrqi! zW(R^80zk*~es&&k4VQqR#GzHYpk+!nwNyG#3?L!^@&u<+$Hh(s)B(1Fb8r%amEh+? zmn(O4JDXR6&UrF(*<~=>qfiNfi%N$xPDCENquH0pEA51|M-L^=087-3!y2@1cF^TFx9;%d0s0rG^2{ni>bEXxg&ii(`xfa^Df~JH<$#^5m^N`0gsok2jLO6 zTuY!5sKTd1v4kym-6#D4!aaImAh%obex5`NJx6iG;xxmGL%wtQ?b2=|VA{mUGlL6I z;PE05Kuw`J&9uSS6mTZfZ>$5&khBujA`&?hO$%gg(;Wgy_5DO5vEYqI$hz*4zuw+yxj z!ie(BJS0Y^CyGrL&Jdc-yd zgrpzwh&7rmd)+7Ge4Ff>bsayP|I%HpKp(F-t*J`~a6M)5BAD-L$u2YCKhw=KZR|7b z7IPP&!`R+{A2x7sX7C^s^r0GYIbX)+#hW9pK;&iQt71!TIf5D#(-g_K51%&RCciCc zGInFd1hUUvGllT_JLADJOzJsnE@z05xaE$1mJ&l=ylS4t2y6v&*P}mFBPf?Xz#@DQ zY7MN&Gib|k6b4mqUlL+@8|^Q8WfD^MaR>bM=CiThHPPAETUTJr9Otf=+U2(=C2`Lo zri?svC8u~CwevvSDZc_kn(oi-Sx^ZUbD#SPCb}pGaRL$DA*#aN5wbwvi&}*ZrYHQA zl2FYMDNu-yJD%378`8g0oYXKkUIqG?mjAOywrDx8kAo{jk$; z&wZ>52q++Rjmw$VhLy%*hYuG5qcaf@N`9b`YH6zqz*TJkd;`3#t?$`$wc)-i!EiD2 zj0Eo+wbnBb%Oxl?mdUTK<%uxxa`rHzeUQKnaM>ZA60TMe+-M&H-|9mV6#L`v)g_4U zwXUu%qA3MA9*FM+s`LSKj>kYH6VK5?74cS9WL16gR(>*SN=1f7B@};Pv?)#ptvW3Q zs2oCOKk>T~eLPctJLM_pRd7seK?(?=yv|=mGJOaZkK~mAdeqtz4zzv%;glpm$=}n{ z6Fj0GfKDI?0)zvI#0~u2>Tf!T%mR%?a_w2kv~QdFAGf)P5Aj*3+~gKgR$hf^{1XyB z%`Yc~u>B25=7T!)*>hKD4c+gEn|EvJD9}eepgn{{t*`|f#F6q3!dG9RHq_SxK|k3| z4v%({iSIFVu|WX#g-8mq9!9=&NHq1x(Y*Q@Q*fqQ9qL_B4Wp+m8CT257w%R2#Zxm- z?cmtabvB5I`Gff9Iw4OOYt;L@J3<3kjS!It;`18-x2aG(e_RE;r9Z(iR6l~y{8FM0 zXLp56()SYj*s{j%WSF=99F>Qbzkk{eu!nc#?CbNAGD@VdI~3HgdG{YnJ^4g9aV~m}VBW69=|jvTMR1%D6k6rGvULRR?~4S~A}wGGYHC 
z%Rs$?z_WSBxs@X&V%L)1uc_k*BVb@Xwa48XS0o|y5RRj#ZP1}uy;1Q8SgCOrw&_zsd znh(kwe#v%3GGv6*fLH-Fr^63J0@NQtjN1)1vOwN&D~{4Cw0-+&OQbNc1+hT@osU(DOd+)S-YQN5sdps< z0|OuJUJ7xZVXxlOQc_6ayeU4d9cN0A7yzhJdkU zGXhKo={oI0h<(HrTWkXhV+-&?^Xa2b@YfeClHYUzKZG}!vP(y}#5B4`!=U4SZ>6*Y zHfQ8g*GA>@{aL10v~lLtz58C5QuoSnT{;I7edfRhR1h8o)^Ex+j9@8c8-MH6)>U)h z>XRAj2ixv@HlDX3Sn*Xz37$SW>iO@Pv+SU>bfFd&pVncL@813_1BJ0wb6Z-fqT6)9 zj!zS?Tsp#`#e?1`0s8?bbt->`Izs@0Z!ZWY&1^yEj$8x(L5dlOSM}zZr+;h%d>#){ z!qJDu7_RAOh=L2TEzsrDAVsq){P)*G^;&iVO{SzF=~Wmt%%@q;*u)g2Sm^+otune3 z8FJi#7W6<3`8Qt}VJWiDav%4h41d-#nOrt&TLN7fm8wPY^fbvY`MbukniHj3 zFHcY6qT;*5Io)1?qmI%hC{2+}_#P4!Ut+;Y1&)wiS-hfjGbS3K;`b?0(kT4pkk~nw z;X;%6I6p&Xy)C~}C)-v;PnR~^G&Lu7ht~{giP(h3Yne$Cig!DRihWi( zhdt$Q{QD_DDQP$GL{Id1spS1kN;OJBd?Ux_GJFo2U_fI47J_DH8=y6fv`x*-e1n5g ztcWrBC!XC1ZihNb$cS#YpSH&Fqvw*ly?4Km*Dk6ytDSP&2kPF6OX+o$DBaPCTYptV zeMDC5sOvx>WLlg;oox=+*PT$vcon{S=jA5(lp7IqlJeGoXIqmB5udLqID!DdzhK<( z`EwyOKegYa!cc58DIW~NKG@2D;F-krKFQf^2lZmy`bfXii|lTQGhd)KAD5jwm=kN(myjMbv_O#`&d(6 z8}-5O8{vOxv9H$57Ov65yYuXjvR} z#aQ7+15ZljoJqaA9k*d^sSk7``kJCNB^oNoP3P5-S7F?b&tbCP%W6#zO4E_Yren8m zJf!^o=~@Mss*w)GwmP=Qy0iyCp5s4554q=PTK0mhOK}=V>nT8>7}t_*a<1#Cf`S4# zj#{0%PaD%le6Kbsi^_EB{RXA;&U+XDK-H>iV$kl?5F+? 
zGkPv{5t+Hn6g@z=Y7-RtXVHLoGj%KsX-J@bjyM7ONxVfxGLYR2j7u%**#B|A4UU5` zz6raRnhE|RpTqfYk3z~ko&=guTJd`Md5V-Az}H8oiw*HbeFwQ3pEhL94uClW*x6I( zmT&Yr6r52m?&NW^Q`}5VLO-*^C)prmiUcxsB^0lZK(7a4{A<#u*1-(*6K{j`9ms!c z^~s&;HJfb)hoGeosy>l|5;oP(AoM|4h~idhp^Vjxu(&!3#uGB^ ziFlXp(6*H?QrwSOZ3!CP!hsP$se&VWf|G&UhZTrYq z3~vTLk#;+`r+*HW(DtyX84=hFweLV@lyqnneiJY{r|BGNuIBvbUro@nU@-QhLl{~@ zeQ}q7rVh!HA?;mZ>u;q!Fs&=QApw`~4_b=bQPYQHtxlt zb~i*!3UO^s>nS=Wj>jX zd`?I?D8Y{&_)7;ngXcy6u>Of)F4r?Y3S}1}R|*1Z%lfJWtqdWQZi2>4-FWTHcUQ1@ z=!1SLfz=O%x%cf9jN7p!$u$l36b_Mo=e{o7BQ`EfLNHr?cY9H2t`-opC< zKdl8VM6;3XEC&j&c)PRBdOn$)I?B()lu7mj9(ulI-oF}z{OtKCr2t+uH}N3Sh#2$) zU0tCO4#~+4RLe6L{kEFBn$SJBuVAWEA-aY%Z~vUF#JFFlCsN1|2@_29CNRjH=Wxc47}Nd= zCf6qw=W9=D{cv~p4ZVNOD9>&9Bjt$=kgNSsKH{X;Fii`pAc<0_q`Lm%+WXgdXWtU$ z+XeBP|Bz(r|8Uhz2p=`l-YS`WNwMMGu5%3o#Z#1< z&5}(4dK+C@wG5`GH5AQBjj`rD&6Kv_SLrNQ55D01%-sgl?Fvboxfj}V>WmjEbq(AT&w1W550;PJ?~*Gm-1R7KC{bMHmeS#{X}_p&(fA>i zn(_M~KF&k2ji#d-J}q3rUBd@QCI|A`!b|HuvrTjJyUgDtY$v67q%VML8w$Twg=@P& zj-DGXjM`t?D(3Ooq}rNX@-u6`m)A$4sR1=2N&Bh7?#Cc`K6~JImIbY49Qv)Ka4&M5 z>XS(hxyK(hxf5T#PKcdD&CAu-J(~P^e)wj6LU~cgy#R0j&1|f8;n* z+@IIler2h#9}7KG)G>N=sJOIFoc(9V0CJ4>p2{|=1$UjX8ocRx1SZd*!5-q`Q z(t9SHaNgPcvv+n~TP1TLZ1#P%W9RK%&wL8{Z*?>j{p7CR55eZ_U%(eR(eM2qvghYt z=b}gbzck)m1S`=r#n!p7<=rfRm2sDmW!DcfX|V4I$cc>2{?9XTLU?mD5kLOSyTDJz z4ttm^nXquaE?FpH{+vND+6{-lx7MVYVHeI zrKTuGo2ri`-7;t!$x6?So*r9S(~v7#-|nXVIo;o{sXlp{oN(i&4=hcE#JnR}WF6Mb zYZd3(kGyQpg{jsfVd45YjlxBuWN(LeI!N{lZsKtKwz39KN$t^0mv9nG@X0#2`E;JZ z+`GQN=p{bxA@zIJ>~TpiZH?AX`l2T|@x|cm%=sFeQpD7F6xE&^H()x`($Ks)Gq@>A zc5bhHcXIK9_;&NAL90%LKiRJ8(d6D;fO&HB4R=(}{K;~M3te>8E#GyiXKa&P-?fy? zO{l@x#p`|U=VL2Spt-UKn~-1qZe1Ak4}HG9ijqGMu`T9oDwr+?QdIBNw0{eg6}4&M_MWYAqXh1;!~ z7@+8LqNwa~E8t;C2F>YH=h9t9U;9qa#d6^FXP{$EONlgb-MmB5oG^U4||DCrX{R)j2i6E8*#sgfcqFs(v>V* zgQ>wF$-%_@k%R)m;76q*OV^YgGE*Hv`X8T6VexA=_v&~2lU!3*$CYA~%Sx*6WX|*B z)!|Zp;_D9%^Z<>T~BgGB{IUES3k6?>Afs({ACW3nM%+Jp9{92! 
zf+zU$2mj9~ZT5HW&lzutb>3FZ{^k!``df9+nXkpn{^s5j`pK%3;)^JD#aQ_d_iHN{S{ii=?}+*MITJF#Avs90sF1G!T#hFNFGC zHJ@8{;v6k^$#GX0b1KJZay0KuaR)ia#sw}b~SbNO-aKRmW=M*gSJIH4yomBiE|D1+E7=Om4PMB5Ax# zV-|Zn``75pBgQWGBRI_zH-y-PS?g?DkNPLbA{w2Zn1~9jWzoisc~7xJg5;iyDZc;L#rF;OOYIMrSWywF$`%@MO;R1f^b*)J_}tQ zT;GqVt}TJm(z(*-v@U3o4HjKMfw7;dfkUQtHd?8n*gT_uOG1LEu<0c*tkzWmu|^s# zIK`UOQKuR>PfmF-!tqL>5z#(J(|T`o9Xv;MZNtNo?_uz2cH@P6^? z;MhL9>d$+jn!Fug68grcC)!I4s-){FO?Q>_IE?d2eRx8i0AZc%#PPqooqzD~`8t-A z`?xuN)6*wsbf3`Eqe-*~UkLEJyGPKMPiCNCEIVXIC<~W0M4tXQ;Ktcdp0k_RZD2rR zBtq4%B56W&k(B0(Q~XT(8=+=QWX~T{jK7?`-4*DAk%WK&&=w8!#w8U)Hb@^Okw8F? zE;<78a-%@ybnBy5+a^Il!A==~eybV1E_?zJ0gsFAg%0K4p53P}q{(U4((Zqv5Be$_ zYirtalOxW~#6%IT#NEDijn!4@71}T>GRh zssrwFbA{sU8K4Po1CkL(PO8hx@7%1le;1QOoq9BZHkko<1;M)q|GhI|(fVig zx+4oPyoJ;8Eq%YgN*x{>OvJ=sowr>S82I7T?iHo3RzN1qH zX!+E(9hD@32XRfC^>K4!FW>Kp`e#1sBp zW9Jv%Jw_h<-b1~>)b4{A^U-_>n%oPvXeGEz@`sXN9;Zn$X+-W?alKR{@7=bCSc$JQ z57Q)C3WYZlo2Yt{Ul;Ep_a4Y`U$2IgI7f<6g*T+NUw_Sm%-OAO$#zblA2-SA6twh$ z>Zdp=al-pgHg8Uc#DqK)4Fv{}2si?^(pmlh1$_|QN@Gt87`YdqpEY&u)kQiKG_yXj zzakrI0N*8P6(uDfYR^_)nSMM+SwJ!GfT4qWrUj)D0S8}hs>-7 zwlDYI`=?R%{LMoN?(6bwL@TZgOJ5&Ko&;HjO1^#Y-x#&qPENR}^jGQgu>g!Y%@^GU zl1zjOw_^hmM=DX*(Ah#P_G^CGosp4If(TNO6eFVhwtzANhm7-BA}{!tu^3p9kS&9* zgk$zCxl`owqK>vnw>GXY^DtNmm`L|#H&@(&>48r^My zM1CDI%D1r(olZ<4n#zuAwhX3dttCs7BZ>NL!b1dHfXl%rQg3%oY@cb+@Hi`Rm@O=I zuj@pgszC>py|}PuGx6RRan^;ql^PqQbGK!*SSw3`!l+{dP%UyIwe;vj3oF+f6cDW% zV4nHGd}k08F8M>|?V(vFwSmf}ttt#Cms(VAyP6@0yLm-ww8daLvC}&9vU3?hV&Lm$ zqFehwKCwQcUGv8kc3zij^gH*SOx(LaPm=P~QPIsM?FTRt73^^xYiiJt#5piIFL0qE z4?rb;hpzKL5fU418$m#0RF*F7w3ffMACm45UT#7e%wF)GAMdIIH|Bl#aEU~V2lKR5 z4HnO^Zx*V~E(_=zUO?UE5P@%IfX`rZw0}BL1=OgK9HtMlb=l@E_$-(>*Uo9M3tQ^q z&P23vXy|6j)W?7Ky2G@!p3?oznx#8ja=X8@{Vl+^2ml?ZB*e9zfOdNV*f)h1>yO2^ ze*A5|lFAD<7Zj07A>}OydR8Gojx&V=HlxfjpWjb?1b?w7I%KWhT6!i&&2OQz>fQ?e z8|M#_-po-lq)c5Kpn-w_CLh9=K_zIa!mK)!Zf7<;JY1y6o>l_?W>0m@NSYcIH6qrwC2#)yzub6*n}-r_YR=Tr`BqMmeUEgmKAjf zZx?%Fp{;sP2129t_<>@sp~Ns)$iOP~d7Bh$QEFaI5NoH4v3E!0i(QH@?P6Z18hnwj 
zL09q>ED?LC#9fC-Vb=1pZh80D&Fx@hFVfqN=B(A#KLA2PKyP&{1N3HdNI;-kxiK8l zQn9uImux|oy_@;aZ+)k1x>AdJYV07@1HA^X0PRpkA?HMX^{FN(kN#isx$9c5mf<+(Iw zt)>XBJ;{W;m#i=MDG^Wg9;>l*Wld>6B3bF$@MRz^!dcK_{EOg9*>3zDe;>5PQ=~_8 zFAu(d(Bm^T)LXsAqd4jzTajwS^o;{kw0|1EWQ$PR_Jv*0AK2$Z{1O!5=hz<3u9cn9 z)jCN&Me!GC!xe$I@gN9x;zyPH?jBq)Gavaug^i3<0@~6H{Ery@TNUNi-Wm()WP!QH z|8Bl2_UIL#0zY#eE3Y@#&G$oPj_+oE;l<1bhNzEI83&5l#+h#sm_Z(|+p zD#}d>1MGYUZ|b@Xy17_?{rKC5kBXxNE4L%Gg9dPq>-5&oy%(s#a6!qW5y`Lt61FVc z74)~(w_ZcPAAS2+z@bZLv&Ws`>AJ#vZ)Tps=a2%-8%m}qD9bBDc=5n~DkLD_VN96z zXyf_ULpO)wLJZE3o`MzK*?)&Jc9q-peFvnmTBWL~mZq8St~@{-+jQ{t>iM(hc;EtxARC?Na%W1-k*W*n&9fH}G0gR`f@w`!_>j zChE7|JqB$V-COcz%dO~-)x^NYy+slYjty;tY;F%F(GSN9JNPAeY5fJK=q^&&u-jSh zZe7Pj$Y!3(#*Me4ABSm>G7|a5tmOvtXthbk5cT)UMr?(*$+r0XC_NI`o&-gX98;un z)RsoaJ2~|{9|N^rUL*tuVoD{@l(c<&RA%Op6MkE_AbjSTu%n{1!Gkl05brREd2kO8 zp#ZOs2`f?hM9a<9bQ9fk%Ga9AmblgOcahG!Eka`7Ut?u_hD;`k! z@+SlSB<(BiSX(idOM~-7PJ#J62i%#S&@9%a!zb%HMUp`{^|4-!{H?$od3AVnbac{@ zOU%p@(YOLV?`B!rK)?)$d4=8-J@4_kunmHS~)854#{NyRvS*yp*6w9;^ z#^{-boZ%vI6MIFJupOp=xRFTDd1eQ|cQ!}h8f3({j#zwEUf|pr z!k*{l1Vr8I5@q2LlO``KD=UY((Z&&KUDoTLX;qMN^voX*$@ilF2em>P%T9i|_3ggW zU|vjiwj@#pLxTeprfe>>9RX5@C{4(J4GRlPhItkhp`ijB>FJ4?t%G0kTyQY;q5gE; zm$UGy@SVmjMjP)Gt=HFhdh7=N`>PEQ*iTo{XyEj(y~h(N`}pu3^aPtcOmCQYnV!wu zy?tuprIY%n^)O#9_U23wZs_^`Tg0f9dm@$n>wI)ag)*I#94YfDFsLoILLgbk|diMQHbaPAU`@UnCv zX*p8qLOh5@8|a@ee0sL_OR|9y!r|Q+%{l$4t&JC1p3Hz_E1}NL9u*fC*HXf7P|m-~ zhqEy8k2@R(j5cQfv;K6!X4P-ZJlD4|u@88(D}6~9bo<6MabcfG&sP^F{{UWE&ASrx z;#^44?cQsmcPsp*mwz$o^s{4bq60|;c@O8R|H~o*twBrX+uSOv1700~mJ~ zszEyhvljls32@~%mwx^1fE#7muwg@|%n5Zs^C>(Y6i*e0Fb+}LA%f)(l-I9cdm-#Y zx@9|WJIMYGktdPCLQRMWH=!s^1!>RHYj< zhH)r%(CLNelIrxoZ0I~ z^c`84+H6CFo^(pXLG`%vMrsfg%Jaw~WEq1FL00naf{q(Bys-CqN78UdgWgRvC+zE$g(#B4sjE z?FAl7vO^U#auT!Kr!A)X+iCqm`Q26RUs_tE_E!Bq(a_j<8PY@Q_7cvIj3E zv^=y-b*Ienx>(=*-_^Gv*-z+jr~O)bFX-5X-Ua;deZ}XAZd;8RAb4 z>dBZ;koD|Qg;)D> z1o-Z;NIGfaSGO{7sS_-D*nHUh_K#V_UYa%aFXuW_@UvFO@w^bFORaQ;sqAp{9fBzg3 zOex-B2jo90D1!EKSX{j$efT} 
z;&pU*-faQm00B9OzWnRkg5X1(9+N*j^t{13N^!h7vzG3p*ooQ|>8vmP3x@J#d0f{- z7ZjSWABR*0PsR8Q;Sc!J*-vB+@=E&q`&X>`s;Q+#EzNFjY8p&5Z_jg?y~Jp%I;0 zzH20ZykGIr1#MLB+lEpiay6}VxS2kQUHkmkPI{`v;^@#TiR=Gtqq|mrPjb($8u&ew zLw5Ej?WU-RM!|p`N>G6hKg!UngFvgp8Kr#LY@)R@8&u9`0Kh zI^wQp!LbP;iNQY^*YXc{vOO?H{*!*@mVzpk?P&7|p6t)0U)_UgTn(T z^)cNTq@_3*(|_{AlCM&moBU$N=KOj~4&9aoSr+$g8W*54Kh9%ay6yhQ&cQ@k=-j$P zt4vHxyqxdq(l!FHc;o8!pBF6&;vGBBa8qTS2_7gYY1<&1x6P~Z)71(Ael3e>=xp~X z*b=NU_bf2O#}U#(&XtFU2L+b5pkaEFzT%JcP@I8_c5R|6s7Es{vXMpDLR$xD0^T;z zs89qzF)iVVmRRu}W{b&(X|fK(H7eLP#~nnJ>Em%Q^M8F|h!Q4~yREB=;;F>f$_vRk z&kqmbXHuCmY=T>XJHfuBPQ#(db+V3{ZAmDk$uiYlpRDY6Y+$T@!)6r1X);!ZnG0&0 z{P|fg1S_`%6ivg#H&Q_eq!4_LXfO!XZB(E@&G|Y3Jt0yibcB`Xn0$O>Ow2){tXOII zH*H5sF%CdMIqOI~r-&wWJ`h}!zQJN5gyVuCNVmHw4-HmrE2$9IE1H`p#&#bQ{hxd} z4M4CGkRs#Qr=UDm7nh_Nt?umXY$6hIa0Yjc`;mj4gF^{`z#cl}zJ9%rMmR{*S8GGz z<_{%v74vgiAwEM@dLUB$jd}MAvrjK3@S{z_oieK))s6WNV1M9cr`PQ+_Ty%6$eU?u zoa*hukCA<%QxZguHXogwp7QVVA46WxlY37jWFjVtU2isgELX><|Ium#9edVvi1 z0bsnLMYoRt$GISF)N)1)+&KG59hA(A2mBmmBD-xQ5u&G-ekyuX!^TD4WSCPt@Gxpo z!5z_2kS$>2&gX}Jk?qn3nk9v;-EBIBtXs|X0Vi5jJeJNU*+EpiQ@u@Va+AgnDmz}4GdkAh5$d)b9riS3Z8AcksH zdd1XBsgsG1`raq6F5Zx0xffxi9x*KOJkA#W>JJXEkSmy^hpV58nQH=SF{gb5gdxPx zPj1zQw52LnQAi{tnqC|%I_~BPe5&*wOy{6*I#T9h9$aZlfHd_ofE+GnB8yzu(R}yz55WQLobM{0I z1WNwE5z?(gf#NkH3qf#5X=!Oi=^=;wNu8D4PC_`o<;prbYk1r~A-h)Et^-{QNB>i5Qo#*(!*n1ObD%baKTsh^O3eCuP>PRXi z$vpHesZ>&=GVG#KgiIN>shlzo$q)$*LPC_8tuoIcL*{v&r;W|t?{z7he(vYGukmwzuE`#RJMBR8*U?lMds1U7L2!Etw8hPWuf)otWv-G5 zDI_2(v3}hqRp8ByNU}Nnu*e)-jNe!|tdCF?o}#jhP0zR;StUFTyZK8!H zCc~t7EDS)YbkG-|Ha@gwah7o+Bh89LgDh6z-OiXZS?&*xLTZnnfzFqx-;nE#Tv+j| zL#raRpmMbI#d;7T@FS32cSnKHc(k(JUM}md$44B75@lkzjw>5esjP0?5?Gr0hhUINkt3-!u|lJRfkI+XkikQKDwWif;VxSes|VZN zGAL)Y2#6whe-a?K9FXft`#Nw4j^ryzNy!d0>&V!)lYJbOrV?OkP_z9_JRXliRlt^H z%fVw@+wMpGvu>y`4V@JA+yy0s!{uq+Ay)cj7rJH77BO+)uQnYZJu>B{E*X1!A1J-+ zmtcla2*6QHzWm0A8e7ZIs3j=!h6&9{AaZR0f(x9L==MVu1uk)Z><>SE1&{Sn@dblW zw}MdoyPs+md!b~a>iBqisc zt88f4VQiJdj+)EH15z@1k>06?DKRCqJi6nNpDSnAVX%&>kES3C1E={p#OQ<7v|wrS 
zmw(F6(p|=U3+mo%cm(hj)5rTpbf`aL|CLu;28tdO0$J7+MDEki@DKXm! zeegC^*WJP_oA8r=MyD1*tDwWB>p3Z7Vw80?wwC~M`RoyQM(x@1&jW%N~KO~2CsE40WE zFJv>S2KZTYQ`JSbHQiWL|)PM6l(chtn-VkC|A~F6gII`F-#FtZi z=2Pmm8Hlf!fS%~hKCH`sR2_O{QWaHdetyTN@c&y-Zc7_U`~Uw5z`XyrWY~fK?eNF{ zgLnCT!SwWuAL-YOi1?J0B?M>qDU|VD`K3w#y#5`0Ph_QdJ7+?BJAgJXFmG#WJ!p9I zBVmy~p`RtCpr_k%TT>$qPG3LYl~_Ofgnidbq0yE{%XF3h&Y^&Q4hv3t2Hy|CIGKh5|xA4EeUSEXdu$h?KsD87j-D z-jx9;nOE$OkB$9dYocz7walIZA5tK}!T?tqMlppNfpH5YXKG=rfR|fOkEW(+Q3=fM zld&cq7VQSOsu!yqL3aCoD~VA1bg9vn^mG^N$p&2jWdarXG*l7ddzzn>C;&CJi}*}^ zyUK+p`$l}RRVpm?a68H^BZl|93KG%KX-;o>DmoZ(DMip>%y6o&0o+XU4eSB##B2u( zY0@1s_f?L+0AM=VW1*Zk%zetqp!E8j~vC)K!$zQ7YmN3_&Kx%tG)+0P;d$ z$LcV92>_xkP+LGwEr;9=rJ6Rv+_ASJKRlKb)1Sg4EKey%Ta#y3+#g<^XBI9_}x2Ag*&wL6ej&%>Ez_p6(mP; z?VU{%sJ?l8uu*Q~GkX*K9c_E{&sk>}xHcI&SGYnmh`Xm;n((@^n7^I_>z#LLeJ?xO z9%1WHMI4>jm*X$Aa*zLhC=CFLo*{FK4*|gF$A|;1$4DMZFRhDJ63F<#yZ}miWLvL? z6;YyceujCgOnd`V*oCP~edxBRPwr@6)vLDn)OAfOhDVg|j{X5jpS?g$;K}nsw@O^6 zXQlnZgH@wzt{)L>(9_&3U@R5eC$70}@~v)u#F|#d*Rk;_kxh#AhIUz5-Lk9SyQBQ? zUfs1fC``b9^P>l5C>_HKltGc=6AvvVy7zL)#~`#iKN*x~G8zgJJB}mPy0D6Vop#Li zw5q#YDcv0fh2i{AlcPgJSGMIywyKV8kWV)iO-dLEofBS&9d*yQi-h*6yzznJ?uW9S zN!CeAYRHz%olR(%a&Od~oXA)=VkkN0a!bng>>WGP%Xb!vJXLOKi+Y4@A66{$+5%Rj|#ffT9&${&)DfM+NmESHnetA{-kyr6qUvdSCFNHXXIYDY4d17 zuYlED%RZglYus*{c4p85wCA3Yl`w@&?N>NBsb8{%vOOn~e5PG#HQmq=ni7z&(a22Q z$7R#U{NnYSH=6r3>o)-`y5PYYA;6+p_AX}r7iShbHs1Dra-~~00acEGFn-1*wL8EQ zpdpLSLA%eq^)`s-ypYRy{~hf_@6`e*8I)3{iMVa}GV*{Uk-jP*Ph6AOunn3!PMX_y z^*rdMtD13!LS}A*lpI^51F3Vhlv$;0r>}318b1z~#3>uuXI-@T?sdXa^Rfjv?Gb_a1yYy}@ zkU6h>n)2Kd2YpZ;i*3^<%VJ=vTmk^9&t3enLsW>USDlVhXMRlqo8DFy4V%Ws9lGTq z9Xd({TvC})^;LYbmD^D&Ovv`dK7?t@uodFbpcZr;_1}a2;iFa&s-Wo#n(0=Ju6=C` z#}kV0DLu6ksOJvdb{({kyzJx!*%pX?dSkJkew{4)37i&*pimGHygN&(w#C~1;N)Av z;eVx6US zV)35%sjM5Pt}ZC_KKMAouI*Jv!j3!r2kO`MH|TxDf?u`{$_jbq>fY=FIxV<%uzKhea7;dzL6sJL(qzECQwt*e>G$vGawh(%t-jsyF<@Mb7~>A*L?F2N2xF6~k7p zSzoIdCbynpTl+rTcOTdld1~z_{1PoWN3E{t`(qpNR&^&V_EW#{@9Fu0>yQ4?6Ukl=(nf? 
z&uR^{XwJZlF{*{IfBj>3;Ww&@)Ox{k&m)f=`n2cj!v)WdoMWqk&ytLXKGZ8?&GN_5 zaRk@;%x_2*j@2ou!y}tev>}TlI(;f?wpEsTMggUuJNa}%P*|c|U zvUH9zz#@a)Qk%n%{R+2|ks-)KDavubK4|kDSUrbPpweRWCG?%Wj-4LnK7mz2u2Aem zeoZvW%_u*$8&yWGP@MLy4_$SlwQ!fv3G6pE;B>ADTmL#9wZwVk7?QIs^Yg=+F5MM^ zTfMm4QTTeNKL!`!1Ckq#?Ls4N&Rh-K|H=E3i~L`}%`bc~q-D+Xv=A!qL~$)&AoVz; z^z?k#DfSNA(nhdnPq0U(uTj8cCB|b|cEO8!gKwd+?z0(Pzd@7bm3LW_PflV_d0Llw z&+Hu@JBwjbey~P46l&rI2^9)qV379&aD@s(v-QRnR{xW836EM z&+TnW089iI_x(7|Cw81#c$2E5SbnoS%RgW*-*8wcI|hRuwFn5U&pooHer#;IC^ulO z!9OTIC^#Z4lH-f>sGNj-`Dqv$QlGxVV%-^G2YbcOy{o3Y<3wu}N-6D6qB2(XyeRT~Q;My=vgfpZk`%`mM%t z6Y)7CW$DW2mI5{6nwI2U^lTqYZ;<7hg7rD#zGkjYyfNI+l4%f)O$&4^{;wA0SIOOZ z7b4(1=vt1964y-Y-L}+*EYB591$zHLD8mj7-A0<+HK%)k-5nvJ&`-hYXAGfBxbP9w zX?V+Gr~$&Z+we+|I$V3Zt&q>}uGmkEosrvYxBGB*+?BsbZH*U+cKi4QUGoHYqqtmNK8o#+w^${GXi1HZd9o}v$ z=CcWX6qp*tGJ#oXtWm=N-0%m3^3ze!L+tp%47ljgY|gb!M1VHR0xU1nI;q5mEJ4Hfq`i+U2+?h9j=44808`T7Z=D25h+Dy9UJokPR0YHs zmqMTZ56gE>2DKSpz4cBzOA--p`;l1D66uPRsAimh*+9TAukJxq51#ese=|v)7k;7x z0mN3zBRsdk!iau6R+r`O1`CS|xGh38f3*&+8y>u4ACLRsi)u9n0cthhs4P2lOS6H4 zdefTwuMHs>g6flRZ_OhXv|({5i2|)CMU(o^M!(C~iYiO&#eRH?$1m9yc)!*&b(u9` zjIZNi@4-DgOlg(U?Nm0}pUM)&wc6y|8el9U0B)TDAk}D}9AAUj0+vV2+SKkdarJj- zlZf`S7PYfE`dM$)cLbiSR217rKs6elcj{c1e2?FW{!?7@GdI^S2fLZ&G@r@ex$Oh0 znG=Q4k_iz57}~!PM4)^hk5p}|r2;5g`m%lD>99W!@rcUZER3W{ zYti=k2tolB%$E7uYYDe1qaKW|O?;7j7#pHd^Gd%8mtif z+aPcNP~F+RS`AeH?^$50+Fcncq$ZA<$f5o4cb6Kvcn^YV+uH=A3pkf9QK} zAUX$`^v%v0b3Nsk(h@)I<`G#kE9EhOIzax>;nJY$0B~B>)IHruqpjz$2tvN+H5C!` zvq$`QJ8~Ub<5b^64H7YQPCI-!VP|K@b{4awK5;or2B)Y%kN!x@+#7+hLRM>e*a3)1 zb)?dJg5%&=B)<6YK@dEAXi%oMD52%^TmWAc48#)Dl?D|3yY)DD3~v1!kSpo}iST0F zbHIHZwcUXU$H!)S(i^G?#rm!r54ZmE=+xcnGwdM$$YGrSaQECMr1h75_QHUzROs$M zetHb3AfSeaXYA+!)L!eRBZTbuVN$|ey0>zetPHAm9Sp&`lX#SswUZ)5MiNpAaFdI1r4(lkYz!YL!Jna1KY#RIs*Au6jN(qvZnjzm6~m_E9TqLP zC>1r8cjvJzcRte`7rp015F5$kn$Kv$+&0mU}qQ<`+N`b`33w} z#2A>$1P%YTjdXuQRCF2myW@_QiX;`Y^|H*^nvRf3*%T>A76=hD?g3~L_{VcLhEe6= zAJ@dXj#ESs!K+Sj#NDpSfFyAT7HUB5`ZjD@h&nJ*yo6$5kv`ZqbFet&F>!d7Z5E0= 
zTHxvIhW0@V?N|zd?U&19kRgPfMD~WvpuSz2-iPp5Q5(ti)9N_S;g|px+ui46Kz<%z zKwWj5-LwU_970nARgyrCD^+^<{z-aXBz`bZqXG#GMy2JnwfY$*!Z+W+B}rp`=v(~o zD6pXTyn~cDYG}09Hnd8Jl3LxbZ%1N4mPa~1c^jEa#D@T`8d_q6bxl@Qn}p+(=!g^w z><#zXbHq!(e+@aZ0l=SbgE_XN9;mk~o-|wdcvokKyO7|D1tNFBZYG+u@09#;h({r2 zJrk;lg9sMItYTEn4|mg#qB5%T+qaP8Bp;9otuSzWokZguOHr`^w$telX!v@Fd#YUp z7^}dm(uD*Lq;+28VDNQZT2HWjT~GOQc7TN)wcUw! z&T$AJP$<^nwQU@bw%Y!^7_6u$Xe?M)c-Ugyxfb+VTvi^A#aYB=Id zrU9G{K-wp6n{98{is_cL!{n~~6Ef5!2{ng^misj@&NFU63~<&#P? z69bv)^8;~!&d*vON`Io28i(B6$3*;kR71DFiV^y2&wy_LD|;Pn^zF|&hj)gfx8&@P zKrzGifeT0s+48CBnXYDb z(IjDTh+dks-bOjGsXJGQe0Y+S*g3YJ)$m{J4_JU9z@Kht%(OOxdwJiQgjym13@@Hb z^)d_J0Q)pTX#)D?-DROXZF~)rxhN04QMMgyE82Mg_OVDa_%Nba1dhRvKm=}(=M0Sx zYyA)_%Bt#DRWTkZ0yOS2JaK_4A`nC>h_n!PIL1aS%^U@v^jMwoI z0AR|sI_SN4clkq)1$QgV!GZSrisggf?<`l!t>Ah>gek>H3phYjU3$Ytp_8zfTT=4B z*D)BWZ0s^ODb-P_m{I@%h%HChR+_S3>9FX7q`_(BUQd8gi*r2YqoL&9;p zq~=?GdWI|~@NMxERZd-$v!SrcOuhxw3F|Sc@creUma={zd^6yR9haH!do^AL1S_pc zB|_QKFwXta>@m199i2Zvv|IYv>1q@lS60SpnycX9o8(B+tKixM>D@g8-$ibIM6K5f z!!kqf-=y}2zIi!w_Iblb%a$~v0VM;Vk@^N^3U-1%0$B0lQ)m@1i1leNUSGnj@#eMb zFLYl!cM=2!l5797h-orz)n$OBTz>)pny+4*QXQzdg6na!T@d4dIIKJ`y9am2}B30MNOkI*e@2 zb`~@2*lJ!R>L^$O62}Xk3eQejBE0yb+)W*G_$btE(Q)?`Oic46o!y6$FjVz7&_EPG za4huGa_j_}F5^&qddl(KsQYmYe+a-rt{GdInU|)1Y|FSmpn@_n5dgol--6g0vkef` z&0|Nm92J~>4`C3OI#C@@)q#IN_3=^g9c1+LA?)$`AuqsIJE>$2tksFTp+N z`#y0hT7AvtYWr_ZsMvT_g`jfGxd zfq_k{4vNU;0vem2bEl<&9UcIdcs5hiucPZNGn4LJ2Ln65JY0{!lkKY1GqejTf{%9@ zxTS5UlRt9VBZq2nvDX~p;#2qr6qWb1I~yqGX@AJRGBsHcLXLPz^pc&MR2Ur=pXqR! 
z&6zVpKs7F(0B}Y4FxrEZ&WE^O@5`%$5~ho86k);q&5hSv-{6`;kKMVg-UICn*IOa8 zQ-O%>V$If(yVuHFfYa@P8rnLPa03)}1vKA?+Bd6O|5%YL$Z{A>)+#SAuTK}HILNDg z2o4Sg!0mpba1>o*im^BJrkQd(c?d6c8R@)#dSqywh|6c4wF&!5!_9#iPt3;L5T;(i zbnW_NLyf6zk_Wn=F^AQbF*oj@Q``%bkVxVFH)H8ywuM`Z4x4jJEhmjY-x`9K$?Vh zvg^tQ1_m4lZtHkRfJ6>+*Ur5`Pz(iGJJlgkQj9{_jnaM`0NMKFq>RC&A})l(h7GDy zB90Sy!?n*%)l^8&c4zJiE+-#X!8@C32OaSNB8fAoC7|v<-isp=E6Tr6l=OAKOdcO1 z+pfD_!L4`h_vnK5ou%AWNw8J7ZruVb_1gy^*`E;WsoY$V^1N0Cq*VT7|QvpD6IO7pYqICBMKpdqX^$LGw=jpxp;_!fl5&c`v)o)(sNlCYTPe`+hkTFZv!yM?z=YATYgbm?IweY13c>ud5T&^gRGZ12-c4hK&$zog@(fYx;MUOw1s1KX zub}C>V+-c9s|En02n<=so9m7D+_rC>>dDQbx6j*Y;)kCiKycK``QNImAwl7eI;Mbq z#uc2f%+{p->PQ&<94h&d0ZY6_U8S-njKU#+;>|LA2JpAMg}?z9XE?fQ^cPiUgwE+} z_2pISfa!!*%>{#lgSzcs9wJ?IcQ5TW_AV2_!!P`Kz<>4|1~nz^W{$z^>|38QR@J;fNFVYy zs3ke%0Uc2DD3D;_xB$OQ3m)MH_5vl91(4?CbWBxiAhIEo5&`y3V4TZ4!oXNUl7LLz zR-hz0IFjw4)MQf7^Hfrl(gFoF9mpy|yf&O*-v8*Sv;?K4=*ggNz6jf+_>F3!OGLHqPdbkB#+&qN zD*P%UD$AL(b}O-|TJ>8?^aXZ^?ulG?ao~%8tL<^K*}c53nj0$+3yJa^W5Cu@=mL%T zR*+`0qRV6~QamDYb|BQm+(ekx_ zMKDrdbBaKbDmjf-LO{Mw!kpZw+kClL_hRM>d{+I~lXwNdhfIvb{sVh+@9n>N2Eg8| zG1eq+0SyzmnUl{)Jt9MG^t}n4h*$}0DxXL5;+{hPIhPD=vwp1n)73$sEESH_Tuk?7 z4zoGE0lrl#P?_SEB#B*Nm~+WSh8kfua*ZVf%g5s0Bp3Fs@?S=~{kR@o)USY7x@1`w z`3m1G(k*+cXh3n4`<0%y8iXppEg?x(yq|9G@nt=^_LJaWc#4C*nQrUwdh>DMyV-ygnT8l=tSr8Dp}Ql_Jr>5lKZJXQOLjO#SMpZay{ zlRqRPe9vC@I_uSc;tqSkX7)ETZ@yl?)#>Bs?DJ&jYJ+i7&i;L&&w4+vsj!GAEs|q0 z<1AY1MC02Buj~u+hMW5T9l@=F8>XjYXz}^|Xa|gRjhmqs29~xL`~U2&q?(_olb2qN zUxKEfUxXiMkZOp*8dR~@3supTm%n_3>SXR}?O&X$YF#7}%ItJUtj%6UB=EkmH8>@m z(f{2S*Vy!x<3mLd=TQ=)=f2UjgP}Gf(Z2XWgTc;VX6-En%rdfX9Q16Y2{Yrqc!QU_ z1?A=@#C5;136t8tkGH*Lpj@v|q4#oAFmx}@2!bCw`-@4 ze8no0l39(H2?XJ@8h5WObmZau7SaRq17>IV2%MvRBrkD;_RHg)7IK1VDKFxFC(N|R z=b0S!c2AB^$;(le)igB*(_bo~cQ!?5K-!Lud;Htu^9Ew5O3P18}ITP-`RT7az}A^@jwj5 z28)&nriEA%XwPOBOv?2qYO*QI5LJupP?K^QL%eIJGZ%MtwulTDj$~DoC91L|FwE8( z-SE@Ju_3FwHBLc#x1{0(c87@nZe%x9dubuJqckg$)9%V?nH&1j={=RR?$;b1r?H%9 z)gw=czzD4;G2%_V1$MIV?u(D9pdna-?+UJFpQDR5Q|=;0vajCNSJ5`yf18FRJn9<- 
z0qB$BNFx>|)m6_9620W^*`iNsnwd2;DVUJzAgSBRnaM#S*$C?D?z(2%oHIr;>hh{0 z1J+@ryXby>G8^=N5Ux8GmuVbEC1sdKAfz=Gkvcry*inp+^Jq?xBI(ErmR^?RvouM~ z`oTtX;^7aOl4pu-exKZRWp6@+oNLbTc4w!!j=9*1KusDj3qH;<<@gVa5WfB-RvOI% z1QuEg`Z9jL;(9Dp8aFo@It#rnxk85JS$r^>x1V08GV-Fg(#e-oD=Ru+iT2>|iufd? zRM3|^(>L`M;y9F{iY1dzgJBZYB!03;nCew}Wo*{^6yMTx>-HU2{?_2l6-GA6U3G`V z4iyZU-7NTOf3y>AOk^njZCA^D!+dIwQ(f-(QE%`5)*vqLc)QH5mf$GY5@mvPIjhC- zQhTd=(y0TVeevEq?yBzB*HCSt!mNRuX$da|>v9Z%8jR{mj*&lK@Lk8PEA*7kZ$B$F z|8YoZRoYmoQbn%WvoLp!{MOB$)1D>`52`y@gvc+u$YMP;QuBmeLt`2IpTMLsD-EL%)%o3=AuRSv8ZLvfL@M_Xd9~ z72IvwqUn5;cC{Wp2!hFLk{Y2b=*dQr-h~8m!-y!bE3Xe`ldLeUYYNHqvpo z&W@Ol&R-UA?@_;2uypeTZkJ6zK)slE^LRpiO~O7*x~a0CH@Wvz#XcUnp8e0CfXZm; z*+#oWCiKHCEeK>?Gdr>C3giQx4@-RzTGN%^z-XWp2NL)8y7sEMiG|u;B}4*C{e!RD zR&a-#{~nE12%bWg4@$PeKi;Lb@?D$$?OpyW-?i%B-c_^WU0s?OhGP$npb_Mpb|2;n zE7Yk(0QERgQ50K&JdyS;El4gc-}qT{i+?bQ*?t{EVu>IGzSb6BzHUi`O*qcgc^7HC znF!kZ+ftTrZSV`zsNGdA0&1I`@fE+1x=Qqx79q(3^?EWlVDEEvk&2KYBoXO#mK9nY zMhgj@x&!;_`*FfrZIDkn@8h`EMtBoOV1 zPlQT9yp&jM`A2Zp4l{ZGxU#<2yOdGw9nM~KZ>U=|kt+2xsRQ}Sm#Zj5t98ONmc!+e z3yj^ZOL}fB)2!J8-xKZ1bUe;RID0%~5>Iv9?QJnNy3?2@fCOy6t@$b5V@d3_H_?@lx&v+TL)XzRH7l^< zo;f5*&3aHVh4YhggA_1Z+8p+s1V#Pl=3OVLXW+tUP%t_J#|ehlL60egY3>jqTOa>7 zsci{sdPY42m&L-i2^r#VgEscHBuyY}s+wec@^DiCx*Sq?pwNG4yMNrUsZSMLg$VOb z{8GyTJ{1p({g^xVbp(qq0i+M%-IB@!>L1z#98Uib`*3$Evqc`YlJ>(aJI1ZhzF_sK z5B~4(S`jUVyHjD1|8T9JJcxgN*NSNoJiQ=9plD5~g+AmCP*)!wv-!t3hQb7>vw$6( zKTH%Dg~JMf9?;~RBC}8X`t~fgNZ&H9AkMw`&;?a>Q&ZGacnfJ zc&5?P?f9Cris&=@MFNLBPZtgD4AB7Dq(jFU+0;xaw-v*-Ur-L(L5$Co8@Xh!=)obK z*wwP-QRqfchHNEug5IEH^2VH$82leG@8B{k5848n&eH%`$e0ElE)``RwPAK0EKe7N zad#cYlzT@Py7=P#n;J7L4NHS-go2+L0K=$Q?)>yHFSMl9K;h1=)92_8mc4W2&#?vl#<22s&XPj)BeM@P=MtXY3JuPP!wf0N>&+=aPCC40uL z3TL{YG#`4jWeY6kiQSUl{j-?cw6>4ByL)GgtDB@^PJ@!Jk~PaOzN(F{a33u{7EDz~ zQbyv>h@1T9G<;viIn)9)^+Sur#mjhQ$%)sO?#(48&UkT1ALPoM`kXkE41e;WV#G}? 
z=H8IWVz|=)3_oyssgV*a4&;wI!wp)zVKuK5&%i*rPpk)DqgkhEv)L5OiV6>xTen;; zy$^SQuD#|?UnOl9Kwc$*XBO-dBtpV&vBLh|(Iz{~{>vK_4w37}O{yOAbpuoOC@&Ei zbP@WXrIWIDHOh0TKh>WQA?aiC8ybLPrwBk@`D# zpV9Zcpgg>3{q{R<5lTwVK|6N}riSx2G+fag?+{41Uc^mqZFkwV12SpkARM#8*wlLE zf?_@OizL?PKsP(lIn$le3w)X*0=@ay7MV+y`7{@(!!5A2%QG!D20OSvc2W$`l)KjV zoA6|5P=f97#M6{J+8gAoAhtr0Ah-vvpj`9ewx6*Hu~p$ul*&2SXD&39jHlt{Se$Ga z-Hv3f9X2s;?I8#gGt(-o>Nj5?i1OMTnsGTg&$W5eCWG1`549M3ZdIFI$%n3=I0}_) z>x2j*AqB5#dJ_HxH>z^+PJeaS$Tma6SQEgxb&isa%zCuL2RAx9+0HH5UBP|JGfG#g zJ03-S{FFNcn6r)5l*8nisqE%K@)Iv<{s1k)yLWot%i=pT`dx11xw-LW{H~oN3$xF& zNfpDK!kC^qE|MSR|TPlF-?hfk)m-COl<-@bLh@ypw0rvn1F(`4f& z=P8*#Kv{0vw8^j_DeuoMe7UjS%{!FBuFPj-6Zn?G>!$1pa;oker?BZ*p)X#U)#g7a zmf4TXknRz9#*)L#rENM2UW0DAgqhCpqa>5z#X2OQjh%#D3>=qx7-@d`9?2i4oQ^5= zh|>TZ&lqlGur2GfG+;hL;F^x`^?fZ_yUQi`a3-nGj2aM~=39A=hZlb+H%!5OZa|BL zZ@=jfq}xKK<31EO;6cIUC6)yGCs}s2Jsxk1V7@tWd-RrfdP90*C~dRlO_pHb@Eq=b z63|SQpzeErHO_6*u5|D^3m*zOSffbCfoS=xv+q?Ik6Y}&%ZL6FABiK!iWsfUQ6nYJ z{3#$lnns$xTdtny%wv%{6&C^HZS6-vZF%`u#JyY2fnCjxj-8=x6D+HZkrsF?ODo+8 zm_dz_`$4df|<8s*yML1WvbG>pb26ZEQ)8ZAqgHwl-?+eqE;^aWq=rC8H z=J}RP$?0!gH!5P1lWQ+!S}6fP(a7Qh+5^hG@l)nAPekV!894MP}P_d_mY z!i{wVm}?0U>Qzk7ENixv)3P@E{!7?vyyC3z{6-V^zsBriH&=uls#^aNXRnON=-%$d z7WMatZ04sEKueDOEj~KG@*KFqIRK~i5u-sNG?Z{7r5>dIN>S`4^p1H$_9)-Wuo%Ph zqlfr>h3o7{F9>K|NT=v}^=}u^n<;@N5oQaE+gK!C{bst%mdl6P)AKRJG}gUIxm`tB zA{hD}F7wrT#R|mR%4Jd%sZY+bNqA3fY)SnuUHN$|Lx~~l;b0o*;3`=ogQd@ z%g2wi45G9HL?`|p=%iy$VushQYga|V3+KbvL+J~hmxRB!=KZzigJFzaIbR+kX@4fW z7{Mz11zM>bcVfBQ!W2&rk>k%dec%q|WR9~rVCK+hVkq36`lKqJVnKveNEd9t2i+($*>=L9`?$K=!Sm5Chx+z2QoJ2aoc z@fSot2$Nv8l>`cTGjQ7;fI9kU-kUSeS|&s+N1E8=<}1BB6k#}(ZTothi@RX@9-!Ub zC}T5aHOR1(rg1zdJ)Ub2glNmPt$8~?%g0I2UGJA$h|X_Mgx(RM!CHSWme)H_NC}x( z;Sj4?SLAi)MJNe+N;#P8^|b8E*WR?{Y{*8G#Y?^2eFCiI}F5q=>#bs}`1 zu4O(mPo#%9@68a2U0n7P?X&fq-0shZBOIMilgvFm>zUblP((9KSYa5_ zLPK+HJGO+moP4r7??#~3y-z=;QW;YjaotuX<9yN^16xDkM5d*Luscu7?I-ELwOtz~ z1_GX%*S)s7SN(yP_`jH;F8z?qS+^bSJ|tre_S@^2aEP`=UHX=MYc%kegP?M=Kj4WY 
z*j@p18;qB#yGUA2BW(rJSYSk7xmEI;AmCHfGSo`)UX{52)UK9C>5pa`oU^wVFvj+c^SW=zpH)qCUaEp6^p@cgR(KSivA73fEL%!S-Qrp~<0cGyZD)YvDC_!~IR6+zY z;9Mw`Gc-`I_LDCvJRKUHVAz}Ni+7&l+B{hi#x0aRV`|$oQjx3FsO{$B@@g%|Hur|@ z{?LMQTF|IJSBXl*eo20foJu3K)lgM}nKF%iH&Kw-&E>zIGL&hE_cHZqK zOaH10jJBpxpg2u{!7>jDn_Q#7Pxy*pU*X`#zGDYA7uss9ZPu7FoShdbI~|SbGqc`u zPsA%%VOtPLn2JDqsXNtIN$SLaoM>0Zr1<}m$37zK?x6WjMkP`44shPXXa> zjSC&$&s30&&V>Ay{zy;TX7@$ej0t7({H$(4!M6)<0ys5-O?o`6W{aFl+yu(VUxzMD z531ok1vpzMV_4#ZG5(Ir2P4^i+V%7Qxe-WjiirIC24<#6;s029sjEA7M$>+x8_9;w zOwNbS&uYv!3Gu_J$xlH2ix6+-eOZ+KHC+%S7L9CvjBH6h5JN^n+p_s! ztzlT`cqlQGZv+?*W?^1C2O5B`L|$zHc4!>w5iYB?`%y6${#??0S1FK=LcZD9Rmy%w z^KDdXobyQZI)MeV#>c&lN^Q4q%?s@~4O@qDL@6HGygLTjJ3p?8%oEaHQ0RucgQlb^ zl(a<2#)KQB?bNuCF8o*WEb)kOZh#lNjpj$YtXL=svb`$bBb9sz48RQs2Q{Pp9DS0| zA8`qAMGRh2BdN1pd~$3s*}ygux=NA{?^X`wfEi*YIgDa{O(|&L7l6$=k-C%WUUJLm z#%{vWK)W$=Ug568wHh1Otv2NWR{?V1J*P!}O@EM{z5wyw*!?$vvoh|P43C2B8e^XX z54rydWovA+MrY|Wn3UUktk%Zn2W-f$oxZxM`ivzBQEa3!Ak;V4Qr^G6;65l&IXoZykTjJ0{mhUCvS zed~}L;=xv}I*l#0IsLh?&N1B{HpeL~TayZyZ^aJvPmR>O@-THxCg0*yP$3Ny^8AP5U1_|*zm|7V{E6}xLta&r15mxoEfvwrpGYdvg}Q-@ z7H7sx>S)v|#k`lv3ixaupYKSKddNt zbA}V2vBQVHb)g6K_e14S9am;`z=s`2YUqK?!)|4J;d{fywuiQ!f=)fCxFD573i;lR zC~MJgdTW{{djb%{xHT|Tfl7JmKwijdS$_g|1}Ht=nroK+cOWYDf8EpOEzJTn=`i{C)Nl;eAfDLboh$=*=6Skb9R(v zFV8bOK$Q?u<_aSEV^Qc9a@z@uuEwZ_T!LEYh@4~s_09lsF@gBWX$6Of$-M^5KHRiQ zSCw@VlzPy*4va5uq2~8#WmW6u!Hiy*92&8nuob0}A?mw~U=88(K%s1TBTf$HjWG1^ zEG>KEZA|}K|*6eGPRtImA!sjrV*T*Mc~9vw9Pwd=7sM_0~k zaBs4u=Y-U}v09GoEh|l^c#ZpyB?qRC$?YXQ^qS76!Om*&uvl!M@$FmBQt3bE}NUq+*G3{}e z=h(REF`J8lF_Afy7C6(rr=a`Nh<~FE>)UeSO(v^BZCAXFdG01gVDF$nvZ(KR$6X&G zce0@hHL#P@CU&xlUROmq{Cj}xp@w$Qsb^r-rpJsGi1612*RMADs<6<;B8SzG0-Qn=>PN8bu66)j24 z4%|}7{JWJ$IGJUpPVK)uVd}wJ8QWFo#nsFT0V*JDlcDkAnQs02D!enJ0m|s-rCUgK zBJ~G>m0y#T)m<%32fJ+OOn?V8etrYTB^G*y+QrYIIm|+f)AIJv-6e7{wn1fBmkucX znTC;P1_TluUoD2b4ne1gDlk07@j}Zl@8r6^E7T&AFcni0X8>75-a1g=Kj6gzpJDKG$&A`ux0Tw}wZhpx#{m+JQ2_VMzktl@Ri>p! 
zE+1GI6RrNOrUu472_bfIl5@4U_)Em5kAu6XlR&lu4sy`w!=1FcEUi5Eub;IaeO5{K zf>O^szGX6$-kesMprdosDGPp{pkL#K(vEG8CAu6VPj%DtN`n>;wlXUG<8^5!_%zrK znq9QgNBS!rW^dv00H1l@Gz(gZOs0UO{}UB$jVUwvI*XAs8B-RBzPyek7(HiZ{lfj< zTtCo2p1TVa;2tn5%Xbj5P^YJ-(_8IaQj(BAqa3Np;72&JI=_3CRJ2f zi^A)efwObYMliF8$+r;+5t%TWO=_{?EY#KeHXpcxxA z#s;tIgAjusj09XapoE)0FAeRxymc9tqg?7*7B#D+H~n~mTc3$@ydIr+B7`mte)*6~ zg7BhLHD+~B@|{6|kN(Vb$R}XGK?O@}QToLxZ~QFecntNjNkdf2RME5;1Yn{QEhiO2 z{iyc>VH;pLe8s>R!_qY7yQ-d!SBOB;$S;BbWfP{%Q3ncXye8gW)T|;>Zn590v9aZz zBeg}4y2Lz~kvjL@ev*d3l3kc)E-=#w-G}zfem*9QY8Q}N`u}VxCJ>N7b86Zh&Ik$R zNZJ{gD+nCtw%-@u^Azym`QM^1h)~{;P@E$RGvcx%h#|HUMZ9(Pi-Wj=@FPw-kk>Nq z=hRXI0&_5ySq%l@>+508mbtmPDv7nrgYB3e%-LeOx$!y7*}^?NVh*#J?5?e?WGm&K z@rM@=IMu7ei=W}epd;pM;l&&1MILz3FHSnIUE6NDrqp3O+o}G&B7N>91R7=xK&2e7 z9(lO^>hOp^4qC~!FE+lUJ_)g#UxB^COvY)#_TJmbn9?v87nC)^d;rhn%=( zztd9_Q1$a)Mpv8M+^F!)|K?X_`*Ekz>8+i2jYHM#x4&{pFWzuvw(pB(etU~_ye;=P z(czXXDIj`(Yv=JF=vH3+9lDi)(v>&C+N|H<*6XAsQ8bYj^V9dE@q6e{WD|D9&rkJl zapruJU#3As83h=zc4=7GN91pNav5&>A=(p%f87(>hMfzZ*y`+VwTQd42opj_hOdnA z!D1T-26)Vx8uwHjP^eM6`4wlXYweAV>#ls#!*TVO42+qd-r%h}D?8U!pM|&Fz%9EVkR7?N zq18mzt<%sLud!jrp{Rxe9=%c?Nf=s2IN;O=U1p>&8(w~ZcEa@{F6CFr@8jV`CwTFo zQ{M)7v3SEv5qNPQdU0(+d};Wh4Q%JRp|3+=xm1D*(#TgOkHn7_L2*ae+H@3UmUW;; zr$1vF6J|jZ^uSG`q`<22rq`%1fVLa4l5i z5d1rw)n8o#M-5xF_#d6en4OMkT(B+?@)G>R&m6uG{`kjA;o9(L7m zciM1w1m=Oa=@$DIvt_E%faZb@Mj!wnAR6Z0DqkPT7)mTb@y@4J5*Gm1eMn4<+sE>V zN_SchQ&$+a9)mUXc%0sV1s%T)Oz_1MxP3wF=@3s zb7PoO_@(d&fz(ziGd!H}Dmpr|_sHWeUM3dU{i7^?HXI{mweP;O#i)-06lJ~u-`Xf6 znAY_Wh67f8PWd~CT6957d~cFj?E zdgSZ22rg&*2T_B2`N@WyFt5z99)ReAnPYRaWyYr%TA3uH1bfP)_M}_HxK36$2-e0m ztC<>mbBUTX8q(_)`aDYQd)eDTA-8e@*yhkN1vzF2-s36dO&cD{`9+Fr{wbHEr3U+% zEl!=wLul zux{0B0G0sb-G?E7z^hO3OEFpIFfJCuWWP50Gs1>fT1$J6sHjOLZ&mr8xjH?7_V@C2 z=ZEXhAG9%+qubi0j+Y%o(n877cWs> zrJJ9rZks*dbK+#RD7X?VVlaMs7A9&1rS-yC3a^-W!u-#-YJfS6YW>W`P^*PpLv{g?S6 zNFOL1!vcy_VJQ6rJckY8WJ&}FpID3S@dabH)1D&LZ`df=5u|r7U$icBYdqn~e3}zO zUEXvZ(cQ4|UctxP?2vi(^2Otcah{hy%qy6gozb+Lesn3DFm=a3|E(*V$bA0q&WCcR 
z`q_1+2dXBM2;}KjoF`Yaj8TT6(&>=;m@aMcb8uwWXLJW>bb!6l?+MMGkI8mJQ*ZS3 zx2~MN8O{M-VzDPI`&;f{7#q}zqlfCx!(sfM5CQm&Ku(SMrr!DdUEX)xi=N;-o=3>J zvlMVt3&dq5;}#ZrSEiEo_#zJyD0Um_2pHSeVhs+#ojSL&Uu@bmUMeEhxiA^# zuCC3$*!1Ek8a2D>M#28x<-;drtj)+)wUI?jwI$OAX2DYyYt{>WT%Y^+Pt?v(FGqkG z!?`dVNQ4+-UUgC!pgp>p11+`A?P|mszcz}?rJtOg9g{z&`d&8lY zz0jNLc<(FFX4|x>`i&t3=tjnJ%>Q|eU|pzGg_=l?9n&-`Q^I@ZG+We#*QY0CifbnI zZsWK$4Xs9B=g8yS6ydmxk@QlT0xtX=Kn?JuTMU9Lxdg&Q3S;gtBsr+QcTb22-&ugs zxwu?PKSXjeghE`Rtn{x6BH(>L#g8<-EkI~%1a02A^Wrl0ZO zY7P~iMK>BvOk#4tY1%I2q4bSp{FNOV#VBi4O&OXJ2RN%bmU3xG*43EmujD&yIIt&8 z48#BWUEDI~k<$#=hy-wP#Z5mw4x&sjn~2wEpl8&l*}6Dl*$LiF`2zbW8uc|>tWMb6 zoM9>F$hQjY*?mmuCE?vN!?-1nLdF!b)4EH(y)~j5Bk=av8yodlU%&vO$PeYWiPRnjHJ z4Ubm}a|@)&VVg44JYP0Q&FGyRGLUc&X0Gk4fmrcN|dyP11xXdq6JY%aI9N{TJYMM3tLaL+uC)^3p+r z7G6z-gC>ANK>4Ml0iubcm=4{joiWln6CJ$ZWW5@)uBolPGLsoBon{HyCAT5Ccn~!* z(=Uv|qRjhYG8k|kaPm~>??e+}VFuj#SzQGJ8ZZ7u20M!yr_Ysv-X9+c^J;P>(b zwz$E{6yw&PpKPDxVPhfA*%Lq{1vw!o1^OAo!^PZh|D7OsA7WljF^pgE%<)f@>*OUF zXdkH@_WGy`N^{>*0!OP33qdSQQtMoED*d6@UcNY6Am^ndnD$4qvCyL2*aj|Xaifgk zWT;e9R!OrhNfrEm>(L!pZS^fk3gG-`T-m8`5N?#ktde0khQ|{qj=hTu@8zgB+W~zR z&Eybup9cu#RBs6(sIqQHu4{r}Bh(t#GI9J5_TD_4%C>JCUMiGKE0rO$1{z4oP?@p} z4W@{aD5XR)MP^E6Av91*MN~v6B`IS_l%$eKW>JzU^YrZpbzOB`&+|U_`~LBL+xBg* z?XGLxT5Fx>c?`c{-}m3|;7PCZ_TcwKd&sVRzgL0&cz2Pht^VdeS zoXO`~RX1CGG5dfAo(&EuVraQ|Dy*AFq;b||clBMnw6|R9vAAQNv+Vy+ZCdt_PMDYw?(gJc!0(y)Cv(~M;_Iyz;f{Rsd&E_|AGtD)aN;qIMcMS zp>}YbWvd>k%?ckWl=7yMo*{&zUNrFI`=@mX0#b_h_zSqyv;V%g7|){TU#ff!BliRJ z3Y!usl8pWIMKx877WC`tBq_N(xh>KT?|QaaJbucJ(}^&x87<808+^AA zA}WgMBSi&o-1=UtrR@9eCrXaPo{W^%B$`iQ?~R2!R}H9ATUH=ccNps%BM4mA(`)y3 z=iPegk#~%UWO`qikFQdKUgR0h?A`SJAPyN$@#?UVKJ?-4pZiLjbjS^Vh9jIIGbW~S zj{nEUX=1mumyPkVh{xa`=anYL2hi!z>z+~)6JVE=2}+mohn$@9N3Y-}bKX)+-z z*^drKUBwL`T-`Wj0-w%~3e}EWyQ+*;9X+SX{X%sPLx+)@asiX<77>oU4j2s?$iEc#gip`E%y+$$<7t=I~2%@5Q2`$=U8|m@8=K zCZCD$UXS2NsZ?p(>Qgm^4gIny&@a&at{}JZ<|1u0uyeDUuid(J3vEF3`JG-$|I;j> z1n_5xRNy`1f~roXb)updmKTrj-Wdpfi-TiBIV(nPaxlN5oC?YXNXUISEtU~CMDs}uDF|+;I`E?g3r>~+WNkLTf 
zeTvlBlW@aHGiU;@UN_Wy)Z$}^EtUE+z*vWwIu;BXfKZFBpD!K2@;h|sG&xwatvr^a zChs?E7p+1H5Pwb%sX-hVZ?4U%o$4{V?^o<`)i_-$$1CON6t-MN%B`nn>tv|p%S_`* z)!%d3zakHaKVJO$txI<|_Y{^{it5>3v)hvp_hk44PxSCm-5jSJIl10U*mNi&wQA4{ zgkNZ4uxM}J*9xe4WMm4CGtO>2!KEYrXIx})5UMSV3M!nO>#`&#Auo?fx0+XL`|%#> z6n(SzD|61E>gTM@1GWel#+QEkgCMI+P{e@ z#hkvOpTAapc)!leMY75Mz$Z_mL$zkZWe+exy5z)UL~_px0=V{8By3wW@D5F}Mu&Qv z$|gq3IO>akA8%JuS2=axwMy$FCi{bztRC=i!q&$!hOI$WSj$jjsubYQubFOBr$Ch4 zZ>yV6JkbNUse>F!oThlFcRPBg-mKd{H7PwEU&RI3r&ym|AHQp@o4k?A>B=6$PM`st8|{x+qDNso5%P5V35^On z>8h!zd3FbxpO*6OMzh29;LKio`~1bKV+NY_CzmNI03X$H`TM;x{!_1h;Cn~ieV>|j zUusjMyjqLyA1I9UvK%!osrAVBKRoC+QCnLsI5twLPFqXDkT#N8B047V2TH3dU9{@d zHff~lqF6b?z2w~^pO!HP)Swgn-J&QbREJu8zO;HTYBHtmsxo=JySkuu`Y#KbcHHYy z4sj03&&x*hGMe*|Bj23MNVnpS>NMthnco&t?lrVC{aWJis5Z-Hb?5Y7feD+S=|wk< zwy)Mt(lwwu+hV$2`kHsc7i*z73r#lprSsEP;M}U}eUrKD&ya?==)|IipW>WL@-^-K zt(klXpq&IBr*7Kt=|EQd9r^ZM2g3Vm{wnXt5@QZM%`D}0_fFPLiw3)b$GuYp6fd1g za{k+GQg#ddyiXK9;E32^%i39iILdnf*LXbczIvvpD7nAwL*GiQh4w~~;#w#Yt{h~_ zyGA@%^GM?Gu*@WPGbKn5Tncr@ty@#SnykS%mb|?4`geL=(PL(gb*}$;Phkybvi%F)q-BuUjzpPQSL^k z^GqcmyO2tPFMINxkEgVB%}sN5#-7i-?;tdJsc3ijfy~2#Fgxu4!C=KruL7;h5`pa_ zl%WBjAc4}WMB9(6%-EZu6X~aeEKLxcWQ2C6uhuwi^>Y1mqTeBc4=U9lu0Md(^-O(= z5W_$iFO{7Xb?q&)_+5ZURC!=Pq=uaTQDQ{oly{nIA$utp#hJ)2QnL7o>_9rcGMeX| z&1=Dd3nRVt3zUOOBg*AYu^x;^1pvCL_lr__C%c4ih=(7G*DY{27kA<3skD_j@*>$Jf7QMJe{(@2)aX-ub?k z)C=N|cn0>~wHNZH1a<4HSb%fVnJy^JIO58h!ATF88C68rdH6vq7f z`9JN9O@1i3Sn?W=Zcf*cv`~Y4KP8TZdqZtW2Tr7lh?9aof39{+p0&#eE$z_m&2cJr z$UWxl@kI1@J&+oQ?eq1_b2rvD+Os*cNrR6=OB2*vAT9!40fh3x(4n@kk4wrd(6jT| zl;5U`y%cU^LGS5Oh|)eM=#_dJJ z6mTM5{T+5W=cc?^b=nh89uDtiovYg#{5Shpk$9)Z&ikz-C-W}A>6sTu?KV$3&XL_KEyW-v$1+O560+G*I*QzjQ3WqN`11q>B^DjTd(+2PN$^ zUi&iaXVG|1W01gP=Bm2sPiqxTA+1MKt-o+EYzF{;p#vfj;-AHJbbn z<4>vcpHhSWML82{D96sHO$C6zNd`QBtHi(;>*@XBqJCEY_N*U!`u1K=Rd|JxM%=$j zESC1h-9GsHy+yzHFXaS3i|POFr=X|sw@8tEUOZy%l&J8Jd-y3HvG}{VY2>$(4DvY^ z)Zr6@dF$5Y)bEVVKLORwTBTF7cA;%CDSk)J=<#IE8Lr!tx#%x-DMkb6Ec98cqZlW* 
zJ%Djba~5{aoJ*5UX~~!4$M^V8!eWHt|N=%XQ4E#;-r{VC!5xYPOlE#GE+z#s4w<6lf2BW~JaqNZD}-ISHgG z#AZr9WPLbUqSalcQAgpX#8gyD64&V&4p4MVZQcL8mV_C{VNX5a)ZcS+i^+fuQc8nyYe6hodKA=&n2>J-jxgTkgre(b$BxzD zM0)cnxXlE(znQQyl*BDlh6%Z0zsnk7=3kW}SB+D$FL&=2mdox>E8RE=l|9)s@pb0& zvnOZEq0}}#-Hnm?+&`bbBi(Kaa+7RFGxu!Ik&h{O%&!n3HowT{xER7pyTLnE)Bj(| z6_=8AHdU!H{wYwN|J!d(dC2(ARq@c+JJm4ai0$p7-{QbuCH8+W@RmOhjP;%NU+$-W zOPN3?Oiks~-;=lnQY6zq_S3&F7yi%7P3056+6eqX zDc}B0;)=rl7mpX5$%fTI+=MVQ+zE>a!8tr&at@A%NdE6M+@+mxdpt-5Mw0fLLJp^Rdg7C>@lhTd$aLV)9KYFsAlXS%nuJL`%tm{Z(<#u?Kk3j|4Ua1 zCiXi)$ELR#+SqFY1 zW;Lb;uW%IURmV*RSG?Uf{}CP*a;Fza7aa*DG$F_&@uAgvw1S^;08?EJIl-bq%!X=GcnoJIlZT$G05W?y6r}wJJ-{b)5&v6!z_Gu zYEbz{BsUF(oW>|_%KllkZ}WaL3S!1eo-90bWfGAc`7Ymj*A{ZbEl|5#FhF zeZh9DOFcSQq_Zyfu=5!C9-qi{@c4R&<&qDSRg=2Xd8NyQwFyh{shYZaf2LvifU$~6 z?ck1YpE|wAT2nwfa$Vf@KJHG6t&L4EFjI(6f9X-v&qXosG1$WgrJpqUpDf8tdni0RjxkrBr26uDpHZd`{|4EiY^AH>xTHEXY z`SN9Ji)W{^9iEG@NqY^S`jEB^1-2=ngwj0?%5;CJyM&qRbwzgN%paGIjc&ic|G*uS zZEM+Q-BtFQGKmQ?**m{_EEYBw^nP???xNp9w-Zr?zSh;LmPd=DY^?w6jIboC=Iqew zU#ZG}w8R!HGddIsi(P~Qzfqmb`~EQk+`n|9n9(JYPwwdMFz+SnV`M13X4TzqSx&9} zQXZXYmHkk~9{wo~^;tp5jYPh<)KV7*BJw~5#6ZcMXE&-doRBzNu+6w*?C6{D(eICw zlGXrNeNnycu<7R03o5t1ubmQ}UiKcS8`HTw_n)`&{tu-nFXOSF6L0Rlot;u2$~Gm$ z=|KHXTX`@M4-otqhdL;nVjFp@Xa{>Bvj5dL?)LW`r%Bp@LmKrQI}x-HoEX^jCz79F zf$V}DH@zq;%cJtyogHHYh9!_CEW-HrJN`mu!vZSfq>;d`@Vbp^t^eG3Q!|+si={}B z-%Sc>pdV?PdhR|6WS2kn*w++L1XANQkY^dyfJWSg{k1y}^A0ZYNrIpoYS^$bR$(EO zRQZZ_tvl#y^JgfGU`aCW4-Lkuz)b;b8|kEXvJcAHj;tcRl%gTJoixsW{iSj-ZIZUK z*2J6UzB?n*2YmRGsHnxa54R$~R-wFO8Cq$)K!Hm7IdgPS-%IEPqdOl2Nfx#zA9m?~ zV+;qDjF9Dx1KNGZ0L1-u7gH~yxkzbF^~a+{+l>1H5|h97GTT71PO?@@#RI>F>6dKX zzUlxGAX_GchfIw^p#Ddp_SCof>b(17k*rJfx)S)Cd7$_{{jIak&s+MyF{$M- z>WC+KmG6~5+6nx9+phlzHm3eXMf9JOGx_^Dzx9aw`zy};Uf%uJuYGBMuJoV(@c(*~ z(f@nPe@%%n^`CoZ@}C0!-0nZ$u7#u1{`%x!FaGbs_-iQsKWLcy|M#^_`q!wm4w4ed z!Ai6w-=5)UIhr#uFwr>F8Wn1c`gRvo#g~?rLifN&4y56S5W?Z6e|}Fq_Ez(2oc|eUZU&iK(;r(8SUpRNg!`HSJpguW|M>c5AwvQInZ7ybFXnD*?@RqQh)-4WWt?*IGA^^n#+KeprkepIb~+>|VY<`E8+yN(vVzOMGdy%whl 
z2^_6)(0uUVL0F9w`F)wscyD0YUlrGX-Dz$%>0>^5wjo)Kk{RA@*ZrTB%GL2hK$X&# zA*LB}dZ@QmQ(L=Ku=3-_k4>i*9FA}M)ywR!uc1&f*MLTPgi{g2O%re54v@l~6HSws zd;bVJyYMJ`dlvVhfOx%8vT25H+3(tesV|9k#OLoup$}VkzR>vH=3(=RKJ%h@{n*+! z>m;*&ys@&iU6Ci_wT{Xt%ZOlrW#N8q;BkRaUag(k!4pN?b30ptQ#$@3p4$g;dFr1(q8Qm z{P(3($%pY+yi(bN7R7xW-d%oI`;$e~P8AR_+PQZ09%+AY^xs=W?=r!N{gp@ed>)RP z7>YXjbtLuLK4T1zbJ9GAdd}JS3wgV;^2^^fsFTCOI%k9G zilFdi#x7rfYe)R+bAtvL+h|N^QNp%FEEZ7mSI1vb#WZN(`ilF=nSWQub$-43@BfbD zQ0#?mmkwMDfPuQ%JWQvGtsT4i>9I3ubf4PnZglsbQ6j>zS1{$XOtRdaKMzwi`R|$6 zdVJX9A8zU~{3UMS#kzkF!=NI_>eo^}LoZ(T9nEYK7-`AcQHH}r5B&G<)-yVt1Ok?c zjqibZ?K*TEIA?>h%T0&Sb0a5{`|tbz`O3EvGj#l#$M=kLHeMtB!PV7cy_y5jIAzUr z0tb3#ng87^zs|EM}Li7r|rRm#in(cZoS?fa=+g7_kS@LH&*O?7-rgD9$Vz?(OG-#>b&HCFG^A@ z>^CE;zuIO|C5}_-|L8H$|4jhZBI!r}{bo*m(oEx1h0=^Je~l3S9jl)8=W%$iB&K?- zrxu?oz=@8Vlg%9p=Se#>7NlNt#_2&9O+7Hou~Q>y*v%UhbL-aPwPtP-E=Rv5^y@D* zGN@Cujx=eKlbTrU^04zxzeE2<12uymhrbIfkbO|}ir)-|GR8dTaaX!4s&EVy-J+DK z`#J5d;^ltM2-S*xOviW)wcVmrKXagNCqZ!a%$L#DtNe0V6BhqBFX=HadPf6R`X!XI zHYTm|OL)N=)Alm@>ncC-$Lq_YGs~i8u8vQ{M7GA@uDyrfMj4HW@4ALnYK7we^78U~ zMQNnpPsjqhOe}@;tSm*1(>(f)W8}OyU9YbOmWs=kagu{?+TJJagrmr5T>X@Y^iXF! 
zSnrqUIVYNqb7L<5pZ%!~g!)AqirV?hjPAtu>pzC%SBRno+o>I=XoK38tNqJe@5?p% zjr*|1Gp(K(k5TwP`O|Zq=ncmWy}Z7-G$rqPGPN{LojNtvfB5fZ_Ujea@x~n@4^f&uGIJXg5XS?S#ObVr#}cBQjYokzBv z=x;w3D*XD%VP>2=w`@86HhJWRue_a|UEHon`hW1l!>JjzY~BhJnbKCl^{({~+H0R2 zmHO2B_KDPQ7x-^qmaGjf-G9I3hc@TgYSg}wP7{VvZ@(Oo_ZeURz@(^k-FVLnsR(%o zi_2v~N}drS4z&N^N2hu4goJ2GDvnyYHFTxWLUsH02!mvg)>l_$qI$c!cFXfq|L32x z=cR>1#JPKVdp8dZMatQ&ZF&5~>-+idP`>@oKdB)M4zvtCh;QC{jiX6{PR3z-d;81i zFjV8Dld<@JzB-k|x zW^&})2kFw*P=1Bu$O9CbP4KsNuL3tW_XE4ST9HbT|8@cY{SPreCKpV5b(+oPvZ_O} zo&%qb?#R|SazqBcFY0q|($4f#>FGx=q^SKTv!?sZS3brRr@2|Xb{BHL^&B6l^C`-3 zYE3;7oRTxn7b>W@sdtEY(=WaM$%+(jB2W5)jZa3rwDpeBvS+~y{2x7f6gT{BY-E6F z+YTz#J#yGPI5MJKae(W89J?0vfPer6uh}%i+{uOE(QuOcHNqm9C=Pb^XNYjlgoTCO zE>pxV#(`Xnk);>D_O89k&b4^U^LGcfez%90y^_uN_t^bcuVUgZ7|owX)pz;u;2;-H zEDnN!Z26JIsn0-$X8mqxr^OB9^d;^g8w|Jla9|671{ zMS|t9nZyAvXha8*IM{!$$+(g|@4vd_-@l8A>wz|r+~d+{I5~fuMz_2DebOMZLn<%t zrMKxc8spsQjr1Ohva<4)ORE-rsx*CM&v13!(Pwc{-hl``MI?l1Qak|zr25E2%SZg z7#b1TNVX{NVF}qoAL_%(q)Z-&lOe)NUE9)ItN?Fw61hHuD?h7Mn#rCt6W%$Re-AQw z)w~;psDeaZ4273g#;m8smJ6?3 z$%zEfWsuhIRHo?9wm^0zzhr3DgM;T;{a5p_u8b_bTXpEQb(+?eEd|G>Gv9UXwz#tH zsHA^q*q;+6ZpYPNj~zNch|xVu@cQ-ZJPM8rTU%gS#Lc%J_P(cl-gJG@{gQy^81~>A z&!ZFLUKkzW<;z(&Zrn)8wU@_}ms~u>Arx|ci7qKpA4F*(MI~f27ggaSf~A$V)iP~; z?gd3fP1qk#VScD{=A3(*u0b1$DBkxJEe%&%zZ3sfo3+3sX+B>t6aF?zoQD_alU9hM z!R-;5Udi4iS`vrdco8$c|9)*D5a}Ta#}RT3(ubIaEn?01*bbktaV;0X`X%M zW|@P7L*Ba1Oh2r_+)tlAsUFp=YJoQ%ggExD*N6)xlzBwm&mguu*E;QKX(^lbWtlu2 zHsxxe@TUPRH_$tk&){2VaZDrjm`e7tUCGeilZQ$&Ug5D-7Z`&N{UwfX@G=u`p=ntXX-luC8STXj7O94hlr4DXILrI-&9L@g6Uu#PB=^TJv0s*@7R})0($fEUzvsWW>l|o=7H& ze2KDNFmis=I~pwnbrx&4L@Dk1u0OALH;$fbdv}`)f%|nn4pC9Z8L{PbxR1upoCytt zL;RKcAqva#FPpi4Su}%*DaP>jRt3i#T9|TGx*x?^;Ei=!&&pzKoLLME3<}PPG(!rp z^Bk)qO96?^_8x3thPe|l=v-kK&-wVsm*W6kxM3lVz7g2!g6U$yR`<`D7+*k9Yov>h z1h6NM#VqZ%lYA&DdSFxHdvWHvM>DTt1@^?A{*m!PNDSL%)Yq0zw1n*qi$53H8?VC3 z`_DvhHP!G?6-tWFFX^uHnb-q6f;^Q0(K-_>hq1Bo_9P>+r-J#`Y{+nIdHv+5ovWK$ z-rKjUI6k)h0cN@C5EI$DXW!nO9~e_%eZ9hZ&#FtW(P-f%^v11+&(M2r-ng;wH1BFA 
zW_Es+4I4Hjc;w>}o5Ghenwy)m@~ouHTV_PNxhdSSD>*RPKcL`!5z9JA!OpWnTson}`QJ?2-Ym}_p|&ePb~xLY<-KVDA+Pjh|4NfvTwFn}|u zX#H3XdnYH@#>LFD;Ao-g-X`K;p}H2M{Cvl^b;)PXo}~u``Bhk-EZ(;PpR8|hU$#W3 zs(g{CW~%uIP62^noJLy3e$9ZC);)g^lQ7i$_pyGpLq009y=92 zAZF{$O{Oz8kvFi&WpL{2@F{u9!-)!Q!%;N6PZy5AIwLRYcmdxSv1Kv2eL1_@0zYPs zuC4~PxhqWOo@AM~0WgRD<0A(LhlYv=?7gqiQyVXud*@+3?;U)*m=bCexcz)QhPSdE z!)v%;ucwz+dAz<@X7{rNRbo6zOk5Ab09dneF zlnyb&_~~C;z3(}vtGjz+kW@A$S6h-6wz}4>Re)c?aR&T@Y^3(9ydb3uV^ORDVxpQX zFlRv9x`0g95BC8EplkIx<86|Xl5MZAPsex%$<=upC>2SIh?a;aRs^N$8KQR(cQsJSv#vah}B%b0xOU zAYm=;qM@^s!*_NHz3t0Yd538u1DzP@(X%m3ysS)!7b>DnvDIGR(q3=`t%7j|_3|at zF$dyhYq2yyhy(z+sz(Tu&}zDC*PeUZVOv@v^ziih>4$d#z7a~ZuCQ1~;(?yiIN5kl zW%4eHuUVFR19%F?$+^!u{OR#Y&;wsWDCRcEUbs?&{9qx%-_F%zY@75X2~kPVD#rwL{1cnuEueEqxOA%*DW=SKa*5^8zbbNW7D+g8I zeZHhA-5^ev^&Ij0FrW>X@dz z^O7V+KK9PeboRG#x(5IZST4gwv6w9|3fSzoL!<{fK5}<_OxMhE@1xu;RilLr+;Gvu zuIz3r7pr-4R97kGijYlhro#LcCNr+;6iHhlBtYm%ZCQQ$WPt=%Ac-k2-87Bnv}r#UV%4TH9hA8&V{>ME zJp1}&b@+S?qBy!-D~WC*CFGvaWkal5>HTM6 z?vy4*on-84*DuLoB0KvOtH9d1B-)JJy?p*`k|4nvPp?0&D_Ww2wT**@=K$efKC=6` zV_w_`{yQ~yi(uy6`S#!E#fay=xw#q8=d7VesZ2K?))<^-4sesEO!_Anu~;+(>+0|S zT(h2fQvy*9T59AfPmIDR2*w}3v}#pK@}cR-jNx7~j&&xb*KVlG_UcBUxOQMOr~b7? 
z!@JM`Rm#G`!f?%V@3HGWxhD5yO?cnTh*xV4oGWo0TPYd~uxg@^F0RX&ws`SkCT>a7 zT+n3p=r}&)Y3(I#xgwQgrMeIh-P46YBVl zcQ=->mFzghbaowq@kCe4tdEga4 zN`rZdLWOkKq)exfJOcB!e}Y>$W|}_(;`SjuYdGd}zHlvJyd@`Rkp~+eZkHNG0!a~+ z^5l#&xaqt!PFQE|dn+07tv6;W4Y!F^eQBWxP%VjB?5Z^P*@Jph%GUS7!Yz%PjOaIW;RK7X=bPrzi5knS1cErxx`0yE;dw|K%ux1n3uM^&;oYy#}%?;TTj z6P}F3)&+RKMVqNE+=Up%ukevmVc$u60RJ%EUHFHFxP=k^Qh zElm}K?3B6fCgjg#YRsf~dV0PFF?w2H-F#40mMa;kH_yODawrW_G(-4Rw{AUmsPzW- z%Z2QA2Ja;njhuO;Y^oubGW#qJqPWHan^{{f9MeAtaX}=SiX-R z7a-{G?(Rkyb3P?a*>G!>lJ$qXBIs-G%eTh5Iah9BOwE>eL0G^Ru)F9bxZrSYro>12 z#x*66&sFn#Sli6)b632z<iH?`mSBX|^ODhf4bGzhFdUoEnzM2Qu^)1Tj-H0rw23q$Csn-o+Bn?jWvF?WSN6JyN6Zd=Q}@3~oj<31elR$fE_Ag!iry&pN-BjcNJ?Df1Ad!KsrzJGnofL%E)WOE))Pq}({fe6u3t zo*nZS!YGRyRW0vei&E%^Yz7MrfQ(J8Safo0#EcCWCq=@P9U!}DgquRJ9W z9I8CdqaHZ7A%c^-5y%hE%H0&5EArP9*cv&tsE=+`wqGx|s_*DvLrBgOtU;{e!{8%p zuih~(36?3zJx)H35bdyzr?zOxO)~lR^(tiYRZ7wn5!Yt=UNm!`1#n6Q8T|FPUpR$? zLXjsoBH;RvW@8*k*-S~u3B_`o&cyosvz-<#<5HXPt{iWH-p{4^@Bvf-fveW$TX>^w zgx-WzXJ%P#X2^xlA#nji{n~?~o6Hg0u08twv}UN_ZFZ)WNR0V{DewHJQQ+%i?(Bix zZb!`gf&)zIrQnuFQ#cvmoyLy;@gR)LFKGUX$uyUbkA>p6C(e=j82O`Fiht?;^FG{M zXG%Wba(tQ!HS1vWdEM8@;|`6x(84Zyj&!7dxVuN~lr<+MgQA#rzNB!ZaDOIR^svB7 zm%Q&J;9;14oo6o_`kK+P4W{X(`_Vj#tkZ4{uA@|(xH%@V4Lb;U&UBgBotVK zmMwW$E!#RSy_>&5(;oCEbTC6-N-=zP@d+%kaQOmo7(S2fIAV2&NfdUM7*KPQYSK-2TA>x4y9(d~&JO}0hV z);upPR9`a`&#J|6E`Mp^##REYz@1YC1+VQg_ouUnOFNu7yPEIK$7W3$V^CXA!1%eb z{qtJ_axurmMfo>|KA2{BtC%N1fV$2$nb z1EXpflvJ=ibxeeU1!?pC&e;W~-#%&ps95+_7tdLKU5$At!ZEuYc9#SYEB8E|fgEJ3 zphe6TAuj(bCr+KRXnuZH{^)npSDr{5oPXJkwMVq(X$i}Y1Wq7bq;hilqIoqrA%I8r z5YJkdZ!>*-d?;s_GbGwWQ;pO$HG#znd;lbOYS@MrmctvY0l1bCev09YySyo)Jh*>4 zfz$=o9?3=6Fk4DWisJj^$vQq6D_=YW+CEi))#T87;$rJ|CZO#j!`IT)n=dRs>oh?# zv?pJOkP-pTmeu@+v2%IWx!hs(-)#F$^JNkT@2VMSO{0RH+niT*9I%m5qB`=?N~Bo6 z&1+ub;`xZ=Pb2h99Ja79Gy72($SoCuWHJUf-Bz8p%*=bp@_}^)3+(W@cJ9KOOh>1? 
zJ-HtNK|Xikrs`WCSt}4Ye?=ey0+B)|jIvbD=&H*y2d4?Uu|0_X=9FZfVotB7fxL>IEVdZuP<=Ulq>l|^?xvMkDav*jZ5&&<)!i)EcNla;jq zaNoWq+Y~NY-LI-|Y&I<~5I#yt{o{W4&N219z!#{t9Y9UYn;F!*j5BX27Jei3=%`uglKLMW`>vzNi3tZ7rX}24khlMSUPZ+`Gcz-uRr_WlMb=9(Q~Yp9_KLEPAk4#4KrbY2+MyQK z;22};3>5cUO$OI=X7+vNr|DNV08_M*5rye&EsN5`ly6zgCBw&7a$!Xf(%2VVrHl`~ z=q$J3N-ThBAvwj#-?;`EmtHB=(@qpHRv1|Ert$?w?S4K&8ELW{g}i#Ff& z7&r`E;--m*()i$B0?~+@KDz4}suQ>Qp2!D_d(m2FMcbvwK zR$RLP#Q>=Qes9SJJ_Xj}g5<71L`ZU_d_QmiMvQTaS&w4l8556~;MfZ5@|&Bvk=A;G z_o9b{Tqp}D3k?d&$0mObYu@h00iX8wD_&kv;F=g45b}LDVo{N3tm((Z_Bzp6 z3VAb4@WW!$Ojjzmlvx3gNq?;J;-c_A%?HQ6*JcGF35x~=BcbF^*Sfv0_#6U(dnf(< zV}YHBgCGC|ffVh$g>D3ohiqgiNmePls~;S^qAV6O_a~KdDxV7c^hg^g@aGj1EBf7G z=UHv59~=t})qV8|(S(ieNA@{Q8UO&F5?mgQji2Y&&Hyq6DHZxLuOVbmYYzSSK^a-W zSsQ7F!my%y^NuqPBJ!lDR8Z!{eI`jEZV3^}(&%sb4Y+dR*Z8H=YZDA_A1AI9q5r9< zDD#9U^uP)x{1lR@1b4B^Q5K@9z#+FkaajRuI3FIGSO;H|c1;zWiZCw|SJ8;EmG+iajQlbx4rQY_|u(FD; zspqCruUn6ZPnac{tU-qpK_)+uC)t7^aZ0qEv5p@h#zG!_f=-`ln&GJ4R}Jv&K~_nh65uMyE$JpnB=-Q1v84Tcg|)UcZUm-27u2U?{Z20Z z!*_iqythQkvcXntO6z}ldChFtw$dN1po(TNGVXbGMM2)>+xEcmNTbA)P$-;VtbG@X zv^zdj(GsUavJ1vXdv!g5o?t*+64qE@LAJqF-%>K>l~;g6B;0bsnhh4Fw_y$viQ2M-}$33 zGb`(!RkGWK7y(20Ooe^kvQTszYt)Sqt8_{dId)%DQRq`hxvua?a!5Vapug z?$G*5;K!G@%pjT)yn13fjc#t5_wwpG?rjN%#kcG6@80> zjmmxJ1qma%#ED}J;Q4qClwB(={&~wVm z)3XbSRxAbz(D`}4{f)ZiJJ8W47YQzdaA}!n%yF?SzJ2$$?_+ZZ{!-uC8s~m>cbW;o zB~Z-7OIQ?)lBJhEdp-C$60-Vp2)T@Ni^X`BjW@sY86RRsFq%tfUo5bCgrXR|6NvOd zBbcRJZb&i9(nGLmIHHTu z%+Agx9Gv^WcQ!Bzys0rYtg)5t4&9CFZf7VX7PPQ=C45d`Xt9AcB|krQ>EQD%8k2%z zf#O?kRe-I2*SXz@U_D$*bP7OtQhaTNK&T?{TE(xEQkjTWQ9)}8A3ccYC~*nWh|Rma z#-^Exdn9-z0yNRj{n8~+H3en9E<;S!OH6} z-VH#HHaU-)oG_`y&B3`)}>%aRgjN#s0eMYW^`GPqAlWk}OwU z;_q5P7ncgcC6FN#n&@T~cO;OnXCVU1@*BS8BM_ZG8`e|Wt!H0m-}{{iNKSi(j@5WV z%>mxe7nw700ha+wO~bmgIag?L3}7g50R{@JyBbV!Il5I3W%6`VZGsA1%K?kGFl>ek z5hb6~d25|U01lg3>K{SkMx$hlV1I_82QKi;p5wFB#uV3?GiMGM=RGofEgoTGvZ5t9 z{N?O(Y))V;>LV1P==BvAcknajYgL4U$}7LbH+9rO?NYIpd%n)vVWsIduBgGUEYcLd4vx32vb-n5Segdb6nW3b*1e*-Wp&3*s+C$1Ofo! 
ziH@7fvCLU(s;DVaC) z7+XFX&~5^_tNC*#2rM80HIwAd{DuN&+D`E`UlcWIL!pCNQEQ!9_Uze{`ih>h?C8;> zVJmjd&wVQ-xop@K#N;E_Zc#!Dljla%xwiVD?Xrn>SXY9wA~`4VJYjc>ZU^*8Yd-Wt zyo?>#BC^CnDxyclltQ#OXfca9up=G~ng1}>iJt;U|-5I1sV3s74Y@g}JvrR7afjqx%_Fq9xTCIVI?w)a5k z7@%o*IjfFNXJN4vFTnzX(wBx--)tO0N?{!$(#Isg?MxILpcId-PBNN(RmUGLllTcD zDzHbcI9GKhR<-yX~o9xqJe5R!fKxj zJ#do=iaQyb_glO}9KG+rm-ILzmByvqqG2_A?Ln>>j#wZEbW6yNh~RJno{tTUOkcR@ zQF!}t@Sh+&{lLjg->oM}y?LYDD!RhDg-@vEQ7OQ{Z5}Hw%i0DaUr5-{NJ}yZQJhX< z=r*!rVW1CemcR1o)Szduh98*g_3l4rf)$y`!J)Zt-wn@@4w0DSYNNGwpB^&`yKT{0 zUwdcAYH-;yZ#(8%qTlnD0$75Tj?G#)dT%;FfIO1I_I=$}@a$P5vM4}pDppqF-V5G;c>oG@*`>;wKb^wO@~ojU`{zefvn?IL;*}WZ7Vg=J!V7NihDhg0(17)a6K8PzZbu`wImfnmdwWO7 zJBYF}`I)|3q%@ln6BEPVw+$GR{;8Nz;mTISoY=7VfX#)Qwa7A5nkf?ls zcQdHWD`8VfE7lm@l|kL{AgG|RjBE)c)&$oB_bxT!j2%IjeU6Ch9@f;t#fwWGhJpYk zDYcAk^;}99;f|2t!@#tNEG!wAG{|bmWA#(_ZLDw&sj!AjfdrPq<_A{tykQHN4gfo5 zniZCTylYqVQhxQ*RRrKhXv-We=He33=S^D zI)o5{4OsWuch4&;xp5h_ORFqXJq4sg3}E_*o{h5gger(CekD&HXozZNZZTAW4bWy1 z4REuU{`?VnC^=uK>&K6mSh|s#o)4`C=+kYHX0i!ql-}S!YE;VwsyIfLr00N|Hl|6* z0`cZG9Fp;phSh+01;RN396|J>MSZ^_*#maAqKQU3ZWRP?$fnYh`NS+mbHj#d7(Wi- zjNrF7HgX|gUZSpbWsG4F;c{3KK;(#$940#i2?XHgRDSjYIiIBw+XId&03*8+9R35F zN_N6t0%mB5A~Fe>DnG=mn)9B|xEo9hs|)Bq?WoL1nY+wri_$fHQF`$qS;G5}1Qo0F z6j&265fN2EqnPz~=D3m1XoUcyd z&P~`4gAM*d6nn{QbpB96iZL336f04`j_p`dcTJXGC&O(~ERA^xC++wlQl70HUJ2%> zxSAWT@oYFUAx1aRQY`fi>xj?&?)D6cH;J=e}{R4zX}ExI~_ACWFZeT==WWty(8kCwV# zr1wZpO27OE2VEhfY6P^BS~f#v{5?QY!Z*M)57nZ!43_AQQKGfZ=1J1gAJJgb&5LGR zSD0{*rC}NyF||EuU*OG#0WrrMZ$bvUN^5JC+%!S|*dr)ugn~@vy<8Mo8bF`J(>DD` z_bu{DM{L{xAZo{BCTfcS!BZdsV^PWCE!=;<0pGs?JG3R|{-(qVU|!LH+9=U$bG7em zgoJP9r7b(M=Tlp}!7`IT-5!pWQtYgPz}%`lhGcFyexm7&O~Cjit6pfP-0U}Ej}SRg zCxh?T*V8QHlXImP#f&zAa3@>PdPPV)FNu8NSN5^FR%b_8?G2aQWx}*|X_0S1Ux=V$ zU{{wt6Fa9qOAI@}8ePF@5}&JJ`bcI;(JM__1A!nGx8wL0-XmmlA8)~3=}-e>u3WSx zl55FOnuT!w$H4xQlT4tsqi^bKWk9nPgD2k(M8wrq>Zc8`;MD&GAaJ(wu zUjWTpvueH1-?IYFA`$Sg7+T)|KtmKg{}{x);0?iR*F%h|i43$H6z<^ga6+{#Y=auy z7TDQiuV3pC_D)j8Ski(!uDuUNb@N5ZLcoS+ZjTF5TO_M0*x!6PLU1yaf+-M?ChhfR 
zq%f!kN~ru;IqyBM=%zA)A-eZjulF3(bAEG6C+>XO3m96$?-ie01eqJuVbLi_Lwp*9d!VQFBh zAbSe&Oqv-mU2uRgKR7$MGisq~p!99;+pb z4eLMzG6`Elxmm#z21LK=&@-6V3?>M$A0hBY4eK-&#ZbU#pw`$BSsIOhq&7*$_&`*k z2ph|aFw#(9;lcY=oEQVdNlloIvYtM=_|XwSe})X6QFs#PC7LW7E^+kYk>Wh!C&j zLAHM44qI0BQeA( z%RuX_>#4RNhKc$QEV@s7$uK1OepryaAmnJsZ?`Ny&w60a#E#_`<{qns{uMGWP|IYO z5tZwly!arH zQ6BrjMKE3%;>nSHqhuml;tl1#lL>%;D&@CUGO%xqI#<`;tQO-~pX`7_il>AyfO;xq z1=m@%dzKq$VMPHNRzS-rM1y9~sk>~U&hpc+-1_6^3>xe$=BQN7dT*TurC3*Y_gOAh z3tw%8s)*_Wu_XclJ=x#xs5Xw3sIH-AVos#P#Pz4CII^WGFZ$rUG9)rEKuaE|G0v@8 z!R@$oL?6!#Z3zx@VItKm4-f4aQi?4{B@BfUzw3#tb!Boeq!~ z=oFQQ=CpP3keIZ6dLl+_}$MZ{0XU9K>B;2Rkksm8H|Yt0?FdRVRa%!jLTA@>F~Q!Y{|7E{v>Zgp_9w~Rqp zT?+tF8ZkeVkR5(vUqM0S!-i89+Q}wEC3#uSE`(rx#7y;n{}co%LGeJ~@Iu+SIFTpMRQvpD;K-z%DcQdfmc)$13D*=lW zr9Fz;oGxA7ANKo=sKw076U}koFEIO6#{W&ZsI7TLj()^<$J|K6lM`+z1UD z7_@}na_`lmG5esJ7A?uAiT%J=5yEMBpfgmi)>Z6nR6YqDMB<({_!jvc(TIhMrzVUr z_+kmZxaX0Djhn$mr4aQiB34EO?I{V% z8at7a5m-PEb=*UO_vAO6$e$%DlMeia=n)BvHVW&1``T8^$>Nf3?C3ox83d=I-|ux0 z1&rSgGY6Ehb8uV?4REg@)$>p!P<%l$lEMq*6?qWJM8ExlVyhD%=wh%40aUVLrAjJ? 
zF4#k7k;N)Pl8an)d)gz_ToXTs@$=un6^GcMOwE=Ak}&0pRE3*xV{{ zZ;RD;<~F&6^IT+SW!(qcCwL&u!~vlfaoVRgEalz@_LK!E;fC(ULWXz_cKySS8(}CP zID~cXfeBFXE8Xvh&06a-;e}Pf2OcR^C%`uy&a@$Gpw=K*XaOq|FaTCCA0>KPHRw?Q zY$8mTR45@jVx(kb&<+`PqvQvn9D{tnuc-8A@WwW#Ph7Oqq%|Hy9zT9uzEV9b^vjX8 z1qB7W;Rgw#x>pjO1Uf$8JAf(98_1>y4Bc5}SyGQEsps{a7L7eE5o-|r;VrN$>J}C; zOV3Oam(ZO;u)R?3rDeEP83`%noj#vV@Zu5@lHypVpS;Vkzo>u-(Bx^1Ct-S_JS0S` zG_)y(35EfH>D=@nGJ>0D6Sg!b*2b~(MC`CdOg{uD?4%d~{MXXc>g%PZ9SNX+3vjp$ zKIcVI!Eg+8FbK@Hty{7hib1wF=QlKl?NvwQo{t1epU`C6?@a^5>oPpSgenEpV{E^> zn*rtNw=V}x6E3ns{Z}_adHPH5)^X8p|4~@Zuu*NMcSyIrM0|Hk<5X21%zpj6EzD>2^LX3rzN$5nN z@HMi>eiN0ISw--!z=8$K{OT#s>*dSMxt#i9Y>&XRfEv!*x9U1jjU8q$pnjRRU_lVN zbIe|Rexu;Ih4OoSPN6CxDoO}j#2@gViQwyn$#HML00ywfs*QlTu_CTHq%sKSqy?^Z{tAg=;&Pm?(NBEs_3Pl@D`$oX?Qon)|I5DrZ~Xm z%!flu=IhZPY9p5}2jVnIwV~_{4SUTiJio56n>Taj%v_|2s2sMec!iM^9`oW>vGWlL(_GDW4*WkpAijPr6CC!r9=^Bl}9w}ktB(fw9v9WEuIIV zQYk_jB&C$1QiSMHS|p=nHApg|vi_gD^Zu{vyw`iKb2`TF`F-#2eSg+{K}$+X<@25R zsOKbJj!|y6!RyqGThaqpbIxgQXcbEDYj_5jcF}!Sk!bP_!uurPZHr_dZv*akR>9LcZ|}h!o0cX_wVNfE727B?1C_}+3leG z(}41?o|&Xg)lX4eyxOLXx=|j@O-Y9=DbUDt-Qg zzBC%OwY-C?lK{lY$LD6dA(tblY55kD5}dfJ`djWe&wKLZiO<(pvmPQ+LhbVtt!Yv! 
zPm|x`)GbphZ1Kqhee9$@85TP+NEHiVK&PT+c4?B73P13q>448#?-#0I&<}(-LvzpP zHAA@FIk<4*8Xn;^KPvsgF=;9J>n&NS^kZ3o|J@95*UN}Dfk5+T-S+IFii){|Bhc%l zYfTYh;@riD2(o~(9DMvGX9xkyarWO_S_xN3MwT}FzSt>X@T7CWTy7084INn##4q@w zz$y!YhWLDYGyBMqBjwJQTy`rR`U={sGQ}uXd;2@|zS2-iv^txnxje_;Zw&4pV>tBF zgp58gcA)zOwIkM;!l%?V;TpOb5`^g_I)rGz4u`iJpDxWXXt}&j!>2CD>^KI7dKu3r zk8ofVfmdFO{F>sm%9R$f%w0f707l&FHdE>zITQ_o-fe2+a+1E_s5dX(b;}78@b-uU z{-vxYu&BPoN#t3kn*;~zU$HzNId$O{H+%u}RjUqxOb3vZzNa0C#Z27(fF|U@=|$Dv z-db_A&uaYuM>PnNqFk@nPqt})McG_|PdQ7*{Sf&mY1Blk(6(ZVr>K+EAg#u$+)E1<;6|o|o2Ha@)`*I8)VCD>!4Wg1^NDVcshTEC4@o z1f=MCtwGTF8C;!#ZfaqAE@^ioNJ^?Rq^zXmhBX+!0fKt!Cc*Nj;>ays+uVqo43K*% z6u?~k_b4nA8jJx%avMi*zEDT(RRka8=Qkzpe)PYz6%FZl?E6V^&&3n zsonMzPFx-WS@^7lo1`QS<^eIMmejN2G$U7|+x9shyy+$|SUtI`cyW1jogGbM-%rU9 z;kG=~xA{m^9t228clv}PoOx{2eP}D%SIkVlzjI9!uc8U@IdI_2o{h$S8?clemQ?&^QYN0YEo^X z3J*79=Eng%v8QX=u@y1JeVz3+YO~CmodLiIq>5db7?KT4gsI#XA!#!r|HOv%ikmSL zXo65&&c@40M%!T_{M+9dg|14#{G*vdvW&+|3^uTsl#vR>_}4jbm82HsL`TP8utfes z5&Br*&z-Hi;U`eQWxvNJEtIpLGnSjEy?ruBUvL?A&o6_9CZZj*r9*fzH+N~;sM6=W zl9XM$c0t7;@H6i;_e+WM4$mO-thH5Mi^jqXhz{qLXT{`_(o)Z|Q5ROcD1<_=C|z2A z#}`xpuKU>ox9X*Lo+pt4*<=dRc5!s9@wf{ z@VhyR>U3VM)^GHP=ixm*VFlVs~{ z3wI~DSZqGP?jE`B=_Ame+ya3y@t~ml?Z-u=DMW;i^2uixW_*l`laNIUfN@V@lCH%p zU0o-!x~OsdzRjon&pXL0Te|0kjT(~u0$EQI=%K}B;-uR6;8cE!LS(l?OHw)j9HU;&zL-V>vaIr#IM7?z}@NwmO8=}W z{uM(%LLVxQO7UJ2!tqhXa<+zJ)|Aup)`tU3$Ds41tV|d^I<@(2MW)|YYA9Jo&#Zq* zOgKl@qUtO(8q(;xPQ`NF!cZX)btbQ=sac&rcn9zHu+G0;W9A^;GK(mfAWP&z(C ze?%so&za&So7#hkkCgV6gbHQE$_i%@A4sNH1-Xd-P1GCTX-)U0= z%u`8RnJ;x(m*haPs3z;rNxva56*|ORk$)7@Px_6tqt;y((Od^Oi7D3$URE_j(lJ%! 
zpxrxp&xCCGzn&l(h!BH|VOCix(6^Q@e!~93L8&KLVZq8S}oJt1sI6j8n_bLmI}OF$AGYxs98 zUJ!w}hp{GA@@>g~(Hgm7I#ojgj$aCN0Zp)m8k6f9dO>yPJ7d{9|s z@VYzc_?>5qCv6^gkzY>u2X#yX zvPPk@kXJ<1?l?VT)oipBI8?+jLwXe+6%}Q2WA2jcLH*y0(51Jv^sDTt8&w;T&NbW1 zE>&Xt&&P{|(^Q5d)*d%FTj7hYZ?hmwZ6X(6C%8*l4as0MLDTRF3RSW7=4#%r9*>U9 zwThIqy=K5$Sz2W&Ir%V&snO-&Vt4{c-_4!%I+$t_uWQJ`iY4O-_CIXl#V^!E?M)Nml&Q4f%d!02^(+-z`N?ln4ff%H>h12aKKAnHi zx1r*j30%ARZ|;PN6O}l5>&Vkn!3bB8_5|0jw?OCc5y+RuBX>xs=#+q>v<-1 zCAv2=tmtY0Bln}qngNfA>c(%J*Gm`7N&Tkypj{4-aH$!Eju#@xg>%sazyI6v1gj z=?Ex8|MaAQ*1g~f)7RH$j3=FzR~`I!nxXzjomMtV$x3;iveo`kC9&EM-)@FH^ywgQfZlBxtP zr9+Tfe9;!vac%sC_Fd0vGvdcP?$Q|F_ct67qIhMkZj4C!>b6Kdn z3qgKwoeeRuKH0(5D8w;yN!v{*HLPZ^GkV(@_BRbH`{6hNt++`pumSP>QS9x zz8op$d$#WiP)HQw;K;+irq^k-Mw&`$U^MyXRY8@GGM+V#wg;Z2Zs?Hz>1;Kj67u$AbYj|1>K=?wfBJqDV3Pyi zm{mX{g=#I5bR}_MR-f)V%SocBOl*{M^N$}f_@OJtv`#&B z$|o-?{IK-#UI&=Op`bZRb+J<(7ZOqoKRxJrRE(J^M^;LuZW&6^Q&35Kmp+Ku?ux!O z`rxa|oIwxXu?5h3lIZRxnz8(|v7GTjP+V+3;&uDZj#?=&(=U3qo|(Hg1}eQjNuZoC zS$}POY=oCcYgiAn^y=%6>EsS|QQb#w>m}!`&gT|`s54Lq%JI%PdOeGeQEjtdN0BgKTVcaVnG-;z0ntNlB>>Wcksk=*IKKi&AKdraj5rxM8Rpbj z&+Uj0d5{hQnVfnHLWh3uHE9M8fEM-GOQTU)X39H@_z0+1<25$Qp|=){%bB<5Z{N18 z4G$)>15?gmdV%n)A$xY+{4}A0;r@Cu6SaCw)l0Blw;D_r_WjPJgaGEmz`pl0zX~y6 zeQ%re*GnDmB~t-o|9I2Zn@;K|Wu#vqx$Ke!E3?*vogsc3^MbC16>Hto4VTbL!&Y{b zkvH|wT(V>ureCr34^KHlwfIR(DMq!6EL=p9L#F z9t{f#9w5s|2P9HA8Sg8(FEBjZo+x$3Srm~Y$EWvww;Fc?7&PvLe{ll%_A5X>Q{+d! zn?Wv@zm5MDhHB&`JG`@)A1cpFVNGwvI0EO3+M(TuNZ`UJs%Dq+%m+CC1pv6f)?71j z+5P+uGQ%A&(mEh9ovCPz|F4KEoPg0&)^TUErx9k;Gp%$c$iOt_qwLPJ`O>2l(Q#O0!4%W}i0N1OyIB>U4RH;1K9UU!ax+y{Azgc#vM&-uZ*?+3(DX(pBiY&6BHn)ePT56n*4H(p!p^Nx&hL-l^xUHy6UL;}QJEq7@-D?b*3c3`*w z4QMjO{|Yg+0LPoX`t*@AXU+tlU@-yfu!4G&LKd;549nKL3|WF%~g6rXMz8Xm?! 
znzKC;1T*C6#2sGRSPaq+!*im7lI7Guh4$mGy~sodRFB4kg|ZLvJ*{X2G!&1Mm0Aq2 z3_aMg>>60CM+$)p^XcEP;Tf)llWYP09fJDg0Fyf&Tb%Bg9)DpCkjFCA>nJdrd~9Wyk`wdF#5p zJ5J5=Eo?Ep4qAWRqxq70$&f3s@vP%-o261 zOG=Lz1Hb5ozm?n^2t+uQUOJX6cR>7qq6d**vcy?vI;TUm3#gp_sq86=Dmje~7(kxl4<8In zMe7QJ7L(uybE@2y+v_Y>&5^O z8l=&IQt{V1%7wr?M!e!5Ih&yG^~=bFAtJ)UP)9P^cWP+`u*Ij^E7hLK|pl0@?HO^EbgFnt=}=>Z*(FHM;IYj#bCt z-sFRP??hCoX<{p8K!SUbC~<1n2fOeRI1&}q)z@fSLKNBn5MNKjDqa|`2|5x)5*c57 z?e_6u0C|*jyt8t~KqX~{H5R}d6vY09l z8*qU=K0DrtE388J>*3f~ap+BOUCpXj0&co^R|E62Rr?6<&ux^L^0I;dym-S!{O)1X zU}Ei=DJiuWtXo4t&{}Po+3hP_Tx5Y;3PI)k`}c1*s9Km4UrJah*ML4lnBp91`1a)c z$5+tfK(1K=GO;b;ZeBRL5HeT8od&U$M#rd$gw0*)&eNoeLE{3r=uy_vG_enmL!jX{ z!`N#B#-;k4SKCMX0#|X$1@*38^`o2TJ-rlxY-pxUacMvdqhYeIGd^QSiQ)x-0iSHG5gZ)JNeAgX_@ExFhS+Cbep?^(k={Ist%d)vN!o{clF53@NEe%rPTf%4&r~CrQt8S1=?nA%3$w76}A3aGJJ_JN&Bm$XmvA-eI$eH`Mn{4ueh6(#?kC=(8B~TKv{2B}I#IL+ z9XReZTxj_H=gT<3xRmhX^F+Q+>t-)+#*GEt%jSeDJ+38rd)?gRkx<|;;Wj7ZlXdu? zduW6xLdw{fEi04L_m-DIJRg3~d9S^5!8eNl4@~x-=V|T&@wDgrq@K;22IF)qE2P^*(Hr2^ z?IwsDQ%sNER>^(XS#%<^?9_#N^sbf<3~>mF(T3xcL3nbx`z>5~MLTpVre!pIKNBA> z$c6$p-q(G;`gKY;>+D8jj3fC9s)DIv!j~FLi;#VG1+n5e>2sN`{ zzkazY@Inw!N**8A;BHeeaF&K&c0iiS*!Mc(UB%CV^6)88PGQx_6&IJGyC8u#^K7sb zh%RncHh@ykj2N}DB3P{`Yaa*#dw`*U4P=OcN?#HRz?8WFufYg@KG`M%(f4)Q< zXcPDtTQAfca%vfZWXc0DYtJtRZVdY_@u^;ve7;r(!(43jOFaJ~cGk;P;;3E?S6Y z`|Lkc^_6vpOSYdPMjK%CMf}Wu%Z?21D;++Mq^{(lXM9W3hQ5HBBF7E0Z@M7c+czLE zM^SdZj?QFYTgL~U=>QBk?r1I>=VGNb9ba|-EA-3@FkDUbzA6$To$V013wJp=n_1FThZhp|qv~phcazaP7r3URX|#<6P*S_Q z+yi!ueRw6U+^dF ziM{Iqx&pnGH3EyCG{fuC+i#!R+Qy(yuO4#ro*vBQXf1z!^ztOJ{~4k{?lIuk&cu42 zhN^|;C{h(^GtGPAdpE;{SNUdOR@(Pjf89Zkl>#36)YaA1w>|Pv3I-#1x(qPCp?J0w zA%jBM#thvdWLF<@V5h?`=utoQ!hci}A+mb&FN^d}&UWaZ1js=I3Z?4p9Mv#H0hZzV z2eQ{Zpi!r)@XAq8ksHv5AA;I9D49jK9Q6+)HV3zL^&=6@FDma8fe?y8;EnLPfOt@& zNS}a^s!h(iZI%xlXHZ+>f}Dn;)4C_E^J10*33M?-%?Op)3}FvRFasX*?!x|$ILxP{ zx9nxH(&ifgWCU1c{VO{@$mGVx$1!MwD#D~C4MZKq3F1u>vV-O`Kf^^&LPP5H`CF2+ zq218@gkY((hl;=xXF=yOb(|88JHtkEo`!bIuUp_}tL1gr7=R~JMlJ43!&0RqU)4MI 
zE`b1b6Cz{OEPsN$FNPu$My9r>2DcKLkfdrQ!mU^%`j>)S;H(#^k+<1MxgAitia^xG z{LK6KURVWPCR9t(XjyutjiA|AZgUszJ^&bsHte8^S_MD84O+(NCmt~5TSs?12Dzbn zVC6g|%UUshEjg((wU3-26m7lY=6n7Q=jBzKi$W0KaM61kXV;|beVzycp1wLaWi>1R zWQK~n$p<_X-yaQnxFGrQ3zkCXKwQ;0+E97xSdw z-+|$|JPtkoD?&B#J$kamx77xnnqO&ahCV=;;lsfn4}KF3hJc1@-Is6G4CmZuh)v}6;*It0c7Fc+RBs=;do(bIi{jd49Uy{Z zXI=N5xNp9uX7^R2!h@a_eGCuYMaYEw4(I&V zw|P4{=(PR$`_~;{8)$GFi{+|arq`;QafoGiDDv9=*JxeAu#-A|W1y|M z%a-ldAW^jTrSM(?ATC4!w%;_L_obApgMa)49f+QjS-kjJ$j8QBfA1v=UZcFcd~ajN zcOT>$*mMvjj8_G>cO>43e6rsd`+;0AQnJifU8vZZ3c&C4)xYk`@i7n%Reti|se>Py zU2~o1iOn>O1`>YzqePn|uhARbEmjU4!nI0a9sjz7>+2WywO3ET5Kx9jHypAKEWy2+ zxA&XWs(4kzC9I!~Au~DpTUKKjzHjqSsefU3-&fA^y*f|q#VWk)&k^%L`IG-#!!CJ+ ze@$99_mkpdZr#}alHIoIbun;gBgdRxOuc)t7rM+VW=!Z@+lGF-r+hBeKaLwX60&o+0i2{qMH=jcFQ_ zF};(31Tf;d$Ix6>!(9ke?&gkKEyyrUD(fLJCan>PLl9!6u`%UTA%h$bl?UHj4j07nRNL4>15L7x<2W)hoWDo$ka{H8pb(4Lu^SEpiRfdOq2qqo}Jp1*(Sq@Au)(mhOnByhRMvjB%v4AzCUR=pM9G$#V8_sNklkeGA?c{vd6OsLV@V1WC*ou9ACG+ER9xP3At`m2JPBO9$Ga-j zHLrd}A_GQY7%mh`kEADzg_+?Adhz7`u*nKMWjQ%H=mhpe!ltUoDv+4vi}Xlqe7(}- zsiLOOR*6dC=TTZbVXh22a_|cCzS0Y1!C>t{A--Lsas(N9|-B@_ciufpU2yNjjL`=bH!ul0J6YQsk zej)UGsu!W8+uEoTcn;rDw*e`Ft$BTL=7AY$WkE4-d$joSh}zma~(Vdm<-O{}HM zclgu;L3sqmgThO22+iO`X%C#b&VIh(1F%KFYVu@jgyd?@pt7kW4%BYe-fwtNUEmEA zR>fvmKy=7dZ90dR91RZV*s&M3iKbZ^q=5j?RA@LwawR@L^zG=`dK!7Y7qfOW*!7oE zYem`+5`IHLaJsr)In8CworQE7+=}?8N`3kA1wE9rz`{KLfIt002p=ggU7;<13Yj}QN> z6r&mKQ&4#RX2gJ3JRgRYm*^9M)#(#@A-Ts6D3%Gdu%0`87TLU|pmBV&YjxJV!o0qB z-qJQ7DR>w8aOQU%N_M+ub-V^D9`bh)G(j28XEH=iNXP1($ngCi{r6C&%~cAwK1S~d zSF{1b7#7L41(VB2TNw89EkY_##8QV-aP)A_Ijn&vm`@bj$4vm|&3tm*DbUO|GWn z7pbB^M``t~TkZ@P`9%<=J>KCsdn}j&;#fWi6IvWo4QJBU>ivcMHX-2&oxowCaXbk265YfrwVF(Tzd%xL zWf#fUErADt5%*S9(6-Td`0ye2AsB%uf@TmH#TGRuQ$3vz*&B^8TCkU?vHrv*2x|Zt z-4VsnG2guTl5C@8Y>2K4kO#=OY_l|_FcQ=`W=;f8q}~yo;n|!_*fM*&G`B~qdvF-NBiv#yCI-!kw@lVo`_g#|jyt|j zSHI-F!R(6cn+Rh0A0_#nUj}0PNpbOvQ0!b7#QYAg&_R^Q%K<*NXtbb|xe5YU_2R$u z(=v|bjg*v>HL@~^F08|v03O%>pr+rK;=y@KF^LJB=!gN=i5r#r16af!tKe>MzEi&G 
zpx|-<1!TWsVpV2#E`84UAh?5({jK761sHz|)Vu3Ger;-MItXlTtE$ouH2u_-(pvzU z$1_7nPa)Ozk1GZ@gc!|a2l5@wTPvS0MC%H|FzTI5j5QrB#_;KW%MHSCrYi9}Xc1DD zBl6*mXvB1|@a*Ri#p6h$1kq6|IKcbR4#L^kcJ=XMNxijOOqwxL9s(oK0_LIe?*6!* zVKk?WipM)$;5ue~>!Ia9mzOlWOs&}f)UVyH!s&<))VDV6o3z2jg;I?Pd11?Bsr{DxK>$>QF%#zpw(Z~NR} z_^_ue+#&+8)4}p`^J;C|=>wBqqK}&aZh<)=a|vlKc+q7I0vDAI8?FqT<(pPX@-ACN zZwF!&jU5)3D0_GsJQo>cU%{9~HfPND5fFMF+MjmFBD$F4EMI5DE8DkZ!HJn+iPzblrMwOVUEo(D1XpWu2>LXsVRuE@NHV-1x&r&d2SR zlZ1@SKvj@IeCGvHAkpjumNgo@2i|T zo=?r2+aB#dgG4Q2VPT^aXZb>f0ny|X;Bhb-seCCuUy7Fjf)d!Sh2k1+*oJS$Vp4t| zOVx>zp5mH!TObC2xcwrApUi@(JZ;(`Li+5~U5(@;1x#(3Z}j>z_+-SfT>fnbqfGwd z|1>0!!jM5HGuQkO#C7PHp}NWBwa6p$zW)%yny0+x(ndzS%d-UR;%9t9L&sO1Y$}YV zNzLRkkTq&=WiCqVaTRs)BIELa?;~pK`$?XuxeH|ZnmgX##Hj)B%)5Xl`8KWYAPjEO znsG{_yD#YHhI3?hgMLhaj1>Kd^5VeFmpg&Hp9Q^^Uv4atg<0C={@!s==#>DSPVmtF znp@*a(FeA{SNuP^EnwV46?nTzLI*`x?F!pnmnMuKk1aGhiSsDwWoz=%1C~Pe&gzrV zNi!5`NgJQk!@?B&I~`WMhZ_}m8@|gN zgg~VXX7nS1>>VAr^z4gI#+MAjuyGSIr-nx%)0xdnOV&YKe*ndu6hf2>*CA!YcM8{S z&gf;?npGM~^8sYG1(_y-Xy1>P$p3DzRoWrrppjWv_a}jg8m_XSr*k2B7^;}5Wcn9>n-G6w{Y4( zi`4jTWOMxDzjl#t55?GT&HBDv)Lf(Lj(DwUVq2kB2T1uZq6eyPi!%#!6jAB%=X4M1xU(R}&Ar?TxD9HPv2=z_{%F3L|%zL~a^TS)_+?gV$7P-*W86S)?TtOj*qW z1zzL(=@=~zBO#&v$R5uY2?oVotngN1*xQqkFC@IeDJIbs_Cg*c2)QCevTbqy`2sYP z~K^#2ILhOzpp z#-Ef2jCn5Dc257IMUG%x8th)oz_ zB(uv*-St`~2;ixhoc>t({DSxyO+E_xy{L=?QbU%}sxa=>f@{ECeV} z$o;|)W{(B%ax$_r75tQZ&-nvuhU51ko^Ahyd+>w&#w6N}%j6JqN*&6^##u{l%Gw(6#`jgjexp4d@JTfNPZPt_-f3 zD$2!RU;^Ry$B=;y#AZlUioFVz^1OcYOl|0TXFdxBpGtUAaCMRb{husy6r34Xdnx3o z{BeHLyU~l_YlXL42j&W!|H$SmL=X82DWPUzGZf~qSp`y?R=a1(va|FcvEz#i*`CW) z-SfSEYOR4K%QYLU(E_}+c)N$ZdJ?-5H@4I_5r{(-I-XwI*RO}{11wKKedtaD(8IA29x zi{&iHsP$b$UN6SeGs}Y;+_6irSa<(vP?CF*9zz=Sg~s%@qRSfaoJ@ zECnZ|BKMoc(4R#rLt7jOZCT{@+Bnxqo$?>8UML2R=iQ70Z$|lJBsQac1w`N)*B#z? 
z7F-v^D@nV|lME0C1vz)i9bdhJFFNxXU7bK8!lVd{T%^&hc<$m#HTYDOD(sNQhS&7?1*XH^xC0Mbb$ z2piKE**N_}9U0OYVj%<-;G$+wR{y79fpHTM&F@Dv=l95y@J zbH_X@62d8IE+8udsy;Fg66UFdV{%D%;TUdtJ_UNGcUK3NvGN-MYRJydee@ol5Wb&`v;uF*>zDv?fci>4Z$?%GSi&5j zfVS?>mGIesUXFO}^}K(C8B0Ov2roE0ko*8dV) z&^CtrngkRxeTl@gT2=vE*lRb~{v8l+DMrSdU79nlJTxqxB0XTMt}}|7Cn9 zV*ZBvN1!cRG876l53%8>VWI&ud4EQL+FbyGBKCMb9FPiYdgx_914RBqQJZ&p5I#>L z{=De{=Kt`2^MBmehSRDx3Z}v`{tfxZIGpnSqtvh=oR??nL>Yw87+t%+R)IGKXBHxL zn8X#~CBx}j$SzZ{4;}thQwu>{d(5!VR8L(OK;h>nn=|@`rdAhUXIS2E8F!vlnJy9` za6`kq0by?OmoRRslpV@l!2c+)AL)?j*P4B^4hgkSTH9Ls_{ zl-y0XK-3T|hB1tAoNk2dlj;)*=;~0j+8?}wuL_`vR-a(G`oT+=F3FDL?(y}tIks|- z#{$W5z^uTyPp!O`bzDJd7edG2kt#VdPTE$~#70O;H!a(Y^{v7jIPp9Lw_qL{Lt78} zyBcEwd)40iR)6o`mj&N;fwo@#`gLljkgxUeuL5(#)s{dSNkUXIr{Je916|m*?_k-o zU2tX6j)^vRE6DeN1%X%wg60yg>gfoGK%qp;zd>jk)Q-&EgPi2kzYWw(auzNXyCf3P z@#gXbjECVkiA#rE-@1przcl<;0YET13y~1VKt2n`zH`<)c?JFxU%EOEtDV|c?6%<2 zgGcg;Me*OGB#$ptTRg3Px6qBX*>V8XfGf(b843$>G%|rB0#;}6ZBKyi0~k5J=4g)I zA*YJ0?=o23BKdPv>Fl?t5GYbf{>l7!E6>v?(GSAR3^ZYFRNOsSpP2UCrN-@~N7puj z!4fa|GwtxSUF147ECaiZ9CL;+W?5Y8(?`z8rzKG!D2Z3$TH%W`*v@A<$_o{<2++01 zvpF9+lNDcpkv|iSG+HG8prC7FEA8IN4Te^N#-c4><&Vx>e5dN2>r8Y(D>oW3R!A$fP>PcF(@1oz_)Q;?!wq4Jw*?P;Q*C=R0n_#PKH zE>Ig$%x67@ACHB>oVoA)FIyDX!$bW|c~1~WfazxLpQMG`Um>oFit;&siF85CH8Vjh z(xl}1UFNvq3#HWmBurR+Ie+noNM}Vu4Xl1d+C0Sv*m%;uvlZKYFwY7t7VVhas-RIkcI7x!M~) z0~a%mqK=MZxI6+BVeb0p_dtO9tAd~dRhbSG3S0WawT67bUXcs@6;`Jp3-jva!*e@M zg2AOQC@3!A8FT?>-j>jZ7S7(|%?@!%$+*yeCcd|e-M;$+!#4ej)lZ~PYtjC0xa;Xq?4(C(X1 zK>o=`7zXLom`hWP?)!JA=?i|_HaYOhWU2Hl(;Whmp2x;YBLrm`B&zhPGG_skgctup z%pw9s88*~JB>VKKD@b2aHMQ-aaPzHb!4glMRIWA$`8J2!8X6jyOD@v^ui;94n|1A5 zwOscfI(WA^cWtrs#zi_+!~=0^vsLRI5TlSb6JJ`9gx*@eziu;Zp(qicC}i&Ltkb6O zMyMru@8a?(HrIM!cIlN*(a*o6%ZQzE{uivx#nWUJ(Rl;X2CImvzkFSoJ7Yv2FonLZF~2&fLQ)&^o&Y8$P6wdDaGn z&9Z#3a9=)(Ky{j71KG$J_QVhk-;4nqO;{tON5n#ZS>_4V2}RS;GXUO7+JvP!M{!30s!Qz|mLkx)!PN}e^Id)r8F*}B6>Gd~E-0#?K&*9&GD zV)g7WUKE?$dfe?q)&^~h@1jPgGJ4iCWn@%} z9R|oZxPJgCAkMW>yE-wZ(qNRC<_F?z86D@U|oOzFkm5+ 
zW8lA`C-+R)3F^1|#p1?PJlf3A6zD(jt=kupW&x}oEipt4TY|_f`*$h~m>V(?-ZoJB zK`Ugi7LSdAY(YGm^CliRNVT^^wRT$&6hTwWjpHkDOfCNetqm{f#HyT07~x3`Mr-QG zfKAKKC~p@2?IN&#?}XYK3j8Xcd*?O-iY>#POMVWjgV?o6>wM5F==oX^If2QiEyO?+ znlvaw_?Vd?eUjTHZ6Swx&UQFfXox^m4L7V-@u1(KO}atgcv7~k=#kU9{( zo99l+J>7sKD9)wfxfyF+`uC_`Sjk5oC)x@ISL~Xw@UiWWkbn;JFhcPN=0qPufz!2< zrBwd9^Y%wOUVCBfwQNgq(|de_Mw0u9kCPlUFENys4|x7%TXBDz^V;WCykPax*?{Luv`~u*VsH*e#{~ z9mb5`_&E6VKQx_S+5h~SG9*Qddp+fs;e$ZNk01si$`b=}xq@cZuC7>MLclUjgf!J; zyvDn;D=Ix0qvL13n>H#;zzq(QmH$VJG{Mi6X= z+V(g=bWrY-h@aMxLH!w;d6F-N3h@v?38o57FMLlLi-*-ZkliEuS|%zcW(C??3W9)* z!X27wtqi5xztT{wY2hd=TFNFi@jDfbL$uPzMoc#h&NC@)A{QksosCP3H%<9T&?lY# zU+ZEyTBvD3l*>k#2N1wn>gwv%H39!N2}_gA6HKjjuD65NqI2n)8vXwX(~n~v+?J(Z zG0c-*8?PPmtS4Amy;R?@cpmo7uF2I9(l^xX!&w(Bd^E*omtq|s&c&FxhGdM^c+SO0 zIXZ?%a%r&M;qGiQQPsExiDFWv)|xg$Pt{Rz-Hx0<8JP$)odOJAc8YS2sCwE5HTEXV z9yww;a*{UA@rmC`4w``{E<3w$>*njW@3ey&*kd0l|if}BgQ;MD;T zu2edVIb+;C-=LsS0RIb&hv%o8lqx&mU3ceda4=p%+9YTDPHt=EC<3n`-6m3ossd0&q*o&$ z604*T>}3Ej*Uw^+jp6LNS#%gEKLp$^Z`gw;P+KzP_E+I^fTyAAfKZUOSF}0p-SXFr z*Z+ESWWea(EqXFV_L&0C-W6l-*E z9W!Bl+eDGlI@Xp;`ayN0jL~*Xa<{-TP9|&Po-3}{cttKv2rx*ShVfwx#!1SW1$6Qc z)zTkm*n?D~@%Sti8a0%MLCOisZ#~Q;hxD)0AN+os`P6qT3~QvPu&EQEOctnfi($D@ z9w?Q+2WSmoBb6VBpd`^f`_zRP;I<0*`{Y0WnRz<6;n5H>fx05jIzwB5f-!rn(V)r0|z+RkbOlwJco+~=&*HDhw3zTtHJxHsOgzS6BFHgpD8_)gT z$!+vQfFvcz04Tc%b1b#Kz&}5vi(R;LhE%l;Vpy_HLjgtGGGagj`%8ga2+Dgd&A>I? 
zvsu5tl;bCZU01-5lMppv+%qN8HJetxTce_{xo3sjj{L)bF+C7Rj^@KUccq_}l4X^V z%}lQ9plaAC@90@~m!d-)Xc5-pyIG}vyzhK(2BPc&}3y@MKgkdM~GCd zv?-IEt!RbXh7!C+RmI1H%|s&DEU?i3G-7Vbo(3rPgpg89$7MF;3uQV4CqMGpE58cn+xGx#_bn(8L`AA@|B@Wy#fK1}Q94W6$a=cS`$T>dL3 zgjn8r`S}`KX4+mIdN4dRX}h@1$N0qU)?2tsCL-_xRj}gj3BR3pi`k5GZqGnsn;Pbl z6}ifdk|L08j}*fUg>=8ob7U=VigU380x>4U3{dRRJGnF$1*2c3NIhHKcF=;C z{|A-7BA1I1qfLKnwLJV$xb(Y0vk>=F`a4I#>@Y#m`{iZ_nlS5)jqd0`~cq=JPDQhmP zk45Q+emQrPXTTXQa>OnT$LD1>*e5b&e-+o~yTQOC)p$KQv>UyjLAiOe#i@3^3bQvX|MvLe3>6 zanT>aeAm{J_F~rZ*!WKI?i42urdBl+5;@$CB>-NMrb(qg6xV#(8jYGVingX0`ieK- z{{b&pbG87>bo1Fg2UP1+X*Vshd|nE$k^YF`gHJ!?rUwA!TPP2~I3VPG$G~^zm>*W< zeL7^F_Fc*Q*zXTqQPniLN!TP{7o~CgWQchZmZ|~Rq+QCjkL@0XH~FygYk)GxKW^(= z-puS<`s6>FnqqNaBJ;Fv0Hoph^+e9o+D0pE*!8wx=ZNP67Nm_wX!9$24#>a>+_x8< z2xPF>gNTAK7LK!7YHBMDM=!|gV#7uu`XW0oe3ujCgX$#UiwPC>#UjtExfO55 zD@in-Iw;7>H{10C3_fI)6PQc7_VVz@f^edBZ|y#l!!Jm*&B)IeG7xP`_=USC3bkH< zJrt8BBsygQ14QZB)p{k~f&b>rpOs6yK4FG{@%5(j^_-`eQs!R_(F9eF*T=cPH0R=c z^jXikGW5r;cWE3kr1pF%PN>q2YBU-<8Xs-AVFIhO-($cj64-j5ali6yc=0%1S1-_q zCti!vfOMj?o^OA|Q5|fndE^95Gih@uwfIYXX9^NXE`Wg6+n31d8_l^W#C;hA#nHc| znt&@QsOzrm z>#RQxuG@vVe|wQstDvfCzRy1~=L%rF@wgc(CJn4%ZKyH*=1wPCKD*k)-;C zQ&|uAjT}VC`QbAkxjskuF3f2fqtabtl0y}DZ)vqRPs*^OWrv{Ae**K$4_Hy90oseU zK>b?_KZBd2n(L$6orqQkm4O9)2y@Bno(pzb!9Pt_>=}j&$TFT03kd^BYX3W{pe9Zs z_bA8uT`IbRTS%Li!ktUA;@k9C{Y_4Nk6E4mb#c1g)3ec6p`h(Uknl$B?u@JyT+fsU zh@O^`WC_Q%Yc$FCoytI>5Kd^fb&Xs10BP89YBOKFWc6)^XbKAmiqWS^%F3z>3>++L zS=z7ia}L*HR`Y?a@a*sJbO!C{0mRr+lxuhQaFV1{`VLB8AB(XOpqY@!6!w?opcp{c zF2>O-Rgg>x-^qwFDyHmh$=r*Wj3J^~emZ;IyJcq~^qV&&P{ z_w(nc_o6oQ-4{%Gb<4wyK)Fmk)$T#dxk{BtWO7?TBFfS_`DI6n)da@nToZ-NW88Vm z^zlcf!(<{d$9@|lb<+UBGp(l{B@7Rp_KV&iu~SRPPO5E2z-1xN(|(8$SResAJs+dD zQMGO#a*_*?HX+K^=9C{NdI15{cx{lCe(C-hgWl;B)kI(gas8+(iWvM&m$Le9mpuFkbLkNb zIrQoIcU-^MI?da2`ikIm1fSs>Xq+4qKw~D~N6H&o(0r@kO>hvfD5x5Mp z*Ur0JNHR29Qe?9=EX_&x9Ta$qi*yF39>Bz%L1h5VGHwAZyE>V;wNzoRCoelksa_+} zcE8-IFsp0PQDqoNNR@_=3bMfi5i>=S2f0N5mzE5H|H|GGyN=&d$M-<+?S{7%6k~*U znx)S-zM^ 
zHX$|z!%I%_BNdxv@tCnOrBRqa%sS7L4W>_yGTjx;ccOkK$@0-EChd<^aH8*hHT)vy ztvql_Xm(CthPqaC8Ul0XyX3`ri+Upwoq8N<<;4b*0gqC&65ta%b-6KBJF|c~xs`Av znYgA>>$DF9Xb`-DpZ6*HcjnUf$6TLyw5*#Dq~UjAxpR$l*QA%T5wwqUKpQ}Q;!pAh z*M>*0SD%a@NeC-6W}|nNahLR5j&ep4jm48!%h*N4yNNP<-g`gC=OkBKMN9xa{Xw@3)4OUlZglwf^fy zcCq5M-4)lM;1YWS$Br8wAj8H~?*W+H&+a;QX#Mku-q=vTsWTiI7_eVR&l;Z=`|Ur? z;X=sHV(sp3uS_zv>O_WkbmfQ>~2fbnK|J{zH&fjwT`wIu)3 z+*}^F8P1M3LM{mU#zSubT-Z3lqlXs`<#i+jrB?)EgSj#bPrO zeM9n@6C;t&{26_FMtz;ajQQ@<=gWH=1K|BR6*ld<|7n&Uz=us#eC?{3%&;10n4|A6{yJ%-j zXF`X%&8Ts>WyOlVMI$tR5(JVBOC0a;+dp$N5~=~y5+Q-3X*+M_wnz1@XslY7ByBh> zYwDVal+D9na|GD#yQeYe`h-yUS(tG1vQL$d!=EYng~d-tF=voN6k6po%ZJ+xjt{Yw zx(5I)HDRBi3AtLKxIn@jLZT$G&7lN<2N4`ZP@Ol?TH;CO8lL{?Y;L0ilR7jlvc zYqiUh={3WQD#X|Z3pFBwg#I4V#P0_~_!+Dh{;+jX~2 z_hy~1NHH=3@FYJkK|bwHfP=0JIvf&pA{8PHD?gA4ut+Jp;OsY4)kcd`R$3#vNnYiS&8g6X-wIuDfc36b6T67ecY>q<(r}r5 zk_F+2SR=0xc>FV#iZZR6_%swI^OakTo{q4+|KNegf9pgGHgqA$0VYCIGY&k8VG2Job|x|ffUMz{Ak<5a{pm&IKt1SW(PEs1Bsi$$^a9F@|+Mdc!ea)NTV&Qg)sV~PRN6oWdJT3&ej z4d2Ia(~T-&l(`8Q|1I`=UC%SaZcI;u8}^64KifZl1jZ;w*@g-F2)IU=d-XM$z`n0V zA0x`gxm*`QmcrP_qa#ZxW(S??oF@&f7aE$onhi@Dkp;#@$RW_Dm4icVE#?8DQjgK< ztlQC$cYM1nH>t4N+d2?gF9Nu8>7zVxbRCpJ5YJD68s!iI%D<#=|NlIutqUIdtqBdu>4f- zuV1G4V44t*0o6(o{2!!7I>e7iJAElDH_Vv4W==mem%r>tK>wJ7cq zGC?$|tGcJ%ElLXhZ62=!Oq9ZraFx!TC&ipFT%VRqyGVf2u0X?f0kJ`!KZm0*(0T({ zZ5-8=Z-zJtGtd$lWBfyf+gdYtPn3JR^Dz6)(UJm7V;*!TuffwZ-|=g#loP8#2fj@) zIw2M^%B2L3;clX&35E#^3!`K~j4eyfSKb&nX$uH$Vj9^gRBxXc9UURp6I6v0TTaF& zZ>Swfes~6{8WRynGxu0A%b>!Xj!g`N#O~-W0Em%s8ZW(&d<(8wle=%>@>(wr!bb;q-JBh=Mn0$%-*35wEdP&guHQ z&BC!rcNhc193LYjC6Rygx&GwDgH?NE^{Kb zRIuNKRrzI3NSuEG>bN`L4161V7HM+@{krGE17-+RsGNBEMQR zXKJe!&>)DxR{6nh$^H*8mEwM1Qrs2A(Gn-Gp=`H>?yjIF?UK(P^?aH0>OblJF?c59`mAg2<(o@j48+Sx;#BKv)pS{6p}|-q zqsKL);w;t)3f2aOaZ;}CTDA8!W(hb9D)a2^yYdcz=jqtNo4#;M^qtg?lST(6JcY8Q z#Z;7w&`ux5<=2FUH1ei`rrM8|mcv9FBld3J*Mi5YN|uk7nW>A+wMi3%L~sxy(JCQ( zegp{JP`F$g+Z?~435`8%?}Yj0hEfGr{*SIVfy#Mp-@ot5oF*l+1}Tw}F*MN~ zm9f}_6qT8sc^fJbnowk(BDJv%$s9^FC{w$3giIwOL&}hh@8@hkzyI(5uJ?J@dX}|3 
z8{PN!dtK*noX2sT^g!P$SNzHlTP?$^u$5w#Nm#aq>gRuBs|JuyL1mQTb!H@!$*r<2 zAlN6+`?-2U;XKn<6~3$NKu@sLpxz~Km$u&w<8OAsrIi?-1ysqk#0Vv!HCh8p0h<81 z>-OR(nAv4Rv@bo8_q!Y1sgO02`eMKTsuA)V`ZEB|BE$>{$T zt(Lo)(KzmQx;a=a&mjzum`-mn_e#b8cFg(BV2)e;mFWy=WIP2iX2v_0k9~cE%ZZyfulPGC%T{%N z?K!kCLLLDiX6}n5_2!*d6J*7k`wLYDEvnb(smBH!4jMs}LYcZLd$#Y z9o)COkdm?=IEaut?ZVy8I_`I~Lu`!Wo092xAd5`bnb%n0#1oj28$%Gq>_OW>1nz01%+ke_F)hJDHaDR{hRv@x2 z;&nrXdGPRWA76!XH~R*5_gbBK3)fXXYp&@Jr9&)AT6m8)#j}s1@G7A2+;5-f0AUSi zt%ZzVPN*!h5eWe>h9a^mCUsybc78JZ$&De*UTzx*TC@5HNzmWPN^F+_+5k z;4si}(a(OtXrJ>x#4Z$XBJ9%3x2+xY=U6UTNO$E*xn+5bxWLoi`=^F&&(Qa4Rkf&U zT5(^$+EbnB02wMzqMmPcHa&vo=l10O23_rEk@%<}2QxQqH~-N(O>*cAnYTf;mZ`ZQ z3Z)~}jOPzaf_5`;L5Io?O2aF-5ekP)ar?Cqz`^fN(z>?unsm{7IyJK7eWo}!9aW86 zY`dTBTiTjoLN-OgCc7@9h_fU2DP&(lT`@U9W0_9vl>i}%z?PEb8H(%EzQ*g5X*8)^ zo<8j5-oR}b`8e=H(wCh_)@3Q4GB_4!abZC9v_EFuJvxAlnU7MEMM692_0$(^v3Ds9 z!72l%T#qIZ|GZi#7walXDsfw2&?n-LEP^Qk!6>jRw(nEsxh|*I`I#D(ufn z*wy4L0GP6&Hn-_zE92|7buFB4X2Lc+s~c=LUy5-v?$+2&uY(%H%-St#8X3eN*#%f8**~>Brl8IhR%ZpS;@d5wPb*W0(~oRR1q`lwwfE!nsD7@N;f2%dm$L<^(Z+S*;m z)37pZ_VRU5AVrFynVLkOIlD4SuERC{Pg&e1Q6PD{+ed@aQxier$Ry%`pP4CD=rWDs zX``w~r3-3AqBg?MSTo4Lsv@Z6+JR-BRy(7|93D+HWOyWN-z!eMp0?0s_mW}xq1F%M zI(=9=&SkgFLc_$`M{xJV5>=2(MR|?3AOL1@fh_bf`4kXuMyJ+Bw)QZv-6SXV#!bIu zsi-jNGjBh=%c`cCcFCW$Q~tc^6&g}DYgY$j;}aN%iLwyvVDq~X_R(xR0h_Kb!X&_f zws|^JPMl^uo_vqfJkgkPQGh694h%m!XOHJvGa<^PFqMLE-toH)BgPvI6sBerQ(WrK zPEqsEwElQu_BG#V1BYw~v-93R9RSgO;suA|(B;od=C+N}=^K5d$gsHe0vU<*UC@Bu zF^gD{xOAGLz*~J@kWP+CwhvhvB_&h50gPT{%sHf2L{RgM3(haoaEr#~TGZNRCT%o-L*DaP351-j!WkUT?jEe@~I4XPHW%r;! zt{5U+eeMze&#Qz6!)VQ?`{Xn?`K4T zIOqmgHHq*88OwH?WwTpgQ4-QT;0L`HLsZj+&kIR*@a1+2smo=+3cJ3M!3m9s|CDK6 z)?_l+n9{yFO}6pM?yR3*o@wFU@9L*NZ)fDoW6Jd>?K%$nxpUw$r_!=r6X*&hyJH)J zh-mnQK29w3jOaZz|MjPr56*NE79qk~ zmwxwNBB&OXmRv!B5h0#Lj~(0U zJ4<~v{gy9hg;63-^YfckwL-af1xe*RGRmt3>LxzlXl(P(1^9^Dmt#fI7n=ge7qB$? 
zyh>>q(=+(v=g*i$?lwLhB6#84=k_k^_V2Y_x=MZ$uiW#}5^IL%FPfc+4ll^tGHc|_ zN}tCUWn~~m$#%Lh_KJ3SyJAapmNI%YY+b?j$gjgU-=8_sZSrO*i)BL~cs+aSy4+9G zxq^NLzbEnDiB_9}k6z3x4+)jz7PE`liP>d=N(oL7w6$cVm(5H2y0#~sT5Q^M!qx#c!U_3d!V zQ^7`SsW()gIt`9L-7M~otgyzVlX-09(`aVr#_UUJo&M)Ek5`NA4}M*Zxg9lytHE|)pr@zj$rBS5k*&>G6jtkhIk0X01P7Dbcj;TiWsuNXAA}`xA5EKC z*ZK5o{7;T$x*mK3nX7;n$#WX@9fDcXV*ZNgl{nNRL`b78*i>8&}7rrd39(gHUl z8EsIQoQDrf*_$}+Q}t`L5a0&cCQD$p28*aqW@I0^sQL3>2Cm%!4H+h@JIuuTT$Z7>RiqBG%^# z84(k$_0=}rU5a)`R&3I%lu(A%_rKfPzkhJ#x^+r_e-q(H5ff3$I3{`+@gQ@yc5TCy zU2Hs2E~LJP8uEMB~ z?~ibf7G(~Pxv^r#GS6xKPpn6yfB^=vQ+B?u4hv%`xbcdg-``hP48z z)J`0$tmj}`*#s`J)*xy1hzZHL@+P%aC76^nj$ss?W0U7jNXuP8pq4Gu9Uc#lB(;c` z3hOiF7e}goKpjV}n(UfHVAR62GGOt$hY?(wA*pNT{2hB}0549-A8^OJlEL^h{>%9V z<(drl41cV%^&DQeW?}*N}&e@Ipih?)n z;K6Apf9aF5@CXvuTB2xTUZRak6NMDQkl9pRil+??e4K>XlIxF`s>fgE@p}VV$PODc z{>S4&^?PeG?|&Xp8cb!djwbv7g9@>shaoPQSq-s?F!*D66VDPeEVAxuV zqeF+d;HawGHxA)EniYmL?{RW-i}U$cUWeg}EE*6Cq(ZZtZ|231`JFnRFD*gQR7~tH zUeBrTN&SCn;oKo^sYc5UjK9r;@gPmp=JD~p?Sj9z#0DoDyftCi>{WsG&Nq87X=w-Z zd!PUiHUYznSSgI!ge9yxyocs~P7`*k8_(}LZi>ZOrj$%DysdwIhJK|*kn zCA)|p0~t(^-w>^K5pNPf6>a`_x7#;=)u02i6%4xcouXj7V%Nh|lFx{TQqYj_P&*&~ zveey*ZM=2McTu@O9LRufX82?1c!x@AxY#ELJ@3T;HZfxpahW80!@+a1i&GYuK~$`7 z_&n<&xP3NQsL#Dvn}PIaVobx-_7kD}9j9gOm?Kj(jOS0TZeM;dF8FHB#ttfu^L9pk zxmodPnHUby?RH0T5%cB@&saFmLa!8u0cL}VmAQRB-)E8SiDT_#H#l>25njPnxH9vM z+rbBAw|2F_eo44c3y*DX@2l-z0lBpa2f#j+B4y#WAdq4J!x%>8M2*o&v#}asy7-YPlZ>-Rz@L%E2JmF>o7GjDU(B4?)#Uc1 zZck|i2hk#y+LwLWP&ep36`hs#?OlI9(wO{d)=k%R`jB(9S0^eFDRCJ=C-ya1YGCHm zqn?6SHAn~we1RFa6l#sbcMmRH&yx`@FaVB6;M)4N_Xc?n_UdTz4ib!zx%9NB_1E+R zw0qEfmI2{C^b{kT40jo0i-;W~Hjik(;%m9z;ur}Wp!h7fvQ?7P+M^E}&NFDR@%4?P zL4UuFjEGoCBwIT%I{ajV+bwliRr*`QZ2OXF8fb#DUyB zEpRF6?EW~U=R5$*ps86`J3xaeyS?Yz8J`hNJ#e`bVX;hSxb&XbJTTV@#bx5qoeKY7 zio6jsQ?Q?vsWgAg5Tht^i0VGuJOKY^8heYCgGX>0VqG;ncRg$>4Xx zy>(_4V zOg@=b7wN6=LCCPxVw|dDE0b;G;}?g2LsVPGYLPboRY83!A!j_4s!$xp!}lo2s9!td zTJ>dJ_lh|W&+Cx=M}2qj3S8R;F%#si;wh6il8WJsixN~Nq_9oId$&Eqx4oN{FotAj 
zz2aveHd_hz8mi>Iz7BMYJX})45Xs%lCSUdFS`lJvGW^(>XQ<)We3QS<}Y%(>PPjQ)?s+H*aSX$9Mv-R6VxB4wM|!IS999krdzj|RIQDB&M&js zeck|Y?T;rxE!6cb97#K%;(80LN`Ky&*-Ks)`qxr${eo<7k;NwT+Us?pj$aLI+4 zBPHl?=5QIXx=&(pivKkOGYMp_8w3d*e{3shsl~=S-wfYn7|x%wBSmZ3L_oOs(@&3D z>$oHMO`}FraqK&Tidc#)>4|Qs5A`{%9VZDx$P+-iks4o7Pw{Q>#>knKy4A)!3vmbc zK#B$I^tFIhGy<`@F*eL1{HpI!p5Of?kgig~e7X7u*XGRsEmLhU_%(JTvjlaz= zoN8;`*Bh<@%BKN;O{>4#~*gJZ7*#pk!Dym8& zBclO2-tjOu&Wx8j3fHRk;QsEbyG67g92oq3fGGDez+j}C5>2GzNk#Le)4c=?DB2ch z-(lv)IdfybKT&S(Ely&%B_rH9c(0MV0x!khjP$=gc$PF)C#YV`$8kyjioo^aYPTg1 zKJ*UsLNO(EGOFuE_t$1d)V3>*IS;5a2|9qna8AgAgcfiUYr81K{~8;%lE3L%3YS-x zHCA-0a6t-?aN=#zE8R@hqudj}(##03j_e^VChc)p@%VU@W7a2?eX;;Re9p>KM=<}9 zub>76FQTd9u%+A}8$6k{gCTf@}y%E!dYOt zrp_3q8sa}-v6Mt}#!a@+M?&<+Kt+VW1-Knl(7Ms37Avgjc38!43-x^KCj~ zdGx=>w31`G;KrL?F82C2V$GuUzMKBs19mStj}}$@?-P1GeX}y$%ZcJlxj6+;M<=!I z;9=yE5YTkRlvOE|mbv}|S>hXeE5d6u`y+a`UVpmEIq-M(WHpaE?c;3GyASJ@7|;om z9s0Oh$mw*Cv>o+bf=gGP-`Z%WnpoLSreKt5HrH&+p9($0ML3BlZ7l;0PdW?^$kt6N zJ~Gyd63{m5pMJk}ZcZ^Cf}eM%w7ohfr{!&6==o>MW@eWiR)RBs3U{r6iR7|KS=d^q zK%Obj$YoKdUVetjhZG7~#6F*#-7x16L_SeH3 z4%29mV|#yjaKOR+@MxJ7(_fhVm{=H2dEn-Q0!qAM;Wy30S4)h1WJQzI!XYZ|Eee+V z2!dvy{P+U%Ro`dF8d+$6aQ5|{%1`i!?w~bd{$|LIlVBRM*e^8Ke-#08bSSw+22P64 zjc=O7pVm#iNiD2mo1{fqi_n*g=fnjo_;?}ycG-<>S`>qS%&c=WOJ=3wXM5%1O*^iZ z)LWaWlOGkH_mgU0P&!54tZfWG(E$r*PvBDk*Mgcd6_?}M%?{>IkLmx%?>K~e&-l5(k~LJ9UUE->3Nh6DrcLO0l+^)+rNpQLdt{@KI*&eHDBfJZChq)>rAm{6s$a) zaUUJ2#ZwlxI-#~tXl$LoPSsJVT>Y^Z?FP-Ef9s zQxmN=IxDvQ>QJ%z{8_g|sk%Yl#hw20HL`%Q+FfWh z;?iD5$|CY|T9*`LcC(fAEV~80TIzbJss5k&df7< zrYBK3n74hm$UdohSwhikI!!HE9jw?}If>xD-}g^oG0C2h>b#dAb~$Xw7fWqwBpnydA@j^D@lMye&mu@Q`^ zo(l6$n4ohW^c1o$L2Oh#orEoO{vB2icFEU_XJfa29jqj`L5BJTF?Q<`x463RaY zT8=1taB@5~^o}88jtnq1IWFXB5LQ|HE4uCQXh2s zdWMHhZRYpzrlI*8*w%cBvQYoXozAI?Sr&`s=#JyB6Mh2^sjj)%T3NPJsdWl3WN8e?2s|`jX8qW6TiNg?c2iv?k7>T ziXlcfPAHJ^gXceIJC)vRsFTt=*|iM(gt&WZNKzHZYVGd7iGs<1&P}xJu0+F-xToTRE?@d@l&t6?cz`0xLGoHm95F|l(cxO z8Y`}+Jb}WqpMK$A#? 
z=7~j#oJZz}w^ejfP7uH9L6VY*9jxv_X@RwRAKtbK%IT<+xM7aVLvarfJ2>EF#omCH zmckug-=-XU$oRAW!S@Eh7>4)y#cOX))|4y>iqS_s>#70XrPNbm9soD>*m*$AcH>CQ z?k}#?ow&Bnf^_rLCEiX5NTq7;rmt91T^8|?g#}%ap;99^ap7tKMhbE#iw6`|?axP^ zXvUo0D(#CZ#+%W;8&d++lF05Tv!uY6H8CQypl5OCDUg6h=(ILdEzYdW^{E6|5aIR) z0|(2lEmuq=^*uYhMESW02p%!)xF)l7+jzG-(<+Zc0(eDmNsiwdK1(8WJ?XlB1P&U4 zhV?*xH@!7EYYva@eCWSB)Fh@v2an!hdL*ouwcdl-Oc z1f~#6@M+r|b!%0RQh;;>&VCoF zeu8FU4BL&B!wBoL)W*fwAR1CwQ!t?V?=KBz&{R<4WoL36wwHLSU?ZeiSG$(4f?_kK zO;oN^>D}WKqX2(hlJXunl`T6tUZWYeAfd)TuRK&)W&?GYJ}#bULSDhvy&-*2xg#en?CAJV!ITi-H{H?DjEnLY#xUI*57p2n%6n*u`;~odnXV$QPeu6qzVm);?tMXa4k-%s z-*2Xv&7WQ`9n!>do&9s+KEw10Q=eyt@5}@S5*0QIjwT^`b`fk)Vo)h)0v=Td zgVIJ(n{{fk@iiwxNDM64YodrWobYt%J$L%0w~Os`Ue;5Sj9wjZBf~N)exdslr3S?#97hC%=^R3V zqUnx|9@Wff)9f0Ql`}aRB_b8(AHn=v=yLQIi#%Q0?)(160e}@k?(M&0Y_Q+;$2+f; zQ*cQ5q;ET{+~Zf5R({s}quC-;_El%q_q0unWoiL3QHRbpE7sPj^70O>oMJTZ?B!<+ z8m)hcb*!jArQs+2tITaXsLUeZ0qv*oz#Kq{R|@$Inb00fA)y$GSFsG;ycgsDC0#<#gbH3*(Pv3u%D`i{?+wyS9mS zc6k`0q9&ECA|1f8m5ZalneTzijUv9m1_7&+C8S{i*|U)9R1_s?8P+E3q2>dfge6wx z|0K`F*yJ6H9XJ1MbE1Jxis=@&j`h7~^=zH>VuAv!vYX;wug$~Ka?b%4)OCUh?}t_O zs1A;x?>jzt-DGU0BB5J}RFWE3x*E5w7Bgz>%V>Bhq8hxvq?wa(HtYQGBX$EEU%u<2kJ2LCtV6%d&=(R)IE6JYxq2$oF^NnK|t9?py8az)RoahC;Bt*<59VI3xz=tEg3{90X7t(lBS9 zC1a#{7N6Vpsju6&;Vdff#beVeAy$?Yu1>P+OAbUWFr!v=G?bWK4S$je{)YoAw+Qn5pt~Y*C?TWlXWod*=G9H zIr=256kJ6!jQrYF?Ud;TcXEn)6TME8wt4%}0krHpKcVNKp1W>&c&)P-aWY-4b`4Dd1%Fs%;k)tJ?8$wv_w=YdYwk58}Zo9Z{*B9zxhQ_ zS<(({MEdkOr?T@+$CpytJ!!6!*n(cNY$#pi zM^o)ZWpq=rNrj=+9EEUL0O}`i!#Ip8yU#^X}1zx*KsuXXA%t zEo0+pz8e4cFY}w$&40SpHn>2a&IY+5u9pf`5S5mP6$~+E$m4KKinE;AqMF#56+ua z-U2n3K45d;QC)rg22{Kk)84_StHT|x&$TI@H=2q_7+8E_HE(b4oGo2izbrq_(Y^wP@LL`zZ5F?2Nl)#w^uiSQHC5c0Qc(Lrl$9an4fT$(H5|ZWaZYO5Ocnio4{u&0-nHsN zlB}Ip(7t5u*=KtgIKWCOfw41~zx1C$G+JGJOZH9Ak4KI?2-u}cHg&NMBho3l;Gc`K zh{JpAAkf8GYgnhoR};?jeviXqx&$VAQ>BtW@P8ev+#EU;&)KLxZ5CDNiP{;{Q2v!1 z=M(?s+_bGbH-@#PAMkI%k6oW~P)Xv8vGPewXF1agzvfZ|p0Q-kKs*T^&o 
zAim&5I~_-}cWMf;QG?=u$fn1zBBS`DgA4ZpBnVB#g_1GT7kVRv(y>$uP7z9#-O2Q+jMy6VuS^y(lu_m^$V&$5AVa4x%nRYJ0?&3cG%~eE{PrtE#sMvic&!Ft?iikknbcE zG6_`TcSnf(RS-A!$Q`OZ8F-VrLWRnFG4Hf2ak^`qG{Jn0>jS?_AHdj`Y786U;^nro zUVH!yU+AT#pxQf+Q{uhKNN%aqSC_<^(%RiGb7z~{l6$46<|OU=1#*4=?nc-&i9Pjo zr>uJeuS>M?>?T_mgfMY$nsPaCDOmGWo$d{9Tv6O&XJ>sMhTDbp&g_O>oX zugifEDgHeCENZwnZZ(d|H+W4;4MrVV6 zEE9%YQ$54@7R`df8aZM~QShou{9@_&6bU0um+SUYSA4ChjAnP9x$3CQAOLamk@Xc4 zsF(|>x->b}!MHs6(a>Fcv0sR(bS{}oN89>{_;XP)CF*HTo8VBEG!9>yrl{uxON!L* z3dhFcj%`){(i&k}kexew$3pIl&8>%j2IFrL&xL0Bx)&-ivjxFozKmDU!k2%mP>}z& z`sJU${b!aoL(^s5>#}DlaLWBiZ=Ppow^DqzQSOKcEV%=fRD9tvl5!ACj=|20KcAn; zgh}&@`~T_jeuYAzDcm?lO!};gdLfo@*J6@ut|RP;xAPKI4FL z^I~yb0Xj*nY7#TrkJK;I1fX(PpB0aYvg1zz-N?ce3eqeB9#ysJnimgl>qca4XMzRs z#No$B-9Lx!Sf!%YfYww^EFV`tNwaj1B}dn-G{uP__iL}J2mEndS+CKpzNgQEjA=Qn z$i(>DkQu-C>DNzYVY1@%U(^QRrXZ+mt>>`dLk9wHc6>(_i=C=IinC0HS{9F<-IzJ1 z?tsH6XKo=&d$|q;KfJ>`&7p<(H(!o5&h8y=nOmd|J!?kL$?wZc|L{I{H@u&R2wbW1 zArYW>UNN~{-{qezn3w&$?$mg73xv@&mAo_03k%Ir1-yyr=c-J~rghyNL8JP=zvNV% z*NqGBq;_v@29=RIz|I;vLAX)^0mvxnmd5_+J?-r5R2;`QwQ9fN@x?#18icM?u~1qL z3LI2?dPXQ@Vn-<2q0ZJV?Xf`?2WtPzEs&1>7#DaJlGFyq^J*az3;zD`<3~OsE}TYv z{xYxDEzHW^OWlsq?%G|eB3$;v%c-+xNYLmLpN-!vAxP9i1?5^k6Mc(LKi)-U&xjXSW!XF+HGwc9pJ ze*0MY(GH{vVq6w&zj%hBd@#1R_s*{*N0H!OU7$ezdNJ4Eqh5A9^snN2B)urUa5M5} zjK4J8WjEd`6sA#tLSi*Itf+(Ww|}Phr1QDPgOP6WKl^IM*`ezGw7C7)O9+jW1|>{1 zOTU!8R@x?MLI;x~;ZJO2m)vr1} zdHeQV1+PCTLSUMJ3S61IXj8cNx*+X;6F^-SsQbk|dy)s8R^#dr#b6V~!NG1Nb2&Y+ zJICuZI|o*R7_-l?HWdU0I)le6eg$O>44h~D8#!@=soDGi)sy-FknupLpBewEhs9NA zh61Z8Hx)~~p9$-tp;mjBT@y0%&H{i_OfNnbDt80yel^G6rN*Kn>d-|>hosGA!oM^JZTYS zNd;r^<7fw9I(3fJ;g9wq<$l&nHFWJZy?iFfFvB#pNyj=|Z^AcWrB(#xX2#4j1Cxr( z`XBDhtN|w*CuRg5flF#TLjw@%5rd>PMs47KBXcT-_Umb?k?R^ z-;+w(hQPC|=H2@3+gpY`(#_wMgItBBZv%M4v1`zDek$XWGuCRr2X1^IfBRoWGz>zu zZM`1f20O7RgCwUK^6}wKIOh86|2#c+ba432FH03dD?A(UWP`{ip)I<&Xn>P?%_EvFcE*wj|(op6@e_yme5^Alq(F?pWi=>2qZ!YINEPf z#cFoh2>sq;g^||kz~&6XS!}7gWU0$TnZi*AsJM(DWq5pAvM0L~sUDti7n*CGBRT+{yq)nPx!X@KXz1z7{)xDS^h`Q~XqK2MP3jo?5wlgN=`0y~L| 
zCCr2I=YC($2b7O3TDWzC4*1@hKQp1D$?p`zVxi%C$t>90UcU`ucwYYNx9dq zncZKtgrS-E58=Z1YKfV$tP_)+`c3rG_YDZui#n4v)knv&JBzp3Lpd-xS((0I zQ47b4(1soh0u+CrnCx_V=A9-Ay*4vL%GEv*xJtiNJ&Sx|6yImpAzg#uFGh0ZW(>T& zIQAhOXEJM-vTDLRz1|ei+MfQzbj03uGN$LT5R^t(dnu2Nk!FJ7)25HD(#%Z;X=^tAA3*R*kC*P=z?IlWT32;ci37+A_ock~LN zp3QwBHnfKevuU7dqzIK%J>XU9u(Y7`h9YpjGyN{%#1%OVUEeGtnPJs;xto# z5*RDC6{-pu`4U{h9&N@`^H@Fepz`K^rp4@**A)i_ z3OR+zC+F%59q!f-e`;er)on)dp-U^ktmMKm|JwO3?G@-{^|T-|4=`KELYPR&Wgqg* zhVM@2RLkt|)%zZX=Bt~HwCq8(~ab0F4Wj$O_NdK~tPFuFNl4 z93&(Of#u19AE5^;_C{xw`L(kC=>gLFY9U!NT|{%tM}3*i$y^*;X}vb#qdN3rz+*a- z;tnCI825-pHo&jK^}|r#K;q_z-FC~z^vh8|$6zbewvP{#o2&i{a@Ke6_3L_C#@FkW zMV(WT?JOWVOaljV_e6V&BFvE2AY?|F(+W02O>Y&re%+<=%~wl85*n_e<`h%u%a<=3 zyF6W#^hstGwD@!`1-?slEa97W#n+l0jp zwM|>G%&n305A7Ad-#cN12Ij+qA6hz7ENSmiu?Z5xtOYj=aeRR0;me1F`Z97|B7|9t z4RKQmp>grHzN@39!JZ6B_u&5bODgMV$8fB8^s+t}(wyw|7Mpj(xO!S9WtA7H$tX&d zG&Ajjqm^ZzKxP@-B=K?PXuH`BV=ib71|mj;i!a`lB7=6b1Ad%Ub4M3m*0TSi=rZPL z2m}DNP7x;|4qoRY&!fKfPi^o%PZQiWRvt@UNt3!7M8#h9s1aqBdiOJ%EqO#>rvSMYhX7Hx=lI zT+7gH1K|y3QG4B_u9YJIrNZE_TrDmx-e2ijxPua{16?eV8Yf@h`1s*$9*T#iT@Z9~ zgz-s(svVJ$kylCE$?UtC@p!E5jb^)+-kRD&b{11JmY|*t8Gdlc=VjHw({M?-ckz#; z)8QVEYjWatH8!t*DZ<{l&}>5d=jSkaWur(n=kR!%>!l|s%=|e0k=g-EyZT0Rj?3)Q z`TF9x2jZ&ydNZoEk%3QcTu`yS7Dg(CJ76a4V@M@j;E&4=*Y3VC>oz=_6aKl)6xZ#C z1luX3rxx}O=dF~gTZ}fDauOjYi_GF05E}epwknf3^b6_)}Ho0 z9kTtZ_w}m!!?AC+ig-1PHdWj(;HhkySUE-6E@gR*B|5UXNX3a%S(TBsT}d7M$Hme*Eps)|t%G|NYJ8d7@#p9FgYOy=PB7*)T4i zYhVdRwzjrePuLVqtPq-o+)=K`kS9072X*O?QBp?39l`5QXW58va;KfQKx0d(M~6LW z)4@3G0mFSRLIcAYeo=u&RL9QXQ~8K3s@rmUu$oLrFL%7&)jKK2zz zSPE!}0qJbhxqtpGQ0q&W?dU$5@A{yOE@Xum)Q!B!sWDPZfP*zfw?=HVgE$)jom9+% zXo^SK#%P~m^bya56SlzRM`}S4ot}kP-KGqc27nrlg={*Kpn=KXwAM7Mc*yY-lEn=9?m$#rA;DGLh1;v#2URJ^8nRYJy4H0Z7~rJnm>5w#%+F;S0* zHPAM``sv#GHepOIJJBFbE1cAA5)&iVQ*A5s3C}wo*mN_KY7pm!GM#7nqin2_t4h{J z%aBP*n2Tyg__dDJ^$%1pH9>WTV@4ago5ABt?{-TtMVyt^6 zOwtz#Z1*c?KWoERhKl0k?QdIlO-%pYaK_D8P*;g?w5L%YXFv89;>&N=X7v?}n*eXc zRho&}g-?aHAH)6yI#e|cYW zkHv!_2`i7-x|oY87p_i1S(q_TMmmw#v8Cu=Yj 
zE_Zy@jhFQZ|N5D^o=lw;n{nj2TmYw7d{=;`j~Xe*fuxtq8KklA-1|<1Mh4gz4*K?x zGZ5fe{n*Q4VcsUyfv|J*Cm@fatrA-h?uHW~;%&3F)yB}TzPve=Cq}~SUrS%Z-|wP- z{?CQceBbR3E9_B^-3n7EcWj4zQeBpdhsc!#*VvJ`9fWxW$TX(+Y|lu$O_Dw46bq~# z=Cv!_E8_?nCZ8?tjC2!EdE@w*L2lJ`GL?#2Ui4?eJDH| z_lyK8{5)#UhWmD_ot+p0#mwi*jT<-iR}Om|OOf4}iZ&avwNMYUY9ey15!RWlm>S1` z{YwxBlnT?2&MWe(aj@2s$J2xw$e~!vrvqd7a`hXnh91q2`I@dOBR>u+eVk=9i~`~+ zHCoBDXR*hfUMO#+5Uhn8Ou+7xHe7kz=*4J-iTWk|1qEk~X2&P#I9O>_Y%|zr>V1St zW%}RyRK#@@a?`wgbttFklx;8`3be=`c13x7WsvqPEtz04$dtAmy4%0ManBZG_!N`;r*>Rz z(J(^wo%`Zut5@I7nw38|snJ?d$Lz&b)scX-5sGbCPwyETVW-O1rhq7y5KG1S{>$GR zJDt*w%L9iIe}74Fisy!7EIKhqrcEd)RdsJjf{lhW*Y{0(MoJL?j(>hh1=ki3Li1v8 zohiRl7KkVD?Kp>|dk@X@M=}}Lf-I|KK=RKSo#=3{BHo_t8v4!xYk--8%(RvUKvH_< z9`!IUb)--zdG~Jbn_dEv5l&S`{=Qi^X(JjUhBMDt@^0E_1T!EhEoY>c+S3w8Ofenn|M>Q%`?bbqBhGo4qdQ^SZ0YB^t7Vg5_?Sq z#p09h4)FJRgmT>OBao@v4^Gp`{+G^|1#J1xkKl+LdxTG7q#=4s$AWvgc!1HI2>Z%s z)|-!L7ky!Ei%Q7%=b&|OcQ0})XRcpGkywiEW=!Co@U}PF8_g4kQ{=5{u~<^%W|YVZ zTM43H%3?=6efzfS#8InzN90|2J76~DMW(3Cc_reX3f)}+qo5(%%~zrONVIA3so(5b zTC;REE?|Dr6vQ@z!Ep1}QM3DeVGl%03O7`!yguO%zb!t;l!$1WXdQj2NM-ZN`}{Zb z-hVFC_832~6f3(m1R%F3x{F>r-{|-JjQ$e)7Ix?q7ruMgO1PD$7^O>*p;X>p2}KoD8%sJ-+Z;zN&!PVY1!!Hy zUrtKP!98b7!7QnnF}zqEAU;4m(r=j8cZ-2U? ztUmm4{+hWq3RyFEwS;JTg%uG)2L{@cs3VA#9jF)-n<{pD7VhJ5F@`-@4kAuS7dK1A zoAJeE?3~f0>zon0-Ys!c#kNDY&K~@#qod-;85eSz@FhR&6;uObD+(h|9kw=&3w@~} zb`Ly(E>}yG(|01@kx0&SJ3u|0+q*0>_z+A&kCw-6JB_Wkg}LBl{yyc;nQu!*e4Gir zTgeKKp{B7P&mdKHbtn=yVG1I}zAvrjMdM0D`Z#F(W?Oi71NUq2_S?4BY^b0$OHNLX zOzGy{BK6WGQQ~pB0~lqg(FL$>Lj+|Uq=E*`4QC90=-++7!G9vx=Khor;<4`l{0G(=P#*itD%tYVPN*o{ zxdRA}=Eo4XYvbE<}_AwOCO5l9WpcTFk$8zT^FB#8Q&0itns(>eH(;u8Rr+i7Y zj%wK_Vp1aDpB8LzR#era=@Oywz82>$D^wH;SLJ}wbh6G(!Wd&krX?b$7dJOB9}O1l zI9GPKtI?>MS1k)K#2%I6-L2uZ`|We;$#x{0CGYo5+?#a&ZhH!y@)Xb^3gIwbhWS9V ztR;gE{ zm#+cbo}juQ6Eze;6yvWDzHNQ#gKT8ZBaybS{nM@2prgCCI+(RsFaX5i8t=>JwY_t@ z#*G>Y`AE!!>(#5*N%OE?SzbxzqAbs&;i+Knf#aRD? 
zYYvvD`83xwyQ)=IGS|1q9=q^{hwH2B*HehB1DMEd@d{;`cqRZAGd^5H_D~J+Ejed6 z!fxBIQ&dY2FaD7?Ij&~2{>iB2=2)OYk)EZY)3L*{V+?Rg0S?{)q#Qc`E+zt zdQo}L_cyAJy&KVK)`B}RHp%#x&y43SwJg@}ujse<^?jY1MaW0WJr zDp6L=(y7GkjZJ-5ei2Yl_+SjaRjjO%a1k#3Wa*Q(4$O~`Y>BhNKMhjoJxhct zz~$E{=r!t;mGb5|a7;0kk>^1rqZXBAd~4SI+pZBohWu0FW@0`0YCWj{3VtN(>-5#C zy9amWGW^dk@$0TtkY}8FE|ZBQh@I^AHTNIZGoV`A1{yl^+%+oUxU+1JztSw~c2;Fp zR+bPst`QM1{oVCWBlR##!Alwve@@-s{if%KkJ?(sAD-U`0|t+$5zl=`F|@1Yz85(g z)mS@U*CPItvu3lt75RfviMY>$S5cU0rU3qY(-J!kg=+GNN7t?mUWVb}{CH`y)i4ND z#GEb}QP`6ZD7XjgN`;O4pv}L9C*PwQXMK+Go`f#FPo&_O%i9vC+4yTq$7w)8>MK^^ zGro7Kw9B0J{YT9c0n8y11Z<13$*=Tc%?A+sZ(c)=+%X=!p)EOa4{U5Gi_zrU&hOmU zjpAbkpm4C8Sc4=h?obb*Wx*8sYG!zNIQ5M_z2NN&&c^dsVPL7QuD)Qp%kGO8FZLXP zZQ3E56RX3!ZAMUq0qlgOM z-318Kv0%_+>F2ThhN@Fv;gWa5`7KwvIlGEj0~_})^f8S=ICbrQ3e{Zi5wk!32Al6( zfC0RpoyVz_K9J7ESxY8|5fK}&s%5sV>R)*&R93U_S3J7y4&2+qW#9VNG=h8JVw6bWS^+`S(LB}j>u1#!iLZdKbYmw<@ z%jrPdiFaoRraL<}LY%xmdyur!z-_|pckfa5$@%9r&@W2KH`poRi%?MXW`KoDH2xm| z1cg>qFwhlX!v7`Ig?7 z8a-VaAi~h@J6OxHmh`@#>0RW=*ljuveP|uT;;D6y$gITDoiurz`76Xa?bfY#9*4qw z{L&wl?bE(ibji%lc59;!hAM*>AeS*;m(DYwcZqq7t?C-i=D}CN;V*9)Rg{9bXzS=0 z%~5TzslpX%`fka6w#S<_E9a-GkRSOzJDXtc>m~^oN{0(J{aO z=%b!Gy!F#J7Wtf33`z8ur=p+V=H+i-`6G1NOvQC9!6B;rYx)O!dB>3KyDe|)HkvuR z;EPxX#ttmWOa-zfsBq>x=^t(8{-VROjSK}%`yb7m-Ma&WO@I<-I^5ZSr~4Mb7{^(E9P)2JT=%NBZ{)m!01(jc9|*7*g?hR zPHl$=uiBD|%*`iIC5w;>B_@VJC45+)xf;V>8l~Q{$e=D#V(O&x}Ur%PMVtEd` z)+lxO%-2yb55VXXUu$!Vv4M~LyYI(^u;7K=yg5}rcoNTGmCAH+@hz&~c%pzXEoZgU zNpvaP#Nkp2F)nn4pTk(_(1dfA3QJp|k$*_r(9{iuOPhWRJ0auR_i{+49eNU(SfGAB zxIFC;?LTmyzzO(MuA@&;nPk(}eUR>=a(qi<3mb1gRGc|zgJ>{vzltXGV({=R)uWt^ z5)On%_&HiMRp`{}G%;#q$fUHdO~Bh0r}V}FsThln+LeX`;DVY_{Y)I!Es#ao46X+r z{KtIN!#5=;Ix`rnKvv{Il3ulEb()^H*|K zRf~-u=KfEweOy`CR_BtHK@@9Or9N8n?d>_Ai<%MT6JZA&8h@saj78G6d1Q}|UXjurW3K|b`zLQ&#W zSne~vF81zC6_M?csVJ}jaQ&2R(>FQiq{~KQ!R4B?u(Vijps#eaWSDTbH+17 zVe+kS-{L1LSd}L;Lawrl$|7IuS)3suOlK{Ree&#LWm**B#=N&fl!KXxiJ_6v$}=0n zJxm5Rkq&};pz6}^w;_ebK0ZG3vK1}%>ZYjHs)o~BsDMI0sQ6wZ6@gZi_ 
zJop$ts^7|l3&0g8{?l6n%$zDd6Or=?^QK-%av7-?EU+k!3389-ORodhWzariS-a&r ziw*<)z%_P0`K|c^yNHMgh9swo#oG?5tB{jP-?{psTI>U(By;_+-Pt>qr!QEMRfIw1>c>^+7f*K6ufJb(VutIp>RDgb#UdN3R0%bIQjlHoavYbzu?o_(++V+0dPqel zvHkzIW;ZUnx@DlUY$4x5b~()Z_|hFL^&tMGRQs|yOEzG}oIdPr>wn5_4-x4Ax8w`U zgG!!1PqaBk5#gpj0D->{#PM}pm>Nd^wx1oga?eOQpbuF5$^o93IcNv%ksnzzSAWNi zjA6<-YJ08LQ4%)f^ffhn3w7W@3cN`3(ax9tBx+OOF!f3N*3o#`7Lr>@>cTw!P|LU-z<9$7`Ympf!15Nw0-_JeZ)(d10GplwSGKon%q%X=ObI2X5* z7({Y2ns)IXi|nUnAyC@FAAJntk5XS?_oj(*#@XLLp=X@Q%oqmD5Rk;iP0X+roM7@p zTl6BEmuElw8rC^ie}KG)SYAPTAq z87(#_4pIBt^3jPN;@mE)OG<0$Z~B7=HcSuBJDHlAnh$YCSg9QSNT)8(7ro=u4PZ&! z!ndaj^_M;|jq?L$MD_nKx5#UN+pd6T7-i#fd8h|{%VL8lNG8&lQz|tG@&DF6Gj$@% z)u0tI$o9SPV#(~+rf4^AXZ#o9=cl>n!06ZC3tY^+7HC0@48x`tF~0^{8~Od+k@otL z*~eL|u$=-PO2o?Qe^yaYO10#gLhTinsp zd1yLw=a;_8S4@qJWa*oDHY+X#yX>}Y`#W3)=2quE_NxxrCJ;a9^Vz&z9pn29q9#;v z)LGKl*2hL)M`tBWIM=WCIw|(nk0Styd4LSt604%Ok8Uw}j`ZE^nEH5zEoSc>IY8a8 zp_ereb*gGNYi~WRjfSSDq10r=q$I+|!s>S}y_ zIuJ2|(TNd*tjvA&)VTK4A>m+BxJI`A$k(#C(fN)f4}t-a4C;^F7=%p!WzNC?z^jHneJ2B%GY zPN%Ul;3#Iu=^u8|>?d6LDmj#B=z7@+OF!ll*J#5J*t8p1Xtl*{4I$=fw8PK2rR|!z z&&+jlm$yklKN(((=;AlWe&xl8nG$=P6!*+rGMU97eY2L@#Mn8~g%JGSE5A>~&lku) zu84{VIa}T0ABtcwLHVuhml7K#eoi=I9AFpcZm}(IaBVf{Z8`oFt$=|j+56`S8JYo@55 zpnPpHNhc+I-Lk!6D+hK@R20h!lyn74ay9~`;_;9e)33O87X;@p@3`=S5g+?u0XMz) zY*(4=Flk~M=+}nbM|6T=>#pG{@jllqZXYxYd|xk?KB!(a5a96&E;G|=Uh8gL@P8`M z+r|b_911O9=dE|^M@$Q-?sZETx*=tLr$366Zhb4D0?AabT31h-OqU_W0RAudDdae9 z2tV2Azd#Mg*PMF9X)K*wj9a2?N65-vl6?OR(1v_sg$vd_Vc*NiL+ioRd>)MK;yucC z$t75(A*TcW-ezT(#NtLI1;%SK=KbTKb3e4@K79tmw4F;TQ){4HB|2+V?}S^La0N@1 zt4#K!Gi}R~Y?_lT!42waNxffZaUp2EUQn;JpI>Jw@?M|)l7`fTs|w;BiaNp7r-QMZ z2G1L?frXc<%Tm3Qm)v0D2-lFU1uV8{kgjku9y_&7BymucQw!+eOMt+U7Y?IpOwYU3 zRaGlNIP#~TE)D{&kEe|h?%GK7Az5pEty2Q99y2m0G>Q4+ieL5!S5`sb)1?L&=;9K- z@Un4&>-UN4e&f-s2bNJ=vS_^p&C9|K-f563(U z_8$-;FeKT2q^Sh1j?_%;V^uwj&T$0SNybPyssMaX-%gt|0kpG z9srH;@BL~xDXbLJV$RZH=8A<=$J_h1K_15^vs7#xHHBkR9CM5;`nemdh zEiY9zVn_<^q$xDUl&HFS-SKBYGqI(~QW@R-nO|1qj{43v)qb*X&f*R6DOacCrhSNI 
zDQjran(%mg#{PwFxB)M(lAtOGtM!Um~jCWzf3BWhP2RU?9bSRLE~c(|lbF z!>2mtb8u=%YS|&V%GKb~&oe(>a1yl@N~5(yu9m+WMRMe2R-QO`(o_Y#@k&hf?*F1i zxvXB4+F$JhsA9^5Gj`$FAPZ5rr7Z)wj9YN2Yfz(qtyh? zV_EL!8_<_8pjNs*=THG5QeTNaJjko?!gto$|9)lm*O!}C9TxM<>U9sr>VnP&Y{M^F zkHh2oBQI2yL3#O9V#<_LfLeC)b>S``w{BtwLj(GN`WA$O71;$FBua`$l5U?u$w{no zfAjQtg`R#k(XD+;=&Uu62;ZG%aHADdHflQo%}_xL;Xn(VaUs`S>lav*ouZP1h$5&U zug|@5ddU)*V_r0cLTqKHkd0c&KS-FK|EKlKZ;Tg2KZN5c?Z{7GS_8mXaoeuCAQq&# z&_nnODL*mSD8++L?*xj6xq1_(dd$Zuwq}5l|6i#_cxTgc7+^iYG$|B6b=lF#xT?)q zU)k_GIFJ>(!wC1b04O~>#qJH1z*Z867&_0}kG^ub@)lZJ{cwpyJQs2Osfzcu?*do* zOGa& zu(B()V(&!ChQ)r|uZC6E#F{0ofmZlGPz)G1Cy`V7b;sO^#8*S1`4G>PoK10(7hS}t zzQ$oI2;fz%OS=8$%x*ka6p4}p05|zDlaXQv+bK=cT0v4oRkZr->T9;E@`J(70%w@? zreEGy@-g=JHf;RjfQextS|Z6VhtY;o@bpDENRp&PxU&3SKoRTjAD97N&@P+?GUWR~ zyfmvU>x9RgsNAjD?& z;>M=oKN<1U#Y8Gps_%ia(GR1ihlfECQSQUP4tLC}+B6duMR*i}7(QJB$MS&B&lmNO z6Fxu-{(y-TwNf~I500aqKL9Q)K) z1IyVF(ohWfW2}&{op)v;(rzhn0(IdPl zTg-LRV@f-+ER@Vqm*6cIU|n5yk5b9m!II`|OWJQ0HJ675>EmOlNm1+&LR)-l$|(>* z6qS{YqSV7H!nwZ0ei_2X9r<0*Rw2q^%oT3LNdx#If(gu_obT3HBi>Ybvc2NFT^Nkx zzo=tT&!h-vi7NXZ27QGL)A1CY_O6p(e;Ni|6jzMXH`u)b;5th;cz-aKSPMuYD%d#+jQTagl*g$rzVf z8r=EFs24&rCQNIkh49THUYnES^~xQm_ly!>qGE3rv)3L$W3G_C0|~BjI;5QNWdN>4 zJ(vVo&a<5GwG$U<5wQK3{Ri1!z!|oEfb&C2VkjYKLW^F{!8E21&<#S3n)W84tdg9bhS;%(C}cBlo( zt@-qYFzwiaAV^g|DsVNn{*PcM2PiTDHbijeG&VB&!|CwJs|#>FR9!snl-q%ZiD`T` zuh_|66SHPA5oFrFgSWwVsgeuW847Q`p++@j5B>jYyZl*j&)-t$e;Wfhf_5220|80V z)_UL08Gtm__slyq;pFAGxVYwf9!uo!@N+DcsOg-kG!S4Asrk%C?Foq_epx&xXn^vyfzvGAgVwKp6 zkbB5&^a2aMDAV#p0p>;*z1NzuQ;fgN1_-E0wjQW__`H9`SRD6!6WYN(3_L&%0*lKN zmYa#CQl4kgoSE+Xa37PN1apkZ2Sg!?`==~OIFV4(NrvK1D7Zi_vN`S*qH`+7Tf_?u z6`U$of7dgEH3r?z;NUZcViUDrsx9{JX5dp$QOo;pnBZ7O9o8%E1>Cz-W#Vivy0T7e z{tI%?9vFD_^L0X+7Q#7keI`m&9ZNK62mRd}pIhdzgfm?p`M4}t4E0|GG++-hDU*;| zwtNYYNDu-BHs%R2M0O%kDlV%_5Qh^nbbcQWY?GfEZyA+8nhJ{@nGsqzrjVeUnjafo zr?{N`P0{xO#Q;M#4@yGv9GuidT%^(%F?@Q1Ry>WVy+@J(@?zHHO=nt__j0XV$S>w% zD)q5@g->t5RRF1a9<7Eribyv=w>fcxiD5{`BRNAI8EE21fPg%4gir^Op=8m|9M^($ 
zef_;q0aMAyv~?7s9;VC}7>^~Uid9CNya6q4E_kIREA~EYEMA`9}l645Gk+r9v$ka>5&4s%OV__2Y;=uk& z(p{qMdj?}RxJ*wukOv#cm=`fsntxcZm-GO?cf_qGcVOan%cT8ap4953PN`XV)m1UoT^9WU0J}z4_w*pdYCpg_vkp;W~Z85+PNE;Y{ zS!e(tVWRoh`&VfyAQW_b+@uQkz!l~(Ge7}EpA5I-O|K&&#SG5B-_rW?RAi(Kkb1zo z3});&T$eSR37VS|JY?I7R#O3UyD{;?hhS*~o2i+^zXd?agl-b9s&mURAaI?32@DIcpUy~%l*I2%sA-3K8eh8^BY<_s3&EJ651N(L1c|5# zTD0dmnyrxKDxVu)`U)Ji63H1GCG-30k>;2HJORglN&+0`=2ng3l9ei4yAGfT^vI5| z>cBzjJn3{6l0%7+E!cNp?%*gbdJrc~8ADLKK*V4%Gv$|wS3a7msv3WE_!m*eNM1H? zslt$2?D(=_E#C<`2bG664_;gfT=plp>R4)D&v_wNgDUGq425X=?+Fn#&p@sn*{&O*~nI6d(=b(}aI z$+*SbGSQZ7MMq5@rj(t5>;CdWXx{?cAULC=2Zur$nklW#_are0S_+tOB|IN#Py&uB zQcK2=6BlK=0@yF4tq)Kj9W1?^YT<&P+X?XfnkJ9Qn>~k~?5@s0q zDHg~$63oDmEf1V~C76$EPx0#KqO>AARX~$0qQ!BfSZ=sg_vL(*#tj?cJGS=MhKQh%1YHav`NH)6=rY+85*k{J?+!lh923RZTGv2m#PG?$(GS-o zQT#^`#d=>)9o7#ze%ukA93wCBbkB5;0_Q8^AdK?c5Y$V~?ECVu$92 zggeyMLVgnQE=In{aTdeib`WB!Ot*=xRsRJ)uAu)q0o|@db7!B}5k=fS)ZW+wAOs^r zs{lMkaM)~zVpMQxD@cWBuxfFjpL{aeMs2F}TE6n})1Bso2=F$cl&3sh z;4n%1zTF%j6b�-*ha>deB=ij3AhuIE_dy3VT=6k5U=*zm?!2lg%hRpMj?XrvM&6 z^9{-PaT?ZNJii@PNqB7OcL(VJ;KhzY#avj(CNc#koiKZkYi_S^t=NP@u?PE^vW_4k z=EDM+EvIuV6$Z>nR+E{0DkN~RN|qdKK3-3;!8qkhFv{vY@#c4%E|Z(1r}L1~Ne?#898Y1SsZp)#0!p^JMsdRR9H~TWJ&@A1Xen zmW|4mi^Rw+pIrICK6C;)c>qyAtx1iPaKg|@zw@HMz@D`6JU$M@yBbH9%LS@&tpW^pY62Oud6(<; zyMgP6ynEcyc#*7@k+B!Md=_q8z#++lfY1EVC4{pq}}i5G2UxJlggZa=YcYx{5uy0 zEbs}9)yN3zUkE%SGWQ6AO*=eiI@oIeQUHw*ASH1|R;|y=S<=<~y*Djl==^;cLn3El zbE*Y9#sA-%UKuk8u-&HpwNzSZ$SLOXTB2!(fPq8t>y%O9lu{g9*!KlIbcQ4O>Mv-2 zep`ZrgL>Zq4JRKBRj+(`5f7wuTj@P{L|5PVjlkkJKqdG*Te*Bz?~^@f-uW1E@5DiY z{{Usl`{Eg@I{#Jqq?O^Ou7{Kt!eIdT5YUq2xK)8PU4P{_Jva5Y5d;lfP&=KG9-~_j z0>8u%RWv1}W<^VsfN}(~?NqOm5u-K~)&ONcJ;P6?f7_wtDPlRXXj+>=oYRGsDXE4r z@dL&ID{EYzXh-Am8i_Ptnd!cjG?d`L?Q0!7*XAG{F=(AE43zM(u8reKgv=^>)lC#f zjB^QY>`-lZY*=4_kVB+Pf^7Mi344*ps*t&e>wzM)(65*p8gib0KF^)~Cvx7f8^|0l z24)gHIT1#xtaz*ns*LfRr_M-l(Ul1Q3Fo{?sOLsA1kh=$Nq!+Tk^f6qgD5rW%iLMgT`RuMz6zsJZJ*SK$-QbMYqmduqix zBrSu9LHE-|U6~&P#>@8cWC@#>h5vy6&Jiz;7Xk^!%?-JWC{*W^=ZjOjDJ)CX 
zW*I&|qfxwB`No@A{<&h|od}fqTX{DkzoTCwdPv|eS3Y0W>hda@iIaFR@usBNy!8pZ z$1vaEB)(g%FJODUI(2e38)D|`)lR@cFqfqA|4XfmkPvSYf1?L+Ko&n`h(yjb0fhXszK~PICQy;+*cyNuk+sn^3_6Fz07h zIY#_#WRd_x>*w|K~MM;iSp)paIP%9yjD&58u9h`?(^aaLxi`3t|;$Y`mob@^$GwkA;5gbn*)e3VZ&4=E;`xLCbw zWE27;badl*N}>=LqADl%Ijc8cz9*AGoUJO*AYf|9raV~M;lqwNg2D%ciV~b$lw`9r z{LCviwfXVrzB>{v2ipzMj)6{KNPG>1@K8Jfu%5F=+E>(_`=*xbl5j)$HY%KPzYu+c zuQ(Z6bk5q&jjPx6K0gU29~gTeVURG4UO4VvR=hB21#!pW6O>?NlG$;pUH~V~NL~!; z-pC*Fi$ZWLQ+5a%AsY3d9;@nTRnF80+=k)Tq4Z~C7jslSndpj$x!FR z%fGejT?f7+@FQuPGe5E1)jZb^dFRMyJJ-QCiolldmp&x{+hT zPHo37B6tepmf$8gg1u2?O6K*Y>#eg9g*`M)2WW7FU_MZNRgbdpA zf@X-miKV1?5p|>P!Z^^$fZ7M)Ah+}FO6gVL)~hD)^Fl%ja8FE_Frj;6UfgSRCXPJp zXGoI5u=C*KyxCXUW#(^93OONh60@9jQa=LPIQ<}sz3u2fXnN)K(zu|@E>_fx_3mcn zV_Og~Av|TVyl+l@%I>?M7Jx8Lb|uWp(O#SWT8j)=i+PCeoBtoEq+AeCo>ocCJ)xd+ zdzj~VjZ9*A}+Jf)6qK)JC~+oB#%UWaJho$ z-t&Jv;#tgW92}+5a}ZT1idaGe2+E_mCMcbnYGnu%2*DA|TDiPC6>%0Q7cp@l zdI!|Ncc$1*CXuK*;2?U?VDu_9Ldmw*5(XxJI}8hgxPu#RIvzs`4>|>GFBpkaa`d53 zHHY7ZW#T{kg9>^5;vm7&*u=a3;BS{$Nv-u0nA17houW1`BcjHvXEK=(jWg##B1C^Z z?XaoIdIJWofA)ewhc09}+sp(ajYSy(OuZ#<_}A*uTD&k7Oh|CEj;xtVW9}h|VFU6p z!a>h67tE7rSa5zhjZ=V#K;qJQt380dZ(-#j>IP8Q5Tl)hPIm!h-nLiA)>AgD+BO>_ zha((pn%8&sgdmlY&wTjUissq|JU(z2rUoc;-4^1aR$CFJ2uRWIv)Pxx|Isv{YW^Gt zbO$~`BmxnE(MjH394m;u7s7J{1YeS(Ij=>k`$o+PUzzA(vr)hHwoVuOk<9zqQPiXc`Lg%ML_g)2l;TkGcG zfi7+l18anq4lmg6!U=|i$Ec{llacZ=u{UnL=yvC5WKa=F$Vy6Dz${w|^IaO4p!`i= zpp>v&a|Rv?x2W2v`JcN@*QZyN)82oha!2CoptfOhOUHUBBF_y_d(|8C!dt!qM8*qR zgu)p>Ck-F3SAXz`+v*}d^4&39g;JmZw(LYoKtacHdpvMP9 zWQ9p&i}?BdU;CjGsG3)L8)v&C z+`Gu58`k=lE7Gq{0<0`%I-MI^Fn=_POUsL+(NO^R@CPh}I)b7Nld{b)_A3`ua>sAs zl%_u!&F21vo70~Z;bsLY4AjvKjWMarkixrY@F`E;`gj4as5<4l-hnY_14vInl`~2q zBm=KbR$*i=NnC!3<$2Sqb1l4E_|Uo^hi-GT{kkuzI68=Q!5B`=z_URj=G!K$_gRKR zuGmd93PQUCjY-q(f+q5;2WZ6Sy|_WO81-?53)r+wV1*MyxmQ?_bC%;JH`M zt+4*tSbCs55(yutM+oRId>khc`{M;?Aw@;QPR~u$McG5#M{z0IZL-}%Rw3CT!JV5^ zy{ib|xx9k7%PiUMqu)L+T9~|R58uF?d1pt9TQcGHqdOG2<}wr5HoLBlnpR{a6fct< zKT6PuF^P)QR#YBkDdEJjU8t-vLtMaI%4nkI>bxZb-=6B7Illa~#rD(sdAIGH${)+U 
zT$tKb@6zRR^lSAxxYm-^1Qv$MyX_8qS7_7y2)XG)m{qb2t2eRoLuTf3!3=WgoFit_ zl#k9Ki)^{ag>Fp)_%X`cEfr1fB|r*3@EZSc^2zoBj4@i*yMiT!;Y(Siy-?fwkDz@Q z0|sRhLE%KcsP*E(ZJ~=L#H3tlF<&58!T+boHuL z0BORmuUycR-vcfdL}DmX-j%hm6(Ih(gk!UJ?!u`$jZnr9pvocWpK_%D9r!;oYW9#2 zDfzy%)o0bJRo&&&QyWn;Cr?zGAF8pNP;nCFXZ67BZY=0t9({QNa^jN)3lHy`ADsq5 zIq4p;+2N75|4sW;40(2u_i)E-`NZm-M03~jrI8F~LyABb5 z_tGy*n+;f{I<1&{o&qyg2J=-@G$6vRWjk`s{q|rjq9h0yih8>@(z6Ndi!EU@Y{ag} z$9X|Z#8l15Pk-}?qH+9xf{!qp8E-BW{Sx-jTjWT?lCDeVi>?a}&-&tHOTj!I|@=ZwECdir8s+bdTX?vIe3u8PRA zICwO+fdx7@qOb0NZ5OwW%&cb%en9Nq&05z?#37?VZLbQ`MOBR4WZHdlfrXf2Y{k~1 z;tISYYEWD{56hQ0kxM9Nf5mH9fJ`e1MA;~{;=h)h$U6OzC1>gXfKi-rVjRcLeDBWWyq|@Kfph(lmwC*bCO13> zjPj+>dup;{HRsKfqRtv=icDg)l(T~vXsh^?7ctF-tU(&^TGOXb)Z3bM)qZx#`u0etnic)f3cSmdh+SE0oXJnMz@z3yRmE80FUr+ncf%QVz zKLq2Amm}@VZ#I~(SV0bs#K$H#UZ%=-R^?p+?$8P7GkcFKO)E#yc(oC9`nc>VCIeyT ztWi?WgThic|Mqa)zLz)TVtqeok_fLihbhhKM)7zFaQooPb=>~6jem`7b9e=`;2t*t z$?}GELa1iJ`M=rukbLwGP705%b)EnC!Xg>kqV}ml*dBeBp-S$kW!WH8WqkcE7f^^2 z3Z*d{ZeOuQ5NOgIkt!u9hR8AyZcmREo7-JVf69xsIPnB`$@KOIh3Blv*QXgnW>Yl( zt+yxGR1&5_C!=&GO$$#8C#@`~n4mC9A;|ZhYh>8>u1${2s9GXqjC>PDW zx`X;9FU#h85&UX1FqJwY(sRBrdjrU_G;?5R$vXenk{!FwfR=2O1*r7Q&Wmo%t!_Qm zRvHCO)bI2(o1k-r_qY?xC@8*ECR)Nrw!|$Hl#v#^fYA(0Usd3+!+1!Ew=dU?(Y;_N zC!+q)hFh8efhn=S5SB42%w5+GOl>01Wj!0mN#=)X!$b&yj-iBZL@7aO3aCii`p-sx zN6854IU!q=Rd49#=62*r-F{i`Wwzd21(PqD@INQ3XT0M85I7aVYQiQVLLD$W;o7kt zxn;WQxWNUA&loimkV8)9=rKVz@owg+cf(2U-q+XMUW#Pth7V-fccdz>>669nmz}M+~5flsJ3|UgY{Fd21jq3#3+0M!w>S3W4gVCSG>BEJSI-d+Is`2nD>z10@G_iIg*Ol;P~FvtGVu%F?B~}iT6OxekeF1G;Bs;h=~7cYHIc# zEZi)(q!IbsXf=5fEX7}!{y?Q8KdsPFr2^OJ`CHmFzh&MED|dOZgR zB;62=81L!Kl9I-K%KaG0U)_9vK4_c>Y9|}io!6~jFH5HrLNTorG&J9x>z%Fobnjj8UF0x2Z;Kj*EEEh0aaW9{GL5zo z!mHx}>C*uV$J|CBqbQPqvNn@q0!J-w#Bqu(Y3m8{2S5y7ovk^6oChf+>)&5`g(u$c z6_8a<@N4|seThOjLD3BpT3lq&JU@CoHdP^hSjU&$&LE0STnk4rRdsc!4<_Xe^$NjZ zEpsrm)SuQT`EG%Z)vC;-3D^nGx^8=o%!@AqRb&)mcg2RI-k`*9%3=5*sgHD6DJ}48 z3LzhxFnRKYExIF#tSCQqHl=EQpa5RtfTPhWKRHw$$Krr!P|o}@bmhhhnh*oY_D7IP zJjs$*T@K!q!sDi!)#eczf%R|>o@C_uguH<2j%=r6#U 
zJyl=^ZSAWAo1OPJa(aK1uhcEcivy8`PSZL8V_N~u-KDsRdLU10L{U(J0sFRiueE)( zmt$d_=JD=96L-ofNbJ?x=h~p{9cw%+OIt`+F5TK=CRZq|!T!n7ga3 z4aP9D3mJ~E9$Ep4EAjTuys@GDtjOK$PC*k6gF;yKfHDV}o&vn5QvN0e^MV;<0xSFQa4@abBSPz zy}j#{!mev6|KW{d6>h?Nk7b>zMF4cq^Fb(q^g>ks`}<41o#j!NP40ROE+D$a`0C=0f2Qzf|-l<0C678ZKGGuw8HAR`J7XEC0| zOn<@s3oq=-73AEwRKEDJ&JMdvm9RD?p2|SO@UId)S0nSR?p+uqQLHJ}De)PJZ$r=_ zvu)7MQg890cjsMZ0dS&V@_CCh-h4`~e-APh%~#eaX|N*>qP`vpp8qsF-27`FupdyC z&$qbrZ$NDicU14dif3mh`SwM<--G!w)pQuh^8?YtY6)FdH1Fi(Mbb2!!2Q>zyb5Po z+mbeUl*^D-5_6G4Zm}4*fP<0=aFXn8Ks_(bQe%>h#{t*s!Cg&mx|vYK-bppC?F3fD z!U)3@U#ji4rm001AhlIT$7=e9bX9x59uq(4@u?43^&fqJzK}}}uT(SPn)vYASZ?&& z8iI4sgOjq{54{XRkTxmU`%HB9o)mWHs;!^&{kNaLnC3oy*i_i5X}pyncd*x4D;E^7 zTaUo(niREwej$L03)g&{1N; z;^H8G<(qFlQF=Ni6sJ>b;PYzp@-oTqGAb4uW7VEMbH=$sW!DLhhi~1FB|X`GrsVbY@KDBo!ey+_?uq@N|zFkPPastsg{QQLmUtg^D z#;bRpEaA z9`rF)ll2>Qr6pUmdbC!KF5oTMq$NE#2RE51ek^qW0N4dJX);JZ^WR_Cu|abh*b!4J zmxGhpcy^urQpoRsWLLP3QPnJH0&||W8)_?D+i4$MU`%}aR1F|xi z#A3MmV8)Sg_~g^TLd}B9Bqq$m!n_GzyDVKhJuC4(6@Wozc}IS_egCo(>$(whxRrZS zy!=+ScPP1I|L6y)0|QY|n`zWcK}dl3B&`j;N)mGz6(Y0Kv!dMJy09xakDnoOP3)2H zJISUMS(%>J?11Ilg;EQ%&#vcu#i>ntC^(379ypfvdd=C$kpjf^{{8#qGLAEF15vIp zB>%l%=153+q0BM{vJGS+cc^z6ty6Sxa1UAvO*_;2v0K~)cu#eJ6c%k~(H83_BbMIi6w#?;_+!I_uQJM-7;;k{Kyyp=V z477ZRss+2VBFIT+4_yFCTAn5w_G?1f5L6pSMBFDgpU*+*23yQj!R|XSFoLn})>8(7 zF0npiaRmoKER(DzZqr0V$bWn5k-saQ<8CG(W)QI>0XJV|-qE9kSK`zNgoj(~q0f3r zqfP^<##^R5BHVdxnrqYRV#Ov$p<74D}lin& zh2cq$Ku)czND<^=iA_Q?f}(^UFCb98TFfUiF;Vp+QkbEY8q@74Zdunw4>i%I(=4#Z+E}9uiXE=(+jjw8}-ivTRzz)ai0Cm2f#^EF#u6 z-uY-c#zNUpI5SlbjUyV?jokrG`tQsrA)qYjzjR~krW>_F_YeSj;FNv2gKB^x&R?1( z=>jC~!At~(mQQi|)q+9st9xG4akZ>DW}ff=<1VyuJ8ILXf|fLK!CtK&m!b7oV;S@jvZfrJ_QVRW(~kh)Sb@4zXPC5G8@i-N{2fZdmbkTk$2|qty}-n(fWD@JHk_RllN4vVgqioEtq*(&Bzvy(Yw4_ zE_f7!5wP~aHk28SjgEAe9JoIj;NwH|vjixU4?*dD4r15FLY0G=DQOJ6ap^1)(*8s@r4l2EPW^`HvcRW7^4h*j<)y z(F^2jU1uIMdq3lSbL{&78T*lShZedS1thCmv5ts!6b_9Zxa#En^+8-gng3&fr@aD& z?Ivtt$Uze@Sh$z*i8T@YruBJ89p?Qi|9HD^k*bLKbqin(J7AxRp+f-LY(ztr!PwOD 
zEb&bUtU&%%N6LBZo&P$XL+%?PbEDiX6Kq4asp&(n2BquMlN5?yjkR|Rdxw7S{ICETKsBETc@E#6KWbB9pnT$;*MYx z7hGpVzXv46`U(PH_;dJE@_6qA84te|xV|BcCjUZ8G+S0beLB`4OvaW%EkJ~PR5)vf z6NSwNLGhyx0U!(6QZpPi91$WkBSRs_FcN(b-0C2wk=s#UH~{xu~oBH}pt#opc5 z5NMUOdd5VVudP^!W&|&F&ENIH{FmOPFkNTayEvVJOY~}$hh>6FAjhN&^L~I0$5O1o zTK;h#foXaosA{gRyg2GtU)*s1_6$-&6k+KRSqjqvqwCdV5=;I>sP#aF<^UotZIHPg z%oXH!k+uhI1kEUgolKhV8Wt3>ElGOYZRQc~{#lU#k|-jKv0Jpnlr1M=z(Wm#Hk<<6 z!P0K+$k@?y&ne?CkI=g98A|Au(Xvp35Sx8;d!c@0`&&c2Py2jq_3+3tuzTFGagBg-H;WT<2 zL1@){1vu|q6*da9ZUu(2SGowox2UQ*U$T) zRN+PCkH5!`{e*E0F@%`JO~=2y+Ip&NW7CJ>nuC}IV$13Qyi{6{%k9SseSlKDcZT9q zbi#;pJO)pbKeaap0|iK)-^%TrJ@F)J!U8M_9*$8R*?`g~g+H?ZU6YLi6Q-T`2I)*Y z<|g2P2dOY?mr4vJq%($SH@h(#X~qZ~gR@vaQD@n5TrD@@yYvd@B^0vO*p)wP(fJ~t z0BvHX>jhn7wkSvn=(J|-eyN4iXz3c`!eV=d{I0MOS&1`VKvs_vSF=FDV4iB|;R)gG z?jXEw&uH6eZiky_T$da$iL_NHQCNv3X#Dw_B|V9ZGMddpnx(J2)pwv*CTaD6aK^}*ZY;R z@h@`=i|~s2zUcH^pypXYy@La2vD&swZGJY@{TZjo2~f2m039N@5aa>?=s4amBoSrX z5W+PYVm~g94xp49q*gV`T7@~#2yO67M1d3h_MCrmxoN~xrX`xM1!T%si zQ$6V)z}9=>hKI~<7wcV$|BQ1U(wPL?YpXDCD+0>_lP9tnKdOBxUiTo@gUb=<4=61F zNIV>=1aaK0`2@57tKCTKVt*zI@4vsa4(hwqOcwKz{EeyuV4}E4QcCXHl{|E0t&;o5 z5&KNIpy}g*Md-`Bizn@K#5-qtla@l)-aAKkuiMl2N?TCymY)g1NZ8i9)y!O8%tyOt z_$Q8IvQg>XZTMPsEpBt?f$-_W4*TxD0i|>d%r^?m*qgS)*|w!4ShvTOu{`5@8^H0XCPmmH`iIkG^<7G^(Bwg;A_+Y3 zE!tL-4xSbLFZn*25!WdK4YVI- zXUmj}*t_1Jv)Wf8B#l`az|!bydMN&Pp(!Ema`#P}oz<3cywx5b58=Z|*?-W$jM-)k~`u?MMrGU$oWI5gxZ&~%@;LB(I}nL%op%W5*t{!zwxV#}q~j6W3tc_@)B+&Tg;y&IwMtx^%cs28Ln!j7 zA7@)``~hVgstFHKbTOP;iscW{j z9|Lh2IkdG^pxeo^w~!NGfIKE3yPBKg9zY^oxs@JkHwi{Z`pp=$6h|GQ1;G#ZcsvS2 zLiDwhJ>Na1342?YMx2A*k-wlZAl!zsdC;1Tg))P7MQcUKprW&R9V&-DeO0UMDaOs* z;IaQv@{bXkt}NzkK4{o=azTAQ?6bx(y;~9*WyRH{?L}l~HC}9T!$`(`nQlzx*q+=! 
z1~PP;?2c>FX|Y2>$sNK_7Nh63Mtk8p#Pm+o_%KFHYB|VT$G|HF(FTxPYtG)gZ)`B0 zgy-pSL&%Xt*r+iIG2P04bg+ldR+?rHDL=F!@E>AgM)Z&KbWp^}dj7=WOs@H8w4oZc zjqV}69R#M^rpsHshy^z9gtUj`*DBv9m-L*St*Q;18mWgDCYK*c#x5j@JW;a+gAh`2 zrpax7G3l?NqH#fbI z={elS_YgOl6Ob~Gr$wu=VtVz#e8DiF82eSw@U0Bm*u!A(n_F2u{rV#$EEDPOBo-{l z+K%TBj>riBId*w7#iYi^rgvMGPAd$+vLdp{2Vcc^Pd$I=FCWb%$0bCc2l-7|?4r_Z z=k%EDl4+~}{O{XXr^WX$EW(VyT1KQ@SZALt$Q6Vq?#u2O3#!6(U(+}n^ExmxaU{37 z9#wFD(gB$xd90)SA7vh0f^a$yKbUv}7(IUU*P!~FE;Dv4(oKKY>#2zzZU`jo}NFy-vOTr%|U#lazf-rTNF0%*+S0}?k zrm5|G7*d!leu2dWVik!xaDUDjA2eyvfg5jdPZ8k-+*t4jVrNYq)qvZb!!K%yGl^}u z(OkY;@rL0BzGJt>mytXWmq$|aB)09%7w}R7R$YjREzL@D>_u^s13qrS#q%ViU+OR$ zl)^Q)jk7qGqi1C8_OwIwK(QDoh#>#D2y(*61$IWZ0;g6pmSzbiG}B9*fb(OL0?dLY zSm5)7Uvh(M;!EdDH`@&%Z0p48U6AYM`>*iA+N%BK`g?T(KqOxuF}z`*{hnH;5oU!3 z_RwwtkX#T2Y5Szs8 zcz$I(n75u^bJ?x-33)c(n3RwFF&n(67W)pSbq8|=7^u^ks|!RzHcoxX*V=miob04} z9DOJx0|2MlW8Qpg@7t(~TCV;%OvZNYn)Wv+DaU3EQ|=GqV%(;<)atvcd)E&X+SsL_ zZxp)39S$UJLh+$yUpC4|aveqrY5BJwyeJhBJ%%$vTu7sDMiBI?rKjnBDhyE$O zln+XHy{DQf%TpbLLT9(=S2g&${*WrSYFyos!ykSi|GNU3_`K3a3w>ZtWTuD=xxsxVK6|U2yuOmsm_pJ}8G2 zRU{|EPgyA@vUDuRLgDL(nu3BaK-Xay4A>*LnEUuRbbj?keYIgL=hlh?7>EN-*jBa- zYAe~fJ0@VRz@qzWf!GbW?M$OaFZ4O|f9Owe5n^!%K?$0vDsPoFco#ELD5QF~HD`

2ZQV!ht8HSz=@OdAQK6c42I-muB7yo8}@7E73w_F+b`KltEI>*kk-^lZ+t>!Ur z_m<#9u|*VBd|0*sPYAsoXl*QzsMPD%9Z#Bt+pAF`CuH`lMWAY8mjXY~zMpEck3#G* z%v(}W6ek@LAe740aNzmu1Szw`$H{*$)(Yoi#rf$4O*BQr5STH)?hjP4*A*%;g5}!I zEk7;}9^PCy-vhE{8lxm~P^`y#H;+Tp){Jv_RkcuHp_tT>e`fLFN!Vq`xTN9%C-jy9Y}!O^EtvLSZjNR>Qi+x~^I?+@cZo$vaJ zgGs=CCAC30@%C3TRFpVAM-=_?<%Q%?2q!HPRDm0WEN>d16MT+K6o=Iqpo;+1OM7Iy zF=k$$>!*hWO;x}#GySt?xOFC9HYMb-6o>!PpZjzJdSFm&iHnqCLH_=2bME4c^bSr8 z5BlB~>F~R-XN4yy5b_TQ(7odaf<;MLg(*o1{A#Jv9{dWh_a6u}Hn-az7su_j-@D-MLJ9G9?}u5yD8<5ma21SSL_kuZg60SK72Yiymmc-3fKXd7 zcQS+yiouTNrmxWB-``d)6|?y8M4<4rA;V*0Vm`McF?Jk7NmmL!tzsk<#P3A*`y7Z> z#}O`wA>`>6tqWEqtMz!Le*r^CS$IK~Q#ZkNeso*Sh4Vkkk)kttI5@2l%szD5K`5RP zoi6Cp|A4FUrZ7hvJLmB9m(|eWU4%@CvCE4!g+Ku+F=}VQx`^4E&7UaBL*}|97zq)7 z^%ID->-KEbP>oaMbv=T{#D0WH!kI0#e|Q(yLrVY|O0{#>)NiB{A*DKDJ!p2?GG?~1 zR?r5O|Bsh#bKvGgiSXyRgYcGhP|3@kP?fe@I9ACE(JO%kFcH5SbROprY~Uy1Np@K2 z4x=wx6BmR7z7aZivVfgz*NHO?)26Ri);g%fQa;3yFW z$AJ$Uec_0vAnzB-Kpw73miNj@cy>3WDh&GNv6&;=c7vphdiJcdZXI~~ECx3Ojl~f^ zjMM{vluZTOGs+sgynq`?x#HU;#H@On1W2675oCX=ZI{VGkFBJsY4NqsX{iyM=|0RW z%%d^F%6=0%fc?tvd4iHcgC%vV7(+X|n2%-=o)4Y$oiUw7Y1@d;_5eo$(Pzks9TVWS zd#*mRBxVQhR$@4SvJNH%`k=Ol)0I7@m$=wDiEJ8O*rq10tpffA0)WE##Vb-UM-WAR z#*D@XepWaOw;67ey0(PoQABXe9cUJUyleBX6VTXvH3G6!FHq|Ju{9L3^gB2%!`oNCyZ}Je>HC)s zys@)e)phK}Js&W(o(sxdmXTd8C@q12)w6rcr`Ntdw+^fb5aV%PI^lKW7;DB0E$ELGVXVuo_ueta<6bgoLJw2^2GuS4&EDUcgIdv*ohVq zj5Bh;Cn6>62>zNV8XQ-a8m5cm7(*Btid7xyi9K3?r26=F-|)`e|Lx-j{3QD{KvF$D z)DyGvodKBkvlLz=VOzK0R0m^qWHoHi!y7PqM%Kxo8GlxVdQe&F-~h?Uj%zPZe}8TkOh&C*NNX}peQ z&e9)JkP%X2IpY7n%5+yj|J>Sd@)I;JjPuS;L*J_?c3>62N-}W74H$&u1Ftf~rsxN~ zYo^_(8RtWCN*3t0?)E`B!1Y(v$C2!L_*efD;Z;S3}L6$Plu-+H66T% zPU_p-K~!+xRm5T+(ZK7{jYXuK0cJa%>CKfg2p)*)?Julh9~Mh;|ApD>xj8N(d+W1# z!1hXwSur+P2;_wXWsEn1yKI2J)lLx$cbTqLamM5?48E5gTpx4Qb9;u~qna`+UAy(` z&j|-j*E)KN9l*mOh^$w-^b(KmRg=EaZlW9Gy#&n}$_XO;kslCom8M*Y0KAVr9xFTR zABBimTk+B19}bk11!vNe!aF_Z=`2uwIeyxLv7pmHegXVS|AJX16k4DSfJu5BVPTf@ zB3oo%J_!S-(VxmZ(%}93i&&eaOE#}`C#DCWIA9?ZfPqZmQy?v6F_Pwa1FQA@?!(m| 
zWpE%7oPWh+B=PQ_wn{Pv39hNDE3Tv1@c!GUZsd6^z?(FCn*igrKKQB(7IE4@-R5~= zT*0xFB!_`Ad>dLdu4!rGaI{de{?g#LZy+h@#{Pr-RbT3gNuzG)_}EMP4}EV98L zp-Pd>E+*!77@YuA+5GL`QudgjbnUsjUI^e4I4gAjueH}d21Kx|^8MXDgF8{K7yVH@ zbuw-e{C3&Gq8_>Mb{lOD0Hq*d@vNi6xpvPK-))Y#cpB~cOOYOz9$)3TR<%=jCc?_9 zq)#*R@bE}U1)Ga3jUfEQcaO9@DRCa## z_^4uUtiMQN$#9X3gCn$$XxUBgq;!B5Uxe0nnvA`3`*CY!k5ka1Z-v%QPvNe&GG;M) z!zx@~^>^pzL~nhnC2zH1-Jo6U)kaVNq5cB|&Q!hgF7#DKd0o)b8+_Fa?S28oFEjGg zbOR;0VVOXtRls<9)0K2%1F;HWi(Ywa=W6DIk5)-oCe>ZQeY3vbV4IBjA4_rdl*3=d zM)D(zinb6h@#rtV*z8kRO>hqzzK&ApaltG`bXxgzEGGQt{Q%SB|}*nfJ~D4O)a&$*pablIS3{jnt?8;j!@)<^dA(R{9uC#(`BTr(9^b z{G;ZAH0Hy&+?bVnix0J&T+mu}K4Gnx&F4MP@i_SPjA7`8aZ0;tW$o?^&$N9G-c4fw z-ztF&ixQX2mqApCxtCfIqvua-dt{oA4!aX>b-Z965@k>;j^YHx!jx+Vrp6TClx49} zt~+sP0X~30=|WX2xz@xROZ3L`R)1Rx%nc-DT|^sAopQp%&$Lxf-nwKGa6I&>t?#6D zhP8GXg8@K=1}VP*eyq>-){%zd9k%tCu;f8p9?NW45D}unzG-&^)6EhGz;r3z4$9>h zAjfdTT459({|s(-TzJ9irEueeD|0VZUKK1I8}?c5eEO+c*ltpoDzrqJn`%^x*V=iT zv<}vLw~Uux>?aF)2tJ+nMYw;QC$p90#pOM-&YXz7i*v&!6ourk{=DHP)Np#fc|ie%yu~Zz#P53dIFWum z;N9>Tw$I6bu(|S-Q;WZwE68_16G0wnkh}Sai>vQif#ZgCRn(|z*KM@99#5?8{~DIy zS6p*-B@3ABF_4}=6_skKVj}Dg1-fl4eoCkKygnde17OvGka(}|uw#NU`Ck5e`i=aZ zSR1?ezAukEy9qD~k)r6!TUrLY>VF#Pb1D-pBtV~c(DuExUMs!BHV_yST2tF5*Fl_V z%IJWl(v>GVyC>+#T$GS9C_jW4dZ@6rPw%iJX6Hk}hnUE)9k0D ze(Fj}Ok_$o>^oh#JsT1XV@wkuT1sD=)~-~3f%;2~-N`|9x!M_dPmLe`U>PtBp`0kH z+XiwX`uE_#h9v@x$B8a|WQj`oudVngR*eQrQo^MlLq;>U@=V(>XD z=4P(4sO4XAZWaq-n$ZkZ%M9u4Xc;e!I2_!&Utj0cb&TZG);E6obdsbb=-x2fvA7E6`j0lh~*2b9P2>jr|x$QT@;Rm9SR38w&#V(x2lM#)MJ4vQZT+<)F8ZM}V#tZkpE;cM@w zJ%)Np8E(2dpPi8B`9Sq^6TtkzdG7l{d}07Mn|;mj@tKTc)et2b=0(?hmy|5oeS0H` z12F-j+;Z}UhR3~YPEvXxuSbm;5h$IST3_QhD;|8EF~5dc?7?IIZVC(l%G)0+D{f4! 
zX@O=%7J)?sH$t^^@8i!xeGB-*una%HS@~-alb{`FqGO+4V8hipmf|gU%??6@O1*|8K=nLQJ!usQ_Bze z;g%-YcNWZtkYWPBg8cc1I@sZ}?aDUD?J0h~>o(>L#gL5RQ0cB(HWPcp z;~45(ut!kRI|7YMwm8t1sRDwi@3)V<|BtUPkEeQV*Z(3^6pKhD!%`WNP>Led5=~g8 zSwf0ZHX>t&goQ$-kq{M4D%D0AGp3?hN`*+Jj0ur`*R8$xd(M0I=Xd@%`+VNB-(s!j zdG7nZ?rVU^bDLi9%pcJ}Hpqijn0X$;hxY0(Fv$H#!mJ)+Yg07v%>%lL<_Ul>f$`0% z))z1|EZF&LvN*Ts{ACgNMU>zhV$!kA46$g|ye=P8``xOlHS9-r-aN`=1w!>66i0HDhbQamtIpWZ zR!0qs(ZP^u_AyKN4K1M&=|BsX414n&Uqg1~g2yuxblv!ASF$62&9I&VpSnEl#PBV% zWMwb^_zC>-42aaUStN^b=nFVHGu8RyIARhk79T@GQy$M%x4fmQc1+fHE9_ad&#v8R zB%QY$z%A3${pa8N@L0Mw!@f_JS4Ls+O$#YlXN)B2aSU))CP^-DPBt27c4DE?J+0zU&kFNU%m-uid1 zV7AA1lq_^Z??ZLn=oPzi5PJg_q^jWP2zWoTnsjjSa9{{nt{iwz08n^`l zpu!hDB50mR>9!k`W;_zX$q-!J@SNP(z(Fqp6w<*c3PD0}s&KG4D;oW@wFjyd31lxu z<2!R`{mD!3u>3xsJ;hx8%bFLKEYYc-KjMwo!tbV+_Fm=20<#g?z#mB&gGyWmsHLl= zSb}@ZIKw z+Zfhb3E{-q+n;dc%u-9ZEw*y{#uwrc1 zKj_`IE9*67p1}c5t-56mXYx2<)5wSAODF85%{SR5x}poS-W-CiT6^ng{I^he!MkBW z)JPu|o`;+Ov6Q=DzWv|cM|$mA3GSaN#fL%Y*k5DgHwFfDh4{6H(KV(6*|L5+%@Q!P zl57>A)EO8(Tx|Wo_DD%7){<=lIz+fWW;Dv21(;V4e>wM{FP8yQ2i^REsty+9%XROY zn~yg1W=d`t<&cMyKLtdy0Y!GDUh~iAu=5n1IC~_r(yXw(tI(X%2I*Xm?;nok8Y$7B z;ljnATo0c%r${Tz!005iuL)|e7FBg~MqD*T;<^5HoY?De)GFD-QP%0@H8=#P?%+G7eU_tM7zvSJxW!VA zkD|J|`X*iLJ!LZlLr2<3oVjiE!&aBcEYpiFE^#IzLI;s?60*2eS?FN!%-VxsjO4xi z&k9S|O5VzX3z@sRBP_LH4@)lMxCh!(%-7$67YH;s@+234&Oq;W8H^=%)MSi7j|6G_ zF*Nn&GZs`JYag=+yojYgM4 z#1yty*ZzqmuH#Zt?n~|vA5i%FpX`){HKN)*A zFwA$hN7ml|CPpwY9E(5QR{H!x(n3EN02ev^fw#Fv@Q8!QQswc%5>omgUKfI>FvU5< zdvS)WpGr&MXm_)On3x#-tdyW0o zUme&K*ixDhwN7T{(2O6`A<#!lVTbJ;=#~-wi)Vf~%?Y{-EVvVfwP!_Ux*l_vy96xQ zyY)&I2>{59HZJq&b~g&PXQnIIwL;7Rwg3( z0WKzGbTPD{`CRgIrth>XYvMB&eCqk(k#{0BJhV8XhU-#>??omkccbiLt7i@{W0BG4 zZ?XVtaUSR$%pyz@=cU#u^uo1@f_L>U3ODwYmf3dR0#J(?iOe3L+gTJkTFOE5J}}=X zTuyUzgPool%iKoKe=EXbPHpH2LDtwTUy*;79|lMSc*C^Df%77ZbNCwM8Qh;512ZX zaO%`(vc4iI2D{{^p=-Sn3@nXw3e@wj!Y>!Iu00SirtA9zZwR_S-zGE{q(Tarq#KLX z-4lb<@jWdVo`6KB=9Ya~8IS?;P%SZOuA>DyZ=fme{mP*$@>{zc&gnE1s%q;HFO%}{pA&RkewZdM10 
zHR(%-k6mR+UVCe+h|y$U^pUojGC;KCZ!ej8r@^)EfN9)Z+62F6ZxuJQfjn=6D=4GD zZB1YBH}|Cj_F>$mZ?kO-)zp?Ij#FQ-5oR*r(h&QCjvaQTcQJ%oHMrs$P0a-T(&8mpkW>oECc%Oh1bdd%<|7FS7|P z5kwRwT`}-$guu=mK5}??(Ql%mJa_8IUX9t$=M&yctW-L&93 z5RoAGP7zrk#cIBVhk@*&1d}sZjYC2mkKC3S56`nOFaPx6JRWeyb-}6hk*w}H=owJ$ z5KI!IX7&UtIB+6BA|^Xm@P}6yw5T;2U`Cz(ZW8gKVe|pMI%S^NgZoZ;AGHc|>-VJN zgMk3dIWE~K&G)WnxK;lwbaNV(o5=?0kBad7@hPnnsKk9{PG@3ah~5dJeUe+`udknr z?MwYm8wR3#F^Rr{H46GcDW(v8)MNZr9soyAA@?E#^a;L-H^G*ixLN^K@zy-Rx6400(#C-J&*Q!jSdisM-C=hg1ei&W|k*I`kz!Y-D&V zgs6zYCI^(Nb2XoK>L!%h8-oi=>pN)rgs@6ndrJ|UVTCJ0Xz~Y%>j*RxE#v4Qk$Vx5 zYTQ3c8r7SZB-+nffpO}5R2IZ3P3y^9g-5y;h(M@15*sQy}w%iS`x^6`OI}o9b7pz(vW)|r;i2ZX3DQZBf zT``mhSsvuPVW^V#YNSj2z}=#*3TJWk!QPVHz6`eg@(|i)cw9B+Wc;H9X%4--uyF)& zS2yUuqzoQ~+*@x=eVlsGL(@?E-1By%jA?|YseLr5vB7R+i-u?lzUg|I43TxYd$dV$S%-9-ow+}8*WLR=(KP59bvIz_smqD zG8|LWhfkc(q5bXZu;S3Luts1zOb~3Qa=yIvoF;32wI)p@{Kh)<2}1D>YZt?OE2RD6P8x^q$6yQfu^aDNSHV#c6Qm5O zozSK}7>5juM8k@jaaYs6?8O&MJgNS7c#z^w;sfFs1#dB{LvJdx)X1t3Zw~rw%B1cp z;HwC@3&IN+Dtz^?#thhRIe`8PJgdjKKD*a%-k&QPAs}O96t|gkl_&{tazl?J2^CmO z3<-H4=fUC(uNL&AqY6FapQ8PqVezSnUp-}CE3PX=(FO@u2gG&DrBo4D3l2PjK^}WU z-G|Z?&>1*7rI@Y~GQzARilO>0#0^a~Ca&X1@V?p1Y$T<&Lih&D4Y=jpb;-K)BOZgO z9qPQUTjL0(2C5!k54bo%Q0Ma_M|b2@*nL%T2L=YVz*z@6%v^u-nRiEVR-hG_3%PLn zlll>C-=Kd$$YoXgOH`AzOiWF;Kwzo|pDdkJ%$81|fqr&w%mXg~G&@@3p@qDV@pN@n zqCpg3IQ}60ajW!ijK>Bq2Awc9!4rgP@u$!Wdvvohy9Td5r>#j<=OYloZDZJK`}EHV z;Z&)e`u$I~T0-{c!#pc7MZxBf!ny_H9$-ygZW)K)t;klh&?W0V4pkZWj>j)m8*_Fv z%9(@P^K0lQk;&^ZpjN%eyEH6_lSk~&2ykJ+rH_C30TwlmcN=gR9&FnUrQCIeIw?Fj zD2!y6OaW#9j&Fa-i%72*_au@N(=<)0-gq+X=z?QnEN+kSI2l?@#Qs@S!uUzUgLtGp z=;E{A9bCNSb}LYIbR-vtrrd#Q7XfAzKU{G=5eERgBTDIecfT_)f+tr7B0O`^R`B}B z(|Z9w5_t@f;=Opb-lk#R)E0g6oMW+V_C#XhK=NTCG>K<3^(t0MDu1Z9oB@5{0CwYB zplu-MU8WF8J3|S-qq4{pxA4{$Y3iX~{-wKz5t~5Jj_yk`@kC&v77*(DNBMjG`gd3M zOE(W%7~|1ZmYMFS1gp7z^R6iFh-#u zNz&f@C!n1&Ow@LB;0Zh;Hu5(kgG=`E-5S(0xat-(Lf+r8*O67gxU zP2e-z_=nZ`v5*;V}Cb;q@2f%!! 
zRzBwUKRKb2Yjs#sc8$iT-4(rnZeV?kf-{CREL;4 z*DI1QV0x-Yr&-nGsL1xzwT6KJ`c?+@9_1oUK<=o(TM`+Ym{6Y3j$0S`r=(%0o}QjA zyD9C)Up{ln(p$1mjg0U^Mk(J}T=-R;IGB%P# zN}ht-p+Zvc^m5CTA+n|=d!caRjG2kFR*h|*bsc+CSU8UL&9nK=1zp`6%@g0*gUTjf zL50lJui&hY!Jm#dE(YhDufE%dz4jl%P1g}nU5#G(TY7kSWDvz<67CTXsB|&y?20e) z!75}Tu6h}I&1qL>-PxjPe4rGVJ_X)F`B35LjIRcus3~xO)qX1p`Xg&aO-)O=2UE%- zFlu5z08QuApby)9%)dd^^4UKQr;n60<`6fChQ=2g|2RWz@P^Rw9RRsguqFIhdqbI2 zBydC?L1KP9v`t}ix5ftDXipQ3pyf8B@aEHM02ide33_00wNibYNx$zt-RqG&ci7Vb z;Zy>;3Rr{V;BeOZh*CUi{0g|X8x}A!7J?&2Fy5MF5;aqX{C~^B=lwiY`F1-T^az@3a>nuu3`UQy zucW3fjjJ!4$F9V}FdLlUdI%Z>ndj?P`gG|6GTKKr4i4J)v|_VcN)2X|8TA1D`#cV4Gl{--CJw@cd}^BiwHE^W*^t+ZR_nO+FLTKyL?t zUJs^=V@)Ys3;3&5wavAWr0MZRaNH10z< zfcJ|pRPhIlLkb%(G$9y!#KEzl5unGRtAa*V3cAy>~_Q ztjrDl&Jsyt+7tWARXdz$rT?lk06NWqX(#OCLe+SQ{21zH!o8T+%<^mPnJGF|G-b(=z@oDy=aFYHDhmbg}LWM~Wo<(LV3CDO;A*y276S^62p0mrZ<1f#8wG$j0~6RK`;9 zE>BQ_R;Cb|Hnk|Gq?E#_fN5f@-FY(>Zz9G_OaM-^PpL0*&zTTYF>5P0WxlM(l1v7? zAbN*>FBJ`I163_L-u$_15^{QH{u;20tCMRc$MH%<+cVbAC&(i>WlP`y+1F-T#U^I zGAIr*p#i!B{n+2a3=};`(=m0SjrEcLg`Ft$V8tNKL!oX5C=8<_3~pvwT>WY%axHGl z3?wGF&x9lz7#&Um47Nowqj~KSIY)shvS-gmEp5m=3G6|PQ5}g&Eglr!;6Iy1JPb0w z0m{-mkL{)zky(&~T@$&fq33G=I6y3Bre~glala!nO>nLoXe%>w)d4XS<#WB~3bE9>G|D$fnD1rY zCde{=gGHiy+p>QB6}f@vv71_?0}JdQ>;VJTqrjA-H>@%xWAR~>eE5Iyx$xzBr>72AE)Ty~prg3jRNH)}Unf2Rl0w z$%iM~EB2$^BL6=)EM0nQap^f_qR}!5n$QaytlP3j?uC^XBXg6}G#aE1nh+S2owIkm zX5rXfXo=qb5wId$?$)@JJ#72H>bwE}Ur!j>&O5D9j5c?avLU&m;NAaii|F1d(2*>6 z{_NR$%$D4}a%{DEC5@WbRhI5FXG#8%Vlm8cXu=LC(-6)GvP_)DrimB}GmkN1pCdos zpJtxGgGlb;=ScU<%0ub_UeNUr zf8@vSCXcd!PwBqTu1NtB2gbb_2wE$%H;cQfBOLz`Uzf_}1hXJZ$m-#GxQF+3vR zzE`FeOh`!y|N9KPzZmxNhr=>fp(ix$7A68ptC8+DCbSUaGVDgjOGxZHZsR)&lYg#t zcdE7Ul+RI!Q32JL!j_33Lxyq&e}Hhj+gjeEtxiu=PJf+{XVmSf`)m@1i3id5(POC(_0zrC!SYzJ1t5GoA+iAqoAtVlsF8UjcYfwhfD_T!| z$E0RxgxU_7${pf+B^@>O4ZjssFl56@h#mD%J^Zm^0;nM*JNJ6)BaBYyVaM5bSV9si z#3YDFDzhsiy+`_hwT`DI3?rK&`$ss4IQ&6s{+zl35I4xtuDjV6TtOOe@H)C@`^9Im}hnYQ(7OOLN1n$Xq)Kazs7ri zW;fL`^g3EV$3-X48%K_f$7XB{gK&a;-pG=3etT{REr9`s5+zC< 
z+{*dxLW3j}qFCJS!LB@GxIY~(^vllPAUKqyRd=TB`w3NRyb)? zR9~55lLk%}CQ$A4je}cm)Pft3zG0C$C#U%T8kq6AmJKPN5$EQ^=#xmE^eZzJ2R$q6 z=QfAEdSJ8Yz=ZEL)!c5-Lo{gZzXOazPS-<@*u3qvHm>OX5%3mK?#kJ+QF|WC_JqAAgWSL zgX@J07r+&W7G1Y8@qpu;cu1(s2Y-@V39X*soTp%TNyZj-g^AIoR-A~3s+Dr0-j?zt z1;_cEMUEBXM}7j|M<;k}^Sc|4F<&-e3$B*E3mX|Gh$<~yCGL0nWo>^d1S_C&JRItN zV)aFRFia}kIfqaJBH59F19?87pTXYA7$Yu4RG?*Qgz*Ecs(wVsm4k?eJLUv$)xpag zPR<$8a02K1cmp@FPbT?+yL+ZuvNS^%1y`R${TA)JI$vSDDl037SgXton3HI{FhQzgJ5$2Vp@zh2mV{HZ#E5;J+tY1Ci zHS$+0tCRg_b}C3P=o%_aK5~3GKJW4%geU7)KK*PjlNo%^hGp>N=LT$FdsqgqPSG9; zUcFbq?0}&Yts*L0ySlpemD+cL)>dzvmsz_L?*Z}-VnjnE0=-i5ZJ_(<@q}}Mk6U2m z!#(x_q8-T^WFm%(ZWWrMQc#U-Pr;b31=wk7ky$zBj>>>#-_}&-1H7C;EIpu?@$Y+Q zU`a*+vxpEPXOKv^!;l*tnLm>OBDti}A_`sRHCzi!B=-K$p?5r6WzhhEXN$dg%0i8T zGF3o@1ZY|VOrY;*xLWLpUIKy>)QI%hMnrHA9<^Ta>>#!qFiE=?q>JoHJkFJ^k~)j= zuYm!;5DYME1IiOzL)w#;(_W9xEgSz_^>lO(r{8*o`ku;|!d6;s!K)pst#LIM^v~_a z))@&tD&$*3brT)~bfk`drHh`yqK3yHUeHIRDGCOvTW3^)xPvNYstUq=);D%8{4pGo>v-u9G9|2cyFytOTIHlmZ6z z+*{3}TJV`jF6;;*@pju+#;M|VT80)F9O!DH%^BvP`!&+-X@h@UR~7T_u(VPam3jxl z-U-MghW%b9MpSsl^=3em{yv2t?-bVfuoarWX_sYKF(l$6gPhx56GS)Ibsz2^I4;Ph zi^Xx|hR%+GG2$`U#cht~|16A$AJ0>TNLL$6IDzMFFe z6n$WePPke4;Af71k~x+oW$yD_SkHHzF6134cTJFf2yPK^&Vz2oGFt7Io_=yAPy$49 z!eth$l@J$aP!ejpX{`mS9dbz6@CIazCNG5@a~N1ne3miIyo#Th8-k|pIM`=;;1Kce z0H*rislxEM1bz@X$yA%#p6A`u?j zc&miH3^*~a`4`}80GRtgH(-g2CvSL`IDxf>a*OeDr=WKuwHDU$Q{P$a9Qoj*SLwoj zyn40(bnHPyKlk-xnMX3Ls`ZcCQESX&as1WV>= zPoLg<(~MdT&i=)TXs-AeFjn`}Y-Mq!<9c@Bo#?|ts1vO9dbI!ddYq?QOq050ynF-( z)>S(iDxBFMyYj2o---z_W?^>YnK^ur5nIec*JK%`8wKb1(knl8Y1v;VH zuYBlZ@2@N!sy5yy@*PYQ`u^ygg)3eL^c#y0gNzn7yXGE}pdvsbGKtF0nva|w;C&Sj zXWB1Eq70Pf2LQzJK`Bs7{&Z&u8GMszJ@CY8n{2dB1Tghb% z1Tp=i@A=j1i$*2j6TjF$USXHb2Wo+^!$O46^toS~Z~aAfV1g)^g~%x(DegO8U^N0v zQGYYyH{>XtiSknM@WI>k44!l*wt2U!vEDJ1ghPYA9L|0!2cP@<{?>qaz1ELPKLe_J z5zvWFnEBP?1^E)pt5C(t3d5@~Ji21=vJ!ZK=(XEB8t%9o#CFA+DO?GNOv2Kec=2?e zTap6h3UNoseEJ6vh(m_$GOm_*Z~6l`P4oU~os_OePemvVT3?*H*fWpu-7E-^_G5!i zo$>Ce77R@&F%iw->KQA;O3-Z!fiPzZpO4I+0f3APDT@$Q*PaA+zCCLA?zWCh&^}?V 
zxB2K}z8ZV|sY5Q{XK2EyL{vtTF?wPA2DBYyD=eAYJ2kO;E>>^%YZOxwhBrt5g?su$ z-0_GQt=g&Z71acI)Y;EPC!MOGZ6ldQDzXnQ+Uj<8tV(-&1`j=%RFlyT;Fo%m1LXVX z>})G>o~QAs4wVVvH8BY5KzsJ!0?aD<$on0shuAh3p%-xd%@MhOO|^4gV>J#YH33LV z0GD-U#lc|zb!Ksfm~mGn8l^eF8?m)D-m$@Tp{uJVft^Ir!dD|FX_Yc~nX+{@uhp=; zitk-*ZN`^ltx753tKJsVA_0IO3+@DXAh!Sv$r)?PI^tLvwAmOcHZE_pJmn3=@6Xy< z%g@we1KeYh_S6p5j8xs&+&5pyt6bqzS-83RrA2Kk?87XlH2rkAoiQYcAnM7h!D+`Ei)h=mPcR2tRYQPZd5Rsgm>=8B1dMcPi-xoK| z`|Yhkw9I1U>wtPma@i34SiH;9wh`@7HtzwA5c)(CU2+S3moY0bgA+qY`j=UK66)EqH>!VxuK{(Zk2RC(d0p;~Y`NxF+%;R}J!rW*I4^G)YKFeL& z(t(DYCf|SSXRbPZiN4X#v2w!Fo7PL9Fdq|tUE$*JuCR1%C@df*2UyeL4K}yIk3-9U)!wsJ@FW=wl#wcnT8$ zz#(0J9H=H2amnonRcFG=mEa`qhkZX726_Q8fHjohW-k?{!23~TIWFbyvfAmSWdQtD zin8IL8}Tytyr`2O7~D4|O9-d?Wux$tdXA!igM>x4eBE=$zmG8rD+hS4H2F>mN5RV=SoLT&D(x70|B~V}V;V3>v-N{RhIEjnuu2OvimaH<~mhZFP z&d0mK?Tf*gWAG48b$q2nYrg z56d}2VLX+qpyyLW64L>h1{(v0h6MA({G_D7y1>9d0yeuidKRNx@6V)W3arQ`dr7V- zbf@(Ap%P$zxK;5pPuwT-z(Ou5gBMHG8^1)2K1#7o)b*IQcVp1pg1`IZ+W}D%MYTcq zi4t?YJL-4N6kYuynk2LcFeFfKGZdvzPm)&3%yp3^Nd9?`B)4dFEY?*#_hPp% z$IuMH&X`mm_zEEWTa0UjN=ml>w6s9#6 zWBvx$SE38RjbzdC8-@MawuckR{H(ey=Sx@#7`CJEzE?vBkcx+c9))l*dCD1vSzZ+2 znfuHxH(LjY+J;|LwA>$`U66#rWa{SP+T=D6hbiZhe6*5Q_%FEgCgb8CoVeb9J8sj5 zk6SzxXP9oM&HW7c0RBFd<4tPV=!vtM7v-V7v_panc}I~cQt`o(YF^jMKXg>{d@iW0YnbF&4CT!lE0EIAtxQl#{Lqn5ul7w?nZ?h?<-(uT86EjEY*%(QA&!^ zrC43Ayfq81SRq6(rPVkdVwSvo419=uo?4 zrxFPG?ZOUun*7>Hhzk^)VwNj>B#renKEtIOB-Iaxd&d?1K^Eu;)1zpasF`>$5gFiJ zG_~D?({}thzk1Odk!#ROhN7gUg8Ox`bsCf*@WbSgJZ|$2X6xd7auCpPhjeQ@R{h(|<9w}1rz;zp< z5d05igT=w%34LU&Vapf0LzM+dc)M(=M#(+1Vu`|Qw-bnu9U2m`yg6)lgMT4)G^vU5 zl+^aaIt%+R>M9TyvSNYCvmLic(h_(HdrlcBuWcbqg2M=iyRC;STxol zeds(D(LO^?*OqU^{t;NUKSf2kk-i(GRW@a}>!IY??Cn_%(4^vEP^HOc2F+e%BvFb^ zEL$&+C#6>t^9AY^v?1necaQdKhw7l&OFr)LKnA9EG>!|Avc%Y8NJdY#?sgKSCM{p| zcbT_8hrgXI;LdEH?(=Hn1TDkQ=AvOGTmv7iRn5`Kl@Dj&Unzlal?X!xv34gllSFjI z7R_N`H*8%h@2Q>^kybatd-cXQk7MNxy#rEApFC$5>L|J;yjNsf9DaN%Il=P9pBSyE z$AG5$(~Fs)=F`d=)sA`-=LI8JdjA*=&)a2SdYD!x@A1NTl}T_33y9*3|-+C8}+eVtY<=U|S 
zcGZHWiM`f#>om0yvb*IVg~9_qJS?}PXY%&*BV7rhnBwH*NpWnbcFh8NjS~jXTU#kWD>Bflg_sIb zp>_3)-W_%Y=mveUf9WcMhXde(z|O`ZwF3-cZNv(NG|e7Yy5rV|?3U4R%mgXJ!QlBP z4_cX}#O>umazzUx>hbV;t#*tPS%dEXZ>tsv^&Z3N9LVf}#aqtTm~ldn%>SWaJVKj~ zOrD7Hj~rw|yYa_$F^xl3RZp>j8c&-jzy{H@av*L1ifPcd?#GS2H=d+(mB{bMXhQ*t z56In3LV?Un&?g9^LFlc>v?EJAy(4DDO|rwhp^Opa7saRRArPS)lc^h$hO#!nomQhVCkgk$4ha%nvu&DRR)R45ArSE^5vYF` z`8{qd%H2bf6$CPl9-7K_4?pl?#Ma~Pr=Q>f>_i08z0SmS1qx*W=o-uMyAor4+W&ZE z`U2K8xMF2!f~TiEwc_2xK+=Z8)z>jLcHf5}qYPczI2Tb!gdd zi!aa{IT^{$ue%sDOhJjtB^YrsCJ z6Hx}ITg(zx2oYgy(OF}f=O2b-hZ?`!Jb|TPPQnn#1VP3H-hu+m;1!XM(<1~bAY*HsVhg# zhpm`e0o;UAQkK+?pZuS09b_b{_}3h8y$A~Y%L60l>us=e-5urQYCY)}!(d^48)jpE zBt6G~Lf#p~6sK8)5to-2`|X39oUA~i1W;hgkPa7x@go&Jd|f(nb9z$;;hOhabNiDi z%%{0WRQyOg+kl%^x@>0NetdV&g(f;0|^ro`!l*Pzf1EFxNBtNUpp~8V#Pypy2Q~09fxLH zS~PcE%evd=4+Hg?<+pD^MUjg%7Uo&_y2u~vQD)UQ=gVT-n!YjPczW_bnz*{s&e$`+ zy!aW4QHe1el0hq9vD!f5mI}UpWF8jc>H%*OWhN|$ziM?s;x7k@4_!xc-lB2UgcTpg zzl+}w^PK-6^tS5seSw=1nPj8>-<~Nx6NTU~W@}&km8uW`_+geA7W?#9(=|ooa#YzA z4;_hn5up(Mxx{+y?Y#qYXw!&Jy#bSY=Dl05ziOXN2Aq2Wns2|UkHJmG!(l{0;u$@*T3 zJ*9CD9AT6KVwMe~=s$SrPOdQ5M`HrK2oeg}@oV^podP@KT(b{kD5w6KmXe$HWf7ig z+F{eshhCrze^m3QJ@z379&TIpP(*|few4gNS$}M%#$a(O_ty0fcI^Xz1MP7IAQ|_B zRst*;f_q5kh8vzre2^9}3t&16Pu|Px;gtzgLOyU%>&4a0*wN}AJ#mRY2?^pnFgraSKij%HduE93QmXuqGlL;=f}vT1bexNZ z`@!k?<6(sHpe8mOSf)MpNG_X1n)-Zd{u;!MHdtnLt+ykYkA3OrHbuos!QndCzdl5J z-nS4PeAK^bf>_J*^aTar8mDsI2LZ0qWD^3G+87}GWIqQII#BeY-PH!+0Pu43+JOiy z1j5n%?e@^nz#lZ=0d0v_ONQlPWIUwVc(WduG;=}^xGU}-8A18b5Q}M|NcHbd+WUP1 z4H?j|NhVFl5CiN78w}6hsjSDpy3bn1q2iEzA8kuv;oR(jI1rO45PqZW)ae|2={SoY zS3e~47qRKdX$%~gMw}+?6A!hnarAv9qNL*ZrZJWU~fx&Al#Y|c;nUqi(e2#TprBZCtGTeKed1W7yNos zLsKR`#&Vk$6tIQkByJXAHP38b^b2D$Jj+Ryz%WPdjetTuNYQO+XwPnzlmq%)$OU~y zI|b}dfjhB+ii*mc9v1g;6GU~)YH<+^o)r@k>Bsd`9f&3z9yBzLmDM=x)}IRAR1Im+ zPDxObhSPa42;gLDhA^Gd;ax?424X%wk91ECbTEcCEI4O14mT~0n zbxx>7nuF`mpbsLK`O9PEkH25k>@DSOxpq2;>;Ug5LbyNR4M&jm_YSIQ2~?Y*z-;&# 
z@UI?hyD@0P9ZNefTGh~zh3%6-g>L#C2r$;&+94HkJ0bx{7Xmll=%}Q+jtjlw#y~AcZY65EsLVBSbP@*VMGy64H%=RH8HMn=u--j6-)+(0K_<|7(z9w;( z{i^&nJe8dQ&IK$DW1-E2&E{Qf=D08yaY@Jg1eMJ`nagSgWGvCe86IO^a8#BqzEv$H z+bY+pz#0X{`(Fs3m7*JYJ+5c)Eq`p#zk;8sch&bv@Amdire+fEE6iuxIa)1H{h{)r z!*DopDt!jKP}(fMm$i2(_#2c%OzTHz`OPX#o6B8b`QkK)F$54%CM>8!(*hTNo17xd>dnu7XY#NU=}1rl z5z8C96KX-21hKa*Y)e?2Z(X7PyJt9LhlTH;1%z*)5a_w62?ark(ttRj`z;pBIB^Cj zwbRiz#$e+FbcqZck90St?1@~!g?BJMW7@%Sk!>({v)&=ce>#QH`>3FzvBbR96{J|+ zQ$>E5l-2j%k!ZUlEX1GK2>OuKrWkKImBbptOkgTwkLx|{U$1v5UGEBTjL-B*-11DH2?fbQgggev4Jk3WbT;?6$zH9A2c3`(nHV_a1E%JLpjK&H zJ)!AteA29(Q`0eMXFt@EHODreeAVEYwXF9ohm+U@ed=(=&bN=;=llE__vOlEVSxV@ z;+%ED#w`W-i%Y&cyZRrGP@j3t-5q#JDFhSY>mIr*17Jn)f(%mZU|rzjYuki}$PF?; zBC-I(hjU24dE+;@mjuicA5q9ta_hc-Y5Jrb2*XH5az-$qzKP&Ek3jxa;Psa*Gw0dx z701Fc3Z0S?CW+0`yDc!qVdHpi$J5hJ+g{&t4hIF}L%)vrAxGbJW)KK3@yblZbV^~e z#DHVmvs6uTy>Ir}pI zJPng>`aI0cc&iFpvAyrIV;GP2x069ihU+nxk77-`;xnV^x5X?gkUZ8~(i0Lax^OT` z(;+S#IAjaa$-`HR+9Y&$*O8n>q)V`$4(+Zq%Qj z3`7*$)YUUtVAcKu(#||*@uU@i#!RB533mnjC=<3MPE5A73fpOzvP^GxWObZLm@91; zonk0~^Z;h%gSntGleIr?cCqu-ykl3KvDIa}XVvW=US{IUF&~ZrCXqW|?2Qmn{8CTx zB}4*`A4f6Ec(X5Vi}1-Tm-8Ez#^WVGxMx$@rKw%ke@~@c<1_vmaN$(`ur=uJWQkr( z;B;28PY{5{%nqF&T+3$7-D>HD#HmNOJ)IB8>Pb-d)PGA{O6XtKM(N30jPMbs?$m(5 zgAi%h*o|Wx1k?h?27Nx0f^QcWkhrlQKBc&Hz9M3C5T5{=n`3tKuAJR|HC$(uWE{L*cB7-kYgs6 zpooYKGoW{Y8V3qCKOP165~OGlNDI&cOsU#MbK{{aY{2M50dF06i(rmzlOO$3S4o;n zVJRT|lF|hNqtDMg&!0a}&^4t-ll&FfE)8#s{$%jBfh#>2~>8Opj-3x6u z@;}n7gU(%^RhhpX*P7tuKY!tKZ+FuR5ZjF*6D<0L2yb^iG~p;WW8dj>)3UI2@(Pw;%8USwVrx ztN|z2L~{X{^51`$V#Ri!LGzCbmg}WN8AWPDjw_#8e5xwas;HEJhf(p4Th+~MzqS0E zK3-ljy$0M6hMAu2dzXAJ6NwOgrG0UR|7Yi)j`urqP?hgJc$3gS>KVM{m&<3S-&i#dymOwlDzN|S6KdUtpwk}#BTlUBSyB^W zP;*+}M}nb5BuYL75byb3O2TKs-wVKsBf1L)0$$!wv_PNQHET*6+zjJth9en=53z*A z;h6(gywU98VG;7%|B;q%wQjg&tVB&eJ9hfqHoymPC}6>3k#gd?wg&ft+S2)a?dA*o zjU_!4+l}YXBQYOwAJOSL(W5@5?8#1vCUY)rSq%smu*k;tgbF^!D!dbqKu)&aI~KCb zW`onvvk?;)BnNWEL%OC)pt<^=D>ZT@B%K(_gCf2t`ci2UuA>J{^ZR)~rS+7y>^W2p 
z@*0$hT*%RQFEe(o-}jv;c@<;wFjbQ{m3D>jk}J7T^I(Zt9YII%dP+t&=F@EquWfor zB|*mo*lvpvkhl(H`TbQ~WHcU;NT6{ft2eJ66OpuR>C(ISGOsz@laQUiXLy zN+w1-ksDs|Ee4#=SKUS`KVI}w}#$s?TBU33D*)o@%IaUAh7F(n@ ztzvuMDev!-v2Me^mIBTKGkTj@!j>6GF_QpmFey#Z4R8AhhG1thv%wB?(~u1(REi## z|FVZx`zRxBY88Z_yOus?90Q85794;Xp8^GUe7QsidAjQx%`zy)sME(C+gEV(TCxj^ zhP_(28k*2vgrZh~oJ+08j{tO&l_Mx-MQ+V~?a|?qI!mC~-Z@8rId72^4eG#$A2-Cg zRjLEW0Msc$*|>Rcj}%N1ue^G;?be1fvSqw>=sqzLJdYTIY0DJHci8<4UVP@8!ZXi< zqOb*XI8pwwVZtf03c_iaaNtaY?|d>XjW z*e=IjGE`&f5i;k)&lSMJObhuZ* zIMxUu3Hb6UMv;5f#sCi`6XZWMwakzBa;hNx|HUFAY5)51U-tOjF5J0xW?`k%nepRo zv4O|;lot26(Q^2(bf~u4A&d>y!?OnESX&SLy}zv-)A^+w8Ns%iOvO-L7_Kw+I2ft# z;<##IS;h#?nqn?~D;-jEBoG=W?k^!*3N$@LWkbU;t$a(u7Ix*t0firi-`n&rUc4A6 z(6`&vXj&b|Oa(e;~8WS^{bS9?~I@aTbdc*3%;EA~Eh z|5N*sxZ5Epc{QBvYVBjZ>dP1p+uxkCwK?nwX*k#M3b6bQ02=+LimzUT2wOuPW z$BC#BHjPG`KikW!j`U@y@%$A&c13x28yL@t1N8BM95DDxSSf{{`yvW81a^a>Q0pQ9 zWcBml%bq6d|6qFTmAAGFFc^DjFSYcz#_9Kts;bTq;*HifFqnW7ThhGekX^u!agMjw z>Lw^iXZ`>JgOjNOSHHyQL-9a}#)RYIg8^Hzr7Z(@W zdQ+Td`@uOVh}1^`@@wC@d%@Unfws0bjQ{Q%)d^wsP;TLY1wL4vltqqvr`e~r#8~908i>D zJc*`2ExYy0A&Y6ni;ue_4B#oeI2hdlj5P-HlX{c)m#hZ;8zd+BQ)nf7s)I$z&mR|3> z^Ke@+o-ppYrBbt9>RwKr`<_#=>P7X7Paa(#l)F_UBF8ot_Mldw!zu(Y7mU24_vn#o zZLSX1g5JO|)DUC;LM&m*HimA*-EEPJ^d{a92Td8cMT}5TQZZLQYZ=Do1%JhJB8J8r zTeONTDszV>iV9(AJ%*fT@Zq*i96J{@XaKmHhQB`XDn`ZW0J+*li(L@D7>BTUUX4n~ zAEm3LGKJ1DC!;RKNp8Y-qeo6u*y5kT^xu1#Tge3D3g1hci_F$TJwNiWAi!#hw)}v{ z(18oUmXAUpsC=O)p25TbeI}b}x?tf}s`qY&Y!!q}Q2Hq-FhG4wFt@bzJ2WP!lt*C2 zc3`j{TM(7vO(+8b3~^3KE*%u56LAydLtf-)w>Q6VII!qmDYzoil|CV96|iz9Hgt^`8s48D7TF2LdwT z9L-SVQ|!yXSbDn$#Ct3eL}?mc}vtQ}!01lOgANwze!wGl;!#AAghtPZv_SzKe4 zmHDZ`Ec$3%A_O$cX_*W= z_V~`0Iy#YXL0&@W`~hBM?4U+r)e6ibBQ;!f%SuXYvB0NLaKP9J-GbvvymDL{_I;Ku zJy->yY6|YLxuFKD`Lz3)BIu(Dk|u|3L`-RqFxvTQb^Z%@_}Xe#Je-Q34PV+_l;YvS ziqG8p>#Z7xP4^$}3mXV6S$}2CdJE2@BGBN-w}rZOuwE`6EC92VR5KVrm4w?Hbt?~Q zvSp(u*&(VbbI#6JH(}tioQXB)<4iDT6hPO}(o^2&c9e4h5`7YClT@CNU6{iawA|I~ zoTY(Pzi(Xdc2W-_m4Qet^?2F)5Lvd{KjyL1egrmC2nHQooKBz5t46D3HMpWqh86b@ 
z47BLUnrCk`s~aXG)?s8shfO3I(wIj|;B2CE$#!oZ&7|+J1h(9Xmh+-b{3PJ}NL(Ga zgWB;;n0xVV(h&qFXXe}o5rsR(VM*{4A&w)Pi#Oa)X6%vhc;qZTjaB zD&Roi+}{N<4(x!k=_ve)aeW8j!rg|r*2ATXUSO>lhKGm+Ksvug@6Gfn=5?DT5r!0- z4Q0@?D;sOzQd7C%8MuaN;l=1kX@NcCI#$_q%*~eJb>w`zW(SagrRXC57CG8@53n1n zcsj6ql0=&5>Q^@$#gJ0g@aYq=MRa=x0Qb|lj*f-Vj$d=yT=^+4FE=;AcR9OqeshpJ zBtR#yUBG(J$`hek_YNwpk6`%S_D22ag5ZdPu7KQ4A-o!R**(5*KZPo(iVN993QA+* z>%)^sE+0f|ra_8ahtST!NX58kfy;hHwRw02@n&m}o0{gl#ySbVb{NQmWIlq26lVYo z9OLh@m9LT$*4@}}B{QG6XPB)xU>bqqfA~^bxCHkx`$r_ma`hCM4j6Ipbh;wjYK8s; zP^}daGf8%=IAP>mL#8mKq}12fx85M!HEFE!kEFrMi9k8FQ1mCdOO%!EJgU7qXv}a@ zbUi+b;~iS|>@0NUpYkW!WkTPN1dI`{GB#$JyA-afS`@#08c`G=-So*2?!7p_QaFis zTS?lcIjgPCnGepazOY0UTp-QjnwQ*tK54ECX((}4D%|o%)f;G`{V|E2Xf{Z8@@favU+X$z30 z6S{wY_UtLqx3EmKlP`I-h{`YuF^&ih3AJ5lygu5M0DEQ|`8;(4lttOlVAF5x# zxBDdII!7WMiNx}#c2Jni$3YpP&_1eUeMFfLycx;9_u!S0X^1NzMG_hu5r1`drD1aq zRJ7A3n2{k@GIzqzo`PyuU0E`V3?}EYgBYM`IzcBNa_7#SRfr0B2WL70M~Q<3Oh$X0 zMr&&hD=giK)w2bE5g`>p35&ROb#;$!Ij<$vw|Dc=wGK0t(8*xYEKi$e6goUl0t&OH zn0!-AS&UW%dn;RHKbu)tn4)(o=JMp2ly>q8gvSxh!VO>!boDm+j<4~55wy|mU{vf) zER|V=WKxzB1PDbnH8oQ`6$-E~iqIB3C##Hwvpn{SVKUNC@p|XkYJS7z1er}yhl5w?eWkh z>mA^OfCg8i#aB8yIttO?JtB2ePC-CBxbiiv%^<LMaHy2RhghK{zDow*TKl!ak__ z$Gm*VRq&7nVF0>&_wF$B4&&7|K3q_&63?DHM{l(^wrQ)A4y{EPF2qde$h&gmq8=q7 z&9r|v`$<^#5LUw)(8cocvsKKWiEOQigyFYE^Qvj_0w*W&OlFX65!DN#w!l$6C@n4R zvQ+P3ohdLh+=8MThBzN;8_NKqE(V*Zso%YD+1b-rvf9Cd z#cbKKBQ<$IXSoQ%CX5W{AtXM25>t`p9Jxdf1PR(8;hXk`tqd`jzr4;81F8F%&Z2K~ z8&83&O*IK2IPB+Ga1MNeni;&j=_skf+L-S z3})+n02DZ!IQjq79IUI+0Ec6B2)?+9O?#zZyQm7;wEf>x^G?1QS^se*@M01w(yXKoM-wo8kf~BYrAa z(npJ)krde7>D8Q6x^FbMnv?tnRgR!-Mz%v@`wZC5!cfZ?%WEb*V;7Wo6LoeFM-@t| z+oGNlaWxs~tMT#TLK8)6-kvW!Y>CK+NV{+4`dOU4w*!98qi7Cb-}gf8`Zu`(eOJt= z#WP1I4wrW^xpOyj2LKGzlcI(RBPt35Te-~=j%s>(6R-~uWYR)Q;B7&k zp|v=P>`pL68?f1?PadQ=_N1b^+1Zr`y@_pr6fmaZnhH5L9D%#C$MUmRfGZK1Slf20 zGxhSrO=L@=-&m;mwE+S&`OT0Pfv*N7lGHflAaf|0JYnPqeoCQUSuErttU3U=#4D$qCptwk;ms}OFUg*@lbM0UP>wy@|odx zIF@6bg#urUq1zHi+j{fWlth?{;F(rJEEHJ?Y)HltDke0_Y-;67Gb>~jUA$Z_5-|BtT!fa`hx 
z|Nrr4r6SRyM3RiMl~s-wWskB)b~b6~kQt$p5wcUfa3t9?G7|~eWfV%ora>b7ulJnu ze!qYJ&-s2XK9|q?{W+(+#^ZT^-0!z_f9;KIlL}KNJ2@&)oTy|*G%oN;3OovGuPlu< zOd23cFK97QobH0fF4YYouz%pU>l>dxBb96GXUp{25sHg@5HA9%?} zRk0KcA^e=2Fe?e5mjPB9LD}?^P}p!<%I?^ivN2UcCuqy1?o2g06l(>53>SxV+DWJ% zq{R)|wHs7DHo3q$sg;9+Lm?{-M2gS_L_qQ3x@w1AxTItjf+JbT9T(xb|Dg`1)jN0Y zbXunJV!0Izk4UmnqQhG(c{p{@XJ^%U3Fw!l%J5>#Y1;ny)yv2uI*c5uraH`H7x6i1 zTKJpo%>1OI5R%L?;kHBy5rU!1uja1tWHYz4;%3#mooqc~Ld?3&n?p&XkcB|fUFozP zWe^}+rD2mG*jbnycRAZN#?4i)gLFKRVb7XQs~34<_L-;iMxIkTR~q?~U|^X6?%lg5 z^`Awm4$fCTo1-4JICehNDnNQC=MUzs2i6Mc_F!Y;^wb-}!|n9?lzbYHzJdPXUDjC> zZf!U8)lqZJ$RRf{@L1@}dghALFp1cTR2x~_oO-9-*{s#_ppy!{*dEMJW#kP(6&!S}(W3L(10_a|a-LDO}@nhcJwC2<)ddD*u& zy0dKUnmzVM{d=opt-!LA(P`(ZtQ6bhe`0SF26XW^&2yt#t$7F|HfQeK%Rm}p(#f`F z_NgAyB{~V=gU5m4?NF5U3^*v2?(=N85?PPO*QEGW582P+3a-tiNboLm5dly>cAP0g zp*$^R`E~4FVgfGh`Mt|Po(Zru`VN!W>hGUls7EW@W!yMboE0P0-Cf}_bp_DaDg&#U zC@*MQ#E`LV+lbW;y~Z{V(hqpee0SO>TOB$h|GA2f#{dv671AGB1y_=6K>t z&{81Z33g`6@1uB7g5`f7#WN!+3fuv;j*EVFQer>c(AbjgBaknHno@9k=j57(Xd^{p ze$No{_P-dnt6qZ-j*ag{{Eo7gqQOIN<~V<7`-ObU&r1B=L|iAoJQ(wdnmdyQ%d{^F zkHxhlAtWd!lF{l&hho7*%QxpnQa?}D8UZZ1Cl%WPk;!F@ZNQ}3$c~mgo}S;;!LnM| zaAH;_G7+Vy?X!(_U7FNHTQ$4Rz!lSm4I5AG2!VGRM+Ys_X!}<+A>uv zi$SKd#KJFEZ5JdqvS&M+|4oL~=D8u`bB$mX{T$00lHD~h=8~H2+LKq>okoNx!o;7w z0Sx+h=k(Ck964*xyT#~iCEXXpnyiOh{&GbR$Gg9p4?kWR>cy4EXjrRIc{;`NA@Y@o zmm7gaBwvM`E8<}FoxAo^>0UO@n9aW{hhO|4EFz3=%G%|A&GoY`EOL^&8V|Jk!}_F6 zq7TfWNA5OvZb!mvIW{1rjAZ5>5Q?#}&6sNGH`o)W)PG-C8A zdGizjCv|XPKNo`8EHfyJhU(}iR* zA*^6hA&(;p`j@2NuH+lTjE;UW9-FrywHzHb&<$O!*FgDwsm{LkpG)<tjjWM0a^l%yBh zw(-G-6lk@e#Al4_%;MOaGpA?!B>eF7s2V-@O%qXcqoTh`D%>(=k=W=otF-zpT2!0& z<(&&kqdRbb8*02xWNyQ} z8r4}7)NoqVr7;(AN=RT&Ja5+7zxQQ848drXh|83O9l=rPg_RD>RTt>3tE}{ zlrQiSIJ1!Hk~9N|EKBWSoGp{8C@D_30l)+F>(<>~Q0(6RIHg3yN_PM^0=-p*107r3 zn0U+ul^BaB_ufZ9*1*AGv}}|kn#v4p=^F1aFGt%Z zqAoUX^8UAf)YMB=+e=d=)*CQ>75nUg1N?UBC}m}o!V0JvlajLKv+b$KfL>*FH~h-i zs-;r7@pm%h@wye;L-TwOXpx#6H1%Hmjz|Z0Zro3#aEOdi*%8+#XEWIqYXh!tDR6Nu zwK_-&z{eKVVVx8xQQ-MOu;?ephd%=Sw~ 
zVx_mJA{A#^dy=32J~-PqZQ3;T;E+x@&xNQ|BAi;?^s)131y?0^mtxwkr`cONyGh)I z8dqR)GctzMeAD7!F&%_j(8Q{TB8zYG8r2k;9=WA2;{^917r14jrE1>tq_F4V9gXYM z+4feuVb*PdvE*2qH@sFc*`>DlXR^y}iZV_>50MPtIHwcz%`w3*%a2dP%g%G|+_^h% z?hGYeU)mHdV5_*WX;wz z%lNBL9g$ZsT5Y=3idv*II(@efT@fD`u-nMb13SUw$^#ozto~4f!(kmRW1;5@4yPX+87_jNs&^`ggOMbhy2cjLy)ND&wp$*QWz6mQ5fTrNEi3g_M^!f}yhKQ` zdN=MbF!4NSZiw%|EcGoX@@Z*6X_gXc6gR5IVCy}0F-&YPFrin9FfBSuKqFOH$kli2 zL(RvQRt?dxy*l{y>(}Bd<#kNB)hy*lhr*vYkZz2Rfnir#b8nHHYWK8^KG2}af7R8abdBOMFRJK2E!{GcDlCQkyvFmk3CKEP7*+ep7P$V8-jKk?1v}>77qzD z1rFHZtLv_P^P5JVer?tHzXfZ6W1l$mB_d(2-F|vyZu!1})l)y_ktJlYYwU@k&B3iX zIsB3L!-o)pOSl2d5F_G}yORfMI_P_38;sR}2a@1VCzCpZNSsc*3L3Hm~}wZ9$}~i z_)}yD{s@chmeZqoS=}4f*9MSHa+wB^?)F+Y*EKC=o*NkU_8jpDibX+aa^^v@a%uVb zO4ZJd8f`5=@((QX#wdi=MnqT?9CHmijP6HdFaY0aO$;0qSEv3e*D6W`V3`Yr7AS6o z?vhG6GqX4Na7&4X*g1cWJ46hHxa+R9Al&9-6RL&4<=C3_y{TDqR+N?bnMw-1NH?*> z1J{z;0V2S)_8}Z`OGS^9uwstI@ZXG&Wxa6+<_B%{`=zNwaS~;%0AG$;Y=Ng|A67G) zPTR$QwW@defM>VJb`3uO4?O-dOaE^0Vy27#cH|WYN)=LbNQj~R7Tevj6?rp@8^ssL z8Rf=`yR6;Zm`oVL%2w_Edd_dai1cld2djq-Xn%4EcQT6OP@^Z_t*4e(?)E&)P-G2s-_;UF~r^c7(_SQ$f7!( ze03IlSIQDC_ru#^DWAoOAj!*$ZI`3T?Y_fv?m}^h2SCwTI3t}=0wjFBLqz54TDHa$ zMxodS3YfHb_0CS+e4k-Co_iehPNh1?wJHI-?W&}_yCG8n{i|jD`a9i?ofT96S91Me zh*0xNn?lqJd+#Mck@D5*_^m&Psa6>ybnKs2HT>t5$!$g5`;}(hm;7JAr=iU^nwgAh zawu@p!@7lAI57lo1(i?t9eog`0t?@_zljQab$9J^g10>$n?}~z_?#>zaa6Ii{`#K_ zfu3B4{kPmu2>n*gB23Z;Mc<$U?lAS*mSMFGfa64PPe_iM`z)BVb}OZE*8aD~d9Y{w zp1H1Y#Plzg4I5TsW3?*_Xqgs}w^8${rbGd z)_`=2UXu*3O~+ONNf$YzbKeqYMn`8B$OUVK>f8nIT!|s$#)a@@TbWj*MI$rqb>z#? 
zw`W)*%vUyK)d}tv4iN?}-`DpV?KN8y+Ffd{+lE*@#2Rc)H4eJDmEF#Hp7%r%n&3Za zQ%F+SH-B_-A^{+Rp=fqaqO63X*;VFuuW~o|FTP9R{j`n4=K4W%1BGe+v8>P#vF!OerRYHua_Sx>1Ft&`FIraAe zD3uV*-{2#)GMh{06`>fymCl}BI=wXh%5aOk9|yJDHS~sfwZoS(&ET$v2&(>10jM@4 z%+Nl}*326zmPFKnVeu-wURoP3ht+L6{gFOHnnc=ih>!Tb>?%$+r}9r4&lde!AbBu6 zx<~g7;MBdWiFYExw~H-v4kpUN8Kns2Y~>v&HEYHs2JUzUoDLm!mmn zzOa>vjx;LD#33}+#iXUD^`|4Dp^+7gLqS172;v&`l-NNi9oVUzW)GM161>1~VK<+N zc2PrDswu=vT%=Q1^D6N{+@5}iA5ifQHNTyA-MvZ#9FRQ9gVGFC;}T^q;=THZxTjm{ z?RoHe78i387_mIIWp}JRet~W28ycz;v*Vb#^p(MY$jd;*0jpoOhnM)D?sYow(4p_X zrK1!zMd{({GBB}72i;}nF*UCTI_Nkg%@gFp{nq_X*61Y8jJMq);@;*XPOkhM`btfd z^#7O8YUUOe<=S>RBhwY=7{x0y(Wt{)*Wi5cYpL;Y1mNkXkXB@cWyF6i z<&gN~rWkFayThNzYccQ2rvpq6pIoUs{Cj=PXmlxuHzcUS!mq_DP`Id+*q!3H&J z{G%oGYm945!UjRl*ZH^Qry!~LoG!63P?pPF%F6n&xN_LE=jYF5&bIsz4$6W|0OxNV zI_3!uEN1&RVyg96mC;(Gy2z$56=*4g8pN`_lXx0zfa4{IE8}2Y*$QTHY@bnwqM)0% z0*DB>$hJ-4h9#E;t=-b5wGJLe%@E^u=Yg0pSyOR6!p`zuG#+i3KsNj|(B7IFbWN-k zaMNE=#0{*kfIKRm9eip8=qp>>&sYI$Gst?0-EDZ|=~-tU?vd}C)lEXHgDY1{*wLdR zx}E4)!KgFN3Sx@y5_&=mw`pp(Cr zhu92vbv)RA`0(2E3@!a?OY`@j$+Uieh3*M{6O=9fk5Vn#=iLSn`o!G!67oy^MU;=o zj!3BLpxEX=aa*7sgy>cbJVjsMWrXUic@#hK*khf<9tUw`&v~~4Pc&Vwb7NZ8yw-rxnadERdrHxeDy}U1HbJfyuH8ty-*~^ND z-mmdLBkRD(K%!r0%lNJrV@O2W%N`f_du@xCJG*oKhoDBN?f#&14!L$MNL#!>Uac`H z8rk{zjYs!#T^9U*E5Bu`DT3puTv!(|bCEEl=GAt3PL;mgQZRJM%+|7id{CFI_w(CL z2%gNf#AhV9w{+UHP=hY{D1N$Oe_;2HzR<;+7Q|=kWGwee={Ki)qKWf3Msj%w_kW|cTNml zS&G{Z=ZV#%!{CnH*`mCmDx{9s0Zx9`JXKTHZr+I-)J!pT*jDs9u;=gg;LoXcnFt$> z%zE8lF+I^gA*BKJ0>w@jo`t-^uvni^o8crBB9tPbcc)qKNq-i&UYg#17u|&1Vu@Eb z=+*W(KiKEXDm`QHq;GBL3bzhT@ z*X3!qulmo^K35-|{0_c#ahVbHW1W;u_zKq?A)wk|<*^!W_6^f8Bse(Kl~h$eO~#sf zy`2Zn;ST$+8mgsbqK>M8VR(+&d+#120Z*Xy9mk$tD;v$uRp zZxP7YsV4R4(?>ICexF<;g9&omMY$z(S4$bH56H0^(qY($D<6y6+-~r!4FfzdMyIEf zR?Ow@tRMiz=Z(f<=iJyHqp0(=L}FQ(Fmm9+A>hh%B0GSA?bG}X*1ip0HiTBTR@=$? 
zf=Zxge92&1Mzr+7GFczMrx+JgCADLNpYOA?MNexNko}NSSWz1NY1)haI9GD7*QLn9}NP=B=zz4Hk}$SU8qvOZ0}^4BOEvk2?~}O>@qlf?z3bYzD&LN;Ze?aujH@ z2iZl%)&$cM)j+4d9@WC8JvrV4k&G;xfx(#)v_PLD-vXI!?uQR)wOvapH*MMuf#2O- z`(i8hkuw7OhP3fe6Hp#Y5S&yJH|B)#T3&5;5jP^#Bx7??R;3kM$1v4RzN^v{28Xwq zn*4qlI)T+#>s6{or!u?;tGwqEj*0MW_7r8sR}q#{=@K3fdC!Zmk(KZnneQFbOq-*V2( zBypZkH}BrGJ6TyHms?DzR@>m5j=|bCGPaewf3c=2AdkEvdPidT>T`}6GP=ND8?YLO z44yH-1h`hxGCHjRtojV1XfJtb)7x)r>e~N-E-=IK<839a4;r~0pSqG;zVuxD6&t__ zF0x=0zoM-xGK(yD>0?)6d9CMU&S1Zwwnu%eV70+hM3lz+CX}MKoFSUT;;dt0O9SHq z<>8>l&6~dDbULn`XA4S4O6a$9e$?zyO-x3$fRdweQX-CgB3x z1xz4l(c{lk-}{YwuNU#-+qn_lJ=3*^8F%fTndR6oVboM=uP_PV>TQv{Bf{DEVNftO zrVo*|p{9;zJxv`R&U8+FPv$U%qOq(E8S}+OgtxO6m0zf|)`%3$9D3!V#yg6-g5p#Y z&!>_$!Piq#y31!5^mhH# zABaNOlN8u8ryh?WbQ0wK-96W=EZ=LAeL){b@1+%~roj*<)H6UpZtkXIVRb5#-K8Bd z?0ck(7EO|f#3vYL3`Y&g>(;awKkhl{p%ivmeta&irtIAzB*nh$;JZFdKoRTKUEX_G zf9(J{Q;CK^$->=a)0wW?@LKZQEzdP-dN7D+&IdX zs^?s1?w-FL4hBV3?(L!x+E*TZ6eBNeWqqe!!*+=~0xEhJopxDAEd%>auaeqQp%_tqsCfQgVd()mZn&sD0xC(8g2d}OeE9J5OD1R&U(6B##dNIixMH7lA3N_m zjigi&SNxw+RZ~-So}LGZpQzy^(k7@pGaY2^MFhf4asCV zo0XPerRt0`pyC7$3FenZ^$!MT(pBuW+J=3fA~qN#sQjPdpnttKJm+wX_a+#D@7z+?`P^Q3_a3)DujV@`Z!UW4*54SXsho0 za2xZ}AjJ+dN5!E-U-%wfNWJ-=)Fg8Y-^IlW7U}w;X20Sy-Wt@cy9)KCimp>)ds~h^ zuj^b_E3lPH8{v3uS);+0@t%z336m}S4?dcDA_sa=&LPKt*{|{fC)ulr>MIXNL3Ogv zawJ)tUnFEBk}*Rd6lKS=ua?oZhM)a&1_1N*=ssm7ju@8V9`F=n^B3EzUQbS)=BP?d z-#l%0C~*mFp+*W8^Qg1N?Y!X!5EdM7`mRZbotsiK`1W7ocDMqaHgLh_qWO&Z%cunvs`8MqLKZ$xT`wg)uyT zHj$-PAEqT=y*dtd#;?oaqJ={Tr+y#fdB}C(=||dpDfjw&>~qOEA?I$Rs(*$3YU_%g z)eH|!5CflT#r{EII?in;VC|@+Z0R1)6xF)ci~RiMNHsLvpSPW*#nmEyS{QS<gBQMEdofV7tZBZ@T5&wiSohJ?)0zL~?iq()W5`Fh7Lp<)3>On7 zS?*#|qtCcT>!s$^@~Hmv9`jk09eit|$TXyTKb??ZvvgVa2JjQHm{Soy8km?^y+g*U zCl&tW4nn+9Pc+A9eiY}H2MuFpo)~@?1(9qGX}ms+OzqkpU!3XYWyF`nX*s-XZQ14V zY?ra?Eg^cgo+kVXMwzmDB)j8|?QcE;J3h!mW$=VSkEIC)ggVW3;XuIBQfi1w)P+JktS8*Xm z3OMmg(vt(f07~QO?VR2z-Atugwm^THp-d0BX@1)={x(opo^jgVpu^fdu8|FnygoqT 
zLAtSdYL`Pv*+TP5{0X0U!aFl6d25D3*2T7eI?;LUW^Wb??VoLK<83)C+*oBeAhWkcbi+c*%)2dU3_FaDGdW+@H zTW-J!JTc7p!N75ccolYJU+=RZHKT%ZzKfDU>IgWPPfxoQU+Fjgd?B?57Io02=Jd3v z@~eP47OXpr$)|7Nj8?$w#1(mHW}R7b`n=GW^d~p2&-J}!!X`VWd1po$&$;HVC3s3? zwOQg%DNwQ@!8w2a>N(YRESwzb7wwkV_2y%BXLPuC*|NlEs;(3z9=2fOl1D*E?}weW zC+IawV+noA%M}@+A1zjIYZo=-L1&&<**Y%bTbiUULqpqIzPZk8Af|d8u{?F@Xe0d- zQUgc+7HQ|sQ!`e?2qYLAxTBKWDWt_ua{}`b-PDFCa3N5RrpB-Q=AvIeOj!AN36ZVj zm=gZu_IX&&Z7wOg*d(eBR(^!@-5aDjPQ)@Vm5J0Uu<$`I|B4^K;jF_96Zg_uioTiI zN?_()usZe&H?H&3-MDU@I%Lh4MqIqPerDlN62A|M>hfPv+w2jDE&TS{O5}lpkDG7u zThkSM?^D_zA7zNr?C1?bPga164U7KW$F*pq$l@cCA-jrD!(vG3JAZ8SrKx}-;y&Z1 zsQId#De!kCuIOU_KZjIq{~$X{E75Gr+S93R-CmwztvT)m{_L$c#Oy6!xNHp*33N)G zovCi`mJ#may#jSpN|}zKdc@OdLmsrGMw6$QeXGNB*Z)`!P0`0et8bhU43@b^a4&Q+ zfBbkva9L=Nc;3xe$R9w4L48zB-gc1;aCY&?08Q_wxm6i(el|CXWav=|U2Gob11=+> z*va6SR(8}ivRPVX$5feBWb}J{b=JgVrm~P1Z1ieSLrcG=;8c+#sLfqLxtkmJI=QOJ zxFf-~O5en;C2dmoJ_#$`dTSy|pE_bi4C)jTMN1_ewuq0058v9!J3Vm9ix1f?a&G6G zwNA?A*DjZLh}$Gzy|QJs+>+hB6#JGJ)h*_2TGISt|McZNmje7fO7OQ*0u~rt>%Y9$ z85}lYR?*G?kM5A2GRi3WP)z8O%sS1{XHWA{GKQY_GRt7k0;9RK66mCA{dC=JFrkIG zDTE*tk%#-p@57Rwn^g~a6PB2BObKu&20|c4+RLT1Ez0TAsDbIX^QGb$B&c5-hmAux zi*}*io23r-dH2XhbU5&WUBp>%esj@SHvlV=yGfBYB48w3y{^+<0>~E?)iK;Oz%Ht& zelO-h4!b-PX#5tZ#f0jt{yh=@k0DG{K-}we))H5x`0hHwWFau+(H0$BjX2qyuS&B+ zhYp*fMH7VpN8(HAn&P5YS9cCrfA3*^-*FAvASsIKrdz_`zKYL<|H&j1Q8ns-Ewl8qvpNfZ7h85uzK^Ohy?@=^i*^=9A++v>s2aHqj`cu~n*e{U$N*_V13b zBV~<9wbju08h1Wi(xg6>O#a_-WJ-^~86WAbE3D-)PWaU=KZZ}(uGbWqg9NvLe#sI= zOfYj7i)Bc2TZfrfsjG21?4SsK9kepyH2-KPH=yDd-HS8>vj~q(+{elo$hKOY=J+hW z9TQrFeMMlGe>-H0%AK@5)IzV`Bp}4S!!#^&pkB=HzEhl|hSnI3+_>$OdzHi!C#t>0 zm2B^?U5`Oy_Qz4bZ)$bUd+Pnd$U0yD(V9dc+TZnMg{^JcJ z2lXo_BAG5JqC3eUgXZfUtzqsz?>)%0{g^DQ7m4T2-90yN_ONC}hO;7!j+jpR+vKP* z>d4q(tRaHQ`czWV9D*$Nh3f+Ss$1wie4w*kyLD?o;3yS~OO-$Erao}SP>(ED&p9hN z^W*S8A?JWuVFH7>&0azx##yxpT{al({}ECkB6LETKz9P*6}9p6H_L07BY>Or3fo*` zr6TqYyQI=V)6n6aYZl8aQ@{AzobT1IU#m~Pktdc0EPHhch~o2#?!Dk~Ff7Yx2_>W` 
zz#d$q;>kf@Bw@j@f5JElULtyO9zFi+$@%(0I0|O$RtsW^Fh|rWiw5$heK_^P+4MrcZcZ!w*-QSI6z2U_r;wFMDW#_EY(6>(z$J^Q(f&$#WuiiY*>`zoR;!cqSAuK+T z?c5&N6m~QS-B%9&IpAT(!awr5^uJPCSX$bR&r`Ne0WhI_yBF%$`xy6}Y4yFHZJQh% zY14HdjThgi2LYe3KN7znV9~;IyP%bhol6>~u1Zuz6KeJUXCr>OJ}eA+-IK6;XotTW zznbhjL{1^8?=l}K1O40PeABWuw=Lm<+kv6P@8fcj&8N=G#TRuYkei1_Y%GnNe7aG+ z`acUXDLoMIi&_H#CJfJtl4K{qgI=tosSd2taea zPEhJZf93+LP$PCmg%nMBliGDakELc3h|fP4xgjIp=7RA}gkZ>$=*;T%9Ca=H*IOtL z2OO9Ffrk31Z==*k6YMhU%uJM6Z8%32Dxq4A^XEjfOf_-zSxzzVL>CHDA>`e}?<*7C zweD7{=V(ST)8G+jqt@v8g_;bm8wze)S!T$AqAgesp_v08h6!Qt>ajgQV!@&(gHi&> zhYT4Ns0kXhY1>!7HrM~e($pO2M{Z^oEH$-rE@`h?%|-EI`|M{fpp=8uj@!?8abx%? z*-r#Gu-`Z$ETimqQmqjDU7CiO*GCPR%rQN-Y8SEvK9LY!P~JTM!?BBzaU=S&9(L60 z;9E;^KJ=&8qTQCVtBfHkv0t1~%#>44 z@19>U?`E_^9mVh5CyQwd>=%4%3*ID4rU3UW+qWN_-paRA-uh3<4*q&FTy$k~tE5(p zE`Hz|xhVDRSFmJRizGQUcy1oP<$(laeOE8?)W!|ttqNxsu4xl}kH4vq$xrry$>(?S z)AjG$d!+~UEZm~Vj9y|37FX*J&9z_5w@k&+SrF1;b8I+*{1mWJo*Aq&e`@OzQ?WMt=8K!SwaEhszYPp%RQxvI@2OIhwT#Jft-d21JYpY2c(Fo$AP!xDc$D8I>b8ZfrbHCodsvd>Op6*yV|UFjE=Z*R zKiI+VHppPDzbrRWT#HYF7GSB;QI0w^No9c;*Q6UDsZQQ}S%*jOJ)u`!<34{?UT9b_ z1l1Wr+gjLRmz_E_`~2#_Q#+c_iwji+bn_JfXw|$6YIl++e{dXCwn2oPQ}YQfL%OVg^bQA)G+61SeIpIHnagoc^MaA0H1J zgLE-!R)e{&q0<)dZ%HHs6gwMB1JES(BiC@yXXX-`Vvb zLY4(kKI_-5TY}%8^tsMo8u0|6+>(9bOnSw$4wy5=5jx<^wdC2mJ{F3O>>TAS={i5_ zoU^n%T`x_8^(T4lPA&NUXIXBqyreZpztm9%{%}u^3DX=gEN1!Zm6}6SLmJXdYW$st zE_mbU@xOI#1Z6)ua5SXNmfq}H=|Xeq0jkVqb?a-t%n?fg?}`;oHg7Q&G&t15BmAHI zbaYgKXIka?*vS|K0F+HS&bok}!)`||lE^tZF7)flm<1LP1O@*~kk-^poi>xpTtMi= zdK=pHzQx!huMK<<%;VU(=NmrMG}LxJQTAwZTO{+T+84uO#D(#SM2P=KZ(?6hL=*f~ z>LmK$syZ_@csps4=wXq#%Q}Hg&u$u3%|c8TN?>ef^8)ymrAgTrseS`{Tyf4{A20ML zHLJq^lVSg?+2lC<7emj@Z3o|0lme_NI2F>4yo)w(PmFlPV0ot#!=}is;4UM_R>?8j z5d0wivlq0b40L8CD~305yuc+PS+lj)b>6)=KKzj5_jhfi^BcUs-8FYZ=ejHXPTe-F z8YRw1v8w?By3pzzJJ-^RQXr2|h5MU8*{2PoEmrT0dS?`V1yxzoM{ZHaiv9?zJ&-W< z_~9pXxOC&j^aAGnJNhW%gsW-XIUcf3-`^bz{FWG>?St!I0s;HkS7*o9oH;U4-40u5 zx$20XB)!*o|LBO=zKC;}3_x%gcuU+tfm@Smo_LA7Q6z}qS^4)OPRAdOZq^}De@d^% 
z{X%{9mB5iryEK6?6$BV8QJ`-Tm&&w_binh>tSVyGx$x^}ID_(y+a~RL`J_{0>3V!5 z5|um5Zq6qQ`PV5|tjWDB?C~=eKirr6zuFzVR{}wZwza6u&^4jQt*+K1>^zB=BwnM( zn?i?6$STCiFOg~K%*qneujnF?`Q-T~-(Rqy^fp?~DN+ik9e?Hzfd*4`GyC5v8~d11 zLWGkL8xzi2eZ)|Me~?H8YyAtMn7<_cfolsH%bfFR9*+;R)L2_uUI`8WFA{ZQ+V>@{Yb3P}Nsf{HFy8hH z(_Lm9)Cn8$ci*i%ZhtM*#a0nl3ZK>G!()?PU@BODJ@RG2-oO6hIaL?YI<2$fJSgOf z^Lc6*7Q!QDHT>`N8aO91K*%@9+{E(jk5=xT|7JpZkG6T!HiU1DbTaAJHI}$f`h#$= z?FoPU*;gh=;RkrxVpTeMr)QICBk#Wb%m{3o*=TR_kQ@he##cZ;5kuuiDwPOzI&kP5 zeUMD+afN6nP^%7cNXT0#K@d};&z}7Q@Ka_xE|ZJ*@2^)Q?E{jVC7se))ntLa+OE6S z(|S?GY|xNwzLnppw7M!^ENN+KFK@8XSaN^GeFV12VLkLH?mRYYpkLP834=kkGUxbb@7 z1;3fgH&4R4B8vWH&Fca}8W(lDn+PL8MC_?0SxDHTv1U=j&g2 zj63#NANm#ZfvZ-ywT5|B0>gA813GTkHPa`CausJ^1#UFw-H~e|3KOoA4~esq#4Gn@ zC6MR&`NI}QSGLmsd<4&ukW3UNPjgid*Qv9sx_R_n7xs44gBUJu#?2&JXvRdseFPlD zGrD%;#=mr|4XSRBT-=24NntM0i*9>w<|jMu@r=nbEW(1$Uiq%}fstp8 zQQF^nSdb+~m-HQkM^@6*wEyvkfyal#ze{Y`G#g?zBwpq2NdIqa7?at8_CA$CX_nDj%r$CVqV z{az*Q<;vFgj$g#;Ej%=Bdb9o+pYUB7KtNRF z9)0dpHl}^yOy8Neu~uNqPGS$l9ilOnOtaq7d;TA1a7vkk{W`0B&-xcs*V3&O#McsQ z92)K<+zun*oR_OQ4E*SCeAlio`v)jZ8~epH1))6u>*qZD%(b~w{=oku)^Firg`W1?eqSTO|fj;C7OvFAjxsP9$4kPHXk| zFl{!W44J&dfzc8Zg*QdnQ+K_>OPnRi6pV{Z)%rTAHuul~2uUWCESr=b>NP2ti5a??#(9CWbe8;MpqkR>!74$y ziALhtUter9uH{u8W*J@RfWvb;Zk07p?0T06TiX2F{pRbP&1!ieoGb+?of1IG;Z;tz zhl=>OS~u-eOAQaTCNWLAw~Kilg(8DE zK#Rb6(F-p6^;5juojUlM!)FMTiIrbSxR|vjK%O^v=?G@$(1ld@y4V%UhQdDqfj;dt-oG^Mi%~-OD|f+Nf=D*#udG zQU`19gceEPt9CA#DntQ5nt{H)MR4QfkIBzb zI>_+M(Z-E%9*OSyTF)hF2JrOsn^|Xt6ZCOie(`o zA@L}KLaQyJx`~1l6?)v<9hD6nI-=V(G%^~b*R20D4Ot$(-MG4TY=P*XL;fZkY$Hrk zV#lX`AH<#QXVavjKK_%tp;C2@ZxQ{+?3VPh_=NJ#jQbsu!|ZS8VAl7+rT9f@Z-2Ch zMB*2UmRDqGk~D#^mOuQ<+iq!vB&kuiIsuT0-;`}4ef$tP(-UI zT2OjqZ6$C(|Aem#@R6V-J2oIKEcEDrQKdLpWWnaYS*5+N4Ek$pxFYWW%Y!Tp5V;R3 zyHPv*pZ=B)?0XWm%dGOG10!?HTt5ZX>Ro45*Fiy@WyPMX8JPKeRC-S>L;Wc)$smwh zRsy8E)LXnVtPKphY&;P-xq|w`f#P|W8S@RvZzZ1DRTV! 
zLPK`DUHoyV#RNM~o=yqiH|;p>$WT&5D%_HEqweSL%azI)8F=5t^vy|92M`669Q=3= zjwy)12`S%_$}(luZ&a&$N%y~Nt|pQ>(g=a%g_r7k);qaC&;WtFXh^+ME_^*#dCHNe z?~$x)$Z+@&pGmM4={&MIg;u^n!OY~SgH%?&4IbrP>Z=A_nRket52E=Ix^>(*%b0sT zT}qkFtFZsrC(! zA<6O_`gx%=5a)?oPHZTs@#u|5FFvnrV6V-5^xtTZ3cr(UZpX~zF1fDUO34gBHeM<%YN3Q3bpM(#zSXqcIAheB*jo*>8 zB-MfE1<{SM@rL z*X1owTBw#7WE68?@&@R;)}q&=cewOFebazGXR;1VGPq!JV(h)$J*){vA{s_DcYEbU zh6}DSZukeJJYg725|=1BC@mU=E5pb%kU8Gy0|3?|(=%#sQs2#-GP>v5FsCQph(gY6X0! zlzuqjcbToN+QbFXv<<&;X0dBo8{c!6dHz7DdpZtmBA+_l2R&^2;Oh>hUov)^;3x;5$#4l~B6$~?D1QbaS-Nj45=OFhEiWPhMi)9w)g z#jQwX1?I+<{(5%3@zm3TL$3ubz&kCN(pVjHs;Bg0E>_oE?z0dVUHGiAfeLF|`s z!&jzJIOs0!bgmMjA^CHG(_NzO1Kaq`*Q4z*?^Mw%OzGhnl6v@5ypvJJzRCeL{5#X} z1bi#YY}Js6)7iBQyOoSFYL`!s+6~n7&7^Ue#hL!M?OT+0f3TKCjANwJgo~hnHm~PX z$t&Fz{W`*SmIusr#p~qZn4+6ft044D!~Ch9HX*8az>;;AlkSOfa@e!ISSX9s`U=xR zA^JoZl=0V{GpU~?le34e{B7Y3lzkA9Y?7o48F0YkPwx1|NOLPz7au*~grm1w^t=xV z^A{W}`uPN@g;XR_>vUUfw)T+GyoPioVwAJk)!q>ZPJ~m4^B>Q1tkhaPerF5A#5O6Y$wC{X5@e{|Hh-@+5YS(DJlK`+)1z0-2*RY8M>o|)AIxt^}YF4J4J zPtS5nbzU<2`2rItf4)w?@yF;P*`TJb#C@_>=3s=BasJ_L1^TK!bD(&3$+|DYMW33i zK?xF>st7}PYk@z)$EbT$Wr7ix5+k|1dqh^86uuQeKAy1qfr+U3#hvqn2^bEHPdz%shA9vw8n=VB8txDYMP8T zsJW8hGcvePpoK6^)&VIv+QGmfv~t+fGXeYOulc&@0%wyin;=KelRo6|NwdaWKUPL8 zok_hdJD=DPkTc4q=j^H>0~{wTC*!svX07S;RC|4!(Z!7Omb~YB3rY-RC73K>lm9IF zbLFKMNQw{>C<{l|s!iu>XlZ|e5K58}kG5R#JiKg%fnf)LEad9Gt^-SmGZ3jZ1yZp+ zIC?_p4EA0DX}ovS&8az)XSP&qF0*svDexJq8q{@`_J{2b`;q!H?m@z>A0t3z;8c{jS zU-ZUIkuP|Jm53Sd{c+%_<-R|kP&8^VakY{$HS2A9HL+l?ZO|=uL5sYOlMHg8;r_|o z5GamYI4>}Nl6mTw*0CS<8?62EFTOa-jF)N7mL@_=Ag=>d-tfN1@YFGTSc^P{n8mLp z_xq_?^b1R}tke>r3+yvn1lDEi{1%nL>|bkhyOV~+_deICK-qgXjanC@>DBhkxFbJk z78M^2o3(B5?UCNL6zGleT1_k1#oO7Sz9nKAa7k8(wT5Lq0;?iGpa_lhoXRT*0?cti z*q`RFizb-G>`-eo3MxE-F4~xs4Ys0@rMT}PX%Rk ze)dh7VBHfnCEk^_<3Fuu2j%ich5M?y>QyHOlQ~^2=-s49qk`F0!%)hiE>F_b)|TK>MW>bfts6IbeVLdOl^o;NI^5@!k6odoOjI+Nf@2R`7X+(Rs>2I^ z%#}C*Sscgh7y?F>%^-c1Wm`-6&vO6QYCpY}pf8kJOF}`@_DMJrZr!83(%N(&lBU2Upbh_XOUXkZwO`rjMpY4p2NJ_ng9wxl%D!q(`c 
zL{%c{%-SHk+m$lChiTP(Z;JGXV3)`F-dE^U#iA7?5xa?!p%cv|+L>@uctquCeQ0 z-`IFLzBMwqOjtg#;je?bcGfE+>*H)Y zGZ;(BTA=waE(z4E;x=}b#ay3I-(S(_hlg_3vtD7$N0Klmk z-Qs6dR8;fPdv)IoCe&+TnAEy@e3VUfi6{|RTP$^`K+&ISh9|3AjQ8$-)1=w2@~1N| zjHOCQkYS8JzgL!V22JbS|APq*#Tz&jwKu(T;ObBUtWQ1)Dj)!I`tkj&Ipa&!B0~%N z&BLU=M;-U92XHd4OCO><_}uO0wa}0)6RLff&3Ij%wzZPBcG9(LR@degOJrSOapP+h zY2%arNgIDP3fOKLAC8ienBh`wm>wZGUxrLFOA$7#6drJlK+#jq`5%k*m; zr{(=kYq6946LQj*ur^mi>Maaqwh+UF^59a>oZI4B!}xKB6&g_v^9C=!rM6|UF5YBk zKE7iMv?2nFk8;1l0@)$hNqpR7i9QYXO4=loADimWEs+f?zoP<%233xbefLKu-l~0k zT1Iu=HzCM(LXZBbIAaqRLNuPWZmkkOsq^6O)LGu_U4&iXBm+YnyMFUocvF!qspZ?E zo0}Ulx&4=$s_j6NMNDyEwBvdfDXyuWclkCDw_Iq>UF)Vy;U>&sfofF+In+moq-^X= zdwSSK*=hTFx|9!F8BStN$=n62$xrYk_)Vp%h*UMso48)84rB}qXeG4}cga+Z@YpVB zqSa*MU0jGrZC=7Hr>4)gzt5_PD`v(L5!m~bcUEW8YvX$pMsA+J^#adaVCDoZ?NM^f{NNo*+x; zlDOSnUG-Z1DqS<@<)xj4Op#O7-7vsdOa&vG=L*!2cw(HNR8ZP^J2}900!- zA6y&y9kGg@qt0#WjMGF=q4-^g&}$S`R(yRRu_I1(SGv z!JE#NnzM>z2|)>XW>V^`S8@yHqRnFs8G7ebhugPc*Bj=?19_evwk4&9#MpEB$*OwA z%X(LAmH!DGcxV|C745nyz_&Au-alS1npe!CYFj?LnHtW9ljyW4ge|u?*GwZWiuITb zocib!0WNXrZMsxRH8IrZac0XmGiUx$@6kMNX|-dIn>&q2IN!sp z8BtHbCaHyAk(d8Va(Sp{_*sK69TzZGgW9!M&~ z`$>lN<#9(+)Z)QFXwnI+hpr*D`9vpe%}_TT#_G<}2Gyvs6asg`i1@X{qA{Y+8rD4u zr9!*aVm}^$m?OShYFG=3eA~%)6z30(pX#)!v8=v^D_nz_tl_XxqKB+tP{#$qGmc?BXbaUSG)Du^k#F?Iko}6w<8&Y*vbLL70BhmX79dm2J z5GU+us6hjt?8}<=R=X2tR?q}AQt(BR*_?a(^W1Z9c6J?iu!Q8pUyUp*+7UmosAX`qDS2r%I&`rE53VeJ?aN?&)6*ZHcQ%m=BLM(jU=$ril_jq+{Kcb=HOz^*cn*zx|KR z9cG$)KTF&<{1H4ZOTt^T0hHLS2G7+jb85E&E)!`Y86^R?7VC_z$2 zvi1~MUiBo3fzaro+iP9>9n3-T@eBTkWy`vO3AgaW*^lb(+JA)ww8q*1tzU)9-($zi zQ2UMCAy_wW1h}OJT$NWuZk?z|-#s}|6;$F9Eof_kiCGY+&+O+qeR{m-{z%rFP69b}^ycEhQLsZf)?E#drJq)=qa#gwO|e(u9uC3GXlAn!Kg=)Zzz#z@hJl`%DY+2z4A}s!IXfUCz?S*=)nmnFCrBKMlGEC#>5r6fz};+xZ<)7G1hZ} zrRIdcsvCK38AXdXhfKi#dFk1XHQaiu-)b#NU6ePmUxyS}(`*hq+`{Sx7pla?^Ib6J)3Oo9i;=_C&DF09} z0B%-*elnZUWr}p<>Y93Mu*7!#JVyVBK{rf|k5bT1e1iYdNO?8H^K;vC^9Y0az5@Ut z8@03A#ap&Srk3QRz&UdyL3Vn3F|Cmp*mrS>RWsjjGr=ZFl=;AZ1PHzK8 z3+p=Ls2-dUy!txwoXB@J`{MqW4yf*$(4qn{8q6tFOx 
zTa0{>-6nI393GdfQ_iy9@pd{M1{(+sB!WV*uu4j9x9#({8Chpem|eh0a!cbOLxYS; z%9euWKYs`l2JBNbNs5*azHiGR2A}iL9?0I`{cmPsWXC>60TOy%rUX$9i~Eqz>V-I- ziDZ$%L9*JptJNch&E`jDb==;xp*KjgBxb6`ez%!)7R~*uV3B;$eO~{i71gV8)2(6; zzvP@+xHYz(wDff7nO+4=nZUUkFhJ~nQy_H;S4iyK*mZtt>D)?Qwf5YdoI9W+dqTk7 zJcOQwu-gS>+U3#l%K8fBg=|ou{tjfR*udjVGJvYuij0d~6rESMaoy1)ec5{2`DDZ` zY6{w?`^z@)<)XVf?O(7uGqiact~80g^ozPGEqX<$S4|>57@|dqBiFN@nnC@>JU5a= zd53%5UN1gM&+wcZ>7<2(5&7f*2ygp3 zKBe%zxD>p(V?f-E+iMApdo?<8TkwXA3zbt=tz3CU>>tp@33m6lxVHLJfZ$~}XQlvY3lYZdsgLzJ>FDR)i3*RE*b>+?XUa7Y(Q#RPF z;eDrOY1xKDfv@*$63bX3icgjuM!tFWBkRNd@E9}{dQ1ht=a1j4m~+^ygJR_H^jI%VMc{G6@E z4k_C8<_9rrDek2;mi38>v7O)OZ9-_VJK)Nq; zXgY1_oWqJDuqA5eudjBkQJmANYPjDzqFf|e?-RbJ)Z6|6-96ccs?UH#@!|Ev~w`lO_-q-f=rIJ#SA*Cc%DoIq5c}PT(F-2P{L}@aVBB3M}Whlxl zLxl#RNr?=l3`+x*QfA7K5EAmcAMJh4x!!aB*Z;k)-FbJrZLjtHem>7|Klgn8$SWyJ?ZRZd0J`MQts_38T zyvsh+!awmbv6biXqtC-HJY?rTKM@)YGq)FZTKGL$7qL0RdVAiE@`5Rr-B$qI4!vwA z1`5cBgd~8(OM_RXkP9FCX!;D|_ATX$ig{Bq%DURHpXh&o z$1L<(du@PZH|jZPYOQ959pS^O-BYMu;OXHLrW&W@)iaRc~EqV%pL+fO^(yi zS`E71iF9xCD_fs79fgz!-;eLR#gJqsA&ZQiCcTDB8+@@{rhxb>`4~7Fv1M@ zM!ro&4d2Y?WgXP~ibP5@Wb_^TA4jZ`IEbblLRDFYru^)d{YhJo!;QaXWF>S@T>g;> z-$41G^2sPqaExg32$99$gjl(WCfO5p^S3CLhhkR@StH5j{bT~Au~XX|{Dx$kV23>4 z>GHXR>G2(C3lyiANhM-Pf{U(n@X02S7^r-J*$TK6%F87TJ)hw-6B98<+_F|fA*O=7 zp0OU)(iNvmvhp94!o-@zRf|pBhUGN@{rQb|bE1+>*mud<6w&q>PAf0NWqB~ztGtH< zd0lWq5uwnNJVDR7!oAmyJ}JZHhH#Ej2)SDTimzeMV4%qe>!^eg&$#Xpo&oG!l{CrE(u&>?t32CbtWmGQWZMO-$QK+hU9ws zaF>#Sunmp%YB%$P+kosx>&b|4@!-)ThiQvUCf``^K$%QdKbnl4&WUUAJ1jA># zW2r~*Ew4aHtcQ&==1r=w@xNm9;=Rc;HE<#Z70@4VE=8o2&x!g25h{n9AmgL@$EJ2My zzO7nFo)EKAl6maW^Q07mS6j^Ee`4Z<0mNyvIg=Ti2P<5#)R=~hK*oC=l0L)LBN?!u z`!55gRP_H$gN@(jCo$LOyx;yj?L|dbt4hm^mngnSy~?=!p{mnK(HIer_H|Gx`2IHk ztZN$X_ZGQO7J2j8w^<>+yy*9|64hLL zQ)_YuKbA>MTkL8!ySI1S%7HYKhbEcoMI-tqc z&&eEupL+qOB-0takx6YGSaDA)b4a1hWb*$Lant}F)SSX%(raG!Z=D|CBTi(%^L8bS ziomxJ*8GM;WEBEF1JVg`+G!MrS<@m|XuO1QlI9QSES~~R{SKhl7Je_Vhj`fD*OzS# ztd@8Sv}v&;rr3AFpQZ=s1XSXSl9Hg-`M<8~OqcoJ>=sKUg>xJjnz7GJ=3*TXKL{+C 
zhpGZ+oNBy!035HESX=!0y@U%t%5WrX)QBRQtqgORci3k)V8Z%7+aaW^68p$$`zdM^ z%w49OGJPBR0DPK8kobm_KVzY-c7@5AVn08>T_8=>tg#RX>tl8Fk)~Qx(?@F3h7rIi z6_i`ic!AZkT{LilolM>~p!;Mf2S$U&M(I2(2bqId#x1`(FyNb!bRPI6QjpzE5o;Rj zL)DA_n*s}Jf^ML18B$4pWs#!Qhyz9PRE(3;*bAP2`AX}foXAw4E#?sgFA%tt%QDF4 zPb`}q;6YX)!r?IsO1-`SGb?>M1*71k<9v_x=|Ldp?^g@Xil^eL&a-a@qfOmd5)T~C zgP}3s##_euqTVCULvqN?Ks?Fj+3D!ft><194?Tl}9C`~zXvR)SX6ZcVE@&g*#zY)> zJi|;{>jbw$5=EgJBzIZThV(uz^`G@AiiS z3zbzkhYwyCg5D1)b;5H`=IzvFIE$))J*W8GJ%g;*8}fJS_$t{JGHd8Eq=`a?o=K%e zPA7C8qdZ2z=uAKBz#6WNI>hhwAiPa4~zWMfm=|YWRcpN&ca$0=O9@%z%$6#l7^Hj7HX0 znnz=b1UXtpjgo-c{^BEFIy#nlFJ3dy-tPg^5qz!<{oA(2>w-*!EIC6-uVU}IV8#2W zBS#=5Cmrej{rlrXqGGT@Fcph6g0Sv>3r{Bt%e;>O=u?8Nh2dR3L#2Xy9{rBs;ypIj zGAVAG4>l4oOgIFGmhcBFYx-Qnb_SvZjv@1DzNE9*~JlC3(Sv$ftfRj@;C%5 z#@AJnXdS75v8rR7xFt%Igv;8z7vOe6pbL6DK&$?_Yv2vwQ~q=YI`N6dw{J<)mL&R2V%+<~Mw;_`wKa=kf9PgCW7fMaS?~vCWs89ZQ)|c^a$ijh8HUQv6JD`9< zSQr|TWTa%`$%6~RH+wc0ZDKmmeGSkKy>EoBsh|Uy*Cmf+LrV(2%S&u3ko#t%}2(! z#~dT{egy8F{a}CN<=Y;-dT=ECYb+?6Fo47=KvDxVo7SQnJF79ksH{0CZ#4{g*QT<8c2bcEf`lP;$=WxRuPyg0B#t`FZgpiOp*y40Cp#0$6ug z!6yDbJwKdj*we|t-)I3dX?h%a7p#OG*dI1&|As;4uXtP@tfs2RvFr7Ufb9KG@mG3r z`E`$ktk%bXqm{hAp|o;F*c}e90w-i&Q8f=bnydD}tM4G}C5YNg(hpP^MDQ$^2-xNVhB|zNCSb^>PIOt= z3Q-)yMiHIEhgXvpnB1N4Lx^k!kWUS&iiw)7k;bvvST&EvG6d%z1dOy_%TXOBo%)A? 
z_`~TJ6hxU|r3lx{wi?kobeuuh4ZPrrn57K@Dn={7xD*5iZw0#O#rEcnK*D+(sf-bN z*Qn|)Im07@gF|*P=6@4rl@d%k1jLd?${zBe!Yl(LgN$%#$X6391x)4a)jJ42B-w(u z7x!o`OLP&0+uBIC#0^9S&I!XpK=4LvP_Yr^6wRA?5MS&DPkR>sqRl~b`T6;21qdwz zz=Z02+v3UZk${K+fKO@+Sp$xuYGs?Gx)JQWF!cdW2R__-JT>GTw2Uc4p-#k?fbc9o z))?E#(l@!6R`@UlccXDKoS_qiXs~AgdxqF><8{b3E)$cQ?H3Su^@iFhiz!lGOKzQ5 znUjxZ($c&5NP;{#(y5%K@rCBaZ3-Bt6Mc=s515dQoj^E>;LF0-;y1;=16T$)@tD8b z%?Sh|0}TTq)}7gvrJGhTW-eIJUDaniux`HcVFtq>UR9XLI!#g?9{{2;>`woX8eYlt zxWhWFISEwWbuLw7&{W>^6i6tw$kIb{A%st?|K1pkTkLZEr`p>s}H;) zGRl;<@OjT(>#VtcIf(=)!~KV`Lbpm?UzM5BF6gAt#})!8cE-Uqk2C#z6rSt=Pf#;y zIQ2GfpfHxj2Cgz>SAU~zkx)wDL$X?cNtA{9cb~@qfB(G@BFW!q|9)vOp{Fv^&)sEg z7I$XQwsDelaVmet+;!Lek1G#-&fP!wUgsPi{^|D*`!doTjdq!_J3wS+j15(KGwj~3 z>#0kJriJ#vqH%BOW_@WW{2vT&r3KwPXR1BX7WkDvHp)xlT!_tAU={}Um$v%a?L(7f zUWKBLoVW3!G$1Pz@u}p7t@K53+BDSuxLjmSTb93U;?!83Dh4#DD-4Z{bYTZT171Wn zO&+TVUkh|Lpsf-topq+UU*u^UhbG+3$Rub&|aQLz%u5-_=CpzYJ0OLV;XNCLMl({moT;*-Y7K7l4!s3f? zL?&JK;+`I=CUFq|f!;pu*UMP8*REXo)bFVyTZJr2K{RDNh>~IiG>Dp02bNZCrw=$s}@*Dv|1GR z%V;%{GC*_taqbJfi3i{<;pe%>CvqA;uLuaEQC4UWd1e1Qf&UJ9tP&EOWKgI6yGwo= zhZaLXGhAoVtFh7a+8s+rZIj`#AK#|la+2*l=HmR zfl~sr1l(roiA~+$aea{Y^R-mN2@>KP{GCp)d)`ZYGOvy`Qy9dZ-OAE?+WYJmL%aDs z>Ow-9!1LTCd+X?A#QPoJm%$fUZR*5NF z)^@p3limAzqpULwXLUI*Y`#-oZg4!G0a5-=)Dw3cs{_$Skur`YEpcey=+W7r`` zU(}JoWZz{fEGeqS(D zW+AeQygBN8W|8Vp^9AxPd{Us?6e|INvncwSMF^mZS4UZT{kB>gR7H8w?y5MgxKliI zcvoBFtK3{w(PQ--40JT+->hGBx`M3(EsGjTUUI_}JicSYN30pq>rCb44{Nu17A^#5fZInNxGB*R0q$i1=D}}$<37y$nyVzxa;F+RLsQ1Ov zg^sC;4y#Uvo52-ukO03o3=Y4jtaQL+>a>vN!a)obD$k4`R%?t;r>vBwcpkBpQb-nTb-hcd}ozl3+{Q7g}TOgEuWaQ8W@VEQD?k`WT@F>anT;bdxziF6qgKDY?jA$RWFG5s{fY|GWf zDC$Z+FVVEJ5&;zd(^DOt%@ZVXlHHOKfU;Lv*7LHmCX`?3yVOz2hN}du)z+_+R$kR+ z=;n4$Z7AZ>C5Wh`@WiA7E=#vXxgP>~ zS|J}Xpr(^DQpJ_V%b#XwAiL70cuHZz3K)@uaI<{BDBF462SW`MSAA$ZrBp+2t`>K0 zZ56_z3cbDfg1>*3Xf*Z>Ve{0z`O$Wcf!XYwPX|FuE{j(?PRKd~RZ4SPTT|Z`ak<$1LS6*xILK?gX41}(xd z?;8G$m0Qj3Tr7@J{q-do&Iz%YB-BjPZZKZZYIVc%@T1nT)?;k-;r&4E#|;68JO_FL 
zaaK2To_ne-adES+cCfe1r64)BqA$1;7nLOr;!I+sJrX(am#LnWq+r&&p<<>4d;f97 zvk@>b?LjoAEu{hRZ7z>K;ZDgs{KBL~6m{M2KTt5v{PC zF5<=wX*W32&I+~(UL?Yk4OdK2_hvpi0h<&)ovv)lUdI(EbKYe#KcWtgh~g8-V2bgj z3iE&W%huO7$K>p-l+FX9y^L4=C*HG>QL7*;@%+KdN8&G(czA8Wb}u;*V&T}?+12+A zK~7nFy!)|9q7;+=uXf)mF4pF)>^xX6%omfA;0feXkM?E z^$s}pH5YG=&b;7W3GquB@&NJhm6ViB6a=EN^|!>)>Gs$M*i0lE2b?-JVYuyTo%4XX zeM9x*!Ovwg6l#sro0dsS-_hu<-6FwHX609Y2QyvlQ13Nh9JGX=CHN?K*7V66^z`cS z_id=ygSpR4n>69)s8@ZxFPZeeP-=@ULT6VLb0`ly6;Cakntc;?b}tm?h`66isxTJV35c6nb>a2*_cvohE$3he$ zX_9k;SbEZFlUcEfr2_ny;^PewHG=o%FY+t)LuTygikQ#eR(tVdmpy7dgQ!)0DBPyfYx9&%tI*kZu;s$;mkuoXIh1gNnK_{rW8k1lI22WX>vg&&#mP zK7*gR0`2(m%=5bzU)plRq5xbhnoK%B`h?-cc_iygh_CroL22bFQTWD4la!-zMZzK? zc8}F&-AH|hcTe2{MFUjFuVMI{YkerQd*|;%mO=<6Bww?E-yk+O2uTjTh{c#q9kxm- zLOWQ+p-O|XZBB-UJO}T`U;-2t8L0<=F+iJHBBFlcPN<^U!eBxEqbb|AnkvSYwJ-&_ zYs8)LXn8SvKoQX$nHLlwGOvGayRUZoqt~a4*Z%OlS=Q2sPuzxr_zG@ngp1XcsVuqC z5IG5Umzx|$3DtAM1bnh(OWEwjN=iA!)-Qx_9sYRWJ{gUW#*x-)oGQ5dDPOT8xNmP1 zyC=p{%>0M=($-2OCs~|2*UYA6U*98+qRP~%s;ux55D>`uk`&b0G&`&phH49`Erh|v zgrrLBS}tFDEw<3iNKFR;zq2xO+s?vL@EL8}N&e1w?*z9t+^L-da?P~L_nZO6gDuH|2v>H6?Du-BJb6U)ZCaCeu zyo2>Ove}JUWimk?|8<;?rJGTRP5%+9*yo;o4q~rhjCFcLyhj!~9(-=K%vHVqCejGB z2)<}Wh9h-ST8$S-Ff&u;{^u`DQ~AR>>zWm}n(EbJ9;XkfZchEFY2a8+46c0=s8_9s zHzW+d1yt~2GpA@@xe((w2VSjsd#?aLAu}AVs=u&srEd#(7QNsXKsM8LV%tR>+_I#D zwJdEWu*se$6L|rO8m?V?^2EAq;W^&uq(Lo-!K1ue_}f}rLfN=EyHed2=rR>{s5Fne zlo@P}VoZ255lKQTaVB8!E2y2(`}jh_^|{c?!M|V#_X1XX36Cp_)S~*(k8PF=)I000 zDvB#=iD?&2kpM;#W`~d9;^G1%85(pJs!q1rpcUpT+K~M~aUcdtmm`1P>I-!`>l&rY zY~VnnJA)OZ&7=~-f4ic`11H5Ich}q5>MXr#vjr*Ydd@_$F5k`Gr5UgcVXy9SSl zH`sBS_p@;tRC^n|ywrV8yormlb$hFR`F*4G8x8lz*7&@DXcl(*d|Oy?JZ9xiv6-}> zZeX~=`ApF+z^Th_-V2PCG&t0UPCf)@i7o0K)u%9Lx=ZQOYXsgJs&w5+?Hh3FQ)fGe z<5to!gA{{&8BMxfbF2m$*~XH^;O=caQqEp75xy5H0pIXp9gz7w`>VH#C_4FI!^&-M zOrM*YHf-DofI6&?trCLlk6zN|P^Iv`Bn=Fgjtx*;8${nNwJD?`keB|LYkF=odh(i2r<2Q_S&Zm}{3Sf+<2KF`G)d2bYN$rc5^dUnl;E z)y2LY+I&g}l@6t{D=HQ(U7A*0$I{ymH>*p!qVA{R{~Kv6X~(-+Tv@Un$D|Dx@iK~1 
z#%`XePG^#8UCNfD?_*p4tnWGYeaMr-r4@4iQ^#V=+;i}yEHiEV)aoa9#+>|?KjwuI zbyEF?Fz!ZGE_5^7MLoWVySTcZY&6PTee?LM(1i}83}U@PUU8K?=v3oP`zNq`V~g@Xbi9%`2qr~1ph8PS)9yaIs2>%_lXJ4FBV-|fuW6Bl$pfj z*nS%3Qei_pF)BvTFkW(;hZ%4RN_$`=r!m$ugTLzK{o>VZYX=*PzB|=W5T|x4!Ed0(E{k+5caoyrf^cz z2Rf!~ctj5$z52}Jq7F_D_;S}iEt_9Q=cl_q6I1{9xn3=Mcv(AR=QcW+&z zmCU6R6{YTZu;HZbJ^wUg-A7?~MT%d=!zXHUG%l4rMJZE`{=Y!}oz=wfL&5fV9n#zG z1fa$X2Xz?ANKv0ueHe>nuJzH7Oyq_ha4fi$I`0ir?26 zbd}KMX30&{6@T~^!@6L+D#ZV|)B~s{0#&+mFO-*;7oABuyFeozhT9+O)+$U6+Pr+c za0qtLoyL-rFLm#Gv19DlR+U=F@tv_;N?pidXIwy)s?Kcd(XK<@3zsY@`DOy;!ZQf0 zVSLzc(h*j(<={b?nwlCiipQvF+*vT8Qg_81)lP@M?mCXYQH%-q!gX$b;+2 zzwt`i+QlL|@_O1*(c^6vev`>GH{x){|E#=?rjf2V(q*P}84pu(?p%gb0q1Nd6x$b~ zqxC^SP10&97sCOVnmvlK!~2Pr*{KV~#ZPjH9`gZLS7U=h4m=IeSVCR!8-{c8^ysGJ$VI8DL|w7J&eIH@EId-;*W? ze9h30u$1GI)=hEH-1zEU>zK~OscfY-5%{)0BLUHn=QhtprcY!0p~O(H&+r~yhL<;x zWAP(xiBsxqr}&Sp4vUXf{NMo)WcTv&+E|bFw1ENr?8|+6;?C^f&xL=yP1fZF3PJ89 zLr572+MKSf<>D$TD(&Ote*AdnuVfvuSc2Vxg;-(`-X>xSW|E(=2i-$UTieHe(-Wb3 z6>bmwwqCuovO5htXJ89g5Mg(J)>RZ~#XcAtpkzc46ciK;1%9!1EeF;xHvrLxuT~V0 zC=|&S?N2@@m1uyohzSAb&av|H#KEs`2>Hn^FOgxsqTUd_L?K1rXDiWf*NuKVieYLn z-qoeOF)cIv&351wGE3zC>B>_i{Ba9-r%2A_S3r>rgCDQ*;pNDt{|Zfb+Fkxe7u~F% z=gKZw;$H_#7O&o zRG-<3b2h)swADpZrw$>V^Ltwu1k3~PUkz>cKwEu%{XKxIBYg?jlQOuqcE_3LQ}?6y zpo@=k`s1{V(q&n*^`bt!1U1M?X7))o3N)K$YceT2=@J8H0&0>F4AIWzK^4K)C00r>Z2aOEK=uL2fN zoavILf)*D(Za-vA;_{(8tIQL1_$?#V6dfB7Zg2uP4b^C<(g>}o{^xJ_!lG}wj77{w z@+?wDpmWC*Jqr3};LG<8Ny&I5pZ(KjQ$d2J@ie}J3;xI(nDN0|+dxu~qyS_thS^r+ z$cX2JlbWnVNnDsj@3G&y)BSyYCvHoq+}hZU9JKoKtTZv`w|0ZGZnt}PgG)}Q>s#rw zy~CpfPEM0~;Dg)M&^IJ0DGC2uA6Q8S;9k;v`9q@a;s6N|o{uLNqQGyc-n!#ymh`R1 z10PKNk`=w%28V^wZ3FVPg@Txp0nvIC71JCng}m|!XEi#OVuQOVrfj zpjFWEOBD$o$}MTyh34kLr-Sj>X|A&K#iW%{+Z=+G6clDFZr*CJNj^xE0|ZKn12zjX z+CvCsM{R2=pg4RHo(n!Uc!4sJp$M?g@FZ}%{pYj)dZmz}c>g0TFwtDTsHoVDiMtP8 zT$WROa*Nv0^1|!z>*|H<2WEm#0@ce+m*@5#Q=R0y7Vn}Qr8V|}u1$NkXZ?n|>3e1@ zOS>A;pB4+(NcOJT){B-bd4uRl4o#Q0EjBf5wc39F;QWL4=b`YcOPvNf-jY9O&S0;! 
zTL2E7xsm*rnK&m+G3XrtBJac^F*iLc34YeLYn^V7=+i;Y#hAsS2c&gc#68El0r};}2*Zd@P zy_ELql(}|VCWKKgkWZTo3|HxQ6^=eYZ|#)cKauMUp8r#fr^b;byqRR7b&)ad4fTxb z)48<&6wN5VqTs2WpW8eSy>e3YfhEApyZg4*e^2{zad%i;Tm^{iopmww6Ofa^Ia{W- z!8+xtoPa*YlZ{{~$q2a!a{H0`g=%?+xFbU7{RbSH8XM{NXo1BOAC4dG)j@QR(FmGZ zSOka(g*lqe7U?F~N*DC+wD{zqjmaXcZwcXaBVzswhp3JWlTY)_a z81~YZnJ=xj2nf{Nrd%C|7H5syMfvFSvgG=|5;%E*jTA&xI!0S6S> z5i9QOjfxuv6&oV_iY+fbMG@Hfa1M`u@~xIj3KDFQ<8c2#H`0h;aTVJzASDO~hBth| zV`q$BO)9UlGD@e2lGtSVE0dMh(Gg46=wGXQZLWl=J5FC5I);GfYh}u!Bo|U?MQkZ} ztSMndvnCi*ne!aPl(nDay8T`d;Bnyhel2b+=z9+Td!OXG!`YBYpkK7%Dl14_GI#{- z4VbN4N-N5kOU|Q^A$xyN7xwx`MZWln^>b8h!nSvNr>B1-M+jI6&3E5fH-YOczHC|9 zXHo5UC-G=k;d-y~PoAT*PuIpbfxW3#=L(z*(W#t=V(! zYUF1Qp0`P8;bQ4%qp1t`IA3R}KtLDuAFw&zpePc|UA?^2b9TnNja1J9VnU`_es2>W zYX%;#sesaO4E%?}=kRm+#|LwLM=kOiA*cTP+TTq6dG@MdF)@Ur7h{IX{&ci%hH-n( zS=WYY=$?UU`i?$&^zC5~#;5c9oG%{jxu$f_LTcL}=@@BsPwI!g6Y|$3&Lj*U8n6_! z-I>aEVRLum8`3xv!>sk3y~CdB=<00!i|21LKU0*p13h8d&}yOUoxP&GrNdcKwP@N% zvfF#lT1i2s2#>(*+1EOHnJp>msl61cRLvY5wPvi^cWOL-wfjmXIEFrNNaIcSOh?sj zdZBn=hdIz~4!DJ#L-!Gr%~lS=Xeb=eS$%!dv^K3Nii-8CgI2|6RM^ZY%j#Ux9Q96M ziJ=|HCQrg<8%45wffdi>_6t;7^z!th0KK6o))kYHPcK*7#2SAJ zKNr*FfqTqHo49O?EwDVyy;MP3CS={Dz{%d`b5oBKssLB21YorXpFvlckQSn^pK~`r zfG2MF>Q)-rl;JH`AXukjc$_9y*WEu9Bi5{**HL*EOCRpP#{c75rZgBu(8+O#?pG{I@>O*JC#Dyp`Cb(E6SX;i zNBeDsRZ@N0wfNVC%fh(v{uhIID{w^a_S8H>Jw3_lHR((d8O%r_$a@24#M*=Re=S1Q zG}pj3v-DbKBwqDob9Lm)bms>-0wE>qP!|ssE%^Ne-~T6uC`sxQxiVO7t*u!gfBJ9( z6IT#7idj=}OQ*r<{jI5}|A6$YfDr}*z>dJ(EnoC6lgf0##ma zufFJsA12thrN6x+w)-PA70_fjK%@$9_yj~)?Z@{~H&$KWIu2j8Mme0nc+su6n;K=b zKvi~Z^}T45pK3K4+*|?r6slMWnDE{9&QNCZtFWIYz?FO_AY;sbk6RN}XW&#?(efuQ zGP!dVoy5qdmfK)e-+ia=tZ+QhWqr<~l|dSDL_#Ec4f@N6e=Hv82{9fNyY9TPaZhE+ zv%!uPUS$_BdRtua7|+`@Gk+KWhN2M`?dgk{&*SiU5Y9!b9M~ea@_&2CJpN&d88lv> z@!TNu<;Z+VI84iNm`ZKiwJ}|5GY3pr0#FSbT44DYgP(&r^|AJ}YZjmYR<%|5PGW{l z`R@?svrgetVIr6ALn|3W^&vYzd7ChCP09-i*eu$=)_fznnAHxFr9IZZ>H8!DFF4|F z>9^(eB%*&^PQQY&15;k7f9Rb#d}SE)-Hsu_SYoRY0ZEuW*Z|NR@Tr|78Ch_*tR>4E 
z1vhRQKHVyir)Ba-T#kSDy7QFZey2Lte8b>SbXMw4Aum+>TaoBXp2r4}0hP?9vUt5- zkH?sIWz{B1G61%LI$I=o*pT7zUKbsm#8tRcI2M@aCL5v-*NkNu7J!d<+J< zrXFbHTr0rSc4Mu-%C!p4$=RRa(J>C7;T2H=cXM;gj5&1<($llZ%r3y#AR{nN|J9T` zG#GAX*nq!iIRbI^2=)f+feY0MXbaF-Ep>=>o{Z>c^3nJNKT8c+&V}iuepePV+On); zIBT6Te;M4w(9un=_V0+~KVLB|8u<>}4q{_PVBJtkt4f~9IQfi@_nH7z%5J#=w%@E5iw1H=N~XnAuvAihZ1$Ck$NQ~zW`_ZS z_8RTqN`nWG4TOb-7kMp)ZJAd9pU6XYm#4)MTvb$NlT$-OA0QudtN=0l(HcK3@kiFE zKOnfWc~^^6_+DeHUaVe=$%4&2n6ie)mo%lVL}WTmktrOJDUhlw4@4&XcLM@qrwGo3 zbjy@1FG9d`2&KP3A3cM=@55~(J}d&R?`}(ziEeoQV4bwZy7EBMw>W?JQ}><^chW|W z3j{bR+B0%N{UEb*6({!THIc@Cm_t44&AX#F!~Dm3c>uWpA8Gb~ad*wM>&Sl(PylfR z?=>+4i4+(Fm!wc|qm{Aws|HD?>EkHZ<0yg{ZtwRasmsU&^$H?-$80{!R>pEbmpnPBPwsANauFswGE&l(L^|JN07FT4TYcB z)YxgO4(5-#`2b{JIA^+)gkj3um9DqYH7`UffQ8cFC?$zrc2426xMF#1x1|sewOZ$r z`9zA8{P@1**k2=4{eL~ehq#TDWPPIF9PG?8WcSXxsQ4MThl?>4kV6)?&FeAN)^DYr z`eixsG?2CtYheR^qqx#sM=>_-5mrplB1s0F@_{)R*Am1~aXHhXlY>R-MBhdYG=&Se^@+s&+c%p+qzb|NM-v zsguol5WZfU%t#lZ^2Y^xBKd#q@2X7J^ROt^&B8)0%xBVeO zeU0^b)R|RAL(pY*`HMw>6i_MemmDzVPX|cE$|J9yfE8wRdTIA5eTNEkUM&whZeb@h z=~-^0I!@i@&|Zm{>kce+FM1Q;Uv5!@2R?V)ux!0MlrQ8TSbPzPtkNq&8dXmiiaRr_ zeal)3v7HJ<7lC6$8|drXGwn7yc~(%+!ez^XP@#xAH&i1|l->Q#&2!H0vu9sLlTAYe z3D%IJ)`2rMD%Vzf*U|~+0JxMD;9TPCneeXV3k$3UjTIi~wi@A6c+?juU^I$yhk)xM z^^*@)6*bhqw0Z*svK$P8PUW6)dT-Wuh~lxWVOGyOa;~ZNd1e!#Yr8Tm*=$C-D&9*( znfr{r=;9$;x`6)jd_uyZcg()yeKR*)D%;JK73D70wr!qR`w??6FSNvwqKbX+rjs!b z{eoH-F&oeWK?KH|Qi%f)yhA>b+IBuP1lv&@@ugi2{LB!UJzE!8^aISOX=tWU>ogG` zt~&=LrX27YQJ2k_IXWpnd1a6kgUWq!7q71u`=nHV0LD6K(z7*^V=m|fvkmq~ZMcud zxW-Evx@L))QyBj2YTyVQ?~Ry>TkyPS)(pFI$YO==zARJ9bSN}I0>Dg`D@n+m^}Of$ z7xzw=oaZlIG~n#r^>#S;UJWtE=XF2Nm0g8V0BgwI62aW$vBJkMMlf38NKrire})R={al9Uu~;dQq5^J^TK?jK>#omKd+v zt~FTs$YUH^Utik=9$E0i?~WMow^$$BO;lag?@uPWjoEB|_cVbqd;4KNe1NR6&RFj? 
zM_!e$;}ua1)d0VH+p$HaF94u*_EoxPJ4x_2pj}La^U>l4r)NmEr13cE+h0+Yg%061 z5NQ|hL-g>y(D|rR=M#s{$7BY7nksuJmMDIJoM8OVjUx)0iQ5-%3I5E94d^RD^BeFC z0j{7KyW0{|&D3AbsyjC6)q-k;uCZw4f-!jaJ?Oagh>_%YF)a)MR@|AEk~n=i8O}6y zuHrIBRl(Ffn)~i@``o|@IAB+tbKi|m8t*qf{`+Lay&v00W8*80)3>bjZ9iZ5L0@I( zQJ!yD$9aMVtv;zNj zntP|l-KRr`Mt5J@{>*Q-wM9b-lnIrywpy*-vPFmxAj~J+MpsVTSk$nB<{O}P*Z^Wn zS7OSa#`RD8TKdqU@Q2!;_Jje~Vne`V>GJj`F@vIk9s?%iZ+KW{+xgQx*?l)LiC_Tx zVykPcOEhK~eKjyJXvR2+7A{2$Y&HacswM$W(gS(veT_!8IL%KA=FZ{vgQzR{_U)IV zORT~Dqd_nN(*=+knv!Rw)quE(utmv~)=%QvzFUqMjl2%VMS(%fE{Z+B(vw<2mmo388 zQzEuHxU%9irvQl8L!m&9#UJk08NTbuB*h=vEVP`FSo>1WbBQl(A(|ta9Uxb}zkp(RL#;>0P526) z1NIh&?rJ7W8=dGHNOZ4(m;#Ol-mWKxvP2explG2~%8Q}H6@TCtP2>xa5`!6~rKL6i zVJQ+5ri6J(FPKflqZxi?U6w_@V<;#Sw)0AT0QZV`-b6ogcXu}tzi;o{21kf8d|-V* zQ&OH|Ow+r>N88k9B~AAaq;tJ(+qR{t?-Sa?ftZkNp%;vbfp-D`{!d$DkvnxzhHq8K&z_4 zWeyc-Jh9#mpugTf)b8th!UB4N2jK179y!KbawAI4VHGMnpVWtwxeh%%0Vl7nzWY*z z4dr<#K{uhYugTgSq;^K`-3{Vw6A=<(B&o&>GPh0VJNHy79w_mZYjZdE0P&Cl!=+yq zD5i|quf&(F&l<4%_4ki!8n13`R{>>kulKDKj~^&bgP?slkCU1J*aI!}37(jJmaRts z-fe$)I4v|(F(5u+RYw40cw1gs3+?x#QV34c!;ynam`?$wpwrdXHbID4k%X2nxEuZXEZBni+0mrfKYUkz*6H3w@0 zFuR#KrM};T@NCqdU@^~}J=?+P<qO7O+~f++lF*@+t8s z`9}Q{k+ODALkuDcJt-vd<0>of&vr$bwkBO6b&6)t|d}&d|}uWn#Q>w+}v!Y6o=0F%bRPw49MVHkqe<|4aZRb z$~N@~fuu1*XzVsAir51-BUBzo>LoVBYwn6)*M+$>OxjmCn1D<`TF398W-WGQh z!*}`nwJy^D-b9G)ah|aeFk?PYVk|FG0y)PW9wbKwT)MVik}-hCGdt1OIVsx{sx=#XwkGR@MlV{mDw` zO8}`vx8b*Os&J^eF73Ycjfnl{S`F-WU}Gead2lE+e=zw3crKkkUkC%Sq@WbU3|zaoqnJlGpk}*SWn0E(w}RF#J>G6`awPOd&(z!0B6%{Q6Ndx7t2XizD9bmOfL&Sx??!y?2 zgBlp|Q-v>WF0>l;SV5r%oG@pT8 zz1q~8?m;<7u7N|GL$~>(*#J!P0wynne{g%Rgc4|ArQfNA<_q7E1t3wSEV8#-G+# z2E&;Ml!#8s}*h{o9S{vRd~qpO)P6_UWtiQIep2X+>EwLCUKkBJ9#7e-M9HuFU)( zo4cy&FV<$>t1`@=c2x3=V}~xKMW9X0p>J8dN6Qy>14JIeWEiN_tf+~~~N_kj)L!RJ-NeVV9(UgaqIOQSMSQ18Hbn6~Fn7B+hlh*nP@xjmMUf`kapv7yv@a8)jsE7b zs|4SLpl{MSdGMzOY}P2)8H(!VEL3E}s%xe#f+`2hYUuRi(2cF%bhsM=@5`?j=f&+Z zrs4}6_q(%$=3d-QtENz$J=Ryl<+q~CpG!Azar(y`H*i-S_LPEP>JF*^ouQ!WoT8Nt 
zE{v~Vzha2-A&*tEbk20ZhYa#8L4>>UKz0eL@y>}5@xQ=J;?hT8)f6el;tcexHU(KJNNs$zs7gqv>z0z zG3FnBw%gUr6~onIP_ylY)rBZlJjkB%NfU1HH`L08%S9}S## zdNTGa6FTTWq>^DC2ZasIix?$`e#9M(5CKhzoJ=kx6rYyxy486s)ob zZP(cltLC3uR*P`%goQN9tXlQi6%(+ASel6BhNF++Aw%ZUmFb6kZY|iv77!u&4AcOOw1)<3W zs%2sVz^em#E>50`9h7dJl(+2~^Jv2M@ zE;*nzJ&()baiiNzpN^Xcg#H3UQA*f5Sg{+#OE1TD}y zlquhbM}351pfros`-cS?*1iYKuL0k813s6ht0tAFsYim75?PG%)& z(}2{-LXgP)Y{jOt`WiBPOXr!nf~tlbl`KtuFa#w@8Mf|Mycgby-Anr*#>;+0~x1q=_fdSG67)}K5^~SYx8F}@$&{aGA zI($!NJ;{6a4M_7ovg}hg?s*pm=`lJ3QECe?oj=Q+D(9*T6~I4a>#jkt47iLTHJtq5hA1Yjr!j8`eEX-(lEr#Lq9$8n3&$3q|Qx>9)E&j_)7e?SM)^^(e-ft;H zy$SsGlv-mt^*i($BtaYYdR+`p-dTZ6({tn4?BjPoCbU+9-eZIMBG#3y6d|g3K03OT zARiEZtflQ1{Tb>m=wq7jjgNS2B3&J^*oruyBgmi712=MJPbrx69JMzvGk5&HI~4Xb z)t$3qCxgZm0PQ0_WP~e-e6u)YxO6Umo7_;JyFZV_#;IKv=rKlu6WxA%j2! zc9OJtN7*@B#tF-3w}oHfv|zg2D9<0ede6?m;p1anYim(hPhm_uJh*#Cpj`I>`0T*7 zY&n{-W}>za?(_kk)!n(Xpf?+tN8A2ji>l{=(|9WGh9fa-2Q z({-Yf(Lq?WbZ#(B2?0rQ8ShW3$dX2PO`baRPH|;!Wi_pCki_N*;!8Lwo+a_63DGaH zLW4tuI~;=b5^xM4=pm&Y`H1km6=`D{x6QbVu?IMBDvW`1`4MRt4G%EQ6*rIJ#~19p zZ92G|X|4;*iB&c-FCRu(LbBH7I56&@L3cSR;#y*kxvmqKA_kcdiYbavtL^rv>vIdF zsjXwbyT^{3V?JN{wri(Iu>InYBKWQjiq2?2UNBNai_tUBuu~;u+I2vHT@KsBj9R# zJ8em;$6w=s`)0?u4QLQBD@jHzYJyF=ISU2&6@YF7O0n2mvXB|E?tjpZ1H#XUrh!;x zx-Lp{GG=>7w;abXg!&4@8%%=0ukzGRaS@!M%Rx)5G2OpkWKw=Qrld(+Ln8rPULY9M z3S&%j^E{mj2H-RsbovZ#iJ3}lMGO<(-i#UTJA3x5jXW36reBpY3LymJfqFIktS&IG z=WS>H9?9^beEdXl@kFh`rOeDL7-AILdIZ3^eHFGdst+{_0N6YYrOfPwr?nDOcmDOp zHL_B9_VP$&68zCr7O<&O1#8!$zp-7u@;DdZ1#B91i^dmrn zqy)ts4+i-b$0`=K0)!4%A>wNgMF-dIq;Erkz15d-sD^$j8A~9_RO{2r5C4ysuN0*z z#?Y?3kJ?vNG-PzF6bcd=h8A!wXnr>DkR1-R+e`0P{_`UN`C5!&%)MoA$s*<~K)V0M zJjW3zbFmHs-W)c!#C@S5IynHei+&qwOI=c`!|VYI=1&5s_t#?J?%*~!DHk)bw+Ch$ zl01I|t~`JooB)rkq+@tu{QG_TG7j3Ad|cIk2=G2^$R_ULxEP@A{Pr@@6ENY=gutn{ zx3^PFUf@@2tl7$f+C<|i_djj|norRH{vOAp1`f4vhnNg03jRN|!FR&@0;e-I3yAkb zI6W$rWK_cHD0503yrHJ4Y{+;Mm z%oHfxQcHV0{&|)(1hIH6KtY`FFKunVd%#tMa6jy02%rCJ$B#tg)(_Ty5SU$mECq5+ zgEv6f1w7*a`UCJS<^A*+^}K13Qqfj`VXa4@X^mBA?+4-P36JhdfdhzQdJ?Dbc%gIv 
zb(rI_evt_Yl<)|s&cFnMLog6gszq%q7R_46+lt@r>({T}T(NSP@w~j;4h1~%Wa>s* z6=wBuij*I%fvsEB%IIk`VE_Q(e^%k(GG^vAjI3Mb#^OncO+U`I|9;Wm;x z5iXZnk;?;0$tJQZf&6SWEckZfxB$5o3@LG~a@y|j_z_@yR!E2g#^OZHhqzEap}BSk zX7fajf)c!u^7Zu^-; zRr_x>TuQFhMc4k@z*Zr@my&A`;_)AX2#9>INfpQFoT_lC%n#vNkUK0qycpcUVW}Cq z&ZZFfc1E(sD09Wd9gzJYJ+19rK25e3jQ!-oePx&^^hdt~)rP8E*bzj*vTbwirJ*s3 z*Thps0~V;J^Z*}@rmZC+KL9vhBGmUq>__VKu=S3JKTi^kJzNdvA(CGuVlDbsL`3vG zC?$wpjau{-PY7&r-7*@dvnDE{1T{7MJ!YCinelJW%m=D@)sCU`+O0-DVe<^dr$R9`RTH# zgeZn{b{yO#2n$Q-**40?Kse%k*uC>3m^Qc0xMtu;NLZ!GPz6 zui7iT;jYV9fe6rod!NYD$&!LP5^Ktfy+QJTT~`VkJcxqw%BL;bQ~~oAjOqS$g%#S* zXo(YZ3ke{n=h-}h-#CKZInperKLmM_?okYDlNzj7h=~spqj`;uIbRy6M%s>|ecc0S zB5A7_oF;eBgaIk91kFu36l+)`!~-Pa>AgV^7KwF1^yWtkRyBm#XKG3Ciz7$^bbbj9 z`iR%qN={`o!)?%ro}-A#c}Y?P&KtYfU2q8jtxF#>R?t^8{~u#-0*>|iZjZmBl!_FZ zh$JKxNnT|tgpy1RDn%Khk|9kpl*&+%F-pi3B|}QcREUh5gp#C0LM0)Y{_9qI?{l5K zfBXE;b#->%Ca=%udG2A|YpwhH**pH9E)9k{bU9|+Iyx_CFi_|6Ej6Dll||P@p(rUN zO3qi=@@p1N2Y2Pjz2mgP!>#yzYN*}3>c&9INA-ofDmZn1;8z1A&u5= z>Xx{?v#q}FdZQ)x!pXxnJ2C#yQ+!8V_wUkIDWVDn9tftr091b=C&kehmuC?T4BV&A1m4;pV8@GxqZ75`HzCd8 zqr-DYqp5_S1HOU<clnRRb5B8cvakra{nzaAj{g* zl|A(snIU>iX8>?!D_yN*=X0HKQcSNHDCP+Rkq-J@%=;gSSvu9gy%#GVa;9^8-2fZo zD?sE4JX7oB;I3^DJ`)uQz*PXtPt!o}nEq_~k-O8mgc(I*THHjRme_ z!ppxvnD<*@LYFGf14eo0PFmxP0~TMqU#eGu$^=j)`xU_WfN>Vc(TcasT|V~?Hat8c zNKTya4oRp*UAyRCdv@M+tn?ZKGJ%pJuVLz9a^@k37R1%k z>?RP-LMc=O@Woypni7$5Q}^MJ^EYBYyzbxctN0W{kxBf!r9R=xOr1)-F3Ol?y#q7- z-Um?31`FZ}2};0dnvFAYbq?nM!eA)W8a3&McfB>);f0pQiEUPi1xi*>$coTn5%=6s z*>r*~APV$A*o5&er1J^x4;cuPZwPQRaFJm-0(4)80k!erh7^{(W?z)vn1QRjWKHzF z&nRo}eJNKQI&f{`4&WPXrFo39+ER>fR>I3P{A63`$|I_3`z(=Az*D`Nn3xzpIMcx) z8D>SGq|x|`#`84w2KVeLJWK-UePTkuVDG%vyClcr^hD5h=7HSzVkJuXgOR`Hfh0VB z4cHrNfJ#QQr=(!U0sQV!9{$_kzN`#>9%Pe*uD<}|&cXMBW148@x}@Kxo*M-aSJI=p zU;0<+B_DnR3I?LfF!UUYYOfGY2tKH8`?IPC4&c^9<8*AtjnJuCNRb`22?5TEfWkoe z!vQD0iVr5KQ8hlB{vRR1kf*7dz**s$i_{DBEV?+F|g}pleM*VBB!yM zgcr&=09VMbsqh^ncDu}x-%L}J-xZ^F<7(OB5B9WGYseCI9+!;|HyQ(L`y&Fi%i})RiykZ+pP8X;PwYS#MaN(O!^&> 
z02+(?2|L3fMqY1aGvV6fB&o6kA-c|-OEg*(U$Fytu9VU%GcpMA$M$!k7+lkzFX=yFvJPJ}WO5nKNy}`KNOM8N}hS+_!1EMwL`;t>Mt4kZx;I z-nACq{L@r{{|n?Jhx1nV6)J>S0TNcgO1*5bYx`g8nzMa_fd~vb$xNA+7>s;<2(qmM zs&#*es5fkw1Z*zJ^*0{2R3%p6swaQwzx4(Q;#qgJFj-I&xv-H#BUO@i1uf-QZ)M~0 zQll;{Z}{8PJ;`#Po-GUFoypDva;2K^v5Yd%>+_Lhz=u~P1v4Pke)TC1mntuESKeZ< zk`MyXwXyl}Yaxgir+5^Qj~WK<&yt+XhgLjqk)$9~@E(4fYMi2IMi1J64+5fF-D9&+wV7@>j+-2HQru&l5A z07^$YVEse^oiVxLeKK4<8Xa#b_@pC(Z0vpWZTYy;0I&k8*<;S9K`O>Xh^X+po7bsH zL^O62XM-k!cs(!o3(1H*$L?J8z~mv^{j;^}PWeG#b%6G*YU8Cl^k=DV=X(tX2 zS}qI7$%~fn44FCw!!T(GtqME)HZ<`|3X75TLb2fU`E+Jz8<(}In~^rzqS%5HF~NMQ zhfLU)FSQA?NF568!kdQ)QyGv;PZD=vhB@qwOD0n5Npv*;?7!I6Odpo1iT3cTtZy zCEY#-LCMSPlWB@_`<}F*uEW0$VRQZ6D7m+59=<^O+u+0h3_vE{=1s-8m0zTjLEMC! zGTw@cVUEP~Gy$2bu;dUR%@0+>j`hYGz|?*;eR0tME$jz~5vn*0(LM%pEEh4x$@sg1 zxDk4hBtr2-uV$TyL9=Tf#$2R;aa&n66xff&(hW4}idrD~0NfP@$PlYAmDK=3M-w?H zx%8+1>kP-c^>QD}cu%O=1AqtELJH6;r84ij$q$Kz6DN#m?Q zB7X>3_b~F+XB(@1d6R=bpwc7t@}4#RdHj3r7-wDL&|m@W8vf;8<89+rNv`(Q6?S*z zxaFn64TIKfn@52uLUu98gfxltwHi{7sOhlJ#5}%BFQR5Hrw6FT-%TTGPQs|9&*^dT;>Gc9jxA|04s6m1qk%LO51_S2x19mR0FuWmG6VsSA16d9(w6D>Y5l*o z4u)*7-5Uo0q9E&%*v#C>Oi-_%VN>JuEKG0SxLZKwpQbW}J$;_pSO8ashs z$f0__Kh~vC|H!e^Lb?0%j?nAsHd!5_B;P_ntdE~K6L>%DuyAC2mcS!JXE!NN@Z`Zl zf#wis5X;NV%uL>UT?UWM`8_XI(o1ixa_USIu zg?6|)ziMt235aqtIXzf3wY0oWMNILK>D3dS>GZFNjqt~fhyx(QErzzeKk7EHV3QS| zYr+tnL3T)Y=hS9;PdR9RAzE^G#ub#p+dj{p>BsSY+9p2@*c=_x-$+H0sHFk?4 zedY~dNFNdgHW%uw<}{#igsmyyw$jw41rSB;ZcGDQfvNMkIq!M$xOdchX_5&j+xa(5 z8uy!?@`JD$9*$-!6r}o!zH@_gxCS(^60Zc*3qXAEre`Q_*a=&i86&Ng-|TH@g#xH( z6WyTM_q`i-<}HmL-f_A9V8#0t17BO2%cFn{@UZM94Rxc_@YbU(FKAkOemifEfLdz< zPxoa2+Cu|hEHQu7>s12&NT3dA5$_NE5+qpy;Rmfwy`q=t-md+3&{jkm=1!3O^Mz|} zrsc(-bBAnj>AK*r6F&{3Wpq5pb7+QG5T{`-e&XhBrBh~ z;-=XLiO&CU4kqJb+<%F)pP%sID{&{VRbKDH^0q4a2T5`X1J5qEfC3EaCP+c42>qUo z>uMN4{T-Tv#wM!Al!tA3H{GKEhus})ARok`Z!mPZ`XdzLrXgVl|ISq%VS#c`UXn{L z{7}r-EN?g1jq_kJrm$$yqT6kO-vy>Q1f*iZC)PLO;%aF7dh2#$b)3K%e=lI-DaDIGG{Ld>R*RC+35d?+~ z!4Eqm0>Ehd3irY*(6;+QJEl{pGj%~ghx17`bWVD7{ja+w$l& 
z*wt_fO0M6vcxsCGq$AhD#Sr8!U7AKj6?E>3HmknWL$&>>;#`5bxw$lE;Z&Iuc^WcK zl;sq=n!_5wMUQ+}*$j9TL`;^*D@;1|e18=>*J3a%e(f^U4Fs%cPqP91HPMW1#vGmm z4#B%@Qrjwr*Qk9$e+F7z=E^T8*M<>BhmfHyCn7Bu@d#)SP5_Z0r665 z*`Q#6_6GAR+j`^UJ+NHYLy;Y=<3{8L$&a9;X!d=`4m}2K7O4}+7#{0@;-A?g5W#Td z@jXd|D%%vkkY|n#-1&#nZmYiYo^%f(fzW`tmSIGnX~0uspkagr@DdsZST>%mpRYI) z`OMYtniiR05nG#W0>KmL)HKc)U;86~)KmWc`Yl7NN3_sjV3Y~E(kn%;1YWQXP`!Ad zd`zgC;ioUtw;1lN4OPp^_MvL*f(;{l!#SG1wD6gn2e$Up+NLc{vTVVuYgN~Umh@Vt zeYFPCPDr*J-ySbR_P8zZf3<+_1*WvYsK;;}NYz2Pvh0D?wF9vFm?(bD%X%@L$HLsd!e;>!n>r#Glsi=vT{ZN^x3e8dt} zdDsoV58KhQje`SkzUzZH^SHeimQh_JA#cQ$l?8Pt-}MaimAUk+R&JaJ5d{|}{r(Pq zC(PvnK=$+oOU{1v>OZv+ey zbOImztrr<&8i#T{T1jkt{PWL=r2MuwI|SXv>m73$x%XS+{MYM&;TOfH7dj)JoXcie z#OnRcC{*~Q6#?Id+-F}wX(8)(-chzG+)EUJcK_waxTRXQ-BFz9NqesyOz>h99DO_}<-_n`$3q(d%!XN@rZ{E{GD*LZ_k}{1OQ5r2jB&Li6jz8HA@z+?P zR9PY}Zi4R_6)4hd6}~S7VDNO$iM~yhd zgrKZ3TGHsEJR|1-i)U7~x?0SA?I!RlCW_ZZ15r%_^#JFEPd4{X$;kEYJ;;F&#L$vL z@H;e3O>Z_S%Qstwq{h^D#v+r`mmp6R(kBwN5R(}qM8VCS%FM}ZXoDctSP32q_;FCN zrL`X=6{GEFsR?XWh_#N+(X@V^!CF%3{goJZEOFFDX4KLC`?dp4Y> zuhU6evFC&WIi^yc#+3NXLe&ej5kBi^$5{~4moMj};W)%nMK}@;~4Aql{(2NvUo2Q2n>e(b#e0?R~p9LW`@_HL4sU}E8pyoGnzF&1F zFKz$@C}sxuf?NbbLOVJ60B*k{UiyiTH$56M?4n0f7|at3Fr$D`Y!wU*CbLcG-~0f` z6s?uuY)+nrsUil4pH%*AKob(k5CjNYX~gJvF;ts6w2fdO6CsG9XZ)Gm>`?UFAE27`MsOAGR z(ctp2O{ZqAvw8?`tIgoFyhORSMWKfcZz3t>y(HcSIF6k+-tRcO*nAUpMeWQ3PFso4`yl;SbSD@l1ND=0B%BY^*@2WTE{N@W2JG@*I9( zL(W0Zz6@wI7491aT#+KVsXf~>FcgwG+)8pQ0s*~pG!sn|wivjKoN}}8TVXCg$>6yQ z{9NdY&`ZMz3-4(WNnDdzbzw<`dOw6|Crmj7v4U~iUHHW%Ty#V?WZ(ew84H+4XWcz1 zu0nRzid?{ByVN6s!ZbJ9+=4XqprHqz z-;H2qV^)^#y#EN3|LHwRrxtu0%R;d#4gOO*YX^7gUv>yD14#wRt%pcgnA(j3SigrU z&X9AP7dc@6;=US%=Biq$zSA}~&`_!)rS!Ux7KWDI>lq=hn|*okISGrF~yM58{kk# z>q!XKCWRkfu)wkT*ehYNr;V%)l$5Ayz>2!=OrBYVZDwtk3BS$)kkjE`_W>}vC*g=}8&FwB)- zC)~5y!{_G>Zp}aIDxV;jBIEyJc1p{QLc?MIe6t*}>nOXx&lxTO5?h(xGCby}0{{%L zjF%z8bEd7tYjoSsp|J#Skc4BpJkbXTJ`!EK_B!SjIyc4PZ6H5J^nNh!RHf$JcW=~Y z?goD#6g#a`Z8K`X?nj4+fSAz{cb)km&;UL7Y+CDT`*F$2Hpi{!3IUX1)&w^TAHScG 
z(J13*?y9_!fr1#^`U8|F9+mF1kpR4ET3fZb2~dg%c*7VjjSDj>%#6tYKMIs`+OXJv zq7(qd2)H-$Z42TOfnuQn1trxNKfa`V(W_dduNcH*qlgW^uz{@XxdH2l5B9W*9i?mm z%s2LQv}fX@03P4=*Ik;m4nZ7b;-v&M62Xul2$XG+g_%o)K2^0X+&m+P^BFj20N!PQ z&Lh=Im`!){G!#H2`J1OZ{xcL<+}~H+qwHolzfw1lg>@3SNt{*^#F*$k41(Xn^puse z$>ey9Fl?$z-s_9ogG`l|CV7!IKtYpvwUfDNzV}Ry^HJ;dv`^@(+rBYtfII{v z(Ye^2k-o23On571iLgm5K?=ZcB@29<#C|Zas{K7V^#QB=ptN^$4rofG_9yFn!tgPx zK737V1lu`&3w+8RDEA8dk208n+Uw2U>MI#!p$cVp;7M0uG2y;-u{d=i>Ds!J^T810 z;}zOvqf{+^>3iO&>w7ds*N z(foGXD($-AOw5!Sdi$AffH~1v-)Y+a&>wf#@e1LZ|g z42)+!NU}Q@x97pk<2Od*;e04gC#%VZypcg_DZ}Kw7_GEk2RN;G&c1)*G{PU?u1%|A zp1>312VtB@?&aw44pbTQI!gr%Iv_Bi_d%s?lijm{oZ{`_6!ogpohBiU|5|D~M%`J7 zFCwhJ42%}|9dF<3EK528gl#}fLjxL9e`mGGLS0Zf$QmE4A@T@{Y=~*}3zXxDT^SK< zboATi065VUkiH95$Qjl4*aQvaL6K$4?yetzD8ULiJ2>@Z$pF-{W^}LxC-~Q@hO+w+ zU}zu;KKhImLor8d*@JumpU?7;&%-(!?~&J%aVX)6vjr{hF5PsQ2W;n`7k@FKN+B>H zj+Q*aDpiYc$%w)NZ(y3VOcz)gPM0la?6aqS z_0 zeeQz2BC|O?cxQ^~Z$QzY##vP>ly)NOz~!al;zD9#?IAyxJ})M@Ik_!ZoDOQzj+k!__nE1d*_1+a@A^3t4Y-u<>@wriT*9N?TFB?6 z2DTTeco@f|=JRK3>L%||Tq|^UrA^djxT*l9AebB-92Zpa*6}Zo{3--c1_kq=RGPqGiQD5u z8AKVzz@Ie*&mdmfudzE0btGW4=h-J6Xj>G}NiN4e zob%e?bW2WE|Da$eBptmPM~TC5DyR;*+%||Oj93m0*u+NRgo4=@U!$t`YsV3mZ!q%T zTrGaDC+_XZXlPj&#~CX2Xb$=>bXzF4Sal@)&5ej~jS4!51~33*_{J}a_s$Zdtqbiy zjs}7k8;KX)TB2P=4e(oC@vwx8ZF%2pFBDvcJv>e$27PmZA7G>#jlTN!K@@ep31J}r zUi##`k>lwkLSuTa)OS88NUTpZQiTx`8vAr*gPQfXiQ7I-&W3pyIKU?4{5_WPrT#4R zaA7qUD6Sz8A%A2;CyJ{FV$!Y;3+>RYVYQwe_D?}+NHpl!Ke;ZNAMESy`tP}u~)`R@V*8+ospXR|#h3dDyIgs>#flJlEVgQJ_ zEH9;I3!X%4x-sf@cqi%I;s zD*^9G?O$Qr+e?oma*;iaTH&+T4BVbMil3is?H_Ym&6nhX7fn`j{$v8Yg^JZ8s=L?Slelg*H5xN`@Q-iHsD=+?1jfP{_Y<;0E^H6;cDiq)!fc}Y%7lu_$ zyaON9eH;k2%aZ96-==+eiIu5)8fWlR7;?ee34Hh~SMtGpAocMeU_5Lh-FE&oJNFX! 
z+CPB_!)AQsWjpVipg-*&+!=Us!mZx4N;}C3Sc!tDk!?r-~fS{tj>|L^+rPE`7GbYdWOx>7J$t>_$ss9;0SXb z48Mie%h3st&&{)EsqR^KR*Ec#-3Sy9|Nq~W%+(KF%jsRI-JW8ZUqX6XV#=9#MDyH` zbkNZa)eVGsyA%kPk?)R}P)AONTm<^UHBuB~RB5hFGyFciC$|dlUmr%~z6<;43aC31 zW##QwCU!BDpoy6pILtGzw--`Abb}-~d)RbrKHl-N-rTUpJs7Wz0l5aBgyG4N3`4vA ziT-OpXON3r{4sAH8kkBLXpN{?h4d<**MzwvXgF6lGPC?c_m3iTWMWc=wI;`uO@Cej z^g=*yptTc3fimUmr@8?ilfk5kV@wCKh%+kr3-%rcQi*yj1-z5ou~8>N6oLPO_mSUy z3?0MlKBM*%CAHI$MK;Y_A&}I*a%wD~(UGHn*fMt?(wnVLtYJbP#b3}&PFgcsn;0lE$ACeq;?#7WU(%IO6xMKy&6NV)f0^@_;=LI zQe#1IJ5Uodg}x8mPK;w^ut0Fw#kFXx$V>BqBR0qNVv0R(w<-A-Nnco4OyXY~bAgV^ z&6HLW(#s2xb6e0%iXoX`wDmgtzWs$LrWk6qP79BykQq^>w1`d6W4)eQVFx5VEtm+v zAjEs6i)A#xxrn^AbD=oj_6U;r!~-6hrCc0q~E|!_$WEkBIN!e=hHJsZgSS zn5OdXNbIN5|+S>EZIsG;8B9$a)*HMVl+!i>KZ zys;+$(Sbjth`nqvc^iP`A#i-dFfmcTU_<%lLNJKhF|SG;ph-UFLm)xTU4HdWLyd#fm)B8rzK>Cw#10);evcHKidR1R2XmzftDDD zFhMM#sO`yn@~aP?!vb2o2pAD8_!C@1O?v{F?2e?!XH-yv80QvXaofUd(b{eZG1@U- zQSP$z<>3U^)?+@%55(|#ACz!JW35L;$YD&03D?DZZprPcmC4RZf(Ue@VI4Uz{xMcA zce=sSR5`$BZatTOS`$Ck&!vudmv|L~4FNpmQu^^{K4D=t+zQEr(ZcTn08I~7{@(v* zl-l}iekbb;x-Rnmy5ed$v*dD%!Lk_^%=Daa`>4W$)!zX~&NNg2*Bz+7 efJvFxy zB@j31J=D)BV#5`ib8+T~r$7HlwjTz^FHINoRvb#hok78)%u7%svG$R`{TFYFT0U(@ zF!XAsNV_BuLI*^$(3OJGX`hdE)=ftV2x>d&_TWGQZqky)xa#_QDs3|eX#-09H`2z4 z#GBDbaw2OLH!0;Qo5n5kH z@TJqr+^Eg~hdf)`Z_3!VFqw_~$H{XI(lzmBm!V~D0%n%hU$Ds`10Ix^w{6JusWv(e z)C3SQZLk7-5VJn-mgZ)v6-&!%eeoc$sN)F+oLxUntqZHyzat|yc1}q?_^}f=7FeF% zVzSaUQ%EymN%6Hp8d&VI0p+KDU;@kXt5c!k%2UF}z@8lzoU_!u)|NBX=`twPDfT?X&)h*Bxfz{4}{iCU=X{}RvVUSFs3aC9ZH^tHR^biz$ zsobcH3yx+kv6(FD5?4*~=4h1(x$yp59IlQKm+G2n#=c{D{AG{Mw9eY>1ef&uKn^ zItdi+5J8l^!9rRV+*808X;^$g{Xb?JaATkjWj`+CI9)Rm91~41f=|5EenoZJv;T-B zW}hwtk1aeq-rV7KLL@0UZUcq5vlIJ)$xZVl7~j}NmEt}Hr8sRrtx}EvvQ*9~4RJ<4 zTPP$?B7E96KvyDhKh7K;L6?>jlbITUh=W#31DF%o1Ib^1nu&fM=z6*T%Pb*4gD`kQ z0Dx(Q30Ui7z6_?*27x?tBV@H61YKkUL5$2x9Z%nWDExs&DWzp!1{`3SAcJB0BbPwK zMPLa`oWKHT3KLbBk$v@SvBjY7ZVh%Gp&CFWv}(7Jy}f zxp&O?0GyO1%KCBwZ=4MxSyekth-fQ3ez4M}O;+&K3-Digu=E30#|K 
zy95MVPe*UmcV6Z&0kbq40T-xP7c%I@DFUQkA&o-CqqYquGO44#SH<^$_V_a5PRY;j zaH#qQu5bODbkqKpfuRm;YXC3xX=fkMZ0vm6Pf*XeA=+V3a3KQbUb4bKnf#KV<2|D) z`vkl9;Ew!-h26;=Sz=om9wkNwa9zxcK54%|#l|LSBv(^Je;>TmT@lm$qRMyNLV956 zJy`L}jGLPMeS+qnF3e@;Ggksky$pTydZ5+!ziw=0L;KaW264>o;~E41sD*Z5(Z->} z?YD$~@W@dIOHHDVyo4a>gUdoZDnE|DB-xCYv0&)FxNlE z&uiJuqq|DP2DLZ?NYY0-oKG)#X)(j(C#CHz$Ub)AE*j^V1uf-ICgb88axK2`{KLsD z>I*hu2_d7)&TsG}2tH^PK5$elpU3k#CTo#?ue{SG%vMNIM3o>MDSi#eL|5@W-=R&b zS7Ad7FbsnPJgZ;h7GiQhviy7Y;;Fg0pzV8k*r8c4Pd>&RW^gb7G_MqNl!sTi~&P9Jl5WlNL_w=i_9j(fw(q2!V6^CK#aN zRf^zZ?pVu%@DgTcFg~1(*$J#gi;}!N41m)7>996>h{?a4 zZzlU6V6p?HTsn4x+(=XFB1n_GQvE zK`@)5Y-xDa7QWV}Ev>g#{|t(V;L3+(Tn6~gUA9cZJYHNv(E$knSQ6pnu|fZZb^_WS z(6t!(phJ17VTZ*M%sX1qwco^@121!0^1#~<6{Sqq_n>rpkb~r-HzDVD0G5tL5B=$e zT8tr_JQcBe3A=6t(a=IiNACcp8$1|gthH8Lx>++U)KY)EI^*~GN0R$ro8yILFHaaB zz_(`Tt`d57Zca#Z(BdCdBj~>X`-2Q2Db{0_m9kXw|2vrwaG>f*%;DD`!^@sS$OW8gdN`a8mFb(5>pBQVBK>!#5`?!B!ky)4(+o4 zuoVnPl%AH_f((x*EiiVR)k{jI^~mc$nT4$N*vQ6&hR86G?0t3$f!0gFGC37jo)%w# zv;uz*5$sBHi2xAnF4Xc?J`0O6FqJjRJ|T_Ho}Xqlr0GH=iVqY!hJ@!2`fCdIi#*B= zISfn97mRU=!;>*0s-6JZ|&>%=2Lg{{@QLmt%*qk5JV0h_HSbMU;y*wVwVU zL_i?=U^13;=IG#|gP}(j^y`j|+wYVUaTF}d1hjkBPhajojZZ4tdHmk*CO!|;q-glWpaSvc@N&q7 zwE~7)a>M{9_BpW$K|4gej9xAtQV&}TRzed{|4HKG^%t?{zj=dNZ407tl#*pgRy}s2 z(%?m`mAT?HnX;$|!Cy1^d!88f9iqUr7g0Z`wr`&>cGYJHgjpEzsif}w14`$${L9Dv zSoif#9L)p87V-|5kw5T#IXnhR^u?b_bOjVG6-f(#F2x+@1)FG{9tTaiuT-;o93xi0 zfq7IVEj(|d9cttk1_1Zijit$OP6KH}R`vyF)1^!CWOXLIQP8EM2ges?FA^}}rgv2mIa zxw}Q&VlU=hv{+yt~1KFV4vy+yA99I$Hesz}VicO*hW7P4+B^>JpkDae8hr_hVD9Ql^B&y-blP zAC3$uPq#)5yT`n>b}XTGDtg?GJeD&A*!A>wPZqb)Y*_TjZOV+2ZN_~=?#%{A#U!VQ zSP6Zpmk&Fdb?4wSc}J)1g9_URZ;H!W*CLa;*8(qe{|uWn1(UCvz-3w|k+0{YG&%n< z-W4wokGPW39)PCRh+p?HI&}T&et*TG3OLkIzNKq@pgdDnhwv=)r?ZnZ>L1( zOk1!m!_kcS8sgAIv{#)-w)M0V!)D0nl`XT_ZrC;hA!k8dezp7gLaiwF35-aN_dBPl zNb0#W7%E%U1=)mxS4gd$#<#ZcVW}0updA||GR=G=>8ck1C?!R8%>&|7qk zd9j!X{uE-QzGM2}f;~VsFm`;g6)J>&_IYak3oaaPgz@xx-9;?XU%_|mC z!SYv)>tEn_$H6&-gz~3AaR>?vJKeyVQtncZ*HUPi)cFbGk{4iU%soBPP}o7~%c}>Q 
z5WR2w9Qcx4w)|0|wxSl^1_TBV(CBK_0N`jnxhM17E^IhKyto}751XOSnv*VlULo=s zEzk=T^CDUxqCQH(UgViW>dDkSo zx>)IT!hVlc5yt%Cev-Q11D`JrJ!EFm9k|jgdbRE3xoG}F?_Q6t*OM&sf6sJbmZCyV zf*!~iLokh_k_?|M=oVhUZYTyU>~(;le1Muqe+n>N|6aTMb+gEU-tf+>(J!Jr=DBcQv>Nl;( zIjY8*B9ed(jboC#%N8^1_|B|XFp-cS4q^mekoR?PaBwW*-F^TRi=B^--9@{;QNTuE zF6VK~RS$x|F*;~$Xc&VLmSzq2`9e^`@sR@$GQ;!~#+zpxQra)qVv83c{on6NJ$U=+ zIyu}V-7_F`-G^<_vfUZFsIdM3xYWv|Rm!o@G{GzF4u1OSkDKnWrB8;3!(vfU2`qNY zOWhVMHDQ*CuolXrFyiKERm+Wt49eBQSn((%DnA1z49K1Nta4NQ!pSG+M4FHQN6i!% z4H00_jG#IuLF35$LBB2GUOYzZmrGKX%;H$weODbxK@Z$FdYh{q2`sK}hE^uN553OG z@LP!aD)cR~*yVFaa!r`A3AOg8mua`qjRayf1kTS`+B=qm?6=2X>hkQb>&MmxHO&H} zWfzu!R)cUFeM+cz&Amz7oa}{lkg~%EMcmxdQsV`0V>&OVM-cuIC~kLQvbi0WV8dVJ zeP;3AUaF-$`Xd_dQIUAZMyO_+kEEo_kL`M39gD)S&vTLqhSEB&Kc7bor+h9MS)g<} z!zOpoBI%+V2j5&yIXIHu-I4l)D_Upc#u}8lM;}j|t+~c_%gkTDr_8_nTdvo8=Iw2F zue~|dxeO(KV)^d?8TF(^IQd1X``~<(i+7D5Z9^1Yk?P7|L|VW7?>H%cPeY#-@0XqX zV)qjNW=;pF6%ehnJ7wO&AxPN z@}4Fxaq)`Q)wAq*CbO-A1`h84hB|e5ES5HPC&R^H9*?nO6KHJ`5)%3-5{D0mbmNk% z2u0$P9t00?=RLD$EUJ1a%A-;I@VD*dQrYSk7M6%158Z=nQl@87XFsrezu{zfyYfWQ zZ*i_Ym~Fk1oDKg#D5oV9Aun6F<-(NUfEYAEulK_F)F}8v{eAr2&qh|;1K-zuUzcAk zDlM%C7danN-hY6#lVVE}V(6!a)M%(rztm5D?y6Sj3Fv2w|50Gi!SeUNX`U}I#h)Bf z)HRDdY_>v#PQ)t!f3iR*yCGEMlJsxQOWK>4OWw`6@O5K8^4k}&Zwj5~0H>=20CS(P zUmd1hw+U!_!AN_9xQ+X7>@xV_n)Ln)xCf=#Uwq!jgEqaU<=m|MaLmjbSlQA9Q-QvR zQzG|;j3rTg#)JyJmZS1c?}ftv=MPC0x+59KIwoTWl>t=gC@syfEJ;KE;;V zyXu|yG>?G(runeYcVUKgv>@ERc>G|F+m5J7EDX2X-bx6_0@GzbqR_*7Li^Bx8}r!s zhkP!~Qxjw}G}cg^9>p~Q8PtpWF^)(448MSHxx?Q!DT2@hQum|plnx>F_3ziYB2yM3 zBy9CX&9yPzyfGa;ZY)4HU0@4YXi~ppNWRfuIkVy%E5CN@^vvrvSz0I(17e)FoR;d8 z3i!xhTn$^;SlChgFkNQhlzw=rjEn*PGERh6t;nNasT##j+av)7zcy~$Doi-(V|;pk z%XdtfL)%z-OT4t~y&q$Nw)`5n6}6Z|CIbL{XxL&~lRIW51@5>rI*3t`4<(E&*bJzl z*gG{SxXLyxiVNSQ&+4|_r$JR6?tN~@|1$k9>fp=#^7>fUQtipTXt?U^V-v&6p3}UB zKzv?gwRet|Imn3gs=kknHjiC=<*J1;Nn=V?ui=gz@u;wOkPDrl!m@5f599i+pi8a7 zQNhVreS1qobLLJ3GddJMQC-wx#u$HQ`v!OsNX@Cmnd$?CIFRDA5W5-ybGYt{Pq4EX z1EY3PRqsXx0UnjXRLM=4i~a=okO+j@2p$10@rTW}*dvfyqgNa~UOqVQcO==9lcmNC 
zYd?vt8nxJb9RrTqHvN}L#+-8emAgH*o_puMey#||znzKAvL6A~DJ1TXoWxR`6rC`s znw@29Rkfiz@;F1qoMXW$Zk&k7L&B8;k(J0&KyWRDD^`=j* z;LW0)kH;)Yg;urDHU(Lm*A00mkH>3`SvkgU? zdc7nx9sz?6>#ws&5|_JOf#AA8Xed*W!K!j#oFDneHn-^yvP5@x4WR^sWh26 z?`3*-{TWK0{tyJ+23}650pycdkq!V-hD`oC5JukDPext(M^MQXrdAewh(@X;7M&~X zRhYg&VF$e;;JwHbya-O1?rYTw2?dwSKLl8S0_h;i2H!)CJ{&hU>*&h$f*^7i$u2aM{7sqBI}yO6#YiADxBxqmE?(LIC?1)Qy`*0u3= zJ385Q*P!DXKNMIYDxs`A=BD@z^pmApS>RX=n`Gi!K*!ul{RMu2tGZ?p5ZVf-VmxL8@j;~PcDMza}DAnHhlCD~qA z@RNt70^gO*@}3Z>5IU~`2(;tp*G9-g1}-kS(3jkVE%#O=@6u<_^nhl*0B#5+3a$UR zJ(g4Q${{5(xa0%GAF*`rYI3zI!1dzM^m{-%cWv1KR)ntP{5;3F%7r? zyKo-7!Pr<_>xrW!zGR72Ui~t54tO(Nd`{Im`J6p@(sBufxiR!z1FHF##|Hoiy{rDa zL^Hp%o=WM=l>sfdj_<+En7>a{3cRhc1P)PMHUF{NNol8B3)l4Wu+6gmir9D%UBS+r zQx5>0q1KMkZ!%oEz-OOPu;6Kv^dm;wyqZy7F7Cd5`hd;XQ1iuBAZLzFSDG+|!JqW* zu>=T&h$x1j<8%GYDVgUa9v7@t1CaD4B!T+=8@1DI>JmdS>YVw&g~7gI{PB0=@#7qi z3?LfffRDYbUNEyh!9Xl;k~7TovOv+9j1H$HYzPxfloNN%jDnk2gk;%c5}kFn=~?7a!45u8Y#rUa2Ye~%=WkoH%`3s8XdhpAX!!eWc2>g4*d~eX_CEJ zEP+0mZA`-q$DS)0s8ahrR;%J3o1ceoIuFkQjO|=>@4nmyk~Tk@MZ4K`!&;HKkCV0Bvu8lmU76;Wp=qLmPu@ipR{NqE=WtRz&G zx8N2Bzn;xlR~2>}v-dXJKpexR%a`i_cldhbb?9}sw&PX3!}VB#gl)uIECI7!O3L~q zG3LZJlOet$E?jL=QrC*pvG*Ot?rp#wcN)4|AKsooH4MsIlx2$&%xneRMxw_P+p(ff zwN@3+ce3lfIlUV93T-m?C=Bhxngc!R_TW(v4^V9jIx3`AeIN9OySp>>kq^U(qUg7Wu3WA z6sk?M6ggv~L+GR)(#?JW8}C}+LW-_~WqS>CPC0H4U8xS^$)1O^=f2Y)e$+OJ)}%SY z_3tCdk3NVe;z#`YKH92o7iKk~m;(mGkZjtC9Gm*28z_aNKCNmrm&lh&D zxI&g>0p>;&iG7$Nm50_UEAK_x9%|Z~Hj3`==NAKaw+1`o?EnkF1@*hXLZtrt3Pq=W zLm-D`_bYdBX8Xs0g%OXr<|`;$_YR3z!L#LOf#9fZVJExofg;s+Fk3rdOZ#2#LtYT? 
z#NXEYZJLLS4zq>L7_N@@BsZaw(L*@J&c}Eh{~|7rwPCl|D9ZEI?;8hA5YZ(FpirBR zR;l*aPZ*4?0xWh&^y0HEo;1Q}`GMIp|M=_1!RF8G_+KO$?;x z1Xp0-cL$KD26-@{*Brin%0jX20nQA%A$Vp+W7P7gT&5~eO^C%U`f5U`XuLVP1-eNw z&JT27%^t2zF;zv4tpo7)@=GPw;+a2%ehR%k;lNyIhl`RdBA}-k_OcDAha&Hf*>cGRPVOXAdkxp$!sRv0rBIQ~X*g<$R^12zr&Rri~L) zTs=N%L9yp*Y!>3gUz2M~T0Ls8?Vd~#6IWC;2D)_!UL0AtqSh)Fpj+j`R~Qb?IUbh6e2LyXs5Pu_*rh812H`Ej8!+!PF@( zxQywdTf!6Z&lMetIJ1Ac^9<)1whAlHNcvL)NN~1)|8#FD$HWtwufaQByhNhp-YV@M zD7|*Af^mkqnb`sGCx4>HH?Gu}6iFI5^`uS-9u6)4W(^3u6EGO`-#EMV#Qka6^u_X; zXR;={?O3y8i+zg8A&2R`RK%c9@Rw*CRzx)pW~W>S=smii5hJ0Xb@4tZJ$|NflzE+J zj5j_6N&VRYUGjT|C(hg#-u~>KWDc4>EL{C|#5*+QCMMViFmtlT7Sh`360@xJ!$KP@ z>#kqBb{nG0gDA95VG@jGa|Z98UYCYN8SN#aJQf%d@-g|n$)U#ns|O+5L_0>dF3?oE z)ZilAL}glbPZ9@WJGfs4fR?uc_=4vJm?5QMb!9Bb1L%lkyn z(qXOxSH>T^Di@@UO!SeZK2}9Cv{&xiY2QRyjWsIyZ1;fkP8U?#?f)J3DmT#}L zBp7IbYa6u(PLLQVHRZ9)mhV`BXi9r95d{*-W8zfJUNe(3omrH>D4t*r(|!D;O*#v6 z9&hqyaN=|=EaFHlZ4G+B=os6QQDrIIWBTjRVkD4{d~S1o>Lu!pL)>^AMo)#`LehDb zei*I3Dk@I{eSQDk^;_4kf5LsZQy0bvx0pLLUqAlzbd)Ibgiva}{$B`XX+(nR&{V5? 
zhj-D($EcGtGu(T@33KZT4DHb-C?|kPY~7I3;&N3MJKodcz*V|?Fex)myK#c9G`6Ua zIgy}XGGLJd*j)N5)8W>W7F)wDJXUYAPhP`nK+^WZ_(p$X{eme^(pb=LAHAi9==_JT)eQ@3p%HWI0-ldcfDqb5oCNhi`%j>9nlDl z>vw<<9kfsfXSg)`aN42PbL)Cv4Yyj%cZ>E**gTa_lAbj-UnGDCBe8bvE_{Kc&Y!Y7 zU+ro{bVaEW4O;PT1Z?e2pj42W^5yu1ymNLn~H_RQ4*6X^+P=F9+l8SZVIJH`=@2m-T$XT)%}Y zKrj=(mAAm{o7tz5&rOdOcXw)`K0`N$x-1A07o{+3-SyHF5|%K&1lXI6wrtD@HkINazV+mrl5n$MMs;j;mQ^BUj@ z#}R0M+*}&Rx$#@U9fhjru$iS|q65Za+t4QM%Wlt4JwxYRExZ^efb}S|zhh)+g~NcO zX6f}|QS%UXd5dVFY&C#9DkwNO4$XH0;3}8By4Tk5&YgK2kyoo`q3XNO*}Xcvyu7d$ zav5D+-me`(*;W>W6>RMkxm}4cHD=`s)X&mEkqw(Jv}{b z_C2BIu4wzwcd7e-$9{kkN8{-bv*iI1A1pTeG|#O;nq>e}&@Ax%9)RhXw~b%?`?7mt znIN!;cwJ#K7`CtfJLJr~0bq+!T4=|Q=M9d^*o7bwJU{H+z;?=?at)|_la|AAb4Upq z#gD$O!_hVwEt6tnVjvtEl6$OB)f50PT1OQgj_kHxaGJCQ=gR)!D1prD76* zNJI5QPOBGbaD&l)kS9!$WUT`QIoq>7n~qFpG30mn3_bl`|KZN*W%tm*1m3uDm)t_f zB=yawWqZFahEMr=v|@o6OO=&9r!Jnl7WCxoBT6|uGo0|4u*R5R?EB!dpsQDV!C`eI z$1_`Wdm>M(%niA|4=;3{l!+Ue2_8gt9BnTW z`#@gT0qrMwS9;|UIEvxUQA|D1J1?kJUD`af_QmTj1>4k+p_Os#P0@=XkhR4ESeugL?q+&XzJI>?>1shz68q7 z9vO@BuLCOp{o&!_6-42ZZEfpgi9hY-<+TQwwTKpcnp|c8(xVALrP9zLts$h_uv;ta1f(4 zeCsfYk$Yg%v=ad=^|kTakafSGHT6tb&TS2!htYi2S;L~;iUSpStal4~70ws5#Akhn zQ!Lc1b_6M5iZ{}?p!D(jciKBH4mO=Dq|Pl1Kgbrn6ALWrVS0lSY?f@pSzG4YkG4!_ zGM~GN>T<)~f?9Xl)$6iFSm4Clt!5)jk^vLs62amplJjt1$UqfbksUG3EiaxLX&?F% zYrDA);Hvuf*UfB94}w7=eNJRs8t;x7nK72ZLz3}!NB)1F=F$suw^BtJ1I!wxoCi@; z5oCnxy$XC~iWaZnqKpb4YQmKA!cp&9ur^|E-5O$<2q-j9>%iHaR8V(ybebA6}_4}j%z@9^4B9VgiJ6+}M6QqV6ZK1Ey0Mf}E;it>tz zKHr7*7+DWKZ~))~KwElWWy_N%PxM`jzmr6#zsjiW-ta*mG*?v5`ilTT;s2Tt=C@Ag zgY?(Z+G3j9V}5VAZrz!^L*-StOB*Kcn!sG6-aicyzeg29rFW~amDm`H5&sw0e^u&?K!LQ|c&@gZxbYMxn6{As#iLE6;(@J>wS%~y2+m*vj+cLb)6vEZ zyZyCaK2#(QP~?U+^%AS*&FcZG2%SYovGqqTBfmNZJqeaR#9>#7Ciz^m~<*&m~^;G`ND;iDDH@vyJkV?hn|qz|xF%=K)Z2o=!&!_8&8 zS_2kP@OohNd_%Cg`r)cx1-zEca5zZ%!x^3Z=GN9W@Zx4hu!sv#O-N#f%{SIis|1dK zUW+Xf9}mYksEnB2husXDXs^I{?7(9i4{D$ec4BX^9(=nT(UEJuTFZn3ewUocayhx} zv<;1VV3ei7@OG$;&Kv%)A`2Y(ykg3!S9QGV7uKI)2!5~j(u!pb9W6kx27uwJY#biOSSU=T}N 
zj+AMCOq1M%dITAnH=T=NdtZ1O2h$z{0g3B#*-}pQ?4!cY`YXgGKoAfmDA+h6`+dYR z$-aXBkF_@sq`K|Chffn}paD&!I3a03BAF_Zq0C8%5{gn36*4s387f5-l_6Ayq9h@r z5TTMpQ7OupBvToGYab1|pXcfO{_&nan&X`7b6wZod#$zC-u!s-iV=rA)-L_X9fDzh z&QLECcee@c$NiF5R;iSYtwH(o0!M_z=z(&~1$$2CLC@rXuMoYz2bc6NuLK1u1) zk#8_ax8d;E1;qo97c~HnI}6E$4zJ%4C3YZ;fM4nn^#csLhpqP;kwBm$FnLi_2mTJA zo&s$cXPF^rohFn9_}!5&x>Tzi0}v8A)1igB2 zS*RDhy$$g>5#iRG>l~TJ#N-{to=;MgK=yZP>gNfvJE7NFAFY`sBe>+lk#@}mI1Plv zo=+*9sPZ8I(F%B6UU3&kthhXR={K6?R}N7K2>|erGvL?Z&#AzO`<4|4PF56o4Qh43 zwk22x|QMSA*xAYR~us9Un{t-vciWlnPV<0`TsNDAtXS(E(7_>)3gWKzNXNTx) z)?+-YSVSL7Qzef`?N+;9Jd5Ca8nG#-Wp6dZpf=kS`KD~zU!O{NeWo`qHnHihI-umj zzZSFef7>m{Iw~JI25C(L%i2}Jmc|-u)^t=o&Cma`d%Nn}MN*)H@a9MhL4s(XB81%yK&YMtE*2vlvhaVEw z3JRwrWxST?%oFB8!$$kvCXoZNm(VejjZ(mY6`}wCph}%${TA=&AaIX)y*^`*DOG6g~;8& z8`f>{<)w+JBw~T$$>G6w;ND?nubYqOFCUF`901sXiCg1zUqy9F^7naH2VN;N1%UZ~JtyL{7A*n^1Zi zEXlVE6Q_V(B!G|p;1mi;XWRhHq}3d-jo;Njxc$-;q}a{n<13D{j=@TSq$#cwCd|lN zIo{??nzZz2*;V{p(j-*aH~2GB0j?k?R(&^5Q`Uz$@UR?N9w8^ID_n%)7oTY?D0rCOD*v;9UaV(=zpdye!>pt~98@3^^SA>gy2_0-DlvwQEmY+YKEXO92E=KQN&SY?L^2ujTC|hn7;X z?F%8Z@K}?bY>PZy6K(DZH$ulLHw5ohNL)hZ++bRe^_@ZDYzXxY3KpTaWhfeb%vSP? 
zGCyxVqJ7*m8wePN$O25)5oMM;vf?1z_T{R#0(n-0(gA4|bPcSGH=g&80hWYuu*FFZ&F|_V`CPu6f zA_pC&KGt;}b9C10?ep&KYD@A&qt;1jgnt!OZ~-E14(R}&C@)4>V=w83f4l6J&J7naC^3wrc(4zC^X%ccyb@tnw_Bq%DU>v() zSrLcUk9v3%tOMYu$yd`KWEvM9u8hQL(X){KZ(}wuI)vAaad4a6NlQ-rI5okauaI{cCkln>HX^2_E0H9Ir24Zmr43QK2g3WUg?~bnC$M<+kkZVg#G-7x2F=ON)ltDcW_2D z^PI;^4z&2-!n)wwF`bFWm6y0Rp#ibLwTHo}ZJ|V6=n9ym?$0gFC>(bHwii!4**CxuCPXTtIlUftEWRw_@XFSN*bB|{7q@4N}c&1`SWyP z>(CAc?u~o*w$=_t^Z~zeo&N#?$CN!wciM~pY68&^?#e*KwgoXe9mNva$Sj)5yg4x{ z3X2`3rjA2`Xb%l=*g){B(C?|xkAq5Fp?Ox^JJ$XR8%s;R*dqV5Ub7IgweJ7=6zQ1` zKG~noS5|Jq1Y~zQs=>;K!HtVTz>J71+9szl!`IxD{mAl7&JC9d4~9hQ*4Jf0162sx zi(woJ&zome3-=zmi6pZdd?7I0s6AD9cCyLEgv09}Av_Gn$Oc#q!D^(nAY~@X8IWhB zdG7s!O43OM{GI(jpUdH$ERF1zge#=0ERITADEb+Yx=xs)YlO`#kC$?#jBAJlG4sHl z6?}s(nf^JdBo)XdbhV!@E>ezxIU|rjZ3vBZ!DN&IWXH)J6uqOSgb(TdZXCaMs%)Vl z?qWZ$TDG9g;0ch+28@+mhXxOX_^IBXW{@6}P^Aq?+x=kw_r6?A|BT?d4tZ_BNeNT@ zX1^q*D<+1<;ClYNe;NDU8>dt9)2ti4n51Y!tkXlwTtr!5aB$4VU8$-#;<~rz55}i7 z%vbB(`mP}h(M2GI7tI#LRY1B=!chdD!QT!2k-^BB$s#fgbu4Dxhv7&xn)G2_t7}sd zr5;>vc%ClH-V3HE0kDY(>jmp(V$ScGosqyIfyBF~G8f8?Bv=+hULtKXAsDFXy|nmw0FIqPNs$66EApnd37HHrqYu(F zTDBcSw$U4sCos;MO>0%zk$ex?y^0sQw6x7_vhYmfG2UGdOjsvi^9o0LN^;!O+B>f* z^{FHB>S1rSBc*ACn6elygn?(y+^P~8p@~$7VV7RB;lvIJYJY)lgC*nWhSrdNH}ppa zW5Pxj(P|*!cW5Mp8Mje_TtSKy3*(0Aat_=7`6qI9&#rFBmtx!p`pJ8}6e;YaCeg7{ z!3+5~chnbOBEck6^`D!oOTcekh2_yXr#`B6fbPEAjuF|{LBP&MvBNdl`=Hnx_>;B+ zC?+j$$=m4?=x=<32^>7U?Nd|V;wqj2k*J8YeZ0X=6rADZG5!0jcaja*q@ERsQL~w$8`x61nVKgP)p|pX}cC zVLuZLfjU|+iy>pZh6l%HI!t1clj+m*(wb;Q4rFE~h`N1lY3Tt}+7Vo}CdCA|AQIv! 
z4G{R=BJTrf=97`TFC4n&AK>p_?fD?Mdxb2LGs7w6=9HSLjHkkbm^=iS!QT!2k>T-@ z)qE5_#(SKqI>G{IP-sb@>kNRN9gH;jIP>t5;!a6@0gxG@C(vv20>Mq)GBb6`Qty^a zo-<4Er02|S^d9F=;hF;Sf;I_CKiDBqqmLNVWiGIoY;nwxF;D6Lnsj{DAdd z#7+#KDn?6@{-qwPMrzhrj1iK3aQ{sdFLW;nNxJiP{4_}wAhR(*-Ruqs_ISJZw}m;w zL6@n@l_HsWp&Xi0(r<(P9GKJ;J^F&o?lPLxOafL2CG;RnX*$th=F_sbRb==1uE$OJ z%2$9r!uH2B4YfZ|%Lr#-#!U$jG<4hOIea_M;esI(b5^_*49nUPe^u*^q>>Q(b{eFJ zZ$nX&rA7?v@himp-b>|UZ?oF2r@gx#r10m>l^DG^(f8y`g$?B${7;FfheG4wgl_pwhhG4+-1{fF%yf$Z3!pT5hXqewQ zjBn}WXr?-SZm`kqC=1o*>!%+}iH}Q3Nztgx14v^HU!}&1+i{;W2i#mibGT*cS=a?6 zCAfQk+TAHrUob8`?SsE2wKT+{Fm|)Ge8)3k<4>ep)iXP|t^DFsc(eKgq^DhqLj%%s zH{^h8N^hU7bLy8qg&#UDHl<+-YY370D8GoRiN)KGu~D|TK2`Bx3L*Sk9*oTV%+2+# zQ~s&D?cpY^5J|_K=)6j6jE(DULGJ`3tM$|l2^9KhLtxdL=nkttGWffpKjPNkxjxjp z3Uy7E{Zq$HtfoS0dl_})el&+|t<}0cdjJhLUdBAySnKCkEZ?sBu;vEb!Wwa(mch*e z3RXKvd!fZKa|yjl>UV@4Qtd-T3IsIsl%EXSaqZX(Xe@!Wu+ejFP-oe-0XjAO`(Iz$ ze0nDWPpl-NZ0pijk#lE3e*S50{x4I%x62FRe!+OoSm0rXmzE1zy+NDt3m7-XkYXU` z`4^kAYcV8cLy{7G+Ld8vp(Q{L_e6b=r~!mhvHyFgP+T>1DoHY~_i@y8WC4)psUjck z2KG|d5;pCqCEMEI?(H_0In03u=bzLxtFclInC)LY~A3 zk(E%bDGeM1x1~y_3QfU%ExbHD)~H4{r0%&4(S`!`LZI4eP_I@x@NlkI-wZ`5;X zgng``Fj^|{a|XucQM!)?_6W$o%p#i;t^{r6U^@bx$|WMhH(?I;oH7nn^6nHRhJVd!3ltv=NV7xh8XyG0 z7%GL!7{e1VS~TVwW_q6~O11^vs9nDU5(pa3@dnfOVs#^rt+-U|RvreG2<&Vdu;$MI zbBL(-_q&AFX%{VPE0XGXFm@pl178j&XnO4j+kW0bs^h19S*kF2gCkJrMiM~;T&*XK z0jyBU;Qi3oZQeK!!O40V_0UbCK(qp%Osbs&qv%c0d6B<4yArV>FveJIMvs0xQ^0UQrqi> zv)Ta+NlZqjoGWRG^i?Ez&S-LnzVdGLW?B6_pYm{AGg{PjMZza&-cRp@HVkI(yPRQt zW(ciY_89LT%z@}Y{ap{)rM{`DsrT|r|3x_|eiMnWUGHBTXBjt_6P%{ZgF&X07b4|Y zj}57NNU-GIg{l~`qS^gRHbe4h*MuTt3KjCrHzk=+IZQfoXJ677vRDHyE$D&LK{Zp4kwZKz{OD(eOj2^H4} zT%A3^NJgU}?;@TXe~$|Qmldb%jd9AKFeZrT7y3Fq?0YkyV!ir9{C7FC_t2B7>T)SG zxWB-jNCFjwB3HFztsfFp6y)S#22oOKX@~fpRJdBHl7ubvKs84*kO}Go-jIyE6q5F{ zr0F6c5!WTV0xv}gr(R-SyDr1P&H7b)qTfyBQOxi z14Eho>tr=PtfB79fj&&A8=#&VtSF=s$PZ)~@Ro>017agK$r&bKAfR!peQN?zf~|x{ zpZE5_Kf^|)&k40zi&8K&G9G9me-@r3{Xl#NJnb5Q`4XlI-t!+WABd-qna#MI;aK9h 
z2{%_nM8ufN1s*~)@&$A70;e8DGK@8*o+vw6KIuh;BOlk+)$RBRKpSXW_x_L;Pd6o{ zp*R!lCQnL>fzVi+T?c4A!KJqMAWES~wnOJPe}W+M8|K?|d{nMWNUin$6hd95a0h9^ z5!79~_K6$&d5sbngh7X?-2g9keV9wcux&rTL#i+lC5RAU$koWCOVHsTMe-KZRp%^? zSt@tOCRW;lovGUt&WpTG+I~J8<<5W#fCl{*L=OtMJPNis`AMbYf!LWWl6hG4;co8n zDf3W}qIyEn2yf&;DMcxmt?MZB)LfOmJ{kuE1V~u<7G&8z=MTu>mdN-KQKF)+E)Qy+ zUMqELq3{U|yz9O+5(8~X`YEK2ZF!HhNYrVHZ0u+RO5jnftTWH2q;zy!YRnFQ2dq&O zus-pdzF*B7>*yAJbVj7dadpMUxSf?{O8u=|A(+VZF>Mp$Be{8NXlkCJ)?}bz1bRXq ztVgNj-4;c3#P;Ps`x>w|yEJNj3%f@m%vMj(q?XM6qdl`p$bgL0@!_;>d}ZJHUUiXN zmt^)o^_U6U{Ir__J55D4?R_jwJe$(yTjCnG!b*i1jLmj`U(#H0U2L`*cwndEk|2Hn z^*}M|f(|qiv~(>4_JO|9ukJEcGL!2Exflq`MASWbe9eVFK*aTH%d>6x@O+lwsrz`x zNQ;qz#W!jA52$do^`Q6kDwG(F;32;3o;k^9I12%#_Mqb$`a&kvQhoP2$Q3Fkv&zb5uJE!sG=>m=fA7Tg4Qrn+Ii@;$@xdI&d z@)Fe5vi0SJg3mKaD=U%JCQmU0dQ;u^m4hP7WialUn(6C)?>AOx%Pd0mWra%boXW@q`nW8E=b0YLf=g zJ2JFvg415o8fCH)w#fT_BSQj~tS%`%gh>npX<@UMLx5na$UfmdA2b=glWw1~(piNF zRf=2}sEfzf7ne1sxHHPA9kIRLVc-@dtpH{T=jvx%16>|z%i#5hM9q96>CWz#XmEp7 z#B`phv%f&;Ljfr5s4)=a{7k106QLZKFSfb7^g2mK>Di$Z1N{8Wr~rXYj6`)o%Yd1gCbX815h^z^}nJ2Wlj24-Ai zQ$(X;w(NGZ-f^_f3AHS)4?<+=N(34Ng#4Pxsk~%cQb$l;i7tn-L~(Q@LvbgHpkqSK z!4AmtDA~a6B4TY;9A#TcFxZ^AAiQg2$7K!IvAMDjV@q}*=Fx4(B%~h|V5abyDB@mG z41B!D1)WwmXyj;2Or**Sy>{N1|LjV(A`TFCHQ;(-z65JyAp6%2DV?P(IiKt?)ngGw zp$Z%KSGnE+yga8lL*44ZNTg1gaL_VYtA%)Ou>LIWuV3STvhbg=q+N3ni?WEjRbMv&Kf8;f@`Z%UaSS zu^2nDJcbcf9VL{&e&sW66m4L%y4s({jvS7L7=rfDcO9*eB_X)yUEs9EmZmMc8D}~6 zew9BpY(0ZBzw`vw0@AEFM1Deck;nkxWOTN;buCjmeK);H4aeURd@UJ%{J?ViX2R3A z=ZGfBvhe-`Q?-ew=lg4iQ+V}60ygbV_*^`p1V!md{$=a@sjKYKK4Don6vn_x3>4S} z+()rUQVZMUNnU&7h7M&U$SX+xNEyi+JE_V=GH=A@@u~XbXkgW)p*TZ7Bbepy_t|S# zg{8hyTLikp5aS$t>?axj z6=MVZA6<~))hvYA?u6vzmx|xsjZosA^L6eRR<9BXM}S0?(`^cnp}PuWv+*9FVx@is zs)<>^8k8%yQKPvl;6FKTQjaY~5%N3%OO3~i2Cl}c%*?fl-z`FMl9hQ_jg2`QEEZ`X z7(i|dw1e^0%`zImkv;WJoU(XM$do-U&8I`ilHfP39#r8FZ^nz9V%}ON=X+^TVe-c3&H60 z{JY{#!0HV0KfY`d$$;}%q4!9KyWH^wX6j^M+fc9ZCp4N~od-#`l1c8-P;$#6k0E?w zOxf8MgJJ;CT;?<0?-fTQVbH9O+iY&zy$kO~s6$#|h*d*E&YUZEahBw_&G;0~D1>ff 
zvu5|5A&hGzwd;U1CwdqLGuHvvWO5oBoUDJH3Qkq}0YENvhOyWxdUpfB_LoFn0siUPw4QZs)nyW>0TmNJw;5;RWLs;^zq9 z^6#~ieJ%t0fFB}5tlpTvw%3UY++VqnlI+N5sC7C3x9ot%(7*F-I#$m8@Ct|wJAn1X zfw9-s)?Bnj)K4%hY67PxGW5yRc|$!&?cnP+f(`m|uF?$DFPJ+m)^#Wm#Nmt$4(}~@ zB_W~g%xM_r(|5s57}o=N{6OrSs#S^WDUY3c2x0ei@@0tu=e@3V9Qe<=7vC{MSwkI=Ys+*b$?{|2L0@#!%c9(HIR01 z-MV!Kv#!TX`ynILJZjs5Vooo&V}h{g>tx#y*UneY$Un)4LTywF`gE}F)@aSLK}yg! zj=}Jn``2J)_hB$tJq(($0jYa8v=r33q@5&nW&jvwJUU*^j#iy}QhC-a7(0-V5>hQR zf%uV4Up>m=(@6{YveM&E*x_72(aoVYj1lfL*!{^1G8J2gcxme8Doo_uAXG39-6`pP zWuq&TA$9-~NO?0g-+(DhBQ++vQM1j=m{1@8iES8}FP*HoA7H2rU^E;F%lPXIUpL$n zs9~f0wHpF7lvpAw6nwx-5(cQ>v?(nO+jU;=fnj|2;5LGSZ)0YEcO3CWO4rJcZmdkc z4&|TvGlkrtC?s!@m5mOzAm#Ymp^8JE&=RASnS@|A=9MBq0pq_{MnYSig!QK2a>+YE z22JjL9W0fj4#T=-%?-Q_YWD%@HTRr%4>=^D0fib(zRNshNk4QGFijOXWJ2~BHHn^G z!t6-5ji~cYQc{xnD9mCz8q{?#DfTLo#tFX2-g#ECy36NdEpY>Ppj|@k#pu0-83#_v zUOX)u;~@QD=hIS-8dz|Z67NHcKsYVhVkliF{1Su%tPk!a0o{eg&3&^lV4Mt0aUB9v zq$?d5p4B=x(H&CI^OSL_e`Q}CyuqSzBe-j7cNsr*QFu<55dZdfffI2lgV0wmp}dJ) zRO{`B%gUouR50OjRJ}(Ek~+FPQFa;rbL{bX#{~HFkgWuD4GeTY{t13LGXi-2dMnGy z7uDiYmSuBAp85YPHak>)>X_OoKZpRVHCW)Ci@Ila)UOcBZLTl#dOrl7g!e920J% zmF>aPHqx!(8xabsQ5u3~Hax%hqWf>La6I0BPZ{{7((tc*XV*Rq)q7bw=C}+;K8S0cH!uNCit_7#sqZc~a*AFg&|pQ!Ex(8`r||F}~*S zcmNA#e(m`yc#f=I` z-lPda0!2d&C7D{_;@E}32cfHgE}%I_-V2%;ty_I@+RF(};z+j98G$X>+Er<{$p{6s zT+_8%rEtn85Fd4C|F&9bkJ!seBW`H%JMgIihDZb7I*)WZcvR2O_LKn5+Xk^*w`vQm z$~;ZG>&$^J;6u?EJd_3n0w6(yOg+=0P9c}6&rby}kHks;n6?I2)7J-oZJE(H<$;Dh zkF8$8rMyl@=IM!Y>mh9bBDLa@PU`NeHhH)NY$`W(xlXYQ&JHD+_j#wu>lnWmgWIT(AWAMakQ}GJ9?u#w9N|Zy9re z)-Gwlx$heABNE3V7gtpqd$-%NN`MERe2jo#x~6QWP+aNklWcf{H#ym24s zO9Lf~tYHezfz1Z%WPO^_jz$tvSj^r`9Lx!HA`(;zjBg!WAfhtrl9sjV3h0yi$jH881Z@%;;a?qeCEiVR@U7_~n0 z>&iWA6eMS#zf#uRB316#w|qUyoDOyei7#WkGO`u83MZNDa6b7Ld`P(XE)a28$vLI& zM&=MM|Mv`Ir4YG#zME<;{0!GT)TRc4kGR-gVQt{`G;gN>Tzcx9Y@S8@L#Qb{$cnkN z7WE)`u-kT+3w8+5BtWcNBWatEbY|Rf&q!!L$qfuQqgdtus61^ElI^f-VL#Ir)WB962WO)uy`dW=Mcnr)%;KxQ$eW8~ctFGk0hudI&B zaFm4T9l;pjB59(`&J=~SOMN+HC(FoH<;)^dl 
zal7jlDIbC@SQeM3)TLdud-LqcmX6Qa&Rhqh!jT`JYB#r9iQ4o!9u>6dOb{X|& zcBmZNxXx;|iN(nvWMpU>fc)GMOqf*tFy$B(&Md0a2|)uTOnqv4FtasyvlumSb~yG( z2RXJqC(9;GFH<8wiO53@6G;PxfJZ8C5Lp3$9Q3Yq1SE?{j93D{&LN1Bo1Jaz@{5+1 z@r8h&uOgIXE(x}iD@-J(Mzp0ic-;YAtuyVf&6rt-Bv#_yqD_1q(93i8r&{-RjOqj!3gQlFhx~(j-RUL-9<=d_?O7}TOE5Ow2fi1j?n+Ta z&O8p*RVYXVq_%wP(J24-cFu{A z$V@dWWIXPt$6uqN0-ro{sPW3b7d_RYC-aA`CqkKs7xLO7`di?bwTO-~82redkTKZy5n2oSU%&)*AdVDKu`1C4 zebXc@yb#>tq93``qGF`WxW|a9J6zwSdV&p5M|Q=P;Pjy!4GYo*V2QY2OhSUW?kJsg zma>vOpG4UCqUv2vO=wg>h7|(lf}JBGqcgg}U%=!+ay;*%BV3^Wu8=5zI#X|>aXNEB z(8jhat1yz4G&7LBuMZw!9Cf68wA9oU{_Z9qe5OlUj7G0ffq3Fu1b2$T=tza;AP|{K zV|fAbiBL$w*1PT3NwI1u-vL&)Gk}I2o50G+RahkJbSQo)hTm zai!rdv*D-8=#iNxM_mR4BJC)m&V3J}G>+}BxaF+rEU8~!I7oPFK&==>hIVnc8_K}L zk$YnON#r@kwd7E+Ll;v2Mv$d#cb>y>9GE0L)_P#OG(40;kw$Pk0>_|U3D4W7az&=G za26Hz{ALYv`vX7*qDKkDO%O7BG6V1%T4eVQg@eRyM zsG+G3cy=)&fL+$CM?Rbu64Hig_UBNjCrPH*L zIE21Ve_c4`F%O*)X&5^D$ifc)Ng4>Q(&{W0un$fSn7PpiwsAUCB#cb`YEfnkyc}zW z99JC||1;92AmpGY%&7k!Y`ECxW1N~PXw4*2*5ekHY>6}uWA!f~Yan$KvXI5FAzFdD zMAnx@9)8SCwnezFr*2X5LFW;KF*U>uGcm6{o73~m2li7y>DQ166;-6D(P5OGZFcn) zM)>^JOAR;nDIYC_zetdIZ}*bRd@k@#4D6dj4h){Tr4KjTwG8Z$M-xk=qRf_ibRR5; zZ?b`_4$MeXt^71PnecWqM1(0!>hJ4d0x|(RsjJ`(8%VomECb601mOjQ<#P^^i{YCW ziJ}w0^~5+wsf>WsP%?zAD(7ywNhKtr{Y(YtZ#{@_cH)26H%tma29YHZi${+F6B-IoS?34t4K$B140sN;poFOdbi?)wLnaM_d*X@6V!@inOxuZ*b4a<6!uW-ZI>G z?I7AjNj-WrN^cvshFajN1#tOzi>*+ih`OexK9uS3UlLu$#|dgVv&tFrWk(<%jhLhJ z0%=X3BX@!bj8`GwAf|~F?>`LNbPI!LrNQn}pL)2%c!8^t;`Fc1>$3)Pw5`H9)L_i2 znZ9^^2V_Uac?70&D@;pBheklPxg25iHZ18uL+H_NK;;!F%$pY9q%CWLJAj1tEnAIM zXBwTFso+qeE)w`$7db@aMAu?Pm>_-s1~C*OM_P3FX*wUY0tRT9-}5Hawh}ITCrPM; zHsdP>v+X}l%umdORp%Z)PwY=dEI>K9ug-Y0uhN$KFJbACV8}Gwj=VzKJb7DuV9^=3 zbK=GknETX(p06@wj&z8~zKkj^QXrFzfUa`#ha;IYfsxGS@kga9(cFdmA6km@@=l4! 
zh$51qllt?ecK3&>!*Z)|3JTN?^ju;LJ6)p)>K4n;Gyq2$WbJWCk6Avq8DE{n4hOHk zgJ|P^!JqJd6xaW(nC;kDst#crau$Ci3NvmGgw|Q3QX{;D%3H`hW|sB02H?(#)^Fsw7#PR^4uZI(~TF+p;h7-nK4k^_iaa}kuPRDzkgAsS~QiQ+B@Nve0h^9}8EUqt=H<38m4wpN_}y2%r84^?z5kaG_vgCs`H277h{=|ETD0WB4ziTe%bvO=0r^ z@JTyWj$KwSK_6TP4rBuwgRfrE3XVYTgz$cy?6N_{N@a5Sbyl7E#ih9M#fnJuq2AH$ z*=c+hq#saJgka^AB$)|M+I1izNfaND1tXmRiQ4wBA0Clh{Lh!HmBg6ov}lkKDk#7M zBMq`Yf}df_jRQqKlq5WZhLHTuIr-(jooV%D9?fyw1PqJIa2r5=aR1a^qVp3t`J!P@e7n36YJyU9Ui77HwlQc`=2fR2K~|P-t6FyNr}jgMyzS48_C6DZ z(Q!S?q5gXZ_~S8zY&69n18z*z&3X?XvCj=dZFTYH`&<0_yrL#4JOcL#ZtJ;Zvyrun ze;@ymEe}2*AXa6f%2XYd2a(Y)G-+Tu@TNbP5g$a*iY$frYA6V!)>)3^tzQL&0TfOG z&WnK6kew9q^vv&J0-RDDyFrf9C2R;WaAh*QI?%TR4i>d|9s6bEdZ7FoAk~D=*kSPI zUH7#yubQ5@5v}eE9I<>XGM*!}* zaIHaE(4}&POQK2q?eD$VADArSZSv&PN|e&f=H};6OZ0xREq4s`_m5CQV(bjCfHZ3? zz=`dMZuzl)ub|zv_qwlWOQc}$(T0TxgB*}hTL>7K3%|g|a77VA=?97yy4ip(yPIyX z`i@Ml9$G{CW3q?sLP3OlV1ad&h05f8B_J#*xR4`vrn^5nE+NU>Mb|j#pqkP~Mge@% zyTmozE~Ut6Dq6Yc%E_0EjB3Ul032#GbK@Ab$=}}oYz2*R0P9m0t?H;V^d2({&PGogf^(>zvc5tV>cKC^7j)89FT^5yw4 z=bPO#nRKQTEYT};R>|@S%SR#dvc&Ap=yX7qMj1O19VvlO|@1pAp z^GCw36{P}vS~H+9*}(j;4l+G~+dEZ1tJ&C)+Waz_DphO~zU{z31$g4Q!bOby-O&V2 z5$~+^h07ck5{?=bGiK5lPim5o)llsmi&U@#{oixcSy<@zzLPbw@Pr|T2>**P%2@1U zC`EB4A_XaZow^s(O2jZ0Rt3g z_VUBT{?Anei$7|R5fDN+Ck@8QsBk|wL)8OT(Er0(Sv4EJsk=6pD*7jg8|}t&J=VOLBSJ($nQ{v14lA@rQ@egZIcwh zY6_xVU!!njOpuNrs!yfWC14pnK7dly$Nhm%clfTpI{&5cx0gmC~n*!Dw41h zh-`nI@Jm$iWJ5lN&02dHBg+pVMTd82iLaddpC!J(fR5hVB00GI%|WiU0sMC)O3Rf* zj$^fYU8SVOCXC*Wy~qNR&^nFw)c_}f841n%V?TeEPB{}z!PdwM== zW1c;fmwOdZgU?y9yK8^*q-jfL{4f28l!7`;(tl?7a>q)J7g2_~= z2TcJ``fOBm7rq)NHMJ#TD*8$0_d|1QO~#EJPU0^j*ay0NWVP40N+_+#+PG!QB~mvb zP_gt>eO}|d$3i4jR8$IF%PygV}Nqlkg5k&w!s=qZc9)?-#BU1 z`dr21@@uGz7P&;!9yu}OE4m;cVC5F-)@@C|zwYvzgT7mb*k|e>ycO_qZVS2>fsjNr zD**;5L?2}?iTwmS9)=fg24`J#;rHmDk=*<{@hV)%n=fXEwA z`7@<3p%FruWFU=P!4*MVw`$%(s|(aneP_NpQYQ%7jlebnTcQ4ZB)lr4UIn|p2A4$m zA&DxlM~btIwp^$S3T-Lf?{5l6uklCnMO2Vx$(DbzLB{_9<7gbUCf+sGSBPvG|9EA! 
z4(GxO}36r7>iLdKE|(eUztyDZlwQfHE3+vkifQ)*K6 z=bY&y*gSR`++vQA!@UwSSL-WGy3U#G@n(4zC~e2LMPsV$qP?HH&_jc4r(>mB(f1us zT1jrjVt2n)j~#INIBn9M?au@$8SYOTf9oW;b22}mr?P13iC7UU0j2JGA+lYZQ-=NK zYlew4sfZr1JL>B~HpOg!*QOq}c7dZyvMojNwD-qm&`cY>e{yg&T`59|zxrZQ*Lh!p zT>gCJ#l0K6Mq0_|@_$cqLH6xF{8G|>K~q>lW0xl&l-NL9MSW-kP8L(|_XiaRpadX2 zrQ7J^99zI}1C&MF$3z!=OHk%^b|@pClbD30eMo4KNTVzzkzF6efOBCg41$bCmkF5c zVQ$%ZWgQtsQa%ppMJ9r2ETn2~vKr_&gohi&PGDCsiWLKxh9k>rrJxZvfU!eScyVn2 z`I>d}ar-P5$dWl_QTYr42ynWhRo|#^EdL6T{q;1R0S3! zh2)#g`>EaI@@6&7Rx_zFTMl?REAjYqHD~Q(<@nL&MNw%06-l*BbRz_zL^SllYYu{T zxQFT?Fpaw`M!0P~rosc~|K03cs}YUEFf>%z6d6BFgCRg?E3I zwJ!jtp_Xj%d=%_ai1ry|v$kX#k=nNjOZP0#aT8iPV92-DzjyNzq;6`+M!$|I5ffaz z1l~+&04t+D9>@<=M(r}%w;ElkN6AF|kC*HMmab*e;Rg5YXg^O?`fyNipXeKal1>~7GAKRbd z50MyVf~t`F45k(WaheB@vZtDb;+YuV^#-ak`eQQ@BrCuXs2+{67?0#G22m{rbmsjk zC3R%r#Of>dwd=!2kbj>YKN79(;CtC7TZ7!PFwGmA6uS_T2(hkaw6GG<0fW>R$e?2a z=NXAj9f$WbQ1Q@YhT*E!CaAdn^UAUJe67x@%Sv0Jgl=z^VjCMz)#aOp=V4N=NE2YY9hJ@ng zrf1@XViPBIK~YFH#UGvO|JVxnjgEEDY)leis6V?j!gGe6xo|H_rMDQ<>fH~ALaub5 z1MW2XNYN3WPT4)B`)(XklGRAzOMzQOqjc|i4~V=l@mloh6!d1)6G{gGENNz0yy=t* zD>q(ulr=b8mz(16u01|wVQ2XcH(_DnM#3k+VNB>yur~9uM293s+?egx5E3tTgqTLc20epC7dTc+y|?Eb9nV{+59NSc<_&Q^SXIwM0TD zz)^&OHAW`(WZeGV^Rar*1AWTWQ4K}r$f1LUrjk;FoGf2w6HpIiiDb&ZWy_XPeU^0g z(E*2zO`J8{vNVUF20Zr!&RWLcCdM&1hf$|@a(TGCd-9a}FCoD3mvwUTPTJ-imw!41 z0CfW}r&D}+OJ8S?{BtQ79zmlA`8T_r$1ff@2hf9r9$)}4#?D}zXEv<8A_yme+F%_# z5Y3aH;tPC)J-X~Y8gD|L1Vjh{?0BtI_c(Qa^Vnst(BKLWgK+W*Ai$gIGVBj6^nPPu zX>tw0+F*mJITopX&-KZ~BZm(e+iM`oUD2`)L1NC~k647(+%|b!fIu0#TY|qNL4gdL zcWC3IE$8h~0%b^80|rG3+PLJJLCK3)*8rmn1Ke;lepS+Q+l89NmC}&S_PWiNd@Tba zSUXH{4BNEn|Gp#Ny?h`_*#h6h$v%=qZvbK2aAWhKsCpf4ppvq_G((0O6-l5|)JX!J zbd=S3nUqOZ=Xet~LM0!PF;*C?@jSEI<%ot>ekG3TH3%a$DmIP)wQ}ybN%8^Ni%fC? z!tIS=tBa<9;Xt{AWFug-3BWU`_yvc7={1=6t7O;h^R9Dp{I=UjnIA$g{%4^WPs|R#-xl>g&t3Y4NZL zaIeCXBqcFiK=kSGuH=ICAagFL2V9gyJ>ZF~Yv@5ElwA}o3u5Db6hPE8R!@znm~4c> z0~J#W%pd4@J0mPq? 
zhpl0QgI{#Ou`>o4BYX3FUyPo^uqvJZUTEWBqL4UZdj~QDVh)h70@lwcNI%sx)*A^T zc|;+h2WCRna0c6zXa4JKFbtT(soc}Cu{!(~BToZxI>`7P+#!uB7WoEgswaQYBQL@g zz^TOJR><4Ks@~AVt#ccNz4o0q`!1|q*1Y(nfRZVJ(@8Q%b3iz)S)xJYXI2SvUWqeCZ0-35lX zCQLEpzu@^vK2BPcv-+~2JOs1-B=G>HK`xFvE8p&}zfbM&nANNUt4MCrD7*^-dS_ru zoxVuD2x>$j5)0g=f9dWOYqbPTI&c45K z$!Y%dTT3OCFLKDU#a%^;~8>8c(P? z!Zodgy#v`QP;fZ9Q;QFcH$e`WV|R+;1PXVCI!W#xo)SpF=UGp-R!EhZaB5U{S&6aD zgl|J>!*-;9zXHt~ibZJ;cwX;`fOIOm*61oDoI zUM?_He2!XNcJRieb~xQ(J8Sm>{@i?jIK>)0hoB8kg#`sZ6$yYD$?Og+{AEJUQ7Lry zV8|OzMNb&bh=iQ6U-7Kr2Yb>a!|X(FDL=U*|MALM`@`JNC79zF&01nMz%=<86i=)! z_M2bGtY?rd(g)~~W3!(Y0ke-hRwDz_gON}vdR%EnGC=-vj6EW>9ph>?h1La*_B(+@ z6oOM4(*#wY4&j4JGRx<(n?KN}8k2-ETO5SFo{Tg|2If%BNgv%%OLPM^4PYk1=z+dv zE9;BpD(|FRe)tfNs=M z_+(rKmq7!%X#5q7fDf$iF*3~q^7iLTZS0TR!0!yZP`2(_abrPsl0Fh*45^p!F&D7f zI{lzVxwqM@z`DPHkr{PEqpz4uPRRELRWvn70;;A_Bh>mCg45jJrB#2y;fwN_p_3aI z@lGE892_k9y4h4w6nuO%7azt5Qy+Xual3g@J^KsTnwL9v{2+A6t^_jX@;zMM8_}6X ziL`r|;0Uyp!4{0F1e_vb?bW|#CqMp0>C!mOlDt1ZD@Y(#0i`^xc_H6NTpNb4+#+K( zy3_O=yzq>(T49Eu994aP4&wB$35W{%Y8v_?THAe?EWor2Bs7)HhVa{rOFkn4z`^HeTD>aAOUC`;j#jg{-jmi zlxouA0nuW-Dxl|ZFc8|1Tbt2hJi7X+peBz^kI$>UZPTV zNGOKGZ_63#<0pp-J6RJ!K5}^`n>C!F>4v9J3cZ-b>DfVJx+nq1b;E;>=(Knyh9YIU z*Q2Z@+?d1za=ji+sEa`0p*&&vPmy5L7OhzzWSamSeK{vn`5khK#c%_o;maNB2nVpz zfe0xXirRxC-Ty-W~7g`Or_IPS4@Mzu6jwf$E7^+AH*$^$?l2K)63H9I}_BX_w>~p(Ow0Xf{!eL(}7T z_0K{7D|~x*gsKJ588nd4XN>noG)~f_Et(33CDY%xFByXyZELr+Vwx8XxFL(dBv{D1 zXVh*(?n(7ngj?WCPBZzq&g544{M!5t2SPQ z&WO@@>>XkVfVh9!A41cM@AY+VT!S`Rbe{u2(xrDs09Xkr@`yFDt3m@-G}83%mmGwd z<+uW@OH@c81dPzg&HX=!99gzeTQh#$x(0KEqjKR3H#7k~>lCkPfwUQ)gLpO|Lvq{e zLH}nFsRQrzRWxe_&;-pH9z8O`dF=j&sLUEKMy901oFew5_GH%}b)2{=A|B)yn zLp|M;hBOz>8x=F&LNbSGVKWr^zts@LBfcMW5}y#kAT(2qJbrO00yG$O<)ZClJs&^3 zb?g7W4L+p?C2vDMlJ~3KKOxVLA}0B9UqJK3$^|Vrfh+ z-8|zYPyG8XWK1j!!q03VD{U1Z}28w_1OGI+9oq&Q3sZBqN#j6qAM*FIlo`%a-?@ zlg@g*G@Wp6?bWU5T{{u!D<`%9`OsAP19l>2oD7FKm?eBFtoMTm(zyL9hK7ba;WmlT zkURy`)~+w!wb8mVd3Y+n=G$af+l}6~%a`Bp+>?Sa!h-iYZzrg#sOalI?_BZv)I3p9 
zuZoI_mx$YHK6S~iW=NS;EEWX)+IkGe%T1y*hl$?kl06^tW#Iv_72xGf8u*!zP-PmG zxOM4kC>g3T2L*4yrydNxh}n3CgDU1~pMX90V#T)Lr#VP9Y5>}t^5Sx@TYXvYS!rof z#)apK5QO12&yHA*xZS)3g0IORs&1u-@SP8;+`kBB|LL3(`Zj6OE-+Q53>v8-1*`7J@CY_2K={UqywhLY7iHZ3fJ9g|8 zcdqTce~y0}7-e7i*0JTSLMIxXWx=o3AW-;vefnruS@1zn5J*f?pDUUQ@Xb|Lu3V|2 z@(J!dl~e`&e0_StJ%~msvsbLU@wL@I1)7i9f`Y#CU-M-fqa&*?RP|bkm~t>a`GCv` z3zBoR5jVRh58Rv=kfOy-AVMz7L3M4xKH0Ud5oe$I2Bw!yqRn~5^N|H(Pa7OVumrpm<_}sp|w}-$W4dEfwq}X(F`jL#3&`hE8!aNC6%1$7+$b)+(2IF*kjo_5s+|zYCIcHzqeiVUfuQ4la;>60# zvum#kg{;lHnKyF=jO1Nz-5L70e0}742H(ufO^`+JT)6zzO}^+&$@~eKs55=R9A9&X=-K z_3Y)#sX&=33*XabxO8bY z=h6?SVW%$@5fx=ocWQ`m^5s9=Q5Sliy8XNelJw+4`d7!i26gW`8vIQ%ulA;V;z>%$3X0MUlp{>&FhnQ8o$QZYXzk6Fj;B&%bR=)1O>4f zz(G`l*gOSF-{ISvpI-<+Z}L9;Gt~%RgN|Zv`V#VZ7B_+SAVja^=H*?budh!xK#u2S zy9Nh2+=aCSo)~Ye&|HQBP{eNs82j)-@qdi;?^Po}M3UY7aM8pp67=u_TZA8zX zuX_J6xb)L*p(Gi6rJc2%V?S}??c29Os+a8inY`jJl`DS)H2k_kb>h#(qQCik`R8c| znz=&{hNQn)j6dW91dUzr~P0UOoCpgc|Z^_$9~BTlbGYo;CYN zr1;}c8O)=J=_D#s{T4w}w>z`qc`MJngf?wghIM}XEH$VASHLVxBK9m zJ+HB;>B3`7C*(s)Ws7;DEvG5pGvD^^uIU#Cc*Jf4cC0W0whNuv08q4j8Q(F&HFgSLrJ zQ(-~RZuUkFYC<189olz0-=;5Zs!U5udx`l<=|I<7A-BzgKiy*0@D^CTN|`h zu9iqF)+_w^kn0^O$w@r(6UI2MPdc^26f-u-I0V3BI0qmBACmGd$jF6^3*RTcf=sUH z;!-J47fG_{_^ZYs%c9oHugLVl9_KU9`f3O-I#`~r=REfaH;T%XfLcw-8a3TZ+4dVklVF*4w^?* z#9s7*+ZKiWE~o@6(#&*D@wbujGx+l!4Iv%{d?;b+|wdcQVv|* zpOl9rXr>uRgsmXuoGK}j<4Enk2outq=+#PZ#{XG+5ebg2zW$l#sc;}XslRsV99Vl_ zzxc%VIy94F6roV#Jg)RbDwNsRyc_B36EC3UA8fgPE?#l!(8aSfShvoq=!$mOz2|MX ze=2S-3p2%sk%jaTjooj7YoNq7ISBRpQ%!YvinLW@K;h%{5ANK0iyToF9TJf^pbOii zV&da{0gW$%?z(Dr51R45%c83+;Fr84Ye8)EtzU0~;Za5cLI}tng3Rdh{sa^4Q#j_| zO0O!uh<`<&4OHCiSCHX}17Cd;pSo--vU@5MB46Zg#a*gvYC029+3XX_8eLp#2t(cg zo%5hOt-(XuZMd`&jW0Dbs=lq#y0B!vhK9zz&aEmcIsS7rtIY;F%uw#d7`9*pBG~{+ znCE!aLlCN>(hD;B6j0cQRJ%bcZQa>J>+ub6l)%-HeH+$(-t`EDSvAtU-5qs}05a*q z#bJqn6Y{eJv&`+dS@A*E1B9d=PF8f6tphTC5}el}H!P$YY$5Poem)NdN5s&$i_IWQ z;ev`JkwnzjLEug=!Wypmd>=%8+lp{fDx~*rD}C`IS9QuJZqOaJt<4}-YSP=D97kA| 
zK&B~mkEM+dQ&e#4h%-}Nvt~_MFcqROJ?m*bZq^&|_>TK)C^KqYs(MV}mUlV#4xC-Q zcDpuhw12t9)6(_Mf!6Aj8sBB+Xe|xP$H1Q%lvv+wvAvE~_670jUkh+bakuk|gdN;- z5m=dh@p5)ml-^}sl%^wb&Gh!1=`6bAQWa9{DN20}+BHb8swy2J5gL4*){1#Dq`t0* zNKBOcf9+jcSkrg5rQb|Xr*#erwkSeGXWCjFE{#@b0!llgj9REV>KH(=P)L<11}GG{ ze$&n=m7AT4APL0kP^1I|A|iKBiwQ~~C>lgAX+eSxHz9ze1UPG>iQ!=$&cpf6JeVgh zko@zz?7jBdYwt}?`cC1Q@$vDU`}cS9PH)R$=d2h)TjE5_8(Q5j0MvdLqgnh=GCGpu z5VJan<>Tgtj6*m0#1r%(eAGIYe)c1D_&wm=V+2D-n~;>&paZ&7NzFrO7&~xk`8YRa z`%IUZ(HTO861M{bHh_djztWY}P9KR<97#huMuycrReII#cim`&l|(k3Xc~!VnwQvn z%*>3U2g>FIDzZDf72W8Qm$56$*%+q%&{DFM@WFI+1F z{}+QI<{A%81PUfjpsjcy9v=^sKRi9F-5MSed|7S@$$q!_ljnTr_fVFgY_^zu7xO49s~81#$JU zJTWn`2gWrCqoXfJ$*`0*#6buWNAJ-XaIQ8y<)8jz-curw`+D2@*;U8x+___SI)CuOci$1Wa+kMv*DiAqUx!|S3lPL!1Hfx{nSy30XwCzwqKBhYiHH_U zBPV2xkouJrb`FjSl~=TCfkX{~@tzMd-d@R_+LgDv0~$c)!K?8}!Q{h*gNAd$PB)5L zDukHUDMhc9%toqU0XMTRp9{_Jx@F6jW$JO{U`25?&Gg&M&T7hdJkYjoW>7U_$DIOk z(er(N;R#%mq+myY4np1cT%tgX!JC|7+A#gxgc#;}BwKKu!_9gPmBWo~RcaS}(-F*` zXZddgn7RMKrPOC6aR@hRF66mkK@nquyLwb1WP#`c8?P*LX+MBwAZ4;;iTab3V0vT4Q z<3$!s-$AKu+O}&8V;_B9HJH6NU>jL^OU>!nF8x!y*Lv32Y94~92JrA6Zvr#FuOW5g zEhG|_2#6=B7$2N_l$Vbd1dh-gon)eX>;!pFhf3%h#>85JWl(1y9Q_2tsjVY2}Zk? 
zC$IRFc_CHa6Vu+o+w1FV1KH?d{uV5pR1BJ@I=8ePwHjyE(_2QPt7}uk&?ZyX>)>Lu z$347Xsn16GSF9x|9H;_%qP*Q}kevBKs7RiCq-kw!<;rUx{mtQiz;Ft3WCCUsMspE# z9(K~Nn29#Wc*XL`+@3Db*>(SDUJle1DA~Q{&gvKW(C;*5ia_2(cE%wq%WV_^((hG} zfU88Uwf%ELtKuP?wITyWH&VdY^z>?H_CV2(=ZcDo+H64&Avc@(*pOR_*BJ*%>Fb}s z=^#hcN*+>{hza9iKbm>rb!P5$kamUe{RFn&mu-VK2Ff$4h8)n*B(p3hnulg=mJ*5{ zz@~d`$S*JjuY_3@P1+7BleCn8U)zFe?xu)Xn*8pEdsClLHa5Fb|Ls?3jZsg|9zs2^ zT5AL0^%WVYCLNG(9V|irm!@1E2f zWo8D58~l%5+Ns4-80x!VRzum6!b1DKg+6DPDjc+s3I_qQS^;vLrk0N)^iN`<-Fdt1 zS*&%niSR8FYH#3uH-3%25r2RftK_XUzdR5%pm~)2azO6uscaQbwE!Ep_l5p`VFPw~I%M*H;}ZB?}(!EZJ< zpJb|#p>x4(EmnKF2T-`wDCP-^F$%XI+`{D5rAT8Y;$(L(<^W09`doP%&gIe97Zor| z{%x%|4K|RY$r}pVs<0*;4YKc_)-w>PHSAlo1Q&y|{Y5@tN@HevT3Tt}CCBUIDL`u6 z#wB)MuOe=_@vvzEt3Eh@>a!=W_^ea^95C2NlY?CSc6p7LD&;qF;WIyOgfC>@PXWXi zM{bYU;)X%;M{~pP|FJOQ((a$qU&Y_%z2QHM&wenhh2?_(UXG&5)Ki(&FgwN3+R;Fj zsg$Z;`mf8WP}@fgou-UM9RY$qyE0K>ijF+UiVsVWferWytbg*OH-~l z8YUsnNJ?2H%Pko7^~2!yyMo_Zv*yObDny&YCd0zj zyBS3xm0v(+PO}yyZGgajs1nAcWF3Wwl3-I5q*C^5#+3Iy8b*|;!3;7&+_y)0Rjm8n zIp}9O8Y+(Va4K~#j5}Z#^QK{=e6A11EjR3&3415hwUj$(41{GX4(YuiR<)l2AeE>Y zdlms!3SIubQG)2aW&0*^p+9Vo}qS}vLy%k>tnJNDC6u@=gkd(qzB)b5m( z<%Cv1VlCQ{W&P_nM>3cUU%qM~gZ&O2-p_XS{}#{G-$}tHGIhP<{{=xQc!{QrRBP5+ zU<7;?24_CNAUdf_Ywr<8F?qIP0Q*3Bx}EpwajwBSHG3H^RSv4`>0o67V&0uf;F!I&gTFz5#?inS2d2s3R zz21QSaW@(|(57{5& z&kGM_0a{G{u%u=Y%7T5RPP1CktR^GJ34+{sKS2w3O_P0t{eRp z#*b1Ofz!UQE$k`zhm-eOYb*d`EHw}w%HnjgxcfX`dL{}e?*MijKk%WILH&v@t1 zE~OI!04B_caG(-3S6;SPYI zDRUjVolO)JAMIrWVkENVQFW|7VN0PF;Lg74(GTKahzHHl>A2T3EsrQE41Tvj93T8R z>GHcky8iHE92I5Q_<(0psH{iBiBv))dfiaF+Pm%V1hz!-DEugg(;;%ys{52g*h z9AAfuNJ-YOMF-8p6?7);ky-`wMh4OH&kTfGP>YU%Rvd4bvLBok2P5cZkkJO!Sl*}! 
z3JU6RR%tycvrxV2asvgF;7tCabRy9P*tt4nM*R?Pvf8JS+K^HljnQ2xB)I@5cv?dJ zE9PGB_08KgDmj;;*IMSd2aZPvS`P71-i&D_>yYrU@1MexPD1F#peUDE%4=2xi7&u* zG_;9!?AWmg>$-n##VfI0Ti}~)_Z6Mp6Wa1H4A1I7OOy(!#Hvw>;QBiDqUVdR4kF$p z08MZp0B;ohF2%m^blpp86F@@bHU2FYJaP(^aHP;SQoeZ-zhg~7GY?r#?nZ+nJGoFm9NqnEgCD81G>XPg?jZ3Ce88M0 z!yS+^ahMo#qRXK+9=hEH9zaTbHA*D!<*}UEB&z|Z&xA*@P}zgMg$Avwa0AiVlsH~y zWz{}DeFXD@C$9j2_MoaDsTNfw1;1pIfuXpnuR(vr;tSW@F@oPFXE1_bZLCJ1asxK{ za=;2nfev8#k-QIM*7QkGqRs zi8`0zw2mM(UI$8Q5RuMMb0>XY6zLGnX7r(>#_6H37hv1I5*)2qJp}rWjzI{!5-okH zLmge}^T5TdtZBY}=?cr$nUqfO3`Ci17awlI9iQu|E~VbJ-IRyK9IVulw2RSi3~%Ps z$?!q4srZwd=dNI+_IYTHE$a7b*|GbuGi4fV2+P-0kYcZ>#I_; zu(c-J0)~WaVTJb-fKWeZ6^2e*)h7`>UX} z!njlm{`7T80TGcO`i;|@$k3_Mg7PiU>(s$Mk*z`4YK9mm2@Q7YNYWAiE7JtvX&JIl zAZ)Kl6sZXbP%hU|+J`tBa#x5JGe(ZUIZ~1d5z``M-aL0ReH>A^l|U@R0nFasSM0YL zqixxU5f=|*25B_%Y+rlko=3W?eYQ3CLlJo-(htyKBK5|pw6sqrWcmj;VaFShM=O}5 zWdK3u>j#-A@f5gRH{ni^Fib|n5E&8?&QossUyiRbip}-^Xkv15(L4uk_>cI7M#PWd zUrqiW68A>8?AdetyQ>YJKlHP``oGn6JYVVOD-Cz_uVg>ZkH_=l@%(rgp8@*wb2mP3 gMgEu8qkHy+ps|wLk^YLq^fp^Jd%6|=@!((o2bPl@TL1t6 literal 0 HcmV?d00001 diff --git a/docs/assets/images/mii/hero-transparent.png b/docs/assets/images/mii/hero-transparent.png new file mode 100755 index 0000000000000000000000000000000000000000..fd758e074883ba33ae8fffc90d3188bd246a157f GIT binary patch literal 285831 zcmeFZYh06Owm#hXw^OIB)}lq_Xs6WL3Su!02O+TCRZ@$}ASNV)LnT0z2sFkJAO!Z* zsRvAgqJW%|sZ`ONFhDp5szQ)cG6+c^KvIbi36KoNBtkg6_d`3gXZC(Sy&v9B|Nh#a zHInDKpS7;F*0rv+9lEir#D6>=*gZ`HL*e%06)8 z|Ns8^HgL@lzrtJ7{?c*nlOu)y{_pCxU5EeIr#C$FQfK+{{8>@boHD6d)WVL*(3ko zVeovPwPUGfm6x}u&s){!&0N7RjOH!Iua?YY0kA#8nzm>nzAk)TeG>qE*)ci3Q1{+B zbTRm&=POLxB0JM0?_GyFxlS9tr+vQQ{i+b`+6rk`PdH6)zmD*4H}k1ic6U@=tD@!x zUwiHcfq2oA|787JZ5>Xz*LDH?_asSoo)AopwUd+bUbwvWfE9AAGF#O~+IJh}*(HBx z{pTu8Qd9Vx0EOGKBRVk&iW!Y|V@U3G=qUReKLVGPN1S*ouL_6R5{UcY#xnZ1FFfqn zvH6$GhZ|v&nKu{yIcYzTT04A@PMWAAFI{&X9{)LWfz~~^&nHZ{$TIv;^ojhz$DVz5|4aJ-2G~UL zpmW6nJ#EI;&-0xrY1kK7qQ7;j=Fd47$?|*G=Yk5Ko4&OEuB5%7!^0q|c~Xs&gdrB} 
zC*j+STc3BR$gg3V#oU_VUaJSk&a5nZ)U~d3g=e%RgPSfk(Jd#^g z__24!OqexdUq7;AxVx|D(w9Bq7|28wkIff8=^@9wl$aRIm-on@b80#JoxdQM1A2Wc?F|X ziO0xG+3TCO`Q5oSnYTxSHeg2sZ%w?%S~BVj`ZMnkq*jOI`M~e@(%bA9w`Y|mq?6Wn z6~~{R+sGQ(%)zHtbS)DSlT?`iQ&exXV9IV!HSxkT@7`FR0HFFCeBTJ64!p9=>= zzSC(>StbumK9hR#m49=X{lQiTyhs7TqB>w(e$s1>YmpZ&-vscVPAXeY$ zLE3r$vcF9BjNWE8pSlW8QrPyNM;x|eB@a&S6Vc)mrsG&8?f)f{B>uj`a%w+RhEoN2!|&mnCRZ(?d12y>v#Y8woV7P$spw1TTF0*dvCAVM^XH zdX)rk=y)6$va!MSYn5NIlPr@> z1j*mqfoGa!0A8FscCVgrEC6~(V@r5=Pdn{x!r9`eetJQ_AwbP|kq7sUI(sO@Qd&+5 zJzLmT;-o+XJTZI(@?So}MAOmCYTNVbw)BuYobbja7F};T0nRT*lk&Q#v|u^#P_>|G zGQbtwE{{iT-?&kBtEC&CQOrc`JTqUPkQK5N$VrKH7nRc>l)%KRh1q8 zKo<2^WbKxWk^O;RR?()MklqIyqU56*(UpK2D;p_vT{F0o4t2mfojJxvNewu`k(cl8 zIvn$@wCRsn$tNsMy69ui*5Q1_{3ohtiGTiMQ~htzu;>NFa4&e)Sv&cIkS~t`HwKC}P;P-?hm)lcvguXh)$R}Al^~Sz%$@aqMe%Xz*rAW3<_>6vNg_-vq z@Hd2?4ogA*>sAOsPXmKD@2RcrPy}iV_Rl?3kTZry5@{br3ZrijBw@=^>W444Y8DqC zbp6BBn{kL2QaP`rQ?TzbLDCszc^jMcawE@wyd^F(-LtkfhHw~NEPwhN;%nPO2rl8@`!eOZtTiul&;WMk(_3y+)q3@R8-rPC)*LEOuj~3lifk@wOb>X z7SHpjcLVcExKDb8;(WVj`2RT zwLSVba4#I>ZU1@ZE8(6x%X=x7??ff`bo7B&A2Ap{o3Q+7nQqF2y zhm*Zr((j#N^~E`;H9<#)aU|s&-GGIU9D% zrvXpIakCqh)H^ziWv-I9i~RTdnLC)oE;vn^agtc|v8Vl*OfXY5UGuRgEvnn6SNi#m z?)&*Q;RpOU)HAx%^}9V-gsFlISDyBZnlLxjdFcpet_{W@GVv9pk_CbKb&qv|&gV9y zo*AmI;moCj9Qt+j%7A}W4RXi)JGWCu^^sLaLtpLn?C8#4FpDOfhO$}v@oH0wrskF@ zg!f+Oo^PB?Re|+UtMrY^eT8FXv4ES5i$b2W3k4Sqc&Uz#zH(Wpt71qu!pn z2`0@QKZPhroo|kGd3-uds<9r!r=_(^7oHeK5+K8t@I_VjmaddwX#@RZ?`{!SO}a$- zSbp+E=Ou>AkCP!v%-V6`)p7xH^~A2m0il`ecjoKADHYX+%kBG@V+J z)23r=m{3~##rVDv8R@1{Pp=HPJlolft)wvysQOPSS@~y51L{b@ zw>wZ0>?oY=lZVCG3S$Wdtj=A!<8UnQGP&UNSHpviKXR$Za8-0rmRl%Zo(X?Qu=5PR7d8Gl`U?&z6b)r~s(tvm28jq^yOLpyho3`e1h}6*p|N+t>#07leJ5_ZIzcU~Gt3VN>&~}L_Zuc!WRq>pivrA8 zL7nttMs|bjOhv2B-7(aS?!wJh)m!fQsPXlG#P>C&G8?b&Xyt4Ao0Nlzsw}-S7#ZKl zSQz$S^Xr;&(tXIWxcW3i@`#B^J-fcifBj#hk2cl1jF;7#_-PHu35-7NC+uQ#`I^#Y zYLp&hh|w0;QZ`OcpV~BdNsxLgy6f*JZo6S6gln<-)3Cd`w)1UVQcuvwpDJTal{97d zdH$Zes002Gf{!87rL*t4ZZw$8n$lQ^hG15!P$I= z+#m(m 
zNMICA=r-q6?!mAkU0C0mRt!cmy;$*@kmM=bVV8KNMjl3I+(%XLW7D06{G*)cT{_f!i-KsOT`V81InmQtGnLlhF$cKB=A{nQAR8t zrLA6QB;;5-k4o9?p&Zr@8-lp<2j1cc%$JPzlEhKg%C0g!rqA?_TCAW%mP9jk#aT6m z=-x6S8qY3@HAD~tM#u&4)>%dh`-TLDPXw}4tsA+Zi)0oA*e!X?+ zEnxy_)_XpoqwqZiZPq7h-=me~I(VW__S-9P*%x?M9b6~=Qdq#a;-P%%Fz=-M@peaY z5=A~j3+FF2CGB++U;O^;D%WS^F@%SD%6Nfm)pdl3&77$RWc}x2jCvB7w5M&X&3(#g z#eh}ItHx^K-g%G97Nw$I^>>{Z+tIP8QVVfk80T|p=~y_9xmdUuP3)@g9}ha(H!m`f zcX}!g_|wuLEKEBQ^W>RH{3`?0+3Yi8EY}hN(evuqnOI~!X*NdUqQB!MiQhRNG0&-` zupbA?Ev3mPb(o26;N=r~!$j{>%)KYQ-s1du@j|JO^lxRBx>qg8&eu=W?S;z~WZQV1 z$_P&Dgm~HRTbsy`L3bQ9MNxC8PKcJ8hh^~Zz8rQHXHoYg4y272HVVq6R9LT}7aNoSIV%(i}D*RFWQhDzl8S7rT z9Q2$$Xg{{$BDzRh`!`Rk<|tC1#>Fl^Xu1A-qFOwqCS*Q;GxHP3#wz#GEgod@mP0iB zDpIFK?X1lVDQn8eW5W6`i?aRV5MgQ@Q~z~N`}y}ni(?vJ#!e7#@jP2Il3xXQTy2D- zQ*o^$E`V@>QC%ngwZrJ-KJe5>qHA-1R+xKunUTgU z*+ra*#Z9W8sjY`3qn2q#3r(7(KJd2F3Z-O8fLFwa@B;C3rWado8)wAXC2_P(VN%<1 zvi%Itr(iW{%7z&%;Yoef>XL~B)Y{W?1o`ZsS7jf!1Nnf`zrWb3pJ<6w-=*rdaBQ}V zyYz{wG8m4BN0@|-7gt+MIexOihC8~g9D>S!h&r!*>>omu%*JreDD{8MztWtAYiWS- zGUiU5aXq;nqeJSE{>v>YA!nD0zFoJF)jxE4nbs39D*igo{@Z@5!z&o%a_E&>;qDeXX5m?tn;Ts0?=VKaAxaw??g~D z1_F3iS9*y0X#?YQBsxPzojvWUJFRIG z5$au1Tvh${Lh%RY{myGb<)gyp%{+5mttm&fir_nS-}iYRfsaLL8~+w>+nP*&uUKO* z-lF@G^3JFw6iPp5I9hQ?aDkQSJwwi6#}Z{{H%hZLvFV$D=dz{ItYDk!{!&ZQQJQD5 z^xe|WKc7s5O{e`qF|eogSsUo=S{3=tE<>)^#ePF_Yf?qSlJVI#j&1%5>7BYu(@*Mi zrV5D#DubL!>0Bw%-l$+SWgLEm2;N*xE@Ji! 
zi(;g$0_QRtZz!isg z;iD|6^G)e)RSV;<=iCOXSI4Qz7MAYDbtbO9W=}|MMq7h_13u+&yf>`;s^&8@%X8ZD z2Tu)iAck{S7r2yGp_gWw77u2~DzIzP_QcMG;MJ;QsKx866}wi8McjnMWiPeE@xHzH z1D{A+8*2h}6PPYaHs}u`wPL@D^74DSSejn8xt45vKAuBZ$8@>NQ2X3ASS{v?&y&pU zTWQRt?j7!e7kx?IXVPz$Jq);=^K6-(_M_Qnge(jZqgj=k9Ipm_y{HG@z?i z4bG&tj?h;$;jxG63{)pY!z62~#VXBgtN%NyMKGfW6mE!vXU8o3TnO(!wg#l5f!UHz; z+B=@N0Xy~X?YH;@_sPm(9_k3623SQy^-it7256p@Y8Cw=sWM>FJlEz?VWoi1Jdl#c zpR8k0^*`ux`n@={@A5354Z1SN2vw`^K;=%*m!`s%#O`FKBi$`l1+wo}8E;Ldf1wVS6Sl?DFF#t{4f zhLvmyMW8x#nIRd2?cC)jPZxrpru1vNdQTr7whLLG!Tdg5z-DMp=d2&Q<@{XL*{;F^ zRyN?=HpWWNlcg+lOsK?kIorPDN?!K@_ZSv8i8&mf!6eh4^xg<7`jB*6vFXQ-(W8QG zw2a54=14zg{$BdV;#YOfswOO9O8%X2^TU`mWYtmYfA#bpucZC$tuUb*TD21kV=FTG zl{ZA`VfH^Uy9)i6ZibC+?I8Pz&U-y>&V14<59lsjEG348XXH*}7fy&R_eAl|F4(nV zs{jPA(y;m-vS*2?d4;nNQInHb(_?7nxgBTtt}tWY?oI zA147OQWbMn$ozpyhM(&yyWeET{N5zxHm2(f7mSG78X)jx2pppZ=Bot>XO69vM2qgu zsWEfR+$8?zt&N+2YB_WHHP_&dRck+sVqzccy1T!47^Y&~St)fXf%|-+D(2@*wPrbA znUxUZbApi>$CFWVb+fy@AvvbtI~JVopR$RmAqnNax(U~QW+Zy`tY@(-4>;sdkw`f4ZxT z(0P3YF%+PD)mV1px_Ds=8sBs`meT;)h>8!YO{B}Z2-hlM`j|0gKfPY^vFF+uyEPTt zhqRJDNAgQS0;;BOmmg$2tTsx3taisrXRPwExM$=30=M!zwyIxZ19j$RipVz?#=7T$ ziIiX0(f^{T0DlNan_HfjDP9ZQTDfe=N^ljF%}&@F%=dCx3)?M899!s$6LvH}j~Ti| zwYA+@ai;&nNv@ES&L|oM< z>De>!`swe&#_rBzvvVOhpw^VH1kSo|!z$#=tZS8#*e=CQPo+EpJ&%Ru6;atL37S6> z1-B9n_oSJ}jJ=0r}NS@jh%zniGM%c?K4KJxsSn_wy>NVf$tr?u{#ekx;2Y5py_2W z57y`PwK!M0RD-A}T!>68E^{2ot`(2hZAs1;$Z|H%bZYKaEiJ{Enyg> z&sc*d15F}lETcs?iUfSa9~$aAocpfNRrKDm{cUc%zj;Z<=4m88F!s(hKYPx7N8&7C zu0oPGyP+TJXhL=kK5--k_@Qx=i3iY%MI?ie-c`K#Y4xw*xmHveJyEaJJeW?)#V!ia zUT4y~#s^+9T;`(n(u~<-S$uPB2rJv)D!X1}ns7GJ1XBEbcEAvIjyVC7{`F(ea;)f1 z4S6>8lfZotW+wAy%J?$vRu_C_*w~*Sy%c_lD3v70cc2zN;GY?bK|Cz>Mb9x6d(kU_ zxA!_yZ>bRO4(2K+qO=-BX(wpl(^%j0lt+Qhu5fnnzMY}39tFWbhU^aMuagcC-Sjmz zYti5%o;~SWQT(KO5me@;gw#q$97^(#oeK_I(|_ygK#J^p_(}l24P2mV}VDP1Z zo-{wt28Qm%xU1kwWuOf7C#3rN0!PlADnibKf|l1HVE#y*!}^_aG}ESmupIU>yN+=1 z@J)m+I8Q2haT;8%h+u_iQ)i(3NEe%spwNDs$|=a+@cJi6jF&M}A0)->QTC%0La`Yk z2D+gO0u&n3^Jq}O|&Apg)&1_3SG_bXr 
z^qhX1>V3TSWh5^X6PN~6&WZ@28fM(5jX6#2udPL9cTzm9{ev+_Ypx75XMevpm5*sn zwnkg_Cir;f6{7-`EF^?Jq&OX@qP}%0Lr+j7?8Ig(e@X;|8@&`{K$#)VHF2s34gm2m zWzak87D_LaJdEv1vj-2EM`8$AUU-LB7h@w$=)i>?}C9ao*^KU8z@J) z2UNpRP%LAK3*M{6wz2!b5}pFKk7LB@izXn^$DM+QB7-g=Ystm_etF8`uWnD+mxhWJ*OyKr^he5WCQ93x+auDrH4xf{dS^S*o7<&WDT#Zjp{*-DHEN`Hy-q#rocfmL5agzN~S1aiXf7 zac5p2+gx0{@+#~#@JTF*h&H@(n;I^mO`eu< zH6M9$ve0cOQ*rw{Q03qAm$6Xbw)h zKmd9Ek|HK|GHbB)&Wb7MTn>!4fZK|58eLR~zZaWP%s#g0eVY>3^>f+6+=R=UiY7vm zDN$jRkX>cWnWvaGT?}W+!RMste48!vc?@)u=mERhIv}D}@O+K21q4Od%NU;B;7SS) zOh}DVwGYv&Qgmgs1?)=DWGOofEurz%=MV8u%4@zI2#T`s>AxibG^v9SG(EisND7r5 zek>W+c08G1*%zSXi+2H=)HFnUk~>FsI>tjt;7bX|`umv+>=B+a_d&e|c;B-(30Mw{ zf`PVwcAidPl1YU{oanAcYD)CK6^A&-R$2|Xhi~qFp3kPqy15DsOsR`$t$j(rT2g05 zT+c|=K0?U%Wjx73bE76unxweL$5hWOBDdoacxf->Jn;Y^??2s5EbOW$Jx7J5YL3 zTG_ql^&zi)l=KiJltl)b9*^g$xe_GfFqH4=6XrA1n%B_!7@rbFP}c%~d5ioW+29~5 z8sx+eL_SCoax-@Nn4H48b^JvTPF6HN@8rhHB=FU%j$!y0EFecZ5sePdusVzEmOOv2 z?dp1__Z7P7^2L@!w3%u_lsp%sO*_b)0W_mq6e3)i*@-aobCbtc0z_a|3LZrHDxf!c zd@%s3L$kYe!Qeh@r!gn(tM_k;8uA>z@7^rW6iB&mecLV%(v(gMgk(9XbJQSEQi(gLZ@n+L6C1zK9ym zX!*Q`eobP&Tq5BL04u3X`wPce)t|qE!(WcJG0d$2{jz{Zh8E(a>8vMfM+vxaG`LhG zlr{jHfLyDff7jz0RneTbfBi$_-Y}`8tj@#KOyl5DE;bpo5O9>Yy4Q`<52jx3KD0 zeIepq9<8S=m;*d+3l7;|9OMT~*$bPEOHo{eaqcU~K~H+a1+v({WE6sCr-!b$7{*kB zqu;^d%1ZKI2HP3@v!>k%k`(RJ*L-yo6=FVwEVLHdG!U$(Z|E=}`I+2wwUdzx^;E$n zMU(LUg^I`%oqslF{mzN$a#E^396u4n%K7n~f~LVA$gUKoM;mu9Dh9L_fZt_(CnG6e zpCsiTK6^6RRs8l5*6FDid`=E)slFJE`u57i9@9{2{rczc5 z?b8)FbYhcr<=jCPZH(F;ZaosCd@RWKa6Y3wYAjO_!6A0uK zFIm%WU@6eJfDMsS1>%>9?9ptEle=3Tk}Cx}%&-AAunuijYQk{{gjej@gY! 
zeOKS4qt_Tr*HZ=4DU1oKh9WGmKCzyxaJ)d%`$HUT*bv;@Q0YWlIsk}PtFpB#s3%n_ z;YZ6IamEORWQ9-Y*;a5`=d==3B^xwgGY(DQ_z4T(61wJ3h5zJiXngjRodOkXn%d$5 zY>iH?5C?ipJCMzP>}CT(VIPmn61mlcO7<3qzqwOF}&=_i-kos{Ivf7+={0-mYqt|r>UKY zX+I&OEw;Jxm*cM!ry5%b!8|f@(kvz|$o9wz+GXxA)@ALUPo*Rc@vyRZ(9q_wu*1DBtU3Vr1P4YGl#mwyLeCi*E46zX%8 zOtiTkF1)?j5x~(HyV)s1A7i)mhojPCVCptiK7X%7u}IU2%BuKhlPRlD{4++IqdXS$-7UUzZx(Bo^)HG&ClqhsPn?l z)446_o#0sqL)RY7(D{vqyqsc1sZ$b6IXoDs5=zo$%sV`3{f14tI!3dVFU}5r8_+K< ztGOL@ygTXYU@>0~ipU~pkq#)p2Wa}qZ?rJvyTUi?6>j5%jJRW6x+WZ7_&N$vQXSwX7 zCB}H-XPVMPEy>r4IL@M*qKtlSw$*B9K)f;xak~<=%<9x5yU$Su_-E58#_OqkDf~&V z4Ndeg4D{}!$eY}zu=H|g3R7y?<5_#a6Z#s%m%x}k4Yqf1**vpNHlb{)aN+$*$!NUb zP7GnHXD?v=PZ)dsDyp{SqQb8M1%YxybFQk4_JL)N-gH(xIcGhIC%^hC)s%zQ+gugK zl$eH2EjHP8aIG1h`zaO|DrbSExwDWe)wR-Ue>AESOCfXdFtPiMjz|qzuRMWnZ!GJU zr1B^C{EDRfz=2u!0_QsJ<}jlJYPq84=_*P`R+ME;Vg{U48w+-n+*xsj#xDypsM*8h z`}Be#H;@g*DR^L>=*rf_V6J}vxe{fZK_;WLgXTEqv!@w^et}k?^EqH+eFqMf%?oHX zI0aDBj^A~4i^uW(&&A5SBGBzN8JT_)wX;$9tl1~zT-xIlu){(HaerUY#7`wk9cHuA zG_p^K@AYVtDEo|9b4q8J$j120ro(5$9hJ@ z0VC#PT7p^m)rM@)Fy-&K%3t2bBWCc{S1|@)%-uUhC-C+3Ry&k zEM(eNf}*inO#R;>_ly`{TlV3wv zj_w*6+$=_H77TJ(jL%v@#vwKD56y}1(=d--;}w%3 z`v2Ya?xT(V)!g*W;HFSQ2&xJRd!l|yYi#1Hw|3Wo20`qxR0JXl?0Pls`8=Uo@fCe` zVEWz?HygBWXwL@CvR;iSth9X&Ile`Is#a?FvDJ5#o4x1ua`CPE(xlm?z{d=aX@S8$ znwaW@pg|S0lpwCjw^4oQ)=a@Vi7>Rc|HN^`aePY2@=5?~cmZt;(}e;XFC|D9vHC%XQQDt^# zQ`#SieJ>omoa8V&JGN#&etro9v>B`4l5y0R+Oduw_@p@&*I0YEfG|<;$e;nL1PxZq zzv_vBP16L^b9`DTM)Z>w<;r)f`P z*&a`XQ9yc%oh)4`^B{(qCMIiB!a*a=Inxz`~ltC(ELvrwd zn>6KoTBYklwfzv3z&ObRfMV<^mYpU8C{VpW6RiW&5XrgKK82d{Eb6E`L^(YZ{r>22hLyG+6>^H5v)P;(~c8%rCg z42%1>?^~x4V!6p9f%+Q^CUZ?~C{Z!cT*dmM8jGc$0?Y1%Pd^c2|GO$wmXE=qJ}*}1 zV5<#aWxl^&5LWtoZOLAx9pPkUv;ZuO_h+2+x+325zOwTS=3h#I(>&vVZW+#(#G&$l z|36k&7HJdU!h@jf@o6vEq`+Q}r3T-2WnpYxE#%IW99iPbh<2|x&kAv*Qgmu*Yhxvx zomlhJ<;u8$apq5x_gsz)M_w<=)5p*eQ4B_$ZrB_ISTTq_r3pd%dM-<$j# zf2@N`>vkla8~zv95BZ`k8*oc6?~_Io6b+IE4D%GE$eD-C_kD!_tOZ(s6)|S(cBJSP zJF=O2yaa0RQxrz=>|WhvI89IU?$PSU&AUJgK5@eu{M94sp)uV76WfA>Vjn$6r#4qP 
zBCVFoHr78yKh(IXveD3fW8Kr`+COuH!>{c3*=?G>^RXwc6f|I9pqf{w$OghnDQE)V z?H=l2X?(`&X~%9b1L0s5B2WoJ-JnXv*fo25@^bLR!xWjUh0s5sEA$b_;RlnWHVh;Rz?^DsM5)*|K+ zU?+ZhKGH`dtn8s9Vo?Vqchb=O3RTZHWjRP;TK&}v#)6-*NVRTdQP z!kr{lXPkJO@bV3hC776NdXZHl{q9UFY{?g``h!xYsFo&OMU7ddcz7;$R1i9*pFZ<^ zt1gE0yoJ@W2OOd!O9*=Qr3` zv(dcS-vg%`*sY%)fBY}GU2e!r?++VZWZh8-484H-xHcF6>SD#x$e0vrC}~T+e!_?9 z1)!HeqmdE}Q$9#McNH|1txS}n;4~q=A%BYR)f{!iAyD$Bf|)VEBwR&mPV*Yj-4`UA zZ|y}_OcaMBhWD_H@xy+8jSmZnXx+K$4argm^ETZL()pwOg*pG<)F6~s$JJ+b-9#qT zuA@aQz|F6SF&^cB1# z`Yd(~6{u1|FJ(+t>a9a)@`w1FMOskXePPqpw>hm~6_Kj>{5UH*tO_tY&_muDY$SPm ze-AgK6clMT22dlA%k$0rxwtBU!V+q=>MI?Qaa7aN(#2+Bj^7%gma@$C-B&5U1KGow zF2jsj86Hb{C+%OKt54_&BwLna&lxKmaR$U~kiqM+(e8`6lXx7OwcqElIWHYBUAXn2KYl_i3s~%BsfK z=+sy>KGbL!{|F3fK$x!6cc1s22!bBCX0NwRSXUK0#IBPT_TT(aG!%GE>=|Sa#Z3HF z*8*k`)Xj~C0|(<(qwWg57>)lB2n5k2<(v2N2fsa1f!1>nJBj_6r;U&t9rA3H1aLeM zxnzp+sPI_PUbaXT8@J*Qsc*KCnQ0_Jv-p_CUZ;ooSHP@A7_yy z5M5C#AyD21oEs7axM0CHnB@$m|1AY6D!`XIG5e{=fpm&#q*d0Cd6E-awJrsq zz1$u%ynOXbb-u+QTsZYW;ak9TWudK~@!H=ajMMI-S0-AD_XCA-e#~!xVk$aYstcd+ zgf4%AXRV1PI3We=ue7w+CG^1ln!TEqmCHa=(j=GM2c_Xwa(E?GoXt5M`mp1R4Vi4l8(#vHX)%nF}Q2LsvUqq;w;KbY@R@93U1OBN!T3%i%r zWZcv!ffL?h8ddB6Dc@Q)q45qPAR9o8e1Ae)0%J`;Y;WWeOb5m$oqTYllWWijgi{*P zl?;!6Se6Oq5V=qHyls`KB4(2Ws65bSMC4Agqrfc2vXn|jEL{<8)+Z3>cNi=Cpsukb zdg79bl_iZZqoMavsA#X|wJ3Q3!nRz+@mMdw>*&gC4>fuWDSFR z?uG_#P?QylJhfbhL9S9=vL0t_CG6X}?`fqvo{BYn5 zbm|I6d`!~Wneg9YbvU{#+L9h#8Ch_eEFF(Q)88$Qv(1r4Q{e02DqtpRES z>mUgqhYJMF0dZtOiu>)I*hL9?uGChwbd5CZpR>pN>)Z~qZEctBse#?B&pip<+5u{* z$#_oD+Cc7YpjnOfaw`dvZrAlaB-Hr6X1{){q#LA1&dc}DOA%mO03uRLKxtS_)*B1L zRMAjl0KT`&-9e6WqnVJWy4VZxwcgXy!JcblfHS|Kg<4|%pzK7#cM?sl|A3v2Yi5WC&|86?4TMO^9W>~I9p zwdWh)(7J4g(QF-egyz9dlMT7319|O0Obh^%IDgf2UHUMRu~^~A$$SGv@9RA-a4)0j z>|euJWp&&&qyq4eI%5Ff6m`)ZG$~OS8>T!?S*_MXgA!%RcuD6WO4@gwl9|i!lL}EI z9d$PG*DMTQahay@u-ae=8wo}?I)sKaa585W11Y1K!}Qzh2;_nKn)|XHki5#e6!M=0 zHnGokG$q!-11Y$3AGO9Rv**KgB@042*{j*QTh-~z%a~_&?|C0|{j@GIzHJW+GluMx 
z`^hNa5)-pvz^$p%eNtD(5va;%AD3lr1e!@img-lkOy37lxz9!j7^4R5JJ%nwtLO(t%PwhfFy`kCLL;v1F3oay^u(@ z&dVnq7Ge|*bIrR46y-(oeoTw^9#*DrTq?W)sey6zJzr-WPZDoa{dZTy)GA-|)H_@Q zOk^~@xA*u&cAUS;B)|t)E{oR!dN*EqUPwo8#6eGPIh%e#{&;lNw)p;ZI5e=#2VTte7MUPTTM=!Qei~X?k5JG zY2caD6?~Fu83oo@;_+P?-+c@G%#j2|u?OcMu1!~#rCxU~hDomv@r=)s(oI4jlZFJm z55OP5(}#B{+fS0HUG!FiSuOrjTxS0^T=Fbt0wfp))x78(3>L{eg}3a3DMxJ$_aXW; z7)O~0ASLoKb$8oILSC>0C&yK`ZrJP8ox+!;q+`Tc$L5X-mOkLmBc<2z5u_=o4<;Mi zONvZP+HtAUzW!=tf}(p}7dvHf4SbnJ+74desu3m3FP}8$=ojxz1_B)K3AVKp=TgaC zet<~fJ1p$Tu8eD7SI3`7uW%Zp)*1-cfSj7Wo|E|M#!uNixWn9)hul=6p&yf-tEfTH zTdoppg0j}@V)q8L3MC}Y@jE?yei{v0z4cicc}ye2x>d~(2|raWz> zjpB_dU%0G;Wrk z*1z78O>EREB4Pty4-eF)&>mE8!fR{gp`JJU3Ys>Y1TW$kKZ_EbXTbCco}TeoaeDaJ zBQ71ZM}le4B=55r0lmuiP!2ikr_<7Nj0-H{^cg%#bVt)HZu6a%XKtIItnbhHOk)=j zAVRbO+v5>9-B0-0wu%IdY zB)P%Qm*-0Wb<1oD3MUGrxx}3SCQ->1aHAqF3j0Frnt^Gv-T7|d}sf)&K0R%ucf zTLj+Z!0CNF)R90m*pPLz@Z+@U%nLK4=3nlm{q#^>ex6eGZm|+&@yU$p9&hGk|3N1H z;}cma%up^H9N*E;TvM*fyO8L1s%wG3Cz`1*T$-LvNOKK2BfT!l4m~&sTz7Vt7w9!@e_R;nA5a=Fx0{(zwQ+f=&3RSM1f z^i3%cV)V3bUqC7BGOCa7uE4x2sPR|_7xK!)5Ibe{g23lbOdJ@q!0=~Ur_~EW7EGE{ zgDQtQ>f<1+@JEFbP666SB5oJUL`h)e{}$1HzOgwN`r82_K+4)F7ztc8Rb2fMZmUdT zSMwzWNqhlbwrBc+N6l9+gO6XKwCfi`sB0T*$@}$&(WQ;|jOj$XU06Ug1XgjX$a4aF znhaVhG}k84Pg*7xo1Ihp{BI8^qQ_lIP~~+%1!(;~P_BR$6v${Dkf$R75x5mK-+qo& zeJHSDoDixv#vjZ$J~fC(2}I%*x~dy)84%d7!&fSaV`@?#CbHX=W@?Oz9vD?NF+`Wi zT2b{XqiQmj*cu9kx8UvlNK=G)e*_dIYd>Q|GZu)ViP0f#lD3^uww<|ZNG@{ zyU<*6?b{hHWlTfjsY{CxWBGe^aElBi8;}^btYBcD zK5K25yE1gTuhuG@y5Yt~*QuWVAvx`!9y z+8b&Ji{4gcJvoh4z>-MFId{qUmfabTxJHQuO)e=C1mRa7zrei;G>nYNC171q-LZ#F z191woGHey#FY!RbZm$uIv3SM+So=X1C}QTk1U32E&xz9ciCf3ARMO^E@IIGgG9SC$ zq^>*P53AZo#J~EC7jV^jh@)}R7tF`^mXSrq;y5YE<8-{pleVe=sb1Dc}9uw&vWB|Ssfs@ z|3B=#d011|_CIV}TWuW>M^GRNP81Q521SOn1w=)piZX>kWm3Z+A|Qk$)`=MkDl?=N z0cA)8Bw>!qBvVqR1PBlzLWCql3`roiyT9!+2sF7Zar@JhbKUn?cQ7`>TUgO!# zhN8?9HSh`0_t>L_Bm}sv<8mbHM+;K8HSFq1D6ZnJAN*-!7&(}0Z^PJ2G)DB>wD%2# z(Net9I~AKhQ~;)$dIDLeN4IO` zBd`sf710+0>g|Pr=C1j=am+q1e{aVoL(r6-A_t=sk{SA(nqp 
zLA4zdQjMSwR2cWog0^^Emx@e^i{uv7YBZ|%uh@^wZ7lv)nCGoZF(AbBsQ9CUjwoTZ z$Gv_hBFM!xHek+2soEX(+<4G4n2Q_((KIfn}f+Je>L7fl*B~Tzu{IA_W7Uln`kYNww!ErEbTw)m zBavX1nQdvm$_6>MsepF|>^IW^W$wz1?+7+aVNRr{eP2%cm(wJ;OkF`-soR@-sT1dPbQ+lN<{h(jsvJR^=jrCj@z6@yyH z+fJ9nL47JDX%3x?gmIYWP!_Ps9*eptiF+Z(m50u*Mz)1~{DHF5{xXz$@Fq)V^S=?&I zr8@}Rz}zUet>uXGNUSgbbG z%}3R;x(jpzDt0=QB6}-`XanJ)CE_Pr0viKPU!2UcMG?jfR)GQY`zIuNjV4y8rlH_F z8pbz`WrbsV8~12o)+M)L8mpiyuwI>droT;EMEhMVM_$Vt)@@QoVf&?By z1G8+U>q#;Ghx2OPcs0nPzKqNUMN|QJmMP=L31eLghuNI06Mp5J(Mh`qTMGOR8Yb5t z?$)ckmQ#GRCmAeVO!bZxobx79_k=z#_X+f43^g1JQ==(mYPpteshZ5-^&eNHQ6|zI z2Q7EsQ3Vf$?d}T%YmX}}opz4%9Fy9PjwT8@_pwg-cO%jr27BWxH>sfXoC0p*ajIbA zomZ%4^mr(oa*Spak^CSRY?Q!gNvbHnuyjAMG0>LQ;prVa8?0u$&W12>HSQcmY1Ii7 zc#yLk>X}=Opv}y#k3U0u+P}9zJQWuVPPYo08wVcw)((I=4n~W*!RoxW7Btw_A*-hc z*%UI!cEXI5-bX&gIai1r1Keo^(q)J-degb-2y~oJ^F)6rmhq$$G3enNf1YjcbIPi1 zzNl)CAAH37+zHW9ws$m~`CUN)u@cYO)SvE*xUUSK$k;#(7GzdrQR~k@VPF(~eLCm> zWjBrH2d<%IM#mR%#wmkiq47OXMp0FEMs%0A5#_WMhSgtvYg|xbU~ec__G%wil;L|N zV6tT|#fgpNS@)p=zIJfVhO;wvn-EJX2?GrB3s$#=*eSu8_pO@?`2~9~SOrV9e+UJ| zhd8uhsOV?LOLR>c&$7JE@OMSo1|~7TfHan&?1hUjJ&i2zY6LADBe%l*vdSB0>`u_q zLRuy*c&CxtZg5&!aiB@O5Xp{s*P&8DE!gXbjLpv~($xi9x(zEL&h*Fah)aIR zj|e&(DK25Vd-{OU5MB)1R=_=Fg$XSXl;@FOKJ}+h4Aq)f)Wl3V_w=~(GvV}y)964m ztQK`ZZ|($mMN-pVFhJ&FBaBn6Fq;bzRo=%}Y$EX^(oc04k8OpD9`py4rfXo#5b!;OT>CL9TQVeh^^Wg3kWa;`MjPLkaG+<6pKynr}2E~=3hAzYg z7oM&2i)__sJiC4yGRz`qdB{W1vpf@&-H+7c#)=6meCQWewc&_mxcetycP8g@@(NE3 z;*_rLns_?m`L%@;X1Yb+Y)4^*yaSQH8_~55SF>$vu9>mL!#xuYhaMV6M|#&-^wkcm zr!`uwDL|>7sK~+j(4H#t%Ytwv`$9`=IX+$)q}(E#JH|*nY`tK5hib9`Xnw|bA-p$XD z79&YQI2#i!lxeW^GA7b?K*(Pc*jNVEdKeYdAY+5TqdVGeif)x->uusMiEZa<@vu~?yqHbH`#EV{&7HB}CqO4_Nkg4o;yQt5g1T6?G z%*-oX=D;Z$$DFTM$td1AA}vETM-Wsynu;bJH2f!f<>z@RU_Pm$ZEWX+vBuW8dqg6s zQKd(~?7gMUZ5uoNb%(sLHdyGh{0Ay^Lp*NE9)NIg{6vT){N*2naN< zM!gyK&cGVx`E}rtV->7kQ|`!p{$q;Pe5c-lvtd8CM(Esfm@V6HtZdO^{ccZS-(qo9 z>P9W=tl#GQ3SY=O(fQt0~}h&yYuD| zUfZ;fANW&0;pZo8F?w@#ea{_UHyREO9b0FlEO@t%aCPV() z2+VvotIw2O)WQ{v& zfWx0UH4iJXMcS0_EWym^851a 
zqKda)yuHEvvSs5y2i58A7k}JbhCN-mV)6`P=O16Z{d@#*|9Wxo!%xfk#GOyV_Wz%~ zW6h#qyKuk*)z4l202ZgXYo_i}CQQx*z@ED;D^>wsx<6~09+qP37v?TybOG_F zKQ}ftl0xdg%w1aiGWhi8HaCK!fSQ!=!O+DBh<`@kV3vMnlzeK?GHQWZ8+MENf_M*m zAW{ADBE7$k`j)}&I^DtuT{!>ONo3_fUbhpa6BpDGn`}*%O zhK=2xDdNAvSbyTHk@)WI8lrdWZ#>Dl!+SpI1A z5<%T9V$ngxOQ#~fe0%-j*uveL&40BxwlMUD{V%J3hQ#P95==oPX2d6Nd{(iP?Q(44 z%afbU7f{T>8}CK|gkbt;$s5gaxpm6hym5s>qWa4xE0(;yUb(y2{Xo?B@pl(bFD>4v z`8iCjd}b{~=Z8lD%G8>>i=RC{5VasH8ocR?VS~)PyzUP@mCWpmirJ*L+5Fk{ zO>5s?!5KdVr>AyM@rCkfaMI{qjb^>eK|K@(bGdSvwYv~`Q1Q{#gw>xJtpjgLm1!98 zw>p2w*v_WXHOi(&2cjM=G2QX6Yohuun>0T+PE`M0uO6JV^khsVC_M`n%%H0meg?{| zXpYeuAT4)-H(u}OI5??w^OSpY-E6+m6eK1{Q?EHP&pcLIb;@wzf(0*Sm%jcytM$Po zC_@VSL0US^5!KTP0DoKkhl(s$q1d`FodRj5Gf%lUSCE)DKw^R)lzB2=)!q6eXz_vt z&m>bT7ycogF(54~=7`!}F0Tt(d}-4gg?!#1tLUlH2uL#*r8&yIxq|av3KBD6_0L)J zWM1TU_!@V`f(4I6%q@#A?X0Uz25G4T(lTI-9LeNhL`bovlTk@Y&{3jdqlMVXX68&U@ zesYpNIZ2TjwUD=J!7OXN;FZkvxt0*?vQ( z*&6*a?qdErZ*|!9O36C+Ps7d^%Uo{#MC4B*^&SBGw1NKTa3NeHZD&Z^NwZy8`Lxa| zfPlKTVfWyoGw&M7dMJV?4>ey3>2gZaAGK)rV8NOBS_AC3PD60CUaVA? 
zkNU~S{N&L>Mc=0vA-}#`DMFs^OREtj%;W!cE<#J~=|%jV*Tn$2IzRL++5HEQm$+I> z?KW*Tm+t(lg2<&%IW_b3rOjI2uJEbvbCGLar_|?^i_0%_Pe^Q{*Cxuq@&KvHfAQz) zpWU&JQj*ezy8q2ODBBX%@6?u)pOMCKtAG45(=tTDcw`#{JqI8fUa|TZ&(wx2^rN6Cz>2=#S_xzy_VVU6A zX^f#Tsm_~XVB%-4;C4^qCqZcI_IKr{cx;y>1C@b6XeBK1PO|QYq>1*s{wr7g8N13C z7MHivGKp0Dp{R&ggR}Q7F@=4G@s{|j46C=r5}e|jbJXFTk@SatjVXPG;pFij)X0|6 zobAZMejJ%)1H`F)d>z}zfWk1K51}TEj>I|cka{uy?X9-VUMRQ zlIr9q0cRwkH{YfHC-jtpb_WpTco2N!*mT(%2|GpTkxRHUbL?#ek6V1^{64av)2Q(` zxtcmm9||N?UCr_H^rn*amGRO#)_(sD;!3?s8hkU z0s>(Ma-dX{B@t&Y<7QhOX0s)$s>dl?_`5#DV4?elnN!}ci0Y*=u&=7MTxDl=sQ^A- zD~`rPc#S7pev(@x9*8jyG78&sq|acuO5cNZ4I#e5uy{Ws`qf_L0-LZJ!mR8#9638t zz2#c0JS`IfK$O!yL{m4N@a6#CUpsiiAaekA<6H!~Lpv6ZSJ_A6H2FC2^gr7Vg zR%zW2tfR%8xMf8K*zw=jkzi@772R1-@CCP;Z;RQAo964QtFyTXwzj75iNfsQCSf<_ zUU1y-zAE8xyl16d$Lo@_uH;`iU#8B>IOr?%70q_FZI#y#VwAAmv+L+hq$_fZr2A72 zTKcU&>8tXDQ?5#KZOzZ~Zoa96E>}Tv=Hb$?zm_l*RZ%nj*i@LoaAM*cYsH;1N$_!+ zQNFzFu;|W>eLLYm{hu@Mtu z_e&fTh^twoX`j78XO7iBqZ?R~XQP9af-_9tC!(RseYLM*!e#L<>xTl50fh+RE4`9^ z>B-XgB~~AtM)4_z&ofFt+S|Um8RC)0vUcXznM+UVN*R`Drxb)&7&WSW_bscY-%wJ! 
z8*T+R)vs_e&OZ(*<#$C5xBja}^PT;wqd~WuL=i1&u7h3nZ~}6XPb5D|ypwK<{lG2} zYW0f0c=9X}_~vo`-RLjTM!I*zk2So_hY~XlXR>wt3$^&`7&7~tnR@;Gz=fg|eg*pa z(cCpn$qDb{h0A^d-bq=~UI!SvV@N%>U09h2QB8P23uCWig5`&#TZrftYdkqW#KAm1 zMTuXP>R)3kut-4wodoy5e82$pR&+19u==Dq4@cL{3rOSMEL@RV|3!%z_C zzBO+4w3aaGH_ia?qHB|^x8;Mzh`IL&MfTIn##c?@cR4#@vrS^ZMkY62?P#u{mvCV3 z&}~U{N1W~5&4zyQSQW3Zr1y?*D;>AS%x0bv2LEh3ebG}W5^=cU$y?19en?ZGw-o;d zOj<;@1&Nm|Vb4yHUVO)QYq+!U=j^9A$W9+qYP`}-;grG+ifFTWo9sYoLo25DvBCQ{ z^k!jlm28Z(k7$A3?4PMkgR~6n+ob+MVi!cV%O)(YwsFF*?O{>mmv&{qx9)$ap0^u( zSuoz%v|=LsW(i};!A_{NAMTjoaH9kx3C4(mVPt#;oB8YA2%Rl&O`3OMQoA@li({zM z?+Uvz?AU{ra0CS?{@$S|C-R3#j~}+ih%K+w4?ung8f>Nb`uHUd6%W>=f{l|x#1PIl znzW$k=?(`5(_XUIV|%HVrg{Vq0K~2U8#6m+06_#XfmZZ{+h5b0W`2CH?xerU`$AYH z+U0~00OyZB)YillT#GpQArzu1Vh&?v8+T2OHeH(u7R|c)n0q#p5m{-gb!n8D!ZqnR z;;H8wTjkbZ7~(gF#7f|_M9KBQTQK;G_3CF?Mo|OXhd#Z(l;(JQ^-=CPCcg&pL9X1c zo^=E82ciz(M0W55VzqLf^YXytNaeYx%suye>`owOr_;ZX=P!f74sooAH9R76x<@U9 zKr&Xr^cA>-#v0@btb8D(U0qc8wsS{J3-zD!_jZtPN~)1mNh;44Kw#On%*j#r5XEwYMS4COVjbGv?@DoV6y-LP$C15aqd9MN4T1oA&hgL{)oKD*V1Y z=QzqaKwEm`?Vk9HlPC67bV@bVuR?E)+@Uadtr8c_eA)0TV8&^4a!)eP77pL$*XpA*n!O~S&!}W z<2Tr0n>E!ZT5+og>NBT5G@VdojF=gG6;P-9#kp}y%>g&lB3y0O1y@p!D(!%%lF1(i zc8Z;U&3F6IteSIWo=l6Ywmye~$Y-Q`JECOQhkH3dr;V2I>TqcVe@MPjS5((jC0A`Y zU(|k3(X9`J4-$}!k>LS>t2ab;TZw(PrUpB)4fkGf`*0vI2vGuyVw@`|c|HhX>Od#X zo{*oM39~z9c7fa-FU1=*1Xi-;4Z+Rdn5OobC2lzsacTKmVSUG3`WtT$udEEF}+dcMXoQQf4^!pZtA4!z?n{f#$Iozqxi`i$F*T;X}L z5^Gr!&~qcon1(oUYw4(Pv-vGkWb0Mt3a zN%T4Cl++%VSs8h6TLGq#&;JPM&8ojoxqO)Z zLalLNJ4`-kvhUW+RjJ(ZCZZ8CqjTtn(*22Z5ZIO9NEnF-1Bif?LgtD_r858IrBf&0 z%Hcb1*5AZyNr@&E8^IAOwM`9yp#HIrZSpDL{~r8k_B$N)f-Z&kqrUoU#Qh&c2o7F& z%moljQ0aBT4ra=`VQkq#fd06AXeHp<4oIG@q;}UdXv)0&x!Ptxu3!h3-7(EwOcb64 z-&sEmt(xo)!CtVfF~*jY^TZbn|Dsj?CIE9t{QOGQHgk92cEn?n4+IwXZaQMXdIhFl zl}0};Rh$`sOz`d_BfAh@n0vKJ_waZ(9L6i5Oe4lb%_^|SAi2a-&S>aVc|8Hke=q?w z{K^lEp#zC$AjXM*9{jx7$d7_W&#;H1A^;K)=Cktg5I_F3??BvUak4Wr(^CEnXsgT% z8`+1QX!ZGv#`v(L-MATfZQ0j~_&+5f&{sGLFPZjZ1CVe#%z~_*E;?-kKyDET-MMga 
z`1ny@c7;V3(d`_Sr+qv?SztlRYb-uH8_e8oCpvRtYnUDIbQ_L;zjh$)|C&9JEu69# z^snid`GObhj#*gyygI73_Dq*;uHc#_y$4IoTZW8)-N6#FA?e0UqfVOgPA;zs_KL?G zcLw`{Zb3=w15Ny6T_1lSc=zn07;?UL1hZy5I)zNJlkUB#`{zj9XArMBjq}GzAFu_gMe5LAEUPQ&9g)L&4=VL z@0jdz=`BvvJr_|6oZA7ZEP zNUzc!%X?!57pw*?Iw2H zFnOysOGCI_vac@78zOEoX4Pujh$)1aqcEC<5W1nYY?|f8m4HIllk8geULkvu-061) zr>h@9^KRkxZFjYNh(It1-b=m0DwhS&_JtOfE*t95V+mAGR8i~457<)FXl>&IGq$yE zh#fZ&G_kFijr$F@Cq_TKLsSf4Uo@@HuQ18aJRu9%dCF1V0>ymuQ7NHSdt%Oqx|Dk| z!VWp{BSLfCk@ryya`~jKSSa)Xd&78U?ayU}TO`Jc9L4<7*_ppz&&u?3T~-9`Mo-2{ zF~XdU`dMDb$spCd)K*;m2&e;Ml9Qfk63b|iuYhjl0PE|mO#E}NqvYAyg z_9I!)sW^0dVI)mH@Uo4d*3h9#53t?wC5n!zyQK3Mo9qh{^1I4ta@z~hS2XF?UB)^C zmH1vB3fE_yr9rn*r7HnMfOBYw+u?ZjF|n>le9Thxby z%*T-JdTR-9w0oGQY`?R2WkYcr+SGSq|C zZzg8qD9xdzN_Y)Erb`}Yor{I{|J3kf_d1q!tXtPB-UN@nnib~*zPwX0%iJkAG&5M1 z+KJr&)arE*&m#3`(=cY6{rXhu1{P-7N4W;6y6E=+rsGt(uCI%SQLETyRJ2D$LW96w zrNLj&Z+0X*bd8~F<0g5E0ll<@h7oXUMRmZh0ux`szwbYP7Yj6BHTKLygJN8!w)2YNuMH1H)@WfbBE>nAvfPB)Y=1X9O zZjoKGmGS5`?l`dhQ;LfQ4jXHuM7!6oHyUBUq5mS_t)k5ycSSFIzvbY1h}2BDn2lpR zTf*=kyWv9kO$|5u1!E(d)*0k7c*A?Aq%w)jQ|S^vsbyF;A#{57dhj0vEf;pW0)yq4 zJM9i(J2q`#Wsgfp`bQ-C=+N5aSCJ{kCAi>enZbzs!jeDdb^e;5M_)nX4TN9oop#+_ zC^Q$H7Y?~o$L@P^D|Y5!ujU)0D$Z;6DR~Ln4fg1u`n28piQA$J##zJ4v{dxktiOee zl6Ie;j*L_07d7;7sG}&oQ?}o)+}%kW@Wh(%8-y_<>Tt_)))4PC7DmSQ*QyR*EM%D& zrlXCX1W%7rz{ZThmQNeSXT*p?E{5b{V!*JRJ>I5vg0fOA*aHqzNnOhrkA zEbIKk@dKw_108{GL(Wy9Pg=H4)x|=w?hx4c3ss?w?6VZ!%fp6yIeCd4F4=yBPkP^_ zWUZ^EKQ}~};_-V1N{095;Caq)UhfV6oy=;^%vw;V|1cd8%2~CctgE(BATG`#Y#T6K zhp;ys!mcNMYmx-+d81GxiI~_gRZeo7_m8+dSmDzIeUUjUr+8xuDP?U$4L!3sY~h#Z4^tG*xq}plH&~n4~+vx z<8mRaD;=+$9p56N~G$i<~ zZ^{))Ze&|Qs9|mfo1`%$;o5kPdKne^Cfx(Y4&{DL} zjk4P$J!u(j66HB9P^+*@=SGTfeT&-3_%J&^Dq+hsv+S-6*)%f<&BI=YkDWib;|NTF zQcj*OeOXv5T@Ru@Fo==+9Y}nxLo^&#X*Nz?tU9s6SWiZ#jF&0&I}J^Jx1ld z4FZNO$M^Ild3Ouak;5y=y}zeZ*ZKCHDSScS;5qGqJ7fBLGMBC^+aU>ylMiSXPr+oI zS#0t3?X#Fy;&?8jZ zuEw(uzrER0lT;H(Zujf%!I)HTNy;@FL}ik)=Fm8N-+aDjEVE4P4-5og+X(7k2HFgqF%U*|@4}=mC+~ceyampD`o{ 
ztr}kc&EmVUpu4>dH&u`V*1N$9LCTU^g%QBs_qKDGv(FFV{JFV9Rm*eKlP?GL^8cu#vM}%+`+m8SPE1bZ?W)*fSjh z(i%i-hZ@c_OlNcyxm8>wYl+3wM$R_3`+0aKM-a4c4&)R9=VQrpRUZHDTS~IwqT?ct zhEVbwQsi~DFxI%wgp>CIkGvMZBzR=yRU2abWRIt%yQ1&^L&i=}*Ka^t?iq{gUCl|Q zu2ZWxs1&GuA9FCZXorvK@9}=ps%?!@=Tyx;0aCo%qK)m6%l$e&<7O5#pu3A=;n!x} z4P6i5$RT@@r$T~;jxj4OC1tm2t>A8XkefN&jI33-3VBw59=PDRK-gC`7LCWOG4g%_ zBkp6uO_*d0B?(C3aY-e0@~p5vYwTg|gMJpLl{?;atVF0f>_nG@ET{dBWl%bX#HG5; z?>Hy2Q(M3mZO#M>X%pC{;yv9eYHxXG`$k*pqa+B`;-f}II)(c8iNE;#=qb?AEWMor z3iMPg*snA5YHe5$w>B0#^Vu_1aC2Lb4_d?a4<8frpWNd)BPF_!ObrBUZF~l!Y|35{ zY_93Bj{(}UP?a8G!dd6x{uC}nffALIRm8-=KZa(#m3z{-NBwdbH)ASAzth&)y36(= zt*7r5!@DRat@^U$1Kx2-x9)?z>QNPXxUOV_0v*y7FnS`XJCwM2w#qwxCLU~`@t!Uk zb8Za1%Bph4i2mI&hEJG?7HWt78nk_bxvr+drYFj%RIUCM(@c1oo1-Vr>G$OJ7xWol zmu%@V;%*=7@;scuogrgde}Hcgx1z#nJw#(_mReRj$742^UU5++53FB!L9Z1`vubhB z;uRbNjL#0iG?O$o0N)M9O!QGEK8UrK6?PR$fDk93E}04Zs`?e!=10nc0oF|8XJAhr zEZBEM>CqQTQ~=lucJ2}JaOd^NmoCrxi% zGV(=}k&`KG9m36P-03SzrbagD$ff!ep-=xl21=4d@CiLqe#2yvmc{6zl?8=Kboc5d zg$i9HL31X671o-b_T{rL{u9ZybnwM;O0@s{ZVe3kB z!Vra}u)NCswm@P9hXu=E{?J1#{o^n>j*m7Sbb<$woZWghAXw+ zJ#pS&7-#F)_*@hTYSL+E`+|LA6Kb=JVJq4?T)nshqVz!og~tkO7sHW7;}ye->|iSi z>qV3Qc$hWiYg#9kw2dsJ3YzUF`jDXV{F7|_O%oo`Do&pzhq8>wDNrlhR*`3n+CYt| zTQAo4<+2Y@Sq9>gQ_%hXVOF!hLYmIw;=$lvfH0CBmajvcOVJ6d!J=ng326Dmy|=j# z7*k5e2BZfl%VtlRyXxypgq?m$YDZa5JB35PE&IV$U+&yNtk6mKak88CUW)q&BnX0a5dAO!pS-jg01 z!Yw-H?05iM1Bhm@>E4-9CcUu=W7Rk-|)l40;}EGx+zPozWX`@ zHgb6PD!BUj1?%q`ti5pKb+#zRKc77!-@E|0U z-;KjL4kCM;%MdiypqG*q3t`DB+Q% zOL~7Wn7pli^3);42Rm~X+rtgzh|`KoOFn<1ysB=nu2dArLZ(9D#z@%1G=@dHE&h@Y zu^U;$>y;Z#Vr#?uB-PtQLt5k^mhtS_fl^~(DGg~*CLc%}{lk9E zE2+m8mhN76OTGN}F*BMi(T!Q1g&61dQvS$!nfXsaU0%HU2bWcEFzOs8Dxz<83xWTX<=-xKfbDKD4F4?W9!ox)rq3bPkA@6agrx1O;vcABJYfZ8t*6n2mv-f68 z3qtCtyeaQi(K)s#kT7abvLm!xv@zn5O+BPs&uF{Ynp9$$pmgvxf5WQf>f5WvOM_3_ zKU&`6k6-=S5}T;Yftu!z`unF`S4E%vWz7n5iqfRo7&0<`U>kZm%a_k#vqUf3c+jz8 zqtc?jQXW6ag6IsZnk|C{I>`D<-Mr}(;Zc$;{~IL7D;||`4CC8A84?{4@ziU}k*O-1 zsEygHA0O7d;*h%JkfPI>fIC~)PUTE2BhGFeSSuLw1jK2GJw_~4Px@v2FN24}_6xJn 
zVkh#h8%2!P5Lwze7h%^jPDtS&i?-&Sism5sZ9XIQmykE@de4)MtjQBiTWuwD}#d`Qv#mU@b&`7W)1A2hEl@j236uU^}& zr*Oy*wd>Lltww<}NFUwv(SLaK0oI?OgmL1T`s$HZc?Tu zcSqx)qA)ViopBY}Xh<5%qsS6wJF1x=-NP{MReb$P8CrxtTj3=qOA11Q;%ZJKk?lD{ zdi;rp|I^uLD?Id6I7k}^T@XA(OIqZ|>+q{$G#T^Gs=)!VHqOT_UbhWT^1_|~(hNi? ztS1E|eEf?h8~)QT#!@-R=_nrxy}E30mM=dm8|#>zdLf^L#p3p`{(^ImmQ0L7hyD_T<{C>T z_+3K0@iTfNR|om1A_whLpwaXbv4=#Ei;QtxV2DQH%_dN|id=qcXMA3)CY+aN08J;t ziBsPM8uAHI=RqxunG{zG6h_s!h_%(;D~PP~At}QeX_ED~2n%z^iKytdR$b z!mwFT9>2p|89|^RVcAHdeO{BQb!9w0wfdFu9=-`74OWG0>LumY___#fCI@L6bn*pC z21?{~u0%Aw=6}hV?ox$Av~WKj>KEY}Nu?b)l)_gunh$E%Oj=FTE=;X}#Oop9JX^ht zVM;ZlW$`U)jp)tr#i91}VGxe5+O^b}Q_o0&Z?U70sCsA>Mo(WsNTs~;D(eVMa(=R<>|tD9SP+ILGN|+j-pw>*KjP*+{HHZkc zwT!VUR)xWLZ_ zl8@h(kz}k^!V}rm$;Nng+6Tj8|0BKp9@SLuh46^5qiFoqs93xJd|}dg#|hWCOGGpV zI>0M{Rms)wk@^CT|Gn;i?-*&VEAUwspw$9ul-m`*u6}%7f7`j!iVLbwX#d!K^GItI zApV|&Uq5L>(vh(UO|F986BxzsfM3yO|A)aUY;T;2?8yS<`Ojtp$giIHu^i3j#?ni+ zt385Iyd`^3`w?^I-To*t+LRQtu$2JJ}5grHv{bZ zX3!4f)Yy=)!8|s+8&WQzaMwOTb(e?~O-5naKly}iqK)HZThl|Ilz-EC%BEj|11Tz@2*+aVCD=p#PDzwN?;oCplS07Ps#ui5aB zVjp_4y==;zJ(M0SgT_=DIf7JgH?sPxL-0A+92PXxg>~Y8Yy)9 z$?C(N;XK#5yuf)EU@PbXNaoHG;(#3`bGDT5~p1^6geYxPI>YNLhPz@po1iyCU*fFza2($HN71p=1;hdjM91>K( zDWC$j1Wj&g8~_6N)xyi?8|%-1ww#8AmmZBValPj!s?#_ ztO%2(^stEC*CGm&W?1p&fwa|NcmD@*b3lJ!$WXU1UKDrC422r>ZFW6EFTz|O(b0hL zY1{iHsSL~Zyqe^q$(r%C1s|n!`{Y#QAI_HH51Mz@t|>wFbgP?M3Cn7q?E+uky65PP z6$vL+f$vbgH<|VXd?x_41B9z}&7hefNSH8{PVEBB&;FK!vyt-#MF zMHZ-RQn%b9pLIJr2)z!zN1-qqgwvggAg;09%%(OXF9SnPsNZ1@!2xKo){;Ji%P-NQ zaV+GoVnuHq!?;Bu(8}7iYnwCYx)0L4J zq5bh-t5e`>8Q3pCXnnom7VB&`8;~Lt`^53?@uNS4gMmd>ROuJ9vItWJ*c5?_qu}$H~Cve!02Md*_L`( zz>fT?++FJyTn?OG_d}ogD-iNChl;{QOhjFsTtGtHS^PU58LX-9Vwk*GZY11OXt>Q5 zlZucOwz}Q^frM5K*l|QQjXqsw6B4L=XNx}Yk7VT8vHG%CKaM#@{~AdFocjPJB+{%7_@JD(vmTQd1x?&mFEHD7@f7H4|cDU^L&m-qcKxDa!GMY zL)n}W{wEcZmuy_<%J2FDuyW?*2@nPwzxmu$QN1gE_z-%av8KPb)CmcL4_QL}f?HaL zT1$uKNIO{lk4XDq_0i}>eI~xM_g}t@^O?NU_H*(rjgRWKyr6>BD+)N1)D(664s$1& zKMax|4!YhqzhTck 
z>!ocjl|LGbQ=m5ZGQd3wIT4it$+}I~3^)^Z*%s%8z7&)&tzZr)fF=DQfN4iF$xdCp z#Nd#kO0I0_Pj4e(uT94bgL60nNdhgWBbp;lIT5*SkwVGci?vQ4HIUfdzPHs6Ee|S?0~X&CWHUB828=3z;UmQa$eq=XPl+W9{_JwT%d?^5QBPgcWniX*-7dL*Jqpx# zddb=)fmwrN!!0Tm1uBXKu)jCtGW-2t>sFi#l0_@Rq=KmAFdR`hm0$8>A9d$w9P-xe zJTr?J&Wmo#UuEJk(92fTGm3 zJz&(+K;8Czw)9KcoZ1N&fVR!P8ga5>;v%8WO7km3*i#sfNLjLE35@_uyGCxO&$@-# zj+>lw5tyKljr|5HbnvGdG+)6$iNcCMLZ<&FD3hS&NfNmx0X8DwObJOXp~z0J0F$r@ z&Kq*abbtVCl)2lh^MYB}?1wDuF-Uj(fu-zev}9Ii{n8LGgFC)NhFxNw2wCvpK~CXM zRK|2g8RuxkcD}XpkD!h*6MmFdNE=Oc2?9I>|GBC5CT|&gd7unFdL0y?TP*|1EsDi6 zADT^N<9YpKiB zIu%^fnscYi@&-cV1A*dxX9C(KP$Ga%y-NO;Z}oa|aUUhrYn!!-9YtCb?U3>BUi@p} znUBh%ZZXgjZoFVvHv6;OkbhqHPHj*>H6vMi{W@fbo{RFTr}2!a2W*HQ@1{qJ||`*H%Gy@vJIxd~S$k)(M*I`b>05S@%AtShK7 zBgm5MM@L%-en}?hPWL2h1^o18w21$Hv^;s>RnD+{Cnn~EUuE`5e_GI_KcY>5DDH<# zr$cIxfXB{nE#`LpQ$sM9@%O-ZOa19~^If1bX*+i1*ur)F8TTT=^ZyL|A2C&`o%pxk zU%I6Zhtp`)g)5G3p#!(=M~g~igzBo*9k&ebgC?nZ{CxcA=CavzK5ii4bxeu!95w@! zf+tx48BDScakaMEYKv49EeaK@f`}9;AWBHBP^_pZ2#74HETV=*!~iiQajAl!;=+y+ zH{4iKK!PmcQCVe6RUn2yq9Tb9NQ97ttk3TTYDLki{k*^b{EmNlILDLc-nnyUUh|r{ zGdtC`-2PRzC{W~ND~*tusKk&Y8BR`Rf;IeN2B03&In+!%Ok z>#l34^wx922fx^gR?pj(3_aL=#mUy0vje|>#ko?!nqDzL%rL5`6H@F}gjDpM2S_^LYKkVyE z;CKE{5LlwRTUV>{>JASSc1+R4q(8JF_|umCLV=U0v9sw3#U&0_`r(p~!Kowmp&%kr zzYZc2O<~-lt4ULRTD)3(R$FA?3Vtzt%S!gES8}yAQHQ_uhh_}(Gi6xrjIZnH;U>15 zi|TZ6iJEog-(UqEiB|H~J+{4^j|naQ&S{dD+BHA;+1T0-))iY_)annMcd?5Zr#YJY z{M+RoQoGzj>3HQSHhkl_@Lw zHY?-T)Y1?``$~=)%)N&he-&_zFyrDnpih%8(mYOIvwwdVs*b+vfZrBN z2XR}0LDH(f8AOUxhnP&30>Jd+!sNeCEf8Rp2th`#d+aTj_G?wqTZ}~wXr#-l zJo4W@Lw@0RbtsTylY(-X@UTJ_ky)ObG||V|je2X%3mlVA zF!_Z&wq=U{pyGnXcDAXHFF=32*u_wx`LkCW+xU@>E9|nxF0a!0lfjHAk@l*48WT$a z!=l-wi$%O!sHv^er7er?c$m+Gh_Wpqa3t2eQb_C@jj^O5a=j zA1r}evcBx%Dyg8tL9=M>*OC+Sa6t~)17p|3(3m*P`f$Fj$f}#jzLGRYe`oilrDT)| z4A-dqV@L_b1UCSeiRtepH#^nfVN?wVBWtau` z+l}JA-grmlE;lcn5<$i%zAE;3i83yzIefjHgj!(cqrP+kdGaOpl^;jIjd)%hMJtrI zO*s(q*eyXncgBoe9Du<)+SrX@zjU~Y(`vnZ@rTEJe$T0L9Qgcih6n8vi%CiIpVogA 
z_4n$_4pPe(9xA8G9EoXyKEc4E*|Q@+x$d+mLY|#A=Bwd(!<)WtVry&ol-B96J9*F4 zsa-K!6z|i3La1n%*Ge6ZM@O(?45>wQg)S<7^sARej@b{cItg8l@xrkxaPP2;e{>uE zhs?>|nRBKLBkT8CRI#K)4JFK8K?!jmpl^xAPgtf27|R(mgZs=*Q#o411sBH}Rqfsp zl^bSO9d@sMFK72EOYyg#OLWjQ&ud%nPi-VtU06zL)%i9{nX8cG(7FZHyGjkmnDOB+ zzRIA+NMh3|-Rtk~>%4VE0hg+zo;%)}AHk@k|Io*OyEL|U?6rJlM6tBAP$n-Pn(HZU zOUoiVw)P2m7&-;w1~iYVt@zuCX!n8rj`})1%W`YIsG$*FkZKj$xffh<@tI)30fX&J z*1VNdK0_*2^Mw_!61$W&hEF+yww^6Lf;t}&JF$zX357YJ9(MHzst7WrN0hSXk&jvF z2P~yT_$#lanO<{+u(_4Jx$IV=vAhds@J8{^NF*x`O9p^6mf9)lI`34mnxvPvqO%mGw?HLdYt=~)G4rg`gC{zrK%^coLK-e<7x*A z7PT2eWGweSnVuPAPZVgIAZ^Duh|=A!C&ySIyfa+#cvcPxTh=PS9_?Jhq@F zEO>!4=aAFHL+YzLYuFY4JRF)8xBy~1JysK`(&Zmes%|w#jM4iA+I(RCN9#eP=xYWp zcyY4G+r4u6;IGCt8{eoi1F0LUqSJ;#P_45{y@#o4pYr?p;SxEe8$3TUH7Sy@ZH)I}RLNgUmOVAkZQr7dlU~`iq%`YiO_Smq*=Jbe%DKNT z-8{CiV>#D#nZFGPV+_=HYh*1WHqIkQYf}rGm}uIZ=OtHeQ7V~aSo2RAEI|pTXDO~_ z<1jkT-@tl$V{~zWw%`qMq{MzhmYpfJEOLfya$sD~`X^gH4}P=s?q5X`n;DZ+7m3;w zy9PyC;Jqhhtby%+Ju@76e2;QKe!K-Y<;x(Yn&a|#v+xb*{#lQE2~v>ZKCedeU#QXR zj~oB4VS5^Y$)=S>=kpOzJumDzDsEUlBb8Mnw|~s3clyzPc23ZbG#VRLoDDNdU8eU^ zu|_`kvSv%MM2Gk+ci@+S620jri1%ysoJFNyr0t0`yclf6v~NUMmXoQfGr?&@P<-%u z+2-fV|LwEwuGRZ^WNXEi*lPkaYzM-smuxo{fyRkDUc26*`RMb8SBD%l88#*Db2@X< ztMk5^^f4!y>Qos-QxI;bGK_Iv>pS>4WO_5X=1REB7{s}>?e&~<&Ar?MGR5A9H?s>I z3@@@CH;NyAo8@&2<2-at=kBq$nE9q0zq%-Jt2U~CdgNXk3|G&HcV@;6v_WeK?7<}H z#)}HURld~dIdbOWd~%_+Y&IfmOmOFQ^Yw)PiBR5=K!@yoF75%zPPU1CR~_~}T;VQo z^O^Ats49K4Snu7o#c{D+0^O^_`(AyVrX=CJTti;Fh((kk*eeNck~3qK{Lbr2zWmo> zRx#$YAxY&8w?s_1kzISq0nh1K%00a8FElL=-uoDp?>)0t(}P$uc+5jy63|=Zt(T=Z zo%3*@mOGgJ0qY1?KIDe5H_b8NDYs-lcMiE--Pj(ja*N>ft)ERvjZ#YHgicT4ebga= zkbrwT? 
z2{iOsU2ETTpphc^m7HR(#F@d{>XSGHi9eCowsfq~KKy!xgAb8W=t#l^H@NzDTwSYa zEIJYtIAC8C%%}AEq2jCuwHHU~ear<%c*L$DM1wa9N&>G{kOYFdDHt3><`EX;N!6>4 z6iGag$NMwz(Au#cbmQwDw3ip1ek1*>ul$wj(guDypItP^}JSp58JduVRbD^=|dDr*7tX1<p8mYp)vyF)$1}>JgT%0U~p}PyhKqxk(+Jzj#@tL+}mxJ#U;* zzte_a-+_uIz4SrDuI9i<^*Aj_-)6lcxTi-@>Vq}jxI~V8Fve8&5O*(MJi02VcYZ0W z+c~7Q`bJ1tKjulobJa`X3(E={|ipzKhXRd`gN zrrJ1UH+}grm*_wM8!i>LJ_V+^RIq^=(%Ls0T3;_MMRvq5B_@6Td@lzEl?&B2p_4;mrLb&>qTU}lvZ{E4$X`4D0+clG` zc3sD@n1z)KmxFA;$6$id<>PPuJo+4U_~#Sa6>;Abd3&(w8Ysy9*6+lR!IWdTa+=+= z?5w{@z8L!T%>CI@|GJtxJxgz)j$1M)A=6{A1>`I^Vyn%wsR9ywWNdwGQ*q+DRE?Ji zdf3;0HWDwqQNMUw5~ewkxA=8Fv`}3nKD7B86$J6}}2ZGFs|86u9%$T%Cjo*6+ zj(Qp!fSPun!p+8e{m9p^tIzJ_|4u`&_Mk>`#y6D9ClD5?i+O=6BhALSq&R41sJc}9 zwHH$V4z>UFT9w_neDWCw6&mRSMa=j%I1pyg;Vrd~l^=tZUM~j+2?-wPF7s}pP9F@FN*#>up0e12#~t38T+@rJa~72`me_1t+n+3 z(N?_m1@Hu zSKyxnJND7xCz<^JL?$cShdVNxNbA+j-^lW2e|5XC$1a&M6hV$0$}AJdi!g2!H;J%j zOQN>ZTibz6Q`;5bQ_GL)(H|R+X}7qY9|xgxnv~TCS7=y2EIm@|~U>R9gSl1f2n#>626^+x*7T>FKiqrBgJ+ zwB~!;zcv%*W&gKT->WyRuPjd?uC8aH@2a$M0E9fiF~UYVq$E#oe`i zm~*D9hj)wge-+!+y*aiU>%S85XbJlKO=3|EmTm5C_|%&r)?ce>0_x!ZZ#ywGwEHvh z|Gvh(hEJD|*m*-{(nJn!J7^#) z5HGeHv^!?+FB$L4RIRO?GUhqx@Vt!4Y5~~V`mrnH(Wq61Pow_T*lgw-PwQ2J*6FHx zxI^JKw+Aij8$9X~2_BBVS40ia#2o0wd!=+-=WtA%d^_mRUefM$>>6(axv!x3_d7vj z-sOTuT8ytJ4Bt>qhGlCTGGo>;xB>~D z6ACsRvY2OR+dJkt@)-S=uMs7m>pveQo5D(y6_#u$dp6I1B?jpxIn~9$vz#b!ZDfxu zonYljXXlf85fWf#?w>CUPL|xo`i~l?Gs?PEc$Ko9l4}-gvs_|#)Tzix-{s5KQu9yz zQaOIiBi1(|Xkok)@qV#rl`PF(_X-(0d9EnTF<5y}I@_OFL>M3C$lnK(_pz1u{aXed z+=*Zj#qU_e>K5F>hlm6lH^EiVo^q2{DR(ni()E3`?m8Z8kH!h;!h0Lj7w zWGz{lFUlzE)Xw)Wr6o50->*6LLw$A{4{A8U`}|EGyW}gaPkZ_xn=0-j)A9@SEa zy7enxwqPJ@LFW*ni14LnIYDHWMRLftsBJxdSx^f4bMb^Vz(kh$6-n*}yf%Mi6e=hU zMe_JK_b|}3CG0m!L|!A7IwXk01Rv{v8fH}g0JjRGllRq1kYIQZs@V{t>e@xx(sd=@ z+jxya6-VE)*9Gb`jMmb>;kU%tvSx2za-&Fi%!N>#$8SVg1b`R8ay6*vDFvdme}^bt zsFtTY%owLjj^i9Oh18S)2{MFS*UC(Lq~-VlOEQsHZC@{Tco@(=(uD2;N=81dg1@sm*(hzyErjX{p-5&Mzv{F-X&H^wNCDx!mSh?VS`gQs7#;N%j 
zz1nT?XN%asyjS&BQlBNKK>?}-y#+lcK9WZ?*O8TY;gAcoA*~IqQUvI3QMba4*spYi zyOGo(|23dfUzGHQvEVT6)bJ=kKrgiWYO60m#MAsBH&MCE9>~wR2cKN5wJ8Zz%01KJ zCKI`B75tWs6J<1&sSbtGo`X>XZ?sn!W5&A|K+Ot-Nc&n|or9nJ+EEkmxZiobGBYCk zbe*^Jbd)MHc##Krp)2T9B|Pm^nFZvu;H8|3YnB3lxQ7oASuUY8G=#%y#svaR3dWTO zY^KZcSKSgYyR4qlc>9#RHgqQ%sfv&HBNM-0tLgZDAihBmjrybfnf6(ccK$sIQQp=TDR82?!=pNiR(WSGQCR=8FO5)iX4*`_tn6-g1x- zc~Z5Y>ac9TGBHR`^%y_9rV*}!u*Z;5-r7}%&^&rqUfp^I8lCTC>#k}A*XiENaX$P! zFkU6*?8;Dv_!PL!?|1uG-Sh|~-6M|9dgn=cHz0apv_Rh10wX=hOS4c$FwE3_Koc{s z9)A)oqNo-eDFxFayh(}rH}tB9nsHAGUe0`5E_no60opd5dl&gO))l>YcxVvcwps*w zUwSeLf#~{jy~Y0RzV*vbdm3xDM+zdS$!Id<8hec;@*Z09()u?c!hzDA3S0Cps&EWE z^Zh}i22PQ^z?UhmEYf?Rw?|;3mxnodY7+j9EXe7@j1DJ*->!@~Y{=h#Tm z|LvSy^xD|6jmpAF$^wkMl5ixtpGTDNe!zBD$T7HwV$00(=ts?u+X5o{3erFy&m7QG zSLX4Kdub0Y+L#6e>S;#h8LF*dHCS#MtO_F|`yH!`|El|2!53sgU5@u4$$X51IH1@|| z3LaQ<2x1)x(Mt?c<}0RV#>55D+uKxy5@$1r-&P_y0k`qU3HnQ0vx*DJXePNh;@DZ| zk(+_LB^o!B)zL;T&^V2h{i#S&^7G;Ynw5TXXpx8-lqU%Q12j491#kf9O@+hLBA$#g zfgorb+aE+dYiTrhAWkI6pO}1GxeM~dy(nLTyr~r$#5B*VbD^`G*Jlru2Go$K6b?!C zkhnsz%rE{Q0~n!!LDBQOh+c3KPPdiFnKyveXrT8OweP#8%D@Z2Dm~@Lc(;1!Tido2 zV0c47Z$1 zg0@x7%IfemaV<6k9qfpD=W~l{{yA}Zu<$dk@H3VOBDv_z5fovKJ#-TU{!DUx?!8vH z8?TnCzg*F6)~Nd&zL!C&%K4>|D(pH%^CvXsNa%i~u2P6w+6^)}ui)O>(?Mg5FFDcL z8uV7Ikml8y(6m0wrCKln2ne4aF=PB`dZA(;UI||%i;9a$9wZO-s?&3(V;Q3teyH6A zW(P4Yz&*ekQG>Z${EC_0T0-duJN|> z>=N{6CXNg(UfHN|u4od(om7hIpSI5d{!x}g?l5B3P&pEJcx3^&T!}L#^9%it;RrK@ zGV}O`Qw)R{5?6>rWHrVlrK$h z?G6)OkozgwVW44Vd3YXqR)|IJHfj=@$xjsV36A&ZM4BV${pIY8qO&Ir?d)J%hOc%P zJP~McUu|Z0(WEAB7ZOoMSPz{mIW9dQCg_>RTfa~>Sn%=t@!s6I^2k;*c7ZXukDi64 z2W%=@N%lZN$=GZUaR-#kIfccqP>=n41-sQ9j^BSP!*_`eNLAy92H3>e^|>>2f)PN6 zd_+?~Z+6hdenm#0GHpxOT}8&$9*R#QDmVywwte>R^^bCo8=mHKThTpqIhY)yTKW%! 
z@6{sB`ipx<8G!1 zui=nq*1;ud5m8WtdeqvhFziDSkGP^j7XPfLLQ*82q0yy5ri#uJI$X2EpS1CM2DWr1 zmz)diND4&yOZCWompE-`j_67tRsccq>gZ)Vh>E!*(Tl{3GpU^$UCvwmTuEp5Z`=nd z({eQ8q0V_qY1kuQJu;ZD>n-Hv;`PL`CHYD)-LJGcR3wKaxbP!+kR2FAI*LR5Fwc0d z%~9h)$UTI{Ujby?4|&DSh0zM$z!v|LIdVS>xGUQDXk#Wc^mz;b4 zizRuOr2z$KRI$%Q_FaJ%Zlnu}@=TF@-d5AcU?KzEyI?^Pg`FVs7PHhE`#b_i)owBw z8TlTOqIU!mKNcf2{zP74hc{|w#z>z?qzGWE-O_59%ZLt^m-EISlYJlcjI&x4L>+)b7Ox)cLCb?w2`jEdcE{(_?SoGg5tLUY84J9_ zBmtY~9r+|QQ=n$HP(3nSnRb)$KZ_nJ7yl(K^KLY1WU^#0z z$A?81XgsHrt8PRXk{#-2dK5X+397PiKQ^`z~PEUc|UB}qZaPtf#rB$|As zBEPp^+2Qq{WPe$VGm}dfi1D zU^rU;`c7p`DDuWAPMDw7sDY!yFI`C&_pNC4yeGIWh>xlckbu;sSfY zi3*(w%i=GoB*sNH%wD-NR2If#OC>5NTU+mwLyN>&iNb1PYm?qTeZc<&BW4`olE$N_ z|8-K+wmE7NPjiGrwwK*>vYpMY0>ixoPfR-N!4%zBgS_}gEzys7;qrE0!Xvqh23Eq% zX2?z|6*7{pC#^vPq6u<_uhS+G3N|dVfu%T?Gsmqw$edOR7XuEz0Yxsh$9Loec5oDF zGPb17hH3xR4_6-m7d8oJXX}T1N_s?gZPEeV3IAnQkZ$$JdWX|e_{+swq2hz9T%Q_% zS!7Zp(WoR9KC`6JMq=|^S-ZMRl2TMGy3;RxA~e>>wG`sg^&YT&^;}>P=`^)Gt!8T& z&nyR3?;X~M;P<;Xn~H#+u)FX(i{r=%4aSp|Xml=~W}~)Wy1g?rPxT!o`aoN)LPj;f zeoGz(H_Kj~S{TU%R)4c=r*Gyjwk@MFf)bbf=tkZl7DkG==Pl31relyEWx#b}b#T3x z(j;L<3?3KX+%Cs%mpAo_+y_w*rO8!jLEJ-I9+HYe3Gb^5IesU{!~{AjrpsIEupp1} zcOTTZ(o7Z0hx3PTtLGq}MqE~|>-lVemR2MUG<R5^Ga)|E#dCdkb_Z0d0iCcrt8 zAJMYe-i?l&PJfglzCE$l_P z_gGwSQdJoV%?mOO5iAnU>LG@l!aI5^@obk5J+Rw`BFG@}nYK|oAWfUx)WhbZLq`s8 zI5YHO=46) z&RI*)aebua_v-}3)12mXVmzxXT~bJ2e|rU|M1U3)5|pVDOgy4`cMwys55-XR zu$FynIwxgIS+g90AIT2|biW{F`WS)B>$bj>&yAOBoJqy@B=(>;)Jj-op0Ukyy@ z3iSl*A5XkWCw|QuJqwF(bW=_+7U@2n-^24R%-O|ioi7RJ$}fu`K?{KLZ7Rv58nb*s zJ-8XUc$7r3gd-AQ8_k8`ggpXoXrod2q7N4Q3kRZeFq*=ed=>?t(7C-HjDkHR&S<8W z?efIM&ttmQXCZkW0^T_`o#fYfqn=V1ycL;lOleBTov7-?NVfCxR54OBD1gM^4j$J{ zjhE;~3n}r|1olNRRFgQfLJytq6#A8OKA+jEM9SA1fg#`X= zRDu7AUa7mE{03uym4?+prb?*(;7iil6vs?;8KO8grdIsElT=l=&u8p;h$HlwS+Ya! 
z1^*e`SYT-wKWjDRxD9z3ds>DtSHLsIkmxo-6de&A}zDS}Qr$n=qc$OC+u@J}RkJy;8MGEq4sg0xnlGO6kk-Jz;mms%V zK8dxD#E>ETaPfF+U*1p}8j8Q=46j~+VT6ymp7P9S&e4%}(ER)BRh$0&17%vmj#*^t4I`IP*=qF($V z$<==A0em(`@Gz;twRQrAM*8~9>2M{Xa1RS)Wk%*?ydK>iM9I>_B6E;hSsZ#JQ@(Smq^HQe#`U2IP9zbfm5{J(cYrIscj{Ik*s#l~IcsQbT(RNvZGQZhoGN@Hem(*MG>_x4HL z9uixHlBf3t6_+@)&a1w@!1P}B-$t}Bwz(ec-Cn-5*MBf6D6F*q)>uQ)btYueN4HWbWXcS{18NAn1&*;hjTJg zrs_fG21&MNFxcH9ZfoxyZvay>MzAjEZT;59)rtq~o6KdadK#%JjOYm4Q6@rrW5RZ` z_z2%(e?o{k@;JsDp6Yw82c6#)pHj0KE=>#hMiBb2h>g;*H;WfC1XVNRqgXgRL20v# zFXGdLZDq2(v~sel(AN7^#CIy za^83!2sQt9ACM%U-IQ1xbdXWP*yvzxXxXk(p+yMduc#xYn}xE*GR6J8Mh$@_4$;ic zk{4TWbyL&H%ZM{2y&yn{?~yo%kt2v1!T?H*f>EI;R_;|a$>^!H3hV-8Kw%0-Y@W}!%IG%)$Jv$?zmGIbHX zoGXkZ=UAqrA_Nj69oS7h`u$%J+4yhNLF9)75icChY2o?iNb4xtXQfJ+xx{*h;tV!j zVzXRvrg!`kbmuwxN}s7MWx;2H0xF6*yJd*3Kir;6+{0)U&xL4K z_bI%8*o(vrrFSD45~b33tr81b2Wbyl&IrIb22;6#N{<@4f#@O%;4SZDz# z5^_A&OXzbl_U$jmq`vLmUsb=ZiWJdz$@q#0|l zn#eg!{IBB21At*_(w$~gF)Iz*;jPT(lG$=8jV)m)M8E^m(x62ioaey0K5msC?X}(cn zxhz|XXUQ78pWL_F&D$mc6I{$_-Vq@z5#k)T2@#fG`yMONy<}RCa8n~ z7-L#*fUdAK{+!p~zoUS0(dy?p9}-s%Wp0QK$nP58E-j?l>`?M2MRKE07WDnS=q0$x z@FJnAKp-h=D%Cv?(ODYdAyQpYzQCRYrj>a2>@ybPc)~Ni<{qc<0+PAFm)D+(jQ8+u zuqnk!B(*%su|97Mq_p5#m&Rio1UBK}_rW++jn0v1<(&n}9|p!(73&OiW?ob# zAJ4++h9iN}5U6PlWlfIkLxc{rJfmi)S0Zx9As=-JvN)_s54oYO&3P=LOln4!c4LIH zcupdl9Z-ZI9x1XpgzYdEUL<-oB0AfbQE&~+(nNd6UW8GGv+%vW|J$2foN4nTj(4Y> zq+8ihqH@g7D|?`Lj1_Mb&aVDzkk{fzITqn9X)EWNss`~nD994{T4mFuQm&-jA4NM6 z#)@b(_Grx^;Vs-iLYLK$0jRIAn{E00KvlK?+h-={wa|W$d(-`eHnPBTjxHVavUBn3 zip~F-=wQ{)A=D{Ypi>Z%%>LvMUbgCPLc5c^owZGF`s7zypx$KDUgvLIPV zmCx_vyM|zswtkqbYJp_x0n9Ps3VNmftd|8RYJc6aNP9FLRd$=LlEaE3NT{J2AXO0w zKfP_XheB<;j5*N+bTDFnsecE~PfpClG8wBI17@u@BjXwo8j&;suXXhcp*R?iDxmRP zbyMeiRI85lQUm*AC;i2xhghj7wJcsiO^B%1f*aZbmv0Aq#+^uIn8~y9rTq^*GbD^X zWVeQU54(ZuPCr5WA$@DmJ~Snv@FBi>hz>cQBGVFJQM5UEsDf9M{c)%ad+xooqV@6t-+-Pnx}$O|gOrFr$yZ zed+46kxhYDhKCuB3DWc?=mG?qm)YkEMIsWK}>>2ay3$VQIs$f3;PfjG{s0K;!!M|;$;k}v^6 z9Kmff3j5c_R$6d9kBceV*Mj3W 
zGTZm{2Bv0tw4OsOYobc%S;F%~d?O+Ztt&JtuZbS61z7Wu8fkw6TA4vygb3gXu1)GH zGfmddXK!PlArS(UxqbYnSJit{y=!mxhxfLzM07~KH-tvF%8w8uabcb-!ty4So2h{t zv_O96&*NBc)fwzJWlyO{UlpND7R~4_kay$48Vi`IC}MpQ#?0e{2nY5jQ>hINJ;Xgi zT)UjaK!I$q{fX=Ll>UYk$1FfL1%Hn?^XT*4PA^B!+%9>`fE0BG+z}KEs;L59f7Q7c zh1R0dr+Zi_4w@3aWqnS-^n8(^aT}xo6-~YWt%-cc+j!lfOz1Ju?_eNyV+o4PnV~_0 z2F8oF6CBNp)bT(8k8Vms#;{}r3*i=2KOSE_U$8AssLCH?XcFBV? zuy9Kf9a%sIG>@3w@nfe6^fk^GT(W*wMcsNNp@Z`*4CX!+3nGX8uG*>=t(EQt{=uJ|iQ(hT+}L=#T;1rYQk zWzirI-XN6k6KUitDQLcw<>`#f!r~+Zf{?3vDDS1`Fm@4?W`a_z7-tTtT2m<5E7OrY zoVUC|C8G!7gUmuYb>quu5C5BjrVyjOh{&^uf{nsvL2Ny?bF1OgR^c?^kj#%<-wIjq z(Sk7T_@sGTea!UM4-ad7GZG!tmU}F!RgCL%B$Mb=$rAy|Q~t!CCoa%S;p5lHr~0O# zBI2dycz%b#TEkm}$2Tixv*b2_U8AQ3G%`yA${5zt?l2Oa{dBINwC+SY2=FebBm~KYS;q&jAi zCsM>?$geY&RbQ(45#sC8t392e@OW&yhfP653j3j>a8Deel-w3BM%*gw2+U|VJ0Uff z?AuF}q!lR}Oe4@u!R`?j#4jXel=676O9rl#5=aP|TY(_07c2MeE&Gq#ULZdgHuvFK zW5^NO*ra2hs2_YB=Uv1x_I5e4<7neMeEkDdFTGiBTJQq-g^OTsfK3gQ7Y&?2`=V2e z+~|-f4X68a#$2l-(n3kC?jOO$bZeEpO;Jvd4TqM$KyFqcQP`*ivKV7INd6 z5{0giCkqO|hO74r`Ky{k#pE0q->CK{0igo4oIXg-xy}q^HhRhxOdgNh{2I!Sv&O?{G%h6lK-1dc#SC7+XEjp+^Yjx=L7*`D3mt)?~C z9}M{Ro~XGIf{I#vkH)X$eba_wOdFUbZWc{ABySfXWnVp7&s-q9-@dZ7*KA*l z=gyujplGs$Ka*iWdDbE*x(+A{I+Y`+&oLcVTT}+N?<}c2UU*B;m}KfITn_3sKy~U5 zkqh&tJih#LOy9d&5QLLFM<_4EnH5*8Jcw*-1MYEqP%=%hB%qftgkKnoA|BHTDEKsY zYr?!|&nq`8wtz!>{n>>gUvz*eUR^OCIWDM!em18^!xTd@XK1XTl_lf)td3wyC27Pp z?E`H$h{{&F%?*OLG6kG-hVmF?N7xlMiW$&&Da@Z2AnsLBQY3h?RK5qDtu{hHPpNiZ z5Ftrv(Z~wcugYm=9}ZFC;m`diTRHfwC5%DTH~i5T+gAw z*L6Gt9)cjp^2?0hJ`QJIKC!cj1;(nvo%D+HDR8n*dCvuf#*gedvc>;->%H!=DI{$`2(Dm(Bz2FVM$f672>*DR5Mn3Zu&yya zb4*K&+t&vz=bg~Sly)Irg2Fa8dSybUMJ1Ve#&Z<&&;rhsjVN7CK6g_w0= z;E7-RV%v7P+uU5`;TR91)zj|Ohqvg z6UIBHW`q{rF+1utZqduxkQ1{V#!k#s6O8g_f9JVVn@t-Tm$dqGf3fvy^+vI4avoGV z*GZhMe)(qox~jKsxxvypqkQ}wFOJU7sHDy<|ar35AO7 z#jxpL%@(0c4cTyXoo4?){M%yFmk-`QTC$Vrv0q9Sp2=~e>4X{TdxiLJr^kLH;iUdj z?ObR5X!D7fWc*uMi*bMV&QJ$#{WDZAqLGTO5J)Med#Prj4#t1@<<;y#ZM-HY{=`7V z*S2PHk4GfvbD*9Vf%kmr(6^@yg->$#*uEo_8!hfCx0J6miAu1HC$vt7-N}$r0!u@P 
zTs=h>G7h#r{J|VS{0s1CS4{s-1y178B`xq?-UIfr^@3mgVoPp_OYe%=()U&b`|_Fo z&-)hU#l^e9i~n$%7{ALu#!OfHJz-xVgalzthPy)vAFdJ?Ks8Y;MYg6%M4E##6f8nCohZD-(eVg-n`*+J59y1rlV`z zCyv`q6eXYUzj)u>&?|~+uzyFK{t<<8z&9nu=*TQ*SKi&-ETh={lbp52(=rVvWCU5m z^Zhl~`F%baab>FZejT=I(?sZ4n7-4@@#ET2$^{0c&m(^TpZv+@vEEL-0ln$GsW5JBsO?T8 zUI0e!;EOn!tTZ{MbgP>c;<7QHGRtQjBYVnwmKMuyUd_J|JB#t=u;s&yvW!;Bh(wH{kNKVy6k68VteRDBuA5O0rU|c*g zAX9m+(e&NAuhGMsef6x%RMRUzF%Lf}bJH5HaRn&41Gr+$NR@q^eoF0E)Ap^Kz4UfX z3n_;o`dN*G2WH~X72G2iCoj~pJGn_~75Lvd+m;%{K28qZU|Xj@ZofPWn&_`RH~#Y8 z`&4a>K1baFjefxz>%Wk>+E~$k*a;biZQ;!3RL`idUp(FPiZL%b2S50@7pQ%v^#6(4 z^Zhk=_D`R{`E?ib);w;J1<75KR0DT2?KC?d75L#b>H}(4_&HfSdGuV(esDZ1c3yQt zaLJEnB|!1+0L2S4jNKogLxX|EnC?FQmw3a9&xdZw3d~D&4FIF&%<>5d8fpjX{>Q%< zncjBSL%fdep)w zeI*Qf9Bn2e`H7j3RC}Z{BIRpoktzc2hb4L{0o(~4RYbsj@N;BCYcc+`U?<{o8UA)= z_1Fc=Pk`{pfs=RS%mfLdRCrk@Km6@1gWAcurjv_SQRI3KVCU7Pyj7X-@X4GChr>c_t2j* z<8m7j&rr(@J(pK_rmu&HO=&)dsGL|Weg8yMK4w0;+KP@^Vv<$XqNi~NR|{%5Y;9Mp zmEQO&`3Cv3H@r)M0JIPsZS9WPX~KL+Lb}Yg)A}yK1$j|x^<`g=x~nzHKH$)tv{s>0 zro`$G`JbJBDu430$EqHaRSdh>`G#J5`%JQ`S5! z{DG-Fm0!o4Dl}7}`@73#s7s)wo9pNIPFo`1vs;cKS9X8)eTFNg|lxC;CuCpyrM znr=$j=t|5QNY;B`n$vi-TJ`RgRUI8!+00n|8)sG8^SAy+B0jruF(z}u;bNO;l5t-a z*&ucnC(~!LdPUt9U1%fE8|B|@e8VY~cOD(>lwJf>l)+hI>)L^%cMqC>?A5aTUkmxhq%VRV%<#q?SL(lo>DyF@TL56{XCPp#n4nFac*9L!G1DOBxW(Sn7hBWDxhezR}OmP5p0;({m$IwzK;|*B4x!M{my8B%N6O zn1hFiw=$nU?y%EbP0ETjZ(Zn(-kFy8&LcZKEEs`FlAah&G>Vf9CNo!LzR17`g zM#p}4M8xveGHqC0HBDU)Ibd#x@_phY!tBqVyfA;lAG*#*ro0Fh1)#-q9RXc(wXBNO z_M-TL!P6&4ipz^1KmN>4vwOMzrpJGs2-u=>%PaFL+Tfn}?#0-t7ZY_j^OpnKR+-E7 zcS_{0=C3_<=R&lMX;E|5 zODk{iyG~3(i&Mpl4sE-s!VM#*6BSfP-}(V69C>Zqi4fHkP2UxZW4Fwy`Ll_C@T)8D zJev`5Bt|ZWw5~+fs0`^M_B#$hrK%2c6Q_VG6fRwpyBsZbzbwhQgC> z0nB+4TAY;!!1LRIU-9>$1_1cY)Zm2}o_OR@d9rYZRe^#2S9Khtt-izLO)<2Pa<(=SEYYqUX;TJc~!~F@>)lWF! 
z`k-3RD$BrUX9HS3t5sQdG3Ne{i)PrX?aUgwABs~sO&i}NZzydT|r5&&mDjlg1ZGv;i?#ZtbRVFCc(w^9>oMJP2+xL z=7UOdVfmz_s}<-OLW)v~h~KXi3bDpULWxQneu4_T2f;)Y1Si8+;m0V|`e(!!j;c#A zfzs6BT>jnN`7QMEZ+P7bNf_vFGVc|bA9U5y%s2fhRWTzvQ~z-7gP{UZ)@7oxzPo^C zm=%S1SJ)Pw_ZYzJqCxFR@UCBsc6P>}-K1sec5E7B>&cUan(k&z#i3{Wzmv@3?69^? zY4Vc(oIlCO-z#mBZx<#%+Lx3Gs#dN^erl9^!1D%f+UnIl_HNb68=~v$SEQ)yj`7y4vW*GP@2gH|Q3YOy z-0=A6@7&0p;f>tfyyM5qMHgcj(5}2fWiiDE>+bJL@p^WQ9$ERLTj#v)z2BftJqXf1 zB2+g;nnUXR^=L-amNY!f^()Bk#(|3+9p%`pv{3xBTLmXJ1vE#XE%B zcIUn)>Ju{7ZcovO)lZo-~BPd#Q20k$KGwr z1vBh!9|B;U>jWZliwqdRs#h-7#^c^pAqab?4~L$W>cHs7`(c}Gw=A|zsn6*DhS@Ol z_ez@2x!!;UfQGKJd{^YMK>O=&GWXsu6pvR9ydY>H6pK&C_&eBraSCzxi+O0H{37b6 zR!&NO-FA16Q;#gwFj6$%9>q>PViolp2&l{divU?bfEjmV=B0l%!$-MY{+qYf`t}~O z?ZkJg<`cDQde!STv(qy#?w|JgI7awN;ed%pW#s8TRc%(@QpN-RpMhfXu7qsWl)0p3 z4PDN0^Vb%6xmjlK+prHxFy-+WLU;`dZqnEpfh0gkV#^Q|k_bXV5|SvK-?ZGe>0j|-MF*o| zpj4gRVo=YoZEai(cXz+o?ax1-@f>~a6A$=}%ZK2nDGK`aLhqj)GH`Z6A748=vO&MhqP=VrV03`;VXN_>1LDVA{i#AX)Gwzs8d;Jc7(4^s1?`mBfRr`CH`r1|%C&sSuun*XrIbtk_W`V=g{_JIa;JcxpwLISM7cf6@imU;>R8q;@K%Imy zev@ZE{O#8tFK%L-zw&m@YR=bn?pG%3{}b3LJG(ajpR0K}heOaeZVo^%_Tn-%wFur% zpCsA6Y38Bdj4{RdGUENcXV)4^pV{_Bx|?qRy&&m7@lxrR%k~~fjlEI-z<+PckC?9Tq_S#89Eg*6dkUWFeNO>=5-eNAikweeE@@!59a zs(lB>qn;epe7~PxQy{#Iu@W@G1`&`tYz1=#*!=yIbo-CTJ#)S65uc>m4Z7-!QNz)9 zOUjFViN~qM+bXZUroWv|IloH&El50Ovix+w44l1kW;y@od;iD?kyN_fu1IPcbQ2j` zT`wfHF^dU7&*IXhes9#yW>7l|?%sHz6zpl0f5p`K=KNxp=0@7WwQYVDa>JhI9?SVJo zag_O@oS#n2I}tZWEK+i--0F|$Nl&hnTlj?4Qi6uJ{Z(1Qh^vS@!W}oUgBJOU_W$rc zr|GSTjWla-d+2J#lLaS(vWUQ zNdNWJ&}DfDx99|2L(NwWt$3Gn$$wRz<@_VX@Y;-}{F1;?W~4GVHQCz}XN`yvC9uPx z&0N>1OK0}h6~^yVD2W)mH#D`+pZg!M(_rH1GS`z0Pn1XR1q&0fhvS&=tf}#{6S}Fm z70M6)y;1u*z_+%iZO3e`<)4-n9Y-PD^9%Mv&T2$}Tcj#kxqrdC3q^@(*>tdf_lC~) zKVS8l`UCy(g*x1cha1>8IYaDgE^aBt`eJ{I$zMgE>22=Oq}_|g6EC$!jCT-}_wbV$ zczHFl;86{SZ@%vNn_6Z)kelvBuei@a0s871{A59uJYj`DMEv0$3Wyq^7ZGkhSN=Bt zFY4#=7}(vHfE#bG=Xluihv@Xe&GYu%)uINxi*O$_5hDk+x zt_Exg2$PP3{;p|Erqw$BsyDRzeT189!oj82R 
z>X@Iv=XEum22)12QilDDVe+S2XV&3fuL9?40s6|C3*Ve7Kk{w)m9HNt0vEhnarWKQ z$-(CS-^J5Ej$N~zBq8iO@SyqsWcKuCEujFvel58CK3aqW4}2MppDv2*8#d_tuy{j<$Vjfn{4StobDJGTq02>tM_F?I0T-)BMzqpyZV(MUvgx?bLS1|-+@o&sB-YuwC5yV4&c2WL6 z=V?~F4`ztau$0Yi5~nC~Pj|if0MYLR$hZCqBlyz|xQA;fVEtfHx_WL;nUjY#J$upS z8l(QhBS|9rsC}OQ4K->$_=DY8?B(w?eY^+639y@}LXkrpMLtV_g5GF{>@}IpC0!r= z7ohRHsQ@f2Um`N@n{prCFRU77oSenG)3bbu5A0OME*^C=A-N1E`!ezQTGkjnnY^@a~=HL+5zGh7kgJY z=*gMoSI&%uQrJZoH#|P`ngcMeYt6mmzc})>!orvwB4IlnBE?zkvU3np6b7zm@-@!18cB zYYJKJZ?Eq?EdZd1-~+lxt*8rYv&PV)qdOq<|F%vEquTdC>y-n6hVaTgXIG=l?LZLy zsA6YR3Ibf*#{c%>{%Oiv=5KH4&2R|)_$v_CzS`+DZnyaAa#$c$M*lBQ763LzZw?PY z@gWz=zc~}!`YxCh&hZsQ{}XTQrN3GwzBwnn65oW;D@PCQ2b;xliv5t~8y6|#4d9Ye z-o5RbJV`!dW;_A*SNhFwA~OWX&z^?Apvwxsu3I}lYnC7R`V=Mq8HsSpH8?vdcBhk^ zwA$u>%N<@tNX&Q#{y7VZ8mlNj@>c8HS^rh*y#7$!e+=jUFPfWgsuISR{15j&!9$j#rbH}*H`z42`HE1CIk)HAVp?QY`V3DY5xp zT< zH#k|lre^YA!O!)#`ABn>Bm?0KC!O+0szyQE4D^G~SFtLF-IjD{HF*Expyj#>Ndfqf zu{}1|$b#Q6txh$$J`GdV?l^gilgw~V-#gqKV(sf2hH8RDDQnWT@W$dG71M5F>N$J@ zRg-%JO4d1QV(poqi}*$HBbodY=$arAHnn#=xpl;OGAkPNX(*>(m$SGfGeX_>n!RW9&?jh^kF{$dre0BL@qAWm1YrhJ&$v^;@KvV` z$36K`QK1P#mE`9@?eUM!x)MIl(Rpb_{T}*{iE>3aEpT+;7mulIxu)w&PCtG|ga$qJ zVjnT4&}(~ODELs_1p8uT;;!PLE!14O2KsPksBfU2^(+`%=ngp}Y!lF2Ykg8W>a%=g ze8eM|n|bVa?TuN_-e0z|s$rze`}*h`rVZfGMY(81)k60*@v zz>z;uGcxr{bQQhB%=T46kJS9MlDr`oGIHZ@Buz@MZnzWqOM=_+wh=hrWzHdjXC2~m z%sp?KlkYD!pPsSA9({P0;X9r-Ja9pGGeK1;JG{H(s@N^Bj|x01*VBT

mcUm~Ai z5k%*NB7FAJ5(lI;_`yd+jmlcxaEGi67ZEsPK$dJXS} zR$7r-cR)N`;Z(UlceFPt0m=`BjYZ2Ep2b14l)WC?#F9`J5yC3z+opo9_m<$#aSXX9@9Z4QJ&QYQ&%B} z7lpANkHShP(k--bx|HKdpimlSzCRm>&{?$$GyXLoCH9!DtrTxS`b&*ABuFxnr((8V zeEx)w+*ku0xLl`$aeX9La3356O0KgmAdu5wVJqJbH-21(j;UkDSYwENL@`=mDI7Q} zL;DxoS7;+1E>dH8R~>vYLq zIeKAy{5VE8dS^yP{+*edOFzY2#a5*i)6J~IyR;jj+=%ov8=hHK$;h7b9!Yd>v+?-7 zRJtal$#YoE<-}dM;cb?g@PIx*BdaU|v|?kJr!Xh5w#QF5vYmYhgCkYm42)ulf}7b` zRht9VJ0l0uV(_^Ck$%Kz?$?bAbii3z%Tj(c0r1a`Vx=?~{t#CB>t2z#z$p z@x2(iJDiAhufl!tXI1ygf?i0GCiUm+v0;%_T4Xw| z2}+kzN-_%VatIzHIDipG;}-O=Mjmfn=;OZjk7-FxsxHjl$WC_?XeB+w zh%W7Ngb(|g_JP1`eKx|sGZWs!vOnI|9DCJ0I#~pF$bNFxbsQK(UiYy!IuXX&+M6&) z;dp}jy-i9Z5Xh=QS<;EJ@yQh8L|}%uCed2BYb!Um}pckd4>WoKHwq_n# z^I5s=z~x>9k^ITecEX3$3hs{+DSo=prr_rl5^3JmIjNfZSQzd2c}+@PCT=ZUa!=$` z{b&?Mb9YtyoWtA9`1dt&Ow2H~5tz{b$QgswZ1-*>i@tk&&Dzv=aMWHzka6WWMz2JZ zpCet4n9Ex5kvCuC0JRm~#40GPdgXaoD&90yNgYLYo zi39X2Hb9Mt#8eC+5}%BL#vL8hukZ*I4S#t=T&~qzHdRl+$0skF>za1c32SSHvb1NW zA)8~aI}*Fw?+BC7z!l0y!1VAiKN+r8W>0ZdunRZvj3_);Ad+;C(5~#7@wWN{vq9Nr zF_edvmN2iV?1bq>)|$iKr(Hq^i4bl{{dV2W?n1L0txgpp@V3OApfM9w?iqu!8W1Wb z_NbKYs|GdGM&1Fw0z*P#LSsdc3k_N=rHY)6dnnYOU#}St)hUfGUP8o~v6uMf>T#4x zV!1t?$yXv)Rd92qgV-(TvMk@+5+^qeg?a)E*jbpXzBKEEBX40Iiro~%$2u?r{#<}v zX-1DUW3*fzWI)uRMA+_d*&S2e%Qi=sZcDNx!5+}B1OZMS%B{bMwknOfWRg`?!9%qT z+RP9hZbQ~>ULrrD0-GHw3hEgyD3A9rRZN5TXs_zWz)Uq55?v5pw51|G9H|Ocdxpsq z#H?^p(3gabfjU_D#^Y_6*1u%@4xdV!^Q$g+w|q6#59$ci(Fe5Vj0eNqwe2Z1?(oD!FC8lQ8)5eyIG!H69 z7!%``$Rl(+o1t@9<~0DdlWYv^2}|Me;fQh{l*CR_;f=1oY=#j*rvXR$i*{cmfEl?2 z!TO9%rZ7=bE5Qsoau#ZH(M&!M8$&Q7;(S2Kb2XDG$&g-US&ekomH!~sr`QYF4)*vf zSS+QOL9?|Y2-i(Gald;7MwA0PkQfX`7EXj2j(F6YqRBz`Zvc;3Q#I-5D!mny>EsQj zaztc@f!LGK@+la^2DFTL?%k8UZ z0b+T+>tIqHOLf+D-gNZdKRC z&lxo)(eNuqdVF&$lOFXmzeP=jH{?#^u!~S%RGLe8HaJH1)f%0Qpwu(8O^vg1X+bs- z%pw|6BePMPfKmneZrNx6iS-hr)y*>6AF%fiW!ENfptG4%MR1$grBVWCW5#M@hr!_-QG|2eqfZNK}=>b5ixAL-cj`M7jsm*xZi1(O(i2 zmBuXJbUWhxQ{k|TVD82@@nKIm~1)@FDfR8Iuhe0oFVS)ocRMKjB2*R?yi zx|`TNkqMP>-i=;m@ls-h{?=|V81=zIb?r!a*M@yb!&y4zJw=}OtTs-IerToGDVEMK 
zH20rzL64KC>R_F91XE(2Qa9O`z)lLC#wf6!{?-H7_CcVM2XLy;AR(fcZ5)4C5>f%UZDEzv^YX9<5-nIC z-J|$YCD(lWi6tj0j$h(Bp0yE?^tvI;e|$?Dtzy11K?WIw1Tc`Ol8ln&WoRWlx@#+{pD=7aoY zh3<;PEl`K4BI|UaR<(+2Pxp`@d)3Eqy7XSjo3D_Ozjiu z=>r(f*#pgFCBz7fup{_v-8#))%GBpW#I&hwCHID?Gz`6U6B3(4Vie>+X}ITkRm|ut zkT*&zWc4UK6#Uw3;4=be^WyfTt8~VQk0jmZfkw=?&LH^ZQ)yRZ~xFmBl2*3lXHsQgh>F z0>;7OEYL#9553oJq5ASWGlfP3=yMbWg>69(SBE<01kfa3QY@K|6A`*F3BK%52jg5z zBV2QsChm3(wglN+`uQrtni_*0+{bqyq(@nKJ71<4m8Ge1=`N1E*#T!9>_w6M!BV#; zvbWMP`+V{fQ%!zKfB~o&bp#D$e;#m~>e8iM!{LOytE}Nl{^%z)i9#&JwIH6GIcwvW)UZSgUlBZhY6wL*)DIROnNeZd2SSg%b1i|@xl{c< zS=gonVoo=zYq$?BR$Q*qQ{+M);U;D7$Wi6QgWrk%UF!cyBW6Z*)4%_}>;C*-O#1ZH zz|X47%%uxY?sKQ!Z!nB}KQcKLQ)7~{Ogis=L%Qk7xhGG}Og^?{1PTk!V$KF${*cyg4uypfg zBE*CmokgJ-RdT~tE5*92zDZfKkN{HZ`x=Nv-Q`k^t)T%9d9t*n7>5S%Eo=5Nhis5FIkB?kisO1Yr)>PD8o6-^7CTTIqYTN-86DP(x6dN;j2Msi^naI_*qQk5QxT0* zm2yV@1+)h6LSXk?=akh*z|l@bT4#AE*9d}PsoLgxaPk3ie6rT?_+DP>=IC5omA7ha zQJQwlD3a$Ev$pv0c%KQ}QW>_u7AAJ)(6nf~S=gMJ?+0gMf{?{!Mf%#z-$@_sze5of zp}m*UpMT#c#4hoLDu?x5*UG`6+3D`ercPn?^g}xMv}l4KW$Py?#)&InO{h)rcOpK`P=sM?Tzke}ACz$*tyBx{jc==%RCv3E=ZqGaiSrZ0Noh`{{WHIqvVz#}` z?SXAg`i}nViPQxKo)@>O?`Meb5;IXGwzVG^ ziB#JQ#^iDg#kZ}}Dux1-PFxKVIAy0k9jtFlKcj9#fQ=nL@J*sHh9pua(L=U8=)E9~$?kO%O<(duJwOmsv!e;a5b9GSIQveaiy@xH65@m^SG8;Q(y zFniaijpeLL@0{R{i6O*rzutLVtnVUfRzgxYMUQUCM|36FAjjhc?$qG|2SOeg+`QSA z#mvG#-I(LkH2)#vNq-z`m0)6X??Hrb4S^CTI@1};r-k94Kglwcak?4iy$Te4>v8qU zt9lBzO63x2fs`>Y_~*){(s@E3@HNZfRn}6869wTXuTzSvd?%g{t4u=iVZIn4KwPP! 
z`P!-^(yBk;N~D|QYuVh??{mSaG1kv-Yk>X~k$RYCqmRpdP}XqltIlGSC0P>_oaS8lrpesmZHAvQ)6` z{b1j^c|lL7Cv0ENNo$V*=6?%Xl@T?%CI_Ikl-$s9B=|u>Ky^t>3`gmr5Q0FTC`fPT z{90Rwl;2@!JmCJl=jopCK8WJ35A3HT;*AqX{pIA2x z9DX~cDAAot%1EhREh{t|uLB}sMzc9spN8kcB7tpZ10og4yhIx3l9{Az{MI2#qKJBXswB^KgSGxzMO)qIV9g+NTT%FTrdkLSka+&XHz|hOsuC5~`2_ zu2J8SBfDIi8zVAYQappG3|Sx|&V0$U46qCzbkud84OfR=P*bUn(s|Y&yt?%0;WcNS zl8}ZZH`#;#p%cOcm#bF?iMV+gzmfF9I6RIDF0UjINUB`Q@{r8uTvU5pi%<>K^l;MK z9WmKJpGsmQU4wBXE@dUFw%&*WcmqmlR_oal;fy;zsgTGg(4Kfp~?zQoG*|CWPfe%6}7VBvA zR~YHt8$+iJ;YY-DH{FBSw5?PUgE&M5hckUkM37d2^OXb(oY+g00CubjOGtGwRVLeF zbP=`~vwj_Jsfdgq!=>u8fsTW`!K z$q>UrZ3?sgro;LQMs4V-#y{GI7GR_jEp>=YnHGlNUM<*K7}ykCS-4G*9=jCbe`U&3 z(3lsZjgQeR7J-ct;q=;qN|1t@(!ff5OOf_;dl!z^E=lAyF{4QQUdHKH#JgWBl$L7^ zyAA8qE+6X_nwXpnxSjBn+v>=^DcQ)a(){DW@OVBjndRC?OEhzueMMg=PHHn zVs^JzNfbvDhrAP2m@thsFGoMD9}$zrw-f}Aepu`X<8>A{)C`p}kwOj8kQHxq(lJO< zLKMrrqb#-VdD-TI$BH2&L-R+S8HMVY77Fm04~oRfT==$tZEy`VUwuU9kks|fMt51i zaILU`q+gF91p998GfXDvlgx}7q;V=5E|&7)b)uG#>hp9ifR)~cBzkFRT9$W94M)Da zxV+&ayP%5T8)}i~Oh!QUs{}89JHO!*Ep7|$bV~!HN1B&PTW7)V)(OuK>EqI|S6@{f zQYgJ@qxE{eTVny@(^n+$WprBv-~NL*V1BGDn%h9$ulb;7zuBi`&JY!A6&j}IDtMt4 zEJHm(Q_l~b*cs2y)}D6}0@aiKY=$XFNbE4gcoB#s{XwXHWv2(6QHK~p#OxMndI@>2 zetC4f1Yy%b8g`Vnd2 zdqx?Lx6WVn^j%6xSXd;$7~Sest8zh9g~n9P{B{-%&{|R@)>GvkcX5n_LOV4o@2K+Q z7@y7cKUaa8#I}0$8~^!!2fv#3a4?lm!Kz~(7A5lz-s6G4J!>5pfhZqh;rx+NRI&Rt z=xvUUEJ{d{>pF@+B$NU#Qs%IHgn8lVe&P6?G{EI;g(oQGB9iH5Qikh*`4bd9I1}3T z;0CWO3|=ahSPNEa;zYM09b%n~A@^~vJ=YEA72Zq0JW?cRiZmLk{Oeyq*}xY&Z$uL2 z$FS+nieqxIksjpg?r>V$`xT#R3Tqzqmnu~|V;IaxB30}LQU}co>VO^{lkON>4X9)o zyg8nfHr4Q!v7hqo(7cmxAL}es9MJqA*|;eo0?a6=8j#B;fw?SB@tMYFv8ak&gy5df zPF6ML078n9ePTq@)waHX~gVeyBs3 z8D+Ezy{gbu;2ERnY~WMqTKaXw#`CfKhA@1;D1UmwrfhD7v#{_29XkHrQ<2b6PbH6j z;FYNDq}`<;HL8U%WGQ9o?KN-g1KiR&`C0!{ zBK~lW3Cqfx3cdrHlJxV?uT|dWeI(P?EyneSITJ_?$5EQj2Ocq4=uy#S3mIrQ38nKI zie8?qh*WKi;do2u@j^v5S_;Z1YIHqCd=<;h#1ozj6K<=9U*dG=dj`my40JMoxAnMg z^c@m>4``gHDKXwACOfS>s3e3fq3L94M7Rj~BMFNbQFR)p<8iK`N^6mF%yuuTZCI2p zDj{pyI9mt<^us_Sl5-Tc!du+6q 
zuw{|RSRf5v2bAcozt2vF+W@++A~|5XBhRI~TjDxq!I0j2R4hzFCgx~Uh49tV;~Wq< zZ>=X;Xh%Ank9A5wm-%5+cZYxF(APgYs8)s9tS}ujX+QoeCeAYBzltP$#JE`gE@UiY-HzuG2L8Ns@5 z_oCh({#h?xhl7s41=KeXLI3sUe?Arl7}AH-Z-W*aK0Nsd#r3c1t2V4pUGx;WsY%x# z=NO(Xa6U~InvOx#$&p!t=5L6bP{#Uqnc|owZA@Bqq&gCLI4F#030y5z!R1OxDnr|E zWL+QSM3e+!a-i{i&bRG$0#txpvKmOotJ88jN4*t1kZ}ejZEC9#O%$%49^*z8O59%L z5iGj5-Al^v81*e^_`Vk3O>LaamYDZ(v?Bwlw2Oi(7(DP4sC z%P^R{7Nd*HCYEEpYtj?&tY>NBK2(LqrOJastV0Y7e32}4e*i|f2npmMR0t~N6OA`O zWqBAkS)m`{oD%T2v!jTczKCQV%}pgcZ2fg3ND8j>76CbTCeqi7eDfNx%R1zmj@6*Q z8tPb1If<52>z&Nx^4; zoq~i2TO#G%n%xvcod;DPj)yBC6u?ek$m|5JhPb+MT#T$VHWy|obvFbavF>D-_Tg^# zPp0<9oWW@minGyd6yTayj=iW}8hS6d-8eq=Mb%MXtkGT=N^3iSWa{i6^{Sc$u5glE zTje0bx^RU3sF5ipAu2OjV|mVjSdQ?eCEQG21i-yyTCv6jff#M~2_=<-uiT7Y1`Bl# zUK=J#q)O9DK#==kAspwC3oTMcDzgL=3tjtrLB#|khHh6Z%oHr>YF`b9j8-I5!Y|T5 z>w~LJZj&*it^KNt79xC!obdy$;P9PvbXI6BbU*<&c4VI2R`H1wkb4Rt*b*9F9jFh0 zWIK@_VL>L_RWOvoTa`Oil=^qLR6t7gMLZ(~n_YK8k*#_T%L^ z^@B+8qQLIorSrPe1z$$Q1*Ki`AmXS+9o@=ILPXc_b%^hTpf-`xH+oRW?=qybRR%Ij z=Uj)V7HF?l>DKY^j|NkhYC12uz-+**D7XeX#NBLf=BFU*Vm+n$fCNco&8}D&jU*zg z=;rU(VkAhTq1=xTDVsb^bK`WPv~9MSQn=_cW-8=@x9$rAE-Kz}23Jne=Zm_@e_kjVI@!M~9Ji zaM@8v_8Qvz)a3p!ZC97J1nipl4)n1`I&(&gTE>&f6zElU)8Z~l8 zRu8`mu%d(bver&-#2;frHo<}B$ZT*b4F>Mh_gvjqv=7bJjO_}FHs*h^gJ!#Jy>DXMU%|Y5xI)vWmgJGF`a&obU44x=Dm!*^M1Y@4jGLkK` zye}T<=3(*wC%N8kOWWMsu0heJht^)-$=A~2-HAysfg}!T;Ii|I9I-Nv2@I07Gm8X1{R_p=` zkwviLve8-Vm9pOScVtdsm`Wimoi_r)-N}SHkZp}Gut))RD-K`|yZpnxf?J%(xY^Gr z@aI7eWn2T>#bs=?NywVHn)jF+pu!O9alH`q!BMXmIxg~@=If^p#MF)urFt!#_->sn zOjsS^M)G$x40+zN!{j_z9j5WGkZd~sl~=x5V5omRM#D|E6F5JBZXU7jQ(ZBF>!Pft)fk!~G^dFX0`5yc?ynY|jjusTdX#?Eo;Fv-jf6x-!UAye z8=8Wo`<7@AYEF2{cN@W_H!KuOf52JgRcvZ&E}dz_yMcZFtlty*^QT@=3=qp!-35(6 zCT08z6Xe&4xHtIcD#3nC9pX8Zh~}v?99Ta^XglE<0m1}owmm&(y&#FGXep`_QH9BC zgHvOm#-%R?PR*O1evto^O`I>O@=4S{j!o5RutQ-`)23pT$gQPDM;`ny>c%bW6wiTx;X;NZys95vcvqHCZl5+a{ zQ`y9MKCPhjH%aFmD*s*mJxp!sI8Biys`( z;Y>tEqiX;~kT*^`uYTuTH|2i9pj5v$X40^lxs3uO>Pj2#qg}U|E#PrJx`-qJ3fh+D? 
zb3rSIGv+^a{XN)M;k6dX=d1T%y<>OUpUb$Z5GL5%&A~IjB!Yyl)a+Y6IN>k&jd(JG zCK2$TLad4>V)AMc0`ssaq|14r8Qoco=CIKkql+7VjwSv+_6?h?9Lv<}KA4l(T^OQu z6SoI+pvGrwbUX=;l2sFt$YzL+#M-M;ZW;z<%K>WZVu3P+)fB5$EXQVPN{}-nnwUiX z?ic%fS6XFeFttM69&MRvol#iz2*!Ag+;M)%kHfYkUfz{6?fJ0! zXeif;kS5r#+uWn~tNxI2RM*}Flk@Y;5iduztp|E`!E7hWV1@KWa6fjSn4uTFwvwws7#ggtJe*VQ}7a zik?i;lh$Fr$Hm426L1Kcj?1?r%dli=bG@0iv%DPP5+% zoMR`p*3;Kl4gEM0vO?L`+^24SfV?e4L?N>Der-vetn=&Ux$oaVjY<=;>gU8;nHXjf$N7Fw7n#FMIq4(6T$j~@AH$>UAI>tC{ zJw%{8NZI;60bvu0Y05dOOE1Sh#Uq5{B@Jnk?<(&f-o?wptH$4&9sbh|zbjPT+=V&s z{T=<$k0VLCG=VUqj{kggaLvfrc$w*4DA@vpfIATW>M%%PB~|XyBmd{SlEd6@C&O}; z`TCkDC|}Uc>NjvmK|vt)CR$A4&#i=M4h~53P*UP@fCpqF7~8TMhS44Hm0EwAZySojxxnkg ziBOUK(twG}7E=<6$aQW~_qq2pn3x3dd@kFVJz@rmi5cVCOxb=F3vKN?d*MmU3lrbk zM>gwW(l-C-(0<5E_x|h!_v7b+*(BwcAj73I_lcI4!75)o0xT=1f9@~({faA0=bvyt z81^x3d-a&>cnkmavlp2JzsM=KmXh)72lg#^2XMa<@@=vZYdU+)tyM3A{HdS1`FS2V z-PdWBhJ!7h0CWO;{@2`CoEQFZ@>-_pW2Nb!Z^W7Vj@I!pgqbrUCMMzQ=ZtC+w9nkX zrCB`^Z0b{mYiDMRJWuEkUW{KqbI270=$$X*pPRsB?A5(+RO4GxzPW*(nb z&HZn~Z-IcG@S?eOEb3p>fFA8TYvI-=oPWNTMq_=Sou}{L*Z9TU|3*JJ-rA7Q zcJnv3#ZAnf4{|eknaQ%xyRSJuj$bv(tKVGQvFXF#XzWs5Fnwc`zLn%6MDVkcxS(R= zUvxkc?Crwqx0H))2bBeRWd7z_c$If1ziV`ik&XG6`R#w2SMPkHIg@6c1&}*(Bnx9j z3o*mg9woHlUE{3gv5vg>+t)S?MZ!Gn;5I3^)#f*!eCBN`D$(OEihr_@4S1N`GJnx| zbjx49DZH>7SgclevHtNz*t-UW@;gdUwIk^pvj@V)K;WeIu#nV{_fl z;)16MYRk^!ym8udoO$RyE{!tJqyysZ zoj3M+F?pR8X?i=VQ6XdR3H;@D`x?EEC|ip(YkP%&#G&n8PM)6A=)NO>w6K51-%MGl zebfFX$R<>5`c5Ty%j9JCTmat6j?!YMEjuw071`m^S&ooRIqK81HJ-!y%GS`S0RLj( zG9;=4oU;g35Vn2~ z5&vj9cBN}*?dir#h8jXHL|h*P{t@N6T~kau$u{rUbPPJ}2bNjkNo6reaVyf~q{Z#I z4z*jpSljT5Q#Jq;fJ>ER9HmOvT1Xd%V&F^pSNTVYQ2_+vKi?B#As8|uzd=J%4uxrN z^JG(R3+M$#9bNIR1`j9%jym5Uw(ArUBz}9Gw>_R2gZ5;7TvzAhCZ#p#k z=q2T8gi!&G?o?ob(Zv$h&1K~N5R_Cg?WQv>7O!&hJ2<_AW5GW`J;UFdXd|$L+bw~e zZ3$sTD*bs`z8E&Z6yfAK!8G8qi;Ue_6}Hg=YQb-}cp>7?S_d&ewwDX-pKqM=sC&$e~(cLE(ax~36SqkQ5880)7 zv{yAHiDX~sy#%`9k_gnMNqWXh>a3)qMdjo{CJ-YSTKdu9 zwTWScl`6jov&oQn0Z6uw~bSPr=Tb>QB 
zjz&p;U}%JAH9|EsR@aBbrUWqyDcKJElv*cygcM?)YY&Ka^4eA)b)IlE!04liZ6mdi z=^)jlP)Mm$cZ!gVh#2S_raol6 z2fF>O19Kp(YAtdxzfK8ni6e_PMU1galICG9l9y*X`5&o-k-$D*8-Xs@ND<&xN`)BJ zSu_$Y=T%+ECT5@e+UoW!3Q}$;TXjl)p@Yg3r)QNGsF#syi zm+XM$#8Rcs`I>{tO$-m95i?Oi%`HjZHPPP3@%)*3QH+@ZQ$Tu7pw?-y(IA^fv{q@m&hzE*Vg>b z%S!-yJ>24?iQpL=eqGp%bzf#0zlVCk68=}V7}Yy<0GkY+aSujeQ!7w9Z$6rub(Nl2 z8w2Hw0yvEuu#xeAdd!=sq>wO_6fGcs++hnC`G1d~X5$xIMi}=D{ zsJVW{NX}zbmg|-PVHNdU{Re%SFG9B*Jj$1^EpAA)>QJhAoe%yj1XeXXTOEyoUZFF; zoU%Xe*OYS=Ad6H2x3J;XCZR992Xr3KvVb-oBv(4xm|!={RC-v?_G5si8;U8O$D^4j z-a8w`SLh+N>jTYU@UTr^IYc}auHEF>3;0nyWxpx&`L$8LY!6sxp)HISx6|@s#Qo`< zJec|kh(W;$-a@z{*()PrtEtJ@k=KI%@kb!P(rpFE;&O~5++IySK+~6D)&S(k<$SVc zLUTS%M@K~FWU;-=7tG~bpM0XQgVQ|X{d0ujtj>6(#3UeU*1&Fv+5wnI67B`0VM>$c(Zo3JsY(agQO7|*z*lSXYBw>gRuwPoE1iQ8_11%NiR==% z;zYdjHAEsib^o8ih;1;mxrFAn*Da!_*`EbY~E$`*!sNROPH5(d@9SOlfre4Im1udP6Ox{zX*py zgh2w=ykO81Zm_dcchhhh;U7H~F6xj!yi6Yb^;K7B@p9xfHNJiDl}H8PlH+Sqfm8H@ zbnL@QS?V7=_AwZ!B~@H{)u`Z492Onk98Uz#qcti>{!KX)fG$+tP97jgDDgf3&p%%Qzg$)$?P`YL90k2$M$-Wu#1{RxEb3hCV9cf;(9azv^ zAClbiB^2BtRy8~oLQaIGMFebGk~;`!DS4o0ncwuf^a2r{|3V+~K9r7!7cd8mm2dW3 z=i;PA$FS~2@iQ8tB+Exg^6GlQ2tQ!*V-GNLMb&5wl|KPC+vEY&7EpE~+ z!%8C(SYDc~3nbW@=xV07`L+BOO)>`b*K-t%@F*49n1Xq@cI5Fi&L+>J?VBU5WdU#@ zoS)mk=R9)xa|htNbZQ!hFV~hN1>JCQlQ&d09EgC6a`85cbwAKo_!NXBKIyYr73OvN zDp7gh_XI($<8yv&&(~ULlZ}Gr+?gfbDn^JidQBaKqW&#$10+Bk9a&MC?V#I8J4B64 zkjCe!8Ifvn zT4EhYUViX(lY0OY0=U-}SGSKnFU@sMwjLBgQ2!5mZywduwZ?(AulHViZ|kMBbpRn$ zsdYd^q!bwf?X{9tv{WS`2_#j9C;=&A7;*?{Yb&Ub6fIB(LMu`+kVJ+s1QOaJ1O<|U zCLs)oC1M~692t@WguvTDak%%s_11fTy|vywD~sWroqhKC?Qj0Rv$ubCnD43;yFe}1 zCriPPuhfdPg0VC!>_Db1jcZk|Go1Qr%E;QGs;yCC{n0*E8SNdxXN~#p6{>#lP9V8~ zIH-nXqxZr*R2{h$d|I3`|F!6)m=#lAQy^TG9adJ@OqOW>T_e9V=COFU#?a^QMt!w* zG^HbKRb|z(pP6lEm&s#B{n7hV|FDwwEq#ZB-Q~+G3hz9PE5$!1ZH33E10m@%#lHi` z+S;IAjEn!8SNZ#Qi#~|(`#CNEF4G{sGIaLbgQQ4SdwwRjIrsNddkm*chsWw~QG>+u zZ9Tp24z!3X?=FS6(yB{@$@76VjfzcYUwTgP{d$+LAZgT#PZGZANo#+~T5ptzWA<-Y zsIhqTF_x8AWtNLGO#V}9#!UI(*GIj2&sNAr#Z3wZ0`3&AQJ;M0*^P#-9M|5@kKZY@ 
z^u{AowblUW2TxNKei5{R_fN=$Z!O+&$1cAq{lBc?l7y%46{S|zC4{Ik6HXTvpZq76 ztxbH5W<7|MT%1EcV7@3L+c5019= zw^47LS+;ZV<@0eg+g#rbQ1I)jw|0H}@ipi)`53T(@IP|Qqk=v-B>}K3z&bGbfLDDy;t^)_$w*TvoYUVdoO_{YWVNh7U zN&IHh!r3fC`&&F$N#f&9q9UH_WZ&Cs*KO}~u<-)J`-JIj-4;2ZqxT%xp&ozY+B@sY z?V45JWZiFIWwnX?#(M`Hiq>22vrd0?mYp^8`|};pwG66^-l1?l5)dy^?DI=~gIHt3X*%;g*3>}U4s5$om zz6(<$YIEE!Zd_H5dFbCK_n8f&Ve<(Gvg;LzZqZ{dG(fDPuJwMm$;LtGmSkHg!A!D?*&yFiR&N!>HtTdP;`uc zd8#diq(?GCsnp;@W=9QPC6>%P8rbkRLFx-Nf{C8=_8rmmd`Ur^rM~f!3r4iymgdR3 zst%GxNAfYj_-q>&W^Ynx642fX?v5qSG7!h*1&=AAOT=TKWW8BU`3uFB)4a52HYV{-k z8Y85_kunnMV?WztKnOsxIAl^RtI{dC_G;O~iT5!sZ~e zKryrS#QRBp{a-Fb!D)&8=sh#SG~+vCVgBBjnqdpgpi}=QnI4;9c;^`#Jk{Gj^WG4o z{j6p_JYQta6l(w2-#`-5x89bo8d)!*-*K#*5^AzxB0AvLH$#BO<(atscs#7&5^xpRVX$MXmbCN0>;DRW9)17Y77$qai!OBSe0Z9&K zxKj>mF;Ny`AxC|N%4!GYSB(rGyn~Q%vqhEIDhN~KD_b^*H)*3r>Z0{h@D742cn9Hx zeEQ);PSf49*%vkFgQ88r=}s(7{|c+H4O_L#);fC=*swQOomM=^Dz@=ytD4;1Ar199 zdE}oW>dg&(pFJ3uX@4SrSJ8{Fl4pa2PEy? z6?f#s-5uv@gwu#>^1aG;@pST$xTGUnNqt#U=e#&nv>n#rY1+ z-ggb@7+2C37Dt^lIO&b2Zw*qr#p3(W=j+{FX~~FB0dZ9As`egS-58r7%{Vms*{Z9Z zu44MgjvP~ml^(BLF7YnnER1z1leWf$UAq48`V`BXROkfcWjDffRcl0>lCCsZNBLB+ zi8ceuuZ`;5JJS{6CTORGooL;glQ=Kc{&S0v^*ui&+Qgv1BH|)xYT(B-P&9x@ z9mMm{TVihVgFky}I2{cS?{ueRP0w6GX(zfmfv6rVw2pSdY#*Ald71 z%3C=LORiKslyp0wCq!-&jE-^DX7vvx@GmLC-juhV2gcD9YsB;~?ezRmW5!HFSChT%((&oOHtyV?~2oi6=FAQ8v zl0)6y{Z8styQrO4PytoD!0Nn4j>_Y|jxd3M9^Q81tq^(C$VcBasD8Dz(kK~4y*Mek zd?Cnel1Y&9C7;TYjKptsqe*u}mIsYy z#$DbLB-x<~s!3Ply6^57n(Gj{eH&*KqKe+TB(qmHe8aq?9{_-v+*pg>0SH+Hs3_yP40Yf^x?FBnw1Po zB;6_5Uk+(C-6@&lzMn^ZmXMTojL52RDlmnXBFbFHn3b$#V9|MSn^CWqv#qn-IbUS95aIOChwZ@;not3C5CAB*XUc;z2|dG%H2 z;@uBkU*8?6-!k$SAKz-zSw~xl6nG^ab^ouo;d9yj6&@4mGVOywC7y31YN6>^QnY}6gD-!Qd1PsUTXejR=s6xMT9VaA8a#Xhh1=-z&54a8x*>Hdj4Hw^u~Xk zfA`Z`_IXEo4aerCzXZH0JD6uk9dS7+b_zG85105nv{PV5^_8u55b-uct&^EI3a_qo zIzBziPaQQLGA2a+I0Hwv&wAsz$!@jT;acTF{tZ;qoYz3sX63kktWx64j}nW>6CS#`$%U9wQ zJt)=u7&^br^iOj5Dsq@ySQ8#JM>F*}V=FcAA63Yg39{L5JWc#^H+` zVp{|_6pufVDzd|p!xx49pLfW zQM{a9wNe|4yB&IMa_+u25eUPpOg0-;`c-q+io{ 
z{(T(ja)H-?X&*WI4C~_F?GtNm$-D+?J7#0%1ZCy!=HEUszVgkE3|OF=L!qwSAUX(ZiJ8H}uV;*W( zAv+(A8!uw{*YoN`T~I~||CXvKOxmX4u4irVo&O|MpxU9v>5ZL?&Z%-YRxy&E=M1^G zE1?Iwk(S8N@VhSO$n0uW0Ex}==+op^A;WEKWTDL*qc^fyWmLnku-p`>uLnd{42Vp1 zBXz)(LCZl)#*{irm{SAxN`NoX)X2mlf7&q-Sr?sI6Bp4Z*fIZ)g%4sNl@C2R?qOmR za#D;AV=$Z4n&GC&DhFC8BRPtR^fWXUq%^JMK6gjPw9wNOh^%z8dE|fU)hB{OS zk&#@SM6@9?2O?f*sT#AwdhFxpU}cM5b_z_7obK@YMG=_cJYP}9sW3L5HS}ZXH!F5b z+K0vn!9sk|HnlztH7!b~ZPk=TS{BXo5Li~ONU^e~CeJ2qh{ zuAy}SN0t}Q3-PyRDtR1I837hbxbSZB8MZl&xewOo@6$H58@bHBNkZA!xzMHrjB4Rx zF>wtqnfP(BDfYo7?jLfIgK!xe2Vaz{YQjhOrBULd8QJ9f50ZM)6~(@4aT!-!Ird9+P1R<}V`fi`g|_vi=s zQ3|gzG!#xl-?*h=(~F9y%^q5*#QUlTf{l5IV~(i@$359&@qf zgml3C*Ic5W%AkBiC7I%%h0pqN0@Q_od^iz5A z?GxnQ;woN5dzU_3&}Z~VRUx8doMWvPq?F{w=8>HZmR_)o`H?Asgsa^hKiWA=cPi78 zN6GCip9s>eKj3qhl6RQj+PEV7=gv1_OK1sN&toYfYM z44bR@+IFKuSPHDTpVE6_H=Mzjd)_%glMY{mu8hKlNvC5b^WnrP+IUYkzt#Vs7M(E}P^ENxj((hCc-=K7XuT#gCB=^f}5`kERWnL=x9Tni5Bd zC@))2C@_xa87oNSQD|;vw&xJJy=zH6y1-18*_UyS!qu-3$4J$ED&(+3)4fRPEVnve zr=q{D4Hv1lwiFq;0Bja+Fs({cE6^KT)kc2=S5C9|3AJZ6g93%|Y@|XeJpPNJw`#Bs zm56S)+HQQ+g7Ly^(Z|WaYWvg*W4>3Nv~cU?5)&Z=_*B!tnyJb+ z)GrE%X9i1vV0U`e4rouA?sEIj$eMxiP8}!VgxFmfSu2>e$Lss3+|+RWObs_J7hS`~ z_@$V7E)E>Caq8=a0^&J*1SRb~@tx;!q2y;DI!qR>WVTBX9b12!z>8f7Bt-h91l4eL z)5UuBV}%ALOis3^s~WgH3%c2rO0oi^j<+5W%4RfhV1E`9!iM$ZVE)e}N4r3bHs`on(lx)EEQ43dc zkmR{EKAdMr$I2$7!~rXL^n>et=PShMGYe31ha6om(|DHmbyGX#Et4UG_LX8RUt&jB z^r_q5X4j+|<9=qAfNjj~XJgB(rJ=-hgQUju(eWQmBw`Zq|vHCjs1fJxtl3MCAq7i##I{L?kF0oahRbdYW1BC%2OpqTGE5rf*RA$ zNJhBg#6v+9H=S7RFmP%J#}vs!$#P+VOb|>ZoXaofX@+r%l`;J;cwV1M+|etJCE^e{ zu)%-HH}sSp91R<$acahf_9U~|u^i<%nx8GeC>CDjSZa)Z`9KI6w>KgmcA<&H=kuAF6U{SGDK`2>N@zQPhSyfE~r}5Ijun z*9iNQST3CPj2ltRksCe<d*#$N9UO7ZnvSjnMZKsgR%idD4W8dVA~FknvY~N%iOXlzE{($8=V^Lom-8tI6az z>AtNphZOqyiex}{U>W^=`@)ILVr@|brKyI{pmQfR5dtyt5aB;D$Yb*_0;5Qk0apT> zXKP-m9YzNaDarL_#ek6LGL|7ED!RyCb3fUyp#a(a_>esia;y3^wEJb?S;12cTU=>1%hU@cd zRBxRVYMYuBmrA3h7UhX4&s-qK$A0+vRY0W!?A^(8!!@E!>dEC(x$1YN?2Nix$0ACm 
zXP_)?r3byVLYrwDBn;>YJE!%5T*Fd&n`7ZX%jefikhzpYks-n{%$QzG1=b|wE{G+v zk&nRR2fNBpk~*73=7(r9v*suUE~CiOz{7<_9CJ;X5v<{-^Xab@Pz`A|epNaYcmp-R zYT@D52#e_GsJ2w!1j#Hb1BJnC?QMoMw*C$|49x7l_0T9=@w3))@|w;Y&l`Bc>MjSp z%@k)76a?=tMR&(S{;g)cO*gU{$ec+GX_iMI^#Fze8Z-T1;r@>g?CV(pQm8 zCFdR0Y-+%_9!O85EW@D8)Oq?sMe(D%dL~t+l9VZQ4L2H|!J7=n+Djo|sC+6o@l1Sr zu&2@^>xT?BujR87CXftG!_aQPsTp5Y%R%9TC*c4_C8;#_%bcq0E7@LtPiL{G z*ZOCCEK34(4GiQxz(5WKMjgiVqp*bq#7_uT+s=S*MBlUQAGiRClDFpg6AzM$5W}Z@ zNlU3QBMjKVL2k__CCJ zW#;g|`18PubI$la@jALZO6+t4^gT@hMzwQcL?kd&1NQd^xF#-Ff`V6mt&HZQr-h6j zPM+Ie&L0|9g;r{6*(L)U5qAYPqGaAr2yY)y$Dhb7m%yAyOBH7T<>$r_;dM|4*4{#? zj=o1H1M`b{@}X!oC79O2rt5e1pHpf~awvi+cbkCX8tQVJ=nE)tRS|CnNH(c2aihv>Vd-8rbC^x#vv{VHx$ovj(bxB zYCEQ>K3A1cxHmK(I|`}X^7bLh{YKCFluqlj<{Tm|J}bp3N?aSOoK(dtt4NC?&*Q1c zE7}*WU-q`lYobcSly^^dPWqN7-k3ey+~%|r3!N}!{=Gt?n*$^qDvvO2lXq{QSmr}A zX@E{)i;ILsC{PKRZLoUNsZxFASjT*4j~M`SP(~mEcInMhM7ENt)hXdYSIXAQzem4U zR)Ub>(0bJ)0~DQ5j0mD{H8rHvxv|BJxddhdR+xPFo+?CIW$fjU>+k3fE%I$2*uG=i z1lrt_i`-vQ&{(Be#63$(xXs|%r*TX2_aQ-);IRR%jy&Fm+{^L|)xN5XWK3aAQLyk2 z_Rz4Uf4fFMk`)6dwr5#NAOh8=H1EZ*dVFu z-w8((2KTY7u9UxhJ&1hX$X6}or`GCe(Z!}OZbaWA^bd(jMakbzn~nS3!QM{`Q=fkP zQAn;{NX47oW8K)mUIluztdR%Q2*8RLX}t7|=UmDsl`*630-b(*7&&6(Z}t_%KBxy! 
zkM5+}(K5gEI4fwH5Uh_6HzW$EA zQ^W5zo`G|Zwvfq8oN4|;%~t5p=&mbJlq(*BtRVRSrUbFZ@#!7MKUU6E4ff<83I@SQ zws4?|5erl0iOFyk7zZ8;(N5;@wwM}aS>~&6T6M?4JQ8ncD=`JQiawPZg|N>k+1fBx zVhi_)pHP%;oH@heHOLU&EIggzy?VT)!`nSh+lFAloYcvVOh#2X#ksDNroSafX zA>6J~Et5ge6nKH}-g!r&=R|lA(1+)AtbQ}4S}?yu(T1bK%v?o8bZ|zkJi`Gj6B{TD z2=s!O7#NOp9;=Yg{$2{jMMUep%ZxE%r%^k|w=!SF4|IiKnNHv|8g|)ATba*j3lmC9 z@{MAK^->L0QDnTJ+1}~0<&~-u51v^$SYM{^Fw~nG42NNx6EVHBYrCvw7?8mSg%8pC zen=fkrGJ@fvV0C+;IMH6grJv<(STD0;vhT*ciawyk@vxfiVU12Y592&3~opth=?7r z%sx7<@MVVz6P(C)6{}JNZi97%#0ZRORu^ya0`di;SM8W|aCKJDe%URhw>#bdSO{%m zMk9ehN33!D^-`*ngYtTDizl$lLUa5S``b$uS9|qB;sLgvp06@TKA?mu0q60ZFt8R- zH?s?{UYFT?2yE+wpcdihZ#Ybrf-z&(RMg5S{y{LHFlR)Es%YIulme8lg~mJ!wF!Xq zSqpV^%Oi`{l;tGEzswq}bL{A+J-w6pU5LUUq%9H>##GbWbSDeB0w9`VKTk-OuVVCT?#d#I zHa&+>BS1*XMl$V$0de*1D8g@Y7U3hQUUh=+6wbH@#XGBPWZqQyKD58DzuurC0qUw{ zb0>%GF{{k0dCEOP1@1PZD9nmY=uK@#{D{S#@>gl2$~j_$Ws{n5g<$=X za2hCfMrQwkgNrFrftY)LH!ud7|Hfh!qI?x(CFQGf*0&sVf*pDIGGS> zQ>GA9k_N1L4f~wa6JcEbfRnH%G@w5%pLerW%r-Knzg9+CCmS2^1{FjS27p@oM~zAI z4=SN59}NpxY;(0XGghYP6H7PhWC_{SkZQ+lsre@ePQKcympDYNC9 z-i;egYacd~O)CFpmemk16>?PMJfQ-3%P+)n9)y+l3$COXAc2#%G;vXwasGECNhO1xT!;E1HXHJipQr(* z#l1<^8Z5;oN2M>Nit0=TcdHd2m!~t{0BIA>c8&icRMJMN#{c*dP&!NP^;jOFx&S&3 z{a`|1&1murcK;Xr#%l-7p1=kAKAJMn=fTg;fLno4{NXPt()Ir)t1ESx9za${$~3%* zZB`mX&EPF0 z(vyHbkC;Y}^Pbo_Y|FzeNoXLXh!H_B4Fog*WW@m;6+_s^dVd!f;6GbXV%5}YT0l;m z-ae*WIExtbD*2L9!Mrm(z+w0k7XdO%mjIxZ;N~Q2IDbDtsEij0A{Wk5J_s7<$$SO0 z;DrNi^Ku%;tdr^jwf-l|^<*uKer~nm$-%$Nfujh_8HEsipuHS9Zb&rFL0kGGXeU`y-))nD5%wDeX$GNF1K;56#FF?ptB=!f!xVG)(`^l<#5JJ z$h?)H(g8u;8JoE?+rwE(c+d$HdIAmnhI%WhoZ#WRZ&10*y z#Z4$M-^uB+wj@{nt)yr%%mE4?{+c8zl2~cTa2uBQg8fC4+Dj4fa11;Ss+_jS!P?%G zdaJw!09plZaZXR49Bj&D;=dQ0)(X{9o&4{htcm`J_0Qsg`{HRk>$I02$-KsS6R+H` zl8}yKH)A!psRZ-I_<_s7aBpZwss3DcC}n0XKKg-=Cft>mp{};KWR&`$yr{e-cz8qr zkEHzAEq!7M&rN=!F~5!h9A!9Jh{h5uk41F`Q)4o@ z*Edz-C@b|_nLH1z$vnT$=;x8Kjek8h9v!d+D0mia&I1^c#U#tNZ4%ju5=&}1bve_p zm#<(#+h)_AaFy;7lRLgg#xkb6f{+PyN);p>-c7FWrB!2tV6^GDqCqB8*E0FpJ?9z* 
zRsrq=#PDzhmgwAE3Q7(r0gZrS!mxv2>XNGSIf{B}VW8SsmJ~lK2F77E@XlPYG_-?7 zaLej1l?AfMAaLTJ-RM@9)S`}NreUSj0IOeSL|I+@Y$`qbxCzBnsl{wwyAU}+rX6lI z|BOqLvO@HZGr(;Hf<~S#MC04DAWE=Qw81KI_C2S+N@rWsdM5YYztiTl`pS(Hd7fKA z(!Y&=kRaS3Lru-ye6STtHTe`RTF?6Irgl`)wbTfFpue<@E%ctq7_Q$CW4N201e6sB zi#!~*m%g8b1}~ADM3I(hRIR|hL({^snlvxvvI4gFxJYzRN27r^+FLz>8<% zN8JF5Tolbzvfq??X}rr6(@(-gT=R2y0I^;UTvn^tzJoKR|Mb)1AAz*526c<_cho9> zxmeyz7u^GcwWI{EHsYED`7ROas`)}TVUoprE3b;!oX_S(gRTU3@<2%tpq4UiQT1$u*!L?wC6vyd6|e{%Gk@5*j9?K+~}@SOK}N$ z%4u3k#shbx;$pao9x8qM##;8krtuxG*grUu{G2m+xa+p>xEgUZ7mx!3UfqO zis{2aiBlQJ7qbKgG)maS#1cJ_esNwdoEa;$!puqPNbg(;nXdUL+Juh#tgv{fy{txZ z`7{54S!2wxd^vTwDbF@w+v@grc;cbH+r?s$is+vA&NGDH9e^1TNw)2 z?+d!8I_XVC9*ETOTX&g$bjF&>Yb&*WVKU3jo=FGUOkn>j&g8TfACf4+>V!ULCOaL| zFMH!$alrS=*=TPd?EnE!FUS zvhe5}JkQ*vfb^p9@MtGF)_me2y59`nDK_{CvApJIIrlN99keoDXo4gWqhUK55}43U z3Kt}$*jFOjLF{3nlfX`I(Z^FVk?`iinEX%Usk;rF08EJbCQX5l2N zf*_gMGOWnGV}L}pc7XK&CmdT04|%APJf1{{4iVc~P@PZk>I%6|1mci5EH7-wtRQ~0 z`3S7YyrW9$1fGw+sWv~fd>0@yUFTEjlo5;%csupv!%TPS^vmMN@pP!Ef(~%R2c)M} zBc6S|r&z`qZBGNEe1}+W>1`+qRX0=c3iJ_RxqFVGwbtZo@3)3TEM8$5ZX+*bvytYD z3XV_0L&JOs5AI5O9NK>;<45CnOn9<=;8I>1CB+p|MF$}kriQKTXll={SzyftKDn~N*8`q+DBRu9CHiCj(n0UW4AOXXGPA8l8fH_K~9 z4-Iwfd?6BAJyaHvPcg(jn>;BVSWj) zUQyMGfshYCInEVk3!jU;>&s=AAQYnS#Jo~qQzf=$yF#C+U0Ipq%zG!wP~&^2&{8{b zZN^vIG|T?_XC66QgPheKM+b*vq&8nr74^nfzTI`jjS)W@IY*NURw})55j2}Bsyv+I z?jnyU^n&(S6RlePK2Dx#h1XLeK)r`UZ8X_cerI_UjZ+Uo z_H-v{)6lTKJ1HPuNF1z(q!-Yl>kZ{e!Pe9nN2>xZ3__YqqcC)EknC5w_K`37Ik=OnrD zqM5}|c!aq)mV+n9jf!)Hqe^C~2(4B1Y7OpKS0NG`R9>N_s00ht7kzl%cFkp>FyQ?| zTh!kZSJ6erPTd>g7=0-y!_iFVv$S}*EXlBr(J4RAPgw{Yp82yi5(u7yP>~yMOf05K zFK`Z*0WYKtN4dN9>0qTh@KxmqnAGXz>Lle#}q=X3F za0=)Fu1w)orKL3tM=0{-Z;pDym5dz=0@Y*`xit0+$SbOK1Ke6HvrZB!N+@R07?JsM zYEi8=ZDo6st9kqooL1@Sw^G}Cp7S#@qvm|EVS_VO6m)UEEt&0rUI0q-DgmRY ztw!omU`aM-XkG$`td$30@{F2kuV02%z}Lm} z4F%SR{26c=cG^C(bG$J)g8#_yClnr_On`TQuU`fdX^v3?aEQFQSzRehk6 zhN(OtC6JhX_~+7@vv~-HpMq)e#%s}3j)%NH%ltkhISAO`FfaKnV?02MIy#9lmlAOx~`cjv3Ot?~1^I%qqC7C)1FIfO1pO0gth9u~{&b 
zxEzf|K3mLRstROHhn>e{O87bk6c$|q)&|_TfkO^R3MjI`H&fG-Rdzz1B{_y(z~(iR zOpol~OeF%_wfEwXRvD63o-OXzdA)NECfmzE?s=}f}*0R6H6nayW`cUTRzW!wtw zvY6tGHe0RtM9u=qFwn){mD1D9-bXIGV!=K7j2zz7$}#H=RFZ5mW5)r?{I*!)5soXS zsCY5R4f6Yc9`ykAFp38q{DZv7eH9N4^)-TS+|_-&!$WOkP)39+2FbYceb5`)i!&m0 zl|*O-=SVAV@QN}CT}(2J=Ep@yOMyz?=RGMp7c&RoQD;_-afbB0rGU&*VjrMI8BKgY z)ZGIVb(9zZ@e`pP)U{ogQE1GdRwLwL4{n1l0=Y&RAi-J>bA3vY1b8>3(S#7%a{q=* z5~0bpSS>A<+iTOp@p@I=gla3JyrKjlaK^+QfoeB^&r@5q^KJDZh9qOBofE95vb3O43ZCLM&|l zb5C9_f`3-+xMGYMYXg<5Z8(#KLirA01-O#nCSZBl*RwE*l43nMOznoD*6KUYQdkv) zP<=W{U5g=Jcjh(cjh)NYry85y@-Us`S%F$_dKVOVU|jg!G6iEEX}Yg1VU5Y z7%TN{+=e$5KX3x(<6IdLPAS!gNHs-hhUhd0nEt~ZR8&ivM0Gil!Iz_xssY_m62}Z_ zE*#I6B2G(%Uc&%pppUsRwBBv7f+Tkz6ghM$^*i)=&>w;^AnQQ2fUBGu5>D^WRHk84 z3XnUYsljyq5hB7K(;GbfC;Zz~5J`Q2V@|k7TcNe1SkNtL(#Gp66ImY6BZHnx1@lq0g*ZFjg&-^DOB-#B4Q{zC!@VtCO zd8rb>*972CJ}K2WzZI8W>?;~~;EOgn(M3~d6ZSVza}q`QAjT)77U;druVe!u>ekWQ zv{9lwk%^AummgL(bZA7PBb-oz=0w6l{n;id$_3O@Ah%>Hn|2>Ote!Rx+Yt`{xQ?cY zV)wSI(Mb_ifb=3#GLd-WYzH-B@QQ5m^bj25x3C}fiQ#^v*hJ9gPd9*I6uZR@VNB7JN{fkckT4c1fuElMJfZ?yddz0 z^;7*=z+dZo_=-ham1#Xi$Xh^L9dPT%K6lYt{j|1o*-;6}2840m6|w^?cLOYQM5iW~ zc|Tvm>UhC&Z-qXHjrBC|0Do;4`sA=bu z7N0_kr48?n)XaRiFk|=`oW4d<%&OPAZ_IUcd)&*X`~R~WbOY$7jgj}Hn`18jIh2_3 zCXem&Nam9!Pu^B}ci;o3;eaBvHQ+ndQK^2@mY(lujcr;#`-2xF5HGYTAEZY{RYqEI zuzsHSNzbFll2O6C&O5(d7_jCftRJ7t|MA>UuAD!qU3BL^6~z1hLHCC?E}aM8yXH$; zOs0a#vl(3&DViVAVy5XX{=-F%yaoE=L_Y2w{7iW~iHmo8dQuSmc=*rt2_%f`V}xHg zdH(ogP4?o+lkJbk_OwImAJ_1Y!{RTTe0N~`W58b+z)1dLyzj-9pVoFP8Wbm~mbC+_gt7E!%c}5c1`<;#0P# zmlkrjUjHg0V}9QUpWl3Y=Re)UZ{G9xFfnTNTGWL?{hq|dCE1_!@$0I*<<7V6H_Jc# zP5wqRK{+!hm4F=$pC0Ll-TiL9zIN*Moqp_$_FJl-zZFxq%9O7+jquoJHewut6yl(+Po%a$M!$Ic=2ABfC!zQFBZNImH~Tq z1b5kIWiej&s6M?8x43-NlS_t`JO8eK`E*I~E3YX(|2TLYzv*hK-?Oj1^r#g@U(+1v ze)6Vg-5X*@%E^s`?#<70&7)tAl&)R#LC@yD*-Bpj_*>hZwn6R=&)=ajS9t30&UBCF zwx6?}Zk++{aDVR=yFIJj-?43qAFKcxG?kkwiE|CghA{8%!+$s?b)^8kdwfw{c9>tY5yY1 zd)-sNU;gf`UvGKv<i?opcw-@JsYrw+BHG$g}*Rx;3JiHfsdL8%X(oQbc z)As|pes7PTV=gtZ{@(AN`bhWnxAHGe|NK0s{j_OKVAhe({|u|tGFkO6bn^Fnb@`Vj 
zuT9k%J9kcP37y`%;<^5Qz2NuHH`5ooa@D1uFaLPz^>?#ZcN|^*@y8ghb(h$sqpmLQ z$9GOUQ#0)bdyE<cQJ0`V8!YkhfcqZYG*_!%xmn~lYCZ_ z{kMnCMlN*)et+e*WzEJNc=*GQOHZ$3#Jux7$EZGhecROB!|zywe$e06tyI=Ck{`@G zhv-*p&Z&F8jM*`0*!H_p%M>rp&6kdxh2XR~I#_Spdi)yV{>A6ZH{B|76L(DiDU(lP z+U@5ay>7j9|I#2-r$ka{#8uXRpB#1Xzc@XvFR*7_>GT%};uP&PJGKe6#>t*J**~*$ zP{~I(^kBH+&;L?|C7#m21RAde`U$7$GDd6B6O=FSJzjZl@eXxA>rQbog5Q^=B4=%+ zF^Y+^bu78%@WyX!sZD=t((MGV0Qj!+4X#Fnd4!s9JiZj>ckG<~z+MfOS z)tKH%&uJwR?nkU)lsl8#C2x(QLz5Y8(A!VHF)hfL9rB_-y6Z+aUDhL=PU&0I@(ri1 zIX5(C?q9y^bN9F1e^bv3=~K(-W_Yex9WG*0GOSHhu||V>r91S6vu|+JbAsiIApftv z?DQ}2YoFbD%|kG|>_8+>z2C=V>(7jEcHmntb*o7ISUN?~8|T-Ny4#TEszqAhE?OPrnFHBONz|2VFzg-!?i~;KHPee6#R9?)4>DGPCjhDrku$ao$o0&*e~Ce?s|INdu^v} zxV_r*q%d`8(9cHC_b!zfue87Oykz*wzJbM%n%K(lKlSL)2kc@z#wF%kXrap2sNUHvXfHO3=K`W7;}u7L;2&`u_^ zz?(;vNgpRHST{(2a)@p|g*)?;0!hY?@B5P8c6ycA?jLp8F(uICK;pThn*2#q(=QJy zaZV!m*saV-&A1-CUaYKY6_RhW8m%^6{QMI#>l(H^C5o5o82>qlA82p*`EIp0el3@0rb;t5qU8mJUaW+g)G_B*UJfIf z)|%fi&dx%LdaV(aI4YDXW`CO2Xz=2xYOMfl3bI5)mYY+h6nbGA98;+srUf|FGpi;) zhNM6cP}`AbCefq!UZ5%S;cml~eWtZ4-77=RQQ92IS@a!A!~7fLM>$&b-7ZfOZ$MLz zbNWf56KcWEb7d@023XV{i7CI1^gefUA-FkJ%Ip?M6~rn$V$OwjSjAAd()(&HM11!A z3GXgZh?M}UoZjn{*hOn|0@@il5^k~aBemFO%t?krvXW{;AWDt1o(rrEC)RP*L7-=tu^ ziF!lZpSUME;rNKKbq6B%so`qkpFSDe_I`#R(e&f+Fb?Mw!;@EF$eqQKg;L{34~E<3L(Gmwo#W~qv-Vrr zdNr9gil=fDrDQ@OZ15ileEKM;LwBmj3a2MMDwAr)M&!_VoAB}y5A-z_E%6~R*MC^< z6wa5fmsZZdG1A>-bAdJU2;^jkhnHpG$=>Fd2X}=BC(omuzK8SB!6zTqvu(7<*ggsa z(953Vqe&;H`rExBQ-a=}+4wgw+xw7*L(k>_fM6u-U{mO|V=Z!p%o%Q363&ba+S4(- zR401MI6xXO(tiY+L^*88lrq~SZ{>8)el?M6b2~H;ZZ9@m2|}~^yErxn;VuC$5KaoB z%aXuo9xk^=OG^jkUR9v{$$t>&s2&7 zp@9C`Y@0&yrI-NB(b3VYuTRN6KSo3Tkzah_>YMWZ;4IWpSHPyxUwhhzf_@|Vf$*-N zwhTF_*YA@m&heiptq~=z#Xb!GaT{0? 
zPZ51lKb(Y?{bg`xIhY-Er5NMPm;0R9c-8pzT&}ch_WLAh<~)f9+bRA}_TB{?%Jlyq z*J`O&F;SryMjIWG4m6kw*;2{2yA_pCsZ>ae^XSBcOl>8dC|0!wkT(>vQcV%=6s${XV|lr+X-gX`!j;jF&Ifa!w7g zgGu>if8BXAR+V*?$>Vy5B=QXFdc{Gdz8*vxmgMbQhGfs;51ktRxH>DJcwumb{T$> zt+ZB?@QFg6{-NiZ%DusdiZpB3X>RWV77yyDYw-0ULdy6`E=E^DM{9ID3H!C&d_;Gw z`!Zq``JHd=TZ@2+L!S{mU|kl*nyH+-j#c*)Iw)AJKRc*e!NNWoQDt3SQQuXkRwFs2q z>4S!i8K8;JwKJ#Og_MmRq?dZH_pmTD_)ChOe=iIcEJ^d|f*XGAc75AJG0&3pvG|_# zlE{N1vN5vni@PjK{l{8wLhlm+R32q59+_vr5WHQmuz@JCAM!9#W}Rp`a;N%R2KgFPZJ4qKSaO^h-}&Mj`NWCNA%NG4-3Q{ADWZ~71?$GpP&MYdQVdh^bU*h1#RGahREl-PG)Vi-@2R$s&fjQF~isWf=)KZhXr*Ybc zp&cNS*V`Ot!M5OTlg?9L$`7AT!$avDCT!XSX<>PP@hq5Jfj<{)q$elOGfj6KKRbg%yCZ+&aN{z{VJ zq^hu^0l2>iYud{|izm28VwR2Tp4Lcz+yb)zJChP>#q*;tUFhTcRF5 zp^AOxW09M=+b*p)d>-pi#7*KvNzJo!V4Y8OjQff{3k~)^E~o9CGW&j-d7!D+&MC0q#MP03X^VvH%TFhBg5(#FLM( zA>ZV?;yaQ#S zA!^8M0ht8~_ryV&M*kqTJn$_)^1IrN%z+*-Ei%iD3hR?_257f{a?u_-!{NRsRX)_w z51N(Px7ES)FwnLB7dZi$6k!1LLGj_Zlf}I$FM*+6%rq~`UACjc4EJ%nyKv_)QfLUa zM~4zx74x5k9d~hbRQ5D9}K?h1EqZZAVLq9x5RJ|~~ zcaWi4KpFa~T=@4Hi3hbX!|Q=x-0wf6RJGDl&^IgrAYoMu@_+~RFrkuZOqoT0_BvoN z60Z*_J|K)H_IpYF>V+I8zJKymiQmawQaPar)))@-!51`aV-PM(wxq^MoJwZ{3qj0uZ$GHwJTZ$_RMtip5)v1KU|l7)pA4Q2Y8}Wnfl&J z_Dpqq*~tMFfAP+stjQf6lJ-=lp>Fqy2>~G5El{w26uRVtR8Bqwe}1yIWzIvIrm$m5 z(zuBJLn;TQlD>IaChQtD=dIcS?U%Jf+XJn`7Sv0ka~YZX(#ycyyZadHIM@o*W=d_c z>u|X&SX=4gxX@I}+x$(`Rg!>7##a{y?K2Be>YcLen%}|VNQQa$bm`|e+RIKbTu|0r z;sP+}EXk~&ObEV=w-Ci3{FTt#pK5_heaU|>l@x8zcqFG7U*C9QTfq;X|M8yNWDUyz zIYs+cHEk?Q-#lKLEZB%ur|zrj5g$68NHji7D(9{#dg-R;d_uzP%E)xi3sryUbUewq zau>)H8>o01N}Vp_UisP8_gJ_UM*DW@H91Qi%wtPH7)BGjSj$Pz`Jtom;}(dkh!b?_ z2k^VIC?bRQS2&e!T?((DCs3-00E(;F8F8&b!9M-2*;NMlvChU#t>|6@6~ihrLC9p8 z=f;R`rN@an8NRAMAJF0bjcCZp-(0#9F}v-01f3lm{Afl-h^h}U|F~*kQRoy+rdlL; z&*xRzHgC1?5VsojsLD_6H;MI(UEPU-GPJOa@}>sXNiqS8?3M{NQUsh-_K3CCvT9qc z{g3?+u?kpZ?eLqY*SYC6wRf#{+u&}Zl0qxHNhI*<9Jw}ewr_qZgkxeVMd2Xru*W3(nmn zCeaQ)55>HUWiK4@cMXkjB89<^S;%Btxf1wP2Wfr4u2`XRPWW5chdbpfn&_Xct$w(Y zH+ix(x?-_^QB%^0SMND7!H({}uShe!cjg@Xhh7>Q4}7uGG83V?Rn>6GgKN6?tQA)7 
zCe>vnm;3q48TCt_F^hQf#2ybVxk*t9ioeLB0)QOStaIEBBo(-y9%)Ltja5=q3Q%Sz z#v7h|#Vb{?3HYV;Zh2~2RQ=6K=F%Hn^zbH;2BcZ-ci#nApSp8SCzG@zdhH)aEmul8 zFD?4BXU;LMDRVjr=Z$sLh{k!+_G@Z^tOLdKX1X!G%IPGEX{h9RR{Hr5Gtk%7)~K2h zm6*?CZ_m?o(P|nm>BVJgcPPyG#bRoAYq4Q!m{PR&qw-Jpy|cfU`b?g58ARsrbE#c; z*htYf#6e1wWNLqx+EPQbl!nTrOIaJ43^0i%A)=ox-F8pGDsVfEAZ!NcV+JmWpGv5z zAu&Yl=wK;tm2p+yNTs_tPRR8?VM-q^Za=qlSslxPJ6PbPwZWx1nbgd3Lhy%;S~w`kEpW=njP&LqVBe!n9PdjRj|e ze1m@>$tvTGob|Mmp$}(^A}d;=$OSfP;Zp7>yFN8k>#=?4?5HV|Ol|HPIY4Dm0Emnm z@k(-v!s^}VD9XCxv($?Rg`q?mBN)}Iu!(9IQ0m)}Y#w>UWW*EnrOgFBbiMMR?~{P9 zJa#-Oh!GHkG^tQW?w4KHn7+El=M+P;J0sSNvg78`O{fmxAygGJ({-W3F1@FcM%lo& zS6iU7Pdf?lb3+*nZwVd&E&58z4GtTSp`~_d)!tw zEdmxFSY9dSxg_EA2%J^K^=`&r{`K>m9y0)G&B&K-5`Qp|Hbo6}_ruhH z5wDGurh*J>w%iQR?PCMFZ8Bs=SpE97%Rq|`xQbUjd|%=C5!=c>->X&dx<3C@xkuq| z48mjlgcBpgREC9-L))L26aes#Z;1zW?Z`Jo95m3b<~v}X5^oX{bu82ZM{ZiW3sbd!kV(g-oJtY3xwVEW?8%Oue|64_g01Rj2Z@Aj*wllq<}E z^=r_5q1f{a@K6`7UM|S(MjN zQ>+AUSfUq>Jk!D-XK94@yHYsmqED@JB@E0W9sUi0rr}>_ue;ddf7=1Ql{0?`67e5K z@XTXjl{}9liYE3pR}_YIUV2C&b@K8B*vXbB%iUJWON(EK+v#z{Uio*Nz!-}9q@vLI zcbi!)hhvHKXV_BK{oJ#)8c{|&pzWmXLA`d}qKIkP{8Q2wU z(>HTo=VBGlietpHNuX%m1i~>XBawC=r$uo!(4Kja=r1o@P68bzY?i4K>Y+DdkLvsP zBUC>5eTn?L3et{`-YqF)56&@SId8Jn(Gf@~BRT>BfAZ5f*eg> zaGq$91uE$n(N#fZK|oMTroAlc9Aj$AXD8f^jv;A>M3=9$e8iFMt1?NxrLIS$%(VMa zBv+Yj%6gRJyme zZ&9I{P6wUIP~|iZ@6$+9-R3C?Kjrh=S(-J#1Uv*^_7u0H)c=gp-M&RNw^ITBFOoKt z4Obp2(m6i6W3;jK`uuR$4{8%kYRbJ`Nha0PIQ`2;zJ9Lxg18`J@I4)h_l@4|Tb490 zUwWU3Vj3IW@GN9J^#n1JG?CNr22DjPEsI{hx8Mt%$&W*4H?f*jDQeDIBc(Dzv5X^F zb3DO90lwhZJev-Z1X3QfwxHQv$G5%R9c`eLe`v%Xv(r^Ww#qGfIc00!<`I8_;{z$r zhQt$+;wS^hA5!~Dy~FLTdoJIN(8wO{Ba_K1Ep$I#B!>~~ov;fL^5=AjFmAUmj>Y3K*z58^rb2I==?G7namz7qx50OlMo3S^Gz%p;og4Whj#I6$ zX5~mT(VHfx6KrmnVLF0|0Y!2;d;20t4X;OTMPE+kVg9MA&!wS)j`QzxC;BRUCR$gV zwJjzF*>Gr&EULRkCuAD(N-2${?Xf_u zdk_kfiL{= z8e3@1%%4D6_@yVWod>LlCsuk(He@h6P8tp`E&%CQ(eGQJxMiL@B^?kYw zHS8V3YCAiX%=utIbx9}>@>xi(r0Bzj2bOwC0rz!feRATc3W&^r?0VO2I9An5Rl{p3 zyA7NNrC!oTHeBC>_95lIa3>MLJfOaRXp;6x)fnQ^MMx-W9l5i(%sYGGfGmFYpyLem 
zS<0IbOO{8z(IYw3<36Kg?SR^!1wzfYE2-s~17(yi?XgO}?p~AZr!6zj5|<6^ zqhq;FB3f%*D*mljL49v}-z~h^TYCfne@>GA2mDy4n#tLhw1?$aAN>XL=Vdk$X-%`bwYmG$qARf>4@~!(*n;WF>drY? z(RVoZO&RUo){l0|uNc%@Nih?YFOEDL)Fn(Vl;?7$?A^ZBP3b0Huc^eP0!$2F=WzDJ z903+Y%I_cPUd3sb?ZUTxkcY^(mJ(li1tnECW?7Agi3z4@Cq$ms5&gm_&=MV{U5|A|Q4oVKVNPyYB(t0^y8QWIiWc#@&Pc zS|8O`?whP1dG>m`>!#+UFX_IARnpB+7VaY`K`Ddo)}<;bUph_5{1>1E6$=XC0Lvh; z{7Ajr@YpEriU>D&$h9IrAL9Q?4l*RticI$C>N8x?x98fgbj4g z-P{-6@D%^>|Ndy5CN5EVfa=_m0}Xkbs30-Z2O?zzOi6#ZCmF6T?S~7?=x$J7%Aq`O zvgtpMKH#h=eT0iiUiY953giwYv!#$F>Fb-Y8cC8bBx(c;O z`PCojs>BIZTnh9d5&-3P6UI|8O$u7|x;!1#!ZXgI3EWajQF-QD3Rj-cs~;RI(MGVS zW1B2WJ9Jt(-P0$;DJb_tyL+53sJ4j@;XUj(^HNQ{v@p9VZ?#w4O%tiPK}W>q#^4xp z_(LJCzI`h#_f58%B`lqiAg&O@S~|x=WB5Z|3RMQHmhjUT?51;t*4DdQSFG3tm&akL zmwsE7^91dgdNF27D0_MiKBnbZ zI9U4{IF6B^ACzt`p~XDO5dvr`4R+F}qGKJ+H?dB9qRgK;M|B4)mgZU2OnT>S-hg<- zIRJMmC~q&S$c%~;vSIw$k_Gkr>_{Fal>dF4fxB^2`s3i5r^LQ??^yvp8+O*57@|~6 zqss=U0~dpk0xKt?oK} znV@LJw~n^ibGonU*i+0fqI%{sqS(j+_8A4-lvi?__1)<*+px}ZXj4Z$Kxg_eYg3FD>B6m`Q80*OaM!1BMmP%If>!Zg_ z{Aq~yBex$&%m+sJg&f-7NGt!+5btRUXfUKREGS*Or~q*&SbKo(PguZX^1MsBQsziH z*~vO`vq4AAVVW4@C}%{enU_lz0B@WaH?ba!tqjV6G+pvG%FXUaVk$(IVGc=p%8jZ^ zwZ>h7LrwrLCT<_OKZByGaT6(K$8Cn>aU>a3+xeu2H^a=-&Nl3kk^{+A%2FkGSk$U@1WhMu=>&N0#w(M(KwY?gT+ojUDr5S zv;Y=XTVM6jj@q#BjOt0%ZU^m#VUsoD1R?1tif6voU|{qm`FnQx*n|^G*=L}z9Pl-a zre=yOD3rwo!o|xMwJcscN82;wmu)Wm%AQvzvVJ`2Y3l2_jg5v(CN4O^eDro%M)>as zWuGmZo?oEX_bWXUE)ovx_3>}O1 z*^-08nxkw-hBVqfdjC3Q^0-xh{j+8Zb-R;#m=iI18fwnEcEi1D9!GW(xktf8$vu_q zijy-}eR)sUE^u|1@3#KI#<9RQvSE$VC}c@gnZH}064oBKX59YKNV|2?w1>gRE!x~l zaaP|wkvQrSOO=!?JRj@4^9=5i?V;;+bv5o)(2*fi-(YFl_LEc8!gY?yPnN+4p4jB~ z`-h5)>1py0rK22St)3Oiwn=wciDUDpitG4ehexBnhspYw*0q^0 z$&=+r;hb*cih`3&LA8g$DBPKvY_vY(YcIH|GRJfpd-jFgWk+pB;oZp@&Wpk(+G4_e z&0D~R<)8fcW%=5;dhW&;i_-@aRh!oa?${^68c8YK_*Af2dl79Sd$;HjF_CN{ul{P` zD4Sor?$}dPH7DxNN`@r8qZ>v!M`X3FyYCXOSFd>I zby^&;){2R=pe3|OeXUb^t5wdeTHg?*kaAvPZqs?|GasC6}QdL5a|a)d#{jg!ms&ri{5i8C0t{J7-S;&tBC)!-bfy$+cJ97K34 
z(jF4i5ZRKOtLN@AZYk;hzE&@Of}N$D;W6xD>c)6t6mi&eHTavNH&0SeUhJh{h+9-^ z{FM)C=tL=5cr;59eFbLNoP-3MPBWrSW|!P7cx}q#S#R6wn0%s^z+O&eg@Ip6T(ErQrz>)1;?-MWwL4vcUz*^qFGWNtPBOtF|bUnj$|{(oq0Pl~pXv3P`88;5mKw zy8WTQoV@r8`C&ubxS_+A7w8izMNPGTY;9b-l`ouEYFL6Qcu(tfWuM#tpW?Xh&eIcD zEH2SNesGE>A{6i}?&-51vgJ1fPJF+XxRZIUcZElZ*0{9^J!Fl~$w0*Lp1hxjHw`Rc z^}pqZ4blIG@+I6pXkY*KG_5APYY%!h&yHH9QZ`PShloZ1@>9(AhEvkby(?>jizW0q z)PsAXk6nV!YxTMq9JdHsGTDyWFv5BOQ*ul%K1;9Z?dvFqTj`Y4Avz>CeUk48s?0rD z%sQv%>>5&ZlpNpIc^$BbLN;U&9l%y>C}3RU3sMrtjUp-%XauGJ;) z6>HcwrQ@F3pVSIhy(Vp};AKNAt%t>1$r44=8N2+|ytG+&=pA`w+eQ0PB=e(m zA2zGvJ);09pNiGN%Urs5EVshT>Pw73+galAwbd8eazON zW?0Yi9PLqed~HtepRZ%xQnMf{%3f$_Xee*m!k{~jswo>-cGQH+%@fomkjrTdFysIt z5`zGThuC>zrD|E{ElU(BRCc7%|Ln87+(iaZ;WnO(NT4jYxb;?XBxdg==vs6MR4s5d zhGL`p{FdklVjZ7nF@&91!?@ZNS$s#|M%jZk3B*fK6*hu|-;o;7JVWRVxZuahs_*{u zb@PsjNH5xq<@Mb@_}L`}`0Td(9Z`5uBzEdv&Mp1@+0BP6Y7K2|)3?8mx`chw+^B^w zdE*~}jWm3w>N4U#p)puK=u)|O!Q6nL;q|7}a5V}4wuxvGYrL)d(g)!s$6)Ro2Uq{* z89VxAYF61x=?ON&b*(#R_~z=%@POpfVjMrJ3G6E)fsFMzgU_aE%uNk= zb4i7>{VGS$UIU-vjX%xQtnNr6w!YV>kd_@)74+uPbbRe%vpPdt=E6>c&o>vD$u%Ox z;-Rvg8Na^{drx*b{A6bWaofU3<8AT8f;B4VjNpJG64oWdcw(-3_M&i)urD04WP(n3 z!`5}^3zVqLHvcHhUK=OsTIdmxlgr7W_%H#Y6<(#GchzWGWpk`W&)oIQ>b*-ly*ew+ z(fo7J3yq1&SRu0*d-yU(xSSwd9<9lGGRNnLbt9mK^KHkB?B;0DS=)b|@@J24#nO~F zTKK?x$~=HnSWp0gyZjRP_^sY3KY!Q+1~#yQi(4;hvRBOM?B~MP=r*=xe@-F=dl)K) zmrxWnq>G|oH>-{06ky!7r|r1cKWzDpcSGP^vY{eC#gL|$C~Vn!e6oE4ky8f}j|U;8 z9mKk;<~_n=SXW9Q_(`=TDl7d8_(db1%$>#$`*0k)BCEN+T=NAa{e zhdB6@Kwf*?`oLT2Bvjpt0~{@7XdEWF@U^Xy@9#!WZRdY53y-+y0a*A#J^Y}Bk_l6`QaB8|#HoWJC2)x# zLR8|z$~|{Y=1@J}_!=<#=i$Xy4#3)Yok4R9jG*C*A+w*?czCsnUA6m^A1il_?bWp# z&R}9mPA3rCIvAWHt{r31t^&BC`< z+p46L!Ni)a+G5e)3H~1rp#f|pVjd!>RpBVD0NAxRHH@4=0(8yp)wH_cbJex}>SV+h z<{O}&;==%<2665Ogw{WTIQ!`C3s`gZdGsWp!S@`YD~?%&AtPsOVrGqE+n&(tXt`}U z>#HZW7}SAYrJ9DhajPL->2wGQz&x zzMH%VBZxo65Z9NGIdN5Cwrg9bONpS;=71w!WJXX~yZzci!rm~EpE&S1HW0nTz!c;y zQb@3XIA6d5JlPcG9)UX_#x?`oiVcw9*0uX-aRv{TI9$}s0ylk{wUlTV{LXcpFT8Ru 
zQY-$e42(QNb8QNF3Rvi%f+A=t+Q72Uuoz@!>>u|6ClKzvuDEk*oY}t9mXBXncO*K# zS+TtF_K~A*h$t$swFB`$+6ST-OVl=yda$%!lj(9n4{UqrO&9J3cMf!Sf=-%PH})+YG1(a7It6pU z5_Rp`*YtD>Ai*HyFxE)Sa9T)=*|9V{W{0TPA$QUCt7CuGi|AppvGSAh%*FANyXZ=?0x@X97tJ4BkKl6Z!UASQj?d0LHKN%xSEcSeA>XtkeEZU(>(M&Ex4EbQj zQEZ6^yk6vyFkoc{W20-)Ki@EbW`|uV*K?lxbSvNAqXRMIqgf^ieA@76!Kq2?63Bk7 zjY>+7GN$D+wDd#um=})a>Mp(De9Et7SL2acopXC#s=qly1WsaWFm>bJ9DzV8W{L%g4URW0T03 z6Q_PI^0@D3E_e9CSS4^*boXW*!O;na?~nC~hIo^x7{Wt~xkoA&drfH61?i%{k)kJA z%{5J#y&k!)Oe&*7B0U?Da7FgC^*Io#B!pRebrWz9f0rtvbGu>`8?!7_&S}A(9f3Pj zK{9^D;ujim+)Y7F^fZ*!J7ViwF;?KL9yBW%Wq*SZEK!|G~sm@@d%hTp$RM%Wj%ch>WYyVd=S8L57q^`cIp{$L7Xs@XJlV95`Bh_g0^^0JU@ z!jjo>71?7!-~@&Sa6}+eDTZ{>pfpf+&aN{C7r7H;2C3ssLNl8?lfn|sDnL$Pu+q89 zGA?(C*PXK+pDM1&52U|G!x(VA0VL0nv}9Zc6cAiDl$Z#a4XlkW%Blp;3{?dEj7$9P z%GZ4>sC}#&Qh(u-S-PgD^_u>SVfH_Hb?|RU8Kp1;q&`%57f!6`^}hN1tEvX$n;~ER zip&gGUPP$#yW#Lv;|=TJkYia%8Ax@{6Q{bDe$`I^O34j5?H=Zln6BCJ#8#L8XwfP= z>TBE6G4fBoVae@rCadq1zBh1+!mNXG@4b9&8|J%&O_?&_KyGU)XNv^Z5Se}r7ZaPq zZaDAIdYOGG*A3c3Ivj&jgz?WY7y}NXW>_M$20e2KGi5raXNi~bpg#hlN9m4oWo7sO z=}LFx_1t9h$lRNR04R}kU<_}w3s>~l7_itd=0Afs^%6DASXWk19ay*p+WkUUAn=2+ z*hE;+7a#%-5U&)Cx08MjeAV1P7d2qH0YVfcSfUhvTceWl>rfCd?0TdG!ozIcxYh1D z#6jZ=&I9S$Q9*R}SD`phE`ngn=j!hTgDvd>&az%SiBP%r0>x zIOdahn}9-++<+k+>pHn+-cBLMzlmfaGY=sKW{gXK+jWO5>{!?;BjI7j!Va0kkN(Z= zhg@yr+OSu@Hk~Fr>?y`_j)oRr61qTcf*lRyRiCH`(;XJ!ezqG-+$vVgD)OGWo-nF53??NS~&NV z$|cC`QtKxQ!#1p8jTkkI(G1+MP0VPtdteZzV6%>L_>95vZxhcD^MzI@H~V3&>tpLpU!2w*E+rKe`L(`Ljox2M0VQiy+=rQNGY* zlkd;Cam{uLp51PT)I6!v5I}O=$^4`~oL(A{Rs!*elv8+hyBK|i@Vg!LJ*{ZcgxHhz zH#`m|#y#{bDxuJW=UIQFZTp3nCzzh7*n;M*;=7!jR&glTPh^%ANGmkCbx#f?@>M-= zd>h|>;XQy~*h)WFpnce1C1vR*1SYn@2rJOpOJUK&oE9ETw$T)V^Az&^GJq)eoo zW6->yUBMz5(ey-vT}<-z9ilXL&*6^SOE0vjMq?tAD2%k6s(s&Li$lyWnlGUAotM{~ z`~5Tr!l#vO;7)Aluj7dZPO)4^2i|FgEomxC4hp^eckicIpj&r;8wtp-<=>}oO{C4eXs$=;J;6zf6M^rw{G<)(HkQSv`D*EeZ{uQ#Mbza5>VGKZcdBqRj5|L2XAr^q^ZAp+#|pX1JQY)BepdhQDI_*XWi__p z8AxT9`);`)*^xpZE}}9EY_kW*lyNLdL$PPIif2JKzmAow%zmB(>ZYe=nE#Yw7VgW9 
z%Sr7AwXUq?!Daf$ygqKgHBt&HbZ0Dt)piQWU(1-P!peD(nO(TY^$x(PgL3{#*KovYIt;9Z@IM zTB8^>zBJoR9Vg9T_U1$QY@u$}lje)=qM$?gqkWv@ktM$^;-cmmOn4l6H$>)i$MKcH6{>xJu?gT40L(qGh>xlA9rk6pai|IIdWQIla8n9aOWj?EQtkMulP z#tN*+iU`>5y@t!KTw33)iT|UdbA#j0wO)tn*{SaO;5{3d8tk5<#TPq$E5Plw>=k=C z8yxLxL8HV`nrTvchK?&pGh8LO!*8c!8NY$ItK)loXqyYoDq74~f;PBr3Q7P4&#=KS zHAYrJk1l1m`f<=5r)FBIq@2;iT-IxP0P;AIdhxT&?w`?%2VDksmd}WoCLk6sVBQ+S z0tafM>(3e{)OP1XtwLLz;0Iem3pdT%T#z2CZyr5@UM5K>E04{mF^~OT+flQj=YFgJ z2H!3HpBO!v7!9yhcv0qL(N@b6wRxGfBOf56+y=NhMIS;{wvReR~;x4qWjbP9C>%_S^as}Qyp_*&12tf zvtXceK?N=a{P?IIMZf2$XXaksGC!B6ySWqHLjR9TwjM!JCEE>XUVR-D@0beK0YcYV zp9Ctw6kad6?IZ4>r?o5SlBnxx?6s|h%qWM&_2Yna$A!SA#tA|hy-c`Ve4f%QYK5>v z?L--NmZ+NDg+FW~VYAz)2}DKgHN!UqS)DjRmWzH5*+_VkyI*UPjooP0num223Aq0v7n0bIz17*{d>fp^SGVwhbVb2t7NJgwuxiJtl zlzZYaNCyM20Sjx95?K*cQPZioOM#CbHAlC#u@-oW6VG`dT@*IotkcxtL}+52-~v&0 zY{b0-yrl$jK@zNPDg)<}sF7ZBJGb!%s7_e0J*&U7 zkjDpIaJjAnz#(M*lheRlvu7#Y-_Vx)n!;*2n9{RZv*k~-XW96Uio<8-P58orlcy>Z zF;V*BK8t|}uSId{$X&$~6K2lo%)m2s;GOZYeC)m6Xpnb5l+!GGU3h&OKG3GUHw7}O zu3XWC?2Gd)*ZaiWewx~K$P1zKY-lG`T4h9a|<`FU$uq7Cm4eYN@ zc#$^T9zv{uRmSsZL9HjJyudo3&`ory-TtZ~gHTFIC&U0tdE`9HxA!R|IEM>hG8mw7 zrD@6li>%zxp)7Cz->NxVQ$e9tC-gPhU2T=+SmM+IL4KNe7oMLCkLh8ovd!TzI{|J- zeF%h(x{+*J-7$wz1{`^x8Dep6=q4)WGfuW`+q>@}SnX}_>u2RC%O&DM^xq{ufNE?M z0S7)wY=DqFvpOc6c>P%hC{VaZJhb|*m#bih+Z$69Y@;GG0+B#Q%k+p?B1O1y$k&}I z?hMS6S%;2d1+^&lT8t^>#gq#briAOp z`0#FJyuXpr{Hs_vWG@rut(l=^5QT-!}mKS=;az(UVwp(5J(Z z;_(erPsF5X#cqiwK4tGcWQp=do=$?}mUkI1K)G!e@(Xb_0!W<7Xc8A~(W#{TE&?e7 zfRN^7obNfUooMs|05mj2K})Gf+f^6!3w+t1z(W_N(pl0>15~+tZ z^}=>+R-FOTJF|8uV+ZCZuqzgy#yokVBihz2n`j2(0av*A?_sh2j2+boEPJ=-W@nmT z=@#f6tK)c${#dwS&v%30g9$e0G|-gX}}L4C17z6>-E~Qb09rc1tP!RbFX6^ zS}x)^-&6KO2Jo>T)cvJuWg5_$NZ8EV#Hlt7f|5Wt;Xk&jxQnIXk(i&Lvm7&teie{u z^m@|QCH1+6h8T6E&}TNsZ7^Q}xlWNU1tIo82V1PTm=7A7?JOJpQkTUOpA4nuhI;Ri z>Qf1{C+J%^22Vr9`QbKI!=3PWSY-0L#6o5YV9}}8(rO~+qXeG^TgSxJpizt>k}|YF zq!c!k$L$cM9>9Q~sT=$exLsV_pD0c+05o(>0Q64<{-Yf1N01DpxUCX2t6C#_Jcg9X 
z8sx;Z0|O3LDzVFfx4nZXBHU6a(W&)Sy+VxofDi+cJj-nE1B^(48KE0ia*uO3h$XIt z^Tcg>V1g`M&(ytqjf&ljp(-HKR*iJ)!Ig9_60X75+?|+q~(Nt=IC2VGb zju_Znz&6Fz4KM;3N!YqXtaE?66hfbZ=L|qV1ZKcCL)=S=m_=t@{86!w%V_SqjjpE% zuW}TJLC(e! zW#WUd*f$7X%NlmYyHQqf1rv!4hB$?N5`n3A6iXTWZDwc1Etq3i2@#L;6wnDGo_mB> zKq4+Ne>Zl1C_=Jm9@dU_mf7a!+u=oy!npRV_|*U88exdUWvu$ARpdKhP{EEvX8s#n z#@GmqSVAs+kBFZ77`VV1WH|E1n3rM`{3Z=GTC1;}#@ziU z&yqk^Y@{(QfH%`1*Jt&%wzgIX;))N&IWW?p_h4aG8zJAaz zVvGnzEKZ^aJLp|>fz3Y7r7VGG07lyEnJ}c&!>kW-u~=+X^`9!y$myI}lVN0|Oayqsr73=i{!O z**)ZS@V?UM!i0pd31Vc7FVP7%&K;Yx7MtZS2pJ%y)j&P5y|a$THbh^ByqF!p3vu8X zi+w2f3@g+rkWOB&VmECBK$>H}5EMu-v|m93_&cP~JkqrbHb~}T^76a`@|C1F_U@)PS;93fC`Qz9^z9W!V+usBQ_eQmHVmm(E zLXQLq`g6ae>_g%y?nN3#SG8poVO`BgDDekEJS0(@CB*Hlhfyd8d@r>wW;C^i+aJYY zo0NVa0Pah{!oY@yF%Yn|Y0rSI1vUo-L${`=+|J6ntlndz*56NFHZwjZY0{a3`bBr;`?uG`(I={oSxn{{cCC_-t?e#9B42DIQ>gN zY=9c#5D<9z&-shMrYnVCanb|&@y$^%?8(2w_kiop#RzmRVnC63nUKBz$HJeaQTwR4 z=l@uU8li=$FXM?gUlAjR3-TtL+xt$JdbQ>|))1-^B3du%#dl?I1F5qeh#Nz~52@g9 z^oUHHR|h4#fq=jB1Jk0vn+JRFpOGX?1~PtDI)pU6r(418vYSEXsfAPr9V57bG=>=H zusB1(`9JI!r0v7~R4{E0QlA!dKEP8FF}C53E)H!-VUbXM&P8u&BJ^>4xD8e$Zwx(n`CDxxZZmR z@Ztd+!ymlv`qGh?EWBiXw;bDovYO>M7z_Q2qB&I=gkwXBc=VJWMzntCLLqPj&luZ4 z`Clp`{$Hg`y-4t$!Cc7hi3?74v`NCM}=>$m|$0!G-uSa1gq0j}6Tn*lwTE$JY} zAfx}2@ErDxz;5WoZecC#%v_9Inss)Lw4Zc74ra~=lmDle*8<_BmJQX_{vk~PMYei> zf+zi=U$4k5xCSl?JR1>6{rife;6>8{SK-umWU)C`e z@gR>fSfm&cyTcW40D7k?cp;5Epk9ekY_j;r*~oQg7)X(`8`b6S+F-Rp94!zb}RINZNELlm})v)TvYTy)JS? 
z-}c`o%}ib~OYbXUG6Kr{LmmamLwy&Ld@(-s-W2(8Ac{mFnqG6l?}WErjJW|4$b&^S zY0Uf&88LB|Rpy3U;+@8M;9|W0?hd%~OEw$+SD7#&_Q429HwJ6#upJuYP<|6Sn+sWm0(4AcLHVMHz+ zSXlv5w8U>yyD05=BH6by4EhL~6>_FB5^yPgxqbN#LwyqeIl3P-2mb*@K*&uu=QKFA z@q4QS+h*=u#|?mcgNptq<`I@`nGaXM?S9e_TpH&{CpD0O!_ZQtyBd7i`+^|K&a1bf z5{J#0@o>WZKaT{4ELA*l%82l1{U%WD8tllCPWb*qSX&&M%7D8YE#hLzuF68=G16tzv6;rcv*5EP}k&X8L8r`gN3z8r9kV47iE zC47rue*_5{=NzEY%_GkzSr?8Ld=)eLnq608!efJ{#Uwb*v z^bcn>bDv@B+oGyX+psNud@NK|=3bT4cE2r3AN?*0Phq?M9E(nZ@Ag|#-~HHDbdwBq z-@=`b^Rs>yXI-PgE=j)tF1Kr$htIAp(@>Ppgy%A9&lwfec5iU3_Mu^et2HKqF$+q1 z8~9d?igck-xh+N5&!D%0!IAfP!7q*kO@`o?8Vz@7Eq?ZTrUuK;+7X}qX~XRxcu?rm zx@Pu)SkBQNbubhJy-UT0@TP)jMr+hlFtP&v zbsHBz(`J973zWfyiSxzx6B@nHP=ubofhh32$EO-m_ibbZq(pDE?8`>J-3)FDAae$ZvGLuf>DqNrgvUwsIgx-`rqn~lJ7KV z@o#klb35PbT`36rS*d&9Mti}J{?PkNj-sCO%k}+MGeih)ZUg>I7F`9^-$Pu>0icMrBWtRwkpc#U@VA~JB*!f-%_nqH<)L$jVd)5xjo z{sqh|abLLa;|Gf#;|1&{%`Ml_QJqy~WpBHbhY6w}LKL2C5@j=kE z1IBxiLM}3fbyCmS&k_|>XGntQrgcDj1enXBj#q02L6&$zi&_TKaj7iO_=*%l2S>HQ z^_8750K39tTj1XV16=T>mJ_@V=!DBWzIzI#+YtoDvdA?nK=N61n+D0ib9jA%eCQWm zNO84*AnFbvSnMa=+gRJ}4z^V42JRhM4id~Q^7Xu2#@6cQyFlJk-cWMED3o7crabfn zw89FK&;nP=pO0w2>Vkx;6MZ4uQIHeID^1*yyg22J{&rAZrkYbc#Mj?~+bkL0J>V~I za`gMRz^|4iK;*!kf_l-tVoHTWOMs}e#+>}9_*eM0%K?eIh&As&!Qdg{r;HZ_?jK?b zLj!c`fH|0wFcE{%?O=mUeVsW%yS<2&u_yN<7cGpfh{aVF;*5fiFa`e;T^mJF6#f~g7#?e*YLMYMMsPV0xb?D$b?;9w?!m2Ozxp3lcLy`SM^esni;ZfY_`B@c5=4u$#ATgIWAoO@_63 zrnhEpcLcPzEv2L~75Yg(Zw|QCG1eqz*NHYS^OJ;?DxSAHEc$I31g3rsnDGj)t1<5h zs_(nWO2y*3i#edq1(dfFo-o6CiEgwNFde(!;<)qS0$6RC56zSu)QJ|p5MIJvsr(ZR zz|m{>^JPT9%y*Zy26M{rnc-ko;Fe+_K>)_h?)K*hxrm&#;kf0FY0xKY_(oYaxYat- zXG>;tRt&Nl8_eFCjs}`WS^la&?$=N;Hcge^IfL&`Gn3Ng5QXolCNVA6L^in8j2reP_bwy zdIQ=Z_V#V;-X!?`)|{FR=*9KIOa|Kz+zY$9eaSQA>_xtK2rrBV4f6S*!v=g&1QWEU zZ)u$gH8Y+dI{_EJvh+btkz7wGb+19Xqu>$AbYM_=`c}x_(5KUgm}IToRg!IHBje~e z@a50JJ!JEMRwMKh5w+Z}MUcp<214{ia*Cjgr!4^2DET%*@z4o6H!E$0BUCUKUGJU) zDmci9))%K4##TcDQ6^OtAK2W%$OYG)_W#(@53ZmtNf|%$>wy&}cZPY4MOagE+VlP_ z&ODG16b}PL9)C*1v$Y$AMcvion1M-UCarYX?-|n!Z0{wgdk6=xuBHc*F 
zPn?kJM1M!x0LE6gL9$HXYJXn;46w_+7cyL8!C2e(8*66aF2oZ}qIaDbEu>h{Ca&qj zH&6|>F(IAYNNrD|nS9^miJ;K(&iMba_b%X2=k5QvQqkH%jtLXCv`Lbb<4on$c9Jbb zqNpg9BsmN-wJPI~$x5P7qavqqj5JI#PC1N2OqBCsFc@Qu!OWQd`!lL-_xpYJyZb!* zJo{YN|L59kFEM65pZDi}-}mc&-LLz8_pj96RU#1Ffy)G1`qIy$>#wASpUbG9s=iRO z(n6{Hs@SFWdpObUze9zdtkeXPC|ZFJ#9{ivpaJEOl0I9oKmt1K;Fp#%6Zbt^ut0sM z0`SVDV1gH*umMO}E?`ry|4NbhrNsKRHc<4DnidJ#)c=?nzNu6IrfmRU`s*s6z^nvS z*RPtvGE_>zMJon_nPpXiZUAt~06;z8E$H643z~Z%q+L)O|AsbS zuy}zj3K>{^qdM3l^pCT_4WospO(ko7Tax|jO-MlKIW%&k6bJoAboj)wI^xB7*3-3b zEJO4T{bS1D8^epMtw=e)Er|(Q-2?@Ozs8>}G(-v6UeQ103qRw&R-2F1;?ISZUH`{i zg4KIZFje|q{(^@0i;{9wU=MMt6*28C6iyjbN~rR4lmfIxu@wiYB*7I0EuRNJ2g zwSp?yCuavW`+^z2g}5JU!Ss^EsV%|ca@0o4dmDHht%Een1iiEWH4oS7d$+<7QB{TC z=mWoI0JHk;Nt$hP|5dLXG}D4>(WQuI1vA=tUt(@s(g#5tg4Xk^_OZvh-vRYNWZxYS zs%8ap0bp2mNnOlEV8C!D6jA@D9aC3zwm1;3Soa$L=bFe z#kQoKIN2X+C83t;V6{IP6`+PiQ%T)VLFTXY!$K9mqk(u6aSRNbOb5vzIL$Z0eY%MD>FDB$ccg??}<-w z=NF~)|Fthb?{P-KXHW>liN6V5e{jjbq@&gXT-UFHo3^n3w}kc_V%`gly<>%k;Pm2PV`PA&?roquTef8AkK(_+IoQQDRW^J zN#OJ!b!DEi6Bk}^I$`dXh=sT$wEy}g%%;5efx|%nkOG}Cs-aZ^S^xjQ`M`9avSdGX zjX-MO-?;bJdY@zVlkz3dn*O8?NLuHgPJ_6+Te?TxY!fE%ARMq%^)K$R_e*~>o#9^#z zN$*P-imBtH9Kh8<__=bpEMAOpQAZ>W6Go)EHy@T}L{kr-mGSTgQ?5-5*AD2qzswy# zq7&%+rC&s3Upr77Chq<4TQKm0+WrKr5|D_xK{;^WyClpEl6?|%;Z)BdeSCVYOg}RA z0+vlMaX5+J@!{{d&}z_Pp4HU~+Pekk*)2&{HoM{jp7fnWUk0`v45^{O!8s@UV9^lY z`3c#7CqQfe{3eI=ehIc;(+po{Bq02O_wRj!I%XklRq!cCQtr~`7u!Ej2ZQ%}!D&BR zjXOcL7WV3mB|&eVEg=Zv3l4RmZ~v3@1R5nEi1J`Z_ab5g?&2pOIrD=g>PJ$}w4q}_KHwt#I^q-c=7QPK1Y{r| z`W=lha86DBVV{QtKq|0b?lO@TDXa?>9SesP7+$8K^b z_IXNccV%yv1HNPvw3J)_kM!w}I})JsfsPveMbY>*_6Mi$NP(r>JX!z_9`pW^$EkoM z7!{ld&ZQL8`F}qF01xS2>oLH^2mmcu$4}Z1aN3=qA6uyJvkC&z#r;})^i{$QoE;=; z{gsgen*bF6I}Q-}ehb;ht=2myl!0gY)u0P7?|RjLz%!!V|IF_D{q<|NBE`$W?v*&<>)aDLjJu+b1tH|C9u=_gXTP8V5@Zpd_Uyty zqeWXHKGRHKb3x}UE(C69cEH!Pg@oBVh}#n5Tu`zy?f62aK*=zO0bl?<_?+KH%51)} zF+cZ*3!700IcU6w^G^{1ehgxKc7g$wzv-j!{S($? 
zUV~EzTN(B}zxc=lk*-|uCM|-<1{~ZUM6mxvfPgaM(_;|w5fEw+jq8Y<@2%VmC46&a zg61&taV83gr?Ki6LG+W~&% z;IHHnn@mPe4k0c)7ofF&YcTm_&hl!Z_xbor1nA4E>o$IuGX`U2mx~- zs79Q_s=^%w_eC7f_zX2K{c)rQ4TE9s{RbQd0K@CS=5ToLK?8>Qg$w{lE}a*ljl)n9 z`0q8ID(yJ%hJ7c^RI;uhfM==9kI*13LCvi#z8vaV%Vb4Z0g+n~^No|=V0{qwUP+fGIf&i{ z^|V0=Q*4BU+1>ewA|p8`8n6`S#lY+TrmOPb5MNwOC7}`%O_R)hjt0+%UdhG|-hag6 z|Lh6$^|mj|EJG)vRL`w^+1DF9!OvyNl|YVOY3}4Oy;E@Ipj+K)9J~YqMTeU~XR#lc z2L#%LjF>Ab`6&3pt#O}#7zRz*`1fI)&t|Cw%+jd!8O3l=zyDsC&lG6j<2ZZ>rXWGR z0^kt+{s{7{#5}a4fK+Av&>J?^ZuFh3)V9t@277!HL?J3m=jNLQCI-)PWp!dNhM;c&AOFkcwT zg6oKWLU6zE^SB;kLvLCF=P|LVC9zTleuHmKMO>Njq7jI|5mw6Kv4AhY26S+JT7$&p z5)u0VvB4e(#76#?G*5t2TMg%Dy!hQ85{=Olk9*l6H~m1821aM(SonN3@J{rX^z@(J zoX~&)Xhj6(hQoxKj2{J!OI+h806q@suir3+^1S#~DHzPN&76&3$|0aZs|mA*5f3NX z-lJCrHuT;Z)(myyF@ffV-ueFdScD;Bx(A$_IXKKrG@4gZS2iOl)uELw*Ks?L(c(SC zOvX#r9O45-DT$<%1llhR(G#VRyDAyAzyB>G@~^}bfX;e3mDLuKpS%+t_G-o{ zClk0QL40fxfLLe~JF6qUhvugMrw2;u{YjV?hJ)3G>#m+Fa(M))One-AKsT&$zYC~l z4T=YhVU>95I^f(_N<4AqF%ULIF?{IO>%*U<22EYye76Ay4P+f2k zqRaQ}0+c9U{2~86XCLxn+ztB8x?koW{*~sSH@gAHGC{6JvtI~GFAMqEr#p@CtmKl9 z=KHAv?*m6#43oPjPj#&lxaP4yzs;R0>yKUyH{46)$Efon)XXHGc4*a?8-gZR^AJPx zaMiu!r(s?UFm85X($p$``iAw;D1J~dNW2ZUoDSG>bXc*OWVR*KQU*C~iO}=MG$?}R z)E^fLi$ecO;`RkP0+d$H#ztYmj5iZ7h&)8~bKmbmf)90#ys?B5Duuz7+NIQw+C*Z; z0kRKB{GePPI;v{M)eIcREa1)Ld)3Vxz^(B&T?PxF3R0c`5LwLx)i$AJt3K6&|BUa7 z2PC2l6)`tBgPPe}oA!VzsK2)*xdU1OINdfVu;VahI833->kZDR49a>QNS+{;3{rZ4 z=(mu}0#O5$_}o9y2&Kvksiq)vR0=`cNQ3j!?;2jkVR~?wpyVXrnh+{#X;kDg@#ZSgSuId(!T@WlFpGT)*tcm4^OKMp1+FPxKk4#GLWeMW9j74Qlh zA*vR$T+rM36-$%=(d>dbWTui~nSxUUL82UgMVe6r9O}`4&h-5i0GxhhO<9~X zpz6P}<{{AL}|AMWAsB)iRAj0xfx(-F} z1u7S#x)IW7O!$7ZT9{e~u>?@szy9cUP@(>5f!=}EpEUn+E1!Ba|K$vrq4y4;=14&f z7@jDdogzpE1$9)RZ@>FgllYf|1w>VXZ3(FgO8Y!5IzHtTU>kzE`~4GbP`&!A-S?DQ z_ZGwqD1R4>j06_)|E^1L!p)ywuURs2oQ^(bWC@?CjgN8cFrtc=p!rW14gK&>(R=c} z5qLd*&kjKvhM*$!jwKRH0P=!}zfYMx*g;`H3kF#zt{1AmVk?!rKT)F#3;|%-p@POj zho~Smx|^^=17K1RRa8wN7DpcVv(K(bJ>eK#1MXon$RhKjGUw};t^Hhf_*1wJqdo3B 
zzN=Z}=Y$e|W)hlze<4N)GGqaw`3sFSgxV?sGqz38?aFW-l>1j5t{+a8QQ55=0F=7Vn?9<=wAZE0#^e{MIc7s|5N}J#0`+p1E5+D z@i|bGaR&ZYP*8Czc1ftQB5}JcTLv46w$h6l9{&@L|Dd$CgEV6ZVRS4m4D9BSg`l+6 z%9NwI@VLQ1Xj940U`@1ht*-AY=VRG769mRIURItw4EE*%wjtsiA#=>RwoCsRem}fW_TQ zeh>|e?-$Rx!P!bcJ___YLh^h5sl5ftBLBa-E&zTRNDegtO5>mbbR&cM=n0@#2|V)m zEMp~Xwg^3=1WwVMnV<&Txo`mtALxWO*Kb*oUxHEupG%vcFEB>B?b%}Ks$B}^B^rv) z2lvw1Z;g1m9>#NB-hb1*(CVGIhoHjRjPfcx1mx$dD7=Q!$h{vUS3R$Vpuwtrmu+{Kiu-MC#G$-y6gyC*SYBEUnG=F~}bl2swm=p+NyqclN(fAo=5n z%X7R?LaLVD?C!+?*~f93W(T0@1jWxjESxP~!izOj;(aFrRzwwn0Z39~QQ)ZF^$VK#z zBo)x$vG%}!(%Xdti~ytsy^kcp=vNT@B7oGY2&81QqjY9`K7YQu62UH5Nz*K=h%fUZN+)$lw%*9sx>f4KWmuISch2#GE%`>VgR0@1q}s z15GcudRw~`hNZUTf-Fjkl`Y$YCoBZx znISj$XM`93o^90ZCC#Xf7XiVe{~i=PqGp-CM#hy(AYS>v%f3552S3->KhQ5=wzs)K1W211PUwS5yBSuoSp|DBoO2M}c=*IP4927Z3#i z#_{WYxh_;l-s}%V;E)}@IEpfO&61cM>edm9$-VR2E8nAs~Q2l+fM9wAdE^LK!R zg?+n_rDp?{UUz}(7i8*8lGi@zMNd+0O6=3&bIK)cKhMPYJ!a{P9arzmyr;hU8+h7>mPm?BfrUo%KG?XZk%^<9~iHzqZ=6RFcUrk=;?tx7U0oVXpYOTkXGW zN#71$dMv%IHoMGb-$@czYTf(46|Vi`NB{QTPq$2>m0gs-bRqr$gnQTW+Ib-%{?req zyN*C@l7B7Q1z7WccbJ7aKUOtNN-5t%_1;L7&Hf|#-rr!M(Y2+!zdz+>?VmoYZk6?U ze$Ri|HUbyqu+KngORN+ZU{Rc1nU_ZHtulQ47m?|jTVb7$3e-vw6)=37{!=1A55 zoj}OVCREiC^D?yttB>8_{G~bjcT}@~IrsFrbPdtGkc4BQ%KB9{aP5qjkf*+*9ym3Y zRah4G2k6oNJfL&~V-@%RS3>Zexjzyn0&1=6u%5G?)RX_6c>=h|!nL7==6?q@_6I!8 z-kzT}^nU3E{!j9V|96V8P2;8*4&(lLT*ZG^deD$Ay%`K8AL^i^*t{4g)NFHw{I8FA z|K*?Huy-ca4;tyU?8rBAfZzTubjv?{_23++uDS?rU2T=wFK_bO@4N!lLa${viKMGfB&G2eS&7#b16XBoxr9xKYvYMAh7|)K@(pu_1`tQe0}YT9bnAH z?puBhOT)peliSUAtn;_L8mg1xh&MW=u$CoaEi=9d~ zujS9{r+j@~g;%{4wS!mgyG}}y*!}I-pZ)c#{*B*r`E7=Br);l&e1EXEZzS9A>Qz@7 zZt1|XJrjrLq`l16j@yKsdcJkhou;!y^&1kqMOFUa|NizhaAhF)TwCZeh|8xZ`8*#k zEs=OrVmD@Ey9szawLcY?O-uZb2Q?!)10!$~#dzW;z>6?lL2R$FVemjx-NYq#i)xF_ zV^z5G*D3$w5i_oG5f1i4XdyN5XIxn>!aN7E1OJ6TGnqmk32xwH!CUtY{6*pOm0G(+8LMZ|`*Cy?XK7-xJ3`JD$L#ZY+%9K$ zNEv+m{c?0n3;XDoD;#(eT;c}aU49q8TZN>-MPCVRSa`oucreeBZ;=DuP=Ce{QxYty zQX<1g1mre&geICee!V0Yuk&F!a}9|5-~&=5#?0Olz?r>3uf3tC#1sD+{QBbxE{t~; 
zCbb(wHwR77V*RDV{GfwI1vh6lO|S^~)202@)kx9A>^!Wfim4I@y=T9x*rz-F zP=%U$hc%#!IDNyzy3ZWr<1@unN^YO;%4(QbA$6fQ9S#&_2W4bE+t1r zPp*X(q7!cT-;?OIJw#l!Thw!3Rw+Yn)soCGTJ==H>+2?t1aYQGoERgm*}!MQjmIB<;Kj#DPz+ujePmx8xLPI&B%4v<0KPRn0pckFh7;8 z2Rlr5UVWCMw=2kFkv*Gxi0F^7L+`8-Uv#kU_Nc?@MJw`xF^99~4y_0fF>rwl&$z9} zAvn@~SZPZ@RT!VQN zEKpoBVblmk4Up%p#4+orRfM0_=@yO{j1*f!&HrMA)RmcSb}P*vqgcx3Mpm~U9+)bg z8uwN(_L5B%_f#ODQFD0}a=evpA34Yj? zIsQRQjCvai&og_tGY@@aH%o=&Bf%riCBt6dl<3{jz?SIg2`FTu;2b4W*N5atzx9@# zSb_wY@2H~8uf?Rpue>OVhy{b@M(oc(7qaiJmQTG%rAwlE-7Z${QDAV5c9#BTyu}YGmm+jHh#duPY&r|nH~I&M-E)jIo)O8rtl7XaBE+tRMv0B58Anz|8|dY#?WPDVcd zyiiNL<=afAo^EKqdMf>BlDCA#9yWbx=i2Va0qtT_{q|KQqZ4|54CAALDdzBf(@7~t zvx+IpXL^gR;zt)L%o$mn;)qn3vleNQs9AFpsFdyXOFPrn^PP*jiK$q!vvJLw?0i}Y z%kPL<*RCt|yQ`!;E%dq=1~sho?Ih|o+er0^y#zTw6_Rk~qB||Y^)|@oZs)s%HC)OS ztU4{In|$B&ollqWd1?={BmNi|Altcb3fZfBiW78m8yY6V^R9p_|5#S!*`N>NFvX2d zmwi{RTD_>&?xZQ{eu1n~+#5<;QS*R4~n> z$#)>aWA&X8k!W~#*g2Dj`)RNx5~*1Bk8Nbunzu-++X3pa>XUt_hoj#o#Cs7ej;twM z54Y$~3Yh(Nve)R!<{2^mgW$7wTY|TJ6sC334BN^0b#K|LElGj;#ca>af~l%$ao$xH zYOB_!hRTEFv3OI?;^3(gWr2-g&{0a@5yS?Gf7?>FnDfY_Lf7~9F+K(l&L*Mw9--92 zR<_hATJh;;SJzn;2i&R(+cahJ0eMZmAr5_h;<3MjlsGMI z_s!V&Sm9+vmAC_K_B+c=6WvLrornEA#cL~5cZ_~Nn7HK1BpES@l&+Y2DcV9pV)E$a zXJ7ITnzJSR81b(w8@u`kF05&-q)QU`rYt%uUI>95cpe(h*VE!`%j}I}CXrV`rTs-0;_?Cv;MGh_SN#2$>(9-iwP37sFcp zZ=_rVE)s(;_i!+tIpp!UY)|5zf<}|wdFHZ5kClY?xU0;)8SY}M#8c$QG>qUi-hn6M zgINk=*4HR^2I-IOVb?0{O?j5#QNv=EkFv$bwCH8D53~ynh)MJQwTX6GB)83Y@Kk)q zauwPJm}gCSPbk4>x;aP2dn!2+mgZOL&Y6w|=~mEWhb&P6rz7Nng&1dLPN>%SOxNv3 z&QVuC|6Xmx8xu9B#E!A@MVa|%TfPgrq&c$74poyBcnY=l&>Fb!!`m{6>Z#G+w40pE zg71Ik@{VG<2H&+`d2UuzTZD#@uXF9(K~l6rw-vL^*(nn7Dyd%d*XJT~Q3i*LgF^?~ zUfOcz6ycPlVq@C6CZv|*F@aC`C=0K$UMbxuha34ARedE5m0dl^-d*x;Sc!CfWgq3v zfX@>Rc82VIEpi0|bp$D02CvIhtLP5EcUP*NDG>@IX2}&RbTec}tU34f*{LzNbK`1I z;=3{I9%gY2Z8Yj8u#pt_)oB?=V$s!pYCV}n%YsMe{`lni`(R3bT$%%H#FlgDc7V7J zM~IvazKK{3TalR&`O@b#0{v7@JDJ8lhzoo~@xkef&gQM;PhWRyxkH#acxL2n8FPT@ zRA{3bm^&xW*q!bk%sqbGgxlh2Zt1i`hpNLMak5`oSQNXHs8R0q)R}NCLc!>?4fixD 
zH)Xqo`oPTQfW)e4;qi5{O-NW;QI5r=IdB%XZ-NU)VyqHeAFse4cy?@{=D<~<^cyKw zE&GER?kUsJt;p4}uBsyKHtZXiDsrlmvohYN95MYo2NnXK7s;1Ao=uCm z^Ah2%N=a)iAe^@IBuW$h8S#BqvSH)Y(z$2~FRVDiznqU*(7S zx9%stq5f2<KCJyCg3Jz{#)YM?0gS4Wv1- z=;EoerXG)o{W}zux8j0enc1!LLSz>+81os4ZNuQ8Ihhm~Ewf3vG=+lH)?>F8o#a;kB#`{^t5&rMm`O|HJh z(fR!yPJ|cHC1t2R&BeZf8ZS2^GAn6m3bTp<5emlY*;=xbu8MBklVHe0!=BEF(V)U* zc!pahN8y;Ah0)PA$d^dWZpqZtT6Vr)J3xYvZtUr%#pmo)hIEYsU?7%dR}^Kx&hbIAeewNtE6l zn@FMt+`7nNStX$)VW)jv=Py_Z&sj5Vfr(UIeox4zL~HHe-ygejgi`7hD$#2YfMSmX z(at|hFs)C0VTIrAhK>#PDtY>J#094IvbG3&!>B!ooo21Jf3TxUL48$A<^Ih~gVf;S zODOha9Pg0QXr=qd8lPED0}+insQ}@Zl}YkN0(wl-#kDUY*gDb&a=Zr`C7* z-Hy&b=&DN{D0dB+g0T*Tm&O&$2Y#O>?svsFz1Y9WfZ%lW)a=6nw8kZI-h(kcM_Qx< z*61x)n(~IV4whL?Gsbxz(MfWDxH2zwM7M~JVZZj?l0%1bBafSWi?pPPXRe;TCzp^N zcsU8fgW@Y1cj0mFc`s0=h0ByG@G5EIUHCIzamLNNcm}F4>ot@fuUFBO(XewTv|dfI zPyoEwm6n_`NQIxucK8C|`CWBPTd4Sakl?c-a&S%xwId1ade{o-3#sN2>`p3fBG$!wB4L;sACw9C%J zc*gm9ra>Al{_VLg;mKYd(<1VWdd10(W2aaucwk5ws63HQolSa)xdi;O@uV(an&})1 zWAvlboZ{Td%SzL9W!Y(e8(KQCI@f)|@drA+6uAg1h#{vcRgMJ{ZC9kOK|fnFXdyz7 zH1;c5sYa3XyE!y_lWk#Sd^;V!=?q#TtW}0mimXldz=^YQqV@c@osS%KE5a*Q=W@bk z#5k*1I6L~5>F+EF2d!`011nJLwtVt^rmOqxYx%Jq+L4`BrgQA7)&x7+cesa{?__Dm zW7R57#^(l{fob^R;kqL3O4@xQgDDXe05pp0D7eme2c)OZYNW$=OkTZ`nV1Bh@;hTh zlApD$oSM|$mnXa1@m*U!x++Y!0r@C;_By}Uz6N`|GsM#u|C%Oi7g&|+&ak&dN*WE% z`t2}Jj74H^4tU+8w>FQW+6)w}=?B+0`CXO6sR|*Vxu3PAzl$8MpT&$aCI+rxyHAJom*j7Tvec8RZm`(aasjYadL%m z8tb}VY19VwG;$r%LpGYVS|E949ob(`_?7MYe{}20g=*4pl)E8~Rn>VCj?^ox2q|VRr@VkjP6?0BPBnuJN=52YC zmFu|e80yg_=5a>-uwH5bUJlGJUp}YP#J~)sOm+N7T z;2Oo>uEh9{?oq(+h@TFk9IZUy!g8b;p3geH8nJYy_jdGv5Zhw)E5kBr2D z(4n%p$Mo7eekUFqKe0UGlAAY;wZqc)dk_aO)6x!MnPDQ46;-oJah_vGT1oN<*HfAD zm7FfxuE-reT_!kJ?P z72n1(eQ+LoqFBv|gwPn1@aGfsBIX^IIFiE`O>Y zm7$vkL#76mBd<9*`iRxstfIc6h*ELodk&sqoh*uYL+fY@@>u(JQFda=!Alp`bp=`; zj(^t{&FnNBqVslk2Qq~0r>12xmo7o-9)n>FrOxj7Aa!&{SftmkC4-!NYZyLj+;>IY zCN2_w#ykA7`Bcb=FoopWqo8g19)GU*QecYH?LL;t>o7aSwL}qOxjb;_rL#Sep9s8qk?A@8NJB6g!AT>)L^J4Tt+Oa+2Z?Kx`TPJAx~XL+|Km(|Y};};am 
zS{{WpZzvi!F@AQ?a$o;!Xo7=biz4>iN)->Mw#OpWofRPoreue@la^`qXILf|%xRr% z*m%?SI$w=WG0G|0CRsltEeziFfo1twoO@zb(pbajy}4}YF@3DScQ#e6NPhM zj-P~WRYgb`!h-+|2&!JWL{xExYhEVuy5&IC_5wrLmYfw2U2dGOTMZH;YfH2E^l6-y zy-#LjtK7b@NGf^6WqpBPh9PHSg=yz=_1h;ERBey7vqmm{NAXEqaZvT`Y%IKwl07@4 zy<5f`xk>Y8(>a7zm zAC~WlKw<~RoF1LFBqylnqSli1j1dyK2`T0km=f`Nw;|&#^n*q&r(bq7U8g>L*0y=; z7&1G_b)qT(%vXh5Ev?~9wpY6o-`+^=>8CkMc08Q;*9hF$zdEA481y%b`v z+SQ&jcoordtNaIDXL4uLTkZo^dAAYfgw`e-I!p=2D@IKAIxCjA(smN2gnO)WTDeIcyTpij#z zHIl~g-^bK8_xHC^!$jM^@*)<6rie!j7SpXK*RFp%+nR8vr7ERF!wVg<2Y;NrrbQGt zka*waNzCZ98LD1#HY;tKB-NkXVxeUlsN@;mua8Vq@Oz`c&TDJL`>qIB#`tC`-7QsN zRvFhW-_l4nR&t}VvZ}TXFy8ifZmnx{-IQZ2be(N;h00-e-U)>J86SMIj=QFmju?$j zi5g&SDA~TEr9uPlDp^dpO|Z%=IC)#Ls;;&3ZBy5jNz_gV-MX}$$UHyFb zU?MhZBnY|ZE;hYwZ~dhGw`gUtmJndAr)TbDD;yPHb9ei5UtzXLs*(s{^v)suLNl*{ z53b#1kFcRJ#J#kax$=!_Y1l|3r&x_a%=Eh9PW88J$Kufx$xIjRU`m5?Yj7-+)Din! zT!N)$+Oj4SD`|-iu?w^~p{Y5ZI-D#&wr+C35EJnJcu?|B4iQsh1A17CaNjo^EH^-6 zY=^97j-T~$zY?zRO=i9=>@~LF5a5NJGdkBEI!NQc-tybh{+pmnUJTy!#lT zvQP6&zN>iP*aT8`(l4R5EcF=*l}`?l?+lh~@cE9&bCB%QeLJ_N%o~@`{H!HS(kj#I z#C|{n7`{2{m}4V~UqtoKH{a@OEIv-_$}P-QZ`8q`8$DU9d9a4*p*ndgnd%I4NDRn~ zj?pC?jHWM(%oJs1c;k#+3o>KTnC20+rq{GO`;3t?nx86NGF1Zrz+`rx(;pO|{H!v+GgFB>H3e9cXu-mddt+Q2^rAX^WuO)axXA)B?WRehvw~J>Pm2-@Ie3ZEc4Q1?nS>G=AvsR2q>NMb7 zBVVZTjM5@)oI=O_DDW4lbkSJ3x3jvB(#{U)W~w@s1XYPRNHI-$b-0+WwvVCe@3qXS z4|hI$9-r@@XLPm>dv2dm)i$#!x7kJ$-KJ&jyOWgi8(pq&zdse112f9(jq%;uEan@5 zUCnDVNf)0fx@j~owk;cF-6#SlyVZ1t5%0?lT43$8t|g}N)XuDTZHo$(>wO$Wxy>p= zw_!K=$1x2iHzW^;QRgDyZ`>6!$ey-1*Akp-2{ZqnB@x*~=&!re#f(UtE`M zbs^YTs|HGr9HV8ufa856XM^37CC8Ye4z9Pa4wXS~0I~ZHE4M2u>fwanoIjEA%Hvey zvIHf0-;e3r8;vC!11;cPT0zu@Ex9Z;s*HX>sJi(SA@-SJUP$XsQ#L6-l0ghLrhZFL z!44^_p0Ox-Q{Z;HmRyx+$WE8^r#`!<6&q7r)XND`k9{WU+tSq1Jo&D5*QxowcBPQ1 z;yF7VtyITQTS7uZ15tTb$!OV}sPhxc>(C87aP`9uLVg4GeFZ zB+;p&gouhB<4AY?rh*(@iAIIOXA0FJdD%m=Ib~wA6MLV1PkZNY*`HNXRk`v0gO-|b z5mZS_9Px|_I&o*R4SD2jbX_^iZ8Nsgo$+MA%!6Mw=0MHI^mA5 zV1n{Tk8WB=Z}$!CevCHW{Y0mC&17#(_FU24W;b3GO|2=&B?j)Gv-TDLAQ`I3sbno@ 
zVDG&r&Dd*=6JpKQ_|jfWoZYeSs_z6F#tR^ZA{=fHK1lo!db}vUHzYW*fZ7;gP(6~Q zkmNU2$K4%hit4BiI(}rjEA^tW2LrYJObh*0LvzF8zO7EUj|JDde7jU9VH8qOD$Ker zmFD>|AC7Yfq~xA)?A8e}C^kMNJ)2`*gN7<{2(NV+G5ZfE7Bq66>2P{0{ zj3&Z|66dqN86(_hU&LxH6ecF*XIs`Ymo}!|mc-@WM33RSqK9IA&OMB5`T3m7w?i^#+uF-?aNeDfVqM=- zDwCPb=zF8#-mYYG7FC(7z4ytE-GS~aTD_m4%`Wj&57N)BZ7O$8h6_qJGpl{0CW9pj z!IX@kf%qM{Zo+gWd7Qq6f*DO^LjMdf@f_UuQ{69%-;Yt0j2RO;wk)QXgpR2{`^`D( zW>bYDiS!Ousr%U6BvQ_t3-L1Z@#P&zo9K9!WXsug{pC?sh*aL`cMo1oXY2`XIdo*n@``uK0A;;)G3DJIk3i#64z->q1{C z&*V_)15VQeOCj#}%w^;{SJZ0wjLk{$GtXN^;M8z@*THFSv1_1Dpiq6Z;hgrqC;FMY za%-`vD7%PD-SEl&MeY|JcfyuXbFll%QpIHplYL+NKY+b9V$hGBa@-J z=iIX7l9(^2&roJh=LAgob-5HoOr8$kkKY6>dvDxOpKe;K)y>Y%lPZahCp)hGK;3R8%*#f;9Kf4GD?^o?CfS>iX1 z4q+n->aPv@bp55b?arINb}zzLD^5zVLzW>fbvU2lr;PW>0*j~YUxIz^7OJlaD>t#g zyIXgiTVdLFMErstC2+JUliC$;9C{h`!t%Ynj{j+R7ns|cHxlG>q{eNm?cTtT@kFR`!y(73=aum)srfP(>8e1m)2&=&OthP~y zAH2@Wy|!NFeOlzpgcdTR=E$5d?&p}u2p!#%4@YZ60WDvqHS}(>AhqwJlci9*fALl7 z`I=*XQhmCqQ#!)-$ELP8>5-fsg|ScVNR3oz2+6O$9A9Zaby<;#3nc9ZPh(Jc+pT+= z85*AUfmx1r^R^*V_;YWWn`{ux@h1Y0wVy%m#GY4P%#MPI7s}=5^<>Lt%wzROeWWLo z#HridD-3LP^BEZi(th){SLz2};H+(vnVAVq?G{(l`E{qr@VSo#P~p(3z`$TEACGt*?Zc$p3yOJd}hm#Oy-F}xRb?!HRRahTz+ z7Jr8ZeyQ`|&T{wBhHVDhR1fM;oF>E0*64REX4(?FCtMAOCB_q9rOHhx_t5W&`w~Tv z*Yv7`hh`-s)#;hySU=4K-vdZ}TBYj7e3g+)N_Eo--yHowk)1bBfjgeOZExzhhpbKL zR5KWFg>w{r)*}ZuZ7E6i%v+yXw<<$DAqyVc8n7}qrM32=6MZG^_#(&m@C#P@QW%9! 
ztmAl`9O|HVW=fB)Qni<_i%RP*I8tll3sk8CG0jE?QV4HF zk?)EL|L8q^p)VSZTB@b$Iia>jB39*;`UpbdnumB9uCHl}NjQ^v$;0o*WNVR8wF;XP zmIsB%4#NYIks_GXPAjFn-Q0B}YH1~>PB~^|ax^YEtqz>8im@BBLDYPob?_tY5oNPx zlJ_AUpY?Cgw)#dV1Z#2$v4G6I8>B(zM6gIk>D2wi9tB4RJ7NgRuy=nqam$yj1ve%r z=Zd5bb!Q7tkK_r%5X7ah9DDldo%;}ETxj5nW+O(??Gs+9*Ev&!@X4eRz7i$@(e#Mr zSaB{8Bj2gvpH-R6dXUEOb8NZi-uEPdxe{=`%^6G4?JNde2{Mt$$yP;Vxb&aWYa}{MT)=08b&feg3$ob6`iM5EL$h+ zZTxKq_jzQx_2s8edHMEG_wdQ3jz`0%)`#cMrOu!&3JXTxp6&FKF!p%gEU9Jr+TETN z5_snsXQJpFe+EUEe#YM$IOC^23Uf?r`#7MGCEuvz;@1`Ge5x*L3#>TUFd*c8aVNf% z_cYa{zcTe|qpN-$R#wfmrwT?&N$8Af<&E!r(l+ios0cgGPo2-6@&ajMKEYHy z!}(#btk&NlST+^Ey>ULfSx(qX)5oTD_B5{j>@Y}6_Zjl-4zkqBXvc?Pk(L;%D+EA8Z*k{G|KRNi zS(Vn42fQzJTF@p2yB^Hq%7QQ7Abi;OB-@N;Y+8P}|8;f3b2ut^wNjgKwn*k0!o^bf zR7IHQA*m#VTo+0AtGwyL7N-+W zdzcS~ibRSSPK6N~OSDDY+nB+|*2bgdv=1M|-@oeg)m}E0F&&*kQ2`4_DMVU^Z97(X zGIKcyL10p+zAfTIHAv%)Fy}Tj={BJYre`)yGG*pFkE<}HRRR-D^^0FwsuGj3w>#!p1#*`;T9_`y6Z5)6DEr;TdllSn%P_OlqZ0;@io4TG zUtTOVP&*>hS9|XMY|{rRoL{j%4ok_$sdFS|e>!wvEQlTD_^9$^gjs*&Q5TX__-UNM zDyFYy+M+tmnc_BjY;v#e`GKvJ1McZ5QUjNy<>gpvmaQNBnIx&!i?#{5*CT@PWS0v9N5Nr#_g&Me_`Z$YO`LQrBNcfGoJj=?>a3vy;es2OkgRe389|44)6Zt z)l`7EBlE4#aP2AHj&P;*sLoX)}Tr z9yq+auWwxs5?(1HYsfF%(q6mqfi$@p;hd>ME3bZ}Y(5pbZCd+5Wn+=E(zIM7Q!g&a z9fuw1>iCZ zoGQzfw+fEU3D|VvSs2Uk#yNLbYz4gA>6tzs5)l&;PlS>(}3*kgo1%%aJ0RVH8ra^jmC9f_ge~ zSu4vbBR{Ig{`$plX-so@QDgx4e)lD zgp7x8W=h%1TKm3yG_lC_fx4Qv_o|1vz879boNx*ND@g_`DLlQ*s*WQ|$mGfgd#&+) zQ#*8?^`eX#)iBS{*CQY3lB|0VgP{<+Pa&JCc&OX^`!uQug}EiRK@VBVy&g5RjcO$m z?_UXPO6r*Amp}f9hQWQ1#u3HLK8ulo@l*DfMw=?;;xQrq^@gmuDIR^#(~3u)HZDff zB)G2F+zOhj_T~DwM&W}@NaENl-gsRjtnD_UonTI}kJWI#I&*n(hQjk9nc?uGv(9BA z`vQNkcQLCOx`Q`JO`4n89LeT9QsD0y14Tr zu`8k1>H!<>utWTO2^!I)K;gn_{5H=LsPB6&(IOH^%H`>gR|73AfCpvg6?_5Pt_A7X>@6RpK;zPCG70iCK+Z(V+ zDI>A7RAa+22;DgUG_q_UNL<7icfkG1iW8tloLP0!BQ<#H@KDPhYkbN|iQL&*+VG=- zTf_QkN$ka*dlTPyjr-cvQ>~T!j9V=BKQVMq@Dn~TW|1&tyk4PFC9(aK(}dlJa~)X) zEq>}f`E`>~fx~8bE0OohmB?;(*x2gI{gubvq}=8NkRUt0DkJ1e#}8kM4Xtkjax{D9|DEz2_*tjHn;JoDX$ 
z<93bPh21an@_NThOzjD@*rd4K-9A9|E+)Z^7epLt;oC`6F%4t8#zB5S(B7#MtXEBr?an&Vox(>#Dtb8 z${f&MANSy901(tM-qbETt)25cy_R(%VS6smROBjSZEB=U@g1U(oOTq2aIZu$P?o;5 z_lDzu*QHA4<%9V2I``-B`JacCcsq42J7V&bV~J`p<9i2Vv64K3R%#^C)bGLYky(b6 zRfZO*HmQy8x%WYbx3>Ahi|uCyyB4*secK^<#yA3SCfVZSPMTX`0C!zh&w-!1&h}wT zmjwGZG0U8j)dR>KD8504SF?<90y1W@KZ*Ln!F0(nJJ}C749`f zIag(K6;iiP-W*&*^Cx4Ia?a#Ao=!-y zVtw1Er!|hMIIdFjA+^=`eb}ZW8X$Z-Vzx`1mnF=eT4Uqw(!)8jeFp>l`5jBo@dumR zSl>i;CbcjsNHCuC#lCDx8SE!sI!XoR8;+rR%c&?8Kd6Gs=x|ZT7^2&z$x#zistCmEksC3u@1W zl$|lwTd_#f1dRGK+W7Ec-2}gtcF^9)vc0Epp_W! zYl({YOIN7AE-PcarNVtkc+RHB+k7zLCrWY?;%a}vtSZ8>hk2>r!e)(*{iH&#jN9>! z!)=BzHQk(~lJUJQ*mQFVHSagH6er$I-J*?7b;yWUL9SDCsy34n2KM1U>2vKQbll?? zyDz18KlhWEb+=;aGs}CQl-zXfU~wDmF_8h-)KL=Z74H&b zQJm8M5-%o}EjL4HXXhSTF@Mv2(d?SxcTKNw8ePFjKOX!4Sb7(Kru+YYymFPUtE-me zkYdGEx=0c#hi#QpR|hU{=V2@4w4{>EPLj$9%T+33D+)2@v~t)SGLh2~+fe3w+Az$_ zHizG<&-eEayl%JGahI# zu=unkBl#((=Rz{jfMADlXWkOBwa(47*l?P$S3!nn9mb*}kCPfvM-~-(=VzRaB#UXy zdb(kzl|jkvkgeR{ck!NCy+3a_<@YELX?^9^D6LY}_2m1V!zA3qiT0n0<34Y<^3=1x z$1zUpV?UqWK^spxMIg}XY?-s2kdWv3@d3;hKXXd-+C@m#`FA@P)iM-56R#w{iN_rF zbMrI`C7V~s0xwYIHUqeS62WIGVduQmz;9* zUCZpMx~~{(`DtZ3>fo+diP$P#h-gB3PO6-UVv$sb4;C#3LT_@`DEKCkMu(w~x<7+GaQ) zuJaH63))VZJb$oO8s|0D7&Vb?)j>IfOBV6m>fzjM|wJ$>;Ae98P{A-ElmCU z?Z;1+XtxY2IHYU&6hP(%_IK;SbE|%UD^Vupm_Oof(HCs#F0&)Q2 z;8?R^sy>%zx>F@ba_uU@9OSsA8rbH$UcF+3f!Yz;G1#D9$_xANVn|8nl!wYp7U@Qc zN=F(6-0!?W~|G|8}fPC}MJGV6Iwlnrz zpS8<*Lv-4d>NGq{=&a2Qg%#$g6g>)_KG2bB?DS^q^5&_B8@l!jS{@ATeOL{g)4u4N zFn0W%6-(U?W>B9ZT^~(q)-!mcBbuORS=G%}%}39a)6cIq30($k%BnTLKB?jSQfqX~ zA8|R{|Bi*f8Xi=;X~WVvB}my&$0|pIyNvSue|v}#$V^Kdk_C&{Yjx+&UxT;90#-j9 zgk1qqYD~3cvXvi1+`PzPC_=UnrQaRQivNF`1u>cag`CLyj55zrnLuZi>MUkP4Df}+ z&6E;$2|?WyVH?dxeK_TpjT=ad@hHM-71BS8z1LbuMN37`hTtCrQ#m7EuFupL$G%=b z#CbW5IcQHgcj*QN9L8?YXq>cFzi;lkhzO(iY&ip0Cg=iwWV%!{Y? 
zc1$-BmkIc2^-+4!f3n9yzdy$g3(veR1S(9+%V6p%_UZVirWWZoZq=i~82h=~%5P-t zra_3mU()oxABZOVic}=ot&Ez6Np@j`rjXrpAf_`4&qp?)%pM4SQ9-WHzYSYDJ2ctZ zt}%G&?l-H1sZCtVu{%bG0*TyxziYE#%9HJXE8m+_bDlkTZ!8y}mGSkbKGPh&5)KUI z_iUv_My;0X28t^@z~A_~7^Gx@RZ)SHWON9gPtP^Z@fz6{<1<;~(j;(^7Los2vYO)a z-s$d(-4WEDi)N=6A2eruk1s%n8B|#dy%kLGJ_{l|X99iv$kte0^6tZQlc&!TfPYf~;}xuSlvg-n~R$W%PW zOQhi}eNxtI6C{24H~888B?S?#8sFi+NJ^`tyrYbB9Xdx?(qWLXdfowOAP8X^p{qNK zjb-B!VT0T5!ek|Ll!4{@7eK6CS6snD+l~2gNV#!o&Wa?vWOZs7b}6!x_8CE)_#p7t z=I)lGDS>oBw5bc2_^ib&bKzlRyB}AC++5|`0V(NLdd%A^rmpPeY%4zGaSXWvS$HS~ zuIUMN2z%9Mm6@Y1`i=Mb#?F;|^nVH@VW|KrXQFvcNEY0S{xW~m}x+v6mc<-xDM&B_2seQI$ZmWfAjFs z7CS{oTL^>NV<%uylHIlCI@xiUy)|{0DchG`e+_%BA-nURer+Hl#o6f4z2G?WY`TRj z*diT9Ovh}3APsI^85n?VP|K?wN>aS2GcHl@Ujhk0=N41j#8DINw5K6wx((s@-6rJ| z#>14DcNzg}*WHYlN9W|ojvGY*;}1S4>49%yuFZ(# zC2l>Al@OGhv=3Y`-|tobEKABX#mzmqcOR8gFqyVa`AvwNHtE}E`G;(gg$k3^Jt@$y z5h>by#+9YH$8I@snl<_5?r(HFf3&>3SZ9@b!LTqD;0LFMGc87psW~x_*KMh%{Zg`` zb&Av;EkD&yh343gU8` zZ$R5jfoQ(Gs!Z$KnRr^?S$TgIzM*0zH_jOPM7lhu3|ilFQr=zzLUmzQlKj9303X(5 zM>dp>$c#rQSqY(kl-lTVnk~~|hL-T;t2+?@$c>4D$)clr9yK%Or6rX$9}7!PL+@pF zmp~ORUc8u;XOQ;Q28nZ}QxR2i$(9XSZ1j?t$Mw33^U%9MEw z*AyTi%*~dkg3pp?8c{doABolrG^4R5O~xQ%v-Pmz;Lx@4c0+Oiq^0@7&Lr+zVgl~N z1FaVMKE8_+_Om7O?M`lzT{jx@Yy$EZ`aKP^fA_YD z`XZI>vDE0|`9URj{PcM!C!2`vV0?q3^UjB-Y9J)DmioppHfg zWka!F)JK@NNQ)*T%Dgm&Pn_hy2Ys7J&bDfFvcuD2MJl)sWPO&VC23SZyM;i5fnlDR zx$J$|O7)2)b$FMN>_%?2r`}>Pm(m_Sdk#1n7@1X0$a@Wb*E4R!XgY05tq&r~7ajd} zunoC`0sDjNEBQ~F`fRN~tFa5CFT@^Bk1~zCek+Vh{j~wO5es^5=~*$YU604by*Pu* z9Qz*=a#Ow$@p|DOpMNF5>M6Cb!(R5}`nqFsW=`J|e zUiv>nO{MT-w)+bcUbxRVxW*Zh6FlC87qZ<}H!TKe4^Dco+}^$nL~Ym{#o2a0cg7*I zxVNo9nOSX_mAc{&hqpj@L0Rt&926@}e!T-e19G5G@(Xi_e{ zvomCatW+JXZ4`KoN2qxyQV`^X@iAVGD?!##^%~x57LbqtWG9 zN~R+Hh;#dLhMMEF3%a70l*Q2)tVNFY>=`(AU&m?u)T5|SV`76D}p!gSzBZ`h7VsLxbaF8_e zH*H-!{Lu2K;t5GRTB|5b_*s#EJnE;^HvkXYVQoqdAKRz5Q>E@*cTHshI|xdXA4SC4 z8%Aq~BzbzduvVDXpM`@34chd>a&(qAA;A3MQ~k&{3&hU8-gZ!QSMww;cVDqvUHk>H z#btl1jph1R7dG4>`)00R@BGK}%1~X7%;$r?31|}kqJ~3w{}X9L!0Djt@y*R@5aJpG 
zOKDrws2*h+a4;W+c;|=~+5PE~>v?r`zrju`hyG^rL;oHo05!l2>KMWw=sLZqd?*=Q za=g6C>XJJJSf+<`B}U2B@U>PWSo^~ukBBoF$Sgyyr=+tH#jJVx2T&MeUaeibG2XUd z{2%@rmGzJ1z~upCFaWd)JSz?n&QO zUMhG=EQL0g>LGI?%U9mZiJ>uOCHCX#w57@8K3#(lFXKXf++SB@t8gX zfZA4v37^X6YSTiKSMEE$I{o=LiDj5;xH^wWS(g0TFR`QIB4w_ffsvU1VCmb^&@aOo z@k!ia+(AENp|w85S(q-gFzcxbROL27TQV~s2Ih=TDO&QrkG#11=XV`u*<1l)Y8v_`OtEXC#+IC|s8jqnuB#z~x$ z_h9o0?nHroXn^AE-Y)(S^ZW(rDLssLg;|!b@#5lLTsS*B&vz`ocZW3d*pNO}O_sBU z-6>l@0PS?_^plN6#i_s-!B>_SkK4-K{pU6=h=kR`vCy980rgPMi%TMZR}rZXFPOkr(EmskA+p%LUtY>Ws~VHcj7{(!P9pD6+{~MG2DgQ>W`#>vdM>asS%! zO(Vyl?vk=e(wF5e;u}oCn&ZLNNS~SvFR+Tf+;upSk;1iUi4)G7dwpn1E!BVEW48ji z`dhn6vI^6j5i_F205c<=4x)a=6}{C$B<^>3UyLp$4kP03->bYiEKYBxw@ln#;#wph z{iH`Z)Bb6@&g*1Vam6B&w-J3aoH3;bf48W` zw|3H(`RdSO_+OKH;aG8wmc`d+F9N?E`kuGvmB#d$(NtnWg%8KnTU-Hcvc&ChCNXMG zo3i(hFQ_T7W6f5bDwHFbo3>4_)2jTE^N-u#1xt3(cMrG#eM|6z4yDv<`~Qc!yp`Wu zX5&+lUc^^u3%H{bpbW_p&ARc(jzYI)-EMme{$N zQS{Bn;2&C1Cnx@1{eT&|2E(9MFnfgcpPWUN>P^TqG;c^^GyLqN`m1JyXC{|+x$(09 zRi=NBGB}lFEi#)O{2kB}H9;%p*Ovqrds4wXK-!(1XF_stm8-B9Oh$yX2F!5eG9&dP z(Xb(CL0W*0{)5^j4@ybf`oVzx!zq)7ranGwD`q2UD?ME-VGKkMsCW55Q z@1cU?`PzT+2y%RHpMS}^8eMwHR!h%7r>I>m!d51bjvJ&$ah^5D^K@3+BbOtBCQM(kQFERitXup`lNR>7I*V*h@=;7 zSSRGnGadnoy?ff}7%Av(!3@}qi8f-ST&0K}^RD$gO9T@ydpo&0bpbBw&E!Wx2PB_M zdBLTyHR+lo(2cJgi6KlS=GF#Vl-fQwjRa{gU% z)8&msToKESzp*8UzR9UF&rR=jRN1;=62{{YT>RHcUl7*(&Omc9nG|`iNYZgjC#62Z z1GS|>&QFoV?3_{wasJu?WOhBRbi-A0;PR?^YZ^vwc-Ccus%M#gB(1)e-@od0gB;)dvx9UrrjVSpa;?kT3a$!t zX%7bL0NNXD^aB3=g}S2#)pk`X9izAB|6#2)jU+PCK<4)5e!jH%xnlIdF`UyV!HuJi zn*Vw9gkz+0PTmJ;n8r^%T4gg$7gnnwy7WHioCD3!A$(`s)|H3{`sLA6lb$n278ieW zGN_QJk-8w2)yKE^k2_HJ6-(~ln~z7`I$|JIjY*djKSGKt(U*itL5Ff^U>ib3ov(wLxkDnWZUYE(l8x(|x;)bge=YPA{? 
z!6EU7)9lg7eR|@Uts9x@X_M{Cj}c4Ul!=Owo9z=fCzOk&jL@PiJoy-gBIXgvb@C)r z^Y8x``4YC?3Z9`JX?$Ad9 z?(oj!eEjra&ZqFK%J_<___EAq$%P-zkQ{SX9AZx1%Xz&*&LQx%`TODWdQ})VTi|vI z$)XCfVrBD?f@T+hziip9D}KL607C&F(UQ*xJ(zTk-!Zip|Eb=*{agV4OE&%9a&&fZ zk+>~DD}dcL`WA0?YOy2x{q^POGjPYju3E>Rlhoz@(VYf+KLgQOG`hN?}%_)||xdhaC0?w+Ioiul7d$(GIjH!)S zx1{z3>y#B+Ven&m=LwJ@rbhLvxo^k2!#Um$t%J*5OjoudbIfxzB_51y+@P{BI2QLY zvv?v>eK9wAP&x2w%ned{6BNXCreT_}<2l*gtv)J^Rp5{W>t!J6%yoYyu+MqnQ)a}o z1!4c@L8MJ4?kdt^sVrsW6LD9x{LCB@K|Z15XM(tT%3wX9C?se|YYQuE@Al=lITiGH61% zLQEctyBV1dV#I5`ssevkeONB1tvpi22m}}Jt_(u0YqyvY$@?t?%@y^l z={jk69a}@?xq70;{pKK9Yptw>yRC0>IJoHcADKdQm|~f_Zgj%9I*+&jr{5jPe3^?X zAw%zl{wOrvv2bv4D`5%4O$_SFsC@YpTz6l(y`O&H*vMCgXC=Ojx-Y)Hmt0qK>KKMk z*&?OB;Ae_V`SV)k!c3N127URj*+bZWYelh&9=Esnx5<*EeU-cmLe$sL-0wd;CK5f) zh`hDzt4aTVG%KnO-rG|ovO#7~3>&|<)Lhl!!yeXN1|qqT7-)HUFs`N9YVOdB!E)KO z#Oniym0w#)5ePPmx~PZmj+m(HX~_vKw?E0}t1J~as2-2n^=&zZikT?!m;r+awGXxF zAy#~}4r9Jl;XGL+WL{|QT4f84isMAl7n#>B7sTSqL})6BS%oG^4X7>_-2+>>{SP8h z%$KRr-y3nIXC4)3+!ue`I>F{Tm?_6fSV&(}-(KSR|j1?E}0aW2oic zRh_Pv9ZUHu`M+w>Zgsn!jdgYc%4cOI|{w0Ey$*;Seh04$7?+i#BLJbMX&72hWBXjpi|&n z-i6~qOee|wfNkwSBxrcjHfb!6mQ1!!Ev}s4l~fk;JxmIG$F^n*APVSzrgf~)n`NDl z6+&)NUEpnx4p{OM0i4cN*Q0OCmaBvfZ5FCOl4Kh|!gJ1)emR+SdB?^8)|JxY!Lg;h z&|hcnGtzTff>`H`KN_Zy-pB#Bin4-h?_i&?`&BxG9V(!ePd*|w9-QNVa9KUv771=0 zmNr;;caepS*&g~`*6pc-PGil?`L*JxT8X~}>i~87kNh1R`}X#}c!d)U3b&=lp>!QG znDG}T-CZ$B4D4rSkSk~^+57~*`7&&T{lB$8{WAAub)ZZ@q95vMwe*wHm&3Iu#0-Z0 zRxb9i)Jft9tS;=)stsQr+8jU*-o8I!9#v#GvFj8+NLy!lvr!NTJu}*RZXv}prmTcl z9-Xv+5cNgcW~IrAs}DA!uA9TwGIa>YJ`XnXUNvP!4*o7(0+<5oZX%On%jK6|U$0EL zWHm|`M%OH%j^8#^FNmb6A-+W>AnE6Hb4T6oS~a~SYRsLm>z2dSgBpQmBlxX{&APAp_xY!d?J7gIA42V%|L zuhLYQ>rs~9XW%FCIYOv`L{B8Qtld8sLRtAUZ^*oOj47R{uHp&oN-$){|N-$y|imfYMr09sPQs^)>&tmIq0>t2-Wq* z1{!q*92a+yA%27m_3G7ArapETRZJhV$B`V zkvqTFY+EXM^@v`~b$^IdK6J;^a4=`+>%~E_tg+2MTdn6~*!626xy@NgJX*HzyH7D;Z}HcX3P81i+4<<;dHCY7FN}r$K*$v>1c_Jk zDclw+gNu2r0|*D%g4Y}4EztHQU6Earl8#k`(?a6%xO9^*vy;i++WT5NeeFRqA*sQGp29kNuPl_wtKC79W{%H2;%i>bla=e5$HY{(sD 
zE3zGsbPZPuEL!kiPPD zTD?w(R)z!QCPP_*Nzv`8wKVf`_8UZ6V}^uy$GV$2KAlicM-{ z-!fc#^BUy(-tKC+%f8iU+drlMwU_5_3lxJb;uN~`(izLB!{7UxkNFf9n%~=FTCI$x$gcQEyH_9Ei^*SvN$pH4#QwJ7{8J~5Z>02+ zU?9nzQ;W@veIG1R^RrZn>0+UL@Bn_%YJs!EMbCv z>c^aFxw+$vm&bTeWYNqzk^8d$*}kt{{MTzMg^arW)I)xBj}>z)%1-)eHxM=S;qJIm zRr!37cTq>pXG;m=^SZDZ1*=VQ#~1bNr5zRokf}==#eYi*w2I`OT~FEWg4;l-06P@i z#O%dEtJn*tgMWMvi|6{7j+}cp%Rvw=5h4UY#t3p+!T%;T@3JS%G{6+WBI4aA+1ZC_ z9QiQ74ZLF&Fq?9*A zUcbU_qs%_6^{3rpmKRJMd9+xJ0$0vptGzoyO6V z_^QXaC8#Rwl4KtcIpUG730I4ww)?Xyw?Tii?Ptr6_LnSg*Z#a*Z}EiJ681~W?zN_8 z+@xhCR1!P#J#>vcmX;)(A3Ii=RmX!wFhv8uS8-ZWpu&LqN1=(WTjk zLQy=p{uU-Y!%d8j%F5a_dDG1hJv8YyIf^G6#^{_?raftL8#I}(tpGPK-3;;QxDES~ z&Dqg}9BbCfrI22{(Babe6w#w$tzX3WCg>S3T;cTGYP+*uH;88>dJCJxj{W0m1vS98 z&$;X~)G0arJ^VoDz#r5aPTTg6U)p~nKMR+_*fovK$OjZ@7W09q<1BWgwrK#e?=793 zg8sWmNDXa@4&Q97(xqVBTi$mDL-}1aO5m`M|C$Jir^>kvtWzJknx!D%kwZ;uAQts?G5ynYf5yivw=eby-Sr&)Go`l3L!2UNB{u4ykhbfjSB+!cG z@d4&sv8^TjZpXX@X=+%sV;(b6I}$63glMmMg7bRp0eu-d4@zN3jfWK%!bK-m4?L`V zF|hV{%_S_UyjvA^GC5JGx{**TUM}8(b2*61Q#F*R(60s@kq)$qAyv#g9O$ z2(ikbZPwJL({LBv=9pB`IO_Q*5%4%0wJabBi51$Mm%OPCiTM$*bT~3=He&n2#dmH_ z$UxTq6Ik%V%lu{uapK?{p9W`#HEf*lTPXG!XXk5uU9fbkgOQdEN*xR5?E6X|_fGyU zZ41>-)VC7G@%~5pp*)Fs((dvwbS!~=yY4i5td;`)Ma9T>L|n_V|GEeRSZ0ZMG#HE5 zFWmXCH*R&x^t;t2QvA&>PktRf0QJdwsJ^#mBoaLtQTEBXV_p;ZSXZu_K2+B@8`D6a zvOM3j5X~=aXjkrPMx8Dw$2jii3cZQTPiwG!0uT@F=ypZjxAv&^0D8J;XO?gWF2;7`I z=ED2a3DP(6O7ewKm8@t2?aQj?!f%%6t%R@~MInzqnI*ct`f23}FWBCYJh;W;>ZpMt zttA2T;|9{^Clh+c$(!Bgui{ZYGMP}~uEwB6v8Cayd4WWZ6oiTM<$YgI2cN! 
z>ZQtkwK@Hm&wvPLQB7;OZXE)4+v&7izXVJq1@4;7wrHeLpC1KfyXJGg$Vw zU$!-E0_|Fpy%VM0;BP><>gcWkbcQy#Jxk<#sN+Hl4Erw5x9*QR+QDpS5U)vT`EPMN zot_^WovlIjGM7dLH=#~A;{%0a`2XpNruKxHrRCqe$B{Hy-`kY0bJY`@BCyo2k2l;E z%qiFktAm?nCyW06)>Eg`z!o2(d}+B`@h={yj6i|a2C(A!)045QxRU>YA0w4wq?-E- z$+ieLvn0ZOU~>#RlTi02knw`pNAPC~=WgBf7|LV{nwu`J5$lqAHj_G*9%*D6R-W=k zj-<7i+Fn93n#l)QfJ1EqAo}-9QVXlHuZRcS4(RzaLWVsXBzm;cw#x}Kq|?_I+ANRy zJZX4$lxMia_y`#5e(Nqwl$M-k^Ad3A_md9vmo22*Cui|ai#&&`u-!&Qx1Cn}hGC6a z*OGBQS+8g6IPk;9EFDjj)0mC1zWo=n<2zAsiUOJ^pBk0&FPPGTmI#lNVHT6jo5U{M z!_(9{yX#IjI~+2{l4EXenq4)2QfM}8`NYo*HutBZTYOktUJzmiCJ(3)+*yh4hT=6w zK2pM!_={%JfxCUDt@25q#=yAx!J@{9aF>&C)DTl(Ib!^+Q|Q&Vx`s~S9}@rN`NO^9 zj;84EX6bIpF>p=*^B)2ySaJ_sRGTeFgTGlU=H0WeeAEsKekg@!oLuxzX;2j53o-SmsgHz;9OC{y||sOYb}D@f+BXj;hmw^^K#FmryNH&VerkpX_D2I zXYvdn*1xajcJH}MJsP9{6$#LkDO&DIojW$eg5Fd3>G_?{M@4aThno>QlU9Z&voghf>zl7I z$Y|eb3ol##3bWmT9zO6pZ7VEhqU&bXN!V^ZdgPXwf9xoTHCOX5XV8&Rfo6f|_S!1wl09qTeA|7iSS#+qvoO8v_9>+S@1|&Qls(VQ}SkC zPs2ixt-GGUbnx65IQyl@E6cM!YQbZW!ypL~cw;67bn|Qm zmzdi{?@7t@r!UNL;q`-hCH$X3#|o;FO@L#znwj(d0|?Cuh^XQovlmYWya~HRD+pKr zPT1Kt?N0fC*f)*?#VY!M+Riy!+FyWw$ud`qeK}oxJ+|A8FW@isr;=|p?h)x(-by+!vvRQ5rV(i%pW4w8 zlz{2F2!KCV2l6>=S>v08alKtO=ne!i5=SYY8nLJ9w z(O_BnBOKxLT5$9FW}u<(gT{JxJ;C>lOKHzS{*&FZ>$I1aev=L1D?(}iGxm`5gl}cX zW9FG#c^Hc}<^_xOeZBGfVCdwUNnG0H9~ZA4zq_Q8Y(spNXzx`Qzg{y$=Lr39%q7fr z-V0fp?C%cLS!sd0A$$}%-~M@G8K*qMWT>6r-W6~+XJJ0qwEXcaJV# zld!igPtxKjp85hi2peuIOsGTSF;%rc^h!Yz+FD6&4L~t3JUTWA?WtQ<3F%8bO1gbT z21${SRA%kh%JP@2WtvJ?-hj8r*)!k?y4^cdpi-CJzTXVU$YPwzgBi=O@VCH)!G=!7 zk04($pr;z6w_*nE#_aYQ!JIx%Mms&b>>VOQRGvQ66J9)iu}BYQF(xwfm<69DP*Z1B z4Kqpyzi-6y9#(yhCr;^X&)NZ1y6do#(J%;ML`4-F2ggl{+57EY)K&8L>lHUEhH^4$ ztdZDO!!)hZ`d$Pnf$OryLKnfQ+_NBRIK>VzzQX)yCpz$2_V?_M70icFx(!McM4MO* zvUTkY_?2jlr}Tfg2A(#&B(y4*Nb4H_2**+_%@SYSkHF{ZM>_>h zJn?Y19+sa?nqd8nPA+cs4h~ne{>?1-W6~l!qtKa5YS+LLtr-O`93Qh^dW>604LL3; z(otVU{HmrBCYo|A@a`DBXHv%^1T!Q`YLEBTT9livC$6omw%`)(j25rku@kjQhNQ=O zP{oH936G*&pgYs_IN^W(x0?onia+*(S?hj#$N_29b#o@}#kfAwA0DB(nfs 
z)IM32O5;m!B>gJ?H!rA{+ad|J;Hk&;Uy@6V(~No>Td$pO9|n2qSq4QOq4AZ#Nh+2H zM9q`3{MyAStn)^{66MJ}{HKzHcH3TfNMKX&R;n^R*tT#(^fcT&yY_qHMfO+g;rtoV zx_IF2;P5^;`&xgpHR@fIop;~_fVcCEx_7{nLIslt-IrJpP8+vxID!3A%}JEP+joes z3`D8_)1Q<{Lm|;z*O(Kvd{VCM>_d_&9jblit9TfC;)^`SiPRpSh208`Tiy^ITKu^h z>tWRyTX+7QovUfrnTMUO*spgx{XW*YjTBpK6qT&|fdsyTJLE5=@9YX+O5{Ty0y5mN zika0LhMd z-pp(?jYg*H6<^QOZ%IXpY&;qkFt_Q5r@BO8e9IMGOUUa3GfY4Wotdy?$2Zu0|{`YFo~)KiG|Jk^gHPh;KWO z7nuTNqgMl?3(%2HY`uQ$#|b+TV*&za{94itncMY$V}mWyB3{!k)6SD(_wWkhJp79hQ(sGvwWZvTOZCa%h&R5%8*1ri;v#+$NW-9-3<;hoH-aWqZ$Gy&H zF!8NgTJZE*nm(_j%xPvxe)lB!-8AIu>hflhVdP5TyvQIeXxyTJ1Q<Hg0hHfn$)}N)tk4K0W*4SSPkerq~V4h)nr__A!qg7Sl4I7r=@t1jf z^P0}A>97f}sd?N^ubs9guh})inYkqH1*hg<2*)K$1s0g5}!5Pl`T~!`vv{=e)2QP`OcIHd(5!mTbb~E=PDvm7=H) zgd@Kd-=5PoU}GJU`M)nM%Dqa8(Y_4|&n&$!nqL>Zz`#2Yz8%X)5SJMLNlM(4o)*D` zlfNgS|F?$dVE*}*@KKo?IsR#vsJqotUzNkYQ8!FynFm0O7k2^!!4u2$;xy*@HG#Bc z)jo45Z!p}@q7yUyCml5Qn)i{@7EQI|fT+plXIPW-bmCJm@85yZKG4J79oQ~GbUpTt z7L#xl!nAv#fD9z}$yR)<9oDV@yqN8$i2N^Q)o-I$>h%!`yI7Th&_mhD{4M@X(6XQ5 zO_zP|&mhW$18Mp827pZ$b=mf{GtAvA&4Jedth-y+F4guVU^b`(!p^l#C8U;|E@mI^ z)w1N*{Vx+WUu3gS7D8fPI@g7cN(zG2B*$1S{EogcNl^jPp(-Rjb~l>0Hc4XRnRo+n1o=ekVXK`>o#)$4nU9Ldan^*s%Ulq_9Q& zhzQ>b!MUVS>;=0Y>CnTq&sr|s)@JrE(1T+wMq&qzuY5|!M+7)608`f-k!Rs&`}&x) zP?ps}`cMYop07`j$Nq#5m z+0n@k)!_JG+4sWa8+aAfkT81bh|P}W3+vSKOD+HISbQ-SvoMvGI%3e zCjm17xi$YKNu)2nS>Yk8>hDGlV*Z1u@!_`|UE^C#4%*a}NQX4aL zhj`m(Hl}b359yhl9yVwFHY7c~s6^-Q*O`;sImHd)fa&Aj3Z6nq3Bq2m8IzKmU<5V9KGXk0IF>#DXYm*y3dXh||peG;9YH zB%G6WJq6H^UWqK9j8=DZ0v;9!8TqIUi(YGNZC_sH6xVn_?161vFZXn$|0et^Uo@$)suzhO%`w9xRV7tH)^fK zuPp{$BBg(QKO%nE40nYj2reS1rK5u|^v=@9fTPS`!tew68qs&O(?J9pG4wK-h`cYh zYV+2~EB_8tA?liP@7Cm96=8IQWWBqfEU)L;{$*C-G;W z*Ku$icVh0YhY*0^RNfCRuff=iVnc1Pk0kr9aaFs%Z213Uj08mS?U;ii#BfYy!7_*( zWYSODJz|WYyvRkfN%VZV=H~Dwt;|V70#UyuYD|AihI>Thd&k~31W6UI3wtMwoF{a^ z*%Ot?9x)1?USTL{)o#YiVUdIdO|BnxHnv%Ux|}jjwBmVOQkJAi8zuG51N&Jz@U_F$ zDgO=t@52N!GZOEd1)P=KBL-0kuNrX9Oy=ikL<8VAn(3WWy$5BRp=+RkNeiuFl+)j5 z>zN9FSH$i{y~<7FgQt<{`pbJ{R}r>!PNCtU+O3_l!R&Dpj?Tl3td(xvv 
z^Q%ko-?9>AJ{x~_lcd)W38$X6{?Tg}K$oLPUAdX#7&UC;%rsm#)6ckQj zNdl1?Eo8~}_yhcIz~9Mzbx7w_cMXG5K!V&#_N=f@Ue=Q>Y_hanI{;XfwOW7W0+J-~ z)TAVH`u2A85&oqyo4o}p3#ePWuxCVt%x2N)0>Fy(*_yDGzqtW`mSnDs5wo6%?H~Sje3LS2j>3gwDUU80euLl$X6lq4njfh4`m$5CU(ROeD z5@I-R;8@pYXE-Sqexd`%bp0B78QR_$4e2jD+e0%DUIiEeTZ}HBrv1HGdw#631-GD4 z46;2OzGgiQuqrkpmKa|JtS!=zfx=Jg|D1ht!BFVGZzk5vH}CsP#su*w&w(*7#*l$W zvZ~Mjtopd(3Dz18olWeT zF|fKT)dOIv1Jco;qPq5}QAra(=>ImT=!%oRR5QL6K6?rKRlfW4(nsQ#R+93!So(1O z1fflp0r>sW*8G!OnnP27_fZs6oNQ~Ay6uyt5=^I5099JGp4B!fT0^^=&Z>#x)`T2o z>#*n)=@=)72)qVFLnoK{kmOD%>t`JfuDnVs=VD!VY2RfmCl;vkm+H4Y3k!vw2n;X{ z)jpE_ZL{8ErdITTXSv9$2>Q{46{;_K)_UD0OsQa)GiS#$CPQ ztl!<`z}8?G4@jR%){bGSJyw&q0H!%)eRSRXm(d5W@D>oRW^6y!j%6e@4&wh&C)?DM z23ogVWe`=$Fq#$-CVd$Y;}h)8KMkYjQE28W@Q%j0pAbsj)~R-QP&ED`nXsieHj!KK zTC|x!U;eM95Yf`6$^Gzvhpe6h{08_QV~s&yJIVa+)Tb2C8Me1G=f)RRS*iV)zpJs) zs@CE!D^3eR-tNKk&%1lO*MzC$Mr=~c*X7YP9X6j{>y&8G`zaBb&LgVT*{y58J3sbh zSzMPir5a-S->4^;3@7S;i=%6EPU~6jIOqxj4k{oeHWCz5782)Bp@NWB)<|?;D~ng?E<)WRqaLL3=2OGHsqtFh%X_$|y+Hry3y~ zFb+(?Lk8jWy%z$NadX{8N9w3~@e-*W{7^8#L>t5V8$CoWh86K+B`J_1Gy~HMU9BAe zb^;y(XG)Voof8*Rw`Wm&4u!mF_g+6_gE$Q_1lDMJi=`vZtoF4gL^k1+5Qx{7$qG^` zBwdjuccaWCneD*7G36k7X@K3ylMc>$>0_zC>!sy8fyplOT5iOOB5opJy+b5v%LAqV z`T}LYcU$<$^&NVrGPUEkhXtcblnbk6^xfI^t~=CC1B4%rf4QpdF@!Mh7XoxCF)XGOm(iMU?E=x)=3h@O5p1P@D@bE|3}oh_%q$c|NkzP?%a%o5|(zC z`%c|LlEb#jVLG~%^I^-Oh9yP9Hj}$#-~I|2hSuBt=0Lqm6GyXqK~_*Yb^m3ZW{E`=$U?@s)6We zuU|av@a-7Q+P6U4ZW8OFPFJ4!q{Rre;}{0pt;fr7z>OD7eF4~_*a`#$QzcH~i~>fa zO*^A*x@x5xEK@)jO()Uk2NpdW+5=@BplOzW-V;a1gCR^xRhRXtDt|kiReF&ik{(2w?(h6tOU-b|Yscollj2x#K@?SttABrpO^wV?&q_YX~58q~v2G6>PW!*;pT zr&FGHj{o52I#i^&EMfF*b7`!GynXi=k&y?PQMFIr3gSgT7DJbNO&L6WNz{>CKI^TOEUIFqOR1G82cpL%LK zY4oy+EBhdNQK49pEn#GcR_jOVc&v7ud-iMF@(=#r6yM`8ZJrHx;@WUXP&{Wt9_(o1 z_*#xU#VI6wMkiP7wFo6|cnJ8|Nm8_G-O z=wm2Gk9idtC>0oYPEXaNf)}xSYVzYxzCW3M%P@N>&Mvt`_F1ba;cuU>1MC;p)HJhu zlC_&m7qVa{=D(m+ewFMwaY_E}-!hMy_7@RRjKHAmwI_Z5?S;(3cLE9^aR+2u|7V%z 
z95a3$Z<4oN=01P*VAXcPjFm5(K+H$bECNBaql^A^X(+1Lg6Il#TPw)U3jr^Ee-t^v#8HiDf50d5SM4c;>rVRLE)*MFx0T#X(XuFF z7VKi^8v*=11XRt!6gQ&+U)LUD0SC@N|G16Oiu&Q7IZS>|>U>2G!)@lp4@clNd|n^w z-k0*F*{kE)e(i*lA5YRaUq<~nmImW|U<{ItrPzPH>MH#er{6sIgO?}zS_MdbiUM^- zx-*rj94^MYbMud3wdETrPRC5aR9I5&>8b+3?Q0k9cpZ7?bH?(5zG9KW09$o&$(5^m z-<9CEhx-&6tZCEI_YllA9hPpEk)Jy&EB1Q|zH_!Ytq=bD=JxC;UG%b5p+^6UmbNq) zvJ9vfrKdrFB_jswFQ4`Pc|dU5K!@i^&lIFJ1yp?IAh1s)r2mgpV!W`ebbj3PQlymgRwI9#ibPW1bF?y57Fm7!jr0{>=YEXbE zUa+kY(eIvXg!F~F-R;0;pq{o&S4mOT_J-1cihn*=rlTl3%0v2gk=p@U7-A?=tUB{; z`I0w?^Jse*Y&J4I!TK_jcKK~>lY`)$*up6x+yhq!d%}2@hvcPKr(ip$wGE!mpp7Yz z=F~1Vna8}x5TeoOn!H(&Y=@W@)gQIz^Y@If90bG_LD2qOavy_qn(T`FP+l7SMWFpX z?O@2@j69>`4vqL9EfPJ>ybaAngo^xH_&twVPDM9=>v8)b@l%rY*Z1b!(;LwQr>`AXT`?{-- zqfV|uHuU9m1s_U!DpsBD>D7dF%nBkxirCA%a%hZs`uj3Mk{oy!%?q}`5ARou${{As z*82rGPDPZH8RHT{P^A{*SQw{PFFvNU75fy(e>tK_!w;B^{C&qkue5g$?*h>D-jm_Z zMqeQ)UqCjb5ldP7_-GAw``CU%JzJ@5+?_1*+06*xYeASp9CAZX5w&R{My};)3x|wQ zXNq2auzA6*G0kJlLx3e~e6zU2r7rZQ)k(vfv<*}D>6hTAGcKZfnYoE$xv$TgdGx@j z0z<#wiCn8k{=m#hTmM|4D+0;~bT6QT_-E*B@gwL*%|%P$7LFab|3vSxo@i(PWXYz3 z|DA*Bq_;u;@t{%6FF99(f>75ZQgq3q&M(s^KXC52b%o2X>+3yNd|S&2MZj=rWdoCP zqY+W-3ecTm&CkrjACnXI0uYfp3q|QbRz4+VWt~usz(2dbsd@}#Je;OsQ6pI0`%X=a zSl^EItep*NC!I!>C@v`LnA*yJHNIk#|L@wXoXuk&cTA8!8dk#vb3*sR4~Tc~k!@m> zN-`giH$1i>S!c2TM!zxpXb|l)mq<8lGyJJdoulUNlWnS27T4HxTa@S z9#G*Vii7?a%W%JRTC33^CV=bAdwdev`!lMG`)qdqZ{6hdNz0!n_vot)d`@pN=5rbg zzi9URMt!AdrJg(1GG6iApxp}$VKxQ_1a!Tdo8DK5nFI2$qh@8+JdRbdXVO+dTb^Q0 zLwB=4z3OhgWtM@#ybaC#AF~^|i`Z@O5BWtB?bsKNRrt*v7g<^8FJYSO-d_cS16DI@ znA0+v3Vs6u6!O=&!Y3Td?bCkR5Nbe0U++GF@H;o>Y3nkF@+PCRA+`zWN$U8w=pD8- z?qvE6%`fud`{AgXL@Oa(cu1O;J9)Ixu!w2>n3M{^_|e3x25jUn%vb4?WwFBvcfA#) zc9qimV3&^ebO0R6NWb{4qg}&jf@fHS&6=^}x+!^&S48CipVd42*f%mm!mLkP9UnQ) zdXc3o_Oc~U(nmb<9&ABeD046Zzm1+qu*1lE-9-(={yH;bl`zh-meUO6rOW=JwYgk8 zu4fjaxfkfKyd5h9K*1q(F#90{vQV$NHH>-1VBcW&^4mv~5$y{7r+Q8b^{I!JK1==! 
zuEB<=`K25URWpU!t|8F~mb;IC3;NWr{XuUNToS7XW71Ft?Wo5YWiHzm(8DqJ7;>c* zX*@%*F|cM?`u^6VT2$zqQ9U-*8p7(NR2=43<xl z$p6+)*p|l!GkA{<#vTa1U35&ni2A!N!Wc2dN}6(`MY{p?CyfN`m&gVSquRzFOKY&O z8#b&mRepf%7#%I}SdluP1QP(TG|Ii~SunGoA_0XXGRY|AZ{-fI2tF)Hsvx>PwTqF3&^Zt6;j*t^%Ky~RhQ zdp#cvct|s%$IjgSV`@4kXl5;GZEtU7X7I!rmQpI|R+f1WPB(C%M{NzswiY_X+)TdM z@(cl`nBnAqTh9bdMubdOiBpWLk$w2dF%d?%g=-#|AM8hIn5l(%ABm9gZD<_hzgVz4g zOP(;HOg3a}km?J-noMt2wS`Zlb*!U8pjF{j7fdy;MOv`_#DEmw*@fM?o}E8$$XtJL zvdm&u=HuuFe`fQO%OyfLf}HX)J{UsJeH|zP)97_V7cYyfen6XU;`m<+r<=N^k+#6D z{HQr9DXNm@%;W@Zz5Q%#sap|X761Uj;RXlYUr2axo^5I?~z4UJ; zbacG|6S#|^dMw5(LZ2QdKTDf3W&DZqr^S}Q+@cPK9b{T~S|pPW$2R`yEVZJ$u&xvh z?T+=88!`>}cNPomPB6l1dlxNTM7|*|ia26e|5@!?@HeGjHclE*BgS~E1zlQ<#TvZb zrSjnJvlV{;ZOD*-gkMvqZ?i3lwj3OazHt8a)z@|xSep3ziDS<5kb}9ej8D|#h@`iB zdk<3MNNEO7MajtW6Y&TL-|r@ibCr0vp&~e?9KYo`xYNodmVtO1CkX&XwIUn$!$Ehb zaKk8QWKiO#{P~$@k3BZRxT${OYS2);&*Jcy(c3h;?K8k96|HFqoe7vb>fCm9I3J%J zSX=q1)6GZJRt^iVI6>YH8*L>PS zlS}SE!ec9*t%gIeYmd?67AK87SuJxzI_^BT69h#X?gEb%nkZ54yTAaZ$+M|x7W75i z>MQd?%^nmnN?;!{f_=~Ua5loMnRd^kmOsz_sK`o31K^gw6`(-N12{2E9!V%CfNOZ4 z_it8SF?dAg*~A3uFoubWc>(Ax(Lfw<%)k6(e&E$)l>^p)S>j2=*bVLtkPkYd!8_{y z+LYJCk2dG!{-?U-YEcZk34hk6dfVN9jJgMwvs>I(PK0z;{GDs<3-^@Pzi6%HT0iM? zWzZMwUn^jakDYr+{?;CWGFts+4eQMOO^C4ty=c(CgjrdJ_e5^vLa?hWg~e?6)2MoB z>dJwKvjNKe>Yu2j@&tgWV^y9RE3om51usu9S4~-eF+W9$#~zxxnlU`8GzYUS%Lu8h zPqRF!T|BAWOpv$Hi`}7@hV{LD%u~yWt%yZHe+$o(<^IOQ*^vwE8}**#W$2hwEcay@ zi6M@32!Ig?oNqXrbJ|&pd0J39BDqVV)(xeIDKr6~cx*6Ra_c1-6zV1I-JtIPe2SS< z0{tGYUpWtrBn>3IZ0aDjX(N3U#TGHZHAEGhU#jodyZk?Shuj?)7mI6)Ix!{RTRJi? 
zC67iElZ@EbOAkdsz_qOV1^OB+Sjk^)b-zBTL##(avkQ#?Qr~6a?>)cx4&CW;gBnN4 zNd_APW=Xy=^-Th=i+1OTx5CcC2M!#kOLA$3RC}Lcv-Ma>R`Y;Cb?8p-{i2OM7Y!Nc z38k~z{aJmX3F2izj00a_L#kHfqAgrk7K%cCzS39QFq^qm%efX_!ry&r<69pOYA<5~ zkYOouDsNq9u&lTS8WMACZPg{WMor-3;wHwuw{J8wMgAi8zgsf^B(MNc5iwDWp1xjd zq+becILkbrbJ5wx!?vP6=8~M+tkQWb!F!Dce0vlh*^Jk@oxII^A}UyECe zqnjYUj(F=p%k4g)JQ`TQ9ClDKxX&BtKlI$w?OIa%80oQsUlLxPkIcF^*OCmYzQ;hX zl>qI&6g#6@Xch~C$=Ss@t7+<)x&p1;bQW)3y}!^6>4h(YIP0r{zdcxDdOvTAQztHK zc%EyB1~|iW-N+3lC`caD`V=$DJoydhwz4qKGa!{=qvy>EgULmS6@Z)Dx)|X$D zP?^ZxyuKPuLDz?)1=kBs(wi`zGrT$B`4pl-&B>$oVL37Ta*h0+3@`53)a?{GIUe!9 zd}v6~Q+`A{)Oa>eHFE13SakTIs8Z9~ZDk4T(&Yhw!89n*`2y2wL_jnP7#2*^NOL^Y zlMK~fI!#Ltf`2o=#{f_SELacZ_qpsdi6aVDK(l7aGk=uKjQIEq z{`0BPUnG}L#iF0T+wzMb;uIdJKhQOlf~-N3S!8Vz5&w?%Alg(?rS5Urt7ActUFng# znw`TO&TX^)vXVSd()`X=#z^x}j{b2}GHA7QKcWr4=zpY+zX%Y8W&*ATKMHPG zl@FIZD$p-tY_}YeOj6%2Pgqg}+uWu*F=NO;at(^#tD9v+KVP`u>v_=DwsT?^Ri#`Cl;7D>Sl@4ZaF> z-X94MKy3k2(I@`X!udMh7>!UZ;u?@_EmGWnfPf9%VD+1zZ@nh!-{=z*PO91Ec;Q#S zHxdN&F($Qz)<5CX8fI~Df4f@Hi%#Z2@a2H_2BUKB^Qs3tRNDuSd8k$Ysb5#)xxR9R z&8CVZWDnM8%I@`_XGwU?LiMy20w}8sX5ZEykY;ze5a1B1G30d5pcu`^RnhyQWzf9X zQ0MW2lM8QuRQX==A{8^Lf^DmNsN`LRLPvuhqg}~`{ioj_7UxYTAFBmWPDpUIo_hoKOYH9aqcMf{|#hAKT ze#5Zwfx`=AZD{n~&XnNz7Q2|M$;Q1;FGh-r_D%%Zz1TOObyIoi_gQrv=EnAWtA~OK z2aeP8i>wC~mcdxb6RtqNchC>$^&|-ldWH3f_i5Ksewh6$VxJ#gccGxCFs7f)Yj;q> z4yzh_n|i=XBQvTqjD!KAtJfm{n{=g!GSpCrY0s}MzGrGH9gf)4;Hl;&9;Hto!r$nI z4JVk{(28fbNa~+LC!{6WzA04PcHGrm1F3Dq*WLmAe;`ap;BY;l_=bs62297J(RbUe zTemWm!;gN<|M{E|$%lppjviTD?;Uwyw-W!P1yBm@ef)`H-}G2*-II8=deMxz++l@# zfKt4(3^FpDTM2m2$W_68E-9NTed1A0@%>n$UHOhQ4MvpF5xwLgg_#WUcIKSYh zo19@Tv=y0BcmOTq8gp?7TOkVFtVs)orvXVMdcxx_l;tl% z`xwDdovsn%$dkP-9W8?dtQI9E&cD$R1uj*p}?;?0zF zi%5fI1rCJ|!)h-q@@?HCtL+`*ZtYw!f=#rU1(@~ty9&PSU7AR$?&_Z?8NX11e)#QuCmQKUuc+S5d{}tUSZ6?I(^wqIq5>hvieOMr*q+g>~ zTX$XY4)?Ajycm&CnPFr`V*u<98=6kf0OzU--U$Y=2XVX=3Z?J<>QAiJ74<a)462o0>=#PS#KrGS+x``5ThS>igi&ayMuOJ8Wa9uH{VLrs~3<5@bYjwZQi zYt!}YAr@+hciwh8Tlc)Jik0p0&k0LM}Rk8f#VofQy 
zXT|a(JY^zg8#%vX^D*iloTDo!ho{+xjK4FeU1%0{WbOy;S)ov{OP~S6280_*!*W}n zDjtJ-I`uIs0g0*V%=VDmG5=kZWmOPz1Ie`6#ap^-1Ow?$_ww73zsJojVq5EO=rWCv zW}|!AM9{u&@2?&ff16bIpHqZ&HSANSn7N*q;6kPrwGX;tSLqQ8yEvx}H}kMFoofE4 z+clt*Yyuu75UFw%j)XVvrIoPOG>RYsV635)Wig7MBWrEZUHaaw1Wbk}TkMk5b;XIbrsEWw)ko%dzALIS=`lUt#h(FK{%il14b{5w6(yPA*d?s1^_A3G zFhfF}dd}x~7G2QiZs*-%Yzm_uBjx2?3J_fCYCH99^=3gVv^6Yd;``I0IAZ^2Gcja8 z?jTL69GEDulK$48IOUc6wf~22h^z2KFCpRw3erAKRS|qm7ySIBe1CM)CgrSRqe0fK zJZ*qU1*(3eX{va3TEID#qMLvrRsGxhvwA;$fs;(i0lT|dx6S4^rDm%eBftGzH&|4f z-3pf7q2pxG!)e)7RAIQd4p2%X3R?o;Z&SbSV8nfz#hAH@<1;?xH&}SZ$yy7#xQukE z<#tBMB(9DMxcmUeMAR{6m#H=mue?I}SHa+y&X@53Rj}_O&tconM*JKv0 ze#Q_1T+sqAxof+38kJ%2Oj4LVVA&H5@-}43vqT)ih8_?<$SC_0M-Qj3YT!5DiRFPT zphjBxMTearOWVGpCwiT&5NXhJ_dNr>;eI;&@pMdItHK7XyC=TIGkL-s_`kU#yTL~(j5B8UByb4q_0;#!d$ z>}P*rL&x2%LtV7$go;uIf@;l_e?OF&x$O%-lK$4vvlII}$yS?^?|dR>235dh>qQ<( zuhA_0j1mqoz6o>TR}VQ7bT>OPzQ-R%<^gU!_44ajoau>o$$hGJ_8 zSD+I{IW5qb$Tj^9Kcu@)R8iz~2NL1*u1)a%6%i9@dB5l4r(~(9&f&^#vpix;w!A$}ywvmmIJy1i3U5 z4bp3fxobW?nSfhPPl~2fYEGn1x;=A4_97RGri}#6OUzfi6tJNW5PARjreq!H9d7YN zEkU>Ni_)t;}%DpdJ2?kMZ_^JgUYOSF#qqs)bYZ-V%OpX5xXv#7m&f+(C!f z#JOKa^TK{nSRBW{F0!>MiMxX0=l z1P4eLD5ZbJKFXDB+T$0xU_uRGpn7H5u&1;oX@l)OKV!kIbz#K=OEwXI z8_Ysr_b`bv-%G1U$fVmO$i=+#T{sg*1V@cgj8LONr&d1eME5FcuW^m39nfH~^uZ98EZ;LJLu2!g0fZ(LNqiW?NBQIhA;uj*vfO76KX$0F00>dXr9Qz_fyw z3|C=att7WurPjkvHh#ZFWdO55Y^62PL)O`@<+=H89*nx!q|lKr9tV7JP;@hh_Ml|>wF++323IG#O&X6l#PJFs z(djsoelL?_9>QRd$>!vx-|miTFBwz3Fu6!)d9_tpH1{74ZQDX&c3WatOuykm49r=} zznB0^7%U4W$f7OF<{?yl+osHd{9l1JB@O>0*}(--EKfV*UKEVWTrLdL7#rK`e#!)Q5PDK~Fw6#DAT_18?$>L4}CMIiX6PXTek%5lo(I^kYu-; z5z|ry8zZb@5<4QIHR?FkOHiDnJBz)utZxmpI^2|A@(h>3{)@wXMq2=+e!f?w2>;V8 z>_2f~n>f{&5TzZR$*LtwxCVEU)X z`Gd&aYlk;>&+KUmB!5}1DphF?1kEUDILiMoKfic3^7C`q#wQYaQwy9JZwa7v+J%4dq!|ndg zpt5I8yYB_~ETrcH+AHhO8RVNQ=2i>gyRt}m7W2HV?F!MrF;qEbon%eLJq#L7(v?_b zI1KAXBbP#2Vrp`qsg&{91*~^F{IJod3cV)kzF@~P98eFSMZsOeT1|UtWRbuFKH%Ti zwbv=dp#}5F6BAvT=5WD#oZsJaWEE*{A^>DPoqn#8qsMpP{tKS`6&DtN()}rGhbA@s z4#gYX$Cmk;`7&I<{!7-wC*7A^bcV=6{Gw* 
z1Vd~KUa-iMH%l|88$1z}qn`I3O+Hx|FII%Tl>N3C$NAS>SZv3Rt$PW>To)83A!@jw7W%gD{&}ruI|hvu`V!=|p^gi2hPpX0mna z{4h&(QhJkN0-EGm7sUkFUKP6#tm8D1FOVy<`qWqFlfP!E)^3#)YZiGnTKefNT$nZI z?4JM8t;6w>VKtXADqS*NY-#N5C1EnD_N5s>Ro+O__6Rx6*#+Q6f3hSVe$?N1>1}bF zq32+G@$TQ4n4mV5dqk2}6c_^;F`z^ksXdME?j2sN$c)EF*E93`7$ax;3q6Iu6{;oM zjf6(C5BKgbQ5U3Gd-%UeSo@TbZJB!!1E`_jOBeO^fJmt9e9e9*^0)NOsTZP(bkMy${u_ibrWkX`&mo0}8k4}(%;-|f^tZUt@3mJ@ z;*|xN?2qnz3Y9nwV9$ay1!m!W9-*hHR{wUR@vSq#MeY= z`pU+w8@T7-LV^pV*ApkM2&j|+;qU@hTSLs;*Gy(@nWGa)BK^oNOxLZvE&*=y>FRwQ z1@EgLiA84B&om(sCuB^-M1i6Yh&SIil$}paGceVgD^TXz0J_Sz`G!C+dm`!ts`2&$ z+A`edy7Aakcl2A=fp9g2+$R#^7v$$kPv_u&_!lBpe(%x18kb4^kk+-z(2zU1J@iKI|T{i z zvxtv}Ycgr}Cd0rUb46E_Cjm5TO%CKM;r(&aU2mtLp%S#Zi+_g__+QQIx`*Cis& zETHKZwUNsY*dyGM{Tvurk<4j0UY_+bd~UvKL^(TkA)+R*;nTN5^^&xsAJjBxEq7Le z6$C=>zFu{DV$#X1Ob9McVhivkv>`8b@ns_toIZ27-9a!m)T1ZLyy&=XC3w=D&xZ&h zl_oF6k;WfI*niltW46(4J`h|XbWTV4|pDtXB;kAlOiWsn|))c&PxloXo zo_(4aeGq&jcxz1&U0*yV?_QhL;3gm_9CGrU-v3=4r}ZuS_t(Zk&);=NOrJL1!9Qg) z5s!}oTY94uW3$L>%X`=Ij9zinL=bQF;S2o@s*5V3`iek7qH#H@I^iO(0Pc}I5ni4; z>kGD8nKWCd5wKP?N&fzfFM>Hj>^tavt{0kUrO(bPG?2dpU4Gw6vW?}lMIl(Ki3S9 zfu%}gk2vYMZAC`K#tLOgzqLz6l|2<|+%OwAky&SYw%4WlRvzkZ^7OS2<7K%I z*MclkHV*_SxbZZpNw$^!dI;e=B#U2OWb-RA<_8 z(NlH*`=n{aKw=%Qz5OttqZjT%Kucs1)yO=SW{|Gkh1g!RT`TbFP_^7dwLG%y#VA@Q z`aKoAGr6={ zkTq#4K_i8CXUK*G#22STOYGVClZ@Hg0$P%Jejw5@SLPvif@>a;a}W)(quS z)`q;q#^Y(ZiZMCPZdSPV;Fz4VucKWA4&%rG44UiacNkuiHHgkxQYh(F$3LoCE2;0& zHU`#7rah;DWQ}opJ{@@^;-=TK_ffSBVZ#~>e@AiwI4g6`_$*eCpCMsxG=$^EoA%c1 z@Pz(F&G^F^?nvP(!~hl0!W2?!QTDPv+^^c6NvemPzq&Mk01?yWx_olh?PuQ`ftjBx zd%2*xPQVoP63ZWN9Unp2-t$TB9g1YMdePgs$V0QhREqHUGD;-!7ywXq4}}#&d6c2k z&K07V3qMD4R27S{QN)jaouQA&W!T&P>t}u|k#6r{#GjhH-J&=H++@SdcO}mkb&@=D z#*Go0Ox0g*~Zyr;sLYxqvcxlAH|Ge!|&=A^H;NZ3GXRsy!GmZ_V zr&VDs#($OWOltv%yYA#sW6eAa;*J~qhA(p|8@eQr1+Hyb+~}yT+8W}uhQ_)yuLh=s zj&km^#x(k|Xs$iXeoJU1S8_Q@8MmgS%cRr(tWA4fWXRqzB!j+9lGw|G!8tN!b!hId zst^)aH*mBf?tK=_x}TECf1Vjl(RD%9k!$23XRw#Gv2Pb683FdhR?N{OFd@7P*xrqDhvwK}#%>c3_hu3l24=POrsp|Tg&WDSHy(c(J-J-6n^`f)nXRrW 
z7OoBExhTs$cd`>=1L^>9#jKq(`r~1)0v}FcN(hZYKwlN(EUJGVxRL0IgQCg9n4KVO zfD~dM+ys*))?6>2ZJAT~_0c4XZmTr`aI8WLN#ty{lyrJjCDwa=nM?ZYSNQALnkk9=3bwPDp`f`hAs z{57Yz@QqzBan4?gWf=-%ag%opV?QBFO9;tLfj+qpC+U;H?GIc!(2orT5xc$jL)KJS z%*~y^f!zz3t#aUC3dDQ#%&$Ps;uXFplv?ZkK(k-0M?I$5M*#yQ66kiZGUJuS${FDA zc|A&?SOzz#fL2MCAnSLlPNHjHy_7oCg~balwWS*d@QqTcqwD@5e_(}os_oX*a|H!e zPvwFRR;`Q!h0zA@+WtRL=hr?R^YpYwUAe6suDWWqJV2>9`F!nSq#s9E=2&RaNlMCb z-SBdj5A+!U7lW!3!#4fHl!C9VGcw@GcmH98b;dh)CSPUuAzb_y&_$mIp3WLT08rHn zhs|e^8OD+Rwt{p*{F=vxq`Fb|QLE)oXXxl1s81cu0N~m>wRw;p6sUFay?u9@-`o2iJz(1Sx|7Gycht22V0&RxH0O~JC%uhn`EEv~hm1*WGI zDj{T3xmua*m#LJA&*#dq4trdFxC>mb&Cx!L)mQ}`ly3!@Q&TI6j&Eyx)+8yvJyJrc6i|u7MeQb?d5b)j~154{{ZA0mu&ZOrBA1Wwg zZK;GmXN$Q#Sda$_V`nG3`6GiMxiA++APb?Ld9qP$j`G=xs|Lc7-5Lx?tm-MjLHp&& zJtmGf%D(_Q_OUw#PJLA(r2Vr^%B3dtO^^p z>*$fn8)k0oh^CgOi`t^Q@sH9oGd>+O_##Zli#~C`MSlDXPgL|69v#RM;#uj!2gFft zx3nRUKoNS1HQ6SLVyclm;P{Y~IhV8E%95CeEX;73eOM;uP3aD9gZwCP*es(sPQ(o; za4x;qT9qyVkkzz4+j9GRtX|@BwYCPBqL)|GlS z|F}w^y@HNFqjGWD;iUGbz{LVFz%3n1AiMo2d>ExwW8~pZkV)(%gMkcJ@QVi7-+kSz zGt)Wc32f#8s)8*mtzY8M$9_O-Ic-&)5I94eUi;g?EE zbX_0xKg(FJ2yX-fQmm`W|BqP}8aJ@Ryip0$`)>2`~MWF;uBw$P)10k7y_M zPZ{s*yK;HL%W&9~toA?4m^G!r%yQWL!s{5(I4psek$U3 zJVgwAoE)V(S<;kDpF5kWxrW9m+0}s(=7f8_~-3+ zC%X=tq9HF;_}*HV!Z|fqMIvn|mZ+uF(O4f@ZSYEw^-ReX)DDI|aw#HidJ;1cWYz+K zJJQpB)Xq;r0nc4aM`h%}-fxmugJmE-<2xfgddy!FiSGZ_>zn2o^N>HXLMl^WNfN6 zUiGy&jE3t}Qf)D*XBlcc{0_Y+SLK$FGqQh*!x~>L)@^y!a1C&9Q$7)AwT_S?Ez=M0 z^p<$CbW$OVpY&>I>8CQ0OK#+DQ)w|BA}TkLbd4lz>2F3{FxbrdCk$`IeeeZT_r1(s z?w-5JUle-L#EG+>gK{-#!hN^ZmJX*F3c<|>l3mE<+(3tvNHmv6&J|`JxFI`NPA^~! 
z12pN1eRVo>j{Pq_ZlcGn%S88f6%Dy}qQ!6JaY2NAWz9~ln&Q$3<|gOCv6 z>t(w)lV}W#lOY{MJ*h?J?V@3tY?}L@XWX1fR)P=)iHznL0&v(xCi3# zAYqhWnE$T!13BS2bJNE+=EjJbs!ML(CupoA8Xe-Rf_)8II{(((?~t2?iYwx@Ri=l%NMRr4;Ctu48y8P$Pc7uwf;1` zEx(nl(_cKM&y1L53=;MVSj7O(T(nu zCkqiKj<=ZzmSbT$4W^nQ%X!41{@YF~I^ko`&T(~+NRD3tf6qH2`aR6u@p0C7iLeAw zzKE-yH(b18CedQ>Z`L4S66@tv?TS-H;)@NzsrK2x(4&6oC>kAn^UD z^^OFzj7w9?v}MtTdcgJMT~MPZXZ#l0r0b9{i-EyjjNvbYu2%QI!e}H9X8@7 zAmCB0m{Idz&@HxC13m#{N*c}}(RDN!joOQ^GxE~WCtFbamv;H1rvO#Z^NDN^)n@516J~D%r8j<_V@Fx+$7JN0G3&_W~G(Vv4~%hKYhouX{anGGBNhcV#lU39Z0t^S>({H+3(YM-kn_0vyM zh~A>!ed0=Hmw!N|Cy-?CS+gu<<|In9)6~+UiVP(*)N2FEcQJiiRwRS~|LC0f;5=pi`+uv_6(P6h^*uk`(JK9( zGPVs$$mnQELR)JA!5HANdkY%K*P}(2P{5nT-<9N1-G{9L7}L!gvx`vssdILSp_qJcIbKKi> zdj3L@;INF8@bPRYy{ymlJ0My8>}U6(zfzN0QM>9nGowCl(9b(4e-oMFz$wtA+}SyU zS9VD;MX5r#i6@#;GfTo_`Q83j4FFs4#>z)_SB7aVb?8>!M!Rz6O@2k@!5xpkJB3> z?!Md;aZpZGytBHeQxYdZtXpRlXsVV;OU##jb6gx(5ug5WjmTV$NE}^f zOawavKAMlL+Y4m|eK{YpsbBvkhSyS2C(K{)aJf>q{rwRxM)Zm-`8Sz)RWqaH%KmE+ zHGs#GYc)HWS>nl(rgA=UUfhe?U8FxXx-K>zQxg0N32##GS7R3%0!Iq-q};W_p}i1c zV1-I4G9hB}+;4Y{bFpYLl7y(CW3gKy*fT;0bcq&9OS4FCcZ#6Xcfbv>_N^hEga6FV zYzi7yZi-2qedlJRN7wCZK9#XyS{0iNd}02ky9+q=5!F%8m}dM5KYlCLh^AeF0qcKx7J zr*jz7EiK<4`sQq*BsuU4@(Vy~3c=M#hZbd5-q0_%+%0o&^A#viJkvUU-H6Ur&06@l zfT=tCNzS~j>uf{%pKRWalY;&ow-WwIzXY>%2vBU=b?OPD1Fjp@LYVFr?<=(t|39Ac z!Kt80Qwg1Fudqi026~&!Hjt>)7A;iJVrS!hJyI%W3$xj>ikYg^tj(XjQ`XXH&CUuRg@5 z01509entR%rMREyy70ht0}-CbJV&aj*S>B`(5GVkU<;ErL@RYsAq`1`MODvEY>|+I zhnK?bd0WK+0rctdiuuV(NqsXF795q!B$(bl_T|Ceh0X3FMb?=D9^S}XiTNZGc-R`# zMnPz{JuLiqs)}mH1IkBD-NxF4xhFnF>f$RXcaDe1mR45@X|Z7aL~=NTV@L6VpsKNW zcSVai5pVBcd-Snr3l)x|%LRJgQTui4+=7!g!aXZWx^9N-1!BNEv{PnJP(Bo)bYml_ za=i14i~cJHsdJ{^J3-d`xo{VJy>wp|m^dvQ@x05biZ)T=o8(=3ZA75wD)$YJmc4J=1>O$AY+@ zKu6hBljqu<3XzM*iC0m-#Nbm#YKg%q9Z?6xPl7^?fgCe&Rf+$jCsX7MttPw1d?p{M z?rQZ0jFsmYu#cou_FZ=Y^4a)Bau2s8VQ}cUE01j3@7RrS715j9YF4Ccw3CJ;V8teu z=Y*p5rMQy8At?Mf=O7l)Khd59Sx}wqFnW2yK6N0NQt@1r!sj3aB+4acg`O3wk!ryx zbETrckfA<`{Mtiop~AQc99^fj$$mn5ytQ137qgw@uOAanT$0^015!a}UhC3b#J@uL 
z9>hRhzzJ&=#tLl_cW0rg6XAzq2eoJ~MqA!DC+aXNnp8FgD!d@OGPd9zKJAbHB*P%Q zL=;!6W*YJHhE6oP52|@zC-en54K7AQb$62n;`VB%T!CI>MFoU6$=dLC@;kOQ`U}qf zQC)-RFDpxNB3uCJ`#E0dbwsLATZ85JP-xxzTtth~?_|V5_b)YGgEwiHkru62|4|K} zD@bWoH)c@{*!cNljvq78ad$L2O=~@!d}Ip&fY_d2U=Uf9Htk^;?N+3gdNZW26=re_ zQK7xG*S|@f;p6h+qJR|2w(e`vI~Y<(Na8QnIIoYCP}6j{aFe2_oY^tv$i-O^!ui^8 z>3uTwsX=$W1jM+S0q|T>RgDW$3U1)9{U&`S?Ni|Hf61bq^k9{K9@1pXB9|XV=tjT4 zNd5y$up?blX#n)@R{edpScv^P&+K6Bg6(UML!0>Al@mz|ahU(Z(wB!NnZ18o%}h;A zsZ(jWQBv7AGczMsu*GGjsm#fJqq5Yba+F+Ht!Z(WaU9Ftl1fF93`tRzN)va+6v!2G zLq$bJKtS|;n&0~`a9t1QJm;MIKKK1u@Y_=x#1`Ljt;HDU#1k1a5Bfo?B;=+0+D+iZ z;G%)l?ncJr9izq9df}irQgx(Kck{Oyl0+WpVfN?8v(WxK5WD9L1AVerYwe%P?>s-a z5$z?v{SaC`g?gv}Zsxe>M1hwI|D>;9D>LzWA+LWyN-K42jb`r7#vPKTZbEoL9X9~* znAw)Tj-3t;1tTu6&xq+@_Y3CVt=;8yPxX70h0$^sAtn9VK$K*apxi~@AYG?5Njcqv zHl~q(TAJ z5w<2P^TtX(YRZ&bm_Uni^!cdY;Wm|bC)Kb-;b?{`D(>wgm*4Chnx z6o7-Bde?nDD)$K;(D3<#`SvaXKfaTn)|`Sn9mn6}K{l*TtUTo(vl|sB7=~;9Ax+6u&p4 zBh4=~YPF#Drx9ZxCjR|h_JDGr8;nSo3#U8ya*a#V=rIfz{H$x9+;zvZaS1n6c zGgb|hN5ADFfIvTXA5{mFW4G)&7WLe9nf2UzN3V79uYu>5zu&&0*rLSpO5A?T8sGZd zPLoJ`KnVMI{*bw`K)K1}YY8@lG54Re;{fCj&1y##vkz`Dq19km~@t4Y%ssYp=8} zOS`2{l2u=Rg^WOqaq}r4y={uZak>J&)>3YoX$k1J1cFajMvwGP!!+3y^PX{lmFk6t!UYCV~cT{+-m6)5uNix zcYRVy92l6`-#in|P?@q9M-`f!rC6h8^tA_o$@Q z)|7=imb@C;86{v%(DtIba$-5gcf@B0v0 z|96F-@p2dZYVCU5BghGy=}_FV9t-}7)1W(X@T2IplVN)Jz3j*m_fJ06+HkPNs_l1z zhuC~@k3~wi$-QV$K5kyu=_nczhA4S`v_#>Y{);e}9q}Os9GtxTB(!cAoM$f2k`-jy zU#;@Jk`#HcO>v$eU18q1H$QX@=k`8u+V5e^yQ`Y`Ko>a7Ts9e6{QV2uxWNemIx5Vm z#E~(B(m?$oJkoX?=-XZXQC$xITE|(quUr@RYx1&8;W?)oXea5RJLQE(=k?@7e@Oe?j zp!C26FuzB>4digLMqN0Y$o&m8`9PwX{f$t8-3jz*Q<(cG0rXDb);KY+CpXwbQoU#H zOt(J~v1)3~9a;hdjm5~hVJwJAyR@;$la=LS@1Hi+YWH07FyR)i^a-Dd=hF72Q}&C) zpb7qg)vtBTmsOjiku@A=6fA$Nu6Ab3l0jHL4$>Go-{jgA9xm1mwxP`Z!d$>-Ocl~Q ze%lshF+6a7urI4j53RIMrKF#~xKb^*`6wv=c0+wsr1*Be%zVPotOv1 zenEaCIOWes7ZW$}{_!(i;p^8jJl%37!f0Hm3l6bBqW`T9m}-LN8KLFf^iIqX_T* zEQt2@gkskTGVb}MlXxe>#yN2t+`+}z5sil=A-U{_s*WoO#Z&5+96@nmv1A_5?gX-D 
z!H9OsnwFmNbBg2XBG=#Gl2$-Uu8OcVc8`8B2kS_ieCzmbb+w6LTjwrmpEKUm%~9VE zN9mGN%fhuUBr({|I=76W4u}EiLs^Zjiy-n5vkI-t%U1Ei z?(4sUzju1XD9fDFW(GgiCEv7am>gO(=$!vPlki!+TP(|M4sIlAzk5ya)Ba@caR~{O zZ42JQN9@AXe=0&=)!twMt=i_(Ch>CtH}L!Kb)g^2TB8jFakIwV24fkvKbqnkQKpCH>EKlZ06Y^=$CaNwx-(g1&!_fp5_gC-AY)uuM-F37gm zz2l3BPh!6RQz&`N04A|^dQyDL)ve0y(F$yY%+q(I<~2Bw2iY50a0#qJ_Kjt`Wxw8s z{YUMST;TWqxH!ANM*u1dW4&2X>MdPdHSjP2|1l)E)Z2h|rU6<4%;ZIh587+8T-9Tc z311-k8|GRjg>MIPQ$MEAxU6@JW6N-R3j<*0tFIE=pbNomPKy;cptoKN)yhD)+3Kdw zpBheHXX=igp$Oa{mk)j}6oic+Bhy+iS)Y!s%|5aHu&ub1$b>Fk3rzq(7(V=6fW_Fd z+(?owvsfjs$x;NnJX&uSJ>0|NQKEj=c-Ba9EE0mBkCp=Nf>hhqRFhmEuk;e-mZ<_= zlQ*H{q}7TvT|hn{FfSE;Xu$mRKa%Xg;#~0L2UvTa&qi1nh8dnU_dN_?bKcAN zS$Nt~M41AbXu%dM)!%k005T5OtnA~rvIYY>@YR4n+^FDRf^9MJvs#1kI^q6m-G~XW z8rr9L2K?%zVfHBJRixSHkK*)#cVe*WMsPt{ly>;%p6`p&A~?fnxZ8UiZ=i|t&`}Qf zK=}LMkG-+aCsP_4>nY2Bw={aJ&u>*=9!-qz81fgpc{V3)&UDfo(&yWJW74^jrs^Din<}X3#_;Dpj*5ofTl`pN$s|8a0n(|5bo9fa%K9M z^I(<^(Cx$*S)Euv=_}<)qOkMXa`!q(m{o;e4XkPr?RaVVt9`X^xxO5D%tzd{$y{q~ z?|Z5IbAx@UUeC90Eb5wiWC!+F+v{2d!hx3G;pYugjkrOCNN7mTZULiC{D7zAp*vvw(&c9zG(tt0aR<6AjFB?{FqM7&uwPcv5w&WEJKEl*{tlI=?3M zKZ{K{%$Zwl73emo@e&h2wt+HKeCFHYID7kmYH@?$#Oz3)0T}RZubSZ?+O{oL_&OzD zpGZAGIk@(->`o2oIaw61CN1Xn&pDbTyFYmHvxlcRX~?jPl32sB9dSdK@h?|=}s}>J%s(_@kvVN zEfE~3(iU`i#xOQFMB9;yy;~?zz9E!r{EDN;pNd^g0^Pm!>y9;9{w>=p%MxycZYlc8 z25WvP;%3f$Z*CB65dqgN-q&_#eaN(3Q>aX_PnpmBR3(A^w_!}RHbaf-R`vDNOLiL* zzMi?U*^c#W%DN=-d)Pme%7aa%t*n+Aw*ql8Itw@-OJZD#&kskhyQQr>f)F7({lBe! zuE_L@54yF!Z$P_JFZ5m(|LpQKFZcmR9|I@+_HzD){|2-Bn14z|jk2%A99CvFBavuw_ zL6EwHG&w`hcble#!rfoYA7O;8FVy=WVX-R9>(8re^yBu67iZ>*H{iZ2I-RhpXk&e3 zUVz^Ii68038AhE>sz{Eu@81?H!aK`mE12j1J1)*1Nqn1`(9*`2ZvNH8`t91ZGXQT&G>mb{R`df52*7CgIuuqA{gz8 zC^-8xze1H>wP7{zYVn;g3;d?cU@!6!<|ATlY|Wvn7dAk*96)awN+P%MAxr<0x-KWz zP;(C2TPDEgS7zyo9gM~_)m<}xU{JmO*>kL(0-OxxoJCQ?Izw6r$1^qCEHz? 
zJiM6^UP!K*vY3kT5suB@fSe>wWI!I}9BiB5o4m1BSo=eDUszNAi_MRhcZ9%>zeDR7 z1^J}uPCF36pp}>vy|}9){XSzvfiqw2qjIFs-|~NqCm(?p%Dgh*zj=er>m`BWyQjDD zIh=5(GZ)J@CpY{pGXtuh=H&iHblE{?z=&(-v}477_5R>KW^qkYM2G@<>7cZ_e{AGI z@n*`UKZ}JWB#^I6YX#NZvI@WEUD06{$Q`DJ|K|`tKh=8oxyNs2!`gtloXtYOa$omA z4)8)gPWkQ1a}7JR@;|01S@o-oab?bLUp?V5T42wje6yf0v2jm|e9rp~TSf`wnlVN& z-U{=@XU3Q_HBWRTkEE2(k+I`=ZPsSkGx@=9heT59hX?JTk0cN17H@$*(pi;ZQsRcM z)Ej;FWzpr7|2g2n`Hq97-u!xp0_yJg47?+&Xb1wtkY)PLB(&XGrgM^+2YhtT5a?|` z;;~9#+aUjkr;K}@*X|J02=ze1noLeO1!y))TxhYTnLCo6;ycndKPkqXr5Ny1IOmAr zN6{%D5jeo`MsXsM_N43zuVx}uPB8F@44AfJ6h)M1i=tj9R3s>U7bQ1LEi8^`9*LH( zI!z4N4<;PR3sonfpRL5&##w~Xcj(Mm3NmGX9gU6G9l8QVq<3&gRe^CT6Vu^z?$=Bh zHBbHjgJu%`bThL(>)a+kP~3YS2>x7){x?zX=#%82J7IhMUBa|mfxS61 z`5GupV7Fm-!2AuXO{#c9b&V%S5FC0u?TWY63c2B+q%%+~#-D9NW_ll?Zv zN7QY)0k11OJ<0Ab;+Cm&z~TscKz#9C6`>`psTGPyJe0ec30d`BL2d;5pV{g38n6M6 z&&8R1$KQ!r7+$_7D|%CEVF~1C(2mI(Y)&KM$^8tx`y_xN9^%}R#iu^4a%7KgXnrs3 zL+^C21M1)gC~f$o6;~|x&EN#SQFi}wai4py!*grLw`*OJfgHaQNMC}L+jy8eoT1Ol z*C~Fl>N;#qxBM@~V(m(Jj=(89c05R$Idk0qoD*yota{tmW3%ME858C9)`Z^{$0!9w z@pGpy>dt=c&^9Q2@3h57WiJlM1*k^Dn8|6AatjDbUXz*5vx!=_^c`XQKPa zk0(DQ=vrfxN$+O22dA=gnk&u3X4@&BW*$Nm53F{-tuJ?`R9)I9z2=h$V3E7jIt{I)${OT01skkF#` zN!JGoG$J&3zbUzMn~8aI^1UWV^)v5uXOHqm?;+Yt@@{IW<*@2#|0u9DmV@9%$KRT?f9+Z9{ z?8YR`T#I!FH11px3DdkyTqvEO1QgN#6`{?20*dx6u!^~mm-g{8aL)}_o1!bzUO=BH z@Z3|0b)mpX zqQaWKCQO*!yJ4|tB2K(~DK%>W_wuuotExBo&8ejG?#cJ=xNs{f*|M{m$iiHM2gM}f zsobh}0x;HTtZd4VmyT8bHtu5XsyaV@Mh`3=&b(j_-^^s1K7@K3W1j%{DG4)d31R8M zZ9n&KBj_YV}Pj`H9=7Ve>|BQjUVU8DMoF@LU|*d#F6D5!H< zv|ZGvxN`|*>o~2ioMXGW4tje z;gOEuJv>6!{^hpCeKXfox@KlGW*%1qDO^v%=wD9SrW(^bB}QD+r91c5T1QrQDn~=c z(|QBUw-$adxq%um&2NP4Z!%uggdPwPVKJ)Ut7-CGPCpp9j=fEoEf3F-`AglfP8$Y# z06i`%qVA;6@?;c1_Y9iy+TX5BdA4Uvbd}t5?cz~vp|3NYkW&s0;Lcl9n}=E%XFSCR z|HM67yY3V$I?R62pm?EgUX*$&aA)@AA;;5qYnK~JYLRmVjD#EN?=U9@oX~5oQ2=-Y zpp_lhgh6+W4lF_k75Ul2MKX(7&=x#^X4hv6dcwMd;3kCnI2A~s|RPm%=c^mUcYxyM&9h# z6{;FRyNeHtsOtBE+nqr7uds+^>kmXNT|Mc8 zVmwxHAvfr(zu63@ay|JKaz$m4?RdArYh;DH7vue&UV>`qVWm^lYzLfWwrX2gr0uZG 
zfgXYUc^Ov&&0Rc~m(p;Zc0J~+x+7xP2gAQ`vFN@egvoqL3ze8!zEnfr@QL;qp{{*c za@+^HPcjepF>5G61pP1GHXR^0j?O-?@V}DCmJk&WAL^Jj;x0IiGJe+VOMWk#g{BT` zM|5KYA8y_fPsGc%-UGV4vJbXJ`N~GfgQWS!wL)J9j~zayJw?fUpNr#v%%5fJGD`;C zZXj0j{A>tJYAsO-iI*1I3X1El^(D;6cNpFiD>Y*oEz!KB=pqgBillmFMrX|1Vr=2>1Y|BxxW^nmL@brxAo z2PM)B_qXGw6Sn)g=Xd7|KATXrt5Qkt03rIDg@6alj@v_9o7vWQ%l`a39Y^~m zYG*no`;y;|B(q%5mWe8zDA3{q7W<*O{J`RU=xy1%O0%{^D4>4pwYs%0_iuG;0~+;1wVhL)(BB}` z%~hM{HUOIMgtFS?%^si>^kqK975mWz7nbL7o{NwWK>BEVSy!P~e6?kUs>Ex6JJ$%qLCXGKnt?g}hZV%PJ+D;w8fO@t4*Id8!e>y$2K*pWt>u zKm5<*=_J;T6&gC~{CujgQIQ@|*?Z}p6OZ%oy!+mR0Z7_<;pvw_Kw_}=m3d@Os(MAo zZt#ltRc$XbNbI!UoOV*CN;l?9?OvwOWXhhZO|lmTEzIDxGwt0*Lthi&=O9auXyg?U z2Ydv-U<6tk+YRXB8toq%-o)AzKlF8TjY4pAJXr^Y^OSF0w5t=y7BE?gK-_{QIL|pu zYd`)$nZjquCNItfgc9KXZ?iisJZskLzD7PlSVYQh0{3t{Xzj)Ad+-+dGm}_hw|Zzd zUstsP$_k4a1-&GeRYFyb@}Lpbp8XZ+q-XzP`3i@hWk=M867t{5i4YF%QQg)qFQL;y z+&fU^IF0ffk7Ld(I&tpZ>zc?h>8`H`@TQP>le7MCPMZ5S?V#>NOvibao9v;l%&X~5 z92iaYnL&zw>9NC_gwe$`vOKF0Kjok@U)7Y9q$~Q|xX;2(CR+&Xl>(a}>!DXd%yD;Y zQTxvQ(Vif-TaBV-q_U*(UJ-c$> z$Ii$#@w~Haq*>AQ%pCZToh}iBolXKqSuRdn(=z9}^~^EHBtjTTtb&9}#{~;criAc` zWGqj4E5q9a9IM5Ct8TGb?EHh>)*{c#W@XSzXSWe+-(*?G^0TLs{q_TG|BY3$DX+xsn(%(}yF_Q4dE=yi&HMUO846NQ z>(Ve_up<3E3BIpQ*}W=*ejhafdqATE!v7n1{7U;o%LYsPK<`N@RQ4En+w%`3b6}@? zC%T6ZG`GmZn^J7vs^RPS8O+aC&TQT_8cV#)wruQ|$9Rh7#f^IE*))bVqgsFMvr(N^knWf`&e);>DgSno>EhtF{$#DsJBqGNV{->UVp53lXvvEU?dC<_}AF}${CuveWVGF z4f2fmYK%#b|9hJGxwZsCs|EVc*7?HEXk*GXa|yFKDZ8i!QP{H}+<7!~q3B$~BRDmC zGk~}XlWklB#G1e=ntbE5-KE!11tG#+p{cZY>)qRJv&4^hq;YAX*!vCDv`prhjcPj3 ze<~*O>l~=EsI9A-JI>F-wYxTk1R&gmVB#XyeQ{sA^r@S@WwOIx6Dz-1N;R7k{=*z5 zfetwA8C6Z~w^aY>m7EEXh0Z2P9xMLjfqq)yxX>LHkO7g2ffc&zU=|Z=IdK zPwR6{7BXqyo`a%YHLyE*flc@&JE6){cP@O+%wGrHWEr zC2i(GCnsUiUJCC$ekXwC40Cj~^L;+uL|5)}YhbX{dS^`J!M8MPLPsP}gi4}? 
z+&O`Z3H~{5pV%!s_=i=a$_e#1k7pq4ME@~Wfj#pnXJ&)$4S31hf4}Y1I{j|gn~juG zknv=zd@2(Mdnm||#{r{_|KaL(Z<+#7Q!|*=9&K{mEz*_!0oMlKz@$e^8=PSJ$n`sT zDDx+>U|uWbvG3yCuD1n|sOpde5u|BP=4PVo;o>PDOdHHq}NyW|F^RMgSo22Gt4{&^BffGha7 zA)ol;OX`k4z8MAycO=POe1YB^^y?b?vlQjtt-T!qyxkdA0XbhYl8#_+DeCDuu`V1o z-bW{kw{9$a+*u#0kuFA7f86)n`S!; zBl9T75xtE8l?Lfy7R=_u!bMv}8=YleDq$Uq|FeLaM5W_tW)p}_%-in8je6!d66GN+ znhjE5th0&|Ny~e-mP1^N@?>{^u?mjCg3x@B9o!m8 zZ>ug?+U}|esX3N`?mavT3FK_lVkBd$`S7w&pN5Si@vi#m){!i&cz8D_L})uBQa=1A zDF}7cM!yQfGYR4`Q`{LZ(8~?vRhZX2WgH5pMiCsU9)ImglRrr+gh33r{^)cM7G>6| zcfKR4DqlQ^Y!JYRke{PG8&}cjFD#Gkq0xCwb9+Hdn#08;MpG)t+JMD zYxoS-(=fz%*?ezwg3xPjr$xzKuRAMTk9ktgkhwR9r;l>0klLE6I&-`0+~J$4s8&wr z_?;RzLRWgQk1=ShgE5@7tjz5LM{86$eohPXq3X_V|D5Fb!anf{Ev?{v@Rju8qu%;> zVu+ioKA~bvYr!NM4w6Yp>yqKq?KX}|pK(k~BspSNwf&`2^&fgid+u&cU-JBPdwvPk zfB+9dx@=PGn;5C(Beu~=lCnG`n(gNemTU|h)3epedY$|Y5lH`j+tCQ8e<$S?QxTS+K1*s~aJW>sgmHj~G zTaaXW1gJ~Cged01o>2v5UTb63#$?x33+{k5{ScLQ;}ScvX3$%_?|PiYf;GwCq?JRG zuvG=F%ZQLeoG@1encxsC3h<;)1NP725!_M;Q>jT#?wxyqJ6QQ?#Pd zqn5Zx|4y&ow;=1Ypnic!dX(T5G-d_)IwV8hdnNKjI+WD2>P>T174!SuyHE>wbgtc)T{-0+_ z)KzX-a}vjU+-imEhx2cWvPLdv_vt8vr1$&8mJ+}Wn)^>)>e}G@H@r(iVYX<5gFqq>>f7~^iQa{-7Fz}3gN>$VDeD- zR5UrmsGN+=$cE`*+WT@GaJpLeT{JFzv)k`u_t%;qTTw;%*Xev0pFmIK3?J`dyD<|c zR~hR~LZywRn^glRW9B4gt+ss0qqEN(bT%Uz=D?LY=0GI_6O4|g4o6CJHj*_|=H6ME zd)|=SuM8;-bLr?EnO8y2RB^fknP4{RC&U<>NArf4_No2BLV$8{u zQgSA~N@V0o&!2X35WT6Q40SU)W3c^33E2e#BC3c`XeUZG1vRLz6$HqrN%7d5gN|Ju zg{qLb3685Qx$a87TSZJl3Twxr$U2kD@-`RAGx)lURD1?^nZSU>@;_xu*}O8zatj@@-y27%9qsHch`DG9NUyLF-K!drLm>5RTfz z-YahWe*b$+y9VbHPqHy_HvL)V)a~q{lkn6rA_8nJGs zbj0I3JP7Y5O-H$L=^9USq+@g;p;I>oRYFt>IhTaFc8O5>-RwX@uAPafY@oV8py%c) zYLO+VRx9PBK%B~HjibRmMV*yr)=u?DRz5855#jjZ>E(vP1Bx|kR8Zx7&_%;A#cCvE z#=#WttfhM5t)YW1F>M@etoR`ZJ)W^Ng-b_JcnvPO`g1{c6l< z0*uuZ`GsUcFl$kG-PP-1uHDAs+r6blUofL_EJUY*?r4DgXFVy`AgxP8Ak}iaBr}6a zUl6XOJR@VSEjNzrHt*HBm-(7L__egA zibF+2$;ArwBNMp+k+n1Gm14ZQ=ty+@Q%=r61AQKeJ?ZKw01YIH&GEmpwQIfGqmzltY{HLM5ZCwoRE&{~vq;>s=Mo)PeYDW+*E+lRbxR_7Px*1!0~{ 
zGhmU>@#gA*TEhv5NikU(o|d!+yadussLJpn*hMS$MzfQ1rmq+o%5p2zx|B?XlI1fu z--^1h<~0%fYTZ*~e*L5?JS$tEJn6M=mtRW^Y)0|oaRuqPSUe~!WRY@&4e;VD1l7SR z2sKL*(Mfk530THWhvUX$p2soy;S}~~jLP4w3E_CLSkNsIif%#NsN(-EJEit71S}0U zVAj0Ggc<`NI^AE}-qGh01dCmsJUl!yxve>VcJ_L;%13hLwj@WS zVI=gga@mUAnj))#+OzXei_jVsnK5jWicMSUb2GBG=7cqq&O-T~sHcqzkGY-hec)gV zwjli48oRXHzlA{QrAQKHg4>lt(@tJ{{&da>dNG zL8|6Zk%W+5d@xvYE9R?WTxqhHqV!>QRRnL-y01?&&e$2xz2sEb=$>@a&%+L zx{N<%&BT^QY!58l8R$c}l+ugT3z@)vOG(meS4)1iaAmOHd3{?mV z*i@yxzd4WqPB|jR>-*5u(zeLdoB;>Y@J)+_?Ojz5mj;XHyzUtLn+ZIbVK$UF;Gtd7 zxrz)P&W@1qRJbN9^4^Ar7fC2Zi99h=1=*k#|6rzM{_xWG@KvULRf_eoLmP&BrVgEM zD!dL_;WI}v3=?s~VX!?8q=0Q0iEzH|D=*AJ}IVyOH%v7g*)P9X9V5b87I)L z#VRt9#>zcVn-gt{K4nxeBRF-%VLbwsvR*YF{(5+}VhID`XjG|5?8ee|X+0d=-Q&tN zZVidPSamKn8(L~Gmh4LJz%+LSXL4WzU{%Ohvk9e1^=J23FN*pKWxLk{HRXwOJe zsPHe^A7k4`kn|tlQ2H%>q)$1vS>oLZpt*sFmD&m<)5)wkMO>`MxKt!`leIzK{V+mc zJT=YmpLlX>yL+PhhjicAAFc!D~FFyjFD1Zv|v6w!I)Ks z7bdi+tuqUsMlBH8%Q(VCs^b}mAZ+QRJ%wO9*=i$&ALPz?kfYN>Ct2vlT`1$us>~Ti zQlz)0E6f!YQC)$yT%U`!gwOTR^}?i%qA#okJPGk8J`L&x3usyek#q5P3d!b^&a8pJ zyUI>FWIf^1u(tOS)ZZrzaj=Lh@`ooFPQ3eMOtP~Kvt@y-OVZmJ z-36VxoX*}d1NWeLWEfs*@Vgo7)0BVZr?x3Y?%Nbx95lWfgbr(~hkzMQLH*%^Z3^zx}aUs~VA- zoo{BLa0Ate1mzJ(4tMFh3LAzi3Yrq1ZkF(1^pf4WS1|Ho*d(@Lp3MIln?*|Otjs)A zy#5H%v(D1BR+5$muOAdLBRYnvVJ2BGr{kAa2kjoGF7-%`0+~ zUCdUme#wYfYgaOe5UZ42 z{3((R$6sQL^ubG29$=PB%Jbb~c zFB>MWvSVXK3+8-~mKOUZyDrtng*o$4GF4*9F5fsJ^}>AkHUoQywgl5re=8;`L5i|N zUfaQgEy{u0MXt2M^!P$*fAldCjdG=GVm461y#i)=@ft$~zfjV5iYI@J9u7^A91-d| z*hS78iLV4I3**q3VYhg9`XycW>0f=)(LEk5bK=>Ko6_}|CCyxmcs@9QS5An>-%CTA z7*5RnC~#rV-42$tjrngvk{B+aRGLmE#vf!YenqP7Pjk7X4lJa9%X1afl|!hDw47=9 z#b7X_xy;+T`gd-m_uP-Tz}B9=FBaa4eRUDN4&Ia3i-bOti{a#{D_UBhL&9C@Y&TDR zY?wXuGy|x=;2L;eav0{_V)^ltCKk6-%sW{JQ}W%rCpqPh(dd*D-~>O<99_D5!Z8vge^9 zl5Ce5rih@!u^559sUXP zQ6d&}_-$Xx%dr!cM_Z;irt^P?y?bnVe$ZTEL1mKt3fHYUj(){l2joeAce_!DkLZ1m z3?i1ch`s29x)X^KBk{ll>jc%whZ*ke6)E^G!IK^|?c%^Z3P0!UYt5+0$+rp{2qdh# zcE~dU`>!Yx3Qi{2KE;uL2*bUeN!0bo(nIsuD!bvy8DO6;J>#Sa-kyrzOjP1eI;mrj zv9l!nZRj?wk#iH 
z?6r)DJ@wiUAzt#I3OQIzKNa!8ED6;Q`=ub4I;n)JjH)KBGCD<+3|$qnxTaXN+POB5 zq9n0Ft(*XJCkha^N#aT~GQrQz-5MzAbn$f%s=V3B*putX_5Skptnp!K${HwS5_iZn zBIM4TA$dcvWPVVz7+k%B?NVIPR{-y%sCrGcalxjch|lfHJ7Z02|GGJzveyefI2qhQ zEsTM83XH7Qa;Z_S2qKijDiBanfB3*xV>3&0)E32eb|^@Jt_3_TSa&$fM*~*8@>$N= z)8oHMYvm8+$7;e}C02`dg$8GghBnsHrIR<|4|4}8M(u!8Q)D#;T#1WE3F24t+mo{C z!?4}~JAHhASz?Xt%s~z8rD(Pt3SMKCP|ks%mR(*;j=A(n=va$Mz?KvW+ zv93W;b!A00%-tHJ3ZmZ`E9*@7HV5i2sD#Vp(%dgW$m8J2XeXgJP%bOT2;nw&IkjK# z1qsFtx+bYJgos7AE^hZAB^njG154`0O4OXY@Qqp{FGfwN%m*`;2e`wY6WiP)KWec8 z=GD4)?F$TtI{b!gkZXix(5?>bFKt2+-6V=byJR>J1JtErAU#=2n{TQ1NN)EMqMM?K zte**DJAo%O3{uM5V?8!*x6o9St(MQyF4FKz{^c31qboCYFmF+#O^T889dD#A2i_S5 z2hmmnf}O&mp{BKgf~Ec?l&AnIcDLw#`;O|mWT%@6*NEag5%00QGv29Z?c;JBYVG7NP&Br}S4Co>mSd*Yee*3hAd($#(q7x)aZCjp*`p!I9BdEq-i zdkaG{r0K3wDEeO>>pQLYjWYc#x5%=<_>GQN=GW!-fuoI4FocmJs!Q|4y5^9vK6QVk=?U}eMqXWO@jze#TLdK5RA>nPkN%#mHlgY4#RkEFU}W4>q7|zvxh75p&p5` z+iR?SDj?7GoW>&Nr3Ptsr!7P?;pFl9*-G;@uTBNgw~$L*q%6DTcyy8nk^V-VjX9;t zl~ZxxQLdYKDtz{JdKNc5YAmX%RhG<_B-sbqtsBZvcdV?#0*)aI$j@>^MuyTRF(B1e zEv@#@V}~W^b3eTgYg6B6UhSIiY!AIhWj`Z$JY&05`P}`^2MN7?GcA%ox!mQ55Sh57 zVV;VqQPH4`Xo@;1P&s5wr76K8B3NKv6?tOwNT5Ch;JRxajq6%zTyzJ05$pioov(Ps4NHzFs*=RGLK5ENRt2>Wx~C7i3Rr znSLp$jWZ~%A~VTm-MgmW)_gA0%C8W!YqeX1KP7yTC$0#=5*?rURQ)KWheh!Nyg8{! 
z#foXjIBtT4tYN9N>8cS(G%JpR^H%fE6ExmlUEZpdaI*8=3VUzLGeSE8GMXH{!?cUv z%tN2BHXbdoChPIre7iIrRngmH0oTGW#+$pAk#|mVTrsipmdd)^ zwJV*nJ|x|z>Twa0QYg!u0Qv#q2iO9jTyUEWiIeYhL2qr#oGSZ_*l^)k@cjcgONU_l7@C&b1&lB z3B0z4)fkY57zJWIs-1`y4L5M1Ir-3r`iMo>YxFX%-z@Z`)pX&TXYEO6(7W@c|-5M`{o^*L=`;oLS>{+MnEXiMU6TmJNSAPyZOAZ74p}*dIC~| zsL=yoAMz6a2vkb+mvxaynLYBPFb?er;m>e4X}wRZ0J8gqCwuPyD^JVLuS|=a>==)_ zcQ>^x8NE7po|WG%>%I~lq$%>c)K#ThF%~kL)Sa%@3kyj+)itkN7*IiuJA_H=QRm?* zi?~ULt~pN0%E|>Ttw6-D&SyO9Lpmb8ClGG%#?=kJRgp~|(4`{jNuU^wtaXZ%8_CD* z`Xv^0yawr31d~u&>>{rjGZMSiNl6U&4dozpteE7#sT(u8s7JK#ke6Ma6Pb&sls1i@ zucdTJ6)v?G;bP&fh-f*ZbQ*cm8jdWCm6Dznd#OTZ3@p!DAW)Wlba{DmQklGnY7bPi4snIMqpDDAXu7Meg+dcz4t)zZ zkCXAu$|HWh24xV+T$UB@yhCNOG^sM6NZsw^s_f*v^KC`YC>8B778ExGu??4D`T%_uU@F6r+ZzZ~{?A%kP2kb(3=%>MW&#mFYWQ{3w3_*2#(J}G!+mwy-N`RV z_*e#(6VuJJg1K}Frs682e;@LlEiP7`j`y^*u~2$b5ml`S$?S?*N^p;8s%ov9ur~^# zAmU43ualx_5&uYxtbv*TdPQC;cW}e20$~%1b!WQ%q;GkU2?8*prg(t<=3-o6AzjTz zgmu;lHvM&klJYL0CZ`(&(v1nsgkK4&@@ox)$$?N}JSR4{dk34G4OBN-Y3wSP<`w)a zjz7e;AXjg*CY3>-svNs`H0gKNvhWv?s|BP;MO0#$W-6xA7~kfhZSTLi0(o!Q-VH~# zcLRN=rKPp90m$xwMq~q6NX$nGy{W4&zI1PIJYuF?S)=lL^PLZ6iyS@g!niyoQKrG`=TmBqOgxWeelUJC+oQjj)t z;>wi_mmLJvs(EyQ(4l;cty=Av%|)fp$_q~E_m1Z=UWd+jkk7@`xpg6$+6?^YLg_ua ze&;d;__e3_*P*&|MPs^Q%zFD$LJ+zq&%>C6?>>7eq07T~ewTC~{}N-k!{qz8ax|cY z&PW^|9=^^4aIvmVhQRo=oNnB+HWNCIn#7G`?p;w2M7d)eKxF;Ev!hOtJxZLEt$ zVxsJ+pufBRc9H6ZZ*i$!TN3Fc+Ke z$gQCLE%9hYIZ};xiPUY=dCy!gMtT}7WNbvyoK9#PPnoA^4xMV?TX-bO7sjaOn z6>c*t6=B6n1BDrqq9PqkQaQ{i;sK>pKtUlzL_qZScE9icJRV%H>w3Mf>v}z3&*$^H z8^9VJS}xHs!gT1vkhIjoRgQKy#f0$#`f>t~m!5d6r83>hZNEm-gzFY4A>rRn&Ym|O z*$3C5Q(H$17LbGy``s?bcO+=9_;~%e4S>+~s{F5%m`h)glM*)A24hLsj8Zw<=>C*- z7w>GlaW}SlTlN`*jB~HXfVp(52C$?Fn%fRNv|>95Gcl$WwqY!mTZ@419DrG9mi0jN zZ+Fh3B)uE&_DX3L?#JqY+;v9HX4Rc6XY$lM`Oz({L80vIHzSkaF>X`adK5mIJhoZ% zwJWYbVH#EXq&?YIM1AU!o9+?Pv^zBqi&uV8x!v;9BL3%g9r;g=JN z-~dwYSF0M-EqP&iOR>K7o6bHMxY*tZ`a!D6lW@|HgV>e~%^sbA0@s_rAdW>~{^UNJ z37eH)Z~lS$WUHtvI=YHB5=+yxafw^9Nkw{F7Q2gGJnunm@`zWyH+SmbFg1~;2y0YU 
zfVZNg5_Na4eXx{F8O?4VilzWp;euE4QgZtdg&A(YL>`-goP0#0d|3w|RRD(D6?B%8 zxdZO3*D)P z=iD^pqGP)G8TYP|Rn=9by54IjoIizv|AkgmqnjvakuE+^$MFq5bZ<{)=PJ18bWw2T zlb~sbgQbGu+=0d}LcATfnsqN8i39AHh$#Ihy0CB)ILj663tkNrjDmGXQS*?Sn9$9U zNI0}7p+o+)!#vN3HF+Q5qfi-So#WVDnB`^y6Vlj+Hg2C7vDhl6w=wO92i4?g{)Sgj z#vl4!?C_S<4fW)g+vjv!c}OqIllKiDttJ1Fyv$EaGPRPij|*ugG5Pg_2!~*t z&(bmW=?)_hJ~|r(Ql^w2ubm~caOAB#5Zk`eZHwWzM6lds5)&cXJN?pERvL{PW<@)q z*qSmk1EajJ`9f~3P#J0F18AIgm*9dcl}gV5+B`F39J&LA@erSZ5vlr3qR-`GUVGoj zY>_bi@+D(pcwRw4kJJ;a#}KzJGN3HW7f@@Ld0qzjI#kD`W800sJ?8Kg zFTjOlbGFzE!YXp>&yfKLXZKu!i6!Gfw38tgfeaVUK2#OK)0jT zHx45VS?or9BB#pWJ^0CxMO%pqJ}w}fPwh#U^qzWqOp%-%km&6sKyNB<Pl1qUa*$utJ!&1!ZTTQW50TjUPAOz-$ckn zcGZ(p(rhkkND&}9QeKsykp4Ytqmefmeu?>yBNCd0E;vRe6qVcPseOsIk!okVTLzlu zgQZ%d=e-(2;O691A);qqe)2cV0{6?T`AgI=aq8y<$4S+NwQm$4OVoTAv~mIl@j=1x zJwO8dtvvJ4RGc$1e!e&uO1bVvHm?wxzBJ99A`u>5m8Q>%QpzVF@t99B$q0GLycZRX z%L@Jk@B?n6rk^Jv`tNbiEemcMf)nAmeI=b&;0E34j&yk!28W{MbM(07j9pM?gDY_( zi$Jw+UQJgX^{39M^UvoAfzNyBkzALhSqc{P#l z59~>P^R|JL_43|?6jtR_3e;C$h8Yzf8#A9O7$vj7UZsyMq$uqs00MX@%Y+=sW^_Deyv&F56u1wtsv&w=PF`zvWSdQdN=c zol%4Z>7xR!p>?xBi!!}9zb+BY<5~{`7_zp`P4~gL6}V&N$kK>2ukbI7!NMhB8tv3c zKf_`f%*lLtev@&~U>iBMZ=`KjUJ-r==~U6BXUNeV(cafF@d)!dyNz_4dAkRa7?o4M zb8?vk+n8848A@fwli6C#;VM&G=UI{!XYx+H7b)bjykCyRrBJ2)bx=s>W%b|kmxa~g zKlN`zr|B&8FXl0~n5rugVoLf?)JHJKT&zSgk$&8!+gvZBg)OBGn{x(+c#rgaf_HM#jjrKkS0Y>FlftPQtqKKqh%? 
zD(NICtD&#ydF_76_+E4S>_?G1b!c(%0yx}lq83u{mzss}royP!fRitJ2j3ba%p!&r z#;B-JD+cYK0AW7;qqn5WBQWS|^G13FCU_gJ*c}mRlL*xH->%+^Sr#z71A`3hDdjqt zqB6ASf~AGc|BZ1|x1_P>jAw2YLEUr5Hn9WRdp!jo;%IEUGxYg}GnfcV*o_Lq zy=oynkv`&oXD;8ON~nDSxFgBzDfCsgvlLVhiNw+CkCKmnl!5xH-~?l2$t+o^GW3q0 zukSGP_=Nkks?jh^)MQgPUWAs{l^ug2N$OgyKgw{+@R6vVK?TJ2;@H&lbAJdQ$;DGv zqvKh6rX3@<$E@-uy%zqcq;&}%&AZvU#r(TnXMo*2m-J-MI-3ITKqmG znX1N9f)7-g3w5*?r`oggZYc>f5^#d*RECaUbv^*9y69laFLcO~xdX{P7z!adbn95} zF?k{=(FI&b4ad%WE@&D}SjCo-#RILt1#WnG#!M8`>hS%sV0EN;K9JMTJrcHZArX;BYNGXhnWh>|hU;LC-k<@^4M>fD z5HrzJan&Fjcx8knm-OI+S+?UsvyG$3-+%*11^d~c`3_=R_o1)>gVkK92(#$uiaKug zGMIEbU$`6?KBNz+O(*0LWQ5$1=IJk3@Ur zqaHj%%^m!(Y&#J1I_d!VM@>WzzE&S#KAqN{Ffcx|I|m|<(gCn9JSR5~2)gpE(Qa#* zf3N)Rh<~#@I(lNMo#KxY4~3gHFNl`Ns99; z^l9mGja2E!cRUd|Gb3=KZ?2gu8@$H)W>z+1{;1b%9oS|1LateIY!U27c$zkQ&Th1s z*zx8O?;a5fk26u|W1W9`zdbj<3~UMVU&7yATi#!KrBhp5gDwo-$@m0xO5hsCn(i&( z5&9?%%lDp^yWa%P@%*nkiwhGu{a&P#ba7-FLdQRV--j$!vbc2%k9xhn=__JkMBlCm z;~gt|YW$5iGoUAVzIx1olRe2Ih1Y|9)*F>a>Lvpyri`bR5UeZFh}B`qW9czxq8Sqv z?d4Rb7E!Ldu!^Sw9cs$Xv`uj2lVG<{8CTZTi=dJ*#9M0AEi{-|-)g3ds~&|z_orfE z7LK;l*mfEINIMO?An~#j%YfSiiK&=10 z*OHL@Th$AwpYn|Es|?NqtH_TiL|>l*-5Q522&l*{h0|)Dsj)g zt&BJ6+eX1}OIRL9Bnzd%D4nVTxqU>6-$l)R!ddc!Ns|D#SU52U4S5>#%0UUYdBGTk z&BID|fMewovMl_&)khXw2)l^(+#44@!zm&_*3!9bJmNSR`C#SMr3<)}!yUkh!`8{3w+C|dpEK{y`ZL2PXO@!U!)Jz*L zZ5`NZs?(>cm@S)%n*ZKB56Rxjk#^ec9-6(%*qCbur?ll?Ky-uu2`U_sO7bvJ^(i30 zMoe>R54QiFVv<6$BBh7v8~Ryo2x$re1N6uAy0_;$R9U4A^N~9k*ceM}zdO4h#;pRA zElV-~BiFpjexM;OEN4ceVH4&^(ZLANhhS=?_#3W#HC8ngIG-^FUM%wj(s%fr?CO!s1JGJf$@3{xA3fO*4!L|js*P}Bhzh( z%-A>ZEy%Yy^OiM#`8cObUofewt`A)LRFRBIRmdHprn^J#5fR8Ie_fGjtRD)zz+#>y zT|6STeGwo+Ms<+k1%2scHYF7o4#ayol9kD#jb2uPuYq6`M{^J0#^e3(nd|O1!FQgL zGm!nt;SKLc&I|hIH{6QEV-(z-DCnsj)#N`sF$)-CWjdls%`GtQ5y3NZemavG7A1?4 zVL3hLjfqZEtvaaN;IlHecqO?El4stbgK@jaiDOBWaZ&y5$niGdKt%yW@8x>SS#XLi zg*u;Dq7)AeqIK5ng9A@vxcxm8v6xjSRx05$!Z9B zPL)gko2-W2JLBz99%`;%I+21}9MOk-miFYJZJB2tw`H|$3LfS;(@XAJ%OckC-U0`chF+k$?Ut?e>+tJWR25Gr!C7OV+3O?-ptcLm?+1 
zH?4jv{4!>**XDpE`GA?;zTZ>xP1-J~e+|5|b`tyvnS6t(g4uw5eIj-u!AUp;HhmCp zPE`2r^n%{h;l0)IWy`H}%|if`=M^F&kyL&EPT7UnaM64qnWe{4#gAz{JXNu_0F-B% zYV++!zC1e?kv#N0zz^7)BeM_Fq_!;$>mzdEHBibx#1^!0msM0z$oN+NFIgLlt7}+b zf(2@CjlXv&W)nA%66c1J-d zIHrfWJx8i)oUG~~^~y~UL~m7^bAkJ&=sUi`1BU;eQnSce5@Jc#*l28Lw|S-el<=Ue zDLJw+%v19~p_#hNY~R{&lN&ams3caAswG-Jw1&!4#p{U3@Oe|P15cSR*vf2}nA0yY z3mI?RGrsvj6Xs>Ggnt1A2yM>)M`-g^q`_dV6&~JQNW>1qUYT7oI_!3RxPxp%Htv>< zpQkev&A-v#lT^<7g@*55 zR)7K$V~A6^YjOM9-S;>*-th~H2kxBXS5F+yohSr8f6W~6WkENv(qHSXU}IYXBeieC zJFDH{y5q8Q%R+sUYfKN8QKe858zJL!p4{4_v{r|?t%}C@a=&0VQ>AIJpX!2p>5_7Y zcPc8+2(E%cz16hR=@X>(IvPr^`4mu#! z=;0CM9xp0m{ZUpup*@_A-Gag7aVQ)jdQv%kXmxE*5AMwjeo__Wl-_0s!6})lh3rcc zg-9S=Gr+L0yUcT6@moT?mY)nk)Z-K|BNd~)^0)7e^bAc5^n zLs~rlAZ1{!pj@}=qyFEypkPzmvmZvAy8!42SmB8P&JO8lJ^?G!g0{WG_>x*tp*P-pXVq9`7kT{`kXD{OS9 zEGI>%?K7O5#$4)WKd-$SnM-aNQcX^va++EXZfECIdXL1;9z`2UKmeDsJ0rGwU~-xN z*Bykf_0AA3)$*W_mk0yJr~WvQAiqKSVRqV;E!665F{KWBG<|-%|9oy7#OTiD4ST4& zCe9kI*|&MI{yWE2`Rzv>37DDXnYWwmpl|TfDy2fdY?L=0Tnl-3N7SvAtH&v=nIBU# zFUrP($CKLyyG2TX9Nf?fnGeoH&0n8)+hM6m={(zFOysT&1_p}s5JL3Q^jlooS4xhQ*-2sQd%D^#&G)l?jRybzF30bACy=! 
zShPhz*}#srwj#qP5jEH9dEU!)VJ7&@01Fzk|K0e7a6>Iw)o^DroTzbg9L?ciOVYQx z3gix0N0T7OGeLv?G<&9ZFp?i!RL0~dqVizo{1IY&NE@g1xX&(a9T z%X>z_Db8e0L#O4&pw!aqR#>Qu6wkD4Pd2JbCJQJOPenNQnQur>lt{x=RH#fyKKz^yrCu)Cjp)eqp>0(JjY!l9bqs!AC( zEkkt_^ZYftKN*HdnwbouzInKZ@kgo)&rEK-8V~*NUGO!WA5NNUT3!B>X?Rc#PLR%y zCNDyjW%_KbtN^6QeV$O;mFp}3ck61ku5kT|KV{%BW1>3QqHB6x`DKkOTsvQl42V}4 zYq7tj6IpT~LXCym{7_I?L`Z35x3vG@`+7?v6+CjMij6;N%$l>-^eB%KDQH8^PjNhm zF~1ty7?{cq5B4g;0ELGDMFawifSxScKGO2KoJHsP$TiGtV?or~lI?RF{r%DltvN1; zH4z}w&@?t^qA%M9JkZ;0lh?e;_z^{|E%*Uo|99A22fBng-XVR;fX0?E=~?f)2_Gde z`nI-$OO#C!EAg79m(jw?UXNFGsN?e62)yUAaKy*Vi*qRa@)@SKm5^LW1mLoBlg^B zVk&Km&NI2~i(-%ZqTXg#W&=L^cxFP6?d1nbqD3eXR6*J zSnjKlK~HQZEt0`41?sWMAVya8+y{06G*D+wd^8aKt8LXBpCQPZX=*hWJWAZ0Lr5uC zdcYmgGzQmm=QyAEJB|U!8ga8ql*fpS|8V7?{xl$nv3zE-PAQ)%RN4m4;~C=R#Yb}FrTHa!Dx-l|6{^eT z966k{zs{h`=y}j?7T61_EesZXI)UF?S=jt|-~1`n1c$hj`pf|1hkoezn@lN)qf|hk zZl>!tqqUnYcx85^LmUk|*X14FA2LG5a+rD6oBFU$P)p05x4=>-3d!_XMJ77xS&V?K z_NZp0BJ*Ze!mQkX0ywLExR-IS<&938ctLmN@|U8y!Q%Al1Y}z)soG-2Mek;uwFI~am;`HZ&+j7V?>_8E?bfj=9B@`zEg}O6SBWVEDFaYnF z+$B6;1L#gOee7P?-Ac!?>V!a6O8@Vc$P5iNHY?;t1^;U|>vaKFI~lz%ZDmoHI_qcC z{p%E{^_pbHy?j*og|*4o7+#(Z9j@QF8dAk|LfS)o6_l8h(jkCS$lwx3XmX^SQlXsE zhpZv|KgPCyKG7-B4GZ^L4-o0`IdP*WE(9HxtDL+3we&%Nh^oyWgGPvl(F1Y9hc||n zH7lD-GeGt~uR)D8F|0(h3@w%OXb~Jck`-=|6%bo1Xw*38gCm^2(qt~v@|SnUsH6v+ z*^uOT<@B65(PJyQTVaA#E3{}B|Kt^b{R+z2Ms3n(A7x8BWgLne>KeR z1qAi zaN-69fFHH27o{kWl(dJdzv@-Y-wy&#g63wH<$SmZ#!$ybJKCx>P9R2hIRT~c;H04t za6U`cBZjShcC{f?+2M)2SeJ-oT0XUL+y9$cvP@y+4ZQLeY&+5(0jOV|+^ra4Vi>22 z)C7~77jN>~6%_n93G;B6I`gjekv4;pg7Yv1=3`-i;AvK> za2U4wIm1^y7T0>ymp5l4QI{~jX`1ytQx6|i9U!on?sxq2v{vJWU|X2C*W0dnd&oD# zsXObgKw0!H)FU3VmO!Vjc0G8;A&w9%cR45BVK^yRYHRt%D%Wq3Gd#`HER|$Mrci|! 
z@fsoxr~5ac^>PubC9)16#ea-IqU-s8Mv&l~(QY?K9yn-?Uz?VCH`c-)!ic1`42ewo zDhYzEkxy2J@x7kXRYf`8EIX-aR2WRf0fZ()*b%<>?2KbaC;>ZIV&kFstH$ z%m@4<{exiSZtsT+Vg|s=@MbCr_jzH(If63L6EAyk(n{(XxrQ?HDUzC0?PcrPob!i8;v?FK}Ni{Tse` zr8lv18gBDy_u2r&b}{Xek8TetE1#0NkSsX;A@tlCVzOXIf>1V;?*zjU?xZBBN!~8$ zC%^v}rP}td{<2A-^{m`;wl6Szm#iLgE0zjPWI$StI}~qb+K!2WW+-zT0ZxdmqQWqA zOgP`5KEG@K=ic7@T8fnTklH(Vdciyy>~;mrOlBy2x#IUZ=fL6J#B!@g_VK!K!xijp z>$N}u{AHNhb)tF9DF+sCfs#xG9v|)E3X@`93+!Gw+GC=Lg+m z7cb8EDGv!y1D`PN>w|c}UFh~s761z9Ep^-0GV@*A)nm)T(TkKqs8wzaz)$R=wB|sT zZ})DLF$*z(@&~){=KqqC;E2tf8@TNa^QX^nr4s<@ZNczsq6q74`QL8k@4thYsVm(nZ>2QXL=7@>_U&uZ~p>|RNlExhTLAoQ4*%pTh(CNOTskx6JtcI#y`R2J_ z3zq+g4 zaTY?;nY2Y$lQ$|3Ipl2&9OhwlWX_%34qw4z@8W?Uing~Nv9$5m%AAXzspGDTs2x(> z%HURBL{P!&Y6}K55J`R!uo9NIEiz;JxM&}`GkfzbzqFMZ**)N5`^upC&f|{UbDAoA zENcf|{Aj1n4BUHp(oD$ZXe`6Ybw|GLI#o6H9D|k)rx&-8;WrKS@8pgN)NiqO`B(KP85xIu--UdXj6(Px$52 z2QZvu^8z_l=p{w+|DtLEvJ9}NT?iGzSQ;sHUEQ zx^*YNjhyKVg{8&_wM3klXJOAHkJ+RjTj*Mj$%F8D9_uYHN{IZMhbswlPy&CEv6r8p;zQDI_M{gIhGPl+&F z&U19a-;abr3OFx;oMWIrMMHJItemfTP=+Q`d{$Cp;iX5*wV#pi@&nP4xq~|l?s1mj zo+FSNH=l71P1_DV_Pex*4;vBZNjw$={}E zADQ$z091u{m;H6QyHNP!oNu?ZDmOjVu?wCX`6r7;>EF1?L5fqxIkbSgSUm*T&NYyW z0kXI@6INBo3>X)%_5o%IVI_i4{pX8o5aQ+3+gYdRqj$T4E~#F4iAHgu ztQW?7#(7^CqBC)nw6i9rPF@--OPV1CJaD^X{XsEx&hcIO6_w9CBCw2Ejd?v%JOC#V z&JY(edRpvB<)+&;gFTKZ^I)Wr963ICFn-nTDGw$*^RgB?+X6I0jxw09MvG|^`DFmpj5*~8$e2qFochC zm!vUV@$fVFP&F;Dv)DpkZ}tBne)gN^E23k7M-IA4rK90vP2G-}V%mtG<;m(YV3zRZ zcr&j}0}_0a{u}_A3LY@11Gfd6fSBH<*&fQKt{!jyIS@0^xvg~Rf|FKV-GqypJOy&g z(yae-`BpDMnxUH#l=g0T2c5P&7?T)wgneZ4!a=bxk3^|%s7#mm8ZC-sh<+NJ1yBDE zB6cL^Q_OynDRl89*okA(QXiqKHe-u{_k@O_d`Kh)x=M_Inxr^-W@G!#_<=|-@vwcw zNOBijbo@fXyE8YuaxCqK6%5Y8N*Ent1vo;1g4=z{SGelyJPXiWkd5o@zrnkeLbf@# zv2)?;wUq%%vAyi7kkFv&5$XMks^PaE+OBH431KZb5#~kC0lMW`sGC$tGFqwJTFCo< zZbG0R-e%ha7|E^W0-E9CuGJ{mh{8p?lvLhV7^*)qYozJsX7;zSdsSHtAz0>WmFX=b zbn!N!TCBFK>a@PPCohd-E*HPxl%gEp14B(teJOCYC)LZX33Lb2XL7*{%=I4!P6>Zm z`c@E2Rs7lC}^fQ*4-N;-{!%np`o9LnNkJHhs-!*ADUbn=8khQz~ 
zI^XfDXtK}4QJPBlrYtMJrQt=^wC-Y+(r-RFvmvy`#qVwH9p%LI3p8x)N^}rIG1zc`KIKmQI8TmChp|x1UsZ**q!AS7#fjl%;X}!(g|)- zD;}MuF%jY%1bV()elB~F9-10-xYmX7h1}_mfuM@Y@Bss8Zvk8w9b>cuQC{h z!fIk(BO*uMBc5G6+zC!i@PW&IW5myT67Px#k5zr}#c{xL;09A@JzxFO%tXeMw7!BL zM$X^DJ6&oQe<>gPdieXb6(}RVLD+AUov#9X~sxM}t4Yzd8kDrbJo>R$mWAI8Q$aO7>*7KzLA94J)P$15M(! z0k@@5)j?)%?!#*I#<_DWu&5=oGCg(rpTl8WZDvQEs#X$2ADx^mAJ-x!0#Bz@GK5PU zT~{(F{NO{zPX|`H3pjjdyPxyB{rKv;FTmUj2%Q+avvYXVO4rtI^@}h(_>eCUAEVw7 z$5XChd#$wtRgyF9QW+xIQ(Zuu;VJI=Q#!rsxjD$p=RIDlW{?S?OnKZoR$iY7W-%3ucae zjgBjVF)e>zTgl~C(7b;`bcUvzvvn9)$EA7q|jh^mSC!K|08{F2yu3Wq(k z4a}V#V6zNG$P|MylmuWR=S#9v${p2h)`>>*2%*<}8P#us z+dC8iiCOv=Bn_=LT>1A&UnJ1@g@*(5jiFJJchx;2=2j z438)77<6sey3-yoF4xaD-DdFG7I8CX0aM{CQ}Q_1lj(~4o|vt%9|0v7zwb%}MjfU* zDXqJ*rhDM0QAtA9LpF{&F(N<{UGtxWa!NRq>`3)QDzimm{K!Udbe)nZ6t)r5gex!O z14wTzT*~oL3W1?NWmAXX@Uezoyv8 z8dcUEGk)=%p7MfXnTASZRx=5{jq=|?AGwy-Uu@m<^4!j5*&)v zZeQ9MiO+_r;rQjV^=}JcV{m&6)E6Z>u|7;$tL83Lvyqd+^BTS=Wnm5iozo4CL0I1x zXWyDgroP|F3QH{&^X{0BrnNt5SJ)K^F;G1W9Zy}Wg_pi$y8A^2{WN|x3erB>as6x% zc0RyAUd(x3;z2qD;=*;~{U+ zjGuPbq|bS=5V=q2EKENo=6`s_%vf(6X7wy>#S)+T5$XBa{YG7ZWCEs0Yyb-UX;0V~ z8pDvM;8I=-B8WQ57$`Jiq^JE+7N0gVQ_TeIIT!6dkrN&KZ^P6(Q;|)<_>D$s0&RrK z$86jHY=7qr6U^Ys9^9BUw!S9+E74ea?pj1|8zR;keBaxVluQH_uvBSr-7GK3JZMsV zbuAPY6ad$0$^+t+euT1en>RIKzd0}G(k{_#vcoRoOoCJmF>_6<-GP=9MQVOh(?w14 zl0Q*CB-tS;_i9y$)Z^54KsCdI(h%)1ZF#?AM-f0 zb7?DhTQ37$G9P?{M?90;2BDv7JvO;idv%T=8|8!zNOIp@en@cQwldpu2H4}diE;8Z z+Gp6nROhjF*D|X6Xzg@K=8A;&k!gzJUJUQtUJuXzrFWUJ5g&8GdZjxye9~6Q1|D~2 zjA!9WB-)eyvT9<*=US(Jn->ezIDE$*&|i#I$PMl4I++#0Cna_y*4f?KcL95BBt2+w!_OzX#v&m{mZDjQktVP-_eEfEip*>+YBRV7N6>tcF! z7jE5QWwwGPmH%OvPTDj3bPxhW2{a|M-3Km5n5Ed9N$O1`RizE?*)h=!+^zr?kNb+x8rg!Ls) z=p2ddVPYin&xoaeJMB}*ysSr)0o-DsMdcQkJphj{i1SYW_l%pRx;%pe%=mf*>EZx` zJ>iq(+JN?$4G0vHT+JAR0zB*aww#$#w(dpD4ykSGbE3Li+_+t83g)`CeweWa zzy2QB@{R}f+6pQPe|}&J^&bK&RJ%|fFkHZ=EFBEQ;2TplXT5_ja_s*_4b*z&kfn+? 
z#1L{xSq{3tb+sGZWvkV$0z_qEVb(2OK^?=-r0`P|bk8k7I?-JYnF37XegxE%Us}=G zx8)UHifi_D@DjVMJ6Gw6BwR^P-ENs~Q+sLB8`ToyI{|rZh)pZC+KZFc+I_aAJXu{b z9P}ndBPci^M@lCrxb$tOiNKiGeadpLa+D-EH-1z!qxjql6DL7j5 z>311lscF*E~|2 zTE9@$yFI4QI=fY1vdX1y8YP050(i?3DCwWLPE1naE0lTjT#MD-}d*AbZO8%K5Gz;m+HOxsHdgL80!Wwo4<@IE%F1JmBZqhyRqL} zmXb!W@SKy1(=dFBQxEm`2Itvb!lVk}JAg+}%mpR$H7*Wt@?;Ig#I17#c4#2goQ*iR(eRjf=yQAN~o}StGXHCo2;1NIyimG?I z3eJLOF#N|ga4@i?y_VD}4@=xiT&cW=z-R}Fs{n(U7AEq!o1__WR|mA4dCAg+Hg3yji~oAn|sE)v$@<$8#yp*IwYJSZ=vW zyMK=Xs#Jw5*>fCOE^J z6>VYCcdMUzMuhJ!Q0V!6XX_N%wbse4Y{z#dSp)s215HWEQe?3=G7i~fczcqKXI#)lII~(%#>#}O%sjpkN z{rW@}FFTD``R~IUwYoj4Fz?B=wt8#E?{4hhazQoroWrYD2+NXV-N1-(#ic6`T}YW! z#q-bVo^I}_EaZREFx)rN(%lfUsqNSp)CVy%F+$IIvOWSIBibq=dTP|rG!2%5WCw8i zty_@)Vr1xsYrC93VQHnYq83Lk{l`XM)2rMDa$R5(AdsY|gvcd{)n|&GPbP z{qC5K-V%B~r=|Db0462ELNBqkLYLH~pO`Ki(@NfwBEsvbwdWM-`wyMj>4ec*uTaPL zWD?>IAf}fm9qv!-*X_*7G@C>Dgqx72HI0_1O!jjOc7Tk+OR60k+n&c*mX<@%8Nb^F z^v2A2lUIX&i&M8Y)*4f9r~#^;$bZkNhhS6uCM%m$AB$Dmq~d~}azS!QlY#TkHg)Pi z%Li^j<(Rmjik+&jHKTFn5bawS)4l%f^|Yu-+xcjbsJFeEsjgyUnls~yAl5)T=Fmt+ z-Ajw?-_@#y7Lb128XT_LmlB?G^`E-Q%k<`Gc`P+^-)uNC1CDl|0Df(bt}I@#Xf zz>Ds$P6n80eoq~2xH=xf7B$2Z!cJmo$X>fq4H`4?uEBPb^9cwi_I~! 
z!_#$%AZ`GD?GE|W#eMYO)gJ^M!$sO2PF0&1C@$Hro2o5DjNN%;Bb#)KX%Q{oqqrcA z0g2!HIikr3UJisj2^hR7SU_VUWSQN4a+0_k4wHwBWZj2Xd%kNLuan0Ggo+}OhLdeY z)m~5^YHJ7zvU)UQH%S@Fv8DEshwow~uCIBM_|5+DfSPSA!TX%>Pbhj(^_TU(;fBgK zU+!ch_s~b2M>%c{kGBGTnc+?u;3WeG;nwi%S*5r$ z-zK3PLa=kkeLB-+$x%yrC4Jg@xibo^^{$fw;EOhT-6WZtq@d{QSMuby(J7Q91ploGU95qtuRS zVWxW#`ADX0Udc`;KM{$SY_xLaPj%h(%kxG=cxt3!ybcZ$$ffju<`_5>7CWL=w`La3 zr#r(`g41`QZwtx^n84>7qaoiKE%n^0K8C;ku0x$L*k>wJD$Y|JNG1tj&ORsyu8-g5 zW`-SXMJy$W;4`wJUQ$^`IrR#_gKGkw>m_8)H~6EJeN%+|ZA>rGmsDQhdOoCyiGTt7 zLwUfu%5>4)*}V$9VEcYA29cq>7LJnE=9M}s>@c0xV({@+QZx|2@%*UqBd&e}GuJh{ z@n8sxL02@U@#N7+Q#tLyu(qE z-X6MB)1|Zw!=<)D-^n5` z*HYe&%oC~sGk}K2zT5@y(nc6HwW7vGzyLnm7vdMqDso=uy0a|$fsvCevur1VdxRu& zmSUI%n!*WLdT?kwC79|MQrkX-f+Hgh_m9lDXCKdXVgCmDM@hJwRTZ8YY~CCZ0mg1{Ot1D*6_ZHkX!qy{pU(2%q*%-#yS(BFCviqH*LLvDXk0i6)vh&5=J1-8 zzO*wjBIL)H9}e$F6?E5bgTZGT`S99N zmmg?D$t8C}UvFZ%m+-u!H7;?iD(c)|@3wu8y8_$}q|{@B=$`$5bLYcnX)hez&S?MC zj7F;GZ{d@vXvZbV1hY*uf$qphPe6{+drGU?_hBdZcFv?SKeW8A)*x~pCc@v=v%JU$ zbq>%OVq1jqkbnH*NGLGF5vC7h1;@gmAwi2}JhNHQ9|5L_Bjx-;9W_oTM#hTb58->{ z*OLK0N$oqP&*XC6O3t19Q654W>5(B_`zYHU<&{m>WbXU6EbAgbJ1DXh9i44kJbjF0 z{7OQGB=B#evLxbcg&;DmBq7k`r}qMlTOu5%wgejH9_6HdU=P$5PS>@;z9yGsK9#Fx>SUos+na!# zk{*Y^+7m?fcjEb8l!4chfVW>L+|pg;;nRmSJmG@>&iHbn(Y7+p;uzyv+BiCb`dSpvp`qZg1>gI-M&>Wx} zKX0DHUcNZs%_6QG&yA)GnD0lzs65myjBRpSR&X2<%2R$$M-|i?|Cc{W4@ccRrw?zdg1c zp1xkaTZEp%OEOpIAirj9HT=#{yDnGOL`PQJ)bMLenpfggIyp;a#McfthNKe2M^pS`gsSF9=O?$_-LC> z6aH%DBw!NpfD6Y%o^lv7duRDGYbgG@r#;1xuUj3RKao7W_2v&`kQ410UetEN%)9@3 z{hj668SmEA^V22vlbf9S%3cyCa^suL)fRd=y-fU=vlOq${n=@=4o&>b_*xHYaOvQm z@K%sq14izQHxr18=j|tZEZ3K3S?S?Pp_l9Qd%1dzGb4HW%Gw{Vd%$yfVQjNir2c>H zU1?ZT*Sg-K9&EX7O0|HB@iKq0ZppdwO~NkCKKNKn)W$QY7f6+ux_2MWkk zp<0X#B2ZB?DM&0sA}WFblE@SYLlP1aGM^P3j`!#Nf7T!JWUX)5i|jSN-}^OZIj3I` zojGlh%%hTfj>O(u%q;A(AtuAlODR1^4q+H48u-r~{ZF0JHO|X4&p;3G@4LNW>l&q@o`;#TG{+Lg#|Src(d)zuL-@7A&At8^4it~K)ATEv z-|5E1D%5X#!X+0qM;Xk8HOx#L&8=nsILZ&B9_36a zlRYG8>Dv?Z(ebD?{uK|~oXge$u(hls|GxKx=$J=~OcfEEoxDk;a*IUOR=#G7tom_* 
zINfJWw#sYZ8agcQ;90~FC%jX7nOau4@MQNialM_gYKhTyhA*Y)y3RC|Fg(Pqa~k}I zuzQ2xKoC-5H$8U}0suAm$XzA<;Bb?XF6cxO<1SyahnwC%{;d;DbnN)W*S%v@5$ze9 z^JT3|JxU!~!s_PMS%84vX&=H@f*pL{Bz1Zt#&gV9W*ZXsMcTD`(TcZXVAVM43kh&O>E_7cY8<%f3mU+ZA`6cg?S)ApEQ=z{Qzud|9&O#e1o z=k^T2qF$4!JD2>c@a-=2^3Jyb)-<$Rc&t=AR$xRt?`lLv*0gTU7J7@TLxc&9KUAIP zBSmEBPEEv}S&GiqwpAEf^LQ;bNX)Meq3?m3ws-#-HaTtDU0`$s${#Mu1)l;9E3BrzqC-9|4}o>KBpKUy_q+I5DlIGch}Qj&f!`odhJ z8XXd4g;uQm{lw2Rj*+=6(`+gK(QB5-6{;01eAe>}q#E#D+fA&qEvwDdHsle%jx9$| zk3Fme0Sw1-Sp}*@ZYtfXbxZhuxL;$IXL0q_s_OfNJ&Obh!@5KFIb=nVg@Vi&Nq>sk zaqxSew{W@TG#h+_dgpDV+IV zZ2xEt?5;O*>%jBjmw|BGU_csNlMiTT4&cBFUd`*$dn9yRolPxPJxOLWXs7*MUizzr z(U)p&M!&X7zmlgqP@Plgyo6bQQIQj*bDh%uB~F3KYKxUYK?CuD8lsu=0zfr|*ql?M zn}1P%Udz8roq1Z`)#;CPQ7zkolwi^SvL+OaZ{Ho+Zv7ZG=I@2)6q*ViQ`Jv_FA2|Fud=*HxF_(N!`4DeR?9SMlt*S*ty@N9A0XDb0=J1Ye>VG%p*iD(jb)RxD1>`HO5l zWmb>h0cZ&{NM=H#M>H1qOlUK;&p$C~3Li9_e#(F2!EnEfw!y=)YI8wJV9#Q=a>4pDH zK}hQ_Wv_S(8$uh6Aijf(AWojL92JQ>53;d{&1i5p7cR-JY`!0jryAR9=Dxy?EG*I+ z*HqLJ)z3&$)A)8jL3ktLvHbCaXZslbEp+oFCE4|a&^_hb?)+vhw|7GrR>F=)lS9YH zQ+Z|MJqHY2g*im=IAR}>(D^{aHR2+{T;kPj>SFGMxjx33T@5R5;9FX4*pc z3K3S-lkl3lISbWhQCl&4qJt|K2*Njus8c=9J07S)#Rs6VPAYD^dG_^nBLBDT?=)h5 zBK*8GPR{tU#9-+_&dp4|f`(A)@}CZ_qz5iy?x4E>GGW7ArR4auIzE(sPNoO2Pho5e zo64CG?qI;Cs1^_>PR_fTN&A;S@6pJ7EECA{5CrdS$cD8I`mO>|pQ6yYWh%2>Qsu5v zVZ~4yz1UQ8Oh@mE95Gw3YTrqBZ@yp2LszlIR8(|LrvN#uTuEq9zOD}jNsw4+7!&5j zUW8g_r?-B)85}2&?)T4+4v=I!1$HbNkY=P`(XP2EGTWZ&*wGP@sYfcDT;#OW^miu}|K{GTfjf7Ik2$V5DqZ1RbvT&PI=2jg~G zJC^1?8i%g6ixw(k(C2#^P>_HY)I-PiyjKhDo5{L-2fkwzVXs=hRNxVa*?h^0aHIh} zTA6RW2UTx7>Ygk=9GpTaFmzwfp8eW9Rv2^q@s=?muA|Q>6&+eQbR0e2vjmbX_wJ#; za}@$~e{Pk8CM&~nOy!oZg(EP#J{2}{vO56+NJ^VB#@6fPc1{SFcr|J?LV)W|yZ0OT ziF!P~NEH{;unQo%PvX=>*n3@cPtC?Iwxz%uvwLc2tof<-<{v0y_^)ilBZISEBqeaB z8P6Zi9KHs~QCS&@HPrwpX|r()PR47vr-Yy~1V;04dVbJ@G7xuVUgfvcCIf2+d%JqU zw%Tp5ACT6fsEblC6*l~|WCpBSAv=mODoXWuKp$K>>=8W*cO6!uA*QS$7Slt0=apoQ z8nLLcK(+Ox9JY)dxhYA%?d9$kj-c5$Lx~hgr74WENxA$e+C(uIZ&h}Si($((-rD8w 
z9|>?t`(OfB{cV__fZb>DQji;k{s%WQaq|20)1L{zj0qsVBXN<41%l->n%oNIXR6*V z%@q-?qYFRJ1Y%Sp$iGCd+^in-Cv|k?=jPHOy!>W)$_Y8yF83fTLz&co@SJSJpQD)w z+`RF=?mS*Xpdsdw+VoU~@}G{A?_V@<@779 znF+7osEVkU2T@XP!P*in6Skux4@QZBrNFz| zhjczIyD{)7$2y-tgO%+t78A}qSH;*N~X`3NjF@?lpQl^5U zAW3R8?@WU$9(I`}3aWJia-D5;A51cfTs3=N)aXf#&QJ0$K&8hU_$@?z$+jQGn*%X>AF2=ku3daHdUkEX3F)el7)oeyU>3JUx#(SK@?t$77bvIJXJ5l%OLj`?ZvT*MunyV$(m zfP=*y^rW`7O&DjM_eIj`jS0NO!h%lRiUS>E85&+S=>bv8^zgNw#ed_Q+339mgw7O4 zgE;A<%uOIu0`L*~_<-~cpF#JIGNg#OEc1{cB4n7;~>oAa9CpRFr>mL2tfhCa<)gfplFVWcO4l@>15dpbN%Gx3umb ztsbTzmU*@0pN|j3{BS|C*bZV3>+BWZKtvz8fc~w|q~0!5Yjfj@GNZ8PLP9PND<3M! zR62YVADPL&7W~BWB2>_=C>lWUOiq@G;O=6%Pzzf(qdB8J7^QILa*%0^%dBeKR;v=& z2g&rpLc}+-Z%J0Se6`%4kNJ8@Gs`XX`N0by z0X{;PW!{a?PFaVhZ;UoLm6TZ4E*0+^Y+7QldN&aR8#XoSy)WA=!Tv6T^TJ?vwnPa@ zVdgU`fL(Opy`=5cq*w8S0fn;S)4lz7mt%r|O zKt`_rl)tEhaPJ0VIBf@g9^?AHiuuzl%re^0wQbe_#aZZoG#)vBC?|^?4C@CCdjH{s zJK55~&=22(zZu~NfGex@kGIbntj(iZhCZo-5DBwz?@zzbt}v|s*k_<(E>wT$Q$vzE zZS(wIl7CxVzrH?v--K{8+TpYmaBmwx56bH)l>;Qvd(} literal 0 HcmV?d00001 diff --git a/docs/assets/images/mii/hero.png b/docs/assets/images/mii/hero.png new file mode 100755 index 0000000000000000000000000000000000000000..b7a6e0445162af2f42bcabdda09ce2a364f018f8 GIT binary patch literal 270485 zcmeFYi&vUw+AiK%{bo9yo~DzgiHV1=$A}Weg5s3;Cqi6)d zZ*~u962+QSBk{mACYpdi6~Tyt(!;7WlU4r26On(1{`~5-f^7X0Pi);h z^7fz8&ta@S_~q4{s+N-Cci|skD?S|i&%uf!q4Cw{QvXYOfbqfS(NVwp^#>J;3(Bsy z&IcSl|JqyLtY;qVeehp@d`7dg?EEU?qkHz>L_X8Bw#MHifmi+OSKwlvZ^yq@;2#a& zfBC}C*8hF}@jssW+4}E?s(*O$XX}4_p1Ag_pRIp*FL2{$%O}pg_5u*Xzkelu_Egfp zS5G_BZEU=Y-Kyfphi+3S67}wK;K@n z{1!a+eiZnr@xhC5togUtlOs_q)V@W-^8A-C(0bNBeKF{I=dlgyG1H;*H`0(_cc}!$ zwT<=em+bq&_b1k#Wc}&Lefz=X=f>c7=SZixYXfuklP98IShV5H3fS9k$pwZHnw}<)6|1UF3&gnq}3{W z%e30wf(joPkQhJk@u(zbEJE9VnV!d!U0#(w^3c~ob-`XqwBlO-;fd- zbkx9SSo?1#fejL&4LCiNy~- zxskvwm|2P=u57+?v7jmJY@C|`3ot(*^83~&zP?*tybZ$JIL z;M_r$pVP%wo)m0$Y^1LLvAw_D{hgQc*!h|%7E?~|FI+ZYp&{U?81?wU7g1Y(_&w?Gy>+fbhc~LyX|Z|dp49AT 
zD?ax#U+O^ZYt6s{rKad9TjO0C0knTq6CryJ7n41Zvy$-e79kK7b=@afV;DRgt!1@; z_V|#RVflXE*OzZp+b+02v}Olwy`02Z4r9;@YbMRM0YbObUUmE3XxnMk9e3QxD&XI~ zx2cad22HeBpfGv~r1g#sX&DL}V?s;Av|o9pmCljZMKngJ6}vQz@79^DQIz9p^xD{y zV0)Jl(~$75vtPieo^lF;z7^4bwcz_y;{0=DjpeC7&`7HD*pUYtiV$u89?s=d>J1nC zwM{*Btm^yt&_y2j)RT0{{Vmt%4v~yJ#Yn$xNL%(Kz9Ftfti;W6?SstFv925VsI$4Q zq4R$N55qrv!^7maYqXk!z@g=6(J7;QD1BdRWqSx#}M`s-U#Ug9s7RgMfyUf4oJjz!`%VH5He{^o`PV?}vgPyo2w2)=oF z^#T+GcI^z3X0MQJPYDm#{C0eP=2VpO4bLlQN-(cb>no6|^ClQj6Z$7LIe9tweX~fg z+B!*Y$4t`Q&!RT|{I@1n>Z#NXY?Ka>VmCfKRfcY=m`e1eKKVc+zpwd1-X|1&!*d%Rg{~ z-d$}+Vprtr*UcFq%4ULZNJ2GPN2;KhyZPk$D?C+1E;(%EQA1c0mzC>FC=mL3A1+nR zrEOrmYWKssr2lw#WN3-9D~VI{NPOT(JSt}>6hX?q0jloV>uIEy4GR z1VRGUCHMizo~27-g_n;$_-wUJ#G52ZUVisJ$y%gY9T=7k1F0Kg=(qbWrzT`aEsr=a zRd^r+t#aLSfA9;5Pw8T`@K1$l?N|OKa`?LMyLbLA^Jmxw`BfGw=)(s3KSq{zH5E%= zX~l@I7yAdo^H#0kp^z_ds^)(30XAQ$P5sxFkt3FmD-i;n*C+Pe)$cDAjX2>krWgKt z-%hlPH!iq;2jVG`B}{UzyP(+A*G}Sl-Z8WaAdMaD0MeHBF;&mCz*&G`HF4SV)IvL1A*C?Au2f4Oewc+S*R z)>xR}$x#`=#_bd2jQhD@@EipXEagyB%p8$I-S{F4ntu zV(rl1R;l`-@RXhIz55o2*t~U?r^ef?)RTOYM?_?8C+)HQWD`L*%`US$I=3AepGN(+ z7yG&Yv4oAL8q>`MYxm0TED7$#(32aN7Y1k;Zo$mE_xD54hhEKeJz47f^V-6}@1SRe zu!i6N;j{dpd%g9*i&q>R#NS72uFm0`onljLnB@<+W@oH8TvgMcuwM!DvdOMYr~d~- zV|Py;2yND8t2^UkfUZVvbIU2}6_861LR=8e`}zUz?LB!_B61?Dk8!jY!CZ3_JlZht z(q2XhX)>-oOSGRlyK}U!nbuZ4vB_1hD4bM}l&!Xn$8!gq3LDt2f*_GFLq+7|Uc@Wv zFz>p!<<%Yf=G5A*iN_9{1ZG*6hX@SIOLuA?okPiPn|HIqL@~_*a^Tyjo}-1dI)th( zjcqzRhw!@{UpgtyjcTU4l#d-&XWHv(Qd~2W<2JoBDHe6~GvK)Vz3Q2Ju7XEK&CKFv zbNPs{?yfZo?#wx*2tRb+H(K4g{_S8+E|!zpvDU!ME~hqMG)`g+n_9@1$^y>|0?)zOJZngPI2Z5E~U()Xh!yXX$Te8eG@=!t47A^oS(1h)n9Pfgk>?sk3z3+0#L)5Rs z%8dtzUw4i4#Xipz=xM5&k(&=T=W@mmnE(8lWb^e=&!{+=iA(4>gE(l>QSQel_l7eC zmVv->w5&uMq)lXiJ+@~l z+RD-Sey^WCzQbFR<`Q!mfgfZZAC7J{GxIxwWpAJxCMegqeZE92m9t(r!>AZtnN&M_ z2yqsk&VIt$*`j%IMdxSvz2mgz^K15T@t6naxYO6^?K@ksx8qC>ewXDXOXWwwdTK>b z5WLGGwY9B(Oj8NyRMYnhmEo@Wk?^keFth^0N)%P?vb0^WDZ7$R-NavOEc^D9aMn`9 zj(CHbt5dX*n8Btc>wP(AV?*a{%-?YTkr{~ 
zO6eqK04W#j*@e!J>(sxtxY=hpwOf+me>>ZCB3NT-amgR2@JPTY$SDKs3y%?6rfDx9 z{&+LW5o_#KQW3DaiDp5#D0F7ozGiRqRjW2HI&Z0~kM?*QBPyy=21qaJnNw9aD%Z7% zSh%|OnNoP`w&+-go29b!XnWlX~ z_D{YS?Vo#|tlGW2)6zKQ{+;%0;ngg=+2BLkuJ-t0^9!n}?QuzJFS`$;&QQ_gsFGVE z`@(vK%l~((l~%GOQ?IX8x!Sb$2qnx^Y_qN?T}4A%O8=Qo)51Hb@#a&`8>V;xOg552 zmg1*$GW^)wKpV{1$ERp|vm5;McTQ`#A$p!5S+Ty~Uq8fgRof%>-tOz@%4Rj19-Qi$ zoi9F+L#yF`8rEH`uHNmnTcs0qjH%4?VcL?uLhDtS87bPRq*L|cIcsmczl3|q)r8%Bne!$pc7jXWYA zyS0i5UJiH79K*Y)h42vrIoEDUtFys~xe0xXc-IA5lq0)(-k&3)!8a9F77Y7pc_A9x zT4;5{%$_>7sjSdQKrZHD$Ufbky)ed%PURqisPD?K$%18)YbbsrF2kGHDjLW#pN%cA zzu4#KolU~Ks)j4!`~(g!D%es{*(0Ph=W^YCou-1Zc>lP8t5RH!Vt0^=&Srec^odJL zW(AA?w2YjENNvArl(1TVkljozz+`pqGH@3ghW&DUWB1WQR>LQ>&%!#3=lgbbTD~Z? z>~0tzTiB_?<(w$g%7VR|svM>N4w&MmpG8o2{@o!td+A!4>slt!)L34mj(e%kgtC1A zbGh<_sVQ!+oYo%;QwwO~4r5*UKgKiT@9*rJD}dYXD3&hTik+oqD>*JceB`^f^?Gjz z`n;c4EX0hk2}6n{T3Yo)uVkp0<@V=Khggb4jJp&u45@#}Xj)3V<9UF21$|t-)4a*_ zHTn6qQq5$$csss_swe5QSPO3$RU~wF`Ad*-UaZ_zQS55 zqVu+8%HX|=LHTpO z7}zM5C>H|k({)&BY<%>{`#B1K$QtVWmF(iSsg?o5cJcZQC!D7`Jalbye6;7R>p5(Z zr9QY7!46lyUm#%|@LnvWk7pK4el5<|UJAw*SXw^JG{$ZoT`{Vn3rsYSp@!;0C+Pb9r3!eAH;%tgUXE)z-czVa?}l(r9Xf{z=bh)9iLB>u6w^V8)N zNkdYENGZZeh=QT=JZE?beEItJOVCIQ%uA^#n2qc`uI)$L+EXSG406tr*2PhtXD_t` zXk*~W$!Ass)+h_xH{#T3c^xEBS&9aVlNkQ)j`CL+lP5Q-$l;pG^DccDo)6+$Jr$aLcragiN0`cNwdz^7$9p&k(cD& zY!5G~@G_b&;86K`lX=>ciSq(6}2qE!e2_HzmPe4PkvMchF{5%ml7BymHtW=!e27cJz^hPK-KY9qDuOHNK#UfnW3 zILy+^yMO#@Q@|xrOQCWY6oJ?l$^>7;rPZOE}_U z?p3aBX;9im^NFoF_3Nm~Pije5U(`#4$KS7zg!Hz7s98F*(5Do|9Dl3llqdku5FUJf zQa^NJlDs>Z_w4m|>XAmnNLk2b;dD%h*ZPF~u!(NObi@iQo#$b}@7ZzNva+B#d~ zc8#iAY}y8V-oeF9kwX6lMY0seON+3Rn<~XUEqS80*LX?HbF&6v(IBH#o9$}X556>$ zPMPm@dOnrRpR#dFJ~3S-mT%|{z0dV3bsEv=uv+%I?*8$iaI_|LqB-~LuH1pDz{7Aa zB@8HqHL%)qRIRR|&R4sfRp}R;`n^>VQ|jn3WuJ!FbisM+fW?oi>7E@~u@gbHMz#&} zUuzcZqjyM3lP$ztQ0?>dI@F3lZm?XRJ4e#)9U)R3B{+*#fO)?H_|XPI$IdL=H!mDG zzO4@)Pp9eh7T$O_;pILy?T%A-E|s7ZhuQiqYS|ilU9^olT-ctY(XAq0`^{ z+I;)hU9)W6rZBUcknqT;b1LhslQ#*{2v+fs$*8C8vY=8&7G{kwj!SFE%wpfGanHXv 
zL$F<0%W{ctu2n;pk!zPW6otYav0e{3f>xsKI3{XG)latAJ6cEy$&W@%MhF9y z|9HZEb#%eZQOWGo2SFn}OV>4+_cX{|{nqPZRqn2_iALhHaDA3dbMDYY*T{7)@waIG z$V)Q?r0d##i^y>7kg~6{c>PkNFU!_2jqM|#?a}Gp>v;cJBp+s~Dmk{gJbAh#v0#QI zylGw|aF2#kX=$}?h`qVY#v&!}ZX?_DCbkCYz^s;%j-hOVGX=+R zlzD+L5QKkWUh>%A)#8E~lr5n9@BtFZ_sERy(-a^besJ}qsNSmaBl>AS&N6>x~!RMUZg#P8Zy_z*%1*qScy*DTZk7_V$Be@*@Q<| z`}D**o8KVsFxp#G;p6EPHz&ra_vAK7Us$jn9dKoI;oJo_Ew&F+g#vXhkrZ^0qEK>I z{Q5Kmp|CVqhEPi`r`A^4JOOc6@ugV4pZOS|SC(C4<)6^bXoLfoQ}yVf`d)ajIip|5 zN_MB(ov&HB_>CIN@a}*Srk~@t!4Br&2tGYPFinGNG}M&YTA3GNd^eg% z!fVRamEop?^H+VtVe>3to5hpk(bilCWuT;pM8A|NlT@Q*ryC{n6KrAv#hRjAzqKa` z;%Gs9JdHN}rQq0<&al5>l1r>(;rK<9BL1GdreIUqZQTfd{`JG#0iG?+wCA=kq&%W! z>W{6Pr<)`(0&WaTqRDV!Vgy-1zrXIv#LXNa251a1HW8P&7*M6!yP}fa$X;GpKZ}Di z0*6kQ9P`(1sm|@rk}ci&u#v30rp$KfdN^Z8g-Xf**|Iw@YryGQl}aJ25YmG688*|I zf;N)P@u{SXy$4moimfd~a+TvN9LnP|I2ur226sezT%SfsxT zcI52J_cEjiUABzi;Q|O23}1O*$R)QitRG0gQbidZ!OysxZ;OO`f>^N~C|d)!j9T$* zx;LzjM=B&qM9oI3wUDJ$#n)R+#n1LkUCXmaXEFWA6{NgmTme zH{q@Hq7s%-k_%I;Kq^ro_GIyI$rchdQ;2353bV)PWi-F=327>GB)7#d!C>;ezVARDjFo)&A?nu7~Tv! z8+fmu*;svGN{Z;X9bcFt;tasNeY&bnGbxG%h?CKpP=Hn2bCtypbKrDN@0ChFWfMmuAmv`=(<33Xp?VApPThLm{p*xRK3;MY<>S{fn zX*N{KJx_ci+Se)t<#Q5Rn3Sx%8mcd^v|@T+0NTrH3f7Cs(y%@aE;kCpUNKUSXYi9? 
z(aqH^-XVzEU)s&Ex+j>l^!oE}^p&C=Pp7DR&sO#X-quO|EGMhKDud6|MT~srr%eI^ z?kKD~&&9rkAS=>AktxRJePpm~G=b1R0CDMn8?qIRLFG)lGKa*0Y{wZ{FynrcQj;H4 zfG8`?jS{ifKZfZC!EQqYQPCXi%gG}1MIT6QQ@Z$!?{?T5Rhx<~T6R;gx2(vryS}a+ zGMmY<6TqOBAMkd_T>^4@?$R7G3NiKuy{zYMV1H|hg21{}JfZi>79VX!6B4~|!45Ycs`1P_O;##YnRPkYmsBn(uOJ;dB6CV{m;5r=b!>EL@Agu*&r?nePUw!NNI9F1+n%X0N&@uRT zJd*S$xMhg^E2T@NUbXe3Do0!Kz}8l#Mb7z$Fgty)uE(9K%PzkCS7MxST#$+ zS6Zdr-0F|5QCWCV31{irnfYr$6im`~tW!n1mffLa-`;N~czU?b1WG{9e6@@$o@3?9CcnWxekpWdl$0k0fT@40ADF2$%?;lYAtdkfZ==9#WW**-FD@iUn z9C4GhUNgkIoa+6W?QVJZSQ0e{m_pJCVMF2Y3kUl&tNrASzC}T##H>bYIzWCvBK7Ja z65#TV_bqCWny;s^Wh(rI(0Md)3!9ZPR>2LG$xn(Newx}!F0|T&`&tntB|gToF|Ey% zrJ~dg%jBokaoHdyZJ$DDHyH(s`i|Iej%hb9@Klq*H@Ix=Z4_;D@kf|~UFPXaI&I7a zLq45paB?w0lx~UlgZJi^Vmi1n%LLCf+oy*WvNeS>ik>}YRM4eHQCq-}fVYc?pQjf3 z6OmIfK&DVsAY0<3s`!r}dj#H5S=pB1bz93WvB}TCWe3@e@0bxILX375l|eLWeOueQ zXW?d+$%PmdLeUklXszK8J>SPUh~AJkZJvOmE?=#IxEgI(g%!4Il1U3_Zzo0lCfSA8Ya`yXxe2=c zNUmFp^(We9483djurwrW?7f>+wXCA~Ua(;RP~6i_{n^SSl;xbOX1G{-na36LN-vb? z>dJ?*T~~N}Rq>3ROMIQe&%T%i8pg*|@l(4)RbJHYIEAVvIqAuVSwD6;S=u19ETmP+ zrTDaE6{;Fmdf{qjsb8Fi0^UOxFXd?e;H9hApvCp#t-3lUmsV3~MVjdg?W8XE!;6c6 za|+IlGB&cY(Ue)gqLQ|P89rz1tA*<9U!V}jDXiOimZNE4uzHGF9|N#^O%$q-ah(8m zv5WN-_mn)c(|ZRpW^#{$p#&2OkM8uSUoV(Z66QB=)jS&h#%?JM+*tcHa@QThrTaZG zeW8YPecOgGOM#*JmR!96B@6_}rBKyJbvrS5*NV36Oh3o{bzKN-tW@ZOA^ro0fWHjI z98~?);#Ml-(A;uF4zNC()te2R-KmgCo+mO` zY_IBaw~fnVS-jy4R|fa)PR*s8OEgdg@Hv=-_%Zf++wAhse4OcL_GGBe3W@~aFe&VN zAf1so3Yoww1SdkOM9$?_CN5by4ao|%+;eDX*3AW8Lo0Mfd zlcP)zO8{!_G+8d1L=}~Wlz}3Mst}VNqF~A(Rc6F`3fToBOfp`0E9Ll~i| zmG$lHkDc5dNX_L&qI8M!Qaf#He07*4e{c|qQVb0g2k>grxCYswXrFjV*y-)LrqtS` z`BHFbgMx>TDt$|yK%J_~_W|!GR@FI+eH0lUh}B&L#LDLBM8VtdgUHRFBmf6GU*403 zS2}O$_`!}(w`00=7#7sm{OFHuev&VcyT+M8y6b+s9H%FHaPoyymXr0DK25Fde^+__ zgD{khRoqsaPe{iVwnMs@X7_fa4yD)6bQA2LAl0F$=BZpWIc@t#HmlZNl~c|sn@%X@ zXxuq51Mz2{$D9z-@Qg2G{57zt#L5(zW~Yp}D31#WnC+4tg5$ zOsD5#;DAUbN6`RGV8@W<#|fP4OWBc+1J9_Ouf=;e8yBl>b-t6%50$^dyDg1u+{XkD zS>Ve08N#2+REDAIzsnps7EMGubxaGzRl~*v$wFJDpdcV-0KyC!Xn_)(0sP(B?)PNc 
z-m5BSXu(Y0w6+}1u6fkPPHnEDOKBeJw5LO~E3EeTgvMm6YvFep4((la4VP zV=gg~da84;QoZ?gSCWot%92#Rm=qMSi;6VWxXvm*V@A+95xrKk0NF>*Vq;>kR4}SS z%n;I{oGL+LYwhAM$j=HE`9F%H9qyIr2Jj79u|T6K&ank-N!Uash~=S*R#Gev&vs=2 znlnZAr5b_IYfZamZ~)a0m*v&JGTi^N+6GeI`%`llDFOSxN2)2XSLWSPyx!XKVt5vdO{7PTQ- z@jDcH+0-q=WMrr2_LBI;t&Y^Bp$F2_w15(hD2d)Y1q^KZ|MbQEn77K@QHXyD48laOTS^hxk8M2Y!~UdgI}Z8doN-nF#`u zq;ft!!y8pFGk`QF%VYj+UE&ta$pd`$H*f z3_fWp+g?24?^lazV)pvb=Cvju1~H`#WI+LM&q384<4|~xWF#3#08C4%I(!&r_0(+q zRj6}gI#(IMDQill;!tfdP5T2tG|}Nz(W^PZ?3MDH1{=qv>{c*AG~wnmr|W$8huWb` z(a}*uu=DmD?QQ5=RDxiUi{fI(yYf&Hk@;M@>H7&s&yu_n&{M6nhaB32Ap?`^a4&3T z>O6I4ZW3~JY^>|Hdz_4x6%bu(@h5Tf)siR%*iA=G4<1Q0O_!f5VJQ>uZ1;#@1sA#kVD@AKQdr=$7 zhTN4{-yVvy+;{btWpR}~mSZH9N9MuJ@Ii<1UCTDCAkP2#HU(xGe1oT_oRWhOG`BE% zN46ksfgo_9YIiFzi~0KM+t*TF?Shvla(s`!j=0~h+t3@b4Mlm=;465)2 zs>b^XOxCSCh?)dGkUfwlNf99(U*GFbku7UOU`k61nCq0<7G;!VFwTRU&(w>P3uYQ1 zYJ$7xQtuWBy^)=cah;BZnou9U@7}4ov*arXqY^X7sn^yKe;D1MsHykHAvD4I0x|~& zKXFk|@TLX!Us0H3oTC&d_$Nzfp9HjVp-poFvV}5GOz9y_g!pFupKiU zenwSaUy(kf_h8>*L%L1#X{r%?X;n^dRmq8$H0h3nU~~o`D@5I)qV5zBb)g*Oxd&*w zJftwNHy79zFpm2_;>sS+KhCcHulN$Ws^%?-Q%W)ZL!_oQN|vE{JXnI`5`9Mf7So(XN)grm(-IPPIC>biKenRMRP;e$n?#kkJ)l7qG0`>4 zxrV~UHEV}?>_{-eFout&>A}d20GICryp=w_RsiG(5-K<`$ed*rd3Z%2f8lFQ&Jp7& z=ceeBl2n&iv~=xTuSfFe+VB7`rM}0sQ@{Q++LCa{X}~a7vi@?Lvh_S@+X)a)rkikY z+atGuslLp02WhSV8O6`*{DhF(T)3cRp-QQuVYYl>eHwH#TzmCYl3om^dH@u>v@!@i zn_xm}I&Xpj58UEezXx^`uk7dW?^UG}m!6foJU&7?jhw$16U6i(Z2yC(O`-a!|R zm2pJAcc~f%ry*ruYqW*!&-g_7I<5gPoEF2j>-S|G*k zolQ1qIw}Mid9u=eUDBuL5v4W`Oh7Rvhl?C694@Sz0R&%n& zmOCi#7gZA2WPFDH_l#1RI+1@rp@U6MNC@O$KZ6oyT9k5P+C;uFA$P>jgQAw?ZN}32 zCFSm8O@6Y0Yqc9xP)$KFj@=t8RxthazVAzTH|)h0_}|2s8meuBBa*%yTYR1mwmPFkznJ@iw$sP_tF+&v{ANU)uQ_YERUwc)^2=2$a< ziDXVYItPGc2Sj8-imL=f-Wz_JzA%{Kyb{xJookbKktSFDM}~6AybPqwI^n#vS|=>_ zudAE6^|RW8m#J*Q57X~0JZ7)sK{b^xfz0|Bc2Gy$8<5x8hsyxwMrFH{O%%6pW(+4p zK%fzo!JSkKZEWA`LWo(+d(chHJrF7gZ;oKS3rI(_qlC8fzw!ntG{$gN;BC)e%3M#r z6=jreXbi=Nzc)1ib8#;u?GIu8xXlw|zn5`VlH{{}0W_l0wqr 
zc`&wX_K%FRV}jB&QCrW1oL19coR9>VWx7L}trOHuOXiRkKY;b}fVOP6AAblHAraz7 zFv!Zl)s1fXcqLN9NUOV&WxjtuS0>S&msr}f_}HkeGX>~n-N6(v6>2@#<9Z)z-N?Gh z8U#$>F%qWG?$0aVN6SOo4}v(YidwW% z-|644hBph`m?Uff|AAscs|-);i7ZFsYnEI7$sZz3o7OT8FJW8V>r<@1vWb{WMUPsn z0at+!BInt@JkWcXXFFa2q#NY?x*Zm2-H6qBR29nrPN`(im$@h}jV6Ztm>sT1Vp}`j zBg=W>LPCJ7gO)9!7qlg-Y80jJoenVN>Z(+^4vyAP>JJF6kwCt@iGS$K_=WDZL=I-; z<(YdMuK6rLh(q8yooMVSe|!w2hcrDzrGu_wX!{~moqW~tpJ@KAv#u$8k*oFVPr4=V zPwBdmRgyAP(1HAQ?P}*}4I9_vEJX?P3c2>WNEkz(i_O3BGoN2IkiR=3gBX@<>-l3! zI$u6KpJCbce}#zxV%XNLM&BvsU_*nezr5>ls~eeaY~v3& zijRQ4{qzFfHKWypvE4N|75$dXu;98t&X8-e@kfXET}`F-XS%tj{yW$tRAGlE0?-#A z0R&k<2{d#_gpeTfsZb{U5fpTqvkS-8v+K_%@O}+07NPACaQo+=ZoybMZX;;pxArnn z1Uzou%;`>N4suoADp3Fai#PsJ?+QCWg(}a6TKQjSY;%60TuFyv;%gOJcQm&$>LR&@L zNJkFORWc6IN(}@xj`$wmc;VMf zKUtV7tq@eRwZ#T)X6vJ@)F`eU@6NJg7pMPVoaOw!L>X|T;AWv{tTK*LOckSwbSvGsC# zy0w-grULa~e}$%uzq)+)*boD4`eU6xzZipRd!Z8qM&RCK)zLO>elBM&Oi`-DG%+XP zRJ!kDY_70sgI`gxg|s}RCQ_k%_OiasFV`)vv$2Hq%l+w#>+&4QmTS0@Gts!NfVc$# zh2OTmzVte>u(X6+muv_cS%Hgwq!pBb1I1SS43DbE|E+inn;S{*&q=o~E0dv|-fXqW3hw3NfBJS!*W^UTs{n#!>EoUrX1g=MLb=s2xH{7$&z>wv7dxI)khKvG6OFrYIY-ydW&-XL_8T=cW+zXb}B4 z6vk|i37wmu=)i;CSL)H;=RjEQL{i-Ezq+>-t%t=ogCtUw0WnCud(5EXW{dXal-aUS zMEi94>l~fbK=h`{#I`F8f71}J3Ead0vs}Iur9j?O4ye}9BUB^>01LM<*dBCLmDqqe zgzqu6>#GYp(zu?)sB$#N7Z+LjZ;BA%``&DUGBPSoyaitUPv&RZ7%AbkL{l z7}lq$N!hs7DX#APWR1wspTAeXo|`31vu8F;XkXZIl;t^drm&6v_}prM=~K zuprL2l2zxlLmv~j`FyBiQ{7q6+Eo#)Xp{vi=2b4|GOGt-i6(kv zhk6e6Ura6}{B#d6R$BlprF^(Kr_gmj5X zkS&V^?J`NbmL!N56IGd@*$9V>e!JX&?hairfs0Vo6u6lVZkO<1xPKa;@bg`m1rg$j zHrkVH!vl~CkmQwy+Pt&@;jlm-dB4YoIH}w_$7B;*qqd+a4GUsWA;IZZX#E`7y~Yrj zVg?vcMifR7*zm408nul07@P{=@9Q1J9w>M~*=~wGrqV{_wHvklP%n=e;dxHxJgpKz zHCljo?Q~iK-f)Kx9uA)pABQ3u9n946kgFXW2 zs)y>b%^YK0q1C|+sT%$|16)xW;(7jZrm{wP0A^`H>XS+~Q`kA=si&do&~<8IEs={- z4xW;1E@J@?AHdbw;n?dGVoMqETG{hRN9+(?^j*q0TSd~i-qar+^%w1F{r>zZc&*mzHQQ7*811`SuRg>$J% zN0oi{#Qn)^wFOGJe~z|U5xv$2c};$1SKW$ZZvz?F7F-X`B;v@9WRPnZ;iQti$z#~h z{Mu7gr}%@HrF^ah1ECxEMF4f}P+>n)M;^{k=$t^>TUo^=ZBq}_OKWw~gBh0f@7mDF>ABay 
zyi0~(HI`%egiYkSQe(xnjPyZDfQT`t#gV2GTIrQ3N#Dp3dA!$Pj=M+atmoyOxe?9m zmELj8DG-}yODxx3qA@_--ExYjfhp90c^ z3hC`XTls9;B5>>MZqR%Kxk$LD3Jh*~eA}LVuo5*~6x5D)&hG$aDhrwWgM0o&Si(c+ z$M12ev4ZC(MYtq1yUywrg^rCw+yWVG=}6tW5Q%4?(B$LtoUwNVvQt7Z(rRIcbXr8h z_DcrKpK)W=vqQNmdPmuFw_Hg3Fc!>VA{4qkXzeOetCwB9q`+9dExGBImWpxldpuT$c!Sn|_)LOD*jIt*UrD)HSMoT(*a+ zbEgcHfze{9j>ZI-{jlEClVwcd^Jx38f+t%f{E!rd568kKt4Sq@@5W=>gr#64M!V-p zJYUG-f!R!u!o7ruvm4Fqbx?C$QzG@WLMht(U(vpd-axdXv=)M1pnwB!1mi|9^J6uA zs*BH7+vVcJc(=ZeQHpkoJF#SF3&J!kO|VGi%u_3<_|2%=90B(Q_BVCZNN6X zP?Q0sb9(r7E{w!bwI1G03ucFpeO`r8MSm=*CP-Yo7!Q~HTQDVmah=;Rtwuc1ZKVv< zmec@-EX-?K*v)m}l^Y_)sG2@3kjr~1qb;%e=0;OrRJ5i~lR=ykRLzRUX0z=ZBK6{U zwJ>mp1pBw_^9BtGY2!B%^^uWX%_0W4q{ml$8Eg^q;$H>%UL@ozN=?maK^hpbeTLbB z2vt|Vjy|4-Rs@cK3m?%c? zUccDs*h`#jq-7HU+27S2VDSU?hzD~}^xJ1Ex9q~GkCS-`sxr)82?LjG?nOq;-<8k3 zLpfej*1@XYGK%TYs2bW4=Lvi-*O)v|McSJ(Uo~18PK-;icMIpnMWq2(gUu;XBl}1A zV`5xkPdPcfBjDU3&DHOfmRs$F36b66<(PCncKg(MfT2T-n1y$^TB3IbG_2w1oxJ&xKVx;IQ6 za9?!14(pSkUFz=m&F|!hhPiOf`9|8wc20`RA}&r46lu7c{qqrLxtGYiw%HB?+c&sd z+o8qI70EmVmCIin6CB`&S0Tid+^Zrh_paI{N*>q`qq5Syh_($*KL^KG@pWdj( zJxUCy-pW3t38 zeZRlIem}Xox-NHZ_kF+b`+dI;ulM0@=u-&#(Ly0S1u5%jM*($d=LY)oh~xmO(SH*u zKC@(~-Zz05Knz0m$|}?fuNJZJP-W#pZaW~L!Ul7gKzTvd&G|-Y{y@*XlM$T~H3sLW zaC_%~l#~z|P<4?S+#c{FJO)U);kKres)*vD>;z%GS~1F?O5fKA>YIRcH|4*eeVo0G zVU0Nhv2hM~e>ZYZJ{(AL?_;pd-Ck~ya4o)#XJf|suUF^0_lLmJONzs@kq1ky(IOqG zF{YKn-5F3#OWw8B{HDAJF(|sE(ZDrTSxOby!~C+l|+U**fV zKwdIp(3x91iuW*!41+G7Eg0nVs%2Cm%%Pf{g}q1a*(``%W~HjTQmH@XJA&1X!hAzA z-lZPjE+lifrVkr4DA8~)dQmnz(clkpE#f`!>TNG!4dhAY#%GDu!60UQrM|#?s zlXjexItW3N$oS<%3*Vn4xHaU?jpsvU7*@W3cs4{q+~!nX=1N&GOR^jr9ag36;sls_ zlqEod`kFds?uc-)vJ1>97;E3tPFSu~Y1+Z@fk0&qLLLC)wZ`N#!Ui_^p`C=^0X1HT z4}&>GCJ+-xu($!2HP<6GT@Cdllq=U;$N&pp$F9XE)9}7rctbhGD>NbGayjahD0am# zq6ozw+C;h_U;xNIYfQ{l-Pz>ni=%2t;CXE2>9B0a!D#r$4B;|q${EI`@VCmdO-@EuoJ!IdyOJB#G;}E6gL11s_vUf)$yGqkDBdYHWpq>(6&Fib z9lOo~Hb$`W6Jj?;le1nqG}^*ziYEXGBhEH=Cm@ALlLwUMlQzT=fMB7bEU&4$T|AVT zAS7wDqSJDp(Bc_23{f=ETo;9_R@33VZAgJWtf_)RB8#~p#``Y;HX(VZi<2;ngN`Vf 
z!IPr-DHkdi%tGgK_^ z;OG=PJ#e^x(wj2yW^860q!GusRdaeHER8dxa+(Yq^n_VLJ$XQ03lM#p2ve^;1{D2s zYnftLU^^y|U}T>P#PRlKE%EoIUzpB~8;*|3dIcG$N}=51OTM`1X>4NjlSdRU+BEm*sIL zKF(H>3zj3`YpZ9ns3AZcPSTk<*^!`vt&rXb8CrX6V9G=yA=0CrjQ*1xlJmtI<<%iT znuIdE2@d#FH6ETV?msyXHEnVjtpW7CC!+Ku(9NUN&H648D4}f0XH|fKN-#8&mCr00 zs;zU5%GeMEWBDNa*ecYx@9mqDE7Sv?BIj~aa}tLYD1x5@5P_(6OJ#gDSMFo1OiT## zZ9U8Jz?!Rrtl#HjF$s#bg?RW7_ChkMHHp01$GJikH)`9^*|Tz6sZi&ufw18)h9@j7 zWb&rDjch)3hntFq`*j6Shh~_7V<9y~_C>wHByfK;@cR!RPXdab%Vh84Qu)YufP;WC z2Te=@zfPt(L(RsOJG3?o+#w~ql-j~_!E0+%4Wj0%^xK~3d>7ewg9JDj*|&QDei^E` zb7`^yu$@j}vFc>8<5}^o0dsa+s}N{AGA1RKTxh!2kaeS+!V&p~0HGEWkWT*H?nbC^ zfe}L6={1|(kffabERXu>2)w7NBHz~DYCkP+uxo0Nb{E%O7Q(azki-r78B5^e&yj2N zSJ<&}R5HnW$%!#`()c-$qct??MHygw(0Mbl#O(3aTT)Qv9(x7}QE9AC_` zrV#1>i|5ZJa#&xw7a_Y67T@d-Xp!O0Ok z_GTzfT7rtcQJxde_ozlpc1D-hvg7xwe0(>S1I)2SvNn(JhFd&B{ERPYClQb#6W~X!`cBkLo~l z-whzczdbu!{4w}}-027uWooJpU&G~<94I7)b;a>#H@`dPzrorZwPRM@>?SUtr(P0eCvTAQ?m$j67C% z){~wW)6}350vVwB;1+$4fl$E_mSwF5lM~i}NuI|MG@vPpEu2XLTulJykRal!;W^`X zuqD?{Tgy}~p0SX4Y|TEH@Kg{zZ4w=^h7S!cz?HX26DX2K*{GWpHP zjB%8@UVdPO%u6`sJHly1oNY&Ol&EP7tfEv2MoQ6H1)6Abn5_^P+-q3Nw}9c~j8IO; z43RbzqKa@&Aq;S&j5mSK2Vo`T2pO`M=G)bz>{55P>Px!q;k??@KzBM%#)T+=J!DyH zfEGX!AVJk<_{~Gs0be+`*WVL}2-Qz!XjUHZ5?@HRfCZ>GWVffISe`QCa@CE=m{Uza zcRV?Q+vzV^87AA@g%>p6Nv|EeLyC~lt62FFK+}Y5=G0bDL?0mMb>)YBVj)e88m0R791qXb0PRP1wRLzau}g%5yTKW%csHJRn6VLY+VdFv7w^j{J^$9}5RIfBVom+)|nvdBgje*y?mM^cUP4S=_NF}!XjJQO=4$sJsVm9bkSBmfjZ zqySk;=~)RNLa%RcK}&q>6(2wP5j7P*(?(#rw z;w~gwsK8yp3D{GsdTE7OzUwvDraYPeoZHb=h!1mbbrsH@Xo7mlBcP1FegN$JoD#&z zg!QIG0!i@a?^sg#wTA+78VTnmXTdy)KWh@%){#)sxKH7=@#2~?M3^5goRwWA0;*rL zlvQQg)=wIxGZKG_r*O3}C(argFWZb<6}GhngdF)I{aUj+Vq*p!g{`Z7HdfwgPNl)S4#mVvXUbD4g1i#Px>Sc2qIZk=V4p|F zSa%^0m$qvo7ty!XQ9j`FIw6V+@udU`ov$jpqB+VW>EX$h*cu$)UvoUZBsFKl zq-R%CSScPznpF9>;IT6mO)UsmPfH8$qgIP|~MWa!?XBwIi2S}Tcqo-VD zC!6+}Qu{h5{C_@4k#!#c%yMTJX4T;#)IOwfgNt#})d=Tv#qWh`nypF&6SyH230b^$ z%RN`)TSEKdc+8G;Wbk+BePF3EN@@t<-K~ofDkAYs6E0(L=4OS+-V;z_>g)sp&pO;< 
z-)de7DK?*>&U&}E+b?H^8%U_RNr`bBAqH&2-5I?6Xd+?Ei&+4(b(EdS!Y8JH@qpcp zkZXJMDYPWv{6I}+vwLjUYAe$)aySYs6m_n(qP3O~(UCJPvFajaHqVgi3SSB-z863Q z)!gEuBY_kiejRjXpFLXlLd0W3eT=_pZMO$sSZF@` z_Q!^wn$hpoj$N~Vd^%M9w4bLfwdmGD>;w4qtrgi5?nFz)(nmAdMx5_?*TUGHWy?LY z^`)NwPl!GG8&7(F8hHMf6>+~A@Lg-{#r(aGzIXwlKZemA`_U=`DT@zxUwj?HjCOwf z`=fi}$Nhk48IbB~TiCr_LPLy4c=55V<%_oqNRFED+^G+LLsO~@x!KkhXP$iYHQV=R8^?^UhW(Ge zw(EFI9NZLRSEScLzIx}sBYt<@xlIi@KJDlB-|xN*bNmO;0)cfX(jNJE8}EBXy*ZIB z3c9YjIDilM-|HS5=E2mrpMCuQr9BBSJl2Q1CPH(p(Yd^d@`%gAvdK8`!)9d7q8r@$7wH$M)S-J43Df>~o`@8o150?kOvhXR|?@`nH`^RS16+7D=0I;*8 zpMBK&UiYEqMDMbNHu_(RhaNS+H?2eVj}{%G#E?M=c7eA)o zu_gD=!l(Czk2>;y35k2QTsymGyKCc%XUwp0JO027S8>+47z;D_D#gUJ8e+~`?;o1J=9UG6(lNVo8ah4SKOH-2}!bV|Hcg|WPpTR}Gq>1H3R)%IJfiEtIx@#0makM)<) zauww3-3!+`_h0rQ-{}=rGkwhp6R6~+$iC;BtV)RGn8!6x$;-gMTw}i;*u1f%SbPn* z`d95^bYE)cE(GV=mf-$i3D6S{RuKFP_0)jaJSsMq6kGKWs}y40l%BBr=gk`vS1pTM z7_5nIHPWrCmRWT;tBz&8DRhlpKW4Ov1l=!39lI;sQ9n^SpRh(nQ(VD1T)D?~=II~e zcc1WZ14eA~*KJQ&wQQ-FwXiVI{9S6ny=9_7FFO%aZ?7Pou`Z2UkbPJ8<)&|tyqwpz zKpj9&$zj_#z}p?0nWqbNXq!+(>wZ|QnAnBCpVs``{pcY~E_uR)d+X$aG^_auxoK5Q ze{KA4NO84CpNQz&(WAAAx%<}_gLBa_+89vFol-fkX%&t5g4ifc&!VzUY&_ z6>~O<7YJA-Ss3RiD6ZUNsnTo{^rrjU?M2LrfL}%rEV@fQW4)io)PuHfT#(TI|IvnS zrk#K;>>Mz0|H>7%aU4x=wU!hWDr}z%P>pwRysEE7o zbd$eR{>T3TCjbQWbpR*uGw{wzW&G|P1rVF=fE4_E^v{^0#C@E-zy!d=oBLj{ZNy?o zzss|ue|GfGZvC@c|4iUN6ZntWBhPgGGhP48%zOTsnfJ`Xe`eu7bMBrwchCI#XMX)N zNaz1okPc40m})gx&6tHyPTD^M;!z;GzX3>bLGvx{P76PtT#Rp_+1vdMEKX*h&#x$3 z2(hS+21ke&fB6V1{ok&MZQ(P_g^1mG{k6JA#iIBBIt<4!9F5k*FHC&Pbax+D``jZ0 z@{n0ng_>OmF5X+FjFutae2Bc+2#xv26V`fMjZD#W2HV1r=YxBYO^T5F51_(u%+E~p zOA8UrLHpnAD_7CPmoB^>`dz?MMLR9*+5>De*1X-_xZ!=>fou8pW_y7Rq*Q-$tz$rJx93KS28z#LTWQZJ#Hh={F2N;-t z{8IFHWscEx%-VHqF@(na+Y|o>_!aAK|7xfII6FYM)c)xG12UuCqsS8xojw?#UEr_z zqFjQS`mcv^6`L$~e{Mkv>dnh@{B=uJ#S^B97|R5Rc^p%0idl7!OtZf-!%G(TvB3SR zfQLbkzak6i=_K`)tG7xPzX+tub(5=-QjFN_Onj=mIGk0c?^9SY>M+cyp@w(MS;&W> zoG)-uwzbUnJZ+7rS||HMJ8JEh^nCaV6x4NEk2$-(+RDc^#<0*hGe_ob8FRmtH{;dv 
z3Jvx!ey6>&!A5xS%u8dI>=D?|te!!SQbK&+(EcKfSKmh3`%mIW$uXpiN%1?_NO8m6 z2k|zd<%bIM>u=e9S|ujmu>9L|uL9(YwQNsFngs1{(c=-B=6o$w0XjeA$Pi83AAJ~0 z>tMLqev^L)e_I7fY)hM@n%VL0h*2w6y{1x8r?^0PqN-6;pFxGcHEoWO-;DOVm5#h$ zn{n5Qf z1(QXJBNS=4?;wM=w73@F|3GXHc$|vl0>vk^j4iX)3j;6aV3qixjO&8;fIjDPd!=m8%AQCBd+ zGuLoFTi(*=u(Uzjc zkn*fC?W3Tz<3cvgy&^FG6lO)DRDR8#X3Kr~67;z2KpkyzH?vG1e!SH>C3JM@C#_q} z5942Vl0h;<7~}PeAs=YEri55~^B;DgeDr0!I_FrhzwWRmzy#!&J;`)4z6iry3a?U5 zSW{_i@5>h6jdLRFe_E0;gplFf`$sGPFlt+tJBkaIHpMp<8>gvSg?2yljF)xKQ$}CT zw@!zKnv`Sxp!j^pPk{C^kgA@pLDThtRq`%Z1o#B|X0$jB)=nptx3=SuSm42(sh5mr zQ+~hg0j?Cn0Zs;?D@>#Ph8j3K0dL6*^dzVBJcVC-xgRcyP%b`I28l}@#Ej!SmC(vy z^KoOPUWGs?rp=y+DZ%l_z%qq3iy@sPWoU!l8gK%2w%xKXuj0UMDaowKtGc zPa$(7)cVma)mwesqc1?GBPLThpsaI1Elw-qvW6!Dop0f}^ri^odp*R0=6>K&ps?Qy z(BT&2&5*Y(Xb%8_AazPI%lIj+7$gze~rXe*z`M?=zJVGg2b%o*p-E` z4p7aPr0Ou(lXZZ%On-fM=tXz{v0G!m0cL+--v4=zT_n?`9ETS9o%|op8DjUc=IVKl zP2i0t$3M&??21A0tW%$vI;#qrROgnvXjZKIVzkmY5@3~_*1OuAtqz8PMMMtIUIVhFL zIG+>#Wa847*{1pE^ULk(yHMOr7TdvoLmV?|x(s0yfxfSY>?k#De+O-~%IFpu#?U}F zG#rVRsrxS$>8O9Wd)^YXb`@uiY@vN-b{KEYntjT#iIZj9v6sw8KUN<*bqT8JN|f1q zdZr?zqU&plr|oxm3IrB4>Ptty$qD_4HkpZHjryUWS5QUTen`uBzO6lIrGb zT#R?+SpOBVWe!?1ZNJmA3fSTHCYVZUT*ycKvhFutBK~L5KCCrO!DG-7qaaq?aJ*Jh zF~P8|X+IQIp43v$1DY$Vf5csRqW4J`r^;{}`HNQZGxul~mxlsk>gHky$?_Jbb*MN! 
z_kL|8qf9<-d?>blJMR^>_N$tDBnx+C2;Rp-?SIcV| z|4~#t$VvL`te=c9L|ql4<-@aPhxOGl*@S#q`Wqogt)`V%LL#GDpQg^+FsBP4=pK-B z3*@sNy4<|3&LH_1Xn%>?Pfd{FykVI0H!kAxtyAAov+upi$%bR@)&r2neiC*AJ?4S* z{Eb&TpL7`FH_HpNNA$2rvrOt z3Iwc|h-B-1;L6^(2&+$}HcN4CFw6*FNQ;JuscY=E6eRBwA!H}rSfp+x%> zJ>XsFZh+g{m28>2a+N=EHcxku#Xb5?PQYu*%ZnkiBY^?#zq4)oo`S|G*7RKe2Ncv- z$U~T8#Fx%Qh-y6T(Wyy^Brayh;RbtT6nD3{!LbY=dxf6}VCcC^6}U3l!(mSbyy(p<$NP;qi} z`s>CSf@PXu$>Gz&9X0o_GAIu(|6dr90vK!WIOc{ERJYcEz6k#>-u4^bdK!kMs76VS zg5wh8RSNV{ur`(N>iQLX97!gEpL+}FkjfA#%mH%opgncg*< z1321Nt43OD{|WGTrVGr7xOj(_x(ccGzx~BTH*Yq4C`vdN_qfg~e99#OX$z0-^`BQ_ zA|IVDWtZl}b`y-tU2(U;y%VlVN|COyo(O}2erMWv%x=Nj<5FLF*o-3%%*@|Lq6%o()l`CDJ z%wAVNY{Q&4xLReQ^j?F`R1SGR{&6?wF*KR7%NVZ~8NcxuHDs~($h%3j19WHOoVo1S zn*kR_R~$5pnREw^s1=tmN@h)tX1KHx+uA=};41y{CvW-PbppugQp;CP7g?66dDDq|#k5nt*sedK2O%?XLB*`!reBGIOlb z2nXX6Cjr0gooTNl0}oF9Y4k7Jk2ybm%6zSXMS?b^_6W5ORUApyYPVW&xQnR2u z{Rx1NxPkuCh^TFJQ`&Ujl76RV+V%rQ6MepnIW9AX05zGWveg~24)w8l9CUN}3acfF z2;#{ycVsnUv*YNgrPVre{K)(zY#67%8=iy{_6NvDX-s0F1uCy#*(q zARzh_H1m3Skg#lQqprV{;fZ-=<(smsOd)o}G&Dj54CN{+p|$$Xt}sU)788#p+q@TBPKdH)yINUzzzCzPAu5$%+fbJ?l1r5 zc4(qi{pM?G*|{>~WD0R$l`4Wcdx0d(?mdD_=8>Z+vZJ8b;X{DGaxoQyOOC-i;GRy& zr0B9~^I^#qYUQ@bk)LPSZnFNMNk{$?F8k1EK6#U>DC>@2xo7(8a8*p#bjK`TU~yt> zu4Yx{YcVrGJ7lCBAra}OfX8Tvbzpd4Uy4yMbu;U1yyi3_%B%bX%B=Vb9r|=aM0fke z&o8icSgHbr9SIhFH@Yv1>tPXV;P|#2+*XR!|Jn>^E+l}p=dG2S>NS{07FYb5VVNLI&~-SX&~YNeo`>N@Ir{{4+pEE3$(R<^?Bf;q7L5zIJWHP6~+2 zES!KF-#H$?U)H)+vXUe_fewZ-cm0qOVO`4x}ONm6(*P(SS+sVJ!j9t}Q#O5Y5V zW`&Jr$T^NjlTm_u04fnMGzs*qIh<4nZV48uZ}f2tQudq72&+q1@{{RtG=-0TG}84F zg-df2WK0Ax)ddw;QEqyA0u*e#fC|c#`nDOpEl5oOt9+@j!$S5;3na^yHUYh(Sk2s7 z9MI9z{HCnhf#S%g)ig!*O=-wd=2-@X7Zsb$Ey!;ZW->Z46;G3I-;K)FCiS6uG&Nr| z4(~o3lLfYB1$WpBnup7?1tJt@R#F>cY&bjHPo1ej-j?B^VJ>iU>r_2LzwwF6DmKfB zsQorQobD!|l;v@PnAtRTS#^Di+@#^Ei)2^UGIOIe3TUHr*?;Z~sp+GKJ@cte#O~}E z0?*kO=@M8ntDtCNyaMtYV^$1`@NyHh-Dnz;0BEBJk{2FHLFVN|JUP%g<0hw0mL-f* z5Q>*@BZ^s$TkO4IEgq^^L>97*Ci~2R8DRut75M>7lMq9f zB+R!2aZbYsEN*6NJHRD;xw11kN;u)dATR)vU?3}1%P&(dBSf$y^{CuFhgeNBv5h-x 
z6d!tWIF?9h)2>4v2cASxgpsrxptCvJNET-FP3CI$EYLIQ1~v?*@!B(xqS)5QYUPmx zcvgpVjTv>&teQGL(TG_d)$S^e`(v1yE>;C`76JE2DF(N!AtSvv6FQRe;=uXsswU0g zGVZ!m2d0`1g|?|;NW@W#^GHcFA$W@{cP2@@heZny21UiE6r^?@DLCdN@xbacLI&zjAb^Stwwwyq7U!{Vv z=!hMu8R=j_*1fzaV^xhAm?6jf!EPaq7L{OBmFe9Dk~VG5mw!qjP{Puf_BP2odJ{iL z2o4)*OO>t~yk+_2he)w=V!kEyN?5BvD-1GKbC^0q+W-NG3Hyj3~vq z^0NmsZ$q!8PE|c1}wv5gA}eAz@6&%m|0c)`x|D}TBTM1)}eI=>nU_2 z11^ z;ZBtPoV}(Y4|34q1B#<8_DHq^dD48@{Y-iUaIzBnkC8x63OQduXmyAR^?MOckmQT@ zu!zaz3uLYV?!gMrrcjZCs}=GBC}V~#L55OB9&JEOpe@?+_Tza6Gj6ro=Y_~-&)Ib( z)tNlV7MM7@x>khjkjqE{p|^aVDm>d{t!XuOL?uuVa4ASrzVv|!8e~(`70?YI-^x+H zLmLSCLtzqds+y6K)b!5-!2nsEH8Gjv3CS0t#Qz@t6tF!0DQ}bKH)6`o1-p!bcAccp z01xh1MUBQM&hMIswlQb;bQr^h+iBMCospDDR*=d=eDa%diBd7L@le-=Y%mW3<3-Xp z3c3H*4Da;{+N#(Cmv&|q#74_m71)vs?Wc46SmMuZmv!?3)k)~zsfa#spDQ2SUY<7W}tbEwf!m~fArq0`p1@X~xJxLAW)CNyVdTYiiR-1@l5HL(YX zCq|);yJwvIZCRWmr(l=*2jFZ+&J3je6Dn`prEGQNw+uzH+^h1Owg^SL{eW(3>)M7% zGPWppcGP;ffJaD?3M~bFhruaSz#Uo%t{+{z2d6pRLmR~EL*>d#ZK~3EMm{`5_8O%6 zok^aXq%a$zX_gwi5FGy&lxOS!S+#bQmxH|!>WBX`+Wj;mAcP{OhFVuOYJc`1n3YZA zn`a`Ja`vpYRqk-qcHH9ZJh@5BbslcT0&WjBzM$G>EB`NY`(z*0db?@U<{XJNF*c;fKv#31b6~xO?;@YeXS`2I$8lR(LZ!#85XP1@ zV(3gM`=-Hb7gQUs@F29Q);P2-P=;$@k^OA<`HAc-H?wQC`&lE9xDr8MdR%*QV zpyHNWP9V%E?>`(ZM4I{fV`^D#>h4kE8478l+7_sH$9?kMdBya;g39VYlqV*pZN$Fe z?T$fKU51NwY`hX=zBv+yI!99bK#40y`X>0j6K-V(~xCN$|^$mwDGVWu@aXwwyzu zkR?My5TSGBV}8QZPtmBTIeTKX#D6cVRGuB-nf5I~x+27T4A-C5mY`BpPF@lLH`h{X zO)GUgnr%qs%Y8*kC>AVJ7bQ5J45i@CMayO9PK!kblCdCZ7pVqXl?TY7*bUL-nqY-8 zn!a(hN6P35S>DdtSQ5~pjuW2jFlr4Z0X8;jl?`_PO^OqdvX<&yHCh``Pjz0U3aC9B zv;PK3GL1afsu}Ii{;OG!640%j;u23c?XAoJqc$z`(F1ZG$hBfqNY-Uoh&i@>8clGVf7?A!UM zbDk=}>nd<-qbFgooSNl=WH8pF(enosoaSM+o+RT_K(+$~+QJ(Qp?-ui;%h_Q=uDS( zff+IS{tTcAXcsrqr4Zy?cm7}0h3SICiW?qy{$j*b3$>40H zDp{MM$kxmb*Q)G+Z^9Te3+NMIrHHCVRpoDgEauedHhcfBnqde9(=yw=^Pp+V$=)8d zP1R-cmcX?9P_C*tkc$aU-Q{O}Af4tNnWAczyrn_kJ}h_OsoRha4o>m{Qs2ZXc>pf3 zjd3V*2Kdg;egT{1N6xJ>wnf#|GAL1WQ5Fn1YBtu$=8*+?rL*E}&;g(FNO*;LhYgY_ 
zZK5oil{~TBclO96ZMM>QBE?hjakF%px^W-H{#=9Q)w0pzfY!$BXqXV0=_lmvXZBBu zGhBoc5TY~ICkMeK+rkan?x(0Dy7=&7IUtPVSV8^m>Xh2DH9fFl4RDoLM_FCvhQP|~ zs^G>C6YH9|PDZEg7tdbosuT~UD>|C%MrBhglcX7wxVK<}i4sSn=3C2@Oc)z*#8T>( zViqL!>4Dd?VFL3VWVh|q+X)cQp`jt!5b?GzWybl}(*`}UI61q*Q~SD*7YLng80vD) zOF#*-R`Ttg1X=!FO~t9z058tw?RtvP zl880toq{#pFz>fK<+9pO{178Q`!`VE@r|u17O7D``F@iv&?!El49VO0gZfYWx|K$oFKo)jP7U4G(ZvHyY{=dGCu&H;vv<@cq%zApc=^31p<3kkEk9dx z8%fwE9IDZ9DOn5D)xrLhcROs1&+`M}F$Na8^+n@U}UQ+lYu}PUh`M)#f`=W$oyoEr+;W z?zXEbe0Yj89l2lI+|NGl=yO`-6I%@A_#oXkH^zWUMT9iPORNqt*cW$VP}*;~)MhmF z`z8qVfyfqKs#iXK+r@O8EEKBkT>uDbQvZrHN`|JpC3v?tRCmf-5=KqYER~fd0;vX6 zfc~x%-J1N@N|*B$#INiYZQc0#y7vSpzKZ+L2 zzWm3A(`7SAB-}OLtXdD*j;Rd`0tEcsfaR#41ZNg~9@PhFY$~<>bASED;-wiC z{uR2L{>p}ou0f7e%KKs>qqM*!jC3jrbru;tn%$kF?<~`ib$m{2s*s=o`b~9TcQAIw zf@-G;O`J32!otFVQ|ZLh)yKQu+P5rjHRMuTRpEPdJn<9tPLIo{omaC8UmCIK%b+K_ z@?t#L`d2yM+02w z#d^M>Uu5p8?j8&qSLV!j7^FLf`V~@6mxBCe-UM|}xXlToO+@X_QZAeK&gb+1{4yKM zc5WQ>vE#;@t_uV@-1`@jTUL75)lU7-HHWP@DwRSV^Ryq_yBL{U5HHMwmB9nIeH(_Z zFiV{Bo->#6PQ(}~XhPULV_9DOQnM30*-{uLQhml55y=WJ1mr8wqd}s{P4otnqvL2G za3ZI)mKkH^gq$it>Gr#->!p_G&85*^z>WRV7(V|}O2(^SX05VbA#JrdeRjFZCGeH2 z|2Cwgcu8H?uV0@r0dtwWc+Rc=_Vbl}_U|@rTtA?IH||BTPxan~uYgEV4pHA1?1;W^ zx~Y+<_X~Zx>d*l~F@g z5N<Ni5#*<5_wnz2$Dn5xSZKSgsHj-vv{Q^`Ra1b%{ z+!Z$oh|BH6JpXRU$=<@}R_+9zADQglxnoCMb3t+M$o|_eR93w;tlNHpv*R0mk}3Bf zJFWdS{}yupHic|pY|P1k&>FYRRm~CAEQo}?b#~jgCX5Uwj@jW9PIk&rB%K!mef-QBh8~S0!b!2|$UJrABw0Ug ztR}aTn{--F9AR85aZvRB)3An`d`LEHrzK7;<4&%%lWn#fL?BGiANG1hl&t)6&peNt zJOtaZW5?u8a`-^d&ZrZYU)o#>+8-IT<21exm&Q(W87svIcUg4-tmQ9DrH=eCJY0xr zE9|@=h-ojRY(Qsoy>aVE9Ei6?yA-Fv%`HqSw?$B9k`m?g$DD6wYT+`Si~JcC{X#St){o~l(?_n1jX=C*>&=o%_1 zceg0CojUDN)(|^(wp|(Yf5Sk*5@=+H@e{0zhT27Ndif{Gwz>89!(L|?>eC%QL}PFP zR<>ed>L0VY2dv#r^ty(*fSAF`*d|ThJlJ=PQ{|KY3J%SJtA=O&w?LLdIc9melVQZ{QFYOOn?)2j4 zZEt>BJ{kUbK_>COpC)VnjO(ZtDstPV}iQj&?ppcC{cY&;eO;o=$H>@tM*7d z20ktTtB@}dENOtq%g3P?hlz0AO<_kg)Vz7X&7qV^wL(+o*^$ftH}}(ZRQKF_U(>#N ztLpXHpB0`z`9Sn$S^9i^5nvLJFwO;sS7+EYGIht=+m)cNKH 
zTDeJl$iw^1tnC*!_HRLdmgKecAUf3ekAetG=yAV!BP9h7*;zn=T198g8^2?#|CV$% z!gVsSsZMNB;?08!D=%yM>)k;ol1x9`N?!kysZsNSt|f782c_%>am+W8ImM>c+tJ_s zJ$_>~c@=b1u)h&k5%QPc`;u1n7VWw5VS{<+w%Iy*4sh8ZKdPJlq5uKcjoeqx`tE(4 zm*MU1SMAR6YtuLC=E)z;8;^gn+Z1ik%CYJ2P8g=P!r6(o^vU}94?HKGyYx%#MARP^ zT}b=!?y%UvR&4soqjS5|c^Wvwmx}({8O|ko4z{ns?pF4%xsim^!PW~g-vWWE;ppEJQ*&@*0DGx3Byl4ZhfA^(Z9+HL(i6dm%= zLX2>0W9m2k`W+HzJH9qA)1MQIo7^W(K61NZ3CLA(s0{KO{b=|1X0VJ0Th6C`d1-F; zVj)yKu+&(D_fpau7$M@Hl|=~AFUi-TOiK88@MBkUHITj>WD{*4jhw}gub}IC)2&lD zOCpniRyT;P&-Z=)zs;o^r69KM3eR8pX^5(~7Ul1Iab}s&bUw$cpsl-jr{y<-dy(B8 znZdWj;kTtHU+z& zt9IhAY?a!DS19dMQY4UnoT|TZsK>749G#@e7zJ#s&2ZWV%O(rJ8W;-&dE7!Di}Bhq z!qySVIMbbvG(`y7$#3nK>hB#ym=C<58V34nr2;0!wEb5U)xJd2-4f`sVtm3=H!u$! zY%?vGWPlqebsZF?6RASGv~X(C5iaHv}G!!IB12z@C|F1t6V)WA35afOVlMj$!~Zlmy|^dy0ARm z8@TkW}C*$rmDzXz;HJa=#`Vp*WNyCJWQ)j*K)Q&#aCtQ#u- zj76~vhk27+^0RK~IprAJ`|!Na@DISftGV#pjwfz0|A4u#gqrZ}v=XkG5pJ%R9sn$( z=B=oj9 zgVZzV-}!0Sb;+h&2J+23jq5wL9fJGpxxaFdOIJf2QJWYda+ZI}ZC%e+@%BZ9g{Ju{ zBy{uFfGz((R;iC&9j)BC<*ogtFTpdQ`}#y=bhj*P{__4(5K%Gm_FUukI7NHO-6%lX z{mISXti?a@9`)H0D5r3#RU?bB8gd{w)U4Ox{lIbN4BM?=4|x&;5qUqn24OB)qNF=@ zvd4A-O_}IuKApHxyY-Kr2Os^%SQaaB?A~Km){!4>Zt_OZ~Us^tWF8cB7+q zM#;8YdSlS>RKZ#wDCzKiKh3+iXjUcf^?C%kTh)F*eHqX*e}%~k8k9~|o9aO?uhPJw%Okv&l?LE z|HRUO(L<~nx`|+|6HnkR-qsZTY5e!o0Wx8c*r{L_#qzk5G%b3_{I+$GPLZvRzl`xS zfODd>ciyQF#EwU~%sKxT1NhDMyb_pdDoiMx&W#jtT!>_RA~&~U&l<>~J=1547ef%F zo#MQ&uFTX0eZ*9azwlGTIOjiNj{cJ!JBBtiL+-np{p}XOiU&ex)c8Q?EH4GcdZD+- zThG^rYUbRQ>rX!Jw>^}k%U;iczlbaaiIYF+Xi!S9fPuo%z-a51vtctX!2MdUINoPZ zyNqA_!arO4hq7 z#|yn*i0xKoDX8U%U7f;B))!U)qc1I^0=lExkZ1RpFVv-7_P`gyCz>wRz);0~wTub# zj^U3uKP9}(d#rO_y?!<^^pa+DuK8j~val?tKnU%IvZ}&FYTp|6!|Abqvfa*@Ip_FA zJ?#DBf9wUjeb?)qvLEMHHC^oktYIlQ`DtsIi|g^;TnrcQE>qoj@dTVPhc(Z7#h*#) z-)5w>kFe)|1hgOUH~VLl11CKV^L9j?4BQfV#$1lgHANrob0+!Jk38*+)ypEqH+O5Q z)@kNuUp$cv7a;q2tD`lbJetA||Uzj&fGXf>-UK*V25 z)FMENRJWIiRI0hX8%eh*HsEh$)&J?}L0gj6CM9f>@HL{#0yMMEKP9!O;P{KL*=~2I z(`tzBBHGdJF^Fu|3=s2Vi0q*Y06>E1dbrr`^}$!6{waoSU1yy%Sp70_7aOTTqw=k* 
z-?#0@_(`_znzveI_ZV83rFduq{vIv-^_~L$yy~Rf3$ZroPDkrKr~Ssn&JWwg<-y+Q zEw*6+WuRkO0zZAbnR3JhW3IMlzGJ-}V^+}33M<1Z&(Y^#jF|6n`j4NpeD+&L!mK&| zWIW_ou>^o`ra1QLJzgF=YP=Bs#urO+yiqf9_tl@xUg1q0;dyK!UTvul>-6TVKbUu> z`=fBjy2!V?KZuUfEg4XF)AfTI@2d-age@^&h0OnkZm+rx02}{`l7B-|ToP8Y7c ztgJ11VJu`6{qA3}V2D@o0RC;v4BEfs&9Zdweg3}9fG+4L~;neHBLlp?T4cYpE z>exe!_}5d1d_6n#O5`F&<;CK#s&{DY`K6lG`)89oblen(Yk=NcGj$Zrr;}H6|MS<<~^02mz z*jmPjBfWxMP!C`%e*z3j9xB7-IvX&kXe@$@=h)KOK3t9$s?GE$)bQvq8!Wm`KhI06 zmqrgZh&9Sj>ifP=C44-=CI&290?P9XS@zM|7 zzOtK?80IS?j@@6e!I!^m0)WMq2Cu^;0O=X(!%;V4sghic>pIA(Kb&s?aK0Mao07yD z6td5IHAJJ)Pm@vK3!cdj#*bxJz) z$**YaL#G}T02oy4g8RuTsJ|j&QJ{?-L@B;U29WiLn!(f{5H)Pg`vHT00vrYa^3ety zegy%5L(SzrT;l0mb@f(JlK?XgB(tGp&~6(3qbJR~kzM%OlBKMwEs?DZ{$x>D^)APW z2uV*ZZz)6hDoa!c^)b>)XHx$T&7x5q+j}42&JlSXhv!$;OG&03zY`M;mMMU^E`Q2d0q89&wVM|8NRwkM5 zhT#6r@R}P{x_3%JNMS$lzri(SR8&Cndz~_B@*@oRS8sp&5y0E`M(sfBnK2fN^B23* z{!GZpr5c-x94dGqIa<%f?V-t?5+9<ClJV^aKM9Gduk12 zJ@gF1^iL$KXkO#EhJo?^!=8UX@@p_QVAIBIP>`u_Ja?QuzPX_44-i!-X3<0S`e1y# z$cFKd{f()v0r=wBrkXW;BwDR2x8&hS7%B4hf5UE%T=g5jhC}a~HC158iBo`$Y;CXg z^r57qU;JYv&ocY^!+^|!+}*SV?Qd*!#csf~2Kr0)d>DwQvU+P!7R^k?)pYi+LB1!< zM#b*lsF)b|M&J69!m~5uT+QK?e~5Cbb1DEE%oagj(4ep%m6Eg8=i1It`*5Fu>*5uy zSJTm=M*)`qD$$d_;@Ve{-5GQGsqbb3>!l9~wvRf~vSRSK!FEu7UHGk_AakWJI`($KUZqnpFr=xJan;jpK^nhc0qzj!|hHo zw=$PW@ABny?-G#ALZq*dnxuPmt;8CIeE)`PP=2nvH!AFd-;5fuF_fEA*)+BU_R5t6 zmxg~G1Mb`1m-3c*SKQ6-ECaC)kgjz35m+ab%L$Y_qi#zG-2uNlOFGbUlSyx%@$Bu( zE?l40Z?%?(2+nOobXRt(v-#|izi+tuzgVrXKjhqQvI)o)j4z}$F#TMV2Z8u%-hWmI zMWMku_60+(lbDe89Lr;Kuz$kXCPyGHo=k-SJO_8x(V>Eo5LU*1Jqg5&$hY~OtRR_D zT%8o8Geasz(J=cV@y@H5{_%Q;1t9nc8Zu)2Lc0`R$sFa<7X{6sAhQLD*4liM_H@wfbu9BWqMEmG%KM`jc zW?6gsBXxz}YMVDWVu1LT8yfC=eRrr2f`}8yJtU`3A~_dwC+Rk~@TVEHQDVRP0-M|$ z53PgZb|1H&&&IAVtUF1s1LXjrn|~3Xeo<-MEKE7NK5d*bf#d_e{a~QWpp6|p4Ushz zddyxm%VUjfYz5A&&+0R7l{!NAwH}4iK zH)&&U{ubWuLz9gSXLb8iUcZO9GeTYi`nyJ7g=2WP_-eMjNbWA7pG4$Y7t+%w*|pBk zl6tb-VceKu2(hGXv|)bo9mJ5!PsKy-@I-fw&9btWGSid9Ca{EZ*kO6S4SX-EErJe0 
zfR+4xSXPNl-bC|t982XoOp<24&z+cxPHw+~>52zYymiE3Su0G2;$|kd8~VP6Q%gN{ z8~JufabLur4E9Mt%dt)FLi7BgY>LZvb|}VQK+CV3Y|t!y_UI*S^6Y-AIvoLsn+~e< z6JyyX|IIb*w3WS1$$4K%h$d8g-nwlll=?6m(_`mbiiVwv9g%l7RFD~L4Ut|EJy~K@ zJ~I-G1pCGUy6kaZue^r4>cNX|ag}s1zA7-kiX$pMZ;`icG5C(ifJnX{hC{yBDkzK_^h%GCh$nUy~G9LV*Rr_>!0x6 zojH8u&%35j+}-47iu>g_YDeGS*C(9PxTX!@Taktq0Jqm;=0*s2bmdEhPg*H9N2IOI zdMw4jf4^XW?Hl>Yd%|>t@jdP$x_vb5no$LUix!j+ZSn5x&N;qQyyq9B=Zuz@V%j|C zc};u9la2SS&is`fc%V-V0tt^UFYQUS;z}F3yxGP+zd?ylWX zBz27F^OZlq*DPuHJ#r%F8ADl=23s4s%L0V)u~e#)r8-n1KB=YKzABg8Hpl5Xz{$Z^<9YJ^owUP8%`o?lks-QudfFg{SGR}n4}R38 z*|Bk(_Jk6!b4@y?kb!Tw?}4(;Q6X{A1~-WN9nly(KZ{{X%3?A8^;4A$CnrJ3KS)ZD zwC01t!NMWo6$sDj#d15XS^kC;emzuMps_upinAJb5Lq^q{l9(AiPIQQ21!^uRHvZf zd?KQqIi>_TJ6+fmw$K}1AZ6*|m1nBD+_Kjzv5wBP51sxsMZQ>;``E*HGeI?}tWIm5 zz8P;#syx*iNpD7)lj^qMAdAv;MnIZ9xW8aD5o8xtT??mxLufjVW`=qM` z1e~9FSMPM}I~_YwnfuIre`giHk+c3z$G+3C?{w_*{ebUu>^mL%a^vv30L4U)<9G`G zosJ#vf_xWwdeW%-Lb(22Nby}rvG%1j#=BVYn`H^#`LXZ(*mr*HbCU+%>DYHV_T|Ry zcWJwcK|t?x?6VO>|6k=}!N4L7S0mP#X!|*T-{U{I^YvMk(4&yNU@T2*Sv$1VSQZJEyRcIiwg= z%f?%zI>k9d>$~`LFu>;7m~whJ2k4bP3+`eHA#EJ`agM4fRpsfS50oVYw?y;{&huv9Z*m9zkLjAasJ zQs1b_nu)JLMX-h zSvH3sfdNKOr>Z^asB`&kylXwalJTqS%MJHivQ0Eack$YiBHFO7>u5Msdarw`|0k|MXlM+3DMSg zL!G*Xjq2PAQZ{&ZYdjtF9si4W_Xlrbw~06F$z;o?UCSHCd+0$I{pv>>Uz+{V4rYJE zS4LYEfQdtnZOjT%*xrBj2+n$QjkYJR3!;yX7;6GGR~~jRAeF6Ib`$%`H|;g8f=E&F z`=@1nFAY0*eD`FD*dLeXY9S(wxIoBe+*)Bm=w}b+y>gGerk7xYJ*CrVId@XpvN6s6 zY6m?zQW0~)%mwmSn}2tIl$`mRohHo75zk`8A*be$V_yr`GiB_iMAU%RG@!HgDlJB1wiLCzvjqQkdZPT)2PyBn)gjZxIh}| zt({2_3v?RjM~0#H8vp!i@q$0D{8?bS7~aKPx3K*R%$h{NN|^4WM_XJSvf|8D!gYBi zqBI5OfKxO>uVUNN*`NKKCvP`cM{|b(xmKreXWa>37knXhUw1>t=gqIYXn%ip+K0wh ze!hRSh0THG#1*W0kitt~QB*UOgZP~04)#p<;aH1fxz7gBGBUM%7PmS(#+o}{qprN5 zFK5MViQG&^N9@J2ZCpp}U&;5W4Q6%klaY%*>iV7^=*2FOEgf5cg@i+@8k#kE&7JI- ze%kZxn0!8+!MbaMbr(AFD>V%ncWD(QM8vO@wBLD}v#*i`>8-m$^z6hxKgynWg4C@G z`PK(FWWio+f(jW6c<6(L&-lJ-&#PwKOGkpEt6V+1N;-Y6eA`Dmfn?~Le_$&(25gk zkgT?mQ6+9`!8to5o~OTmHUJa*M4T`0-Ijw%-jEf9IqTU?;U687|7D`Sl6Lo!V;VN6 
zNYR%s6m)-6FJ2=r-413SM{Op- zek94Pup=#K$VDVO;WlgxbKrbzaCav45a!u{*C#eeuYt6;j%ZR8c2x*vm+$H%a8MVG zqHTA*QWK?nxI;M3Ir9!vV@B3UXuU^VbBSMxY8@8CvluKAhqeK{i#sXNFY1B8)`{#%kcrt?79EJ8ri^){j&?qaA?dg?DlnR-zDr5BDl@{*T^|S-d*aUYmdpMw*^d- zURP0HJIzF~rzTr%mj~^;P1dvNZ5M?X|NfJUQfeA`ZFqQV{C}Hg=pP#koNj;WxsDPp`Q1&T5*2<8# ze$gEbfBb0kkz);t_nl-kSAGJKq=#({T6f6^at*Lz=fBzLip9G1wx2o%`d>6^Hp85-> zT0$jg?y^Vu8YM;Zx0whdK+1UlgbAvj{*o(^MBa8g}B0@uZ3f%R{j!2iLq2)afQft zL#e#EDimGR=2>s;pMmkV=ZV2Gv`$cC23n?Zn! z<&+tPz{5utF50HmkHcWpyQ)(vjf~7!=FWW&Eyc26dGRWD1^jqS2)YAoP#0g1!2%NzCBIWgea-aKo+gZ&sX z-sp>G8~j7Rk-vUW$fO=E7piO_J)5n*qqgCd?a)RZg;5X4YI`9YE!f+5^-*o6pd59V z)ewv6!Bw$au{akA3O^Q&zW^e#oliWX*Rz3l7 zLIjWc%S2)+sy$P%zK~i;R&DRXgcz`;op5O_EVo&{4aBOS8rZ2GQ%@y+>e9-qq-l$C zLQl6~l~%~Is;<<)QgXB{z`B-#y#>WSl;MbXv7LsAQXAk3gF z**?U(sQi{kKn}OCwr@|q?S>`_>t)7pnCOQjKSaMhvmVNxjpR@oB-3vUj@FwyqhuWQ z;F+TN(`I>wN_q@Yw~(CU_UF)rYFfo_{q|%;5~Q)LD<)Uf%_=n9`?b9|Er+!(Yv@Ak zmStJZ?3s?(!Kd~??S-E8{UL5RwG2IH1oE(0x4kk^s=%#7dE6uHF+k`)B9Z4Rmm_50tPvNOUj;L+4YON%6x+5B=QnvEh9Qtaw! 
zEIsxmqxSIL!s6zJ=l5yavtEh$8Ebx3Ku{+flBuLnw0fo{g|cx>a7B%{*Nwu+}}*Jp6$-YE(hk!s;h?+F2@e0bej-+ zy#f6jrTsxpVtoDix4W|}oNjr8VaR7OEOpk2~SpI?Kk0`m+Qu zwd1=8e$dZx6Z?TC-WB_zJo=cXp_*VG%VF`?SvH$G@=4KEb^l>Yx7&O*sQPp`)^d!3 z;CM$Px@Drim2{rd;_Yumb{@OrCP!n28_tt@e-N|CxJ)n)Nf2<_?#`FO`eqJ$=7`oK zrfU5b%5i-q+%p63M^-OT6+yWH%E~!PaS$CymJ_7n<;-IF=^U0{G+qCiEfhV_wL}zV zgt^j))g-@_TuF5?my#TV31&KY~vBSj9G`w6IFI&;2-r*(k6M?~Y`M zG6q|*I_-l5Bl+QeJ6=o9E{rSs4gg7b;v>G^ME1`gg|GfF?#$9JiWa#n!}yQ`*2YsZ z-Ihdm4@Pwf^EHS%U7;92$(&_K10h3&f)FNUR!amc=;zJj?|Op%9nKXWY#vTHMIWNg z>7l~ljzR1C2zv6{FxUe(tF>E#0_=?8GF`#?$PVuQ5mjzDOCU1bO;?|;0G)aAI%g?!WM;6Zmzgf7E%0233HM3}%bO6pMOl z_lIhEp=5k_e`(~jL54;iz2Fk@TI?>r&~|w~CB{<3h9Yi@rQuQOa(f|@l*4I9rujMF z1(aQ019EdPv;XctH8B9`rRfdY@&CU;wefCk6c-H7gJiFEb5HuZ;S*8vlOTI-X-|0{ zLZ8Mg*sqSK*U=Jr;`bF+iQ;r+TR8OQ5`ZMZrksn#SF#qjVqJOY#t0)ug##zi?!1~$ zj<$B`4uK_;#DhUY6a&nNL;6NQM|zft@s;w+H*R`Q4E%a&c81pGoM z+2hhf=#z+Mt;6;?{k|))N(I@S&G!eKJKTb(RJ3lADCwli1{tj_x+hO| zMkCK=Wuz3#YXMw*G6j0TS?^PGf8ywj3z}~Zva2EAWvXTb>45-6 z;r$I0wD5c$iC)wAp~5Ar6c_2>!f7&_!xP)q*M=J5d_=yme3F?61_MEaG1fYm1m#{B zXsjSah)~|i&%yV z{D>!yk3===L>u194k7eMZdW7{6h$!%Rerf5c1LhF!5b;=R}>&u3q#R?=i4e!c}+aC zF%<;EZWD|RVprxyH+`R6EY{WH>O5n%XDJi$3XIya_cI?IHnzEFEjUd?DJIKWEpr2~ zZ~&$XIAs^{mgt@k$1w&+j_PJXF!fN31KSV>H^Q(4ey^pkE!D{+ba+&O6v7GeGoz~ z#tP%!j0pe5T*{V@2;>IS()$rdHck>j3gjraV;rdXydUc*!;JcVAzO+A&tr#;tIK9X zrNB7_UfOLY1cwurD3xeY1aZ*JF%}L7*xs97b1pB5;%ChJt4zLuEQc2XsIqla1cHIqk$U@>tOEp>b%OH2jzA^3&!j2Yy zCEZo&r8$ir3vnLf)m4!~5-7MIn3QZLO>J+HT`BNY*4I}@etxNcsXAKH0-V7z-cCf* z3qtpKsW%dV7o^E*+B?#%h!m$EI9t+XwV0?{p_qQ9h(WJIkg&Z(+^+oSxS^7+l@>y# zRt0qNu~MldLDtm}R@9Ai&T`_X#tk;Y!pB%Lj1*rFFkN;eo?5@IJ8s8M4^~V}7kqxU z4r?Q3z9xT#SP4&idT`I@1bur7`Xp#jXg+@Ks9KB*)yI z{^%$_rogU=JRN&RY_~ynHGya7n_JKm&xhT48_v)e@Xi=qdRKvsQFbNrxsEfpeveUN-sJcei9g*yhLhjQv76 zH?NG3!7g>Z%3b+ar@-=!)PE?33a+75rq!9&Q|B5@WY3pvuxRLCS>$z`!p zoP!s_B~op-$^;OE`E-nW?Q6DWSwxwjuSmh7r_sbY!cn`jxMLw+=wc)Q!$b}(@g-y+ z2X=?T!H{`97QL9eL8-{4T4sp@N=X39%}QH#*R@AS$DXoaXUV4Vk)h8`9&;#5OXXH2 
zCnL%~@Ym2!--|m>i0k&&{S#A)AntL+hmg^%Gzk3W%7ZamBm!zB>vN4m_v}XwCPfNW zAum7_1|obJ!fFMsS`P8jVMyNG>3lvr*a(D7Ee=O1Mq#D@WVR!Dz!bD6zw;i z>>r0~dIf6>Mg#rv7qmO7W$qpQ}~2W6h_1G{ySE z{ANWSm!+e^L)o0fXw^PqpG?=jf19Lpq@9>S<}w*F2e@q_h2Ok5fdJonS9n_40R=`T zo2BRr^T2Z&xX4H)eEoFR)CA7x^slY3H`^X6>R-#He>YZnrdur}LlZB7+?wNcyRp@4 zDi{=@*TLt<-7!<-1U|~lQ|F1qGLBf;5)*PQin1qE(TI&)hNWJMRdPnFsr*4jD}1$R zC||kaU(*21>;mCWu+`MSFfnI$9}3TDTwjh^8r_{KD#~rPzq!&{p3g92rBc#KOD%0R z(%r&1I$EE}>}r%n3+%z@QBDzXRm<2I1NN>(0|;-GcMo5%L_0OUh2XePkdGB$m6w-? zLW?>z^gXUhI@g(b1?9M+xw=)hsyk$mL9x;TqI`&$aLFcDF22H8XCY}~2bD2*1M`b( zuI4l96MuuE_4N7tiYV$uJv4J0C#vw{*n_?SB44Z!#iu}U0EWn^2F@JAGQQSqslV5k zE5iO)h7A-6mKEu+Yuej3IZFE!nG^$duBdNe7nfrp;8!Y|YM28IKA#1kJlncdl_aOw zV`yTCYvd;u0BlK}$BC@X{?{j14ByhPFVAu@-vHzFi)r)?_M$DAHIap$d*GIl;r`?>{BUD=HwASz zL*EnG4tcOJO&vzQ9B2(}M&=j}gkp0x-oIF?Po>D0NOHIcO3N1d$(UW6@FBRYOIZ;n zjRJKJokS~4=8_n`mpP@`3TzJsO&q$|63VR0$W>Ibln?VGP60T&XFmF*im+EX(Y&*G zvLMCK<6~8IgfuReEFyoXf*O8*7rtU^6$vAC=z}N%xblTmHHvy2D~x4QD;vsXayw|) zWkeQ*E3TyRdsxi0Xf>X?&w+UaD_DacOugPm(YFuJ0NaeGGZ^$ShogK+yL_bO+NOUi z%I=PaZDm%jE-=;mP+$SW7qyHI@+g;xJ(;4evA_q?k6HhWkP_YN4MXvR**Ly$oD`cx zfz+9a!U*BQSo{!L8Y!_8mqD@hj{eAC zk1K+WWn8)NnWDN~K6^SnJpo2My@kYO?>rZqjntR#m~)U~VO>vRUXP)BjwrVv^suD; zpSWmBHk=z{IE^`VO9*b+Gz0-AoZ=#E{8xIY!Oly-rt(#1p{O{V6ki6?LI_T9SsTC}k>C z%=dHmPYwR;3R3IiZatSrC5S0_1&bhE&D8iryzE@6j&09beZ8P^wPi4I&|(!#+yc!} zjmwMu*l~FQu`h^sV&*XpsWDhAv$&`7t=`q? 
z56c_nk}@#CQGT_4NU46#Sj>(Z48;NgP%sepfa#~3i+umsE6^P<;g4+`tV_`7=ti4t z?QrzxEFHz7T=&plBFUk6HK(gr*^)blmAk$gS={PSFr-8LtBQm>`)%)F?OK_YHnAcT z9i@+|wk1JSFjOm>%)MnLmkTbE5F#<#c9lCwvl5xdwYu*Yka0^D{u@^&;wmD<_5w%=pa*#k+g!NWr9KoW;jLF z!_UG|#X@Id$#t$9I1>hpbv}^)3Q)*#3bHyHShuz&m0fJsc(b#`N4+`L5&@?YhjviN z@alNV@9@CU0Qu}mM^Xm%miNH-*H;r9hg84bgRcmWB1bO|7576Zbt%n~B;Su!zEG@G zFNRNX!aB+AxI!#rx1f+w)5@)bs%(wX+)^txo8&a$O}%!i7!x&^ajDUYREM&!FO@YX ze%nWZ*l+ccpXLf3jKAr zL$5UR?N15WW#2pQOexG^Li`SAk?iz-KXhW2Fi6d}Wpnap^8xoMD987b}ppcKhO3 z0?AP%>eeE;1l5X%HCaU_Qw&-odh=7F%Z7Wa2j-!(2zz`Ulaz#=NMCgx)mmXjjra%{ zT{at|mfvW31t5>9J_xbNPbwElQF-_s^yX0=$|gRWN?GD{&NEjxAw&W|(TAchW9$Zk zUwM!nfcGH;c|O(LKd-yI8d>D6`ny<`mIh7(=cfk3<&lP%xl~7fvS=Vy1{nZ(Aq0Lk z={7c{IC?}?BT&$qTj93h;43N3ox3KCw7bu#3re)~nhU2whf1$sr1f~->{Yx(=oX=T zf1&?U?bCp?E_Pt4NGQ3Kg>+Ikgu!DoqcMd5j>q)Br_)~yFDizs3Grrxd)@1Ava=`< z>@UA6tVrtb97dxnKaNbu;FCTYGQU@~o_Jl<@drt^!R3(Jo=iETMVze4`(R=Ew79lA z#NFEHJogUl2$4-vNIsNDbXBIZ@0w9pbX*}W-+JZ9#io^mSJEpGUy#i)7Y4T6wu#$kxlDxLKSa-e$tvf$baXM7Cm$4*6EH9l9D;BD&CsZXQgW zg|*DD$87;s1)qk zB*E_>C1&W5*zLGe^v;v&oshghAQh)&FOpTztfiGo0 zq!=WG6b>EVML9j|FUvw$&nf>)QA zX9mCoFT$XN5ql6}%cbUvh7Zppqf;|qDqaJE??|1Q{q^rkVuw==Jw8LJ@}q2pvBbdv za_ExpITdbl4B0BNjF+#9pwgoeJ-D(oD^{8e)9Vxq;JLsY@eKRJ{c^OpzPSS=9Q?z3 zY9pwFrkY_AY_&k_W>lqcO1ENCkv_anw1${*_YZHNiqg~(UC}s{vifExBT^SsAtbTO z6op@qV8sN}xe(En+WMCY1j4pTSaqoxa$jn!I@QqoEQI86{r+`VrBIevUeC2VOuv$y z!*d)%9ijiEtc~zVk;wyzMnMuIM(&O@yOUN_Gb5~XVUiU_HW1Pk$;T~8Ay{HrF}?X7 zYQ`?izIqzh7dD)2A8~9cgzu_#DRh^3|Cim_*Pse_ADH-VokhJkR-*=?#1u@T<~3zBd1-;8y@MC2Ny`$Ifdm zmuNxM=%_#0ZejFBaqewCk2;3b^um=Z8U@?C4W?LZEvDd*Vnm=w*_fv_wi?$}zJX## zTdVlx762&%%u_d1MG+BPFcW_67f85%^9dNX+%guU;v~F~9wF z^_jOh6@Ap3VgT_dQCvDt8ZDM8W0qi(mLm7gAcvw~TB%~*f!RG9Gov>bJld7jJ(ln` zEGzEPB1Fa&Jj~weWE=jA4?6%XuxIiD8+$@2G0n^76gs9#b(up@tq6b9OG8(HY}RP& z%}zwpLek>Z0CyJngVsmsKg^8p$ZVZdGiFlw1N7#XqT5Zbjc>!Wi;Ck4ggk(dU8!-a zg0cxc_SD$unU#~xt?|6+YC>G#@k5$z@V_L<=~}q=z~h5&DCBt2IJ`38!LUOb>}FpQ zq^@-Fclh@cuK`HE=iHztP*!jH*^W;e3V=ExFg!v-DA4vP*P@-E_Hnw>E=N97Z; 
z-@YFp{Y`n611`X~cX=ZV+c`OXX}$G2kZSrUk-mTR>i7{7t}%P4#}`yAfNrMI)=X5$ zjLGi8vx?g{Qf0-Cfn|1Spd5Dts>I8wH@E;%XNZV6_pm#MHunqoujL&84d}V-=@ZOx zt#t~o(&E%})eR6;!$}_%0B6(8uARWHm^41T0Td}rIHREnp*Jrs{(^XZOz#g+zwc^| z?+)dcv{i=7igE5l+XGX14o^DbT2qwEZ~(YNTk|0xyw6ffLe+OeH2^BLSEdR2~ zt_JiY<%D*|6H2oIoE9@v)k}&|ehfeo(XT!0`nXciR)=a_40ATrpLU@(HVKHi0r^SS zxbK}CV{|kp^@p=U>Ms^FU)F&qR-8a65(p%UAjJ0rfbcgx9XhUwvVk80%aILUxm+6m zec+1~f)b0)pm0EOf4=12KcQ>&w$151UYR;1STc}nU!Xy-P|JzJ3pRCzA)^6{5?*0@|N7d>nA`m+wSAj) zzo*1-%+Aa{(}t=^9ooTWrZG&CMq6|1OyGaXd0H20!z-Mzd7n>H@9-25)no_N&lbsC z$qTl+MK(z$1N1KzrRs7WglhYL<*LF9A40>gZiFs20ysO13WdBS0;Hb_(g5b^ynFiL z2yuT-muYy)>EM-VcA`21MEKc_;2v%R9LQ(}5thQJNQuGWXd4Xga)Myuv9ccTxeyCz z{y0#|qzPl7Krj&Y<8+bWCt!JLI~m7425n5boezJe0WGo?SvD&(LdN1d8WsxsO1ija z@<=!~&aRZCLoa=hy=U39eCSy>>*(uhk3j5fRbtN4rQuf}p0Qc$h>hdmYdEU|;;svM zZn}uGZ8A=N??isog=e@BBqWHLcV-O+0TT?zl^H-WH@+3WxK$@OvDD2 zzKPMXBQA#IY7~!U)fiz+!6zYb*Pk5Dl6<@3Ug!{$YpXd0PH3PItcZr0ot zh+H-4n!>nRSJz02?b`dF4vj&6{o%Q0ILGNyn=f$Jel!meR1lBr4P{j-_R0OW-9~|4 zjG&$dMP#=f#3t3Tt=W5b*u%_3jq|EIX9V>7Na6+08Rm+rL;|v|^$@qw-eui7jzP)y zPm1m)@`AFMCoC6yGHtY3BZaG_KFV^gTFM+frO7j73I*htPnzwKS}4euq1|+?kgI+N zc#|C&!cR#>9z$?ZRRxL0mFncMoc#g%)`w!fRobqu$_>9dJgF6%$TUQfK!5l|@s+2G zvRKop*-A_dI6rzmVDut!z@rS;L5s+Ht`g}PXYS7P`-mDhP;Tn3AOW)^yUoW>5Q!v_ z4EG*y4Jx?irPSBtrY+ExY03TXfBI63EHB|Pp%rMFHHuKYLNj_-6KZebc}Cu4V1z4W zrN#}kp!vJB(2+0W%DLRwRrQHzcGslT8kapd6X;x-P;toB=2v$`Vq6Y5 zD$-BT4g+oJgHD>e)^Xb9DnFV2W@k54hMmvv0;tc~bH769Qd1%_%(Q@8F9XI^oia-; zM}5CG#nt+NyCMOA&G`z_S4*YQg<(9<{I(dTE%ov{e`Cp$y2y`y8XuLx(tRvO>37`2 zqIn3A3v0MdCYhq{(wz0`!=`>dRGyy+w(c`5203#>;ZP;e@zSyfer(Y5!I8jhAFow0 z{aOP@{S$JAN!?m(=ZX_(ZbyEzQm4Jgj{l#jOvJ99AN<7tZrOV4u8>_9iwrQ)TEyjJ z&H3oV74zmowAsd05z|shCn!&Lfmmb%7*_|PnG008boy03pZhVE8=rs5tZq)D)L4s+ zhz(&A3Dmva@xTiR7}K`plUrv-ztsG0H>)I>Qf@F*Kn}1MbSh=OUoM9 z@mTg#Q^{>WCFTILTmI1>$nEV%>7{@`5Z$c+!J1K#1cd=`=71SmHXwVvV%6M0WxR93 z4qZ7KhXooe`bX3ZeL^Mq`44Bi;tMnQdt_l9PVPhV$jC*D*a5`mhMcD&X9T$TY@^`DNYf9 z(6ZkfFFy32-SHP?)`6)+X=zc;ufFFiNN8@G8!mU_CduL)R-t2ibIM04i|nQpWywpC 
zqHRu!kN$wVeCAP@SqUafNQv@3n16^e!1};AtMyIKYE{lb%9bEbyyLe3-nw5%MsRQD zH`8@6exW|mWk^c*)B+`bSCwm#tPTPNcy{gk?6YI>-IYQ3dvWsxJ(=1yE4}4BxWWLl zGK_JOCm-M{FKm-T)cpj_5J3Z1hcauJigrylP%QHLa1Am_F_?{dm_GPr>M&ZlNI>hqoekvj|@ib@#1uDFkz;$ldI|!Xcp{T|DwJTqn_%U z+uHXT(@6*&i05gW(6x}f9Q81s0;{?Puwnmr8Ba6q^J~W`!PL~|9cI@-h5ItSoDV9ZX*?&Fo_`ZVdI95Tt9J_B5$60cEgQ38+ zYwE{3rhTmD&BCLB4&+=H9V~ZqxU?O169_QW-V^eJz~-IKbbEdP_7woBsqa-L0XWKO z@OncrEp*}D9r7a5{b?8d#1ncewfFbv)!PmRdXaNMpZ;PSV`{KN*0vC{0)8VOck6<| z+brFB@4wB#!Y`?Y0#x!wg)ONcP0r?;uhN}bw1s#7;QvSZXJ^(dUF?wK{0D$Sc0O8} zo3yyG$e6m1da0;Ylmm9fIDZNdeJ)jy3~QXRYW~#esORU*1=+aa%34H2p|qHDz+Shi zpoOYhfYfNReU`4`X5w1S%1)Gb4DFGwEALJb9mu{Gi|-C z$CM$ElxUj+Z$o=RUP`F^V)%JEF9oM|pMK5zOX?OPm`*&cz6;g2kELVt@YpH`;sdWd z+qYP-qjNOu?0ZlnU3-zm5-=5{K5l^Js+yMWjssJtE8P+mQW?+Bls6sK%6n;K?`hdC z3>W_!@@)tX=rVxt=w=hPZl<%O1%^(u1}=}hYHHE5@wi#)M(YFXzQ0WiEeNPHqFTFx zZjaR4!ePu+YV>OF44ey<9-0l>klvii!qGtIuiiI^i(t_XlPh%)X&XG_c(cshbYGZf zq@ggF_9wHN?|e}6D*E;-h){R4<2kuiG zHstM*?(7g4&i`P|f#D+`bg8!C8)}S>8&+jDYSPNLsZ);T-u{gkkbS+=SXgShV)SCt ziW#X18!39i zF}`(eYUmuB0#z*LYuSGS$3C!`1Ftm48Q)ZSjJ`KR`4zKo&%6!9?s{|OPt?K>zpz^Q zP?uBZG=l?xEu+1_kjGW6#)*q&elv%1#cy+E_IcMI@0wn;z!_tiCEZ?g;IW#vihw1t zn%$LcH%S(nHrDYA>Rib1f_o#g1NHEKc#N$(sGvQXIb(GEY=3~Wd5r$;cIEBYNwjqy z_{WQ`(^Gs|6||J$NXCAsJ?tTf-$|Ix1GTww#tiidpOJziP((Po=M1S*BP1vx8lgeU zdDM(my2-EyogFjp19Z)p;yp^l7s(a{r&rFPJ!<`-;-I7UQgFYu-7&3}9gXzgIl3~Z z_&|2?Bj1pQjRF}XTu{x>-fG>=d8;;2;`bBu06Za~+A_BF78!cYY7X44y9tvouJn;W6~Qf0A4R^Pj*tZdu;Nk+9{l~(U~>7f_f8*T?^tEh?FR1fq3*qtFtQWZ zy8!(FA?>isS|F?Lx5!)AsL+RdvM*(CX_b=qP*B&%P9fG-(>Gm)E_^|p$#_hTNtZ6`Q(t& zvmNLm4@GhbQv2*4qig&h>go)$Z_b->-&MZe68lZ~vKt+n#|}nE%p1*$S(<`FRu@Rh zy#*gOduB=BKNj{HBBgaK_o6_kPU-tBy6ncNftQ!8Zf6#|db6`7rMlCdc|Gxi)9H`T zNt4dcsXeK`6`_YS&mZMwPXD}Jj3&MHgBye1W3}abDh>rJR<)Le(F^$xuK4=5uc#Wy zBEcVCE1r(1j1lkG)}3{YSuHytXc?Oy`pW~>qD0uqV@ApRZ`2PuPYNNwZZ4|aT`{Bf zCfaO~u1Nd3BMGv$vD%nrR}OuO)&KdR6=lt-sw{-8zcq*F=^(8i+R|aB{lA$jzo3mI zd@Mshm^I_k1?8DjPM_J&|7qU@ImNM>PWeI;zi~zdr1XrS($;5lfTbmE6pct@;`(++HK~y3S^7kW>QvG zOfL>N1L>~_J8 
zH(0dLbB^^*vc4NbKNxD6U!Xeq$Y}Zn*9?7%GtP2FbS;V=u#Z+c7GZ9lNnA87S42GJ zijp1D&H%aQ$0)xCh$8lJomTv&n0Y)1CuEUEr_wLc-2ams=5a;GS1u(13FAWf^StXH z4VF`qh06@br)06~f{Y5hv^j9k9_KT)It*XSIoobF_!debS4^8%O-qoofuLKYnp&21 z)1SRZWPxCP&zYlUdiWDPvfELuYV!4C!RgC`SOb|*=BotB$m~^PQ;X#U%rg!Ox~jM1 zgmi_nr63^t9MV}u4IAlSwD+OKj}J2;+*irV=|5lf{OA3+3H|FAAUqL`FlrOS< zrWB&-e?~tg7dvFo-``8AyF2!3x9z=^;#1^ucDpU}=T}JY>2GCY?6XUcDAe2ICtY#B zvdAaA;wEXsd5LAJxyMMg(G>{yfB(Aps-B~trVUZjev|Z1u2@9H_%Rc&w*AUo`LBp^ z9pf1>Vv3YOkO#y$@FsO5iNU`e`tRQG%yVyOc;(RNjPg&Tca7@AO7@jAr7KPU9ZNPj zJY`8RQ{~goA6^@{Nh-wAzWLw3XV~odaemRqzJi-1h9V_4arGIcjhDJ{9x$=!(`z{` zZ#y%A6#0?%==a(?rp6tVLy#FaD56A^?azV@2d<@#%og+-?cXI;nHKccYsGe`UhkfN zMP0M8SbIzB}dOz*Tf8;yj)7)pFT=*5Vs7hMb<)b`(N;pqYbWHQHU%`4ktB-!t zgLze*{sS>C>i|Dg_UTWu6?ut9^ivZWo)zpdBW&3@D?R*3ztdLdA|K1zs_8Wm5S)G^ zb0*oi!u9hwH51y~x2*6{wo?B);)=G98LOJqg~y%vj-Hlfud;Su|52&=Z!O6Uhaf`g zugCgSxl0+~XK65n{0-!->58TA6k|DItrF$sH%Whl=q~aeB@dOJMTh*oK_JVH!_uo{oKQ4)g8lVD!u^!JY{vF1V~3 zSmeaOVG~!c4GnV^l(b4WRjZV_5ZT*{m*+og=rO9pWRKe-AYn*}Ya*QD18T~I8JDQn22fjN!nCtY=qN2}Y;U!HW zsob;EQz7(zYqdbKjD3WfumF|yfUbTX@)0iF7}~&Ud!LxakxPhGvj6S;XmjPqjPRy{ zq8YwyrZoQ%6<^E#U)YM9a7d^1Op?E-=TuTQ)E+p!Iy ze;@u(-~4}hn**}9H;UpKewVF!17G-c(qTr7<^LiCUg@$j+xH42TIDVYtMaH_^W0D+7?efCe<&?4*LyeE#u%WO9T{5|?*JgmKW6f8BR z)ua7C&2Sx$Qoj4tySsWv3?P4`{d>na0&vIX<51d-hc0zm|>%fVDsGKh)+9$A5i4p2E@oym42X93B64WIS=F{dwSm zHj_F2YuvMU0p@O!jImp6`+EdJaxEJ4vM;dCH`M*Rr zI5+HwRIE?0U{imD1TFJYc4cI(XuG$h;@(ujs7uyTZOS*>nzc(7fZTY zn2wYSqw5X!+54xGd77@ctQ8>DNgFEQ?MWDN_IB${-@_X`TGYNOtLE4U+hUJTD_8J! 
z`t6qL1w+y0lDnqF1Y3C{BWk2%kMqEz+=$qt7R|j!!y?E3r`B+)6K!s>VR7vSX2(|P z0+sTT=TNNlw*<313QXvr6|&mDM-z|r!8e z$}7Qx?c45YUst*g+t?3I_1ADR`m2FORrQyK9dyR}2eub=ruycXuMV$}&RE>Z=jOWp$kj*?BU$N%D_ry<4%%vbG{MhKiGYUE@<~ zB2c18z^%o^*h|;eJWld$+6RP3MqE0hnVpM$8;HCBz;fJ-49JcG~rmO3z zLu-S#6>&Y`i?^RF-EqWik5hAA1P+C6d{BQfzo1F4CTdUpkJ|OjiW^L&2~i$Fr9T#P z7s-wmP?PTce2`5VI1#eFxYL8O1i1^2>yo|1(B{>%KZNQo@2|2&oao7j92Cj8Fv^85 z+j;FFJGX?}2mQ9mV>NC!GobXGOLfpT*UkMdyW6rI_xZVaQwGy~+HY{rPWivur@ij1IY3$}DfD=p4Y#ytY zI*iG6suik5&A0e0oBk;86L5!8__}&6{ZvK0#ENU3Qh##4#^zKlc*2MKZiR2H1v?x` z7#K>eyt>{)+E=))>6vKS?H}4}0$(mh}DqjnmrJXWt!M zxmA{j(z2DOrAza?tq(zy`yTgaI;*YMX8QVwIlyT)o|XQe^n?- zwwW><7ef;`JK8_y)>>mu)E@_+S5?FK7VFL#3*Vcg8it<(%& zOYM%ub4u`V8w8O|l_VZ2h9wFp@2@h`)IuK}{AssqTWH)g;7Cf0hVRtgKa6HrQ6BE9 z^YiU?i8l`qZ6r?HWdT(v2br zqzlz29%_v!;KQccM0-OZ^drv4(b5K(q4Rk7D{8mWeO3dd=UeoF(#JUGHBT-@0=ASt zQu!e({YxKGdWhb14sy-gKEibVtt96P_nwHL98Qojg>6Bh<+P39;p7JbBu2;3r4!_! zi6AjLirY8INIa+>1eonorl(PP5X>E4;KPF=D4bcA(3|RAFqzmy_xy^NO_hYkQYxun zMIpCMZ_McG!WXe!lRjeGwv2r1&rcaNz=*a4Y~|$Dlg10_TtLnd7X=-)#Ib7ykR0)l z&W?QXaP#f@>zGbuR_Fx_CW)=nmL5+l{+>CBS{#A}%t%mE0QML;vvYXW%C$bQ5T1x= zoA!uU!TAtu)R@4Tl6-D5@xvmi7un<$ztl6Mt4=o+ylPjzJ^tk*``q73Q|}}qJ716n z-dZ=5C{^01I__HSvh(80cDL09=%_l8{ku)1Y>%ipzJ9R|JssL_rYIEZr;6Xh-))b! zqYVsCUSQ|QYWOpU)q3M6yd^Ue(zu0;J`XbvP6%vnARHQxgohUJ%lcy>$uWBC;`$#J zcLS_A_0DvBl#X31q9wP^(c$w%TB0k#b=bujnn8sy_E{;dbih(%Q)_C6Vi4FOtoh8R z1o7-fBa?V=sLt>L&U!0vfEE;@PC2EYeXtX9I)bHKgOhK-$toJd0u*5+=6P09-j^3)J1481T*1! 
zX((AOSdCM0RWK!j@?=Xe?)RbT5p1# zNy}4_tPyi0nyh{@I>;4u-`&G<)~l?9nV?PB3J9()c{Ij*ae2`fr8em?UG+|C^xH3t zM4{dtWFYOoa*V4CjK%+OLad6Jt8|3;)xb+p#(+No2* zhyVm(XcE9w%-)1Tg-X632|%iqY9ZY0Aesf7gUC_Uu~+2&9Y;|uxmxXMz!~JLmmkGW zv}U&I#3u~G_Z=+3v7R+}lXn;BzPbr(YgGNov1dua-1-qO!eUk=VcRN=ZvJ|9S&Ms4 zPuP~i5)XVpDMfQTiijQepE>wkhhcL&z(2P2kdkCAdn;$zN zupii^w>7dia>G-DdH;3h>#rRfPO~~{`r-AfG25r`{Uo@t1lOG4S zeYgfjho}olBOEddb`;fX3w!>HI9C)!2gd`UPJF`F?1gl638am)x2OXLC224a27B=YIk>o1!;H$i_BAyE z+J?R{cx7(0TowYcsrzg`>&7vrBeXe;e5{K)Mhl7j{tF`6c*-z?voGX|CiVdk=pNyC zfd^V;*{~pl;Z#_gNC{P{0wZJv)WD}DgYd=*E-tqjnBvMMoee--!Pa@BrIckFBk@w= zXZ-Wh*35NIVix zg%y7R@XTQt7kox|lo}>Ocge9`$gI3v` z%NosQsq0Gjo;TQqqileF2pe!Hd7prOIoB-pYgACftf$Z7D=Q61C#s=q@9X{G zo8<+aR95R|`gi|77i098aJvpPM(JOxp}hWWlFJ)1(0%&91EJ@EwA)TQSA<_42YT8A z9)A}3zCa>ekqTG3wDc7TeTCusBjvdX(6yfk>W1jrO7-*6Px5@g-u1-Ns~hu9q>g9W zBvGzD3l^+RRqC#O@W$0nr$FZea-glis;rmRa1eWO6; z^r_Y??by)^^G4w3v7$vMVBw%M5tKwhE~`HRX?Op7jV-n0_O6?2CO;=X9%as&Dc3%B z2kw|?#h}YT&k3)W&cHCx-Rec{mm$ZGuIc*@_6Md@o2W%DmjJD6W3Hg~A{pqfRrj_k z9l@Ek_l|XR#mr1 zW~*5qcbkrd)mO&GP#6z$7^Pz_-ecEdu-i$nctuiZHs3{Yl3~(3SbvgIGdy0+hy)U- z@epqk?&EY1;F3XPdmtU0z3)P0o$&VL!yM(?ZxjTN+FF`4SzcV&pzi$n-EGPT4b79M z=*D!gWXoh?2QXd=+HeJ(PK!^#Ka(>e7N}=SYHf)JH8~^oWjME|pG4L;DMHdU*@}zq z3{AAzNC>@LSzTq8v(%AvNpdYr7JfrEJ!Js)Of-+HjYv_zh?0IZFj3z(YMu8w&fNIQ zam{x=slLK3;Jtp&x*N_25yLJ1@q@@uKHvtE6~@|G~Q*v zpl~{FLNK4j{tdyEO)(;md#nDqVncY~Epze{BAorX?(!7XY}slftxgb+r+VLgC-n&N z*wHkhT@Q}__F|0C7|g?g=ZWb;YWn$d_y)R}d|`ZVRk-IV`vJ4QEk(gR2(9ILc_PgM zzJX`IcTP79l$+FK2OF}o!)=PB9;ITJl?617(bC_)^YSd=GM{BF*$Wq^r%dqYas-aK zHhNkx$Z~;@ z6sPP%xG>=^lN@IV_Y2h@!>Fv4Z?pSh#)wZDwGzLT#EH!D%tymQiQ?42mVjnJ63)#k z-JMlOs4`&p(yfJ&6qPubq-s`SIHo>cl47a40+{ObH80OoZFT)-XWAfyMcF>T=;TtM zk9to|W5rMqo77C_N*khpxW!Wt@VeIWegl2}JNn93QSd8=;l+1A%TAPTdL+>>mDG)=gm+GtCs)N%G_GR2bRF8^36zSQdTIzO=j)W5X0Ey1u zu7^ZbnB|YD5G@GfDZKECd_FfGcAc-kLFXf4z38y#XNL%BSg$Swwzj()w_Va0gjE1H zIUJ@OoS$M~NUs&b=est5Ec1?ilRW?S+WjmFgJa)+RE6d-Y!ptoU%@07ohOl*dN5VbNnk~TonyH{8swK)M_5@m8 
z1AH!)Y;O5R!E2}#8js>LKBIVW3JAFXpYmSklhOO2&x>Kmx6Y4*bp=AN^gr8CNhBS~ z0>iF29G3Wu$Z+|- zcBFN*#u7(9M8z+N8X@wLVOy|=1p(Jy#nM%K^d>dQ1!1-3w7vJP&(^fKW)WM807|kCk>gaeSGjhywS$b2 z`K9<~5)LCdRkZ$GH3c&~d-WlOzBY=%SMdU5_(NE_9{28HuNmEB@Oz-V9?MN2#^^cN zkUSH1{ni(^J^IRejt29|Vz7R2VAeBJ?|UDjS4;eE;}d~-)2QBIUQnbQ;CFkU_^_PYJ9!$15G1iNzV+s|A6 zmEHF6(QQfc$uRb{kN3I!AqCVnR=#h!vxD0Dk^F8Q9*^>Ll_d2SqJoRjB~>Ol$(_yh z^-n$})MQL2H97-l)U7?g4q&#*^+#{Z*Y8gAh#3e-2O^u*B`^8cn zaD12TuPnC8Y32rN&_C3=18%@ZVh{(fM@}Tkaef?L-!#`E5+ELP;Wc1mCQJScbT!ny zVH>g&STHYf%_PEZ44g!{#hV9&9S@uY)UBoSXS&M8%WKye)7&h{^j*1Q&~aOjc>gF4 zS-zwjrk?g|Zq1BHKFQ7`7ztN0_0%Sq<%FW_*d7_W^LJSWTcD((w)A9YO<O7Lj>}Rm6xY%_*f-kyBkfv9gds+&%7L_^d6>;`j~aLI37v23 zipbA1DKFTsD8sNf247d0IzS(`O3QHVPOstA1+iz%X(X@Wm%uZ8s^ON$@Mys4HPpMs zVry%59pdQNbvI{o5O3du4SebK$3emvj_Py^JW3Yl9i_ZSRhB%ex2@6BMUB#w7d%Sv z5%#E9xyDP=V(0n&Jk5(~d7R0N=6>M=OXO&96IU^wRb-VSW&#L2-hD0bm^`)Eq+4tL(e0DcMdy7KX zF-gY9;8}gmdSWyks0XSeR&n*4rw#<;s|_KC#ugzYxJ84M*XQOn4K50*!E2kw<2?61 zB|-V$!0;X2)a}TH{T3wkgKmvY-t{2tM}W0{1#Wtf)`qG?%sK5Q_?@!@oz8M90G)T> z?f5m&ZF6cI-54xC@Yl|unw&&=MYl@n>uRd}H1hMdu3#^U*r6@KqDvIcJx?&F*xOZG+jgKxL_oBoY~NPC_f&ut^?xfa4(?$PX z`ByCH-bTTRx9+Wt%3IHjBK3R>oxS`ch37yfGc7S;`+8~;p2kV$yP13N!r|z_OvZS! zSQ36yS$qnyRe5E-92bO}i59N)k_5A63Q)m-m1O6IDeY}ygK0Wj&0RiKJgIKd*s|WNZTP0 zC?q8Sl)CQRv5h^Zx1;j5bC8`ChIcR6L_#xaM-bVK4ms}roNmW*FXk3r^k6Y0&Tem? 
zL?V=|l}}0vY5p^t(n)p5rxo3~Bp?hfv+6pJ)nKsOa2~C2_K>E|Rt-ou7w(mnzmN3b zmIG@noHXk;ux?B#_t<-r8#RvmL1@$0PS4B-tR(fTRkwN1aH-^H*J`!ZxH#tHE*H>W zW8jS-x1)|dJ4&u@FnC`q7B@|jTad)T0rT90bj*h; zDG4hakpX9tDoV>d&0U6xISp6*V1HbP}VQO(NR}*Yt@oF^VTgM|{O| z%G<0SF%T)w!h-SN%P*kY+zF%LY#_by%%fvp8LgxnD#Vq=lF5&lF0v*5Rdu!YSDH`6 z^>oL#L0;|i$KCuP#?&w3=*I|7;yj|%@ zT-fm)0f=$ed(;=64*-%=kAUt#C)PQ!C$E_Wd0v<^-T>-ey!j5u;FG*9%!_ZWe|)z* z(u$9V8pSXztE9!NuEY|Zx>MPgr1miVIu>eD) z1Fy)4Lu?);i!$xeB$}pzJ%P|MQ0faXL|q(*64tPCU5wnIWwRzV6Tt;N|5Cp8$Hrah_rOvUFt6hG?hgzk*&HiZilBpu=6C01Gvcp1 z7dPt6Ag%EFLyqhNuw|egMWY62>~8h+BQ*V%TEL;X-mu2^00Fg}oD1YUlS~g^677rD z(X`gmM%kL+Fp6n8&G%3U;ObsRz5r@uSE%?H95@V7_CW68t2sJu8)*t!G!>FTkG~`J zyf>Q)+8-@@#I*d-D?0hxqGOkak?cn<+r>aQP#iM82tzG_RpM~@UW;;4LU2ty6&p2Z zlFe{{Yd$)^P_`bzRfqR0vrQR^Ai-%?QlBbxp`1vQF9n!c*o!|R^vD&te!|vw6IYix zLC+7$g_g5QrpciEX^R-Jj&AQi@f?yaJ)K++`m&D#1zqi(Ten=cF0mnEY1`@do|iP7 z^ehFFsy-?gAGri&xcD<*Ur!9L9R+0P+_^a$x=EW@^UNwZ;!yzSS~=sET74Kl%4aLH ziTVqgGiIA6Rh>ALXIgEK!gWCDHK@T#Q*svO-6M4Y*YUj@$A{sXqaXGMk!XKe)d(Ez z2PVJ4FkjwB5@{1$1K?5_^}<>E{vZ-s`f=tG=j!%ln4YEMFb5t`^` z`ug~{LBCD#*MN@hSXr#sUd|y9-GAsYwX_cAZxTG-6lsO|+=(Rgi9JN7u9|!sum(Q**z#V^3UNG3L4^tT1*;E+z(4VX4#n#W9Y7+-b*jVrE?fu{rfV=E zni%QT?b&UK!djYzdPsc`4bL~(Wsj`Atjn0)PrkI;LwG_I+kyPOM*5I7EWFS2urOM} zKz*=Waq#!jP1?5Qu);^t?B+*EThtOS{sIwoi!IHhCraXnL!Rb3PVxL_3UQ5TKt;Hq zn*%%@P6>|zJ?6DHznz+%>6V(dZY>bZ6vD7+AYXY#LvvtPS*^SE15=fkS7zz_up`*i zO;CW!)+ZhZ(Xo2DiYRnv)z%s{o~E5r8L2; z1r(JDYq9(H-&~<^=_<`HTT62=AvS^T9%aD%M_Zeu#Br~@B^ zhbmT%?@`t(t?)rNt86XoL)hKdU<4N*epB{dRbnJUE83h3EAb~kxP=z>x7P1wW!8pq z55aoJPmLyRnKcnyN=>)Qgq&S_9I zc`J_I4x;Q&Xn_Y8sS@QfjJVaCn#~kD+>R}3o=;QN54CtO7G=3e2^I}Ze<9nikRGVG z5nJL=_bKSPG0>ZHuVZZcSIgTYJkNh-NY8*T|6wpr+e zYk2%kz@oKu$g?EIJ*qFGrYxX#@ARGE#z-0(=XsG*)8vFUz^(Q|q$=;R#D-%NM_COR z#)uWOjx_=nAZJj?x#jX+-4#3Q5TTQYIqS@Q%$y~Sr+3#Uj#SE|?$LoWrchN;)>KRJo#~*A@d8m+ zXwJ7Y4*nO{3tIy%XnsZ0o1vPW)?C;^$Pjn2Rsw`6_h%8uLHB|N4K{e0nu8=|Uy#KY z8S8(mH+D+IC4~!F=H+(l9}{&o=6MrP*Wa;*QeamWF~q*^wer-Ok^}SeMGKLAU~x$7 
zCL^rlx7>P41Qi~YQMdF5v=VAM$MtJ*{y)J5kx%;!*b+?e+s}ZJ1w>;9+S)I{e;YQLW^C!CCULSm zz6z=C-sr6u_2(knxn*EG`E>7;nn&;7?yXV|{=@$Sxf~e-Z^8b=EW>=n8Zc98UM`MYP-Kl4@vqC5zhmvIrnt}Zp5&4wSPSa>vz|H( zZz9x5tfyak$JIIRVyu6nZWe0EhPan5)q#q>E`beE_0f5H{3ABC~k(tymD3fCdB#sKi zSiq4d(!O>aibs_ABNA__(DGqdLutPRKiihOqR7INPOktofr1-(b9O&E3`YbYsY zz)AhoDd!!~NRDp7?0lqX?O(3PPpws0MSPz8+(7n%&nW>f@oj@w+c>K}xRp}Yv;()% z@BtXY&HHPd&duk0dtu}M#3Yw?-a4(NTvLJ30g z0Acz*=q_QNKD64Xa~|^wK*v|Ijf3}JJ~m*s8v&2|>gI@ty_osS+s#x4&!2&A%O&$~ z1Qj8yP4BAEhnD1TU7mKM^&`ZJ_OZKA`RWk#6ltTF6EJ(UD7g7+`9FBmz9$rtkYHnKNC3|m#dGT+06y*Xf(CruBT&YaDft*X6=&n$qf4tXS zA>SX~;u{sUK~;v`h;p}}haA1!Gh9(6Ncf@Qp8XPDIs)e2T9MVHMCN`SjN37vg=2fR<_chQqw}mk%vWS_3{nj!4O6op$IiR;0XDj zjkMPp>+?OYV{tJUw$-qY_1<%0l!!yJFC0Huh;CvsE$QA&`$832Yz%Z!@N<}A@=XD* z2WgvoOEzE&@rs(HM-ff3Z|Arp+hK%7-tfMx4p|5ZY8cEkbM6Y!faQB~)~9RhDCvay zYF0#z0qD8M8?CIsgqiO>#`ybQK^)7JRhHfJuupc+S2qn`R=iT$(klEJ=EpefC8j5~uK~%v`}dXu5UA`{DTua#DF`bUlGmcMaROxcT_zUq6d3C3vtr*?$(Z>%(88%VpZ7>AKegjq+bxUoGw*AWoe9Gq zfFAuxd1sXquc+Tz0POj&=Bs7NdU~L3)YacT8o|SR#v;@p5a&N`_`mc3iGJMwvw9Zv zB>$h#T=9Psp@|ExJz_MM1N4h{0uBm9c;k@>5NO1Y`rKk=D^Rh0^Q(KX6?ueKPOw~1 zZs2$Qtc}n@4Zz~|eH*`+0QQLN-+~Vs-9#GiWP1T=*I*pm19WfKn-=2p668Zcv$J0^)soo}Vqn#KT|-<1Ah zX3fN17FnYRHIab6_X2w`{<{XUm7ZFy)(jxTx$EyudTS*sdn>dTSZa z5p{H~?-0-*)#JP()DpD9p!gF!15)X}U(w3a?(R5RGvE|@bhk}495R@4zY7?snLVpP zV-gJ(*Hz$rJH_|&xw_wQl+Rm*ltWmt3kld%iQBU=CbYPmiMzA#i;B+vw$Lc!OYtw; zER>~4wUu%W$YV+IArLN^8%(VM9vpkIU{q!YE{%g^?-3dv^s;o0$=DN(VO$={)wBn+ z9w+Y~XBwYWT20&er(AernN~U; znDf}#wD+r7?_&g%e11fWjt>+B2(yNI z?cE`R3i8uU2GB-y>8I~|B25PzE|3hOoqa}ylK)pX-Y91)1qR}GOy38<_c%O7{Vpi= zEBV^$q>ncmoR$n4bTd=b+XMebf!k%44h8QC0~LQ*$7Bfw8e3qW8iaH$=w8CV&&9j! zJB1=k8@6H#^g^GcGamUIv(;o%{CC!d=3 zX=gRZk%!g@%`r+OB>Q$fw0b419v>~%_y{Bb^o z%vfR$vo#QdS>SVsS!R@{tLj^-t(^`#&#hkxQqLg(lpNV+^X6Q5h!{IwJhL&*RJ4*y zC6UbistymLj*!EmDpq*TTy2HwFDljK8G=y2F0ZLJe zGXK#kZZg<;Ls0QbkvlOCH=OS6FLTthp0$s1h`KwaIZu^uk9aU~6sTv|Qmj(9~G2NzP z5rEn9*c?0vJXmV?J@vEa|0GJc`?Z96g@UiPy)m*Yn{sln%G!eDols{%8n2gvDMMF? 
zGKAPArzv7Ek!ROPiRIyz8>dHE1g$G2nuy3QiU^hGz6T@{rQSvgz$BfM=Nr%6N8>D9jR%BE&*sR@=A#Nd8IB`O*e3v@yK4BEZaJ z-5unP;I5P!L?@di^-}r0#4k2u9pLezUg}q80u<0&nfq^jgK9+XkSwjXy+70AuR61z z8G9w`@=#(&JEF9t@?>}zWl=_iljN#+>We7^RH_oG4Y}1yyX1esx!j@GhNrHpP{#f) zNLbk1JdI@p%Ht>8We3Tn?81caoy+@*El_Q@kNd8&yMSwdrCRL+ZTozfBF0TnKLYi_ z)p}+h(KEYQ`Qu>sfx9*a;_u}&>P3FBgXw_X{}YuqP3qf;Ny7uSMjfzQl2{Iy1iFtr zY0DI26`!xU0loDA2=5Bc z({i~%LSv*Xx4>}fIs-VsRkN+Vw{V`#?5Shls6zy@!$(6PG15D77bNiow|y&X>BOuS zpgXL3h;Fu<@1Z~lk%ulyd>LL*jR9>GeHf$K4F5xLR+9U#0>QK;R<37 z@cQ4bieU1`6Pi1<6rtc^o5+K@F44x1K+5bk<+WsYXVb}u(Lf(BKFp7sT%ox^W8sRa zD3&79ly_hmn0-jZZ8g99Q8nXx$8ho9p;lqRs`E*e{<|-s$v=pn82y_1pu6pIp~^Wf zss^ofhc{N5V#+RZnHr0jFt!D%hQN?H*Y9HKEj1Hrbk1l>4OacCbOD{W`mK9*4nn(kEj~Sx+l5e{AiLr`D`;PaONy{dNfC} zYQj+ww;_?SKu60>K=}=d^iCYaf6;=-2G+j;1A(14dOF2J~1W|wV(F?Tis z3U7)Y;>*@E_JEJ2w{QRs_(ijf7_Q}+y@2W0n46;eEMB17AE1?`xj9r1EEy(P zVDJ7TLc}HP&n3zYa&^I6Vy?xQTXta(0Bkt9JtG)OSP!_##=6Pe)*<$|vXq+#k%a>R zhp0JoVNi2(S_@X^>sid8Jpfp|Mh|O%=I?#b2Bkh=(=NHG7{1&fKz%QdYt_rpI1GLjc1^5-c$3;}?O6X83BV1AID zfRTlJEoASzEx@zeE~|onkhOae5XyYD%*GP&P5{e*x=Ng#e;Z=~$*cO>=prc3q~o_~~{{`-?Eg?2tY0 z5hCkCD}ww_p@k4oFc(2t4B5Q_9~>T4qjG1K*^4z8SwMC>CAiej>*?W^*9ohwrsPN> zlvU3xpzYi>sTkhM1E8{U{EFU710iup`nsrGs=+DAT!?6pFGY17Pyy5#NXUcbxbrPl zL>7Q}_tS3vP6ktx1@0X{fL5USYyeng7JHben+8Q$nKi3v$7>0I(gO%h5T5s8)RNEe z91PiYPhYvU1LH;*_a!!@$XH_h)5D|f0peF$6r3mTahIKaL8mTge?cc! 
z^5Y4`Z*_r)_=)eZrQX4Tuar{0Bul;;HUq2#p-rnxE-uV7SECHvC+Cce62Ypk*T?*-r)#U5 z*HJ|>bMKC1W1D&b-j_c(*)IBS>Dhgje+z!1R+3y1YdXNT(kt^GwO>WmOrz)#f0#hs z1_C7&zDn=((v8rCX_JLh)OcV)t=twLM;Ns35IFWvI5Fcf0I*BeB*G za2}?%T!W36Nz}wwi#JB_2q4%%&0ofazfx#-p8M$*`VgRg|HMZzz{ioDF;qm_{4JKp z4cyK4o#k-!1ofDWt2Vbuko*P@%f%XV;!5Fc_&U&x=#f4G*tW_;#oB7ltiml-lCELR z0ruA5rZV48Cd7P+1qd6DO~A zP;`I?-EBOq?^aS^h^cydoGT&(C%9Ghm7TO>@YH=3b@Rc%Qy?Rs)!=MRweBH8z#A&F zv`=YFXbU8CNOPJ+`+WregV3G9Z+uzcz*(+VAIuIPs>7v*c;3qBSSwV~bC}tU-eFzO z4zx`#Le*tMA3=_PbML^dtdYW0_Dzo>&$-(q59WKTDi^zV)(RXY&| zRGQrKbgLE1M_0=k9y@YUB5%aK(jW962X=(pF6i~=rT`)H$q2j-7E2rBUHzTxBA*uM zlV`xR*_&@Q@bOaL(B12$-MB}uASZJXT1*r;00ruJ-P4};*irgAhv>pwH^p)bml1{p#)6TQ>v?ab_h*u)ayvh5o?iCe zFj?#j%p~fyncNoG4TvShtp+)nQ~z$GL49~3z_H#=rxaaLK+odaW`%g@jX3J%(>=j; zN$dZ*9d8A;EWBuuVP)U{uKZ_t;F&f-v!_3$^&@X~#{4Q#5YJ1Qx7CO!7FDVe?oo^x-B#O6irkRwn499BK)~i_PIh%dVB8KV;B<)Xi+s*0v zkYS2Dra)N@!CZZKBB?D2af0SOr~MrGx{RT#gj_AXC$O1n3?zJ`za40nU>-8d z#j7cJbq)|8fbKQF=+yw;ukPD)^@R1@XPdB#Tp*JKE#7?X@c{h>P?jfcm(vdE*CbZP z0S1H4Q{TJ<`0&McF2K7N+ve%ty4?jI>7zvxdA)tx5#=Ig?e;2HxYk4{|@})L(lV8)SxvNomr_ap!9KF&*-n2d$Ny3~t@B0aGvaOM!(C>Q(yR&I3 zG7Zt(p$HBpcC;W03M|acBa``!hu9zL6)juzw<_g&?P^jy1i;-KJaA<^)oL%=1p zT!Hx|m^$L7QaO38w%wYlcs8r7PwS?&&+^V>OunmEtjO0?d18G7U`i$k56`@%Ga9{D z9X8=QgcpO8XXjx+{}kz&MdsjlvKu*%+sa1fd?y!HX80fSXQ)XdICpbTML)4b}D!wbg=C>t9f}y7~z8#4>i=r z1a>(gi_pxZ5Uza`UpWx}1I(m3Vfk6T_d(Za(WSJ*o^kltWN~8pM#9cF<`B5SK#j30 z^FCyh3;FYp{-R!UjG;z$mcY3P=^BxXVD*CUmZrNE8iPOp@wE0WkHIRjjR04lI2z`{%ui zwM$+%2-imm$1E=wBN;`FRF|0OrS?ZiyB>wv5nr+<uq!N=~c_r*@_RI*GW`eIE= z9NK@7HpfRJ_{u1LeI=JO6FE27boJhLB)cQ#e5SBwS%v7g0f;(p##gx`28)H@I&pyC z(_+n(tvr8xDr^#_4&-!281LdEY@o_&7=bv|{Nu_Vb0i$CT&nVL{hp@|-mMel+~4Y9 zwe*hRx1!z!%MVL;y4H15;sN$R4X@bKJ&K454rLb~XG0~IHRIEak7c}P%=S%SXGmiI ze1>gwgTQbOO@}X1OL>F-&v(i}(lu(l*W%Lf)@x zKb=99XTlpJ5j`8}wN<5g0u&2?lvGv-oX}Hxk{&p-=tS(kvQc@eB|wm57P~h=52LQR zm%g>OVwWi{XCkdU&v{}r*iaKMtvtE#M{|!44`(jy0|Zx>MJ%kXMMU{xm1L|AOlTBA z&o7^pl^8xNen3YZbga>u_C5sfnVmwg!?qC?X!o0Yhl{-XAX3B~8?IHGz#`qHHdu9n 
zjNgIt^zX}sq5#EAo(ijhsl#tX42nAOG83oVGMJi}-r9UHqEE5Dc&E)wJ+i8O)??(% z^d+6;%Y0?1f&o-x?PkZeg@1gI{QPp5qN42axBQ3og&v^;o{++kSrJS&|C`gV_&5)! zlux46x$_jzmK)jTja8j7%2kHi%r2F}h2O~3ht5G;ya#Rv09#UVXzd&tm=@o$@01n7 z*L8vR@R!fxS#nueRcG1C4*?Z%L*I2E_OQ&s8>wbuO>wH+?Q!uttX=08O+DAsrV7dRo)bp+pMT%)Sw>e%I5X(L) z&yP3c{TISX3fT>f9*t<>-iWJ5al2_Hdbj!<3x;h`x{qhJDwqa5k6?7) zU4c88hLHE3@OAbis=w|gYZF5HT6f92R71+B-12k9QU`EFksxuR3g>TW7D z53ZFql@1|*UBaGO*<(WbwwRd<56dv!|5L%@zkuc$|5QXJ6_)87(?qsijJKs?fxOg} zEFVTykADgm0@5#xpp%gRI>CqE-A7fugED6M(`&P5+XQ#Bh0V)z zrRa2YbN=LTKw;t`G%A$ba${9zTyuie-aEt_&t+xY$>bkKWQG>(f9lWZ^;bT!e!|M*m6h4EYG8}74w{eaAjGaP(%AX9olV>bOl{>5N#9)k z5Q!ye2v_#g%WC@Qi6dx!;ax8|nf{F|r4M%h8O5FF0Rh;dyt%LL&%>C7D8B4LKYHef z4_O-au#Il3_aV{C?K3-TBs^3*xa1(G>v4e`o(>$- zD%|I@mD~3{@YJNRq%u7BmiNutNEo4DKFb=_D{cv-EQa|^mq_Ek@Zo1SLCM<4<#49^EKbgCi|~7$JZG0+0S`bo0DsnVf>>! z0T{_}3?}CY5e>@tl|6u|6ilBRw;_{d92`Zdf*&cJ7tG#S^m4a|D|P7lQszQPw)Gw+ zbA6Q+qGwk#U1F)083_4SJ%}A%_m=#X+J$iYpB+qbX32};uJe!s#XRvN^!B_0s#5e| ztb5-~i#R0fsV)3&VL1wHj7{gTOvFh)q!ei^R}xtT*e8e4+*_uGHgxd;Q9@ADs!N@; zKcG!>DU~Nv(&k7QN&iBfGSFM%6cA=k#gRLM$6H0Ub221No&fF&sgGDjkCgGW4j5Th zqdcQK7>J(ff1t^j#F^gGte-$>iE!=>%x<~fi9~0 zi0?F4os!)l%gHAcBzt8q)btjhM)|mmxh-hU$|z?>B`ah?d}g= zHaW;8emdNbOI1!4i;+*=i9ak1 zH&45>cP6?DktIZ64}~f#Rm8ap8?>_70Y-J|8@B7KiLGm_P{o>F*WCU zaiNv?QJZq1E=Y!pOCW?xf_NT2FPV3sPi38nER^uA8a-A*6oo?o0&V^Wdv6}r)V24I zw#W0W^)garF~Dk_i^)Sw8ViipY( z353xkR>X+Gq(sR!0g_6D7&3{F1QG&w1+Z%E@80MBcklDO`{5DXd+oKq>pOhD!`gc- zMaDTVg(-8WN^fD2bGw2s78;q=dWX0i=k%mA7=-L#eN70N@&irQy);qoQ71GUE^t8Q4=Foh_Qh?OW9l1Jk;p^)a4OKL2g~0D4U$iT?1pSPi*dQ70^Z{FHZXy$c+{D zZDPpV8eLq8xr*2B0KljQ4fa`y35DauXpw|9e6rjzqCDO4{}f#83vZiiXBF#{CnJml&z=$zK8@xM^Xf4tyUDlyd8#vv8$8Y%4&2mNn0uO0XR2Sj ztFStncCNMlKuK2%(=-qpUKdFj{Mh=*YZ97{Ll#E_7e=fc>Rt$yOW74tLo7<;fS@<# zwKS*`XMMe?&=Ogn#ZB8O@F_dkqaG_0`+OW}c6MrboP4!q)I|FFL727?*&kDu`hW~C zpn4^NfLe=Z54UzjLYK8*iv-?jP5&)b?&(BTB-FesO&fZ!bwP?hGHc&LMC*-++{f7F z6{#h7{b^Y=w>7dq41XEUuo$kSS0$L8VJSq0hEpQV2=5BPb5%@h>en?oYT7&fyI4F` 
zSRd8HFOIzkyPQmxstAmhQK9c4>(=KKBWG;p$IEx_)E-s?r*$UZr<>}NbY1aPWg9iY zdr;~lGBgaw_P7%1*3Puz+Cbq#?n7(7OB*(Om74qtOPNvJ>Ae5K*Khl+?}Pk45g6ji zjqky8{W~dhUt(=uMuOcZnVlGJuth}v+{`35rtxzUWSD@vwRJ>#XNlxyXHnN=eK6GJ zPm6vy4}4C`zZ5e0oLSRO3eDhrughepy0IJ&fjhacpu4isBE{=xvQUoNu^PJ!;aD9~ zpQW2vBxc%f)tOu`vq2J`9y2PtFtjlAQDxu-rfRYNp#hELi3ebZ;4%#7Iz2rBEuv5K z6$DS6Dy0}oO3gqa}x)(bTQ% z62Y+0rD$wc)9TczRaSW=F6B!Uv4(nH_)x}jn7E2v;07GYu!JF4XQo zX=-D;n;~j>g3PCSvgb@D1ypmR?u{Af=OTHwrgGm!?X)-b(H>@JR z<%E+Sjv}cf^ivkbOA+Hxy^h{ZNWR#R0Hep$YZ$}%i7TgL_1&+^zxn4t5L8;ioNKa% z*KX(T&=xOd6|Ny**7aT@W1+J3_*^E=1}(_C()md$$XVCc+%J***nvVwBqVHV9}cel zfy-Rm7Te4kd8lR?LMk>mFdCjS57cjES)`*w=MrU=&}@3FD+Y))gx4&kBk|CeVJWaX zBGV!0V0{vn9a`wyn~pa0x@j}n7vPCEuV^WS z6=U$J4;vh}@uxGjm!Ma;*`Gg3hok!0k z)O=Z1%}mxAW6pAnro6}I)Bd+A*w{!|t|(P=3riU4@x4b&3-_I>9@vb!OcEPH@em=X zggVl@Ywk3zMZA^&TEwA*tp%Dh<;H>o0%VRe>u{t0W6nlD6>fOv-VOmOfBl)K_Sov2y6@{4ApIND7%LBIGF8=n^Q;B#6F+k|DVd%W( z5t1ijXi01)Ji0J+$bR7N6u&`S;38=pc-~xip&1y1m%Ir6Wk}y{l^tslEXFY4bP8V| z)&&7!HfV8seGQUwJxk@P_bZ?#^en9tO&P)}q@LHJ%|@Y%Zx3lXSktFV%F58eqXops z%4P7e$@Vcjto00dOg;LnJL-Euqw28HtQ8U3`vi?tjadcEie4VA zV(Ov?Ned#IL4Kb%jUuo>oyE#PrMJ(=D3D(1^0F%>wdtKACI<HHYZaLDbw9q|*?rT=sc~G%H$xy|fuBDGx5Qc2cqt>KyCvq7ZhI_~o7Gj_15el%48%a}-#Y-XPIdEe?T|4afj%rKz$< zbg<#haGj)IJt`^R@G;0`8n2W?CqhQs{!FQBHxSHwOEBJF_9$+EXr;9`-3bPxGco1d&*j~~$3N2a10s4_Vp z8Ff912OM7rKQvL;;#x+W0-;L%gAZfEM!X=J2RUW40mG@3)InsHY1GhzF8vGdXt06e zd|EOzP-V~z(2w;4Q;?Vjs^Fdz~V4HMbeap(R^xX zO*9~Lsj@@TumK}a2jBBwu_|5btkN}9_4zW04J#zKR$8GV1FXdj%2AwA(=#eTQBcR@ z3F8&8L}8_(zslrazJn}chL9#)dtzX)m^?}yF4~rU&Yocjcr+Aw0LcP-&V-62PCYA; zx?6&4$@O5V?#NMo##O=&E4i?klFI26qw{T(-HaDG(2SV1#lzo4ZROo}2cE^5)sjTK zE<_6Q7q4KM=%+?}< z^#fHpl(m#~_)0`^cLnXdG=_MReO}Y(SuIb{90#B_P&Qp4fs6I3yll9!Fn^RRZb?hO zuE@7WZPetj(mKuvQ@ioY84N7M`0APwppvfJO6OaBMf88_O#7}_$54}Wrp~_-Z7?XP zF4~HiXG80?-Y61Xi@t{xgjy1|>S%r_1<(b!cNxuEa)m4P$3bZw)sWjqg(XfK^I_<9 z%^Hx!`3jD8JAij{v6efcpZ(Ww_CrlqjNc7(Ef&3$3-$*t^q$k#oFdR?5N%<7Ka zk|=CMY|R?NfG%6`sSa^x%FL9(T=U(f(YaTk6=>6s 
zJZhT8JJRozRr@-N-zryzT#3>AmcbLd^b6JpGOsX{RrrIGfu;+W zyg=*K;n~+VHV6u=o}yB5f3nOQC5U)--&3t=Rrq(gxb?6*XOmp%xTacOX!I3S>N-dlf5rB=I+=T9?6IN*Q4%HN6CvBM6e$jJAEl9-(zT5oLCq_P}uSPlBc5< z@!Q$h>Cr0qx@2#2Ir=PAkQi|_`l^rXxG3kMG=-m3C`DfOB=E;w6UVCY1_@&57fszW zfN7)mq}%lqo5>g}`eRT5678$R)jCT;z}?f2iJI&?wx=rfWge=-qVw@fEv~~2BbI1> z{BdVkOnrN}X|qncpx+oHN;gz7xuJ<&Dn}67#9fFE1140OPgDCCYC33UP{`#bgtxAI zJknDWU+BX!4~rvf)9DZXRH2VqlT5uo@lYu6iriR7OK>=|15x1_U^TY7dt7mIOP(M} zwl-ZEnF^d1uU_I&94hl=YL=$v)h8$`SuJZC9#7AhJ^TJ`#7Yx66d5s#s^Ak8S?M_` zudpf!c~C|T9ol#3#NrJxx4!6Phyq~f2npC@hLE@*y7U5-*}$@TRQ#do%HzGs+PZg_KHf9AoSs@#ez=@0KLwR|PSj<6DY4+ob}u7W~- z?r4<8PF@}jK3ZEo>p;EFfH%vfPyEf2yWUJN?Rxl7C{WH8uv1a9-e2N7wMx}lpKG9a z2wE2>z7)hUxj?B7i^jZsLmG$08uVbUzvxDUS0$xe8R~n8&IXV%U7-ZDGfZ4m|G~{=FxQ_mKo>nNMJkKXBw=wkwg@NS1AR3_C13 z>IjFxC6`B_h83+nEDHxfm36ddoOUiEuLrC(S>Ik_+MKXqH6I&<7SNcwyD?-+u*~al zF4aRtTn=}V4Hy7Evp0NP1EOnpv|R0~FYai(_Zuxua?z{e4ePrMiO?WE8=0bZO{NTH z7%%Ub^sY8M=_y5nihqn^XuW?ck+;c$3NY`pT6Y|C>ptuVpp4~RmPk;HPqRv`tVD0J z7qpa4BhQg11z*l2PAz zI1kT{@9_b*x?@nYtTMXldvQU{+bYR170~x!3J+sG^h&jc%3>Py^8(Dz8%K$kG_lAdHm}S_HWcY`{`wA7WE608GZ|9PnA!mQRF7S%iL}c7O81Hb)eq5p zZA3_;rZHFKDP%Mzmdz(XEip*ebRqIk$J-J9Bf=K8;RVb14;7mB19vUUL>u09b30JU zb4u3e>S(j58EMPkUkyL>&Ebb2x?tsxnw0l*+N74W$}XHQ)(?!r&0BH^$zNSzy$jeH ztC1XYHG~6H3_IP8Rfjyh2i- zac#l)NDv(%QHPpWu>4aG*}5sidx9^j#c$SCJjyrR6C{C*@^x4cC`eE(%@vKKi3uNg zFt=iJ)f&M}D4J?p+Q*K*OoF9g#Qs)57i_8AaR+s=5tL?Yl6sQG!fZ6TAu&0(+BGvVjDjxXl$g2tj%ijn~7?KU>4Y?o%UZ58($ug(A?lviu0!>ycNY z8^G%u?B?#+2!E{KCUnCn-%=%aOf;_n62dA=R8k+>IjP>a=M5bi!J^Iayyq4p#3r)Q zYvoO_M~4H@6Ot}MTIL~5GpSZU72JzB^iZtmI(#n$RkH*l2vx{Hb39d=UK4zA)bQV8 z|G-Ds!eCC*Yyf|CQ$&(?YY84&-oq~R;Gu-VWm#z4A@r1lO85g<8U3MXMYTJl5z$!e z>CfkPCuXLZsi#cLTtgjDY7b%jnVX_PSd5Wg*<-M6=*4~oi(w`vSLqYLN~_?=|7#&b z8A=%IBV6<69Ut+1T)Umlj$s`Q2k1PNH;N(soR3W_To1y$sm(wDS?8v4PClS+#EFhR zKlg~RvxlJ{(7u`su(dFlIU}D1OmRMwBQ8 zeRUwW;dw_xsdpK~ID5`xcmFb@S{VeKd(z{9Snxb2r$}wpsOk5qHfbNM<8XJZM=3Q=uR)3-r>Mu5?GCd)o~VS9sy05A4QeowiYZ`QtcnU 
z*jTW9Yti);giKz1*886`=*?9Iec43d2*HcXi&ymYyZu*1d{)P247&G?BIm3xkGL6c z0(xQA$5+NPxXIN9xZ|*5>*JS}AI^klX-L&Xt%H#bnuIY4L>|N~hGlh0`^`Jgz}YdL zVmTrB417{_ZVfTKJBq?db*@I-T&oDy_3*Z#dUL?oj6d|sn>HZpNpNO@b{W2yppnrV z&z4{*@epT#PhA|Y3;<;Biq`+!0bc-w& zG)fNTscQCu$5QQ6g0|`MtLch@U>!xIXLGA{x@g#BNltx-l)t znI&yapE?hZ#s9EBG3*fw0pK1Jib&VK&@BuWsQaB>YElD4K4e27fvcTbX4Q9-wbR zgzD`P8vTL&uUPHf&q5O%F%Q_mfL+VV%a)S1fImDy)8y{ntWnI%#q*h*YfA>)`m|5M z3Kql&)MWb(O$IenM_6x^bg~B-gywAHi^3zumy1t%!UP`Vy@yrC9oI#=>>?hQ$qs>Q ziQw7I@nH8XI;9A%c-(Jx7YY_D7_5ROvU(UQ(claB;8aFUGHzk} zW~OfI^9(l5I226A!$i{7FU>`8Kv(GvR)Aqb{on>`#UM?E;p4DYRl1QzM@?m3XMMon zLh%_@*9(G;m(u9aWLRbRRO}QxDWi|sd&V)O#StALaITg*R;!2*U9`tn0k|IR36pVHxac{>?U0>5mCSg&)tz^I%;p47(h~--gu3P}pZj9F= z(qbacSoL(8yU~FY?btm8nnTuj!&ON$Z2Y53fIS4xa(ZoDxGt=WlkDaam*S?? zq}DPe#^q5FxSV)|)5c0D>$*hsl*+(q_U^bvX3)i`j$UK4ff#RDb{!7&TAB($P_otV zB>#rRFTp}QB|jHMp?AOBx7)c|yO9S)YxWJth@svaWm1Z^gW|#pY+%!nC>8<*nH?J0 zoWgFBs~Ztk$afn(ig?V==zw1@N#>P)3ezTPnB8)zw)#ptHg!iJXN5E1P4Jr#l@p)k z{fTHITU$iU`kGNuw3DB5!@!EmsPSxM1HRsT=Jp@$fPJX`sr_b@ z@MQ)nxOD?sYJoE!gIjh?-FcG2U%kKx^Y} zmM%qQCWID8Yz+%BctZ9T(lxlO04Fu;k*!=pYhyu~fSINXd{=pN)dt6=m15|e&yd?@ z!x0;WSL)qnK2Tt~H)AY^THu`Z-?*UHxSd1B?|?8D!8X7bZm=Bt+0hXmAv^pcD2gf~ zrBP}I*oy0}A*j5y*A?USW2DD?x2T9mmHB+H=Ki^k3$8O(RmvVt{Wt zphcM~jt6?LnPdDvh+``3}LUA{L_UYCMAX9T;18?hzW=>gP~|h%98h zg?POLXBcxnS8uwo$DF|!koj7Hd#`E+xP$)|xS6Ge$gC9*c>z$--46@Q#K+@QcJcuw zD-YO{j8TBTm5vOscz*>jdvl=2#4o_q4uKJHDw;XBKg3S{&zC+Xx0D7exAvNmzX9dB zF?a*c{V=OzXWcV>2H;nBgh8)%;HPNe(tZM_)#kuiLQ1%X*`jB6vL?YsBe3fRxD>Ag zo>s88)vfU;adb=?#59o}+YtrvWdB0KShMfmh0rf>e*oTSmzu_?vqlZj0zygS;beqdkXEjL^Ggt(r?BE$UYBDzhu0OOd zQy3E%gS5xw?w8VrYtI(x`e-^^z@Y0PZ*qv)%(?X;dTLJkjvUTsaXTNbQY`77-1Q?v zk*UDUZJf-x)T|P)2U`d&*-V`MfLpN@k%~NJXR4g(*I9zDN=dUW!!e;&;m|EjM^n(i z3Th-QcDt%fF@ARPvR{>Vn#QY|#J-D->8K6xDU=v%x|{27L_N%)Y4F&-HgnnypkETL zGPSls!jTy!2>2i-?q8H6Q)LB&@$pYl#aK#p*a6?t+yZm=9y zU});AH<-=AyDz06x?S=O3P!SVaUF zz}3DZxxrZr{D7LZ-yC(@uYKx(3H}r+AU^Es&OJ=V$&A%9T{AQgb=Lf2xQ4CHuyL~( 
zAC#-Tkd;KohFOEu>R}j1xHfd1krjbcOW+?7bUg#`^S+`jiiO>3cp>%N8cVui&qF6n z@^!*d+4tS#3vd$B(~0G3_zc>)ke;o^;)4nEPXyET`K#kns#6ek#s^d=S;JgtxP=)c zmk@VcFJc+%4{6}lOO<|5Z~;8+ZoDBgf?+(c!Yn~C<%cK{yE>%Mz#ff-tygT>p@Sxb z_^|ZqB9Fc^LP8YKJuaap=1&moD1v^}cwK657GV|xf4hqbmToi=FSgp8VImykMGloQ z&M@{A)}^_<*%~JxeT$xE1J;`$lZwkutkDKgPWUG$(lL)Tj~vj z_31X$;^eekrXv4y?s)>sl~r0MYf}x@w6G8-Bn7-Y6{In@>Yo6BQQYV|!-6>F&BjdzPaG^GsEQ@RAR=lgwo?jqN@2;g_}aOG|`Fr{V$T!i>)%T>7#>$SNV`sXEw$)Y+&th2)IYrE1KJ48^+MT8M_#v;YUt zUF6F}R_hWgjW72U%czmmH5O?Q)NI7Z>pGZ53}CYllBOVIVx_lWfEc|2y|>>d1**tzq=Tc5}aXD$P-OQ7}+0T*+EFQP7FQtjL|S5smlSC zIhwyp(R|hFo!7@GiD55IY^r@lu!cZ`leRizBPiwZh(Nw6jY^Wy6EmhdSs&NY^MSSpC}U4JT4TZOjST~cvxTUyCNJ-Qh`T38W^EP9-8SBV z3EUkx!G7t=jD8=0DUXCB#b#u|Ry#KbbPHuzf#hQg?8_cMH-3Zd@P_#AF8tQCUNtrz zq#XB#sn<&6y{S~pN7x$k{dj0MKFc++!~E4B>w6c1SC~_f#4LTtT(B z_J~8ugo^60(l;CBJe&zo&ZkFDMSWE>Le=%eb`Z(j?cr$CEhH^wEgU+6a76=7+qvdM zKJ4HhP>ygT>l7NX5T|mcA$;jhz*BthiKcKAvfW*~(1PT64KBPPpqHW^FH8fnsaN-s z@mgwiGv|?~+j!B@?RK1JCO;5H#aM&6?Fx3|So-!3m^7$xidKxWK4@Ee3KGu_g? 
z;EJfdDz!?qNP%L!DTZr(t=0YK%jHQdk%Pkf6clGDE(f7Z?6cBVXLJp2C;_^d>Za?F z2w2V{Kiw934hsr@WMbCWJL!_QreOm22>S^;>l9<~Dfs6o*2e@+O}~GXptXVEE%4}; zdy>XmT$}x}ox3rL#Yy8v9i(%3)O-$OTPEw-YM21+x3P!Kc0AQrX?CEr=nv zMvz=R-eu@T+Uo5JC(8F%a_*m!Cyev*oV0x+Tn-3YMDeLfo`kGL#Y#Imhg%Lo(h*H} zfw7Gi&$X%6B8#ydBE_{12SkVlQ{Q5PRt=L@5rYh2SdIDC-eQ~~43~Grb!VmSz(cAo zyn}|#UBA*z@X&7FS_xt4fK5!qMoJT46KiF; zN@~VSaR^9S0SEq*rA^*rUV6*updFul7-u0RrPibr^KdS#^#Sb0Gcd(I#wZg-fNQ%< zdTa8a|I^;#?OA>l!({nyqHS5HYD0Lcr5Zb|0ykc`fG~9@QW;To_O!xL556BX-V82A z+i{p)g9GpEmCh76UVd09g7fWE=yFm z1_Z~bh*H-?OK9rjRLDmT|2jN*VnjWRmEhedCQ=}X>jY+7MToFpmck^|ZO*~1W*Wol zbb-qAuo@?JI$GxF2?(Y46nN6RCD_B&aLCHMzRR!T4dxhScv!!Cc=12sQ;f*O6e3~j zf0l2DsBo?{z=tczh|2nMfuK3SkFAeLxa&=aQ?M`&$9U8R$!u!;lKe=mNmmdPd=sH*M3lSRfWN9@t1E7=B=A6G=!-3lB+yQtrfp{hBBeZWUdikU~% zdU?Yl#f3hBa|6i+2{f^bHq!hVSae2`8yV)pI@FNXpkld_OW?^nl*Rp};W=CkA_a`)%fd?J{TWVO(#VCVU4F%6zD)&uc(hl7+4^EL<#PN znDKO#ns94?_zTNuU{`FTbAmxs^L?c%VwYXIfDpfsE5_N7tw!9kvRv^5CoQaIQHICV zUSPpaN^)uz+8!Y>lb)FU_o0*+=&l^8STr?((hQ>|gyfYlZHk>-B2qmhGxzw#WD?WD ztZ4Uawmzwpot`@HIF$llZE4>)F>)O}kv)bkjkKd*F%*8u94V=Z(M1T?#C2FcVre5j z!}^0dGJ}X)QTaCitr5V_f^$bINh23! 
zw=8suV@?(gZjGR=KUb<|wOV*OcacHiunnAS+02}R6})b;>ra}N!ql_9BwC|4l6$LGdKW{vJ;7&4Nf8$j2NR0cT`wnyp&IZ&3 zU$rbaBACs>U!SsGi#ssOus<74|ABfkQ*M3xY{8;gKlnz!pv9G+|F`S6xC$7exjG$I z{e@A4q1X+!U?ry4#}6^Hn-0vJ`es!dSQF{bJGZc~5K<@3ezIdex^ z6}+7ZR=Rq1{FpTl@yCw!O(#>hy|D+AuGI#o-5QG_%x-pJZS{x$Qqt{dBq9G-+y^aA7M*pI$m|}3+$3N)%+XxiDLl= zi&NlFP9D5pd*ZE|K5es!U2dC@ur%?VTRs5(LatOQH4AueS&g;3l5J&_Sl<4!brY** z_wTSW=(Zfl(f;7k6u9ub#OJ~N6OPE!GS`Le&>KefVhI@1A1@o@!xpujZcYjCORB7H zqwwZ^Ywck6a5(;S?(&08`SVu~o~~`Db%F1oC(fUK|4ofC@?OkYh~+MZWwizHo^n5X zbu0L7>{xl{%i5UCu7m$v$nku}C>X0dT62`I4!b%r_y!mpGe?cR|6k%`2Y0wPT+3UJ zzc&9{lqH~mpw~kY9Mbqe;ad;{U^(fBOj=>$JjDLv<(xRDWpNWJho;LXZ?+R^)=oU( zznHV^yzMc&>LdO!lhuy@=SHVw>ij&|)*bsH0Y;|k#25o%0KSBrFYEC zf7vawjUIEq=?T?aOsZg14;73wrwzR>G+BT`w<* zv~wm*KEIE9qr?4(Hk&c&sO?(AWI(PkS#pxR_D-_h8y)`&w0lfCu&-aKjC+)P?^>Qe zHP;oD8#JwFo^Le#5q-k(1uNm31#^BWKb{97Ybc`OuHJhMaq^q=kCBu&*l~LwFld7K zg`BZu@Ai$kJO&rjIJzP6F*yW3DR~mP+TqQ%M4-7F#IXmHk%5kv)=xYs>Uu*Ow}~F$ zs=ILCOWSqS&$yu;rt?u;iG-?nqs{&`CmertCXL*qy?_ zi@?OA;B)J$Wn8I@n`7$85i_-gGRFWR^{u^cSZlbSK>SbrtG12COyP zM!JmjL*!T&*KJZ2LZ9MJjU%$BuQK-zJ(rof2xkkxE~hgd+98kg{xl^|R@sK;qJqTP z(dZ4P>}2@_W5mB@>=#eD`^0mnnM>G8Q9eWA#vHKx0)|1TQZLCjuCmhc&A(<4_E;X{ zr}k_5v&4Pti_EtZCNk4e4-|to8MbvQYJTC^9-+q1`!gT+GMP;Jh2Fd9pOAC6W z$Nw@+aqj=IOO*L1ar684ohH}kGzG>qi#$B6lH#hm+W#B0`Pajvv5t3O;q<56$mxO3c6Y-P zs)H~Z&*a(LXfxV$J3xH<;A8v)|24LWmfH!L;c2&4`EQ@tvJHwK8wOVMdSKrvnUFaq zZoT$Asb*H)s%9gsH)tUEe{<13c_{xb{6S6E3wiAl(rEttH38KXYUbxZ3kBz|6$)|R zM*>%4rTv(G7B<#Dm|@=majanOA*AD3ii7faQwmJ%xs2qusEy)3BL!B&<&G{oM|d-G zRzYvRI||J>7)4@Q=kt93ljs&cdOo6mPXBOkxy>Oig2L?`B79RK(`PWfDkMxDOFJe} zMFueC=W0@Q&3*+5``zQ=;hcc0D~3@WlYT>A5@<*JzGpzG^#h(Ie@YW9E&aC&A_39_ zJI`i5x1bFF?pLP}q6D?ZWpWjrvNW8urKM4Ld;eD02OkdWAwj7vTT`U_KM3$P+OxJlU5MOy^y<_xWx~v#3ns-SD;=NmqUtCCX64W zWFlrn5KYg1zDom_ZBwR_EFP_+c-hNkeDbH&@ZhxE=ear;s{rW4a6a}I-r+H$M6aXG>GUTSH#IOQ^v3(zgmd!_ZM#~l8T8Vg=&3LqdJ_l~p) zQz(o`F3#`+p3MVq;^@LB>LkYKcujK*H#hPu4Qs0NDplc%cAGHY@8Gh}!Ud_g_=t;p zeJIT%oXMiV!dZLyBs>TD0f_Nub(0v&81*5Kpj~{wuQ%)yy90EYj){NYcyX%WBP(G) 
ze)p#xTP;8{N9#=RKOb?%-Yx#p=)cuXlMW0;5o)9uxABCh%-kFq-k|W@+XC%IlIw6O^uG?lU#F46x46 z+GCffbS9XrewP}mO()g=6FZINm}L>rrQmIjccg3oc`c3@;Tp{?MqN%z+No@lW6xqj zs&%-nnq`f&jmqbgLAu)Fhk7~Ek4h_!)zmT~H=+6eZGG6UbN>%+G1IGQ^vgeZqRelz zcyseT;M|30n&yge^~?OWGZZIL9b|jh`0q!Q=Fgd9zo%-(0^{^Wx2X0WJ~`8n8%_TS zynLEjZ-9rGyhC4siZo+VHJw#|8~IANdl*colViUo0kZPcsxs;}h2-$q&Sm!XnPlPI z=E8Zl_)^&d2vKq5V*+9i?XCW=juwTzq_NK0M`~GnO&PVXU4EBz#^-&D4o?Am{n)GJX)E-HUne}s?pLCxk88#u$ zVX|;^RdqyR$Dd|Hn9fap{rs;?OiLdA?GLzP6O8yfNttb-n7$>3MH>8uC#uiZln=bP zbi}@^7*lp~E6?Uaa`aeN;Eb_B->zF}E{atOPN=bk6YBVJTIVZ_ykGjMRb20R{&hrZ zQU9FZU?2SV@PR({Sl2=^1RMW$1XVtgFdilzE~7feN^u;54T2FF2%~3_p9VV4a2_+K zt5~R73m{A4jNr_lsdb2a1zDEqz&3WxOwf`YUHF;fWM2RAJ4pXEw}K7DUA=QNjG4b? zRC@teHxD5xiAYmF-@*csfstr2Z`HgUinGx;~)_ z%$|U}&PvjL*2n~uW&P{ey+6%x+0!|7*r|xR2=kcL`3Yjiy0jC3qk(LHLU`HJK!3)P zS{eiTwtkO+xl^gY7@l|{>RwUkF%!gs*mXK?%Z|3vXRG+71bx=qbbPdY#A6~hog}U; zqe2Moi!#b9`-#0SRC%-n;}hbR7r4*#u}}9moC^TX0bouVNi0W^zo}{W9wqVBV>@}e zAZ6Ph$eJ=})xiS5$CrAsYT5kP125l($iA!slSk)Y)gJyXb~|g+>1xTkL{iF8rwM1_ zF}$aN%8eozz4C}IUUZBWtsce^PPM-&xJ7mO%Xng&c{!cZ*vqSjoXzI8lTO1T0Vl+o8t5b^l*H* z9EPcLnsx5WPKvHsiiF*ET1svPFQ4rKkT;}0A zCvOdSef6n8X3jtg7BB&mQ2oJel;^8XI1*(uuggVe~h%a?V4R|9Qkpaw%N5vyHL{3Ta74(k~fVI-&(R+7i%4$&7M82nX;O zTP;+rTg?*nV@0!R&OX7c3YPt86#*baNh9yP?|<^vk*1r0y=5we2Gq}R+coFxJy52f zUP#+0zkmsymzVS~t03l#zQ(L-fQftJRRW+!X*~0nuly_(k!{uP_YB{>qnBOQY~uyd zAfPPyud}pP{fn|Anl74dw{lKz*DhATfb&zeuwhy}^ZuzIzyvcfWCo9CY zXSRs@_jWl9!o!S1`X!jCZH^4>@C0tkZJfRod)B|nz8Y?HI^lRJ&62`Lmb$eXn(3ae zQ?V~3t9?bOSeXKZ;XQ9alz}~+yuOE;HMlp87V(Ym2GV(y$NxNW((VLr4*Ks2-)*|B zk518ujjog80&}CmEX977-$7F_giJ1i-q+fU)1>B=<6is3*h**J$kEQ=SrAvf8(o}q zJfDYqz}L5wo#j_RFPx$T&<&Vu1o(9et_&WRY{3OxS=r1;pfcmIapnu1DJ>X9> zMua<(Qa$>M3~UJw zs(fz~Zmksf*oWo@7smvN=l+MX#8ti1^he5(D^2N8*)gxiGHrxm0-1wL7{2rz%_cR3 z5sJ~7eal!i!&GC^KRILch;pZ#uG1sx>yYXTvv{3^!0xW)Phw_Lo7vR%w(_k1i>yH# z)4jQ+a`s*Ls=kY{n&(d)9)n#==wS06a5UM>3aI#d!~0YBv(3}U_1==a*?LXILOD5M zbU`H8gvolmKHuL6_7<}qKfWrTd7T3dXKufJa$?7#>8lkBINeUR#JkDw&s;gSU{(=+ 
zT;~1)6jt45>P7FpKL0N;EBC6KYZHmH-TYumbQ~7U^s#@>E1JJsZ#vn(;iu^*y?6WC ztRFgh)LnZ&z;VIM>D*3P?DTyvUeIVXVOQY9Y(a0yh1oIMxS8`D1E$?H*qma{j9Jgz z3^op(rFZuK|L~J&{Br5%G9D(WvbUY)4QK6nqiiX>|$rh^yN8R7V;%O+E_3 z$fh(AuG+T6r5vK??DN2{Z*5OF{&GHGk@)uIV9L{W5zhTR@yZi+vI#|2V~dV$Y3rFC z>GUlIP~vkz2P}fx-@Eej5@HlL0#zJncU&)>ddT4{IWm#Fcxp<|Hp_KV^6N*uceC{A z+_00EHm(2liBLMsm3DEX?>WA**_n1u-+u$ks;62sI zJZ!m4T05E0HEZhx4$(9ZR)*Fnt>k-L^9RE_VjE$_M~!L7CoQmUb}fVbOP zCmIqGY7c?PIw4`_vdcGfXQe#d*Ejzt{YUl*#-+{H_ywJX42;{fkhAwc8SnN1=R`Yw z0La%ajgR8?_$>FfeN!*?{}a@Yf2V;EVZ#x1eo4@T$KYFfVrABen!rWa6N|)KB6Z1% zZ>G!XGn};kYvt0qm*D8S{}swd`ecOZ3@3rr2;Z8$aC-6IKgM2J$2j@#`wI!|hPy8! zecvX+|NW2WpC$EhA3b{1<21A_f|EYhrD}@VKU3M5d0Ou67jx29=eb-ipOb@`lZWBL z-_mgFNN#S7+qa;4X~YY9q zP#~F)cz4*F=51j7b~nuZ+CE>?hbJ6C!E&aL>3;(NX{UsrlsyI%*SP=ulza5A3i0hh z4XAX(oIYuzOtQxZFG6&-DLzFup^pfg=R>tYuuT;a72Gp_nSkbp&CF^1`i1TGs;@uz z0?ZI32UdcLq)tWWkSs5GZi^dJ$A$+G6GXh<+mo-$m7VR14Dr1ebJm~ycVO6^E|~tZ@6CykmhcmfJz#=r4sOmb-hnm<>g0-oz>x}sB`!rbH7JXiq0wUD zTzQv9JL`>3?pN;zEZRA5WBj%@3TsZD(7PmctoEMhDK|Z9qVFA3KCx{M`Pxw--% zTQD7gi||-!OH#uJ|DuIJm+v)vjOZ6Yq=&^S`DQ}aJBm8ngdyWENCdT@F-7NWE-)i| ztJw3-ePD;<#6_y0bM+1d#jhO9bc1QLyvL~BT>orCVpPNiGR0h$Z%%tRs(yn!gjklQT4}HY*$|?HP^r49ivD- z_=UUiq6c*mSM1vqo8li})v}omrvJ49ii+LfgS`p!k|L>m}Ue!ji$uefK*i&lytZ!_b5(s4M zWv?02~7gSgfz>v6;H#lNapNDr`OltYXq~SzZ^aPlq;Cr2(A;2*|jy(vDi!- zJ_x!2FW%w?;Prc3mXO|r;@ckrt66vg%z?h{`kQ}%NB`$(_L~|y%i90#?hW1jU*7$% zqWu5R$c`-qK?EH@E@D*M*Gd7wyXn#x&LR69gFo(gVt%3gMC>w;&eU4%65yq^Nse-hh0ue^^H9%v&+2HA_@H^5Ut4ATUAtR~&1?8DI3ksq60~ zzu)Xv>dA2~Tes|#Xi`K`gAzwrY}<>s-dO@}GNmGZ<8j!IvK(H&H>1XJafB_{1Vjj% z`^BS?Ztg2KJ;VEYQzj zQq4z@|As^-@fZ7R#)@c86i@Ia^1OdHOnyK{uL>`H%C)#p%vOS?LEM^t^yaJFzr7x( zb_WYScy9cKke|2yt2T-`^4*a;L6OIJ+5_sH9vlAAy!eD8?d@9ct>@~hLQc;4hdqA#+y6OWbZ4hPXH=S!eAI&{ z6=%mSy)IzeoIg&yQPX#fLjz2)B{%a=FZzD?*?7o@Y5N*p`CREE)5F#M94XfR(fJUU z?jAM0V{#vFc=gBu59K=|TjI5+T+98>xjyvWeHIPqRciw7FdQS^b1hFoxxA-f8z_%o zk#66Td3;h1!LjuD_f^@uW=~`kX|C{{!)Rl*wuaazxrsj9j+(@OX)byCbi%GSF6%bE 
z{g8WtN$X2&)ZFJCABfDyom|<@Aid0`$;}MnD~lj&*B{iU91z`6!5&2LMG zxV3PeUWsA=N#nUT!?YA;7bM~GYn$xepnoz5b$Y!Li-Q%?c%u%Z8ywAT z3l+uEgtf->?WG;WakEfTgC`76e&W!1L}*T{E2#)LG_BVSA9H|^#j>jMmQa~POx6>B z#1@D%8+R`LvqrNtZTV@tIxg!8S=`v5Xj!Ny6f`PX?!e0%(s<>{g-Yf#D@(5>z($*9 zxsm_JVNpcD_WRC@h1;_FDy>G*zMT#B9dY9Jx?S5AToAb(jAD6H)jfT3`ChyGY~x6p zhR(-0!q7h^&wYJdsmLpmC!jjpM_?9$Xsowj-AXo|Ic3-%j-p3DKB`dPqHgquXrpHA zL2SUQ-?xQ63>(^tP2%`Dlv?39^2J4cEX9?~eC3ta zYwztDqRgmdwbxM&d*1z#zo1>QD4xQ15<|?`+qPWEY!zsTvkgsYFL&FN=5aMw6x5il zSJA0?mQ`Wjv5nc!*iAB20sV?Tyw(BCLT(MqFDLzA6kmUtsHnW;Uvjcv-B)tV#mF+!eSu5-YzO#9L}yA#VWlID43ddQ!og?(53b#W8(Vh$zq^zPt$ zrlq&zXKWDuqMuqIi2;Z#lb#7PBQtL31*<77TVhcXmO4A_~cMV zMD$)R!3gm)eG=)$8h_1G&R#ivUCt(}##qnv>4?nf%SsEMm;&r5It|vXS<7dO7@h_>!1V*ELum6 zrYWTRIldL#p~OW(hiToPLaK+UJX=rH88y^kkDyYo`x6wOX`LNM_5vST*ZpfRWx+ei z34k{N$&^nw*S|d{(E|P#Fp=ZNcVDh{#8@1j+1%VrNyC6-9!nlMzxM@|%oFLEEO!Aw z@oz1A-4x=ig~rCl!~n7v*86+LqHrUg33=DpVZz{ELsX+(dDYzfph@AyfiQC5zJH#*$6tyb@%~tarP7 z0s|JgD}XrkT=DQ^@^7#fY4kki?ge^jN(EBzA73;8MujRTflH9FY$1}co+}S>HGxyFmm8fS|P|Mw~vtRc@O%m5X*K!dK17ioLv`E|x)Gf!AcDo4wak zDrNo#7L#F0IcXi?A|{Aqd!v%1u0f6%4)_@JgjED4m^VF{34^3`)@ktj`~hB2qlh~^ z6ZYqHC#wtpWZQEsi7{Ze;K3ZGk>nwadlx~*{3;dZ?nP6Qw^SDM!iD5Oum#w0A_7k7 zUzslK`|aKW7$Kw6NGArB-DQ`cF9ORT(&`jEUU1V6waCNJ$=hL#&WZ9dHSZy0d{KLj z0Q762*b8v@yM6Ebo0_|tROo6_Q&+3)f=-DqMye{NWSOcQJeWirh} z@Wea;uq3T04q0U7NK6t48*X<9JH?AXrMdINs}#$1sW2edUZC z5{Z-;I?f+F7g|mEZ0z1e2}t_83>@($Nxo)n;}`@i7#!C*Z;whM*A zHJtLqj$c8Nh$qP$>+{Xa#vncM?%_p{F#RxL4dP#It*d;xSdbZZ`{K|{1DW*nSjw^F z#>|$Isk;;%CON*Om~-+|n7i*}Q91FYRbmnlrTe?L%e%S68qCqqwBmg$;eERsKZbH@ z=%44Cn2^ik^4a?d8+b>=HJfQ;Hkd+Kn4?^Me3H&rC!DX~MY01>B#Q}SUXrP*eEbPY z6Q^0E&>nS4Ihho&^5Xc}r+-j4c~O=fcf7vtCgAPRK2Ux7Ct2YPw*m41q|Ic=U+dF^ zk9Lh%P^>sJYtAS|!wkZYl<|}1hr7H?ydAJ4Ka5^mK9>|;9Dfh_r#^BJK7ygybVR(w z`^59Ax|=gHbBC@PV{)7j^t51_TT-60-R2J4DTWu;PjReY@;&=LZ`od%=F02tHt@0I z8zy_1PGb7aSh;xI>ipAlZ$@1@t-?IuX2+lPjAG_o}i}Yt=@o$p7LP&wbT)1WNVSI`WG z(st>r56+lgWPg0yc8`YtOtpEIS-E*PU#nP8SV+|#M~;zMad|ub$9t8xkvBJQm}4-B 
z|IL(N);fQ0{)BJF*K3@tCUpkAB+P65_lzBy;x=G|H4Svd{UhrhT3KyY-}ms$G?!;; z@9X^H1Q>xi_*e}?9Vdz=)BbqQjW9;>u>6&0XNE4p z>3A02xizpGlc08(FJ6C^(j=`N(ibn@W*`*s_>!{(&d-UGq8OzbddG@_oCrR-1FLn* zYHi>cM8dc*r(VE7Z5(RU`uX1hi(^h3tga2FDyTQLl1@`c!DK2#O)mBnwY2<{bQe0r z0|%xVdGLKDt%rSd^#430_1x(Hi1e`S+Tut*p8p*8af{uj@yGVRt;wrzD6e~Mb5)r! z+6v2cyd;YydfEf6FN5j+{AlP#p8?j9MvIV^_OV5eG z02n#e*`%ohK#&hUJO*5(E^9l@`jcK}K_-T$5K??y-QE2_E(IS zPRv|5Rn6GLxJuXSJh*6KJjY}OE=}H}kxlN-NA#CMdhl%2aOcS?D5MAT-tQGBlqpLa zw`~#L96fx3OuRsD8{YYI%k^8+?R?zl%1Wz*XKITc`z@lA*rMRm*IyuyGB&SdO2XYT8~oc z8D+a3I8L0qXGH2HsehP-8{;nPz%3k0E=-333^-7W*q=2gsGS&Smfkmcr&q!{K-ZT{ zeR|K}>M02L4i-kw(wt!TtWy(vujr~pIX{#Sz^_%I2SEyU%-0{4cf5leOCcxZR7Ozw>1y|LLNwp zt?oNA4`mRz`yoZiIfj(K_eh^vt-bNoUs&XGvQAfYizs z!4;}bWIFF0j*+oA6>=;=>>1>=Ge;3(0TIkDs?+sHO((J?NdpQ7;e9tSb`_!>6*s7v z?(@oNj`cLUpYlvOdkl2=A_bD}u3RvWEUyYUdWx_*!1Z(;JHYu!pfq3V|I&DZ`C3Z+ zd*?K1SRj>ac_Ie(@)pPV-9^?Z69e4?@@7XalI9kAw2Rl;El-O3+>K^VF-NJk$lAQf zISpJ;x0J~x3WN~jittQ`jw!5DK*t8DA(HGzXv&*pIm_XCz`&EPQ8Yv|V|wCl?{k`A zs=!k37VI8ZB6NvS#&Y*kZWZ1k-eCJHf8{R)Sfo-b7k$ZGUaFtFKN(uiGr8V?qYk*_ z=T4-oTbk~-{j|>h>}C|X%vKAMQ*t@H0lA}Pmx}ExgHPsUFLvq$2&N*L7sm+7S_U62J0P z51J2vu8vy_ebw)HQ{^{v?BQwQqM0N(@T$YD>-dJ`WgZhH>8Anu1z?FzBLOZ5Eoc%y2#;jPMZ&pmoij;xgV=k+(%C@I z-_EssRPj+qwEld3_VxurHGqG*qG7ZFX=BMeQznPjak%UB6R#40lYyF%@-_MADV|2d z;i!$_)+t=ev0eTKVMquQuP|q>1ezeDT^l7VaDEy9^uOZ(eHSU-i(37oM9YaZ%Ws!e zGPavv+a7yejiztVb5Ru1Fiw=^)k-E9q03>cv+Fx9b(s`k!Z>OaSPt_(T6&ghC1?`# zT7t3UB*y8$gE4BF@#sF*YE zE5SDZxd0^s*Ce~kJ)@!^L|3eiIytL9suHg%<(#f$vU>U(h@k~yy>4OgT7~LTR^CSi zdtp-jPGo!c?Gwx(r`}hye$FFZ2aw0K!=DO8GsFzP&wEmr% zHFf@kNr^jSx4&M#eBzk%m&g2^fAo?nDPr6)qq&c+di{QqXEOO`YwJUtB@gO&J{1c( zL|Ja*f4QU_CJv5|r&zTUX9$-y%n&L%O1$awsATrdSaz2R#(kHHc|Th>vI=XRuS`3C zTrE|Jx^{w5q2^KU8iDdEfnzcJb^TCKBhBdJ z%Prd2okO4BU@ajXw~je8hJ4XTFUmTX?QBr#OVUuBRS$QYA@rLr9ZPf&mzDBk#ihh> z=e!bUX%7qBF4mjNy6RNvC9sSq034^dGD}N%k1`^~2kt=jAu9V2Z;^b6Gk6ne^}D-{ z__0%{WUji9IuviyY#!w7*%bhR1h~*n(8aJaU3uTy~Lbsg|!ogZWyd65#f! 
zm|`lHM@Ypcq$+i1zIARCK}w{>PW>C1(?yYfneh!kg0gZC`#zU*!#Vi0b)$If17Azp z-3~s-oWw76KE?i))AntIZtXlJGQ9OKd#^obOY;Thu#4}TOH~_`bKAJl^S$8pOoA3@cA!PkIF?wB?;*X+@zQV|K4a! zjkGR%O2|;N^w;V2zgT*1hnuyga9ObF*A6G*8@4#)u*@C}nK@E}!b=^aZAxl>vFOyT zB|_KgG2t)E%WvFV66{*t%<~J*fA#iKHhmU6RGr%Lh+?y#=?~KCn@bkE@|x*^Fvge5 zQ`Qu`u3-yvL_VeZ~MovXeyen{(F-92dk-e>>5+q*a|Z^O1m+ z=|ikrFHl}|bToCXKN2sUR4l3dsf+TCqFJ6)qqdT3!dl_^;*5$Wo%3f*$qv0hp zD)HkkbY({oOL7*(uDSTzicGWyhp@$@2B&<2Hl2i~)? zJf#L7qQbdTL+9(n?%-5XzS4K%Ymb~UH&30G(9~dWR&74Glb zC$(E(S-gE?XO+9QzOOT>Nu%-hjTr|&+(18cF0U>W;4VC0p(U)b#v%`?Qqx^)mB=24 zB)@SI9!pXfk6q1#uYW8E@2ub_Z&apDHLBHGRr;>J^46vn0&Qk|hn8^LCc#cj5iRos znj#rAXA@PCyjqbAa@MVL+A>{8E+0+4nqsR+{ytFYxgr_VbOC%Bq@OaPl4%WpDT4lX z>dm(7C&|~2a4jW1M4sRDU{K9UUoO}R)=o71}*nOItMo=E10%H+Qpuudf@FviF zkoihAqzi1e^26J+Zg&ucw@l0aM6%9%l5&7lQ3N3 z<?76NT;*~cm z*PYb4IDzJ{cF+&XT%53B6?0;$(&-I@TEphXj^US%tEMW=?l=1AG~V6YjSuVT#^c1i zZ2K=-0vK7Gl^~EdOmXnryW!-#n?o}uvX2wvvsG!YR`eIb&5)V@PM!}Bob~khA*}L5 zY5~LKu$>@q%%)u1)dMRQL|dJ10E`^-NMxp^x<%wA=S_mZj?yLJN*SLMdIhvCDfbO8TJkJ)T2bi7- zoV9Lp-Vm#eg3V(LjvpymIk?@QFC6~6=rz_9xpL;QsqsI|6t#1ykIq=>-3b5pu*AU$ zDnuZU7K;m=woD%udu*_pbT7o8yp~wne}LRafw-)ypsPFY-@s&sP8h=o4W(<1L#1~*0B!n{cXuIh%$mEfuaxSH!v z8aVNt?cAyJWKP2?3=HnVPskEa&L$4A8hE&~>N|N&Dz>qJ7JvU!{EFuz`$(ufl$c9NKzWoZms$1`vE0ODwwjh zcGm#kW$optQptn~&i}|xT|VFrNkc=~7q4o8?~ebEm*a0_)$IcdC7!9uwriy^XqoeG zLoD0bH;;r3_B#n3VvCziPhoZtrBX}6Lq@sJ`qV~_=*l!nO^qGimoij!m1#)dZ0UCD zHJ1j|ZE4CXn_Wx#6OPgzb@R2p#2ewfz%P1a=y%uF+1_kg_Z8WR=+bze_9t^eE&tkH zw71LL7c@2++eYm-NdrTJs=kFi?Ppu6P}IM^uCL|mnNuilDbiTxlRMF0>E1-Xjk!9UUhm~oNdukxJ(j(@qIl;B0I2~CRH5`IZ z2<>wKiINo2L&mMBt`3R%io9PwZO0uaj$glt?ZCM0?XmmDGUoil;GyyuTvZVlo~MNb#6%z@(p*S0s;L4_t2CE=s|lo>`g< zbm^4fwP0TIxGd-dQkf}d^+wQrDD}H zHSQxON?v3CHv@h<4;{XKps_SwWg)E06S2xQbKFi#(SU9o4L6T?7}pszgO{9II>9x8 zHH{OOa|Ck8F4*pqJD0wKtMqTt>j1+M#I%*1!Af7%G=akT>zJv(<2k8n%Rt9Z>xj!+ z;;V1yxs=N->@|aa6!IBQTsV6y{cFCXH!i5vIz9MdqVJlX*D-4UZt9v~oiNk63q`@T zM&xqiwzn~Z*KF2Gr=iRVyCb(n%;dW{rxQyLf5Db)m?I3%n+WKRAYxK5a!P|%Z-}UL 
zZ_ZeMQm^{qapDPqcM0xg2PL1_@Mp(}hF4T*GxNy0BHb1|>mKAcCo@&k<~rI5DsRya zXij@@aJ~~;bK_V0b0aI*9$zx=`Co6VD&=ZkqKR!Wl`TGwKa!tWDH>$Z^Uo@KU7?_W zVUPp5bbP4pWSWt^=D*LzjeewPX%ioQ@&2A%M_%4f#b02brpM2=IAQ8>Z|ST;4MP0G zht7I?@<p@0XV(UgU!)p2 zH^9Sn?H5e%+0&th8_D)B*|XTfqM-A`{NsP!rEl`=sL9P|ma&Af1PbZgN47;_eR(vNhRCCbx<+3cCbdo3Dn7(uV z+qi(CtLL?=(LSuNb-i-?V8ZtEyurIDS5-mEyVRpI_5}YpMkBZ3X32Nw#FE92FwQTH z;!VSYe&uSqgw$?}$qcPqY?#f@8LUA>u8cim%#G|FEAsrV>*jTYjQK_ba!lK5vG9gd zY=QdOsR1ai8{~$ZKQ;7X#>hwERnZ5tMo+d&bs*auS-Td9*`ueiGee`w(?ZO?K!{he z*NzyhbG=t+Wz1eTGVi;<%&_a&yPK()GZyj%BS{#3<-v_*Ge(Y@!&RFl za=DxcWg{qVy--n;Xtf z+K2hI{Yfnuc9ESVh_7$hv41;XkQDyRcbao^o3?&PB9WbCmc<+sK-IU$&bU=3+OEoJ zHWwa1rJp<0$Xe^52RuRQ-utm`C)_GMZFm0XvaFD`YmY8<@p`jtx>#FUH?pTbJhmBr z%zgl6q@tGu;=VB_!z88QyHPRkz0$*xa}!A%^{#?!SG1>_Z ztuGd3&kf7xvMyK6)fMP>KCey zk+-aThOaic=Psm9QKU>uJ;eA*lr(UldLwEig^_3=%!-rc)1lSiKuWq z8F`8IkU97%(-dcE?9rwQiIOPuxb}|PEAqBQ*S3`Ndvkyyf|d}&Ek`e8I&FrG`-v!LD;%tIWZt5eM(f42YjYHP!BcK5zNy1r@v`S)<3P3-e%=DXmLi$wL zRa&^?L-Z-uJ5%#CpL)SgTQu{GbA79zLOyxzH@b(Mq`tGl;PrF)&`q-|nNIN~D|Ce$ z!o#2Js2<&&ZT1_zz)dZ%q*N?O9E6#Y1W!4%X1!jRnC|lx$)iCO=etl{AT<(~Esf{0 zN=x*=lABsTW9u}B1B(E;2+Io3T5j275NtYhk+i&&sO71Kg#%gqGSqT0ax2KzM?P-=@C=6Ac}x8_F=Ogcb&^ z#$!y}i0}bybtp!nyx+CUld;ZSwy0JSbvA(HFi|i_^G1ngf%NnaA z+`wPHZld$Kl5hErtY5CT37iT`%0IT)n_$T@0b7(WCMnl#0ssUp!8n^E%U$ z_T0|^vMyXsx}Gg18+0ZlGZL3;yO7zy7g@6a7xo$()k=yzh$Cz?L0Dx@uS-hR&f9r@p zDHXUOf*XvsLeCv~Q7$bW_~KDCgmLek?AsaR(%k1%Zy*niZ256zE@zBEjOJjJji+p+ z2)2Oi4ka5{!5P9DTZ}`iPnsWEU@rU<*?!~s$b`L}4QTwu=Oc$C8h@pV0R2xb)CT)e z&!MiaOiP~tcL!=od+|B}xEJ&-K(n}-siIKfyR$*!_jLLp`tIb9XK_Jz`vcmT_S*6B zao3rXY1@y5Z%DC~aW)1p89ptuelHbWnF9AkyK)pDX4u_Ch({*C!*L{QTf2LbdmFuj zVO~ZKJ?D6Jq3$q5weW&?X2|5v&BNf&PtT*0FUS~E5c|qkf#IPgWz8WI=tUTfmxQ{+ zwjY(jCw8yv1502_O>3ADtcZrbws0emSHL>4OIu=?WQqlht18(eX(4uH>C<@9=?QPMz6}SA$1Q0m`3@`S%h`=0jf# z4Iine1&E1Pa{x1MkAfuVS%SC~Xu;@>0+d-$$lEuLoW$8%TwW;t5Zmz;#0NTZMxdub z!yT9fK6vy2v)YHaq8RccL`#I_1%MxH8{D^0wNhc_aXByFkITa!0#qj%MbL@TESNe9*ny7@qZA`wCyQFJuSLG4KFjYob!+r;YE783P($*+cAKVHQvy 
zLPVaoTa&b?#2_R4>_oVKYycc)h?^XF1(#CLy0TQX4k)yca?_S-2@7)wM^7NqC5o9y zD4s@Ij&u~9s!)lH(cO6D`$R_S7He}xNQ9A@BeuT0bZmc z2s)D<4~#=Yh&>z5ipTGJ2_}edUI8(aL&)b>>Wp}({vhRKf>FISMoYyM7#|ahI^wL{ zpR5OC>h^c+Evyf6M#J>MgI-F+3~$~7^EMbKfO`=nX;PkZ^zusPX}Fb-)Vo0G1Gfsi z4Y*=@JqI?^XY^)JI<8Tu2~G?o8@E=tPmnzjS4o`U++CPKUIdAol=&Q@LaX1Wl(H5$ znaFGdfQXE<1%~LCxW1R`Yu9ofT+BKZ?kYiah-96?>|*7zFW5y;B_HW7LKd_G8X6{i z_gOzB&4WHUkDMX`3bVa-93uuiX^8<{UHf@M^#uzKHumolINW$e;zj_nnU54kTQ%!j zQDSMXTayZhAwDo<$ge7&m6aJHZyPD?NlbbL!TQ?8#Ncoq_<8ZpxnCxDkc926WZDh8$jL^$+}gH(7dYuH0_}s2-1^NJHX6d&s1Q>af^PfxRmXe z{7tuqK#NMh-K6y_(rwAx=x?MzRaaY1cbDDgve%=eTs$R5Yk*^dK9-EM;7R$^X*f190owhSBO?vdddIicpiq z7!s(-#(mvAC6o!AmjM-?4S`)1-CE)T#7dUOw_&m|zDP!8X*PG1NRfwVlj&jI9v1cn z)wjLPMMv-|RhY|AK>QJ>ScWM15O+)S?Jq<_`f<2r0A>^of4fiKZ>$k)o+W)$Pgdt& z+8M}V=J!Z5{=UJZ++=u9m-oLL`o3}FXwA~_n2Z21W9=|=Nuh+@Zj2k9ztb217ACVg zAkM3ylpIUD+c2)Z;w!4@Xy{*aex^wN^Y4gJrHs-_1vx8l^n@iRaQ**g|7+$!8R8ux zD3CvN4*&1b{%#rT^9-o0GFpF=aKMPx)!x<$Gc_QJ3WKD`;$J|vQV6KAQkoRYOo~#c z>2@CjFru{Xqlb)i>T*Pxc4AWPG{PRp`k()LLeYD(5|IIw#=N1(pj2MrvJ@n=lI3n~ zAoYH%`;FBun1Mx=QW<)Xs*(2c>W!+Ntz(K#u;iO9Io5);?_|b2%=5yAEoTO zmx&Ec0C6qXjiR@b%_@c^1G0xG#4VEmq>@_lLsGlTOxxiOWtsmbF_lJQV37fYm+(TO zj@4Ei>Oy~@fHc?#Z=mlC8zkheIE#niCTYeYlP8K#l=y=ZUew?vj7f*?+PzR>cWyoH zl3@8Dp2}}*o%aCM)d1;`BKrW4;Et8hm^%h-(U+j{ef6CiNOLpy&ll*CIR zLoQAP`EjaL&@~q1m_YRxb@D3HUdv)5C70S`Pz;psH~CA!GL9Gwo%@Z@D+|h))BmAq zfra^Bn*vBT43QqBVg`e=uYU=OkYU!yp1zt4FJ_0@Y%gE3O3>sGWQIz_-N7>)i%`+d z&NfYKW_QLvm{8nYK>xs#I&<7FcGmf2zmk=jPzj;8+mvbsNGTi^p)e?w_W^EQD2Jxk z;ZD-k9+?oPC963hi`H5Am;FjsW9--9;rh=VuBR!i@g9_6!%@lXM~Pu{)_Z$%$usEC z>ju7hl(oqNVN8E*$?-+}(tJe*Kqo_kREomrwjc9gcfib$F}bULqoqK->}sGCXI9Jo~EjY|EvM%={mqnG>OJ;S<3X5+`H8;Q+$y*6ywq4BTi;PML;gaj5 z{mmEnF(c)-Gti80O} zo>+A069l?Ov?8v2H%k?^0QPj+z@u(#Ak%+++|E z8*(PC7rn}kNeq)bWrsa0$DD5+!q{X9yQt*J>jdr*bnH;J=odtjrc|&D!#OsXHbYW~ zGp4n54esW8{1#zjMW*V1rBDEeH_{u8flB+EHR61E@aWm_km_Sf1}7B?P(*r3)u3?F zZ4xwThNHuSfJ*a%@mzWd>8m$z!t1UKf@ee1`wSAg?GC652+q~RY?6f6D62jcOPRR` 
zW0^nN>ac^vRe5NP^?R`xSFRzw=^z>-PRTe>%F0HkDT$xP1UDPf#Z3IV-Aa}hH@TCw zZGDLMROU2ht|O*RE$ilChm0p1@Q*QGpcNnmXI5m#x7D*7Hiv|aA7{|9ocg+YVkITr zvJU=lOx5pb%eDd*b)LF9{FAW{zN+F7a7D!^a)%QTi3dr?jw|A_vJIM_){B<8Z0Ec3 z9?&nNRSBBHcg-Tq$V*~M!r;ZzV!*Ni7kNB`8BW(jEb)BZni9=DNu`1~_qz}3EOn_@l-2bMax9+{ ztg%~&`@9@KS_lp{Iv7i_T+mb+E^?F}iWJch(2y-EVj#A21|hT|N6ko|e+wNAWlLWnnT%fFj+>k$x zI$Aa>OH0|AxMf8hsOk8>Eb(IYE@b-r5auTP7cDp<05ydSKYVi%eQFuJwK~bQkvvI# zF0hn2d&RcAjrz{bf8MUhf_-I+JBrIUh(z=#TP%oSjTM*D%}b`7VMzAEl0PLpimxT* zCbaw+d!PUQO1wlIquU@(&K5f&N@W<&wc&y)V*JNf2=){dn6+iRQ4)(l?W&$DvmW&XVD__IHi)@(9-3U7{&3V4IX;5>r0vl6f!Qtf&yRyiqZpAC6+2qK8=50k*aNEM?)?6;TNS(5EStfrx{Y}3($CQzREvEHiSbS^JNf4Ij%2jK9mM_A z=0Y<#dn<1~KU~ievoEx)iK@HMQD2*C2wmU5DYz?XX@GyiSy{Sfo_LK~T|J@c0~j3U z^9uiE#D=f-HOB+Cmq8Kf2 z+kb9=;Nd)g z)n=Gh{P{iojcz~Gv1&sZ0%vef# zow*1@d_^E4zv;w$?>fWr0mBd1K%|q3E$TN4SaBEC>&>8E|50MtcB8F^nRE~)ZrGL1 zL6-`n)tOb$LXD6x478IbwvO@yp~5WJ1Z_7GM2B-(C$vgVkx%r3*;k{-Q5Or2F@D5mxRo&q$IL zgI9>}lBet%4P)^kvQ=n{lUA7vHh&Ymj7P8rocRt_n9l?bW1tJ0esZm?d&g)M>Q}(g z1Q6tSS3#gwdtYxB`e0G|&Z_&jLMS4OEM!IGc|#4=l?^MviLFV+Ab?Y^fRplmooF{}@kkdC#7tTvMsla*B+Hg9%!r{y1l#?Ma*8&doD`j%j&q8=-y;uncZdS+S*5C#+J-=bZsf{|N8)vM}9|4i~02K zCxAu$dqf`<0Y0f^P-(w|2j5*CRA1=3a9yuIhcf~DUDf-}saQq3;K5dMdo#`pyrr5= z!CL{`s1&7Hv<{W*9f{5gc*W`0zv?=A${LX9Bbvswr(*w3Nss1l&<}=CDE1WCzcg(CA2e_bj7Xx%Cm8G;~nngPhmH9qd$0Z=lYT!5W?ZS03E#Vg;3< zQ8OT5Bn$XKPIxRg2yRPR~JTLZrO>H!8Tk?3C+?(T2g&SNPS{#Ow9sQLkzd_5 z0c;c^Li!Lp6v?!fGUL4%|Lo|tVmRjjVp>$LgEOFHZBsIR9hAG37UN<-qYuejftIPX zcjf2Fp_P`@@#N}R?r<1x5E7r;64erNdvvhm5zk5&Hm7s9z|A~7K8n?>Lkl%oXs)zO z2I_tZcc!oK02`kYDh+X@#sSWLy2NEcz%1Bdi`}*#Wy(Img&5pNwi_kQ7q}62x4D+j z#uBv=s|R;}UF}zccb5a~5AGJD!{K?Wz}4QhzXqsmOhHOHXqH&8=r%n!mm;3d)tYDf zHO(JuD{c9jCX6`V99SK(ioI4^;_Sb{pl=K|h_yh5lF6b*0)Ca)2tDFJz?4eD!^H8t zNwkFK9mad1+sKE+A?OPQIWDoG*kbLG`g>(k^hJPorwd2g^bo?uUsdoZSS~z*uINMmoHG+zh=w$(B*J;(C-)@xfbdo=y`Gb~lh+)@E0QCfh-GIGSed%(8 z6mvZ4msOAt0`H1-rvut>PXoTSqgO-Xm$-%g1-YM@`4CGGxlPW#B95Kpor-xvdy~1~ z9MDlb+?#}H6?8GqBI?EDm!m>-RqX{|DoTce=xM$51|4^Dj^l>c(UhaW68?kFjo{c( 
zH`2KQ_B-%8+MsffPZ?Et?F|J3<61`>O@=O8`JQ9&d}GMD1+yMaW`vDF(g)3tf4>al zY(~`V@e&XqaQc$>Z!l5nt*9{9kF`&m1IdD1Fky-LI{6XFzCfr^HTA0LwMc9Isvp{Awq~O=|grAHwe;XSxRp$2AzN1Cuc$z%9u5Hb6fdi zbFg@Tmct@tn~G|(>3SUu2h$L2gh)FkT!r?q{$n5+Ij-msE-RF?pK95$)!g50jceU} zY7y85Aeg!&J!x`$^9#zp&{3>D`6QZ^qPkoOcE7KSi+Xv5i4o%M}3TVWCa4;`|EEK-iqW!)&bUMzU70Kr7sp_kl^P z%q#lg#^PZYA48nha^G*W9GUN5Y6xR)36zN#=`d>3R09L zW-sb2G=zobf|B{M{gpvLLPhwNbCy152X{$HqsvAYZlB$YlQt5_yEHL5V#o&z$;qLY&b#DuX zCC&d1*SmbMwnn{ME@b8KzvM2!s1|7mH?S@3+{-jyUfz*GZJ9(9W$N8ikXaTw9y`3M%R|Ex}xK|g=}A9d_hu^e>MgE(pE5fowb3G zs>6Q8cH9Iwr@!1+0v*EU2_sB}ln^gI3E}Hyq^2Fb3NLW-3OJ zoNKeemQWJGvd{oIb~fQ{Ta=1D25tLDvSqVuRu!~Os$^cSohKCh#;55~$w5Zx&Ya!6 zLb1)c2wx)m3g*ZD!+Etghz-kTLwf5Bi=vH!ro4cHOp#Zeeve7eo0DRq49x`lU)7-! z0C@=joU{fP@1&*5G;~&u4^f1Jj$asP;T{C0A^9kTBqb<|{82$i@@lTIaIYYG&j^)6 z5UI1v%2Tn#dU*2Xn*V$Sn(XyLMMscW`NKE4e>=jH-1FX4$A0v*zAuHmNVueo*5xLDk*iKCZS#czd8 zonQo^Y;l+i?9UJXpu zF!?@{tI6M{o-F{akj7`I;8zIe%O&pd-z#j-F%iHF>DRYkmUz%Rzy$kO3Nh>8t4RUyTvmq$~$Ql>x_3+*(LhbgbjY9lb z47uUybq4P~B#7C0g7NU|i0_daNr5#=N5M(bF`^{zR+4M$IgaRDJwqYCx`4)St0TLa zfJanl`kXf@gVT{+mL?S~sd9ruq#uUj4hB3eT7V^~O5`Tocca9DX#kr8g@bxo=j{-1 zyKK$n?t-Qj;lipn#tD1Nrko*z1+h2SB&C_c%7Nbn>sOH^0izygl_Y#~n&xx@Ui~r% zXMskRh+1d#VB0H`@Bpiji7G6-*+@lYTJkOOh^WvCTLa}tUj(kN1ozsu0r|3ntSP@~ zhPkLnQS{=w&Xtt196o*`NGJRR5qiX?-gq`*GQ$q(7qgzg`;%<0E+%ohMzS&L2+#`w z{^Lbc;G8Sc_aK+hV-2HdX!{;TI{QjcHR=Ua#nBNsBLM9i^f16HUs#jQyN=K-uFUU`@#T`C)aOA0h;10 zP$%2$t&sR35kgfdT)=_g>IHcSl5Ya!c@>s#_6Xx9(q5zR1wx7#iaNOou=E+i>0wfA z?o*3{#Fz3Kpq{aO+ji_39Qgwn1}eiqS?4^O+tCx~=?LY3cn9tcg#sw@qVstafj(L1 zF%zsD6F$v?Fa(Y*h(Y836D=mta?ydywjhNcq{ThC`VF!IdQxdz3*yRjsK7Ix|C=A& zF`6u9)0>Xn4tK9$Jb^A~Ks%$vjm`!1g@K<_;7~kD<}t(L+jGJt2YK3~4P|lvW8gHS zLmQ-VE!q$s4l;dKabV>k=B9*#kO6>aij;PsTSK&#Qwme3p^)g_2b zj}vPvZtY*>@dsCPP^yB}PARcjRK8{b*augkz4% zhgOL&NWy$$oWwW2D@~IU4Y7YedZMhpiA?)=YAZSV z<4X?)kD4u|`g*_;`*j_MI$a2kPtwc~q<+|@vC%$d?N?-RmV(5S=bF7SeF>y!P%8Ue z`VDsgUe#+jfHfLV4A7ZixE7+L!Qn%cKO+v#ng#GG4>6Bw5DEkwl_qt_dS+y){_MMq 
zIQ}cgezgJ0fT8?YPtOaps+PQiO3AGX3Fkg^Ac}uX2j@_K_U}~7!j}ZRhm#Z6UdxY3 z+}uJ-J`@Iqp^uTUDP!HrE`-|6E_GTYJ@2=TQOJz7NRP0(l(tR?5!Bv&7Gksb47EP;skZ4`f;qcj29v5Fc4#eAH+LXUv8gdscqX zD$mO^PKW2Nl{vtoNzg@P^Lwp7A7V;KT9lx70kt)KDiuNlfzUqy8G1Ar2+L3$C_P@i zbCihOGeQN=hxec2YKjuSv$eAV?`X+%tHlST#;|sIxwfl33ej9)o$>!*2&vc^@Ec`- zie(Zws0{<+zaLq3EKZiFBZc8sCsCpTg8*J~2Q3uRKuNmt&;;~1E(DV1HxNKpw7r8I zw2DM`k~U8&Ny=r-1AxvYb#Th5&74Vwn&=gXIfp}WoUQ>D8KMxN01R9|kf{!M5$Nwx z?+T4k?1CH}eo(i}#h@2TB3er-_B(`tckErqQGt7cQb$N*h3QxWY(5H0!99AymoE(> zGR9p#FeNu@b}268b9lgtV7X2)kwtu9cn-C2BNz}E=g2D4vTzxs2;x3?L*khkuocKa z$yN9Mno1z~{3#WqP$LBuJ>xOA0peTIQW|Rm;tsi*Mw?pUsrfnvAN41woeG4w4e$v-jeX^MGWm@%b z6qF(G=aq$UPr;Jr3%^E1k0;O;hhx9iCFleT?92JJh?%B({Vqr&k2W8gkEFDjzk4h* zaO=$bb-DWj#}_2RM$>)wf}{1`u^Q>zv}#`Fk*2nYvgn`SowYn?tjT) zeqO5dVwMVmH=SI+4~U6>s{@lUOf(yt_DR#_E^MNb0>?ET{7_nB8GLXB;spMq&;50h zXJm!pIof6Bf_Q97HhN>9{UvCrlxYLvLH7%le7QmSgy?0RzmB!kJ@V2NnaxvErXyD9 zKe`ArpH7b7r#W((;e{%!<=iS1yL|?y8g$+}7}HX-VNUV3Z(XKX|FaJ+A3c8Bv#09w z$iGFjoLeJ&jlKVew|!IOY)w_0hkus++-U0W|Zo0<0$tK6uO?+ok{?R9MhWT!K zrxEbA{lmvD95de1yr@M0;QyoIwsKDHGGLMDBZRaU?@g{KjH3Q8NC`S4*WhJU1Z+J* zrFtb}D;08$d710f*?*Q*FDI{ix8Wa_w(;*$u@x?jrV_g+wYE2vY5%yuO{ZNtr-&?NgQI>+GgB>5}?NEmU<2xYur zCAb|Jb1vUq#tL}wvqHlz~Umnzb zd9@Q*Z!Mu0th)cNToyW+gnrJl)R-49`KrqzU%8|Wu3VjUkA%`(NRm)qc`7+)qc24IT>CtC#U`k&d z*&=Y{lqc3<`&1l5&~IgbN~cP{XhKonOkO8@xik9AD}=SW0vRp*T_%jFpT+D zu5okb)2W7(7P;MUfRfc!OL$qRi71bp%e=KuevwK5Qu;saz4>2K>H9xk(=^R%%A97& z9j2Uenp`qPMFE?swA^~xrnym3nvt5KqAXg|l+LIyrn$0AE?DD2RxY?eGZsfvA)&;S zv_d@*iXteAqTe&`&*S@fy#I&~Kk7MfpX<7>?fJaUeYG6;;=u8~;NK728ag=pzixcs z^nZuA|NHaTf*>^ge|{+%?m6t$7ms(!XUO zG(1?9Vsq_O$E|fQe(X%$!rifM-LC&5uxl>W?(_c+X-&g-^lVxe&sM5-6%MgU_Eal$uY2B{)a45G69a!>5I%Ltpc17Cn_l@!tGDOKLC~ z$V#YK=iy5-&eCtpb}u^BQmGrCumZ=%6(*Q<#m>~O+?j)+_?0sCJmUyIV=4#Fc1W{c z|KjpWXWa~L{k5;4!{(aaS>4ktMt^Y)u~NAe6>mAn(ae9lj$9N4N>uDG&EF<*#%?TP zKK<}&v7g$t==H2O(GilwwNYq$Hsp=fdBN6x_#B$QDchE?sQ3WA)@X_Scb-#lQbN@N z_45~%5`q7Jw#Ksv>w8VG_@!ZJ;KN&J2Gu$rzsyFJQ^UG@dpa|!u2*U~d)F=OzL^&V 
ztTRk5e|cG=c>5tCERby4b+MYTkJ{j8>(QDn+UQ>e^yFg?a(tR#~Z?Bd;b0Xe@?1RLVVK#s*FkGV|I})5k5+5q@BB%_viRM z?vA5M{9os2SGSPzY4h@>SxSo5Piz{?Fuh%gar485m=w^i%f6&tZpB1wpw1$;xBN!D zyZ)O({(8;EkUE}Ig-{Xkv^;7P4T-un!S3l*+=(`k#Ynbs>nU!&u_mkMD;N@GVtg&@PMm-_kuy*MSf}D#)eYN36P>Mo z1?A2czL!ACaq)AS<-Z+1t+o>AIAzHUX<-KHFDK6XLRz{dJ(FgyZ`((8d`{gmdqn9( z>Eu6%;mK~fcFCZH7B!e)wLg0cW%u!4zfILpKgV^AQPoTH9Si90ebuU+fd;B$WwM%L zOxH3tC(&jt_amlI0c~!6GG9YV^C{0kf1&nN?lAH`L;>DX_|ePXX&OQBIS>D*Thu|1 zUzD;_ibNB6-LPs&zG#`_KlkmfC!bWCk+4_eZ1s6qc&sj|*4g2r^$! zvk1{uYOXj7uV|I;ja36A zYTPg>0zdX0N2{bT?Q8_{M28-xsblRzN0F!)KR$U!^cx}Zkwml529JT7C;yxv z3`MC|^6Z#qvd?@^FZ&ZbH;wdS;7rQzz97H71YAohw9<0fD;4&bjwPXl30*BIpmtCpE&5+U0I z&zI;>sXAbxD2cmSn%JROvdkIj3n^H)w~#+HzcHj-X?ECO@Ld^e6ZXh6bGTyI>RZ%m ziwj1Gr{k3?0l>Uuw|>Q&Xis1(NW5G*?}t1=K!=5rQH4VrH{a2;M#T%uRJM7t{FEes zsC154Hltlh=%0Uw#Pi;@E*MIJ^iFTwv0wN31F{-BcF7GSV}(3)j)2@Zc}d(r8j`--FDuU1Be( z_^4uDA3edU^i%NgRtj8M!*#hT)e`VJ-K!BX_hNC0|5kY!k6VFPcyitu0|Q}1i9_6-h^v#))A+A?>#6*?h;M6k zD{g%{{jJ12dxA&>yk&?dP!3!;>(OK&T@{FGkjk+B;{ILgvA8?5{>4xS3)vBdeCde5 zjmTmYcr@f=s&F48rn;`eidp4KWFR~9d!B= zMh>Q0+qy6_&Kzk|YAggk0ocqWc$cRX*QN`d6x{^o(nV;FRQ8SRFx1hWiz-s4Qf)d# z$!pad8BhkTjR!XKG%G1Ar89iI;f5=Xn5#?=B{6E)#X?zV#4jO!*M8Ieg3_BrsBfb3 zoLY-szqe}9#8oL+%RZzY-uDRBL!@Ok1M8W9qWfm(D!3E2d z_#ovczZ~ z9PiJyiABY{nxI9FYkL+^j!1W@mbH6s_04IiqHvWA1rl3G`WTKRk)0vmC+b;%oWc=c zKQXtMMi`(g5eQL_1{f(9J!2!iwR81{qAO)aKwT0@dlB?;Ft=&_5xQ!WI&~k*Y3C zbW|70&2Nu?%Si_woN%GiY@>V}V5qay6Xg;%Y8q#`Ma$IhVA+ZZc zO6TD({)fAxH-Iiasq~m+$?jRf1x3KSMXmHjG|5_8M@1dOxl7a9DG8meM$1FbD?%P zIj=6dI*g5LQ>6rFqDg>vhPn9ENbFLZjPRNSVu}FnPqD2?1+Xl~QAw;@8pf&Oc1$xR zs&sT!G9qG%9cE;4?r4lZ|MYX46U{D|27h?1RMn+@l^S7dyp(QJk|EmHkl!1Nq~B0B zHBA!qXFJwzgBABH;ezr)vP}tjw1*mG2`VX4!Saz4?YAH!Zi)!ONyS9WSUjnIoR*+8 zgc2OnR>=G!Tro@lj(#>m;qK$dhw(9re9h930qLoiH%+48OJ!Ex!f;WquIZwR&^uC( zjN##J`}1Lna0~Bag;Ay)@vbh9(GjG+HAtLK=nB+wKmS*J_Z*G4$cFT;v|#RDo0eT; zPww!$Blp`s(v>XTvK30eJHs<~3Plzdl|zoy!0sNVxEjn!RVX^gR|3V-+cMk9yfw+y z{=|91S9l7g7|LmU+X=AVYH?se#tcKY`=d}C>A>C5l_vVD7cFc_%P1XH506;0#SUsQ 
zE0;bk!MUK}4LQ@$Y}2rRma-=-m0gSJsD23{KR^uZ6ZyBlqtqeLW_Xd}JL795@8JX_ zL63#{G51`0rseqnxXPcMa7*jOti+7A>_@ZwMOlP_V);@iDKf8GjGV)FGvtY)J&|w+ zaQvPSc<@Y@!kJfnB3g?&w}MnQOL*qD<|T<^hM@`8qcG^59-|-FIzdQif9DyCc9$xS z)g`lT;X20|6Ak&UzLYvPjxu#wM16BNekCp58SNb|*!{OP_x8VXtZGOPUf394E=lyq zes^b_-x?@zLk}tvj&tC>9q;7!$S5A+wotK#s#f7B5rBa2qx9S<(xoj6od$IXC~GGX z&{tw85z@MM^SeZgX(BY7hY`ukrCQ6bF;OpHI9z>z&47JL6CpHCEN`=D{N zJO@xcEd)|~v?3kFgEL|YAKOJw0c}SY7u$RsV+<)Thq(nDs0sjMI+OdtXk$8S`*a$P zEY%*!)i_5jhr+QqP`=fasWrTd(Cr|?20KHi8zTS0S#HufjSLADj^iLpB|)O=(Ka5tAsEtF?6>L zWU!;UbOqr(f>Azs>Z`LCRw@|LCfK*ldu&?dW;~wLWjxT5JYru!8z5cxCXM+q4z^U z2@1wYTmLP^i-$x5qX_4YUQh~nxeg7xK5}G7dlgiY6(}DZU@n&s9Ub=)S7fY%gQYO- zu)yg~gDLpKsBVZHzr(4dHGgrkJc&YCs0o<;WG%p zlh|L^nOEr#TvlqrmfO1eOQJU6qljlWk0yS$Z$A~M4wITKoTDvn3>i*WVviKkMiw_$ z%FQ16)9~tMuCL~x#^Y0hAq~# z-Wm=N`zt-#Fhxg!Sfy(AHZZ4arQ1<6lx~~?Xoq$5Rw-PqN8YvO(Fsb+J$sm4O)Ugh z1r8HnXsYJ01(Sp`(T1T0GzSuz!dtlLA%kf}K>yV|9j;d4)yX6z%rqnQ9vtrd!HiX% zIAE=R;xXgk;87r%k{b@!N+FN4K>H{MJD>7ebLEK%$jmx-v>H*Z>f-(Gju~aqo1Lls z=Fx^UN3^;)$Us=JnMvX)-0g@olv5;a=BM{t*3`~@vt#u@n{H*2@ywjP7ifno2>GII za%dXFlkV+fM;1TPYj6*AIsaN8o4vdEGGo3c>Zv56=6Rn$e$iO-H1Y~ zVrE{(+kcG1a@ zac}_7nh8pkIMJpav{d@`Iy+j7iPE5v&Li*Tol-^WG)VIXv@a%_7+pGN(O6QK#`0LL zJgz#?v1sJud>7B-7)gB;xagWevP*c?X z!^|2nx@!v^lqjfaK~}3BlIegLCC}@jypiD*FBMye;Klh3zZGenBSJbNJslOeu(BdW zNLw7Z6DoPmtJ7-6h(YIb#a$AaGTCvE(z$-u=$lRZZbzL3ocijWnx!Kalb>S)CQ`8^ zYkwfl6Hiy@te#e5uMR{{yIU)?`~^Ae=(NEDbVj4G{GRLruPm6>nIm47Do|)Mf9~CP zq{Pvp9`t1RL_FRc#C0m2t-bVyBZOZGx5)vHZ^(w>@UmSk#hB=xX$LgjH{UC>`V1+m z#xK)HHEnZPt?3xocu`mYiJ`=K>Zx726^HZ=Q)~;8sOqQok0fDkzf&*^QXrEqiL&Am z8ei%dc`-^vN_Ze#ir2Hqv>%VMK}}RK?V@K;*CF(F8LLaTte_KNU?BZW*`T(xz5W~{ z=ORbEq_n(rie$X%_}mY;f~qF$&sb@RI_UeQrY)bWEOz`+amB-K#EN}m7gwy%HjXRqLNhw{H7?xbU0C@A%|h>;W<=i~j;o4YI-g7`S6ca; zcQD?vCGzRtO{4oPd5Sxe4Dc@{_Rq!7Y@xkSqoI6LxzNgr^7a4M`Yx1eoc$jyq$m8B zv}(nfPo-Zbt=s}D2358enWLlZkt90M#APcLH7(ApA|QF%Ez*`gE*gVYD3GuxC@&{q zz5Wf(0Af_v0i7tJ>u^BB6=H3Z#*+t@{G?i%Ma-lN0mnT2`2HlBP@+I1&|E2vo-T&%?JI{p(sf8HlDFQ{SnhWs<~9A*nemvVK$nv{ 
znL%pAE@PNlmi_}VSBlHX!MSy&7RjRxB^t19DFRC0)_CBba^CA@{E>JNL4USOd=4M) zWHZAXNv?;A2kdksn^uDx&Ps*}!8motxCmhyNT0YJ+qu;*^R#L@IE0i}0-KSip9@zI zg2~LINg!?pi`&-wt%FfZ+1p1_|p^6-6X4TuDbP7^Y^oco!LLWIjz!^vrb@9 zZ*r6QLV#NGuJI2qFr zT5HyVfGZ`MnY@~Lk4czIUvDH_DQm4$BK9*1luKcx&H%2dI7F34$Cf2KhKYUmuXZH! z&-|OYYZCok7&%Qs(9vd1nF;?}m)M%TrUY#!(hmQ?ydk+Vzk)Ur{9D4?GDS=Kr83$e z&K+Z_{Mp(aLn{GhI^j_Oe41=M5Prw4S4 zRnUXQ{GSIy?$%SD+bS0mmaGfOD>W7(teSFV37q*!g`-{}!Tgdy+y3qP>xw!1&DK9F zb6Bp*oMBClQkNqqU@v8v&7w26oigma=!q|ke>C9%g>gCIc~UjrN#8%t;^q&#i@WMx z3zDgiQOz|iO`!J04LMKCcr3cQMT%=motkWDN6_c%1#TXY2j@?uM^n7Iz?!x4)sdyN z3#1Ob5bn-IfR_w09Mwf`GYLgEp(~VEG_674Kl2(W5oUO0Dof(1Q9`;6c!4KR_t2Lq zPzoR^b`T{5z#$&BJ=q?lfpTOl8Xg;(pr%yCqU{d`kvKyP$d~PX;|-FT4zzcR5nDiSW0{PeA`~xoJw~x=xbUu}L=4Ow zJ<3zKplATxJ)oC|ER!_u4OBVDbu!-#n@=8*bw40ahnj%g`Hn3d`VEGXXFe(SZja$H z#pzp$8d{)lK25(_*~2U#h@3SX*+u18Ib$T90PMC=$rA&lh6oo-CW{NRfP)Tnf8|Qt zJn?+Xt67ga8`ZlZCKfHRstiOusL9|XXM&0}u;FAnsVIQ9RNa>!@ke{YdOqXyUO(Hv!^Qw?w z*oanps!*Q*l|Ubn^E9E{a+tzZ*?5LLZN8hTE(4~9wMaL{NRyb{@>-{1 zcnwOmk0&VsfeHA_q|94LP&^eG4?4hZRIBh};31Jmc=(LBTc{iE>tuPDMAkV%c&8IK7{Pou$R}@H*-*lZ&JKHBdf@hk+o)F)oSgPh0Mg zE2wJH>-UzWQNUvhc-Ofw@xfl%*JN1|^?M#z=?XztI;b=aN+=jqvf0BrV~*Kx9;zuj!G1sMhd7%lmaUGe10xe zcgwo__eNUz^FKQx1ph{+-AfK2$e!tN#Svbpc*=^T5SFDJ;*6DNtn8}?GDuqindo0^ zjWu#d^iG2FTwSRa6UGCvS<)&+`=kcF8ZjpX?m-9~qB95uzI?^1m5#$y(xdQ4I*()C z#Joyoud>n41@oB{Fi7xK7f58BQf#v}x{r*pgNy?$#7PzCz5y7BhHS$@j2tPAVoSMr zX75EI5Zj~wjHf6QY6q8+xX5t0OtR!pc&xG|ZV>`xQk@8mk&amPGM8N&_BOvyu)5ZD z#EBJKv~qZ`>}X5Nq!WK(r~dGBT~37@c-;LasBGBm98Y{^F0g-~N>Mnu8`dRtB1sv` zH8nAXH=b$Vf%xP^c}t)*+e+LydrOU zcCZ>(79BKA=}XLeS&b{F4Q!VQ8BOraZy+N=!=?1_Bz{`e1n;Z?sfb0cN(m+L-g;G+ z2ar%q>zSoAjW1SdTjXLu)=5(%4LB~+T)(Z1lcQ{ zSdlMr4lEFznr@}N5-G2G#ri?6ei$}EOVBk`6B>09bFCUp0C06>4DaDIV!`s2)TOha zxVXb;sk*}&8)VSsaa#)7nL0d=&^jrW*Fx!+3kelqEi0@ycZ!3uO2x9 zEm4PW$cG97i)eMnl|)BI7I25A)Oozhpg&Y<0_i}t1T1POE+Xt(U3nK~wKlv@G-Ipu zcm?6sv{OoglnY{r|2p{es#M3xC~rS@Ifnbaaa2M_Kh!iv&Xq7(o5e^~l>&jus8)m; zT9t(jOgVU>c|6UDhbbzFI(Sqm7BfdWyQe!|Ig3hR`g04E+=QP!GwOkT#Gsw<4J|!a 
zsk+%kYc~5Geh=6XPoYSV)@ynw(`9h7fM;=T5!hhBmALH7$0Pwb&u|;I5>ZgNx&m40 z_xzDh%#S)fmI_J&onA_@<>hF>SxABRslkO_sr7!?@AB4Gn)EZxmYM5$3mUgo`S6t# zIV~wj$XOOKD;*jNB=e2QgBThq)vAFmUmBJoho0Y-L7Y;YTFx1BqW1AXveP8(-9Dwl z58S06g70R|=$5<2MVST!ER2LHf<3Bkac6c$K?ksW44e-0G*e<4<#K#?n5Z}XGKlMM zy5nW)o)V|-(U~`6Xaw;c#ZfT2L_X7=0V{*C3AA3QXvouQ{;M``&M@&LLcq3m5XHfR zN(Lc6Bd=wzjZ~v>(=T@=27!A64T~CZc7aFbhnp~}7?RX!PS zXXI;xi)jbdi{qRHLoh_7fDd$Q5U$ywY4lsm?ANuDa$EL9e$7*qX4EeX!J}J06LR1R z%Bwpf|1usPmnWBPSFFL-3{exQqjToPg$=u8LQiGd1nuH9qT_p_m%AqP!)u+PRi_|O zuzpzF^b(^Uy($G4m34Q#Mq^P4%otKcI7^VkktLK>de`jYDfagj0RTiO+!iFRD=FcB zNf*5J6BMtuFFez-un!2>sy;|r%M1c7SiK;rS{_JRi?1ZmVm)C}F|)j03}Ns+L&;z@ zkg$S`4I8KFoe5Lwpg9plh6F`wTaRGdrD zbPq%SSh_gPd#%!*R(m|gtlk>=G17EbHKAi6mDZ42|M+Uuusu~2MzSWf6ldgJl0#UJQ@~%=d^1iidB$V`~I?;%VCrydac2Yveg$P^ICRqFJ`ULu%vkgqZ z8-rlGJ}LgyKHrY*9PX=njyR^P4$0w(;o@!LV@H)~GEVABGr!aJD6oS_xqO1mPe7W* zGUyLA&XI&E8>zOiAy1MRD8Ntyf9$_%CH1zYVp8y8|D0W5Vy`#XorjAct6zsHQ!a=B zihs4jJ-}dWf;f+I_ob2#N7aTQ1CQ)6>Mxe+Rs&+h;AM}~c%n7N$=$)P1x&AvREK$m zsNM7wDx@e(Y%|3gZp7O=s#apQU9+(id@kef^K*Kkyh%xO9@B14Rp(SxV_k#1XXFXn z3Hn{`WMf71<2AX0t1}7vAC^ZtH-rJ53FU!;R_tZc)-Z=ojKp{a$?e=b?cD;w74RNi z<3m6@@FTHL%B{wo_JsS>u}bItOdaT92i0gxR~;pqqb^i?dQI|{Vpi5U3A^VY6oMJ_ z3m$fgF*qmLks|?KiDJt-*>uUOKmFZVlHRL~&Iy#hBB&DXgL-EaD0DnY-dxyFqlr6a$INknZy@hOM~7ot+I7OkqXJBT zvSg5U&d{M-=84N`gvJ1MMD-pcNo5W7Taai_eamF;N@9EUNk$+RJTYjE$*Pn%`<)=0 z+_M36WT^1}AKNXd*5+qWONwOlYObQo1D+zq?~XAz`_p0#jN$aE)wak-I?HY#n#aYpx`HRNFZA^R%HvG>j{4pB@B+anLRf-j5dfi=>N+ znnxhP6%A>WnX$UBUhEP+221efDCc>v0wV-1kxFMqbS>7qpJ${@To4Meo5X)m;vIN$Cq~W-v%zKJ(?rF zzG2HQMsR>VMxrMc3Ob@0iK2Br9sNu46#&4ifGg8-Pxo{M?(z82l|N|n`v|wR`)x)Y zZHhI>*Jbz%rO6Bqz|n=WGp6h7Qu^ZzG52ozYR%z}j)1 zLZB-N@=hor%t%=Q^n~f5qNoj%g&3zmgO)+h;)oA7;anh!?Xq}HN#KB9?joQ=i6fEv zIM55Y&Ndi+s?G6?6(^*qT3jDE-R9X=P71o;NxPD`3_&*h)EOmCFwuHPq#qUkR4dn% zwMP%adL`_{Po+TeQ?*plu7gf^aABd@`b+LqI5a6D#2Aqc0xz>ELvBu2~uH_8F4A=hO4iqDbFY!7xh4fiRWQ_`$se>pa!6%#r06*%WN@%YQ=uc zXG($ij+fA%7N^!m*wF*vdVkR?9YJD`;HNgHMNIYUG;L286;gY2mQo>P7q4D{D)Zr# 
zeQ3H8S6sp>us!1ClWmr!H-%uCR~AAc#xJdm?ADv3V#dl_(ZhlCm|kta&JqKz zsPnmv9$$!s>nx;Z#?DLv#LS|?1ef}#iA^_^JkMdXCaE#2G-Jn^<<8s5_5jy2CEJv%w?$nh@$Xhijd3*B+Ni0oc#GL$;rxStg|7J1N zF&%U4UnSDkCIZCI_TgFO5@4%Lc3Ug(m4Zd}FsW3=a3z4zq#zli*VY^^tp;OBFl*^_ zF1=PpU(KT%udaxB7$KYZtBkH`yiA=6;$A;DXsy>ZN@ex(I$po6g-Eez9iJGL1O>>E zXv4}woyWKc-~oX7l)%9H19wLVvB%^w=!Bf*4KI4<&sE;x~@Hu*{rW7M2Kow)urY_@<7D@#N zm5E-XNFx!V?~e*X3YO(O@%_Ze&NqwZPUvQy_I9*VK{YIq`9UHCCMZ@&f0rzmWFGV8 zxH0UA9a06-Xc#8sM=KG&qqo7^2-$C71;jn126+l0NZaHJKCPo>tzD-CC%%GWtl3j^x}|roxfprr#uwxz>yAs6bKLHR?*rlu$o$53IaE=_p2Jj*Cu|H310Y8UNJ| z=$g9b(exE4Em+7JIWvb?h(n-HgVtO|eF9X#Ry?~(k*>w1^DWqB+zrl@yl10JGUVHR z9H+MgYzgFvOu%y3i81RdH47U*F6zVhrR5old?9b-wA$SC#B!&rzbKZp#d?VWNuOS% z@IY_6f+M}DT5Bj}W%1|`)frfa7D}}rmhMNYmoWWTVH|+8LJt4EwLhv3eM_l_vUejX zP_3QBWL@Ndi9NXHWLVajB&(jQUjE*z_ajbGTQP%H-W(93QxSk>)Q40@7k4EWb%=t*{ZKE>T{&{-{o;fuPnPE<#UY6q9Ps7* zgL;jINk*5j+Mxw9lPSd*?l?;&spCOAu~82`mOFdtol%{;Z^cqser$KSm@qSv4?&A* zKM+kOAmVQ^fjT9nCSu_3Mn3sB2{KTzh=E9x1fw+?Z5BSZ${L5YYy(uBRYv6=8yQ4@ z^Hlle1zxMM9nn`u(k9n!-J$;nt*<~Eg_4|=B@W-1Jqf$P1JcVhi3vvk8Kt`u ze}_CsZPk^`N&>y~ja|SA(9mil1f2W7QYdDI5@JaGFQaa7WCx~kvF>O)f7T4!Lj0^<3+lx`-?DhR41u#&G7M}$ko+K7}#VQe$$j8};6A3@)Gv*M_4q-MU#&3~S2Uhy4}Z(g`<_Ff4t zWesbaSUI--vaT`(6s#1nRBJH4SQ{u#DB}?r%M2a9k3iKWOcSE^ucmQdjg&&IBVfe@ z19>bijD;710REXmq6wW#W0qki3DTr?Uw0~nOJyB_d#|pXn}nJdwaPH0c(Fvp1e6*$ z4eIWMSQxw)G@Xu>yy^!~Na~Jewv&xYnP>wa;{j_rLTt;p!F)F{%1<0;|7aYdG=2SP zi3SEw_2B~t$mr>_kt%s%hICZnvL!JGEQCmsPGut0=vE^eybTDr^@CbN;u#)TivU`6 zye6DC8jM}AZ<)ERX!lV@w0`DTJJGR)ZU#2k69cPz&}<~l9cKt-yFgJ`5Z+MSx!0!P zT(hT}0w1H2)u4OOdihgL+e>zPNHv!V6wnf;4bLOX>xyYOPk8JghCLzrscfx)N(fGR zGk6C&8h^f182~jquK$YCZQ2;}?Pc-M_ZNv9zNqAzK?+YzD1R-&elq0Dw%zK7vkjoOk} z57ZW+GT)}bpj-HjZO(O>hePUaH13^G-CvjP5B%kgYCtSF)@8_!xa6Fge&F_A4cj+p zbvpT}wcyr9-IZ-iugh(dI*J{J2!klC_VIA5<>FiXIzb3xfq#}Yv`J^J`Q&Ve{!rce z=}P1JV9hP?cTYxd9U?*>pqnFketr;L^COdkHZYqH9c!-W2CqYD%=GzNV@-t(#KZ!`Dnnx2U zceY{ErrumH%C@K&e*a&!z?9wI2?Iu#G`90e#QhE)sj)a?(9=U`kV&E%48*GAxECI^ 
z&M@+McG*Vbt@%gh$e4>TA8mTwmu#OiKm5A*`M}-%Ij_3{AOA#eXxbn0bZU0u(=f#G zfxQxo#w4vUE9TAT4UH8SlWi+K|Ee@B+DEPZkkCo{{E|gpmntK|^)s%;Rn%C zir-haWw21$*BxK%`NWg_#$mQC{jAowoIjY>Pk+~q>Ed<`fA`mt>bJqt$lh7}=&7|W zgM^|w9}>wYgEjeg(4Ue1-V1xI%9nV@W9-&ZNAKqA50Oc^(>FQD2Uiin8$z19rdk3R>V(Yc{%VjT$4^JDJ{3 zshdO%F05u_CO38TzYWXo5PzKb-f5?aJ$xzU>Cs`zFMV{22iq@ys&xoMDE=ObWFJIz ze|)|vdD2snK3CyW2n3&;?_1Hbn5n|A1lL)|Ki@GLl=Ah$-<$q~4G2+fpFI$Jx`d2B zaDQc$f8xWdBd56cwto3&sZXWc*M3WhtgY04lIX|&M=#ezBH1A+s0&`%y8Kx01$b3c(J94AH&RV_w%BU%;=k zxqacH^}$(Mi|ew*`TVdsqvm(Zm5b2^-|^s^m!eYCM?=|j7l!M{UG#e*4(4`wmf0=J z1W~VwJByrBDm?$juNaQJNrQ)coNlPQRDK=7%Lq#(Vs|=>&NC~^etw%WU?+9Td2X>K zw}DI)EG~-%eFurBby_^XHh$)3F08}XA&xscACgVFDf>~i%kGl)Xz0wq=o39G_~b&t zJvSfkoa~=WE_MQ0+@?YD`#Zn$t-lR)-d$FHlk(NrHP&BW>==FCxBH6o!^DQZ`Lisy zDdQpcUGM4ld`|0rUY(108W5!oYg@l!Kf3wpe>$IoZ)r2V$j*mu+-gJGE~yvkb3{^9 z_G4J7z_wWJH(5&GX6$hKD?g;7qd)mC)Z4=+Rhzkoxwh&fMI$c8@1$|fZqbut>j%Gk z{$^V=n%40Ie;zIugk8YwJWUu6x05QzZmz$=UDn0V?L%L; zcMtbGOt=y?cw=Z2-{*03^B}iCDU+aXq}ZIr-0?#y}z#XEU$?g zi^|l%&e!$7$%hknJeS$0&Tr>WHl)lv$%bd!3~LZGZ3*Jz^A}=ZwLVFI9zJZEtFbg( zX7ev63ys3NK_T%%mwcDN^ zVa|Gf4BHm*@ZDn?%z_-CAgMNxnwofO(l*;B{hlGsbn4v357{dXqKU*s=5H&QjTHU8BGS@x z#kTxr3;83TN!BNLSINP;peymsb|*h>xSDrC^=AEQ!**MPRdC$=;O~=t5zCGdcQ?wm zS4eJjxZ9EcIk1^Kk?WR`!4(|poED*D3-4uJ=e-x=94p$b-W9hhxaaja0AFC6fA+6{YxzZZf& z8~0b60a-~{ukXp{5u=6lI-gn6w-JE&TkZpq?OAm;-0mNw$6wCZgs-We55hIqjLm=0 zxw#B-`0B~u7U*A7f4dxg$oJUe?G+Vx*N~KlrXoFT)O4BZ=9ICKYg#c!&96k6-j#RK zj%d4U`>a0wxc;4%v0+9(tDRU%IS{TsSAQ>!Xo(3Scbbr5nu7ZY3!7E?^deU8JaC=%)30iCuJ~fe%qrk_*A~#92e~B9>Uu7 z&O}8XN`sT}hjB3t>2{$7kvy~5$n{NWZFXB!+2#YE<9sJcGf!PQN!Oc$)wVubOGUc3 z?=0dN2h7^0%)UpuoqqifxcCq&6f1T;lslMpJ^dBR<3AtXBQ(L+5ltVKzFBfsO*7AB zhFKmJ>z{a5Vh=wcVjd!WTthNeAs9EBb#ihpWiKJ<>@(AbT{%$=3M#ACkbN$bbO7i+ z@xdVvdwqBQ=~E9a-QUyVLPHU zhLid~>KuLM-g_%97Oq>jraOA*r<3tbJJdJZZhraRVb&*X*KqbT>e+;t__<8x55S`? 
zTRL-KGkzIiUx9z$l&^V@p6b4VeVPgWkUBnogs*`9RP)P@gH`kK693Rq@CU$572+dlydGE z)|nUA?>IIWkrjO^Z~b!Yby;P~&ST@gTgZh=|G-YJ+%P?Fy--z6bRGPJbr^PNK3Jpu zv?8itzBISUZoo6mp{)V+TH*WH2X8YC#~tYU{d9gq@DWu=sN(ma0Y+5rr5RaqjC0h; zkEID|k-679F4)8n5Mi6QEq#@DCo}WHQk-+r*b~o|*=2DzLcPwFy(B6Pc|M`acJ*b& zaP`@w;ESVg-fF>D$lUpu%QSDW9W%pykFfkHWrsRFaL{9O=JJLc@5*Yu z9{pN^t3F-9%Qr)*3o$33;ZO8ToDo-$4nYpPBk0xW?N~3x(F9TKRlXioT5mcULOH2@ zD>~HHprh4A+s|gW`DBbgIF|2Qhv&LodzJN=eFKs3Drm5aauyvW71+He-}Gzn)uEid z_aBSqlI>Cr`N}H;bDwZ?-14Jm#lMWr+?-pxbFbzC#3y;zoA(eSeCMI#_up6q{xr$P zhQGYFP^Y*eg0_2y(}4NXf^U(``c!79>Iw`S^0EhYA_(hS9rPFClL}U{<%)4z$ixGE z!c#jsv)mcmw6HGhfYoyPVep<~Ra5H*&xy}>IA+0T^ZqHHow|OM0$;rro%nn`e&H(8 z#crpBZ?@Q1v#5dh zzfWuy*uiG`P#Bk2fTiplHu2?q=G(B6`t|T{kA2y_QS$|OHA&)SI^pB?$2V%Hh6}~U&q`QX28}p-g{m0KaU43&o?@j66l^3gtHZH?E z|8&|(I@a;k{5PV3x|t`oW8t%8PABPwHOEe;#XRu+b|*ZgsLb}c*5cTK+UN@XN&n14 zOY$!+y_Px-6Nf@zE~;hjwV9luQer9b`LycNtKNXETFaVreSsAUP=L(^C2tT^~>l-K68hY-AeOC0%P>(#iE z^Bt14g0sfG=-plieRp7CY4r|k#Nhc0`jpJl+LgDXg&Q008_c`NgI~PdfVWyYt|Z!h z6?+jOAvxL3)w}z4{NrgDiy&h@_y;#M{C+(2zBa4)4X%JcrHM`?adyjc=^S~F~VT6cvTZEyV+MOx z&I}A({uDoJKo3dVmwU#DzL9Btn=?&v+VSmQkQF!pX`*`MeJ8~Tdc5xHgLh!Q(BHq) zl=0gKGMxhMO;%*25KTwdyXwkLH}y{@c2~xPw!k8M7pa3T+Y|`(^rL+E0oGI({)(YF z8>JD1cjq{#2|7PX`$zWEldk8bA*gQBAM|`n^ZJ9^zFs=M5%I^oOGgcB;aUp+XRA7k z`Mtkh9xpv+xNF4a*#q0qk?9ST?st|y9WwHV!z*}_%-kafbIv%}vbi6!Y91W74d_ky zl|uTja{9$y>Er5)CvDHj2MUVfvzcQ^se#tkunzr)V)og20Cp`U`u@we|B1NMHPBXz z|2<~^%$IwYIw1GSzm8{hY~FEbom>Cw4W|-kuFvn>DL<n+d2 z9zP6y6LTowCH6}%Rb|t&i-CvQFjH z-)8=H*k;YASC|8jC7WkH995AOUf6-X5AmbHW5Iv^ zR3zA*mibLx`^Q}~wsy-+URjm%L4$g~{WH-YpT8XrJ>#F9aCWNaHwY#A9OJ5*eaRsf zyi-^`e!5=u{jq-b>zTey$DjT<@b{UNX5sIgh^|HBNpA}OU{-G3%=C-6XZ{-B@ngN> zZ7XHn#n~s&)%m^~g;XDV5?`|EL(_*e8~K8SEogPy0v<$jtwb zrE~FTy8r+Gr(C5HBNs)n*j2icO9)}6oYvLJ<;uBCISorHWCtTj$YGbdD5tIHAT|r5 zVh8114oTahY|dko*=9Cl{C0hBzdvBN+is8B>-~B@U(d(=@p}FF-QbpNwU=vPWG>x5 z6YxqeH_cwZB9G~Q#(Wu6{a$aN-RAQfChu^{|4{Y$LW#xN@n0}1?G{TKW7JMuSgv&R zpO0Xs!-C8E&u(O)3%+UaVsS*c3AYhS1i8@FhxCIcxaQLK5f0-5wM|CZlVRKNco_5U z9wl{P(R 
zn+xy~cdPFRzvIun#xcJ1i!l+>eCYAA7Fl zUo>$}#hHVb)Ps#~vd2odwpT%CF^ z^ zv029mZO57xSYYzvyssE73<6vhL&ru4zr(V>7-O1kYnPXcxpFbLtjFhSfd!$PEMB&7L=;} zMGjy^*d@Vx_!9S2Yl|S!c}B%R5;S?BYHB=23o5CJ?=Y<4ppc{_+21C&DqHML+P#jh zF2+r7YPEbY`m6uMZ{-XBW4G73{Vx6=A_pbEx9O=OsO=c(AL)A|<0=R%doA#1$Pa)H z^5@4(@&ndJkzq_roiJ>E7?kcQh!mZX>M+-%F6VtrqJJT@T=z(oh0};=O}wQLFLIz) zP`EU`)iD&nG>l$(if0{ktUQUJ8q?T0FiYJ53fgHYZdw?{ znQWXzR~M)kK#-(r@2Tu>8Ul?}Vo2Z8k?9RX;>$H8(3_FH89#rAyd5X}sHYeyslhM8 zwaF(0r&B&?u;M#`3rS9bg^RCPS%BHQDA2t46gaH;13)+(6@ptknX5>)4|wD?t?Kd2 zRvt$Gm7TQGiRq3qTHjm%CV+yFH^B9xN70%OL5E-ampe(uO|GI??cW2ij8k5IR{>e=t2S5HSr23JP zi*)+1v5Nzt0GR*0jYaa$%f!Ay;Ody2^w~vy>a{XuI@yQ6t5Cl^z;$vnE{TY z1wore_0&jGGPL*PI3pj~5`dr)xG)|8@i2JHEF}B+3(NZ+cOVcMv`%K{=_1 zaiG+N?$gHo{Zh$cwhs2~c~9`3V%|7nhe&WJCqvt1Q``it&&lyD*Jzz?kyr0FxR412 zWgfBWXRc>kRW$#tnVj$C?BkFZ=nP6-9692xTnPwV)mmCQc^S~N7S5n>zuv}I+CD(p zHfud3<;WvkFzE4+WJc6k%ZifOV4?dhnZ$TddSns5!V<5CGfZo{1?H?@vB!BBIl7)p zCUSK}DXj#3j?3QfrJ&A52ksC{W|iCcuDZZ*7^3Y#u}6}%ztH-!IQkan;OrbK*ogbR zVK#dAiplZ^XEP0X56ojdY6Iq$LVa|~267vn6KQ(f7L1e_0YUBZ$b~@I6$w-sOL<_MKLaYSyO3%?^1JTz`xLmn+yd zoKd)G$ijTjfBJQ-7D_z`RglBIXmxpsKdVw(=<2oY{}db}m6pu@v#4o*(%(9Ms4HH( zMazEvkya~P8+C}4R!hf?$%;$2Oe=ZW%jtM~7Go;p{8vnM;)Lq69nS1^f_cFpkAKFs z3htv!x1v4YIadV!`zecgsWbW0ruN`uB0qxbV)$L7w?SIXc1kI(L>duGD3ZDQC;m*I;2`nD};^ZWMtHIh@!L z@3B*6_zR^?pbsISF6CS`hQ^f-W!-cVXSXHijMWVvxAHRQVw42la%T=iJn8ICi++}w zx0FgOSV6P#MRQw?jy$R}CVeGdW_WBmNlYjQo$R=|39tUE!;owt0#{u2B zIW_5*FYJLw0Iu6to;B{c`qZQi(d*Yrz=PUvZ$u8$qbXFV8Te?KiNIc;hHZbyEWPOz z>0?RF2Yb3pf(A>H9=H^2HCOH(3G``w3+ua4HZ`#GGyLVX!61F{gu!fDNqt!-V;j|O zGuvGwq^**@vgOI#RlJ9?Z?mC4yW=>%biAvnvpg5IBemKX`3=W0q~qHi7h}IV)q1Fo zK8ZC@QGmwmkIj;A8aYFX!sy4*JBdlTiZPJ5fLH!P>6FG|k-uf?a|p2DY)A2A5D^}@ zbZkbHb=qaZ0pgjgv<f?^ePS?C@`LOCLT@{1d`;F736bm;qNs@4$Doh? 
z3`_qWYRx^ce$q*=-VE&_`8fwiusAYD6I1jt=%sOtf8ugHe8%SS0AeA)zx_Jr49~&y znpTkgW+Yikm+u^0Z-sd}JVB$i=N@*oBTm+>^y%Z0L~8L?&OVc11B{eJW(V38L+{=ZCs#V_~TNsd1sd59d7VP z9~H5s)+DZ@{IhPnN&ByZC%?qC4<2l(PZOix(#;%y%QfDaqPHCSE9;u?oB7)v_-yC- z+D6@;n@xIOg?Go#cX&09N*%6o*USjGg&0#%Iw^{^!5q!iJkq_W|#E(KG-lNcZ zMNPj;o)P-W5q|c-H|Fu_-o4SzU*2GBL%oBCz-`F8%Gw~*i;6#|EJA(Gxa@w+^Yu#I z62)f-vd+Kw^T5%jH+HG|rMz*gBnjvvx7rMp60pR|XSE|1H@z2f`v7B%$y36^T<_-DJaW8yxw5Mb+R8yZYrT&P8k9!d6^Oz~yvBNl%gI(k0QyYl5LS?ul~ zAoANzvZ)JwxA4q!_(>?)92%$jX^TAmj#&e0mKQL8P%Qq%>o7%8h#Pc%+x*AouxTjK z04CVGWLBsycm@c+foF zgK(C6#4~|R%quxr#m#oA*oc8{vpSbhMbUg165{-;=-yBJ>G}AwJ8aAhrXj?M62>?$ zp43H>bQ~%9@11{PxVcSPu`_W@`auo=Q=WllsV(rfEm#fiX|KvJa1l=w!?Y%qqqJ=D zD~@cZpv^&pX3eb*inKjw8G1{Qfg(w61C}=y+=FDty4RvRqMKnELJu0cJDJ}nk)t}h z*FxT>dgXkS^)BCospOrZOkC@oxi+H?6QY((zd|b_PMj*M}iP=g4unRBjfCg zlgn5Xn1bu{qz6(cy$}Etkr#4cb%7=c_-ZVoeB z`*A0pVGnEfL3dX-z4b&e#{a^CtK^emP$PGCJHj<3*@-@oyI`_yNH2bcW&~SzBo0Rz z*Vj6su~Q(|6yNnd^pCeyJE^5#{5uR3jGSYK}4qy##Ub*TJ+$T6P zb#5;M(A&EH&DbUfYfP+!Npoc%p&l;_-uaxOPj^M?3b&&T)p#9tX63I8xnn(i?QW>xya?^4#C>#>3<=ilxmR!^ay{j91@ z(PXlL#kFfCxWJ!BEz#Z9{K+?A-1rP{?DcalT@`%20Y<4=OCYmlGt1SOH1mb58{M~3 zyt3&h)lBXzh;yPjWuF?TE-H?rt)$Fvms(Xn1P<}1hnusC)Jdpqml|w0w89NlmED2g zMQL3tHZH^(yPK;ov}WzT-34FyNMGq@Z5`%~N0DyqC=mwyH-FJ6WvoYR?XjG1e9 zusp}U?2MiN+T;({pTKqRmJbR7&G89~+$ zbNo0pE_%-#@??0!#aht$VEbzhW%~`Ycb#QHk*Hl>CADl_r z4uMa9LQ+~|)8)}1VMI87>1fE2^H2F(#;CDrD-BYezLS@oMJ1EPf6PDfIFkzKj6ihf z*v)(Y!b5vp=zXO~I!vtT@NFishDWp4vM4>td*^He+)^k7Kt?+1^OyUwlB+~unAODe z=+Ri6?m_RhH9ZTI5zvC`)y6}42~G_eSiEb}He;X7sIg7QWfhmuoId{&RH`oDP4pwJk?bEPykTku`zBqnV#`Jl_3%@+UbLeBFHt)KatCZsIk6>*_DBbnHk&3TrRIH|9KAwDqk^SH zBIQ4BjbAgRVKgZA7!i+NFF(tQ*Y@W};wKei-lo;Tp@U)oSvuwbnd(woxdD6<=F9Ge zx_5kP303srtpYy0uVnW{;){KJyCqiWYqU0q^JSumBE^aQN}afLUr*ZOrFAa;wtL) zg=(=9wtUTq0Y%Qtw<%xx11%~>-Xlz*CA7>_JU<_FE)~;CgZltx^K6fDy3V>x4E(L! 
z8Cqsg-iNRP2*2wW(i^R1kH+aoC0JD64x;Sz0)bzC958xALsfzDc9mLcRj(=XOMijI zvztQ~7H6=mP0_&5HV+u8-t82R=b@TrTSYtag4m;k^`DggZ zo6x%jQmgB*+oAEEi21&=crGr-0xWEvv4CQ;Fv*{>y ziPf5Xmx|tsrtojmtVt_UmSL{!W;o|}xpUjG`^hJk#IX4*$KW=MBWT2BNaz0DQzCQm zB;-0nFiw-vKR!ZHUwjce9%q|7uI__g&eQvba3^n+=O1e+I?J6+1Qsd^lIpfXIGQqw zSMt3Ji-!JU4kkXReE#6ac=zXVq+X@(tz@ndoamc}q6G_fh|Wc^^<9sQ+n-^agx#3z z%v3YFE_BZbmnn9*x_|uH`)P<1fSkYpK}@WNJW<;;vr+84 z15aFX_M+r(;auz~Z)vsF?l?j{KV7z8f$!z-A(@Ya!&=ogBs+f)BGEs3kjld&9_UrB z1;sMgTl$`T_E$ZCChQEpipEnQw@mA!Ju>A_66%%r^$}PEoa}H7QWlH4xTWi zZqUkj%I%^#@JgcTIh{g!VIR%hJjj6RsY)2*e!R~Ynzq|l24CD(x6`g8DlGRJ?C@T+ z#>ETj-)W0!FQ3Ov)vN`WIVSYqM%5(VGBXWs$WjDk%o{ccvi8q?i|5IGO6WixsakKcyKteS}wu3ZSWQS97?O4O{*+qWRlXI=2LBGuRyz9&H2 zuUqzjb7C&rZ4}tN{bRGsKyAFgDGA&Ap|LqA%xq8`?g6c3a12f^3Bvr^qJq7s z`Y|<6doNP|P`s|)HdHjs9QqG#N$Ns`IqYM4&SdoItcrL~Vf~)#kL^LWW;dt0AWt&p ztnSOw)ExcT5x}5aq$U6LBR%0!Pe|^Sk`>kLE$k)hQc??>0s3;)@<-AlCZ{R|9iQ9X z!v7I6UCLbr*Ch!#!^n$s%3W4-17`5m^C0^-}scL4#6@cH1gDe9zLD{4Qi|*bgD8eSO}U z{CYzn8o`k(eM8J`I}q89#b)Np48&<#wPy{)@7PpBA^MUL;gbd0fq-iHB0InTi{?mE z!uO}P>|8nM%W{mRyAe5wJP@DXV73Oivsw}ba#^sdHEG!h?E?e#42lUspJrHLSHBy(9gRt2*s-VQrCzO+m}!euLsZ@O)s_$MvS+7nX)w-vAyQ-41A901 zVVpx6MbH;+`a)Kw3%3V@$|>fICwgQFy7^ZR5NN!tAvF7Nu~~zo7(LP^2!|1)vPMQ4 zS=lGZMUl*8ZZO=e_1#704r4irxq>9{J`!A>q_aCFsxdpMmOs0!XeyQBf8gq(q^_d- zYXNOblEE)u+M4|;8tsJ8VI;lwn>x=8v5|dK-6SSlF`oDdGdr+ijM~M_$DIZ?2z)y4 zyL{U_XRpP(wuLpykND+cSmc7`c%q_7*wWS&a)%*w!fXjCO^D-|qgW2fDioL;@WJGh4WCf; zz8RG8`9~((_M?B4(X?GCOI`SidZ`WZ`#G6*v|~*TVF0sr64*WlzXWJBb6H)YNFkKv z=~3E3t-E7mLf@hTZg!lEU<_RHUmM2+dqG`TUB5RLU^igY3j{CN>Z3*Mvo}e5QnXwz zeOgFqTYqKOIXt(W{2ZH{TMql*$_)$p28Gg>?9q$HBvG4~${&mI*fHs;I6AzpZUT3T z5W|Ul$1RCd{ciaK%m+O7SMa>59lU})66Bax&HQi|8IUA2L6SOMzr2!-Dco$~Op zL|UMYO!#uJka>9~eRi^OFEh~jrKUh?;g0DZrCpV-BrV-PvfI>ByPBj<47RKu`E%9L zJm|{Vfwroxm59)NF%B0S#HhP(b(&MHd^`VhzhYo^ouBv<#l!5|I06$H_C<9_ zF78*qdMSdj%fUOY^d};#g{{tu7OaW`+L=T-%9QB4MJO1RC`+?$?zMWr9W)HZ8{vt) zdVpKVO=~MVp4zZ%J`W*|Rgk$(=w`Hw6>BDD0Cm~0_#lisNVPw{VQ%6D6Ty0ywL{b( 
zubWmrrS#;E?z5J}dyx(|FnVEcaRhv{yI)Kln2t+;2$_VN;3tB*kvYq)U!)o>_W}7FRk)t>E-%ox39C&?RPWxM(7~6QueeY~c5IV2swWYoo zu2lmV(%bO6*7Qw%E>-h0Qvj|=I%Z$WQ??IG0KVQ~$N7Np+_u%rj88Z_m`zS~B-p;R ztMRuPgSj&XDS0Zj1->iv6mUiT%Ypi~fuS+%Qyu{Kh_1q7MLIwv;a$D4*OiWke7a=} zk)<9`jDcIn>iFiZl@-uBJF7#SFS~qpqDGpdzKciHYv0{0Z`q_g2jG6^j%Q@7$<-X? z=*NRyn1JC@6p@Pj{Bg4Syf`fPY&`3!6n0Pc0qW_KvyB*^6k<&}>_FYYyY1rUD{<>p zv%oH3l+I-80K~CH}QR8PJUtyN$#3p;Cy;zs&tbu{z=-jhV4Q5x;SmUzy zAk7}&5^vl+;Nr(`TSScUj6q=?>D@K=?+v2Zo>5~W$Z39N5(eM+ieQqSw(at&iQ1Pb1w1@+p{yr%8xQ_IA|<}zIf#k z5yG=Yrywoc&7Ht=X^3!mjgf%6ok0Gamm2bv*O;MT0-b5qNB~BXT;81tN2mB;ve)*Q z4RK_@Z789y1o0`Q1+5h-wiI$ z2Z&wc?2Vq0=5hvJi_Cabwx(4HZt9K5pv5@n(&ldEIG@^)SyA;cq4bya&Wqr&*;_{&{|JvO&IiHBPpv z9F&Sy$oTngtKC3v?3k?++;==N2uVK7+ax+W69~W$X;?wOwoy_kSNc2^ALgk$3La&h zre(X-r%WSE4cjg!AM4NLUj+nmD(T0qy{0-~)?U>nF3SZdo#cz{D$OPc~kWc)6Mr0bhBPAeCt_jp$PsETjf#jf1+2Ulo zF48r3!l|TmSx)tfOXe?kNptb{9_#)b3Je!`gkj6R=F}(W-UQz0Fa}QF!NtUu-yx?} z0|Q5W8l@}OJ+zW_VZa=n`jJ;LMtyk|^>|?F-yyo2%P=2yxjvKU)g?aZv5Qx&ZXE0F zstG^F^c)o6t{N7k`}M=UNn7eXZ-Fu&n_TotX^LLa&QSd4MmNFe{>q|()YmXMw&|ri z!sGQD)uDKALG-ooM7@ry#%1T9)ludfzU*EDlWkL)_B@xW1Du~3nSH6#n>R?ob$XaA zAFXicI@^~GvB49|1n&K+nbg#u#PRucLy}&3HmYn{Vi$;)E!eliDlm|oa)(}c$WI_Z z73`=1Ao8+Up~4Pe0nog0zspQE^#t-9oOXpKYLT}I5VUT@G`D33J%3}=v>H^0Q=yj3 zeT(sain;P^{#l20NC0Naj;%uFQ#d^igrC6A?SgEsxwBbL_g==Mm8xZu(|F^Ukbt6G zeAx?VKNVa4Kg{2QFa2G7^Q^RBqvZy92C%SjlAm)xFA>xpeMpd@?bUlVci1Fm=K6kC zmX^OS5NSNozL%k|8W9@Vs^wkGyukc``8JyK^?T>-KZu1)b<-LhX|Zs>7%=!rMm&FH ztOdzTvK3aMCu~uxCM_h{Oox4jr1=`WX5jAVGD1 zLa1L&RIn(MylKc@GF~k?pI`y3N&H>06hr1a%X~zqpEcfGqb-TC?SSbu8)BEQ4k{de zB>7&5cP!y&785W`Amk9$*bwI+^hbe4wl9!P~U>@UULH2oa>hjoCe37-yGo6#cvccWnf6=4+1MO$R}&SfWSveS!M= zGH9{`)3Y`=?KF^15HA^!0ijU9xGlznCBpVsJQ6HAI)?cIgGS3wUC2MHGKX({{rcw; zC2tY!pX|28SCc2|ROdNg=hk&A0IRphZWIVB0PORgQ=2wJ0xstUq)XmDkDmJTIXT;^ z_o26C#b4#%w&oilgs1%fYE|Wf(&DBsi!^thk#t>fG7s68G>x5t>x)-5$tsOj)&Bt^ z!dHI@^{`xyc*JF>{Hd8XJ%_!)U9P-z*1IQD(YV;W|pA=yGZ+^lYNGqc|Eo`q?qu9x;23u(^z3! 
zt2S`t>a~HSR?er3H%v!XRBJN#+6!Y&0Gy>A@1FUpI6V(_hzmiq9-x*zX>sA$`RU!I? zcr#$wlkH1xCpLr>dQdeh_VL!PSr*ysPMIy!0w@$}DpYJ5#Mx>&DsEV?#Y`mMw+Xo8K7SE_af^Nl+oi#MkFI1M^EzEDr zPuClFrz}z~boH8ReEm21J5IrNEWS4=fc!f^c=`oV!M2$Cew>;lzs&)api||LgW}po zyVO4lbeZQm-b5KE??PO!#YH&^o=4eCX7j^#vSQ2%cI%6$-q(VmRYL0SWv!<_=v(_& zKE}(u>l zsC5YOeP&Ag$M+#LVqb3O|6fJwE>+)1Cag)sll-p@wH``@G3$>nnkb{_+`oWm%g-%* zHJPafvvW`2Lc9+m4f|f0HS0*TMOo;m)L~k-N}jH6AH&EJTsiqs+mEy1=m)a!NIEjI zL$?;t9(7*jdd^l;GnSjkWnXHluj4`0?+m9`x|8cNyW47F=ECL4C|`U$<8S9_qzk7p zwHs&ke;C`W-{5YKtNpUgZ!?RT-8$tBjt~8XX@HGw zqyL-l;;mHC#j_aijS)3M&SJ}3b$-ZXF3as}rjMHt^o8+ga+qVnjd905*DJ(7M1Alc z<|=tDZj831C@f&;+Ej}t+Mf~;S*!en9JQuB>8N^L3j1jhMX%2f0Ez#`!YwOhSrcd~aGR)ne#Fct-HNq=L5D-;f0!>CP&QHl1-uym zb>xQDnx3geMM-w2Ny3YfGcV%x8)0UYF$1!rC6Lt_LK!y_~bilU>0FyoOc=L2a!X`g>Z{ zgc>Qed>*Yq6&}=@y#{%_8#ZYWt6VEgbT=BJ`a}64f+wETTQ%=uZOs6+ZDRUFA@j?5 zoUk|dJhATPhO@hpQH?#en|6jWuKbYDKpzkW`oz4l1$s+a11pJMw1Wlr%DM`LEq~ja zT^P)ZM;;WK$G11mFa6>m7GP&%0))OaKW8V^pk*@|ht`%t?=BbJAi`B9Sq|9OVqOhD z|J84`&9O{UgI-wlZJ+;U<~PGBk6D024he;4rjqoNYJcBw#;j-=AyZYYc{sLecLP1* zDP3kT^**t8L4*X};5m;yJaJJ{kpz?cm)!e_LOKR6_{XDfLBRJ_M7N8{`se7NZFSUW z==>!?zuYbQz_ZE5@WxgZ8{gCfj-8Bat*-*##<4zYzCT`wx2>rGqk+UkGT>Dzso{dt z#%H(rOi3`N7a~s!&KOKNuv}N|Jm7zB=?cxX?_K_y+;~14w*$QKC_7=vPz<|0%WUo4 zCxSjJXXE1I4s!n!qx+$(;X-`KOYt9D4eMsSFZXt@e-;!}ocp9(re5vnDX7w4)}EH{ zxYhsAMRJ`^j+#>znz3Kkd(AH?60CvG=A3)hmNyB_6Xi9F@r56grXBuE!vvqZ=p--= zzyS4tXoonqE+LJkjp-4)3>lJTomsY20F5)bO33qNR2NJ`VC17tT=rkTioLSb737S& z+8g!F(Kb{A)jSxERxdh|kbYe=P}5xn=C=su7jjvryt<;CnHS1+Tp|mg>^4JxX`kXz zl~oPqZpayc{qnA9R3a`Cr!!$d_I|!K;6nK$4i0s+l9u`;zY3XR{UbG~L=up5zwOK{+tfAXl3OAU?xG!P zIqy-!+a&CXUQ(nyvcacKB>J4Z14|IyUAtU`B;2SX;UVkaiVzkGmhPB z=AQ7lt~HcZ~*Ev$KT(t+xN(@Qa+8t1{tuQk}SxeDLCWPgvED!7T~vc3y>+J-&ZZA&*= zzvk_OXvJ7xw14?;t=(QH1if1#Dbxj-PYZjby;9D5s&9?Mb^(zt>j!JJIzmjzZVQWoL$BBf0yA zFRXoIy>*&M%-=J3@RKqo1uil7A)B4`SUk!pOF9c-vuceWQ5Yb8h{ z+pNfMCthQo`t*ne)>c`o4*&J(1p{!PdC5rP^e0D-TeKpO3b098yi|AoD}!Pgq9&`@ zW_&@kIsPk~4iJVhKRtZS3414qPjxrHsEPvXu9w(#)#j}Z%qm+Z7%u@LgX6LzU!f2; 
z7QEtZJV74ehqxbqqEUX?XKxDKNzz+Je$|Z8j#kx56?B7lPds4B`X-VAU~WX!JqY$a zioNQ55Bw$QOgh)R!tkNl#O|@ResNLP9FYPbuMp|;IFFf^@;{IOZ!8T*yp>#)Z|u%1 zKjeAyc#tl6T!op+pe(vtF=YQoZVWy0or~kfF_*h8^WQ$fRKZ?TmuG^+#gx9Fftj~?{;8|R-vlwZHIMn0{3)Q!xL@gA0lPPg zeQ_1`Yf>$_NQ-YS%oLG997c~%;ArlM6H6LJ}mh_=vIk`ba^Ka zwZ+BQ>gKgP{%N94^!=*&jUVKNWPtS<4`?JycUjGZb@ZG$&E*Tj+H3xR_&qf%rpDjj zHCdzV`yl>PShe%EFkg*WV(JMqvg9lQPSAFA3kDp+_r>={3}{!E%2YN+1{JAYo)qN$ zGt5F+d3^9;^>XD%_=veML%tLJ#kBogww*Y*)b84$` zyEypwyA&hh&uxJW{>lXTcR<)#U=$N1d1~$dbx+TQ+TT}Rt8)ghCnoXG!1DbS*lm;3 z5t>o9e}iBb+Pg1Vo&R@rD-X%ksD+A9rUYTo#j;c;XJ7PG*uATL9QedY&O|@v{D4M# zIb-9^`<2JjykN}7H#$spf-f9{lkW^XH!iKJtync?a&2Hkv#>wzV6|5loBh_tjO~V6 zsDx^q^Yy#{3OH{Wv?5YZ$Wk~Y)W-aadO%V|s9Zh|2poncwHMD)A9ZIQ@&Qe-$M!8> z=lElA+&CXyE2`Nl*Bs;a3prbxZB@!!v=_%D7}epr0VvUJ+2&tV=PghLX6r8rByAF~ zqX!@E+!pFrdUo|;qyEH&*{=uNHGAx?Qz7Uff9InfPxF)p5NiJ}qdU)~d#bh!^|H?t z5<92bz-8=8+75SRb-h++cZ>Sx4+)YIQ=z##B)4}r`8sz0VU&RP=Z15*l2FMz=Vzw> z_Pij7CiU7J&!E8G2c&F6 zM8Qvcq_Tf~(|Z3bsKD4+v%{FW`~p?PP8#Tods6KH^B-N)D)84w4w#}t;3WUZC6QtR zUZ*}QRL;I%{d9l#+ot8`v~cXUE9i^vVbfizhmIS^t7cL@=~Cf3G_op;*4-78Pwv%Q zon=;lbr{=tby9S{j1N&;1OGI#A}Nu6Hb3=*=}G}p9UP0wmrXI@S;+;v2atL7mv9O% z1o&Ye)QA1n$-CCnPd0oO!%Wi_dTdN)YTIuvjcU5F+WNRa^G)LA`>vT)dzK^r3&a3U z@R7i`k)D@bED|16|6jK9b(B|m7 zs5ryM=FWaxy`L;UGCVwlaoi=A0Qx0^T*_DmsmW3xi}2L$#Jk6aF){p&fB`%Sr!q@BYan=g#UzJ3g2U0OEtP z)IX;;8ZqrbjvD)Y0IpO(c}d3G?ZYod?(5V`Z|r}q_OIQdUGnANH08G&N1f8aWx_#;E|36z@|*g6w4g^z8|t9USRzAz1#||W+T~!>-4;K`Yh$nPr@}favx)^dGx;oYS*$}mk~S5?vcbg$vdG23eG=!+yh9bKt5&}pf!_Gz>Bgfmh3 zsL(ZwoM!)b>V@2MiCriEEL}F7e_wqq#o}(24)adO&S7+Qj#WUH-dq2?<>rSy3UKJm z6+{yr96j{qKiBOXIioQj4UTEYNhUFuX+;c0SeH$KpM_Gp03KR?N`_~VjQ!Ji{=Dpc z%XKe*S7kemC!XOWf{6bxe}NG?$gd1hm*2sdcM)#rg$}{RLFxS<9rE>5VZq9TTcX6+ z)Dqn)tYVui;`ue_o}peC;QIKcVt3fml|EBd2;eAmBt#8Uw6aNwy85el?&LEPJ5z=R z`WiK+f{z+4#{EJ$8e4CbO9;F1;6)OsT5sidy1^9u0JIX8QMK%-^8b6>X%T)d%YQ@W z)3GIk`f&5R0q_n0KId}^KFp951GWwUcP~ff8z~~$yIe2f4xh`~+p7L#{7Tg0E?~`Ul_LUNdVL%%O3zeO3eX82YYXRdRt#uE@|yW?yn-8qo?^K-?O> 
zu5B-I+zs5ex&@)phs@1b`f=u2yl+_UQ;O7Ug|Kx8Z{#L8=?W80oM6Mp#0LSk;`(LKOC ziAmhe*TN>s2jRLh#>?94nG$xKU6axZ&u!e}5J%X|L!SXIKeaa7JZW)0-L_q595um( zE@jP>RsFQT0vV8NGmF=L<(?LT_1?#O&TOvLVRr5D#*NdeHm@FUxQEP87Ss|VIsi)t zU3NcBDV&rV_F-Z8t0wsW2GgVrh z`bs`>I2^Nm`C0C42}xx7vT?WXxb0MjBXO?E%oHAJIXa#h;9drMH3kGf4|T2<`&cnkUwdi%r%_Y$Rv)nhiNT&%CAIj8S%ho zs)ZePgd6)#{0cl*$&%T6c^%ic*0>v&eG0JfIHnKy^zWHtoqI#|NN^|2<<0T<#Y+Ml_QghgE{JP1z zmv2O(O_U=&6+3PK5@K{Gxy4!b25B=|1%-@MNM>8>N$47kAbxU`}jqJ>N~Rhuy$4Q zxP}+*y-CnSojCsLUV!9KTwu%TYm%~u$4J`KXb*o-iA#MaKUuU>@U9|^RkKzC_LQl( z^E|($o|r9Zi6Dl%X4Lm;J;7O#H!4t9Tt1(engLhY574BDPQsB$*kp}9uopyh^<1_~}Kq*ajzfaK(V7GwnG zve>`35(+pvbKD_2N0*uq=i3kx^;&d<*0T402 zAbD)&1sh-cZ+i}Vjw|0GXl~|4LV8MXP*VzxKX5#(zIUC^kV+sYKAn;jToOuciHRfY zI&))^B3tsTvkmW9{iS_#1~rIz`OXOd>!x1qQ@<*@vHXYBuy!WF&)mC=l6m40zG?A_ zUn&`a$SZPr`E#x%n^G>lFrTT+rk>Q4TMZv?ope9zvs+(P_Q;jWWFv;^_)J5_7PPF( zyuX>{5bd+!*2;vL73Xhf)*xy#CO&Qx7soGg3u=<{`5EXboB3?^s|l}TK=KtjfLfHN zm+G!?PrFfU&)-Nijisf`Ll5}FXR-{5v6g-mTZ0R?uH*7)IT2`?n+89bNLm8!sD^9*#f% z{5iIeX?sgWlcsqVabaD-4g=Ch8S*ypQ6(+KNj!`x>-a;Mx`_;ZA-u1ia?Q(MMSOtU zXA^}x`yD=a^@H_dpYddq&JztsRx_heSt}{TZV|Rib#K;dmkGVuXl3%H1iv-VS(<)>#dZ?3-?GyErYqIPke+At7C;ut?4K_C2{>TtA{=bLDug%V%(bSp1p zjp3&fBU}!@`zZLGA=8m2Q6p7jX(ICILp3vPrg|s^=z*XdtIgGMrK&qO{yNb$-lm%p z_kI#;uiOa%3t<}<)9}YX%iFYJ0<7??r90veU~|G_bO+`R!8~OTk$f!0B{0?pdl@jz zwei#ayv6EoV6i=Or6P|_$pq#s8eiAk@1q_T{x1LVf&V03R7j6a8ZJ)SsNlbB|A04m z*td&o$hALadc>@*Q3T$z``R8KMa zMecWoVdj4Mefs|XgYC8VXP@`$^?E)Z&$J+1>l8#z0od7O4&`_a(|jvU;N2F;9F^Mo zr;OX`id`uSnUSLbjbhx3Z5M>9loRt&f>WJAqHeWnC8s_$4=iDU(UL$aEU!&qKR%q> zeiM{)7*o8i#Igj6q2Zac&xJi3{G7UZaB(eyDqiiyTv+@1D3XQro7Vb)O$f3ey*EeQ zq65?sz%1uyVon?B_cO1#lCH&}F{Oj3T_$m5PVobxpXGV0iDQmBN*Y`BizD@mdy-t_ z{oS{6z3??DfqPQS5KpqRg7i~`3paX{(*+RE3ya1F3oXC)4~eFsvB>nFV^x0P#z7H! 
zDDPvvu;akm$=^T2tsJ;ZcC5XC%DO&G}@$+Kso`W349 zcFhi4$6SANtHXNFy;$Y&3$FB+-Vap!tQuv~!j@i5Hsk#Hxvwix#MI_l!Zu#6HQVC3 zT(fDwLaHS+j9l!&qAHzpPfJ&~#}AGhBP&T59!GKhw<>-~rNj`av1BKTWaEGiY&f9A!I} zdVqfnQ)$*m2q;Re;|~enfs>FgF<(qd_VUY$FOY*F2QVkA1OHbu)$WJRmG48IPsEVx z(m=h5mKQr5=Wud)VIc0q=Fy5@MkwPSSNonW>c$y+ub$jeXn0`O7^SfZjj)ED!IqIQ z{^hePTTqD)Gr5*_dGjPNKVzZbwsCz)sz_=@&+4}Wc2(Th!@pzsJ1HN3CJJ4{w-I?( zgRC=>zG*=jK-m?VJsgx%=i7&s)8&uZ!i{RyKbj@50{8cN)(72icCtdZJyKA$I+AVY}p8`X=srmPWb`HVk^Q?Y{ zu%bIRa08FfL0@YS;8Eg)H0(7 zW?Y!Vex-gh4bZF2eeT5K&u~ijvJ*YoXy1Rtpuh&@6|e$29aVw{Gsi2AZsR^%9)hu> z)>mIPMxs=9$1jm{1tNlR6>7+?ourSvsQ=foYvuTZiSosIBfNV0 zh|;CG$SnrQdo#HH6>5-i;Wy4@|7m z&aAws^60+p2P^Al{rmfka9USrPc)J3*I}ON4nAhabF_QfFNdcaCxWEIHX`*`0(E&7 z&@XWYNnT(tQH~eACYxy5vwtL{a`iujxd(SWY4K2jzUomU4XjU zBdmzby48Xdw79E0z`~~;hi%E5yt>>-`lC1N(`hSV0S!TOZl^2xa{&lo-cs&i z5+k15o9$s#(q9;vt(r&OW7xg<)XoO;RG9;3b_(q-Vv$%1uP$ zA3$@WmL;r{+tq|RG2hWmN(+Hu@=I$3X7ZaHl{86yce{UJJi!wyYIh4@u80MnZfm>$ zmq5n~Euo@Lx)*#}2y1AqbSfsR*`hP;ysbu1N`d zTJj?hSVn<*@th&SULXS^r^0h*#W^W`G8@y&K6qeM zW~{_P)hT8D!1xrlt!I#G^y=K~Q443?%Z??+j}l6n$B>C2_4B`9𝔈dMlS*8@@Ts zK#jN;_pe0s-P;t~9-_q{0Vv*>`KME+q#(H*8R8-yl-yzhZ{WHU%`I~PDx)+NfGT(g z%f)`j91ZqdPuOBu$7ghxzJD%=)bqvsV9rvc=WpiVPv^G4(g1YAxqO6JSIRFUS?B8p zcCQj97`W0WZe^hM%{M!O0hEQtvS+>p2grNLVMK|*^M+Fq>cG$^g7=b^(j{xX;e$VZEJxJzsSj9vb(PoUb~bstzD#R$~fg+DC567Rb(?kuJ>=) zkp)fE<{kYTN`#-um#QB(yRX*w*)7;Jn_hKVC3UW59iHUI`hckUn(4@UGaiIR7Jo~l zMb%Ko$%S)crRhU36NHL%yw@o<8=;P?7rE(GDcLO_SjsfCT8y5`&MI6z3Ff}zFe6+A zF~L(u3k7ikqYUS<%W@OG@7yY(5~|@VcWf(7k6kLkI0F}}tjegYiOjckwxCRf*fKJ_ zCRTa_L9n6E=`j}40+;r(B!@<#ZfHy|x__mnN#ZIBi^q!obI`k3rwKRiE(ENgSf|{P zM}EHGg>ew~&>GRJN|;f$A^z&&v4JO9JM)6l>wp_UjZCbUgo*}f9L=HYLy?cOjK!Ghq@a=JZLT4+!efn2zS?72R9hp@CKL7EyAoSLs0X8a&{I?nEd zPy-QG7vuSXn1<3&iSu%;%m+jnDl1-{uc@%38sRIqN^+MjRRwD+vpDqYJXKP|vmwXHZ3gP@jI^IXlCPH$FSyndLdJKN?@e#C{P^}8 zA+e{KD?}A=(N@L^&37Q}%qispJ@{BpT0Mv^$C%B?8kA&Nh6mIMCQbn&pv{^nWd*#zk0e-u^>c! 
zrJ}zJ(Cf_tn1mg`$UiRZ*b*?y(#4(WMl&A5emVK?rADK=MAa9KjAM+Y*#+=SD)nfr z>}J8)Wz*cOVy?z?!yyhcj1Z*9)kMlv)?h1&{b_d{f2&PSE@dA*~2JcdUBfJ0gEYN?T{EWDcd;kzNfrB9&Pg zXLG45%RxwW9^=UZZ9e1u`!;$lw$~mxP{A0Xw|CR%7INQhO&i3v+&-%rjk%gZw^LH1KkT%BC z5H*%hfn{;GeZ4WTVM=Qn@y*o78NU)6x|}JD^Ne)0Szp!Ja*56!sDr#7=2e-f+CNR^ zIe($HH(=iorGpvz8Y;2kIilh59D7o7gTvC56r}iaRwA7^AS_u5nZNN^&@dG$>>+pO zoIC66M%!x@!Fb>KG+^t_AmfJx@!=zL*L)ewf<+_I-d2>8uLauze!dIn;MX_TlWF)g zuk_rhxkt*YSoO6N)puH5o5L?k9qZYE&w`#_{Lf4f%Y*G$Q{WIlw_9C0oD6hz=-g{j zh?Xiv39=sGob7{*%_*tbV^{J6W)JW#X>4a;C!3|+E;Y(RM>+T)SAj^XPwcdRwL)$o zdsQ|MA_%fY)w2x-gQi(py9|eLSG3HYjRE40QE0;bnq`?km0JWV7sbm<$>||duon2V z)+qQ~H*!Y&t`&u6EJ2;W-U8RV8ye7cBOLJLU_MYG7c-KeO1Iqt`-OJ;BO3nk8PrZ_N28*U4tXl8s z($Il24Fd+S2#9`k#&dCGqoRE1vOP@mc}FT?zB_;76mh8%cZST1^%x@K6=njG49K8m@WBb_ z)HOZvS<6q84_(1C}QH3Me2F%e+`c=Quq_cY2p)O0jZoCTmqOy)*72M8If%nCNaNn zNKxO(+sh;Q%7(=}Yg?a%-K`oPqK0SodDE3Vb2eznbam5!?x%w1rFv70LFdylY=4$IhgTn?-=A+ZTCUkq!^+Y zaFQhbd4eXYnLVV2XTC6g;5xw#C-NjDQp2 zD<;nl92vS>bVVG=67D?I9}gaj4-(w`#vQz9_+iH6W;%TAN=;Qg@m2KVCEJqSiSlUQ z-GIO=5rYs#Y$}L^vjxo7EQHqGlCcev-qSQ2h0fw1vMAE-^kR6xuu8a>=;KcXfG8|q zsKd(PFL)Z%T-i>`$<~T@TTL2G;+4F`X{(iFP_XLTgmbKmNc zIKR|JehcRymxKlWAFPsc|9;I!xi~>0>FaI*>AhbdV`k8so7V}r4@Qovl64ob52sfG z#|q|dxJUg3PP$_G-0{l01f;V{Mp%bVrpzvwgTJyN~vfPGp4R9iiDpUIzXLaN+*FkqBO|Uu(O{ z0(Qj271<#0R2r_-(}^U=R@L51{|UGuCL%Vl6`mh^FQJ%`5lckRAY_Zr>(20}cv7-b zAw4QYZiQh!a>uVlIn+K^jGR)H1JJTHs=c5GRGIRdZ?#PcZuOED*0z#HUH9_j4X;A8 zy?z!S%-sZv0;{3JLl0r5K{Z@JjQ)Y83_8{`M; z-<1;Md9h9^Gw{>LQBR`RqX1?)ZaLL8V)Kv56$A)3h*cP{C{+fLy80Lzf~>2`yUSS2t?^#1b>kXfXb4ag>rO{6OZq7-n4wp zFGfBAw72C7VIrYH6Uk2ao86x53$~W{Gk1dCBDhvAZ^+U0=kQXUJIUR@&6NA8h&7Mk z_%Oy1_WE)?@*wO?wc(qp%rBVpuZr=^h!$(%9I3VKd|RLR#>37g%5q@kRA|QMG<87F z2mAij=PCjB@w=mHwX}N_8M!EVD}z6ETvV6+h;j7a>4EOpSAM$Z&--C%JFrZ(ZSB3= z`F#^TX`IwYlU(6v*@fXy%&uGX%$#8NYVeF|@Oh}`(=PUS%4>P{yIFe>lBzfZ%?*~z zp`n3)O{0=ylfHq=33^K`_R`CGeZahCb0j19bfe&>p+W}q6G+#`yRpw6S~CAmt9A|x zp^p{gS1^=V!&V;O_amJt#|_TvOU~*<90n#SLyQzrBJT*>-JnVPw!XsAJkEic6CgSs zQtmK-t-rqbKF3!OcEZ1)faECF<3T 
z>m_srhBPN0ZaHR6m8-DZBJEzz@hMq5RHE$k7B@0k@Z5y$T<-9tqd9?}YMMFjWw~Bf zv2Y1#T!p;?W~mdWRPJ?uyI&Vzd7>@Qg5(hN`$?K#*5|5ejP8^_CG;IqrDaMEQ&2=g z|J}yBYn78C+$001sCWy?7susxD?%vJlW0u`l&00Pw0AUbC4lcH8$>;t9JtGA)&r34 z%PE9GuhSM@K+6C!eS@di!Or@N*7?M#iOqtPf_^)&+hYlHZSt382hseYXK@SZ8CiiS zi5&6W_=MV@@=q*+^1nSjMjPZ;T2PnFz_SJ!o3(3iC#nz@<%6{8LyfAvb@xC)mzEU= z#Hc6y#=J4QG8xbr+w;9SWyd-%2bk1mdxV#2t=-6y8d{pWoE=cm^+L% zqlGWugsBW?jPB{<_ZAFna+QWaUPmS!IQc_iAPf*MAVIZ5?RIlsoH`h)4KNl>1Rw15 z7u=6cD0&;Tp*D=&+_z4-Bb=vy_UDF(*7~BGM!Y*ho_oC)>Cy4Gp|of3B2Yu&j9muB z8mvICHzL-nr|Z`p9aid*30^bfY5!<_Z&>K9BY^uFlaT21Q9jJYnx*TGUI5+>2z$namKio zSG<8-B8v>SKiWz&9lI02lo}#sA3Uh~h{#gR?y=2PDL1?MOu@JJ-Ge9xGxk9`ktRJS z!zq75XfU3b^jqm}G-VnO^tDep%pK?3>m)^xuiwWA8<@MD{avwP5Mu$GyD_2h1ki|0D%eoxhR5 zHPK@SzVt?EleKy2?;trRs$;;Ex0oWs#7x{^59_w;(&Fs;`pMQTGK@drW@fXRDC2z$ z^9rL)J|Q?tm_Q7HW}ToP9+cEw37p(Ol3l3Wm~=@0(tsXKVe*cP1H@loS)zXld=Sqj zXWirDPaKGoA4*s;oEj~oS^)5823E$m^eqrDG@wV_OD`M%s8vnCIe!TO!5u&*9{xVCrHuyud zci2R)Cl&qlc0Zz$UdbXHNY(;W5)$K&sFu|xMLlra0+l*Wjs%_UZ)XK6{rfb)_ez-F z(K!#rq9XPjT;o=D(x0@(`xV@g2KOn`Bnc*~rZK21pdj z3!+*a*mEYvhZ$S$?!4?4dD9;8G}~~gZ&m$f1CNJ8z$eO8FHf}U?&V*+G7=Gl2A7AH zO5*Qo`-dG{%{V&4C#*>$W%_S$tEA>6ML%d>f@tj$S9;XV@obn=+=zck!qh!L(mJX) z%M`E|FIDsA8 zHqi^Ehv$B*^RLj~0tz0<5FMf&7=&fii7z%~FBaI()V~r>{U+;+mV72Rbab9^U)RcF z&5rsAoyYZbT5ZbO5#Jg$C$OBU!Mz9^bCvB2tX9evqpII8xqy~H|79Y+T%NdAZj>9p z=jpGtL~~n!+(_!8jYI+X{Z}zkL-qsz9Z`OK#={Z%Z1em|LhCbG>hFo>RB2^oV9?iB z=U|#y^IBwz+sMwLa72AYpzG#V&o__304a5xzaFRW-0%KsTs_uN!}f~5JK_c#YPPn3 zBw^0AWp#|_KQQmF@0WQusd*u)&Lye8ON3XZ@+JafOA^SRS(8e8*1&z=2rbnF4tukC zF7VjuBRiOs!uz~*@T1>3ezRUeet$Di)#;1v*}pC+@vLl&oUJE?Q! z=vsce@WjITHiV%-z0q3|T@UpQl!$qE6=&6Xc9zpwym zlk||yii>gsFK{u(%b3`%vH(Og=}?J8d%tfOyKQp`;cku*Ebm_tqyK(dHi@ElSfo4e zG-4q+j@4pkbYHT?x!8gomMs2Eqt2cQFIDc^xnH|+>LXu%Uc9F7W62)yMF$lX^C;e{ zf+z_c43Ba!Hby^DoRG|34T5(xdp{=z~(T?+>49o_nb#fy2w^x1@-+ChdN_r(*-fS!Yr!}V$+Q!1y#m! 
z`_Fac_Fx%f{{A6Xu{;&|&g(}g>EhOcP!n{oT24GEAunY?6?UHvXOMoFT(_B@Ic^c$ zV!v=<%He_*f^C7CnJ)e?E46_~<+T$^Vs{M>xSB1MD`e3G$i#0huNaK$9nn5H*q=L1777nDXkEN1`u76ida*^0(i%;Q41!(#!9wRvncI4zZ_&lM zA!aziCSg3QqI)fSDngsHAwyfjw%|jks>Z-ln5ghYc70lX?G1+lilrcvXk>-?xkv70 z&{r9WTTox$?TZoC41nz%Q)_AgYq4l-oEAC`V3Sl<8{L)M%U8AYdVIZq3;z-dKOw&C zx5S>>WMK9w; zZfD^9slUzyYC1Tp9sTN{0RP+zaX#VMSOB*bW3|&?=0z zL42f0gCDXmoMabV)ZMBA3HSQAt@O{nCn&O5+tlUPJi726>MfpL8{_eg&odmYeb4%4 zexq>$eh4^VyI>Y7&L!NvI_Z;^(`#GP?LL;pS!!8H+khwbx#))WUjEtU<^M$APM+3# zx`yAWCSkuEt0D`w(;2pXCbI5)x9+5IZt2rnLp}W+=kGSw=$!{my$T%MC)a!UH``FH zd6WE_CkO1T&@NUE=lIbRRe0N{Z^nyfbK60EhlEa^RECP(fC{!o&oT^ZOA^%ajr?n0s`Dxl7F$hg zp@mb{L+=So4_iE0YTj?Q)#1Dv&eYS4MBRqSHl{ZtR>IZCJ4ZJg=8=uHKf_wmaw^ZCIea32?BvXGYj(!O0NwF`)8)3At=|ZhF=ff-SQoXZ#4jt?-cWj z)&^+ko@=(EM~t_Lqbx6*`ReV$MxfpH$RAv zml>C-Q1yT4@S~`CiHz%noIxgW4l{$}UY&iKXUna?m69UR6dh_OrHl*Tqd~*=7mS_qKGvBH&p>deix$ z=Am*!)Mi0T8(P-E3ZiI0ck$1Rn@h%7s-TelV zg_R%Gt|R1UdLBv4QedZF4tQLiY3z3N-Wol37pb5J(U__zyCA;$$asJ zJKSRH7^Dv{$)G>!JR5l;Cu@~arh|`p7ZU|QV!*9>je#mC>q&#Ip6kB7p-DhJzYZux zbH}vzb>6ldan|V9Ntc$Tj)y2YkCp(lRnA^q=Kg-#-) z-M|}+blz}LrD&P4-+Gbg8<5{QI^=md*;^1T%2w<#Zh*ZFI=ef2Py}z9;!;k917$-$ zlRp!F@j$O}ZXwzA`Bi0u)CDDV9Iw3{ws@4I(6koEPu;j%IDUW$by^jht)bsqV(=Un zGf^fGKV$i6|1K&n&-2j^HR>P6H?*j%SY($LcvuHO0z*V4=a*Xs2E|ds8J}N;JSHFO zk6r$-Q1Bkd%?9TPLzK6+sFlYc_8;Y@$}M+B^m@pJ79D55tX4{VXOIW0sG>CHExErj$SkD@Q23>X z^B!eV3zqUyT^ZaoMUM0w8UP_w`UhpZLNDP>ty;Qb;1{xnq8!CY?_m3u#98ahIiz<{ zlxS!HNG?OK=)42L>MrB27^hM}UT%O|4-pYe+H5Bk$W#43XBC5;f*Z25OS1-+rPaU` zl$pOq+2dUh%(2c$G>D$nG$r5_u8!qkQ>6aoOuFuS@>kG=NL-!cY0mOGOAZ*8SAK2b zcR_IdlSJJadV(%B5p%mt5w+Tt!VdV1UplbBL)nG~d?G)kDofF&iB&(trZk3(iQQRgi^&kmPRi75PdJ1;;;K{TrfGEtJ!p!xCkg7 z7U8)gr^lHk)OJ8uDR~HaBd)5?4AWfZ3YdEC<}O9Ov%lq->l1|na+o`+YQ4P=KykE6 zfAa)hJD=Nr@yA*s(Mx&tRh5!`P=x}lZkORV!q~l*xaGjzhL7{%oc0@7rg*+``gSUx z(R~hmD^cg=I0qS;onIO7MM;Kb`ZX(eWDeNXiy`bv-cK4#_>&WOrCG}Bq5VJ*@yC?x zs_vEw)NzW6nSXPCJE7$|Ixp*UdLZR_`d>c>Ih&VM4yeLgZ4Rhb&s2ndd-|fSxk^c6 z&D>veIy3S=5M|T!>rff3pGSt>&;qQo23F32;{P41g)%iMLM9-sP5WsCoa* 
z3hsR}DYdYhx;9cklG7;@jJpxN2A9#l9R@!8%*s;v51AGw%f<^UO04m^-J#?KMi1f}q)FEWcE8 zHZG_&15QQl=lo{AH(Sy3n|e=_2hTv+WEt^@J=JK^_|}0AM}oP>J|g`!>qETa3f3!g z1^~oE0eqjUJUS#Y`)koZ&5R+@B&GtlL#@r#keE2>16Xmcj`Qo6p8h?BGJzWp(eitW z@qjhgaZwkQw{ue)9Gad{NHLhtnQrn#-eC9~qAK(CbMn%8Mvc+pt$B9ht^MnOZ10I2 zCH_M0@9VR0mADF8l-FqOo%*NxF*1uQcIUx>qRM6CY`B!>Vzu+WCr3T})GQg8Uo|oK zjXAnJ_EOv5-xD1<@td6zwF=4VvdY?jQ>bUo*>8=YHJzc?4On1q$>)%-S_ITRqFQM> zElBsAg`RDgh?YH7C&b}cV2!8nfSEy_X&hIacba@VmL(bDS^dyAu0`H$y5l|1P?eXA zWQW3&w2>CEidN5aRi9b~#d-0UBW3uX4(P@|g9i8o>vsjK3>{#Uwb-*C2mVdj87m8H zkr3HjkQItHH$KmwBfDyh0Pv)05+to4`cj;aP94#UIeiZ+vb+F08f<91!Ho9e z9q_VF1P$k6e}Ggn9D{fG?@Dcy)>%z3*e6}J>e|qm?NLp8oIcp&X+<&c)3f_>qVTJ$ znSU{c$@>yQyLZPyG=zBH#bSgMqc)T@Xglr4fwMIUIMVToIig$vIt|D0j#39$!zwL) zU zTGMab)C5yzuA0X6JpA=hE)=s**=wBOvrGFG&xk99Gp=;Z2X$Um_Udd?N%{H*@%l~+ zIbUwCmm)H4suB${{@v=rX`*jghl##scU<;U!P^JN4~r0;hx|7Y#p|W3j(K%P6+kH6 zrem5`?XQo^KYLuGfIkTACqJT{)r!ufiMuj2tLCb!Us}Iq?bF~A&<4dUl#6(a(Hq zvi+okZeeWQzV5|V*r4wgRt7`DDdfQbw!C0)bUKG5^(E8_vC?&ZS)t_S6Q&Ils-ACwks_gR~7?@5pv@*Q~5 zX87q>rRT_T8Ra)cRgd@dJj(^@h_1s=++9;FIvSk_Y#^kB({pA`?LR$d?*QL5wBV0x z22-HVj{u?qLG)TGaeA`vM%tcQ!p5b=sZiW)t&tlNT;C<)kr#|RPJA;DRMnrNIda~! z#j->iTU*Gmt2`e`Hu~RXG8>|D39{(lzJFT{3pEi|(H~Xf@DEz{7i8x3B%A{Umz2=> z*R~b7GAu}7p2r5x1Rkt&POCgKyK3!Pm7p3+G|%x+vyxuWtL58q2#o*ofvy#TC<7r@ z!OihYe`D4Tp?6sm|W)pcY{Z*9PV(uVAIyw(AM$I)?HE>JWc(Fx6L! z$B@5ZFS4%UPmA7%qO4V_A8$uJf}$l&q2stg#6$%Bh-ushR>yuIKO6P}9+i|_74qyV ziJwmer~@tiMNv)zu?g zPz{YwO7RrLQ{@NRL*tWXH0}58l=y)El`70lMQ+#TT~Qqzy}CNG)ESf1L(uQzPLcRn&JN@iZM9@d#)L-0HJ&&^(oT1(}t|@(W&}Ck&@li zk9_F}EIUx9Q$ETVs1>;#IAc%ASK?qLX5t-vp=ghUY%%smyf!|+=b-{V-}MSTeR^+! 
zofcYa;h&A2!9nrBxRD0;FqX#EmwZ#0e}OUsuF&5WA3*Iuf-F1mT-E`q4)8bh3e%PY z%2!WF)JX$EAD(1dc?*73J@i$`5n8G7feBv@)?wJ)6}uT^|6bN=c1-*!W@%ltF&|pk z)eVK6nbI}x;ZqIf>fLXbkfWh>UUXf!|<3^EpJ+#IUR zAhM&4P)9N8i{dF;H;i)ei4n%Z5klrJcHw_a)9RJr?@HyMRY;p8)oy7&gzrS~QU-pQ zz>$>xgqe>z{a;?PlQtf>7Ceds|5j3eO6STMZLo||vt*m$f|(U1>yrW&cx9F^27`D^ z-Q1_T=&A$+wC@7X&PY{>Bg{tssp3x_U#yVaZ1s9QB~cY;alwI}-sI2$wir1&WT_V^ zFj!AJ9~^XdQzcnFux-`3D{+_(uMx@&8nbmhpc=@lLeyP0+*tdURbg$ex-p1(y!x1t z4`jitsd(zzpJ}w%?5NXS7jt0~)!vrJG)k7wSv8z$xC5Zz^ONW~+m_BSum)O}FmS36iu0NV95e$KJeLQ()uwl}s6m`6VTTeJo_UEt?b zVb$3n<~6t;#rtv{m|0j56guFxIwt4#ZvL}51?oHcft`E7iCt9D4d~DU7s3OSS05#t zSTAnG7*`&Ptm_pSa68Sg5ctNpo*HY`?32@p?Vz+p-I)AP-iDG~AkTT9YPHzr+lxRF zgtW4;0YzV8>#=j1|E{13u)!~34>GQ=z!7*jb>i0e!lIu3yZ7vBi)rsn1LhF-TM}e+MT6ipn$n2g!05v}eZaiGwQk#ll9bNB!ng(`WM4iZ(2nHq5!Pt*xISM@%JroE zSQ|+rFg-_&;H%Ej-2cxil>UQ8zE5hZirKc_>`9(jw->V6lY@0?booSA%1+hri(;D( zO!v*(7aiSWHtJCsHlN`?Yq``~Zt=|KcJ$Bg@~#yXcrbGqY(y3~O+xbe{umsS)sxuI z3^&e!02$VWc4bsa331_4$vp<0nd_-1*rXdRo!X)4!8$qyFN>+*Cj~eQA?XDky7tpz z<;ArGpI!hfN#js*>c4+q%2)irzOF*$?yYSgisx;mGd!&)*ArcnFj+50ztu?2j5MIZ z^KJ+PZKIL97p-OJtZJ(j01s|{F~%|6N7pa!>z&8iEhLGW7vHoDn+5Nt&SZzido@w+O^VI}O>SZPfau03poK z+rbTLhm>G--wcF`PJ6yAvoa!L59J4+;s4Gvj?=9|y$h>gTR(Dk2jUD*#Bx24=$%hT zY-%ig(c9uhGX+;+b3*3=d274{K0ZiJ6|l}klOlmfS`uuiS*_wBxx_*D=pM)F4<@~j zOlJR;uUfv}yuvefB69w$dp}E*_HjQ^KTT?-%nkyDdYZIT-TE<^Ab0F)MR{>Kgw7(I z6qjP42)s3Ic9{nSalfaD?)oF7j-zxh7{O^zfDTps&$p}Tw-6 z@ufJv&ub>_1@G%^vAyG^CB2|=2GNNDgb#PyRAbCAt}}r>e5s+GDCf?A-MNZsEGhNH z{F5Ql$z1%+8|e@4n0^|;viDO@T!OCIX2_dLuy+&Op+=^lKtF&LKl*(QfY zn>rAO&vcDs@vqL(bQ5-M=G9DKO=c&WPjY$A2wPnhPFsF-DXM>=J1&Tw95kEP>=}N& zr}M3!su1%jq1uosA0Iw7eED^w1}3-LG0eaGWsZ~bVvFnDZ`*<4dgp`HdIJ~bn8=#C z@6w|uevD_rj>daEO9Iw{H4cR!SBU1u#ofHHp!bk=%@~f#v!~$qn|=SwbZz09-EuM$ zL@OmS8TihB`9R#uq(n2&hkND!vf}!~r(TrZzop7dIth^7TZWfka>@duM9Mq;KYL1W zyZ#joKamYy&Fy&qK3Fb%DG^m~JXMpnNm!Vp2>4VE<%_-V1pc3WsEBOeZf*Xogb53W?kpx(d!om0P3pfK)kZGQUBtuGehPo~Oz8B^d!%mPjmaKC zezH1&MY;7h6`!=Eypqdx(qKW@n%ICr#j-IinNc?_LD5l6~`JT`agiB$jl*Zzk_*4IqIx~#P`{B~B 
zQ^T@-7T9-0yd=43Jk|0wdmr^Z@A)Sr8fLNB;xhI*qEt@cdt!F_n(aYGP+AQS{`v0> z$92U)zAfurW1YZ=L^j>Ju4I>FGMq3MFx33}<&}j=t6O9;_#T)n6XH(IksGmKXJ(IG_)U z0(KrR$^eHjLtsd@=)eJ~G{6#_J`4D4GR&CKO077!f5vDIT8k-(2-XlDQtbyaT!uFP zYWII8AgukGP-4|jERW4|wi5pACZV^AF(z14>aKCT)!RF?qVvl~&gTVxusnBGEk!F| zRd`yMxr_S6P&$2A8Sm%i^qfxPcps0?NVA(X;$5xz;i7-QJ~E?vKc4>P8>a~Dqks&C zz@1i*^q_%FYe7hK_TKy^n~<~#eA_Ue-NFuBz5)~#hN}tL2D;zTm8t@mJ6wOigUU;* zjL+0%cUylL&tlK_L8#1Gd6VM^q#0lBbhlW0`jrjo zN#`#Taf28_j8<#NvzGrO>DuF&Zr}e?DwS&FK`C3P6iGsI*j5^PI`GK(G*6M!k|JR< zlG7ZQN<~aj5o3-yWDYqOW?^&4oQ(~0n$5=iKEA*Iyk1@}FZcbq@9Vnm>v|s)aAirg z2B@Kx2yWSBW4do7xezVYmm=u%cqaGFr5+wxlbbvbA|u~O72MbSK0ORL^DOL6#{U)` zWph(okd1kuZ(s2Cpk^%#Gyjes?DP_sQB2p`ZcghB-}Vq^d&`7yz-zOjZNq3LoqK@& zYDyb9QXGgtrC+f&09Xt7vFf6k_1|gq@6%`JXy5YGm~k6z?Pb&Tm3s-9iPG#xzNL2U zzK*4~uZ)~_^H5#QX8`z;zYPmfcDGVb%KWzW^U-8Bk(McGgd0KRoKkFZY`ChW|G-4oKj`+HVh?>pyW`L6^>x|h+Z*y0 zVCBVXQbukd7w2sbsI5s)jF49IIxM+4K#etOT7K!GizHP(NWI#fiw^v7M|trCMnice zOW$QK0t)8VW6o|hkzR5?rG|2t{@9E~)7+y^y_(R)8NK60NK z6JJ5=i{6oglV~#I^zMi}ipA`vh^(5L^BYj(0RJG|U-dr7wnK1Smj0R_iY)^Gv^n!! zDeOfJv))Q^?b+F#<|gH?zGRZ}Xf?}coPHH7L>B?(h;TW(j7}PT`0lVR`k%?M+_1nX zA@qlzU(CY-uJX1fTQXc$ZM_pUd^F6a{0Q~@%74aNYP7#~Y8Yf_D>Et4c_3%uC+^FM zUtvu%wa?fYcDiINowQ zkm*A|FiK4tR2CPes@KH@(AXerP%*qnWiB;Q+DBy}≀d|#s z-j%)tNu%^;@>U5T#-^9`e&y#5Yuzy&C9JxQ_MfUZl9@)JJeX-d594A|{>xA%03Fbf z3=LXND2#o^ODX5v$T!)*WXl^P69AXnCHeb;uAq9tjX=fa&5sVY^%O0W>C8Q-S~R4U ziS*ifCJdD=O*3Xs=pgZ}u`nkUSvBqt*qaR82kW_W7|#}ec=*zCFc6EE#ZM=kXHFq(=7Q>@-t}eB)j9dqM2fz z!7e&7GBdv#fDe;*l5#dDk1lK-PcigOE&xF!2g9agy?1M}k79~hn-#C7rrUUo`Kx7b zniV$d1JgE<)XnF+YYGbE9~=~MP>IzOLD6BRf@H%?{#?GAY^6*7z3n0zb(@Qexy#ej^yA?jY#%Zj^4;&D!9miotQ>!dH8WVv4`Kbof)cJ?*;azq(0iq)#3htlmEQ6& zY55s^Xc!HMd1pJ!&z(&UqZ)#9$R}Ibz1wcFPPmZ5AXS0RMG2A`-22hik@;ZtB3VJQ zOXf9LK|?S_G{yrAGx}%iaN#Y#k$<9yt@#yUdsd}_c5W_lQm|2&RCNCl;z(W+dF7d; zE#SIWCO6l$iDM1eVp4!R5-Z9FNQ;+L#VN^f8o(B9yof()zx6~3>*2ef-{SsML9sS! 
zSeNbOYv3#3nDo9sXM^4V#x{Qmg2)bf$NJ(clWY<@euR>U0lFod&#<`!Yz?hX&uBv_ zS7FjX^uu^{!0^Mu${les_AmEf#@6FZW*U!1*I% ze^yND{~$qH7rF#*qJmm#t^f+Gtj%9&Njv~#CW;-zi0+44Vdch;&2RzZB8GU4XGTHc zjQUJ~t;@uMn~rw&RFY2QyHh`>Xy76(o|Xet>Z2A3*qC^N?0&^>-3E^Y_5G~7flr(@ zO+IlFl0q!Aj~#d{{**!F*iD=)VbuL*26v{Eo7JPNXJ?5dzYT{Hd3!3B=@2O9Py~aS z2={s4fL{NTLWGZTskOiJpu`vlBcL+YPwpy}FM*7w4h#Q@&%N~2Wy0zrBp-SR%nx2< zo#YE0t+-I*e>U+4`^a29}B=U8E6afPlQ8RbbF6dB!CGt((Oe$1157{XGcuk6#{r}b^# zs3~#Pz|kbfGaiYo1)WKMI1TiA=I`2*k&;F#K<2%8*-8-Mu9Fut%o5y7x;{cZ7L_ax zM2*7+4{6yth)e}?dn4BSe!=%fJ;!k7%#L;LT%i=w_Y)=KO7AZz)+r-HoZxWljOaxl zQmwbHWuQEV*j1<1=fwDWtS~lhMRD!L`Tz?aq>)!H9sA@;*{Z9%;f0Mq(&)Zi8$+% z5f`2vp^b?wsoF!Pz2BQE8mmxxzXG298&;J>jBzV&jGswk*`wTGHUU%BJy=|jE-c1W z08}0lKt#}*sRMd_=cJ$~TTz#rlotJ0A2ns6`+W++c~3k6R&51ZOKd*4P%=>I=i{aa zTfZan^wr3*cOf2t*&Jg?t*G|r3J*Xy1{HYY0W*7FuTxFHNEo3xm=lEeiefz`T&GBp zE{i{r4T@gr_tVMCpvp`6pD>l`J{F#7ioQ;J|G*iNOu@av&<@?c2tuRy#f!{cCS}i0 zhu)It$XvQ8G~7#6SsNwK&a$9)%^i_(9$DyjonGq~V(15UC~OisI6R|jd=G0$R^2%` zv{@`6ysngc8vKZJl&ZZ9uHCzo$S2&b`n4I&-tiGunwcAT#UsdVGh?JevlM|U*}9k1 zMj_M!A469W7S7GEaQAZoUZr+dw@a|YqVB=!hOR@^IM!WVhif3aHL2l$d8>(r)Xw-y zWqS~6%7D=mkr&{Ow%moeywW}WH~(YcXHscxirziYckf8LBn6=_{I@IqRjkM^F3qd2 z^HwISAV)vsFY^!kM8+L+Q_6!Sx_sxFd-A0OVy1SDY(9wgN4&+VjrHX)xmrKGBSy;S zBB(@u97UMC_C*HOjRxwt5^-7uInN<53($1V(O~dg#}0-ROg{^f?7K#L<*+E96-}8+ zer8%qzSZ<041pE^_!+$PM8~RcVR$NI3Y(d#(PXe##rS1e82|IQh035>mPjV?T0&5` zPKm$KliPsCtlTOvc{!kH-lXwzs&?5PNTz^_V65al`9BHkVyNB2HNd=0>|E_W56-5Y z;zAtQ3Y$dKP5jGcBP2Mvu?k5|gC6GGFE{D;?~My5P{g>dIs3?Ya&Pp1ST9BLzo9xs=I`(M)KBzd~gaHg*=O5*OYwl-NiC%}w$mrP_6z#w8201%wMoUSoRv5p&|@T>GlTiZVv?t7 z(MiB-BxB}m;r6;;xUQOXP%@maIhjtHt37$8WCj$JYdZkNAzS#BoW9UaH3|FQOB1{xnXoos8-R!2ruR{)xudPixcpQXLk zf7j_!aNc&c8_C>?8)fMK5Yvh~If;dVwrGMCFW|I;t&@1jPro$m6@D>4m3_+bW=9X} zA96Q=k-FvX)BF{H!6TN23i@se<_}? 
ze>E&M9Ca|9UI|~iylvFRH0|BGsL{kNT1(%$HJvG$`5|D>aK>AU1!c63GLTQLx}BpX zvvxslJf7j|252F~hX=&f$L0Ag3LlS2DT;9DcVMe2#OBHU3BV(r<#2qNiN9WV{r#EN zQe4QfjIfKBw-B}bBYMhQTAYzb5xk_vC%JPhO%h-@g=)QAvB=cNYFseE#6iRkWrs=`bzepWB-GEtn0_q^?v%!EMRAv4;NSx?v|O90ktQp{l2540s9l zIW%=10u9T)oB^QKltbT%N`dW}=Yto7oi}_b0@*dtt~!zc-OMS6VzX*JM8?SL98_v2 zr_D6@6yu-Q5%$D6sasEnGw71QYxXqSNGb9R)nG5>2Z(A*J4}~^YEu(&l3@t41t8##&v|HJ7t!YIGt5$UJ^exFoYtQVLhW(u3{^JT9C((RkwbT7rJJsn z6(mTFzrRxJ@Ye{+#9xnP8FafBvwy_AqK$T(dK{G?2-$A|^H1=8Td;3LYNM9Ll%aV> z65Ul{gHFNAt)7L;K@!|dRGT06R9JA>9rd{!2U~mSZY>}$)rGjexV|WkRzA=#w%5`L-dT(1t~t=s$niBV z7U=a3c-9=&bSC{2V>c6d%7(Yk{ORWZ?*~|*L5z~T?;t)taLudi%c05`R$8#rb>@?yhTB_ ze5?_;_6Iiqj`FQZPKUlqBh1QQ=NEQFtQl5Ds>}PTwntsnZ>>P@~p%eN5}n#5>3r zw+z_9TZ}L*$o_bHY-ixQG9zlr>F;#4b7v zWKc6Y8*Bw_1l?N1tU~^F!$<&fkS<~$VSq?`+B)8#U(LMrrKQranf{68Fh;_zPkg_~P|iPT^Mqvp-pJM1H8tMWv{WDRx)zrkOUg`yxf z_g3FEZ^2L*4_w`;p(83khx4EO^t)o%wsGZ5U{o9Bo9v9xpz@yi;jp5Z0>&FI0`N|u z*;dE7C>mJ$!GmozC~IGA!laG%Dqj5ml@DM~OuaW&cn~Ey&PxrJZ1X>d290G058ea^ zsn^(O$=@{t^j$!qbA|A48#{?Ip_FIQ$=}Gq1kc)_s1=MXvQ>Q9DHP|T#D-f~VeTNF zWTbEx5H!o6up@v@a~eHXJ2G@;B?y$fN$uY0n&*aUFFo!8!2t?hxK~{T`9Z_(%PH8h zBVQpFDl8u@v_Iue=Uh1ZDyH7Uen}bV8{u<7#?UitTV>dPiE{}$gS-W^*QKz+-@ih@ zOWa-P&=ap4jV3X>?=jc^rZ=Erzz~AY)%M{U7wJGdPkTJr+e8bzaNSJDG1#- z8z$MnTOIlC0QmW2u_g|2IJnuJwm+HLUa#Oi!~8`u*m$-k(vb*OLWF)R zn3Eaze?(%B@lJE91fy!oX$X(M6hp&vdA7^CX@SO&V0r+ME&d$al%oN5tNfNq<0mHy zI$SrLhA#k}WKJVU%#;`PAZ+$7U4hJSzN_#NANC(CUmE@dpH;Fp(g?aV|-DFS8 zqG4oNZPL+92mUsjhW9Qd1kVyLX657Va!gj;Nxe*3@)v);VV55r_!sUW_1E1`^5N zlSP2PtgGERQOHse`R926SmJkA!#*E2+5o|tY!1~^8;)8))0cG~i=B;>9IsfyVkhs$ zsSLWTj=Cm1z<;wF@DJWvvv8)9G~cV#MTh+h%SusMwr1w&IRq#4@7VZRs65gHV4PKz zhfp>G{b@ppM#AsGp(Vxv+n^v18{V>~d3WfegZW6!xr`ZU?pyo9nXs(eUxGjB0)}t^ zaz$)&uLU(apikta{L_^7UWhgx0&3$VdPSDcWGS8@{|>L&hc?_dGZqkTM)denR~7=I z4%iU(CT9zRAl-jM_Y3!Ee}kT6IUc=4&)Q8Ze_UgmVfKaxL)X50YqYV`q6a?0QxF&wc%jBCKK_QpCN#qmppLM!F&d6`NG@SwbPm=-%OQlYIdyE z)9<-CudTfncigCdB#RV(92fmPR*=waHj3}N_1PqnuU$)7^}95LdcnB|Pi3eHuYCI= 
z=u@ll?U0;@s)=IXogvviVT^6Y26al7g59t`jdY$IgbrgAUBC$P$WEcpO||#&VaAE) z0&jb)WoOL;4e4ll;50ZAT0#eOm3;EyT<_a85a38Bv9hC~(&^MwgYkN*l(e7>n#YnW z_c;6N8;-?H&B+C$*CW~F4)lj8Ai*ycc)mSs6sbFwEj~IoEhX-nH1{QJH4RvMEr(|sBk}Rer0>)LE1YeJvz7Ol0wyu&#Vq-E zMV3x^1)#8$+Ue^>PNEm3@Q|)eH_QjiyS(_qUGYb2nB9;u`h%%(J7tzV`5An95-+vs zfC*ncuF^vgnwIAXLv1pYSqU@8f>Am@L`svk8N`PUZvGw8OQG)22I2DUKN$mp%e!mC zjsZ)ID6Q`YfCyMOZ35}6sBO7sD!J?C_#rnv7!cvM8YsX@nkuq-NRlzH6hfB-GgmKt zk0my>-QC7^@P1$#Sl6UN>+?77B|X41KC1{{H!zcWz^7MuF5yt(`aIA+4h{UsA)=QoVP&A`l%WpX`zk?ho0BAJGDM zp*`QGZUW4{+=J^&AM)PgQw5TGP>>1NBPjMQq4h11xv!A~vt4zsngn%mOd4??yf#S*SZ-`@;JvdS0dP+g!xR z*IzG}RIAM}db6Ug@pO(;6MaN^`MU`$QXnjD0xGGYH9u?S8@_wwS1LY3rcvaWgE09bMV7}`-wGS^d{(g*HZ(43< z!kReg{v@`mY+CHO@Te&;v_B-?5(l??f6Ju?>~&7PS)ol}Y6CL)%=zztAZ_~~dp%*E zZwep1a!lclqo&cfT7~EH?m)Yw2|6aK`@IruPHlXxQvyW?xOCAv!K{gLmt7-0T^F^g zaC*p?R2@TNfgRIqT@{f+jB!kOS&-vU9evBU=KR_Z2h#NFf`7xlB6l3JkhFFZb}aX8 zC!pd55ab!+eLuljgYUko^fjpwrL}*SC2AX=q40i?|75*g8UH?t_F8`%Tu4aK%JSVq zy4wIvZ2iiFwrQ_Y2W{UyfY$BFEVfn|cl_ELWBi%q#BZr-Hdr($iHz!PkUy>l7bO13h(s~9-fuqYAxgyNw{&?nJ_II;PpV>NUCIaM|M z4QvMa1KxGSlj|>XmAWU3yb1c3R^^RP3c-)dLF6lHhuV0tWiHeV%7c3#gRUP&8}b8; zXYXb$0js6YsJ2z)fGeWIEO9_L!(Z4zz?R5gEi;^Ce~syU75p#tHS?PxaOUB8)rB3< zs8r-st8$sjT*T(+zar5g#xG=IS%{qGpAjH#?33Wj1}lp^z2uAoNJpG)u@)i9*}(}{ z>SmJ*FlYsCZga^>R-)i0atN9F9 za{kYUTZjYf?|$FzQbOm`SNBz#n6!B}{qR1tC1CUag%%_S|3!8jEP<#{fBF9!0Ei_f zfD`AAuqG|!P#k&z*^ZiiT53aL`@o${@urTW<2`PIvaBrUPn25^SC~TML=xa_ z$Gr;>r92OE6@g7+*cKdf2VA5+^^wlyL6K;ZSb-zjSjZ>V*1QRcTO#n$sZ6Q| z-$sj zLJj0y^E`0*XaG9AxRkE1Fe5wMIL5usM%Kq$jvk-lfW7*a)3X0C@VvserE~57=!Kr@ zmLaz;AFHQZ(2FM=w-qe@Xp9AV!UL*84=Gx~l4|3hb zOde$8yf^5}xjf8+Y0Ci7Fm4s+_jC$WkUHIG$TZCD`ffnHc}g`HT%gw3Jlrm@`s}U* z+^w#NR6cRhN>OihOpBAGhI3yTq;zYHrYH7PB*1Z1vtRgiwA!V zxrt6Zy$2{O3C{*W7X3jpvt;>HwpU-WlDWI8|10kK-re#lB*_gL?*&SVVYW5?=@1n48 z7FO!BX|Q?+4_7VHR3d*D)xJE13C-f#(6t}T1C|@WA%U*;-o!$k}^R<^1FBsqOyfCxqjjvB0xjn0cyS9~(sZDxqW%$9s;s5{p2<&nxkvyyV8&~`0Je9q)QULFRP@r zL`4=_pynE?K>L(NO8rtWQ;<=x53ioZLnM0EeHExrFK)WptPsyHFy1%m@fq6H=D9J> 
z*Jy!~YUCKYzKmyok5m;jznorKzK5~mik%5L#fTjdKj*$OB3&R_?)fTDW5N=rj6O_GX=x%IoTC&f=>Om)kj8nC8C#fw6X)O{PmS_-$#Xa z@?n*SdcPN*8e-d`@jUr(RC+XBM<_z(SS03gcWrni>yBGKTA1&u^V}TXZWbyQBFtl> zfV=XFW>C4@KetCxcPZpd3yLiEc-8)YnVK45DAXGQ<+*|GT(%+ktC$4DuGt}ceJ=%x zhRL{9dB7hPF1TVy3gi~&rD|)eYk_IGQ-(&9#&bhnaF~N=g@AnZ|0M;E+!sWIWAy)z9!fGrJ>EH4rJ+<_C(2I=s?g95n<|`_{U+Mgps9LqK^1O{G9_K(OtAySzU zD+-T|MaJ|hmc#ZFy;tB0C`>hx`ZUkYcqJnQb2&4_D| z8kJ3b{&}^Ln1H?ht{ExZ~bQ)2kgcsF0Nvvd69$kW4p_1i!#>&W3nTh<-Rr^h>~9*2zx+>bdE!CbaFlf?F-W z@{l6QJ>U1dQjF{`$qX&GwO7b9qv6mLVxa+ze;@6z%+RdXSP?+6n_ROMqY!WxFMs7i zHKUIX6s3w!AGKKiqu3-(Z77!s$!+FPE6<>3>dyfIdbY8&l6Q+R#s@*P3+ql<*uD)9xKyYq;k|i3Qa3Z{1{Qxv%XU(b{0d@KtfY}*?^rvIjepw6$ zC4%SVLMdxL!Tb*ILZP0W1QR0hSRs8xy$%>KJ(C1gzL8YVTEcy&_jT~@7j(ymz|$0e zZ$)lK*R34rBcJ$PIbIQ7{PfTTO2FxU2~2D~>YhSoT}lm!mU2Yg!DLW%YXpR`puT6? zCG_sar~|!*)=-88C^ZabMl_&h1k^L`lJJv)B(xN0?tnaCea#wNaRjtuwI%4uHHQOF z3_=zMZ`2A0Ei>j6m-mI!)gHkO*Vq5~^11mzmkHZl1em|%j3S@qt|coV=wUpM?KlDK z(mlmPEpQw!{F=`bONHUTmn&zk00&__Fjky6(y_}Efgx|Caem}1 z%S#mTmh=tr`25j7T{ZXcs#O|wO=lk|Sffs+#Z+oh0~eBK-864L(FhBye2ZW60?cxm z#NxFmjw-0U%t$?KA8)}ZO~!v z#B#&E zBh1S1Jna^Y1;jAp)~~)=aKY?m?1tyMN|RX@^1}NtJJdI17NvN3I~L;9sO|Q9mpE6; z10N-ViEiRfU=?8f6|vwa9WZUZeGC_M6R>3JTESNCGm><0=|pLI{meD;ExOI9yvmlT zlEK|gW#@sO8(l+Hx_PG*7ZAOTsyg=;H>AB=!dwu>r0X8>Qe-GKtrWGnAFzvP;E4A* zap{25O9U@Ca*dG81J8YGXg0BbQFrQz<@eCB&8P51KnCy-Q1$6A%k z>EA_hnGxjd=X5&^m-&nXrBs3^?K5);AC4j`M*ZJFRMvhkXSd!G4Uyw;ci~SxQ~E5` zCM^|HY)mtFQ3}l(v|=~_8( zjhd35!Z*3 zk=i}K_y@xO?Zo-+u8K`BcWSM}c|&5on`p7iusl$L%*s1)GEAC#ZI@y3za(@svvSJ& z%T!!w&FR9=gOVdx-f#*wRs(vD8N|6bLT>_Dz2f86(zU%yaQ?p?7SIlXUSEt3ur;AE zo^}1}C#DMBKty+l>=NWWI%s4raGiGgXF!tF)~LPjZK&b#7*s!8m2Qc_dq6_!?3Nn1eaWSaxr!dz0v+0vA{jadRA~TtdAOtD8 z=?LSMy-(=|Q=~Jo_0JXLdOo+Tq(SJ!NI&n)QZC;J5|7)x;kXWhuxq}S$%+w>L-F0h<9KE_MF#X z+9$mhwkoJg+FHQHJxrb+Fa*D!-)EsdUY>_n@@UdH&BBi#CEflUW`#0J81o-w&ffsV zE<<)?W)n_nxQvBBcKF+FNV5?-9}L4l2Ib6hD_cEe&QC(^O-Dyl#>NhXnnIZS0w@ce z*Yu!spT&-fBC`JK;KRe4cdG&?+G0?{p1@n03`jI}N2I_8#PX@wS6tt*ts{5VuN4IW 
z-@YlSG^wD#zenEQ*&2O#tN35+tq?7H_;`IKku(~>P1ngHXytj8ncP?C3YW&n^5$6s zr}H3%Dcc5G)XR|U^+%>3cGMj0+c{y&T@|&(dsK#^iy+-LMAzyxf`R`xdgi&w`yICu z7mx2+wcc7kU~%xw$VlV)`QU?q&-4=#vk1{ISZ~ZCIp=pjDp}u)P~ch>%TU(xHgp<*>zR(=8Bix zk2YsANfBGhlvKEr2UY_0*b{y5n7(L+T5>O z&at*xA->I?Fb_6q)l^XxQPm7>%}R-BvjqEA0RkEY`rZXT{rZM-?D2r&J;I0)k7wp; zD;yl~u6$Yg<&-H2sp=ZZzf=;!`&g?4WNxcfm(YERX;z0izdsagC$iHuAWw~4P2n?I zo9M&XnC9}-QBUY{kpb|j+vEdV{LrT<4<4!&Jv3)C^z9mz&@RY0-zb0Cy6jeE4=F0) zKR`b_I(MW)vcNL&6?rPXl_Qgf&Te`4;NiC%D|pXv+Y^>lr-5E8=?4?_nz=_vl~-#4 z+LZTfIc;5gmP^b(9bGLO2gTV&!Gp-O`e@N?4vCU&VV@6uuP$yOLQ;M z*&uvQJ6vZ12(b3mx}n%}Ko?+Kj=IHHjPyJ_+&gqo5ttPUfoRti!4r!Fzg}$z+gU?u zP5d1d!9nx+=!i3PQ+lOx`Y&Y{)%N6RY{eKt#^voe1Emzt z5&7BTz*D~;e8PK_@AOfIWV4qp7g{&P7VW_yeTagytVaZsB1j4$LT{O9_IgFO6!Xun z!U%2%4%U5k`HNg`il@dE+@*rF(If2>ZTK7J((D9JUvgY)zDN03b?oXr$c3Dk;ExXN zcjp})ip906m*q>@d#Q~~hc~VRgwD;VR%?OzYj$7NJ&n02bQjW{1=Nz~_=fcpPihZm z{uPY-b8W7Jm%{ioxIKhm&%~BWX8rRNmt>^73BYGAZCnVN9_x&C!}u82C1V{z4RKG7 zQDgkE?up;SW3q^*Qx7h`Y=o(zOf%2!&$tT+K|vw&fs2KJ;P6`2e|6O zkk7CqH0bYlq*sYzh6~zu=99jsd^Ni$`5jXJk6@*=&lg4s@5oX` z$Ab+@O*4#aBnpmvebsieZvc}^v-o>CcMp47D8;6bRU0XmoVX=$fRyA~=LOvB_)H(Y|RDzqi-f${cvc)XDL$JWL{Jm4;-_ zT2LLbGfjc}&)3!;*r!;H(2Y7?Ju7FlcgF+Dk>D5Smc?yk6iTaP7ZIU`rCAlp8F(|Q zzJQQ?nyhtC8;nveft%T_()N7onhQ~cC6dCuyZnaJf4_w_n~}H8SzJ2}q-S*^crUiv zT183@p^F56`MG~|0yk?|$2G;7xg(DK@GWF~&r0ZS+8?8UMFlsW)Lw?h5yH^1Fjb3# zA9$ky@f|VV_?^^H*hQBPX|EB9zMBe#pkF8&xtjFoHsI(GP0xKvn(HAJvhI(2U$%8z z@7(mH-IPib$n*E0(YO3ccl z(HTJOcR_^3N*TQ_dQY8^OlU0Le(uoPfYZNbpD-rA-k;4qq3dDnt84&C1o#H;9u7ow zUw`%B9P`1=id*alK8g#^DZCrzOou9ecv>@>QYgBMrMB*;qW1`4`8Pq#mDq*IAYK9#YgptTlyRBKuk?zY!m{6(Ec8p;dn)NJ1>l$5 z6EzF|0iQreEehl1Q#D{{>|RcO#azV70H*93Gmg1zSrdN)COFdB`C#*>KGmr&dV3R$ z1Fsw5E?(X{ln4_wbAUaF>^EJjzkylMnd?5r%<9ttK9!m8qqA`FR#3^mbF;ntk1?91_AI@#So%1$Bfzc2@Wb>oGG?Za5{}Qu-sI zXHqj!t{R|(Y!|#({Ufo8=u&D0m==PPoaJSiA^163q6ElzUr)D3AV9p*IGPVFU>ipkDjH+ir6mbj&94N0xJYce5nC3HDG2~dW|y|J~eDO0pBPG$C;!p z25lizIy5$yjQ&V<@odb;(!M)x4MfpBb8IYp9Zoq&EBd+&ByUk`r?RWRWcvSL{O#on 
zU#Zn76c^a~0S_I`@na-q3cW4lYmn;Tl}4M)IwRNGz?XPZ5LOj)n%?$+Qtb3X1@-8* z^#+dhFKjM6z&N3|t!;(SGlsO9bs=&N0Uy+Ndwn=0DQTMj&Y0|#&=HgCVt2iVM_ssL*yGJF?%nH{wN1P0S&mfR%}Zf<(6)JIV-*#EEjTnQ{0yHXoS^}wy+8a zp>2jWfe7)EloKOTxzZbs;i#sp@q_eiR{rfz0iaBt0XLELk0@5*kxOp+{SsJY?pn!S z>Rw-6(aQJ@<8J>eGF{5jaQl511hq6%$g@te}_-d00fkBKU8j^vIg zJ`G(nD7p#hD6LXvclM4uk8&2SIY}&^`>kN1xKjAGE@*2qo%9#9NMm%w!sh}5rIyHS zFYCD1%205&WRS-(*UbZIPh!kf{u)MCiL0Bg^k8(^F)kAixfWDt1V<&r3Lq;1N`BgM zq~gp0yQPln3XA=>M(j0$kgI>L;4EHXU?fSItYBP94UyJ88R2 z+}a%a$+!XWYHBrL(A4$V#n(x!k1)Z^;6tLW*pTucn<$;A^;!24&OQ`a) zX@@3Gj?{en-l>@YH|&m09|W(%paxtJ!ObonH-CT2ya}g~TBnRkH~XZxQ7j+pH_*~? zxZo`1UPwWiwg<#^LvJ6+@l6CH!kZw_!^yyjvYe)bFMCk$jm`Zz*uUTT0wUkK=7mZ< zM@jFasa1cGGzUW6s4K_&15{n3a__W{;B2rR^_i|uGYqt{r6^Z(V`@ON8Vg3fE2e_7 zOxs}(r7Vh6)V3H(Q$}~#(QP0w@ZkxTqaj{FZ2>GlAa?6Ih z+Yt@BhXJDE$X@#U&BxDCd4v9yt;1Eeoqv&SweQwFT|2qfWdw%x7bLo<2^&w)?_FBh zWf$MWt$djaA+Ek}Gk|RC;NZ7rygb7n_c1=z!p%Q9e{%e?bRFq>kg=scX?=)(YnRY+ zX}7-_JtN>+(P?7XHn>S;`v8;|AW-u&rK>wLKMI1IA@&@KQeJ8v&w!&Am3x@`Qq>-k zHmhYot297n-l%Kaxat`e2BCsaY&L61abHrw^Xz(o9Ic)r+o$75KKM}pocG7N|AQ|6 z@8g@egQM?*h=ACV*^e>@stLLxcJ-^|rlwf&a(xQDx6$#jZb|5*>nGjyGz+rfM7jMe z?^{b!`MUv;Fur3Z8omeZW8?pa8Cl?wDM628z!hkSY9}#vO*ZAxrj-#xBsGQA$RikI z!`wt)6KK~b@1Cj)P67q@Mq>`Ko2y*rb*HD=1*K1Azw86NqJbsnP7q~22xU)24Ie$0TiI4e~c4WgGK?J zLUD;2TYCfc<2*Isga;x5P5(bY*Tl|zwB%m3)7BL1P?E`3_Y0^Uq=ED?#+nr5qi&+Jh9DFa2Foe=xYLhY|wEKU(J`CklTGfwG4#VGFpee`co3`_99p_EkB)|&D4p8oBy4cW2-d$ zsRw+y6^L`uq=;uR`8okAmnn{V;Mv&R!}IFOE9O6hcV-PzzsdfO!!Wv(!D9|1swZ?p z$>Se2S)0SL+H0D5QhrX=Ywb^3*#|fW)h~=NEAgH_OQE9?yuab5MRNg@?) 
zN{kx(IM&q0OMGIvW#7uy`|UWI5oIT?pM(Z*$jXZ$eU=IIT;|QmSTtSu3{ETuyrqKG zxz+HjP=zxb#t5}OCv}qP*;((WgR!*g{8$`L^nt%uS>B&brzQD9`o3fu*%Hjg2 zG{~ydGef$bhiuoN9pGw;aSoQUgBHm}hrGQ%Jq%0D4YRY-=`W_odzh4!)o}gu{Ed%g z4q2;j9&fq@X-H2-5uzzPg2|W9pp{6n(e;&01lTTNrjy;4?~xT^VpkiFYFi#+FStOi zLRpwtfz;ZU8AF?;4t%S;QON$xcw8#E17Rh{zQ}%r4 zs6mJ9DLlGc%YxL3qOxJCV0Vi>UdWz6Mfi{R_O zK*g&52*?aB2CWXY7uth1Am0{CDfpja?cb$m4R2WaLw&N@NocjE@1w_EI{VISVZtdN zjQB=gFW3za+>KwUrr(M40cq=kr?(TI_L71 zsWAIUfkw>GI3LciZpkxFhI zUvVQiT4C>nMe(&^5UEn3X}*R-dYNu8w1hJ+Y{Jg3MUO@qnp7f`W>Opy=~!|L1w$$Qxf=*Z24R zUZ>CJx~`QUfNhui+)6Yd{BnVvub}UPC1UYPL&&q7sUHYKO~^g3CtHTMus3;BgZ6{J zx|fGFolYpHE&s}I-Lr2>UBg!|6_=Mg$6vB{+1BW~)`{j9!P>$Xb9ZPaY+NS0iwgE- zTT;tTxXXzi#CJ3Tv_1dcDivaW_LFo3*x`c4J4t{h^?9gWqf6JVhpN3ie3t+X>=NkA zI((99SuOTER8ipEh9%EdN&A%ihT)%J6u~v`D*9h12K7goR(lDf);%Y9 z!H)XRieA81&o~tSVFu=b`JiTe)uGZE!rUC+!Q5tDHSFYfQ?>f|3I03my8x_GoTr3z z6Z%d8-x-nfpsW_*;$j-~_?0l*aYOgs+E>%}jbWC+JZq?zl}j#sWPk9WF$Mc&9H*bG z?$#JLmZch^-(Dt4yk|Adf2@@Ab>1g}I1<{D-B!zQcpJNpzyJRyU+=EL2IemPo%OLP z#B}Qz?BnJ)en(aKoZo9^GNMH^U1-(Wf(=8 z&0Jy)c_X$`S1@@7m~i&m()tsaT({bm#01Zgr}xZe{q|&TRO8`Za}UJRi#hUdT} ze?$)Lm0q-oSy_bU$X)U`KWX^z@h^8`KNKf5+7lke*ClTLo)@>IP32fHKejVxzmyiM zUqkA(uoqVJD<6uVtQ|T1Xlea}@%<6f^|R5r+qa+tbzS)l*-vDHevq8@#$PS}-mt0K z+UIWhC`tI%dew;Puw(CPQJ4!6DnINUb4`$yO?QDuuHEX#^ZvRtxjNW z`(N+fv?EEHlNyO8k=^c&nrs8}3G@Ek7lF(xty*q;Z|+k5+W20Ry`nEA9%yw4sbO4B zt{LY!#u0|l?te#YM17rmZm2FG-`8F%-&_5N1O0mr8NdmKrJTQBVqJn~)=d2MTT6Z8@j7xghF*X+lDG)YayUazX4pF!Uu z&P_*K9UKe4|AY6oLq~u1LBX{YCjz*2(~{{8gKw?+?j-f4h*Nu_;$sRTR12h=*f7P> z%TGx?ZV;8guKQK$qzk8!h40nXnYo6nq1Ut^ZSl^v_wf@=N90tRwN->BN1T|DFfHWl zsE4tk!6=Jk?IOHpPlw)-vIi85@^}|?BPhkwPzlHA>t$6wV^az!Cm~ajnRpAAC10Z} z$JL2nZnXHGbiAC>uMrCcf3~|bX`fTe8eOR#v-2C{Qu$wG#0k>!m*&xn*TktMYP-LKf7>Cz6Jt?%+>3uk2B2sT2xy+EAK_gTM(R z;Pv0UFDQKC2h9}WEb0`B))G~MYxjwV6WlAnqRmmasu=$Iw{oymFfD@eR9DOTvNaop zuuMCMGa`FRD}I*VWnIH~N{qsgnsgX#?pkM8c?4+jB&7!gn%cS>xY%-OEcN0npNhuO zs=F=GT~u&1W&FOrtFipW%#G+$DV;Nj3rbkLYj*Rsbz6;@rQpj{v8AnQHZ)Q59YT{R 
zNbuXVo-GKSZTmYNW;Ue;p%4ptILs@6@M*dOnRF$dvPErUCC(bTtOaN9oI>>BTKOcH zun^g3YvG{WN?K_`nm(Q48;E$6E;qm`J=DR{mi6=?l=94#7VL7GSz8e@8DU-;;A}L6 z6d0}8;o#->m)SoVfbl(MI@yK&x?y3TDTmBc_=YdLxm)(BUibz7tm>cleW@xx30eea z`D_DabMt53qzu=|s-B6i^Qu&vJLc?7q*YqiP@3ZDfVJXIy|!&hYCe~+SZ_9G>=U0B zvehJ;HNIduBWw$$8LA5=3kuE>HZTZ>^e!9&zNM|MVq_ya`RLAVA0lk_$HB~9qNf1qk0188wSgL9P0vOpb&n* zfu)mB>vVgpy{e>VjpS{$6;~ffH#GX>D=eU?O4Fx1K+mZNTR5t+0iyc-=|CDd>!9EB zsl@)8SJ(NRcQ-3-sqK=~U+iA0E)05IB1@FC__ITkzBbhbzuDtfatRga*w%ss1B<#5 zzfj`jf}FXUo(ibW8&Wb1TlN+sae1SV*J}98cn;iQD%R7CvoKekD||^VafVNGp|!4w z>6PoIn02b+x|SkIZW6iYujOH|i)X?+f*2;~Xd6%9LzUgfZ)$zRr>yJG81F{^wS1spH?z*b;fLQT3pJb@ zHeTvUf%V$_McJuqh~upwTgannFtigT!QN}#0krro!vVMh4QQ69XGx+bnHE@^z3}bW z0c23=%s%GFmeiTW_Hk_L&eS4uygiVrDqWJrj#RJW7MQsXxbhw+5Qbh|XR?$x99 z$A*|K+4-#%6+iambF6pAB*vt@Exhqhvb>BE|SbyQCiwFPb& zaTH)eEP0Bo%`h>~FMcrngfQLRMuQmZr0Q$_0=opvcuo&Q=b&PO@8cv!{%Bc(74G7Q z&OAq^azc8eFfR?*Bhy(wuxN^MBn2^yaQtAupsHI_Y^L^}=goS*uR|Luye=o6 zD3&$GN9aLQ35?I)e*FCHSB*#D?Dv7~EyTJ!JAHt+lAwUH+y=ew8Xkk?1ny(c-in)2-%iXfEX`B2Fjhxf z=Tt9K;@5HJ(}S3dNv~D-rk7w70>=ZG@7PjK#zshwZjng%>Vm>IUSse$w3YJXgLRgE zhMUdX7p%UAK*;&CYu~2o&&LF(g`>0*<+m$=8;@WCxaltheWoaIE1+={E8|M-)7h3r zg(|?3_TzOy5G8Hmjm^a;uyk7nD?0+;&?g_uNb=v{Y~jJ~xXY&~<{vbm|0(afgFp^X zpVVB1;J7i!f5}%jgMoBOD!|Q-}-7ahL=NG!U#C~2n-@627Q@~V* z_Y5yjs1glwXa#u5Oo~`JSJkn;&aJGX96!kk4&V%83{Af<#|5pk<`eFOhYILWnk1kK zyu{!0OL3nrB$al@dYXOO`LX#UX+L@l;hT{khF`#mRJxjs<_@)RrYjhvizWlVE%yzx;PZ{83;rcc_PMkAb7uC&)7bfgrkwEd^Zxp`=Kzm z*Y^!Lyv@-l7OzgV@+GEyS;eM}8h5W-l%wer>DOCM%vP=T8y8lV9+(l9b=1}ZNPe?pOUy_&HC`13p9FncV`-Oerxj2yht$5;sz(Up}nS$>XuldTYKCITMM+N z@#Vj_=8yK5(s;A$fV*xzZhX{L5kWM(N7);HQh1Y^W@`!Km$URgLz+cRVssXHk!H+M#W7fH(pngFw+oV}uex_5xK#D+hM(l; z11d}OFEaxh^^+w{51CZk5Y7pNcOyEYCp97emx+R0kDbeF80JEDI>pX=;5)%qik6;# z@AP1Xp(a>IIZvTdqxS|7vPX2mUX_)3T~R;@CHc*T?ISK&_M75nC2JK5K|kLD?lUj zoXONCd4H53fM8{%>J!E^%RTx9y4|o}DNZaY7a!k9(rxD9wu1!ti)Sv!i5n;KH!}$g(rlG7uVN3dM)7AHIE?w&Tj2zc;h?Hkeg1Qnfvp@8vw!m` 
zSRKbo8IK&ngc?Tv?YeYm5jOT4T7hb)3}T*7`xK7p7lz9^WyWj2F}*v|3Es&r^XmRB#asoi}iDZf$nux1KQ-3iD3GE~*g zE#)fnbz2*sQZ-Mi-J*yK6S1p2X^DAesM;b9lfIAoHJG7M4TeDB1c$UIa$c{f>J5%t zLf_oj7aOWLH9Zj7#d7VVbwOxxzgAGps*2;a%Vv1&a4i39*9jRSE@94jYHu{rJLXX( zJ^361yJV~#l%691v6kXRozUDiLzol^fPJr1*{*`Sg)kLVGgG(lrMWuVe@yu=W|+9j z9{&ZHjg#iS1JaC_0`D;`pz?RmC(vN_K`=Azgz5|OmdH1XkL{|T-m#P7D7;zj@tAgw zP{p=V!ONVD0~v3=my;KCR>AJkI}?>(OH|FhsX;Rq{(T<8glQU1;3s`FTWI7hLpM-Q z!tD)2(-Nh8sl%n!NhO`&1pQ~c#>U`7?Yf7*?*x{T6yyVR*94cD%rJcobaj0Re|SH> zDnZn0ZAI91tu{X}G780PI4tG_Ka!H{vI@E6N!6J2k$6c426~wS76OW8+FrMXXvUsw z_Jmud!uw0KleDJX_|<;U6cn1a4Yc?zUXQ+lD>7b~7(XrJqBj5dvusb+tQQ4av-G~X z3FfZU$JQ%@{Q3{XV=<~Grbv->eH|Uh<{IR$JJa)*Gb{BOvpdo zQk@)R!Gv{3&kyL&OzK3!n{*Bz6W4glj&4ppGE@yCr62G>C4NLkPlomvFvJn*_t#ZL z
p*Y4E066PgkvDD}u4Jv(t$b(IY%=xu%C@-?O@{(mJBwnB9K7$|0a##I{ZA%_f znl6YRY{a|kM3TbO`$27j{V)&^C~$9z|5Prh>ODO$pOM&slEU)JGZFeRUP8}M?w?pq zzh$(HkNb_kM>FJczxD;efA)H7=Io~TCpr;njKIU7R373?3a}Lc{2(Aq=@B58fjgoqHCHA z=}*&=3qvT^7z&o~b+x6p>IKGsgi-CQfQY)`lj0dcEXusF$ zTr5KsA@Z}}W^)o_!;KaQcXQt#umE*4ip$1do-LMdZqPFhZ^eV?iPE3TGpNrx9?qIq zd)T!X8sQy;Zl0honMY!h&Jp@+>-$oA23`rJU6>(N3uw3Gdww~t##t?NXl;t8u0EXRc*bga=2EPbSGK> z_}OfrQAEDcK+~=K!J{?VaPCQCiX~+eoR6)Tut1L^5x5YA{`?8O?%m%nht0s7VweFs z+(jv3`>+8v-oRKZ4+5t4)z4sX4w4FRF zru(utvaSptEB1UAKK9zk4)iA&;g!~|&XxcTx`q}wQ_h!VQ}P`0+Vi1y{9EXa*2wBx zat6%}?iO0`b%r+N2V0_|IxW^jCd%+ z{P!MYmf$W#HWlg7AD}8?e2#N`q)#?8G2u7Wn5q`7e_?&WFg z9QbW?`>ArX%~!S^Y+m$t0(Ce=csx|6RGNWTFb#9fH0nlS4Djc99GebQ znq)NzTCAS>eoN1!$(_Rca>YBCP43d@GAEvkabm!3J=5FE-8|!TIj$^3OdrMZ_;ocy zB>sySXjZLY7K%p~fO6Aa2Qunt>#mv7>2YNgq(i6wRGibys`&v@vE8aepmg9;X_Nj# zk~#HwX%lu5^jy>Cv<>vU-v$${D4a?3dKO^Yof^4#HBr^CeZ`Q$#@ZT{`}1h|yQ};f zhRGJdvgLZBYMv_U6i;cvrrRL}8|Abe2Dcocv(I4n(MJ$dvi?^x%iBN{q*v%qxhpHH zH_^J*;abXN=aawMu&jV73n*8T%)tVst|`0l?1?UHItDqXsu7dj5i_8vct*fB(Au|DK8;#7F=CHNj2r#*Hl_J17$c=ET!)jy z7YIs?x8)M@I%{W(+17sHRrwum%^_wdJXxDmT~mK7Ej452T)~D&y)v31tOE-K^p)i9 zR9pGQ2g6mA@}`=`l?do+W#jDO*%|6d8WLAMhaAZ~V1as*WETZTr6@~@3i+eMDJf`U5hk=`N3@U#`*>X2$9^QDMP}XtG50qvg0mZo_fM9{a%J>65~|Z8vf`E^j~P%gTTy zV@th*Re`o(vVUHA!7fXYx8@3jl+;38CPJ5WeftxBql@nSh~Cr1k(|%Rl4sg_qiij{ zze<=BaF5?!68`!c!&!auIKM$KL=1rqRaLT$litS4;&j>czyUDZ9@D}#;+ib;W5neP z#NH~3Q(8U@0Iy}K0leTIYx9gVG2Px$g{(d4{YanlKuFs&O9>t(&#)?Ll1E0DhUn^Z zJ>LhuwEW$Fe%CPNc|&-N+3X(k4Pyj?241}t1)La$i2`fgqgh$~1=^GP!r=5Peh2tZ z_K7m@sa47kK;uH|ogh%&18&-<1>n28RySYmV+B-3Eegy{HhQW&j0htudquH6!@|Mx z%#@Y%v&EDNw^KG-R`@QvyEDriCreX=a@0$$pOD_4$WK}t@lCK##^=W_+!@uIq%Qrd zbqgWBPr#`DC6O?|Y?kzwC&83gK8#0>7~S0; z*1#XLpBzA9xQxkDZ-jn;VT|Sfs^;d64-iekpKJ-l1u=txgJY@kaDtWFFj@Xo_&R9! 
zvQ0W1*3i7fcWec`vjvX|f?G;wuIiH#?Tk!jomqd?L_o1{k9%7gD%dgM-BQyJeCM@K z@f+dkffeQ8v>;x*+dh1ePYsi>UiR{ewzl-Ih(+HKfolW zk6k{}Pi^7!C-LJCZXs+JNtYXH#7~YVs=7rU*TLumAnrqY2nYlX;6-_KTz%lx7Ve3w zY^hJC7G+7X?KKmihSYHX11c^ZJtUBjDo zgBcb&yR#?>C$96$n2Qy}iSNLZ|NNdr$I*?Kf>+J}9(R6CXkoeUd@T-`$pc3Heq;AY z+Oki*547>oKbb9Zr4sILOUJusW)6PtbsJX}XllrJ!81;CJ&`P{tNH;mkXsh%EeRp$j<7Q z2s?Kl*d5NF&M#*?HuL)q?7SgV2tEN`x8nd6GcF)WeVwN_^5>Z^ zFzmwBsfpsgWRI-rBXI>o_fNx^U{_j8rATMJw1BHKw}KVNZ51cX7-=29R?I?wK~a3B zN_}#Y6#|RBs<&3yoPzA0Y0GP<+T?}^oD$cceWF^$#MR@u0owIigD9%=B{Dk^I$yP2 zemns@8(9wKxJv(35Yy;qTHCk$LF|2IV_%VC%KFA8=PHh9vjZiVm$jhGxJH+EfqArGs7T`gwp1`u@UCa zxR0^0kmAm@EO}GSz?{%lv2nhj#VrjU!wu4`*RBFfb%X^J5jhuGQ)|m-7M22c(5-c| zrDL)$&4PDzl!H1R-;SMlRHq&;l~Kk*Rhwc}(h+I8*~Xjcp5`fE&KQY`t>!JBH??PF za7oHCn;jtC;M=@s?LE9xi>ysH8i}>N>~3pZ`~V-CdBxXZ;(xD3AK0@|`HQ5088&#` zqH&>B+AIIi46P_=U3_OCx{4H7t{vVA6t~j7z^0he;X;V{o@~NfxDb_)9RbvEE@#XK zSiB}*!?GG&O+I9vffL0K7GUm8u)$N`#yasOk7&?1PFE4mX{0rpqGS0Q`vYFg`K8Cl z{NHodu-;JB+_Dna6(`CR$?RSctOS9$&XAjc8!M*ns+}MiM(>JE_9j%<=+3p7$1MQ^-BDrAOIY zV+PY`eN=~KyKnHwecNF(p-Hrt>ivul{D zrh2&^RKrS&n>^=r8ml2hxhEz=8I&Crm#9A^Hf4Qf3SZ+;;*d_i4gpk5)Kdm4)F4x#STYE zrbRhl7E#&8l$u^4yuV>X5S;!5%L>toVgHN%FdW^T)^^H2B9DpOz82OLq+Ie1eMFW_Qct5h%yEK0ZN3UOh#1t( z@U5xQ0K=sxp0OBeJCp?-uT@iysPbEc%N0UiCT^@)DK5{f@$LaqzP=AGK zjXbSl6Ez#=Ape*S&E&A1p|?nsSO2wq9Qh=mH`%4XyW>rA7(KA>D|sTyl|jO?p^{M= ze;*t}Rz{Fk^X&%lWBk!5-kT4zTSADD^}8a*>Lk@v!)*^lDozk#2FejCeKdtS(G<(4 zm*)QzT++X;9iP+043%$W0Wz<>HJ`jAo_Eca(@XL~)-D}-xTU(_h%DZ3XQly}focH$ zJRyqgn%oAh$Ne^Oxe}sBgn35WHA_Cw@68k^NBjiDB9R^yCkb^4#)_Awkh&{e>Z*x* z7p9V3A}Ggt4hU!P&526CC)BhAwyhhwjD0H` zU99(v!`Wff4w$wGUNASjvzC86{usGcOq2lOSBqPPQ=f0}HPxCB(B2$cg)qD4DZb?Hml#XR}k*&kIUgAhHT)(jpR|X9mS7shN_pB#5K(Ykzsb zMig%Yz^1-n5**g^v`H3g6+KGoe?n+}Y>(R~+OOQHL)Y#g_I0HRYAecjT7-#2Ws!&3 z09by0uAN-qN5A)+HIat7&L%?C4qG;c!TI)|g{?Kkir6@Np2f$(4f1Hpnur$9`=(EP z2%>bq^;xg}#@Yak=~IoV(4n5X1yO(m0NO6zdPKQEG>HR;T`^Tmc$}9n9SfMrO0=&R5Klw0ln2&! 
z!IRTqlM%@qNsPrD#AJjz7#?1T9X^U)Tn^_A*I-6zi#&38)O|6zz}>aTZD1SN@&AW9a(R8Ze_Nk-6v&FC^UZ zxlkI**Vt=5EdaiPC&fHHKDyo3O&bAL*D3!($EnBuLUHN&u|`8;2G>Y44Ld3?BcxN+ z2xLpI+WdD1tFiX9`rba2LDLY>D~#!p(~%$`ybj_*H6+ijGl)khXfkm*Yaa>&?{H5J zy;m+b95Zu)x`AOVb+`W2&8(IS5$4G9?@&z;Dd+}8F|ZGGV-Ps}rJP~x?~L&;8a>-n ze3xEo4-7}hUg*Vn)8{g=nJb!uTLa#ed}$qQcD;g*?|jy`$fUtmdNW+TsCg^E%@{51 z8kqLoQ@G5KMPhMOeyt0#t6nKOI%EYs1B02%;$n_-_%0Z~KkD|-PSagnJZO6e@6H1k zsWh1Z9Q+P(&b4p~eZ~oE)s$%0=;qEY81ZrihN^Q5!b}bdyyBHpgzygKD;s^tH|F+I z@>Mq*d0MiziSW+@IB5>MQ67kjHemLDY>%3u*tdIZh%U}i3( zSon*ynvOi3u)k~%5}_4Ne?f5vq_=F3^`Jpqq;XlzOS@qm*7ykrt7#e@9ko)WP=5ojd z%(s6y)B9FLeJKi(=TywB$D22aXW8-P5b10lqrreEXlVI^$54uGaLlT~(xt|eo0$*D z=yBm}j>>M$Uu-uw!?@!&O`vgdRJqTCfhphu{k7I=Y=oGePas8qMRRxeChF<9^}oEH z#|20)65=2@xUKwuGz+3rTL8sbya$-yD#TsANouKE9mrLqoDQaXyslsS<_l$5z17ByG}mXdHXBYNEML;Fh(z zF!>ugK)MHIdC^42H%bnyh7p?n%-L!^4n_l{$qSY3SMNM(p^@UV1YA4h8h=wE=;=JqaO__DXU zq%g@@NZ~SCZ)BDBYl=i0FZAz#)}V}Q+dyp-qQsOQ#;fLqv6q#m#k(|HD>`x=IOkAA zvS6N1bWtrc>6{TTGenVbxYc4qg>AM}kfB=;(~!n8EH$gmwdB%`z9zcrxpOiSdPn&2 zwT9^2a8=;s*t6A<@burF0P%hSglZsS~W7C_D7`>z8J`5oAwCAU+TG&2` zaKhhk4~OW6F7U>K4bEqqU9<`?WB~-C^g4eE0>w`!xvvfcB&UAZ^@oT^i^^)=#=M@{ z{ki0}xbhQmG^@RD1yklO9Vf8R?328x*=eBTle_9Rmbmg{b1fz&D?-{{xaY!JJ|1S6ZTL4+rY?j>QDfT59H}06V5!lsI3yvuQc8i|^8P z)?p0z@(+M7AO9m-Wzp`X>{vI{<$R5EFg-tZ-6>s^{z`?u639&F%62?2 z4{5wx=7DyKM+LHv7@O8j9KfpM^WZ1eh1v*!`%=*Q`>Ou^?}4PsAm1%J6gsRpoCEg4e{Un5&Bj-!zd*FUh+*lQ(NX2TXUKp);C0(4yG74+STP< zO75fIPivq-%9i$tSU;1YTJW!0dh*`W! 
zwjb103AqPkspCo1EY$W@;9@%W9*8?wD<;y!fwHb$w%m;tDj@3~cl{NWod97GIhs?B z2s;SpF(b|q>EBHv zW!4Rv=8!6%t6{^j`#}+5Y=s)-Exl+w*!VI6U64BL^A+|~i<5SI`(2}30uaz3^IF1Z zZEzW17%^u-Ao<|=J)r001DKy0Q+`BXo_C=4_%=r#)Zn|l0*7J-L7>m@=lsuvT{iSx zu;bN-ryS#=AzA$Jo&&qjsW4C~u)bC$Mb~KBa!-g5;r3{+U<&B?KL!bZnUyn8F+08wAK zSFCKRbj*9kvUOx7pGnfEDD0Gq${>V)0cklwe1=hVMb7ZHM zsq8Q1+%NC~P31<-0ZChkI3T=7lo-CzAp8DSmb;C-!)qTsO}E3El5!n+Nco-)6*g=x zgf^;UR<0IQLAz2%z!1%M1^G3P%;Oi|HV(J7|H(~cV7K& z^}tx(y$}rNtB3T}%n9*s(BkGl&Vg);S?TM;lh97Ce?I+E_tZw}cAIH~InwQ1^@FkV_j5=#h-eZW4eKcl-<#&G~jjZje(e-7Cq zD3%pVU)yur`&6r_F-AVKcBnGeD|2gtSqJ0rA^jgLZqhpI#1(&2I+i)EleN9YvCMEe z8AkKKvo}>uET2@sF^{DWIOD|``Rss&Sh~_XaRGD#sI5SN2jMp$QYUn!DQ*w@1>mk;nV1tMhYM6>U*{ZZ7dpfe3)`R%gw7I< zL?HM+G1$l9)%M$U(+z?!H%J#8LkGI^t8HLTtsSyN=+-&hkiA#Me|u;uxJQ_?&AL1PFi|dFIq@g94sEWUSv}u~PPYX; z&tk~{pfowawv*a6qja+ywMXK=;7up?+E?-xV-hu;ZkB_OVema}Dn^hUC@&Wl zTEsH92w)XY^Dlc0JAtS?zG37+UVcVA*K>VTH&2L(*>Fk6J>DZwtH{>YHO0J)wwxXx{gm%zh(C;-3V79{ zu%3;UtJ~cyqmRKj3*^IOGhvxq!A~nlSpgR$XI#C~pv3=BZOh~f_HRmJ#b{qI697=@uU?08?}ZQ(O2rtQfaPP- zJ)2X4mP!~T-sbqL_iFYcdA$wX`?lM3$Ccky1~*DnOYy|3$``NP$gX=xy7B6-!JEUXUxm`rjpAj8jQYk1iw16akLOC39<(f3 zxg;;22XOlz#A~H#6+*8*U8sRSkW-@(1_62?N)nULXG})*tg~OhSg`cekp%DQHWsuB z;{vY&Pyk(C5yg&5#fuyz#M4h@-RMiZw>@{y{`x)8jn*G9NuyyZ=VH&$h(7rM9)oOo zd*`0YN7(lVJJkJiqp!5&YQH#TDcqKd*@_62(;+A zd|NWK{fldNM2YJ8U2S;)IsYP2N^&^9fK0sQDXZ|_oezxPHl#oos=C()3_%H9k|cY` z2gxsZ0?FKg{t(yB z8!XnBo2DPdu@#-BkNXiShr8a10xt87@r_eFwizJoxq~Uk(?i9Np#exAL< zWR9O)GQNoMjJ9?JkUpBO@h}fH^2q!j`NMu?6owNM?@p^zJAoE0c@^&hh0O$NCMzavKgL7-dy?_m*%@H9#jZcl3#Pj$nAGg|G&VST93S^U`Ws|yb4P?ewDChLRP1)% zBMF%y&E8M&y9-3SI~nHzIX<9Po@II2%ZS~jl>w#zr@Wu?zG3h}at%+f$|WIf2j5$v z$wX>@b{BJmrrv-~6PDf3BV8d~5$2D&0yl^Ll9o-FXK?SyzfhimbvZTgc{-9u7xo*< zJ1knp>!aGW`TKHrM(0`#g)&hQ-{_jP0$z;eJmoG|xJ!0On?cV%f~)~jY!~w%N2F?V zriy|~RNoE}^X+)k%Iv+V)9;Lazfkz95sc?UKg>WU#3#=_sHH3Yf$o3=)6n12p;G-+ zXqfb0eP&$;l1N7S{d?EY0dZA%Wc_!Yr%y7^NGL$D*d?PgTE8vR@?+pt-IZz(NU-)` zI|y{`;`Pu)Enm6;b%>p^u9`T-OSU6B{GMt3?&%8?RSQVt>*~omsNx4E<6xs#*2?Dq 
zjaA0FfJ={2?v>NpPy7zuf+qVeS_iO?NN%?6Q&Xc)Iwoo^*gax0qa(A>oC3#!4q8{u z0^~*m?HH4$u;j+p-vT{1>U;Nq0Ol>rHa{zMQE9yPO;K>j#70t@YFAM<#nU+kRCTcN zb(Zw!BxVj}pTwXE&=AL${o&FC7fMJvBrCt`W=C%}(csy4bt9{GGXhZEqO%n@S3`Hy z&6*3s*Xahw>4p>bgcHq^#fpq6waPHQM`Arygbr-$V9ff|rEUYk^OgYp|7UGWqgLi3 zfToNSJh`|sW^s6#cz0hadOHuZGR1dMhmQQIwv0DeVg^GuvOK)f@&rsfeOT7eoel4I zCqs(oe8blxf$W+}fCE_M1$eCpE*n3;|BB1kamMy-v{2f%y#r9*9z}D}`~_*2$096kJ0wBB z()3Gh2yeiH@`dpJnto_IVDB?L!2nDai!01JMKgI{_GQ*71!&uG?1)@#eeEpcm`f&; zJ)&sSWg)whb(*sz`nU?x{hfDXMkzoA`3yX&cB zF+ldP$GtWZ&6c{|@7T>HzYMBoO76XvaW#JSIO7aCJv@%89l-R;w=vY3^WX^SEv_Ik z0d&LtZE*+W8=rxkdvp7T6s~f{Ns#Igt{eVy`;okRqK#|a@2s8U>|@)6`86YV)0`vy@PJj*N4;*xKwn0SO(z~IlMo^jxttg>d_5YVu;&rxN{!H0w4%*M24#=&Ee!Q z@5wn9UA4WqSRLJ?MitUrlRp4``A;$a27GYOw~Z+Rk|$o6dbC&QlKcv7uD2ZN5#k!r zs&_y`*LHpc7|mwxAMH8R)aIATB@eMyNZ-_7`U-RvVEO`x`%`5D^M3Ny&@E4KPLm^r z2{v7IF&t9*7mSNCLV&K)(F5dvkRsoWK1fVCIFNnYzlvYdz;7)#zB`4RHC$>HkR_G- zBtXw$Uat?Ngc?$V5@3_@1-qZS6r&OM>R?NwM@Sl~xoVQM6I7E~1;>WC4QDgv_e`~pbk8mtk@w#5LgUOG9_6h=vMs4e5zjb=L2$}m9U8h;uwo;UyTCoSS%e&$h$TyVj z*`VFVYzPe&FWV(Amue;xuUm5WEmVM}YW*kbB^A^k=%5>M+7I475Q)4wxk;0RlIJbsOB2|RoAIXfQlBcQGGj7 ziolc3)}74*mRqE?+%A5rd8zcjg{8vJEV5@fd*cF)&9Ru&HnEKwM5N}Xd>{KHT2zverHlw9)zG0VXoSLDo7cdCSqAK$0 zp_xv@`-`N@*foAmHKAO$Wiz-W zfV(+R@%6$RnK4Kd0k`}Ma1Q*uItIH?o?!)04D1?WlG7LzI)pASC`wcX-{fqJ(l=oETKpC z2@D%TII)7CeU)S(<^ft=tDXo1?(kM^`~Cg@PdmpN*3^}}L0z=84QxMA1QQk8t| z@~V(nQBYYF1VIr9TUww{36Dhy1QJUvvGOQw6)KNdgMfj6l!sD65~~PEAWdrshF2OM z!9*b>gqI=Nb8mqCx&Qau`y+R9?m2U3=FFKn=ggez!VHQ(p_TtyIoa`w-=ie26X%|n zo^LYESdxa0-6g#L$VdkTkNMi2&l=akD z-dWM`>e)i?*I#(VjXNHv)Ck^% z_%!Wp0$wJtsBNh2QeNt|2l&(73&7gJuPXjT&sW8l$=~UA1Zn*O%gh`j*&aEQULWw} z*M1qnuitFQbxr@)x$b~{RNvRlc{L^L=;b|?rHSD^_Nm??*E-o39-pfv=KdMZUDIZe zu6D1O1CZ-_^4P2|q$`puhn%A|vSx zwZCTJ-Ft*3Oj%6XL!zQXea=2VaWG#=dtUDma!TgWgN9OxqNeHRDZnU021F$xDc!oZ z+9gc$C^){~gq^K;D4_|n>cL6ICi1iR$3oBhGYt&Wi+|qzL>HXAYr^LUzm(CkVfq%9 zdmvsgrj7V4=f1X_ZVU0Ew}!oj$0r{S$wSg>ji$FGZ#`QerpKs`l@t;mG;0J$2CC(d 
zD%r~khu>YC-;J+p)V|ljZ{dwF7e58r*y`vntw=*|ulXhrIbR_W14{<+4#1H>i0bk? zrbt5SB+jZXV7*=ZG(fb0Ak$(3b(AXD&z#BVnZioKNYaWBGnrFFY204i8SdBoyaBc| ztHuLtPj!nqwP3dx%0m z#D>wt)D#k~g(G`s%qeQZ$7WH3hE(paBw{BAK$=@?6(CLZ-coj12!C+aCS%j+KCcV% z?B=LlnWio=sNs>SCmqV=w4Ve1sMc=mA4R$1e498WFT}YE;506{BB$7ZCSI{?+SW_E z4RvE)kK+=J(sdQQ6dEX3~b7zG55*(tKi%l3JEDUh%g&<`f(K~rF%0P(E1dE^Y>X#@DcCiQOt+8x&o?V;4@Z7i2yOhu29 zf8C(~Qjwy#I)9cELtrOTTELB=Q~&e^K{9Cg+4DmDk|`4`p|^YZZQyB2ST5DzbHM{| zL({ggB(k1XbXUn}dbf|JFvISw!_Lg>+nCPY7Z3`_)+?ktr$yL>Ph)cwSlZ3&*qU!3 z>5qAR#b-dsjh9}9b8IYH{Od8Dd!ABoH;}MYHKu)vXe&gmR2?w|M6g?Q2?3j1lMZ4^ zOj2uwX~}bb#JbpZRDq+$s=WPqG;J^lEc1kgxSQ7_kr#8Ww8n#WDpW6qEfE>&roB=V z-T=v4N?$h4*5J_m`tVMk-0k&Ce=zf>4TjrTD?Ro5e$TvEfgk<|9)ytl72^QUc1?rg z<}MGOh&Q7~TeDoe<9v1dC@*YHsDjfhl>f-6)>5>^S(0cg+Ex%b;e-7N-?2)1g8VIj z2zN}oqfca723vj%qm=+T|>(^%mgm^1y?%OH+=t2}R11RRk_;`<8W#J#mz+y_nYY%eB80*mg&RYh(iQPmW4`_#LnGLEUg z8x_-`0Q)#(6WSlOznbOJir%s=fv)fwI;eG{T-{B3YAV*igT@cc_ukYj!PILk*Ko3T0>qm{f@|hRg8z0P66 z1ngftSQ^h)I3gP`pEnpvdwZ|VUlr_04E@T1j5O2r7!9`B8FW0$y&O8FsI{l>Y5>JE7ksBYQ#(?UE`fVL09d5>Kr{{11q`j2PV$|xk@ zdl$gxBh=n(++Hs179dFc`~s|U4r0u(n%igwL4t(wZmg)qBATG5(dEg(oc+Ly0NpG? 
zIu3>kN$rJrnO4zlGh^6yb~|VT3Ox{K3xk~B_HuN>U75I#qUJo{0;tF+2>_|?Fdd@# znFkn46t$}vVF`KsfWC%$ORIW3->^tGEXoJ%ZnwFD%uhg^F?|&V$t`;c`i(Pp@)QQt zZV63{Fqyg?=~K9-DUdGM9RXJ+vYib>JkJF+Y)gRKF?v`4rkWG-(Pp;?wPGZ-gm91{?^l0f_1f3j~zaHc-O97$JEuX z>F?Tg6uN8IUhji@f#0N(a)*GwcDw4UD(=ef08Ifu{9}Jr>*}suMG;3fZT0~_yX#&z zPzL@clgYrJ$9;~=A?4(wlan(tGV(F`1>FT7K76RFtNZfhOMid=z`(%w@84&A zMQ3Mcx3;!OBoeS4OJD|t$UScQ+BbHQ1mjL}0zVykpl0g2YnRx+l>c}0-ZmT9wX1(x z{n}N7hgRRWLSHUidYL}a4<8r{Sq^hG)E3CNW2n-}Up6!We-^rnP5-Ml!};L#%!hL` zEt?g)&mG(&4N(hOwI&&>M{%ipMUn01;9EM>f79JkX-A(2{yAm(j9HR1h)X>x)&BgQ zo-h#bWK>eXb3JjW8s#O%|Np`NH{#Jg4n~Ps*UDI11S*uvTGo1^alfX=cZnNNOZW-#dtIEWekCK>mE_tJ;Kta` zVs&0w8`GM=yD!h(>3O1xAhI>3DILA(&7+KXmjct;pLVw_Sf=%b&CfNofHioYfd=Q} zVUqc{KDC)pn{!Nf*)yJ=i$W2@zZL_u{uW-Ct#NvcmEK6>tVsx{qVGo@VN<0+v1mzu zr8gch2np4;%mDKGLpz_1T1GID<&+>yKP{k)?-w^9(G-t#G_vmBJNPrFUvfVKmS2*i zn36N+ogR5%D85s<=v6rX(ytfx+)|mnj^xaEoQ@XtkxVJGnntZto@YERevf@cShfGM z#=)u9GbQN4ou6ccf&>dx5T=SIo1~SW9GG2c%S_~hx~-6+(JpJ+eY$NNO8jN4;J|I- z-#vP+C##1)1CQlYeLRenIBW(eynpX0Tv*I{i*bQ@1=0XGcdO zZFu4yRKiYPGDRo@>~TRYI2%IVDID33c&VSKdkryJ$;z%|$4l-q~`Cw9*i> zXz5WLhd)MMUm?tWevd;HX7z5)CkZLc8XqVvW3J8S(PCZEeuC9u3FKU7vLuzA?TvSOVEpG>+v(is*t$zQ8c^E`f}N&r_9@vkY7dgQCRwjMmlz0 zC+OP4+}F}Q2J-^6+BsAaTLmK@7}LKxHW8J>Kvw~muzv=EaUmS>YDZFv9h$ypT8p?_ z`>IEoEtPrKl1VG1Bdkl|vmTG0_va%IUzUYlXAy+C_nvCjx$n~iaQx-_d{X|_?&aFR zrpgNl=fk8e*o6_z0}GOKiT@NN3!Vj(1Ni=&ItZJ3@islaU(h-)zOn$trJmPvY?!ch zYy(!wn(Z98jLUb%s_kP71M#ujZEZQ&`y7e)=rM;P=lzA9?n(PrXSCMb_j(Et|7N}3 zWy{T?f}0)`^?6YCkd?ftgkoD}Lj(xMjhJTAvWzcwc=|8R-Q@BI3E^{dBr9CtfpAdo zmKmM;m-d8mizjx1PKO0omiKZV2(~>~V>(Sd#2Ivp<4(^d|4Hm zd!|Z_S?)1ntd9UC%DWBT6KA@~d?aB{Yt7cbXk;=r*Be(`CTHw`c&>MZ9gGnok5?T9 zyXf-Q3W-*0A@|hl%AL0D^vYrZaLmgD0g`rB)j0tU>>SUPHq$877jy~%2wC7b;DRh| z8HJx$;eW`Vv8fJI1i$eIH5u7;)*MPZ_dBovFnSq^zc}xnWIgzmGG*;SOtr z({Clpdd(byVk=0-azdM1t1L*{8q8M@!ra}j>4Rn7Q4Icm-aQK|UMk|5KHU?aU-5U| zoIjMSL(H-G)>s1Xq}ncpIB$~Z$z6{A_wr-fJ|*~w*z%T}O*c}wGMsZUVICDmi4DGw zV`&)POsK86ZFqNcmuELVKihfUV-l@A&aJT*JL`~0iess0S;3@p$&EGFtT}bxHDP2g 
zkS6VgMQ@-7ubX8it=+!f2n~)!zS!ieam->V7)kwH?hRXZUM#eO4@(HVd@n3tW#Ba$ z%ws5BnQ@Uhpyy$~o3DIbSoLQXEVET__Q+Hpg3VQ|d1IcrvCK)^Q1Lp|jWfbQxatbM zMS?HG5^AQY;BNTXl788;TGZle=*GvEzSTH^5n%^6_Sjx*jYXaSxH91EkoB0*sODJa zkMAB%Tujf0**u8LGvdvTV^@i{X1yJ{b)h{aM2YO6ghjW$Xsn7M4>wO@M_o^>HG6un z#mx~<*qsQ8cO~m*J<>O(J8->KQjt+$5Zy`8}?F!h+Eey5M&1oA?hdX~jai98kh zDA}GZS+8%M4K8pNoRo78%)VtyBB@OSzkeXX{@g8yOC=;)Hwjalk zYrbaL0vErCv22-Q@}e+}wqo+0u0~Q54UR>`@A-iz(2h$t%I8mBxIEY%uQWsP2y6Cu z;AnW@Xow>%j>9a+*n(KHYROw0Rpdv$z9mLD`?T6VtVnTHG&V;S|F|Vc{caIz6jZM3a>U#H*vZ&7~`MatnPoRViD7BG%PxZ8jqdwrvP4yxr$D{+ntBy7LUe?s_ipdGPRR? zK3+UlhB2_YFHEqkZhp0?nD*tNaCKmbXp%wyoF$*A^Ld!<@(<}stCoCJ&lk6^>-iS! zZ*r~6bYF}2@P`*-Ug+q*FZ^8Ug5$Dh_3YLaDJZ%ew{mpsj?U!#Z+eSbX0^mBn-o!B zDtvWJ=7LNrhS?1Bw4}nY={bqjcJp(UC6%s&5!KN9rGqI&G)gV`>beyy@{2E@b?7X; z$K0c*8Lf5nMz6_K=W3ULC!zm!*mX^S&;_m0u7NpAKbdHiR)vb2LtQT2rI^Wf9>b6@ zw!4*l1s&_3D;W7K%j3ST^`_MPh%&s;Y4gTUtUs;_%=TtDb5P(ysLFQYabO1WNoFCT z)~BSz2Pc6xG-Y#+n67%$oM>movK*h7%OK4z!DL;jDMUY5CTnKu6BD52*}5t(9HC8q zP|+d>Lweq~cp!E#tt^GZvlS!W>P*NROV5cqQU~@jQtlVK{J4EMc2DRKUzLeE(O(GB zfBSk|%x6Zc=owCYX{B10xEH#5Wc^A^X2Q^<=5Q-f7j7m4`Oe}yL4}Vy_(-m)yMqdL z-3iaDA!{gyNC7%&-L%s6JI-D=VzG^5P4wQQcV-)FKCeDj71)vk2{e36ueKXpN=NHe ze$)_wut*6X?N+4A#1nJ89V@1?m3LKDzrTFa&LXGM)3<11%*I(h zEA3?TJmU2LBJ^2admDsbm_;4e}1!X0?$#*Bw{*s4E@^iS47lpzH4f{QDa^vq3 zJ*zY$V#=0gMHtRN%zOgXs&d?~$@^n5H{5CUS8D+mv1BQ){o+#OjPnzrX5 zfY@%QjgIDxmiFK6+@VH@?%el~F5fmem{i5*2acrxJ$oh1Ov~qM-Afu#Y32kLdg41Y z63gf<)sHkz_XXkPM2>dP7HP{eC9kwmNLer^s%Nzir^S{IIBmMZuNfw}Yd%m%^#rW7?~V=8yHiZ9N>&YY%}j%=RG^G54sRR>7O22-Iw_MJE$NWF9ng$3P8AN$Qd!Rz2<`s`e;WVFrE9X|0eC$C^RMZ>Tv&M zYD~rq1`_7znWmg<*%!UC354m+Vs2?dVVFyGHMVEe?V?%pcm&u}@kl3x zWReCe9l6Rl)9Foo{~ME7mSCp2`Ga3#WKl%}Fxv_>_&}`jI&q7$!?4V`v>M`Zs-Jl% zRf|AMVg%=xAxGOi_tHpdn&as1XqXtvTJOo}e^Hj8DOgbk^Ry!!_4X9;N5;2)yY6|Y zF+x1Nm;|>cc;`d*`Gj6opeIF-4ICw5jx_M{;m)qG!{Oe?4ct zR*cu%G>QuOAQeU$=>umMVj^ z3Ax4bm0B#lb*VT+`u_9Foke-WW3fwUJ>@42^DKDi%xtdQO!ijCUG4;hN&F$9%e5}D zLeV~1Jc+R1=kRd$pyRO>u_J)DUh!3m$3k%7;UQs0Msww#$B0K(3dcGSu?`t=L<*dZ 
znemxc*v-`%UB28^t6Y6)bWuS)EwDgLJk3TGV=*IdG(7{LA}@YB5q9+g#Lxc5T#`vMF)#sUJ1XOuCSr}79O8~o z#-+vul{^BxZzwxd>XNoMSCSHCMFk$?DUUvzKls-ydY)~sMY#{rv z_%2Xopp@hmtwskb))Lz{J%sO<`$~JvFp3roRcewn(k9}_qbT`evwA(k!5A8(w_dC_n92z?o5uh4eg zu*SGDwqm%ojo>s_$&xS#8`Dw}@Vw8fi<=ta#3l{Di8Y#T_uT3aWkuTd#1ozF(8q_L zMYnE^#+v0WHinA~iU~VqrzUE$Is}8#I#wmme)(?63^8KFW5|^~9VUFRlsDDh#CJSH z>;>EvuoR7GL`)DX{gvTsX`XvM3A#38p5aHrL9)M*I0w2{V(@-Dl^hnFqE>&KV>B^$ z@H`W{oL|P^ZtPYYOFj&~z_O60Rbp@6(#Do=QX4Ah-B@~IV0>@YyR0Y=VfWi4iS${) zcM0!vFXuRwz&!KL;!??D+NNI=k_B&63}Q;sONlMx4IgfsTd}y?kqV=E3@R0}4W#rO zhOZ3E#w1H%S(wg(J$|Xr?KRF`S}l5@0?OH_5qG3#MJBCr)!E77t&@$Nhi#lOq1Lmd zp-KAgW&C8tb*VN;Tf9_kV19T!M|aT7SdDL!YsJVsu|HFx+r_-x&Y}-Mop>4jbrU&* zFS?~Sl1>XKf03BRq3;mx&_^ZR9IDd49Dl2|7!$qLO-j_VBi~(FWY#f~U2{>jl>|UK z!t8w^6wAf#&i^-&ZUbVq*eqwemf4~2R>hU!f|3_tw#|iUG3D=(#sv{FLp3%=mE_Z@ zqY3YufF5 za6X|?#P@y`?6T5lnW0m;0!MAj`X&w+46lCBzyCgF$dL10n;B05BB^CEwwciz$DAE* zom`w8OP2_$MU4vcq$m=8pK+|bLuH$eWF9=mQh!(%7c{X1b-WScQuZ>wO7h-y$m^3* z?fq?le+oLEpBcy?JLng4Vq!8SOv`gQJG!|OQ4)*2_r}xcSlC!%$Rx1uC7NSCK zb<2K7NhayyJQPNXj0eRAg_03M(g&{zHKx9ZOP73{1W_nW|WxXfmE9+JhOIvij5BECS{*Yp1R!^EzC zi^ZFlRt5n@h&vphk6mb5dNb|OXHpZ09bXsiwDY)z7%|MT!s`Y2k;`NE)#+}{5IoNo zi@Bx4H9}>ht8EsE5y{uZrnWw5PxOAw&6$N8`uv0Xz0OM!*r zILw;&@I@3$!s%=Xf*81dHLxo8?8cowInm16>%`AfmaLRe+0@uiBrP3QYyADWLe|tS zt|ixDU#1rx9eBBJ`3d$G>}=S2@w+_fuy1lTRibCq$-7F7@7#HK$_Br#d!$O$vekUL zNJvtJ{_>QsX(Hlk@3b2h1K2Z%rg@N}bR$ne4fMC?rTR>WiTPH&M!E8vH%hDF?0kKs zrGz9ZrGfk`qWD2;sh4!A+uq6t<_Rzh_SDOJDCBiyuH_F|WJBQno%eEyUR$YmIvp0` z=9ew;9~b7l75aN6BR{Lo_U!9>*Y|Z*d2o6t^|3?em$rVbvP#U0*?gFAdxnJ#P(!pz z(F>T*Z%~)_{TQ88?fRRj%xPQB=8r=gOM#H`1(b8kik^de`C|>3&Xmzzvq^r!%?g;% zE6Z9dF>#cvW^#i~Q{UoM%sQU#zRit{vk`;A2GjisdI~eDN%B_q62;wI@yU(lGoIbk zmiX`a@4A`t+4v;C)c#|PbvC94I~4g%5oRoh{`L75Q*pJr#nhHbHAG>=c zN?ubNWCNKF|4DU{rT)L|o8psUz78?^rarWREg$N31**~j z?e^FF?q1mowB)#{nQ^K(hl0fG-Re<`C;uQ$-Rlsw9`TbVu9|zjQeY4=+y(?HOQ)pCb z$sg3{ua`XP6)DunxkINe!@QG)^J%qfO}CbN^ERqnxTh?3T4I322Fs}B_>cR*J4`#@ 
zIgv)MACBvJj+#BMemG(W3%b{i?6zG-1#$6R`r~sSoWs_dnz*NK?<8uUWIO1>-_P2N5g?13 z1^}r5hIu?e&-*rGlMAL<0%~lqi)HT%=FXT>8%+=;ikBb$PF(6u^sf>xe5@3u{fyd5 znO^D3c$4B&a*>5#HY8e1?RyRfQIU$~q_l?cJvXw#u<`8gN2p-7=+Xk|S&9)-C#bY# z?u!78$ssg@&iuQa>QpgzHeWnvySIapr6QcXHrQ^%-BsO78fe4*gY=gr5p6PN8n_A; z3`462cWfU**?!T1xYjTJo~wo(Xtr$m8}^zXrSyth`hSegZPw-KySX=^seMB)5enq! z0?k=E?I~FDqbmNaB&1T*a5%spd1=07a#?|VIdrUJNwK8T_BOR8Si?j?CEjeGYyzHr z%T1sz;*qmvFD4@FvW+IDiqftN0fCP$B2%p9Ec`!CBaln_Gu~VcS1Cda;j8U>HI|7q z$`L148vMA0k4!xX)Xqy(5I%ADUGWsyvrYb-2$lQSQy^A!MHssyNw zXmcc|WxNqAESJ)+OO@uNfNwyI-@6#ne#aFEcs~7@=g515@3P7Nh67gWTp!kNiW^gF zO*!|8eBJ42IU-Z- z)WhXOT3aMjAR76x-UF}3Gd;%+xQFs~Ya1K2ghmq4xA2pH(x=>$_@H&|z4_PcgNZS! z6L@aEd#Q>5S0S{Pw;DETB*=0J`kBX{x*5>$IQ*2NQHHKaiNJs=4prj|$>{()47&lF zeGBaU?+>Am@0{?Fe!^6VT~orGVVnyFO-Uqn4*XnK2d{>WaXfP-kIPrggkq@od?Qk? z(0Fj?8Fau;YJV_C+FsIR0dw(D`uq||PI&IL7oWQ0W6mb{rjELZGDUqa*XcW~2{9du zo;D0%l?i;s;_2$*o-<(58X+omV=KC3f}i%X<&Er_j)!@t1fUOhM_Ywa7?3FSJOoyR z?`Pv^lGMpn2m3z+w7+MAojhB+nYLg}Y9-&SLzC4u_LBqh$RvWnIwY6>sSW}ZU5||Z zFj=!K0~Mq=WGTv?UgRbwl$HYIt6(LZ4yGp4uX#%@<`;41npnUPJhtIBTiO7=0D~*lh48 z2vAvOxFIvP6@nB9K|AwQ* zzkH0D>SLL19eLrMTjE)(FcGZQk?%1K${GxCj~62m#Eyo|`v`-Zm8AnQ#ZAf81EQUL zAtAt615i0u1FapfNzqzU?B1&SYr$-v7WV5MTjJeM^$Tz4Gr$)KdOr^36ziLYfwltm z?UQaWoTfryQOu`5>%^w4YVxqQZ>yy}=G_)vha-|sb z)d2u&6rh3^5X5n%CC>~;{a>bbiE3(Z#yZuDe7cY8+#66=##EgF`vImVS@7U9Id{-xaCg zvNR(wE2!yYjnm^eu!PmjBOn>}Q`9y2&oZgfoHK|4D|tf@m7q79boz&@VTKYF-Pe-f zPA7%DdJXTmt6H(E###9yDH|WA!JHu)4G|}JC4WJmZ*XsC2!&%H4B*E3TeB)Hsb*U|^!0B{QQ@?`VLe7tg$oj{Mk&<=#gRU*_~h$mw6alo3@db6?O8!!U_#2mCxn4#-9cwHJyJdnG2J9bh1 z0{g3%S!9#f!K0~vtSNB~LDE8014bd=Y!s5o5fnU{`^N(@@+!S#@2#h?NF|w?nU#j% z6LmyL_Z62R$hS*Ee=xFy8_YyWI0GwPjRz5}M^~e9vF0b$tlF%Pkw-<~c&(HzbM_DV zCOe~9MGf2tFPtfB-LAsV*FvTS#0E@Ny^~ned{iG9Jd$`)`OlbWQA+hgV9ZsM7&oiU zdxLBQi~^tlrcsJ~%63KVXLHm=`;Ld-yJ&yUz=0uD?ReSk#Bdi+Ob0 zDUK{ZGz_)>c>&k=$khF`l#`m~$#Ot{u=HkB4qn}@-XFXpR1}E-33%>Jr&HIWf+f%l z0+Ph#ezc1$gaP>N1})E}rjoOdZvDkd;yWHxwH2N@gXcktfOaN(F8NkBv{I?e2(7{jVvrbb_(L%!9%MokGD7XFi 
za5|PAp!SUb;v*d-eSCzx%X2W7BsFkUUTRn7u$Dm8ZkA~2-+pD{|Hd`w9K@~fH6=N? zj#pzk&X`m0GIT+Gzdhrc~YX8EcPH{8oSVXrk;to+))y8f`gLVe@B% z&#vhc4TbqpF2Y&Iu(80w^gOWs!@gT4kqjqzKHr=iJpYUAlVDViql#ZNXN^vh)Vfqq zs+5H>P}fjwF%GnjUmVGX_5w*9pM-YqhfftskQx|V!x!Ft#AIAzPTm9!|tY8d>LXn0-8*tujzwvgY^5QkbqAPy^gTBdM z%)8;$6h2HV*cu9HV1%h;acCepMJza*ew+|c-|ceiSYQY1lj<%Te;FXewn3(B#@t)n zM+Z2y5$)--QGo!k9{7v+?=z#^&{S#gG|qA?;F|uad;y?dbAlP18a?u-FqJq|NgWSc zKzuG13bBs($exfH4IpQ^b_7``5Wv9vmn2VtD??RbUFG3w2#aJo zZ?6BKN*R#_QAiZXVy2nebOGtKl|bshmx}f)P0(E(o-Dk|3WO5AKzi3tBY__tmwoG7 zB**9<@}{Ekw@M6ZMj{}fVoPkJ23REth$yv#WK+UhZRlZisiqFT#4yt2+<@yaFwqElc8FD~ZPS0NEb5w{zwHm>_enq$=AnjH z18SfyYF-?)S7LPxedoF93U%}^Z_Aq7t?!~_HQa?0nA&YRa2vQhFk6TokyOH*qUnuq zADsvj#`6KqcOyU9)R~YORqf@V*H1My1{G>kvQa_ksvMoVPCeBdIN(>f6OV37=^(3A zSdYyuIjF^v%@nxKQ#pHJESYuLlH&6`G}F;z0&gDHp0WDiSAdU@B;Azy0Uw z(_^KzBu!GCpB>o@|5fYw=k%P3YXB>!z|ip@Lr`@c8WeCTm~tl`p)s#e62)4U55t67vS>TiHB*gLiy2sg|ENYibHkR z&w|%u^gf4y%!xSI`b)8=KY{oH?o_;X01ZCC2W~r==q?i?K;QY`X9|6qM!Dczk$~K! zplz$(xm=PoD>ER?u`9$rQFs;gk#-DSquayG!Wgp!Ous(dc$+(pS%S0)g*r_A1Du|F zYg>!z(dV-cQDyiV1G)6-hsODU1}gqMi~8rJlpya0mg7Lx#)^pAximLZZLRplqJEW& zfGn>f{bw4(m`-jxU&FgaM7i!74zKG1_t}xdb(CXkiN=L}O`-gVR})%@G8IJ7 z+>mDV8S5aTIgjLfo?aA-f`~b_u1KI6*V{S#jQxW&@$HpdXe+gg+}iY4sOmc1as>N1 zAGdh3jv0_)kTODbZcu#}OcgN?xoqlb|r`+ z93TnS*C@G#@8hmf@A5l;M*ga#Glw!lbF8wAE718=m(AK6EajRDhITlG@Fo&W2Oqn- zj~_CZVc15Zy{r_|AcTR&q7AV0dX@^V?vOi&C_n+YG43j6(`s#LC~Q8K>%MjMd;DP^ zgn)>M{35N&C_uw0_l&`sH#WoudGR1OE}GvSKOjRr3-mQyn$) zVv%sKrC1TZ6=j5l+v>PlK1$bVy8ubG-AD}c!wJ4J3vOW?Zq5E$Qc;@HHr7i3aQ38_ z+=12`?mcsOLUL6TS|(||pU9n>1BTobg5u8#!Y~bYNA8Ev#Aii2UZ*=MWOXVm!$}#} zrk?t4A(5%r?c--#yTFZ;2%Rl=KE)p_*(Ni44-(v8jo+t0>uYq;58(#i%3YCg`<`l& z8UA_9E3WMuHQ&Bck5Ypckmkg)VLbm^eA|cO)VGU^N(WS zkjpAScMEX06tJsBrbGeiB$&9j=vBL;2X)#XlR(ArkjV{bv%&;(6`A2XeQ>J(HuHj1 z=4(5ct_80RfP73}mCj`3YA;Ix)ftaZFmND_B;m3WaNFvk4*q#fCyG|H4%?2XJX2iB zd=EYL$1j@EfSXtEoQ+Z*X--<>?#%7AFak=^69`Z|;pwpkK`}wnCr6s~(8447m{YA=yOn>%4<{#q zCC-8~jfizzltm2gZq2Zt0G0^yjkjgqC~Gg^bVsaJO2rtd?qlKvE_MNzrS?#fV9}Fk 
z_hF&*r$NV@1Bm$ymrlVPcow6wuA|=DK=Rs}G&sbp)R>9tJHbW`6dn!=9ts1J1mqMf z{WNFL;ye`)eIpnBq-)dayGz}8X{_NvkwtGB6jtk4;9CQd4_=)GazZ|&ZFxtk^6$40 zGh;V#){Bvg+w#C?MixoC3?k1faMv(U-6xCub<LIBDCct>@uOXtq#DF^{lX)tLF`xsm{A16BnlrSm0aVoN9yrAbIIcz^`ROdP z`Msv!0kFacdIZQ5j_UYljk|(ekQz^oC?p!b%m`s#NGE<{->d?qt&@IyJgC+m{yWh2 z+tZOq8N}BCk~^L^*emY`kkx{~}vbNr?9&bTh)$aY3833vETKnUTj*SZ5R>$}G97>csOXxxq z*PW)PhUxUc1TTM}H|lz%z#T>RSL`nm4r28~&dT`J&5CoVY9C&6SxD!whMSDF2GHkO1AohalH6LYe8_ei?);3K`5Mu1!Q40@Ps{;c(PHuY))5Vo7GGfa zHcaQ0F&BPokCIh-4`G{XgD!PdSTrhpTqmJ?3^&SZSCAS*yfJ&Vza+oRlV>i~ciD-= z5qO>8RNKF#Im4kEzn551yKGqaxoRSGJZsRah#03~+CeV^TsmS89kU3Nn@Mx-iD%eP zH41uB(h}!XjNH0R5}@%vGLlkb6-0-iTbm*Kt;+MUAZ3KLH8x~SZ+!^NT_`R>zl709 zX8%m9!_t=OvfLFIL5PE4mnqR1s-qGWQIfOrPGK+JM=* zNZQ=j-7;~s$f#V$FQLBP%VP27QvN`L!+=L!iGeu_8-t7dBfwfG|6Ggp1h|;c8US%| zBU=cs)Q8`e1Zqi%@@sh}OQ`k7K=l_mtNS_m4$|n8yoQ+fo`P)&YGS&#FcCVL|!AnzHh)wl}Q* zYxAj7?!o7MSx0q-%b)v7rVK2bGx3q|3x?d5ZbzgA)Mx(pds|~=e&=Vf`|ILPifB~! zWpm~IFJY!e7!5a?=+MK*x1yyb-t()A|C#Xv<+Jit0W~hU(-b2)=PSV(g!u3q?F+|-3SMHZHJ$~DrJmdzb#@}o~GAXswu4BU4lu*;kUXU z6d9_&{L9z(a;i3!$#BUXpzZq(#J0*^?m9Lb_NAas$lMe;_n%ZiPO9FTj$HhQdS)y( ze^%g#g)wy3k4$90WB=3KSQnP&2_wFU`kwPsdbGzsw8Ymj;R<-YWlHY4q(r^@3z>nk zBkPTy8FDJU$EYT=r_OxS>V3M}<~RHQl9>s9GVDgs8zp^N8lM*2mj&U9{DkS=7o$ep z4#)l?h8ZlA(lcVt8f8We4lzyrbo6lI*8KDhg9~($f62ukkPARHEhXmIV6#b?b2EPU zHI${UDf2q$-d_h&lPaR)l6#?E)Lau4FS1*ijDsG>2U{LI)oraRX*fJF$=Q?`o67g6 zsd{VTCwsE$0Qt{0P~S@@n0~zo{q72R$UFAXysrL@(Vr|Y=c7f}MB|tk)T?M{6CC>S znej8$IXe0!_K}M&nSYh~YPg|oIH{EtWA*+3YW%7;HvMDCJ*!9uW8~&&8qtJOiav>)ldW`D#4o@)4&n3c^e3kkVxA_PyRisQNz1RE5nj?rk$EzOd z#e0<#{$N^1UN?3TH**m>xtsc$h%(CL%42q5s6&<)x@i4H?T)8X)H1D5gG7#AjfRO% zuZXUL=Iz(cHbLM=*^|w(xQ6_xaYdRDFrlM`pj5%t@MreXy;sNV-G;RxOKzY`MKjz2 z<&V#8RyrvnWdZy6D`8w|Rne>}-BsD{G+LhQoflgM_KD^Mq7=F?(D<>R^_A2`sh}51 zGW1jfV&b&DUbsN%^&&zj&veoXQ0a>aDkd=uG$#xdLbZ=1fG9;bHT>#wM3aV>Fjah4 z-CvBhu7FcZs7b!Jq<-VUDnA(@MT-Pl5jxw+2%$BNAvW~u4_H~^Gc^#B+G zx=Qx6?cN4dcg_1kw~HI`oc0EN^P`k>Kp8=3maL;USNGaQJMw;=6g7k2DazRe5T%P( 
zt9^QVb9mLspKBk9uB2SbxG~)UMZkE2j~OHoyAC;kEZNRNv2oS!>O|U zS=Y3J5T$}3&_!Aqg!JKbYfX%zPnm^23zgfb?TCMGq~yOUH6qZ|dQo5d%MHhIgq zR2b!wqZLUC-HUdPo={_9PFNg9i;|BT0v!nJ!`r3L z$e(K*?4^jwg;MGR)~IWM9!I9i(zHBVRrmJqO7rgnu)$qDxoXzOmsvLTEA6}yt){5& zG_}+sO3ws{y`5%LvH55ADg`mHP|;0PjiMI75QfiyZHN~mX+Q=id@jG`JoaFrCOJ0e9`QX=qv6O@DCXYE+O(yT|}wMP75pl4!1 z^)uCZEd(%|2R#45e9W41=@ke3fwC6PqMm%OBP);8tqRT0O^fJ#N7KiGv%uG6(9C}7KCsj%M-!LZnM+)9k7!1! zLN|4r%Kf4g6`@jG`Px{b*l3k14$dGqF)Gnsz|h_7SBgt*2ee}Z=;{rAnzw*!evbZ8 z@lbJkPvfdw_!^-wis2wdudjxGHj}AFd_Ik^8GcfwY)+&(_8T?u?;iNW6;l_Rv!{I# zn;X4b{M&``zm^akv%bITUf~0Sl7NYe?d~0g#@{x$&+wK?Lp}1MH_+ExZ+?E{!zU`4 z9u`2y!9oT8oO3~+<7ZJ6#bbO}

5pvDvsdsCnFhVhc zoZ;xIMi}>3n33pwBZaSZj2tQPH&Xe}%DY7~)!aOjKf^|HR82=wr9e?g))|{BG=B@I z8(Q5@BU}p}iF;`gd~1G)_7-~l4N=5gag?ST>tsRIEHMSrQHCIQoU7Lr{I^C7IcW3b z6SD!=cXvwVFAnkBP|2QiD9i~-w(j^-O-GXT$jURXC67bSohYfQs@i+P`2_77EwuFx z@s|g;AN%;{O+W6t`nQP4$?5Ye`2jXdkEV7W-JW8(t|sx@?bNlL9}ec7iwp@1yVvu* z@93)K4@{HXusvu%+19YtSRnR zam>!hL*x-Be5$Re1LsbF&vpd36mF|`*QXoWD3<59FwISm$2@RO)Xm>Q;)qS}@RXCD4o zyw{7CGN(;F#*H?LSrulDyK!ft?(U_ zs8>FTIlNJ2A~}QoqPPj^+hbvslzH>|bp~g!VQW0Gv9j#O?i5E|q+;^b)E{8{xna~q zbNoj6O@8P!XV36i(uTpeBe4nSC z^XAu=JXI;?uvD24eV=}VXE}EvY;tVnFV3HTZd%lwxjR0;l6nJkn28tk<1dO?-?t(@ zgzxuWd)<7}npPM+TA7%bl7$Tk8|7C|xQBQKG0O2_UZnP}VPYo84XNe)-2dQ5(%uV* z<9HXRgM=!{jnhVZ9!(qr&_np;6rme*tS9%FkqEmj^tI(D9pDzU5i!*3N^( z{=pM6`{d4jTj*x0DLzd$j;HuMC>D}rj;@R_H!%zQ^}2rdpzT!niDqgT6^;|XePiO^ zYW=}dYaPrGZr0X4zP%?*zS(Solq!d3NBv|M_E5w1Ewl3D!?A|K$>$Eh96Kd+8h3o= zq+i~9Q<;a{vZ-|BA)(6X%x_1qK8E=<{y%+%@B94ggPAnbJN({S0Q!CpC-OQz&_Q5%x z8%yE7>Vwt7IyQe(`pm3V!Ox5S$~H=Mzr_zMT52qM#6N-mP-dUZpvt$6-$L5co zkikSNPjTI{hb)3o4M9hXddvT(L?ingrEr&N+}zc}Km9GNm#>?BoIu$U+@E@E%88$z zR^@nURsP%t`=AHfm`M!M#pL;(@rJp{b_V*9@FC5(^=$q_1O>G ztUiS2g)cFVHRCTr@t6E9JaH7CeSe{iwOJMvnBZ!vB%LcKqDFJ6(e8@EtbC<^%*EcD zmK+`Td*Bg2{q;+MOW|*?g;-IlB{DN z%49brjb&_YmaLT!W62U@Fq375FjTg(CSx7TjgT2jma#9tXH?4fH-B_ruN(7xp7WgN zocH^E&iVAJ;cUFm!k4GC1O>fzS(JdMFTT#Z=CZEsB;xsDfgP`$g39Nj2g(%5h|HxA z*jx*~aT%f(AK5iFja2E1osJbF-Y*$zjI@mBzK!0@B=C%H=21#&;jGW*AbRkAfp;%Ptk2 zJd#Fd)zb_62)x{uA;ikV$uFd+F$tbVs zRQzSxoIR8LG2)VH)f!ROq_GXvi%Ise%M;oU#s&ZUqz%4*H~sk*Y2gCdDZOlV%sbmr zqCdLXsy4N)zey!A`Ez$|w}|6epn~YWw2BW2J%5lKdHznx0ynPCwap!tbE%b8zD|kX zacL^1gCFOkug`x%y_FUx0NqiI>jAoBu@>bY@XWas5^2_u&#H8% z+h;*H`uK>!T;8MZjF3J*eZPAHd>Ya7VNb1cJO}@&*0?A_6|62_qOG4Lqq(z`Kjgb* zmp13iwId~5azrRfw|ReD%&?CwAJ+waTFCejkoPLQK3Lwrq{>@T;k7Wc+l>@o?s}+C`@VhBI#(GfXPk)h3D$kJHdoqB3UP1ZcfcG+kGuq~Zr6J7 zP2Y>0&z9C1PC=yQ>83Uv>RtOINP@O}|LwBuJ4x0Cxd;>LDT?3ET^y_Nh=`ET0*Ao0 z_`v%>h0MAWiq6pQk_-+$ui!#n3uW}I>dR(!+~HFZi@A04cfTNWTQ|;(HF6f6C>S!L zy-NKwv)hG$FZqO1rbAWmVGraY9rki)+ePNqEt8&!cNbW`?0B- 
z*m3y)RCi9DXOXX|ERlRxAXAb8O>$p3TQ)=TpP&1(nlTqNs52yC;;dvJJ&4fw1sa(x#D-S4_ph^L|wZ~GW77QfUSTfkYM z!gU!dIp)r=lC|?EYO~FvS@p{hSdi|=xN$ywxr&Ipz3m`hQGFQ~dzGe|M5^lIp=+12 zCp6kp8jhoK^24L^R%`dr?+@y3Y(y|>acW(c{K2C3)J*S^<=cy@JvIbhzvClIrg(%V zr_GOt|Me$&=;?`mAJ6;>OR7rBy{KGxj;}#!qHAwo`$YSi zi4piUCl-!ugPclN2LTDE2ZsfUzSg{E|yF#=w*5&jpjYc(RX?l2o;e(Xq;?RHe0=LPDx0} zIS5)yH78hIu4VVqy7>SP~BV=Jw{S8k9P8tmaQ7=VXH)Mypi+ z%R#-p(jBx7?ziXZ(hh77_m|yr$mZPaC7E2K=q1l(Lx8g$jrXWd?ub1+o|MnmJ&|qi z-=W4=8q(eer?kw)rAp{iwA+N$jStqQZN&QHGg0e_J|^VU)->)AVAY{_iqyD5_Z^MlX?oq<&3s!mv?|aBJb?meIM6IsKTi z3M2-4Z2Rh#t9~K!I zSuEJ0X4;y+7U(!-60?Z}Vu8cD4qb*`k#h=|Ku)DvkfZ*#Jdu@qnpNlyqSM8&eQcmI z=!ImC60R&RIX2ak-+_9Xh^Xn7B^V_mL+yq;RGgfSF1q+ew&bH-8^`cWzsB_5LeGvi zlG6s)gFHpZ`9`eDvEktW8KhNj=Mcs@}zG3o0v<^?S=U#OkGqyJz-p7180Fio*d@zH(*iec`i9txd+D2 z&R%56#8{0JlCmqaYK*gJm_XvXC&MhrvMx|vwW@cf=wO8>8;?N)c1#XxeG@>>KR(Cm zgtf~Tzu)rDOK9p_QYW(=Mo*1;7)6q2AN#aFUbP!{S$(-Q5=QqUt`Ys3NwAHHe&|2G&&dR+NwsdniK zBiX%oo$mVNz+M1g)Un)Y1BVAP!rlexfOE^91x17Et^l5EKtC zh#NTRPVIHoulY~qBseS>Go%;vUOTw{T^^9x23yCA8ab-SB6G7DOjuQXATyL24cDYcVVSDK${FCo*7NG zEUvaSc4%7*3keB9+pI#XxjDRXc6s9af0fUEEPdwAra$jj{O!+Te;1k2J?Q?@r=LUd zPCnKu;CcVEOc@9aQT!i=9=DqOw&0&_af~o9w5V-8C@3V9@fkS&MOYzsY{0Lw{g1`C z1zR5u4FYxvX9z`3PrFSL@cl|L&HS!Kg|0USFNPl)M9V-y(V1lvF6dB)*@N zP+tE+t2s44G^5^wM4ERl=nSd@I64)3Kn?i%O@joTyj}m}!F<;ulvcIB?jPjv8T)RV z-vXFv>1V6RkSlE~v&tXe|7r{Qu^7t>a@WAzEz-(Wa`7Sb+JED1(OgQVy*>=7#W40f zZ1^D_38*v0b05*wZcvgZ9&qP}*oBu?EdO&`SH>E(*GPW4Js#-0K+pKTzv~7d>~KfP z4^~e@bgfOt>fayxvEa`?S%SBUOkeZ`J(0H z>XzU1A;u-oI6&{C4*;%KY%i{!)d{1?9XEAqNd0HaVzX~&Z!9j{_4hI&KTj)L=~>RE z7ex*gT&g-EzSl8dMX7QMnK=(p{E4C&R;F>`No@wAx|6*k=s}HDx zBTL!f`$xFE0q~3R*;ZC{YNq*CCD`mitxyxum}q;+v-@MWZ05;t2Y&RY14qxpZlCY= zEOfOW)DNfX@JvD{3N*oQsz=1rVPzMN`ZkFg|1G6CgEH>CB38mPc7Z9|9k>tv1}dJBWo(pD{Yflg3#+?Vx#@>De*58! 
zVwI%hV0TT?(fMu$s-WC2UlV|YU19OHFtI<5?aSFXfpr&3Xs`7&JGfrX7%k<%eB<~4 zrsDtmv*$$vXY;lB*i2gW(V?LcW%FLop>m&$pkl4=^F>p5{EqQy3O;k}Gnil8mOAXh zu>%B4_Mp^_yZmB|FLA|s@sh`5pT7dY4R;uoBc|0p?D!;2Y@C@^$^Fl%cBzsBi>Py<8xtxfC z+aO#2{=Amj>Wll&{q{v$RPX{Tzqx>WB9;zNE4>M?TzarT69fh5qxj4=g+JH5IZ5za z`qT$Dtwnx2zUZs;06+}BR;%QY$4TJoE0c^d0r20;0;H1rJ3yZMBS4O@P35?gJ5g!) z6R+Tr?r@vflMu*kSUjd}1>oS#eF6#eYZBU(I#c(>cKQ)G;v#tHeNiadG^g?X=|9(b zYj}23@vaBS_ow20?I3$SkzR`zM)}%zbc`&?HJ&zsK)&sjK9TLk2fohr7#9s zmk5e^&am6>tK6j!wV0{=Ou|9lB5B%ZR%toSx{B+A^-uo*Nxq|3;a0qLy|E=gXXded$%~M zmBY7y%svyi@6=#hZb$i(W-6N`*`3^lvh7GNIo-VQAKC&0OZigDHpnqShY8q>4EU;F zt4EM4wFMFzBG>%~s>iTg5kP=Od5UWP4J>a&?6i~At8T*5xywB6PFiusY=bzYaB_pw zXMQ^SoJN^&b!(AGG$zo(XQ{UhA>k{OoF{1I-;J{CNGrJxf@t};ToGNdbQV0uHissd z8}yII2o}g)`wDdHpGz+Ta_wN0YQ8<*NH)}s{jtV(nFFifJWYCfO8Sh=XR*(fA+4SYGiL}ul zVZx>pBd{*Mc!@N~P+3!u6%tC#tiH;jqBtKhvDzr6umfVCE`Ej?z`!Su-1V8MD4)&P zU0<8ITwf~cEV}8=Vh7lR(%_lyWE|bNG--GH%ta`uM_<@MS-}n4{r0rcWM=7;gCRs3 zK>*GAkP65eE{>A=)q2fL0q=e1?9RaWsGo~U;!Q1WHC@0q6K#Jbf6uI6rP)|X zF7}{oc#-=mj&2J4dr=Sr>V_|OIat8lq<&j@w*D(4s^383hBuS@D2RaPp_q;tNVYM6 zFwrPxz60_+m6O{+>Cz9wRuwz=CT@7Sd(1B=Tm5mX?r9tvJlKLqlqQXr7(A6FKH7Y9&P8Y3$o?*O|)O~qi6P@0^f?M)uYTQ%`B6a|3tHa@Ox zOL7}GZtSl}g~j2@sMYE_a^7?v4lWf5rPT>qUaZnnR}S_f?b5#S5~G=SM>=ML8&5z9 zxAcLc@fUVczp;Y(JwNbWD=R2$Mtt4h5g`M}_KAn!FWL%?aaK8*)nj5lJ0PmSrKMSc zO_(1mb7FZ@OdoJFv&MjjL$5wMP2CR(*d$^{wAo#kiS+*AZPLmok$!nz$1!(+h@sLL z=g;DZFOdY-~-TcRbMPqz?>$|6S(t{(L{Drh3V3SmPNyF;C7o3#^ zw+}*Y?;^g{absov2Kdy#==6=a^_t_Cm@hgUkfiMCjT#DwG?cL%@>k$u&$gclXyk$V zW4->R3=3mo^F{VSZj9)-rNvt^L%$= z*SEc?bZ2q+M&c|%xcK?tEN~HCB zHRc6QfoY=%jbF}}Hn>Gsw=(}$o!Jjpocny@tU6Oa80NrOQp((F7(UiBF^!g82D>1y z+puqR8eaia_tJOeP^D6!9yP3&yUgq!!jP!~wH^SvQ~!R;jJdKq3vY6mi7=zh3LjD( zQ4t&90lDYTw3aN>T09Ri(ahU3Xp~DRcm-Y<@Yb&Q_Up29apFoku3BEBUGs(=7Uic9 z!a`P}$fUeQ^+uJem|7>L`V1%lHL8vN4hPj#dv-0d(K^06Z;LQtGCD2voS)|%wK7#c z#=uH#{^wwo*6qnVANFw1nXP3WT6B9*`ikMz z;6^hCm`OM=_msh6p0V}nt-~EUIe8w<1Kf|pqa-6)vpu>{Itt48Z9ve~|Mk@W#r=}& 
zJTrb#LTjKqja|ifIIZcNV4w$QNnp^~qR`i@+haaQ4K(K}i^Rl_&ij$ub-g@;8NWXM$r&T;@_Y#z>yxSxDL zG~)LK23oKC}SJMX{STEzT5EuV=?c(vy>4L7%i7niAFc;77@b{ zoiFr<*lS-hX~J=FX27hfZ`CCUn=!<6?v4tWBIdH zUP#N;&?!!O!E08Cd<3{9Ub|e4K=TAUog49uzROIWk>}i)H)mJMKeod$%7ETpKCvH* z$SjPU`L<+nF%F)THg3ju2o$7JenE+gum9BQMV+^HBOO_01A(qlA8OfE_G-95O6kRJ zE)OS*hK$RQdo3x$GSV7guO&`PCQnV~$@)ar96ZA-4C!0W`OxE;Yr{tH`79ZiT$X8( z5aG4@gvatcFa0=omfa!qB~RUxCtz%2q4)=+SZ>riwr4Iq&IC{|L)@jHa+=>OLB`HA z9rQ>QyhSBiBL@*C*q<}zJ#LWRD2LP5%b8y)llauORM@;Mi7ByJIP&S57 z#z_N-K0;$7avaSFns@%dl_Bi6{@u+)x2nANYrToA){bk*5(-B|w%_be_tAAcgb4V$ zWBEH%?#PL-msH-r4tZV4gfs{?4>eX8e_n)SV2QNZ)FZ$m66mPB zksGC+bJrrSm$O(tkMMF`J7jXDs+{p4;r;geJC&tk*t#k(h8fofQs-bcUG?Wc-I;Dh z#g-Vf3FnyE=^H50Og(bGB2>epN(O~{`1AIQL$hCqCno#E1^5FUm9kx>Q6jiB^~YT# zWq|%P`(7d@H@u7yWn!)Z>#c}3S{7{;RxYzG2SaK6`e4V%v}}MbnK(mB3HFA( zI}Xcdvq>7Qt6brUnjyZzhAmcwA-(JsB;I41yCFArGfh&oX%d!E8|!FP+*7nK!qE~n zmf>i{3M&tRb!XW7mKIC=2PR$`JF`FYBj+p)5;bwYMlHf>iqrq!99edWktv{X@RH|O z9hD<=OPkp7!6N`aInE?EK7XhC(;qiAi1w8W^%0y*r)@YAie1mkam7{d`iDJavca z#zakK9{68xUG>IJ!x+!r@dB@b%~7jEebh>LLxa`C)y;AYCC%Res!gyRg+RuRF=HU- zW(-W}$(e1>kcD?h=TzczOvogc?p-!U^E+{4G7RH91ShqEK(C==%4t9|9MsuBA8)a` z0^$MPyRVzLaw$F9ez~^qkc+~(Udx`irWng??k$}j61RAPUq~aB3cXr}HKbjVrr!CK z0+8tf<=6pnIxKCJwRq5#rRMd>8SCbB%38^1Z<|{cT*-)lN6YWg=)sqb1r$M^ha*ux zZP6IVyY|I{651k3X!1XPDhN2o=O>7F+DJ^d!UyPqGZqgh(95y^KG33%rPOugg_W$D zw(n9mLR8N=BCk!~_jSxOt^SUsn;+BM54lmR1Kfh}+bwRAb*y_SwuaE21AW_TO zGH1zn%A5M-?S19p$C+ORkBDCJDKV`LLXUKlT!08%`bV%xfvt-k7Pz#naAYgI)&31{ z-9daI6ncFg)|8G+a#dXKR&bheg-Qcc{3WfCCCfC$hh!PA=8`3}#jTp8BSX!hWwuwp zl)p~uD;PuiT5zS+kGu|sXEW=__Dx^EeRWCp5|kULUi3AQtWsk9jY&{Clt4m{LpRWCiQ4D^@(equdH%rgY3ja(=?Mx989!y}Kl{L*&yD=5 zhg9z6QUF9Avc1U=bO2CzD`Rv)?Fp`VKVxP28$)= zbM_JhR=pfEeSCPm4mHf=|LyOv)Q_!EY|*cMd`c?HvIoL8%ZPdV*qL+=3-pMjHvGsa z!zWRz7Xl%_92S<&6pVPsU2W^kU+s%})uyJKgUAWf?cr*V1>MwIQMN6=m^-ueZTU;+ zCrQw>mz8taDmjK+CRKB#Pd@YT#cs{*VS=Dhw7uDZ$NGo^PZjdBRZ++&dw=t10O$po z>F>lw`m3-PcYz}L4wvRRtUFtaO}ac=&ioXC>Ck`L(O-uh*MHHG;-FZ9)nt*+5bck( 
zc5>(`1n7t0=)`hFYpJ!yEX)AWl17s(Lm!2k%V647q6Jb_UOc4RF&gOx{}2YbN7Chn zeL0f+o1S#v>{#9>Y4AG)R41&w@mRk}iu;EVq(f3()--VNHw3LqulC)*Ub6|c%~-D2 z=dj!?C7o3l6ta>+S`Zgujit;C(cfio5b{aHfnU$<$N+cP&H4D5v+m8Zfvbi+mRvO_ z`Q^)uEYgb!&%1p+vmYLjFJE0|o1l)M3>aeBm{rb75{z0X?LrkD$B|qdncIszziT64 zG8UUwZV6dDh(I&Dy!C>8X_D=G`?lWPo7d-n`HDeTO( zLSL&tkPqH?=>8XzknP>L5H6P{ZOk~FsK*`e9Y;IzMIM%68Ry0DS6r?`*@S7~t?Rq7 zguG{_Aba2~N~K4&UHp-R0P}34FdS-b?j{ylgRgx;h=jVmWVqG2=?@-<;_GXSuUjZD zSA~uxbXtoo1pY~;fB-8`&_wx#}oCWIafW>=n*~NJsismBEt_OQxnkJ zSRQYRX3<+kvreG6*cW=W51U~;+I)B&W{Mj}x+uWHdmYwv-ME2;b<5`Yz4@Q$_gFe0 z<|{)PGj&I*?;wdJon}$mlj*JpwoxMjIA@FXOodn|(s3kiI_`4V?H<91D;rnQcMkLz zNiqH|oUU7OGZCy(65oGDWQS=odL$Zj3djFtxnYL^D}yYzE>B(RG3PYSN|=+8sq>Rp zdm*odnRr!m1Fx2Ai|@vJN4*YvE;A4kGc<$3nrWP}ByrkZ3L%<=Y4zoZsUW3xZCp}B z&J}<3vZmzRHJ3H4scobIgEyWZ-k5@9)adi+HEKz|zthKk;3E|b?`F{~DUQ_RgYQUL zZhibBHHfg+=27LBdUpO$+ro}AZa&SNH%jUD+r#)z;zI5`fkY4zPegQQ*g8-(Q!X|+T}y?>R1 z5udDzL&hbW32u+X;E;QK7l6!W{-F328awS9N6Wwnp`3f>0y9!+Xo3@8OALwL#Emg+ zcF!~9WkMx5*|jfSxwKs31}WFAOCg=qwF|^Or#k%bTaw#F(8%ZFBN~F5KFKc}z>lJ4 zO~@Lhd|c5#r=E8W1FY!2X~m1WadTXl{Kg733~NdmlK#yXyYrLGRv}a4(r+Pi4?FvE z+b{wX+vek2!%%RUAvZ3YfNmbU@OT%BPr&(Ih)45f4NDxLI10KK`9zW zkFN=!Kk9|di0-F2V9qj~h`HT4WE4wPo_(Hqwdbp$Jtie!bmtzE>;ARDb(ddf^Tey^ z^@`E+@ifmA-%mE-oCtlHQ)D7Gg>)oz!+3dpx5DoFTE2BMvvpb43jd-cR(f5Jp>0+_ z@v!h!qwjO>#mT06iDeHf)Q@Y+K1KT|CXw7G7yh1`~_vUc>ab6-A}HU)}kkuN$d;dHmfow?)szgdV{El7TeV68%9XGwT9 zdg!j-x=#!g@-`+7sC)budpLl3@eJn}(HtLVcsE|L1n+3FO6y54^bKgws*v;JKy>E8 zz)!q}>Qci1Q7y;wYjrn~9zVrNlu23)aq}YtB$8mmF;G@XJ!8~bfnR>U+IT;%$)DB< z3%gm-7Uvym+&i;Mm94rgTY;|4aywukYkTStoghld(ry9jTF4g96nttgaHz}DNUtJ} zCLtjWX2-_TPA!ucTw4ii@)j~ur}lk1wMpWnev>$PP)qeyJNw!RMwY93jsDuM)|L{1is&kg5Y$>GJ5amV za-DRSi*P8TTD%pDJbp7JwRJHoL5$D9Q9t^RnpmW(YUH34gfP}T7 zAWT_0FHCMGS%dbbnaAj+w`rHvYDh&K|IMxp^lWd9ds)6QjCfrQkv;}I*l;uTv&St4 zY0!##z5_Eavei**l-&14>Q*h^yUGj(9KXi=s`qV{@WW()k)4ohBro&ENs#=jAB)`r z^&Is^5eJJOHg=ozBeKW1AjXt@#w^M``YoafRvcqVK3!7VPU+8?zY9(@j3>n6n3K1& 
z^+&a~8++>HDNcg43mg2S3G-a^Gf{;uxhky_}u=&)votV)a$$!2?4?}einVrb6=dwZ`Gsg1@-hQ7{dY;d zgwBuF&mpnn3S>7Kni2u4SDR4cYe8}$YH^TPp`!$DxRyW%L5ZgwbRxkQhjE< z=tSVpm9MkkN*yMrXI-(f8HdFf7aj;?61937o+M%lwHWgtf@TuCyk4uah}D$Z_4Aqn zkQ<>)@xyAnt_W@$~c=Ty1o)md_**=v|MmuGIsYoDL9B@rswH^gIM!>^a}8sHcE={H0A~sDQ)8GO#-%Vs7QF z@X$;^MzdYnh=3YxqTi7!sbD!%b8O(##$4rdsW<_FZ(NDZORKW}cld%3CZAx`uq%@* zxWDZ8m)n@gbJ+8|2KfvZ^REQ#foGRecar=-Cy|?ARdIK1@S#{Z^iRQ>%Eld~rPQO!gYzC^qWNL%DJmT*(X2$rx83hb`9llGRVGaxr~=j_Iz#CB zxxB|yr6TpiP2BD3u#q+#8g&JApcY%YE}CwajIXf|A@?FS@cB z4=&sa6cA#jx1olWhiT6RE?KLIvS;%MT)Mb#6FTtzh7QILKx=kN^|sL6qSsG~HyM|G zyu=E5yMyABGRzC~xXRL^$zCp(<%IN6#d(9~?PVi>tK*huvy!qs$6>b0b@`+-v{{c3 zTP5on@O<1z>S5QY4X><=<4sIR@OmdZpvAoYA5QNWVw-J}kZ!jS`CD*9NP89|ijr{D zCLREdO_CPXenWjNQ{?||K;~Rt8cpkb|DxejA6i=I2Qvk;%nqf!h&T8tVv8Oe8 zo)osW6m(A8R4(s*Fz|mWG8Y?r?Ud!R%E^&IT+Zcx^~QI<{P3%vt#Q4szL{K6;nUw8T#v`{=l;lt#$Y7oq}$Af2uhg_Pl=8-_bE-peIJfx?J`qIDBbYe`bzu;7X^ zEQ|;Q7Wur-D--KtV%Z@Z=+T5q%JhWtDW^!%CU(1C-Y}mJ|6-zY0*)&ox<7GUm7mvD zAN?+KJU!`I#{Jh~DFC{q3jzO;Xw?k7gPTK{c;k?49$KLc(cLMm*+<1nIDCE6v`BLe5saFznqVhoREAliWIGP)&)7>ClDB#Ixr`Fjj^)vOl>CuMFphPyHNMf-`ZgG zN?NPjVdYXKYlJ(lZJlOxu$>O0+?I=Ofz8g&+tGN+vuTl1Xy6fy{RtO_vmC=XS{R8rnCNIrCS5N8&Uv41_~` z`zy_y3cW~I7d|tFal2uLj7=!T-YgPX(nmMVBy(sHeC3_Pp09ohuTO~(AT()p%d44r zBUGbZ9Kz&A#-EmeEUvqGYZY$cSW{Tm374w<>!ItMppu|sx(SJmoRols5A5cr<=_Rmdxk@4NtI-KMeGP12t_J{)p zO7g`&TY7V?;c&B~-dI|Q1;&_<0pK8Y?#lLf3-zmC5ycH(Vt-9e30x}wSJWS zU^D)~tD^o4?BMe*HANOjjNJW#2!8jTvvj=?g}QEiHAOwS(mc(GS;t&Mu~orx*oiS!1v~_wX{p>C0EFBVW;c3 z;S7!eQr1Q*!wm{HGN_?5NGDL#(uOoz1O`Al&uwW}8(#c#Bxp)m0gRnA63<&rRSkrM z$k_U@Wpa9mIa>>|K-?kH0$ACdKx)O_ap_L`C>6yGOA1Xd)}B<1#m+co;QFSR%%_}& z8HkHEGD;!{&TY(!9dG*jy_Ksiw6U&sSUGApC>&cr9Hg?mmyscI*W=(6;mqup9Vbvx zVfk|g)MFE=oHMY~$KWG>o0x8cm<2NX4|Z&@O?{rU?6SW4MCQ~q-a}YsDEz$^Ju#Cx zpq#zNn6+#5&l2GYk}ORRo$4L(z5nF+U}KuS05YxbVfa~E*ePH&-sHYT%IG{dywSI> z4DL||1b{m5)`@<37nlY35$?X?{ZuYrgKn zr}<8Nn0-90v;}K)x^pP&R7iV##S4EQpE79+{QA>(}iA=YiMC05V4v$!pBCX7d 
z+IYBd&j0I-tifOi`l*$GKy?FD{-LO2_gaC_>4{ncaJ{#29gK+uBTc&Zfj(306($@Lo7(th9l19h60R8DLoyMUUphPLNj2zzey7e zS@MfeZ&DjZ66Fj=PfW&-hRUt-S2n3cYo`Q>5@wh1G=>36Y4j*e#yX5Cbz(N9PJj~^ zgLI}?pFHofvHh-u#Z3d5&Y89J-DY|o`kkJ4fCi`2#07-w^NM6GsBqNb z@#7`f5)s7hL}9}p9Y9PSi7gJPd}&`JRQBQgMOc?*z@4HY!o_#-psB9`%!=TKZ~tcB zJ3z;iufx=bfoCB`O!iF{`)m7K&X;(UwB<1%Bf%!Myqjln6T)-mDNUe_ zk|n(IebTU@VhhVQS@70B-&7hHQ$$4=Yi(v^)mw(Ej#nMbtPRce<}Sh(bfemG?_xq2 zLF2c6C1y8|Es6Q-N@QovZE|iMOYe8yFw*?L6s2&=yu6dY7F__(L(k!0*LRruIPkps z6i4kP@ii0PhD$<_Fm6&T`z2Iu+E^ct3I9vgf#^T-0t0NB#g zp(Z2?ed^%@McOcls>7zd1)doLJx4mGS_aaq>&5PFe|uS{qcHvuGj7VwXVfQ=vB*0J z3RxyeNO?|YNdH?~>DUpKuo9$EMn?tAZAweuqmfM*MHQOc7(O2s1Gqes_V4f~jgk4G zcWgnos64tN8xf!>cl3aige-WY1PUl)%kA?1e{ZWQFg^Y9&Lf?A@%(c;Af$~#+g}@n zw#>p%)Wx{I1C>ARi8J=@eiPeUEKMc`09=I7cRU=4uSv+?2m!I9@Kfs4Ia=xgJyZ8J z82;ZC60+twc(0Z{;;!PnS4Xsm6TyWR!AP;Brbt3LGTCY~HdUYFXX(CsU~BC9hLq|# z{h>763!lG;W}D+vizb^5pd+4Dcotpjz#I*X_fBgpl1A&Dk)lA=Dj9uj9iG9Q9b)BK z#hHc0_ES>(UqQ~?1}$d8+nK3+f@~jY>__XZC16_7vKb)r{V-(C0dRXVOckvR8mqyj z)#7<*3;o&E&_BG=BCuvg+i5ez94C4xc<_qElOw382q;Lz*65E07#QeWy^ z&A(uN#}WjQeKdcOBr73?54<2(K$E6lI8<{KhuN@qxG5^XS~803cx zs_8UHxWtRuq|+}yYH-3&Wr;>@pEoSFR1O1Yc7RR_~4 zrOhX?>xNXA>&Zp5ab#8wy1zKQ?WzaMKyFxBc8tEfxOE6ePT*iwY{dQvdLa<6 z{8`%29^tSW7+-K4m`@LHL8fwI4q?qNF3cC)1(%%J-t6}Cl2b~(Y_Z60TJP8?GYOVa z^^~FvAUwbrP5Qv5d0C7rCZ!CR1c6_xBNt>jSm{wmc;Q8d`X=9C);)pFtyAaH7mGq9 zm*Npt*g@~S;wx12GL_N;!b31hjW_jYpXN)IU)@;JtU-S?;ICe58VdI1Nv&gP6IvvP z-cF?64L@qN&WR78=J}N@meX1L34c9-C_7P|p zKL#!PZNMA7T&hFsZGD{9+`Nue37T@=U5@_`c+Xk-Kb9!Q0}l+-CuLGRCTiHCxT>@q zWbfB;G!-{V=KrrH3!TVmB>U`Gso0_VJ`%j=5yQ!i-x|H~{u99Q7L{h3n!h1>Db?|v zK#h0kLUk9COUfjk)aASNe?8BiZBgMpE*m(dcTU|BQG*$IpKn18w9WznZ-uuzf zxw;DO#TI+)^*XDx z9hoKNOwT;ik%QAnQsJC=|KzqmOe?SXOtaYMLPZ7gAnc|PNl3>q z!FG;3+(yyNQVP5(ll(ovdMIGg*Nd%6kiIi50q-)##6<3iES~3jD?oA;ssDz~ITq0f zUP4r#aB;!gqV?1VN@K%{q z_1{YT`izeb#1ljF`qg;u-t2ngf)}M0a`HCTfJLv;yJCX!QlxwQ)_gFqOD~s zx--K0VM*b$FiQfG{&j%qMB(qGcz6qbnhcDf3tAW}4Ms28P9F4Y9jSO$?LJ6a8XvZ??&k74DS%x 
zG6}@2>^v;4o=ksTOemRSuz}eNC?7yRsXv?Y?S3oWd7D(asWn*ToNDyP-2CGbpJDt0 zc*6UBm}2Ew>%gz;eeV4hy(%FUB9+o#KeZ1qVYq`UtlDf`&Vn%L&iQ`Z%iRDS^BUem7$3g$otbn`JKghfp!@_iom==`PlX=3@0arT< z@70mBwmPOwPhSZ@&R8)aIqOeRH=lSN$GBg#3t-Nxlj=8d3e@g&Crf4~sb5k-4g$a} zeT9Qu-f%^F?GkyabCww<;QDZtHz3Ox#Uz%tw5@VWB_9fGKaE1l2bPTooyj~L-eRjr zo~x_X3;RFH-aD?z^9vh~TD4BZ!H^}`svx6?fNW*BkWJV_i3kJ&0u3W9tqRCgSt_z* zg#^M(*q~*}t`dd`qXGd<7_!AMe)kgurS13qy)S?D(`tF1^PKzK=UnHU>pF~RO>JhR z6C?v=G7bdczS$MqxV{(cj-Q^M5g<5MxLLQg2dl{dyYAC-JYsG=%t#_ke-X+@+$UQu z+&>&ph~UU7oD{mTW?l5H5aRK{1H>;#SED*>u-rcA#qn-SvKm<$OF*m{muJKguE)e7 zpW-?6Xb21wyr0%AHfjJ<)Eb4KMI<1+n2u8YWxv(`MSp0*+A3(xeF~@-Nf=Yp#ci&z z+I$m^G50dBXJCF@qGJyMb?B}|ZsqfzE<&a?x)NA+dB4-xkt>~X!IsFFV$_rGl{EB(At8BNCd*$I1z33g%PUB}TmZdDgK(R1~B z9g@SK(@xB;1U*U17I8|lCSJFAACAk4fxC8fW++`bu3*YwPJV{#UCG0@3%#5*^bd1a zLRJazzNxEOyox~c=VUVEUMv_ zx%bh#yD@@w1JS5gkA>A3tXrPtvIDny`t7n zU<4bqa%|B$uI>GJJcP!L)C+1Q6&jA$lr4`)rd23k3|YC<8@(0-@;!eq_)Fq+02Qo! zN(g=UBDSYqF6&+txJEMKeon)cd+r5Z2T&9jiU^{v>8w^+2t}DZ6&U@CAVyh!^6FXhS|G8xv;ln6{Jp=vRc1yf zzEK%53o4{>8ld)r8%rmKq&e?Vu!>?xTpaLwaL(vkpuJPGsry}eb~9(jBA4qmkk8*L z5!bHl$0JKT^lh&Zcj(h>0;89$(v?E6(%P55_}KTH)atq(j35u@*4+qg5YbMWy5I{Y z=e&|0_`@#WFB>7FLqYXkR4Hr8NsN>aTy$fH_jgd^-e#bh<}GE5#e89)a;zljI~;T(%U*+F z*MTDVlbjbB5C7mx&8<=&dp9ZIFYX=$VsV7%GVBgJ6pNj|O(kR(%wx+*Sx*khHkwxQ zp3Bs>M9j@j&lbq1ovTts0fhe3(X>7IS-XhN@>cmwyd(s6Yh9C@sPH2?42kEQ(V>wY zLSK4J^t+N}vS#Zufz9P&!nso?e048Urx2suUFI<}VZh;y)0fqrM_AgvpUP39@V7_k zRotS_2ZU7tTr_)qSM2pB%A3}d@6-))T&~%yZ;*Jo)F=z_xB#tsIYB3e@UR zl!a}@Ft{nN0bKE+hcZ8!J^`lEWj@x(`#JQK4kIp%=T30xF)!Piu8u?}7$nR{l9d%v ztG2`4nG~yM3u+M8@Ln)0GmwtfW7;E*OiQMdj4_C8?do_*nYSQPtqn!PV23wWNoO)P zI2YS0gAuaoF_JaX^b>Bgw-ZcA{aEuVdvHj4_%E7d*q7YWw^cQK_7c^`=1MI(hnh00 zb4{mhiDkQ)wFP!o-O`$VR0B+*QAR37T*CB5t9)B}Dlt$fr&>QQupqaIwOAw0qJnU! 
z@lIlP1X$I35^C(greKxe7e_3l)Jt8YGPZu}-M1ygL#x|UPo}vuFD-N_C@2-W*Gig8 zfAqZ40UO$0ed2=rEz<(MQ4^5w=w?5^jlR(F3wwTmj==D4k)U%!jFX%NXMciS3^BX@ zv2=^8?ARgn(n~EjWAmuPIafP~*PQ#xWU>(NF?Hs~w4SN#7oD=*D^zM1J`)#Yx0hNi zQhg;ptXez|gZ&xr>o^>?<;%BVB9*?eZFJLDw8cc`&ql621Mlf zTIyoAA2z$gsOtn$0F|vksHnoZi5nSp9tT6%vq#qPM;Od;ojL(oWoYF;vD`UFZE$PT z=;cr|d}g81fxdaDMmkvJ&{2J2l_0#ScAk$=W5Y;00MEdVXI}1`;~bPhx>5&=Ll_ja zRbCrgfgvEKcu6>oMnSiP@Pt;7cf3%i((TErN)c-fm!#dNt&8@fx;bx5SYAxPP^u7q z@6U4Pb|`y;1(Fo_LMr`l?wW|qrsEN7wMwZwpr8)8P?g5(1$N;|-D~q5FW5%)Ou;Lg zX6b#uCrMT&o1Z)n;)z;;^Pe$)kgh-sRD@)=VBKb=b*p2&x40UQMTyAS2$VYFcx)}Z zh)BN5IqgH!L-q4K{M9X7SznJ4yE3z=MdCOsILM^p&15F<7NfV47Pgn=O}f78mhLzS zZ@Ai`%skIGAXR^2M0wI|j|26jWb;UPG&w=V#_475fd=eadUGe?);jv&t^XZ;FsnsL zf%Ims>tHO?lNi`2rjHIkKzNNK=m9s*Q?da!Z!<28`(@K%6vi14KG(U1j*7H7#%0Dz z+{^-`7wVQe^yliprS*H`32r*daB%o!cl_2;W|*d@t%ogX5uVh%J=MF*j+L27oI7pvXUX zUQ&CEE9;4RtuoQ#0_}jE#VEh&(XFuK&?HXIE zIeZ~telv``{D@M3zRB4mNzG0N;GGP{(6Yk^Wq}rmCpDMiE+>t@ZIDZ^a5)f<*MFC` zi~z_EK7=9@<7X*B?@5#lI`1Q^YGN*VOM;re!jgYN z^JO`*gNS|^?NSDs0*Bx$@u^MSZ>gT;ZpAjglw0-4O7Qe?KDSn_+cj792TPp%!#EedPr(JC1o92 zH)u+%&rOG)VPS`%rHQSBgr1Z1FOmkiE@dso23WZD*8^OWYSJZ+jMsG$n2)F23N{-+OKZz??nfw`#Jk|+N`l7=KM?`)>uw#hGUiyZ z59L(HrTGF3dVa%sJOVfI(aqRwW~*^OJxBsz&ncAO$Yc2+ohva!+h7qypd;?KnBlz7 zTX5*@6a0`Tr)R zy=VAK{t#~YY$Fr3DoC2l&k>}|=KMJySGLav#m5*T)koOjrc?4uGeb$A)bNGKsIuSR z+i4Y*Vf}Gx)}zrc34!C8<6K34_f7g_W_wyRH!ZnPE6$q zCRLK&)xlUWVP+`Ckx=nbFT+s+f?4gH&_v7hv3zg9+mwg7V7hp(TZh&L*rE2oZt-VV z+=&4@P+CNlUTsDg`{}sI(?p5dd}Qe#O@h&}_Y^N*gk6t@uwBjru3j87U0mMU<)5Ur zO#M;SqoqVjRZjm@x#EPl32%^<0MER6w_kWo$JerR+BU;OcA`VaRWjvA)8{*a>M-?m zv+R@Iqa)d82-8(fOwI9~?6RrfLS_zaeG#tp?gp`nT%^#%OK^nYD8BrgX)$}tr3$iC&{gT+MOSJPW#Bup|3k;^_&{7tq z?r|;7+}BcCZA|j3r9bV~L6u+0(Y76rUnMopYw$*i1b9E{x#sWXVqN)y7c=#-s#g+F zVc0;_(ajcvVGQ2k3un$8^Z!iw6do|@x$?TbaP=qHP6!Fx3jQ4Qv&LkiBvO*Q0|wvV zhcDIfUMyWO?sgGqUOeO)w@=TOZZFY)B%gEIuv8voqPBt2mB#>(H5@qV)KeWKAreS^ z0RFYcxJHAJny>32*S%)`*S-H}5-4IUArgXa3hyG{pX>KXA+VzpAZ{9nKWP~D&weLYpEKtE=_6&E 
zmbd&ItuE9S0*j%=-p#9FKj(@_FN6I6<9Hj$iZzi>RGU_|#z0ppP3=^zbJ5c4>7ScV z8YZ$M7a!7uO`EoE$zCAwptbJ%0V3t#bHA{j_E}3L;DR_AWTmjAI>7}9X7aJLy&`d- z`ObRUJ3Zm$z1dvQ(*~9$ZX!pTyznP@+Ea)h>5)2lIe!>~mhQiFJ{t)_>e2ZWt@k^X zvXm{FfLGv-4kl;@ix?RC*~b{nA@YD%p8{kD9(dZJSYs3al<5d5vv8jZ0{TZ&&*niWIGt&xHKF$O&fm2lN%uH83z8n$!z_HxHI6Xm%hZYJ$cM8T<#hd ze`F`iYr7AvdBD2Yo`SsAb~EK&FUJN`=nKsQZ=pC zc4eK63OaWqCkNidMQsZ><`DuOoC^aa0G?@rzXgRJOfnsq+er#`8*HVbl{OPH-Mhm? z!gTA`DVd(PZ?HJJ?Y~fq1U_)NH^rIZHb$H8{y}hs{AS+)!GxJf!?Q5K}S~SyA!D5rFF~Q@7tW*S_Wm%AHX@A zH?y)?eAr}O)NIfkP+(|J&gRq{3EK|_CkZFPM}U6T*s02mS-!f?7KQBJtrYNip5C-aAji4t>$4Z9 zp#v$KLlch8HR*pj{YGa`OldYu}0MB1=ClNB1 z-O`83>L-1U4jMQf&(inKte{Dr+f2)~uyvsm_GH?ty7Y#xC%J(9aMH@0d6|f!}hcVRPbP@ICSH2BHV-uA*kj2=DMF zPtX;M616U<5Y|lDv5lehS$D{E;~APu0T-K!vBa-I${t_bOAR2Kr$bKx0v~7N1d;p_ z4;kbJ47x@BX%J7AK{ljZTD894;Q|naZv?#AMYLUSG4kE?$>!98Xe1)9|DFg|GJE5D z)U61SPePx=F=*)%<{u`|e@JlYrQHhJ_Fv&EVa)^^LvL5UBlXbyRxtG?O(Avx>wCGp zEwjrT9&0nVgQ0}IceET=4cQT*4{ICeOXxKKuWmKEX8x4#<{y054I6J@BFpQl&)Xe{;_Y<0?NTmGGy(LH2hUt7!X~&-!O4l5~;LZT_ zM}ll5T^f~&!S)VNh;sV~^(GUb-zXyHsVk>}r3t-PD&Ki~Xuqp32PYTZ6LL`S6J~B< zJbJ6R3HjPjTD?VaMv%PuFvx8j1}xA516E9?X~n8!RnWs2zAe*dn%fHua}H)N?722_aXG_^= zxhZaNxWbvx!`2=Yi!AWHN;AFM+#zb+tS`HZp)~ex(94kZjrZ5t^gjW@MSgr0VWO3D zU$c6J!2`d12L;Fb%9t-v0rlYuwXWtC*Rd>v0h>uWyILm1SI&(5xyLn_YwFVaqNa59 z2CFMk0{YF_Su{A1&7x-$@Zcu2SVyZt|FG+X#^u*OoW%@(3d%Pia6~8X@LL|AyAp6F7K#ut_ z%Wb#v9o1gtf4iMaV%2tm^MW=d0{TrJ9Gu=vdT z)4*~(exS@x$2qbmrcM(3{nr9&3Fy#C!L_HB7f0C)X@H*k-@Y}V>=%lDFJRjQ-sU-7 z?bOT?WpX8V^#phd@SB`3H`j;?6jq3~bczN^b$SF_n)2Rbd1q~kNsGB#J5=HF%sp}Y z2v}%n+K|;^RJ@is&t$%#-8)q~p8ro*l4Z_cDd-PmdwD<-Ii`lSSP=re&q6lUxayye z%)H*IC$W;WI)GFO&Jv4IVjamrUC|M-(orCfVI3rK;`HJHK(fKkurIyWB&=E;@MSTe zSiC)yC;0fkoK}`O(r8;LCZH}j$ep{AUd^=5^48b@tQH_r%;51N#pY?Tm>>|rzoZny5CK{pk;~N7Vg#b~j%gRIl6)GxrW*5`IL%b3F=R>qQ$8_3M zN`N5xaxvA(?40L^>)sA$%zcwbxHmXhC?_E=inaHSrWuH|>zgIsS~QC-E6rYL$FN12 z^G0s^z<}~n4T@MUeEz~~dDl`-cXjky%&IJR$?xAOJAL4ijqBYMmhVAI( 
zW@q@hS?_Z#UN#m`rEN~ZUwkL)r$Cv4&lVsLAp?GO1J1@j?G=P@-X?&fud$QV2`vm;wU+H`=b+}%+65i4seO#a&>=@3Kd+`XgiKcs+K z`&Ig&?APk~K%4?Z+iS?W`=nMr_xpR~{tE1V5;DpD|1?Q%HUp~g&TZ}iFN$pL0n2S= zZ}_0Hn7L{bgIKK=xffg_X4Y@B7)p~Asg@hMS78sbqBK$4ql%0f;5Dner3vtWbzbZj z3ISd~!${xl?+1&K->#8Z1?&qUvfE=&GwFAs?a{|2sQdoA{LCX90u{^of^}t1@6($o z{M`uWYi|JD_n1(GWwU{(b$*2gO)4dJr>Syu`o+|ne*M`CcSe_h2b`0KueeRfjwG${ zKp*bYTcx#p5pZ1Sj2uo`E0eEzNGL1c|lV#6)8HBO$F%E{?z)??@qMyAsDcn4#!B!&2}*|-5MY_ z)dV@VpsYJJxMwMkwwxv{+ z6+1RVCiPd)V*t_;4&K4E<03k52{SLyRZ;26=8a{RCX?gGkQMmN2uDM8w8Y7s{pXhS z*AS(T;oyEKt>qp)S?&8MZ1zo=e~k06ZQXwsn`|Ly%TnO!Pu3~brIUmul$iZ=O0y@8 z_m8Lg%;c7&gq1%`GfO^OdOD*05yeZHT67G(5g z&!U^#-^3b;;Rq-#2{mg=c6$gK{Ev1vzS&{Y-BI~C4$#d(IOjhRqsv|Bk*cGx%bO&L zk2-Cs?UF3hEnlf+7s-`bm{V&+Ckf6YF_)R+$5qo~vp*Xr)ll`>o zv)U=<+K`C{=B1&S`^a#y`CjcUAit2C6&IxjF_W!9$a>A(hT`la}H`Qh#Z;BP`dYr9Dcr%aNzfoDF}^gqDPglDJO#!no&08gzmm9%<;T05YmY^upm&PB+G_$f%YlrEv(+1FqK zP|X9)0L&OuYvvSn(OtX`7>B*D#J)VQ(S>ToxPZm8i&JH)y=bw~V#SL&JI4mbJg1MU z*MdnnV91fji;XqYw3{E;tW;tYF4ayDhlPL9$u{&>uhp{ukG=#uyI$_6edgVGh*G;m z;oBrgkNiewd%|F&v%R7&+L4&b=`d|!S-FI5Ld6T>_S!uAFjupu=Ba)&o2ZWN;ysUq zfb5w^qQ7qGaUe^-BuLd-zMDQA!}R&wFD_&gu(oKF0F}+7^zF322IRr9=4CJM)-3w| zgsPCXtfHnG?0VW-?}VLwYOL8MkN9*R&d%8 zO9rGK%ccsrf^+sNJ^%nn)O{sAO*AI{f z0RMVJ>;oC2Wdfmx!8uT^ob$PyP!0o-BCMqyNTgXK4dx*U2txjps$S{yLf30me#^8K~#ojZ#kL%wqY`>)4^QIqoF_ z>Y)sd0kJmx7XhUAJsO*-$QIbISjJ_`(2@kRiv8|eQnd+0F`UQG`5yUhrGiKx`0&(< zHS%8S9yOp;nrx-NKY_o`d~$5IrOmaXtMb`f4$U2gN_iH!xh$&;Kb?xCPJX(_ntJeE z9#&mHB1*EyfN#V_y7-7IcM;#1F#6Od!8~=cei2w)**{=e#(#i)in<y*{z{ zUa}Y8My?D4hE(fB?<=NW%jlJgC7JiKKdp|KQR{SR`2m#={&@YR0oCpHQ;fpTtF+Qi zy3S3&OUD{zaNj0mV?8k)&deuhcsT5%B{=qRDmVST{fTRtX7@HBA%F94o$|a}8~K*z z_fCjKuUHK_77T70*WI?xmp1EkD6SL~g= zqOidtk;~>DL++d$dvN=l#@r0tWtUc#+@C3cIO2Zu6IhSnf||nXW)SZZ@*AGFB>P}7 zqh`fgfH&~ELMd>Re9iUY{JR@Dor}muPS+b+exsxfSxDHuaKvX)P(5bXKI=b4@tH51 zzMl0RIOehc7BrKAmc(yz4q4du$2nh zqW3Ti(N2F)*t=2&i6KK*o41Jyya5Vb_Q7RUWa$uUR?{{&+z21_*!Z2`bKyn9qX@1UA4s&vNKdo#G`+<~?fwIA}X@psct}u3&Z>>|z*b 
zQj~6C&zJCA3`BD}KIRg3XF~B~sBl8_SpV`C{}6s^+eu4B{vPt+yBQLc(7k7Lw3R-SHmW)ZQRp#A1%E7#3&MVm_!vgHkA!x>N8&7Rx-8|5gnuph1U3l0sDWs`aYDAa3bQg;-maHVY{iV1 zoDSC z+A-sENzM2E{sIP*T-~4eegOOy9)}J$OcH@^?u!eHtaSz3^@A}39SpZ2S4ozD10nffeGN@$ouuu#fm*vjMP=FapO^6Qxj<^%wwJ$#=oQaYgIwV^-$a3x z3GRu3a^nLpn`-wmxE!Ovn=YLKdnk&4;SV|m4wpc_v1?y@Ilc0($mr1tg)V-{P_H?h zKBsM;J6nUfX~5qe$f>SDLY?)II5n|EbBlIWMcD6m%I3*)p;U*bK*c-WC+s~J++6_( zXRN8#=B^C~hz0=(;`l$P5RZPmRK*oT;4!`TB~5;Dsx}@;YJ33*xGauqD!{G}L%E;8 z0e0pSo&|^S3(f=ATzsbwzWE&fdJa%q2(K@UNpBRw&hQQFe384srG&%9J>XPtoEck2 z?YCDQkP9YW0kZ-Nsa&=(9?}L~n+}cabvbxMkH#*0V=u=p>Ll29dM5RAfW-g9>+RbRU6C`|7>Myc+TghM zdcH=v^D$2d%J*@U6JXjWNkA1Y46*?aXARJ<|0`qEGJU+}4C*r-o-QsK8l6zSY<1t~ zI^gIs2*r!^BfF}6A=6` z`JoEe>jEagR2!xRqdiyP^n-<$-ibu-f6@WK_Jxfd)9C>mXPlZq z{Hg{2&kgDSa?4_9xfFOM?{Q68*!4XS!}leS(EKVp(Q))%Z`HH@FT=k+`oMFK0oug| z>oQ0Or`|-lC@a3V7R@XyuF{)v-XLUm>#v{UBGkqA3+z_$l_Z-%gYz-q)3xAG2VXYb zAAbklF*CBMDe!PWD2UScWt1jGM$OW%?WXc(ZoDJN*#5^_-dc)cN#j>LyQAF?2%oI2 z%~OGxl_#ya`EFc%hB#SKJMKO2K@egW#VB1XQ1Cgf#47 zfTiDWu1d3DkS-bg_?k*)7DZp|UG?kQrIdrqLlroJ3Lou-@u*v7(#;yi&$es}`8~#G z?^`Y{H!~1s*0R-NXkzNQm#ZnD;s%@84XHsQDm%mk3-A9@k!90U{eCjOos_yqIOdL=TSkjsamnEkL*5*jKpma2`e?@0wu5-t*sygxzN`EigN-;)mPSAB;(Jyu zkYe2_)2{VaS@JoFn!(IDjhheeG*1Y?xjvV7=bb;RkuZCb^R8T@8g;Qwo!1{%v zv@y@;o4uh+=q|s%`Sy74P0M6mTFc=Eyv^m#xo2iRS#GR+(y>r})FM;4G+$=_u42}y z4V)v%fZPcm5ee#&95~~psJ_Rb0fG8#|t>(#DN zOun4(;+Qe*_trfp=lj~sA*N@TxrlH zdn*b@4!6Tn+NvK~E<~}%GMGp06-dn=kH4?^{Xb|lKt6(T zynjOO7GOOeSoCT+;rLi^cwV?$H-oQGE8vt<0E*ejlZ0LvpEK{w??(@q$7c=I^Qu;( z`%c7{@T-pV8Wg)H$V70LD;vhwH5sK$lz=m=`7N9K>-a`DUIK9at!b>v6%YNkx0?;# zWm}pr{jzw6J~hom>VXK3_hhXjuyehk<5{>iH$ewKuXDL)0{vAE%3&gn(K*~Ccy{8A z^564gbIdqx9~YE87=2eacc%~O+BTRFa*u-MwQT4REdopTdfmCtx7}uydEg@K?O7pL zRUqQt{&EuOUU7yzr6|U>+dVZS?M)VL-ux*ObM{keJ}K0gv|EdU!(~yVH2A1RdXL4a zdm32$Z4g^Y(ncC~q7HG2&aW2J zNE|x#XPMVr{s6fuCf^Sb%{4-UrZqD;n3lYLD}mZJ$*v4s1;xeLq<_Uc#(+auVIePP zFEzttVX3-vzNea_K{PSzoM%1Sx9FV1)R{dAu-^`nrE^Ap>eYt#KQcIZ@=f@-#6Vor zPKvJd!kJ2URH%I7J393Unj%yb>W?=_vP5!5%Am!bmsx2{yXPX~(2`E8^pcWgR@!wo 
zYH`YruCd1Av;J*r{7>(>KkwtrN6~(QJ~?#)ZHO)N63OPDH;?%`JV!8C@LknH9Xu9K zmb%oL64IY<2-p(l&ELyOD=px<&&NH=4!hL6#nAb7JzD#5IwveoWBp5=@j$%8`rK;+ z1n($$0b@*qKFz&6t!eRWj;DPmiR323uZp*^?VM_!jnFF|$ng!6pJN}@ zH|wo*IEPX%XQWyVeU?TVY9?DuMS(6>8{r~eULvTOaj!Fqm_b&$3T+Du(Bi3sR2 zmBpl46dR|Ec?qH;jct{{Wqugt-iD@jhvlr&Cr-19E+YNw8Qw@+$B*)ky^jjyJ99MO z3Tf3pdD+PI>UXn>f)d*)%{n4Th=M>uBv7ck3wB)1l8W?yHBN4R%ozTuG=KShwQ(c6 zSkjxEt1-N_*=|dKar1$Z+{x2X#c;jcq>!_pqzX~h!npZnnc~n+g<=t~*_Muj)8T1X z_&Qx<;EaO|Dh^z}_2b5n(q-y|rzY11rKG^45(|)?zELb1i0%vA5meo;R`j$->x!~=cMI;ADKxh>_UbEtNXgPNBG#PNr z=1>}};&Icql>BDwyDm7j2a6$SAei%%nqa=>d1F#e3dqWO?|;k6u3z6kW+(RlfXsq1 z+0}d>JON=7ofFfP?c;0SU}$Ra0OXY(a4bd}L_ut4N|TvrCVf!^fx^jSwOS2XA$?JlWWWHFLr z&l;&P7kZ|~25JyTnyIVSOImVW9u64MT%}Mf z3U6k+YBgZer^QB0C}gtW^0`I?LRkvI>vUR+}vDFZxDd1JgJ=sQC&??BD7zKJY^v|R3> z^4O}et<4DfXU2`QS(+`6%Es+wl-h;h?R9kHG;N+VXM@w`X>BA8dXcHu%0(+rIDdJ^ z;N!YqkK}@Rl~5nkpMSa|cHWYKta>$WSU3FQgzI~1(o4FeVZT8dYCn6bw*Mii7NY8N za-U=52{eJ~f}XB>O6?JcGsZeQ?A<1AnfftEcjq4C@FF~BU7Y9!-S=KsBu}_nu)^ga zsNNdr4@L_6&6m&f9uCTdk~z4;8B?FLyF_f6+2)rz_Usiz`n2PI*PARGr)p3~V#lev zRBy~^E+9=$cjo!sb>DEkeTU9DMX?d{v2M8wf6K?cdAA$Za|YUX<_1)apZY#JJ{dBy zJ+odRKWi0m%c>^lz;?*tQKps}Z*i(0XW#4IGqymcRW{=eHfSQwAgJ z1dk*YF}{!@+)dwXyCbG*IfLv6VXa(XV3@nZWOA}04}qITgWc7%aCbnlkD`h*#YPCl zp~gCDD1(e3J{^=nosELw`$*!0Ai%=|#7JHUL>NF#46PT9A2KDH;+{WjZ+W4thXAE1&GWt5i<0Zt&1Nz(@z%fp?8 zjna>JQd_(ybFGuCWyErx(@c4&~9j{Lt^Wof_ z2b#gHPO>C+8PE<)2`C2tC);bUb|rI4E0Hg3ZpA;0)b3Xy?^!xo=4VVCcpkQkMk0pt z8K5RVb#~}dy<$7zD1%G*T)xz%QcSySd=dPuVY{~L(wY}4eoa;RV&DUoVXdlqc)g~A zRcOi=m7KO-xzX`3doQ(^LI}ya7jh}?kaq;7tuPD-I>i!QI%Td9lg?Di5U3peRp81# zLzy)&rQ%RHuGI0Tf8JnOF(p>bMEpmxU0}!Z_r?n+GEM9Mnx2YwWZ$r3cvczXZ8USb zUMDKw$~d>3o~X&0;lyBth>GPjT%qj#aCy}DoT+wvK8<)upnI>7&9n9JaC0+SzQn(Q zmWQzggL3*AO4j010ZB~}|36uXoiqEFznqYZDXxAY*gp{YcAh)L*cwvf^1)9_;%Ciq zqgO^`h|Rk7g_rvLh;MzYo&pZ)uZlXo6%hK%lT?{Hv}#h>n_c1Y<$>b4{oTs;M-M3J z{{*z)lrv->0}7)u%$C2PFOfpZ!e8oy-K5#1?X~H51v|Da(O2Rd9ue-ol+-742BrK5 
z)p)SqGtBbelC{2yG_%^U$MVHGmXpyK1a3lWBAQ7b3Us?R*`sEjot{WUV9X`OM|bR9x*p<}=|7VtD``zeU~WQ0}bW%OMtFqL^RuaH!T#-(!Tu$@% zFwHQIGen%4Un-h(uUqRPOnDD`uH431IUD(Vt{n2d%}el0iLksCW*B9R+jS0pdwO;& zF@-cWd-;g3{!ivWuB#<{yJo7k3Wf>jB@lbZkDda&J_)WfX4%Gaciy=LizWb~Qb;bo z0s<(rx^GDaK99__2z?5+LFcm=3HmN#R-V_=$0noaaQI7Lg&$dNpTwnB!C#RdTUKRd z&I8o(nizlxfQLjpD^Jp8Xh1f8z(C^MX_qR0k<)i-iX_PS&0&w}+aDNSXg&`AELHh; zI$)c>+)2tvCeSPzQ?gAckC zXvF~Ls+X$Qo!N8>b2J$dS<%Z01y<Dv7vel+#d zimHKD1ju0x40hyl0fDsc)a>xel`Gu8xK|E=COz%x*R0T=1TWI_AH=`4sR&tqw>Whn z_9D=rf*Sl2puyivb`0iHYi%$fXaM>HcP4pGJ8KSbmLf!K12VIX;gyJ^$at;y8caaS zHurZ*Q*mc1(kHV**{1$-AAr@h){!Rj?-W@-ib3>hL6Hcavj~@~MRgDUQMdDmY!RcR zFha%j)YdzEms$@Y39rB1icQe1$VE|(5l;Md?yXCyqzBQ`hU~xwXh%*nk6uFZJ?*&J z|EDVp$j_=D{`f_p83>Yixu8Rr%!IxELv9f#m21vLwE=PT9X&eqvlM* zQbsn?T{@<_lOmsCTwI1|;z{xlC2CzeEpU9U^5+h`4}D6v(?;b%#u$|J0s-_T#P6NP z!u6M+h?9*fM{=sy%C8?OzP~q7>D#Do^cc=$fFTnP@&8Qe3I6t zL#I%?iEHZrS!u~RGb`yhD;8xiMST*xAgX@ludQj8Z4?j1%&I^tmqMKKC*})RcUIB_ z5)u`_v-`k~7NfQzLgoU^UMA2F`zZs~4EuTWJthT4n8i`b#om1*p3;*+W)*oQj~^U>-O(poyc{{+t8JiiiHotk%%!#P ziKvzYxZ{~0WurXg9H~=doo_+;{yXi!$M0s5N*GYdwZa%_M@~!`r(8^C!PAsqe$%Mr zl*aAIQQm6pEDHPoGfcbnvNYD`OxNkm8J%`J2Oy^=2kC{dgUZ9Ha7DMC3#pvwCI-T8 z0yUFR>@Yq$F1sDw5G||Bm@k&~cpf(Q)GEOsa+`ZFXw`t5@Lso_aKlAC7eynKKyE)e zH}dJfoTqJl%{O0l;}#p*2iOG64L%ZGJ6CuORMJa!s3Ow%&3lrJ_p3xnT9pFN z`%M#QfA55PmXV-mX&XNmbY^pqu5^9$1e_?VSVK(B%S?<(CwehtGRN-+= zT1K@ppLL!xk2?O-BA@SkdObb$X=R!a!o9IPTF(de7-D6tsTDaR?&I?O*+Z%vLNFLI zb%aA&<%7rbuX2Av_m+K`PA=cqm)d>*_LR$VR<1p)14MUS@eELeOQF9SU&Qj&B!85L8e#nqMh9XGR2SCX!w!+Hh32beyW(tfXO zin3K@d`nQ12{q%#Ob7ObV_l9z#N*WZV3O?;P;hHS&J?zGAXkhiOD?X*_UwYe!u8z4 zo@f5kNv_WkI)LV5{FU+-#Br~Ax_xyPs?4op)~~9>l$G+$y;qQ5Y$xye>naE-dB6ui zNa?tFoV?W_40i$Fo_z2JdpNM7t`(0l-=TlmgEGXnAwcZD3m|P7R!Ma~9vWP6_o~L2 zml5eIkB^_B=QZy+Jas^rkv==0RJIz_5Auyb4<)bhXEsi!48k~b`{7?JLjLI?$2P;+ z=MH`^y41kjSgwo!wIX|jAw?MVj-N#Jx2wWn=N-wusUxRi+6$=YQs{#05te{uNcD-LfonMDmANa_{Q*P{Qae&GrL@&tc45j{J#w zvW_s}?q6J44ICc1bj0Gh%-mPeA5RRtve{nWB{%Y0kOftOKISZUe4?#W 
z8H9z5V?NiLhUBNrQp%A`z}{$6Pmw_j2nosPePfoq)4b{W@jAF^`1R&BRdv9%;oUuA z9Us)W&*LTHrS)R(v#TIJ;fd5c6L^c&;e?MZo>F_IGK4w0XXL+CsyYv zgF7lUYM~}B83C#$15@GN3RXU2ANA_+oD~^+wCL+zk$jjvb~+K}1EvwS;K{^=yT&(;^z>i*v}rhmMSXA`ydP06VXEZ=%3 zZiiAxG_bBgg(ZYOcHHw#r<_)?d0HKAGf~;`7fuT{!XOC znoYWa$btkt!G&`Rm4_{ma#8MwgvDI!7T$!v!|jg^o4jDz#O3Rzvx6-L+Fq&Mm(UB0 z@b)sOR??_k28DqPW^ekMt9&eH)5>jwQx#})>{3SUWIu2m)(c7)aeb~yqzbGfuYRD{ zpI)#H1MYE)Qh}VoKldbp{!?{f7B4K>fUKA{a^_o|l=3Y%nN4ip`=c>}$Qk+WlKK(H z)eRs}zvBha<^scc>Zc{zO1)*&0Rpca{uo&>zf~1VcasOV?*lmfMY=p7S=XqXK|T3B z?io(vN8e!7r83XNo)+VL^KL7wM-svjg;_Y_Q>TK!;2&bDi}|FXqa2Vt0tFH+d?tEM zg#PyCHB1C#g|JVU-^f66h;D)IWgQp#vobbzu&cq+;DW)DCD;4FN9?}K=fVL;i&p$4 zlrnrK+e1kwca-5o-vPd&v_O{JzG8z(f8Qv(Q;5oHfz(jHqku;loDnzhf?D!{Q%-ZA zb*aDG(ieYv`^^rG9|Zg_RUYP>p)c7LxRuDUEb;cl`&0Do_vJ4%1I0SzM)+{VTp%>L zQk<>34KH|u(ufYwh15{x8s{RD92+k(TKrM^c32()YCIW3!2_`vFl+Jo1|hJ1|V9(H!3j0Qmmp3Jv( zGtXq8dWgTgM_d@7R7og0(Kz05mmVttacJm&TUjk`9mb*E-H%x|G&&#)257+KfWFov znGX%vLwjxRHrhYo6{##c3IJh<-4aFpbxtns(_^F}6ios>_8ks?jBD%3;YbxT!gCRN zMC4Y7; zug@rsB%yfdbBF}e)O-|jsgCmNrnq|k%%*)Rk@@=@8YKu$A)%G4@h4GLhBF_zyJvD+ zYh(oM45VPMF6z2RKOb7JhJJ%gL>ZGpQ#~96mSZ)`eW{0s-ltB=fH1)hT1P0Dw|3B{D zJF4j`{2z~{LR&#>s~{qx6+syyqEJDW)B(y6WKWrb0aW%#KxrKyq99v_%3d)6f)GNm z8c?Q!j1ZG36c9p^VjuxRlJ5-(mG<*F=X1{Q&)@s>oXX0*_qoq_J+J4v_f36p*fXmE zIjUT$yGsDxOflj~?Pu)#XYG^UsYaY$8NHUEt^gdYfS39pbS6*Mqu0io$SmDrtYM{o zsm+kKFZLWldK7)}t8j+V;NJNPcNn!kJZpT9uKn=8eaV}DF?rh(|MS9mZ}8gA(wiwOKCinyHr!rB+X2da{7U^uwO7QsWUkW=pg`af(DJ2K1!{0g&x5W-AOV#_ z9MTWR$u7*;3l#{Udz&G(K}S&Qt%=?I_yAxx1$LZ79g$ZAFLLQwSOy&Ht9jO-kKu*( zQFPbUd$%3!L{uIm|DU6tj$32aUymTyw*}n$mdA(gs$a71Nq?+^8$F(9(FB1cn)EmY zl3xlrnWY!2*p}1Xw%0DU_*p7kZ3cOnmgaTb#qGGwPy^J%J+A>F~aV z8Q=G$MnN(ZSsBV=c^{FxWv6f!H*s$w=C#Jg|y-QIZIW zxwv*oXLj-0m1n&YztC^$PzWt$lcwNO@yS7G(niSkoq=>4rlOEE@3UToEku8D_t@=# zAR9T2+g9QR52~DV1=@QLA5#fjfMXtb{U~+S*vwwe|4o*fdlL3uaDB2`5*KI$JK!$* z&8b>;|0xo%Gs|P)Dxrwr9~z|lDvtzEQD^~iPDAuWogsykf)@~h0G1beFc-*Yp|`Nu z?>Ri4*K~JKOJmailOiNy^}GQfygg!cmGl03vZhV~2~pk9aC`9jg~0=5!SYd{A{ieG 
z8obYm1GSW5Hw!N$(k!H9wbtEpl`yu-wgKXTS?<~2r~lg>=FVrNC# zLBSDS;pMQ0Tlau-!g&;Efi1Hz!XUnO>K)V^$VokLvn_MCNKX1|a`Nr7wlPY7HZIjb zN$Nj{(;1ASS6U3hyLt^GERR);9vogiuwZy~QK(5;IfX4{?%DUExeJ;;zDOfsL|6SB zeZA<7lGCZdwwXb-o1lKHP>rpszm;)d(v zp+n4@qQ~eLJu%$B&&oXoT{$or)&|cG!HKs%kV%$gCEL5DZc7n``B}9_tMehL&D@DW zaIdY_M1%ioIyo2<%>m)PVqv>EP^;;f*ERjh2Rhs>({(IY_3AVUdGq$jS_3;o=g+m? zqong+i(FG%e(Wgi@z+6AELjFHOM3DMTYrEYihA!_I%&F!c`#vsFi!$nmA5_OE;`7& zV?X*Vsg?Saav~th!>QWiM^2<2x$bT==k8!lXH(YsM$Hp7n=kUquRX^KC3p4HXO;K} zZ~)cgwQSiRsk`Q;`gLn=Sp-(pfN(bgzp&Ko?B;dkduMyZ;TD}|@od&7aGUuQ;oD8W zAe&>qH~z69Xj@f%&3PI18lD|>)JGC<(CG?-carCO-z}XAiE$(iwlZ7I!h;>%lJTmi z)HK>%s{8&lgv|abU@O33=+^mbTvWpiQEL9cHT6OhEGZ_dhFST3A0pHa=4SUr_$)5ehj@UF4eKW@Uadog= z${;QvAM_?yiZGSu(|?u0F0}mc&(Y&{eQ;^pWoA~7cOYDUJjT6#>D}ZTXy><(?NWj6 zw#;AtWyqEO;&#u_RRgE&4TogHIkl;9Tl01Ic&DQD#Fl4qzU-@m4I z?yWwZqg6SCvm}M)q632Sz2)$FLnO3?8`fLk{bmJMq+*_ncdOF!*~zLdDufpi&XV@X zW2N{-^$%i@D~Vfwlqie*x!UZW9Kd1Evu;&=v72Y*q)#nWLNg+)>=>$!ut8qNZ- znT2`M*|aT%)=^HR*c+Z2j9IweP^A~~PIcs1P$eZPrlSjaxM`2wJsQ|s%>j8et)!`s zgDS5&EA=jD4SC$mPaG|+MTQ30hWz&Gf{3GqZ2$IOuZ$hD|96WtQW{)8H+tNcnbG4C zXsAD)7=7hx$0Qz_1SGBy2)7MY`me;p3;RZ{)@Yd0DBe>ZVC}%!~@E z>P;WC&}N`z_o}Mv`dT*Q;XO6Yigpqo+*d>LG!io>`VJ_}O5V>wiGw+8DkW(qSQ}12DDYZzW);HP@1;xh)Uvf+o`~0uX zu-p{6GNHr7lH|FaME@UV3L>+@zdzs6L9-L&Md<}t#RAtKHLXu$f5mC4rI0Y+S6bfH zve_r^ZXEP;eG7MJpX4j8B;U&Id%@w_zQ;ADKa83q-~tBsv4@yn9^Bl(&pP%h&s#3t z+Zo#T=W)Rc&_QaXD@u+2k&R6~^@E`=miWHmHcf?*=x8*kU`t-CCQ`^F70!rO>yFI| z*KmKJD=t%pP0xpP*W+iZ?>Tim96sF*2&ZzRU%XsIby|pI=Gyx=$FfCt5B?du=k{}_ zpk1h?Gd;H8_)1SC;k^Xp*|zc}%bpx?5`sORcidD?z}8k2%7rb(mEP7v`zvxMo4RQI zgUreiIPvZLBTV_(GwDk|bkuZupx&3tgFBLNvaP?-q9^_ib-Fq`WKlHp`0fK$!rwOn zh(*3cKA*th?B~#zpD`>*gSGVmd2c6ciSvG>$%Dbv_Cwk~y++-KY_c}9{Ay~N2g z{l%*JDw3hy9{%>J9mM0Z(?O3qSV2~i0j)akINTvDxv1+xz|zin&Oumv>A=z1NWJn| zY@OdiY*%jC!l$q~UJ#ry=WJnUn?CAlG*ii$oukxsob6ZY>P1x9hCs5TFAqbpF|?T) z{W`AGPzeIp!2EJKasc*3DWz(J{)(kSJU9{Fc$q|VX{Zo$+)s$mBt`^ksd0B1bsv2g z&4uB5pV!JgoE+GHbE)@4{Rc~>Jooq#aZve6eD!?n${}1VT9@9((#yC9cDH7i5(;Sv9*@qE|0hC|k{d#YWhMyIq!# 
z%~wOjD{&$H$0Jc%2x5^{%w9K@Qq9=73!^6O%rVN7*{1Yz%JI!A0|B>wT{5SnGJLP$ z4Ru%z1jFaGBUuiZ@Pw(?mWNP5sY(IJ+(vIsdMKfP-mh;UEH8^e^VxBB;8@pGbIDTN z*k8<^&f{6dC)M?FwU*cop7fTFM;WoanL+=?q23QGJy6AAVsh(LV!j%AWZt&KjdS&N|=q zQp}^pQj=g&4f+z#jRk5Et7Mj@WM%_f+px3 z=aw(cw+6T+N(*XeMWn(Vni)F3sc4)wK#=#2wF_#7WS&4nJ7^Sj84?|Rk9^nxGFdXi`-p}Jn22SE-o$+Yv(tV)ZmZT)Z1DFl{X zno@v$=RJ=~)4Vws*c2H*Z87wRd-I(2vWZfPyMox`V!S%5g3yX~jHI~H+Bet`zCVSQ z0#gS6E%;6i4_4?x)E^FN`{N>kH%Nq1vcjk3gBh+%=-2^nnG&A02mLdFwKs&i8*MtM z{Jm#@FYz^0U(GD-h?-FPKGje%Hivx4?Q`eC>3-?G0rfN6zuTwbj@0a7Axpdp-Ds^F z0)17s6C%#6jyjl?@8M8JhCI7JR7;3i7#(zp^y9QupY@2pJT);)FXfy!>vDifdtvSe zX?|%Y=Fua{M!(&lj)l791cmG~OY*&a5So1!z}507*u|lJl*n2ps*#*Sx7w!xNAfx%b`uSK z@T`4k?LlRU1aovxHyQ&zLA1l?dS{lg@fX9r8OZr|YVJWdoDtVJ?Iz;hb?bhwZJf7I zinGh?Qw(P&uu;nTVZNsi@fU%l(=K>Kz}@m=#M*44UR)-#R|c#4f@|`dmhuR2WTPys zPeR|JW$EW>xLyC)Hfn4&o51>bqnY?%q~KLV$>=M{+r31%AJM}st=Q>+KD=Uckn-h) zWo%Zdu7_;QoA$)%TWLpJS2e9{1W5C}7YP>DfD!+!rvKtK`t<7hD_Un)n)MdM6G~8D z+!!$O=%9aytA1U$A<>8xF?ZSMZPSWHWSDz2>E*PcegBFKHN2XFEbyoDH!6ha_L8>~ z9|WdddJdt}P%e$Ds>nX-_VbDNVB}inNw7nshP8*^Fsh-GMahh{>;E zVDS;Zpk)V3#S+9(v;ee4rjl8125LNO=B=Ney-q|VN*;L@8%N^rQ>;i4=w44e64A|i z40O+xR-ned%ANy^Ynuv%KI4(+D@&ATp`x8qA2Bke2HpP{CHrHSU96 zXB1z3h$PEz$_rl33}i^Yb&R;NXH^8J5MUc1s_O!<-`4<44M_7VbF}h?^$d--&?Aa4 z2IV?Z5Xn8nRzKpUbyM*7<<`m~z#NG4?X$TD$t`8f&V|HzQ}aN3fz2;zuu_nz-K_kE zaP|weyhvR6sD_xNlKzYPtA_v70jMIx;9#pCH4T`4NEfTnDJzcB)Z0al@sA0z=$}to zI6Tw`#r7mE7!L6RZ6R&np=eNa%{tLH+v~F7A8*Llal+oT!=`UNd|$06e!`6DGAmyP z^DBfGjVg#qs{eb$qwas_vJK)FX6jrn`v>#<(3&*bNec5t@3s)$Z!swr{eLAb92)9? 
z8eog)%L>%!YQ|utwDBfXP%ye@-O$VSy7cA)O~)`JCGzYt#S1q4)ou4jT6MEB@XZ`@ zpswVhhqK?BuI_q@yC5>pBmblJW&c(Cqq=4^S*lBQB$DS#o!F>==<^S`yMS>X+;HhU zn%0A+fKL;7f$p1wDcI2Il+iz0y}8%tUz)43o8p&n_NIje+Y*c5bLpvNsaZ!b_|`+j z(=t?gXcbAb?JeqO$<_To5+m@rhY~Wh{Sj^kbqh;S$|&*Va0@P!Aa67pjVF>qe@Urw z#CaJcFKD<($0jf64vhz9hP;;b;m&8P=?2=R`e+Co7KVE zPB>%ysDr?&NI4|+^zdnF2(4vhlr-P`sF;Ue{+W6n4W9@f?xV*z*PtNh&Y&%N(bRQ9 z&6EwEt*M31@~%dRMOJi4-Aq%`FhllrvHmFWqU_*|1V(NWu4)tGACxs)np1tYmEk(& z8-mrwo>H0q(+hTILD{7c1+HOSKsle>ko^owd=x}k=d%nnKYZXY>hhj*!k3o)2c$K@ zinel^p^77jkw$xrlSp)`no&KR0$j`jp{7AwH(ncay>y+YUeGg3-Gxv@7Wh#6Hii#l z9Qj@b)%IqPQYE6LKQVfp5C=LZ$u21QCeA|t!mWO}-DdqQvYh8o$}nQ2%pT()8qHaa zvOu1yaf7ki8?;@f>f{4rFpCX}pIv#S`g({K`ltEj{Q7hFm@QP(YFWbaHIoi10pJj9 zmAwh1RGsMVPmEhP<}kn^m^%4pj_;v|N&)Y7L~OY*d$x+CDjdw(=~yUhbWq!+urIiL z`aiyR_)GE0gP9yhp0~m4p;WRLHff>Cc@UDlhY+DpWSvb@>M<80dZN7!3$BMnVywhX z&C+B_9`k>?5UJ=)v)r&cO2Vs|oH`07VqwT6!rNk~B;+|Zbb?5?Kv2z+FQ`u;Tl}de zGfQ`USL2zyq#f2x&F>SjBT8^YnqU zLyG78Zvz8&pM8mfx!}ACooI%lg=}z*>lxYy42hgcx|IGE)DLrtqg6A<9SiCvj;jr& zJ9<>wNhBP*AJkU|yP;&(U;R)4cj1-}(>vUojh?;9fMb1LdhR#tkK4wng%%Dq(pyoZ zK|;%cn8y4E6ZH&t%du|;d=bunRYU4I^`p*`Mp4zcV#%`P|9E< z-B1f0u6F-PINJ87{~+2~zeisKy!+mV%IDSNkvsjm{DJbRPYwPe-FD+OdWirB!D0eGCXUFe$7o|45FS5B^J_**2ypgC?c+a zw4ICqaDvzb07Mu797ig=JhpPc+g!nD680EV;o3aSzsBw0-Z#yEaeBkuSws2*)LS<4 z_%GYKoX{)2_%HHZR~ycVJu5=NjM0h@zRHkS1Dz$Mk#>_24LulrO_%sz^Dm|;rC>%R z@U8oarx(;%u)*Lh!lAM^{MQGbFI(pW8-5GO_KHrLP<0_6EXO=*2lrJ5HGC)b>?4Y6 zf;L(=rdD+@d0`VFWz3@4i(5B+E?p_W72m2tR5?nm^(c>s7mn6;*J-JiZ5TqEsne~^ zC+HQDo(&N32(Ize7hTf3=!tM%>Qdm}UQKrgt@Nr9L!91G(=4(gET7>7kK_#&pW=HJ5Z0kU(>8HC&CDJ59WG{ z8Ax&)nMqXX{v4qG&+x7LSXzYEGBadIRYa(0RgcyM;@9ilVsU1b<7Thu*O}=p^lL+I zm{?SQNepZdqquw)SY2q5J1IUm!tYa2QFz1F? 
zG2t*WZTMlR8!BdUHi#f)uns^P*d)8fG3L1bT_^3;9guN$$vYq`IZ)gKnjrfYIbSDN z@IDG|Byb*lRZIm~79r|TeomUwQ4Q8Uf??@-q%@V@RY?LhSHbhl^FPorBGf8GbGbk| zqBe@D(tkPKxq2-29z)+{Gtk79SJ$?7**09-(6$i;JA;-6>s#|6$eP;t$v*^x9>>0D zF`=|xji0g`RxH-GODk^nqWs9Y52f^%k3@#EFAmZrg=0H<{wFr)#r-!n$COOgEgFu; zrZ99lWX;b}(Z^!F)r|(9d<$tn!OYM`>t--l&G(e79P3-Sp)M!*Vq zL%m%Hk_93lP$!9@(poml>7v5V% z`cNF~<0x}jhhz|-l!(JV96)URBd~$dNjHQe;REa!tQ_`Y7Q!gI7itDB66M^0D!!{9 zk;NNUj2Z|r0%R-p-;~$v%_6e)fPO@v#2U94}r6`##J__C42qoRh+6TJGFX zw2c5)A%?75UW{MAyh`D0%YV7fX8Rbu>72`F~T%`1CmXz3h#fYpOW}iqq z+W#BP#TWh)&5v3)xO7itQ?w{`jPV=ya2+6+vz=jh$lFRNKb`jSt5FJwjk2`70r^IB zbmUYCRk}pmJ*_?p8^--K9u$wZy5t&3v8CMw{)@@O&GS-?k+pF|?DWEUDf{k@>_S3ZFH!F{qiT5u z;yH`ro=01y7n>~z610dI1MX?Q9;~`}LVcA+`E~AwJe@@e%^EE&a$ zMn3mplRL9@3gHfmxJ>Krp>zlTGP5q1R|qF0Fa5++$qSKvpAQtzo9E!M5pvD~jp$^B z_dk&p%Q~e18B`Ph&6iQKynYfhEmuO>X@J|Kya^6eEM|;6$1yvp>;cVFX-8tQUzQW; zXiNs~n+1KOW<`XWXfn19|4!~PIpvMX9-QmF6Cuq{!JlVx_*<^T@v^10j8IEqCLK)n zY>F1H%1Ihx3+}5yaSQ5S;}c4*YHbgEwmg@GRyw(w;g|wsi(#A%xuRHp?i=iy=YS6e0QrC8&gG+5Kg7-oXb}W zIk?V3!iP^DF!YG)on=l1DZGlS_B#OSE`};*(j-bwLf_)P-6nRya>s8ha;I7k(RaJ# z1!raMQT1uUD%Wc zu56J#8(o4jIsdZdUZyS}*8EmUZ>g7#gcgpAgD#8S6>?j#`m8RU(K5Eoolj0zYVd%D zdK2@4(j{s!nzvym8bnYN;`rTv!Sr1DIxu~7)JdaMw^W~V99RrcRIkBfI&C&5) ztknJ|cKz)iOVT%Jh@S|!4-i{!#%gkT*9%Df=6w6&o@VIy3ve&lR6(Sn>ctt17zT5w)-ZN_G^d71kUE2PTN#xOI~YHpkniBmz5*vjbmGJC z^Y}2ofq0XSLJqJK#Y(4dAfX3{eZCy?fCs8lI$BrE2D**+S2(R&$;sRNDsJaa zpR+pWpo7KYr|$N-z``Keu>{^hqQ80a1<8oYpT*9jrp~0l{%0DdAYo&ZQ79y*gJ8i- zi~yWuKEt)Jhu`&U+7T-}`Utv$fR1?UCC~pe1w(cYVu=EGX8%{3!qjFRO)-5B6Ge&Q zLz@9VHpaA5+y;?3g{E||^80_WOEYlKy#UD09--qLoqwW;A$E4@>^j$2@^UNjOM^Te?$!`3@)AWI>)^ZjI{4lb`F<>Xe4&orlQeyA8e#YY zM7c12xA%&&m=G#=)v%!@tl*PrM=#zjr%4$Vv2o*RF&LJR*w7^Bop z-E(dU7EsatLNC@nfr^Zy8#xk2IK~m)+(R`%A4pAc+!wu7X_}5{nb;AXR-f?~<3YY? 
zDTPY9&T;g_pr1%@6K=@1Ov3s+u*SZLkiivTuMT%5T3Cw9NqLW$oFQ-qrM?N&3*wgUm6zO#$TvV=Xt6@{Qzf-08A`r z&_pS5^VJ7H+jE_O z8IcCrJ@4YRmt5=B7@ia^_CyK1!ceLuYP=v!{@A$=;%7R#c@!$rze7*{VWDRyOa4`N z>LdxpIR$ z+t+P**+oB#|52fXAHrfplZF|sRtuRBV+V9!H~n3jOog(Tr+H$y^1GtMFfT~2nT0Qd z=#szKcv0NskNy@77+pu=#L;}5K=r2dD}qNyw+!TB%j(Yvqs%}(8a_<5*JbVM3hq@z zC+jlkuB|*mn92R(XmxY|GN=n3Z8}V1wLZg0)#`MJ#CGoOSG%^bcqxOBMYv!$Lavyj z8Qt{3G?^M@afPS>KMQg4!&1Z>&%)Vg?BD@x1S1UZZ6&>Oj2nWL`qIqR;}(=HYwlzNkcBbI zZ*WwK?b?h&vgcMxdGD54DI7gb4DWH+#RtV^GQ=k%%D%+P^DcxBoG9qosvc3L0HUpm zDfm!4GYv6TB&56|wAwkpHx~;rAJg2O;_Yev=8)LsQ#E&Vrc9?f+vk~V`SPu&@>%^J z+vFz^K62mfJF2#tWn~M36T+D^g3-jhiGP2P{Je|_YDxNt7P7~kv?EPkc|Q{iM#O!I zr4OjV+TKK`pi^rSxJqc4_?HBJkrg{(MJh7e|tiCzh||MI8D;lh!A=zz((xx=*~8~ z!=Ddd{0dOX0=DxeAkz2;6uy0XLUXjGEEK=&50=Z@?Ut<+aNo=2C*s2qv=8x-PK;k( z)UbSIkCJHMP-P7H-}27%dS(`2Y-99P^4S4w2y4;o!r^-_oYDoJw&;nD#Ito`cDPK9 z#0G<^+&ieXv0Bh<_(2lLp7MF-4~zFC1)YYPdby+pdpqw(Uq@j2JU_35sdZ4OPZ zEE9vkd~|>E=hz1jRHa@Acx9WY?IVnZ@u`l4d69kwCLa zSyDe;ssg}|*yJ@{m+!D|Q;Twv2wx-OF#!r-O3t0u&6CuDPl|j$5m!7=$b71LyrlF>z;T5R={uveU3u=JM(O zjTZgaWQf0y=)BVTg&0vkQs(925X2IVkB` zhG0Gb-Tm-Lfe=oeth@G->qpRb zdB;TK$efF?XkM=`#r{bz_Lsa+%~2L}G)El!3@Os7^4|TvE~E6APhZ7ln*sHnSYdR0 z67#8@JOc=176_0aLdn#>KbaR9=`KjYJHzAmnLFR~a*Lmv#CA~OXp2iVbhH}!B2uGD z=dx&Q_3|q6+-Fn5(HS^)ra4iq3Y2+%5Nd7lluV+H_|m5Hgv*b5jm!OWJFZ)wH)Aa} zf6XA&n?qtd4EWPC?WAD@aOKHi&rFit1l-y_Dj-)|;6`p+_VxH9EjyYOs8MKoBVi5= zAWu?|LxY?MM0mcC`)(s-JcD%R5zS@aM20y1(NM@1s{lxMkyyfLHE50Lt5J%Lb4!2Z z7f>KXgPCt;q$2q#87WwgK5@l*x0Q_YF5O+TEq@m64>*_{&i(?tgJwb7$Fk2+#P_6A zc6n-|#b&;KBK^BF*F|wfaoC;n!qRQ0#7XZL$;x$k^|i3+)ui*hv|p0OP~o+CMQP|5G8+wgr5dn_e3RpLU67YWQEGZw!f zk>90j7~9h8N4-W3Z!|$_Bn8@9vJ3w4c1Ns%Zs$XJ#j<8{RJQjc)@?Z~^iA|Jg9|Uv;t;Gj`@5oyDt@|ucj#FNw z+qJD9D#ob$Tej>TBloA9yg?5hk=7Iyf5jq-364k2AaaguM5DRrL33P(`{7yYHwt% zFZJWdOz>H+Jewf*wNbz6I=n@dXKt%=WxBXD9)CdG!nZF~T)}!{+cjY*Ov|;~@%ks! 
z)NdtIQhJc`o>iAo60j@G&>}FmBwFY=ipVnyv9ds|vGIV|wCdlm**ma?*ckF*IC{F> zG~c0Cv3OHM@iipf70DxvhaVWKfV#=Mkb>Ih&`&W^6*?-S=~!QO5O9h`m?QAsO$h`;9a28-NRX|M4gc?W%ulT#O4NT12jJKoksd0!(Lq z*xQ%cv2&9K`5-s@+!DwA1hSxwuKiCtsHdv;B5PmirPrMGMqpBI2c3j!SN4cf3&?!K zIkCqlbP}eEx9_(zP@h1UY&djRr0Gs2*VRj2n3ilX-V@uaRJrPF$o}?aPPyz=B@`F0 zLnCAH0I0Em*Dfp-+oSw;UmiQ$9X01UCFtDS=#fL{#rA0+7Vsn3x5(cORK46U=Iu}1ulrTDdB;} zlyUhBp6Ufjna*9Zc+V#@7gI9a5sfAuIEsJOw^lNxe9EetH2+u0Ufo02aAX&{v1!TLHWAQcXU#&_crI-1CMMkc>jf!_lAt zFdtMWK17$`c9h6zWcE0o@^->1@y2W;2|bz9X0p_4)dR?2hpM$`RUarPapDiS*v-mY z3SB&I@ywnSN&PbiW)vP}+<9@15Sr(AXlq-P5Db`c<->Q|;&2Dw4;g1~FB+I{m{I@J zxcpG(aT*&YrH4Hqkb5iIAmF^vuWv7Gh$M4|?R6uELDpVe=A^x z^3{eF%^Y?Fe88h*yFAsCk>~unX0}pm>o8R0W{0XZUi)qjYV+?ulw3ERbI8jzEWLAH z+$8_P4#~lG>S=M)4fUFs2fq><&68~;KYkx1rK(p!DatMty9Ib;?K3KcMm~J9Oa4hY zpd)TFmVI64SdZWhOG_mu53UN+^Dx-t75*!a3}A~S3vst|4aqtTI>?Y)0jkjYb*3(# z(L(;XzrjV_774{4mg#a<4<(>}9c-pf&gqT1IaCdbs$~#muO^8T84}WXBP<3 zRi(-Uou>`zI;4ra#abYr*CuMM^V(;`dO0<`3`RJ_iH`-&5(Crp?>N*r>>8b#G@V_^tPo(k*Kwa9aOK#{AznEkw?pZ z?z`)|ZRUyj<{3OaTLfIJGRC9o$R$Eu`dO6>Nj>S}lR};KTpl7vvoor6XR#q5 z^f3tuDNZwEJ+XRO!5=8A%+J|jMLphGBSg%t4K`i&aZ`OO8D+QbW&Po(>_vE;nd21b z`xJ98e7K=u0V6wh%TqLSnU^SO5Rg2JY!Wwb?R4L(inRmGQVSb1o7v!I%8KZZYg8%{=LxvH9)KJ;fcE4upocot zWfDDGVFC@3@7sJo<`3NNqilR+)ZvfQs1%8Nxbf)ie~0zY>20OjMCK=jS2_2HR^wT^ z{X08evN={t@4L$R(LvKde5`L@@`kg6ruPOn|2`AF6Eg5%6;WHJ}q10Wn`6~@&0E?Y7 z5k?4^60;o(noP;V5PxfnvKmWyF*r*3=oVG7|4ycE-o1-E>{z*LRMsIQVI8z{l7w&r zjNwP%k!9;lb~A_YTGoNUos@LMTg+VV-9{~>wLn4m%%G-*xle1Fr?_^%%+AuX@+P+* zzZb2*jB}UJbF+?gyB+?^P>SQ}%0W?O7&3Vi#Hdd3pYns>;>5S*^6bja(TvLrruR$z zu1A_H+AsDLqI_`8%M9Sq+DY3zwP$vhr?+c4`ri+zs{2mt@t9Un4@C#ZEI*={``X>s zxmc(nlf#>ISOqv#G+VkhcorTO9?DqRDE6C7k)Yp3fPtdl0RSkd?7B9C?gRZH#m@kX z)#a<{7!sqG=87tA=_4E|jFv#yoP;Ib|8C!o!efT9-HF+7sJwhf= zEZH0AeTZArMFyi50P;$K@grj!Lyn;^uw~$rwfLMPnFg_V<4L`n8t&hh2Y zLuzCqMY3?*?jZNw!cKzc<8n6Q(9sO6y}X?!-+x(ZEHG`udC0iHtY#fb{?HKbde!#e z{3~XM7Gx&usU$gRTHoMr|NAKR&WVNGhcHDgg~V3>0JtyW=;v#P=u2DhRO!GE14=z7 
zgaAAF@`PKYL5MGm9U^F6eI@8mBk`2ve17S%MBa_a8g%qa%_7kcyK3}`X7=n^EwGql zs0}CHRRdoJ%8nyA$?*PXg+C|60ATP2Zs(lj1u>H>xMQB^1rF*&xw7=e={|bY(m9{e z(xb3Cn>e0c$%_*u(Xod;R18MS=kq(_^_P0BAb__34d+$fx;yx$sN6*26rNd`1@Zh%xc;=sgMh)o3nNLwrWV${ zcwL@eAJ8{x0pGX+*2DvHE_6Ng{A6d6ZwaP*r=}p_dBn^dv>ohUv-0`poyO%;uf;c> ze7X7eO8JB%HP=sdh&|o0f-k>lqzRGj3|6vaRizz?1SE<^uy8w%Ec{6|;!>)EX7iog zC|n6p18xlGB%$+|7UBvdmWcWNChhp`v(i+@Yz;Eb+Jc$+_dtBl8(_Tw=XX}c zQ?tldT>Cs?D|t=j!ON1re{;!7XzRsIUEqMiRUuaMbmevB?OJADmXCC+Z$`%UbWW1> z0oe=(3=;ci5}(hA;Pc}jZf<_}N}qS|9xi<`cuYTu5y;M6q>t%Ww_1uO<$3O(N$nR) zL=7jKBb;=N59k@}u3UUBKJdsuQ;)j~a#tC6Qv$-g{{u+F>z@l1>5v8`f@aEpcaj=Q zL8DqZy2CnjIJL9g%?Jzlx++lU1>%qOpz;`eZTaUG#}+*`rSv8>b|pl{}O!YXoF&yc`pE{wwYHla=!=9xapUKn^z^Mb$daz~O@Nkhm_R_F%E$my=z)ZjwjRP zftv7H#}X4{kc)Ta=bYHQ=r~s3uCOWm-0|qM-A}U}wEDcrc6C3BgqD4AZKnIhrR=2T z%vrfFA4*v#@iPq~9#&?F7j69C4B_M#N(=R8K4Da!!Mm)+7;%e!u@LFj8pHMHwRbL) zDKVdx>GBT4jO7O)$5Ira#T?K{;W5j@H3IE|p*T22(9aZG&*AgSNPI2^^u6#y**K#i z*lboapL>TM9~%h5RUA8>f1a%s-vUU3_%KB#_+4JeJ+KRlA$==EvLfDl{Mg)wsFbK0 zfA>&!wr4{S@U4S1#2k)haz&daTsI&@?V{@!TAtGIcGDRKH&(`FqSg zsI~Qx8RK#takY%n$MeJa;yoUT-&}I|U(|HXx)P?FgvKm>@+rs-o=V;f{K5;8ym|P$ zABtfAi?G)I4pKU$D)zWwul&xf4S?%=RV0=vF863_s)C>Kh2tH&_b1E&Lg9aSfwjLr zWFXw9-H*$Au3M|vdjOkr)V%~klzfn9{~M|-<>?>n4Mpnzi*m1hMBBWp@9wRTyLSKA z;`T$fSAlHa2d4_!F(o5nmQ#7*N9U)Vw2W0_TZ)&22fx-nRk$pOXEi{$>D)@tZ{98J zH~;4s20}s63v$R_BpamwKVcFNdpE$cM+VY5Rz#k6bON2?-F|q@oWJT8h}-S3ElngguDe}5bSYtNCs#C zdmwa%YvXvv08f=LC-BMa8qhGQEj&}X_G8GoRKarG!E(S4nfBqAMD!yBa>zjZ#D=yg zKuHOXC%6ng7PX*2AZ_#8MbxA^SXhYNEuDWpZeVRTNHEnFJB87Ie)4||^v~~se`xm^ z?_dA5=cIrNUH5b!2QpdrB;@!5A*_22F~O?WeGTy|0LxtWv~_E5`C;AH;OsV7?fP&0 z-;?+wtjIP|!0@ko?t19Ft`Jbx>%IqR*OdS3y?~it|BPozT?@2X2wVTXgt;#9V@E4u z)_~{g&)OD!6CdSXFj(t1S^cKx3d+RNqOw5LFDLwvLvX_GvQ=Jw-G^?+LGRnH79!St z^Z%dI?60cTe~yY-bBe3$O_*z*QAjm>C>1u7^&b^@CD7t^&;NI=YBM&x!XMxhBoy`d z!|yh z66r8LEXo-|HK~7-2;8AaqCh-?NS|&b~@v2kt}K4J1&Kf=HT>`KWG=u{VfmTU|&@q4=G5mz*IG~ zQoY>q$CINSdQOQX?%6Fg(S(hxP14*K@%44_^Fcrw@yyJ`r;Wi3MQ#`fm@_QifCLDX 
z4PZ=u9fxlv@oj}u2lLRRZ}-4>bnN_q76M2iq`f7TQM9~fHVHq9b~bc&=$2kQ@1?W> z>>X?G>EYSqG4uCAIj4;pvGMbFXxy^+$f=K?Q!T}*mu9%<*|AF`AsSO{t@}3`D{2#Z zq--(8=)a`-oi!5R#X9i6J~M(Cili27{PSVjoKIWI+tg-ezgN)TKB9z(B{IYV6o zQGmb~IHUx*FOP3+=I7-nzSN%Db!n^olX_VV80I>q}geV@c=#YnzFZRz?= zT^@P&Lh>3xtG-R>^gMr{KYz+(6zKmNGp8Xj74Hd_FxALTWr1zr|8?W~AshQhr(^DZ z&0Uu2rbu=CVLOer0>CrSjxH#h&)4;q ztCjH={dsnPaJ)7MS`oJF0{(It1EhunVxNzdl|1Zp!*Krf#i^`aG0n{D5`{A#;`qb* zMzM@xJ^msc5zhNWchEPQn^w_hG%@e=roz9>_39h(qWs5oas5ji+}uPj(P+7(w*j}z z$yZz+)QsSfPq&UlQ)wQvEfWr{Z=*vSbxEv0dZjnZAC~~0!LS({A7v>2+hImN z(MWICxAURwcr3FuwsmOfG{%GWVZK5|&uIBhhrWDdTnTfpBFARxmf;tCuQVCVUp`_N zb%hpM=<#_>yWKyfZ)c&wq)B&jwu)AW*DicZQnU>7U|t#1U7h+r_>T*s_*)kD`SD9! z9QDQ0Fnv}Le#M{vIhnBgK{Pc{+JMxw)QX6N$1tk$^XPFXW-n#ge5%=l|7Fg@dYR|a zdVjOH4by8^d&}ME9ZXiKe&MeL!&-eti*IkN{88a3+f4nfS{`}tLe-kOp98suKm*hp zq`7tREJlz~%;HRb;?u%umQ|tlqPOMp#H~U)l*C@>HEN;0>BSf={N~Tp#eIk-BDnA= ziTH_4`MS`l#KCr>|39o7_sh#reQVXBjKVwNDnpM0!$n{r0eN^879^A~ zKP56XN8s}-lfW=xK;QV0eUQ`~nyeuwYzoGX!!pMU;ZH(ehQLdhFLA6ApcycAg}A3n z(oRvG(JGwxPN2iAH!6CJ#T3nOatue7>sz)@y_LC0_m;cu zyC?IGqYlc~FSBw8$;RtB)WFvUN!tGQy>?rL$b=4Ig85ix{KctHWgfr$ z`puuK+Zwl$1xJ}21ShTzZ|kyG_eNI#KgPa0uF0$KTWbsLt%$7y5d>>dRF=w;Fru}J zY!MXMvJDU@LO@1>Ev={sD99$DvScL?Ajk-45fE9D03n2-qJ)ryhzSWK1l|*dciYG3 z`Mmt);5yg&o%7q{d$k@(It13*glX!ptd_7}ye#fTF2#FKg z-K)Y@I58Nz^aeT(R8xz-&^VVRd-+&O4@)r=#mm7W#zbP_`c$cbXo{v)hMd2n#d4=K zV%C(FY6IoC;cNo=0|zS42(8Ina|)k*^Uqzs9}&Dxj)u|Re6#>86-hsN=R00pF0!6O z0^-}qsBI!HJaQF@LZ02$(h$8iZo1y6+J1a*XDGhQ3g4#(cW<7cI%D8oQ;+it3L9L*{6u*k9Sz2JQhpT66hWC zQ`=~0Z7zZ4hgKB6pFsOXv&`sMzK7Z0LF^m+^Rlx0=*z*3AQ|fKLkF`N0iX?Mt?mv+ z{Qj}wYFdK+(ud~gnfrOQK%Iyzb+);Wj7BZnp!p*UztD`0#8QRlZ2XakllglImz@EB z;TSL-@V6SS+5Ug5d^bP{z6mxK{+^yYPJs%NH%l1Ytps)-ol6lq_)H`K?tU|aCEJaz76hyzHpBr>CTQ-HY6;{*R@%1OlZu2QE*#aFcvnzXp<5++#I5 zH_(G0giZn;+RlH7(MT5 zm$e=n>^isQ>a^^eeyvqk;p*hb^S%KwFI!5g-+dH)t(Od83(Bz}sE>G!{7qU)7Wm6D z9_YIWO-G-0;;ar$&Zne%ZAx{+=6(MlykGcn`D4H9sNRVPK5waSLT1bP-KTxnLI;|& z;<72tqE^spMO2kxTrDfD6nA$}_Xfr&gbPL$QB%du4)B~crcJjw_ORIJh7D=eCwTL7 
z?IQ2(_|O=1anAqb&(xLcR}{@ppmoOsJ|=4=w${rrel6>yK*mBV;q_vN+8T~oUrY0! zYsu1#_;br4k;_v8w5TQezLe3`Dn9h1Ev?PUXe_++xfSu}os5t&YGr~C167Ksy;%9r zj+?D7{?7JSC}`?(8*o7Tcs$?~SxCxN_e&~9)K?+Kfq%jMJ|;Kc>(JobWtzumFaA6D zPAX>TW-GpIxNOaxKHBmR*{>)IFr07mqDmSoc1!H4ru2>pYkCBGT}>rT&p9Nx%Wb3n z?l0HpU1<>qcdW@+(;rtP{vnB1C?lV!G}k)huiRq)ZKoWO^Y`<5tWbiEo^cL2R|$Ot z|6L{g7W@}H>uTvxTPq$j%v|5hX&1!k^OTN#MN5QX?zqc+PrWnb?MY3-L&hc%wa5YQHTJk6du26Ps0rQL(;NV7 zR^Qmhui!PKEX6S(4K>X?A^nD=$&sDZgrS=+@JiISYTGcm>r03M@3m~*jl*lUcC$ZQ zk>Cbah@O_+^4bBq{0uNYa&fkYKmHC2HQ7TLN`0Yo0Ix>TDJobh8q&FxWwOQqg==p4 zQWv-(zffKxN?CTpYf~seIM9*jz^$Z&)ey3UBdKZFI;u2uiRe zI(cimFu$>?fX$dAv>w-b&eYp>Qr4m|Q*m5W(=)z2lt$Y4pCqjgcgG#ne1AvnYr}F-?H4Y{K+m%!BB8AV*B#9z}K+~3jpJAXm<05%8-x0DRp0{EWZQ4 zolw_Qw@ecz#nfMs1!}TO$<>m_1`>A>F0`%E~O z{YmcBR5<6R2A)83wSyt@Frz=*__r2$|4MiQ zJfx;hVs-OZPOe59s`tc^`moW7#TKT-qMP7P$=pve50%sql~Rk(Vyud&FWn$xXQ2_? z(*xdetB>B;xEgj=Z0IqJnjZ&eCkhQEiH;&aJ}S}&YPX$qYGYFMT>@jmmA?3?C6=xe z{1PP`dHy!q5#F}8`KzlOeM65Ymn#%RSAd}SxAzjlM-V;;aoUY$uhXQci3OWBD#0@1 z;1h|+Pq$ZxI|^C2V(fOlu7`@Rd5>7 zs0LlG_vNewA19^O9_`{GU40=`rCcZBI zW*%9Ya6ieJ4EUmBNw?AJ!yAfLkMXD^5a=ej@XeH_%*avbFU=p{kkE7E^VcFYo0VYm z?$kZhA%Cj=Rk5pl8y;P=b_*LGf@%y{>4t1AXKFzXm-RlDKAIcV$Pv7sM4DXCZ(W<> zdQvaE;k#bdtl-{M_;#Rt)eUcR6ZRD~BY>T60yAJ6h4fKuDC(nK)4j23u|*Ex+ZJw`SeIT=jADj)2UlSrm6Z0$R~K_Y;TB-cAfbDxq{cC#N}3yHK0~y zy^(}9P#cVvu1DB3DZxIq>geISDLRoATLTl3XKJYH>p3DN_I3X2ca zd2nha8Z1KVpNMjdun}%RzIk2*iC!j)b{;axB#-rt-fYJkP}`(0Fl|ak*Mi7rPPb1P z8^+x8{&AJ1ErF#$9cs9byoXxsQP#(f^Xwif!N%g?;r^s5xM*K%Q^D|h4@p;w=RTsA z=C|T3@xH}ZYmS5E+BOQJ_XFtnUA_H?WwSJ0J=ZmycZo&-=yyqm0E#Xv?C@+8OI zH5i!5nYGN(6?LUeNrm@U$=6TJSuS4mp7Tad0n0B4jM5{(#G~Y`I#9fbV)Q6z6lql> zw-%A5hada4*ylV^Yt_zPc2SI&`l!K{mq$+jo<)+J0`_TOB{jf38*V~;w~M+oBO-aqFQsX7> zdy6=vH8Z(nbo(mx&${Y;>!WwR0yQY{v!tG%Vgp|VoDCfA=0gYSct5$C2i^kDx~|9p zzE?a2Z_6rNeRhNQrQ;`I`!5DcgTGfe0plED6|pIz1@(?(m9P8UENO@Emj8I)(;1_Z zVdpj7*_ChS9QLc1&UskZf}faMo_(T#WYNcObkWkUrJ5H~xNTu3NtSnzXYkUp52fuI z)K=ehBO_!02;3prGA(>bt(eU(Z>=@rzIPoSgN~R5=mhqIG=(i~I5FxN;jMF0{+N62 
z_re7KTw?x^Pis@@aQyoFAAGlb@ldpsm}4$0P@9oUe*$lB*`T4X(S$t;9A!4sAa!g? z-Q~=xv`K3ML1a5_VkKwiX7`nv=DUc+Q)+dVuX(YMf z115VwBmax015}#UVIFK?X04!e#rv)h`}4&>@k$j#p}gL4P+cF(&n58v(BjM&_iU(1 z(oHpwC~{CM7xGHp}3K86w~~liv$Dl^Y4Z!&?r~b#P#JdPO&mkZt#GKZ^IDHQ~3BS zf{z#o5MMv|xia0xwR^OvEmrJTDnZh9>F0=+>oH>sS%3j;(>?>=9Oi`&%zS=kc&YUK zK&P1bR0~-#EdVa8RXF$7c&mQ&vWKf>;Cb-eNUPBy-WiHfiEAahG&Dy1F7RVB@aex) z)c|&9P^8|81ix$LO6U5#pxM8?D!D zia2UO;pO%OY+;k3bu`9`55~P2Vv=;wnVIZ9{(w~_*eU#LNcmEO@ye?=+$g?E$Ns#= z!0+fG0GkPfAV`YTTaf@H`78o|2HPHPH9A5K1E@l`D+9|ylUJYIU~_ER_0JL)u_tq- zk|{4gN{QTlJsI^$)2dK6#|&VLwl*E1#%fRXe8!JDy7Q-{&n#;oR?YWE7DsCpn@Rh` zm|8o8W&W;%*dgOf4QA`R+sy;K0*0pmkIUt3^@_`Zk8(nkny|;L$O{${L)@j#V~UH97i6C z&v6fU&og%r9@gtM_ce?_=hb2!f;y!$vlEAOY6iZ^NJ(hk>ee=F*M9TgTLVgpOQJ}P ziG~-uvH7|W0QrHfMru?tIJ8FtGi%ktD~~~w+LP(G;uSc7AOsmD=puAsBH(7~CV@w= ztxalFn)cM_K)@Q($`$uL|2?rSpk5J3ue;x1z4QY>gtM08X`Dtxn$<+E#rf&!5LW~dFQPVw=&%dEoVIRa0eT{^0|O2)HaxZsqkGy=*32m zF^vv--c3bD@QZTJ1?~XMV;zTd#apS0`C7xn)sYptroRsUwQohfeq_nZkD*7OI!}Wv zt$E-!@JL@A@(MmsHSBs=4RxKiTf661!&xUIpo5Kvi?7}z`U=C)cl-PjNdO3RS*Ck0@+?j%<4ntS7z^}nZ?c$_*Pp4#MEt{9};Y ziq3LYEn(0Uqe?g;{lo&@-^`M#%UYX`@$OTasxU*6xuE6}RO2YI`!XI5fVcogkA10S znp+7@ZXQ(L3jz%Imj&Ye6|C&gqb_4Lo|FJEwK+Hw7`^F8=s(`pG;8S6D5;ZaY!G$m0H+5(Kk%$^*Ov+2YzK8B{c_%UJ#;wi%rVt zvoy69@!I)5uX^%Uj%YqOdWJWyzbpf)p^BaATL+hHZK(ob*I#lC=->n^xhU3kZ4fj4k8(%ECykWyf+R_N`IZ3fweapaHu(Uyn z!gbT6eDBsKEo%GV&J_sZ*LL{59r!cit^D8HOC1INj&4=l&j@&!Fj8^BJ>tz_lcda@ zLgG-7w{W{wq6;4_q{mfl z{<%O0MN0gXq*P=>H)ac_@Agts}lJ?wq#L4g@!e88eB!mJqyq! 
z?;^X!+Cxi*kr*Y5$4r9~Nb6S{uE>Ive_mj(`qu+jq3N{x?+@d@p;Bw3bk}v(*Rzb6@ZmbbS?>Owl5ujkPlmD9M!B~ zp;fbT!?(6%i!h@M1y69Sp>fCnyaq;vM<1Bk`$i83KZ_sz-}Y$Nvi0}>h`Zwsz_Ql` zrStt@OrTS$^M(?JNIJ^U@EKt23AzYy6j9ey++2FD>M4uLTit#};g$@GrR8*X^vKuxu7EDl6;5EA* z)o)gEBgz=R`^%!E%%uW$d~`OQS!Z#F<}*$@ggx9HQOWM+b^K{$$C})@WR5F8SF86W zeu)TBj>$cHYcFoLTAgN&NL+{$948s#VZ;%XE-kwueb|_Pr)Wp&}}zOM7L}NeY%<|(5E^X zvC24_Zn08iBN$mSIziPXn!83?GXZHRbkoJqOinEXXuw z-Ne;diE=H)pJB_pduyaqw8}r9V1;N(Q6vLW z<;#$U`Q*7^J~Tz)GMI(2p#)Pgk zb~;b`TFjo%2r~f8AJ;FvJk2f=PSxZ2xnr16p?G>ye5tAUzw7e1UIDL{Pe9nm?BCbw=84rTzFC*MFA|MP8S}{@XRYiB&?^QQT$AZO5Q9g1h4vL; z!lI{db01!CcyMqwS`BvZ=EB=+7ocDHNCkiIJEXX6Wlw{kF6&v3#V`hEjGBipSJF^hr^-Xo-xi^_!sTGCbIX>u8ohhN!e zLHL!(L$JYh1F%bMyL!E4B5_8I(L>=AYKaAs$pD$Ah#TmZ4PD|2`(T;SulSdg3-h?A zc@-pk@qzZcD~dn>NJ3t}B)`cvcGI(@S9mE3Z^aeJKck|rj9GK%a9-CljyP}9@8fCL zk1?(VY;YYBILCD7JX)$c?9Y5fdd^iSNdx{_q`qjKZ8c z(!Gg#8o(Z}tG-)!MS);*)n-G4C?*k_qi_!3EsaY5rVP!uWJp#VghB>n`H9bpR76u$ z_INBh8bAbq!8+am#i$W;V%L+cn{OO=0GGkR<5rIKpXTf%u0OEi)NXP&Wwt&j9-c&| zXf2G)Lq|;+^Bs#CbRYer&|D0+>oODd;t1&MnzrwT6=SrA+?D zdX#UkvCgNzfV@Smr%VJ6HS=2$G3FesuOwK|UYRBGHagX`rR|-JmZ}wmZhyrcc!DllE5_dei5d5xl*G@@;%e@#%+c5!rH5s7qlpKaByxnQXaFohun z)6BVbOLC?v_}36W_BT28V-3Oq?uH8v)tQUucgK4iwQ9~>nzFqnm}|3!?Xgnu0N@B2 zsQLzW507pSLhss(v69b{I`H37biT-sveN*b#9$}6H3DnVwhZ;$947X_`XX_D`t}$raDgiP+kBFEU3wPCp{zY%Eu}7z;CbvIG%{7TKMp!n7Q8@v_ zx#3RTuV{Dp5c}T9*66rsMKZovOZs4)p=x&w7|A($?wAy%I>BQP4c3%xn>h)y3~Z!z zRC`$#>M0}}HnY6p5tB9RisODZHQHCZ;h7CN3v}MlH|AGg9q6z;Xx0|=Sb=n&#uW~r zcgF_h>}c2O*E2*Gv`gtD#Z2yXh&M_xB4C1r1ahn;^rbnay2pRdoUj0qu?tH_Tmt@kOF@13x;&VjI_YT zA|9u92X;iQ^gv4#6e0DApp72$%F?i2r0#n+r(CHa7gBHef?J~p&6{t`Gw)TJBsZq4 zGiwqECtW+#KY9{qJ<6mQV_4r2^p&lGdA!w)Jfd(e=gr{lQiwJ`wu0!(0-K5a}geKR%U235d;(m zQ;f%`BwWi2G+{Tg?4ojlUIFf?;vp??5X3kfpj23wlJv1=<8My{+G0ui%ch-6^mgrX zZaAZ8 z>J(O16J&qsVld~@KC^-~Q{JE^)p?`?QY%)>7YHGLq^^tYJUv! 
zmzn0OhZXc5$FMGr>L_!K@T^ElQ??CKvv}EU20bR zYIx~bnJt}w+diB$)}G)nnkY$GR6&nO>-U-hkG0d5**T$M;4KuCow)R5tK5wP_u!KM zJLv~^;b`U!^{|9xBW65Sgt5u#=gwIbeh8Tz&m2?ACWI9il6Ja7BK#oEKSly{T63EfQ%$}lyY{kUe{uHZ&&sQ=ze*rF-;gTO#8w{Ye zc=|!CFkGgg&yc&r^otO#QB%l%gO}fhH=AjcBhq4iB|M6ZzKsy=|W^W;SiNDB`@I6Qi5)<^i_Zw3>q51zqWgxZd-b zW7`rtavV!6VSCvkCSjaRk1q9dp_$PM(}i_mOZ{{-HpW)a1axc4eHT}6LvakSk%sIB-g#j2Y>Om7WJ(d!D6QBzU zA2fk+<~Rs~gVl1TOb9Ouwq{1fRyj~}{kB<;{ozWGwbkT^PoNF}?0danOsL~i|d3h_GxZ{DrMhEFk5{?O8c6|>UuT|%CJZf@8-X8PmC z%fC`|_EB$sDGi|kT#aBKI>ZHwH#*%Mo?=*JQb2}Bh`9O5ekpKgO*LnsJkQW{+&Qb0 zY7pg`{6vdjK??(>#;w#w*VQbyyd3ecy3so^(Uf7qn@54swDMlPc%XMIr0R2c)YQA5 z_CAOoF+j{wJ$AFW6Bd9skF4G--Es}ulm*iEaGQF%y#masWJnhJXAod4*I0urIj*i zM+zTv6q_2^IAkPqq6!&JOe<}Xyz=}2Bf4JdOX(e#et~mXX~U8h%aq(@DaUiTd?`!! z1piV5i#eNIqLmwqt}P(@2ZuYhu~ka7nycD*OMx+55YD+;bzh4jT zCMh8C%ExVn*spXcJbw(XQ}pEMlt~2r>(B?=mq;m=LuSzG2dERWAKuq;Gt5Yu3CW@r z{kTgw??0Yf10vWk#O{r%75<`Lyy6#~2M2n@<(D~{o9D7-&ex-KuC}=7%ID)Td3a~E zTan)noNF1)SW*gRyEH^`+9>cU_%7z*ZW7UF3y^mmW|&i^omiS3awf)cw64?eXiO4n z++KXty0C0&2?X91_oSR<28CO&O%Ri?rY_IAvzp+K z{SKkWlo>v}rc8xHrg08veDq7-38m1X%ezR zM0g?!N83W(-<4+XkYh&exM%2-4YDkXg~X^%h32WxiprPvhMwE2KI!Squhb}zyaRx# zdEdFw=Oh+3KEhaAE}B6DwsK-F(k1^uEoPoDrS3FFbGa6m} zqrzEOV?x#j!z9|qjMsknhdK&b&=R>TE<=T~Ta zg=jpL)gKu@k?A5g;5;D(I8l3Mp^iCDHYuRX0Wwqn)G-Q1A0(5_ZZ0$;GNXD-Fq%~r zWm1mR+|r(-d>_x0!muHPuRK+2|$h`^$LFHzIR=eeo^GwvZSk%!d*M9rDSiw@_ zkf>1AdxM6lPqwPu_~t=5pAx$A`M7^aGuA53(wiy4-jA%Y6H>(dF+X`Sclb%XT)ma< zI#~<%44Z)6@t>Gngjl#VXu)Gpig`T^fUX!6-o8W}xs?C*Qvm`s1UCb^21h6%4K|)A zj`t#1Qb~IT?AsQIZJ(S3*?FqXEm{~vaV;?_i^iI*_{^PGX>S$-LVql(m4;pe--iYD zKgrvGL)&hnE>WQg=z#w1ABa8*p2McfM2kWXdF6L2@v8=Jp_1L3@SQqyF#vISqv}I{ zLN8wK|I|0}5qksg2}Y-=-iVuoLUO)8#L_N_;L_YC8dVeU2l9g~fpr4fd;2(yH&042 z3VxYmV>f!d-xKnfcpSJlt_2A1;m+YhHKMv{Ezy|ZqC?v|po6%jTGBU>-6%%%Vx(hD zWf>X9h}1t^Sle^FCoR1PG8;$pP{R-nw1I9fv>ZE_T#f7Br+?946jT&FrhvU=9Pn#RgEHs^kWpkct zMH`Do5ScEH8Zbef83#kBsdVQtXGOjqnckIR6;#-1)$A@?qREW7m?_!PE^&R{POxi_@VhS%&w$5N~(c%q&p@EeE8XmlnS_<%~IlgsH>?o0C%A1 
zQ*|YsHQ2bGB8%CeD+r2$2o=$lW3O&|(-B!Y^Jy-59nw1r zXaMT@i0B5O^RSrCjCZ^D>Fb<}cnE3Su*)3RR7BnUMTfXNWba`u!a*JS@Y4ixwf)g9 zZ35B2*NoS+Mk7IjdUoQe&WMhFPoJ-u~QLMRadB4(5BIF%*3Y6>_8lAUM%uW7F|W zJ$pEzhc3305rrotUM5K4-VHs5bZyvm4&UVSf6%riQVljzgV>8Tsk(BncSj;COjNB# z5%T>;#z$4JM#{?P6Bpw@>|a9mn*u;~y6*+_DPT=3T$^Dk<+@_WhJmo$^Fg!w8t@1p zF{;o4cLGL}331JooVea?BI}v6B9|Re;Q?^fUZ6tSDXP0e(P-Ld6?-jUd1^1^o9r^( z@ugPooAVAYKedsROgw(9nY*d2f>Q{>832mvaf3MoI8?2qWy_V{ zJQg` zWx$VGfgX{qxXmXBa4@mQeRkfV|D96QE>F@R77wnC0FoqkATdi7)`E>5L&>wiLjfp# z3E$v6{uulFwf*OdEQtv*#FoniP%-5|@Bj-yyEE@B$Z(1~8jq+cu9!wPi2D}zU#wR* zFoC*YO*pqM3y8G=i^68_^^2N~EgAB!}#n}~tyjzb#f2di=voYc2naV*S*|rhoZqct- zq>J+I@;?2uFYkEVo&I$bz|XDEfvc<}c>Yx)k*OnE2!W=o_TxZYngRD(l@bl~j&(I= zynmRW)6@Vq5oeq1b>)Yf1ncIWhgfo;5kS6m?1^1aO?L7Tn}HQq6w;!eNJyD!*d0m* z8WJ#`y_3sy^&A21*lGWjD@tQHXk5#o`Pk_dkb|t5Z>wZtQ4`2c9yb)##E#ChEgxA>3(Xa4Ez%CUbJ0v z9OXKLf!+z)#VP2iU?d4XtHn5=7Om!7o^}W*kxi)zJAi{t|0Q@24yHQ5OSC*tq-ZC2 z?v}2&IE*4Z5z7>9SNqqL%D=t=(=7IT12$~l??|p<{qd!g++(}rak}3oPE5|$PrjWT zCN6#p;)6?q#Z|rwun)Yc?z}zcrW@uoS=L+5)}be&sGQ{$mBi>e$_c*&vd+K`FLWF! 
z_h<=l1oAXQR<-bP2=r4p^>2P>+AgpN%XOp`v$TEV)0gvCAm*4(g-@bd` ztwhB>9Kw)l`9*sjQNXzDmhNXe;V-Y7NWK_>b;H)wjGhSGpiT${A$+{BAuE>cCm$ay z`=pecBX%uJ2V^H}-f5;~qx{isW4cGW(||2===Z7zj+xI8yRw#eT$>5Glqs24RtvOV zkv$sUR7(O;LasN$8y2n(XB;HJeIp|udw=#w-n2+7HmNrJnmkuXZMrAEv!QCs>t+(j zapLVyN1>1u7xw%1JH$k(mY&+6MsMJ7n)I?PY7J8rR6IvNO}*_bY1w*ZqbDOAkN%=M z^{J6Ewe5k8lOqdX=Xq%C6 z=AY@i0wxR$DSu{-p_YCtB5nE^ShNm>@k<5C)O&_Sn4rZe+aK7v34B%2jqllMhDEO6 z*bX=k^@4%6h_2UEFoUFfF?R2>uU-yv{N=tVb6e&#nKdOrS_yu>1!7fF55zuZhwe5q zcn#JhDhw=01IBwVFXC5{sRWV@X&HOrlBw~df{o7uIht}^&t0K#ug{8Zl=}V9XxK3( z9{8ANDGEKL<2GVHBxJI)O6-SB`hYmP2fV_M``L_v8uI+Fp>GRoa~HYE^o@_IZ>3m< zw$FOB(m_7P?Ve(sX)aJBXhC;Y5_2xoHdDD}DcwfQrGJWE6y=^vl#PhTD)t>nq!x;Oxr4rvN2j% zN#~{MyEyr@yMB6y2?63%)L=%qOAb}6RqRogcJ*xcp}8_Zp?#PcX%EOXmx_&Ak)~@- zdHIJrPa?G_2u`}_x-NnYBW~~3}$K7H?(0%iWvnCp|R&>MPbX z*}5~~#C&jp7Anr-6olYnW?G9&0~0>jjMb5A57RmaZ;D1+H&qdF=aUGRNTz%h&qOv^ z2{>G0SCTXcadjAs9c4e5d|rVKztHgnJL-*St0`9p$$jGBC#T`D8ohE0m(psZc)xH5 z-ugA#*H$fa!+d1mxTTpMU>AM{b^<Xu)>gYi>Rg(3yhz_DM3ql4Q#9NLSpJ2X>gG zt_r*@kDJvZ&bael)P`Vn$M!1t7nLW@w%#bzU0yet=h8bJCgtZuo;6L`64_l|Zc0|H zYS%3eiJ$$2*NBRr81}l#4<)+wJEQ+O5Um z?V-+Ix@jk|LWm|B?B;1(9U+7Q_KX+Xzd7$F0~^QA;N}IruKK8Y5{dG@#`G+Dm-FS% zmkK%&L017hq5;4e6iLPq_g{i^>?KR@v_D{B6=0U@^#DW(*euyO&qlEah6VAE%Z0H^ z{M|q5hd!;~_>{ItUjcx(*YQZP&Y8Y59`Q~W&77>C5A1u=tPC2@{?Q=lQiz+l>oS{e zjY&iAz}_dgHcw>C^C56^yGzXrM4qtC$6t_H0F#>>IyzQ+n^`BC<`09kGAvbm5*hv6 z2ZQYZAiLBuDvdIcrSpWTEm2&}^h#vg!IrjGp?XclvYV$4RZhV>w_yAV!jDc195csW z4@T)79t+&nTszj4=5wSwcD|OWhiKJG*&>Kgo@)w7?lr=|idCAZc-P>Z^(3qJ=xQS? zJR(xuNh~b^VyBP`dR-l&iWJmQP>KYlgSIEixR~8@gm(*wn<7iGhwv)S;}7Q0quLXb zpLw2liumUM+E0~p=Aa4j+5nr6G3P;&Kr7bYdpgsKi8Tc-76GQ+cld|)j&mDsYWFa% z7YJl|!&DPWREGlD5r+EX)MUrL@o3)S!IZlYX)AJXbrTjp&3E?dm9;xyK?_E^)oLow zIYKSv<_M38hTFyYy38f(w6`#E077aD+mkX_dkSA!1i}zES58O)S^8MuS!C|@L{jJC z!*OS^iBjbhjDDyaU)&PY=sjAkEn3_Dh!{6VX^v zz*1{;;sTYcAMh!>a1L2x4TDH5n8R%$KDR;Th~$Va?8T}yRTXBcI~JI7_D4oFXR5Zt zVG|r(VE<}+o^9 z)aqf%`Ejh43$dWN8DMfzB%-6aBORbJtN!;4si=8zd#nxt)O47OENL1LzsDCLbU%h? 
z)IHP8?ANJAy+@Uo2-9G*=%*B!2(y@{jsAr{F6?qti#fPWPY) z&;g1{&gapvY`=40{?=6YH?9ZtFf8?-64+)+PH!t8*Tx08wyS8roV7R5xkG1_(;1$m zdpog|-j3=Q1`xVu#B_GlO>>>S7I*mYckc;R{9YH`(tu%2Wj5ltl_i;rz`=Ig9hh(d z1+H-5YysrsV{8CeeW97Y8Qy4R>u^b2C+n!LJus${bu-9T;M8Jn4ET#o4D}nDE7y`9 zc8JXi2()^!c!xf%wiF4Y7=54EU6=>L5@aAkO(;G_1#grNItlT?Lp-(3P+-x8`DxT z-l<-K?c1a6xH)~Q&bj&uNRSg6bpPd_c{0v+6b)IKW)}RD z|7uPJ`om8r`<N+4qkb=&Zu_F6xjgGrv~s3)_K%csN`#E9-`zP4$e%VtI60~Q!F zvY!1`EI|c()@xP$075>3imvHI8$ki*!@*krN5{LFV*S96jGm_ z_#XRx>#XJbboZrF&2o+uO!f%cs;yp^4)<#owUh<0v&_SIjRyx` zsipr#$E;<4gTY-~6!=E=Kf1X!B1H*))NSlOZn}}ai@~vC&hI8&$)^%&@2YA)a3jgc zK@!{WYM!MZFSEdVR=B4i$)tP}A!Ra1T$m*dz&?!}6n`cx9S9T$~AcBH0%Z2WwQ-8RBx9e7pxL;Zw#$-GojDa=C21U!@BN z2CZYo2S?!Oiq2y`JYgvo50~VRhgiHbE=t0WfhIb@EZs_8Yz?&w?8S0ye)~3=>S*ry zzn#~}Qw`G>vsRAn_L=^OgNReM4){!$>Yg0^1PKbX>s9C@y1|x~oZK{212D@zk5*bs zGo>Gem~ZcmM?4Qo%(p-C_YS2@waybZ?Bx}fPUCCl3h>M7p1%1 z;me*ix8g;JG6&dx>0%G}F^YDS5^QM)B94YyBDZEv!5Svp3A|ktr^aH|g`#ygk4W3& z4FSHuXe!>W1+xrnA_o5IX=;&M8X5w}BwIM(7mg=aFClX#{T$BzH^Z6Pp#;(ARcNzof(CY*EF3drnfHZ z^xij<^4*1J$K>exVEeE{zE&~YTVC8+T1`Cue)n0TLF(UQ>@uG?51bBWdn=E{OVx>9$DR$d^hlZNrnNPjXTK!TdtxkxR;+hss9Wa)7f-?1aSBK6=PyKX4&ttEqYHCMR zSyq}dC*p!|D4DNfLF#C>xK=+|zMc`f1g4Qf5TTWYwbBOjwh`M7nIwhOpYVQ}Z~&O~ zd(*#qdXFqtYPin_=|JP3&GH=Y(Bf5*u~ zCIBB>JYUiKb#s^+HkrK`LOBkQL6?@9qR0w=3UY}5-1I<&HXT>@Tg0E&I>i_sJl+eI0xvxYI?F+Xx63?VyFs|xj+eMeFM*X0I>+?T?LxELsGkz4PRyqr|Bta9* zy@kJTbG>>>x6e->qMm(CyYEvC@tYU)pWhC*_J6-!W77WiEF{4cKzz-8KiYdQB_El8 zupV2fQf?#Q4-3xn2l^{5cS8ng0|mo6M}r)Rk9MKA(?}mo7MkK%l~!kj72;s_Z~N&{ zm*E>j2mSda(nfnw--0QEVO6B}Eh97kG^3dJi9bc!)$j9kcG+@u8v&m;@z-Iy?P}u% z!{4-0EkEpMoGy9sOUA^L7xOdC!_j{h>fH8WPI#L*6b;WJ?@WZ7ILK^&G>jWz`6E)suW#P?3wzm>xUv^#cDT}_n2W3J1zp>asYB{Wi4H|$ULpPW zdx-F$ho`v12??8voGhxNa(!iX1zx*9+j!)d_2h{o$C`xLkh|u~F8Zhh!%GL~L4L`# zNmEO0CHQ#axWym7k^R7YhKhUpDla%1RcMR!2a}VIna!-9~zu#9w0B6%A`+8pl+_?Qu8_Yq6 zra%jsZfxD_>qaT*hl1r(c8`lL>FZz;W3{P#UC#pyq^ElFS<~(4!Q?RWzwJi(^KSlR zHypa%1|5!FnJlY~>&g2hp1%v`CP?N8Jo==ysm=|T|4`f@R$@e-*eT7{(6D9J_(I-- 
zN5`Rhmu)!pad~i8m^_b_yJ|M8*}OzH|0~_jm-1k6!(fpItOW z@W{KN7P&zAxCx9>Ua%D~w;>C`P3`JXG?N(MgPU3ixFONUfItqeuyMnn_wfgD@ign( zw~x15-<}-}JV4!UW!ff>P3mqhqcmbGk62tAc4{Xc@0di^ur4ca*~{QeUx3Ta`|qcp zkL$V3p?^9QHGanN3#^lXadtu4a8GxK1_~BQZzWHHrqw69$fb;r;C#q4&4R+kO3!T# zE61}JT$i(P`yb-#Lf!I}*`w^?GL);JKN`(?w@}@ct$y8-fpcNQ8MLG&h0)M7Q{u## zPHF211?RSxf6_%NvcXY)=yyX!wDUO8qA690;uP@GREK|GOW^=LFpIBy_CQvmAs3$| z&`TU}%CUjrAi15Cj0THj`KpNE;z(qb&hpu4}mgRKiFeWI~=SR z*$%W_BQ)9EofyH>9wo3J?O2iCQ$Tt#k7pBktFw@-$DAio;>b@PJ=%Qz;tJUy(X~!C zN>B{Q-s`N|q#LbD={PU*M_FgsX02N;gkA@7kZEVymN^66(s;%; z54w<>6fu_0$hgsaQM~u1>}eb6CR6^wX!I*d*46 zP(uYSrM->lqc0V1ApdUe5{}@MhM1{xsS#Diu_i8#o8r^5+@k|*>c<~#sZ0o#8~4yy z(UtnMNSTH^U(g33sGzyAsn|@G3&+j%o>@{pub_8ppU)kAhZTw;vJ5fBZhsfR(}PO& z7Sx;dPp*?NGLEV0f|16DAul&y409e&`cWOSclxWLbL7o^em9%;)p<{~o5t6nlQC3? zc2Q+6ZrG1dt?eU<<3q>Rk5~V3*Y3eaXFXrqre>4Ow}Z4YV~p6H$O5ywQQ7nOh$y5pZMVW_(NIYH?g2^O?f-aN&rVOrRhyQlPS!ftR ztu!+g*`hMRTQe2&0Pd)j2bQHYF`4JH&JDsK_Q3%lfbbueZl1blXf*y{E%QT#Tc~} z&FHz$PYY1HoD99AR}qCQWq;QAu9H1>yb|!K$_F%jc00|Jd>DUkC}q@h^Clbyg10D=KikrfE^>OUp zKSHtj_%)<=Kjv%Zp>6+@+4T-B^5=M|eB8Mf{Vz^6Ke^mn$0bc81k6VK7D-|Y(l@V+ zmy;h8K+*W=cT4Gvp;_Szv0|7(ciWJ_mNLZAl?XB|-%Cd+>N!D=757Hm zJqU1-*cwWW_3CUnAl?9pwk3}uO#ju5pf@n>1aWXU*)D;2twAwx+{TAro?YFc>z~8> z(_1*Iqg&sE_xJ2SUgBXop8x2Gw$t-Ks<0vt>FPn-GoGmpdi#GXPt)KM7;#8I zaE%t7K=%+UhfCnbP|ON=4;)()8A4;2zNara1p=867flg;7Q(p`h;{djlz5j6JJ(v- z7cej)?-Jl6yKLc3)xk&$55=KSRRJ*MaZl?x9P^K6{;e{iU37h)I;@pW%tDy4 zv`I6z#3F;aqCfuIv5?T2wUErS1}Xtg^tW91 z%6s#^%DHFvG1tntzgf~bX#8^WEs%R}+C1k~AlJ=&Otx(`Nq+66tDu_tMTm9az zY8K<^N=DVWd{wAdq)oM3YjH=3HspN1f1mcENqEJlJYoX`gl90V<8X!}S3Avn8Nb=_ zY7*$gSc;}W5+l+?*DBibaI5$-1XLr5p(VuN7aQUrcR&4Dr<_1FgisBU$)s*b9LSOu zYrC+xiLf*1nez(8B*!Q2lh?DzNH>s0#uH*SXH5Bmef!A6f97poG_SCwYptR;e9>o0 z5||YW)+e%WpB81V!RsWNN4*sb0z~57BUKd!6_aBd*LFLRwV@kgz}MOm;RZ!8a0jw? 
z`n7K+_jizJec_ee&kkV6YN>g>+6x679;3?}rwD|@yPx^?b)34bun*m#Ha zyKrWw8ArGYzGjt+W4oXHWOhA2UeW++Ok!;$3&NKS!+{nT?F{_;c?&h*?Z+OSy807m zMSc4hC2aNT+i++h9b;0bIBs7F+)}SDq-qDmjPn55v;{>yGeSCu|Gqt!>D8bP@})z+ zDik%x&O#SfJ)IOBtJ3&px3fig;j?Jk-mXIJNI@|p5?*Y|?tn4_^_m{rJK5CRdWC8dM682G z_?U&uhTr~BerEfJHQ~N3)8>7S=$paYdw;A}q_>?-!kFQO))8kji!+$NxP*{IIC1!y z(B<1O_W zmO-qKF=?bY$APW^`Sq$HWPmLS%1l6wDr`}WieNaI6_2q+YZY@kG02{UWytP>bOPRO z=2s5ZrqA9#EM0oV)hEF0b<~*CjcoLEY@8vZpvnyMJmXT`64@+v4~WvWA`QLnr@uWL z-~aRbhZ=zaW#J0!^0)pxbnBLZ`ALg*Cz|1NhodU>p^pq|zv~wVC$g&;;A1pkX9(Lk zYlA~=J*Fjn zl8k;pp$-={F*F3av8hSm-H<) zGvhxjxo>ASn%}}E7Yl)O&@~kZN_&x(0D%M<%Wi>UTYQp3hdv>@`|S>__j(Y zCnY5vZP#6J6o`4m3(h3r`H6Lz0eC*U^VWap3(k}dmuj5`iCb1aDj7oo{jG|_R4Hp& z(TZFK++3lYus(`e^+W(dHNFuH?Q6vdSqhD=c~A*{z&jn8BMhe)1jo5{ zKljXpfpqFb2I2*_Pk$s5jT0$P_P^eD>PZF|A)7uuo>*eiT7xe2@40Q%cp?z0aR|8D zsTZzxzt7IauLQ2cGnE_<#m7z^?+D8Q2$gz^!_}cF94xvpg!NM!kP20o0z8xRN~9Wq z76+kFmHwKKcu=(zo132*pA=ng__Ab}T5xoBaIeYF?68Mx8EG1#e?9cQ!&n@xBq4M2 zO56is*XI-?^O&C@4t(Y1uFq*h!X`$N-8H*3`bW=*l!~pd!-gCz+lz}QHJw>)D&Y=u zM4-Wx+sir3X&m;b>WjEA-L(Gk+aE^Gyzng<&g^|>bXf99Gc%E}RrN-36u16k3ynpb zT~NSlICeIZxr8{HQO5`eKmb6QkrzKu-L}T+z?fb>HXL9;*@!nezt3 z2=gJNZc|8WRcL(b$Mg{xI40kb(2rdiSlb zx0!%YRX`}0jLb|{*gV(2l!P1+Pgp4)^@@MIiM9`FP*Qyrn3S>ZX90nN1XoWhegBdp zP-vOPI}HG&T6dBy4ip?u5ege4CAh}lPEH*}N%%~3QBUCuAG19A=r(~4ecC}B^PxcD z+k4yUso&dSLrlN{0O{_NUsQOl`Ttd0*)*qhhFmbGeAsU3_M`hs(;@VcY4Zr*~`cm=F`TX7I%O9yNrd;OK8sW`J`y7pjjvPh?!x9%FMOhS>r z9?hs?_!)W=sg!t0kyE+I1G0Ilckxz)F54#UU1886SG@p4HitR3PLcjVCA1RH9Neys zZ2>xZ>iwj}9TXGfBfc%yI-YmT*^`{i;x{&82~Csjrc!Q3&!fHn9O3`g2TES4c>6-C z?z8V@3079i{o27%t*J#@7J9S}jNZ4iYp9QrE~qQTne~87zQlwXuD2$8i&AKrc|DM~ zH@{eWt#r5&WN|O$veH(oChXBq=$U-$%38i0qt$}$uV z0m*ryW2+lw5|+;npoF%Ke0L#v3j^RyXpX zHFr(8D~gPwR-Cz8@nn4nx58ULy3Psaaf^A|>tnoHQUk!Ao%EBSTHk)tVZ))6Munj$ z1O`;axGLRa;-T$oCIEM0tLAdv-N>{CP%t&4UbXGM2O|!m=bJj`gxNmlxxP*X-+lS> zUS)F62+fpdBr2ahr2>5ad||@3UvgZ*l!KX*^MPr~F}=Uf4&f&bizfn9B*%W65U7k5 zM~M<_p;ib=&6OcoL99O+opI8-6QCm(R|NmawMAVc5XgAtHhhWKMt(BCC1>Or`f(!r9rN2$!+pGXtC4>``f 
zR+8lTgEfYiOBGy#rqYyaq`(x~qSq}^DKKIDvHh&@)w!y7U+-82(if~ZmDs=}0uXbW zLf;J+XiKDGPc{hDXp5oWVLP~)G5X|mTveHJQs|Q3BrV@0tkhm6cF@cC#Dsy3-um6q zH4HQeTC1L#ySAEwF_9O`5+nO`PC@XytZUJrs;R6q;YH%f#?ra85G&!d%8RpK_8kuZ z?_#_OE+od*v=Qc@v$Kz>^Iz{gRuN*435MUkQ-HP$ zu`U%)Sf57YnY;bGC?L(~FsIFhsHm9C++83-Z}0V#IAu6$iQfFi4a}~kPFA6%aPMY0 zi`-kMJ172e8rF#z#j0k;m66Gi7vlSvuoK0TKqc>8G=O>N()o5Oj!`zbJZ3__J<5( z$=#4LsgAPp(b^}(pzXcy{D-$)ORTBq-{%uq1r21hAdxC$_~hnq8aQr=Hyj1d7LcR5 z{2cCmb7Jp;d;^^`l+nV!loh)bv>|C%=QL;B3JT;Cz+P50J|EuwuW=a|hM(E3Zvrjj zWd6E`8K!wqmmnft)_jv7-|^+l&a2K4z>Z9l-U#twa~mwbM28Gf(#<=fz7J8MSU?4N;Uoz~Sx+G-+^7xRs9*!fP4Zm^siL zJHwY{%W0N*~Da8zo{UK;4QOV4HAMooL!T?=Ij> z4(JeWWiS2h+F{TD4Sj>Z)i!tDGIo=lVD{7R$>}z4gF+19Lt9z$N_B%y=+(Jq^H+H^ zDrIeR%}-sueC;1)s#|4FtgyD-@$?0CBEk`A1@u5u(vxd9Y zSjqKljjS=@$q2ImtxCD=oxZBI3kRoU@!;~@c2lJ+`AH#u!kMf6=d|b#COdU+K+AR} zsr`(<8rwx^G|GnFpvCP!3*K2f#Rt)iZe?XQ7u}G3cK>)@sIfcLpI?;_;e0j z9O=5Zwfc}i^Qf;v^VEDgK08=OjcmZ6&0R{gWZhV zn7+WQ4gC#~ch|C_TZ?s?U0LJiFI8zV)D{YS(7F+)yW-LMbBU>vVHNCR7kNKnxH~H& zSkXOQny5I8Q{1YT*i$?X+9VqpYqb&8x z^=XzrKhOeMX<&r)iWq>dL8o#c%7QGs8F>Q=bB$`<$iGDey`AW%Ot%8Y=QPbWIn#|Q z+$PtjisnSPv|>p;20a72a2w$;Y4Uj0VA;cBcU*vRZug4PDIv za;=e(G?uZQ!wa7qBuD(FLs#P6o={(_QD5;POoLmX;uyoT<~OO>LIKjvS=0FPB?j5& z*}3d9v#D5MO|?sYfVlZiCOwmKvBi&tyr-Y4FQ0V}X$b6biN>eQLSwD`ID0R6bqFVj zN-R;hR18z&tx7AHtBy*oYb%T!n7$^B!@Vux-B3qOg_5yJ9=EW+uFC-riFn?9T99Ic zHm&Ik!5Vgw86mz>o_?^T32pU%9=T8?}g1nZtSfwJ=Lf?ZXcXo?48eA{Oi21A>^qu`fA0L9p@bYoiZc2+0rvt$v=uVh!L@@Jf zbtJWdS1<90$9J0A_j5VM!i(qa{>TI#BACcSIxp6=2ZC1szZ#51M3`mHy6ArcmE<+u zmWS_s?lpAS(9P*259B(YkT-v!UJ3kVU|rh6w-yJh^*K3gix4a%zpIgl6yl;r38Kcml zktuu0R(-=Hk)p|sG57B`HqDhc$1exQzlAZi4T|iE8-QmPf|YGutuH5s5hG@qh7y}7 zhM|}|SrS%IBPNf^GY4ddf(lvYvv`NkTk7JYjQ2CRt4Qf>rF{|PX@`I9{Hj~w>Vhad z9bi7u{IzI-dt$dH$q`+5e|T*-qeKJ7nRYY_odXh^)&RNsWt@7;8=^&(cH;jq@8AUW zFtVa|5S*Y6P>L%boJf;hP3x;+p8)tF)+F@YLh9t=iKjb0wne#Bijlp#Nr0Is>mFU~ z?l6T5!Pd}khHvaE_9SC!kKHp&Ts9n`jkt}x!4{N?IXRnO$tkjnzGiDO1ln{j;6BV5 z!nbU`=Z;mCV#;Gf+pnGlO!bj9`h`srdS-3wWAd8oZ)Ju{yPFf*BGBzM>5N7jExAcd 
zR*-%u^7k!)zvKn#>y{GJ0Ru;S+XnT`{Eyk3QtXXQ&K2qb7pK06- zPT|~(EyEc?LAK@#Cwytxo!?t@n5Ow;B`&H*!6{QY6f+iexZKO4yK5wg-s!2n2q(4S zW(bqd84BJ$VoZ|!1jmUIycV@1sf3j;L*l2^vbQ;)@9#%ngWOo5<0Z&v^rSt#sWsqr zmkX|(p!0>{6S?uJuee99tl17)7U|c4G4ZXVQ-@XO-4{On)CVJ0hPGJa8W2I8lhnE6 z*xU1T#~05}WNcIvJ$>h;o$p2#o;rMW&~_EML0}wZ z&KUab%6Vt=l|6Rw4he$EM~jaW5#@%TkIwEc`x_+6SNNzPtheen>@_ZaTF{m^xi3DM zKDAp(xAWp1ino4Vi#7M{(!0ljd!^TeNxX`y(&Y)ut`pHEo-8bIfU9Q!Silv#{ayO zbB+p^MY76<(@8WJMge0uIYotcZLn!%_$dIivUAGkvcMZQps2B&8hY>4A0?tQ@u@#)FJyj{Qz;JChk?E7W$klJ zt5?`s8LV{8XeyI-r8+y!rnXikpmUaM7uCmI`5}Jtr75<%foQEy-AY+!Jw3p@QtWUGKRf9&_YjGUN&jcOC4XG^Y#f(Ou2!z2p>ir$ zVc+x!{VTZHjpfp%r3r%F&m<+nK9Y*;p)2m`bntMQen|m)a0T;VG7~(+0V!eT)a1AO zGY!cSJ)jOODEWsSE8l5-*~_C7aoxB*gG@%mSvmF$=YgN z8S!!4?byebhrq9A9GuGD$E)4 zsm#Nk{mUj$rvNWYFUz1R2leFfsytul+vw7mJ4x>42#vylvz9&AO%R zd!^VPTF9x3cTMB4m+cT|qt}6fukWFMoB~~Covvp)5tsCx_~Mev6E91lQg}tgmJ^balCP5gmfyjL1au3=^C1c5_##JL^ ztLq=04Ik-_{X)DKnPEwC>Hy3LuedF91hiN1+(eu~L~rl?dYI*z7v8K8$&q9mmur@& zvK#hbSZjLwf#vy$%vu|6kn^KLWWs zZreD2v+hH-uyp9T+@rV=SFd;rAa&$_{&DiKE@lYYfSMY_Sx-eDu%+_S3%U#f`LOv~ zE7A)*_+x(|CS1ZY-tg_El7{7fqKX&S64M51XZq|GIzCrADE_~bHGgrRJvH?1srBE9 zlFk$Y0N}65xDpote$^_Zdsi$Ho$lp1mypMw_Z>H)dX9ZEd#EbuG3!=O17ZfEG}8(R z+C}m|UvhFY9*v$JuaWVnRjM|?rue>iai#dq8GUFxwVlH8_)=j_tS+3{=F}G#+IYAn zS{_5U|4bnD)@o9pT!RASO(KO^J@#riPaTGX4L?y&7YdB|mI^IWBZ&A`w&ROHebxo{ z=y`kBCfD!f{X5cT%JOu7pSe4D*siAkcFt@Ke5B@1u$0(K+Oe-=M)GTnA|XeZ!sei2 zn&WAwHSk(v)qxg3z@``z=wEGtX1 zztRi-=dKE#`B8R>vPq~(y2OB5qH6ez`s}`+Gy@!JM=-y|55~c*fsuvY3OJ&WIp(S; z8QFP>IO@ErL49PkK5J~@LZ8?pOY9UIbw^ai9rfCNRP;r3M+8I} z3Da*&s#3P0lR0SJ_nS7KWB7FS^J`U{9liLJr#z`$y?-PvT98Onu@;GC&Dl>78w6`! 
z0^Q;}1GoB)tLNV%xX=C=@%xf=mqOgeTSWiqjGC6{?qrQyyT9Or8Le^AA*nrDh~S?2 za+X!52+m_1R(ta7V1%3*4f!aT3atcj)6C6aUMV9HR#qY#zb$SxGGq=;{5cEwogi=a zoVom#+GhqqPpV=@k0kZ}@?5@!iPq?*-^OCIeXY8FNm;^3q#)qYcZvxAw}R0g9zgo-0KUrJDpxa6FNqK}ZHC}3=+nn>=K9Wy0{xO=1>-@q%}`5{ zyL`Oj%%ZGq$@{t*)hk!)qn0Kp9}RHx-A~NhyFQt{m$x9&rVqJ}FjZJ;6vhvJJqj)_ zIEwxO5Wv-YHHp_6`m9|ChR3NKj2tgg9bXG`EtY6pBd&JpbEYImnZ}>^ae!TGr#PHa z1Y?`=hSWOr=4{_cs~&B`x#(seU$AOIWK068Z*UA}$*#KRY)ihYG>P2wm&mU6X0t5W zdxcdZ(&+e~H;NaRiYflA)BzWd@`TVv(fy4&6mbBdPPPT(IUSr(=M58#@9={8U#Ga~ z9XrfIVZsp+F{{Tid_FeQI$&Z8@!hi^7?<-up2EI+%A-V-5`XO{?E!t%3S!Wxe&s%c z{i;@VjACNB2^VwzY4CdGwgFz^mrleqn&nWU#Fv%ozI-6Hs5|is)>9UTPQ+56PqmYE zbTqF*Gcep9eb_>}ix?4lFqBuYruCt@!Jh)TYldQ+I~=g}N{2I1-PmYEtHXty>X+_* z@}7Mor^PtmuQ!x4xo`5}{)_`I#oK%&lb>8#Nm>MY5${8y&wD7y`5v@I-*GOv zhtJ!);S4*2_k8bDPW?X-jvJF;v}*MCSiGsIGZQ)(ZQbf^(_z8FA{aWfBGijd8WZp3rU_Gv|;L zqDCKvIgm#y6hJljE2J&kgcxYSGly^<9x)!?8G6?B9l_UKGgTHZ-+6Ug5NB&R}&1$&^E1K)OCcG8RXeG&%@=Faew4W(r3NXwO2mz2d5U=`X#J^^hZlo z6MmS@j&j2-)WrtHg9Nxjb8akxg#eGLwHhMz5E%pax*>d@Fkv(nmokfGzk86L5Dnh2 z`>D6e*d6g*mHOxZ^Mv|HJn^uu$*wQ{NQPKNl*gdcD0saagRe?O19qzeX~;_Ne6J#5 zbjKcgLKwbzNw$L!?kh@bnO@Pyh-jH6IH96$<%bZlWjYCAvr{-71Oh}P@ zG~94>9blIQ_7E2vf4o=GfOUGpfSL!pmAFN0$QjQB>_~=odc@HWd(9;F;PFDoMnMWG zSw%)Je=yL+e@y=9`iJMkb=|SgmmOmNnZ}7#!RR!eA{12cC-rU!rZ1o+QAwd$(E+vf zNS&KH<{E0IW55wfEK)Hm41ih!IcxF7$0#Gn_;O;E(j}*T5q{`FLfU9gFPVEpN#}GD z08y1T5)9j<_n(McwOk(jO4qXKZ*L`fxIgwV{<+tBiTiroVD{AT-G)BUilklCiz)vC z9r_BKKDx6)6Q)R>#YV${J;p@SP~v_f9BsO;P%YNqi!3Z);Nz)?K4H6Rrfe#2aH5p| z_Sd!iWmDL9PW@0KBE~|F3B1DqPCl#i%BX6X6fcQJCxCfdNAkPw&?5+Us;K zIskWDx)Tq73J$!yLi!Ut+KDv=6&Bo3)cM*%qSzY995A2*+-I0##NcrMg&IjB&Dg@| zVp^k0o@@uf@Kr+4?V1)h&cA{QGjdC9;uwY>?cFR$U-n+By|T5^esTKd5?Gf7R|Emoy~K`CF*a_qJS4Qo0ti?zzDzUfR_h%emM$*7Wu9&}VQAXbBdDRxmY z{Ued(x@lSSb*p+Ls_d2Ir2mKHK~iZ4lc9*Ap=LflQd6cdIuB@C2gQspnbU|}y)lKT zhJbpst~9XAPI1J!#Ha>j=iLxz-aX#&JMfQD(+Rr zeFYQFPB3H?9&FTK2P7EJu2%7#$)EIv{my_0i}|>B%-u9SsTP}kidJpe>?Q(3zwQ<& zBY32OcQ$(YA#jVZwZIN8MFgS9J%!=(6*yhHLXWd9*Xdjv`i1l1hVG{S 
zVaE<63q=TIB@VmKAxj^5bBT%UKRCdi8v5c__7pSlN)ofqKY-5j0#M{EWai(E{Jmqp z;fzxJ4Z0{MGD=HdR7sjnutnAHg{GO-{`khqH91{zE0@bJzcSya)6WKCvPgH&>3`-U zKO*h#O|89>wPcfO)vyU_=YAol9BUox*4*khW>S`tVBeLAN`-E8zhpB>w`iw5L~{Ah+ECb z>3&v@OV>+IiInXP1b4KMenKYo23Lq9o82ol)h5({Tp33yiH8IcR(O3*66Ct4bBUyv zOcRSZfdY*KPT_hJpSBq2G3BKmf4YLULz)?XS+2RFBCVhPwJIsd^@#OBn*G{20k`*3 zN{AaTdh~J5jxve5CMbYKnqn%Hte_qO%Tl~EPc2E*(PJVD1SzTNL7N2aA6@(>E9XJ^nv52M`E5tu;Rsno%JgNuRd5D%{W2<&7$w)xhC$)P}j z4!r798Cp=GwzdJ`7cgt4u>U%BvqY2@KmC8|4G5>zxIWm=P0U|N>?Zlb%I!B9^^(05 zrH~;WvIQdqi{vXl>DCiyZ^be>D~B%fSthCQbu#o9IRzkR?vv4C8$ez=NEZqA^F|7Y zS?A)xrJ?6HFj0x5Bu&3|iOT_zbz2BWroZvhe7l`wwuK ztcCO<;D#sB+#eqB7`}bhm0bwCl?tCiv7RC6$Uyj^uDy$Zc`kUM!<1w)dC>5^b*GKZ zH|xx<&D_AAHRTsxyr6%yB2d#Gx8dm^fDofG+#8EDDG_(>b2Y*ii5)5mCT86 z!v#QWjdq$f>Wn>nwbMnYwlWA*lX$rbt2|s?ZNoh#8``T(+S5r9^Au@ML2kuCeB5!?Y?#Zl;z6C33PbF$Ti9sS&j)M+ucQu*@? zG7zO)j+M>mRQ}LP^Sgn=8L)M(R#%B7UxK)lwr78oZYju@!Kvf@HlR{PR+6o{y{YbBH_ZR zq4}pCgF8p@mUAoQ|0@zkotN6QZZRS#j`b)d129ru^VCMGtG#w`JQ|XoG+0CclF`z> zq7+zT{Zyv}*L6}T-UKvHj4(0%s~MPRAAWJ?8G2+5FGgHsSXLieEoR^hAyGZOK*qZ- z%@TV`c{JAA`0H|wKE-B{DXl20#kK zn1q8nAl}6Nj>xm3>n98kT3fi`v*)xX^&MH07B$Zgh;=q+-J8|tJ8l>vIQc3*eGknD z07h84#G!$1cqvgHN#P6P9Uz-?B}=iN60nWfE<5f;QAgXc+oJvzWQ;{E@w~7;5W14V z@jLpI_{Cq@nnRo6H|dmfsSTl|p+323c60Bjg`u+uEe5ib+rQ6D{b2ugwb=*qU6gpM zS|aI#0Pq{p=ZVdXY&d7+rJ~wnBHhb%&+ul-S^R!5ot){PSNM%==Ta-5IlppY{G*no zKGN}&xB`+7U~R(H$7Is(40Dl~*ka6X2Gz@1Z?0wonnaaorj{6~6Xt!4LtreE-oM1B zZ;Z9UZDb9pRH_@&5w8`4dx|xTd#J462mz|)0*8-Z6zw^;K+!JgCc7SJ(%}{;Ou) zk)<2|(+wFtJ~kcu*3J*Rn^wEz&#iQfkNQWkinygBQ~OAJ_dOR4!2Lp_tA6Q_%gvZV zVDYFxeZ8}Mdyuc;L_D<%lrBtMsBZ`#U7PK>tUiDuFn4m>b7yH|I}u3|7X?_HzQVfp zG0RbdTmvef=Zh8o2rk6T+;iUJKITzA``yh<6=~=6pH)eLt}Ex-8ja6g>|48yMq5-` z>`%`DWQ6i5_;E!=zcU_gQ=YLF?_4y#sy~Wm$nUUcXV4NR8$Z8-L7IZ~+o#r+U_X!F z{lC4-xz>#YE`T&wCrI|#qGofDH`|0`rOMaD)5B4|N)8UT17#>q(Pj_nZdC|^dpp?c zB`3~G6}s`s0PM= z)icQU|MtebTb4%7C!6@(p~?3^5ssr_e((KDXMe#4u*PZNSsQ=XX@7k$X7Hl>hIt#; zyI6bhObrIF-2bY&D<+Mx&Eg?I>qk7NZ*sV({8dN8@ZcVn^F0%lbhA-NjW_-sW?GhNya3g+ru 
z;x%wt%M5D_FJdHs6)=u!g}sw{NEA+v6E`Ai>nB#c5GVMhIEWF-;l!BP>j6(=Qnm6w z=a*H)+{3IM9m@Ppa4wCq+z8H!)5kdjmXI3y8P#GJXJ}}Xwrh>`E6@v$P*&1r?gl(e zNq+EXY;QDFf9t^+O!x(9VDqx4LY)HOh^|qyg@`4eAig7h9t`PMV;v>}8?a)J~e+k+xQU zp|ZCY`RB>WH3xfwTvyWc9*H)X6_@-bS*b|}cRV&*MYm63=JX*M5RPK3)uw`@V+9%B zu2Le9EwQT}IsH0-6doC(3^teQo*h&L6F1zpS@O;DIz8aF9mrZ+5d$bZfFVpBTJXp5wsrJz?k-866K=iRYuTpHf2 zE=Fezh1IKOMZmlwoY4>@&?VlUqmk1g7&wwRrVE?p4{^oEEkrG7f}LR~7_JKHQLvRW zEqGtWhmsSw3B+k&Le&3QtzlNuAxJM)`vma|4dtdtSbW0XRIhube}? z&f9jFWF_80$hjob?=;z70fHQ`<8iVn^pag(s0pM?*@u%Ga|Untl=I)7!oGFtyON118Wj8CV?aeZHzb`mm!(%Q;1b4zCq~~R;xSuR zRA~jSBlQ8eADm{xm8hOgI>lWZ`$efZ@w5WyXxGb1I;U^yd*?JeebdIL4Vkx|fg4!= zcHf6906h}J^xGS7bN2VF#}uT>6HFCPrPyp4Ks=iv)Swh2moYtX=O3V-};gB zy)`5o)j^O!bP9`jPY_o6M1XmweH!AKOCGH&OV?->Xz}nkzE$#I$%vpAWI4Frfpp>jLH4I#q?DC5P zZ;AmhyML%W)?3xpokA$5JR!ez{euNLiyk_d=F_=l|JBzb&g2}@FNzMJNewuDu}YPK z?K>vYK;jN?DXouzr^K#0eFfLIu*u#TB8#P^25N~ocwtj^L4kfefQ~myXAb@%Y#?Kr z$r{x`rBhSmIH)rXKRAxO0!3GD@b0BX*>cSjd!z;Fm);rb#GNycB;DmLd*alk8MhEl zAcVfiQG^S#l}Uu08desfMUNZTr@RJmkv(f ziuheO+IDOHl+tErY`4Q&myS21NRUAZRY{u)NGV|oLugpd2|q56?$8mF>m^d|hn<CKa+icn~G;=l~Lc2jnrK6v}hWU!p2vVt_-B~*axHNh1;g<%ygL9Le_-(`kOXU;qd;t+J z#?v4Xt0mkt?TAFT(%G{J9UL(#jv_bcSTk3vDij3YzOlsHu}_<=O4Cz;A>$3In%)Ej5$Sh8qhemYbM|o z+YZ-ewxGq+Ge74?ik}b}v7m5k4yTq6Q`JX!6(fLx48*5;=yz8()j+jqj0GAePKTm< z5?NZwyP%0l@P$8h^Am|=1i1r3)Y_?sf_ikw02uztRC&{1R^I=q-CAS4fu&4PaHJ7T?}5?AkY(x2TSC! 
zYvoNo<)hKx zsEs+K=}n;I1vjw5aPCf5&XS9}&Yc0Lgu3zrh137#tTjl*sP;<{<-8-J^^(L$YKx>o zfrCLOIsS->-V)UUR?st{i(Q%rBBoFXcrXPD{OUeJop5MbJu05Niler+WII!s;8KGx ziS=HatxJ#x&%~B^L(-1pva*+c@MT4 z*>^6J!2nJFJ^iHdAeaKyMT8(i$Q9y-kz}`$gmYug_5K&*F;u=&-;;Jgw3Wq{2W)wP zEQPrfowXS0%y^}6i&U5Q=lqHeFfbbnbs5MJ9%qS}i!`3$@|#?x%O1LMdvT^_VjFa^ z0kh8$f*p!bfKjZ_zjN4zl|`&!lTLrYerGa}Z8X;6&HJH;Jbd8Lm7DY@a zDyX>=DQK-2@)HdwcAqE&KynZZ7Gx3z{0SdPM#8{shw(BxY-%?=!_WGu=kKq^ObT{9 zd~aX2^zz2Lwvc}rUV-SPBt@dvvWiA2W39FE?o>X##g|F?u;&Dvqp>7FckSBK-4Iv)#B|^LKA_3HRVx{!gth+U6$wiF-k~>S>~+ zpn)4#;VtB%7EiHaXc{=`uJkz1l13cGLZeJ$Mhzl|H8Ue=tuXFhb;CjXvmHO}DbL>N zu|w&G0g^Q>HfP{l5Y35^>N-` z2YUi;DZlcVq4hdg8g^Fcrd3@J~z`5(i zJ5@)kH2Zj2NCr6@?n(lEtYVnX;1oIlPn4ks5j!0WeR zm|)cnGS3=YHSVM9>Dy1PZ&Qa`u-^kx4^Kf>e=P2fUFV{GW6-Y#r9&oO-b&Y~vWTSl zmd!25?wB<6qVa3q0zG68k2v3I$$@n}t?J2cg2gk3gRC6>(jfu8qUl^mT#dKFMdKnA z)*?WoJ7CP-BzinoK9iVNgkjQT4unnhn8OgGuMilr`jsIkj~Gq6{nk|Vx~ec+G5UtR zzb~N**0jznt5E-VGZ@owsfM^F&>$$)S_-5G1Bj8}?TZ)Ht;86-qyd5KIG$Fn>;G@@ zX5J)TZVp!s!+Kr2%$a#?B9U2~HnPo&Y_^RSGFxGOq%mSMQ(4^Sh`1As?V*cZtUJ%# zWotMXIHKbL2KZu#VyGk8xK^PFZ^XvCV_U*oPqGPUTGxoBB<(6^wwk;~u;wLo!>^9Qk|y1|2Tx82+;^M5G8?v0;*Q*B@yIqEI= zajN@SK@12QbCj_ct*1+1hB8JbE#0gEq{Hpg*EdC8-uFYUU!!_(6?-J#TP2x-EPwgL zh2+6o{U+zl?P$2^QFWt+0DaqZX}y)t%Z}EiD@Jj)`%2^LK*f9RqdrNIV4X{@A9&@- zDz-@Y`nS%#f8(Fs8`HNw{`tn{?89EnvDM6Mz^V*-*{w)0G0GFvrTeR!4@oseK*2@1 zgieo7j(GWhD3jgUXy zvERwMHa;WEC_Wf7B>-%7e=pYYwU^?-tIY*6H%4G`lly0(P}a1?;oB#tYGTpfu9sha*2lpYnHzoZk+d^LG_T%+mn*wGm$d#9odo@Pm)}n?pKAQvE7$KOf#&)|gWIo@ zW3u;3LK>&e+RMv}w}0sEGA2klp$-pG=!d&h_hNeH)qCPfIJ@By~MM8EcvUZ)B%>w2ku$kING`Wg4q1Qbv-G^ z)z%@K*=F3Y+ow@e%9XzW0oKlec6Gi#WsbI+>*#4at8v%I+!%JES!1S$c84TxC*_s} zY%;C4yMCp?{nyC=Pzbx9C2ifT5!$=6QoA)7Hx^vCrJLlK^j2ld-L@yIw&o3f--r%3 zhE72%e`%KWzu{*#2<~zn_t-|}2AT+@IrXN^nDd$x6lboRzNt#cC(g+KTzkq(X*zt4 zAO))~Ic@dI?qlMLMa*4E!{Ea^O~z-|&J>n|r^x+MH<- zbC&dJji$rj{iE>r?W%VzFD@I}%-*<1l}8t(#E>lBm2Gq3JO{d(;pd@?w%!-)JKaCg zpv8Q@odD#YTkrRo1Qg12rA-znKK+p&hA*|N4lfvupU}-l&M==PMSd69a;47tU(LKHY7z#cx&FPKQaZ8!ky*rWP!aZsSgS5B8h^# 
z5PVU+AO)i?>c5}%^B{f#wtMyYdGlCpgz-`5Ld;f@d)pb7*tFF6OtI+~f|B`}g9~cw z2Sa;OqDU;Dl9zwJdS7I7O~2$uW}AinAvx9Q+E-zqoZrM2d8)qkMYCp~y5Egye)(bW z_d@yfv!~^-S!}Dk&jzv5J2$?tT-PRJH+tH4#WcS5!K{big9)K#r(2)Chg1ijd8-{D zQ%G`5=HNt#nmNt*_g|mAe)*nV1IeYWisdkS{1cFD^y;|(${R#Yz?=U1&^*H_|7|s0fG% zkpKxjBApO|fg~g$LY^Vzd^^n2d=hdLtvXhr&o^XjM5tv{QlrKy z^jP(t+PpM&oJ+i7^^)1mE!9%`Yz4GzHX~;OQjj+QG@n$h_-O89Zd(~>}><|i@y{AC&u7$#W<23Z= z zrcLI)+W_{w?aH5b5J9gkgMM>jk~gjV_LdXa|39bv?QbKk=*c-#Ma*#O{rX6PwEmQB zh_^oFXgkcbz=Gn*^!&@18|2#!hVM2s+s;R`MO!SxW{YM|2kJkqjvq9~!3^h9J96ez z9N%ESb?WulQYpl&GYO3GRE3NNt8=@F7=wwqCB5X*x#`F|g8Fb4j zkPcPlB0#MJu2}n~Ru?{80%k~COrQM<-e!=^h@O|&imoU;{;`dPRRAx|;daCxe<9j98Uy#;f_kIAY_jeS=UxESoS;VB;A5LATc-FfIAXeAxA2;=Jlk@uv7=KJaGCY^9aajp$pna@T4IMxU1-t%M zV-~G$rq@b=5ZAQmKaCg^v8f25G<%=no8#v}=4h+87C2p$IeV2B&l$;0wF@UP&kX6# zJ~A6Z zxFI@(0{0Ji1XnphukfNt#0)~IeSF3!Fg*r7Hnz?FM&HEgmIbeEdCt`UtTbfODm8kt zy{OJ`5-p8s<$K>7AC$d9oo4l8wBaOXHh2Okq<_$GQ;t4*Tz8%7({-%k6V3MGWIj+@FmQc(a#=B4%P6QZjoyT!y~ zG%G?B&r#Mh$$X{;`a&*(V$^!YN0fGw*3tsi_Kyv8FeWy-Xwphd{0KUwbZ$t3?*Y;E-(AG& z#CCflVyp+(cI|6%O?0KO-vc@btD7uNzX zzyvn^jShh|aUw^B;qwJUIs*C{4dDy>5sJicV$|%jo)X8Jz^Vmd>XqGm<4sza4P7l* z1yPKcoe!~K*+JK8&s(V-IQi5h-GAd^gQB*W#D2CvX$R*^SIgWLG5*1Xo;w(ZHaNS+ zVyw8x<;ao-ST_#%}XE_}fpTiX?YRJ&##gjUYYDl8^?Bmu5Yjvb{063u5JaKig zj`J=d^_$~^r~aMUPIel2-KIB#$moX2XUW1?zW1FRS)`7sra3A|{9Un)($iNrKTTDZ zTyj^7IQeu_k-w2)Q~AU*0>*zD4S>jJBj3nUU;3A?WiAPkT*uI&a7_55rT1a@NT#EG z-R$91t3CRD3gR`mqld*?GMwy)xFQvCzquh%zVQ=NDDFIlmB8C&Z|gs_q<|h?5^KRM zeQQeK5f8PVlTXJkYFK1DS!e|6HkGE>h&y!@sYvqk*<1E@?5F}bdGV^R>CG+NO#`$y zSDE)X-CC}kvvz^Q}&SpD-M7J-Gw3z0UVS7h zXxck`W^$APLbT7ZKcvVd{)Y{kvL>v`{Dc*>LdXQ~r9S3sKAtJ?Xma{%V;Y3#%{QNL zqd8v-2d$PSn!pU$5X4=PZBlNJa7SguVYs7m8Y<-eJC=@WH4D9It@qFK&Eawypf>Qy z#jjJU?;@AhPULFO%~N56Tvs)>Q%~>7i96wnWHbbYa8o;;N5?T=^U+5EBk=yIKY^)o zd&ETq1gDUWA%V>OlAI~3F8H)6d+gkW1YD7nxH+y!T4QWuCp`Bt%&Qp1*f!cx7IJem7Rg+em#rNMkEZ%`Dl9wFgd-pA;8|1H0qEDc2 zKp?>JJFZvXnxQ#M4OAh<^7}DcO=2JW+hr3%LRxddE?%P@`To-eO|ShAx7=6ed(M|< 
z_H3HVQUb^pi16^D1;xx}!83Hc;(%2hs58VX!YcI7~(Crzn5?LE&5=pbzeQwG!#5kLPHw7)^YzxaKl+~^}8Z^a*-sHEE9fLg_)uj<&W{Mz>UAl6l z>Pjif-cBAq2I#RPXQ)sC!|GHVdN+~PCDy{*)6^ol^uaPg5srvBuQBbgG+5Z*o6w<^ zLMOYX5qa%gusJ+88c*O)(B53B<|P~x%+V;zRKOIK6VDYziB*F4jpe% zm^a`1QSmsRoCNO8w-FaxxkEj7BDHyY;g1(A(Z|*x&P<9k6ytfPa^siN3b2FWJ+(_R z+=DqlVSl7bQ$o7o`tE|+N6$GuX_SgG-j%6j;r40Jmpf#1v-cp{B3uR?h=a;ulKJ1& zoe(rwu^`Hm8}RAFgW-TdeZY?(?o5*z0LYmeu!rHS5>t8NY=mrr_WYAV1xMQ3CIIe? z{Vk&Q49D2svQnXra-%-*B@+|IW{p+tFklswvoYtQ(Lq|TE0xzzcshm$GM6Jf zXlCiK)QW|mLx){bZI%{f)9mV&M0isJ#7%p?oi{%orcx3lZNsr_N&7mGty|YEq7jEN zBnYCPr6`AOugs|2D@?z?o^+9S~c3+J|#Cyp6usFY^mAH zZ!P|ebYXe!+DS~CABQ?tkp?0U4XEfVxEV>9@~nTKE|MN+%l_m})MuOZCg8N1kREP(qo>Fh{3>o#r4KG`dZ(|SUWDn*P8@Rwo) z#zrB+i!+inZ0cB=I_%3-4XCaTh63U=o!?a7d|aavXEi9%(bbJ=RErAOHFsi>|K&d2 zGj-lf>|g*f3O+Y|y8v@z=}Q;Ln|x{U03Z=a77~O#VqdCHW-|*7itw1YMwzJdy9$)9 zyG`SEj1BSQ=dF(UNCu~kI}haNs12&28wiSp4uRlV9R*y@AQ+uc0Ee@HWVtm*t@R+) zWy_8^k1t2FCQFf)T()ACXIkSPVg4*x;4@*l1ORr$mT*7P9N8&w27vDbfT0c{^fytM zaPYZg19<-JE=*W34bB||Nih`s``QAoxXC(=crWdy27(2bO&J|ux?L)dQ?cey5^_9a z%P;j_ck3H%eXjkjZLQa04MEasky!(FO~X?i`)_^& zuYh3{QQ`B8)JANwqrwtCKM1HjER+Vvl(I0wsHLR}Ea;L0rD3>{UEFY28b;EdLy@(p z2nM+C>qo=Dy=fE-hDGGw1V3q>f^!)aHXMpAw;VXs7{0imFRO2`8qpPoTw=g|Np+Tp zA{J4247e5A=6 z;D06XKPZ9dde!ibqy&vm)Xl%WInOjK?#-QSC#q_qV(#8RaNi zP5FGR7yQ1~H|4H>L~Y7EaaX|SV#azEjYr`1NBr7Hc8Qw=?^vY~Dbwd#H+tm<0lRFg z^!Dui^Ae{8)DHff%d;xi8&R8bPdpMxfv7f*eLLO?KED5u;}m!zmqCG>9Y3@y_vb5! 
zjCSq9=i8&q!DFr~B-&-x9}wPu(gkaJw>n4NW#6{%V^7*Ey_HFPkm(lLcmOP5cbw}V z#!6fCI)48(ZqBlh6W03L^%YpV|Mh*Hx1Im>u_DIr+UIqh;&=iXmpL6qOB_p%F#9p< zfc7;>uKS{M=g2p0`|MrS*CpqEVM5&faQEO%HW==*n9^`3B=Ct@ZnbSXPvQt;YBO^jBvD`Iz*=Qs97jStj zqglCKYPYeYk_FF_j&GSdd2Gv%0_+Qg#LFVa@7qgthhy(=7Kk?Zi?Vsv8X+^}%E~DE zSikP`^9ZxGPybErB(0-$YbuTZcMJKrNctwJzqrAQ1uZ-8ir9G0mgu@QI!AF=0ndq^ zuM61oeBBQYZzjrb1j`P+BDP!4BV`CWF+NIR?N*~#)=W$@Bh0)B$;bb@h5YYZF?~_z zr6caYf5xf6EG87)+WtK`f6{rmy7}L89{_SJ+eE+rKY38vI$Gp^|BQS5MAGZ1{}V5s z6G&8lMktZ!n3D`j;|nLE#>e7}dNFLHn=mCHJSPy(W;TAr(YX0=Ig#LWJDe@uIE zo>GLI;*I;ucL|@dfmkF8Y)74u7s?4Z1H``r!b4dZ0c2dr z)(eTRV<|T_Pim=3=6bq?QSBcoge-Py5z-5~SleaekOqCBvqg8EHvc7x_4vgcmSFy^ zwd#aPaCdYYa=qD~&Dx+3+GtHrvkB>S{rjvuiHa!D%MaYm>@RuVNgES(Ac|tf_Hu03 z$Zkw_{$7bVlJELQD_v&;h^l~%$ZkhM(@jMF_DA1+@p^D$m(zTrK$*2o6Rup~UV?-7 zWI%%B9huWYhZ8TCC|8|2F2^G@F%R(pO@S=M{a3g81Jr_M88KdUlYN>*#h;4T+_Da0*u_!DfJH2%qLGmCkFkAmzx81kvJU*>ec0E&72&Aq=YD^6xnN8~ox z0IUsz5VJ~VmohZ8&fb(fC3E_yCDwjX!nm$I^dp!JSuRUo0ZWFjR_=1n3b2=e=Qok} zg}i!cf@K36Jn$c{rCebazwzM@gOQK(#=UGAd4YSsypjv%rJ?zON%sE1srCmwMJ}sO zmTgCmSv+5-{F=wZs!lC4+;{rAB#n#qGW&!e>)i*Q#iVC68Mb=u>vRvxpdF6$7W8<) z7x_<|^8_s{NYqxKmqYn2Bbf>$uYKG6X#DEfG`IuEBp@-?{Mn0QL(k=P91Kamz?6xq zJ`waE=;M#bTIF9t&F(e%9N}e~@1MqV8s;yj3Cm_bl7QMTvBhIR!){N%zFM^-FD7X5 z$G5m&=F!L-cfONop3QyeHPNw=ht-AkA=@e_hd5m0n9#=L*liF8cQ29jjDW++6#@6F z{<M}DbYSzJMIvoZG^JFS zyVVGAbPLNs`oObA9g*GN2|Ujh^K#Z7Wn#Yg4=Qar5a)ei`+!2%^{XZh3A6O7x!Q>r ze>7Efr5oMtb-! zQ>n_?zJt?=&um?joYRDj9Jm7PbbmGXf@h;V1T#^dZFF~+WvNMCdEu#L)_8! 
z!O*WiaS=M2v>h2=<-%4eGY|>SxMp6aZ`@AJ>-A)vV~9dbP_(-b=lk#UXA6F8&^a#J$g%E&tQ#?p0K3%>pDbG?N+IbU?W8t$X6S zj)1Q%3Ne2?j4{_R`Ypl>qX0l0fG1*NiMNU_ncKQxb|}{rAg}LXVC^B0ZrF_=BYvW| zahwc0t67tolYUSCbkN0W3;>yGZtVwgfHP%%{$~i1^3tMZ`sI`Dq{A-z zI^TeN_U+xZ#&$o!H=pF3n$v6+T=aR0G9;CQB5fYfbAJ_~-|m#X>Ds=FEL(?-uKZ!( zrjJ2P3Jzl_yNk9^c=}I<`2pgqmnsp502Gc*RYejdgGM`O-C)D>79V^ju z)d-cL{R0d--EHn!m^VR>Tx7uwdK}@>?&P7=J*-3J=poQ^!(;4)8m9rcel^jbYl|R2 zC41@+Q5qEJq)Ht5=t6jaD-c_I*4pnGG%sf5W1UmG$RQld=Qv_{Jo?gjvsJ-pCR=}0 zS8zMT#GMdf7EHy7_ep7EOt{-~(`8R13G%)8FnnPYdHgfoH^++L%&AO5B{U*398m!o(7ORCi{xeA1NC-aKluL5x1|1t)DU$#zceLlM|6i5mZ;38J}ud9w|; zWKXLjM7S)>03iZt+$@rgM~Dts635HJv`M{#mYykI1Ew~Gsig0fmd}PtZ?Cie@K}l5 z?qOTFbWAS1MHPL9)rT*}4#+H2*v84M*J;VYg)Gk^Eq@@V0I0j>L?ery*TpsI%by#G z2(9rM$e<3Fg*=*|=Pi!sG}~1?lqhT^U+}~queLwPt)e+0r<9oo2e|n@r;W1li0%D! z3nmsRsVyH9->M4DCWjS*D|ia7MK(p&o&(o~2HHK0FN{z1u49T1(t5pK1{mYGgDl&;wwQ>-@`k|4;OxGUq>AF| zfiHbzD;>TXheT{I{QLpM3-&o~8MyKZtqTq7jfJ>fc{tB^aK~%Q@lBWXi*9VXkht+% z|5AAuN<`W~CU!l2VKd% z?b96d-C(F}TBH{^vpimAgDM>WTPdowz^U=fYe^4hpfvB4in+i^E$V(`TcO;cQFxWyI+heT!1f(|8zMl8G0&AHtW?T*^X^8jyf%+REHXmc6P3)``Jt73`sE z`+Sux;Ktw-_mCgkKs0&9DACK%vgUa}S;VmdjA(`athXb9&M~P1=kbZngtc#7P$rY9_uySK!?pvSkmeFMk#jzU^=j zDTCX^g*=Qfn+(rjn~v(z585E=;u`mYivwSNg$Eq63LJOuld_gK?=P&umm`BB<5e4lDp%r@aAeh^~L@if_c_|qP=5tEc1LZ2Wv(=vG zk)08>{fA|AJq!D)apkrgBw=Yhi~{$;(hWzt2WxyRrWQw<0L0xtR1Z;B1#_P_tsZyx~%ehrZXl z=t^VioiMMUNBYuL)-NNZP8I+DL4)|!&Al%f_bGByn1Hm4VvHAx|Z-> zLoOP1R9$m|R_wsFw=eYb$5b#}-Q#t7l5TjCLOWoc9H-{=)T%_JE5b_4jhu%K$dR+1 zD8cV)Kr&Ul-ZXN|QXWN-%u9Xbf-FWSF4s4HIqP>zpOD|%`d*;vb^1%z2|pVO%C0Z; zEYhA!(qIw4C6_xy;B=YOPfxw1TF+_G)UOREmz%O`^ZVjLDAXSLHXBMn795HSk8>oU zbGS313SkyqhpVR0VH;HKhVS`&Sg3T+6_c#rv{hLUu5@z5J$I2!fI|*C5fVPZs-Z}B zP48s##?wQ$<0W%yhQw~9>7-?NB4UKNcFLQd2u15Oi&@rePhm{^JguQvpt>s(H18i1 z?w&B3VU^yLE-=K1HP4mLsON?|cEG(iit0`#O+R`#W1AKLRlF9CD4XsHVc^gi(@_PM z{fX1!6E+WGXhx4~%P{lz ztUlGyb#M+MZdvFI4XK1-Bc4hYebXXeZhY0Lbl=p5(%jY7JculC%ndLZ=;%6hp4!-j zib2PCV76)=_IHk&*xl2MSGOnc9#H%^I1%m@g|o3lT_Zl3uDM2t?PjE73#t>GVUyEd 
zQ8z|+y5wkuQI2J0U{l-rdKvP?qhDO+II|%{2rOV>-<;6&BWl z>ATJtb$pF`<+bbHF~T;%bRWwW;SMef)0`p__OKjcqOtkOM)&!1_fLI#e9yUF9F|d1 zF_W^h%v-Ud;+SwzX{wV=?bnc!0O4%fqshb@#OZt3$JARI<@TQXdzjY1 zp7+8WQuU-B7YUQ2gob)4W!{jkJM7Wwm-wc#oEJ z>+Q|<1?~wv6Vr6TYWkAEayMA!PCU=d(jWJQ?nkEYwbZ;uV)pOK4zLKDnKH!fO`WdF z7|~^(U`ymuiaK|*#C&K^=6j4dv6UO^g}N(&s)~N9%K6UbokFga>;tfw=R&^It?ea@ z8?Zx2PD>g6a!ccL!`7noXRjpJ5_I*=arG$plXJ~&YBlxN+U?qw1<5@H&cUd(kiTk0 z@5548mRzvkyrYecA2cqw`=#=GC3%PQVfH|@x`Z6D9VmzAlntRc>YYH<&@(eJ6XvC4 z-{QSOs{Xz@)p{yBN{+rNRz=r~XVS?-bn~&>$@t@)E*Chq#`6%EFw#Y{m-Ji{n=BVR zG&QE9Z(dGVlpqu{wn!Tz(Kc`=l!Y%188le^JvssOe6yJcD3_Cd0m?PVgDGM}7(KX5j1(XXYN0oJD6^!ga zM(SwE%v_g&CjnxYQasMI33EjTlo{BWR0SB=g`d~bR{A7WJ%c~cV`C|)uC#7Dvjrc_ zebj#|is`Gdb2lN^b;xYy)kpkmT|*VSB1UTSV_i%7#`feP`b8V8HA;Nf^{4Y_87KmE zh+yk7u-pHetBrW^{jT z`g>%g63okeJbC9`n~d;SZp-D1L*V6zj$bdobavC}W~EBsw$Ghvvxux{OgiXS@ z2ezVao6@p4=jcX*oT0rt84e%Ea|a_%jK1=+A0a1O6Ej9i|H=M+R%@H92qsP^p5sm7 z8pLA+MHs)KJoi}Sj@iwO(WYsyg?s(7?YXXf!&zsxi3*uz!KQaxHP5uipxLcw$u$k+ z^j9#v2Fz}-ZcyE|)0UVqBo0Hkt4%)S^xHISI$hRQ7BVi63?kFFSQb>GZ5XIbbk4Ne zuZ@FmM*D9WbA~+tNPfql{VkVILe;J=6~Px5Fm>e{T}$zY<{<7U3u|J<*i@W{v2YMY z{XEsTr{>X9djquiXz=P{nBjb03yM$u)Thp~M=65~UiMi`6C-l+JmfmDS)6IU>1@?1F~;hEV=6yk zF>|%hruCH<){33o!e9*&wFR?}@dA>3*e0 zQhN|IQ{V%J86!$TltWu9xEg%+J(zm$VgDP%R%zp&UXKs1V|Zh0A&oNngO()1&b#kG zr?tym66Ud{ENh!5EZO71*ZkWN{)p&RJDcx0(zBFr7GLYEllKz7Hc*0pJ|2WRj<&Z= zISw0RN|L9O@lYNO&Nqdqou0$BmqGVpjV}olDdCyD!BK$|N9CPBe3I$xK zY))X?80^#15<`ast3#LqN9zZ+-u=VTE@>TIcd3}E*y>#BFe(1XKrqH5>=5m8biw%@ zcim;ZFmh=r5(~c2!gKuq#+)|0>wTp=Ht|K>zK+Y-_S*+dANA%`jqL zQ|?iALt6HiI8&rRA17GaAv!zp)XOYOSO*`JO9T95Z&BGP_5(kkB)k>y3(Gs2rbqxS z(Nqb!rGl$Le7^FBT<|c@t`Rp1$G$Y03aQ1SO@t7i48pKJkC2D-@9X)a^V5pEgwci- zCT?gK%Yym&1|(vdX={dKoIg$)uN`RW{M6HLuG|H%zgR9!8EV=H>ByrL130J?G1qjl zq+GN_lG;o;+=uPFIRQ-nIj4{9r(PD9tG_va!+mpp4m)6@Zq*eLOTxxGX5%6ij-|P3Cn~(PraOr=u`aYvhnourOA@~>-w?DV>4mE`~G5- zwjE{&F?!X(o)=FlmCf__zIydbez_}r9W>=O${5)eL{vY2BhTZOOSIUCI!Trhz_6G) z^&J1&j5KGZ(wy1^Y1)2AznGzAIn6M>V|KhNdFO+xJANG=488kTGOkQLw&MjCg;JfP 
zvv>-y${uuak*t0zOp!UK;Oq*(?4h)lSEVQZmeo^C=+gj~|TRv_5}3#91S zc{($0MOtfS4|a6vECXwD1^Zc|dJ-%@#|A*@@1rGWAetHdjLx7Q5XOiRc_|%THP)e) zpm4Kvuj9RZUHm@?P+enPoRE}mb5ks&_l(uX)9BF;doGrbJ-C5I2%R1v^CFkU9WzTz zh4;`sX#eI-J0%GPuEWTxQ)vYvw$`X{1IW5dOEs6qwGh`t1k-1A0EZr!6{<4LFl{f_ z`*=?#W8}5}v+GS}O*_4?>Xt%M>RuT2pq+PbsVX_^Ah`PLS}tlvp7vR%+=7rX{b#|YDOh1uh_r1- z%K+SjWPMDGGDhwfqt!47=mCg_X_B%;oss6Mmlhu1i-X z&GftDPCy7dM^h1!ck>(A9e&Dbp3&}3J(j#FG?9Nz;h^5y^nP^gboj1XGlPM`;Kx(4 z^q>Iisa0DnGfUeXP>}1J-DmFT`xWl@$4o5&J$9J4P2qJ1+n&?p4K8GRv1}g(W%_aR z*&K5U*|u_Mm+IX!jZL(Jy*F)JhcY7loV#5adt&t=FW>Lkmg>qp&ywUu_N()v|Nd*M zrCrwUD_->oFA$+sxK=HCV^S7&R4`bO47*!Xae-mTuF?>N_#IAih7S16<10QL4iwdIM_zffpWV{MXsHZ~#XF4l4zEo|g(F5&CB^Pm_gN2A zKOm>&C6%bZbjBN5V`Bc1XZ7JuBE;saPIQ@dBqA;Im3xkur-Yatuq^!jM3k2P+!zilm#N++w?W%IaQ+88jda3nY2VrZW zIEZFyYZo4e4iQY(I-)c}KTkhliAW3m)eT@)b~&V?R^YSp_Or1s?=;TWsU6$+#XatV zw*O{>ehI&jdtwG*EUnDxsMMyeLohnUo-(x$vE%N+p4~Ux&>}(js5#GYLiDSkvL~%B z=Eh|J+-iLSp4fBn$KP|8B2$$WFwaT$8B(Ko-NbRFGC!P4WNIsR9zN#ss3otGYg-&N zC$t=XG*`mUlPl|j(g2&wQGS%}d`ClmM(d;OJx$<{(83$!qj$_kSwkiHXDNF^3fM|m zQGzh}_@Od{T7j)-5v=j~;1swO(#jfZ7>}OE6hI9#vhvp0^WTHY7UehyuKr9iFP$s0S-s_Bd-{X<)ax%YgN|H_ zr28-~;P0Hsq(kZlY>Oz$>R2GGb40^obzCatCVh~GWI@6V8OKN^j2+U(`R$P0Sazir zvWt=C#I_x5iLIn8+WD$yvqFR$Ais_yzB#~4?Isd`rgr!beTYTCK%GQJBX{h3U530O zU|HZQu-9O)65xrS4*kTF)>Z34-ZZ5|0i}@+mjSh!fsW2r{WNnP<#K6zxoq#K4jWkw zgsn&s_8GF7{?x-Ww$``oi8--MF8I8bH{Jz1HZ`u(97zo(RJs2oy8EFjFnf0s$Z$;6 zsw8I9^oZ8a1lvQ%w$tW}*poB$(gQBt>H;^mmo&xVt6It=#j~Dd-ZA@%m$P>KNE6ux2bu87tjWa*cfrcHny*PMTgC zd<=g8>GxwtD7a~MjK?eZ9HUl9a6*rpGb6o=`hY5mvTzgH3eYD2?n^T$ACnvCAeTj_ zBE##b%z4F+on@qHbxJhHktOQV(_2v>Dm}S58f8f{FHbcc_aViucCi~Db(DL^H6iN9 z)-|`ioT#H&w~V)@@6d&2%tsXQN!w~dGpa5vQm6uNdR*dvR5yMV! zZy**Z70Ly*wTS0D;l5(9EqEZMwgOVpVt1d#RXg^4{4_k>R3afl^d!eEfjj9}2^`+cIs z-t8S84%zNmD=jovxmQx!pjvz4Tbu*p4! 
zZfJxDasWCE+nx>TRae=4zBxU3*oVQDqR3vJ=gh8n&N=AKw_T`?vNoa355Qwr{bpGh z>OOIj5x`C{CZ)W}YeYg4Jxnbn-8LmZSXK$^3B;C^jH3UMh0Ps50^M|t1PXai<{xDw zy0<=+3pPh76XvC0s2d!WJ7xh>evUn)57&vQZGWvkg%DPkLEwJTnx>X}`^Q{g zXGp|$AD>U3SSreDM%udUn)w{Ahc!bcRrLmFl~K0LEIkwp*sLD#A92;oP9P8qHvI_1@0ZNr&^<5gDB>O=(|ck) z(1^j(!TXsuL5M*Q@+4jdcV&}(;Z&{%Z9fc!x3(ug_ek;0(Wt1Ch0$-2_DSx%`!q5Y z$s9OHfQN2200r^uSDSUPL+;VerWnj^9ud6oi%HY+=Qp195IUaHoGYl*@9jr85QcW*&u{Cpn-H~ zHE133i?9{#VL4PQdq+_J1fiIg#i*e<^ITx%Lj%5~-Rynp-lX2`t0e=XlbsYR(r~-} zet;mMx=z|f@LIW`vq9^%J7zb=OR58&fbib7=s7((C;JbqdSSJlvNX@gB9@#5kD z1E&`UG;46Xj@;lmdS$`r{Ym)Z{whBnmo9v9>OtD>!@j6pS=KJtooC5?E#%D7dsgMV z@+oZCJq}WT%mt$dqg#;%3~9)4Z`!LeFH?G`Bq(bsgE~SQ?&^*w|Ar<7m2M`;^nKGSaq5itI}Kd${mG{pSTJv zssv`C-nn7G5qoXMMWn2TkxwWr4i;WURsHxnJvIe6w`~mV?9rU?i|sRa6S@}*MeM$tNTXlJatsHu@0gWnrUJB_ zW+k=n)c$4%O{?R^RPnKAO|K(*?T^pL?#@{y$tzEl`zRauy*MCNdN6`#7+01lhuVM; z@iAKplyuk!EjL=o^~|0f`}A5r1yV0ta-L210TJs|{PgqUn+{W`u;^jg;y5sDmTPM}pHL&kULOTQz4Pn4%j=VX4A7p833H6e zv3M<%Mf1JGcIe`uBf|%ex?bT*?+g1q4el&=nqg1=Rvu(WR2EP!pp+P}HdZ@ynYOEE zyvH$t`T_zi#jM+QFv2ur+A^`3)3L#b%JDueJw`sI?Bf{vqB+uft8~pLtOfCmI@K=U zk?52nr(Xq#;ka1}$vm`BU!(xHFH{Z zmtxPzy>aD5a-Z*!d~*bKCg=7eXSw0xAz=YmUp@wFp>%je&@Wa_9qH2j5g)sImzjZJ zl}8u=65ye0Nx!OAmVLWleJ&H;WHC`Ux3hogW2jbFLvhDh@=6<`P$7Wt_f~yaE@H1r z9$6aOxAF9<)s?G~Ld@<0x@=y$S-{7SFF5^nvf|o@uL!QmGEe>$Cj zQ`-DSf5uzk-JWenZULvhqxL;l6}Xk;x3Y*W0@X*?(HJuRr&4sc@7CY=2kMW`g#FpA6}9!U~&WQw?#1h)+&%_h`ryt zT&Gzh(4Q=Z@;#&G9BNmX5hd_B>eS2ocTfE|Q^cFf2T%h&zVYO8+*|iWilHhB9CpVW z5dKf?UM>Qk7I@Rj=kDtS20-!P-2de?$c#OmaOt2;cYYq`0yBC1KOOpqDdp@J@MlBT(S%3Z zfXIKoTR`9j@C~7_kC#9E{U$Jb>A$Sb6(f3M2|~8l=KITe*FPD!s$jar)_+`g|DH|D zDeC|0y2FJ}b0Gh0kN@)0oiY(FypY(Fb&y;b-XWH`ST^JR>W*{`|fwed7W z3IM<_sh{K#-d`3kQ1|fv$)7nAcUhV@#{bc!`P95{@GnaAYFnuxpB0#B+|zjmfX1(v z3yl0l$6pF6SN-QmAa=)07y#hPV4mOn%j<60)5+zS{!dw41@Vp<_esC{zb*Oy2uc5C zvD0Cjr@Wg}>igFrU=tpWw+i(=X3O;c zI33J0hL-@ysgHCvtPO*G^!C)|I zX(>rX80-KB2HW@i00sCa8Xq44{@G)#h`I$!Z(y1LA1DoP%H4#)vM`4?p6mmkC-2>T za2t=ucXoDmb#--jcZ0vlf549i4jecN{x}~!efsp-vu7_}ym^U!W?Zhw6G;pOFp 
z!C-tH%?;pRbAOHR%}{8TtA7#ht}vWn~rp71h<%jg5^>y+2zzn1E=*@`ZtmyLpTtQ0h0brJ zyf-8gX?b~ht-p3{ZEbxNzrDR(9}PbT4n}38D6b@??GQXLwrp?J8w9>OXd(638V2Jj zhyL4hQBSo85KvHB^5z2v&6z$IiG^~Gec{*)wgAqg1DE9kz{8v#YqkW>_K3 z-+#P+yC-QhRJNr~Dwe_X3d8F9>TqcqRqdTKzOH9{3F7NHZVOR+V6dtM^Q}bjbuH3) z7_3}yYbI*qFbw9N^&UwC?6`W0cmM`F&4{Li!QS{H_k!hj;NT%$1mLQB78V7b!Q&7x zn6?Rl4lL*YpR;_^4@t~(ROOmeW)WVYo=a)BLYFxFshbF8}K27io zjuOtc(vnMMLQZ{Z8LPO(UQd%@bxd-Snvwe`3}%C0MG_r8P`OV1(5y}&sk47>u>IgD z+%(_Ou3C+X@6Vjyy9ah7n7I)#m*`lRp)tW`9Iq>CD11LP!^ULf^LnAeFF-x>$RPp0 zN3szYR2RJIAD@|c!Y?vo(j-;#WO(0Rn7dg~1WIgraBEy=^|PubygQ~VMV`+DE~N3f zVZi2tB{y$fgIv_xMnhAxf={igzc&7AHiosgP8H`UN_5ryo;5H9*IB?7(eSxQZNFWW zp(=yxxpKFxD4OFPJYcZ1`uPhuM==pwbFG3v-EDQRHxJ+iK@$zs0UPc@)+1m9D7t?QaX5YXWfL>*%5dT;g5Z zP^CA#yt6l`_9h9K6yJcs{FgWuyca0n&LW8yl{^jWdgQZaHbsA(W+r%qu19P>TH?hl z(n|LNeDcPGbiZO>=9Kg?d^@2L?xruj}hk+dwJ`eokWV z>Cf<8u`sFPvMCSPr38cPLwMF;1^SF?S8$v4gUun|4JNIe*d%0YDxcUdw!9p8hw5md zu^iXHX3?TZ^|kBfkZNumD!YWiYqe0r{R&mNf;lXm5SucGtv5+-J$-tfEB=3OaUYI0 z-TGuUepB^cvWSK2{dFA6YhR$?k{dT^&AVPNZ@fhk&)H`&?Q1lm)d&kQ$YEzV3(UaP zmlLDdT&BfZj%)Xg(YIdZeYhUUWnS}kV_^ItM8|T-G?f2wR{#?(dm?iNu3>WgV}$zbJ7Yg9|`~N=Lt#xup|@zbq=>&63#d;O+fO2R>=NMVaLLr(QVF z�Q!k(}=cYuLSg;a-l+? 
zV?;F+Q^nlw3Nc;B1)_y$`Bn}K`4H&4`MdV|>l7t@Tm&t+8V{{4xa!bktax33XFdu&hkFizFX;}VO!o%a5X zEjYKqyCv>tGP^<;7tx5|_ltwqs^-`az2iJRThA|ZXI1?B*e@V)8jXlEsOe!z9%RkN zLbi1hGqUwug`Z{mXJ;|4xk+cNqeK<3G377qkQlt0$_Gn57uP9)@Z#^JcU`_Tt}YQc zi%k=Mfr;sB83UwqbDuk778&0{`WMzcgVb?r5$G2tDzbT`7&5P{Ip(V<8K6j&$9o*_ ziy}#1Mo*3jW#sdGC>6OtdP#tGNw5>a;1j6WG#5Q$`C>|VU_Gxn}4~7<(pgR<1xlVJDVaPn`s`L)>~vez>-eS&%&f7Ch1rz zrX@4*c6BJ5K_&8(PbE*WXu=fr9V1~F3}0?G2HaRC?f*=d_CMJjL%36{&TY39xwYRO z&WGZQ{X~WBH2ZA{dfx#*5PFD2huYGVR=tvqGZt@e=;0g|AUm|VXW!W49j8DXV8@{=7?z*MwvVHC~ z>r~WqqH7`=*X`<9NI!#S=ttq3`kK%!Rn~x8cYBMF3E6bTE5#!xTH_V+R$sD=I(0H6w{EqW6IBn*>B&}B9E#+ z{;f#Jb|1|z0eb}mY$w7!gW0&3C+@ZGo~=-DKEK%U*8w3F0iiA15U$e;xi(ymzS|vG z95cNh8mjpS-bllh+~Es9bpZAh1r)_^jBeuc84O|sGaV7Ms%_fCEpKyGRKw|GM?IrB zNbAo&C#!XyaaA+V1AF+UKUIDM&0M|cXLE&W9-Djiqf&C6&Xyk~M6Vo$9lZ%4f`Aas zWE8eW#K~|)pU&f~wBU}6OVH}RFZ^I-0@w*U$cWj&vlQUZ27ApG?u}6JxwTzV)+ww< z*skL+Kt8?C0tN)u#3F+`eJY3a_`{e_Y)5g=sR-|bWokjulL36y zSu>LPlu+CtVJPU{UYL^?u+eEF*hVjFIhGk3uBZ-S4wyDKWc?zLM9Z=!e6zmmycF9W z_f5z^^AmBkFZTO=@(iPOIS$jF1TZJXF~~@xj*AgjPIb!K<{e;^9}PBNy8|o#1VV*& zYxruhNK5#W@Nn)ZO-fsg&r9e@|+C7@*(Tv;QSvJ%iNIXKk&h^0Wm6Ex52Z$R$HCMb{Zs~zrG&J z8D6L6E=6iJ7}`AB=Pr5!e)B;inh@LN!Z#BzN(aMJk->tJ^MA@_y=+97bGWwA!6JorX6QT9;I=J?!&T7wzHW`Y8uxSQBi@Z?O-Q{*hmrauG6-!CY()Yo# z=z}mkJsDv5$)woH{53ru+$)-J`V8(xKA5&Rgj+i@`=VmCjFv2Ai0FiS!F)p91(^FL zAXl66g7>SljcclFdGLwyPPJmHDpjAEx)ZF@yB4D*G5=JHF}Jl*GB0?Gp5 z5u6L1aKlV3t+lLN#0{~!rbHSTY-ukbnOZ%nHeX$`_^kliNXc;(uiYGwH%k@VdsAl7k? 
zVS{<3>I|&MF2C+I32q^IVA+XILB2gM|kbdPbNYwd0|G!qgkJNW8aVVQ75gnEAg z=Yi4Z)yN#i@!BNOp`LA4blkNG5G z4seC1vVK7XqNXbYiRZHRiLh?<2$75J?obZ*4w=1Y!p+Kpo*MoNsiVfrWs7@Mdol=} zy0FWUd(f)R1#h7atq|SKWg@QSE-B}^spYG_-rZXHI|y46(h4b(tDwKB3CH$->Ypjo zf;tORa$)3R_wE|vqouYLWq8*8!JA(1kgA--;jd&|cCsE2lm?oQVA(^?*vw?wfBRIn zTlD`3=*TyNWK~cUZS#2R;x4tqD6m`chhlA`Xe6=FsCzmjVcJ2-@UW4gWgR|;h+0CF^C zy!RbmiNsXUXedluk9Yq5erYAMY0qvC>K+21UxX&e_zeu*b92rg70{29B;tJDEHSBw z`z~-*`1&QZM8*Hvao{5>`}ClbokMFyNhTI#aa8YCNA+9IVPNH45HV8F-^4DaM){q@ z0>LX*=bG89$Pd=DpBtl=g({Q`k+lw@&(WT9pCe$d${5Sjl)hyplfT98+H}F{qfuf)OM;* zmEURrygwV5zV}tHqxcxxcouM-VRea@q%;S*JNaJG&IDr>&86h1VBeafPD0kCcIc{0 zYKGfo0gQ9k{$nDsz0TI0+{{G5c1Gr(yWKD-O9y5#F71Qk8C;{@7}h#uUQSb%Unn@3 zZ{3>L*N2Oh+BlVnPbn8-|#P&+t!LjWAPmjMWd23}(B}1(HbSLTo$8Si2-ofzA zVR}n};4oANv1x_KrOWVh-3hPt!)Noo)~*mk+B${SP$xh1Ehx|Y_ zXZJbSilr8FMX$0_Us-#@=VUBf?t>Vi-754Gvkh&=A(8~* z5-7;18o|-;Vo5a$k#U-NXv)PkDOvvafu`*77z?^FO~=ncmb0yO$IPYVsbHTVwM{`w zy2^|i`chf(nku2w3|&~*-sS|2x%Yn0ASTED7&KXW$*}#*Aj142v_m&p%vhfc8J6)b zmzr2l^*&s1w2uiPtPG`ZCO54$H}GhwcMo(~i(Wb9qwgOgJa7*Z`A=l;|7~&Uh?_GF zEHeWlsuBEJ{)a!r8)#v8P-lRGhWDi(*w@a$GUs-G#wbQvFC2vJc%XJ9E3L|y*UBE) zZsdW69kh>z!H$sKAHi?H(_Ad{BS*fbT`>Q`h8 zg&i$~W{-si$$1nWw5XHB<#^HgFy9x#sQ=n|1UZUASz_ZSkpNkwXp%>L<+Di z_yz=oUb&?Wi_X`gN+yg!XOtY23Psc#P^qlW4-GHjkiwDskHPT3h(Q4-n?{|1Pf@rO z(az1lkR4c7F~Ei7F{D+}9$kKsLdVH9k5E!;ph580@TlxPe@2)K>GBLYT@xe3g06eS zQdP~+DdE5c`iQ--_dH}oUZA**PspG0G2WCL80pf9(Uo1070%gP?h1k529zdQ#XLsL z{1}xO0A^Ph;tv*8S}HbMVc^fd;8RUKN4+1mp#!mr9dkmArGd}x6XFf5yc8m%F*@$& zYB5rzZDmjhsU}8u2c|s$QL`S&3UUDzpGqAwE8NZbA)SJuSxp#T7J?!QiWk~|8vo6E zI6&9*Tx{19xc9)EFytUKvCG8HYCA>(1gv++(We9!b!ZG@%)AGN*9B*z#-cziy*AJf zh#x0J$xu`zpWkV(PMh-}kJ)|35gqqTl4+nhkP^0ZnjCq)=uDB2LkvxOq_gd6n;ra8 zYOxegnRCZH*bRzxVSv=m=xEDt(HtNGhstFyYY;Ekx!*QHfM@|A&k<}!25Gobec0P_ z>1Wb$7|aQZp7$(B#dx!Y-g&cH8Q@1n&RAa7BVK^a%NJ2K*q#EuyvqzdKZgD4v;E+0 zk3BOj?EQ7H!+tQ&Vqbkm(&(^o{@!vpWESv9;%a8ofuW+d37Gpq$R?E|hwKd^n9Ib% zCu_U18i3?og&wU%Y$2LK^LHS98ixqk2fO+VB2WNIGefsbCnxC6v0vHKejH*>HiOD| 
z*flgi#nag3AQ?a&XzI=?T=(+!OrZz!WOxBcb_6iUh-Wle4J;{B*J17@5@arfjUN4? zFutcel58RgH)9VpzMulFi>4?3NUQmeU_8R<4WPu#7&gEuy%h#K1i=DX518>8fHIQ- zD1vQ=EP(HSU;&IkfHvZob%dqHG5^sOIP`!yB;>$M@kG@d9A>^45!PdN0I&{BVnDw7 zac$$8*wnp=`aj+Y+3%q!kAzeOL|hHuu%npe@7B^Qa$38omjLGdD7GSKpozC^Rb;yj zwB8}Tgi^2ePoBLK?o*X_qp1MD>B-VWu#%Gnfl8<4wZU6j6tE=V zj2+8t@H?|9$yGPr6-%>+j4%jM6pkTjf@dSHPfaiy5fgOjGCujjzjZMJ3REnU0WpYD1Six5vIl?6NvLl^=5>P`hKsP7K zF!Tm8_#21~MWFMEVw_z>10$p^Cvrqk!te<29ko9&ahgGg7hu<~XQSj?dpP#Nc^HdIiEjXi(?USNe9hBE!4gfbsA<~C zb}`!ve@TqoSKP*$h8IYhAz=rhNIU^+$P7%809kmr}-0H{IfY_Mg1lrX3o?GzyXKREjR z-w_!pJtIUABEg)XU>0{z7Tvssx8;cT&Yz9Ow2AJKr~j$h`a{u}m%3owA| zI^&YRS&oFBNe{c@^9wkoYc_EZ$I0~;I_rm{>RRh)Lhk9%-=U-)U=~>GZbTS7T}px& zx?8s?=*f4!<1D*=Kwx8Uxh7=o00AC@=OxW3J39ZY@8E9J34c#~vw|iRo(`S*4Y}_% zXI5q{kmN zRlBb%k~n}K9wzgoWAsia{{pdq4d(6wl^}x_q^|qF!<+{zsG$`Rz=W8^)4rN(TD%Cp z+p*A5sM3BePf06>iZni0rWY_#XU+oaaEG-;G_;ZF@v!`$k1na zRmXCx32wXF;&ul@b(84rpmDNvGoaKw@vTAkfScQu<7qylz9PG=K0tG?K_X5eHQ2Ex zkrU%Q&ipOj^G8R-ZXfO{*mUhAnh$U?cJN1yDT4iW*%SXZG zV`y_7BC4BrgkV*G3?8TqsUPJQ&id83K1EjexrL$3v^Xmgpj}m+;8|%WHSHC9dg6-1St;gs`=L-y`VHA4cK&Kq{VDU&+fML;>j;cZ zxg(Z=q;qe#FAi0R06g^*$4A!+TPLu2w?Bc_VzFf`g-jJ^cqj93&3QpIp=+@KpIUp{ z7<7NJU+VL$Hubnfk-LnGhRoCfP{>)8DRkEyw}|XOvIe_+Wz(MfhUVk`)#Zm4*%5Jy zeoY7Sg{{FFE8VIWGglgU#sbhGyR!m2MT7uUZ&a91*H1c~G_<5+@XMJiEIf%DCoF#J z=XcA)vGB-)jQwYOLQxHMpJkap;@YE)7cT!c6W|+&`rc!ZojZzE;^q3>b6Z^Fmc+k; zxR$?QC#Z@Xf3ph4ktsc@0>t%F)gS!c_3x=cuyCBSkHrW3)vx)!1IykR+w}UQ3Tqc` zfWz+=NuU5K0{MYWVtgvfs1^~$liu9HT87pgBDdlxVQr9DwxbCeRh7H#dGdFxn9o(U z2)7M2*!yC)CaC1o3YC2BgIQEyyTxb>a+F*)m5w(e+%EF(h28BPF!mmA9vJ_SrMngF zm2Xn%loyOD?MP8@A?b#VVsnOW@0_m=%&%us;SlS()kG;wHyH2j8_}e?e2kW0c^1eL zlz_YSL~ZSZU6^CxlUy8+vTep+b8D$k%R5NGRt?`EEcI|elq8Su-eB5%Oh*F4$uSI_ zPv84j=hsTy`kn7{4G7}OQYIp46D_h@(Dz$IobLu4jaJpTNceBT_I9mFQKHI5`L{?^?$ss%nphUQ0_BMza!!}D2jsMjUz zcbN%CH4s+%(!Id6r^2YJSE@py{qOT1ATGsnFr>-T8;UNnWZ8Ga9fD@*e7<2lYu~?nn7Y*1nQ4D^IX_i7sd?cusI4S*`_JFbgI?!{O*&Fjk~Qk+u!{l&>h@^_WGHt 
zK6DA6ei36AJ?ls`_#l@>a5{ceENZ5{a$)T;V6#>y&*(ib$49%pcMuRQ4BgYPO}*&X zOS(VMYoF5s%#HffkK7pK)cdZ%Tf$bSXDb8R7G*_bybg(L{P`H&g0^3Bl=ngQwHAJV zy}mh)#-*oLt}W@7w2i%GB>roCqgdffn_#q_jAbM$SvR-9LCKzxbARuVqk$kG9c^H) zwsc^zTwYXTKqpHK8UAYYw(+5@>dy%Q@L>82^$FvGu7pX$EPgK{oaNloQf9H{m0;Q|*LaOqm6Sm1 z_4KtGFuafkR1YfO_%c3pQ*PCdD@^M^hUop46wocy`6mbCie8=7m-wT=^rfNQZuAKZ zxhdR0)wces3)iBM4!6Or%_BVI$^=x3n)OR^O{%ha8WDjkaVzQnF45WNqz8kwLNgjr z*!QNSn7(RjQ8vENj^fYoHMKrKIC+X^DN$2K0QB23*&*!AKpcO)Cf4YHC5xHeH(e2! zb|X2%(h>H(l8huigZmpq^!TNLPy%wrc?(iMzHGr~=RIjaR7wS$6zGtF+}9{~C?)}x ziGj>I5|kO1PJ*BQeVDXO5G3|(E52^9@-|?$_~hZ&%F?PAhW702`JWH){|kmwyKlkK z>K8NuP%!OnaNc$|_~S-7JnjaZ9QQuhMoXqw62SQVPAv21Z0qJ2R>7B1n!|;H9Mgj5 zRtq%=f=*3czrsgw=$-Q4|F6gSFQCXjApS0}|4cvsGYS6Rd@9}_BqUl}=(()`t}{JZ zpctf^Tj=sAP0aS{C86zv!<6nN0_yP91-H(kZ4|xl%*qUiWsI6Fl3I=C@=?6D^B;!` z*~SKJz}soW1Hv0iNUM~j@`dE%?Q5#lYV791Vi$Ixz@S;;P4H_%NO?}ntB&;~KdyQf z8<`@)Mi|#o*jXsVoB@>!Z254minFqcvXQIy{CP55ACOr|6R;99f*;atLiLZ=Za9** zNO^^CJ-P;qQZmpnw1kDX!z@kE#9~tL)*3@{>l)`I&7SQnT0=W%*Pul`+t*OpJ zaUgJ=UjQ+xAAW%RqdPRHq11@5Hz>5PsP}wR&=GUbYD*4`23~}`3<=5VqOeJGzNOqU zjv|JP@J^e12$cHnMwX2G>la1m+24?BD1!zLp)BA+B#JO8(`1vyGi)$}IU@ZpzvyMV z*^W%f3P-<0r0qh3(p>**78`@ zf;Y{B@KphgAM&Tu!CQ~=yL`QHzua0$KugN`{nU-~SXALC5D5_vL zpAio|2rWnWBMEQ}ZOsk{;2-hOfF_U(qGgdf*PIIUnkN@#&$VP!02T#WPVUT5)L`C- zhzzzqxSvFE%T)osIy_+3C_b*)17#c(-%{U@Tv{*mv0hoqx3`FHO z%G=y?oiOv9g+m)^=ZT+Se4@6cx!euk^yNAOeJl9o2fV-U@Xm|h+MB9BZ2 zP@_;yxRAxyFJ5TTq8y4qCEjeuJFhtY21P*n2)!`dpd@$yyN;IUA4D}Nc97xv#SnlC zGQ{K_pE8c3g~s+qmr(cI%N?<^--D(s_k#*$R%pVVuQ$6YhUH&wum|Rf>$dL@>aPVx zAg6Bn!_$-4ujZ6rA5Zg&wxG}|O~M0`YzumSFp>?W1_?o^Q*Y@fEzXu@3D}!Ai3VI_ z5nd#bN>u~J?Uke&qBMUrfRZX%O2BKR&J4&ygl;_w0VaGYb9I#P3dxomFto5+j=Xpun6&qsjsbAJ@W z@%kmJMGM~Q5g}Svf)!G{Mk=!;J)`ZbeW;moeAe41XJsg4`&dYQevc-#t+w7U*wO4# zW3n?Jb3I~RoGrF&Wy-2s0lYJJ&F!2}Mfz{J=&YMLj1E>PmCb&-8Ef-HtYEs;&w8Lu zs0eoy13PyG*iG}~b`Wvk?$i{4)#>tf9<_?PR@ zIQ;>l6nOVM;T4%|+5V`ja%3re3tF+yKrjechP^_dyL+5WBE54M?a==k(9u-en^9}a z8HFa(w7;-;4mXK8ecx~C=r6=?(t>_8#edZ&Stt1FRN3T!Tf6?;ULtN0i)sSh;r 
z7F_x-Kzkr%XY)Yr8lY$@$o^m{{{irSng+*e5N?|)CF>c5<61BOh5TUP@C8K;7c+`~ zhgp#40J0Apht%HREi>RbDBkWm3(*~pvh8pi@+ph%5XiUx4*;&K@B10-)?c(8=TikP zI{6Dy>j@6n5x@W4qo8gIe8gW*07vz&dn={FwuZG9XUoPvH2({v`+M|WF-d`z2K4X) z6!`o7h0E?h*v!ZxmmIrumXkNYK?TUiZNI9T&UL{{^zemyD?G zJIHx{*o(UbpSZIl-qFI}A9`8ijRIf4)_5KpwjnW zg;spY-j!XSx6So$2MgLvg%SS`?)~4CB>$iV1$6(z<@#Tl$n#{*Ud1N>G;E4jMd2}zY$>%`x_|AzV{z3;wUjV{xy*10b%)PgH8F)Eck!4O4sVv!mZc6IYmu-z%+K!gvhJENsJ?yf@g)Z zK#ke~>A&q0peqvUNNX=J1YK3+3ko-X7pO6fA&KETy$;Shch$^N{*5#>(2zfE{j90PFy{ycvS3%w5$G`AytJC50>OaY`=dl}!(nh85i~LlTX_izOa%uIb%J-O zt6%&<>yrQL_8wDEuEJBksPgO`IhP_J{9}hJpi=Y+JivSW#Mi@@@|28*FEH$o58QI| zthE85a^WJ=)QyRULlr*1M&e;-0U*6g(t}D~Z4y0FfxEXLL8}$so`|%O(s*x~_}fiB z&&KF_aw^JL3^+>^7?-D2$%E@Ke||6rKle>keab&mj)`MeD8Y?iE#E-DR__IhnWw|5 zcJ>tw-|jm*J3n|K%cp-zP#w-gCAU%#o@6~zSK*E8_s;@XT4lz(0u3u;>BBygqrqxn z?30JNmW*l(XyOO_)xou7c&N%)WWyi9+$ZbqfE^1#M~Iq~3O4;mG}xnX7j%9Qgd`a z`^=>jz)VqCmnUe!TDrtF#~gs?4WMR*o8BzB9m!aregWNFm5Fu1AzlQJ5FNAB#fm2~ zbxTy!`boBF!_=&NUgk#`A1lWu(FM+$e2lm5y4--T&L~$dm1((5-tz(dOLeide%s7D@UFa91nhEqO6gic zaC2a=fZMS@BeV`t){8&RP;;Bel; z?j9AGYy1WVQNI!WvVS#}94WJT_upteKz3 zz&r+ul*Spp*HjW$=M#E+xNSasetUmm>Y=4#abc^S61&?U75IZK9|uRP>@u=f{mdc< zz$l3tHM^JDdUYDJ{WIaBQsg+3{9|^EiCUpu*T|Zoi*9K+xc#m%Wk`~XZDLq`>|$|` z@Yv6)$e$&BT6KoD)lS3%M_2mLdajS{7%nm1i8xMRXRv(0zvbMWGjg2y3hpXeuTyUn z`|w()bZ$YdEU*0iJdcrRlV^{6946UL5pj8&U?L**;cHXi)(M{bo*9d#X53CQ1>>3Jfj5b`^cpi3p`V35_ym$M^JS^6 za1nafxKt6ByAY+q7I3+%1_=S5OugH)G~#{;qdS*0tuu~-fsz|PmS;~)#Sc{fc&I8% zYx~gzZaMc69UwHh|5%6zGTRuu9Ti+~WGvG#F;VydO%ZlPf~%{I>Ij7H5a`aA;-`OR z6<*j#S;b1|?;}tWU;y8{PZKjam)bqE2;3>O3NJ;UB&puj^9BH|7Nn%OI#|Zt zxkhodSC(GOn#c4}vyl#33j zf6jjNGf1-HNRv;{cBSg+GWzU|%R{-5XT`X=>BnlF=CZoEEAFVXB{ zj;dIqZ}!L#;Jto0(x2Vy%?p~(VDu6%`gQI(i#1Jw`Hb=B&1jR_MiTK_p+wc#Bl`F| zQ`qVc36C9vZM9x+ZdippOrEWsyb4~ZYK>W|h%5N!CK$?HV&%$6JWwjeb+=$?g2pge zs~{51{xHMxKFg088bV?FCC<9NcbBxdNu@PiH+y;&NYv|8ot%L+vYXR{uIXOt8^k*q z=svsUr()K8Rgs%PC!ar{+8PPX;GS4ewLN={B0E`Cg|>8JrWegN!YX+4y4sC~b1W(B 
zw}l$dWCizX^ixb1N`FbEfhiVF^-Bg#Uv57iaT)W;m3~9r7U%LhZ7>Mg^4L{!jOjwq zcUP@`3NaCm1r)lvZaOoz>%%!P0GJ(d2qh-!a3hL+=Onm(wJ@zTv!;*QnwqDIGS<~I z*Fq-p%3g4EpX}vI7R*|V6mU4R-%q8h@-o#{Ym(}RR&)tvm|t(dR(9sq`!<;v3oeQm z8})Heac_OuyI#xLQ(y_ii}7_xcCRJUVC$6r|R*^X!SK4mbE7a+E3V96{J2P< zC{84;(S20XgdZ$wlwDVE5}T6E2p@Qf%wASKgF2*|Twg2O7iBb3;4#xTHYv!!7eGJw zL4IS3gWLBhVNN6CA+Kb-qcE+He7uk?PjxK6bik(dZt z=WdP5acr&*edw>6JrfAt$@4+De9*tC)~2-)D-|<*>_cN|ZfQtS?h4+?y|#{Z;Gx^u zmAEyoh1C9ixO3|dbg6^YdNQf>nSXw7w7Q>X;oziaMU(Rl!TIDN9a7zxDQPM_z27P_ z71{E+Nd1guarJGl7LKv}bC~G@+Dr9^o$lt(V5{Yi$1nu-S-6@SJg((7TptU+>2g%x zAi$obKg8Nyr6^W}R%BDPN8z%)^m=XKLgA%$`3x@aV&fv$lG^U2P@XS%@hWro3B8`F z+WelibG`Yd?A64^z&*o1J5oR8Zk@KM=5&?^drp{mExXGBO0cj-oxc&gRdA7uFO!e}H`G+qC8CM63B8C0{G>%!f|Jq_h^!>^C{O zK*cyexlqMJ@n8X8jV~|^8o(i<1ne%6iZ(dG^`#@~Blbd)Z z*fZ+G({P|Dw$mq5WjS6c%_YwXwcWkHXFJYlEvxx!1`NFLz7_UK$<&a8W2i&%Ad86M zE63{}-8?mT-CIv~8|aOh*n-H}!0e!=Cbvn&OEteu4&IXtj09|s<9d%`fXnC`N>M`O z3$-G%=Zlk3HmfxkpNtqPt3CX#6f>)@Ban04Lu7 z^_AC+aL<}z)Mn!F7w_|#;$_&7`?6y`BH=YRmM6blFzF^qxThth%;@mmK+j&Dp8wGQ zU=`eNs%`6_L?8EbJ+wH+-?x&M> zj=6kFn|jM;4CrrFY#DR8#nQ*k>z@hNJrXLTM(dSH$4?nhUTP%u8@=4vo8dL%cq&6| z{H*yxvYFbj%DUh5kgHOxq`y^0@WhwCuq9;tZ1xJ5m(Or+={V@cYdf{HO??34rs5Ug z{V~y7c|!P%Ie*z93yaMjICXN6Nm9&IQSzIgCVb);JnK{DRfVe81iICg?5=uO`i!M~ zIhz8*1}XpUO09}K>gevVk~P$Kg}#<^Cczom8*yVzjW&lpKjk*`rDPs^Vik6imniip zCCE@ru0crIab$xZyh(n+>9x~+iDNe+xa#iRWAEWOMLJ0Y%8-AtM>mt{e620lNd_Xx zCd1rRJb&zxd~~*@_A^EqqB-dbZs2~u&;wipqA!JtG4njDI@*dk?a-ySdp3v$QPWyc zX`^FwPO{1)-)iW(&~fWUKTxVh^gZwHJye}#%G>zk$-U7gV2)#Fc;or!(>G;A1J<90 zy+P^(q|qEu!=9kwVo{#T$ZuE7=xxhY{e>$j%LyNJ_^d7B1;e2!Jt1V0q6q2#){?YcRrBw54R)qBQRA_jj;Cj{!B~S3=*8)EF`p9TOudnGy#Yi{YjZ8;^ z`s?B5eEdtlpE|*P#1VYZ5p7vhm%Pv@)S`h;_hZ|@wf(-A8jvB?)P7y=vLRP)$Wn7DYs~F zh)}Lw${=Kt-Z_mVi(*z3&SO1h+dZjqeIz2pjxnAiA;F!fTubUrPOVP@>gY|!1t=A{tgt9~)gxr+-pnZrR( zSE)m6;Mhmg%fdNmLKUN-LTEDgTg@E-Z$!(T2yKm^c+PNSO79?z?Ta4#C6kVfcXIo9 zesF)UMOYnurk_|lcKx{`C<7oKb%T(No_OEg($;@!K;4Qu-1=uB56}7?mrWl zK;r+^zl1F{hqO!LeByXtouu6>OeFI#XbZ>(5!HC&o$o 
zMP7K$5J*yr7NqVK_6D5c2tiFB+c4bI6I#XX=4kCtqo2AHYmDh_r^3%HT~ZMVpX@&J z(l&cKoW#|Kzoh4w_U0%V+w5?DM}9=&(b>(_uW-KJFtce1sSPYPhWL1CJbIHe*)civu|Z_>J`p*B*&l;P~hpN!~l@)?qO4c(f>&BK12kA30I zJb5N)AJ^b22Eo$A8Gqw^gU5yi*0`7E;fL{J2s_z?v8U^Gvq+tuxy!zvj-P5w_{o2> z!Zk6vEc`*|!$BFcn2ectHR(Ftb$P}VRBz!>9_W(>Zhhvzto;4JIf{WAL?~Qu^iH+q zcHN!vhsw**$5&;}&)5vP3H2!T%_y1Uc|&+6agf#9A9v(BPokS|DklZrG@gPJ&GwYV zM?-VlU*C9t?s<-BwJDbKJf(+S;{cukI*(-mp2pk)2Y8hYr6H;4Lw_@ymsuF(O+p^q z@Q;0q{SsvstAM_J@mFl5QD9JQSZl4u=(JD$4keYk&}Z-t-L_#O&38oMkyW3!47K;O z!z*rA1V-}X>U=pk^NPHk4DO36ew|=GwJIYMb%JpkzvOV@O&dZDt%2HKs6xwsRzKR3 zuTr^lX@8Wz` zs{m=?bORzvGjTj_YSsd@2* zK8({eDjLj;J;$!T>(DpDT&coEwZjd=Hln&$J(>Xy8S556I39GJN7lLWzm_guMU z)=ilukLOJ44heha28{5FaE`N_QWhfGV3QHW3{t6ZlN)&ku{?oFrOBjNP6`}bl|Fk?q?d$hC8E17g?8R$B$zUzCGB$kw9Ok?zZzd1 zY#x~PGiH+?mY8RVpc+w;Em589SGtIi&c+CNx z^`gJ5tJD89obY7*b9`Pp?|C<}hc%Ef@6?B|OyuZMzc+A~Mr$}ITqH=<8@t#ux5-`rhGVLPy2mo zHnD>kTPwALs=zXzNYSc`3TKkZWZ z)wky@*YhJkSh(W$RHB39s^)A4v*tUT3UMAP{CZOzrsELnb?kNLRxY2E@4Gh{Pt_&% zr8|FlJDBgLqn`)3%;hL2`wQQJ(aW`oEaAfBaMwKBzJH~YOkK2l80DO3%^gC=;3hDUmzSN6BleRuWryZ1sq@5pC{ANBBsspq<05!!2)q_$93Jw zKTl-J(4lI=zpIew{W7+I@mwU=s4YG}je*p^tep~6=;ifk=^brZetm=n?YXQ(8Fp}Y zssieJb>c*$v2b;wBWc_tO`oKVIOCtPKf$o*PQ?Di=ndpk3Q!(!75Z^QYf6RC<6 zI=gPxVgk;$SRV;RnJ^e{T=DQ{7+Q+3WQ*7A|*2=_umw_s-ILMg>|AU zJFd46eq9n>OfOrTrZ-RS*KzAe@D{ACZl8%4RY!Aj8LT{*1lRG5SkEsXHYaKGs_v)B zfWJ0%&frU5X!W+;|GqwB(snjMa)wn<}T*=&k7b z>G9Dgw)XM=tG%~=i?VCmhE-HlN~EL)5v4;xxKr&Z zMvp$}w^eje;1B!7#=D)$4iK`I?E&S(=}PeilZoQk{jKUY=dcR1+cyTbdKe(g6)c;%S+y^?oU z7!?O$-p#Z+Za#|}-6_m4A+*YiTp2b!E;kouJ%8Ud*fZ}(DSG{Dv{MubG(MQVwd7Er z%AJ2TW#(m0YtgfFMD9^5diFTd4P@^ga`)lK9ZhVYBHp0!8v{^_9C8ij>PT?qgFoL} zh>Pyv82;GiPuM)L+N6V`xr?{PvG{tWgGFJrp0#N%MalY<{p2?P^8Mz=OEmUvhl7RU z7k})P5~1PTLRQ!Ayx<;;oKjJp3*&zrD!fxgGel?79&Z?rg~_#J^cf^z{1uoyys!6m zcgWJzuY!bBw?4HfdhYBRYzlHbOBV3bUsl>zq&b`}q@lG!fW2^z$tBV-8AT4F1FwA4 z%JTnEqvg2IOV8%Je`ctEw8dYl+J|6cbsEa#wp z0eg#f0^HVlr8xPXAUtKI^Y_hqhw)8n(nvi>bypgT!8l^4@Bb3c36U%j48`N^f!b(~ 
zL>ODp;GL6Uwj`vN1cF+XfhkAd~?ySyr<4Y$qk-Gv8pu0q`aYmoWY;= zoU+#nbE@ZDVfA{fq*vQ-Hc`f-l7|i(!87Wk%hx0{$C#e&MPr@nq}%|9eRosA2J{F? zli8(bk@IP{LZ&wxkaT;lY9KX!iag4ge1R>N3U#X$W)a=k_+IsL`=I4&@2KTh9U=F( zJD%)mSN7Ix_H~^Z1m<|i+d_5zX#-@zEX}F8$bo(5^Zi=Ku+=B?!FBDMYu8l@y6f|ITxCq6w=@z-U7d z-R)0?fVX}kE1OnGG=j^k@7d~0*j>RdLU-N+9U@A)ojcQGrmtH43=$(ImI=4;fl!dj zGTaLhTSItQ5<&S1X}BFh-bFV_4aBa178)K3XHKWQ%_#QbmP_t`ih(q7B1zDcbswd% z))SR{>5Fb4gysTN^Tjiwy9`Fwv_bl!CF6#hV;h<@MQXwoV*da0sPn$749i?^bU1FL zbg5t$-9Nz815C0IY;a8^N*jn6{rlbQdG>XZku>j)+ATAnkgDGAX6<(km5>MRtwN8z z>|ta=RNH5EJfdr4I;DcWnPmef@TVVum zOVFd@62#7*VBq!O?X8drAl`V-lAr;Q$q9CO zoz$l&;(V(OCdxeSIWd=-Y(%8gUTz*PkZ#bg*gJW88XPuROmJX?rDZE4$iajBoF*I_ zj8#zrOPtKozdR65pfJq z(gjv%j_^s5j_s^ylh>8^FSP{0n)%{WW;Xx%k%*mg)Ec5nxi(a-@k?L;wP94NC*sb= zF)Za+fHg@tVO~%JCE^(7F=lg`xvv?7BU~^BRpIpgC9XazW}exbABwgUNJ={%ci2ap z<_E+^Iv`Lljg-(`MV3j`tHJH3XjrLrH9Em;Wsg-HD~0&T*2_!V^D?sno7%>ap>xsX zc=+oZV!qC9yPCc=Gkt3sj{ew|>MIhPcI!u=c%aN*ApWLVXSJ(JH&ZH3&pd9PR_rghP#L{%Ax2HQ=@jQq^Qe`;MD zn8VXgS8Z;OEihQxRHC-`y`CiN7x_Wui4G>B-K;Zp&&3FFKC^wNif0UCB>bFe2T}gg zB-^jP!?x0$S6sNtsL0eT_!Oob+9iuj!QV=~?4on9oOQJ0R+frZVmW??`f?o_l`;Fw zY^r9MP$5pBbH?&+Brxy@`_ZBu@jGKKS z-9h0Bh3!RwQ1|Ud%}y|rfyp?x*+!al`0ZiK&YD5OdzRvMdtIGW_aS`#t$x z)kJzGgx#-BbZ1i_lZi(^wA`Iu9jX`WS?}C^!XFX-R0CNN?AGhaVqtIPpB97LXI`&6 zE7ztILkgLtP)MY7D;Wvo!)TtmA4bJ0p2MXnq^nz7R&!?NY11@7a@+9LiB&?>mk&P` zKS8k~7WHILfFk9-Z{PdEDuGR)mH}S#Tq9U3b?aLoul-NqIQz`Y0?*i>5>+yWpjkRl zyy(jL(;*HO5?Y4L%ip;NOlvV;XE4!5vPZi|<}sP=L$1YB#c?b1`E;Ho?iWS;FJ%%p zJA(6Fq9}$)KW#8>sE3O(laI;%wlPSF;EJuYNC?dnUM4It((G+Si1XjabKE-jzwD!% z7sfzjMDlv+9Lh(5a-yY;M;x*A8y|PnDFFXWXx-SyIqoT=5g}vIeQ@6u!fL-MdGt|I zRqz7_n@Z9F=P@$0U0G=WL(Wy~SelN$t(eznkk>;?nJ4|Rr$Z`h2IlNrs~XDuy>4W3 zS$Io=jhcb2vOSd=PTbL zdk3{Zc|B4SKS1FSj$I`&cV|D#-Md-)Y0Km;+kj{u$X$; z?72{4r70s*|0c8y$$Uq!k$%>_N&Y+fBz{RpzL6KNynPZ?>mLAo@YX`*-d<3>Rhb1l# z>Vvr*oQ8FTF?O`r{TTKOha+QluES_)Qv;9X)17hDk1BqUTj(B!*%yDA#V???0RDqjlx6g)UbrghbA{_s|V5e30*Wyq1y_xf$YpR5|^M z9RUpcd#kS%MWHsUL0>=bYWrda0Zc+^{}D&1s5qJUUBrUu@3SbMWC>NfJT4_eLXia( 
z*cU?K#OE>F`PE`;fqe&E)!mjU^TsbaL-9(6t-_nyq`@C`(Sh4B4xnbpOX+MHQEvr0 zX@-=s%mAFcJ7BB=r|J4g_sSfh=ScSmQ9+dqDO#V>3C$M)-L?3pBh~~KwxNmdr(7z+ z?l7dCFEvU2cRSErD}{BmtG;~cE>pM+_Yvb*2fIN}l<0|tB!Vqm#v>4Q*X+Ez&LflR z+~vr(FLjmBlA7|x%bvCO%;A%A-?!5(IAVF3Lq*Usu+o0pXR;lk%eb|s*aXEFa(zi! z_L`7_Sp`DKvZQD+X1EP3j{!{C4W)EWgZ)gtH6XfTM=gZq z%+Pgv92N0|iI?5kA)cT0kxvCZ5#m^E?MRFXE?N}$#?fhr->XhG`%5(j9uavtZUKTj z(s{&KFIc3T!}X2)R_93BU_VoF+_zan3vFyKQA0R9=ZVr={_<4B=g-mrKdJGxbv-U? z=$(>Kxy(4B3^CDW$DejEiE`*5v5l0OW-^HyWty!1^Ie-&I2l*nfsxdcQ11&~%jMr$ z74A+*)V1!Nf-NTo1ZbNfyo-W=#C2;<0gFxJ`dChXKlOBYxd`x!KcAc>R}DK7-JZ@% z$PDgZA5pL}QA4f}C6t!Vm!%11c-29Kn>`3};7IQ6xZ?7vWkPB*R8Dj8m|H)vDlXD z|AYr~ZGQeI)#wb!sUsbky9!QLUswZoVsb;WV8Tq)I?w6k8i^4#YEG6azlW?Xls zC`21R4o6C7@psuKM%OlMvsY(r|Gt;h2=p{Jdfi~SbZG=@(q`?C9e^H(HvriyeFJm| z;hCeO@%Or{G;NmXZVeF3uxQ3W-h9w|_-wJx?Ob7Lo4M}9cIf7Z*X=v#{mlP~^=lKa z7=A#YRMr+#UkpUnbD6Ar6?4#xWB*AsP82-L%i9@_*h`( zopHRQBHE}ak({@lt3=aGTx|d7aHVrr19{9OQb0w!YqLz~Q2}v9#KatqtiAvEACznd zhAB3OG3NTm@zOmnrhzNVCj)i?tV4Y}`6xHt6?4%RMex(1Hn`^c#U- z;mw2OtdhkDX@u->ginV3Y{%}-ZrEG4%#0)q;RMSg&d-`AHOagmcs6rjwlY9IGBLQP z@{Qy9v3GKDV0xs%4QQOCyiHSbi*}qg8*Qcs-sdt0FQleJyeD}o*gTS?m$SxE?{|3- z16lEg{tOAW?g!$F&o1=)>(5dSNg?RFyb? z`{OlxfWNhx$VxJ%tVxnP)(31=?%TlOd=LVOPOh339XGt!)B$kV)L6VG#G8F?vmC! z>to5b98cB_h8^FDp%T0h7Uy|Kcd+xttB($&5~&z-szO~g`dLeNTy;n*YzC`4KVRzS zXDvcxE_3*aIHxw1&`gfv8=AAR+W&=Y@igzFN_A$*R%>+%BgDsU3OvBJPB2T5UXEX_ z=I@azmhS#wKUx5?vaE6KiRH@uP`bBH7Bq?Fm2Jv5)FoRLE3Djw4z7CM{8km6wxS2O z3>A9LPd|k97AVQJ%^d6+lC?Ne2R8|q%2De@Ctjfg3kj!`UMimGDC~ZBj44p(fj4_! z{{DdNoJp=x`{JtNNLor-hJXL%-5UxD&*o*9{OSDtcp$5dn$t3o>ws9kIHML+Z5lBo_ed2 zG*SCq=T@@czT=Wmudo`Na2EDL8`D~{SVwlv_V@alGV8sXa3`-3nt8(QvEzsapoE z>C0NP)CxDsG@f|P=_eJYC{U0yjV`(*HOqR))VKoAS236L&_uFqfYgxk5`bqdnE;x#Q$WL zqD#ClJFisO`+gy|s}xf(r*vYEViKm26O))X;gg-%{4Wi8_tsAxY8wWz;3J~_J5m9> zN`MPACXa99d4c5W2^#1JUbO3!FF?N23(R=!-RwMf&t~}dsCToQU_|-(AMBJw>-QE8 zNoA`vDtv8&>aN1+#9&X6y>tPXJ)d&zt~-6m4KNaSUr6;5f3tf9R`Hh46pLS_-9Rg! 
zdUC0p^}0KChl+Gw+B$IwAt(Ri3wVlWy^1$iee3Q-*hOEU&c zi_@^e3EJ-3eCfQd_v@hF_wtBkB@^`0Z`WC3(Ba4^4hCdux$AAvhqz|O1m8dE`;oRt zHvBblK)@Z5rV%|_K2ybG)|E!JN;pLwHNYNQ8=04#WFE#YkW(_3Rlec(1B-v;m4~t| zEDKV=Hc5|VYqp{36mxEC4uXv|R;R~A$Hrs^3#xH0J4;hfl(XqRT7YX_2C`HJjU;J@sp92uX|8++!y_WFfW(xYY- zOLbedMZ2U`Aw*C`;RV-0AYa_}$#?$m`3a`}K%7d_wAkxC&HIOYqDoVUPp{~^WJ(O8 zR*)ScF+IEDCASdNE~dWC?#TBL!x9BD#JK9SmFy$sSf#%z))tm$d&%xN?EID!VhfXa z6tweG^D2a#K$n$Vb6;ax_01R6hrJMEqYT8|D#+Hep_0S;b70>%(G7>vK8|-E+1!+!1!m>Ts`r17X0m0+U;Bl z-j^oz7Uj;WaEOmOuR^n|Y@rffpRxt$HM`T$bmDF@B!~3BDnkcajwQ3byS?O`J)HYy zqgv+%cFdl>O%@(n#`*i`&alQ95f!MT*U+}$==7gG3PU&QcN9E_0UxrfOym@L0TuycNx=XJ zYJw-uGp;4*X%1rIyO;i<+Icqox0<;%%Jx)ZhgRDeHw2=9H}8^kt~v_+Oo1Nwg<56JV$=E_+-I6K=Mz&r+`CcJn5?lI%$g(=Q}FW-}K1)wuObu(%B7jM7{&5O^(;#H~4f2b)N9cSkDkw zygZd&+{?nKvzZB=PHZxXuQ!+uz&z3Q2v&+tO7dCN>HiQiN1i-l_mUx*@(lHF0G_+A z+cB!ChNy|<&Cv<=@`FGv?%!2n)jGp2!r#0!rkeQg?G=hFMzm@fUIwqg%QF(Fdjfh% zL-{gZey`%8M2V~r4p^GF9?3|mT|SQe@@f1DTW!0wTZLqWKmaFJkkBturaZpQcX4_b z)ql^8DNWhDX4BWCLDQ0$Pk?A9R|nbEDDVv=6+sG=di&{i%38fE%45XM16K$sahLS5 ztjdEZZvPW5|WvO2dvG2c!xEE704oFjGp+6UAJbEd$Kl z;17*ld|@wtwUIWf`Ta)t0I;0$?Kj@mYi_5GYk}a+mCT;{acEO7fvIppLlwJS|e z$kj=g;&7Q#$~+-IR*2soSJN&(TTaE2LI=7k^@}NAuqKQOm_Y!lYQB~-Kw5Em2WiJ# z|K1w=S>B@yP2-1a8Y89Tzx(OtYJIm~D1ZI6{Hs~=hSM>e2hQF$HMDbit0Rt}w^XO- zM@jI_bfo26lpUQlai3m<2>m+FilDA~pTH^38b0aTnRudF;0dS zW4myv-}`V)O~Eg)UI=nU%Cqfwa`W4vM42wW%?oXGF^N;pZl{h+s~d4OI6@xazxF4t zagWCJS5djT?=#_OtQnO+YSWQEN{-gS?kB8+kR4SQ-oV9KYzZPZJT35BO~m_q?{NQfr1Nf#b=72d+B~XXTy_+Lm;V}qwMjrL#cp}=4L<5E zrtmIIvDU}V?1lva0~gT4U2VAVkZ7{g?3Y=LYbD9s=0G{3GA`ys=?#Xrr1R<9U zNx{jk3maq$vV#h-s|oETvZG9I2AUgbjkwdrx6nkt8+P!X9=)0X)YjLi3lV(sZ7r!4 z<^;pB6-f?bSb>-bz0l{mh#NB5Ig7?PlW1@5NCIT?6}60rrAW3xIPTn(gk9ik)9w2K ze(4>Ed6tt;2>bk_-H%N7p=mF%w__U{pa;_f3DmO_*xeprqpFLbpiB0@X3bjhxAcL~ zi|n(B<0xs!l^upz2LiUG@}M5jPc_^QPA~1!#SOcQX1M1yoh=7BbgYJeQ|C?6i*6wx zqB$`3NMIWMp^)S{2TiSR-N;U$R}Y< z-AGfjNd%)>Se2O)TC-1OjT_QCd zCID?u->%4NeJC$4HaC|jeSzIRZM=X@!EbE?ecSWnMspqTvTpZ@JAMH*#k- 
zk&4f^()fHz_ts_jL!>NV)D4UYY9~&YH`pxA1VFkYyXZv!z6|TB z;G^67Hh!H^J}hS6*%i{p#?Gf|)jCYu5P4R2$n!T|_%J3&5C2Osv_iJ~A>zKHsb&Zq z7P!>AMs??@*&KWrw{s^uXnPv-�% z(t~`qZX(tFRActXUwdRux8kz~W!Q`UxH3y%M+6%&`3tD#87O%<5-RcfoRQYYtjEeW zZ);P9^C{$M{RNNFdgYeZag(w2>yPJ!+J({80sN93_uAV*&l??{X=rB!n9={GL)Eg| zZI+dTJuo9399zJDVp^cSIm$iwN68 z2&H)gTac$!1T$p3Kv&{0uX)k=md+E{a_3*`0qRi(9V&EH@?y^DWQ!+wQr@mpm4x(% zs(rO5&zDOTCB~Rey-?a@ysSZ`$UaO!G0Fc$vJVj{*$R{xeSZ-(HL(JU7B}yE<)eVUkwMpr<}hnZEi2$kcE*| zOpFk%MfID^UayKbypnzlap$+sdu_#Kf<0?c?b{{&&*gb6ZvCGwWc#cvu(#|%Wn>dh zsK!Y?2dDaBLbiJqeCv!Q)6q&e5oS(1zc@WKN>0SPg(ClzAJc$bms=>;maPGYnml%$ zH~B`_(7r{UJeaBBj8M1?W!z5EAMI4sHyaF7@ow&mY?x;eDVmo~RJ*KeePz-npp@2I zfR!zk6ZtRQO8tP8KjG5pb}`RY#BM8@gpCH3GWZrXn0)f0a+@@N0$8O?b}fs~a?8xH zAf;AdLE0)}Kg2+JQS~CpeJ>B~sP1q0`9ZOYFNLYT{*|E?`WRbdMYH8;xywve$`$wn zK_!-wA5|0ktzdd^{$+aU-FuQUL18l})Q|1mk1U_AI=`%BB}gZyYn6#!u8G#Yy~?u7 znSAJxEN$wz!M~@Ah&VtJL+_2aHJ6io-+5DxmwFn7;#(*yeZ&v0c1Vq=t-8+}e~nnHk;A&@5PfeL!p25QA=#~=c<#|me_S5+0?8Wz2h9|uq?8o+Qn}oZ)ON6?1f1- zA1Uo%C-|vf-{g%5U3m#Ce7wYmQj6=hDicW4-l!kLQdwxG2BG-!Kp{s0(8di?HP-{~ z91aJ3OOJz zNh_zi;A8qqXlIJCE%Se2p~kVT+$eD^7C{HHqP3ISS%znp=WsY!8mID<<+RT19z%S< zvUfk1247r?-TKR+c<)sSg%4bb0cLcYvT~&^`@^n@(?3T@M1c)28Qwb7OjUIkJG8zjr7>I!g-woD{Kz#o4jjRZoZE z3zu1(0R2W!*Ec=wqno}93I3+8lA^WQhdbX-x8Cj(eB+8zatUdNS(FkY@c_1&dSy>kivFO zHGn)yb$V_#Bph3n!eT3Tqm7AdV0eYz#~B7_nHOE%Dl_~|Q%S!mctxj%SFrDqlThLN zhI|mW;u4nXZldyZm;ZBNN%gCvF{Sj@IC6bpzAcjEH0uAHpGEo@^F|X7uNED0AdsHa zM!_#`TvxF@U9z-}*h5Kj0TkKb>i+Lh^g8t3;B|ay@|%Oe=)}?*l(*iFT!nq9BX^it zIMAG5stT^aQ>8W52{=j3w|f!KT#mRR$nzsN2won@{EwhiEpOdI>knk#x5MuOAn^qA z&Cqj)7x^~Q)k0#Qg`YG(G9JiBo^kMTp7^P9jguh+C%=jI5_1l-h(PB%Q<3YR+CuO* zg^GS&I-1v0%nZO)fuT9J690w8ga2GM>|Vdt25tD4U}c^kqwP}PBu*f^zK2M(+(iAC z#s#~_rg!UY{N7GV@@U)DQA7)fiy+hbKpIKZ!CR=d#NMl^D;4!CiCxDn`#hj2OW@6` zc8+H0xF_$UA?Y^!YA+XqJ$wqc|5iHp==8JauwKXA?c?CLx@ zaxa~U1qfo+1))J^Q}nM_A>5Qk+0QNCtnYjQ;)rR8WJ{FaU1#2s=}fRkcU+@X0fC^Q zG>-rKYZs90|F}}c`J9uBKws4IfEd6Y6maJ)3z^r|OQX2Qy67s@ETp4#GxNWi`fG#A 
zyqp>-Ru9TA6&d(_9@RG`+I#O?o{BhJ&1}u4?*h`a9ln@)HHlU1@nUo7MNP9%24(hw zfmmdjXb&9*QoUklylJ5CnC1YnVv%qGkD}f$$++=707mYJN=^1H(PaCoT69^!u!ne= z)LJg{TD={qb0@PlieKjqUSzx^&w(aT{tcgS^W@>_`GZq^Ql)hTy(s6hg*>QBpyJcq zI$AaGNWz^Y#M@Qgh_3apG(5i*gy(y0`W4$F@7T_gg~FiNr7XiwWMzB1y1#cNe}x&;?X%%2P*YYnxn<3_gCKk8?64BvssWbj?JQtI>jQ2 zBG?k1+^Jck(XKLX8!E&pSUg@b>f-GGFbuaPU4Ox{@kbhgUz?2VbUhwSeb-Ry};<_k4qBXjF#Qz|YoTS@xvha+=&l=~Xlh zcJUUZbEU&84)gULV7MIw&W>WY`0GfI7=lN1lzVYc3*Q~#yxGO*2A>50V zjZi0Z?~&>2?N3Pv)G)bpx<+nIkk|^4Gs#RIBQi~1DU&D zeW?x!d|kTTxsX?Y3d*G0)r`LOlJF#14Aehcnq8ATG zFhV$riBd+SXQ@z;-NkRI?p+L>(HMOp_B#s?ACYAK^XT`N|EJj1(6+>0`+C#>{-^&S zDb4c6BkWxFUMAq)4X3g8@K^UKTj+X>JN?blB^TS{^b3M6-6o^-(VL}E(ki0 zw>5F?V+9xeeO3K16YniDuu_e zW~Mc59Yr6?y5XVrd9rS7VQU*zetOb|0vu7m%a-}ut6X)PYbgmEohUpo6ct~u-TiSZ z33H3H-bt=wWJSo`_`^AAeU*95-&3nF#(;*?puZk1TJ*PU%#z-_R*k&SmD6Oq#e}}t!pu#??m?BvND|RXc*%~AQ&?`IVsJ@iUHzb1 zwOUC&Tw}j2Y5{a0L$2KzRYtQ$2tKV&_3Y6_u;J8|>y44^Ht$~_Du6ypjO8Q}0N>@H+J<0FHC4ej)rxoYAQ`5@S!|XKlHuRL zeI!+DZ6KNyxnC(16`D?zl=W`kLG6#?gsa#jR34?~DfBxRP2qS+%IJtBme_-@vJEih z{PemXrK<*DKcQ}8YZaRKS*t^lwys!lBSIw>TupV^>h}wvy98#-@X>#Ls)Aq0o;YzF zGVe%b6E+pq#{W1JTA|H$V)}!2LHRTb+eW6Vo#{s^#>6U352BnRzzdNXa=@%ue-V(L z%sf$PSxn!38!>cWBBa{`cq(Kfc~mn2&c%L_Td;PZS^ zKXiJ@>cH?JAr*ciENfeN+&;&53*>FBkr}^kG5f`E0u{(9ujciNgFqrIJ?1SY&P`1b z^q1W_Mor_99O7TugWyNYtp6B=w{jLip_=n_lkNBhX2JJfX4VE+CaJ$=;OREpn2n`j ztzI!PjRwdC-uk^xudBann!XFk2KgXu|j4+-V#&Ee}l~{c1BnQ*Fw>0fWL+@ zcnb?t`eE1z49X*|!(j54$UK%WluD%paTq8ID}i%Zbs2=6U(LR-<*K>>j1Moh2^8 z&iC)hmo(S?DyUDu|8v)b#1aKWeeQX8GKl+wp(Tb%2N`tc$_iwE_KZ|ccPmWJ6bS2u zo;pC&ZYjrH2v)!2d&to!ddPGO@9*+8gyen=cia3`eBn7=qnx39eadOgFVX^ zVE8)E;9x_Pydzxua{tZm-;nUwE6OIZ8?t9YfIVjj#fofh`vQC62}T+Vxd>Mo6f>2q z#QWJ#zA+y?pFWe~f!8pF;fR6Yp`#Qq1Ha@7x8ENfBiuXX@3XZR3iS(3RH;u9WsSO{ zw`od5Qba%U)Z3VPtd(4hxuvGbuD~qtY}BQ^@1Lt%KB1uq0dv;aTk27I5tezJ6MKK{f+)(KCy z)ePyLxk0p~aemA+{DoB6@T8jn`Sr9QoYb{XynNJHbgSY7YcZCCnB~{=lXo<{Er~p~ zjz~{FsuR|p4l|Si<4!+N?J?y?<8*Jl;q3CVU&!#vj<&uYP^9jD@n}~f{?bNay&m_} z6yg{$Q_xFnZ4S+9VlJI{{(eu9{A 
zd4I#|n;GRh(6t`=@^oKORg8b=d1Xv@^@@&tle5#J`P|VGy4B^40ArJXSYbC)s}jcy z6qyQCg?Q*rS0?IGYuPm_%q8)v=K!n94Y@Q$fXIGnW(o3K0>{@E6t>uSLLeFOryeD2H zkQBW_C5bUwbfDA0Y@hU(U8bq$bd_duyt^rEz$Ar*oaIQ!FPrq+zz;{QrdO!F$agtr z17^Nf9!rHz7o>OYN3A8}w#x0m>v^Y(j zZ1+>X7jn(3)HaNC;kQH?a;V|b{<>h`@lBFR7sc?tVRDV?H0k9GmA%6^Aqw8d9~Ln0 z*NqWv)m~B3pVB;d*Qistuf~BSX>uEpFhaVhk%eL$9$en>l#K~t zW0do4nwI_l3vPhPg|}`2&ChS$`c(G+E$8V2%93uGoK^S~Z~z~q&=%JfO~8kz?LJqA bS3YcZvd9suPqDx^w`3(1C5qpD@cn-P^3NjS literal 0 HcmV?d00001 diff --git a/docs/assets/images/mii/opt-bloom.png b/docs/assets/images/mii/opt-bloom.png new file mode 100755 index 0000000000000000000000000000000000000000..daab4ab59acf38f4a95223f161ed2256b32f3b33 GIT binary patch literal 405057 zcmeFZcTm$=+du4D0GB2x1+?6 zJch4%bVScyI>U)fo5g7^aSd2su=vQO-YrFQ^=)H_)ly-1G3<{&dYAW-{gMv_yx6*> zJhe7+m)@=H*A;r|EthVa`7M7SZkNhB9FpFI{FkrR%WoU>6#vT?#Rb~JtN-OoG{?^6 z0>l6Q6)CTT%=<475JJPtVxsh49>Dr;-xTkEd4Q|PR}=>S`};F6FyDt(zWOgu(3bjVFGt$#88G!8*W0d<_)ottDloR#eREZVbT~;eXECvP@S{_#Cv!ks zPDW5WT@#hw*`Fk((7_hcHl~pDLoyYb|M5>d zykMjQ;y|VA%%r(yWp55>USx+wnbuWy5wJR|?NAO3%NNkLZsoo~U5SG5%0C$zruuY@V%(w-+tKDJ5}W&YTKQ(G3rqC zvwBS7i&=}oat9WBE8g{qbTS*g9)mh6Sp!-(1xx{kT3l;k*N)=SM`9g#Hs5 znX-o)1B+o|-FLv|+m700g!ePn{dlo4(i12lP=RyXCEd{=?(^p5zwyGkws?Gd!H9Wz zj{Bn2hn9T9(tc3UF41A_c;K#(d4Lja;O-o8gRy?6DP_{Y$UdvA@pr!2x!r&cfeHdG zi(q{jn24Jzl|OgwoYsvi;g_UDLra9}b}SQQ1K`E__$kZXPwYpp zS?R(cGO)ubO1akC-(Pa4parWI-gHzqNkvESi=iweAY1h{++-$7Bcn%qI%Md%ol)A$krr=v6w%be& zeK(lpx5g%qyjX5}IXXdqfD<;?XNcIYo}44uE4qAUP?=>g@3XV2&aa+{xQVGhm=jW3 z>G~VEC!bn{So6ar1AFk)YJ|dSC)CUH-=b5Vs#29}sJu!+GFUS_XYdAlw!g74pv@?? 
zMIV;E5pl`4gtolJBlD{)F@D0aw5$J|wzSS{FU#NR{O89#gBMA7i%jw8S6u{ks+4fk z+e!BX577*9xLMXASZ%!w?ZntcfevhLR?ke$D5#O08VKL(w3UsL7_pA>7_o`7Ik60u z%$Q#%m0s>Z81xK<#mS@P$r7%1i9V&pGdl;w9_O~QcmH8W`L`$hoG(5QDGAEGaN@e9 z5MnWWC(r!wF+|++6RqTThI=BFLGRn$Hl+Qx$GsV`<&?E-h}{nJ?namBr=*dI!s?X- zu-d7(#C3zZ^_Uuga<(y3_b%}PE0m1>?4aa-`nbK9wPkl_V^wYmh*>l~$4NQcy*)7X z{GvzXDeC$-wU@zu$Ybi(#u<=cL6FaGTqv|lcVsLjG_yJ;@`GX~59TEoV+_pNxQ5t|egtE)tzuOka9X@Jb*|m) z4;89=FpFa`H8nN76jAf_h}_WpbOJZO7z`vyx%W>VZB_6e8!%pPjpP`3oL4Z!!Bq<3 zz=ETes1)({O8ME!|7fnf@ksmgA(Li|B4q=ksfUh8g>2|Wf%c5zEyf|0Y*Q!f9K5{a zP6=27>taQ#znyP#PkH?!>J5X*;^`eQ!MiAWnZ+Jiuaz-$`hio|zcuEZdg{v=1Ce%! zN3Xm+Xg*DE0^A!{ftVc_ToA2r@jvXF8NBI#rz(Gb*j;b~k+J?Ut_>0bEX zY8yN;udq#fbXgl$;%r2q0=3L^2uHs;X>Zu_<}1Xw65yEg|iQ-7RTUp>+0jMyJ5Tf`)8g+W4tV2=lL zES>)2(#;|1W$kVlJ?0Zm;C;(G&HJ+NVM`dZZs(KkHttj$2bGoE&uw2tkoGN|+>bW0 z<+7fXk6>&APMVz@tNWj`nfHknV+Q}7=Rl-avPJfk`d4X7La73)ezm)akV)S=kF zFDkiVs0-*~G*ro*m^^(-KkfmPBg^JH8@X7!J;4{7qitYdFbcXdocTWL#&n3KlS5L# z)<`W)38APyFhRsh85GTbSQ(6VYD)P8oB(0-z|O_0b0@jnUb*FS+4pD{KfvOUAGxVr z9twdIyCfOiS^q=#jx}1q`qUq85ty4UJdH^S+h4+>sV%?g87y;l${zgq)Px)IS<21s z2YoV`_yEk#0dhToCpPXZK>d%IYw*;-%s0#Syk5Vp-2RBi;pQ--J7t7JA(ks(#XXfU zB51TA1PwZSC7<2Il81&Z=$JKh^MfCTjQCJXqjclrcQR!6qEokwJfw@2|6VszkbY%I+qr?;) zt3ESt0Fj^$L;033oBt{iMJn65#AL;uF;u{j&u2e?SgEo*Q%K8~+UrOAtJz>W2nHMx zQ&&YWV8R!UydM=LqoSVjHs{oUt?ToHD*qEuUS27xT6%}gnCQKNf-U8gxqNE`hst7O zp>HwN{J?U8Ypi(+4m#|4_r$+_9P*%zH6rU}WT;{t8S@i#k(+80Zm%<5j5~msIQgBy zmf<-{Y98(A|0FI-3zpk5_3C5w;nrC8;y^Z|yWGp4`xV(_)%R9u_rzb>h<(~Q*AUL9 z6vH-&Wd30fmZfWygXLOAZejh=-eS1q1P@rcl)$~kviSsuExRH|c1_Ru&iJOYT@7Df z&HtJrnTQ{U^1)o2b((*rIk5QH=XC8}Wb{v4ShDf-4hzM=8EsF9sWxRWSk2L+`8p>WgLc>`*ewPLOmqBTvPZ_n-el zsK}p$YMO!iB7f`J%ZA%XLP^KLE^)xSj+l&-7uvJ3dmZ5Uqq_3oXuDq9*tymdm3uLY zKrZhTaNIx{91J544V*$;$e_zPfMC1}7x|NgZ6;3u;Gu1o<{@bL7bXXNZ9kN6XutJ+ zB)7WyX3Mn@?bl09La^lCDA2m^HR)>0#OND8`A|0fitkbSh4k$opXjy8__bIu;AlOr zes&_*N=7-L7Ies0*kxC+^qWj3yVc)2oe^!qq@8daT0&KZ#01n&cB!lzCRt}T~nTQnM$ERQU zG~u_JT({FCH2>sn`a0=ru~jH%f&#wYd&1YYYJa7A0F-zts{;DZeN5nHL1`sGuX}oW 
z+F0>TviSS?gXV`_PW%g(9k|5_?FG<5h*j&Qy724S=cmuXEiZm>4b$S|b7P;kiRYfXfAmA;`C=B4DfzvO! zUtv>cn;6YC0HX2};UrVeu8)K5Ei~c#{}5$kuYKkY(Yw4(=h0%kJ&C9XD?rm-6e?K7 zgGypa-mr8UcPOfL%Xs$mnfo3Y{|UMC?GdhLC%x&qu>8@}b)!#ZOe7fyuy?2*+Rcqk zXP{(x1OGu@4Mo#*8S)<9AAUOM8-kPu3>z?!#VNq|x@0bJECs1v=-Vvm5af%sqpPJ7 z8+FLvFCy&~`Cc13`Xl~Adc6Nmbu)`YW-tVvgWebEktS-u*W0)UIO9y-IMu8*fA_GJ z`^R}n)!5JqOY3Om@rR07zWVFx-o4||tkn5zN2#{5d74Xk$r`FbTi8Lj5gh zd1E2MF1~uD%3*IH_2@qT7hk(f7r;wxlP_5DyBEV{5 z0BRbS=OqAD+(D6cP#(?9wwdH!Pd}oO*8k_3+-QSZHHS%ClE|>>KtzE(t9nHYO&0$v>6q{>9Sh}Hv#_P6j;g`ipHiv-PweDi2w^VV>#KlB2{`%U`h)>kXJ zbS%VkQ0LI8GVLdT&2(PCd4qn0C0_b-*CNvuJ+?a1B2GM|Ffe>v38qMCE6Z`AjLH?T zRrZa~PdA+nAcEsmWkoRzuya*hgC{7)A_o^*GMNcPUE~8*R+4|q_Hu!j**`=M(XYbO zXJwe^&y$;^c%>jmugFR}Ur|yx#zJ&FaDRCq(Yel$zt1FH(SWXR z`%k?~p6*4Q`SXPhO?rJ7(g>gE<@f^^FOdZ^VpQtEwSw}|$1=x-YN?z*e9>Qwt3R+Q zR`kn+1k6u4EIsu^eMWeAqu2_Ss4Mxrw7lV~&WP?js)zmRi(5@?VTdn>B-ylXVtTcCx9S+#9Os z-&h<7&ALec=i^(<>eCd~3BCMvI$Q5@(i8%)JNe2S7EEEl9NX*XiZ&KO1z|m1Pfgel zVCEpNIeO+|jRljzd_G)aTYVcUxWtgktQT8y_>CUHGaq>N&lgTFX8R{-i%jplb-FsO z!E@32TjLAbu7Y|n4_;-#dce3W#a<8)>nyM>dZY=Juv zur{Mn{$8W@*_a@jKc8GOT)Y4{(ZsV&*K#Yqy$E=s_M*=lm6}(eL89xolDxd7GE%$ z@x(63nViW^fj{2E;br59k66bXOEnXu+#eMy628CxL??3$I?q;~2?T8$^F`mdV7N7hOcUS$!h@PfO+A?9mZ2EFL+*bf72?e(_EZTV zzCo_ASnN!P3JJ~+{d&DS?*Al?tuD8~qa7dRu9^i}zTXWSn6Rq|oCbmUI}3g1psjDPuxdUa{yG2`h=fYDYyHUb1SRkd-)=>H;9c4 z_6zRDGk}ctSVEii6~F1}8)o&G#fty5HUIr1Q$j1%%=wMVc*gySUH0MPLkoSfeHSXe zT83_{cL*_(pV5IV1`Wyg36BVp*j+yUmYbKC_uw7zS6{M^_-6?+FM%T_4VLiK!-LN_ z7L4w5Gvzx0fUmsbgi$s$f+;3O#y&D{SAcqy@?u{90Z?Ht+F2c*$q=z7tHxj}|GsP+ zO^4f)seqprK-?XEO6m?l+R|%^#$=VXC;7NlnB7U*6J%X3-}KE_2-h%KW->c039n>|WKc;?8y6_rO*g!rr~3TbvF0RtLpD$)$BpMuR6%s$reXKrSen2(AC| zLMRh7d*M*biKYt%CVd0$A}{+M06gDEX3Ru6l2`#?2{MmxJ%}TseJ5I^3HK+R`uEME z14ET@H;b=#O7I^U9yo{# zH0jC^m04>n!mP9?XOC>BL=$H#47Zb_t5R#A_@0e2)@@B9sq{^YjWJ$2+oU)kVZU)R zMw(8;G{!zQaer{me#n&j{`%L;*@oW9=&DrgRn3}*pK7`{*x94!YhA1o+(sm1d^`Qt zFwo5^8Q!sdi~n`m!y0f^TcZx!XyZjqpP6vUZW${2Yr5H5nq*}arMgMxilA#I_h+g` 
zz2aO_NMm4-d~Pvhn>j*8B~536`T&zolbn5lSk^+27La8w4PlQHBn1uwEatYoAI&YI z&_`qD18=wI^)T56nCwT9@zHotgQvvud^=OkOfl>GlXCuY0&s6H&v>lv`bMKSeq$E9 zP`hid!#P<)x0pQg#fF!d=c`dP&pdY-^@4qVUB5ydLgcmat7x?{vYEh@EqvO3CH-z} zcfYYdK)MSSvl}{*XdGXd|EaE~{&F^2o0(K+XQ8723hx9MY5#y1zbXVswxvf*TmyGR zsvcLL7KovH#;u)V0!rhd%eZZ70IbS%{i-%?A(B2}a+a@LbzIxz4Q9N|nFX=;DwptG zeBisEwrc6>O)1Z0k*k_Fp1gq046S0MP-f^V<-)k;)N=m>w=a_Yxa^`kQ9_4C2`)=< z{h6V43E?>d&R=h8-U!Hoe#;jxk@6an;SNk{c@ms0MUQH4)?Y`%H@od8+^q&?`x57O zqXb89BiW0Oq{UdzXgJjfqr}SXUjX6S~|S&|k*7)n3cc){E+coH&G0IdYO4o(cwel&kgdy{#c6lOE4jP;7F(2P0a~H~(C*Kej`a;d4EB!K65UhxO@=PqjO>L!C z%gqy&nQM2>^z8}Lj1n5-0%f(^h=v=Aqr_fGLvHYO)!~D4^WIQW3^P}-$IzCX>zPb@ zga6&W{i`XTuZgL6YfAa;1Zg;N@Nh88eiT)&7jG^;;w2}|+f_AFsRqfrzy1JgM7Pd_ zb(&ZybJ*5!PpuQxtg{X^5o{OP&gILa7*T_q>o%0>A~h{Hr2v4>&-zcfDl$ESVz@~G zNf3UK5*lahgoPmOGZFh02z6k6nT^>edk(1rrlto;{%}F~?j_gH42I~Ye6`y-<75kh z%tFn860-rEVA;cI#`-%z!m}X@ktONku}YU7K@C1n>w{q+{a~ZQyHr?X_4ADO?(GCu zf_HyXaJQx@zA%~VJ_dBeCkgu+ePch`QnjN5cia+)UzJAqVMozI^JcH`(mq}ZuKNsz zJj$PwT|2If^p1LV8`&SIc4Xxmtb!KHG{A?##w_Lzub9mitJx&t=TagT*U%F&wm7#3 zE2pu9BynDXe__dYr`-LuKKuTr#QT`~yKPpVpsVqG1JP;|{F9zTj}6mOT#d6XWf!2E zQ}>!2-)5=I7nU0I>`QDWn7X05q681rK3rmSiuUtJaodz!U+0$$mJWp52_}L-!L474 z8wJaRxZNG)X%g^E-lI9LUKOo*P-dqC2dWf2#UZo7r|^pOkgWD~b+NtD?z;eZZ8QNS zp91{&8V`Wiysw3`i1V6)_1tB7d=m2#2uvR`#@M9Xtg8S%2SDYwqoZ$>_Iq8Vl<<7C z8vfv51IpJ~kX>*WZ7kG1ua9BI#&mmSn^fryN%fJ0-)Ys`*FT2f^W#5!GTc6T4_kUX zSK;W?MKQ9uPE{O+gOOga_RoqKW7e|u*DHNWL3!EO!{&$DB}%ZHkvjJCY%K;IkzEDB z*()tAnT3&r`$N0;61@X<6EoC9g9U?2_y#ZKn2|o}40Dz`A zYbiR%#BZNA9Y^ldw6N)Cr>NmF!$8baV;=n?Z)|AG^Cwc!kOFE>iACAaR29%CGw0&W z>y{PG&{m0?CMMJ!qL29=>6LeEiD2KDV>L90H%V4bO=xxc)^KIN0?_5YH3W~J^P@=# zd)`i!eNLq?PH8N6_KsG0Di0RA55MCty`A7S4zZso9nNgClIZc(dUw!tzdhug6pA?1#!)KD!biYM^OC&f z0$sc1w&5m*<@>oEe$T(PS(Uf?THfZ=e(u;VLyWK@AGie1;nO3~P&iT12#m)l;+s~Rp$zpP| zF;4!bvlO}DnadyNwXO~~BjzL9x6@+c}?Fp@V*VO!TZ(5Ql zyR}Lkx(ZNcue%EL*gS@zLW#!IY#vb9!W?LEz48V*^J_m^p#&{ur-E6X>(p$^{khHy zX}U!M9=&}p=vaJl_ZQOeg}Psao%hWW4BJ!`6|D_H{D6j{1~nY&H>kdlCT81xpRxc6 
z%L$=T5l%l-5x%v$kVXdMA_j`qZD(P2HC9T$*Sfmqei7-&1vfCE@-fSt6{ z0wn%MSW)d3U?5h>ni3<|4uMr87|Rss5@F}EEXQIJeBCz5ReA5btz4orEcs5@=c=PU zLd7`GGuHjZ?9>!}GvB!+ddV30%OFPcokqrWH?k`;Hb#bJJ1$xV32t+A_RR=?$wWQw zh`kE_X-lWBpkDRzd<}!}+>Hm)1=+(h;w4u+hAL<74IAcR3L9|=FTN3nx~{H}K?8X< zBpO%q60BXZqME3tkiL+5cgXe50^5RnT{E^SSu1J~J*y=(nfal|QtzfkpKT5jwps^F zGludaj$kWjoP#LEh;W6LwDBmC||l=wfZF z-66H_bFMHjL_Q_wVJOJ@F_6k@-Hus<8gxzoLL92BM|M9GLE1)W19#7|3TWK@K=A(1 z8HzqBLbhGXh}}2w6q|tcJHvrrABw$tRe@14lnnn{eC~i3^QtQf4WR+@AE5xpJR?~B zy`PXAy&zv)pS&fs z3>rP{yEi9^u0EJ}DQFnTtt3a*vrFThT+?P^;V_J(*VlUei9-d-pB6DN{F6QJ zMzr0fK=SPeBSP_`Kze{pVeZtO3Iv|}JpTF~U&6dW{oa>Yn!<&&VmizN+QLoHffQmX z+5Lr+;Y+zA3!^Wet8O+@3w<9x5fM!PGz26DBoeL3swLF&b4bc(DD7o?-1_&@o_Xc9 z<+lLR{`i((pSi*q(setf;2v|mqz9Vk2NLvi8KJE@bFvc5lDHIMx;COHKrKDpqkdGNvoUDQR^*G-`)5akZ|+oJs|zL@&?Z8uyPdXfcY_?%Vf2q`fDgf!?ACsLw>s|O zy9~`&8$g;BJ&%_5cB|SB_ZyIL##ymE_rMdpF$PELYI}CyRGJu=s)QRkw{v=c59m3m zZVqI4%eEtV$11alnx4K;el~dtr1wX%IL?hi?8YJ5p1GJeUYxuZmcm8|Skg^Z8#Zy* z6!Ct@ECcrlw0(jfPj$P;y>U{b_7W)lT~<4u#%(mkFel)fkrSYiOCv~A`V$15bN3@bhm5_aN9!6_4I(OBB{~NJj2^ADykG zlK@T02oCOw z^wtcOA8f%WcfXjr6#IdYug5#a7TIx`jK2C*k$kHrAiLs3HgclHWj$() zXwR`R1c-Pv7_ptFE!C$~a!?fF$3LxOArcnT!q=w|>uDP47NTr0o{p%c#k7Se$3&89 zdP~rXVK;Koh%I`utu!C`hH`gZ?lfDp=T}OMgXi5qtKAYh*rl-4g|yF5qwp86+hIHz zhv~>8FxQMRp1mLh!8LsPX9h0Sm41;O>XdEaw~)r2>R$r(lOO0sHmaX)E=6N?OX%R+ ztlq<4mNk+!y=wyv;JZCS^uBD-UP&PO^>}OH092_%k+;r-d|&tv{08Ib)7}W73Hy%S zjD3r{-_@h0{b+UhSgsIsv}mwh9(TKa>t6KATa6vm9;|nSa#GOMXgdZUI&6-o&b=*Q zpQItnATW@bUEdzO``j4@)Ln}1Cv{ly$}OPu_KQ?DJbX9BYa6B@7l7U*pF6@~!aYp9DLERo` zc=ZoT5WQkTyMyyx`?%q{tq-_GEipS>aJ*=H@IyG+skrXgcekv@oGRNNz##us*LsvU z;z&883;xn~P=#MT;L6s|hzSUJKHv{{co`U2)<41ZN+cOvPcjBZKbk~5dMh^i4!b!M zXQDOd$#==~OXk%fz+RW?vH-2(7GFBC3baL*P+w>NkJJ2L@c+lZv@&v$)6 zXEc(!SOJNh*e7=UDIR!?PN|uQL2p7kd*%CB6)2Suia@7<%52<&%9Qo=vMW#?uAvY~ z;vKzo%w6ZY&T2lhFS`%z{%J*wS{Q!17AZH!>wunBg`^`z1As<3zl%@tbE*POiyRJ0 z9L~-!EARuRy5D*wPb6?+At7i-uKB517k7e0adOLTl81T$tC^{Z@-1N3kY;n9A{;@uw*cuT7W$H0?5pqU2p4!)aVbko^GHfNi7>B$UbR^+@I9#x=7nPB=1o? 
zP`{qh2YIRR@3;r9g__SsDBKR_<6bc&C!~u#_FYJ02+|?Z@5z&i8Y@TO{RMb!bZn1G z*6(&fy28CJVS7(7C)QR%8ZIRBuhbsQu|7&`m{J?l+d?fjAC>1JOH!j#opaJb4w;h@ zUrsT61oVZVQ-C{AvUerF!Gw)VY2MfjwO{&Bo#oqOmds^dq9pN$C2w*^Yk_1RVNLA| zB$OQz^9ZDjJd>Mp@{~|bK)`Gob#$r??165_^MRp}SJG__=MUU_^{%orCl6m}8it17 zhhQ&6eb?wLn2ik2<NH z-;G2Y4;LQjx6K6A^4S#4>KHP*#-}7*XKWOoTM~D3kZ84ZI<&ahUEOWQp;;S<`WK2h z4I@JWY@GIU{gZc|4+G#Y9$Lk8OF?7K`}w`k<`@P62J27gS9Yq9lYtDnZOCK4HQJ($ zRl-@ma4fx{v=pYjClaF!u_#f}04@g7?KSt@LztrJN!^pC?G4b=y7p8hbJBK|nvjqx zM~<-E=nVd)b=Kcb(-eKD_Ps2q%Z@gRSB74t2fsU~GDz#rxBJ7a!#O%~*%EYG2wm;) z5R1ua{_o&+1y)O;EaGz=IRjx2(rz6Mh~r!gXRb0ZWmHJOJ{iVNnOtE~V0*JtL+rMU z@L)Nfm4tL%=~K8IuBMoYc<5qEb|O9~PFLOg8q*MD7RtazYAGghgjQpoYLdnnNXDb9 zn8Togh7Y*PGi&+v;lDG4i|nXOL<+s!{$>Np;_XxJ<5H71KCB-350eEra=dDpfjuLF zXMZWLNOpF@FF2}U69im3CoNJz0+A?&Ud{U^y9W|mkrTaB@<<0;!;K@rA)bYoTznu= z;iZRR_VipCFvz+dhmauzhM`9fXKn2FH1{!uV#VHc%ul z6T)?i77LJXZM@`s?Gs{E5VUL_)*qtGl}%;b=b#*MGmV4dCB5=R3Y4|H4si?NzOzHw zg{^Z7ee$KTMr>GdLRy7h`&zHB{k)7E&(?h~nMTs0D299GwPA~yjv7joP15ZZ0#zJU z>~J0!`@DYL1;dnG{}aS6d8__~v^eRIsJGrFG%!Uwv|Z>ygjYyxyZ?qtST1M@8s+I0 z%8+QJCl93&FWV_G_V7s2v==3^FC(6P!m;>1myebHs?u&KfeC7h-5g;Ay-1_IQxqzg zEi2qA8Vn{zt`KJgnB5BCe*uL9ZkL{)hb`3Y zuNWIp+U$Yr8QrvraTj{e+?Z@kyCp-@)O@5){aG(?=M($9&lS%ua}PU)MfT4>URogl zz-$k4EzD~OVGKQK;c^%}L%MP(*K#1|>3LHZf7Xad@|`8cZ&g_oS%3_QKtz2fK|L>D zr2zZ-U|J^P^Zt*Lhc29JY`4N>VIsWTb%2poj5$@;IOiJ_W{_iz2z6S{peWEzQf-A# z7^ERZ->s5M!SdxdppXH17hJl5R?^YSCJn@3Ot1=^Pf}IxB>OMM+8@}Pw{ujAK3ghF zTqyS0e2Yn$GI%*6;UGmtbSe!bK{&1FBk2xb9AC_SfKxv~^3do-d$lHJ-W<`Euck zM^=wTel>qM2;2Na}mTw7e8-V&YJ%#}5Y+l;M>tc!_MW69c5R=&u^HCt^ zo<>4Q7RZ{sp03U3$6U26;UDntjRTKGqcq4#|GZaMOKR?1^Zh^-6L6pjF~liJMJvT| z;npBDi|<{!=Y7I4LGrQmQlFIqIYR!(F}@fy!pQPOw~$G`JgEgFebF8Aknifg8^b)5 zzL%Fo!*e?7-cSzAHb}IDvkph8U@!3_v7p@Q_s3UXaR__lidUZfYHQB&^tI|In`eyB zAXM5J!a8FVq2yW_L-Yv9`}rM*gHY4F=&&F0A-IE^S$gJq0ASrEZ9$Oz-BiDt)`!m; zHW6&w^(8-c3Ep;Y=^{G$z;+yV(Gu0X0cc}WQ&>%j*{wga@`cvD~E2?(5G(t6L^~9A| zbX>sSfM)<}*&0dMCB;5EXxYBEoxy0JVk!e{tIc<0hr%=tV^Z$Pf>>I&pMGK9Wi;&p 
z?Hn-Z+_Nf*H12tDy=!MgM-TaZmyo@dEwV=djutT;DQHec7gn=F;(4Rr*jlnqqmBE7 zLEuK$wW$VWMk(M;BSgx)r1J<6=8Ni~RVQT)8Mz$W@Cib09Hp7B1$KwXTtJ`@-e z>NL+AV(6S5JbUq}2w4J^#yR?11AK|bBF|%0G@4vqb+?o(gxlrVSnG^qtT52b*TFe!ezu9MVR(;_V6D2y$lh&$GYA8sW*&4qGU0fsCC~u`oS4 z>wXJqrk_^4NzOlPns22B9RUYcK-JuWL)xbmoIhGekE z-g1Dds!GmvKV7g(f~}L({3hRTzlT=7^?TdYw9m~JGhkM1Dhyw*>+uuXeLV?$xdfus zO*_MmXALoj6E2@<2S5ulodHEfpu)G9)`{X~o?5t~#$2qx-a0Z%BA1W8EaOEuEGF z{WUD8OzTFiM10S4VCHoUehP*yse4({C6B-0;Dap`hMfg5lO-ZC(oUn2DBLUTg{KKM zWVu}e6CoGS#DBR5bPaFEi!W65+zHs21Li}|rkYycp8M^tea%v+wm8Rl+7jRB(IlX6 z@Xh6D=+bo-l(OI8TeZE|&bPn5f{-y9d<{&Mk7O##K6}z}m+3ZCsZ5b-?!?*O&gw)> zic}~&f4Z_{Ni~P~d{-kcB$g^2w1hus!4sX)6=?`Ut8v_y{dkpL(JJ<|0uKi!NM0f= z7j4Xtt}rN&XlzI+9~?x1EtCo2=Xa)w-ZFY;-I7^*3bx7gH{Ay*3&oP zDB}>Q?FA?mCr*^~5-)6llidM5yrMI*B5p|PB#VoA92Hw2^md-@R0gszX|shHZOqP| zOk_v2$Mt{T@8SJ5!H+GMfElg~P>;Oav{t;nX)X8Y>IfY4?S@0b9Ux!y_~rrt z7$Dv5%bJdi1p-U|D{Rc>b?Ui?hz=MJ7}HyEOf7e_O_2`Fb*>}Bc1b;ZpDzhh)qtBc zP50D={5wE6meoEZT%vtn6NhAQ{31_D*DXa(imB7Hm{3a6dL#KBHM#f5NY+=s6<^zb zN(uS8Rn53J5ApjRDZ4+?Gw)vx;AuYcZE28fB|o!?v3>m=tf5mdo7aLp>Y}G-zbf|y zbpc;6bv^Bo?utkU65wo~j?)%?LcUn-Ql}1-5=2G6`gTtDI+t!|0d{!Co!Xsnqq87y z9ui!N6=&7W%ShI$4lIIhFXoksloK{$`ZN0y^=V7`tk$rEOeuPUbSti_gd$ZJZ{|>eBaAmp$X0bG$#6l{Ec-qxZdb`GDtAg%H@NtLB zn<}DsGH`~5|D+l@CdO}S=fZ}ACeao$-Sr)YftM#%AA82rcDf@f(2{P9>D!Yv<)V z_$9kYLs#$+K6J=s8g}I4n-DlqXSm(bJV% zGCk|9%<~{s2#^PfeRK}AC7m3=OH)u+inC!`zr{^{Upp|Ig;YPwHsXxBPsm7sBZ`#n zyAFmXjE9A54V6P*cHiS}nA@!CrOYJC{bx??txD-gWhowx4n^+zg;YnRG{{@)UwWF} zG0kN1bf*X8mbYKh)eBPyExVW&r`qVAz*qiCQp5@Z$mwLnLQ`1Qbuh7A7dL6xm(-g` z4oF)T)Lk*JQ}OY(3Rs^vO<9dBkIVvE!%K=i`@S58zHrV||zIU|{ zTUXx!94m3qp*|j8D)4zhI|2tcyI2(8J(uI?eb0uSD+$a)0AVYDPTze_M0%B)Jjfz& z)KHF!t9K0oGa&P_?luV?4RL&96Scogu#6}Q1KC2K?b~PMLj<-Z!gAD6O=#nI<-v?^ zb_LiA>F0D9*Px#VTalBC0iaKO@cqGBdjOz~CzaXr%1ge&rV=q=CZXG%Eu1*%+m)mq z`|)Eg%}F=n+SfCgu;PlQaDF@8#s>Ao+`~@d1VxX$t?i z(@pgtUwfLwelSHTXz1M`i_=1S*Lzk?qjIWmR$=IAmP5VLx*j_-D5Mx?>b)#Esh z1$)VfNqzHx%>zh}8lCLe9w6o{1D-7iadr7xYC!b#$zIu8uw`{keJkxSyOH~^bfSEY57?nC 
zr~r!S6E?g!F4S2NNy)-BYJoO(Al^bK+H#_L+53e1e(wO3GDsdHf*Fdy8A(*U0m_6q zXr64R;=*&oL2TQPU!fBz0gp3rQ|;yeTg_1c_W3+VYY-h6ayzO_*1?H%RW~RMSUhwY zEpA+6hUeS@wa3Ey{PfhT5fQD)EnUL=CABut-DW zZ{u<_*>;93CWKG>on(Bw9b)rt;I^BaMaLvFI`cIpc(!85>}Dkw>M_)fa?;Xb10t-v{v)PC#;s?kwO25wm3D zu8b0z#veG_7K|#Rb5ch{tMuxw8pO)GZQ2bicFVHL+}!n+?|xeCwJ`51f~m###8LRd zu1#T(C&DaBBC7xGJbq*_Wq_E2f9`<)`PIjm3;-;K_??syuDDux6;RBe%dE}! zKTil(49zpl>%$e5T?7)6uECQBW3+AK&)SUBrc1``27Ly6-056CCgHE1i{c>6{ zxC4clKHx+@*ZHG)0{puSYEj(UD;IXAr2{~Ez4Rd|dDWl$36P~0T#;ir?pXM{4 z93~s!(pOg*R4zq&P59!=nl2j8N&`>Zd3KU<F`_IF&k64X(q@J-l?B{CK$cA{5QP zv52_Ty}6;?@m657ahrkTjm~_y#3&E<5%;auO-1Bk(~FPiWF8MpJ#|2R#DPm_G32Xu zW#e9BS>URru%w0$u;htrgrfJI@O%=8hs^HN?SBB~`uyHiSB=xpEstCHK~ol%mfG=F z0@3jScz*4EmyvXh$xXadsXWnER?in(6PF~gQ>h_)2 zsfRvnxYb!a0<=Y9@pR43Hpmwo;4a&}yfc3KMj`rgr8AM{A#1c$H$$P}7IM!f9$bjt z=%{R6OOsSu87JouCL}e~i=D}rj)--&Af17gS3-QdG&9jCGY{lIb#O7>uTY^-AfiF* z=5jQh)a>hnOWzDXOEn%ogWH`dydEJ}yAVYw)9lU`DL0nBvIU$JTuzYRWjvGTkcJ6W zrSf#rwDgqOC{4tPjwV>XxokbU2YaX1O?!85xPC%+64&F?9KkLJs36?jMQ|N350=_T zZsD#4Pr&Vi-HGN}Jh&jeq9?JGynUP{d~JKU{>Za_)x96w5TJ8nSpjY1yj%So+e>e) zvq8!!*9$%dIDpHvJ*x#C=!U}t5zpBWFX*O1H*Lh`i)t)5EJp=9&88yLGDHZ7%w`U;(zV;)`+dnZR>(0(tFf`~v& zc>Ylp(Vh8KCFkFHPf=x%f zLe}hyd`kv$;=S`y;NH4pZ_^(?6KiCmIk{jLY3I2RXhR$I;5e~Yu~$F=p-9O&q^1#f zgCpo}tbE2*E>nrt^{yA)KvK+wmffFErv<#^r;G8%)^|{kr6}q)B^V7H^d5KX@Mtdj zb$;*lu~r<5(PLnes>Lf;+@dT1+%jM**x9OVX1%qbPsc@y-ov7Bfq1wp@M|84}13)&`DM{Y}_8bb<@Z5wN1#5#sh6J&72Ego!76_-Aa0~L%((e zA9my)yF)oAv`2xrwdE`RapDa3O&Qe}QQ z#`->k=K`qNqGv*yjkSZFFGScD(5W4| z*SUky+l3$>3}k=ZEQpQhrEsg$fz)?mP<4YR5a7BedCM9UvS$PS z*w&&6#}SR5Zi8jo;2lyDQ!vg*<{C(+UzeUf8lqW=>quybk@l$w^)BwcZ*O26bjV+! zJA>63Q!Wu|McQ(%hRinHyzA}l4Fs5E?dcx6wLVr?= z3gi6H1#%)>V?3m4CPE-A_XRcv651o6d^4OuSH(Fto7>>1J7DRR<9Q{f6rTy0o~}K> zi9lxkf~$ID!=~&sE*aVeSg0^YbP4!Icxi5scxqmS(uU4oi>Cw0;3wCoO#tq^a zr(^C8iF*<=FXN|qu~4M&sC6VkxPr! 
z5!z?#AyC2@oZ8oU%5)pL=H($)+4<@WTPOJw(`C)~aX&55ouqV&m=A9^qnjN@%nZRIOfc$&Bv9)S?_n{Cq~wDc>)#%3lLY?3(<)sejZn=8RDA zTjc4aTx3{M?3qxHs1oP+ z`Y{E0@(6ykSI)X`p8Y4+mlAz;@qU??I@=W%gppyZ8GL9VZ3|Z-&}g- zI2}LV0T3BDr<*qR&f+sYtDjX+p`u4!N}$W8yi92r-U&SSYyLwUtj{x8rg=UZqTf0? z@>KpSlj3hKwtuaC&z2KdU6m6{3c)2SJ=@*;vy?m-wHJawWX=ElnH%aD$?<;JIokKR ziimzN2X6DddMi`(g0}VxWAG0WtQHv@K(FIvHT9nhg@+d{l7l$Qz>@j*+f51F{LvH7 zmyPtbP~y3QAJ(l7k~T%m&Q{!QAmj>zBsdxIP1{#7QX)1GS6C-YNRa>)j{z8q?`7K5b z8KH|q24}AyL*uvU+Lx4voj+U#s!`jIh@#HATCot%h^*6>$ann359;rZoe<(mGYmXu z-o9hDHw^!^A)5{Km*Tu?(=B-506)5>GDF{^Er2My-Q+s5EOnQb$>D0q@*ekH{~?ep zJK1S`1a`4!bRphZK@Uj#muT+#?`4zKg2WjEDE>9_brBS33ZxtRpf&$`(ul`|P_~tj z%+*a1o9i;$DuokfAX|w*f;^Zuq-Yq(0p9BE;eQ0LT>dCqeFQEdn!_hU3Vo#C-xqBA zJMpUk; z1PP%q1(5Wi+&7b-eA)y)EUZ5{V-Q|YSd-+VWFFDzx7PwLDj)q~w%B$U+D|gCMOy97 z8E>5BN_GF0I}i}>saZj1dHU2TYkOZ$N`PI(7e1spnH=Ixd$B``zHJi}2O>OX=4|4! zeNl`J_SqZsJGCM2#pX9wDaG{p+O={S)R#E5=lLj2tyZq5W)EmJwI4EB*;;`G-ktQ4 z>G!-5*K7%4=|OkEghPailcj+d!4O|v?S>KNm*P4Y66t|RtpSIulM2E9$RNk{F<;r}*Z(+y^W#ztQ zuY%ugd(VHblyBvB?V#F0$a)nWJKlb^AZbm+bVGEUhB(=7|LyFwWbcP~Neq@czUM}O&y(VllBm~IN%O7EWQ$vh6a3Pa z=s!doY~xIU>wMp}Z;6`jZtAt!s!H;^eR0|@yY=SYcRL5w65qvs&Z>#=DR)eO7DQKF zNN`Z~m(3~8h7j_hqTr>i|Byfe<#^|rM}p%r#K202IdW$R=A=c!Jh}e)%H>B1Mv;Ql9w@^<8M*WWrE{@{}``SuMHb?1aljbN9-E@hVB6qaq< ze(*Xa$?LPCe^T*H37T%>@RTENu?}6Q9}dL_nx-l{)WI37%gZs>CLYRMLlSM1yUws``F^SE9VNN-FLJKe+r< zs^Oc1_W)Y5S1s^XR>-V^=(M>MhnbVQN=?lJ|K)pF6RxKHRv;y?bl=gl4nM?-89Mo{ z7uocUm{m=Rr@Jop?(fR`DqS5>N)9zL0-l5n@!gC=N_caP#OKv2vupE${v_|)KXNyn z3cPwo8Vv1bK-1x*K91#0rC(-v2t-l>Lmo+isixr$FJ~(LRs-A-`L7k_u|qKomx6iN zU+PoUJO{?d7v#?x9*$3v+?%|;1jbm&udc4;h)OkBa9c%W&+P~#}FsiPN3mr1~xK)H}$p|%4^wTx6b5Udjo*)eg+d9ho zPdF&Pn4=y;y0s~+L&9l0)8}20^jF5pSgvTlU-%Q<%W$@Miih&=ONHI*6nhhbc(+KN zAsySeGv|Z(-G<0r`6uYe$Pql%>J+Vt$s)!SQP?B+HJPK&(p36z^-W4chCC`2JuZFK z?;f=Qp~uOYOr15%!Ytz!<@2jSDT@sgg49c7<9FGjt#v}N0|b-y>$qcArk>cx7H?@Q z;Y$=RC)My=Jrp}hX9P#8PY9Hz=cpXd#z(c zxFGL+U;WP=xiNwHDV0ARVTr;Dnxb3*Z=k0JuTTTxCQ6FoBr{d>el|b 
zsL(Au#r4Ss;ZU@@qy&nZPjay7UW{v{yAGsY8|v-G?w)WG7j7RKSJ2P%F=3~EPUV|iUnIX?D-Lu5|>x@!y`n?%WHGx?~tIR5PSz4b*i|v>V~QaAcXp@IZ{me{#*|H z{Angc7pj@&qE{GB+!Xq+GUF<+`?rT`=$@Uj0uy0aGhU=X9bC4;WNO6vhE}ux$aUZG zQ;$e~Zk0*L1+HR*k2CE2U)~$Cn)L-Uc;d1%+zZq%&WmMR0VheCOK=)kBPkVf#og`i zRSrhig&WI(Wn2eGS^A3Ns8rK{>02WKHp66uOk`+x6KH?QANT3p5mcz1&8 zEccKmo1EkaYLH5e4PkI_8r5A>!fJH+RC@2pL?pmOwtbtvZm@L3&`lYIMKwF6ej3vJ zqOevPkX<*u6iH)Su~vW~Z>qGQ&n;`sCG7A>S;g=~HQxMEX=}%o-%{9HCk03!iM0zTUPsowuZn{= z{PL2n#nEA_1tLv@vs3B$MetT^12?CH#@7l=@~hSU%1Er``uMhaCu^fUAocOTpV46> zQX+%Fw(q@YBvX!Tm zccOWkLzi<^(n6zyh8m!+g;=+g+hj*>%J=4<@S+BTzfoS58!L=p zt;khDeEBDHezWz$S&=&v)L0{PwE#prCGWsj9<-}{{QWC$Llk)OX}^} z-T8#iGY;N?b{>c~BYDqBu4?mul~9s8v+sJI3y5VQ9M*yii_i*#BN-1e_263($BVMN zPWmhRLOe8oH!{Tb>_Ogbt+(R-n=iW1y%3fE$fN-khE*AYc?}-CG$4-t2K2~NR#aGl z8$q+3yE5!c(9M2W9ZV3gD*l)8NSqb8Fs91iFoyzhn^tvT8e3Sj4y=rE{Qgorb%Q0m zsK(RA;H4CJZQ4TK%I;6iK4S+M7dAzhid_nrW zH)1to^#4+u1HpZq0ngZKZn@Z+DP1%}G%MCbzCrpHlZ2=1{-y>RGhrno}&Nb&{Gr!!HP63-v& zW_*;i;<$o7tGwIy`jRFmnLfo?wpse&#~R#G0%YJJI;~OF$vKrxU75I;;?Uyu^29x$ zjZbHk9o|Ig5PtftT@8&OQKON zmW51q=@0A}2TOgiTzJt)YOJZT8(QA`>txTpP%4jePwv5q2&5=c2jlcRD%>$rF}d{q zme~voP>Z)`GNKMlBE}FH@n~XuiCp80#&Hv&<*x1hO6~!<=uyj^MB4}D9R6Q|^`N92 z+tv-vU4hqpebt5sD$9}ennQJWog0}{uk^^5%4}(JYGV`zXzNK>oaJ@su0W^$*pJiE zajI}e3c>A?vOj(G)W3b8K5i3~L~hi5v91X3Ip3K>vL$A``Gzq(KQ*)anz#G&)zT0^ z;lTEd^diAUPd3L{XK$)suJ<%GnQ>fpoPnr-BJWA?O?1t0eL}EheM`p}%b{r-r$GN>Kr{eZDmE* z5tJB$HCmh-q>?}cwbPsftqBI%Gb^g8`vxKbSLl4-ITzqbK=2~OI%TbSAQov$!-^l^&NB{$)XDb+5hLZczoiyT_kcpl0Qj( z^QOzU7!J@k{gC||WVH_$HX!d-#)jyeW>8K0pJcs9LtMfTY_gw%C1z3tt!s40WQVz1 z@|(qO9SxtlXd#+mKBDMKD6yI@WSSAQn5BS8e20|mZhJE%DRIqjh{FK3>cGOBdwkCy z_{lxjj?E!$QAQDfQXryVh#UxCS;L-w7^gA-5>2Bv@~06S{NQqD`Hj|BLE*p<-iRBF zJ&mI?Fii2hM7u<&qi6kAJ`Wxiyg>+JH;+*uuqQX650q-0)p^4G{lO}X}8!DGVJO$xPZ&&tarY^pS zPum#ca7Elr24#Kyg_d0N2zSGHMaUs1!ZF(|+gt;DK~^9PqQ6>-3)QorVRht3mv=-7 z+PlsVfAe=gaP`uqUud<@lHN_1Ml*ZO0wFWg<2#Q=n}f$&6#wtR!wt|*F&THhx({SP za+G$h2~SiIipq1e?XVp$bHkiff=&|vZr9R=wdj|_xWy7Ia2Q{%C)c@ 
zSwmNPPpB#e&qZ_`m{m+(l9=`=Mw;dUQNxLujrE7{Eo$&CDyk2m;}v}A z%+fdLciB5tW29fGBlxPJJ#kBxWyp)3D`9-ygEgz=IrROo!_DYUW+r+(-}JlhnoY?j zYQK?37xpjV8K?P^_0TrsTDiM)LxvFClnw;(uxCiqA`L;O^6TSFM7(s(wT@jwAsY)H zQV!#TEF0Vs?IPU1ELs$y7inzR6Nu@F^*H};K|5?F!CiYA0$y;}a%tCuF z92YWtCH!evFS6zrCvK8+umlv3ntAEdey(Td__|8cR&pYNCoR)qj0v?HH3k}YU~df_ zwnax2DfEH-|H{Y@{4_ADnv#u;k? z)*Nl)OK8q86v}Hby8NQ-HP}3pl?5-fF-g731A+GL6CcmM46Cy4QSooUZFJOj0TDM# z_2kQaA$zTfk}B(4U>jDV|5~9SwYK((1QvLx8G}6OP z8`N$)Ess5dj(AQyske&UUix5qD-2&c|4F#cfzwFQ{b~zz(~mcsgdw{kkUob|yA$AaetV$~ zl+z;4I`GQBf(g<;A+bBMPRAHxFPjUhE^qcZyfdT?>>X>iV zQQ`xY=m*jry^reP)pK1s{+pNy#tZh$>(C~5v1ciZV zJL|Q0Y1>nQ3?!@7(~zY@U8-wU+Cy>c=-(5mKf>k#kRoIZ-O75X2u;%QB%tlijGs#N zaK0qB?~hg{D*}Nb!HWg_49Xd|9kR8}!qK5tH|~h7;wjFYJHX;iH1j zDG;c33ZOH|CA$p%7O|n!w8>d)mW{@5|$uvOygs^Ye zy1_B%N#Q@BT*(xDY0M!Ul}kBZ2!b>~A4=$C4vJiy>)%$1J?M6!+}`!B!nfbP9535g zPiOCxn%f$rwsT_kH1yYPNwAq}J;WU^Ozb`|wCI~>)`-12d|f0@rhM>kSl92*w(Rhq z*G1UlO^n8z{wiJzLys!|Es@Eq3+oygig)+7oH+MyUz{3Qn6kLs*1@wLy&CzQ(TrHS z7Qt%9ts(^CsgRg}M$@d!{tC2DY{P08AXI>(?YB2lF;6prjQSdO=!%H=J_CCP^H~R+ zoB?>#>N0UrOBv34s|&xCq33~9Y=K^CiT@kFgti*XvO19Gqhv4s27Ezs9X9U<@qh3*LIFZ@E0t-PpYZJC?!S36JD^y|2Zp&1*uC*Iw?W+aQ!f( zJ6iuW$A|N{r7%>uV@v-B8v70Kg03^P@N8*9ovGmW!8U{aDtDc&kDWGnzX);`CR|!&Z9ds=&lH3ZOhQFD0JLB7%wh5V@v-txLOz#p;m`Vap6( zKt5AUm!NG>XWc^=;cpE&oj%<6i<)w{1kn%C37ga@Z59*lAjKRF5(JYI@*ksHt)Lxg4ouKg^5m(3W1E1&DrePM+IuWgSz z>C%96`JZjBX?}-}VUT(2d7WUNwAi2mRbNl|PyDf^~Qa zD|}c}DX_$8YIP^f{SA^3acS_uT5R)cn*Ieiml!yL!oorc;vYjy27oze{OAqxn`4~( zU~8ziwVZ3{1os3eg#B-KFfXO}L9J`n?og9O^d`*pJJeTH&kQs5j;AFs>u8jDa%Rf^ z(7=VTdMg2%KpfzcP`!)?J@%&k^1JVXC4JWU@o(4SO7A*cT^YOKP+xMs%R#;@%L-JD z^T16oR^QRrbt{~W^ijxQe;btFlAljssYWvF=h);144??Ut4KC<_g2`YwBeU!@uxCZ zk7@!gw@FR%T{CW8EmKMsx$l+@0yPvCdI1QVm5y(%ojCxQC&be`D}odaW~9)BYo+L> z=)8#^HagJIOF=--TC>xopR*t%Qsu@Ohh4v@C`2E9n{4qpq>tzPbBO)bK3hWp5HF=J zkxXL15_s24)vzBuNyk)4An~kLH;NU5HW-jjsYZu_l%jw5cy<}`f7Jt#f47(WTFZKd zJoFgL`kMuuQkk2zw5K%^2&7o>d+1*YQOf(MaEi;Z*j{eB3Vn0invH_JJRdd3cE%j;}@V;*P%>jY{r^ 
zhtD$v-y_{#3kDq#q1hOW42emW~g#VKA4tKuz&8IF0DP4ONc_n*ybmU8y8r zq)r!Q-v0TPn^2PLo+b#cu8WQNUXiw=>w1IQTTQZiRCX+k6*CL^;58vZQ>)A>Me}0o zlCA`|i@AWMD@9mz&^4rSyH5jKt$Cajcnh2SXX?I_A`;<+%hZFg0nKMA_ZhMG-aKQF zx<>ZTval0C@VxHXXyZa+Zk)mtZncNJ9X(ktA~o9_Z&j`5)Q#joBpdQFnpfe(M8-+m zXBPg82kSzlZh#cVsNJs*bWA5tuJAW(rPpcuVe^b4w7XhQdf(B(NWHfCvZY$_vFiOm!fbdh} zHbjbb5*QjiC9N)C=4q26z_POP)43vi4SnEp51BVW-9(_A6*0bZNT7PRJ%D0p; zm9*TyKF+`#uBBcZa~c&Gv$Mvcf-5X?O^iMq)_7=qx<0irr^O|;Cc4r>@Yd}Sa5SaI z_WE;9@!)hI?n0G%!Nbv6nK$CfXylNh@gca3_k;T|Me#1FCH92f^nE3Xtk3F7g5cV8 zG1%+WTjJ4O2tsMghS4mMP;1;jAcWOOOm6gsce&-83sWZjpLf7!%El|A9^B3T3Y=Qf zWA$k+bIDdRVQ+*+IZ_(iTH8lq2K&5?zozaF=%?WoebpU;*OJ$fc>S^zNR~SK!fvEE zr_k0-9E|_zw@2Y;dzgS-7ydkCv#PVMgD7L4k<)At1?A>3t8Ew=cE6J=|e7rjOcn(5p^URh_I zr?9f%7lIV81S}LEZ00D;3K!vyKe;k|HbQ1oVn75&jtcBAK~J5Cb~V9RDaV6~3-Rgs zv8`r1xl`}xus(i-1fbmsb8iEV-4O}(`_Ewun^2V!|AXUlS#ghb(snThRbcBJ{fAtG z&c!h$%3ygfHl>(!svPLRgEWbCS7C+Q z^g$fBrL+;3Wsgp4eb}U`fa8~M03CmG1GHC&|Ck1R5y?(ASA)NXhxy?e`V#&lH&V?7 zU>C6ZQrxAy_Iy_v%mpMr{iQfybY&D_d`Rtg5pD~^+`&9}vIY_++<;^9mp{*b^n!^3 zLBsI#MlUR`d2a$JplLC7m^%@oM0wz`K|;(03U21rp*vncp2!8i=e`pC-3J=iyc~5# zhm@MfX+`T^cT~kPev<6qwXC7z?i+=y*U&{yDnv-&J44Eio2I0Q;v$NJU_HbNzot4{ zt)@v$L5K1c-v=tBGz0sQwD2p8aVx9(V7=J?3B6Q|137|K#KC$;9i{Oha#eM2a%}DrNq{>Rt<_@Nw2*)O@ce zi2sZzVTUV0;gA_7%c#7^)YrNrZujJvC4~e%4WS%wjF_S>up832li+%}Iw5aswYrMg zx%J}u3o0?6dG`RQZwD$o$rXjyEK3rxjIj-Ii*4>a^X)oSCFHCT*$L;MuoJ%w-C#2V z)5PT-CNurF1!~m4UPR{D^wX@I#5lZfq#Q5V4yZiX2CaEj;^i%l8LO<^p1q9+@^`#* z){`#HQIQd3QStjXa#~cXtlA)bAO9o7KlH+#tUo5F>m^cbO|nujBG)@Z{N8Cn;eW%( zU*&6q2UT#cYcX6F`(V)AaF$L0DQfRm%h=zCDcP;gZx!9C97*g17vl1w0kn~bL(7u` zjek+XnJU@FfV2*YW2>yc-;!(6(r*>v%U?OF2L(Wz>ZVFJ4O}Q!#{R1=)qOuL6GkdP z#jFD`^=uVT(pJV!%GezR=nYFi=9|mkA1AvDJXfxhYC7O}k(4XZ^@k3REJ!iJo@)c$ zQ`AnCFLpCDg*ql@n(vK?y`2S888iljBDnSrWjCj0MUQH<9(bIVt*wxjc}=$}Gs~**v^mwqr4w!7xq;m)ZQBR1<;i{z_ zP`KItRJeJFuNMq|kqAz4a?V)pBr@QrfX_8r^X2j_p$~Tq?*c`Qo%SUT*julEN)xPB z3;Zs94H!0^^k{lH?*lC?GOcVkysI7c8AzbljC=Ai{rnyn8$wwUy4DSxBj!&MXQoh4 
zt*)OnPz+Ih=hr>|+t?>Y9MzK8$8+P@xRS^B_V=qIReNU4^BWCn-I*M!oV9$g7YrSQA z2hLaeqK76LiBu!{8FwwX-8-JsVRxLt!*S>$H>acKr^A`c_X2{4WZjXWm0y6boPKSt zRvYgGBX;1R@~EiLIIh=Zk$i$Xow;!mkLUDf#8dhsHS6x~f**!UXM!c->#x<__Q*rH z#fIAjgUH}C>jyHay+#{TI@#8H+a{YI17HZRip_}vMov*ZPP+cA4mf@Pz!D&x;zQkM zk|&_(!}`}q(-qQKt^+mh_1Y?Mx@mx|f=UqRHxzwYINut{1!hVduyuu><)U^ky_~1p z`nKl;xt>ovXgqC_0EZ3^Kbx)W4WV_j{M6jgpv{)!+tQ6S*YGBy^Nu-8QfpzUx#4cV zE4yGwJFg%wN|Ft8jh#~S5)KgP=$leu(6SIDETCVibj>_l)y}gs)TN>YS%z5< zgMAJmgIgF1(6c|Ty+w!vo~|Hod0HrkUWx$gK(irJP%(Tzgu^i$`vf#H`yy&+qQdK~ zvLPUmxC3yO0@V_^un6)Sk+hW*H4LJCpLnTF&OI;58?pdbyzE5VDX~=(`hjut{EwH; zmImnhk$LoRM}{R&U?Li>GWcw0kJ{RRRr`r=o10hB03iC86?bM^D&{l%=A(ZCbIay2 zP;HFh^U~*I#mY_5!PC84dlMcrN0nJp)q6$8Yj8(4+4!-erfs>(6WI>YYWo#)iM``J{)JCbF_?VS+LBM8LvB zWobr1A9sCZ#fR1&y6IR%uS9eUzuVXGWWV(JV1U4yWW~s471PSVH<_iXm42ZqV`>uyu*uPv+4Dyw_6m8s}gf!^%uJEu(R-8&1xeMEC{ z6`QBeI$z;$we0up@)B(G8Et!wPIkQUJfc%5um#`TW;|UR8d$wdUMyLb3t3n=DGik zJ$=}Z&4`D0viy6hOV?>&2i8TP$_U?tr>j&>%k+l^xMisn_mXq%l^Q@508tdu#&S=2 zT=k{Oy<31=Q2Y*=*P=JZ@y|k0xrJRB12P7_nZ$V1#=i-S7&%fU@giMfS)Jd~R61V@ z1AheeFDQnCrFn{%Hh-8~su)ypcvErCqls$f*?{2Y19dJ1>oiPwx9`vaI40baM&Q<< zrVCw7#t38-NBM%%>rIfE(;ZI)7X#2xhO{|nI%{UQp*dHBK|FPRSW^VTes0&Ck}LJF zq#u-iRtvy+!(U73X#$-k%8ALQ;5fW9WR&AjA5+6W@c3uR@6eoo{>Su{wAlgSd`#K! zj77T6*MeL+EZL&jstd01Jl@&nJ$KGakS^z$P&<%CN{_R{Ez(ie)5Drw<{VNo_S{L} zfF2)2KOOIoP+gGeU6Cf~)4gNgf(cynFNl@zuG8d9-mIRDio-MBXM4xr_-)d!%*)8j zILSoJBL8n+ajtYuAUJf7fkgf2w|j*fIr!)LoFf6S6dY^V-=q2MrU1pU3@wg2-2!qu z*i6z9MISm-E)~w3BCF1mopehx88k{uM!mbBeXINSPw}jZf_%$BEoJG)IJ_w$$JHADPn1>wD>h! zR`}12!WltS$NrpI+NB(!|Ly08DTV?F*mu5w_3dO-$}wJBjiEu)c+c!P1@{Q^bhbg^ouKJkiH-x+Pmlv zYP}z9F2517{_|bzwsDjFw1E<9lYhEOx z$KN_6oO6bX`!iK$#yRT6Z{`^XyuBb~pr(46zt{erwjs!9DVtd z^$LH+Q;=FKb@Ml)k&_SNMTXT_!zKxFzu=5EKF1Rb*wQW z*n!^`K&9SQ-nBsCEUI!K{xD8`FM%N=jq_a``!+@!3jPC9A2HADUTR!gfBq-`F{NX4 z2v|(1^+@n8*D^-gv*cu~j5$J8BuIJ2G&BcUKG+`nc*MZr{D1@&x;{)*AhE^{KF17gItAQa8>978ri2{IB`@# z@Ml0LMgQURM&q5Y9>nAH0l-oA8u{kvg|m+7_L)g&G~NJ=ntX&vH5DT*! 
z(-g99wE&ojr8&SlT#lj(8M_J)WL%o{`j8^z+}mqasMBY3Fqq$v=gQA`wET5nC`_Wi zar2j~13Z+=i9~S^1Q`_^M{c8x9$h>4YvhB)$kl8F*QMN;?FX`kA9N$|9IS1r2c!D7XLy^=RZY`jv28L7+kPI}gAUR;j!$+rOU{3{>S+IJ7XX}Yv9gGL)SqJyI?mYx|YI!Kczwdt2O*woX& zz!m@g7U4&D?FTcJt0sa<1xcsMJewK1v1d!#Ea`)uF z#+C0~pW$CaO{yfcCN5yFSB_N9%NX<1jC6T}OO${2RhN}U2Qwnj(g8ypt9l7ES@YXp z3(<$%&Le)>zY89nM0kTf94P7r=YgQE#OCk_r|Y*>>#*BAye@@)FMTKqw0O`YH${z< z@iz>#6H{gJQ8KIYdz%Lwo%<4Tr?Pn<)O z0>Lr-P#aiT6kH4e)aPS)5>~Z9Of@yEoM{GRAQ{D_K#I!Yq0X%_u$0?n%=f1ANInr; zk8Y~Vfp7zK3i!2T>FC=84Jw>=1%RZrv#MTpND&xZt+>MP%GJ!nRbc@-SBs>Bv$TVe)QNeN9dGPXT3OCySi4E@`& z1zP3WUU3W5yBwJ_1wbo=Zk|`6Krp!R^?s_)cCTe4P3EH8OM$o~h}T~y4I>8zgzaaw zqm7q+1276(OBYES`9?jjvOI<{7TM_JC7lPH%D>TnBePbCsLzc10%P2P~?$K@;p41AbsV?EWk9;w1>id9qRMs76`18FP4YDCh@<&mPlJT7h*&zL<5U4F~> zDDK7b-71p_z>7gl%Z@J2ac>{tFx%BOJ1X|7r}#-@@_s%3n%ywA^PrM>!$H42cYL|IBC z$H5RJ+Vp=FH0!#+{qSXa(NFcMj(117wT_5`3$-3fKg9H`0iGQ=wWQQ9a97Kb_J;lJ zD~>MLV9s014FDY7Qu4^7ALd-gxf*MYXNIjN9TjiANXr6%rz+zSA0ZHaJ^WSjPb$CJ zq(T}cbTL=9@db+|*P#Gl%0xKd25Yx!l;T5HvxD4&)gq8Sx)>U#GIRoNq+31GAVz4v zHoAMxc$0iQ*6BZ=6O2!_^is{D&ux`E16LyVEEWqpxar3QcMeHZiZ19h3W~hm9 zc<0to7ctR*ef#JcbWWIVf{O4%tKC+TUdOXR#|ooCCGSbG{863G+6KCT$- z9TuFHT1eLzY1{m4N#B}qyp{@jixD5sLiUyibtcT29&FB24RE+>aXiyAs)C zK>Q}4)S-U5WW;YOf1?SXr|M2W;n>1;ft=`>$6Z}DS^@Bt$^T*o*`3UdHa_C9)GrX& zO{JhYLIjk^VCp`di}=34zCZkoj=ksc{-1nlZZI(N04P1+R9=*`T9-7HDfh(Th&1#Q zpZgLM=%lK6A2PLoRbtNFz(fqYP_JwjlKnr~AMESxoY=DNJJIuioxZX*;~+Kivd8VY z?t=*Z$G{&xG0(8NU>7taWwF}deTUHK@)tzj!MKNY9tJRg>QKdgEht0`+VsGG53tv* zMJ|lSeXo^jSb30qA27j>LCoOi>#BQ%lGLSdRfx`zHIVZORdrmxN3r&T1a}sCt3=HI zE}9Hdw%LLY{@}#qjoZVrM^84sTO11->!su#VFbX^tqRL;3&{$`Q@eD!F8xoo7T1*p zJpyFxasFPuv5R|Dj4zYJC?+I}FUsnU>K+3&K`jtGfGL_5ufo9A`%;DKy{+Nr84{#t z*3#D%Ajb(jC5+%NwLqTcGrX!yaDGT)D`|ZOIQNto^J1w_sWEnh5~1FUcSuc1&V*r` zzTjWVID4z*&XDJjgB=e4&$61?g+}1ig854k#x6ice=P7`BZX#3P_?8T0F`tr_W|^- zM$LbQj%uAs+bv&3_ySoUi3-9~_+7^f;;;%Z0pFRt?9~t1{nLkYsQjwKpTJqKT2enq z1>3aFbH~m+z4vrvextmIg_){z+SgNSJN#)WoT&;-^qHG{vCkd#hSys=FI?RJY-9K(i7W(&=`%LdQAcL7l5xB5 
zhvWMCd4>$MHyPAR$wM>*SMOcm_w4pbW!q^UAvui-594;9z+SU-2W(%+$*0(vOs85l z{T-5|PxZ%V#-XXvQ~DFvAUlP5<;xKNZ&y_u9P(eEI{S6`CWkV1mj%!&*O4lv(u}Fy zzozNpj&inhl-<&Y;T+U0!=f$+Guy^Jl$^EvD8<)rjg;zD%9sQG>yny8IZg<@OgzsX zb#4aFaM;Vgg>J1Kttn@R-!Rla87>J0{{eJ^E=;v>8JEtg!CSfWNWkIC@vlLzUKded_ywKZ*PH9D=T#3%Iiu5$2ctG``7yt`>}_ zk^Xy-8* zUd|b~=T-+m_7o7@7W_Zy@)aZP#C=V)VnbHkIuRw?Sn3VP+^ ze-Zn4 zWo<4K$cJ;~5d4}32oDXChP97J*hxYSygYW*z8i212mAfMm@J0$7*eZKz0R~vZi0e zX#sC{TQ`8-e_QGhIDpGi^f}4%!IYF`SULSYPL!<{N_VC448PJ%CvABxmymTZQ?Dm za}>83#IV$bNLw-r?V73*MKbgl*Qvn zvb30y&V$sls<~n%1*%878b#kd*(>vFI+=&-W}%~xP3Wn4^irsYV<|7!c@4;@uRRM{ z^Gtr5!HE5IO;3v^rp_zBP?NGxCzp^Of(UT5{KUA(X&fXttW}I2q8oeShkT9hof7Yh5 zfk9|eKl+1_b}l20KxBQ_DevBIe7goimaX!U&V!tmq)OjK_6%)@784fl{25X%rRh^` zD*mFL$Uy!*UPX3(z44Vnd8t*0H;e2GlfA2zn|ZNf9gURt*UDy}8nUKM{jV9>i77$j2({drQ3qiT zxXXeT5(-mT6>Z!5cC*Rsx!zzlNVG?a1a5|^$ImZKYLIh8>#+Z-_Zh_JDc_-~^dpRN zGrU@2-BJ74pzY3IZ(BfmT~$wel&~#l=HF=3{wG8EePPP(GJwL*20ls#VC7``gF(qH2j(yM=xCvYB z!aIRtpTNP_);3dA&(+`@l9M=OHR%C8VdhFjJqh(2VlIO@7tMeufN>E((S`4NN`dBU zQ4%nHg7JKnH(HaUU6^TKZeO?mAnT_`1Z+B@_FG%E5Ru*Zz}TnpEBa7p31@Mz4yLj5 zH|Xhi0;hi7g(Rq?_E(58;o=6K4s(m>Px5fAj7_k}8p?jDNi%58)Eg(Y48RKj+`RT! 
z5P&M=bPi5^j=<&~o8D`W(!MG1bkN7S3L(yXOBQy3Xjw%h>B+Ji6xgKER<0s2aj*)c z4;}bACq7NRFxBjniWXY>9+ zMJMxYAt;jLu5o;L4ezBV{`;T~FtR+;f}JicS;Pf`6%$uuMM;bwbP8+Y*m1ZX15|eHHk=anReQ0s$WGQpi;PuK~C*vHX^^tJq+`BbSF> zMRSklk8Xgl=~{e%Lby~aP33;xQg~y->0CfCiP0DTkdBF#{(v7cgokC-xZ$%V1vvZUgU{ z>Od~oXK3B~@>4LMO5Y*Y^(n05QK zDbasE_rGJM94eBu$0NKKcR(pZ>z7uw_xB`a#+eVxYYv~1y-Oa%2ZeV5rM;|^K+4apvDAg6jb7b_8ZqxLw za+XgN@wSm|jBRAm<{>`>Ze!GVKFKjX=>wuWBWkL&<+0uptBlsnk_0|noa)O4f8Tr6 z#6*gNL-Mn!y$L79_+`DeYYxtDI;rqXeQD&esmNyj+r8TqkEGOi80sIPIG%w+``M?b zA!kS1c_i770MsGQ+X`ty-Kfja}WniO=vm9&s9z z5Use#ozEBg_4Ax*u~~Uxr@7xz{1jVXMIczvYy)MV;gfB0M{^BMe9V^8pPk zUKOTJ%U#t!l;-k7tNnnc2vit7pu(trR29hcWei+_;fal&V@yx&z@vXKw_uIJ-?E7( zx^DR=<;j4oHBi=n<@Pq*JZQ_uw5X#yf3>3)CXCQJ^&M}(kulw2&c^J@ydNmC6@y_a z0~U?tJ>VF`n56{4_ISLXkFxsT;l;UpFpl^aPDiLtCNjWJz8HA=E}{>^J-2)v%8I`M zUN68BrzyAeH_t1%-b?F{%r#yNQupZ8hkoaF~yF5v_2+4~>Y^}Et+ zwjC;iRHIu+f(bwmdNj`+38+%O)XuOofpU|ePT)ilo22Ujl*$cRT6>5Vp4HWcGQp21QME>5!o+uRq?YofY6to=VujC$x3nt zcZW!)kT8HfS6zD#yuh`|H@(Y~0^GY==qI_YRS9A5x!~f*z0aLe$W_14xTE;HUq^2a zgcN_~doEyMFE{@xo5MIU_>c;&4@UR_F-h*7_OsN2x}fTf`0>^eg@*U!tq4Pi;l$Bc z9q2`OArE5}JzU>;zL#rDw4USK=<^@&C-g6jn4|nTDLq0nTj#vDn;Y56Z~_e2!erj{ zLL&A{#X`&*J=dMICUgxGg|vp8dn``*A`JQ8q+3=BBl7&#V$T=mhL0yY3Wc9rQ4fei zE59q=zajHQo~0|Mo-&3E9bHdGz@#h#BqH`XTvl8esL!fT1{d zSk9Rr$gu}{(plXA+dEU10Co>%2Zs*ecOdv$>NuxC(jhR1AY&@md4MIW<|*fG6%ck| zdG7qdm>BZcO{l!wSk9>DAql(q@K=eb3Ygg~&rK(mT0u1D{*c4?wH-h$m)E5B!TyeF zOq;bh2S6b7knOxlqDme}c+or6tiDuy(P2AfeH3 z2>NHh|c ziwR~PC*yZR`_##+vS{iL*9T5xRx$Z8tI%?c!W;|0gW;ybwK_+Jruy9_Ch&?$N*{X> z`!HPUQFl*3J8lz-751en7`P`|*3<3~Tw~%{^(I7}#Tcii)xF2}Rx&%J8h9Uwlkgym z=k621et|kK_Xqn}$}K61_!#hFoaYDMAu};9q3E@%$JH&HS>82BAL7%u9ad!27y+-CV^j-1a}Z?gWCzeEvsgX5|5J}1 zqKaDT-1Jib`=5<&e)*dj@yikOvUMYQ`|_pW2l_2$(Z$_qlQ@^C`(#>*GYk`lH%oD) z^?&==22i&O{-PcASYq)vmw~{{diYK$^Ij&1+#?@GpTAou!*eTZdN1i#M3z=|v;>;# z#9;QP!9^tBZ-51=uEt9aROmizl8c+vMPYiyNIOuW^F1ygG^Q3E?iJjA0irhkt3hAR z&tNudw!F*e{%3P8d8h0UVmz>ywb|(=o5g2aUxh4`)l+o&S@X{&7=BFDWU16g8Pb4m 
z@-VKRC(WkJtjR9xbaWQ%LDM=t^IJh|XFXoPo(LH1(0IXK(@KgqA{O7K`8$SGMM#)V zNpoR=7}bC9viL`_14gTff3u$5DF~v+hiG=k&lV>ClihSsJq=XoxyT|cFq&DL>`m%{ zP!>ZGV*^*;xE++_TG*GrV7PFipA+`+ow!V%%qG&MAu3 z>BAyg1HZps^Q__sfMdFbJ)>U!tGzR?n`B+)Bf&?7;v_9bMsF1i`En!-3UVDj>t>3UY0f{=F&U~%TnC=jrVi=%SM+zf_B8Iw}U*8BeML4Vap4gCdBsBwOli}hrHWR()4O@ zQU`PgTZV({f^fc@g{on0yj6Z%r)buj?wG*!`-j(&@v`DtO>U%TYG5ciXR%B3^6Q#I za8OD2u*Ha}x@#Fi)G&w=yuN++-J^Q)Bab`W;v;&fi1p@j6X3KSO-Y}R9LwKa*#v6M z1C5rUOVf-#hjzqAFe3+(L14xE*v+4pG78U=DQd?OVa~1hnAi|Rcc>_L#`6ZoAc;Sy zu-EB>(J;yc4s-Cy0kf~r#dJPva6vD)gsLTr@t)~%n;hHQtA@KM`HzesB0zg~=`Qw*#agY*6kcsi@(MXv%?_V52EY9! zFx^2~3`xDW3jGkJ#$_Od{Ur$W$u<@oTtmdWE0#mWtX_jkTqDbmb8{{&0H+Q>UrGS9 z-f4$#f1%dl&I`Ml1hNaDx3w)Kmd(J5*Kr%q$AeQRxNA-WFot=W#ysj^q#`D6<7D}s z2IyZ#2N;|3z=Rx(Q^M+zw=L9?KP20c*%t|0z$PoA{CC%BFJ;VL_PC4am7)`_mY{+Q zt`?aNn+)A!<~45dB2ZWWb|Cr!xi^VBZ+zvQG4CCKXw$DnRZv;I2G=TW`5@{cM#@te z!_)}+`(PdV+*F`Rf!3*B%e99?=kAYMc*EecX4&lIf}8TpyHDg!GO(={;pi#?8mS6= zYfChKd%mvI2XN%tlSJQoGZSIE$3pVwjt^#S-DtqHJLVA@>yNhzmzXjPdxsgww)7u_ zCOn_`u{rr8b_5Y?-!0GVneHh`5fY+Y{l;R&ug{FldjCe@{X6D9T~V&z zmlPihIa_ifm7mWBjIZe>I8+%dc+$PQl#81{xbEfV7LcJ{6YaY7?<$rGgA|P4N$k47 zlC!xDM%Ew$tq2%M9oQd<*aP4lNXvWa83I_cfa)qb?Q+$eVm_A{iI*ih|5JQI5hM5d zpyD-9Q2~Z{wlVwV2i!XLk_G%6oj;X#_EG7(_rEwAP>Ph|)BDGI{bEUqfAPF~Ov$1^ zyBW>H&!F<2V(GBA(EuY#*Py&*|yAb7~XVkU8 zY5xJ#$Cs9BV21M?gdza`!U@xGw^;fw(Todv{6@k6r#s$KLSlIAGaWoc0rcz{2y5#O zvZ9vF{blQB6Isda^1;3#QE!Ejeh+nVH1?+SniXU^)z-<}f&bM|p z!vqhdXw(-CXLnDr%|!}?w>9HOZYt(PHCwRz&u$ld*=;mV!c!?S(aN~e42rd(xY(D=)0Q9B_o*W1nX8(9k}{4Nk<^eZcfm|Ebp zsVtB1SauWtmRS6fa;sp&{9}YR*^@l=F|TihF2EfdPaU%rEn zG7XXf{$+pggDe~k5Dn}$asB=hJ%Ij{e@}nOwVB1kyz=d3&kJxrPVP%yPfG^NAgk2d z7;i&wS8g*pZm;*}0dZjhvEp{3%cIE-|adt1cuGvCf0@${ekd^3o#{+&42Vz1Eu;#u?~{wI$gKX(H$6T{JrR`+FC z@5AeONEjYzWvudxGZGz0mC$#+v*;h-?F4t>cW{rA>oDS@jG=Xp0MH$`8m*0V`c~ag zy2o33<8f5!cn>*X$HcWH*NPLfI?}&MM*^v}`GQYOD@2z~G5&@!931BJkq(9v1>r#u z43g&0LA*IfkZ~^RxjDLqiDMd--3?};-K@i^62Zqli)_DnAkK;lq1QX+WS$pgpt{9; z`+Js?FV*g-0&|-lVtE67i_AQq(V2M}8sTF?XA`jo5;ebn`1Uet_3=RLJyMvwn)J~- 
zDN_Fw0%5``N~~wHV0LSWS#Y9Q0hb^i_|R$W(qhwt&XCL9>_NvuFeohDZv9Cv{ty0= zaq;0jn_q2JxUYyK0qz1du-6e99H8@uFGn9ubVzdhLn2}N6SNQ>Ze-4Ci`@H6k)-De z!Sn`413?<5#8>idWrV~sC|2y3W*w3qPWGv@n9wnBh)Wr;<`vGDTpOHt2`Call6KxD z#;Il=JYi4pIS9J|TNhBRhMjsY8yes-BnPUBA>z9M1Cc|000SlLQ4)qCuANF8r`nd*VVwp}t_(L+dV5ii;1q)`! zdx|}_YdPdPlYfBc@_@VP`IsOH4?3S3K&M0kE{zDQe`FNMDu#qC=u&R#%_;V9Bmv9h z#e>5b55;bBBxR=Y5KF@h`11xKh%=J>pa^{Vmf4mreQnBtVj*!CQu`$mGCiQ4EvDw6 zGkOh9WrBa|vj*L@pUDG2=x`VHM&W|4&v)pN6WSX?h`atEHxdHSGdviL33}gNE2gA= znOhyc*xlZkVaS>VQ1}_*0XP?cA2up}(P<=(HLwtEb(X@3J0Te!F=c6|#70a4$iszEpd9mHUK zWo*LhPFobw_#@Vt0t}Hu;>*;`ey{gNjC_kdV2Gxk8gaUgpt~x7EK*cZ{m@Cpec==h zpZE~zqXx7zNPz(e^6f=Yj@qfBY$TAXpCa1;t)>SNTpAP_zdp*p6!Qz1Lic}oc1pUx z$n%BluHSTDD}Z#IzX-$9h1Frt#QjA3^4_}1xuk~8EJuO1PFyemJMI!?p?N&>I`5ch zq*iRP(ng5VWb*N)qg#S^mN*6#m;rah|7QlA+3gArBaEV7X|L9tV}jR-wObej^88nS z2LfI;OEP^!B$LubjxWTguk>#j$L>On&)+#ur+)MqMR7h|FGk+#K$jTCTK8x8ZY5pS zb)Y{#*NqA+xQX17HI+Mnn1|=>rT_v;>(IU9TWaV_Yi=0wMeO3KIJvK+yK+_#?8HA) z7(Xv}v;C-;4@Qaj&IvM*zHH^r?`uql1WEBHsR)x^ArkqJiR(|N(>wrtK5h&IS_~mQ zY4MZPkY;T>+4gMnN5=T7*vrj=Ngcx?@#vzrv&J7U6=>5uK8Z0eu%&sZwY-kzyX^LS z3OjRWx&`)`n0cf*RYuuzvoC4Sf88_9Yd_=O1ROo0wt7l8qBgxM6!{`v8f;1)+W<*N zI+iCpZpnE;35&||zCC0N5tGY+!x)J$#w^?b(WMqND9anM6>%@_px0q;Dn;%dfV_n6 z$u8|Ir;Ct++?j-ns38^xH5@odDx^X5ZY>u~45IMICJP3lu4-xjQYs4LIdo`S_z`|! 
z)_UpmNb(UmIPWMPJVazLChoWAmIczOPYil|7MT010`AuTgmnLmypft9!r(3*q%LaU)= z0#+Iz<`_=2pRe-$uHd#G(`u8CNiNXp>h*PXI9QC5{5__JHPoIg5fweJZdJ1fBhm30 zrb8{Y4fDO_@z2u_XHdJxE3$3xBbe@Aj{N>MbNp8=s3@#V?VHCNhrNbUOouf2+Dme( zgct}~`kf_@NwJMSzqhy70MZUlO-`Lsd?7xCc;A_KQt;tS6UPD40Td>GE7>euXgs$j zBir6H4pl4hW%K4+fgO`qVC*Y81Slt!o{fY9VW)XMQYNLRTlVi|uTQ`rS>iN$KX^Y- ze8Op*f$}526&MS2VfML?zaWdI3-RLtSo*sNgnxaD@@c1JI2CtxT1Q_1-;_|Ntaq>p zI0tjx%{d^mt2BSA;L={eod(>rKwavrShhF71yYIt88T1Yr19$pTP#$%nLTeHz2A`F_3q}IYmNDS$l`J&x zujk+UY;qd7*J9T*Zs!+*`!d3j#ZnSu8iYo-R8S?Fez_dvedj9FpoXBY@xg^&?N0nY zbjoftIdqZEt(80mbP;=SLo2gzYthbG2C)90c>u`Mf=}w%fsAodV`YFn{s<54)hn}; zbUPX7gq_oAT0h$Ttg-8rJTS1MTZX(X6=ScQ7eIKmi^bV^shq_(V3j^!6c^cEvpXrtgvc<%kR$M@kvdnOa{m;(}C5AzPS;7$>N zF2%`V(GlGw+!TH7N#5inpG+%I=aM2{jCz_{`VReT^Z;pF6e{cE_LxLF2W@Mj6%;@L zhJ88*w$QAu5zg-WdpqPaGK_Y;Gk*KUA%M14$GNY+>m&D-XX*9FYL6BGU0Ypjx%2J8 zzK9O@gdE%PC5I{eueoJDv)Mr!z0dnzOzH(>jGXnD;;4KpM%@cJ12c^`wyhYP2uk7b1U&_#ncMfakRC);VG~1g$ z6~k9AKi;)D)?|7J14g*CU8|F?2iVeXj5Jnm_`jqzOiwmqG5w^Kg;Zinav#u5r_&l+ zA19syqs)(#uh`EuI=74+_H;7zPaKQ+dd9uwCK5NZmSDD@czP38~_5 zN>?;(hou|WjgwK@ihHv2OmmkNMF75&!>vLPuYzfU*cslx+$I}w0fbaf)vWZ^>Lcf9 zFsApga!ClbJg;lvkFPAW{L8%`1fD$*d~qiezS5-SgWBeDHz`E-6-E$^SR8c}j=S+T z6O}VC6CM%_tIJ-8MGE1;tw4&zyNH>u7oa?XmGLh69E&1;EyaDOg>@ZiiLS96M{2w^ ztgFgAX>ymOaZ(t4dj%SnW~;q5SwkL!qOm>!p|YjyZJtNKjhSJ+=tBK41&gMkuVvKx zaMv~T3uKXM+X3jgHl!6lS$0@l-{eGZOwZ1ysi)YO{T|YO&@qMhnJe| z!>7ga1S+0)yj`IB?@rVf>_qdr0-eN)mEFgR8Te%)cDl$-@Wqe5neG1>OOzHrBi-~L zzeL~BnE;K*0;jhs3pF@=;VvqHxqk2`hq#9Lf9I1Pf4*&x?4Nk;P1>I`-T%KL-n{pU zd^DlMWOYK0Mr@})#Ex5k{s#$h_crxBx=*HssYRCjfnF^932=8pfViX%2bBI;T4dHw zciv+>wvE4eS21!0d}%&2v!G*8|Az7_1%H~rTNe1*{K50|IQ}hnJpG6dA^4jFF)Q=e zfmEBJ2>Q@DEa~(+JP?TV0diU?oHCG<`0ktPjZ-;rx@C1vNL)HWS>_mG#AfbVg2>15 zPBzl9Z+=K&Y!dR0ggpuu+y%ph4;bBLcM(|_4n}X^psy*Q39{k)TR^~&gb-I2&Grj9 z(W0|kqCIuL8PTN#0ebxJv9vlw#&s3Oai5ZzJBN_@rRY=rPzL)x@yBw^M8`6ynLhLe zYWLn-%lW^z!=}<=qEF z_dOcre`;j$RYF7?R!w59yAGdlFAmk+YzlTN-;JT6U{5$uMcdQ2s=~GMN0r)~60_m* 
zYIT9sTW0O1qTAaZC^CSz%*pLRhn=7AuZ7-gd`rB;_&{r3KwuzU&iI}BWbexi?uzQ} zi#-S!ba1Wn-CxP#m(O=P zbdV=td5iA?|30y`LZ?9fL#YdijvL=9>;RM!r9JS2_ z6zRB7E!#cIU8$TnKYhcDUlO|biG6EePI1xmiJFJ+M*DO(Cpu@M;gmI>xOd;aQyY84 z%3{+f$$)FKL}MU8n0bZ<{vtKD*~&?f$OlXh<-@MqVS^4%G zzb(!l5TItH8%zO5oUqki^ng0aH)gQd6xb*RJ)yZLA~}H#>oO5Qk>pc+45G_mcb)T)D3gEA-Y!(q^9F~v?u)cYAWDC*-?~w_^Vc{pp=-tnO=&3{_&?swz&Sq z-4~8-OmvZi!%=``TA=3$l~jDs$&}vmj20M+~w>4)uDq|Mbzv>>a_;|vdy;&pMW{R z$YcLC3Cd^8eIR8mG)ahOL|fz5&q(50^C(wz4+fERffNo4O#$s}_z#-**vw}J8W&v_ zC*coHIRerXt$SA#Hnrs5gJFcci8RD=p+azvD&|qOR5Wo6S*=8DzlhjMOU>Ya03F~Y z)I(gU!MH-$e|btM67G9a7>+! zIdzTg$|k6Va)mu@Lwx!>4xdTo`aHG$_P;Ey5x&zP$6hIgYvHkai_=Fw7{b}p>S%T- zBdFRgVA#gIM$OgmJT!Z)$@%GsZW(*$X_by)2*SoVRO~ud%&IjNV{tc=Iq6v-j0Xmo z8|Ohe0~>iBkC1U<|aTo&Zm>oi+p|NIPpa0FB+m1LxMc!FfLA>^7-MLDzh$F{_tp(;fY*+szq{%?* z0qs%CXbZ(56_JRGIZtgL1K)!+!Ctl)NrGqq<2Jc-`31<}%?G*YtQpJn_h8!!P#|Ljm30iELNQ>s2D#529TuqqpBVR}e6<7!tv$ zSB80hug=9}$aYIWW*k^T!|(DIHCoK^`J3quuCPoYh!8L=-iP3+n7U0AjjxlthX}MP z;$cLv6Oh@8!U9AQgN#VwpJb^NNWFy@;~gFv9!Wa?U-8~RWpY0^!IyqH*>4U34FTbs zWS%?4-Jg-bP1h%=-&S9nbor-V6NHiP8wW9hxUh@>7x4N8JQ=|B)FEnB@6t;8B!&_32J}mBb!7l%groqIj2B zk98L#`_6!}{pzt^okmNOW#pSWp3ie2Sr%}&`!yq1>mVw$NA#m28)0WJfqq1IFlF`L z8rz~BB~O|ML)`~E%(|+93!6JQaBAjmU2xe@1braj41rC7mIy-X31HMXy2w{-UZ=mo znT~1YSkXY;W9oAQNRt|4=g%+4qn{JwNb4Hs^q-?`{{Hm^`0XJzti0V!iCkfEKA?O0 zfGM;K>|eoIEN|YZ`={!hk9`6_YB`XIyl* zjd+%JTA2Lf>*9{bVepB5Vjn*S!`)Qt7Oh~Y2IsWo$BNMdh7t}d?bC$O>71IL?$w84 z>x_^w5lpeR{3CD4GQ#_(n&SzGmXYfmEP#cKfy*1s-#$A7br zZ?5>Z!nw+!ym}g@cF;QAQmpc4`L@gMCbi{vu1Iv?ibOn16c}U`0T^;BprAfPvs&y$ zRrZQ{IR~KYBb#%-3!0qNc1M=pDGd5@@t`B5WS`&zjG#8p%G-36$3A9r+5qEWI`@kU zhI40e2NN28wGJ9^13<2TJVWfe9l~M}Fw}ta7rzled-!w#;$`s8aF+C|C<2cKR2o=T zm~m(5?p_K~C5!_%GSoY{z7iDpPb3l-;>g8$9?1|aKS3>SwOXH@tIzuVy5r_`1FM0g8)^l*yV!Kqb7H8^qBV3>vK+q6kin zwwz0u_JBg#NmhT2CTzH7%eu^-d4py}dBjQivqv^Pf2g892?^kh}Q=0|e{{HuqnGw5kQt{j{+> zp8MXtqAtz@Xu<8CQq|r3bMmfE0tGTsEe^bKzyXh3SS0on84c+MC7AejF3P}|)zMPou7wlQEVtB^zNCBZ>k$&(&qk z5UfCH*X)!nCiA|Oder(0!(f!OKrak4x9%m->MgwN3* 
zD@{2>6?c>Dfq@vGRc}n52Qfh*kLdjpA1Q)1ptwgWy}66^J#=*Cex@*C;}qvvVhR8q z=lLFYhEMgtmj0Z#7JUfVHOyoujvd;(MjTGrjzWC&8a+%`POg!7=dl`b!#@~BLMT|$ zlRgP9Cazk7wV~wLl-e96P1>)_p^8R8dQ{w&UN0rq6ywQGTO_PtV?6CB=tSjPN{7Utk3-$OX} zD`)+xbg8PQ+Z)V?z1>pK=PyA0DhA>A&yT+pm=IgWxCnqA;>x%8^M-pDhk%8OO#nz} z-$(w8?bm{#msCr@(MNzMwq?=f%W**YT6T=wPsrx!Bt`yc#N2QVb3;wnpox~woOYOL6>2&)KV0AnlT z^c7PQ*b6%o?c?F5fKWj;#*qSF5NM`J`@8TDHVmYj-NEo&XGks;;!$Xrhr?)9S_ZQn zhE-c8Z7!zJqfmOlqINV3$<%RuuA1`-_#<5^Yy?nfQjw=1kNM-`AvxuJIJT`qV;^h9 zZvP4^7aZ-Fx|Vv4i3I(}~Z!Uwv-Emz062*Q08W-t>;aQAp{ zGpsovkEtBIf7_W@10T?Dh8f&>14G6#7s@RL4$W+%VPdRenq3lf?1r>}J7j>j9Q8lR zcq-?UW1wXHVm9zL*uLzSr&`54;Bm>2KWLd_q}$HJBgon+04O-q1*fm(qvZBJV^SDD zLvODG8}v^oe#QkjDS{jY$5*6l zsh`EhsrDpo3=+pNV>1sp9KcuRB|<6ZIS0}WLKsu#+;{-5UZLe#(*q0fy^mt~fGc7W zy~G9b8KMKR-5utv%_|vkq5raY`I_gzCaa{S#393pn%+bgt-tnYG0h&-9cw4?JC-tEr zWZ%orz{EL$ZDKHre#Dg_GF{$uE|xZ~hdxefIHGYV7{W0eyjs zoJQAMXUoBDP3r?dUTI}MKvug3v%{4i|CW~`>_HWWe89YLUw(|Da4+$?gd^sA_0imj zc;u=kTKQl^oBdgm-|%751p~v`R1AuH{BEzv%(MH&3iM06;Pm1M0@l6*2-VPwvUuC{Xmda*`ud0%EH(;prDn~Q*EjnsyT5w_QEqhJdhkbU!Blh`cr+f%!1#ktvky3DNu_;WuE?7Fk z)0Bb+wP8MJGle^wWbcw8a6La|SC?i^3&FxvzEo7oN;K1anJoKWx0VJox z>G|Q5JB*1b<})j|`R#&9ABu&LKDTY~CC|MU_~oA9daYs9EK02seXX_66Ca%Kkl8_t z`oMej6wm(9N8Sq~z#L^JMMW30K<6eRB8t0DmHo5plfz1B`g^qR@oq~2MZCkaKf$lr z+0x-TWDYWK@1N25nE27*c)yi1`4Tbmf^MlN;jLRCaIUZ+kFW0Aad4tj;)#19>}~n( zQ7Iu$L!XC4h|M=gQ7f8S3WW%lpU~&FVVZ=ka-p(L?Z2@;&o}&GXqk1HYIm=bKQB0k z>Xf=ueLD&s(uz?@by9ruYOJX1b4OX%XR(JSf#^SkX0XrQp<<{0u0##x;*e$Qwj{rS zNx|aIF3)VZ_93!xwk?) 
z8nmmRj<5bXY{PGy@Po8<8{^jEw~(#vH{VrW00THtp8i7<$2b&F z5hO1^=s+4_nJC9~7xbPJ>=3B-l0oE3d!-L+mwi`)c}OFbQi93t)vG?~F1^F#j#W{P zxWRN-SACK&x26{~SnB@m$@^ysmSrBFB)3xgwHKm=7&~0oxJ#t3l0DL)OYd&^s+UwK zdwI_}`=k$x*BQ&W#_gt7J&Bq;IRE5ZRaW;^Q}=XKPXi=%!kL;`uR7r(Ax7W88KRJDC%?;D1ZlzE* zJELS0M=rc!@p%z|o@N!a*}=J@Ka*3j)=a&BO8}3%<($P=Sjt#=7y{WqT_Z<4>G($5 zYoTpY{XvLz#T|Mzr?OtKAse4@Sx3F3SZ_Cxc^H4 zir{>z$a;v(Xy#f$4mQuul#so{Pehpncs)|vswCWCbE(Xkn1_KsUDVgUB zepFtPdTUKg6HUtj?8{W8wfcyS(=@r9C8{kmP+&`KeC5CTg&#`3)41gK7Hxp1F&bio zz|}SjD!?52LWyxb*VI9he_CxBx>s|1-FfS~R8ud+njag17=JCU^=#tp+=TRYoDeH~ zB%~MHU($yZ1NL*+^KEONQvPM>lJJuX>pkV+UY>-fZV$#&;H2K^A+pq}lx%pzu9e=@ zsey^X&Ct>{m#Nij&DOfj7pq3*I;%#sKhnhJr|aLtvr*>vWJB8Mrsa6HTE>tNa^J1X z54ZjUlTao17t1RR!P@?na44pUa1@^p=vB`&V17TAj%JOL9&jW?lr=uIVZz<6Vid{~ zkfPk_cNb@Jg9)vb_wwjubEl;UNbYL>xZP^w{(;kQuDV{3nidlB?ANm1s*(6Z6DBfN zw=s-|CW~2jr?ryIOTdxIt*=EYIydq8eWh?Z<)wGX6bsfhJ?c$1NS2<94FO-GX1EBN zw=yG^PD*py`gx+pa~kL)H74A|?#djLI%VDA8??Ep;p&nwCU-{%sdOpSb59(H(k?fL z7fs$$>HFGJ>CaQR91b$?@q~#FOH=0XUJ|Ejg#FS@Ohu;&miwH8I`AGH_7zLE1@n2C z#{osTtQ`$f;msvZw4z7jhWzS7wCBysLV-#t2qamLzpNY4fW&x6e8DV`8W3ZQtdqZz z%#je%IfI&X#UQi<$-Cc}U#*cXq(ilyy7gi19b}h?vHg~!MJvHHve;^U(cu;Bw#xmj zlRX06jh2Y~DQ-nF^XSl3dxG48 zSWKKDI51-;oZg44To{N5=BTBiT@t=bH}k!L8zBURo46&2zq7H>SCQuEiGgpI=CR&T z5;_vBD9lPG^AIY0N;;q7oQkn7G(5yF3>;`r1Lo$nS04)Lw&0bX%0CrcVf+E}?eT_D ztBj0?q~^o9jK{|BMsECwF_lM;u-%B-En$Jt?4coY32=AGB#ys7a-GnqZ^V zOy_p?0Uz%B6^o^J1!KiCVX7mCJ@NNRPK${rSTLq0tPACNk^4zs^K@AbzGvVG-sYZl z|3_9LMMK!{#0}@NG%y=ZdP+2bn`bve4DWW$Siys5O!cwS!RRZNXoUjVcwqySrmGLH z=E%usE+oEA`Iqc`^aHPSE$& z!W*}O)x;5aMNVR!3rTf})WUUVOPj9x51?(LhMCbPoC7I>4qCTcrYfOfQf&U^yVR9-4$n@a>+=%qbra-?q&=bmj&jhH71eCDD~ieDDUoDcF3otk(L!x3K)P z2T(RVBTCT{e66QyrJ+hWHM+VgRqv%gQOn{a8$5O(fHUG=JWi6l@#DcWdg{W2YZcim zoznbSS9jRQDn2#B&*_~cvzCJcN7|5=aJtx8ToP%rvk2LV=9=D`RVpfv>pM##)lzEe z5%`qBQ!6FJV_bxW!$GJ@M9@%@wYFM@r|SoD4zl_Q1;3 zgf0l9MEU$&@?7sg^4tCZ#+xKmbBr5OD{Lpbl?@ga+pov$Z=VA)#GUP4`3MyULSVCB>c5?fqTM|DLHKS~4vBaS0hzb5A%R!xw zKBJk(mZa>&AI^rpyjaW}EO``y(Iy+?oW7X}wX}18F;2Ck7TVf 
zzYlx6k1+ZKC-!3y$S-S61Fq3WN*O_{_&3JmHQ-z4fj9dYcC9Av8Ime&${XMC$;R4O)a)=d)sy z{WO`wIF3ycy3%TEHtx2)qMPazLaxiDCR}Ju)$0Y7)D6?k<22pvMqFX+2b?72B>L&3Z{F`a=&Mnjpha<{QhpUdqXB-AuN@(Ps}mOidj;uy!XqbSQi(P__U=+dnSjg+R?mKr{rU!yE)jfGG`Ua z87$Z;paW9Nz92iab^wD2G`M^5PLy z11EO>O{I%_+hKg=D<-pw&s`*LkoB{5TCR4N()sw#Qg~Mf+HuA0ICU;eO0An7Wjvxm zCyzW|75S{aENJswx00Bs_DQ{i&hj{-drBZas3nSe#Iv2+1Q~js5$fQ{kv^p{cd`7$ z8NG3}*LC82s6Om@xuZ@%(AKds{QZgYDf)3>KZTuKbb#VFiB-X`hvXAB%gHpgWe=cT zqM4U5>m##-u(g(O+1sz!TpSOh2ow&1!POOVl2GMO(0o%A|5Qholz1!r)^A&pJ(*ywSAv%F|( zfco*`%P{u-H%xjR7N60UySH++vV2=l$n$x6tI?ja7cQd!e1Mu{xaZ>p-5TlkZ@Uyy+IABe%KY)Ig4+HcdrTwCG6#a|$Rhu%@sb>~DLabIi7tw5Z?_5bG zS&7$v?c2HW91b*iuwN#dZT<7HHyTuTYzf-#>>(phT$twcG?rAH8a@xEe`m@+H_k$b z%jESw)1iKp&krs^JGwP!38*?#8T+)^^xs94k=wp2B{FAH==R-E?5LzJ(V|o>4b{j~ zUK(xzDs0A<*a)JPVo5c+%=;TTp4Ejv&A|~{wX-6!T`Q=zcDn+l>l-Gzd;NBaJ^GVB z$fSbaT#~Omx?C%_Lz#qXYpf1$zc|>;cI}rw+tAw1XXPJ{c0Ln|iSSUr{-Dv`w7==+;t9gb%Zs+N!TyftCaHqx;g;tNz3Fg}ST5 z$hVhoFC&EPTkVXfF3Vf{ZhPyCh;Aw8KY^bcz4~_P42x1$YW-2xVN=7^88^6BLVmxd zPF+C>kwKf-P`+T(jBiuc9=*Q{D?~Cf)DvvEmr@cRbp>0 zvsRsk`z5OD1<+RF>418&h2Ml~tzd}}mWLap0UJ1z+?Bo9$|2bGBE=ZX?jmXKXdBQG z6E|I-5?SM2A;!3dafMO@PhKgUSEnahuAVbHlA=L1JCLu=%;8rx2e(%Pw>knY<+I~4 z{I@H1H!HsseXo?l9I+fhIgV7@7aCTlZeYh&@*;sUS{E2>g!@v?6Rx-U-{|0Rh!33S z3-DfZE^A%wCU&Krh+gY42d)~G-pCBI!^V6ooq`7JdaKhKriBLX9&caDxN!pHv-vrO z_BnV);~`BaQ(_I%(&9stkBVU*H&7lx&!xf~jdkv(6vuZ5ka_@(GKC;zm8}R59g{d- ztsc8Rmwt1EtCJeFC}=YfO+LnAfv%iY`stK~JWU6qb9gxpWK%S}3~dNA=mWY8>yOY< z2l`jtu_HI+VYXBBk|zu?qbk8@zRIX9aw-uMC1KWNzc|C}IMPrt?p-c{B;dZ6dgt*5 z$)1ewbBDFE&|&o7)K=fDKD%6&o<60lFv zQ3O%wM;nz_E6UNouP<4Y&1^I?e3~=`M^(;mzx~wen%IIq7^nO};sUE6s-wM+q0U@dbqgw2#YVze>0QEU{Z_bxk32Cr_5^z z+(;$AK|};&c%nY%)KalNfu-4wB3|m^?g}lY>2Bd=^u5US+}C9UAPv>bp^SiEf|Ddl z5x)BicKN0zx!G`$L~nEsZl(Azw1)CGQrPvMklCew&>q@b4`#tLee$JlhxBL%HitzRD_d0KHtiI$ADH|*l|2T2h_XC69^?&Z?xVHFGcqeKJTzsrd=&jEI z7qMz!expI)fy-*vdx=x*bmkZIFDAMyPzH9>!uBz@bu?k<=ASW6pYA(C*-i=Pn|Q<6 
z@4GG=ZjmpSn-Vso>xc*XKrXsS^@zna*ghs$?e$FVgI=(=D~1G-F%hI06v5*Z#B3&wlgg)!Lu~HsfRjT0^tu&PSNdwSY`AMlI4{&| zXQs$)RltR9=$v4E5Xu-L8#CH!hLKLG5kjmT059P)WwH9qVdk;IK8nN zr>H3R@)UEi4vl)eHQm4EUv03|CKCKii;FmE=;Bu=9p-1MsUH{l_*00r$a0%BpZZu2 zD~H|v#diAD)Va6f>cf(_+jx-;k&+$rfZR_<;x_v()Tv^z5zByWMKT~=X_u#drN@38 z@m}*?gol&exy2vZ_Vj+~5iDb+Y&b=yu^hbjKxO~jV+$$1*un*QlVNAB*!j!~(w3x6 zooZdHJ9h#cg?@QRGw2h%cQq0TbiWMN3fjmP8FK@TY?6?hEsL{(gpzC_1_&nHyd={@ zXYIt3wSt(p;Hw*z5a#aC7*j&QWR@E*XknEhL9<4Xt5tl23%6H9ox5nYk3yAyyE&9B z#AjvV>pwM=qIg-$m0Q=n$js9hnwU#|<)vxr{-lVYVpFYH@WT+v`+=7B&?Gjc^y*CY zMjUsa<|0Ha)L-ACSDKyVr5^S?nDTs#_2_^t+sxcQ)>w!Mb5rNOaCCqgWDS%Mdc zvi&wN>pnL5Ez-1lRb*97szI&m`e<4+X}s}KgT%$};^N=;FbGj?DWpb@U*n^YC^tt% zYyipEM59-3A{z8PH3J$0xmwpf40P55Nz_ettZ%v~=P&kgK3GVhNsHu|3;7XjYe@=4 zlaG$HFTb2}eJhPCG+E);8>C3%t=opT(ZkPR&?gYNYD&nF&M3a3S8X)AcJs7-;`O=l zlg<+~C>DaTA_n!{8!=RQ^3|{R(vu`EDz;l;60G7)Q-KwtC6`GEA{^1fUgGrv3@Am| zi08O43*N&nLemjA>ghvNo931C_UFa){*O7}r~Sj(XtpaMm%C>{MLDK#tp_pSY^MV7 zuk~;20Gvf~EplKou0Gqaqb1|%P+YLz95FTAOu@nfQ2pQ1-JT89-ZZZeF3@*r@cuw9 zZ4F-!b}aR%p?AG`Y&#G4RbBmXB#e*#FbBx#-MLAut`qYdVXW6b4?^wcA%V3=5*rYx z#7K*;@yf*Pvhj@8)_%TMWyIY4duNAFcoW{#OKPtf9P?`QLun*#w>_&h8m~UWe#Lc$ zYR&R~UM{h-*s3qe^0j9=SYjQEwo_)>H)MXIG;qKP%bhm-i5TS&ugQBDl@?*=?68=R zguV0BN|kp3x^QjD%Dl5B!W`8t{%)23N7z?Db-87I zDJB4ISy)SN8U%WLJ~%Y@%-irb+t&KVwNk&{ zq;38<$+3;O*%@VpK#jxDfan;z>3iaL!>7O&M};2*<48SvP3ucU&i}k zqQG9MFWQW-Ah%R#)OUi)##*h|h!frQjagf~I6Y2-A?f3T+3#bc!g9pr1Ed#n!H`gW z@qls5%<{a=jS=h2L9MSvm(yNfu%IK`H_44`dtJ)}W2M_7 zQ3urvOM``>_mX3^-hQMY&;)S6RRJ{ZXAG^zr%fKe4pWC13SL;g)1krA@i|(nNabg8 z;PsF=>XL}+e^}wDhWaFIlA4Qeop_pOPsWk}V=h4@IhvoTL8*6{4R<8*@r*{>h#oa@ z)|{u5)(Vym(Xwyg2Oiv4{7L*8B?V0bwRFK|3heiG={{(y6-#g?Ouy5qB*rkYzRkWA zZWRr6F%hs7v+>ubZybMfy0CE>S2p1$FdZp8xNWYx64a69R1Wu>CQ4rU-c&YPXhKVK zxGynL6~J<=P@umwp`&iA`kFEtR$cSg2SC|GpZxRG1t4w}_igYI)zORdH8u zh1$_zq}i%cZ+!^5@#+}+`h^d;?$f(UZ$+MPsDjKONqO$yD(2^k>@;2kn%wx4P5a4c zx!EJy?1@-93}TbTi}FxXhqOWiHolpN!nB%c>?6*QKnR!U7jkYHh$M=-}7hHVq zlY#Sq&?B%2KHv{>^0 zej4 
zxJie1G%H*uX8xXPk$We*wRA8Yw2ISh>ldf<@(@2$m`sL73GYiqx#o1SgPyR@!kS;z z^*u>CMRvzrJP3zwPKF(QwcD8eI`!?=1WsSN!o@+I#|lGMdDTV&!(=etbGrdv9i>{X zPF&g$7bsXjQ6UPJKZ^PrUQ|!w>30YplQ8)dE(8d4=zf@`X5#L2=|XPxMF>Aup_bF{ zmX)ZBy-!ahPbs>Lyjo#pRj7!|o$>x7LI3d&E?gvSx?Ptl6VcH0lBf}S@`iH6GeNV% z8v{W?asVgU>%gM?cg1;@3%YDzG)Vtx?cNp!+KHcb*Oj} z;;a@1N8TM=t;LnH1X)d2%oSGdZOp}jfq;E9#rT&Hi+v6|d=?1_H!|cSQC}px!PX+q zg_igIPL-;U)o*d1tS@iRpY}jUa>McN1RbNn)s|t3J)@VttDl#04Sg^1F&*TAy)NLB z`Wz{|%hI)^|E{~hPj|yZp8f2Lx(R=#HLOboro_F`g6%3eQ_xBqWFv5(1aHq4XG9$Z^=w{p6;cZuw>q9V_bTRx0gE%ArRsnQ!r$enob} zq|VR`YAjHS{(K2;usH5#Zv$s9Y#c1=Op47w{Or9q=Zv(n^b-JSoT4AM$+1EUNK+(|% z><#ViR9t)$CMMAfT$+)qie~V*FpmxiUV&|k1mCy(7XmKPi3;Opr`ZG)P%i&?I+3{2 z!zFM3ygX{>s+9Jd!%u{IZVPGoF#CtNxnS#jYbZ^;jSXE}_P<~foWlhgIFP!?f#jYP zbAv5Evq_Y!Zb9u7qwCDvA`YtUE~!KJY~B^zSGrC;l2ciEHEk+@vEFFaxx?tLZP<)D z@9KEznCxw=kk^)~hx|}D;VUAO#VQcfOzHF6vzu|U=?&U9BWpFV$BaC#|B6TXe59wz zZru^fW%&KmLrqKM^gwQxRoa{ipnJhyI`m* zT87(;--l^w8nFw_5*(z~T}Iv^KG+Q33nb+OP#>Qa2k=`p)X0CFR`erjWN!>vWF$7! z9*@>Oq{f)@Mk@%^vn!uMpLqxWK;#){lCYT^fuQzyS67n53_$XY8+<2qcMWc)^Aq(v zkl?yj3Z^xxp>NFmIbQqi`Y-G}m$bP)9wO{`Fs+E-D9aTQEYxvYue~YzpK)d_ny;{y zukaX{sCk8b-RmfdJ|&>j@mO!33iCcPjs~-J;1(LTPFjX$;UgJ-Xn;4O3OnynypQyD zG8wD{LNMdS-7v1xr%X7^(m!Sy)uL$PzBOuH1ZDElXqlaS*;Ocp{_T0E6SszeE~}wL z0eN}f?>y{-V9e7Ej#I+JE_r%E$JsB|S%F*Eii5m1<0qQoLM#fk+wuC zZMW205qD=7FfGs2#X(3D!T#pjsrTgj!>ROmTWsjoy6*!1`$jOgOn=gb|FO3Sc2W{L z-LR6^Mg;#e4lEtW0DFp(1iKU7-VWOjaeNlte^~z4 zYPb6uO#2ZjniniCDS03Cdf`nD=kvo#MS3o<2XGT&x zvhMH|68e9ggg-u<#Ktoe^#zMnSb-1sN!~1hfsrr?L`iEa*PmUhO@wI>IL zduvk;b+^?icr733oR4fa;U(K_6>IGF{ zj_yp7`zkZG$$llA;{fpWDh&?+W%Sp8|HniAxdJwV+9QJXtpQ@4wFAj|$PcYT3 z8^RE!4C|j?7*^K3`}flCk9U8?^|nve7V(%mnVyTGvQFD#&X6A#QK8640Et>3*O9JS z;*)9F{O@aksmau#`JrJrQ#Hd>dlW1{I}mme=IiV?VchgZk+o%lQ=*iEb{FJ{S$A@iuBY z{!bb&-NHdGKP8rMYB~H}OJ|%w9pE`HH1TrbA!-CJqYtae!eE|0wNp1Gx*-0%$%fIt zs%=P~f9(B09zkrK$^eu>YTsu*B&r03S2!ha%jWZ7$;TzezO%ULm(G}Ae8O$8L?==y%wlDm+LjPFVKSkCM56qGT)hE@wgS zlOs;Map9a%KCx_#n_?X`&Vza^_A5lBq8mZ^*3IgMt78==o%($|3Vm+L-x374ktP8T 
z2#y6D?=9Y@|M_bmuoSi-r~A%w;YrBq$Xz^<)Vhb1M9&j2Xq@g7hZ9muO1w&|vs&sx zGx3I<=cGOP`O(+#wHdlo{#OrG4`!7P0&T(se#Y7_W@fzTQxsxfEdyh8tAQgn(_@@N z@WLfcB|aVhsZcOX;4QVfhE`bJboK)CUgP-A|Aw8)y&yzN((1 zMbcW?W7`Wa;Ll%H53DRLU(Hms@AH2)Zh9T)&TJp`0?miDXunXFrCu2&P3B%(b`zQ{ zn4gwDV3w$%41(RT(7!zDsC3vgp3}4=mPy0J;0|_Ou13L?&fsAFC%{{CywXv1)lhcc z-ETS@tQ(elTxeIxv??x7_XYaT8o4^xZOOEE%@A0C_E8WFciGk(;@xcl!Cf^xWt z%XfGnmkZDfDT?}ZiIT|n0|;t3E?_|13)@q-Y`mqP^K1JIQIx#os68tDux|2?Gk(Pi zds_I{HeAJ6YckP~%i~rHL%&GMWsSqpa?zwQaH+|C%iy4^L4Xx%fO3m6T`8G16q4bG zkHzqxJ#XrGmhij@&7=%w<#9e)>{s;7rSsfU=7*_JL385tLHA3;ha*V-$`B#|4ss`itSc<1#}lWz!QMs)l8ogvyi;0Yg?}iIqJDByc?~d2 zVU7*tMEA16Fg8MESf(>7-)lpumZtonB>{Fi{({~T%nu~*vKd1HdM zW96UxJ|uhYXb!sT(-?P*MrWymtF65%Q#5u%98L?h4n{jBRA#1-wiK&ZRJrV0H{lDVFYC~I2 z0JY;ihdDKgRL_GgEErB`{C#%-s+tDKNLs|{gm~49pFrwQyeVKW8^FH;FozU4!bEE; z?SC)c{&?QrUr;#H0_5N-AuYf1)}ZA3HARLWjd)_hJiNRn?hjfnkm&%Qas3inE<~j; zi>H2nFIWGXq|bsC|5o?iplLOukfA4ox@0vF%dPWriy$O|S~F@z znF2?ToX=WAc~TYu0{XGH?DR^d|NozJn(b)6%Uh4QobWXa`ZBI$OC{$rl5il=KmHUwIKbY56#%QP((h} zsCIi&iWp}9Vn_K~+M*l>pM1Ham6jpzOYOXxgT)(pg(I}LemJRWz}YnV9B=#CA|dtZ zK(;0f#iqxt1Po0wQxv%iUWbm-X!scaXJJ)BrV3)d@X=bMgW99#pN9~nCklVEwwmuq z789d(Xd-Bii`sWT*`GK2-{-<93Q6n8HOL&fxv$Ef%!J&+Ro8PhAr|U?WtQZ6eG4Y; zGbKqT2wL=}T3eam_eqe;Q8uZ_{O*ya`}Y6)kbOibgB0ARet`39neH>JKnZQCEi~_g zK^xdg(5+BE6{GJZFghaozg{A%$I*JL!`uLvBw#IK{NcQrxqp9w2*^0J!sU|GTgydV zouz6m57l)^{0l4!Cy|lL9r3p~MzsLflz>8x_%i;Lg8oeXdh$w`I#ai5_Z1v=j8KY@ zF#_d~kQ+Ze%qkRV+w50W@`qd|(D|{$0WE-_Rc&MD;s2;0qmWY!Gm@0;-%6w)CEVU( zR$gJXB?cKn23qdXFWc(v zf!OQZ_z|L41`&>37#KCfpB-^rALUOjlCoh&Fe>5BCwqJSnyB@5@J6W=I~?w6Xz;&S z^gsw;*#8+8A2cBKKQgs%x-BPddZ6?Aj}=@3SBCJ&Gh&{&=pIQUshcS+4*B9Wsg0ZGfmU zm^R5>KN~>n#&mcs%pyw zSQ^$_>FB(6ijw_dXJpDrc_2q3bXSi75EY{#r1&7Bz3^KFicMKftcV)rQtKf0xa)X3 z&-;>bOE4A`UQdXp3dIVBk^No;SVi*{4dmqi8w~_4Sokaf&L`~@T*}Ue8kp4V+I4$5 zubhzI1m(E2!u$yiEfj{msWO@@G>u+LV#iM9J0n&pby3f_<_0OdMfEAvI*#qYrp1P( zQ+qTz_&zf;)8hy7)LOSfo~^<~d=23Ya?_K7f;ZuND->N=3Cv%~4fJelRkD7P|5@P` 
z4@gZI^?Z<2HrMEE?V&M7$%owo4jhijaJd`IujJ%He)A0(_}-v08umfOcrjdLW+vd& zS=1#;g^ojxAEp#>L}>K`c3vam2=O}TOO~o%fmw0{?UD(RHwnppIJUFTT&sz?Bc|zhXeFF})xH6HB>?mVPT2HqgK}msT+3MO*?G}&H-8JWWL*XXmNXN@ zQ4<*96By`(&f3ut$IaN0D0J&Yl6qp8e#A2LkAjmKBdd(CaS_XFw{n8n{+qxSuXHdu z!GP}5ALPE-{sdQlK*?a_UCMfPDB=3?pwVQA6`0d<_t90Z9TL%)zt3Nl+FFWEHwy0dc+vB%T>O|4uI}rdGO>wE+7z<_v`D zYBG*(CNE%Rt&0{)=jEtcyR!#7F3~ew3Mhkrn3a&I{GbhH%-|2eE*w%n>U>&5q{u(z zrm!YP1M%p)a~@kg(o=B5Nzfg2w4AMsmlcHbvmD9$w1!I@Uh6??u&k!i zYPekw{~cRCKoPBi1InKbbUIhMjP)i|e=FS>^;~RK;h|H%kz?BW6`@~vP=Xh(=xI>K z$Q4v*HH*v5#HOhMT?&*#voRA{&0c4lqQ0n96kj(eLre?kM=hk;p%KsWP~>90v-|zoO)%H zm%+>skAFj%Hgwc1+4DnQRnbmA+;rGGU4?HU0x@%VfVZ_Riq*I}eY2ivCM`yf65?Ru z3PUf{25H>nk!Or9a*bwg+`4>@^Rt_+>$&g6Joo2f0uY2e#dlTH{M@%2Ox6b zLg`}avBSmOPi0r7yzTkz8OFb@R6nYbiM(y&bDBA$q`6=jTVThvf_-mJg*bHcVoV25 z9R0lIkif>JU=%OEHL9#1VvI1xq^+DSYG(`r56WIJg%xySxB7!FMKa?^?jSfFsi2@R zJpSE>yhMjOaw`CbqSt~t(s@f@K0%d@ZoRMlMrC&64)MIdc_cs6VZ^NhDUUTP%ISB( zL98J{s$@_yytMILApQrkcoFIf)@JIbunozCHUo9yMB-xP((<*Ix**OQQD)D62H@6w z{?6a(;{FGdTE8B7Ztik4+|p;~7@F5gT&ByS61&&p`GPY%&MI!blvRq;9tc%Dpi@2f zdcwSjX}you%X_2yVW(WV60Ux7vTN6{F{f2+JzQwl@af@1w}`omp6B*=PJ_BQ-1x0Py z!60tyrMIZwBHLP8WFF%P@RBXNk_hDZ297Tgz6Sp0tR1`Gacbtjlm&PiOKPWY8n__z zu2Y!pC;s-xP)I1^HfDPSul9T^h{%>Ny7K@XLyj#SnMj7R_=Ma9pf3$nm znkN2k&?cA^P28W-pgj)8y1Ft@AT}LQx%$~E!{Ppc6(kLFu1hB$$QGG!WvjWl7#zA0 z%8IsiqQJP-)IFQxGUWKnCSi7YtgNwkh24%u2j0N;~PdR1dg6Yk&x#XyRf_MI{{`#0!w;f-bgA+t6#L-dZc02aTN~f06=*Pr{ z7o&TKnv!UASsS!qn`0r3S98^Fzs9Qp`QGCw&ACeq)vEW?^-EfLMr5*T1*r|0fULSa+taI0!nJpQ7r)+9Rsz)2RHOgA$U3e9rZ~9KcSz4dC}WbE0)V6;-@%V ze>1}1+1SB);LjnN@K)ESB@K-A(xf(rm+!vtn+X%E388@%nFG!J;xOreA9ANdA2-jj6ZZb2kJVQk8`aTZkBFQg8WwMb=R3qR#sH0waFt*%stH{k zID(^0swik7u2E4~F!K)VDymiCwM{Fg-`+;iC<~+>1I6qQMH^|~4Vx~)i#qXWxsfkR z4|i*Jt6gw&mV7^p&c42D%_tl2C(|J;Y{hI}4Mk=U2S@KC`a_6ri8d>gz8HA6%DF&C z^Np&&HvJ1F`QazZAchhix0JQk&T;zpA=M(TSE9+3`hy;Y+ND`^GTyF+g2oNE3X%LaxB9Xgq^Ni? 
zXm1VOhD9p#K4b&JT=YV2$Gf1l$lJ!2mE%x(!ut5~Sz{Z8(=ONoG+O7j`Bb7XiSQ<^ zf(m>PZ#5<&!yI>hPl(CK-DDWmP6fI_QtQqsXr_M_q`wG!Sc#6KT?Z{oO7FNYx1CfF zmrn;&K*c#jKkdkMKl=%_(^5{^jnJ1(Ys`t@6@#ED0sE^?N&JYrSzOa~@FWhz=7?+d zKU`)^*G($^SfHv-!DRpB6DRRT@bJJHQcdSKKM?jh+FK56Gi*NSNs$hK&tFEqI|?wM z0fBaX469{)SUMKFAol)PuTn)y&eG;`&IvB&4KOPdU6mMVsLrV?aU(9@F>+xBbFJGq z-a+ENX%3ET5QuL9=ZlmQq;p0CS)qTItbPtT-mtIUXk#tPi4q?b709^fTWpWU<)XiM z7oG*tDZmHg$=-#cj*_=mMDgIBdjua|&3bMwTiZEF(`g2Lxe0eC6ye6D14*<(5WW+sgD)Z|HdAn;DVJKNrKB4bYfb(dYl<>-N4(> z?98!%gK)#*7PY;h45AhA=6W;|p;B@XzP2r&r~{h2|KV&#^W&M{9H0{|W4#ysYrI{hJD57>&o=

auWNtM|{f1VG z*dn-a?|C@d5R|KQHNAnLzOe=80+E&cCw>+=L}@Q334&>DShDT?8J)b++^;ox04xrP z(CNb5=2|13nKtV@yi7Ub)^u@!NIMvTKRf_#*uVDjV(UD%wt8O|C0dU#ugh6$z&lso z;>4Rj*Lj`Vp8Rz8TIz&gZ4zammYZ$Kmka`cuuFxE7P`qLA620j7~biUl`Cm^uO0(a z(5c`BRjjpfr#^*Yf<#~ML_U@k29ydM`vhQ65-`KNX756@g60oG6#X_uO_eI2ciTX= zA&61WdP0?>S|DEy@8j>);?utMA2KP7XPeS1JL@f``moPwbPp9*@AC1jhIG@*8o3>W$QXj=&-P(0xvt$ahU2 z4mba^&q9+4XG>1%w(=QJvp2L5<2is5GWT<-u?y(LyKCK z<*2Tr$@AGSzo_rAKSGDm9CToIOS0#D8F*`9_@Y2H>*4($6FtiA{K_j@-j%RNbAvn){{` z0Z?pn(fEOiD-TW_ZFNn)hBMfVv?fAv6gYy_&&;$j;-fPF^Z{_hUMytYz?NTpJBv-q45&u%Z_!Qh{JcX2;?ZnHt1 z`;<`Bd!aMmda-8A<7>K(@!y*L;tW9K+$wTLB!RE}|a$ z3v_SG#Kdl7du+a3`yMP7(4}9)YI}P0-Mw+*=W)r}ZcTo$F}8(=z|403;>sy@d&W3n z*x_`oKBm6M)}82|LqB;;t0|&u_p*LERyphMjgIX$yi)(ok-q*txYqBX-Lbmi;kOHw z478`s0bK!e9Km*qGdU9^Z6|7*^9?&)`tJ1ECxv_kJeeH-IA$b)-cuN|SLBZF0h)D$ z+yI`}E+ECA_jXzvJhCkhd%IQ@y+1&og~h}AhUIxRGfI?e6H9B$P+_9;Dx~<>%`tn`DOXx@p>8WVtI-Nf~N+wT5Uqq}R zFGK5{JY~D^W`s%!nv#TF9i2`dpl&~Ze6dw+;kZ{&tR^Ml_+a~Db!S+>DCy&SOo3pAgK?l@2ZN3<9P9$ ztZzvFE^s=U*;J-s={BPzM?e<~LbgZmS)H$KAELv@#2_4+DBr=IKBy z^IY863r-YuO5r?J7sGB(s=VCs*|fDml*)g2aE0_OcJlnh!EwWmHy(H1 zFZFvfZTV{_5kVGgZhK-4y{DhqI?7Y+V!5(kwBmLTVW%F7M?qqjxjwR!Zh~FaoLOBFih^4Vuw~B_a$- z`Qh*UOrEcwj=M6ph|3*g=WNRE7la5igBRX|3cQ~o#7%$^xJw(;>_;u^Rb=3yYJ#%0{&4)IF}Myk7SO& zu>Rmkb2apk_S#EY6L+8i3s*s%E@%c`NOgoc7W=>#%lxItdZ$@Wg-pbC6X{Ysn>ZjZ zIZEm7h0REXP?D4(CWC^UJMpo6O(XR7X|&1EGic5DEg|jCh|oheedi3@!=k%{JKAV`_lk?i@O@DZc2=n9&{DtZqkmefV^6fH z;uVf`CD_fuSX`uAqm`$m^pO$XsOo=6k9=FH#KV)RfNZXZ`5ilCze3T|N}-XKl}iw<~M3p!E-#!&k zrm3wC2tap02`+g!DO<@G<;zFu)1-pWc9Yx5@~QG5Gr1n~4}$V^tMFi}h*h5f&LWQa zIVdOx%|OrJPf*j00?LgQY_>(!&m&>+^*jb&+0 zq1|+G9}?~PamRa?6?%s*vmGCQ%-<{6Y=}udx-6vmmFS>l@Qg@x8E57{j*29J*!I9y26cqU9o<)F5MxH(6JGSg!Xkdh4=a45p_D4=X0^aaZhslal{k zqspX~*wQ7GK}7v&& zbylge=<3J22k4o&Ju#QVO~UoBpOd<#?It>=ePrT2od9Z;QFO1^RLvDyIrq34&7~7; z@4i!wPgi@dd0$U`AEs_sUUoL2V=$r7z{;y1=jQxe{|NcDH?)f69L0`@hRdVF?+m?& zvl|*wSw+zH;`I!yjk_^rUD0*cdJzye!=}94-soE^tYfc64dj>u{av&Fip`!J?X62@ 
zs4X()FWzV19A)>!N+9T1E&<5kcNbUST7uWX0+)fyLRw4UkeIio*ELGhT43C%by0t=gD2FWz$!ELm%F0s?2UmxFlWY@G+a79o|_sF7$5b>w! zM^lSL##PzRPN7|6k`?V3>3AiFih;|vdRK3&Csc6pBQ7JYY&Mn;Yv^?~rMtqnHD!F= zUml3eMXDN!I3EAHx(ZliJ?Y*q#=gqDZk$4_RV)3ujVm;ZFP-e$kj7{Sf7XphFZfYL z$5@wR4zj6RmwTedV}LfJgOfPu1_;p^!EUP7?LBHs)<*av=Zble_Dl779jXhtk-p9k zEZqr-*>0_cp0lu2T94V$VDP0TLi-lC=D+lvgo?-fe!(Th@yy&2D(WFj6!nn?`I^Sy zR(jbTx5O-xWcOJ(?`v@Uz}?NV&m83Gvea@#5kv#HiZhq#-^F z-s8Z9BE0>_)KBVmk}&g~UqGKrH0LDrBb@P3Dh&Qld*<`xF}XP=Sa=kuf3Vro)bE9{ ze<&|m7*_VT(GFN>yoI&-yF(%^w3{ic|W{2QzuVfdH4NTN@D|tV(9Zx4l#H3 zzs88Y86ahqgO%Rkdg}xIVjZ?4gEBbFSa(bEhy}W_Env!p+868;7*AF+>pOw2Pjcuq zP-Y->1$|>iSb$dlyhUV?FJXo}G>S(i5Z%%7FojV+5x*ioE1ZyfP|OBQ1eKd1gM0`G zq|6+;Bh}jiW)?4l{WhYW>pl1OurkaGxjL`EsGmw(P8$Ub#_92H<7B%F>gAh^ zy{30wKcH()vJsC1-NvY`-)yTLdvG+r9GbwM}19qK+&GZF1Fzh9I^y)AFaeaM8I7^?t#xn+4D3DJ@C}4-_o7}fraH{$Xzh8E(Rc#XfLW$<@ZSt-a3yRsmJXw0 z0(qkC7QsE=Ix58gSsGD(H_V}_Uj}y){V$T4YL7H5TVQg>*)s~<{}OX<5iuZExA_+N zJeK3m>j|rm^DnXl|IDzSO}$}bM+bizd3GpKTG8Vaed)S4qQtK2F}JUZcOy2V(WT`3 zrEpG!1XESKLudGZl$&B=Of_28%&EaL?CaN;PRAFv4kVT@^r|a%4cX z`t)RKo7oWGpo0dPprQ<3aOwmqYCW%57@u{aHsChx__9Mt@P;=8*cL52m|zgWIh~b4TMVpe1Pq>Jvm#*L-m{}P z7;L5RE{5;1z(8?blrcV5pJ;j#(=Fdi8S-%|&TH3>!v;|Kzb4Q0ihMiWaz_Hs5T50w zg~FQ{+mX2@pB0O@mmrV4iSF)FpVR`o-mAa|GE=(68zHUJ*ja(vDV$(oOQz6t!M)6K zY`fW((={xPTP+6`6OnopkeaW1{3Y3MbRCTVk!x*YPGwAWm(q6FqT-gc(Q zstp}sPUm{MeB#g35>RoA4nH{t^AW+~4UfrU(}Jb%s0|Z8t0EG!XR$8ix|WtmPrlN4 zws#fUjJWAbTT-in%a(BtRxPGE&@H*`E5iAI)nm|&UAg{(b0^L$>FW*)UV((#Ya@m< zCu*-($|+JvH&4}?-4h-uA5lfEI)-M7bwd~o!+>|aQ=#T!Xo@05lcGo&#^`__45f1n zC11#wB@r!vbJ-cPUf9GY=NZ+HHLs1;B<>9yl9brxRu!=(@s~NJWEgXzS8&^|HO@wD zNEmQ}ztft7fIu^1%?*YbiY+NXL}oK?4P`Gi8FB`mzQtzFz#A=WSxM;m&OHk)tN6O8 z$2IFMR6!=K$P`098=W>em{Z`)!r>VJc7Q zIAakJ6XdIs2SX!mdwv_z8SSS&93eBnLHKzxQg*G#m3<|CI}o776$TkgB7TE;+dA){ zPJsE0Wfqu+KL?2k(|$T4uQcX2Ave&$I+}#3QTX~=!ovJX0k_>-8nKM^1vhZW7p%LL znE&SC8>H6eAS)-FH1=mpz(n)G3Qa#j4aG-^<)3Cbk8#d73afbCI}M-d5&cug5BO`w z_l4&BXmP?@x$bE8?cbbnRG+-{)w(h#kvnbjoxrbmzONMD6U%$7lng>21BNs`N)XTO 
z#M%xiDcz?{Z}H2GBH!>}V$UQjVw#dS4a*#7xQPpIKo6Lydt|_2h=;0XMHjwiw>LXk zx6LKbrj+qnvN>`-VH@B?NO8=N_|Z$YCrPgC5SPv^24LJ zWqo6d%i6?WC&oF;waej`u-feAHk?y#O3OT!-p9Q0l=!|a_{yV4TC-K2PZp_^PhjAX z-3@wE8~|TDw@o1liG34VTlekoMwv zFRIcBFk5X&1sj`6L+5t{gPo>oR#e}6@?Dq6=%AZ$lX$l--pUge$=2`#DWSb~X z8DrB^<+#+R&L`ITp*$)|Ta^;?(JpSgUKM*s<#xB}Wfs;lQK6f!w9Rm&$3||kc};)F zpZzL_ZDCDf8akAHKXm5e9J#j3I9cy)diOprf5+g90P*?LDb#J*BaM6VltSi`TxBNm zXKQaI~VU?oDRN8>qeQiw+*=_Nm8G0J$vx^?;R7`w{Ik+a~5jj=_ zV$xGKRcZUAKpJ5R+TtF93~WU6(}*O8OJQNTR8uuk8CDl}@-o$#Q(m`ZhDCB9rlm!x1` z3bx*Y7t)f$8tR*Vn8;^k!4`2O^fr)DTr?YBZdlbluK7XiwRF9IpAW*R zV~pPl4J^rNV*!cD5}jEu3@4EatU^O1Z=(_BMZfE>%c2x{m)uhfV6xX z&A?#$ow)C`b;BHvG&ySe;qGs`L~?!_`{EvTgPjq_qLz0h#^_eOlZ zz|=gu7YYMH}Ki-|Vt4BW}Y@lz(w!ReuvDL8{P&q`K!#m>t>6+ht5 z`?}LHCe{vbUh8p6cJerG!?PpH1ol2w0=6) zr>V}CmZ;98N4CU1taDPrI)bbYW}M~EcEnuMH+~}(Gp9`&eiMzbi+#b7PDZHd<|@`1 zLA48I!kOxnDvSO51Bu;{*<+7iy*1xp-@7XimnqVf$`qAv?-wY?3eqSq#CkStWR|^f zoxx-E7-#CJvb=5XW-AY+C-b*yHNQ-T>4AmDix%?aD#T^sEz49pxXHCnX0bx6@5)79 zLF$U)|GuWs7!2p9SgYbm{g2jwxxNJ}nPz<9 zpH@ppQS)|ve)$oD5-exYociReWFLQFmJr|G+Vs318Nfh!h4Y)IMI1R}91tKK%?S@; zZ{GJZGFb^hHSxYqB!yOA6G<_FQqZH-Fw_egclEDEdO9_HPk=jxB7y-EqNvlVlzS@V z`4dS>A;N=^btH6`b|n~CKR8~(FG|;N`aDkye2#TksXfkM3O}wBC(S-ck;g(UCkWN4_(Y2xDRaUcc3OL(K)0Adk`2J z3pCx)I;RkYuP#XRh1H+_-Us*AxV@l`SAfv|o|7qmb+1F9B!Rp$Uxamz05qm(p)o@D z)s(fIw#FQ*fG+>fFIU5Gg2*^JO9#+R=bH&)7hkx2hU=gUbD}oEn+Y9q({7yg_obhA z@ItkIv`4pYp2oS!eSS8Qqq7XLk7&1ZBdzjDI=3XyvIPf7$gTAS`*`OMp}o#b8!p08 zIMX-dVXo#R-w28I)w-bjCA?_g$az{1lbuO?DKo%IWn*J%Ls(9DtrE<5uY55D{$LJk z=AlR4=yi2*2h#mgTH<`4@A}ECR;m~JTqWqw#*_J1Da9w>w&@6-drwH-^m^n3rcQC` zn!vrE?5oDBU2Z#FXXj!*`mg6=E^nMHnKuYB4A1e&+n;7#f!1>a^LC+KgA`Z!u!>rj ziNw0-yj^0stW!9b7FXV?+j>s+yLjkSY!^Ik?48ipb)hBeg{-7u-}SniXaB{|DD4Q& zZU51RF$WCkf~jmPXB;_3y-`R-fxll+#Msg{=Rjvm?-w<1-JK%^!<~-5_16j;VeQAS+o#zyl&2FH4MKWBTZb!@;2r_SiDrbolOdV!cqD4-r0+n(`{EK}%j7q(? 
z?Z*UvPEbT9*W91YEYyY_zkJ%doKAz*^90G% zAGB6?CS_>|n{4e$f9;S%w=GwlYz@*uTDgC7fw1TiBL)c{*gs&1$8EMZ)OnGKQS}I^ ziFh3bAfThkXiEA#6@Kw9Z@Vq7_&nCzo}_mx4!|szVHxE@k*k(Ke=oi><_OE&8O?`x zL@~{G7U53#&GVwiAYnI?uzm?BU4+V<;*c$|2XJSbJG6XCiQB8c^NdH&3P(q!$f9`G z_9`#?dc;WOR?p`Vwcogi8K&W%&dz&L#N(2zCwoizzut2<_GaW^0}CODtvWy9>7mi` zDN9&e+T&J7g<&2Dj_iixHUZM_7dx`hrIZ&wU?{Q*XTXS+ywbp~PcWUcp?`lj0sn1! zFjkSYH%CkI^kQD0?WIjyk>WMi2-uteStj%&UTVjyzh-}>%`qz2!Is+ImXk?63nWF? z1hu;DTrBkc5DzC2ightnVMITgR#Wp%e6oETMSf7f0c$AHhcGfoerS- z;blk5QZR{m2VA0pcz|_`tG3J*qO9^0ycZ&)DTl8yE~cakGvN_!fkCZum^P}>F8TcS z*FlNIvRlK0kgZkHVP;8@+`Kd;;?_{UFRq2DIKs+W!;5x(Rx*+BvGlXvhhJhyviT>R zjuEO0n)m||Wd}GW02!rZM6qXvlnrwZ{1`Tp^g=wYOdbc!LwsWvnli-L^EJ5{1U|2e^-&+5Ua;YFOA_i z`*2&%Rg9J>ZDVUo{)T=C|1rS3T^2zr^n5E>+GqQc6{)n89kvHu1GSAQ1GS~e+8iu( zhDjWjrWvLE7?g0VZb(n> z@WRX8g1Y$aR~tDQhx=X_b4KsTM;QTwHsfw>a!os~_~7~Tn=hPm+DV2v5v9_*^yw|1 z4EGenlUH8f#r<09d8vDzF|p>quT3%hetNx;DUsOk z=`j??+6g>pRy1_lkmLI?Z0dM2+2#+Bs{#95uCLUAIkI$UHGK?I`43RDDt>^7emc6h zbnO<22-qVAozah<1CUU0zJ9!bz13Q7r^1$8Ozy66X)GeRQGWN@_CAabd8F+Qs?2|# z2V)h=-io50a5<1i$Tw7oW*HGHJ4vgc^`10QmH#Rxt~Yd-506Lr5KSTu6>(X$KtUJ} z+=z6M(h%~8lHZ>gvA@AEe_bssRa#AiE4Z$Vs!#;pS=dZ{CNQt~F4-q62k!ZySTSDJl&H@XPQXAlTtY@Q zMX%iez2#r5*J578hdSc-3n)zx&2?PTSE-wyJ#Jan-V?r&J0|G4_rh@C`zRtV2S_T9 zjWBkUXWMD`m2byo#V2c~av9z;f9rVna)|E5R_ie~dC&Eo@nnB?Qy}binZqNj_^Z!Wu?s%;C|L+rt zva?r`h{z~pi$b!J>`St^R2?SL|iav+rE!w_f{PC@3>eA1!ja;*x@jvgYn;vi1lL7u}Uq1Rv(RQ8#KnvOZ?k|I;c$i@4>Wqf1bd`s_jc#vZIS2jFG z4N3x>^b4FeSb@PaS=AInW3DBg3mo#SgeV9D|%@Mba z#3uREc#CE)&6`zSofgOH`+H_C)CI)|7Aq>afLBwX>HLw<fyf}0$C{tP8HfXHpDQgA8 z)KI>QwvZDl#&eL|D-va{U#^ehclv;vL6x8kvt%_rxOH@13+g#h8CrHuF#H%HXNW*f zZj|a9HUzt>nN}LwoD(!0^)|os+!D0|;FhP0LX1;?5C2Czmp#nGh8QK2H})}NtM@qy z;>8xKv~zV>OFNqFuNZE*00yn-asEV*iTRBim&YH=&<9v20vm;{P-}guP^<9wZPc@c zmz&1N5g+MAMs%|BfM+-)*asaF?mSghc~Ob@0PA8gc(u7T_+yVvTtqoSVZ%j|pxOvq z3$e1`C%H0I1RG3J_!0@c4^yK7OE=neb*zs;YP~_T2FZ-4@?jMDQE*jEy8#9t6M73e z@*XL2p+OsIv&yX%K;~MKBT%=U)CLMEvv2PMoL*5~J4y_Wu{>R{97D*OxXm>mkD?uI 
z;spEaoGKbB`F-!8ygDGwlNB2k>{=qUYFrcJ@URCg|BlVFPPXkKZbx7*a#b~F&Jv1s^Tk@iux49Iti;YP z$Tvh{T`ungAU&?g3O5SAIMRpzU*1+fW-%0qFW&9rC*?_102I@vw=om*|l>c5vIsNhU~k+H_;bgz}Fh>eTrW(H-65nGfWnXr-0=#wWtu zc{lZYnPr^Rw9n{GQJAC_#0?a%AIu%%0{?A^z3t^-(>jdYeI-CO~wps$}ImOy0rMz^k%|3CFD5#rCnK z^Sid*Fx+TyfiWrLQ7L)DnZjN_*}9Y?TZIl#QTh3$shV+>Qe$OJf?qg8cTH$vOF8vP zK$tsLlcN#iT_CXg+F>@tFxH1I+J-zzT;v3|`-4arsR{*KM=QS;nL5Ax(xxW5b1&v+ zuU)JNIaj*hq#zqWj`2I#;Roi;|LSo(B>GFFGIG zAstJqQ&eC8N8-o4lNU?|fi>mbNf(#6%}Z;lDjAqdKi#p|6fPZCz}RI7@8|48rb^~r zhSmDt-{RY}6##_|z*;j~h@Q!LuDAghlwo&SRi%`uZSNS z#mzGfzrfML8fkbAoS7U;NGm})3TKJ&9^ti~hng=*m(o;XuBKsE$t;fIv+*`7xnpmr z@K0MFKNS4%T=ClF= zSdIY7{SV0_KW+;MNaFNjWfy5jzaJTz36O%ctPh7H8J#)5$tA79;GYoD3*DCE>)QrG zVQ6xzNw!e^sz4`Aqs9p|UK|P0s+s4ZFX}xL1PYVWyd>m|g4^Nm51dkWZ5K6r#{dB5 zB8o50s`XfpJ_A@#34`tub^=xpFPG?$O1of)Roh(8W~YeRnr_1 z1+aqe*83zQy|CppiMp5B2gA7p2ih~gUg%d9{gO(1XZCc_-e<1VRgTFoVK+<++%Q4?OzSuOX1%pGxGdQ?dz}mWHR`R z7_WwMrJt~km$XiS{^1mht%OJzv0?r7x*X*WRQGlqeT71d)aWTdZmF>`l0XAQX86$_ zR9gCMtCU){@xr3q+bb5(pC}5EDkjj)f5Reb~F*!xerpRyQE?DK<#Y|XJ*Flh;EQ3zXfZU9Cxx2tXkOvyib@w=$IQj zL<1=>c+zrpmz=_hhmNGRQoz=q_IaE^a+VN!q{_KIHGueD7R*a{dwt#cbDhpW%1y=9 z1zZ>iIwn=`4+L{yAov8aH>1?UdejCeL-H!CilmI*gA&GM4QjJn$|-~>&{EgjA<2v% zN_a%Fg%~Q@fOD^y<5X+kDx34u0nH-+vZMIr>tn&jgYnzK3F72l4&`M>=K01NzHHt^ zU2Kqaj+8r!DRDW|Qtf2u`{_1sYC-R5k{~Bp^dOTk7}@;Ca~`rNq?emMczcg0d6NWr ze`%>sT17~XpT(aO2?rs5ZQo#>YfKzZR((`QG@+MSC0KoY4#J}dzE>IRRVAj>rH4Jr=RbBW z2+oAOJ%R#eb?sRBLu}F<0>t+VS!;zz9*p?b1qy@{XGZS=*&4nB6?tZqr%zt`9)D|v zStFlg9a3(zBvgvD0)$HWroHT$hL*g)FKHaTnc27ub$z5>p5+ZE!6WMaQ1r-u)m?uJ zBMGOOGssGpl((NPOeqlVav^M73FRsk3f3zP8dru{o`Bpuqileqo#b(tq*og>3&wUE zkiU~eUn#s$^6jm`(i?Ka#c>*6oz!aK(8CvN3OLWh2L-ycm2Xt#16QWDH#oz5`t)7E z41p~`fx#KOj}<;Hd2lRp8*|km2y4 zViOdKy)eSA+6)7*%Dg#}*nrM{c*66!65;`e7%;YMvXvRZ+UHcXqrx0DS6;(kdAe;4 zCsUT3C{uhMSX7F?Re?IO1J*uOK-PZx*vAWRLYuC|aQHg&@UvrYu3#Mm%Ahsqsob5d zj0$y)qBriUSjn;T1*u)7GUJnu0Nf_tdRKl)_1#=r@SxA+YM-2pHfVQvNwj&Kz)FF) zc$|M=qWht_7uVR_ZQh%y(*48WkW^sPm#gb)e9ek@8en+fq5R)&h*Nz%MnK*Q6}Z5F 
zk2m%JNkSYPcpOM^O3qx*fk({`gr(fznn`qAwec|$x0W^%W9TP#@JZ|AkdcTomXFt^ zwpB;g>S+vG!!}QGY2^|jUQQog)m9J?%}B%Z7`s0yd1a+(?%p9|*?mvkSu0rBM~Yb0 zuZA7xJ8(Aac}|n%QQq?J*GkOw;AC!YEdhAWEk@MIGD8Tg!DxkT&!nFq2)!D0_VKBP z4Na>|o495vFfZoLXn$^ z_dDFTaJ>B4;UbC&kaqgQQB2a35o&kiwbOco;Z!89OsC^{f#5526c6|WgY@#a#S=?{ zWYm3-dfxiRj#FK_;VQ9fy@pl_#%Z11u;<-;GREnHcL3q}P>XzrSuB93S-GFw-6*@; zE1aigL<%DHGCtBzg^9h;ISvSEM6MMNJ{JtsB53^C!V4X^aQe+2*-)0=C^TN4$@4Xi zmhe=MahX;d4OTU1Qqf!jXDac*0Zz2($RaTCgY3_`5!B_xq`njkpa%CZ6eUx>RF>lj z?Ho!h22fGs{s*lSAU;z^zw0 z#BRUr9(bX|I5;p}VcGqXV_n(!1K&)VZpu*VqZun;SLvw4UrhAUHWU@=KETzjIoO^R z3B-}KResYM3okA*J5?uzk4t-?h-uEjxxdTi%%FgK^#>l*)1QK;z)&qyGfbF9m# za$h6EK{fdak$LIbQDe^H&LJS{;OV*8fzl#Jf(k*zDXb(7j?v-~KwF6sg7P9` zW-Ov>w+3OSe#rscLq##Eew(6T4OW{h8(c6X>Nn!1d)H@N{leQf?gpAH3O0H(tB_;g zRC2u?d~=K$5(H-0(E4Chi@1mduShe*MqXPAKvpL_^L@YMA60YYVZ&7?*E%Zo>Cin7 zAo}kQqF`Yf*8ox&+4D$_Z^zqk?n;&yxJmS2ZK87s&GZpKI& zbz&aRU;}%7eJdr;G$fj#%I<0})yWz*edV!GF}{?@mk3KUjIdCCflRhoZD@0B9uOBZ zuQ`%7i>qNW$1Em$*uqIc{`1&+rB|k((@||m_wu>i-Fvco-zLrT%uaFrexAaK*P>*9 zcKbk`G`I*=#=Euw;*oJ}BR?@6NYEpvP0 zB=(I z_4jO7xx;<;pO4R3-~kxirX3}!R#*g(#awUia2SQzDMjh4yUwPQr!IR<2&nUI^jsTx zRwdG?WEZE}jQ`wJWSYXMcXOMr=u_u+O|MzRq)-brzFT+qU3t*CtKc8Lucj>5894$o zWZn|uPJYH=o&20qLeybY#sT*UMZHxhHi!|ksu++|M$duLJ#gMxbN#YA!9aOy+z{QI z_j{puF{d5y0q4s-hSevSOLsm!X(-#5>wr}_Rg6hkA;X-#aNq69;8K9omyAg;X8%>Z zx=$kj*%?o6m+04$T9CH{=#`QxQ^@*k!NP)Zh^lHemnbbCm(m;1YOtL1t8ZLL)x(^B$q~niL*r@h_ z3&93V+})AY=;cggTyjM=jI*R)O)O&kULho(y#WukU>ma;6Ocy}AY7b~AL|gg;V@vV zVazB{{DCa{#DzJ6T+T+;hsyH`?K=UiVFdeZ$d@EDahOy5mzurx(bae$Yd-FQcwNv8 zHa^KY2FZHHL+3d?R4d#TW^g#H=>H=kZgYkT)1Dw9CTjT@mlz-4>l3IlO>P_3!uAzK{*tm&R$&A%mda%bhlWs4J-kHFN#=^2jP64YcPFT zv8HX|Ac@5qv%@=Kx$!FqvW#KQvVuO%RwytT)#7cdVWdKHN&ehBwBRI9Bm zrItL65>RnA`8uEqFQPs=EnvJab}o*6KHE{*ZAWyuV)OrI5qHp0%$ZCtmabzi)+?}twDa`O$Ow; zhCssvbm+b?;-BGW01#NB7&%jYFU~(vS7|CK@gb4Q;(nz%-~u&7e@}b0J-S;%iz>Vk zO|tT&Y6VH5Ks-CcsENF+URSRRyMP?`I+5)-rl!BMtLq6o6=cIcITT@^KT06`4K^!c zU=L#z9<=FITJIUel!1eO(vGb<=@U0P9@L7hr@C3Dp;UG;)64R&PRa}g8sj`(E#2q- 
zd%hH!IEKZ$y3*NRbWcR&XQC9YMRDsqwwaHEVk_^CxM_AqRb<5>nK??gj2jj%)BERV zwH*kZezZo9YuG+T<-&9&uNp;ACvQ+R#1I(Y>1GTTn?> z`y@h3MA$I|!$ugjFd)iczO^ab+S{78p;@_MB*7peqAYF4++OCe)nA%mYqJRX+{tYl zFT|6(1jw4{N-vFFI2Y#eRJPJk8Ap)bc@PT7YD30ry?_cqOFA`{_O(7zOK{jup5wx( z$iL9LY}`K)Ta)bXd8}gld*n}6R*pN4)kirnH1Nc103Z#Q|#ZvBWCLWaxvBT z$1W+4i<%4+I09m{rev+Mgsipv|~B}4>nnOL+;Jldc?8L%M2`W z>QirXoPR|;-MXek6gQ9%ch0HGGQaq@11IMV$6~(A^RdtCzrAA*crRLf{7C0wW(eGB z*Q5e;KzM(fqZS&IxcNt>SoF}3gPYJu8 zW@^3A=z$oBh~D6fLqjx?!kd9%%CKXMf^jq(e7MJOF)h+k8AZqX5|ad_n=PyC5rb)x z@ULw@AgKC~KE{jradgWs@m}GE2?Nlv{QIG4Rs0=_>~lvlN;o|a-@#VQ@F3(a#5^zi zd@ixvg>u27Hv}H(LMKg=nD)TN!PK?u5PPBmt|mWD-#)-yZ2W>$^R=?6Qg7i6a5PS) z(Z8+6|1G{b{6(QIM(WGOE{gq5y%ao0ep!xM>^?eLwiM6UO_Q;~TL>unf!o%7>gBq% zY_{m$r$nFhrrySB8x%||v_K27+n?p|z9j*^Xk`Y7sggTjd*Jigcm8 z-7+FCYh9<6DcqU@i&Ed$#%tjnZ**8)2$U)E?06da>H_p&>K#S^%G75P6kC@b%@<^z z_t()P4cL&gpB1Rf!KZF{WxW&=)0@pzN^Ib#d1H|M_$nmom#IlkE1r#vW zA%g0%u`9CcFcs1~anLkLGRskI0U7Q&pG31A(rvQ~+NR|T=Y6o95B51x@xmeQPf@wZ z>*+f?_>!ewnyybj>^gFAPJTj+x5qD`n>-u&v)|PRRd-)_wtfq<2As9Fnk+Rhs~u|_ zGK}U$Xb-MY4_IZk$-j9bx&tGv0*RO2v);my3I^Df=kuO`(VD^4I`FD!?aC=`!vmp+ z4<;~HuIANAE1d5}Y*1>!JtA`cgCrwx*A*dE|Paavtx!rmcv6J!0Izxnqg zBNW-xR&2;}z64z2ROpA1NAQIgfYwO7pZ_9nimrT2ZRG24ZWfFgb`Y=?+Uf)VtJ&_i zTuD!x?~72C1V>wmTjvW-TU~jB+DfEyLfAH#PqL6n7m5B&qrqJH&q2PS+lQa~)g zt;3i20=~g>4cm%dJu}0J#>3f#Q(Es;jnuUo#i1*VMa6j! z9B-+PJPR{MODy&xt_kD7hD;9Uif5h_M`|RAfts&TZZBG{^Bj7ko;P{v6_N+0v{`?t zdcOWie_knIm2SYXS-qp0w)JE@W^OH~^t5MX+7_@scBR~I0IS1{^DQ+%)E_7@Q5o>p zn+Qjz;9=`nFF#^7es5U~KEO4A8Qf5cT}I~5x^;8695W7EH!%!?^G)r3q=F^>26m6b zrtmA6ijD#kq-FIMq0@H|N7pYpF-C7ve<|xy9JwNbI5X$>-f*Rw1;?d8Q)hu=1%h?b z$Up^+!0=Qdvv|`G7xNGO6~P#k^qG^|BSFSN$JxEN4;MrgX62ThCb@3TcmBY~iS^4W zWQ+_%?F;V$aS&}$Ayu^Kvvt=V9rJo)PKK}pA$#PwNOD9jrR*``H4<2EMmOoFmF01? 
z%yx~^P1>WY7Z8>p(P}%mMyH1x`@LNUUzEGT@o0ia&JOuEBTl(67G-QRMqkQhyvRg; zhy8NktrAp%@WtJw=*qh8ycfd&1Js>GbmsPntDom_v`WtL#W=A0Gc=A-GJ%12_PN#S zFOds$yYkl;!LqlmY7J0|8l{XP?*TiBS=h|N;7%4rkWZK~l&jvm5@foTBngqTR*c zJd8|if@7$0KeyCNKShOjNNyOxtxm4#F{?LhNr7MAD7%@na~OsRhY>@jSYZy&MYSS) zcSg;JC&I5i;&O_GQQt`O248^p81EGrdMMN}1dhH^6}k7Oii(z^=D7Pzp#Vu4INAAt zL4d#1-bRP%c#@m{LvY0`dczeB$L88!_Z>Oj+4OLSN2GFkvD$`bl0I>`STbPcQbJ|7 z=!JU`+1-s=tbI6AP0y)bhc9@!kg-U^!u+TfJM+@GE;h>ic{p}evNxvNurfpWw2}FE`_PxM}H2W+e;;T%0C6mgvhi{%joidtk3b7 z1c0nr%m_XmwRopw%>8OvL{f#rI)rV&m;y^@YF{w&8%z0<^A=-ORflW2jc5z+uuUHt zw68$CyGFqhj5{jP2A-__!I6=wFPG@&I`$ww1F@JT3MhEciAX!k<|3M-me?`qjKh@(IEg?;>>W^hYN-iPFqOR5FHZ ze8O~9oDtp19XjIkwU1YS*~BdmOvqCC5zCF*)a8a=K$U|_8(r*lm#m#g9R?UV1*loeuy;?FhMPsm?*>MZFbljt>a zYVcP0QuKv$(mK1-7~sO=ZE_<%VZk@=SneJDo|Y)H7qFbPS5vM#z&y6AY-0e$mlgG2 z=i$1G)AD1M8Rx)ieA+)fBQI!7v+m{cR|$Rr?>uR6B5hQl9-)OvRwa+?ZhB1x0m!@kn6N<3& z=hixV&0XPK-E3dK^w{9H@jb|a_Wy8$QDCx-f8ALWNLd|*OFGToq`u>w@P=_e>nL+U zyk=p!FvoQBrONqwX^~QB((txVE8)b?_>OM`tC=Gft*`+l`Q8dk@aI}8rUoWFOovTb;|m6C!WSF^wAY@6dh|F{kOFH1-a z+}1m~gXyUj&c*TH-nBn;-Li=;B929v454m)e)n04=smsJmo%cLz{iZVX?IhX(mKE> zCpgOyxiddBH9A*(y*v`cN@i%E@|v0WuGpiK zV13E}=Y{Yy^sOR%&}~1dIeaO<=v+oKLbx4^xpFWU@D!az+L;1+n33GlOk*Q%4=vEt z&1e;N=~O;Xg};-rbwbB{rD-jWt6f~P3X$b@sH^6jeyf*GlhDytt)F8)4^}{h-wG2a zFOF=2L1n!H=N-=sm%FNmQF$=>G#d0OA@SZlO^!j#g6o&al}z|jY6JO)cdNNshFTM}n&c<)ij8v;9gDp(?5&BOFCf7tkH{Ya&WTm;=SKhXYA|p3(TK zW8*PeEeF}-VucUQKxb73mOv4;twozwqw8#YA_A>%<04`EDhcxkX#tE>@vtGCe=mL7 zjs)hKd{Zq!xh?5Kw6gi=Ohk6X{pECwCS90XeysQ#T?7A^>?;fH^iCPrR@#hZq z_V|VZm)vN94|cP=PUdoZq8mp)n=eNTe+KYflnVDr&2d{z0p$}~SRNXD<~fjv7=3kb0*Zs`YgdBFIAB~j(J0KGnbxZv1Y43SocLD&qEFe|nY~;M!Y7G- zn#@CnI^l+QF!#+|f#R#3KBko1d<0oY*C%NYt!E5i!A==_hAHm^_sn0~buf5!mfM|B z^R4o{@eJXjr92q?7)9Rp=yGw%W9tReN+T53=41M#qbMnNyzyB{M4D|TGQ`JStvzan$Ro_hnfeO#@tEzs*rol+)RN%o_X2#qBU&kFE_+;=hh z(K<#*6I35aL;!H>bP7Qdy*zn)Fw9;XZt5Muj$N*P@rx?|72JU4pWs zjAkVRt2e6zz2Dkn)I3%jn>s}+pPW)5FNu(B9ATWjY%*|6cn1pk*O+UZE2srV`-;@% 
zld8=PA$MEb#cc+PgqKnBS4kObS4P*{hQj5Y#&Thdbn%xMFYcs1{ZfWQ zq7!#D)oVGpK(pY1)dMSp*dXh5Nae#xbo%a#Myx0sGb1W)y2=YT<+4}*0+~K~I56Pd zGxO#ryfy7&9jC=R&3&0%y6z2tud5jZ0Mz>m-5d|LA(KCPGc8{JjhAy5d}zE)&Y6SG zI!qC6w`TJFC)w!bmMiMTYy~)=RR~HlT@T6j z?4u~vQUE*ufuh{1XZB>G-kF;74AS$rJ6fNt9B_rOL$fX$fIEsPL?-FdB5f~L^C1nU zXf@_OnJ`&91$STH{GgQZ*Y$Ecr*^RIgV8 z>@t0kk&;{nd_88o`uM(Fqp8CSTyr|9IT>@3M5PfO207BNd*)jB83(%qJ(_6t@TL{% zQMgO<%`xGSy^S>-OL~&RT1hs@ac)f{f{p>7irj+n4y9`BDkK+)(m9FWtZGH?ZUoay zk~F(M6DHyMlwq%j;J*}6UGxLgkYF1=&pTjQ#A)H^OpTuw>H@L6lr4?JiQwqS)F8d+ zTS#`${ylaZL4B>m2Y_<1$}&zAsC;(u%xW`No{4LhUssqpxi8iY33HX`lzK_{ik39KS-=vL`5-spj$B9=h9qgrgvjqdlW1dE+sOBOMS;Q&UpeY;g? z&{{nr&_l$(KA5I)brXpIm<^YdnO)`S5hb}oL8ZXBlp8{GClec?oPyK!SLq211H>pm0(4>uI-#j#60s+IV zB}Jqv_+iHEf}1B>8-gjq`aRSWh|J8pugBqo{Xu)!+_4G83*=s>ELb_%c$dUSeJI9= z8so%_4r;E}s9on2?!JC6c`h^mnf}gZpdB$aio64YhH{ zSwzV1aYd%!naD3}aKs{dN~3AT+MV=vnp@-rckFp2JnCS zoqZ0qi_NPUiX|oJEY#47Rw?5Fo?Rw*51h1hKM|<6<%37zXHwHO52hc+qy#Mn+^PJ| zdo`;v^sFvK$`QGwgkSe`fc0du(mNcA;GI1vlDZrUkE2n_5F+V$W^1Ev2Y{h<&H!_0`EVzpq1DmP%7C5AZI1Ye_e*@keFU)@ z0+UU{uvKx|;Y?6?^POH}tT3~oL%oeZCo?@8L5+~WwGi*4sw!*5(9p^@NXuTLXBbaf zIX{+k_YT|ACtIeAii^d%on@ik8d%#TCiQztZ1c|?rn@O#akXa_uUyg#5KvqmL9NG@ z6vMmJz4GSVJJ*H~I{TxOh}nGOSW=wb7yjaks3)UPjglhhHR@n$x$tKI3+c4-<^x?b zkG_C7oPPl5)%5;|ti0R-p@L<&xGpjLy8wU$_lnY;>p{$)1uIKw1NiPPO`u!Ol-5z* zr4^_?Zg)t9@74pV*LeUJ$4Gg<%knw^Y)|Y^;~#w7c{55{KDz-{~%D`}%G`yD{H9OJ+`wX<})M)hY8%TxKJ#%Q=T= zD&>Znn{(8_w50?Y4EnR}-BpWfFjF&mW8EBzP=v-L9qmL?A2zh{pGgRYN%-@JjiH7t zFyrQUGQI>(6GS#Qc2*u&7&fr^I#6N_R%_-*?Fxt6f&X(U2inNre~SNOobc~|0m(hR zjd$okzUU&55~+E>Clzy~4Z_A~k#L**dTKpNn+Rp0!$>sNSe2J<)e-4B_He-VknH$; zpr!i)E#-VRl=4UldBEz|z1^>iTZkdNHSBOiM`#Fa2t#nk)j1srL$1|*{vQmvZf$U_ z`F*)~9;E*&bQDlcUl*PBD~Hv++`vTX`?64Ofh1sTWnlk(-L(f4byZNz z?@O)F5yyrF8rUJlIx$!1d*{BX+Rx^P#&%v9+FXBMbIvY{&$!N|_JOPR&PqB^Q9@y~ zNpx4Mmo6cHr#;tO0!(Q$3Aa}vXmv<*M>vDV@fQMBBL$--?qOBDkz`Xs*R{2}@ z>EBzpzlGNwrTwicC=+S%`+%@tiUnghRQXllmvmM|Tm#BD?yHexRfB)k$#< z;Yx=7AFaJxGjF)iw=-s5VX!$@Y&eR03OMgDY9V?%bFz~%oteWH+%)i-nExm 
z4)VoE!Xrdob_7t{Nff8Qrm*76YkuFi9*hv~ITblbiKV5tLCjd9QyzMBrp}I+Jk=s5 zFlb*_|1q8U8KV4qc;!Fjm;Vw4dKw869z{5_C$80{1YEA%`uLSPv?FbpMSQ18)dOqe zHH&0kAvsj_FFAKk-uMU^cF6&&kRx;i45d^ks|zf-aI}4LGEo-8mD`iNIxqhS==z<2 z%9*|1Vpoeb&xJv$qo(%vzPpC?iOh!)E-LrEiMg6{pJ@!iR@s*-PwIN#F7x6tb~u~} z(!fr0d(3TPIuL1Qq}>h=7`FyZ86OinpM7Us>LSQ-2pbUPa;xKK3fl&h`wE;fF2S?bepoP2eZBsjdb zHP(*R2$itL*Z}!zNadNQfw+X27UIw-yB{z0U*=UuB8*|g_!}ZSjy^Q{vS(A$&kjcL zl^ilTkyZQg;TCDPo2*8vyhg$1mKdSyeXn$fz#IFAIJ+3c&TAVoQ;89+i`vAqW=C*E z)VvPS5c@fSmm|M$TCchm%Q+XQNtu$D(J2Vj|&I`XzrTs&X!m)qPa;#} z*b2iQI4OBE_V|C;_5D9BnBx1@2Z}*)!gmvr#AU&hvl}KBJ`hT2;(D8;LPRrC9|T;% z76|>(aAl?&62Jd=FuCZj)cGVt!Act!iNilkK8ekgYASBIV|L13p`C~!=C4s|Xql{Dn z`}Ff4_uYOB@0rhs;dknU^bUCIwIJ3RWbs+@?Sa=4Uw1?bP- z$?&KDm)A*Xc?NCzzM4u4rG|2kybadIRrwG{W*y$n5^AtLMpG{!)Nc}z4;#w|$x9tf z0WJFPzYoAn@QJMd<`aG8ncW3)p{&T-k2Xbeh#)xB973)~A;^K0!8%`>o5BNwA<1t%fk-GKQmpl>~j6n7)@FzhR9J#*zy_QY4yX z6dI7WKa)ED<+B7<(-9(~3o*}pUZx^vl0QTwn1DywxHGsB<%D_NMZON<5O8Mv=#L|vQa8ySVG4J>PnS;b$JKvs$6nchH zK#lgNUP!pz*h>>#;<@FHTCGIh!-aVti+hitXe2j-zfJbG;hHx0ctKcY6-DFQ>}sL&~(7Ic#fVaPxxbg(R1-4NQ?;j8j|KXv-JRR%j3E08_X4J)iQ@ zC3^ZX@w&_a!EXAx!XI(yLD?36d8EN$19aLsgpmM3PWFF7iO$i$`QO{00mflY^UqI# z(Rv>IrJ1oKIY>QAi9!6bM&WJ>s|o5s^X`37^@KyhCx_9S#aRLrOSKSMSSW!ckN)yV2l4$HPs0;1Ud^7(r=fBF0|@qhmO z&rMG~f4>SYSyn^94h8NT2|+%6ui1f%aETGHBdNeKBwrpafc-E9lFT*#K_vUzy#b31 zs?I)>2<4jl2XJd3r5w_df2PG|9>vSWR?+^sQT>EJLz3$~lVoHbNDPOH8xS3GIAN0Y z=w8i*w~+W?n)vfe&<6vT({HjR+C0$`l)T0MkaM&LJOj$knjQ#OMZ<|)&pm9JAcjANwTsHKy19!il`r{uZHWCjRUdR*%x#-{CfW&j$LiDH% zer@3IPrvq&*%`T*K6MVXzUzzPDkKVy5V>mXCp9j;gww1IqWvh|gRzEfy@L&A)KBg0 zz+Z1~B{0d&Bl~ekkz1VnrbWiO2_|BNc9t-B`P-+LZ*0Tif=yE_RQY!sMJVac`kl-{Kw`v7w7fwovohFO4;ViW`* z$gl2A`y-%hUr&pv z3Q6-+V!}n_ble$#9$n8gtymBF?j4Xin8n35d~mN9iY6=4Vronnvy`MYye|Hn(DR>! 
zi5P|X-a?WX5}EaB7}&ddPi9jiRGLeo{D;GSF-kjum-`qWGI|Nelm0jzpQU$qgEbAU zM)`Qs5ft0`ktDqex4Vc0t`_WxDB6MOg{~u)_o?>ArwP3|BTJ*-Wh`~fjQ^OBwRUT8 zk{y!O4Efc}z|VG5?UOUmH1nw_5nc&#M4FW2Hop3LoxQVq&jB$O#t>q2)iZ zJ%TSqfXIRR;Qp*hkF}-uPD3_HoP}^dB6~rD4VxBIpAD&IAQISi5s3?voKSop<#6+$ zUFTk`b%DYwYb2v2?p4gw@1lP(?@M`qkDBGzD^gc)Ndhsc%9TKJqJTi;bEb5tfOha? z)Lo=;qip2H!n^)A~y8Yw7GOM&*fAAsE zpntZpJnat~N-FoE?r9EXuil=Xk5#rJpFH<=&_MccEK*!f6L)KD&LPm*2qq`uAr_#x zqd79*3Q>_gfAj^&*@0NsSdl_<=J5EXvNU^nNnV2r@xc_UY~AA1NT`}2tPtJ?F@-I_ zMBc4_j%#H06h~B4_1$_;*Ijz`U5FKdI7rm=iz}F}+ z8JuSiDjqI6%Y{DrHvKnlO*kpwpJYVbsA6o7U>~nh>sgZ{t_lsHS4Ii~KA3y)ftc_c zxE=by&aUqddwPJbdKL-;IFD%n%-TPbe}3A2l9?+_oy_a>gR|t}?5@zl)gYJ}3B=J5q*Y4cZki+$*zKdLbM+qA+?EX@g!(_j)14X zJW>g&p<7&WNle3ixbNh;kws6IImY{TuH&hQgP#;DZ)F-+vURu=RyFOx9`N zm(RS+g7m=?l$a91KXFC)+!eT)zdks8duwHu20ErfT0$F}I`%+y_HoD8JACXj^K+lp z!O^k?6h?H$FXvRBZOaywA&CQoyl?p$3=sA-Qs>;M)Q6m0!}m$jYVUPV+nTazj}=5_ zb7cL53gL46`AeQ4_J{8BRuqpuy>~hiD|iPmjauft^~QC59F{MTx};5%4uUDsd*44( zbCn*YJ$BAaskM;n8*A1{?W`(6W)8HC)gRaia@oy*mhV$NQtd%V5S{U(069Gd`{o`^ zTd5EQ*Vj^_#@#P9450^JjG;8HfBZf&(nx`eF7YWr%uc6AfL`DS4+wSn6;}GczifLcZIAAwMzp`tSXy`n-!-^E4&@1 zQ#7BzAi?NUoeV#gMRH&O>%5A@M%F-mPD#y!ib?Ac`Hw3E)v1oti#-4K%W-BbpCH9Z zH=A*g;#Z&CpWoxrCMLV{;U`C4EGPLJyQKdv5zJNV;8*CRYcg3 zHD(A|k)+5Ghp^EFcIDsiziU|kLi;ThxW?-oiTt(X`O8~v5`>O&XVzek9vZH2VO)NT z6SvDj$x@Ib;s492{`1T2@3pjyb*K|+q3fkYqL#lwY2eSPp6-KiMjBM$T3NQvn*mPO z55xxgp`ZWvBfpDq$~{XU!<;w^ET=E&v2w;{V_!Nm%=KJJeuZg%enli!;>xjKF^<3e zQ&4m%e0@O3LjdL04f~ZI#8`5_P5sXJ>uvnAzDkvZ-75ZvDCzI3rb!SkU>+2Pj}e0J z>c5kfg)xd0$mT~f6LXdALNR@$GhLYwX%zF`E?v0;5K%28t`?S;!A<Uo2gR|x{eye@4PhUM!D!)a7`VB_(CEi z3$zassEs@y42Ljw%{_ZH!k~+OZk8MY)=RhGNw=Uedh%80|F!?aR1am z(#$u(m>Dm}trE(08e|9&=qW;t$Z%_+!mEY1{1leqzkR%CkN>uK+JZ%Q9`@M&sfgr- zB)^EP3_$ho`=UgmHf-Sr3QyLyt|dMn0vA$-FN|r;GhK+XY7ve>|0#>>|8yv3O+dos z?|2+C?D+l1LKxOiE(Bk%z%>w1(^d3sx2yZV^X_awDr7V&Wr)YFYu#G$BGr-7ou)_f*M>$r4PW%558tZiQ>MnGDnZ zCPimNx8&0w_xH~~%$8JBsDcNYh6Dau-d_v1pNsuOC^88}mLQV03&xvgv2sO0H&@1j 
zc}X0n+uK%co1lLix6DHl6j5IU15qLX4)55MXz6*t4hD%ElyH=`(7fkEUk1>#TMoI+ z=-7LKNX^U{yto=% zAf}NRx*ewy^DRC{A+AdHM_i_ZpgpSvqV}Wt509TrJ>gUzJ5n(0wyX@zV{y;@Rpn99 z*AJ}pjPZImTo|deFlV&vek~C9|LineMTPp`7BIs2g)hOCzW@V9d(-YK7m_wx40a1* zNU|{d0mc43DkIjp2zYcFGxoV|7rGWnE&&2_<@`N#$$}NzgHZlF@(_0;C zmXvTwdZE6X-qhgagGA31N}L`Cz0$tIZLIG}13dgEHfNv}{C7tIa_{Kur-MD6SEECo zX&>A+x=&{1r8d=2AB!b^ZHj)hfB^xQ-65>EGdIz!%^^eQX7Is5=~uHBicF zQhTP-r|O{Wm+yY4iIR1SV7KyK;}g?yWs4 z&}p>BxBpnAK5Ny&QN9W@2vhBi1x>-hjQkB+qsccnxrBV4~^~-pvcpmiVvoW@Io8EYd1N8w2)+Zs;y=9MI6k#U;9?AfT!JO zKBj+rJ0U0?q)kNZtaXpb(ha8g-Jywcb-mFZA7;l5?#lKkbB3@jH(Yyu;^J+}n97vQN0?a30K+>sO zOPpI_6_F#2CdSh0=sRS8$44prf=B&)PW!#gWBFeJl|QcfQP>*99SBmSrD_8-R3t_l zx(uw$$i14wr+grFry=&ukbRa`Ui&p$<|Xl!F7;Z#1g}zNA`#}|P!7R(!dTL06rA709((D-Eh{dnQg%ZNS3m6)XkuHh zEyN*}-5h{dvX8hLX0ikHHxjya;rRWEjQKds*ut+Ll1#ia7O3`{-r;}s1rP${djJnP z&dQ<&nDijMxfX8AM`!W*srz zT5%97Z~$aJn|F&WZlF%cWcWj-Q9%c!%K!yLKLN z`SXP5{OpcW51l(HAR%_Twd~%77|Gk-d@e&?`@xQJ46EGM!@{kMuiXu|iVdqjO7U0^ z^R;43mh;zLSZl`QI|7Z{%|bX*`A6%JNE*K7AP>Wa{NL#+5sf~;2y>xxEO2mjh<4wB)W_}gQU(9 zD0;X`TQ@##f3T&(Gv|0Q1}qq4=wp#IyBn8{tz%p}lVTdO)x@YIhvBh0p;$tdQ|>z< z@-6DUXU-`_7JV>HqJ+=_67WQWa@H&6@Ym4=`+^n#L^_x#`@x|?Q!R> ziq1CM)P6T)06i%ad6dL3x2>`Tcj7%OZS?f3Hk^L0O`w4}>1c1S^PaPxb%Dsej`a+% zj7mqWFKa<3EtEH;`_2WiO6haj&GVqta%BrmgBZ{vveE=9Bv|zM+?F5WZy~1hqq59I zqO89t^Fc0`3dUzvR~9YJF>&<5mV>M-e;p!+4D2A2W1fry-;%@)20 z^K~{zLJiP61*7JE4qG+K=dBe-vyh(8pf(PWmrPifa+H=47q)aDUeP_qi3)7Dz-+hcRlR^2%fon4 z9&XHR-`LBn;Jw+$`Rh`=0X3S~03Hz3c?iUyHArlPlLUuimz$9q zKAGV2!$MJdqryphscX`3(U>hO7p(uZg(e&LW`lOYjb9!laRoCo?1mt`peh-roEpBX zv@O2eY*X`iuNeq|(?T^}zN86sQOetU{bHo+r<;r1J%{%?&kQekyqTrux=dK^{r%JR z>txaw4ob^jxM!z8koqv;{l%|X*RMBqdULJ%^{pJRvkEf!6p4GTQs<`0lx{E5;wUW4 zuDPCXefrdV>f}O1>(ket#AL?WL|sDF{|{wf0uJ@s_unQVDWZ}rnV~F649S)?W6d^} zv6MA4jEpFIX%Qu37|Ym7mMk+ujFF|X?}f2WDPgiD`xgE0={)Co-uFDq`+r>5oa(5q zU%&glzsqO&-W*Wb{73bHRETbeIoY`YVH%sf;{`I-|V%ErN4V8OaHsO;gWy+|KH!dchKzIlk-=xJ$LOt`UW8Q^oDVK2pikXsA*nfVE9Rz4Q;f^w 
zkZAp(2VdM$^e7g@SMB;3os9w{4hA!~;_T&r^5y}a+GTRi-99=7A)Vm>PP7ykJly_Sn_%YNb#6UD8H9I7#&C$>9)+8qv$ z1vH9xNzf{0uqvQ*W}sLLFj!~6-ShWTm2Vn%&E|qXwFchNJ`q99w}gY5DvZ~`O26i> z8llRvy7M;_Jys$EzK``)C@n)5ui&7Be7dccSwENDu(m<)6`n}guyqTG1L6^?7zu+} zU{p)B&8$SIGy{Qc&Y3X;5~DMfj|3|1@;oxOK&)BUue|Gd^jKh(`B!*k zN5DlF8b7k>(c<-&(t~}tEND*Sbekn6I?cwW;bVb~&q@mP&*j2{IudFl_Tx}ffy<14zcq*E>?KO!aiSMZyZvW3~f z8=D>6e|smV;ioFNu_<7IZ-CE_r_4qEbGLw>#qU3C*LJ{)Y!@r%-q{RDRi-_lyU0=9 zqpvVo1ps*$@-6aC0#e;ChE?#^#l2jDQiFqo{`W_}08~|?*MF)P9`ndKARu7t^9uxt zm_E$B+6|0d$rP*H7C%+Nr+(OuO{886JcH&##ux*>l6*9Bkz5$|NE2gZFISJ(XY|Ik z@7~52JC`ggO4Rpq*3}0{b#t4C6d2&s=*06y1vV?){DZlC`cwIL%9Dol&aJM)8_9V& zCjGDW40e#m`gAvJvF+vw!pFT(x2(#>`r;NLuHaxL@z3AM+1u?HD_nprP>Laa^`w?X zic!R5W}m}w^!!ZB4fjQd%q8o@QT3Pq)#;kKUEi0R9;p8vfQ$qyNz42!Y@Vbn4H{pW zAdCz`jBtLg;h-IShNr>Cp71)}9fUl7hC{y!zMIbMd-r zwe=`NfiT0f2;syQ+_a2LxjWV;+%h1m^Gi)9#>igXuq5VEvR7=9ttCD|4QhN7i-tvV zc|n!jogjYWI}9+(-dDsa#^y?V%wxEVO5=8ka}I>B6v*O_(rnO;5foVlIGhQqGC2B$ zOXqGH(`KDbou_a42&PYyV_%KITLQe2A-ewZu>5QMq#iz(uUf>m@%pdDmA_EVP2)6cg4dd9{HfIYNpKqS6r&pSMN?LIs2(lS2`MVEgI zgFvY8@{@=SO0-XMP2fNnKICj>&woHxClBdG@UPndc*|6@0Ix`OYtkJp|&D{$76KCQU zp=wW`4d&!SWM-v1dpIC$EhM)LaPaUUfw5VI!Md(itY%iR+m-0b^Emnx9%;{v?)a)g zic5x#RHSLzQU%+gSqP(@8*a1mD(7uJJ&L~^d?s;7u8uNp7t^}bOP$kg%WFPrmQazB z=lYb>k{ik6t;BDEuq}iC@(iAN%T$M$w;aZzr~63R0I56Y(G7`w{5bym$JdVH8GMPK z1LsF|j;RJN`C+BaY*-HnoC2RUQxBN9_$A{EHR)2f4(_n9F#Bzj%h#nt|L4n+Rb9&b z8UnXjF09uuTrMgy^O$(jGIV$JCkj6G;LE6r;OY50(S^O15A$+8skHt!eS2729+}_m z_2aEjd@(wI#jRIwSY-&06v-j>3+Fgx)PFkz4xTzF4t>7VgOpBtnvx)^xc6X2u5eB%GP znV`E_`b)HIZHb0SXElKNMHJ|OaXjM2%|JMiU&I(6Zvh8OUY2(h?ZZI^S_X?}yiSsP zA+|z7LOvI~e_q9?T*DCv*^a+>4%6m*yr*rZbc{&n->czYOsI?+LuA!{&k#-4!=#P7>KegE6f{yJ!aD-wYY<;)N8yiW zPWgPYgt7DmVAI8#GsQYZr{CKdWmc#D;QB8&%=-E|_RBjKYT$DIR_n4|O|Xr_tuObT zj@SXa3EJaB=6!bZ3nUfmdQ1_Mnp62n>aXEujN!p90qHK(x`PKRSZlj6V?DI-1s~#+ zUe79zx1K|N@3m|u6@!PR!Wi=P7JAte{CaU<77r%M8k)k1O zo*6Z+h4m3-nSW1Alzso_u2`t+;I?D5CqFkUkffG*4VQBZ33(S)EB$rfw{*p&Dk9($ 
z81I&g*aAl{BL}15vv#TCnGm&?&!7ZNLsd>{SwU+d+ZR21d*QqS9NpGP;7&Bu`IjdF zc_K7NXp>el-uDviB+>0C(p-s0iYh0K-^Z5}7PS@;>FgSaIHVWVZ+cJ5g`+MgVGTam zhTckY1ODg-hnSS3A8pV%Qgh-5lr{LU=*E@#!1g`+$SoOz|5YpvtvD7g*^LAjOkT~i z$ipjF!z3hH9fB4?nEm-X5OV!4+tQr_zh&3QKtrajx0Eg7z*!WG#;pnKWjjdZTriQr zX*ZL~<%zy-ky!&+w2GP4YL}@jFSf96KxN1n;{`b|!cVNscgi=waC*D9ko4?zKv{|~ z7xiTvHo*KY>E!z_aJ|;;oP^?!4w|ANx;MEQE!UK(G`wfprEsY8@GYOuH^UvJRdcHz zDgAe)nh@r}=gN>x|2*5W{2Yo?F9~T?^K-!>q5o9B* zJ#BP5E@KqdpcWaM_DWat7P3W+$6T07>%EAbK467KbHYb1IHX1}91Ozl(>FG-@WT(1+NgGJJbZE!7Z}Y|Z=Z-s%Q^#XY(oF#7Cwb}3lkO90u^<( zljM}fjpfzV#GgNdgE%-IzSQ#z0o3_8&) zuO%pZju3Rec8ugnn$J@;xnuy=q4^icyk)rgSGyUvv<$nLw|e- z_D)ve%lh869nGZeoW{djL8fTWG+|=`=scb_c7Kv671Q;9HP-j7b5upCi)?DdV#A2N z0tza*T2^5S=9oKM$xcp)$SZ=K@y~((DuXR(f$UUVT-^OsV|N}Xk^5c)9>}$W2wSm@ z1^xzIO-;=)Ac>4C_Zrp*8GP(ngdiS4vV{W)kjtCfJ1av#0<3O|*7{3_*0l8Az_IqU z%_U8U+KWig8WYA`ar#H}x~s^4I|+K7$xsAVGOPRc1AG9O1S?G;Zl{z7yh36uHT)+* zNJc#drJk8AE&q_{>9)-w7=JQ9NLo(W_ozG0;BC-U*=s)K7#*+94VdhPeq7c4Yy4ZZuSxyI zFpW$xHyI5s@Bn>gjxG|5*1V-`|A5ZVAq#s^3_A9nnrUNhN1gf-AE(i}29g=JOs*t$* z@{i*CJxjUL*{TG7#csL%8T;ZPAP!QEz;ayxpIg6E)}Q+WL`VI-?Bwx?Q9e6Guc54x ze1q#GtD^L!DVhPQpFEp@IIf71iW4_Uh(4~EfAF<0!d7UgVu+3}Of}GlN!i<~&*}$I0ubg9>S5S)5-zbk9Rgv%K zGe_%z?i7?i=fL%r)kd7=2IGCaThLA^W^mC5t?}>ARwUVdYHI0{UCv(YGrHr>3g&eZB32Ev99)4o!ZFw@UQFh)gkPjF{{?S zLCk+3^Q$8E&Zbd?q7LZOko*4wGs3oq=+31v5L%cR|`2b#eM9T#wgba$z{2^?TiV`DB)GE~jRJl&!o_sUK` z4+pt-Si!WU%K@L2m38uyIx8z{1VdC>RjXvg&e4sf|K9rq{NnmF9-mSAStxt~;t&LGBq}vJ~sSUR83=146bbPv*y4j>g zJ-kUS+lmapZ}#n_&fIduj#y_;ml|Q&ln(Yvlh=y}rEOzAfON zvvdK5w{mdZuij9(R{=cxpq7Zg(?vc%&VIBtmuV^J<|S}@_wH>4Np1{~il!|~2vZRH zfK;l8$IgED@}+2gKP^Ag-H}B&WU-8kRo0vB+&La@mBuG0+V=F1@{h>AB`{y=*Q=r8 zg!MiB-btTZ-@f&UdocOzNKL3Ycjg8C)S|@t2#V>K)2>ivFRGC6yLa`Cs$Zv_eV&lz zE$A#xQRrsO;uBXsgm6DsE=Y=6vedIU-L?@`*VHQyD;(=PJ$gGSiXSPi*Z1~fBv(m~ zqb1J%ROF%A<%-C~3k3oR`>C|lBit@&<@9_!Nt)s&$S%(qxJ-Gg!~kLR_|~QzzyQXo ziuK78Ia4FUIyGSjz*kJuP5jNt!gVBxx}piObN`-xm2EBONKRppL&*VFc<>J`ZspPv zNJp6Veesxx5~baJoH|JRGS!gf4SB6`2D`KkG0WD@8Zj}45O(-lEo$q@0u+2 
zXz8=|4ZWB%HkY)Zel+2N2b0msLBY_vrAw@V<$1BF&bk1I_UO;!-}dZ*KMH!dsGSAT zzIm>V0D0ScUGP!w5;`^4*;ut@K-IjP@;L65mF|xV>EI=UudnW$V*eLvSj1?Psf`D6 z>}DWWvb=Wxy3I397G5QLZm}`iFL}EXu@&Zh+ZK<#Mrx|}A3LY*tVfnE|1ZT-!3#WJsq_9kgOp^0eTBOq%o&W(ROs zdE%z&cjWQ52|1xNZ(%E5%#K-4cF{iFrMBE+)J^Ra+cOeK+cYR;uY7#_-Grnl_ZE3S zzr}03*;0A0Dj_l<`X!F`CnHT0=){Ac5LjcGax*JSTv?AJ$c6^fS>7q9Y{&qKe;A-| zZY9!)3NOrKp~75s)JeyL-s{~TdhGSJ;p#S7+uKm_7h=)?ZPDZ9Yb?|IwMnxiS~r>n zU+xiacO&=;vjjTruppB``$Q=At#tXGmL0+^>Q{HxM=-`>C$FkRfX2<15P-CGh7By0 zN#t0SeONv{?eJcW@uNuGW|N^eL7qg^E*H3>sqHX2d6{~ySKwf1?(gm-edpWdA7AR2S(^4AfEX*&w~O_WJPSbQs1MvVmL-Lt z8)eVOoq5DXcEd zNCxG81hbmSy46yP{jl`>dJFxl^nqp)PE+v!52N_{tok=J{E=WHz28c2wCW5u`cYmd z&j&oJjRY14;`__9$2&K&uBZ=2T^ z=rmcjo%{iCSV2gS;las4lk4tqf&8 z`wJAoEX9MoU%Lo#B*qiSdukiYJz+Wm-$D65(;;@Tm<$VK7C3b78r#I6;xSEd(Th2`V)wGj3vv!dT`PK6?sP@?A*t=Te7E5>V zjkVj$L*fw~BfdV^`9(RmfFg`9Y?%XO`6sEru4SZdUMsT0R>@eIZIjw;>z0bV(+-r* zY1pg%8Z@O64@n;Bm}YdW3@}iQirC2%MH0VTih<8u3!%5GV?oe9Y`(339JSGp{Sv2z zh&GA&7GS@|@bSfLb|`F)=@0eiG$L(z82(DzT8j>9JzMkMnoNQVbNt_88qZYN$wy=y z;yI5i=mEmf;_Us0wwKi!&^QxWVf&M`GGI0RjbcB1i1VB7n_T4#7sorAq+Q5%j1f>e z`TFKu0PVUY_p1$?@vwNa8j7Oiy6+lIP`BqTe7r??kF1Ncc_hy%_Il(6$cyrHN6LOCS~2DZ#UYt6x>j{;0A$t#<@ zp&>!C7p9gy6FYp;&yoq1os2%4nHFUZ@L$v(Sgcgm4gmCR8Af~Y%#8Il<~SaCap;4? 
zcs_v8#3yW=^7YG&mXeMkn{4o;$;gBDpa)>wNiV`qx3CbmqrN_UFs<@R_o7ydQuXB; z)s(A$PX%%J-Xn5YEfX!6pcxWW`tQCYrl!IpIvI3%XbA>d#!VIf&SEx2>yPp_uUi^b)(1MB|6O)p+4KsE7d|V<}DZa z&*T+k^au(u+)*>Bs7`0%cF5z!i|UuU{ety;OFdETb8gGXMP3F0Qx3hyfJ;&P` z_nDZOn0sytI6JeQ@+3nJ|9WtX-KG>^1>KoC9K2_MP5zX3^>RX1PL4hXL$;%0#~<9+ z_hfx`coQlv-UiI6XJTVxyD8p|Os2E9(8fF_;%jBS?L+dd6Om*9%;bzQ7{jzEW^-D* zK5;w`Ng*Q#8iFS2)Kamp+lPNt6VLl(ZdUyW`PN5DIWKS^=+V1>z|Tn|I=`6)t?OW- z`bFqY#(cn#iA?A^asB42^D#R5oyQC0r6n0O*UA|cCC4+N^F$0og`b>BqlQ^e+Jalp zn=;2{UZ;$|t2HUea-~`4!Ac#YP1oP#&p#O34yi7Su~A!{-)Y{|dlpuD`Rmp!_DhC% z;cN#L)IoM>Rc8)UJ+YToAioV4u>!)o`Gq+a^rkO^J=2skjs@nQPm-__In zuD39?LP8eiR?AZ6KA$~`an_(&vRz67-G?SO!_Snh72t{`-va<`yKuLKG?*iF0Y^uz zz#Zsy+%C^epXWm!lo33w0A1-ySd-)03!m_BCLy8$lWP18V9(q$nu1x(+ z5=eb;zgq-IY<~c`MlO;>CR0Qx50YwirqSF$FMk(zG^`KjNQM%q@X6qs!PaYVhL{v6Kra3b1$s&*ToTgL z5+`18k^N8)v|Ka)-nYSGaP_uo;49CeSCiu6xNQq3XJ;6TfBnNl-Q`|Lkjv_zm7^nm zQ@w{A9UWU*TT50q)(=xYv&WVNt$zO=xK^VErWXTr6yxpVe9RHI%tL#R3@6hzetv`G(cAo}lp zmuG4sEYt75Bv9RRwv0hJw$@UcB-$cdZD_L4Ef^KKtJ~q&vl6sGR`CmHuQdtMVOUOW*_*qcj$SnnO9_9P8aEtng1&S zJ<5UlO?H;>f|pf8p|EVlS%72u>(+6UOgnwfz5m74PM;)#FnYW1)y6iD|Hxea7VPbk zC*#9qrPVJOkloE{{tuy1{E@dH50Wr`(In7CTPXU>Y^8F7Q_fgY4~H$OMorI_FN2?S7}Dvy z@48_!)c8f>AqqR4ehj};miAMb4?9g{7z!aF^KUz# zeM{y>_V?+PSrGut&YUb7%$;~W-gmyo8?@(3c0g7cFc`;n5ipXt%E}!)+)|b;us-9k z4z;q2I<^Ktc(-!PV8X$1t5Bq~=t25(W3hvD!ys3DUhL+D4Yds~UZY|E^q1T&&ef6z z8{a2ECzTZp_{IOEW?_c%@5vgTCsb~|6i~uy1F_gFjB*43%uJGk_j>HZz>Ou3ppGtv z+rHmLgS^*}MUT5Zb_&3sGi8d6V6FsI+V)iixe?eh|3$O=hm6c{d)#+}0xFj#=xUiO z{^-ZSr5+Tw&?f|-s-X?Wm}sr3sZfw&p_Ckqz?s3O=M9Gjtab9npw$qD;bVxlpBa_i zDIZEvb8jPYLKd&wcFCusI7%caojra7tRQLU;(Z@v+ff6kmNYnQQ7lq<~T(SIc^6+?*|1Q|XIb z>W69FsAeyDT@Cfk$oO6ab2x^{wu_K2-80yr3c0iydZaW4mi(q{@vQ1_Eg4igX<+}H z9%0sk|3UB9`sg$JAs)q-zJNxx!g!||=X%mBOo1s!Zj+GF)p5{H#-Zwz)`R^YY+ta! 
zne&~t;G-UfhL3Y6{9VK)Dffc{I!SvKp5iDMxx2H zT+Z%3(X-^Oyp+=$UDvMgg2^GT1A0J}GUIqe$ zi82?^rsAT%7e+pqbW{CZ9fhZDx1K8K>c|rZDx}rGlo}h8qB-+^&e$2A52Kf5X-i7Y zq=V*1R^>Nu<-Lq**&|H2;FetLYKch`OV$h1DY};guxn#`zqP*0dAw{iy^p$;{+ia3 zJ>H^FKhor2X;mSJh5**~a!M-WkXy$A-urYH7HtH- zPY+b-)(?YiW=Uybr+2|$#vysy16*qPpLgxh+U2`PAs%QIL7%wo)_A)L)j~=TPT}qu zHpb1qV5A=$0%f*;Z|L*CWi9h(htHuJ(~}XPQA;V#xs**lRtP77C|3qLYCob{E>ma0 z=|K)LX|UK*F0V+sA2{NU+(;Byyy@K?erc9p`i9dEE39O_ zMQQ|;(%0({dBa8=bcvjLsr^<->Cte`!q61gC$XdY7|u6YdqZ>FO-xOJbnpMH zSJ44Gxh2Qm&&3oLO2L784~${}HHFhUXp{O5_h+tXO-+Z5g0VZqp(%YIoh^XpwE-M& zRyqpIu&9Xza2|CW@mQZA0VdfR%t(j+Qwj3B8FF4IvcqNR#30Q)tdU0ChBnIDPM~KL zfaLD>h4?ZoW-lSwKVK(UowySX;X8fXAl+h6LL@&CN4(GIxa7=E$Uu&ovNU{!TO35)| zols+`wpoA^FRFidfQWw>ucf(7nojPTZ@fL2k8!^~rk7?f_li;307|6ikkB)L6qGzd1oZiR`joki`rQsS zrIDp!5x1b8fI5}y);ZhvyD|D*w0#IQf6+8!ofzN); z4=cIHW?vD`>z~zge}u;|M>X;JXVOr${dgYkpAW3@aq8SZ3&#)S!7|7!FHqlzR(BdZ3|%TQ0m7CS zY=i7hsMc2ZLO{DYb?z=9q%lj6H2Tox44!h7R6O9Ixf%ABcwtI1 z=7jxOc5}%d8*Zwz<{mjQPA6}#eU4a~GdDbm3kj@&NPmA8!U+mb0y)T3;Gp^Mu(BA7j@Y>tMB?6(4OCq@u+RET2cz$o|2#bI9c@K8MdHk1&%&<=W3t!j)_}CrAO37 zlyZ%_(!HS+G2K^+s><(-XUJmnL#sx$A7LM7Vrm@Q3S(Z4oE8~B{!chl=tfJEB*)q$D zPuh{|($i`y=T_bZc&J)+%w;52$6%bcBmORd&w8x+w#?IKWf=S`EGoPx5$v-pRh8X~%dv}P)2TlKmfLES_W=sGsWOIeUls5y2H*2~$ zZCU^*Gfn{r7zb{}?k2B%{YMP`JI1kanyK#29dm3YWIk+(}y6n^%qgB9;{?z*8D7DSXd1+_8#OxOT(|I~eLEuc4ud9U4P z?P%^i*dtL2kT{-qNHo7x`eyVZNep-@5ZLJ_fTyFg`?Mz@PwWH{cvs6xJGT+eFZ;b) zhQrC0woUz+_I!MKK>VMLVxZ}{)y{JvrBCXK>G#GEkTOjP9VZkhwt(Ull>3S&;k?u4 z{nbxZ_k-f{RL6ifs-5f_=g4iJG@h5+=TGUTGPT*F#>W$gybCIJW~ zumFflH2U^KT$N*VlDv$V!C0tg3;}eLYLtQUTIX)=7? 
zE|KSm>KUpxnb7`G&hJ-!z()vBlU9om&i}d!e>`<+Qdtb%nSzD{toI??lKI)V_xo-g63HXZ7htiY|a-AFp5EDdt(zX96#C>E$o|Dnx*V{0$XRbDIe*t0YQn>)es>_!p&)%A67Jy=nDR1Xdsv8Z9Mb(- zm!&KXD64MFS@4DrBk5C@HXx6gA z2B<@-+8jUQQOlm8A-0mfti3?3GKMfj-A=b3mL7ByOm(g`*KAe%C*=(eSw~wcf_QA# zO^i~aLf<6cw^;*5Oa-Gpn zAdGa)f-R;~ag8i6wR<jUbS&P|&>zmtA*C?~P3k;>&iG$QhM@erY&vGa7C;p zk7-!vw@=lt!R(Aia`4w+FsT+?fF&?FR92u*=9RFhFiySyZR5>uF1@XdWmhoJJ5YUb zVZ3eSlj7cgy}kCG)BAY7Jt1`}?;4@lIe(jzk${YO@JRGU@JN&;aMU)2xla2=6KQFbjtX@Woy?B}k}(9hK!YQcfMoa`V&t*-bNYVqKJ$J4s^-vysoO7N z;C$G2&-0a=5C4%oXEfd}PvGL>o-20L)z!HI)4}N1ucF{S7>k`i@KmsZXy^iCj$mvs zHMMPbu82AjMpp(!@|XON_ivb;K8RO%FafX)$a`JEID&Hl3odN?_s0YIS}W4K8fs%> zvm&U@`B!}IZ^f}pMo%s6y{}zlUOyh`*v-fo74clRpuvAF56bYGliX`l#V3ZbXkJd4 zW|hN`@xIGn>8%WYbIc2sG=2{#fq@i{k`(3adV(G7kFDFKmHPR&jMP8h zh|HT%c`5TG<>-e`cJ(RzvL=HtPoV9;we|TTxUbt>ifY;8Sb*jfI3nltMTPtMYdxSh z>kIf?oy5QPRM@NMR6*m+P@tey>FDg-!M_W?^#*~6{ez3UyMdiuDssut#H99WGPWyS zwVQ-g%rnn@Vo`wTNVBsABYTacPsm@x1+RUHSm_(zt)hK>yp@*_v04MqN*uDf;M$LieQ$+Z zZqxI-LN#zFuA;P&U7^V#^6@+J3KlB|xkCnH3+wUzAZZ`RwnyCsWsUdxJ}o(u9?ix!G{9EUl@f|J(Yu{Blh)I|eb5D+vUoHn zl78#J5!GX`Gof|Uw={SB69zHXhYORnV!3R$=j-my`0Nl6ow$wk8(64fppDZ3Ji);} zuJc}bz%j9ff^Xz}D5taM^#X~er|*}Hhf7w$di)<8)Q{fw%s(=#VFX#k5$IEAw}f_= z{%}2!Uf_`r1V;QEg^Rc8{4_ZBY>=a&jH*og;cp4Zjyd!F?fOa7uqRtrr8~bKGr)18 zNsM%0XH<&0FY#k4AZZq7%b{e2ZYMt%N*b-}Uldy0Y4Z2n7H6?ksZ%=|5{mCz)M)>H-kCs5OL$wm#Zh$dH+p>DRStx zc(ZhZrJ)zteCUMADd=fg6ZipcbC7e&d^D}WVLXvFdGA&YhglxL>;ut5jfou!riiDv z2bDpl#b^LgHGm1p{NhHO0H#zDg0%9*BTKIf zDvPBoJ5uDVRtJi4tg)e>l|2xMQd>t2l`)O#{!dMzpR4cxYLO4Sda zS#%^vWr`Z2fzSN8CC;w`H8UUkIHV>)7_GY@Sx1W8p-Dum8vzoM{rQJiUNRwl3e(zI zX~M{krQSJlN~U{C4VOmWl4)A+8{?Tqx!3oAsXHs)cYVQi)v*0&QnHe?G|AeHyTNHs8z35r`XM?hMvKFi>CUr^+=To``BkG5Ee zcpj@y^+?gRt&Qmjddr%#cJ|Gek&B%nFMGBR9Qw!BQ3YFP0*HPHeA2crvZ6kexEid3 z>02{p>ifTUz)-z+yYs@QP4XC z!LG{67TW0yu+AeTypY^n3vJrvU5+yF`cg;n34_)tjg6m&(<588vD4*|8#go9ONy#I zX)kxP2DmI*S`ye=n6^_yKR zPeMT{wodtzgq$Lqtv&#f;j&mhLK*E&TPU^?5>QO2WuP`h@JPAUlYj^8UZO_V!Eg-# 
z2M?F(b_Dd1Tj%pA$OC81@aw*UGS*v&aYPt=1ZEUeW32a-aY*FA&5^nBbVK0~hQ*&u zkUk6W*%QEpaQaS8nW-uPW)mGL;!N*qXPUABfLZ9zff|nKJa6gfC|j*sZ%Sq^M{7@Vg$%cizhEhgXpBi8qF4zr-j>8zhcX(O0 zjM%An|K8wOw#g*R`f>}2)-lH=N#&K>umLu{%*JyBv~zXG{n~&q<6u2@D(_rF@0$ot z4?RqSk=$Zv_f10uKhTY`J*;G?t6lkPSI1H6J)#x>6grmLBi@qU5K$>*+k0Dez z5}3k+gPt!lGt`>yf4D#j5#rH;q08i^hJiOc)q zic&d4Eh>^%J-)@)HFR4v=2D_{HkjyJ1b9rCnhE}T!zU|q^3}<5s z4B){pac$$P{<03HFzbTR)?*c!k`ezWJeLn~a?GSlI z%qsjgm(HfIp8D9+`%LC^vH8O?ZY$k0o>^5JKIUQ?(u_Y`B+|)gW7+V4>Zh`WnpoA4Z2IfoqJk zz#xO?aMGFI{?HQOCNc_(e)Om(OeN}*T!=I31vT=C4ReR4KL-E17Ov~?IlL5Aa-5BnE@Q<_q9<;%4>jc`oJ^1W+ZVb@fhcaOs<3rf-!B z#<4Cnp2G;d-ir})J{-*HG`ZNN?szgB9UNT695ZGHv{{;rJ1Gnx$XZEY_Tv8-(?aK8 zI5Y8$^OjyVPM7E15U|SR@BGezA?Q;c%N=PgTYg|TG$GsHRYl3HJ#W%M0Z#P7$)emu zy@iFEN+1q>yMyV!mKm7W$O}Cma7s`8qF>TTgr2=6l~?GxCJt8iGPUZk&~-%O+Qoe( znppI!!<4oh%v(zDe=1(pRlEd{wIzt$L9I4MSFT7j8Vy(OKYg_;KP zjTve3ogf03KJsDn4jJyfwY4={Hm3XUwqVLZaF^sCDCkKa{}UU^hW zgF5%`|68XRCfl@>?=bY7Ta%blqu7N<8kxNFEj)%}P7A1={1NOi)OdBHDo=Dx#0Q}3 zV=NynhfIK2t063yobNP66d6)3+dGD^!cKb{ik-D9dGa86dS;@M6YpGVB9l z8F_eGR;r~ue=;w}UFb)@yYmOM?2?Kh=cTh7p|C~4^ped`D|_Smti6{+$LO2Gvp4ni z)*pnUl|*G^Oh76Y2mA0@A(CR8W0G+OU_U)KdX3XAOt#m9kcm%_Mo#tn&kn)CvSb6b z)wwHgcFfPu#~P(5Otv?G2A^qGeZ9~j0w!00+3C*|lYGqi6{F|9`?0e_Wqvthcs{%N zFadRSbydO6-A(=-bVGo^1F6Oz!&WE{ynW5(U19!iP^6)~3rJCqTqD9lsw)~<@oSe4 z)l#}eD5FkFz-mZ*1Wq!jADw*wOp)O0fW$y4Lq}$lBN*&>q_ivzw8$-_0o`##7Mb|S zk926z!Tp7qtfe}{E7%CLEDcn7XU0hUG}5shvIPXCP${<^sr|IneocUuz+`=S-Cmke zE!__lN&`ekZ{pcS^XGR4b3iy3yvA1YjMUz+ePB~r`|n__*~-(;fvQYxZ@OyG)Yb(( z@(E@CcqR$pfRwMWiemG(Zz%**yyKZVH!0*{0OiI4Q%9xvNJC8pLmwmXuX~W1wqJ;h zh;BxCaK1jaJucx?c3y!c^iWr5TGAZQIh1A$hg;-hCBr!to*c|g+x3wDbXz*&5X{HH zeVR&oSpM6zTJ$Zq%j=7EGV5%wrTR2g`VW9@1??f!qJh8S!aA*D2S&U^@!wkEXdxYR z2`QKi$nwrO)p_?8rY&%Vqszka=ae^7M~@p7pYtS6K3ayVRZ!K$r?|~!4_$2hehg0? 
zG1)YqDV!>|;J=zn4up5VYckA0N<=OS6?Z#o*+wnOqrk)?y^>C;Qxu{@UJFpC6=m0+ z3AV#JBA<1YH?Ak~fANA&YsY65-la#q@&bEXzF@BywgCzlNjsAywhj^hDW2I+g(7^j{$wWVNY)1!|n2FZRsd(V)*OuQ#j<u0)yu6=JOkA8%xp|wDUmjOquN@>!J@nVX z7{5i7F)g*h4x^&V-{cUdWve~&UOm59qxtgA;+hK6$SL_q^yM=bV2u^t1u3+Jhl@&^ zN<6KZtfR@lc|+JnsczC7fFA*<~p?F&EB~>~{Ez|kZg@CN!ciN|t#zUa2u$e{u z$5SsB2+Q<@#8#56);L0@U@!A`NoumWCG+o7IJ@cceX$r2iT4hx2T=1Q9$bjgfwvBc zY^0t_j{{>5fE?GRt0KqysMuiBNf0BJid~HC?IDpIf*-bt2m5qWb0-WW#CXoC8nO&l ztKsh~S@r&qgDFJ6G`AA!cK3)ovKkNUG4OCTG&-B?mK^Gt#+u$wD!L;BD!4Y%- zp!z8#gW!~&#=V#Y9Yl>3<%AdVz!2*0odx*8n}2fE(siHRrfWhrU0UN$6YHaT&FKRT z(YF0Y!x?gyS-U0oVx|Ivg4#f`gD5KYtKH4u!d#TGtGL_*+|Nu$U@YjNhS1UhtM`vM zykEV)YQY_j2F7=>PorWAPlgMuWRwCHvZ5;194atR&96(R02=c5yqIRv7t}@~(Jm@^ z-HciL2uy?{v%95%?;Z0FNa|;87XKe(XBiM>zP|r;QA89$6j@LSDQQs=MM@APMY=&c zMp{5xQAAKekPr!_2N*`Fp%JB1YDOAKY3Uf^e?Pis_w1fMzw_Uh`@&u4d7kh0j_dkd zOpzuhx#RbJCMbvxKh6lE3$i=TYvS; z&C4pbY=zS?#uWH$9Y(m2i?nka-#>Ic=r8dtl+W52z68Yq{vc+;@bp-@nuIW0l{JDSqsrCIyZHw^T zj01;ZY9#n-z%S#1x6JoIxo15y{-$k8L0ah6dFN-#+n2E@{?CpdmR**tTUkspM;CSZc@Sbn~o zg=xU(O!UJNKV7MPZO~G z2nG)NQv;494|yM--O#~rWi)Jpb`JI&1WFD^f#5l? 
zf*M34_q(iJff#EyB}ZGeY*MkdTiY*IQHRrVK&G@x(n>$^t50mJ>Uh*$6$B;p)Cy&z z-cI>T7}TcOp=cEtP{-FLvz}?dTnX(dPz-M18XS3~@6FFw++B7sp6)Od#*^%#0_gre=7)Mqey-8JLA96g?zZ_gY>3gI<3zlkWgB&&9A3z!Kt-5Ti z_MhuhQofX17hYv%Slv%9ZTeEVuza?P_wkKyx-WcTSq zk#lzcFKr6f!=z!&f2h;p@rNf{UNov&XC}7j+tziRP~QZvyIM1PIfMNzQoSkA8&!VSk!pp`Uaa$%p6 zFUAgZjBWUlVs(+xOmr5NnTDNoTMqxLZ@6peubkh7>vdi}8UZFSYR0avmD$Yj-2D;s zJcUFb8U;JiKskzpP8dd>Z0k25HADQSe; ztPAv25Z`1lwphm)jfgQwf%Szz^^TmntXPRS^p49jdd>>G&Sri3)nxAYM7>&Cz>w)2oALg4` zS7KdQ)sL$gTvP>iM)5E{UybZ-n$|VtmT16!P|pOvS{ME%EmGa3t)E}2s&yDHet$~P zzr{?aGCr{YSlXdu8x9@%4mVvd2uA5to>C!Ubbv)oD+&Bz4t2BYu%PbJ;J$v6Xf@3x z#tK)s&Dk9q#xfI93Gh%t{bu*P`^8GU+C5iE*|PV5U?VSH&A$r3p^j}k3-`CIH6omo zgF|+IpE_X=f;!h=WcT!Zrzk)jop!g~J#CH87xT?~ju{{QumtVdXaEP4T>2OYg zO*dDk5W$}GpXW?omL68ELwI=@1hwDx`wn8yqYc(?#2%%--5=n`YD&DJS19Q;BdfI8 zdH$E7%hW4lr=8?Lp|ZJBeamEo#1=MKIMu`y%vaw=B8b*At#~OELa@^j_h^k|f zb`0*8oi)e0v`_D@f*TUdq>0&<`ke^c&h$lLjIL5Rt?1jfDE3xgZQ|)))!{fb`}LbM z$U-+{Ckz<_y8+>3+DQh+EB2#W!8M|6ldI#JjRidi+nR)xWfY)_=w1H|!|d1!=ScNtKmJS}=mKfYxmN9G%QtsmG$0GpQT?*}?Kzs4ijK`X zTp5dZ8+ef`|zKoC|VyU0$1TmME*iDy&JTvb7VNf#gZJh7F37|c4PT}m z-TFEfw2MFTz@y5bMc<*1(SZ*KfPhFzCJh*)zPqMquRJ8S7~3!0gjA`_)U^E?E}EQ} zW6Ljr_==)vKKpd}HjurV4|q*f^h}ua>*pueHd@|0d0pgwD(xJVT1&2vtPOtjlP(VK zu%hon0y^SHK0MdjGQ|IwA-0IYSOH^flRylkr2T_$moh6^wd&EDAj;kHTF7lEaRwv` z)pd1!sKd)Z^Z)j^B8M1T)aH(8%jYOZ7Z#MORxM%Q{uf5;zog@lhr{!~9dg_NM%{S@ zC%=CBNeY$Bw_>=+_qKjwME}WDRiWT~KZ@_O(Z#o9O1L@k-!j{KddsRlGD}wQc$YdqjOS`U&O)gQm6dwzE-jaiplPsQ(`MgXe}u3nmmn@d&dDiv z_wzH7`czOJEv;AX8AO;uiW>)l=NTQ@$T8E3Ub$*kPc=|@g_E<2ChzKKt>EC{K}I`$ z`yn6RGtYtY1-q1=SXw&@t|C4U$=su)I;xz|z!2pk^jZ$!TKA8uwNR@uKQ>`M=e+ow z4h?EhcFr(8@30b^mS4V=Xv+jYKLfALjFf6~S+uuDU|Pkir*!_5!^f_Ev#;`hVA`S0rNevr?tZt^@;xo@c8kNK>r`P+xwbj%6?P*(4K3sdY*VcV=~ zVrQoFTawwDx?Ezmxb%3ZQafl>$q;i#5<5L#enPQi@CTXLr!ntc{1~KCv9`xqQG2HY zg(i)=?>Elo$pS0RisEUmPF=EMFbA9Mr`!M1JycoCrIvcHT-8}=ya#f`U_{ZiL%akg zh6Z;*8ERS)6OWez+_2()+5M;>;B7Qqm~>Es@ZuN_3+-$&53pOen}$F8AuXh3m(ljS 
zJKa0!Lg&Oyjo}4B4rtM3SnFlvyl$%vu(PE_j{%|K7do_;t}I%RUHFWHoIQQ?=Z^BxwkNAaFR!x$;f9jRsIcI6->Uzk!JVt zlP8UzD8+dSPz3z)!*4Da9R%Ti#;C~SqGoz~)8!t;lZfr^d^Jrf{5U;0{bm7h5YBgWyf7S9_F zm@EJp;1Uvr3vS2NofRx%63EOmn|;Lz^3Cr3QXl2X)eQ3(uBS_SKj8}X*d+rr+r@1&MAH*#AE>h$D2TR$^br?l&b$}QK)yiUDH`UJxw4QcgxjZ`v;>AKnGLNrRZn%dzd6> zGoMd;GKIZr8D*nfcM*Gk0xv&%dD6JGiAE;ky=fBQye}+_vlWlsZv&O~OM9ptObYMz zBnhbiBjB>tX9<~*)0{9qZ0`xOhtp&$Ax^DolerSF(XLj%=6Dp zhh<>0g&7Ce!Fz%(w0Giv4g#3>mfRcqMb-GD9Pw-35QTXTNRDxu-RmPF^lurb6NqnS4Zr-MI0K(MZ}Zmb%s>a%e7o^k2#O^R zt@WaY)9!zILg6B*clQ}Ft0{^%kFa|l#?uktDl0jl)gPfGFP}>YKlNXh_S;#s8g?xY zhdu5Trez6(glqmaYGsp`XLP0qClt6W`!ub@;!FbfRV?=NonKS3OkU3d4GRdZd4ooe zNWM3B_u!1$C$sM{=GlX><~7#X!t?HNvyftWHf@4qUK>w=&FPqAK}0k>e=V)nC$C&5 zbh6E9uTZ82n@zH>L}~u@jQU8U$kTBq?~_MdF0Q||co64p|N77UeOumoXQF7v0#d6zSKfc{g@x^Bj5+zfwm1kr@XhK<)%bin|%! z!rsIijxoz01igA?WDAYsSRYVkf=WUm0$Qrn-lfWSiE^LtyCeiqf3==0-n>n#FBlBaklLdNDrxg z_0=#mLBoMD??`)N`E%O}EveAOa(Zco^%C=0(+~{@?}z51IR+#%urw|Bd8|?CgGY$X zc1Hm`sRG^l1P*>LMVPm~AIE2gQ{m;~I40$$W2?5Z(Vr^cnt^brS#)sA3;dBGx6zlx zs=EL-)9@?-7;v(_HF-rEtjEw(8HtY|%%+D`($g;hFG z^l!!8y$4ICBlhWSWhLYI2Tz2rK4`@7U0;igW;-~TLRgAQezn0 zUsJItZi^uWrOUl`C^KtAtRB8n>E|`7jlax9&9B_3?LiDuQtUI$L@V{O5(-B|C&oPEWD8_eE&F_BHHo^*`A z>bE@qk2$yW^5$e>6O^Ob>(lzw+$riQIVIs*2G~(CL%7w(At@cVzH5y18S7^qDXED` zyD+Mq{jbY!nFECHg%kFLy3*@!RYgV=DN}(OUi=D7Gv%Mc;4IWQ z%UjzvlDE|w`TIaG&tv#EG9Cq^LVzJU{?~D+2K*PPl>lxIFE=_)P~$sC65M zBy#)~ijdPy0Dt*ayE`4 z!wx?Em&Mw(2V8GlbKX~RBRpCZ(?q5AqGMX#uIBjzg!mJPrqIyr9>XIEI=PR?b0O4# zt&V5ZWKiRidp*2Io#d4y0DG8!jk&1wwTdM!fIQdOwgBdC0X1kr4;X*wkKce9-j}-F zq8_^jOx{4a?R4zqrB?t>YH}$qw&%QQDf+!{2*RrJ z@eSIpaCG?VKQBl2xh@PURjnSp4^3qXSk62uTO*6Mb{=}(hc3MI>+4Yjz;nsEDLo_HTr0}B;8|<%bv60OIK@wr5hjC<*ET^x~HUe7f zAXqj)ZsKiKH8rS+UyZt+Mo*30^MV-x>RgfGDIY;LcWK{-t8Rwj;o(8MgFk-X6AQ9R z5wjb<@=HJwaIJmhiX;n#2rnNW!}k-Sv()KlM%f_cf$cDa7xV`?f`(8YRCk#dRsHo$ zxL65sTG42A`ori;^*P{njAp~l$Fc*8@F@$BquJ^w%4}>N7er~NRJP8rLy`uYJ6&sX zCca+m7*y9qQ@}%3-=X@&i8~~@0W(p~`Z!5OL+*kg%xqqyuqoTXO+jVViKi4?_{3Hp 
zOMh^?rE*4o?V27d8*llyg;Rig{r7)vaxz$3Tjv6rk32{GBcxh#Lgs@yX8}kwxKc+3 z4G@{zpwUo;x5nf0V-76d5T5P?!MxvgK0@-R5rg~KF@Vgz$1w?!|Ffy*huy+?snt@h zBZ*^0M|FP=6DJMH9v+NvkBs%3?%UC{b(9nm2+dnJ^5Q)=J%Iz_Ql2Bz;guzmv^hOU zIx|U=@mBCK{)2nWXm8V%-POs5=4vb@S)Ny`vI9gE$l$V)zbYi<;QwYFHk&rQWI4#N zgi>td11KlplXqF1&cZ@mXj#%;ZHqxjpZ$ZZSB{DfJA2v??!3B=?SD?R(+$j~&&Ov_0yWIgnCjz<8bkXSy!%4H8- zp>#z5pjS7&1n#rKYAMBTdJ;Is^HUjUY1euz$y0KYYTuK|xiw&5X8OY@y>J`@I@rl$ z9UpTR-|=Oc9WHj6mjYC6ZSHt9y zlM@O(bu*{;FqCqfVAf zr&l|@7S%6oaACdeB&_(#KWM_V_+zjBaW!S$Fjh4ZA>2rdYpdxCSV@Dglm(~_Dxae6 zsu4ue8?wP`w{j(>pb=!tes|Rz8H5Bcmzm)Smnt7k$E;tF&OO@P9#E8D+siybT=L&J zbM}uVGN&i}ZT>T|?5C6HUq4w(7-D@c!2ut}Syta2i`xuH!S?hq|-pv|fw?hPxrsOOQ_3%VoHZKI4MX$luZKdN=;P77g!!TI`=3 zq(4ss_{mhoAREi>w|Q~aIjM?mjve1z=~PCB4KF~(+@tqWq=vJ|!os4_G*mRL zfT`#*z+MLLk5VAa3asyrg0S1XSNt&jnKKB%2gV$K;r*F$Rg*elC~7odz+0M17$9OE6} zJ8<6|k2Z(v&Iv3To-^UeR}N^^r)GQ|A7_KGu}EgACmHobNQT<^_TXD7pmwY~(}Ia2 z@AmSoJI6*t#a->!zXmYM%g8+O&PT9&f8+-tdXQQnSco{%sDXU&a{2uSX~`pR*(3Gb z6bn6EK%*g_cQhc#G^e^Q*so)1P zvHh8TcY)9xKo1^hu`txA1)X zuJ!!Q-KwQZ|Ia)2CqLp)5Qj!1ga9@jD$*%}k_oW{+ddJAoh{lPX6rI0#?pjRW1;YH^1(W1x(THhd98ohzq|{g6cdK zSc>!B0zlgw8zfyqLaEHx=Lgo>5en6xS#r21Wy`O^muuyb=&(3%x2!9nj{}#rA47uH zS;RcvI;v9g91E2eZ>!qj*>TwhZz4^DyE8^U`#$evi{O?nUZu2ls>UKwbfh}BXV4vB zg+A|q8^@bpcsQ_L=~}W)R$&>vXBH~4vga$ z78b%E3EA~qF=EdDO_v`_#|;8V{G~3Wv1Ly27xa7G*VHnAHNHAnHg9;DmbmE5=)}*l zU#K*Pv@n>G>-KRhjS30TKu;$4SHYG=7smBmjnNYC=CvO z*{(bI_o?~sKSSfAVUO;zw!>O{^~DO4V@tT`@kX51z0GX*g$*}Dp9H?dN+&}n{_&D% zm&B&Yd%51je>ePdS&GvT=2)Ksgs=8*PI)&!>%%iOM37Y9W{E-hZt*!6KZjOGcfdD^?XFX zv86^@K{-ULzgX^oar3SO#>t(3_^lTUSb1md21GteDV`qr4g#k?g38F4&B0SA9{h}! 
zzY+2IE>!MSl*_!}FXO@62{Bc;HhOY$dyqq)Vm zzowUE>1{60I5JzV_Rp4Sr~<7Z=jG4c^3<4cva`48PB|!b6tR0H8;z>#Au;d`b3743+00)JIkoVXv~6L8?1u5!w2d+yT)s*1h@YOJIMazkK)G zVg@YGZc!=v(8IDPF_))^eWN?v{_>HCbWKri-_v_H4_%>R;+4{E+Der6KQBVhu1we<^idxkjcW4)5*U6~iN8ht5<{OC)0;e0}``xFU# z#@mc$+fj>LZG%^rZWWJ?UmGj#Zfj_Qh3x1~<<~uBUH`&x9ioL?DqUp0%&Jazjb&)) zB}zNKJ^Sjt&gGa2=aLou4mBTvCEBHD(Z0-WIbNDFcJ%0(cgl73AtlQG)-+?40eiUn zn;s3bJ9(*taULDs#%>K28=Z;POpi~H{PDOXpVs{$7nI$E*wm}Ph>w9a4ZWXyky^-b zeRi{rau*8%d4H9sS-*t|fdyf_zZi8f5O#!9O85GyYCY}-RCmBmu~)BOKS<4?#V#I7@ z1iC=@TESLf5^mVvR9)Tys!_^yg$8A?PcN$B4-ohXl#zj+7V;mi`&!!$PGV(t~UApEuV5Xo;!TOz0Pd2^RajX|IHm$VQH7}&n$0MyGt{J(giKMUp_TG z##NtGD;lxzVyZN)=>s59w32<g{!-*3=@b64oi1l*d zHTm)1{pk*sr2`*%`~^m+rm$Biqx*FoJ){kC7VK>#!YHgUlP>EoOW7tApL@OD z!|ZgjK7I+FK}eT%{?YLK#|s{qB}-cqtlxjn8$9(eM(Kg}o0F3h$>E9=6ksah(<^K? z)cbL0>E>H~P1yV`FsHbb5lvwcU7&SAB2pVedVk%}}+bfQY^r-9BvLegGGA7~R}M5-M%Yo8V~3#beykEu(|jj_4Yw+tw3 zF@h>2PN+n4Z}pfRxK6VAg{_IK#i)-4u@{~p;M_ku ze;LTpQsi%_h_T(fbcmTvo{m;u?PFi#DQsH8_{YNpf=+F##nT3atvSU)h3 z>V3o0-L*R}cWO?lSFKMvzmp(TR&br&F7e!%Bw;JA%hVX!L|vZvv)AN^^=l<|pN53$ zO`kDz(vxUWyhLh(hp^@G=#pqz?5jl?Ne916HqIR$`&IPd-HW!9mvnY&ZQO$QWs%k= zgs#aBjeEPi<~S+JJTs`D?pq z;9?SF?McA4?S_}(?c29N!X?6jY*IYv?QwQRhr>BB%$pmFBbyS7kUOH9e(Q{z@a+1* z5G{uj1;HrwOm(@nxjsuRJ~6mw82=Qo{L?q-uk+r-Up-rUQMec`u$){7n?%yW%4v!t zf1G(G!&~E9wB}!;-e0g-{>|NP)!D9i)xQ8JEX-KRem`|ELsMJB*{1GibF4$MMuRo2 z1Tn5~;p~0`%AoG2vM2O!Z%U<(tORjoN-9!_o%S-A&Rj4wa`%|()IIJr^K8aYy?7N) zw>N@J7?Z@HPDQY~JbFONRiD|vwG{8Pmrv9P^LA>8aH?x^`=$kzR{z$`=Z`*q~rT!nHJ9 z^A|y>-7CAHy>*}4ex*EMXkL_OFqw2Zne>M4?CAIm zo7?juDbyz8_vom%ye>ON>^suzcMDocL>Wk^dBbz}&hJug8TLLdc;U;s`)SvhT2lTW zE5f4>hRMHnv1P&RBw|lR^UV1>4;nMS95k|Qjxok=8gK9HXh`}G2hzP*pXFpI{slnA z#>^QfPw0r!-QPQ*-_Z7N$~maaz@_|WaR+*H57DB@&3@;$;sw-*;Xrbz7+0c~>K3DX z?c*9a*u&%EvRODeW8wqf6aY$#RNU8XF#O) zXz%~zoEl}sJ16(;Ro;3_RKTP-PbK{(_nf(!n^Jef{?Cc$x4>L%!sWqKy?6B3nQR|a zh2^)(Zm&%1h3ftLv_lgzci=QwDiuhpEgC|n2s^JUZgy6SH(+n>=$iLFc(gAzH`p+c zKFp4}+y)!xgk8HIS7$GySFU=z6sH$KKU{G1#Tm1;-5#guq4ZfscIK4oe?Im5@7(?J 
z?yX)Kpan|@x3`&Lagho`;Q32;Kop8c;5F=uEXxGmn zeKeN$jKo%%(|8c#{ebBmC)^v+z$;Yg0Qb-Szb?9n-IW|KsvaeSSD+>e!t5-q$;u zJJsSSC)C)YEby0;!{W%Rc3*6UT7^oAKaLT;-*P8@|1YYX)Y>oRw+Icskw#_PhiPuV zco{4{x7FBtS?g1S znx2<$^rElvHsB+_P!zi~hHyF&Zgj5Se3fvfM@8~zfNb|G4fDCzsJZ{^?f4>Ha?WmA zQrDJ{6Hoq1yc+PbDLy+}^VMAt3fu4ldA?%|X;2Ub_iScj3-&nt&`Xwy5NGo)I1GPC z6hv6+zQn$v*~XfiKvF^i!4upk=tPfJfUBq5(9;wi+rP~K;TSpg2$?1PuCr={$V@B; z#H1r_=sJfZ4!_*oT{)HwV_V>>VYf?7aw- z=}P>q(vL0UU1&AoV@vC8BoWjB{{B5bC<_19JsyqNMLntarQEXz8c7{%q&%}Evy0%y z?RV;{;B&S?isNnVB53b&My8WM*gNl;f6+_tL5x(6b$OgPCn z)$4d37P-%ESS-+X!gf9tiPwK#l@c_d3g@yB$sInXWP`a=o+7R5X9FJY%#h2M87^Np70!n4FK^ zIQM~kx3fALsU3oCI#V5^z;La{Xa^X68SEmzN}oh5K|w-dYkPb_tr5pg_cSgmpe4WQCy`yYx}X!YT-tuOA{s;Qa%8C`T3EfNXlm*Up~QI z{vxg@DI55+Wc=F@b6?R_;rrk3l%o$@Bbz?)*m_|Uo~K+zCs~GKpaME1CkXk3kR!7j{hQFLe5U7{lR>C<<(GB|snQZ#fJIIp;cJ(HA6gH!>s zt5W1l`!UmE-GT?+<@7K#_))1b<0N0k7NTO0XkV4UX%)NdoTn=_&t^Yl@;ay8fbIM3 zH|hy0J=AI>^bi}^^f{`8LC%k=jOlHOs+N)ddJ=iAZ`}8rk$R>4vpLp}Y0@>iY?P00%F=fdPX?o zG&$XI)d}E@&lcc>klru(CT0BnFOj`L>krHcqFVh}m%vmuwjxPMilawe6KS8!oIZc9 zXprHhd6nQGHp;7bSGs4Zoq=w(@mw)##DFWo6`-nb1Ee%^J5ESc#>U>tKxZZUfd9MG zO-ZX~Iy%`G?KJb0g2imRaFI(MNnM}`C*f|(&gRTQ-ipFQsN|8Z0k!#)l{-G*zGaNW z?Ap)np<{#{`)<$_GF6L}d{>FQ%iWTZLVQEonq@AlkX1WY%JyC2jY#nqC`ap)NLxwv zDom@-EG=>KH$~bj+{B(uX;!H{KfGS7d2|Bf*EWVSk=gJEPj`c{`Y|Pw>L_G2LX1}z1 zgB0GL5(XjDF&b(Y3YDWWE(&lxKD&xlxJP^F>glr@7Md!KnQODXT=If&mM_hS0vnLV zkLHG^tllS~Be*gAA>R&LCJTQ?3PP7gbPT=MIqe9&_g)KKo|rLws&{@ZW9`oMCpo7w zr~EdAT1VRtgl6h%IF_IVrfECQzj@0iW#@mz6?%di-^$7r?S5zq#h(ia;`Ib~?fdtp z9B-TM)e*->B0FAOR+4>{{Ien$e=rqYnYh7$o;J|%_kz#>D(Ub71ukjfOmYT>kdI(wO<8R$U#X0L!(m1!oH?2UptEl8rBc-E@4 zR)+8s>nHKamMlBnkRL7#u5gdaTe5eyc=z`}8^Tqa?UWd#8iw%^ogmm1yUa$t=W^ch=)$I=?rABp z+?eJ>rw!=R zxN?wf0&IXE^b0!^6gQhvD!*NpRh0IXHsOX4Bv3ST>(qdhKjSM1#O}L}=W&(oI(}qy zVZ3{|7}O&ffn6Fat**!B^7jUXR>fYGnda)*Ne9#7E8bwgo?fKyC?q*b`mLgO9q4-Y z%IptfPYn%P?VX3w#P2}LA4`I(@bPmpLP2(OI6xoz_VQxA+a{RXZ~a32oU*v*R%QH~ zraIG0$91|6k&(TF6CFlM|Kmj0jX`wr$P~Z)W}s){*RNmKV*5@ho{Wo*w&bUo0I3NZ 
zUy)=X2CB9}?%EHJ_8WOE`omZ>&~L_8#+^!^h@ne51lH=+rA0WnS%FEUVZ}|8IuD~w z@?@%PpQ~HZT)2=g=Ot@x&!jtX-#AB>OH~Ll-1HQ`(#w9XVzNHdtBdLv9k+}8fMh?@ zT;ST@wWwEzqGzV2fXjX<^U?q{6!vGo!ht`=-Syyex3i-%RDu1B+4L|-0d<9hU3|H3 zVPpP=+gi=**R_gdoBNP^njSARxY@^bNaS?3lQ_f{nTf(#4VB}|UDsLr)V-zMk@7yh zUExh^Z(Z-r{OAu-4`U01#11yllpoLK{PVp2Ru0=GhxAHFND#5=me|8g7$~V}^*zA> zO@?ELMa}iri|bhjb)05mh9Ci`-F{Iwp#Es@WEN4AkU*hwvI#u+tt5W4UzI8Q{(FT+ z2O;TPMX(g^=FRi*z2jXIL}NZu9q0t9RGw{&6Chvi((yF#xBo^pTyaqsfT8c6IyMRWwaQU_sgv&~o4p`3zq z#&0GXd^KZJY$G@Vgl5Pph3T<`NXtG8I~kkdv0$cPcjlc5D6*+ie>|h46X80LEC(s1 z2h+tNJ$9FZ6>4$v+zfX(iEG>vv5<|bhmur0V-q6kNW7us=s&jl&F`k*)psz+K`L66 zWNC-P#mnQ@Gz$H;drcifA-kY!1EzN~V#<8(s%*M(bx5%vS=wrJ)&AcGbiMWBLjzm` ziVG#q)$v?Jl+(=g2#^=pxOFLZ-lsMPubaM58b%v@*3lhhYv2zz5Vn z-N#8V$9+j_|3PA1(i^^TKU!0OI4JCIaFG}AclPK=a;V6V@z?+yI)org>Dtx zdreUCqdcfkLr0^L;}=7&0%m_I^!zK2(B@ttg$TU?r>*nPJJz}~X z8K(;J-7>Tr9*r4vBfKmv;3!T}B}?UkYnETE)VrUPaOSMA74`-kxC|Afr;mKT1h}TN zF7$#>Bdekp?Nf@A#NbaSZ)dd5q5{gzHwS>q+IWn6tIbDnUL$A4#U@Y`1L`S={tAN) z)B97zH(E&xr9gL=J?}zLcekjyn7g*{e|6hoWo05TeN#5;$u+-V({;{$YbrezD3u;k zCk|mPTjO3JUJbpyD@6}`!Hb||hXD#Pc@A8CQOlE8`@Sjv@6&ke6Ihj-z%r{|{rDTX zoHIC;oaXzldOqurh6^?9)~AmjSFRKh^ztl}dWsyA%cJ~e=Hzuw>W>0rcRTIH1&1Al zC8JNf3N~#ybGHoqp98T`>*(#NLk$>~%g>Shc;>qU#RW}}rhM<5EEiPIa(-Y~%`CBz z__EZu0OxbHIxFU^OLychIrG|k=}`3DacZ?0A^~TXH5|p|FmQ34;Z>e*h@PFxq1;hi zu-3j=Ef!xkWxfh%q=z#njg)_-+;2l|QoDKN_Xip{?9sMDP8h?#*PMngk*?IDejs9A zGw=imt#n_Mz=2zQg1WL1W3iLSv8`h9h{!Ejlahpj+T{l4N$4j9{V- zwd*CtUVmJtVNk-3sS~&1%x^s^p?c6Wv755IKjl3X3AYMg!&<9~TVt(T&Q3gV-Hrco ze>Dl>g>ZyKF3FsF_Umf;gS`jT$B(nYfb-yP{Oi{!EZhIbx^E`*5i0=}F$!>$!qdv` zDL`qx5=2GK_7|%Sdd%>~lWoA%ZPn_|(Abz6(Ml*foLYZx4Ob}#6E$@^|KCJltT)3h zl{W0*sTy^Fu)k7wspW}rhubQXyF=|Gq$$vSoL^*UXCNRE{eqpWxSK7lMD>?XA^z4| z+&9n*={W#ebwWXVI)Hq+9I%G?(Y7KV@)fcDI$aVUjh_vl)LpJXucJ@kO&uwb?gLVE zc%F@d3CBJ$5vH@(@4^l%>DCSOyThE9i6)Eb0>>ro9$ernN_&EfIL?t&10O0~kl#z|wg|f#g)P}L$kVs#x&O`W)d3&7X9&nbtDS?ja@*#7d-F19q|5kdSuj(S8 zpKYO`$!yEyJ66ZiZ-R_wR1iC4k|jjX1-e-?E}ZC*lMHzxtFe%&OEQK1MwA7>kDbA4 
z_L`+4)4Q2ZxkNa~aiagw^87to?jP&c8DG)};?~wy=JTna;x`TVyep)vYQDa{L|J$s zRQk;O*8zYqu2EJANZ0l?WjRgt|ozWm%p%8e#u{om7X!Id+3t@r@~S9WQR z5?D%yR)kQ}GQD zL;Z09MzkzE`Qkep@eoudL(dDx@Rph)EMkD3G_>BLhxqo8$5tlXQBXDYE}ZC=1DKtD zX?QMksw9^`&*9?L(Br!q#EgEAVNl+ovjQHaD&0R7xdr*LUGZv%IdJp9T4@)50YS@} z%Q$wKX(B<(7#aiiL$}JcQuXRF(1sm`qb($Q?-bD&StHqmkbrtS zh@PsDf!ss3ZKlF{y`hnyB@b$?pZ$WlzJt;;EB>1yPnwL<2eDI_Z?C^4`o^zl(R{xq zQe3IH{jB+m=qFy7Jf10XdIJlkPtpxejY7zb}7`m#ye3%;UD#pYBxv)z#}j@#;7Qe^U`=^inXDpXOvnD{_b< zwa3&WIE&78bLB?{sjVxP->m5x6e!oI-GX5(7~^0qP&)+Bfp9%&i!x%Sp#`{GKxW5W zd4H3Z3n%$z?tMl84Do*#mUL3QrvzFN(V`T|9G6La8ddu+Sg@UH3VU(A`Q;{X+VK87 zf2lCm7|El=m5Xm8>MA|kNiWkhU*Hkj#1@N=4WSX=u2IdB+ zu1ntH+g3$ucjFb`NH}F>Eg%;bBf97RZ0zOhZU6U{rod`#wuc3>-L*Dz-+T@l52E32 zB%u^}L(Ktcd#w_ftIiW&Zk87VI8%_DTft*}9@D`!Eo48aKj=Pr2ApxX9m+)!Nu8e& z@2174^HZf3?}C!4vdGzSM~BY$%Nv~WjZWJFHh>cfBvI<>{{8e$S8l+~^x6C=x&mxK z#RYHrkr`X0`j44ccr^aERRwD}!lL0E+VZ%wMRLJ%@3gx~`|(&qvMPDh-NX#E-<8$~ z0p*|}F>y4^+4+)T9;YI~LQ}*LxA+Es|Nic$4re*#h6-&j>*mK{i4w4z?>yVvVBG0R ziuF~on2(*hl`&7%emzwc`Lep<4(bpyQ(Gx6C%7kw@ zBfg)FI*pijr4*%GG>tLpKIQh)-dwn&y5?-Sk}-TkABWzgvB&xSIw>eAyBbI2?@{0` z>ueC3KQ~nhV2R1#<=-rPIG*>Jz!jJ}usv@<41j7C+AJ_g{Z$*o&5S$rzf1U!)ipH@ zh;#4PG=;m;e%z0{<_AifUWJ~ckzHdGMma8HvsP&WnHzJwJL=y z;A8RY&5f3qQZ>puuJ6F)fq?IwGUj9d)Lt{4|mkZlVh5*B*rLw1%zvz75og_ zw=n;62i&Kj6qEze1QOdN85Zw(KPUf)9Xe^+yK`R6v7pX`8>$mr$dIt%xTb6&V9hT> z%hUg4AB9G)tuPJ^g+`P=D<1mx&BSASEXp+^Y&w&e{e-i=wcxxHviDk#kMl+)Uun$v zxtL$v<$G0vN@LI6wHd=+dMgN1{YK2U_>F0}pswu+Phq(tlZGVE)r|crYbPDV1*fXa zQ}ph^Z9yQ<9}h=;|9S^muA%yc#(&INj_AX4PO1f|Kp6;d@bU;>Y+lX4({!N6Wwq=_ zL+h6fvpg?N)Uf_}^ux6sInAMk(>*%7kxTq*%11-=1S^{O5Bkm2`B}RMdF>%?L59!; zR?6cVk;YYmEhqJ#`TY2o;-)d;traaw+_ZkE7Ym7xl222RO9xiYzsbBI?p*o^((wJBG{89u6P_d08uhU-EdFRw>N;lzi(S~k^FXgrfa5Xf z=c!s;dKE<;icpf3AM@^((GJ z#>*nI=p(m~HX$kGrrhXTF>J9n^gE(yF# zlTB4sj?OoAJxV}|Z{lI4L!;lPu%`9@SbOt8D%Z7t{4}W0s1QknG7HJLC}mitGS3tu zL#C``E|JVEvkYY(m&}$74Q5KFWgcS5Ov;q;cRlug-*evc?#}7F-{1ag@2$P9bwAI2 
z-`DlIKEr~ng|i0o;GaJI&=DFiv&Q-0?724FEE(>FH2RZw_PVy7^6^UgoM5|k31quav2sh|Dg9VnImsdkizVSGI zA|cEy!AaWYlCubMmX=8~cbM|yeZH1k zg4xwtNG5NoH(B8^1&^!a-9UA^)0mr9_j>t*$@>*j_GY6*1K!HZ_+r3|=4nsVpI>r) zeRzrzdp}qTwN&9DF~7>^@ZnbP%J!8;k^23butoWr9(h_i-*6GWz5Znf9vJkAs+p&J z;rm)b9ItY*v6>9tn0S^w&3R#w00Y0w5IF!9?PxQzdk$_j^#mk)CKP8bkDANbo`9W;k^bG=AtCR;+NviBd0gL=d=IKRZ$+ zGKrn{4Fnaxxj;i2yM8e9R5O>S?&P}5g<)g339Zlm03NFTD%MX0N;$FRo}~KAF}qDo z1sNk{fn_rw1`b~^omZ#X3C0qKkfK@>zucYdJ{PGAZAL72V~P1K)d*dVk2%axNfaOW zL_Wdm+;NYx+L>pDtC!l{fR>-J21JWV1Ep6YKW4p<={xrf5FxbF3B7}PG&vwXzNok-u3PFWeF8^>nHu5LTj+~e zCU1unk(sALKMoul;e%fxFoQYmX#XVz0DiNMCj&#G#}jl`wEHiAR(t#P!HaBHOlzl$ zn)_USnNP%k$R|0ETBpt7vYigv%E%=woa=C?e#%bpK>3Q%vnlRc zTc6#~JsYCN7`tu$K9Pt_e}Rd`xZ*va^D#X(4F?HRN0xLoA#T{C^O0RVY}cau4Y@ER zpR&v6aem0vG2CWDo>rYTP!7qBP{ktwj)7Id;l z%D67pQh)C;4yL5gMIc;0JJkY}R5|DCKPCb;=HJ}h+}xZ^{=J6?JUNwWcel!6Uzd0! z@)b6JkEOgI2sI@4Kl|*raAKYu=v7)^iE}q^%Wiu)&jazy2hvV*GGyETBf(DHc5pj* zi#`23$>)d>YWK@}>*Q3usw#eEf|{q&wg77hhYlnl-#ZgVaT-X zTuOD4^pBCR>`H?YTkeXauC8rk5W2l`9m`;mF(C3iY94_2xAw`8r719Ca;*oODb3VQ z?QCtg8!I1EysC!BbljuCJEb5bUWXU?x^+$CHaz_jioiTkfrnZIgL1-5b1Kf``kW<* z0y=&FCL;YX{7?8@HxZ;Z0LZRWFsm;N4%v5mA@F;AxB7+_W_Y@f1EKLEl#L+(y1=$P zOjq$pd;3tNJ|zueYZ3^N&sATxn%Kj;f?bZ zh@)<=S_Z?tUo&Li17>%(=J^Tdu@06Jqmcv5ru^3ZCAq+lm3elU<~&d3687*p@qsa3 zhsj%j^ZL7)?DF^j9T8>%Ql8tZ++5-ZsP}#anHn1uVfo#W)`*~^Qn&N%D(UJoFr3Z_ zV}fzp&UV`(6mZQ@tuGlA%!s|NE-iI@wdFBevCD(ew#Bq$v6=T@kgJ)JVi&s>K7A$c z#eRCS@Plcnm!7Ox!09{HnaPLvf6^}w&SW=c_0VT#U++Gm9=e3wv;9{AUc~HlZeVC6 zEk3_u$lXJFxLA2?(K|Rh8p598J|;c9A%-WSV~QOk)#5aiy*Z%_GG)X|R%L;}hVTfQ zwRs{z`Gs_2gCf|=mJdQ!y8gLYth^?7?|(n+rROX5-)x_6+I=^?EKJ$%4u`PDGz3J& zMnhm~M>oSfWJ%^*KHI$W{#=pF9lkv&dfk_X&n*c9WH0=t_-Fjn@r|^>i>i)FfO1TB zfL6~_z}9uC&iTQ#IT^oFg)we9w@gf3+UyO4%1aG)pZbzak0fI(LNrt zXcfP=JX>soEB+T;m>@dgBtcHQjuS@NGhfI3d8$~EIE9rp&dMY0`l1m3ymNp1I3Q#HrH|?f{4ieoF`3vJw#|v6S`7jD7<REWY#(u;Ng`~9{TL5* z3clyZupbBD>GuJckZqf<%8$oH+@1ZJKU&x-vsJfQJ$nwIpZP2CV1b9W zTrnN{z(@89kQ`AwosYi>z{phboGUc(?Z9guGzGyG79-xgZ4M;maD?(EO2>@hJ@I%- 
z5GnwMbwJ^&pv77#6=BF!8WtV|^kA7oUIBFv_>aBK-SQjs>)aME4WZ{ohD&}M2yFcV zcs|uM8)qWS8EZ6FGr19jM+LMPE0fTw4?=whn zyEZKKG0?`Q=-1DG-}mp|O~5c$B~yi^bU+7Y<_Ke5ue?V4nho{8*HL-<`uZyWEYvBs zXs>X3-~_3+a{^FXozk;2>|seC z@TCV|$-QoC<^pLx8s#mOg z2X|8Z07&S5&Je3DB;bS(U;$)AK-P3Nff4C^I`rRw^nH5+a1aXfBh|Ehs|*q4HELFQ z_A`@)mVwmW)vPBKs~-YVpm5|*3J))3*uj?zz~Hg&ini#wg~t$~Ge`FxH`#H7|M|1C zq+tim#4;v=d`Ums=z>fSp;lfzEws|h`%7HKQg~$Z=MN;=b2U(Az$=d5EQmNU(XSiQ zzvnmDD~R0yrS6V|%fy~@QZd_$JMrT=6He?Z*JX4j^-r<2yJ+4}wm5`(>>zt{veM5x z=<%uRGT%5Ir!&p2=l>1;c>i8DhihdM64hp=QMU;vqmC4YZC1G_+>DNoF&i4J6+AQK zIx&I0lCBhQF=uzB>XaTew_XMXyK|XW=NYbwv}(w((Rs6yj3C^cAdD*4(O@lGPl)3z zqZdf%Sj|2t+pk%k;jrBO&Lg7w=z6x;CE}jk>Z80Rf%SU*#}2z+Wnpv0oO}9!@9dbl zZfkytKI(J-a?jS&z`32aeG?}1)MOJTLxinrT57M{Ab%0Lb#IAnU@O)fB-*|c=v^E7 zHkfWBw?%U^`*^J5BLhVR?*Zq9w=NiZ`^H?4Gv5XAmT&db5X6-BEjXrK=Kb3xLDO-RJwF1m^^CK*ndWh`ZR5ZAXUZ*)$4g*C`@4J6nzfp z*LwV)2a^|D>YN89lgToV>#|i~t-FKp7kWqWbVBZLo%u-_OaD+e2<#4ZTmVDEioP6) zgg*xO!H~F*&19(cw$avjp56Y>BFpLrd$XCh1d9yl7!jD;(kRO>lV1T}O!UICCq>d4%GGw3t4?PP#CqItr^$Es za?tHl90y`+W#qE`#Zt-G`$CnkZdeC~W5?tz>@9zy$64%kQ)0G7MtJ&+JEc_%Dn-|< z=S~!zmmMegJVO!b5AN?xGWN3wZu$~YoSrQ+bizsC+MB`6hXu2S)o!XsBxSGc@_#G- z;QtorGkq>~3}_1y0*NYx}yWCoAIf3vL4UlH9kC_+HT%2))(5#M2& zsqGT)3rh=}|7NJ7c5Lq)PIO<-6kG08owpmY1WR}zn|>)bwhJ8g^vqs>-!TB#*Yl6W zz3(v1C9;jH_KY<7)?N-;J=avV7w8|AI_M9mKdTg0kkU+fLrByxth1r#k&>nk?Oz{E zo66?xH%_rIQO9#lUU4$%VTx_8rw35WAVVkXTe79cY|VJ3#e)!rdeC?m1;!mYOTOFmLy++6umsoa{VMWVuHm zhgt=u@TDV=^!a87|5kSPiDx!HyvfD8e;#$qcKH_aivNk~VLyO?y8o)ZRMtHx4KRvJ z;6)w|wF2OCei*O9Z>b~B{H_~wrO0%}O;O)-2HZGuDxvPz9KQap3oixT<%`BI5Rn*u zk@(c3B}(dCRaU|D$*{zbi|OK-Y&n)Yl^V1PHZOy(sp;?ZI!_g(_;>o|GUa|P!FKH! 
zaP15DxoS7IK_2wtjSWs=P(ZzOMiE?RhiF-vcZ&6BKiy-+TZ4@_8CQ+6ja)5+S%~5&9V#4F- zv-NbX z<9tYytgNhz0O?K>@s315+QU7ze>f!LV7xnUacPlS*Q5s&XM4{JnCYexCG5yC4LUa1Ymg${a zJ`4IZH5_457pmoz)spg?_P43Xs9!Z8=yn)vua#+;dYjiVR3cuy+C ztLkjlM*g6g(>X;W?+QQTTgHNc5l<`+l;??>%pIrMBHTMm{pnr)*p=W8w=!u^HWJ6K zpVR#p%csS9n4G*rsueOTn-MG+2%xW5C_Mh{3mF^Xu{C2zar!hXgx7|^GRT5O+<1OH zeTQd=c8cr?AQ6V}-EESMU{_xfvLTz z#JJ|oi93tKuvM0CvX+UI!UHqn7uK^y30FR`8W2{Wr+t)&Y}lVOBh?mzqLT)ziOQ?T z%6H1Q)_zp(%af?6Yxh*TfmAj8#$idg?V$^sHshtp5PA zH-3R1HMuNQUW7qPH|Ex%dehsQzCW!o+h-kv=;h5zg|nS8gGh7wdyTU!Rg^S z$7xHmU(Zbmomw%gABM2`ZBi1=2`c1zxFC24_a?2$?fSV}Y)elVj}fEoN%J@)zAR{* zFgmmQ0l;1k#zpXku`oNriyZ}&Z5kkJ26LB>a@QjKY1MQkZ5LmOtN->>lkM-8^d{54 zSZJzwZT|4FV=%h-G22@x(-g&%_not5z);f^R(zAzXy~)8NPl|salKPRruW3Y#t7txk_d+N&$q{kb8btaojKa7)aUXWXlQ8I zkRikh#hbsrK^Bj>uTn<_ZdE%5q;JKX+o;@1_`D@(-?B3zjoWZR{jq=Xq{y}HY5C5R zr11I6US%z_<4 zDaj}9s#wZDWXLIT(Vfjq+)cIAZlLz*4)($D>}QGeN`Cnr*TruavD>)bA1&40O|@RF z%wvg@y#15;H{8WnidP6TvoDyaKO0x`9$j*+Kkc(PK`!COI<=#9du7cbchhx=1JzAe z&r!D$Wv}DX(nU{w%*`rjA1Xj(XU-b`ZET}&--t)#LO;pw{yKW3d;{!q>E)?csG=?$W#qh(@nLUiEf4!Wtw**w8P5ZYvBf z8|l81`!1?o5NYiMzL~k7s>Z-10kPDb3!W*m=#q=%F|?jTthd(b$#9>La;yhrC-uKx zc=FzIO!j3cvsk(z84CU5>&Ls>n?GZ}LC&Kxq7l}^+e>xmS8{PHZNns01blzAkvG(< zXCH~?G336JwEFTr^O3N~4|8F$tdPaZOng(SOuAsmT7E4vZ~3#7xoFOM!#QrB!27Pl zBE?P>DLFJrnx97rYB z2u}w&sZ|?`ZNI!%0VS9DV0p0vL=(dLe!*ZM!9XKN>yExJGD3)l!!c)fNs%JO2WqlI zj3tj#+LJq`oAY}WS+)j$+FNOyj6f~=mzcjX%Xm9YA*os_)zrBBfrWlY*3$KuXnsX- zQ?{>iml~gB~0Z^t&!Fi9w%y=RhF zih?QY^h9_ky{w71LpI82=H6bg8prkRJI{545Ozj7c338mp3Txnc6#L=`uSyN1y&X` zc^^&d-6wSMcl^p_(g+^c~E)f7AoW2{(G< zKX^0oj~0_=d-AWtE6>3qAF0+@td^;wl5~TVO{XZM2`8uuS3;EU$%{IgOyG3+S9Qp9 zE(i{RZ&-0 zaC1zcQ{|=fw9-cpi)vz{9)^85Zt{U4LRwbN6ps-g63nqL?X%mAm1WTVqB23BtWwPL zCPNp4y|EQ#g+IxuLv+X9E8;Pu#&xU8-~d8(m#kn+J{mjc;;&}WHNm%xPOjgZErTxf zv4gIYcGzc3-+pF=m4B;C=m3g4y-qjp_#fqH6$m)(&Y#kAgBIbB>+2bCgOT0m{)Z(I z?e0m6@a`G`_xyO5PVCVw(0QntVj6R+xS8KS^YPKuWd8Nq2#1|F6W7jV!>y$rP_!O2 z26W(%z}LynvP;5v?i&*pn}_X;oD9+ZD18bhcK?|2zH4DR!JqD>Ok^-@e6m6ttjFc+ 
zOV+lj+PyNhpQ+(n-&&~2+BRM1aK6e8l)4h#5R}L=njeGVRne5s-2cLl{*1K6=(GS{ zpdG9Gkv_y?8UM0==|Jp(7Zn*{7b-2*vsp?nlo1Jn;NxzMilV?4l@(s7GgzCtK12D3 zSy_!E+{;REOa8rSfeXS4DFu)13Ze}DC|nV}9UhJ<{2`b&h&mF&yILgK+jSY=NIEY| zz4EwsF9V~Fz{P)kxA2%L$+jmcQGiEs%s7FfZeUi<`mA+J7ernMr_9B%Bm3*Tz&JtJ z{uKB-niyMVt2u9~H{`OgFy?r+G3lugd}PJ;Ymd1YLTas}68=JIxpT&V1xrJfs3A5= zEP2CjAhT2v2rHJsEseP+<Ir{S(;J+Uj{v!!-lOp4-j$@|0@*U*8s%I8jF>#3>Kl{Mm~nt-L!yA*Rfx zniimzt)WQov1w=Cp5puGl{14Ww(3=hq; z1!I}gl)HGc8T{gxQ2gqLP-3`fW-hBd$;tefz~wlm{NU_?*ovYT`NNZY@EBdMlpI0l ztJY@?<&<*BB8&7@FNdm=Xm6@y*d1**JQghAtbpU7S>RIXRjcj%=4a|TyMAKb?PM^X zs4=(a+QVxujcfJig++=J3|0?;BDcXJ7z?opDQU%$&jJ(IqEm9N!#}l+>3DMDTui1< zmb=Z*2>qX5E-#eJN?DkWzCA}3m^tf1it>eFx7Bzg=1U~D2kamJqBHvAM8Z;IG^RSk zb>(FxPh)jId<61f+JLjGl%b-d6a{-wTW6Q!L?f(^17xSJix#rbz(D(i0a^)ycG`@M zXwY!JDgnWD^>ShTqvZOeAZ&?aNz8yqNi>~zbh*ut0Ixt&0b40VCdy}~WHsi7Mk4Aq zF-5CD9|qCfs@{E>MRkj(4+hETWcPwE(IsQ66MYiybXeUtvUM)x%TfNkxa5wRA5jdu zrHO3G)prp>C&+MBKL1soLa&>TTvQT|ky79h9PD6_1_g%~5ez@S?u8ose6n<4dyxj{ zt)OjzWK*Elx9ZBA_(eII?`_x~+!Pb6DTmxqr)yo~$_3tIbpwCW7y%nyUzM3RE9>6$ z^!uaOe8(UxbcYih03b!>fRXTXcI>60=~q*EM;!FnORM}q7M7vsrA!9O?AWU)cK*5ttf9ii$*VtE{??NuaxUH4zlcb% zuUpM*hbrhCTE^beM13sUC^A|^6Yd}@xzw<8f+tm@9;zxsNPuTAkRaGgBl*JM1d80B zf}@>ahS)+ghOMsI(8ZziV~gC847zBQvG?P|1M=1a^!($5dm#3RFM1iD@W5eOwXQ6H z9&og}H$UK{^2`y%V=}f24!Eztof(~C9o$Lp)Ar%COq~SPH2#q_O!%;Ky*yX`eswXG z9>O6L9I%@a`D$93elsx=uJ8u4L^o5EBpQ3v{_u znKbcCBa8N=PwZ)RJ4;=oy;XY{ z(oEyhJ#!Wx90P*N(DVZuLrEaMHjGmH!N)idON=!kEikJ#$)ei5?iW#bDO>$wi-@6$ z((67)3otVp=$orIPs{@0umBP*&D)Z^hfGe-xG{75en~)b)s{;W7nfE_#i5}_WmP&O9W7k$iuu) z-N6llYVQJE;$B$igfv%k#oa0LQQYO*elHFi#{nHP0kgjmI;6kDcd(ax5IFQ4oO=Je z&EA8&$q)`}2|jX3QmOiH9@|*v;kV$jHwK^8HjmXOeI1ef-N~|Z%GN4!n8%-n;$K^ z%a7vQ&c79=rg*Icn^NbEAJ=Heg3>%I2hP0^UFxEa2SG~NHUZ6(yu}GA3op#b3Z#Q? 
zfVNA71Ft?#IDWRj*v6+@7$eKzj_z^ac67YitCys)>VhG&q#EoRgoF%auzce-2l zu+M;ql!^w)j6{|ya(BQH(0k4Lgx($sRWMZIfbvW&^wdn8ti04UZtqFI{pt;=i07V3 zH6Vw1!y@A90+|( zWVeBf;`{gSO-9~6LEs!Do^wdpd42$2UglotXp;7qgCOL{h7ggpGdwX+4G1X8Le5F$ z1LCBDyh30u6V1j~c|NdGjJou7{;Hyuw``XEjF$?w!1-~TaY0PE1s!B9DtO#CS&7~( zyHILjG`)t!KJV;8y1zSK?l)_A)T0}7m1EQV5|D0xf-?k)LPI##brx0uZ$y zxNg`4hHp~+{_V+-We3{BXp7VATho(~S z3kZNp8*^srQTatPntD>=o+rDCW@xnYBP-qrU4Di%030MILmM#yjRHR-%mVMLvMUbK z1ABYz;Jr=0{ctflDhkx9jm4!%#WNrAoDF!^caq#4nx^*p<*19FjV;UK^b*y92;y>@ zw18Yx4r;i@w#=IrtxUW19yznNOEhUgW`A{7zbroL<6<)yLza1$qwOz)lXJ|k!y4o)<(w{O41_1>BL z#FCiAkS2N6JKAq+jf{r9WNM1BKrNfk?hc7_fr zprfLt`HA}d-RHkH^7NS6q1q%)DvRW#BqH3{1cMzS)ZPUqjTBB(sZqqt^aOjJOU5U9 zm&N#~=TkOeLL%#2?&yC__D=kzJDycaxS5wcRQbwUZ?`e{l`le^R^5QEFYI*#yZ-DM zWDNBeQwzADKBZb@Sx7!2Q55x7Ba2a2)vS-YJWi-A9AC>G449)}k4WgFO*_x#e&=dx zd)GU5U!!xMl<~{)+R&%kDJm%r3)U;>$KR5pV9JKwHzOmvH$^2m%qk}#wgmAGn%9jJf-rf9kKNxb@nGI^-F%u>c zx+jq{b>?`4Y3tmu`9Ew#&a(2iA1%Q5{sNeh#1f_{wE5*TW12hGhw^4hKRrO_fm?2P zTe3`ZmD|SlH(G{QBe4KC{Z{pUfT|Ml+UL7KWy<&ZtTJW-p=)hY%%s&XW3;UUbB(Q9!-%Pa%SWOYmz__eLK(o~qe?D!(bzwe zNY)D|gfX3|r&W<${&AC&45heZp{lwc)Dvnkp`T;#02&jq4Z@c&*diMz-*d?I&6{IA zi3H!fErtw*b$gsv1~heZG@t*W9T^5K9_?}c>a7)Bgyysaxg+fASq_6b3F*Z*ZhCLw zjP)myY_AWzdwQ@LG4GSuUSk?>dGYx7aj*T)Bf;aVQ&0>ht_hDuN%f;^&-3hWCn2Y5 zfd8rbVJQptpvsuWi<=>-?== z_}9>*$aW=NjLyxVk2c9h)4UuQNYi30t@@rrccp~UE7sofdCeOlp({i;T~_kO-LYs2 z)9I|H+#`W=M&%7ri$g|kbk&z$8BG&xlYQ8L`k&HXbYHm-YBTS%0S~Aem4VE*7rItX z=hNMv>FX9R4$~!}1gk|mk}IAI6tfUXJrpdsdccj zFE#2DKT`Vx`e6bF;;5&~=m8<>MqC-`eHec`Y~i2>Re2>z!lql2<8q``y^Q7(ef>up z#zLd5BIqqZqn>Q}_@YwWE#JrivH#$Yu z4y+rQSy^R+m(y&0Oa>ZzsBbftgQYK4(8eHBr#S1fB|)Jc$jL(5B~~#NJKGxwk0wK5 zn&Ta%f6rtWkR7$lY+t37Z__U>F7hGizs#BqWti#CP%s)C5SjLXru;8YG>MCIQQ^f; zS+?S}?X9G*M~LPNMO8VLF!Q((!n!tZGtDU6@qd96ACI1j}c0!SoQ|s0R#aI)VNh3@t!J$Xj z!7hg7ocr1C}ar=s~Oy>ZHeD-4E1XNIHX8bfA~vuTVi zI*$69)k$^&dPYZ1-mcS8E3b(JFVfUvhJge>@~E1IEa&(X7JUqz%;l}(8kjfWCib!k z-c|QNUvj6Ciw3tDS&t*x854Ad>v0CF?{A*d`R^X+Kb&f?RDeM$yRI`<$4x&(@xzA? 
zxg5iE)ujznmhUKcu0udAp>KU`^V^4Gv6sn}Pmi&wq&$c7OA7o~bD7-F{Nln`W7$Gb zXda*XQ(cC*u9{9wy~bQh7L0CqT1wUcT(-=mAVAT-E-Y43)|3l16cEBL z+)x8lt@}v#g3IKWNV&FsM$?{0osX&*wvA18raRD02HG(v)ju?*1L=*SoJSCvcRilZ z`Kl|*YDxA=(8otMuedqsr4cRTCq5GG*X%q5Vag?>sLL zRcF3GaSGkYT$)x!)EIL)o?rvL&g!Q&!%NS+Tvp`e4(|4YH3*aAKbaVLA;DJ=W5o&U ziks!!t;Wl4t%rrH zie8W<$?Btu1z<9CM3v^U`>1g@B3V;ny_--7cy@fREQxYT!udu4b35v-@#la|+p}|X zs@hwcL&)s31tt1Y5L%~4xqX<|Ss~j)(b~9 z86OALDRpasx-euUuuaxTO)`;%0h|k#sS7 zn)R5P)2*pv2mi4``Hvuf3U0$H&dkhAh)RAaKmFdDe0RZsV#}p-7)@Y}h2w#kbw6Rohgtu5Md4->Q%ct^bL0l2n@Hc8mXRZ-B{i0-($U2f-G8+-ET zqeBHD(5w2H^n3u6%(1-?NQ*@2lLbW7xo4|Vn_zN(D=cJBW=F{)`>dokh+;2G(WBM}gVdiEw@l&Qdn#fF63 zKD*fetAS+1k16}jVrt^I=p^jvAR^(ADtr8`z6=$6Bg|_UNG3_%+w2)-f0+iP7O7jR z3T`UipHoQA5-&l{a}$=9n)_n6cQnZC0Oh~#ky3eKjQZMeJPbG4(_ojDhEbJtZ^f3A z;V~Z>#x}9*59IyQs-Nj*W=kJ4rh`(~*z(L6`>TT7rd)E-=`x0Ql1-@m^RkIP+nzmr zucU`r$6D)JC8dwS=#opu=$zmB#1->vqQV=srvFbXby2lnge0|~N1!?stuh(RCtw&pBq0k`q z%zllXcXlu}Mh^?ZfM5LLVx3;XXL_AyOGmW24smz4>zDJOXak*H_fVvG@W#1*gpvWlpVL$T2;@PsZwf7)U;!#o!s z!|szdm%wc%KKzm*1CLTv@+i_%+IbM;m()||lrEudJez3Md6`nZ)qN#0!GCMbdF4kb z-*Pmr`tiecLmSy?rDUH)EkL|!Q5xuJ960Y%sI|K1H}+h~izzWn3nDkVKdtoRG+pB2 z7Zt^OwHSVJt*o(2^#X&Fyxpfaq{mvNk1aWwMaKQ?oV*jjbEKP>Ui8O;fQWlaM{Mgj zCC>nb0NR+E9iUkJ1w9zf z^Yl&Dda9zXaMhHu3)0=6Q`B-ZvetuJx=RALSXWEztiIllJ7XSqG7Nvv*>wKS=Fjp6 z0dum)P)WJ>)gtNZ9!Aoa;VkyvFvJwQT<2CotG_uIM9Nb$^|-qct`q8U75`*$Z%!0zjQbM9HFx5RXVHhKsM1GeafFd ziFe3w)$E6GX!&x##qW>Y2emk<{Kq%e^A=H4HNo~)+~8JMe2;uB@tpARQ_#%OK*1;U+tAna?a{F@ z7DmQ*ef9?cH%O+Fa3AO^cbcWrlUSc*LPTz&fY)Wf@B!RSv}Fc_WbO6w*quW&3wlb5 zw>@X_$VhRGc@D+9`>WKi`Qovrd|?IKO5BBru)>{QlU>}%nldWLg9i|ow2}4f|M~h@ z4OPSdZHex;`(DhYY>iwjg51K;ZXEdfgyii;z)Vd(#R<2+zH_~|)FyFx@}qJ3$U0ag zqQM#Saz17EGont3l{V79Y5EuLtkp}v0|sL0|KcXXV0~g2ErM#W02o340fVpWU5@@6 z0+;3$aN+URd>+4DaOt^S9OShbIE_hgX%FXeYnB{T1}yzLhBm?_H1(%p@@fc!>| zH%Gw+_!V99e|@BGWM?3AFre)^cZ`Dnr$uUNY9r)D5St=62%*57{>^c@=D~fhis3M@ zen+tkxvkAe@+eFj!axb4D>J0?`fuywQHD*Rp&bWZgWAti#vgn|-}QUr#R9uA=@>zqgqC282rzvvw($UD 
zK#Ha=GeqDecx*dm8dg{BKX71r{USjeX*|GhIx63|ep(CO*xbS$u+08VrW@wc&h~1x zIs?WxG8nZI9nPvO$#PRn+^FVI@9R5HkvCq1$psB5%#<7&4R4?9O9kgnisQ#$?&YrY zUc3``+}2}#>f%hmiwO;Kuwi*|<$f#+o>GN(+pL`C7gOASerCc48xmlS_r29)TNNm` z+~CKHOl^=EJ;dxNlTos#%;e&l!Y&3QT+cA|%01m1+ZziQ{X)luq3;j|Ef3_6B0+*q z{-@b;JRe!V7J2PDN024pqJAZ^PFuiD`%NgK>&sQDv*NC0$b+R_ybzvvnK+JmQ*x#};O)D?18Fp1XUK)6?i(2g`tw9*79A zkYN{b+pRJ8lwLJhlL9N`@r+Jq?F`Ced-H`m>Q}@j_{e$)5f*lu>_ec=PuPAZqV8*(s9*6YA7S8& z?`%H!2>=ceux5MUIir=72%Va)D0|{_1f{70To#EvM#6ddY=*@2A6tEX`DEpTz~^Di zk!}p{QOlYo&Z62PzEG}-4rItBr&{3EGub9o^fQ1AaB3;%@61Cev8)MKeu2EYvao;Ia+w8y^v237BN2 zz;)N2Z!9x4fG>2I{Ctg?`;34EGmPCFc;d?E3I5_SE^pSa@AbatFVzH}D%&eoe}L@W zGGcx#r4u#aN1`Xz4KUmK5?=YQM_T`TEy{W?rKsfC7|l^yhHAa3b!H}*0z4Hw|;2mQ(1>bm#HpRjp?M3**}} zf~Z{!z=bTAE`6>v4ah6={T{fjpYkUU4R=y6Z#3%RHiyK{rlFAI-g>(JDDEvyZo}=o z{4K-5xe(0M`b+8o_~~c|@|9Y4BiLD^-XIxA{dE4*8o2yZR7#wHtv9YKmxfL6jvN!L`^ICxU6} z8?$8ztqZUHKS?#ngD2cIDx+I2d{yc_$vezfYW9K9oJ);Ms1Q!_12iKz&;$tsR za$IOg4yISx^YFXLri8P?Y82}Uo;!CkzrQE152W`=z4hukJT^NT(Ulb5sAyDQsYF_V zr8gO=^I)vN$$*5+_LN5D#;}PHu@$WfuV)F^`N1#@-JU%@%tC8kZDZjpZ-tNFVPaBE zRbhERhnZ%cE-QCHV++mPJUVP8UEz%TSDG6{STl5F4dw?}n7uWIv#FZ&yuQ<3>Efv9 zd>?86B&D}QzQ5S=B2UFPPw+X>3%f0Nzb{47($%$M2;F1NPzH9A9hI-i_fYpk)krwu z5eJFP28IU@RZfz&l>z|H=k%RuMd`%$*Q~UEu4lC0DY}!NVtqN|g}|E2A0ceM5duO? 
z&VEtYRI~-sOqQ3A&&*7~;>dw_AS|qU)gANav1(6>LpkrTc4eOx_?ms$yzTudRr$!v zsVdBAk_=p0-(IKX1f6k|Tg8K^G=KZ$JCx_yDfAg5O01j|SXAk#Plj2vP&cyIAJ|V9 zxxE$KytbGg@UtddmoZK9d9GeB{7u8BSdvDz>d{`9-4`^1<|Q z!0-N2v>=bZs^PwUCyAR*P~hMEmjpgvZst**y+p-^JlGK!>XQ6Nep?Uiz`I-6VKPJx zA448|R><}$+_=+Ks+UsKtUTbJVgItdPRIFr)M<+f3KyvHS(oOaa6)Dd^e)>5Ut%8} ztp-&7FX8ilE{ZsiuJ{zYnk>D`EN`V97vU#^p+=^P6ddIjT~_Cd70D`TV8uxwXFJl9 zgnatZH=QUpRcRagO_%>F36JX~x)p~7kfNCR(-I?1KL%wnkYzfwr>M4<=COfY7Uu2W zRIRxT1{4(TcTGpP#Dtl$q|p~qsH&=4e+-*=$$N}GG9yE6stpLjK5EiZ>m2J;To!(r zXGJe$9)E$aZ;3Gn0_?T19ZS^^w{54W0p|v;(sCfvmySlapuV==j9Om~E-U3RAnq*u zyTu+Z1hwF55*Jm9hrZU!hm9GmRxTn(z26AhAhN+Y%I>|7zPht=eg;?PPUF_$YzS0F zL%12nd-zu{%s*e85fE>qN$<9#tl#mJ=x3k1@I|`h_zfuBIDkd;`32DcZ(l3YQN1Yh z4g>wkk(1N)~Fa(67r3XYtiZ=3r* zK6bY$Tn*p!H_g#Js3LzYYN=TOQGf6*qv%z>mNP#I8UtOH#zYPR{II7wi*)x(Rs*F{ zTXXIHZ?-eqNV5d&j%nxfCbzmj+UXZ?kOf~q4tz|UU!-oSbp*o7JiB=kMj$)}6|C2v z9d3j1(P>R67n#}E8vg)gcaQFnk-oWNCB8>B*BYY)uZg6d^1KX&A7y-n5|n1UaPAgx!u6_Ld#8yK zFb#-}j)t>S(~-PJ*8Z%BLxx_t!#^%3Lz(!hv)NSJ#z8h^G35(N4zOo0gYRLqPDL48 zM2Q8aB|5uZogLZNX}I%`VzD2?Ru->=QTMafn; z(Fz<(chK4W`eKvYOaP!y=h?Ay+)oc4BfQx=tZ;-*=oVsPnf*ZHD4h_)z5|E9J;IfG zyCcPI4#(z|U#K1dhj_3{?SbX(1zF_qMJA@})YThsN<#mm!h6cd^6QOm2s55G4w2zd z&teAVQUT;n3)_u)b=A7V2^t7YPJlnAwHWZD0s{aG{DFe+$_`>kaq%pVdT#QWOO`K9 zjloOS{CJP4z2)NQB6}d{YZXjC%0-b;w$!M}rgo&I%K1}ut7tNv2flHRJ%2r+KRJ+8{cq=*;raOdB`d}dHBz${Pp322`*`g5w4vklLTt+j;RHBQGg!$oDUB{L ze{5;Q$zo~~!O)5siFpK6Mpu9KL*oAKV2N!lq~BDssntS8o=BB^Vh3jCyGoxK+{l|0 zq$;-S=bif5&IX}8rRRC!jSH)zqs&ubL$d$Wv&w2ZnaYpgDL3r&)YP)%Qc1|75zUU> zm=f)eXKZN?rxC?Xccwarvgs$U0cSOrNNzVITZ?0V@3p5YCbL?q*0sj^<;-Y>!$iXV zIrFsYH2Gxdv;mzeS5GhR7)XI)+M>jA?>Js`C;yl4-t)xY1h2Nnu4aijeNRC+q0m;c zKs}?;MrocBqVK^~ZgdW72r*e43l%?t5N^S=gXJRiyJhfnW#1kxeka6$2e`+;uyuG0 zpt*H1ZusOtfr7s4Ivlm7AtURv+3_Z>Ym96m64%HmMN)h-kE_auUm7P6azh7V(^Sk8 z#15t^#e)?iXd)v-a@yHaRG3StxLsvEpMokRN$QyVP{l#O6F|=YvJ)2%wp)s^beQz& zx|$U9>U<-6fuusM5ZG-IyKwqp%vT7rPZB11J*r{`{1V2SVn_>sA@lh0bqWe>l6js2 zY}o&w7Om2$YG>O8p+|#1nrsZ;U1kOb2LDO2#;%B!J^)v=i|&OY#whHTL+qQ^Yym2& 
zIDA$_EpJY)%KJ3ki4!oGs75S6mWp)?Onk+DwD8-5v4XJM`sEqZ=0|6F62w_#E_A1k zOuzLUSs$2*PnAhLmwNljez5IP)2Vn=0R$FnaFu-J3176P)4T%*zO}0Q@DdWvODTZX z_Qvx}ABetCO;3}n^@B<8b}Va0dOA=!=sR5uWJ{h8}Yv^2yE(#Wb<%-3f3ZLp~&d86Lm0738wQhSf9QtKY z@iy<^^@s(JTs^coHCCvsx5KvQ_xdJ=@RM`{hqkfzLtVj-`vo z7?X6L%s$vu%W)nGcKSpKm{mCG10&qKGg(;2RrXCjq(m5A$Dndc8dp>6U=?EGToyso)K^uA7Y2~%)UW^jU3c?wo5GEanoRF6c-oICZs4lyq=O-@S zB{qch<=NkbNlC-EZ>v$Y1$d=IVflAmuf{3#qP8By=xzPXs)v7Xbl2t@cf*wD%+Ix* zhE)HZ2>%K0COrQH@qI7(_aFGgV)sh7?G`7umSVRr10=$hu8HsPJr9@59uWql?@rmR zXrcn#oR!k(&2E~_`To$Gh!b_0FQ@PEu`4U#Uln@L_vjd^qLh=0V`E0lJ*f2y{GR6X zaBBY785KAf}1`QX^DIp=*}*Dv?^7gYilBvOU6gE_HSyJufYm&o?4 z$QcT4SN)Rq4PVy=wMTo;K8p@C8rk+w9%XB9l(Q~pI=lZ?Nz#(~yyfY4;cfo>A7>Ob zK91>zCV!|Q>bzr%8=dd&m$kQUHvJ~;(L4RmdXg4r7|>P2D1qTAhB=DD(BY5nA1AKx zeEdQ9A$iE#so!vSa`wEN)VsB*;^IX4A%UKu#JQS64y%vXU1yhz#_D+Vx@iR>*TZ5* z=EDxvPmZqn_kZN9|7^1prmNzhGsS5#LhU*ipDUAE7n?Mc;KTu$%zaDa6t43HlPfRd zKDsqSrc%=bE}XH^4?n|!)a_R%-`W{9lkuwCC}?qha!Y|U6!)B$ywQo!{q~|>yFCmu z*a|8nMhHj(Q@iibZ{GfIBax_T_>fiWvmhveqKW+ISF%plN(h)xBD*cL^Fxf>`2I7< ztbz;A2#h7lr4FV=Q~u^f05Kz;quDf}UpZDst5>6wV~y$ukFY9TR9YGrVuBqgwC`(}cBkG9O@wXWGz%BY* zpv>1$h>bbNVXaqsV>K)qZv7kewz7IIU)@T`$Ds|7 zzRXAl{-wTE{WZroc6gg>@p5J6T^zX~vJ{Aq!8kH>I(ec?68CBBun)?TCeI2aFZfm3 z?SI)`&6DJ$YO1|Nc7DXy!wy|Fkd(!aD`goJ^8S-B5>V#qu#w2IlpWmZre}`Cmz6lb zujqulr|F8UqCuf4&rP}gs>UwPxQI{1w~%NvJNK%AWCKkJy!-sBs3#iT_Dh8AEJ!L8 z^V^tviY*T-hNqJ6&(qozJx|5HvKDr5f_r$wt?j$Q={9_^!xdgYoB5tB$dn|oA4Lu5ys{F1gR zj3_LSY`#>DOQ#uYMO$_R3yB$WAZO#sqrhGQq2x87HrEzLtZ5b-e$fa&HxAdlg`f~2 zYwvfKFOoReNL3heV-R~`(2P^MGDcQK4Rz_(mp9GXyW8=YgGsObPPiHa2(!oy%=UJk z25x=~16oZWg%MtcA3s#o?K1leKfN*s7dsT;G-gX<>UbSzX*+0P^X-{?FZ{NF;0n`h zvCZ(0uMlzQIW+*iU*ZzURFl-fZ%uNwz0i1zoNyohbP11b9Kog%ob03D8Hb#r)D@rq?v<+gERC^aATb$|>ca03 z1frNmxdZLjHwL~`nI1O()J#8T+y34?sqQ5O^wtR1v2!Jw$<`JgPp8mo{k~|&5PgbA zM!VHKd4QPEUAm#e%h-#is{&`CM^!TbC*)FLrj%M_nx&HRrG%L01fw5315RTb`ETTw z+kc^%ZDo2qXm(%)N2G{Is1*3-0e|~PmC4`X4FT8(W!B~fQc3Ru2&anj`4X*lS0aWL zKd6jUM|XR15>#Y}_`4;FU92sZ-^6i^mj4JAoYp!lMc-l+Vy*?lLDI=r!~aV^5c#D) 
zaXqNBw8u7D@SH+&=M7hRxU0?mDWtf3fI0pUQwp)bNr}gffcb<(#h;v0MMI&a=af;N zZl-!dOPnQC=Srn3-wm3KS9DqzwwwK&mogZ0Wdw@cZ38l;=K7z3Z(505pgA1;b|UjD;}c&HD0#qbksu>NI}w=4eQ=HVu`IU#g5`9nw!%6C|3%yYV^ z6I^AuB8--Nnd#z|fy%C@@G_*)fkP+RdJ;04Vym6OmFd~#GX3VwqqX-tTsEu1(^VN& zXe%EZs!A@M^S%6!=EJCzxR@ZS5kF(Dt|5Ncsvf z57zd_{?T6TSRolm)dc>SJ2c}UQ2)u2AHkYLIE7FylN$8~bSdW6c3!n@_38O9YiVk{M65T!UUj!+t;kfsX*_Ie+bf z&IaZLtp}2j5a(JC*1o-&82|#{WbQsWL=314X7wV(Vb)zwxv9`&Xgd^fDTvCRe&37b zU#P$X>Py8`_ERnO`Bvzp8zf6_26=u6D(HaZw-Lu~{e795yo9J(btFMXxi)Kgh&<k8yRmP zm=ASnvamajnAlwtLh;`ZLt0%pdZ5)hbvk!bDnJyTivY74K zAGlxJC2v2PU^%E^dk|0810UqSvKb&&=f5Qi{0+rjo#1tx2}7cZB!g%#0eqv8-V?3~ z&$Vt;`mW_jWrcjapl&79I%GadGb#n|KIL_rS4g;7P^-t~wz9tUX?1)M9)K!iAp*VV zLB(xx0SV}ItZn_JSrW0CD}5K5rRSxJIz}7ru?d6N@r2eFbSluum8_rBU7&mC?3CfblkloS(KE$N$^&y<8j@)2FH4 z;L|)JN*gTr5vuC8cnk%7qWTSP65tJyq9i2Am0Zb{&0C(I*>!^p83siHzG9&dqOv}k z_#NO8!Kj}rl!WpyRfxeh3&zAtnzAa

in0o@vkH*7zq(f;C2jhX^I>e_J3vnJl=qcr+ssP_%By71Dc1Jqwb?2ha1Da!17G{`_j&lJcjZkcEyT z=_-@H@-Obf`;5Azwe$u*5&GMZ`!o#}*HZ1Q&jrwS6$)Vs$Mb;!YX075+F6{(3LQ^@ z&u@lL#`AzxDrHaGYObG4rc1X2M_{&FCalE1EBTJtTdB<@JSzZWz?8t&i3UL7LuWb} z)4;WG3pf^%(sMBuiyM7!HL|O+A8%0p>jd`jp6$waGA}-oOT&l*e(FK{g;(y$eOY%= zDHO>4dAw8%^;DzxRPRi1&v@Gxel>ZTD%W!Ul2z+R`^=BjiIvk=s$ZuJ;)`(KxU7R; zp(7piFp8?=l5!||G5U1+8;mY2CvEo6(xdjlZ6(4VU2L4dTx4(->nu|^Mft<-0UYpl zfInLG&}MFBp70kOs2sjq5&am0{?BBY{|yq>7zS%L(spjziR#p5crE&+sU+2Qg&3MTo4>xY9&36gyq~2 zBN2)uarP=z!u@?9DgZ3t%F31qZEoz?&7bTFi-?ti3WRWHz{U5iRz>;U)GGKhJEiCZ z(weg0-c4@mQl#slHEyoWEbgx;1BE^%Y)$|NJ=O%7c|z-;;ogqJ=)M<18nA`vvJ*TG5PF_;0 z$!xR~DN6V7x8;uR5f73g4q$LGe#2b#Ef2D;n}b)*{ofAU^%95a5D&<8&v#ig0CF<0 zIfdl9?ozrOa^%`(D<}J{eD70%O@{$Y2}(|DJ?DMbF~7;9l&15^M4Sd`zK+oUp)nLk937DW$1wGTfZ+ACPf)ma~R` zW0y|OsqHRzCC-fEyYH3(YRY_8H*i$lnFAyAbZ#^Ra%4ic8n2?DDSrOyEaVp4zU?h8 z{*t3U*n-;Yv#eEx9{hA{N3?w3Om)1fKB<7PnD$q5{7?Vqv%INj6WgQ7z3`IJRg&Ts z!>RPlJJwH4IZ$IgDDb{GNoC3AI1?7nLLX&5+XSjHTZHY=N;@oN@BCJ<FY1r%rds=@La`@YOTCKgBeDGohgeIg%z%5v->YId|urT5J zro#mTM=E2sF8=}O#H;{wFpRt!P>1+LB1q&>&iSHm(1{KHL!I@-28NUrxerh7f+g|J zozx^LVtbwI)??BtNKJ3v9qPP1(|YPideekLO;E!Kxo~l~KUi}KGau`X4^lK0@ z;+rNd?Gv@N(uM8R&)YAV#0&zREo}vVwCgghYRRg0(4&6CYVQ+B#YGE2&JisYE5hT~ z!$9*n8pVA{?Yz29kzU0)4wIWPo)Ycvg;nxtIG7~b;*7o77FGVWcCH-3!n0G*iFy}> zm1HcElztsakPcvUZ+cmZnE@(E+1YNkhA8Iy))x$}@uwLP6!b52PrQ9$u5A7=_~=;M zEB<83FxDb<|E!crdSp@dFS)eBxiIoH+HUrN8V;N{`3KnA1YPt9gg*aAs^ru~9kyf8{G{ z+#$oujwVAhJQe-oF_WIk#Ka_izKu;IN7H1BqVWZ+CSB$R)Y8%wl+~*s)u&#(sB^V-VO zwhE>avH%b)jw+;|3UZtdVFL6Xe#zP{WIx$jQ61eMN{o4h-*=4UXd7vD$==oU_ z1i}CQRt6P!fZz+7`lUm_7ZX&3s#6ogve_6mJJt)_h#Cf70Jx=sSmLfH7+fCfotjeV z%tq%wU;Ud*1OvDmhnC|2TzYC%ll$mgQ4ze=bkaxebD?Vg&vVRv8#7U zMXml8lq~n&;PsUSVR;V-1^qq635gB?Ich9Io54!d$)~(8U4`WrS<~Cm_Y}nI=kRh6 z2&uNdURGTEvACq9o(AZdkshr3v^kTi;EjofKn$3djClpY0|D*A=0NAZ$%VY&`NfhW zSP-3om?Um0#w%^q~?iFEI4o(MVx>T z;DEJdPUFp&J2bd%!a8-1XRlpHb)~-Wwy5-Yn(VQ8q{zsh767nDAA+6TQ~#y}XoO=J zK;mwjl!Iy^6GPTbD4~HB91#{^= 
zZSDR`;iw=wq02~eB{fK&5SkBYxpxH(oczx7-L{jhFEmu{dFe(Qt45CbP0mbzmZEhP z2Yu>o$2LE?BW;7kKG(1#Gn?+KAbvp9{y$m$1B{|qx7t}IQiHE)twN@AuJ6?7{&^J? zS14LBeQ;|k5jWZ11a8nM{Pa_HvoCq4msYw`2Dd&NK6MmIQufUv(SCYDCYaq2E@DwD zN4$Iz#SUaPPS+m+wgJLT8{M14+YI~ zyUD+F!6#y1KtkYuN*552A|hOc^X$jwB9V5HfI<1y&%KS?dxhPv>{l3M!e}JCs@sL^Pa)3c_J`Aw=4%5oIsudoa`Hn~O{+rqT zZ%P$&b^3gY%Q~7^;b8GCNK4Xo--(B}N*ZC9z&oN<+a;t;V!R(|u?4vC)a=-_j&-JV zFk`;u0QW3D;uAl}%JTvINq?5Fj z($3K-udOwTKodh4HYSS>q!D}_qnwa5@6`6`3}2GudVs7hYZruVHJ_Kei)*AIJ?rQPFVU{>^Z(0pq75;s1a^h^r@MP0++?>~-#6L8a7N z1a%;@Aq-6jgrF0HUfFzqHZ|K zIX;A474BrCoh?lQG)ojy=V&H@RaPomdEALAv)Y_lx4tEQyb0P3{-a#rOX;}GQZ~QoghX)bJPSYV-0?<<=*7>ShY3`w3@nu^00&I)jgfTu@=xl9oh$HUV z8i1(jAwt&+yegpWwG9Y-g^i6V;p@Gsw5H0JSpG-s(rD%blQEsXz#r z%tnK=-dWArlEl&lfGdT=k_7pkabMiWf-zD_`x$%vxQ6d}bt4>FqgIStP)Xbi4e5c?S!88ueztzHS5bVE-?#>ANEenp(oKi8jPC->~o6Zbb+ z@F*iPI`4sk4wYEBqMUzG@8xR_^JLj)h?p9(Fi(U_v#H76>H~elHA?P_63|*6Hq%3< zW)aKBr2rb_ZbP@V8&)gQ*mv0GaI$v|XRVE-u{lj{giIyhVo0>}2r0>@^Ex{224$#` zB})YvA+mQWF20P0cV>?AMGO_+sHPs;o}ihT3BK_9b*ieb1aZLrH;Mlc>0&;BE`pPa zN=V~Us99ct+VM-*W&p%2WIs))$EgVbk_l5$VN{nW<2OcClti-`veW{ylp(3!V^3cs zX%!SkVUI38+6TzQJ>nKqX*J8LHx=@qZ-{_&02D=^Y4T8gqE>`1yut}Z5^|wh3fGcX zU*91%H3Kc21nhG_-}TUb&Or{|xj5$Wz{w}J!kAbfj&@wWN&N$P!xj0!N69w}5IgO6 zQ;rGdO(Ex3**&s>gE>LTA&vxK>fVBCw%-#C{#(3zk`Jx5LnMznO=P;tO*wh_7;*6= z8CBI7`8W%8aq$c*!;yEU)1(wn=2XLPr79|F1YhImyn$=SM(Z5QX)Ig1Cfv+cm`5y- zGjg*m2X`&8DigOYo?~2_H~xi%jL$&6P76GiKv&no%tjXI13Kds870_8ZFW zi$_wC!3h$*vdhyDU5aX4tCDk<=d3&(#uZ0auV$cwr+~5KE{nFxK8QC*hO2$Y&E!8T^TA4xnyR7rpush9a>gEVH5m659EU30 zix4U5j40CD@HZ>0B8(j@8N54;)prv)#9=@Z0Fdw*%t z4koc+GS2vjxrLUHXXp>ftvlQ?HGZ2WKVqs;;d`x7XohEnFT0d$Mr)Gae>@5bFA|kZ zN%e64M+;%ziTJW(YtkUAHmQOUJ7~=XUmEA0HXGu~nT2AvUd#>+O_Zl7?>%n2#dCa0&;591|G&_LoQ? 
z@9m$Xwf-*tX#w?y(T^|H92TFi?(&(mQJ26=UVwFnD){pto&8h^TEj5-^S6_%t^U~< z3CoT7(#n&iq0=vt$7`JI-1i3ntOs8FNcQng-s1+K+GI)+MZprQG?1VQFeNhU{ILAm z>2cHkK<)tWV8o$%lK}+j)yLJcoAqAfn4XjA$7!H<2xlVs=`sfEUXBVl5H@m@f#GR>~l2pYWm&nJ-}m%RKv|76M|GL_ zguCYCi-(!GvLod89f`aL`^~J_6s%hL8FN!|dIKPD$2{B@wFL`G<{k70cxj0V?@Ozo zgUj9EXe^L=>O*7H86gwmTny>^Y@PaA(9l|@#O3Ce=++2<7?#cC5;0S#ksXhnTo4cyJ%LU@#=Qr|YO#nXsT&({gVLMEV-x^!?e`_Afxqrh2s- zh+@9Ws(^^E|#?Qx@k3gvYyHWwFM7bc*?$ke5Bq#Lpsk``)Xy3%of=`0rZpa)p*{jU?NdH;0J@v;VW+-` zX#3RR124iIx{P2I@PT)_+%*ip1E_dDxHU%JZ@+8cl#2__I}5D<@vP>OJz>*jsdA7C zle2>LEj~_eWDD&&ABL%!p**uQj+n^xmXl{$*TGL2^Afn@H%GAgIeUyk`%7?j7EWbBstY7zJZ*Lm>;7DVUsDri9eR0q{deDV)o~IVfxvWqx*jegm?(8<3OQsvV z;wPbdalX(TdsEMSjten>uUw(ry#Jd@4y-){H&OE1B>{68m=CpBg8CabAv&5R&ZVY; zNV~{Nx5TAmCz;>Q@XP5#ti@XtFajR*Zv`w?`2MnJzVCrP*@)n=X~OaCDyl& z9}f0|{_NRj3WgveFV%5i#X+$E!PvJs9l0FL(*Wm|)lJV#+dejw3Q=Evsy#p1o$M%W z1bT2EJn8V%Fa4|S*QJ$~RLA{u3_*?W)o_ycoA=xV#{j`fB@ z-V>i1?n0K8)BFglST9lVUgnm0PzCPV3?Z&f;A?B5a2ovuOrN{S?3qu7JuI05ziY#sL&|XHZDkpWVDpQF%$kc$=lf!?GN?S7b=e;DK z?l2{Wx+PL;hX)B}42B{Hyq)s7(kILX>Ug;c24)@X5^9JrH)qqvm$ebNpw49_47$OB zI&t*9BYhv^F4xs-;5P|e$2n}-rKRBba_VIMWpTI44wdr*+qgryh!tbq%Vp%xj})-6 z(0IKC2Hg=C86X$fLWkp+xDw175&5FecJNtA+02q`hqf9WtfeWBLg^C^FA!FaO;aj< zOe>Od z_?z~O%YaPo@iF>MkD;3YSVhAd=)835Ao~fq>Xen0vnnbNDNlTQEjHwhNV5T7-IS~L zI`Yr6!&8Es)VtgfYM^o1e!8birp54<;*6HCPgV>&*Lf%;QVmQ)Ij>ZS9hN*Q-Mx0f z6xJTk%149vS_F2e3Dj{N8VbixVZsW!b6%0n*`dzz5wdr}-{FEKv+V9oT^i`uT|sv! 
z)~Vk(R^s?QB@cyu&GMK%i7J9qkke4-e0?|u4rV~T=;89S1{vr2iE^pr?92XT_uLrk z^EN;J$p9ja$_-dO3?9FMQBBICGJjzqv#KExp*z2P{}qc|`%WA&+E1H!q=JG2U+*4* z_=@k^M)D_Pq5P!nGpE=jy2ai{@^mL}R#(E!iK>$)_FfFj|FJxd^N99VbwS_DyL=|6 zskEfsRsGVKLY;4_i}Z#U9uSt;&GqW)1E_WN$&oX-6N9kM>%3SGw721W=$n0#kLdW@ z;ycyT1gY81e?sC&voG7GSQ13!;L>@}lQGLvoX<^GKc1`kWp(9D<=RY-9MVad zdCvAXFX4(f2oA`!@#8{5b7a9%F$SS2g=<^SfBdjfcvk1w9C7L6`WNE=GRypn9MtVr zswsMc^cky$d5@ifby*^v-+KX5V~8*JxfgO(^7g;pAualY!Z* zsMw!V_OP%ePsLCi4_*ZXO0{>UD!)qhHbx7GFHiG#XiPSZ3K|N26J)$Gm9o#-(nlZD zXNJtZL98!laFTK_%ax|l`>SdMT$<0k{u8{g%m3on{#r_7CAb*J-7!a;3e~B}VilLB zJS~7T-_dR#6)%}Z$FNRVsW^Elsq~xV9~d-yu84eWzPTT>u_6C$hX-H{4s@mD0GO=) z2Jh|FKQ?DyRn2(dv6IVu2A#u89j^S0Vfj}eix$8I>biB*k$P$qIM5*;vIq434weDu zK!V9!XP^_cZcB!Pz5YW-Z}SUz$(H`!OF3WNUy|5C5+tnZqE@~*k`#nQ6kH}T0dbs3 zW2S`PsbDa-Ae+|8<)$2dN|Oj(TY0ErHOA)XsyiS4-TONkMpH=~0cW@QJayRm8=4pz1-=En5FY+k@hqlGm!`wf_A|z4~$iB~X1&Qm8Nph--+Awrt~o@^vxXO6AY zHSR_ys$zUr)SP8?H(AG7{(Qu4-sa!@1WDgr7T?QV-6wq_drHp0qpS6^ zx4GO|2)haV`;KzI0PXf`yV;nBoegK$w(e9~aOH-#rAArX5>227AN&FGuIln2TIu$i zyrBmFaTYgOd)JL1qB*3a0TfhS5(5wqTIQqXQD;b^wKxgJ_m7aOI*!CtB}xU&d~-u` zuCHFbC80q*8={lAad~dFCnd`NCc<{p;h-6>D?k!M+Y>U?>=I+`_xCUMFlNe34ZJ2j zLv_Jxe%&NIzUEua!JS7V;yeW|iGuZ|Vbtqy_IfWiEw4F}ch1C$ILkt}IjS+@h2~8w zP@*PZVIt&?>J@h1xj!{hfU8Wn*mqih-ac^fs&eQZ1`CmHzaiR8<=MGiD-98fbYH?$ zo^DHD!$-X*LMHX^>jgcPKh{J61eLBWjkG>Vp`hSS-p>V^SJ!5D+HrJJ$58O`d%uz{ z0ES=%m@deIt77BEQ&%EH_rg?UFwbVa@<7;cMXe;21*9kfB$!&dIS(b8tt&Sr^*lF?)77(M;v%)|>PBl}1Nt4!yqkb#Jd)QG zc2P?idoA8$Nd`%FEWq;Iu6PStKC5a(|5chJh5QTtL!;dR>2TiouA!@YyF)q!8XePa zdO1^2FPSy%;hl#|W8Eyq9ozm!Q!m80j7e%PC~KO`LvJVr{$Xr$ka}~dN?e%i*YKn{ z@520=jkqpjrPJvo>LPJbTaU~4+Ob^OGTg7DwvqxB{Z1qO050@msqxlb=?Wr+dqw z1ssuCANU?FfFv^?5+Nx0owB;K(HefISQDOQMklMEqOf8}K_;sN=;{EbBLdN!=z5;L zsLn`O2nNP^CJm4K(1g6&)(mff3Irw<9587DfH&SkGRJBu{^p!;6F_omZU9eS`UJ13 z03yJapwdd){k|AzJ9A7+dvCI-rqMqorYl?vIr~e^`L}Z)bOMXff#V;7)MfW!$ElfE zUdL?+8oJh75$NzaZBA(ddiNM+ zMU9$sShq|9%-^uDMbWU6<}=jZC#nxzwh6ov*+;WC 
zd~IJREL8+l9}xa7+izOp=do(=*f}d}j!fZ}pMaZ6&Sc1&<?bCa#pwj2EgRm1C(2Zd#SHaYX^ z*eJwy=?s6SVqHH`s&AaNjXUBsME$sdqg{?9oA!*IWlr5*i~W^}td_~0Qrxy($Dr+F zBdL=mg|;_3kLeEZ@I9Y(JJXI6B=V`hxqwa7;+zQ!hz%0iwwk80zuLKfvgBK*@1e4r zg~9mKHI1Yk#saFP3!ayWP7sEfmZi!Rq?yfwdmb9jfHo-l!*>%$gSgxEN9DtH)0hK! z7^5+HQ%`Y;FmmYPR5V!A=1yZjKN;jRY9&DkhPKcgKzMD=?XH4MI~m}!xo1bd^FIo( zJyT(8_=gSwXhmXF89~Jz z9rxQOs&sbi+<5RdA?Rd6w6m-0K<5GgByw$Bt~Ka5t*>hUAm`EB7rT+5wxy+yS3W`q zC3ECu-TYD)Mfe}pggP#pr=yT$hjzE)`8~$~HmiKOjoC~=E-s|(N&Vtv;Y%>!L=^Wfg2)>eJqTt$e@q$fP$~!cMC?mt7eQ&2 z#}>lJ^PYlz_~!;b6uBPEzcs{A5xE-ib@s`uNm1FOg@|5zwE2wLS9Tw@8`)>{lu{Pr za-OAcl+eNKtuXo)IRGSxnQ98721M&r5Q{bliZ$yPWjUaG!schC?Ul z%xVR@MqMSp1`I*h#<#!&kYZ0Z(OeojJ2T14q-vPs;WYhrt8X6R&`10{IXrf4T#Yu) zp~iS(hg7X;9~gGv@SJ=f_($B!rKI5>M=ERspA+zBY{WZ@>rGdb7X`SPhqb*ZW*2@@ zV(Czc5MFvkrHiGCeY_Hr@;XI;lb2TQdat<=t{jJ1d(`FLl-O4EThPn8so<=*sx*B}I0N9L!FH*dCz}5hwxq`Uh@=08&y0< zOa~RgS2qp+$Eid94tX=Kl!)6%c$?pRDHaX8N5!vQJTf4DJYEaR)3OLUR;JvK4JIm2 zKfXk!N?X^yfzeJZl9B_-Fhm*DP?gzcHLFX@*S+lVQu7K&vpX|5f&@6UP`=Ya?WKx9 zMydFMZ-n0+^3|6Q*bjk21Z&fj4cLn9`aRCvKv*zpMH4LGC|ef^ZN$ z-6QrjJR|kVkAA3r({!>k>tF|O>ch(!ADh}D(OT;}#(FK|75=((NjN5iXY8FDUohP5UwdDcJe`$-N=d-Ax^-*#44s=nTRFIOSph4W>#!2xkM-)n89lQ# zajV4Z75f5lxs-OBX}UefgtN}+Q5VFtJ>#`$?K8FxJymiWkjy)_*g$vs;`dUDJG$c` zy0#5pp=>GbL8D~8Yc_ZF?t3_aRlnBwsJRH(PXsugDpC4nzC^wt=o)RHhy(Gas0H9ph6L@1Uui9* z>nQe-b@>JB`xRnaIa7uyIob#OIS7p7eHeHZ`nV*>j>5+I%C1QrH0rX(Hv#dZd))OS z`zz{|XpZ-pOm7QuwM{up^q!xCW~2JlGulU3qKHbi?U^0i5^}u!Gg7b>PE~P2y*lco z+DfU9Bzeej!@Y(AdJBPT-x+9dLrw;}_~sF73rOdj8NtP4Si^)K#fm2qWz$~J9hqX? 
z$8PT@y<|^iWLRxVS1hgT?ywGwn2ZoiT|Oy@UDfGTN$!05R3OXF`~xN7dAqXcQa^jt zV-3uYVjJLR2cxZx1S|T4Qzc>@JnMCk*n3pbxAv)rD`rS$j$<1UVfPBQpS?h>v{i_s zs;x-cq;4_F(4ES#{yP1jL1mAt*T(|aDi2N9XZ^Iv?JT3BPgbo+m!L19^?dpEd>K?A z4bOIJ=QR?1MTUE%Uim<4!^AYD@Av)4Vatbyk0t@Pf-0$p%$CY3zY<72+kA@G3?z?4`)sFT^lWt3mzi&iXORN&wa-Z z2~W0w(be!*P~(9T@jD9u?HklF8rsh0{L)ih{uP`YKE!tD8A9@M9c!}+;9#ImmOdN`` zX4f#NtP4C8ceQeIkiy|b^KKLy?2lq}VF-4sr?;QK7Z8}F-tzE{?Vb5!q=OhC#5X77L z$)nB(f-0?^O_`!tg}OiIo+-l;#Zf3IXpg!Q^GxRgLb=k??sxN)vlx6 zEQaT4qnH?-a>?p+w4&U~^Lwn_Z;{;ex%iKOk}ge*=FL&gvZEEZZM7D-+8A1>{AS0 z!e4v`VsK#f^8Ks;+bn1RqsWi`YLLE1z&hSUG1|Cq=P8ilBySmBiP^*7#IKBePyL)j6n%%^ZT5N=RCXNl=Y8q}l+(3wzA@O3_6MQgDnRGuU zUXRtorYE-nI`HKC+iMU!M8WI2#B}=hF3Im*t+NZu5KAu&*n5&@o&l`{_AF~7e3Y{GVo!ME)~xpt|6OTk$7k6eo)SlbNJ*2X2~p_$0|)vG zR`>i#Of&?>k4D}v)vLRBTr2z>>LaA;tUzcKqoJQsFd6~Llj*|?W3aNYY9t{FTuFu| zo28s4RI_NkH$6X~8>VHive5^(-3ofR{Zdh3G*;5M_XdOO%A_?@k))-+vIxcGZEESK zmR2pB#q~gsrx`&_)?^uI9V`U+c9bftR;Ol`dMB-$u4s7W4J$y{1G5Q1&iMFyKhI95 zY5RH)mn!}eE_f*LMY&>R%Plraynvrrad3U>FET{a<7i1ldk~ zV$k&>z{3lMP(Y-HSNQg>V>n>2RNzyG4>cTc669QE*2qhJn*@;CsA4l*plGk14Zco204@x;HDsL%JRUkijLuo0nC3Ovad{WXuo)HX-ym?x@RAXRgueUugC+7dg+w}}_a&%Q4LKqzEI7`T^rh^c6> zYh(1u59F%#?Z@sF9R{Q~g8)e2td7R-sn8PKNm0B@A$@NIf#@2N+scV?!>M z=5{stZXQXc`?{4@nNxO_UWN9zZ{H3YJC-M{=W`DK9vq>uu(3HAe;$mMo*Q3r5x{Ib z)_J%?VhJkan-Gn-?kqo48z6Cpe2+FrfBEsMR**)5vv84q{xi<~CjqjJV91KO>zH{6 zcj$%+I|qfOM;DH!cZUO#pmAKCr1)4C)hi?p2)Ua$%&Q{_W_Z;YGaJKVRaE6cQP>a# zF>y!d5V2%h2bzB`cdn^E0~erBB1_x)~&lVB@M1q?+oFrgX{ft6G0MdeW9vd+X<@15Bv z(+I}R{j%d001U1a_OF|>(I3X~M%_PxEq!|D>56HK@panxL;4=O5)eNrvf)m%>hC;Js240 zBSqeOqz^!g8SXa!ep=&-(8oL0@N63%RAnLUKyU!2ohXHVQQhNnpI$$D2l|jqK(R@w zs8A+tFZ0@#2w|xRh4n!es&A42n^w+HWo3mVc29|~Ic=-6*vO_(mf4zO9%qOD()<2* zgBK~wuh|7sQxC!e3x9b0HH`j*oFb9y@kjqMX`y>Y${Y27w0nN(i3A^uTSt?2Yt|;( z*CU|#QuVl^6}zK(@fcmSe)8p{ePr`QJy!o@|H-f>0gAN2rq@ObF!N{I%wO|5T zDjD2XeC;AuKDGMJcDmt^f>D~Jyfbi4aJ^z>OAb|(QER<&&&s2>&cpvK@nFx0yl&uC z!R?H}{skw6Pm=hcA4IOr1>=hIM!LRh4}SFD)jO-;TQ?ZMOUc6-Rc@XS;)JASHqtV7 
zqKfo~d4ZGONpqL{8pJT)F_v`*gBJ6xFrk`XGdie6^r-= zZ@B1@K-AvhB=KjYs6scC4qd~I#ac2j^ehRzWP*&t-;rbf_lnD(ANTH4VxJGD_JI39D&un|dhT z17S%q`QgYUZ^SCCGSzX{>TI~#GT4u&9~T&!%ZckePN_0DOR5ek2Zc+u*J&x&e4ifz zU#nCJA}bu`pVBL$&&10YnXq@=jnDm<%oh3QWYpoEsWfudUU^%iEHevSk@iC%LRM!l zSZt|BB^U^P(9vYg3Sm~q#wu$}qM?l7?qLkt_J-gxLkZVdD@m+1!{RyCvrSp>f1Vh- zFTE4RiP#f-qM&Secl%A1IV>>+Vjy)M1gQnUqh56$ekD})PCaQE{fSdk2^?G59d{vZ z=5oNfduo^!-C>Q=D|44k0v8@V-l2u2mXq)wWk7jBFzXivLB0lthWGP*eXE3fGSotW zXqJN;6+Z=HCwhlp;5^F)g;A&!-F;Pjii;OOxu6u>9sY7AL{4`Aqo?n&C=ri`#C3K~ z`czI4SCcDW%6ZnU-VGM82?|SFc`#?LE&@ZUrE7KK_eSP_Gm!j3l>UY=K)Vxj&eS=P zS<$h5x7~Pr)o8MvtHj77ksX4K(3HK%CxsY(E^+Nk`7rs=h@5R#RXS{sUD?NO@w3JOsV?c0=p~ZOB9OV`jt(*Qi~B zSc|Nv7APQcwcaX7ZIb}Z>PXPU%Gy0%*R>=7Z@|}#FxOTR`qLZz1f7mD8kB1jS6u$=HF3GMcI4#-j^T0@CwZ(M*_Z@bhuh>w>+ z3WUrTS`S4RK+38zw3>w9=-B^2Ec6a&fTpIV&U|CJho5IiwL9qAacbBqMX$CJRj=yr zZ$%>IP`OWsH}FL^k|Xt$7gqrvLx>^g-AK z;DdCLafKSAL=Bxd>yA8eAD{@|-oBKR4;-XQH#lIHZh{Bm;HcNJGY1*NAh7-gkB}%u z6j}shDUe&xoEh}oMb$?$iA2&8io0!zjIJ_KbAy-FI8#cL7BaeE2Z{@_6}E~kc1>u` z_M-f8I-LwH1-`4<>-5jb>Ciew@c{Y1gSoJ?Em;e;kTLa8KpAUg#>cnAcgF+Ud*!Oe z@2e4k%_|X7KbP8?CIbHma6JJd*v-g6ZLsAl#6MB>^Rlirsfg6`l99}zo2F0!0HTD1 zhxWaPKN17b>zn=>WU~011w%Vds1F%~rCE_D&^+?iM{FQ}EV;Fun=dyL#>+HnC9TQt zh!#di+M&nx;#Ar;)aFij#I6V03fmBB(J^WS#GgODDFDn8snh&mx;JyuN6--BK_V37 zY-+k&$3d84S*WUQY-6Led3)ZOET1~{upMNwe|BVBHAV;Wt zt^CBmwVM(k2j1x8fmOy;x~{t<+xT0=Gn9qMa>MQ7g;4(Zb*%;2L7#26xEMTe zPE8)pXkHEv^WyGU9gSq=q@3piH`-zc{n}lxRPH4_E~iYeKR>|*XZWipwSIBrHEd~v z+H8dpKwoG|^gERuh44t=rk_^ws!qxv7~@q91HHO(A_$K|hYd&lVegX1UU$HKe-5-< znS!`L-UVXz$?p?yV?Gw<7B!8t#nD_8Zf%I4PGbNmm1! 
zZrb~|2X1>jeE87nD+R}5vF+%J*Sr5(AN@BugY3)WfBLSGnf(Pav)3#`Jit$x+Sg4` zSk&tMDfI)ztR?O+7gVrp5W3zXQ_;oqMEHFmNlcmXp5tSXWJK6enC`jZM}DGK>6 z%_kukRq#zs82xIkf_fap70yxXiie_+ExFI|vlwaFxn4IGJO(F(PBD(4x;{G+irFR=ENJ@K-8ULKKf;M;n6Vpm?MpQtr4F zHYc|7c@=KG|B5!Y#HC_U8=@87|M%|VzmN07*I`cxJ}33O5s7?2gi%P?k+ogf?drq1 z1U$9(kwhyt%vnhO`Uk-xfYApV-R>C3QqQq%HUGFohN##eiQm!q4H{+1o14s>B--Nr z2O6}Se#)ruVzdmkZu{eAh+q)9Rkrf&$EH-&{@1+vX zRC>ZT@YcvzNKoH|0YnFZvNF`Px#Gz2kt+{7yC~f)xxnH(`(D9d=B1W0eg+ zh!vGjo^|Zsn2{$#x1$3(%gp)RLW8SCe3&uU#{)sYhpzr7zGBJW3#7#6G*wQeqmSso zT1Z6!Du3XM&Q5fFh50<}06?}{SgOngv3Xm<${JYV^fB(8Zmzki{%o1*j}n+m40!Sd z#!7mvxm!-c*y5LR`D)LF7eI63KpBsBmlk(FIcVvjt?;3vt6uZMg?kSlKi1Xa6oU;M z8wL#j6A1o8^Uk}BJ5O7b2O{?9&e7}oS37P(qmDa-5+DzDZ~u!!SNg57Yz0-lQ3H<&S6Z zlk_G37m^xSaGZo$a8A|ha)rr(Qj;$+?>ohg-|Kv4g!Z-q=4G^nUy@alEg+xJUH73DSIU&z)DbelsvIW)OMi`F@$yYYi2K37Y`U}Ov@_?XgCm4> z`d=&KDYg&t3IPuo_49gTI;<<>KK7={9fr7hAM`3f*~|qp(%l@=85ltF#64FQCJEx$ z&24gfpeBX-GYnSdF$mkh^$0(mT3$V^(awa>H4 zPpxX{2DiLj*4ViVysh_dJ{k`efP`#Ds1@thEy0AWVh>n+tcxz#+O+TRwnFqOu@Y}E zVATIAxJGN34TMP1^)@Lojxy)RZYkM}0~1N_%V=&|$g^8ONOSA$QsX6tpU8uYNv9X6 z&(dga1ztXpBUzAhNKV?mh(rEdPzbHo`BbDXz$Xw;PZ=EtvIY<=J^c*80N;6nvkkGU zT#V{dW_5ZX`~!$P9e?h`nbS9h#FO5=I%%vvYc5va1+g6@L-wg?Bf@(@m{0}t?K8p4 zqEJP7)x*bxO#2QYj?u^ti3!wZr*7C>FxXi)BmL2Tacx|nJKT@}iL`tVH2N20Wgnji zzTL#=&1+nXtj~4xxaQ=Pt7lh9wW&1ZF{CFz#|O&+PkO|mSGVqFJ;lV2e(hf&R0`QAr?B5)haOp~XYPnFmW;jv*U&@(c|(eVc_sodZgKbkN(R=aNb^d0q{A ziI+c1LviA30Ib)D^sal>ii&_K@`0Jw&tf18lsZyP#P+iVX_@dKrhBa~By+DJsuoaR zud%#)e{@gn3y}w)MCA6CJ39uZ`+)!2R0_)3fVVTi1BKw!^=14`vmN%bx((ahlbyPf za#Qy@DUDY7c^ z>e&_R=gnoQ0AGd_fdf>y zY@d8Rtg|uk!u>yxHM5qJemRaq%3-=q+vo@HO8t=OSoi*~l;`!vZr8W+s-|wRI1Vc# zHe5=0c3swv%S`@(abRrdix-hMj>|RNx_AHR(5Yuz_8;MYa&JpZbl>5^K@-z^N8bk~ zz8<2?IHQCwGo9Pj)mBnBv*PJxhU>u<;IJR#N2SkWXyBhC2agnDXIj-&9@(6aF|a~i zR8Q8(48tAA4#l}#2RgrixV^o-kafpdv#bc8?wj+KOopKZ{o)#ZmoAelYMHJ$IlN)l zWLJMfoSuZ!u-zxj=b5I}Mj(P-4?le;{$!WWGHXgtIX^`!Q`K_ue|X-P=;No$)S*%v|Qkpj1{(rjnVc2-^z!lROuG zitLseJ;nFu 
zA1#RTXpKTwxo>^xc<7?&8J``7SpLie3qIa*G`EimyR&CB%?~}tWTf@_EFUf9m~enq z?Q@ANicO1DB0De<@l`IlqX=cVjhZ+8^6DP>o`~gW0-k5ea;WVgX}j!g5=KLjjj=E# z^IXcSsLRRZTUon>>MHOJR12I)%LL=s4yopcw9%cEKL`SO@;(FxO|Qc;Gs zdGz@~R-5~H3Zfe_1-42Sj8ZaPy9@_PCbk)7Vnq9qC4^B!+rntL*Ea<)`3(!6Ebi!Oc8y8}5+Z)hq+ zVnK`ksXA3xz-d4;vx!l_ptCv=mh}a~&4`xhMP$2%9#j(7wE`6-CCr?=EBI``f08Ng zIJlFf-Bn1v>bLx5pzUIdBkT`~I(tWLxX58XeIH)h772(S{a6%0j;B<5c2xq)+95yp z!oMo z+rK!O)_2JiFeFaMm0aG}c3kdL5SP-BW&!;R=Otl%8aMRGt_Y`yYfLg0P;3U0FgBz6 z$B4Rj&|J(#rb$tK>zGw9*)Pol#VOq;gC#G?yHY}4G`#$Zx56}bx*30dc2PDEwO!GK zcYICP;fj0_ZUzN#i=N*@e4HMJgJ=*DK|D02&{Ird>XaVn$YwkpTxMW0mGv$KgIg=Ta)aS1<)j3x{MEtzzofTS~064GA-gK^1G`TX7DS z?H{sX65cDb^>7g?f*RcnM@)*?keD@OGq1&XlZr9Csvl;Z_rPk|>?>#CrIZVH^;?I# zTw=s*ZHP}Ta`!IIeYKTLZ2I=$>3%h+lWy3n()* zTUtCE8PE(92Jx7Y`ujQ@vePSC)Y`%#L^q<9_$uCqA1{X!JJ3cT=vw;9vYnOE-A@|2 zK96^#IBLRD+9(MOZUvP5h$sPkA_A=1v#*Bp>+=Atx0`fB{}U`b6Kt?5-|cNVd(kdW zjE+Df4Ux$jB2zXE9ieS&1>;*Uf>VXaohi=dA<`|`Q0!o-neU5a{pC^RbY7M6`P{P! zE$AJ4cu$8_X@<++r;uzEh@ZyqTTTaW=Tg$TF#Z8|TXQO?a-b}AGQ@SVKBT6PN8#JT zJ0$x>%`~vy5lns5E_;>4Y}*)E74KCymROf2By_WqhEqN4km{vpdj$CR@VU#GmPx7e z_+=WmHSP9MDPPY$8%6rS>QiHw(;dYXu*F}ms@9*!(pa0({yA+!s(Ek6($o-pM7phF z*mPnoi~Zc^sml-uXgna zQ!e$tPFFNTCZjaiZM~F8Yg+_`(s9`Q+HhEhX}lK^ju(xj#3;t)!Bz%``7Rd)M7_|K zp_2_x|A5{r4R3wlv=0qG)NXTbZqg z>~kll%S?5I%?Dg-?@xJV%?dY}bQu&ll2u(Xz0dA5=QEewe%E?;)xAbuX zE^JM1#o@0=PW35Qnp#G z4e=86&ggz|#H>7aP#d$pzfI|k*y+!!qxQp=S=|dq#6N$*UR{t%ntctr%QKc7E^hvN2l_!I7YGU_Z08=Ai4`QaPXGy#m>xCLfc7gTzM?$6#`i$(VN#R1opS?y(D)wF#}z+uR?^)XE&Qrmr5Rc z>UWd+Xpw=`1WyOq98ulsgDjxl_I$Wo=M`s0anW(_DSzfQV;CJT)j4 zG*{8`kSH5UzliGPww4v7S1d4YLnMq+qTmmAp=fnnAo8A&k7_chW0jZ>47;qjO^4A` zX{e2<`1%mjgE&1AENh}>fY5Rad3E~ffJrB%FKITdxnd&g!Hz@sHRq|zRh|Olg9VrE z9&NnJ&m2zcYpY5Xs$3cc)n5y24G{zH98KfcbM}{jXLP*s86*9tOx-P+D1123s3j~b z!{On1EX{7OPkz(|ky9RaY6u@@#G#BK5af(_Im6tF_TroLD6~a%eU1|hD&r$ZT|2I*`4IhDQPM)Xsh=4 zK*)mxfru`*Iae85c)hPUR%1eElgA=GnY=BDR%|+#c6%=3#=awm0T$Iiy$##cIbr3t ztesL-#e;21nQ9b!>`Vu+eNAzRX}dmLveVDEii%RPES5-Zd*0(Z#AY^2UR+zAOa^x8 
zV3D)s3y>~5%L?rLZ-v5M^!U#f=94$2jiJ(bCfFu>bXhF9Vs`7`%#8v^{EjTLbFn62 z0-HDTF#5Gbg_AjntlNU2Qm=3pSfq})Z=U}2gZ!8!U4yorf;BMfZ$sA0iF6SJ-&xM+ z)GKMB;x0?(g|ncPPuN7B;@W>bT6|_tgYAZSZKO$D;@9V0Ni*~pi%3phaqV@iReT<{ zf>qdS-`RxhSFdn0Ea<&(JY0k!ok3WQD^pKpqQy^SB=oKv-^35~kLY)(wuJT3c{idA z9IU&Dc_mAu_!OF0=GllrSYt0Fl-BUleyHX4aEa?otca0HE-ksc)XgkV4Z!Qu+}zxd zf0JbR zisxOfz19_|gosU>x5|P2X`Q?7$u=7TkFwdpOe5#XZmF}^US4{XWYa}V@pKp5i%#6* zjW2~<@Pm1amL(+PK-vp-d7@Apto_-Jt_(e!=#?mmv@-10!j?(2KEV+>J17GNgHj-4 zTdS?jgZSwBB0B4126K~M_4Zp0hKkLM}atuNl{WP@U!vn^da!N%kg%C6vch?|<# zwPx`_D@-r%GTu{K$Jt3o*vAtkNNu}VP}9r!pq>)PPqBCMNXMUtP&;^zFyiSKq=&jk zYRFtFvVJ7Uc-&s;l{h=2c~?UwmUV)ar;305b`h%-bN8vez3zL_Im}3gs=Us34i}13 zU4+FM-yIsI=Bv|G6ODnoFip}j`~)<1C9$UQuLZTRM*XC1$<*VzSaEfB_a7HIYetdOsL+wL*QrAV-(Bfy_67{R(7MOHQ(u3*d- zO`W&>GYY3<&g|So^VYUpNM}fM`(AhM z!3rvCRRD*Wh%zHz9zPl2cXBHB|3(^xJhD0 zMP{|%7YhO27^Ztvmh`9>N3`h+`X2A4VXSJGy6+Z5Ic zme-?|Gn7ov3OZPJ-*>GyOYX47tPrPU`AJkqYAqM1Aq#V;UNPF5u*mKc?b7Jes*GvKBr2yrR7-J$>?d(;?f#-kb=uA<6bU{w%iHY1|KunIFql1iGDqB2>o^}5p2QXZ(#S?k&jgM>~t$0=iH}X|D)vb z1zX^1u=-UB(xUe#^y7N)57VbD-`uWDavEty7%JRagxEz~t!}K7iBea0SDQx$kj{dm zqesU^3XXPoTe`cuoAWcGss8T~uMLR3;96Q^hqms>-QK1DmHt;+cR{0%k-D*M;=o(8 zu<3V0?iUq&Qo=!98%=!O5W%a(4_n1j($v z%bfu26mqk4XntO`UYS3OBCqTRy&cX?oXx<^8YwQxLNE8{rEpsGG5h(I;BMo}%EFmZ zvL8#NWqFQkp4pe~Y{XWCO&V#yQSg!A)ch=&>_lIa$!A({Mjth^v)YJ=3ui-Vp0JKI zzM`YwAakmGWDNFHJ>ThoceX*jK}o(9jceDC&1B{(m@TbzH*ot$G!)E!7^DyNu1BE* zg({ULR`q$R$bYb~L%K!NKTcv0`fz@T^`b~^2%2T{XMVbSF;_{m?{Eh>8v~>X^5q0+ zLgj5ff<+a%CmQmedWxdpZG1zrqb6Qc2{F(&J1YeA+rV#y6XolkW*>eDH-U)R)A^Rn z$e-Xbr(^cP2Dy@aYQ_1Z=%w=`GkI95h1uDe<6KSV6jynfZD|KnD7p7wiUt*LL&=cl#kp@+_r&zw5Jy!x$%xGoFRtNRyyT0T z2I^P~_LSXL9C^Vv#HPuF}SJ3!FPj5tHP&kA&7{@kcGNPkTG4eewq)fryUy~ZPAjtCRGl| z)@SDW^9Y|4`oD+%3X+|WGrEvtOxnE{{T@NIdiT%~*f+Wd+AUg>NcTWPbKx`qVAm+| zfTIaxE@gh|@|f?+23*&y)c|>6Y(gGvxqvgs?)E=W8s)Ggk=6jks|o_0{TJBt&ug<@ z}J4rEMa^>8Ou-M);AopAZC}X_I(xlp2uLSL)%2{Aw(42x16d<@Iqrt zI~5F8`mgG&RXU8U+%{{m$FzOT>GzxL31qLuR);9WhknER)XI1*r7q$qJMWs@%UE`f 
ztHYXwJMAbjrIdxc=U)l;VvkK+4xTXe7c2Kt2Y#H*;OD+GjwPaH|TV56mOOAl%4Zzy2&%ieh-{Scf+S*izPmpNr7nTwv6OA4b(YofdniztlLG_1a#Ce4g|@^4XxEKH*&cD`2Q!4)OH2*pGV@ zE<&^#q_Z00r_4{pVt1lWYV5LBH$i_i@nhFoGLhQ7I?C?TAm%i1L)>{Z^3POp^M*VI za0tYsvbH*iQ|bQHn=1AYWN(9n^Vlh1g@+xz{J0t_wf+Ztq|dkv*6)A^unGdWB)f0# zWUWYxU+O&Ig-;kbFFB4lCW)P0`{u(D+f&2<%@f@dng7BjS{pJiWkTG zhto5@C0s->JpL!wvr<(OM9Yusgh8w>b{-D~S0bOmmWDj6&yZZg12b5MX*KD5pAYjY6Pyo|8j>M! zN3n`Ke9)=g{ffH~r!8^w?Y+-vp2AVb9M>6BGrzh-N2wogH@_^(wVdd3>9O{ocL%Sz zepuciCc{+vW*QW5a%UapWE=(;E^g6M2+`j{PQ2E*#X#V^r`Pr=$@}CRjOYR=ej;XQ zwHgxyjvmh!cRN@?YA*Dqj4I(>cX54)hz%3QCh~02;!X5M>;;Y2gI15Ne-jA}>^Xmb zeyW_c!^4E005RdQN|$40>Ekt(fc2nqr{(A8e~fK*IQPRPkbGm>Qq|TLnqxY`&69w& zValXEmVVaP61D)h7{m2$I2@Izm1Xc!JomXJ{APV8)8=1@nnRSVa#3#+SYoae_Zqg! zIKZ>^xuTp&LZ#ifXh9}e_mkl`YJuCEi+q!EGSKjx=2-P0s?JSxNj?ZgmJV^gkbTw= zwg!zfG@;c^iQimqu9M5xxSk*63kKMr;rt|Q)jp#7Nm;zaHr0y7kgbh-7vm2~_g>m?l_ zAASi+dfswv?rXbQr&bpCUAi4HF1dNKZ+M$U;iBkKn@nDMl>I_mJSbc1J77fjF^6C4poE|6W{GkSBqDJAt2A z4|5-Ga6uI538_`=6_;}RzLyQnr{v6lhSf3n^?A4T-TS{A{jn~{hR&*9(iNrjj#6j1 z9)1vYnA-`z=*NI|TT^N~m&coh9Ln(RB@g4zL#L-;w1q6rH}LjNubD%)jL{{zx_t}l z6LsIDob2+>yxdK=U%uy>3N2+@Jp8v<^KE3Zp%+^PUZwg~?K`#Kqrt`>?Sc50QfIJC9xT?G1s=_}X{@6mc>-Lz}2dRb1 z)^rigro%UZ;*&G<1s*zV``ZMUaCZk7NVb=_=H$o@HWjho?Y)m&R*P-=Qr+|~zozx# z43A30O4%QR8)htkU_AR4^d9zFm^Cn=H@)2k@Lv5Q{LfbAAbZEajigx^7k9iX;EjJY zwGX|Cf2%jIwqHaqu6ElVcBi(&vxq^Ot}jU(rG~_rlBtg@4a1KRcL5NV3%{Q_ztH=@ z0SF*we)PWpTRu({$&axiev*rMqYS&=5iLSPyph6LTCJ;^s z-|49GxX%Ktxsb`rf0=ds1HynW1U>ST16#DJr_(+VZF{8KVHeorVIu&7B4Ay$*{K&` zv)j;VeU6Wh&x)nKa)NzqxsZ0^x4D+)RAj3b(kv#{&R!9g>z<(Xi2gE1@W_Xg8 zXx3k8MPRYD8Wd*6#3vsmAjE_K8&}fJpYJF5&ww);jdkgEO?90(c9n|+^&jthd!Wx$ z{9+dZUv#H8@0A_iyoYix5TSZo1Xf^u9JU)o$K%{)qM0{4fYi zVwY#%0HrsEAFq3XPa`c5Q40V=g9}CkhF_MJ7T|eL*16lm?j7?%$DuawEN3&1!6-9d z8aF^hkmW5_T|^X&rS1$mKho}!C38qPr7b3$7KW$&za|!cb-597SLd1ef*ME>u~JnC z)&@mXy$`qRV6#ljkHw#?z6BIJs~23ig%k39|3k+kTA93 zBT}TnS7Z*FQA>1Lj+6Ev=0wz*fs@nASJtos<^8LDIRD}%lPv1lB8P!sFn&5GY#0kh zo3((|D56ERx4)ePMteMg${mQB za@{4a_2`LutX8J(fUhZgDT5uh 
z%7^nm-K-$+sVOn2oQCu7Piwmm98aucH*5=wj+rOUP+@nga_qTp8A0N}pJE_#zm~N< zkRhgBc8HGfTi=L*=UKPwP+W?6lz|-SUE)^{)MTz(QPXYnvyz1tOr+LU4Y2%TyCU0+xn=!X#nnzIMji<{p+xK-2B?u5o-@R| zpN}R%+M}bNydyt~_7}efYIeSAyaeYs+6nAJjI3AOM#}__THhj56r;ZT8XWqSRuv8$ z3^Vs~Bf~lf6U;AB^nNzvwtZaJtKZ(=JOJ51yG~4|F|^cXC%x& zANiCZ;{|JeP`J?3y1CUlo3sD;`=Z~{m{rCT4OV0Oqr`0Ece0S;VR@t z0hlp3x7A@|B``dScBBE|>`7Rn4|mw3Dm4p2@ZHD7g)w~sWeb;TA_O>y=?(x|C&BKD z!E;`_e)EyvgU#gfU~HuHMwONfi~p)2;Bp@^1(f@5tD?G>`pQ7jxued>2Y_OFkj0Q{ zm=Nj!slP5t{qb(LQ7Z;zUESy)p-C9i+ySEd;u1i8=U@^)@EK7)!j%!a!l9s~B(l#2 zTY(DbL-0Y^3F%{<13WcFOcG+D(~8$pnH{XQVIqS~L`_W?(;8Z-ICoT3*R0KYHWK`q zjw7v7&d17&|7BkK54axCT}eP=g1md9cb#(@4&?5%c%#at#V6`H+jOnrlriHkeCg+j zm6LEvOZ3+kMcms<0dX1{)x@`MCS#I6j zv?xPL;ZT*--0YODO5(fG%HyQjlEuLowO3cR=NQJRmyg=-Japzlx976;A~O9%)(-T8 z&x|OWA+KfzELNgM&K9}BvenpjPqyEE$#8z6y_59PtiQZ|StKlA%=atb6haRxw>~j9 z{VDMnp7${>GW}SZXM#=z5(p#U(LX%iTv&KcURfCiS_xv^cP~F<5}4r5mOadP1IepU z!DyV-FSL6ZgvLm^tlMajVaqQ<1+0H_XGqTlov>NoFrff)ykyR*#Bav$O&t{|`XZZK zIWQ(tEzLP|1%~o}$0wt{JFWft!VV^WtK*7{+Nhe=jx^7#yk=+;5j93KooZM0OwP3vm+{p}S zvBNGpRWk#%X7N=}Tgjp=WqP4tdh=k%#SjFWbk(cr_p-E-W<05 z?7`TOh-r|EMG@3YUP)=!r?6dz&(dv=Ek9An1EMoAN21!;>s#+*gKCGsNjDJDpF+2? 
zY2@?E_a@1J_e=xSA|Y%R4~aTz=;f{VkCAZ!XWk&NG6Cu_v8d(Qf@kdn7;NchriDjT!vyHC_}$ed zDl+Y>4OVCP1%M7xdBc&yOG!X_+z(7Ii)Tb^>USVU$8bpe((SQBVmq=&DE$Gh6K4oL z@wo5f{pr2f?;p0eRA@-XjIMMH%)Z^Uc2V#6zY+odo!0Y@It^l2+(o-bau@CDPax$# zLN$U;M(mX(peRaaTGbN2QvHL409R`%%JL<&b-`f4BANINd4!?uK*bI0)rC;ho_i-m zct_Nx-+Zy(5k4>Yb;o>176u`6!v;EI+Fyt=zOmdflYDC77ubT)h?+_cnjhaz z+#eP&_%fautcQ|0Xb0gLK?@YToP_`wg|ckFM)z9{Fxl$y!Ch6p%$x|F3t6xWVnCD5 zLi^y@*wZjNuxtB628PI+w~&8qaEms3C<4a2g@e@(w^* zD{@u4H0I4gH~8@>Fg}fdi&O)m0jmj#jLIp6rw7}_pl%vt7W#$aEIJ6uLH;;siHp-+ zVJwUWJb(-=8jp&{_f3vcYpIX z1Y3`Q`ux-&g4T11i?_E68z21pszW4JMSAgrlMO~PV|0F_O?vC#e&zoTLB1AvH-|^R21TAk6m1c;OZjK-A6nsHiWTc`>* z8qm#flYa}JvX_D?Zg%SJGN=o~1DER~jE5Zt`8X`%VVcDM(i#t0u!9#G^&j zNw4+dk=@H7&QZBD0ILZ2c zz%@-$-<)@yK6Er69^N#NjjLm9%eXedKISe%ao9UY0 zu~1W29{}!?!`x{h&5YkH8h0&1`;!E{kL-2SeUZ}7IB$w|LDxauHIWLG4s01UmyA=N zbSN=vWH&qTD>cs!QSaY;Qd3^*^ImG82-tZWUz5(0G6?C}$sU204sfd-Z}A3xH3XuA z0I&f9M@e{= zb`fE3Jn)(h1)HM6+*@gO=T@x8lw+F+q(u2!An8BC?;#Sl1Io=dddnE(+EyWH03|3#y?xn4v z`-SrBJ>rm;td@LTEmK$I4scN1sU)oiIVbIM&r;~q8#|9I0H75*(vm94iBu?m&-(b+ za@%ul(JUQ;PFgL_(;Cn*5bW0KaB@PQ(ZQxkPwZgOj z%_XjmoP`!XUtoLjb)eqBoBxGqzH12B3Skftb9dmz-pe{9{Cx z|B)%E)iu+kO(r3DZGBikK}L@U_MJw^3LloQ8j`3PwbfJrVq4NT-^a~!Bl+3=Ib5ct zTZIotVh*|&Uig^BA)sU`B3OIbkd3ES+VDjE=0?ZW5o$6;L4RPN(J*`)lk}-wDPn--uYah%^2E=o)T&Ab42~=Mr)3=#&{Nf)>43#a;M~PF< zIVC8WiHQsaFJNDZ^T!!gY@=VOI!v=y_&|!z9QN@F_#u{w|eF|LSKfBqqUZRPOg3iL!jL;8`&BLXO8B)IEpCyv1h01FSA2^an7jQz@aq~|g zvD7K^7#Yf^;f?aB4hHlvQ}=DVf*@x=yw0jE8$#NYk49deYqr30VNyceZ^%B76t!rTo15&J{CS*j-@Ywp$ETK5&A6(- z2yg&E^d!!ytHwjZ^ELRM3_&%#0h)0a`hvfKur4skn4iuLmWiyuj+IR);yD!$j(~dibcLdDZ4HL;=jM-gkhc$1y+>DUimt`=EA;@~_ z$Djv%LL?f-8I4hl{Epy13ZLC#rs9+n- z5#oQ+18dV0z+oW*7WMdSAp_+PP>$aXyPLtE&0GQpDEm0F=o^XZFk`7(AOFz}c>A%?TLBw_mIf`X>A5-Z~0?SG8dtW6L1>1SEF(F#b&W@!hs6=)R57(U(+KeOO2fh>c)qF@Z?s9Rtzky^&b*7e-eOPDnjO{o`RZ5Kl0XhQ_{a5v zI}$nRI}Jt9(|aVueBu=l;;_TSS`px>jKycS= z6P~MWH;nCJr@1%m{T~=Zw}G}7m@e`eK&Ysg?L&#Se-kgEK{4hAq$2d9sLcden0o zk(p;wyrV8ZA|2)R*)q`fwrJSi37FRm+8F7stOmg`(k{?;Msi7wY~~@C4mA 
zrUq)DYM2a8_@dcCMG_1HANfZ+7=6#K~r{XB>0IorcSrF42tqRm# zrILoAM3vo7z4)Y$7Z|jx9&W+pk+TgPE@!bQx`4Q6zvjt9f~W#+_H*$NKB7RY6O`%* zB57mLz25eb^f5R$0(KSt+EVYHq+>I#_C2pj+7)ui!II!vq$YB*T8XD7D{7z2wI8S*u@EgZ{U9T63} ziz-=p*}?%2VR>=fZ|@S>#w*Of5px#w9qCKj_3|Pn-T2hbqF8uKjO!(hTl+N$ODoPNuMkavcpkFz4lWc-L5X}{JdMD`YZXXY(%Ed84s zUot5Xm0&COWw6(<_dR_yL`*T3jGoOFXwCv>MOQDDSIq5yYX*X%kQ=c|K<4vi6U zw=KSRTxr3e9;uEKE&AJBGwWH&bA@eBUIEA@W8MM-z9P(QN0TfM$y!5^B#!gXvf!U^ zn5*T6GW#%yjhWG>~yySi0ILy)t z3gx#~lNwTonvFI0w{(%htiaiT7ZJfiQ8!R7u^PePrn2f#vpBwq<_Z5_X)~(*y(s{u zAPXi2Ws3<`OM2HHZ3v8kP}<8%3VRQdI1?aU3f+85bEmtcsMEfRZ7?=(4xeoYKdmpa zH0uPYrW%@2mN)&+e-j3H;qs-FLMRrJ1H9U~v^UOKn70rp9%@UoBSsUZ88f5$zKHSx zi0}?=_u$5DOi)!TFqF<>li^W@>Hg-Ea89Y-{cncYLqNPC2ZlAiPjoHS_&8GzvZ8Mw zDw`m^YnjX@A2j#|ah-?Dc?@*xElu*t-#>6Z=h%+%A`x(iI_IIt_M;c`@Ae-0SSH;@ zmn}hX^u5dX1w&mCpS>+#U@r;w452-&mQ&vu4WY>}A8*J{r>;*tzto zr6_NC{m_)AWI@X08cvP!1!I)lndV^1!P73i{%NMcb~nuhu*~?sy!eaGjA-C(Ta5ld%!|3KA`W3fd1-C@25nnhTT<2w@w*Fy<%`+wPJiXGU z3AhF(D+^NqKMmH+Qhsv|Aq(*Q)!brScb9hJqiS|3Z1qG}ahym)5OW0vMy}1EWLE#; zG#Qz3wSH}NxfW1>xI6+Y47IqB3+S~J&)xwSEIe{4$43pssH?h9{8i9-0eIfBo(bpvhT=0^n*921^dFBTzNc?5h#b zMK?0D0dZF=b6LjQO{PdGA?zUFB1Vki+~05dbmOx_#|KAi~a zqDH!QIWYiSB*JcCihx-Y931?;?ymiBf-amIWmZt}b4ys<*P;*Pc`a)G`GNsN0HV`UEZ18( z%cWG;LWRKB*X9@EzIlfEY3_&WeL|+=hafsWF9aPh6m9^&*>0s?&hufCvf2VVZnRla z^HR$w%Tdh%)h^iqP5m}7%s>fZzTn>{EyfO-?k+vk+>nVRw83KH45``5H0Q>UCf^ez zJUV&tC1`h}4~%wBWqoA1s9Br~Wm=warh#$#U3`}zMrLtqzwp;kL;*=iFduBTkGp)e z3$Tz+m+3izL&~=pYHgx<1;g|I+dx3R!UB`E{f&{28T6?Fu#fkw*f*4px}~e`N}EI+ zd8zzhX6mStC-q~27P8bJ=;DK|CV`q={+S575d_!Ohyy+#G=s9`uv@Vi*ef@f^z{wN z^&uw%I=$9c9eUqpt@la)TNC{=fg$A8pXDCYpSCC<4@F!QHb{^!^<^gu`*y1*X>g+w zd+@P3V8{Yf_vZ|{02w@zuLmfKZ~qC!z?YYOu7m4P65L(W)mf-~vDf;+U8=pkC9I-R zX8klG(0ioIH$5s|d{u9?+mF9EfTHh9UA*%(Khqr`@&-jP{@GyZm|6DTR+TKA!zL#n zg^6!xhmriAPfe0O+@Eos5*GERxX%^7DEgr$h$?>l(Jw&wlDWUMxObf1P@WKzpbeP> za_?$tDM8HfY-IO^eVQjDp}p~Q)(0gwPl3g)WoYJWf1utgkwL&O6WSsUn!Y>y{!Y7F zibP)fH@vLndK?irMX6E%jhL@fjM)Wt=4=PK*N&D2FO2bskd^fObetA=II*TITSmO- 
z#8tgC#3(b~MYWQ|a>~6{<_C)|*wNn8zW*03jrZ88!nMO26XS-suhkJ9O;phNJIcO& zE@IhsNQac^HaepTyDOu_9aF4&%DQxTIbomr;MVGnc1_9)Vw^1H6+}UYHX?YkY%q1r zl^DR<1P)ZDUp%;g-P@32OF<<<TfQo=9ba?p=+GsB#NIwMm-{dk(U> zz54SIERz9S%du3+6#EG3?lP@?Js$IMus2%l!53U(khCrI3?h%ZVXz;a1tytdjSrS% zVdUBv_Us)LIgX|!fYNM;v_KQJl^=%%tE>rtlSYbiQ#(IXwsO8|i`7sEi-RMGwTY7p z#O8rETpvFA`I+&bnNx~XlQCJ7iMa0cYwBME)3_sgH*ek#Jc8HP!uw|4hC&G5KJ^sb z?&7LZRs@D|@*+Lw?1I7~WfIQkmPmFgKXD2K;q~rsQ>#)O z@YnNQw+)J1%5b)dE3wZGl|N+^4wf8RK2c)X#0)0m5K%=3PeDpTS@V;mHh|)_`cd(G`Zp8xpklza%*Q z7Zz_QOuVlkEPYjbs{1Vx@5m!9G2I^;i|WtAp=v38wIdh|TLNL?8#Q#TQ9p4Sw*$X< z>&`RHIA@oxBF9v#?h+j!XS1~|=7Y40IlpNVuaSP+xZUvQKuc;L$KCf*un1>;HKv*S z#|a3l`fJBAC&)oll#f0SKSK{2ZIg=ns3llw4h8AgbP#ag*N~BO9MCLHYx-^jIBmTs zPr#@sPZ(moz|)62bW0R`gk;gkvO`FzU(-7~++1xPRg;!cMJO)a@lITA^0jc$s$>Ve z0o`clMoa7n9?_655fy$2yIzh?eVqi6V3WuOextF*#$N#~v}PxJ4993o+3?|p9p5Zb zNKSCKBLC`cAp#>9TOow~X_z>~=*F~7zGcbOv5;yhy_41URIk8>O*KwTT3T9T4+(1z zG=9an8L+4NU$hF@Mo<)PSOxvOPvtPX-0-aXmlqSsCh1%aqcvpO{{0z2_o3539sN~u z)2mcxzHwMVyD{JJ)!Gfj&$=J(!}?EHM6FJ7c&)t2c`SxF0i2)xeEDVZLCe`9($EMv z6aj%{Bh7WeK6+se{!qkh%?d=Veeh>|+jv zymrTB!lY{{_n2sG_h%=?da)x3?QkFbbtFI5*<*(A1L0)_hthL@rnbG!;<=9ahSXr| zmZ*w`OxmrCXB;CZvM3*~L*0+;U;`6{+s^cy1*4BN(i|;&-8_tN-WkrOU?@fOd-jH> z%VJ1VssU0hIEt+m)g}ydoXiTFP$0%U7mTBP7}?+OA8VOS-n{7D*p_an-D1GvOE7P6 z8B8w46QeG#+h2qNL1|vIAY1~>oByvc`&V+5BXqxrbnCQUz9PHZm|*&im)-|>HS0R> z6aby+rA_9N2Q2&WYefWRh+G8mmv{L7A?&2hoq5et^*7F`B=3xaW_h2N+szWhEzz<4 z{l<4%(}I%99qprpOzOH41-AY(Hwj`4ct?s#vQ{RhdgPJ*xRYp)*a;5cP7=WH31yQh zQHCq}K}>H{l9hnN-?MOM)1_hpE)t7?z!9kN&gj=CjvfyiWtA`+coTNnakvn&6FrBE zKRC=p(;tgVY_BtgX86g?p*H??6~uNfL`*pxY=Y`pi?n33NH z2-#5*uiIqu{L{t8^)lal7MKXljevAV2EzSGrHcwlvw=uLu*j7F<>4? 
z1rd9h4UzwDt!xnoib7Z2JQSTBPT<-&4R#m#+`>3t2ID;X40bcb#za~p5MFoVRmlSB zwUxd>H^@xy<5%}o>d`#e^;g~T1-nPJ-|EGBc47=Mlow##<0LD54O>CVeAEyn{1#y`et(C##kATmV8L2LTfU&)7l0f~w1cko~Rla65$M2<4hzuKs+O zR(=AWwr*M; zq%Wwa=T`V`6@oMjQ9^=CHvEi(WoO|z;nbSU_Xcby>W8*KWmMV7`J7!4YU98uu)N&y zDYIi0HPqU7w4pP-J(PMN( z#ggm`mhlsM18r6DZi)noeVn)`BZll%Y|s!Z)weQ7LvsZx)_@JF=l`~mdpGa{o~s2s>;b2h)!~!GTw;+jf={ z5bUvNo@$&i-c}o^~ zPpYu1ZzJiNf;I>qub@TiDRmjrQfiwz(lHDt)s) zT`Qm@NM=N1;sFafmk39GL!b%M=dke^U4!Yct`Nvr|u` zt~!$^Y;>C`L<2y)(Bv5j^+Q<38`2aI6uFvPayk5^&J8GtaYuy5=E%CAFd1*QF2r#ne4BNW&S6^Acyll+L0A1zW z^hrS$W}48`zY&oJ^txHEz15eOHNFN)WUV#B0A=#gEVl=PY=K=6`y(3~#BHY}fE{&! zzjMVxY$c|<6QIbht3?%c0;5@}$I=`#ogG_g^SHdkM*r#|Hj4wKR5i>?HZ#va85O0H z1Z10_EaQsm2mxb0ZlIv8w_=iXSl3vIYYSaMPB#on?-U8SIY_Ne%RKh=-Hp3j7tSB= z$1GaURPt-f@T*<%pA7hK0=+T$hJ?qmlg68#QjcPx{j5mlAs93K0oD3`T*g#cPeD-; z;Wu0b51;Z)t=+lM3j)ZZ0{R4LgBv~XrXf<}>#uLcmyRQa*Og*Px^enIfs&e=LYC4% zi$3F~(HCtpk|u-9+oXqrt^~+8BuPH~g_sM07S*uh=V-Wl-i(>SX(qNjuTq33*pS@srwAItCRm!wlfF z6R(8trsu{iZLRfXgXT5i*Q)FVe4~7<@f3^U&#q68t#t0J>JUv?)lS(6=Gt~BrYvUd z~wEB(CcgJ2~k-~IiZ zMnnmeIfAQsUu2;%4zVU08GmTmY`_8I#HxmdEL}Gfc_m)47m0obqFSf`g3Tw;BDxG| zt-V$zj})b+tlBMZ?8RYNORWkcWv;Y-eeYZIVebK%O=J`p^;%}(Lb-EX0N45`5MEKJ zuAmy#Jx8iQXSa%`VxS=X87_xfC#zQ1=vu2=gIYa(2%T|AHZ&wJx~;lnL+uUK-S$5r z68J)^0bSH7SX2q*NfBl(OH?GnWZA|05O+^WM-9AbCLbQr4|A5F(yiuJ;N*{{Ew~!S zBT#1E*@3ouvHDqtP63=6wQkESBP1}K8wH1i#4_>!UseXlxpxu_QpqnZV4gR)ITv{G z9xMb&kW@I#SB7%p7{peESTK>V zYhL~ieaC#Q$4r~*#xojYRD^F@^h&P{F}8QDrAJo0m97f%qmWEok)vUj0z?2KCU?Qk zhp3F8)hKwVq#juvYC-NJDPc$J{u6S=NdP&&H-%${ju4whuTi{OFuE@8IxzS=qH}43 zsY5eK0~WsLa@%hr6t>nonX%V#>qvRZYJ&U>SD8QuluiS+66JuREq>UM66dQ(gvI=# zn+>V})E1pi(XGar0AUYuN4H6Ojup;!ZQHWX;E%R#cl+ILYUbMj2Kd$0w9`pM(cIRG z=9&!M_stdE^Lh27{z(CP8!fNSN`SM}Jiz(ohT26+2r3hjWO~mZb=zhn*#L#8A{3%^ z+KUbz{;(YDF3hTpzi>fAlv)b3kkGSV_Ggns!RFwK{nHb_VaWcILCdb489^^yINC4; zh`ETI)L5V1xwcS#&+kyGZp7L$C9}>IR_qk3AZ;OVU@JY&`gIWmDwLIGTa$i#!iNcP0(Vg^M1#ypcJPx!js`#S&Dd))5T_SYv2mj=B|}e=~AZ zY^f%5V}ovsJ@+5?6|K=QXRZSNFEi1V01HL*M#J)rFbXyui(D>jn*nkW4;btM 
z5f32@xs_PaR9b6MhM3k)D{xmM@`NVMUi>L zLn$221)@FR`iFp>5(EaeN{Dzn2_n=`#4sa%x~O)0T*UzOVs-vEG`&8r60lU7oBXNQ z;}^fLo%}#%Iq!6x)^je#26_!XH>qVi5k{}V>-BNxDNCDR6=Y|=ZK^it*-fC(TxpP_ zBB6~kf`z#1AFSB`rKUl^nSBgo{N$#p5h*yN% z^DRJ5@%(G4PH-VBlUmb?cR&EbB`pA2bHJ{c@Wg7xw!X^I7?uCZ*IY#A-uATyz8CI@r_0ib5X*XQ0S!D3Nh%wbbe0*f~`fs+i(7pu-@5R%#{2wei8XfJt(gg(%WpYvJMtD)_95#`~)Z3PJH+>pEc6niy}z%q@*TW-$HTy!=!$ zz9Av5m@fs0f(@zumfG5cP}m$UXiH>64W{k6`b}>34Q{->an<`aAvUu1j& zU?^{DRbYRym6bjC9s_k5d!g=Cm1o;Pdw1#5WKM{^{F_-n6YVM2T^EP6&Y11jTB=D?TWbM%v zsN}f-i%BzD202_@q+P=^1Y`%X4iUDX`m_tfVHnhWN<@^}pp|31^ZMcr!k+qX@3{CQ z=mwqKd#1Ww$kjhjiA1QRiQ9dpJf3Mdy1^YHq9QoJt`%^m;!Ax3g=fj;ar&xN5);YQ z$^U7e?ODp$_XwQvU{8E+D5#*KZ0EkW-N})STkF?hCyg01m-Sh!$4kxb-Y_~K{epQd z+R1UT><1t2!gPFA~#ZHSXJ8~2x(+}XMTyKhdr(xv2_ zur-T`9N2Fq_~*IBA?O{z_SArw*aV;fGY~;mb75mi)fL9QiCA(y*o8X+$6alps4>-)Lw+Pe`Z*+4~uco|`fj$e7-3z?%AyiD6j z*^m2|-=&6sh7u|osSjQQ- zFI3$d%rvQ(E8eHyY)Yxz?>Jq9zE`HOay}kmodeC9?)NBe*NazOs}S&aI5#%YWf^6b-%KsPt#*X|1= z{A*~5>@kB!^Oz4&FcOKDIGk452~ZD>c0M2!xON-Jk@z)|{q`J{(nna|uo})iw*GGS zTe_Qhg;%o;pEa+jkMz44YTEZ?KleVre#?BxD|RD(>7w6ifGZ!H`?`Rb!}5cro^Q*s zCmsY|ed=h{U?yG|cQnO2?d$v-gd6VpOrzNInWi{^h+oYHZ2yHAzxgR5W?8tC9pDmV z5_htArIIR41hz#rh;w3u>*>MYYH_F;UZfo@mOWgfS?LU&)!>zm#}R$Qn8fTnjGG@lo1F5X930)J_xQ`#E5(kF$5Tb)cb z@6GpZtQVvkmgqu#V@^{v$?7|gK_Ji#&jyzpyw-&{3Z>hI=GJ-P-1#`s-#MDZmwb$Z zyBEy*vf<+Zu#^8aCJ1kKr`LafYda~lUT8Z+XfQy8h68ZU;9qwwUi2u!fd-*hyDVGr zp{aG2lB#yzFBTkMGKOUdH}qt=^(Gc zE=_W)#id*z$s}*X6JA;18^Rf1^?c1>v{a3&jY&*?*a7T3pTX;*px$3$P3SI0x*g$C!xAMx+`F(HF)LxYQ6~yb+MNV zfl*N?01xbr^?@N3^Xk}z!;hZ9-0i`3?O0U_q9?Mcw}4ge{{o82+?N|+kSI@&CHK#M zRD8;K$Y2+GAWtPx;X=HuZ#c|S^N6o9T=dwa?i6xcQLxG6RKZN=il^Hr=5%4Es&-TR zNp)8&RiB};*hW)uAXwD!Q!qeoHWV|NhaJKQ_H zOnbpU^^0Hhub&+GW!j94ez%##Me+=Y#e$LP$8x+gKkJzrcj5QzWGXoag)Z&m1lEnrZSb&^Nd)fED)ouTJ2shwLGD7M3d7jBh zump`ZHz%4mNDd39I1Uj6&1yZFgssXxe6$UD!cd?{t}hm!D>7Ini zmv`Px_*Ru1}N$(r)rlcA+A+|#W==}#4sj`hw=uS=s6e#7(#)Hh`#7+rY?;sa0 z_OhDQyH9q|7iM&#=Est%?^96^WmPyw&O?Gwy@2z$Cw31}IbO=$WVm*ldYTr#BeOp1 
z_P67fZ8knkWt5DT@)+u^*l`hTeZ1G_B~z-(`@d$J!PAN4R{e(i1`q$EktfNzfsz+= zUjgZp_^_+y@njn8CBdO+0BFCJD^u0hyWPmcz*aUrHE#8GS^WtChD=pz?Yu^%Z{C2{ zd-IL^N9W!gXCAz~Azhf_x)w!dpqo(B-rg`4GSIfOiG~A+{k`+Z=P&mu$#Rqj@Vf79 zU3&uQIi?OCa9hIfN%-tKGW!|eIS3vRD4Y-rV)ZlZYqgQUFY~i3zmpF|%#OMe{B()( zjI}FQ*xW2qfLX;+3%yDJKulnLECL@Sc54QQwNfT{^iHK_tr2X z6#@>oqJj)?{$D?8Y^|V=ky%Ww$gqUg`G}GtmKCR$+(zOO)V$ziv6Z5P21Mn^l6k zZyxmdMM7Ig8{b*OIV5unFJ%sVmvBuR5Z+t9`b;pY2|dkaSGJZfbmWKh430*G6SL3y zj|UJ@K_Kdo2U6x!o7DLdq{7*BJ^U;L>4<8qYF8QnA0@$lXc(NmXK+$3#o)0r%GI;f zG6Oc{@QyM!wj-e$n+zk+8CKZ=^F#1_079$#?={)noeT<&u*b_YK{GAbEKyAx$sj_U z65<}qKc!$P!+{aHW$Vs4`i1t{qpu)~0h5`jhf1bpb<4m~Bp1ie+)uAh5TwMqU_uo_ zKpSUQcr)i9Z`7?plJkUqk#JS5fK9s;%>K_+cq~o?3K)X~T*9ZCAZ%qLR0FeE=0gWf z<_l+st_Js(EJlpDZP5Wp2NKfq@8k%mpMPPtT8ms7szQ)nx#h1O_CPvF3)8(hBuoel z@CS(luk`{Y!Jv#VeKU%nn4awcWF!)YBKGpjk4@f>z{)HrY1rY@|1Xp>Of=MHhU=`Y z98m^**I$=2?cyp)ck8k(h$P-XY&=KmA*F6G!G}#SuN3wlJ>xIIFEs4Hd`)0+vkjM? z7dr*!?;x#)xHTM;asi322=$5BWj(hz^Lqi%CkA~VUfol|_(X+pzGBnzEg;so#nH&f z3!fnxg|5;}YN`qW%M?y$sIBoO7^f~3<>?jNAhoV8d1Jvc6{_QVhz^v;mqbp1e$73D zVx^|Ew{$YKTim2l3hK8Gn-Camu4$#^D<>=Mf{v^x&!p<0f>P}{Ik;{Tq4|@Ih!vlI zK3fBMIPi4;M;^{^jnA6tpztcnwQ5yF1TP5YE{0yd0Ydta-A%%YLPirr+m1Mm;gdgS z-+vcjqIsX{g5qzpz}@-Js(`(6Ni`>~Jc)?XN8pI6@lwe-X@{Vi3I_4h!w2X!w~e*M z8uNG$MouG4XRZPO?=6N~3n>)i{3lVUELtYP4=sFow0Lro=yn@ccaL?IxHq?XxBu=e%IFt&YTOzrw)8AWdN<>F5{#o;1Wogg(1UjH z@qxvwYs;5X=N7s&z-WVYOe@t&jTm$G`sGEbh4>Qqs~5FFc!qs-Ov3ZdYl>Bd{#(-d zSmWt_4>KuP9!{oQe%#N3#@h(9nnDC6P4-9MlV}n#` z09^msUD2_9?b6Znx7A>m#{oOE(i_(f)%Of_@QzvC)JoFky=@-$_yE_>ldI$P0$_9a z4buOwkA0Sjay*C^++wPuKppxFiE(AxOHK+m6r>adCo85w+Ds_iJ@Nr;lAF}Z| zvBVxdVT4606cTH(*a0QmRnUklNoq7G#M3Fm)+iBvstKM1#49v6)o0Y1Z+jXp5doa) z;?n0SocqLZR3tMM`|nk8WG^@U)3d#9hMWPCriR$ufz)b(kQqy$dQ!f;KTGt}h|SGh z$+u5+>WT7jAT9TM7yDg=2^J*Xjg*$p8>=amkl8@!35$o#Q)zc?R$nDSwEH5|B876otxkyMA;IM{phF(oo0 zw&Gt_v$vXD`tz@53pohfXvEC1Sl^zl1V)#F-MN=X^Q)YOur4rcUKFvW`rvd`@rfnkJ=)jd>(Rc;(vF%HwM8-KcBY#D%g zA+X++5bCG_p28pu2IwB+m(dN^8uU)K&AtTq%t&@G&-ITaz`k}ye*5}gibNR3`ro;k 
z(1Ld>ce-rbc&k_`Hp)yq8UG4Cb0KE~<)9BA>_NL1_&fry4l>PHxS$X6Pu6GEIeC{* z_!vw=zGWYMN znD&*D#S6yXOmt2JK6v1fedFzMaySsz!6bvkv97}5@q6Xbbbk>!PKc^=E*S6Wg(P3W zfN3Z#voshmy^stbXA0Nm47d}j0d+7KB*(U^OU3scT*vjbhyLNmE(}ip zW}tbw2XP#Tbx2YF9+tbFn&y9E_W3AN!aYJpAX8zqpAu?c*&FxfRJXZ6U zA2#Uy1)ayQYrJliwsq4%mjTU40a>Zh6e0*6uzP|(wau5`v&KkR6{(4=x;$4M912HU3 z_S=lKXz<(ifZq&z|k7``*xvn%#T9E5OSYiXBFi; zRj?y~U150+Tv(h;U*>z=T`IqSTT!Og>6uE`p^i1uy36w;!1~{@X8&?WF!XP=Xmo#j zp9*1{!0jHuZIbo|-zagY_@ga@j<9aO^T#&W(IFYxh=po+vf|%(kH2ibpGv`Tp9(%w zd5~5mf$J%GTe@fK(1#C~fpu#aBb-BxO0b&yxjf23KJ_=V7^QwYdLUdj&Mw8Xk*afB zeN9Ja&66w|pPPUQxwEKsf*2K1XvB8MK0HVfZ2`8I>Hg;tQ9|Zw81yBrB=y*rx_Qr! zK_)g@5H8V==!taX!!D(w>hK8W3OHS`qd3FLM~eYC1#pr-(-UPLZJk($AD$-iQGsM_Ze-8e7?uJ|S&b`Wka68_$qjNNm@Nd*yZ6a5v_3%Y}C#4A-|-7Z>@XzQ_JjE4k28gkB`X>KOpA^BwrZD~^mGi{=ov#t2p9IS$eP&3Rk*X@u3KY<1>p$M(AyVe2kG7#YIkbv(QK$k;7#GV)1ygLJeHUJKi+kQt%iink z)x#-d&UOZ?9QEMbPdXrl`P07T@|4snq>5gJJ#r#qrvr>&^F7Vt04o|s9t*bO77LeV zh$oeBN9@=?@Vb9_Y_2a@Tv`JbWWtEx}qvTSevq6t_ zDX9V?6P+=%Z0Ty?WP+xY zm;kyU290^)N(K$|8MI|Cj>0PFc_)fqBX4(Fb)rT}vO*8Bb4~c1PjzNS1U}PLyrTTx zC4u?jXi?_Pn=}HC{qau8Z&rV|zdb+JRl24klz}aDOgoJzBrC?M39V^%A*0Q75qz&Z ztI$*(Pz}!1!;B>G;a_y8?11||22vi7exp+Hua5x66ihQ`XDAPL9jMLD2)W?pIogsM zavqX!1g+b6m|*aqYu9Ip@tl7)E-2$gnb5MF zQ*$KO+m5}^)l-(22Y@R4@b8>HWTGLd`t`@%L-mT5j$M5rrnM0s5cQneGC>r-JuNk> zJ6KH!;YxX!s2-|-wEN7Mc1Rb6Pi4kZ4WlfG-NTgK5ClRv>t&?)I3|G|{)zq7ST;2O z4rmg@Vk$Ic_OXq2X;XD@2u(q)eTFi!?iBr-cd`RXogXGf_EvbK3j|oRlLx8RVczt99 z@igfG^b85%5_+PI+9VMlH0S3iB)+PJA;ontEFc(jVOFEX@s5G?pMw4cQ2*-_J|7+< z>OUNVyICgixPM9SCSiQ&FVExTKDON3wy1b-D|fQ5RkVl%s3UR%AvBfpj2o<1s7OXW zmhtW*2Q?aYWsA!#*At8@c7V_|#=${P8Ik+7Ru~i^mBgt6 z9HeV6Wve!KY{&lboAw7!338?n=(>(QB`3;}jUAhnqmwS);}~sZZ^xmHyfu|P#7I-<49Sd)R&8Img}-fZ5=suAW(ISL6um!TsKgy$Xl`ASa%;Z+W6Z! 
zLC0MnYgVxboIe4p&a)@*o=_rY2t@_SQ)v&iXBju;S!=V`TXB()IF&<2cfM>Ah<=c# zH$w>jY!B(B2Q|{R-_-)Y40k{98=h)SW7(-6fAS$D$6E&|CT-q*p^E4e1TEV%oeXW5 zbwF3oiq6Zis*kC4rsX*&B~nSYF6^U_!Y`3zt#?JR}1)P&J@1pd|-ZvjViMw&)4xQy_ZbrT;s8|-yf!hJ-D z4ag`Km%?6N=)VQ$(9jHvL&2SAS zn{dVNiN4^aE{^O2(u|4z&VjAAPI|D9p`pp<1~Y|3F|;K>xngtQS`lQD*pU)1YyXhv zN7mIJw8O&8_S5~BxV6$FxQSLStuP|4g2`u0qaSS4i{Vs zS338f+`8I7 zo}Nc$1n^(qOtQGKpfcO=dCCyXbqvPFLMx}xsE>Zd&@h85Axl-Ns7pQ{RBe&QtHaDw zUd_~jCqZKOXCwS=AW6~7wK)Dmo8YnHYzcrS9ex0?2;TWVt#s&!+|wf`VP-1`jIJpV zJv;uJy!kg9lRvz^p}RFW{W6RP=3ADI#{L|ZUCzjA)r_MpMl58gUZS9Su<8<6wVQ5z zgtzWVDuf>!r4U{ESPM&c!=ge!*n^DVGLls!j|p2tBL_XK2Qtq>Oc?OYv}S#{tR1+w zn$U^NzQ#n7uG)bG5GZEvS_eMY6MBlL1Tv#hlzi&~cKg^Oqv3`W@4ud^|AycQ!F4i} z#+SYP`ykyGNj%(i8zHb|*jr;|`BflmeG}>UC|&=t{yvc3|AS>Y#SSp-PQQa2KaJZ| zBP94TD&|#NHqA`33f`2Cdd9;9kCY(Ptd<}}2Np!o3GU<&3UXb2#h1!jA~!ZNi-IfZ zAbXq^frvJ%1B^&lZ473SbOSyfc5V*1$lLUbz#&@YT&AN9=h-?QtC_|KM^qVt5y=6- zRgWF6BYOj3crQdqFX9 z={X6Bt6;!#1R))Q=@{+RMc9Eo58EW;ec0S8ZD7a=(qa4mzh{ZOokS&A*yq3`fhjw9 znsGb=FFls1u=2D!id{}kDfZ0GvTpY&e>KELN=?^m4wgrqt^(CQ94@EjRTkQ;w(?Xm zz7T)k2CBob^))DUs^oLTE84SG5-qNU>WN0z6Obt#3Jp^_oPU`%y+3YJaXl_^o68?4qg@)+{_SqMCejgtfv-YQ{Xli|Qu3!A7U zAg@8}2T-bJV7w;&E3XcTGyR+P4NAe^bG#+vH%1)@N?smWJa?A;4sr+ch+#S5H|k8y z!vGa1#BXj)F-hE$La1tb>En#q>udlQ*ygI~$tzT)N@Or6G6R~i9&O?lbjy{1&vT;7 zC17qehvZ~X2b7ccn{C%FX%(azNSyOwgQ?t@loMj8YU7IUwlzEr;h7>>3m4LfEB9Yu zV#T`#Uk3O~{9Si@6K3i!fE?j~M4_hZyMFQdz5#}@!@gu2Y#hf1&lB#g5H*dhTayZFx2+b zw)>8hI#D|jRt+?Y*YNzQWTb126rwv^1^gFe!B-8#&81nchl5%Ln@f#Ahk^T83lh4= zd3kQz@NFlt66j=;uzT3x`eS3Sgc9kOq7z5K2(cP(d<$T&=Ld}b_CJ5glo41p1kgSt z0d%<%yqB=>0oH_&lQFM;;p_7dwE#4MUFJ`*BmAEc%_Fj!15|zF655XdX@O8=f7014 z2x&CGgi!sZ#$dS%*ng~lHd&)mFRA|NaT*LJl|E;r!Es%jg+J3toTqKRl&6+p(##7r zzJ5`?LefjwiI5jpydABd9_cda%q`bivKrKVZi~IfFS^qD0q{_r_HcJf z#M%f8xO?!qtvun}IUk*zZM&RPx^Ei?cRTB4&rOxd5pi9%`3>Rxw^>j*jHrV#FI8eS zBhQN|qcq!52e|&`lv4fYN6)XG}u0mBs-RF#2?^ zi4mhVSzncDDw>TepJ9V>L|M8n?lKg{#xGc}Yp0((A>%U{!WGxf4!2kkIsO7i2q$Zx zO6>iv5tiMIEP?~Xx<~XYrbE^2O@uBsq0H*+n($(fhXwHKIbZ+e-(#2GXc}q|?>-rL 
ze*HT}$fx=>QDt$`y^dvVWooZ}^HBjKT>K=60|wTwaiKkGA8pZD9Lb8v*!vKmj<}_2k<||NniA z!=00)YUMS|vk&26@?^PyYJ>TLs{^09U`4^o`ILDoQ1)p(uc$52;KQJ_apw(AJjhN-=sb!|^B*@+PU#RM*O=x(J zO@T|RbZMAbrvMW>x;@QE)V7lYreHz?_OAc*pFz;%5zO!9)`8e*{uo%QTi(v^|6PR$ zj_ZqzKn(wrf{Z5c$|ln1Z~DCK=zeh!gH#2v`wTY3U%1Kv4kCEerdW>u*W~waXFh~D@C1b$x+x&&dGgNxnvKHadTcig#QtQ}|M1l`#5mP} zgn9_EVuG+=!*h9*YZH0>uNw|V1K^^pkJ+{Ue(#zgcERRTg_F(zpEVG(L^Hsj*$pe{ zW!w6AjoBK0GCws58;i*ktV-?m&rau_Z2#2uX#_n%#)Ov-KB68?ukuI!M08g1JD zUVp;Cy>jcHPcx3A_WUv(<^|_Nakm?V5DcMR(0~4W>m?WO%Vl^+zt@fY+>gce;?H}# zxKuBAsDZk((2qHs^V_!-)2|=SE{(h#5Hzp9Bor6;+6Xp;5$88nZ!!toDuc+Y>l4U=9&Xe6$e;fD(WkahD&K8BsCucsQ8);km)xkwkIep8q-*?DyJX~XV@3+#ID<)GxC5|H>j*Btew%KT0<1D6|3Trl3 z@d?ARmK=W@6e4$jz{j~JOQS-%X2pM8=&1Ubh*HpH@2s2-(@yLw~k^EVmC zuXDSEF#hb*g={{2)=p*S^>MpceK6R$!D~xHwfw8wq_;eRlslw4N9-@x@Am(%F)<7; zw%(@<{X%Q`0v~|C)FFd#=U|aV+aStD7|&3(ZRm#&Yp_+t4^{?oxXyAijpoSs#5&gS zDmc#8Da?G9Dc+iy;;#JVtDRRD&r@bQI-49-#W1BHM4sG~R8R@sOp@oCX3_d`omt6o znFYm_1@iDnyF)=FyIrqIj!KSM*S5^6mD+rwenvMVoS0=T86|Z*ea?DM_OQLtX1jeO zrQ0c_J_{CU9qc9UQ$cr@f@?jaMfSo1d62Pal31sn+N6bLfd!4wz(bmo)b*yB-Ckw& zSz`8{(L+^SF4Wl-ECYK*w+9%{jjWmVSheOWs5e=XH&B}d>-RarfgbU^p{>z|;c|yA zpq_4RQ0+?Ts?Kf*SJ=>Az6zA)I%Czc#aty<<0ogMTa}J9*zmm9+8c66%Jq`2WUJy4 zCA(t=hT|tTzLPEtCS0M5{MM$L7sgZ~E<4rj_OEvn7+-WhAjV@77O3N3Ir}>={f_ zH3}w6xz$(ZZrPFrr}b~rAOCA@4U3RKsy`ppNNa-67MRaJlAOVEMH=P}NqWcHrBtBB za0Pt=U4^ZUIO{qXWRQF@VYJ0Ee9zI+2ESB}&g>!DNN9!W)p60_UH})CWz+LZRz$G{ zW^NIe$;6AnUt`Y8!m?7&**6Gsin~uB-7gWRQRj#Z8k+HJa`iFda=~+B zd8D1$R5TpX6}tJ|7-9~L=e}C$cZ*Be!oXp=A z;@q7p6BR6F1JSAZS9~Y3&FO#x#&-xpGDo77Gj=FuP9Or0%%x&%IbnQA+!<~Oog4?gj-xk9I@P}G4>M*}&P^O-Nr z<#z>}w!C_-8Ff0`*-<%6CDyqHS_3~#d=o+@gLxz#H0O4q+!`=qtG`xg|0GE7=$$jL zVfyG1YA_b((A)O^PcJhj0OK-5X&qf}z7=k?Ut^M(DFgeo8y<_twM{<;AtCc+|7 zEB2+uooh@bX1Ogf*Wn+nR8t?#twsL)wb&WQT6pU@_pwoNTuPhp39Gknsp_(6QjOdE zgMbNoglPduTp7)8?bezB9KG<@NkHfdRwUm~$23D~57T<*^kx{v{Y*^f-556{cUJWx}@W3Ez;_=Sc zH$e_nP1v>Tt$(Vx598xvXXFpVs7JBFA5<#nZ?M#0FaZhq$0tdL6WTy%&ZU|S1V2Xz 
z%s8Dq#sbTrMZJ<`5$G2QZu(bzF#_Zlj@I`69HY(V0dtcjax{!e0y7zk`3E=?6~2At zHY+0%>X$woC?4wDNcKHeJ_K_FHU|B&-g7!n{CRE?Go-Rd=$EdN$|~cF*E-%PXbH?G zONladpij14vzAo=u6`Z$wkuzKAwS_EnpL|fukZ94#P`}|8x$cC3uc?)J;Ge1XCFx3 zxA1(2_O2?oh*>M8)9*K-&OFRxOU@HE(9fIp+{jsscFhDRrA|t3iHId&ue$!R7GP#^zUw7R8sQfdyhr!?b-Q&1cGX_^j$*sb_vww4 zERqtjkbp(5j@f8{!9Zf(=HeQ@TR7GkY<^XYuhlaC0O9^!eSYnyxAQaqQR-z}m8d#k zP*9~Tdh%+5G)M_ip;p(Vlhhd`(Mr6UBl_gFT?gtPr-F+tbhH`P)G$7o7j(ubi)?DQ z3a$yDlOX4>Aj%Y$*a$X9+{am+RZQ?D2%GQ-he|Wl25>pmgv|w=N^BG-ZY-v+2p6u? zCI*{2*dQ_T+*Zj(Un?-F>SqBi>MK1$rwDruCl`C+*3H+oQFP@(sgRy5#D(mC^@M;* z8)G&JRm9F!-3%M}R8jSPe&A`h8=9O3M@6WjwG#g3`rDSLC75SFZ5+Ol-z+w#ydbli zEx44Q8UR>wSFVT3_e^GHbn9Z(VW7IV1QU2O?PrDPl&%NF3)LTFT;pGG(BB-wkAxAhNsk(eN{b zb9R-#gaZKm!ObZ`8)6rJw?_pfa`u)#ZLhT-(d^7XI)`6}ZW;ZZtCJ?&n)aXwDlua7 zyd6$24=X_z=0?pVbuieZ=Gy2?L=bgQ+O*CF1S7BF!OASL>1B}G#5<4lBZ7{knmCZF zVJ#|J90nGmCxMDuFW|oLQ!1Jg6CrNZB86o^C5)Xfaf=PnL*FhT$Fa&@=Dl_Twn;;6 z$iVW_d<^I&ZE#{x|BZIJWGXC+X+COI$#P>M65=38Hg%?;%$7Dr13%vW&3D#|VT(Qr zIjlvpAB-;#Qn{`^tRH@V1+)Q}6frmQnasLQCfvK9cd1)g9G|^+7p|9Lt?27@ercMW z8=>p6valOgfvu>x56Hk!1uETVu3=d3q(#28SQRUpM^L3FCO&KXC`P;5`W;K7a*_1E zw{h6wi7db^x?V+C4ewl)MBmgJ(hJ4djbeRAtKAwj7J(lK|96FhK>$U`g@l0F9+6}p z44)Et6QKmhOf^@SXho_cB`{Pb(Y3#G=<%Vrz-IHpBDirO2nsQ5Z&_VfC6DVY4fU%m znQkzOm6+$mNP4Q(v-BL*?+s@;+;!Ysm31#qN+mxrCLdiNw}0-LDuMVcV2XRnWK=C&g#Jb%my$seRZYDv~Sr&TZBH zc3LVDU2U3CL!1=-cVeq6PsgfpxRB4*ikl0Rn3^Dux8KTH%2;}BKQIcV2vvA(U1=Q< z=-SREirrp+rGb7+2FpJE@IZhtw6dpj1l`Rx!%v0YkD~@I&&ij_t zx34e|zEq{Pn;p23Fm{DA9q}#YYo@~07ORz_d{yW^qBD&?y%eC9i9M6K4?Nmay%IuCL zeWATqrs`odYW<)Y0XuWF0d2Hq;vQM)!k$z6jUe2aAd(QZbViV)U`MYSh1nWnc)rLq zS?ll%`K%kGLa$e3FrP-);X89-;RsyNC%N9*d*BSlxw98Yt=x@Vw3A$iOA78BL8}Si z35_`bYOx||hQLNbLTC_o7K2S*dtybSo$Cdlm%?X43!zmsdv`q{e%waRtUyB`o=_a* zZZJV@5@fUSd6cec=p)oioB4m6*UoeocKIUSCcar;eJg1>wVAo{p387$J+w_>W~3;m3vWzTArS<2&tcG|IKIjWnyb|MYh9^|{w9x2?@? 
z`0L&iQxwP6hQ;2q#8pEms0BlHmOZZ zMPq{9I|nj}5GU+h(a37kz)XPlVvP*lq=&#q^i$ig~Z>KagiUR$op@hmBe_LB?kXWII$aXn^Kl z>yk4}S=2KP@j86EPQHGJvsBq!IY3(lYa#5b=_BX;&!((c!~7Fu1G*{x5!Sl(D>N~&XO6c6!WM97rlih1?_dH=w(+NN(TZ$Wp1V^x7lS1xix?^L?XDm!vCSP$w-kFZ5RdLc|%gu#a$as2ZAUGiz$q^!_d z;7F4N9j&`uZR?6COBUwZFhT*SIxCz{cl+WrB-R@qv2|@Fk*IrPI3>t=!Os#N`qdJ> zY+sD)bh1w`j!ki*h)0TWnmP(>>Yvly`zM&PZExk3&k%yei^3AAhT9Xc+drX?D0iK? zw(#b&4!kV=dS87jd&}>vY@6EUwEnDIM1oupvJI8IVd)%XJtle9r*OPsAW~1Dmw7M|7TJSXUgx=OErT~wEx2`{L=~sm6 z@kP~__dcf6{cn?n(qyT$KxNQN`NY~Bvr$Ns1qjYKp)L@)J_YFVg6xInE6^&?|I!@( zFVliXvNS|lFuyi%BrC$VfpWp`!*8{4E1FM^7P&f94X!KCwU+Yl!{ydiPe!Xd6TIR1 z4c9|MukN(^9R%<${xGRC2~?DO+G!_Vu;XJ3$7rjy9i*9Mq`9$lnnHCMn`6~#D~L6N z$TF~KNEA#8*0}L)x?dK%b7Yd0#BPXx9Lu8QigKpDvP*8t)5U%Xo!Q_1~pOqCbPSp zA`2OwY5P+*{jMAZD(T0yai!Jc*1(NRs?$LBjRnrP^S$@1R$MG9CC|gQQlel-KGe(M zoAofNZ!G&sE~JVcU#u6=)V&9d?(QeC&!(x3C^zK7+5;FXxG^`2s; zz^Iqa2Ff^Ir2>=-qZ6FK)ZBv6k6K z0ZV!Dio3R3W`fK}$*vcgp}kiclN7k-_RBxqa_?f`i5FW%E@t_bq<3|HWvt1F5>ep9 z3I251a&q><;%uzYuG+%Wf*rM0z40Y+yH$p3h5Bv|EVeWhIW$jao&4%*nq%_uBF1(& zMA0RbB5+;tc)=tqK{9ZG#vyMyOU(5C__tHmEDmkXFH1(hRqo+-#hOQUTg{(aTfd}i zU-$ac;|HW*?Skb&tyzO7su_)QT4mjC7R2OwqR67)K)cvwzJsehi~N^&rqZa{pG^1) zek8Z2Bo*6--JOLF#-jkgQv!SlA}&69XZoIriHW?kbN-#>(UckBRMvnv!W7KwO%MZO z+5S4ga9~wi&3>=sLH$_0gZjHtO->bx=btLvzki3CJA#f%5te1Gi05dmST-Y~tok2fj_jg6*~#|otHPIPw+9nd&lDUA_MO$$^U z`1%Bp`+A}>D?EByVcj%e7)CSqnMgRDNrgZRlbQ?zL#A_Q^ggx<76~`-7w{PwzVh(v zj!GCPcX>;^PF+Qi_hLQ!%7#TgB3AIEDqdTa6mM>NE&IfvTSEVl>*sFjet&x)dtu2U zRLfRC_*>{@WrMZ1`s|??Pt%;au1;0Y+*cg=;^#`jK9dPHQLa87g-lL` z0pX{(qKQxW`!^ilR1=)X#yZVOc+Dr<9QAMb@c3T7E`ejHBHX6iu2U@cu4Y%0i?tDh z358Ge7Ajb8f^DCM_W2TN_kP(7`nHWe=J>u-btDl|2-8IR)SIDJ-X`9JX#baQt9PtB z^cNrd$+_MbYqAj3k^MdOqvMwusy;iq;}0G%(qz2#y?^@ngVmLy>%qJ!UMqzFKS`}h z%4kKY^qf))kTHpPoOT#ITp3rug^Wy%lZ7$Xc9)aDuoCUJ-r8xbURRle^ zQS$KQ5&YLll9<=GwcYg6_^t5#yt>_gkNWIG;S(pMhuS!mqd)^ZFa%4e4V4pP5NpW? zP(!eq9bNLAnfUebJa<9qZD^q4gZpX5$R}`f3zc>SBbBx^>L^V{Jugkh15GJ-igga^ zSLI2+4SlekCWAyrrEPxkt?=tpg**6H^^U#8y-VbYluT1=%j5^YyI;hUgUw*5K8wp! 
ziQO~W3rZ`Ou*&H#Ma$yBddCZ-QCGgA5}TH08{cyjNGm3i(GI4v3Mc@a=q{&kt|Z?p+!2Z}RkXZEey- zr7qoqH(Q*T$F@443eVT<+3js;)y&{gpJd9tvk%lN30DL7ThF#0trvAX{h?);5|wtD zMgZ+@BPxmKCt!m0=#*L(H=Mqx&fjH8O=}I)YIf)jbBz8_dN9rLv^rYAzb?hk#Fz9< zXXHz`9)az!T+6ppp?crgI0{bmbo;a=ai z;hl&O5`+j6y+rgNgfIjVogJM~5;fXjqSt58E_)cPL^a- zf`c#4+?vVUWJhTK{}U`RUADbvAz@X|LzKe)B>kWao2t83TtX!eyppsyfa^zENjdUG zd~tDU9LOH>yrvZ&z6c@bAaPK(Lisp^>E!O%XSyB)mCleZ@7t)Ig9ZIvqlV#bME%{1 z!Xuh->p$6a7SPT9)m&mD;9Hgf2RP03kw;i68A^=z+f6HQuVa|Z>_!E3z#i?ybmt$* zfPr~muFj_Kz6PuVqz4!iiHz?+uKak*4Bz|r?@CEgX5bDbONncr9d%OJRTNh4P7gxs zn(8zXq0XtkAvGp5+I5XpiZrIKIybQBr2f+3d_Af48`>+|^S$NDfr@a4>e{~h*0gZG z$9mle!zXZu-JJmu*A^AnX-{tUa;roFv_z=549AM1-3&^Nq@hxdQ|y;f@pnU^tBk8w z4mCtzGZGrHVW|&y2HsPIv_(?B>M z7R@4bQuz%Lm1UA2%->nLI4-u`C+nqfoh{$u!@o}{`9`dmq8MLclb4ywy&eP(`q9wX zOIkJ3JjP&4tnHy@x70l|)tSkn2k)#5k07TNP0JMD|bHz08hY0`$OGvj<4O?mF)w4>vzm2IoST_EovG~gv#u)bH| znBw~~AHw*T*62BlW@PW8iu2qU=o}#B?+MvI8e^XToiwuC=|MPeULQ?1J3^tPoM|^d zA0wDK+>iY>ED2GM>u!AljyPcjvX9}2GhWA{uX8+k)4Q;ZxbAbe{x$PI!BOOf2@@g3 zUm^OR6t8^<@;B_C6#pHh_%d3uGEUAvU77;Ge<30(3sP(q^ZGS&@k-g}y&KB&Pu}xJ z+$tXCEVMv*I;RLy-oDH>oS%7z)61&!suW7hbOF7*Y|)e&w6C7AtbgHVF#q&WkyhQ> z^xNT)Dy%*f78V*QqikY|s_M=@BNKL?6i;lh=SCJ5CNF1uQMT)HA}tsV!F}Ie2%8{* zs1v?GY9>(`It%_}VTv~mK<-)`DrFtHa)C#j_tT%r{5O)%i%khlp}Fr)3%A8xLs<6~3k(VsXOn$M!yHAcz&b=Ia@mlAQqA9mknAXT zTnV&bE3LEPDvDO6EY8f8J;z2@qeb^9k63mvYE`s!olRSpEorWkj}u%#-a_Nv|d6ML~g<7|q=tYm#sg8!0P0)bD zm1W9JL!o{zw@eFW`n&yN;1Mn^Qvti)(zJfu#}*p*Qvp11@QsAV1@ zkvAvcC1PD1@5MHA?9$>sMHHVg7tc*gfy-hb{~()H3W|*9?aq3WK%L)m7T!*wP?yc$l)_*&-zb{P1cU_oofC4ZV!cPHd~Av&{Wm<&GQona$3P&yGgP zk0d=}XU_Hw$7ZSVK@-VHLBjKCFoW?XLe`Ws^TLeEWh-h6$iIzoqA!FQ|& zLC-Dt0R+!06&{3(lb=tWWH>+s=SwiwpyXJEO&PO;O2ge>xzbCP(}>>^0s@DLETPii zR-bB-WQUq-B`2d>$SDruQ*Z5SXG9jqLE;-CvHINz39q^ZG*?%$GUB6g07~pE$xFku z7UUcHdZ!P92Ij`5Xj!D$f<9-hPRHpO5WM`BA#~Txk&|AQp`Y1#G5OgDw_H-MifyObS*LyE~7jm6BD^> z(2XF^HvFh%-mye6DW69&2uaD+(N&I1H*_}wYkWO?_M zdl6Tea0i)Xc&9_C^J5L!FB&+X6&R(k6>c;ts!mUEZ4k}l@ug!i+S?Q2JYFXOw5qN- 
zV0xEd*7;a@YOYnn9Cv<7O{1MrBpwgWYsTt3m``&e zJ6FXo7MFO(ey5_uqMfYg@A})uVr5*y1r^ls850%>P`zO%OF}i-=@!S2JC*~!3^x-5 zE*UwT&m8Lm8N3n5=;O@|8iF?qkI%wL+4L@um0^8eJ!=5#v?_<7>0%Cd2pdR%z32h@ zBmF<|JY%u|TpBKUaA`n87I*IzsENsEik=K>PzPf3cb850^OTnCNkf&c9jA05cGEPq+yr_hZKX? z6k>Ko*RM11W=_EQG*`#LkIv^z-H5_)HLcm4E+R@Vd_JpLSA=55&k#_A-=LF87T`y<&}`iCCYs1xA#1 zM`pYC=aq+;4YQ=j5%VvG@0!lESSaS0#kDT|NYi7r?cdH7XB#Ca9{uv|ZNxjF=_&3h z@%@DJv}5x@xWC%?$v2xMs4K_2J4hK*(7aZm%%5(!+H>>kO8x0PwlnO( zs>Fu>u9=aC#rqS-2$jGv%&i@ZXz;RnZhR^M97YY?9-|8le5*$^R+W8+Jr}bvM_73c znBu3kpYL)K4&b@NMrKNYgd>pBjKUtT6S9u{zq3wCzqcMINYv}pgtTVU;hu1{8$r?; zwLjIAP>pCQ%w%^5nFv4>ER5QA*Rkjn8csfO?U}?_Leyv;24$YI z`UqBkeLzLkZ06}5Bs)mQe4UlWfVS<-r0}j0HMIImEl@FbpXJ7W)+Ayyy(D$DGKNhT z)aBO(^cVDThL3#>yB;1=YK*oU=Kr*ZmlU>aBw>c19x;@KZrlmm+d5w8jnzE-_qFN% z{{0=0&O`{*i`xx8*4EbcE7xbdVWI%qz@@)BMFz667(%BQdVx?31+?OSi^9)Kg-%;9 z*YA^CtkyYk@ggliMR0Yzssj|$fu*kVZbmg9|J;;oGtn#3OjAz_eHBJ+H;KlC*pTpJ z{6u|HeSg+D?SS!`4?T)@Kj&9-{Y32$pyx|pd;W-?2)#1e&sAJ(c1kn1TB5+8u;j3Qj;Z{fiuqM*e^>4WqV_ox+r1k&VH zP=DVf9-R#fjPlm{p4=|rT_x6oOCcN-8Xtx5#nme^#}S^MQ`T4z#RLuiWW6o!wToq* z4xw?UdZrP3e`WFkbZ>2Ib!f*WxIe$_W-nx))xB07)}bP|dsnD1ZFuB9@Z5lEkIuwg?4APnQ;+X*P-1YLqvzW1BTce2PW{BQGz4VeHuobS5GvH>$LdED zxcwXGh zY(?xS+K6IhtB!}D1v;A0w`(k{JefUXd0}DyRqYCIle`ClK~D!H&7@os!F&anVkB?A zc6SMKy-{P%JSOFiL;CWS@VfL;=5kKuO@Su8IK=11GDp9Fyy&S1`jpcIOv%*MA`B+` zI}3m=<%T4n8$zBI?p2-Q*9XO#OQwJ7Ia@=RxPvp~cz1WGy_Klk3r7dM@yQ&cMQR%dm@! 
z7%d|MyBUfx-d!{12?z+O^$)^K$R|SMhtrx+MSI;E4EFv=BDC`f_G_hWxG-y^Mb zns`^xGM9B9qjXugRv}#dnro`W0AjyX%psXgEhUXv`FY2wWxW;!0rPY1XV`!!7+%_l zXcI}Wj=vklY{)YI>RHtB*gbsEad>Xg!3Wy;C?a&U*j;gXab3tp^V zX%tPkMCF^!VD8OdkT|ogqjx+uj0cX=!juX8&2#~;6SFHTJAhj(c0OL=)hl`L|TJMxPF8#;2x*4eMnCSV*jQBKrw@ z={Jy}!j!9;!?2u`;&7Y{z_Q$HOGY?~`BAi>Qv=Y~Br)b7#kLnQd1Tbvrw79~Hz{E# zF=f4F?OFP<0?kv59j76zzOP%z50|s=sBpmGcV6dl09@)VqS_6{7hj0oXU5 z8;&10FF2j}7a~8}c6NG<&*=bJ9pwRRk>PN-U->NpGv6C*sa^P)qfq6!X(s9Q`vaqt zhkC$auX$wG{1(F9>0dx+zuNx;=u9Rn>xCtdn0aYl-rHI}wX91e6TYz@{A*eWV=nIR zlV#3Zt79OnG!Dll3p{~!Kx~y^;2JFlWr3o`j*_!+4!;d?ab^VJCL9yVO6(VditTg2 zi|l`8`-7eB0^}Pg-R1)%Fm&Ol!QH!iGhMHDjiTpA40tr-euo8@yd(gwNdz<%q)|8m zv~LnALgsZ8!*it}O&&fMszg2kN4lfKdMedc{4t0W4B)fy1E`w({N_$}8b@N0wF)AQ z0mviOJl5smC6Wwu*W+`zcWu4{#O2y8s~4c>L88kt3nPlXZZT1;vR%p%E++$Wf;p{LXUFTu(%z9x`U#H zJ^AdDoW_=CHSKhxf5eX*O6i;#AkBPlcJYU~`RTe|q4E z!N@hUlvyf&wo=l@_s#GK&C@5a+87-XG!$G7Hq~j2(Vg+!E+J+@8RyOjYO{wP9LOij zhLK_@v5ASYno-LZzv#9JWcc@l%DUp((nBrHK*z&zj1j92z_^uHoj!r`SX-PF`MBuyBSSEZax{hC(V$KKOjOkjo7+ zWrVe@>k04Pae8}uA7yQl5sv~a>b$5u6Mb)Y#x?Own1@a;_dv#c4rx$>%t}XQKy2s4-cVA43EBLZMz5r6$v{-(N zyk>{L>$2;pDJ;nVKrp_8U|swESN!JYpzmPZS%Y;{*HLNJUu`?9LocG9G_Hmq9Zz6cO-McAsUFY9pxhmpOsV*DUer8D@c}hap#0ILa39$1h z89Jl~o2qC9{r#0U+~dDOnZSL!^cao zE>#a=5;-5?`F^50@Z1r?^M)pRIjCstS3Vmjlz*PblTSZ2G1!9=l>X5jOV1f4cp`R3 zgkTW9)C|o>{_%g!wy_$gbB+b$U>rYd79O%2Od|_0#|q zG0j;hco2W$wxwm=$suZm7cCJJ9!UoCjQo}ffXI@e@UKx1UI5g8sPee;XaViZ(qu4X zUfK6W!=Ndb~#Ht%V}*>~Bga7L~1Wiyo=Wr9jAMPN-1aa|YZ=)9faE@5E@R&G^YL z_x0z9ol#7SY7dIK2fw(wloQ@NPrYfNPw6nFwJj4bMAq`N|Fm)T<$3>)0Dt|#s6jkr z`Pbnf$A&htIfhx1OLAg# zJ4a-XL*R{)+myJ#P%5wn>Fg-l%WMWVs40eW&wWRzZmHy*Q26z!xoKH5;(^loAH1PA zWdID05JC7a zi2GND@rIGS3*?K_LN8g;1(0*oLfwcG1G<%FrVW-5mq?dO0Mur}w7kA|38SQ+_ayD{ z6B{tsr-^yRtd?@~1tsQoxbHWYK(JoVwZ#}=eCHXP*oXs@dD<)D-7ocV82ebFV1W(G zwv#t9^`nBq19|(?3@9rixyq9yrt-ihEK?iw2{o%rb+sQ&EV~Y5VDJU>@;Q|RfDL0qhJlvVkeu4oB zp%|t+6Ig@@LvIT8#ydfqMBHAKWkowoP2`vTNR2PoN^XmI1ch}FlV|*F>ygQjSi_8G 
z+*q3&El5}qK++4-a^ECCkeXz3Jf3x!$VA_|)JSHD!|=rTIAa}c)!qIEchOy|=YEY0 zBr-Gv++VTEjVA#rT1(RT^bE_X$v?r(y~Vuy=xZtH%lbQ26giQ=m$pncU4{u>qO_pT zuTbT`Rym|?{OzB)X)n`loM=#*GAwI0FT0kN0ss;n7F+F0%(H_^YeDY5? z0-YwnRlYSO;_jnHHiEF|k2Fuu)`_v`<)ysSm2xJFhs*U1+-{?zw@d9m^dRc2pk}Zz zt0G=M0sipI8e@{*Wk+&(Xuwjh}MZEV`=0dFVS{x~E z#tI#8ls0B+tH=cLmO2U=NLzhd$8j5?VHEV$=rmI*x8Om@cDGMUIlSiaIToPA{tJB4i{kP#VNUFwmeW6xN%x`*rCm+3S{7^qX z-SYP9b=Kr9riyC{lo*%STR9|k?hh#(CXap|QyvDqBxU2=HMx7B4zHuszHonY)`~|F zUQ+8^7Evq(i>KCrF*$?gl}_8he2SMi4w{E>N+lHR8WoG%dq-J*IIpkAi-J9Mi{1PV zn!V$~hKZsKqV)?XJl-nt%-&zH?mfMM zU;-2jwIS_0t2a52uopRWHc{E8dBD}R0o}iin5QbQHFY)G0X@R!@O1DB5`WA$01)B< z{gY!X+Ezb;uMR95*m7tNWRq9cdlk%g*gWgA0DSK8K7|7HKhhs)0bD6xuSEZhjk1$fYvbYJT&Ew23*|p@6`J2g?FSmyUV_=hPP7UIk!|7sCXZ4m{_Z5#nR}Me+|&Mwn%Q`kinnS6$XFje=ORs@UOj*JKw{ zW0&Y0suvDO8fKsbE1+|6ZKb1gUat~5$gPNu`h0-=(f}0gbrcyh`|gJx|2;?y#tm^p z;FAa#gkdySu`nw-L^m?glS+q<|L!|HP6Nb>W;u1hsi=(R{{G9(UEwu$SwwTNqx}Y$ zfPlFMwa#0RDkPptz%s7u0QP2dzOP zAZfS%>SL6(bPnYluV5W0pu<`$GgpSThNU^WbGbELa+lV|iMJ1z)UR{?%@Cq8IR{`% zP2CyA6;RP1KKS;Q+*ub?_1CG5gq3D8{Hi${6FyL1*Sva=2t`u-+#S88=J1jJk}HYD zs@BcZ#~T3$Fb&hN7oq!q73-H+cSAp3LXiej`OH?co<>hPWA#au_asO}G!|Z%bMCY~3~59cIt07B1&-g3^2*y+0z^0Dxf)>aNCEtcx`L zg_ZgF{n$}&7H%K5f@H0=W~ZX@v-?I13g_D z=3#4oy7BS%IL%%x8m;Z-SA1OuP&*pIJgMh4kA7@3@25phO6pB1UonEKtV7wpF@y|p zs2AJ2hSlG#s3Z2$%DRKx*u}pngT&p%y#~hxeFC1i10(uqVt(9R+mT7>qZJ^BBs;RL z8h3$W;!Kv>>ZOj79oQES+wVaXl1!VQtQl+Mbc9X6%x>KHcUz)nW|oEVm~`STnlrvr z<}er3M5VXodD3W5Zbq<*1Ea`59+tq9%D56c zaghzo6PfRyFzttEyblJLHLiXD;Bvk#9vkhamQU0bDHAdF8!T;24j|4P5BwH=Q@piB zrPFt6Npv|C3oLrngB`8%EZ)( zSC)6Qh3-az{_bhPB?{!hxaFq2f&=x+?gvkrpH~{5cUHyB3nedtrsSDv)a+4t*4snV ztW1VTW`iA5-#NJta)E(Hqb6YA5;d_wQSdnxu;Ia5a0>FiR(MM8!K}i-!&n%?WTqrMW0^=+gIh zLP%1$kQNM0Y(Fg;=}9y-*=uS2T?TGhcJydv%(h3U`%!2*&Rw!CHxq_}8b`6GRC^ zd!u#7zK`O<)_m9YyRf(@^Wy+tkmPGX=qX(rQ|38nb2WlFNu5pY$oNE$KX@WbGkH(L z)ZZ~{?@P#a5J^bbhM`7Uin?zqm#?5`dzY{8!`}nU9L^ckA865hvdR9kG=$uX%3XMo z1#?cc9<=^!Y3xS>Q>X;_I4xz!7uyNQ2E?&0ET#GRCkfD9Nn{_#-;hB)Qk|L3)3&>e 
zMQ1AHV-Zscp3fQqM`!aNYR>C+U7Xjqi4+6WNZM^NVF1-A#Hs3VO7)Cdb_t}Vu1@9~1S~e1 z)i=e2w{Pd)y$RQY2L`a_LmR+&`#6#CNdT#tC2bTo?A(UKtGc<>l*N8M-f9Nm24F%) zI3xHDce2m^R{?JClm^4! z0|DUy1w?(-zH)l!b-!FJ#8p?6YWX zB}1*fUL~Wn;U~*Z!-0AHXv;gH7HP}V<_4kw5b4acWDYq2+!%ZT<=&et;1iE-9*g3p zK{}2PKD?LBDl9C_TflyqL5rnRgBajEiAj70t2ioi2XrY<%Rz^bA5#m{fACD|W*_>P z6pGo_>#BFe7&iGVe%-ie4*D4FS*zQcRlihTO02x5@$&l0=<&uyo1z@)kW#>XvVkJe zY82Hf3c)m3C<5krQA?dOp!P12OWBMCef4EcCk)xyZ=q_HwaC(T4H5y+iet;6%_c7% z$;jYW3+&L$0}UBXhO$!!1>4at+o7T*oW!^NM_SQE#L_4?|rc310P6+rZ7U;#ga#UqP+jyW~FtxRc=?>OQo|8;3w8sn1+pUcV0qG6qdz+ z_JDzjc;AW4c{nfb(HlhzUY|#Q2Gb977su{>L^fvKSt`b?;x3@XRv*6$ynUhP3N=EY z+vCm3hxygCX9+vEN*!4pw93R1hWxUzyQ%RRac7a)<16KF^~DC6mA|V!OybP6kXpgj zVehlMhZgRcHhvE$r;_Kr3}E1ex9eH%=5?b*x55*{Q1o@S->mwc{cgR zwY`P0eC5*^b1~*f^($>KKPTp??~@hW0|AtZ4s=OY#=I-Ezj6WnirLlF&19_8BEyUR zk_=7HQXF&>8N;x3G&rxekRIqa|MvhMeyARlafr`LXhMaP^(q;VwyT94pdrQKB3iq4Xg-48^P;Ns1;&gH8q=;>fYuhD%LpZ&~*!Pv6F~?#! zxSTn0Lt|oc=n2i_=HplO`_aRrY^6SCY6-zzOKIt!;v4LpNWNjw$-Lb@@n;RFlz6Bo zW#XXcp6W)~bKbHvLfI7Oh{&q{0CjXC0a651RhN`6wxs_Ye`qKF=H-W3)n*|W63UvF zOlPw0kWKK7s;Rj@lQ#1s#XNu~icUzsawI_Z=vUD9^XR~nd6p{R^L3gS`r5f!rtOkJ z_|2GtbDB$|<;`=IvR8CUovu@xlwk2=m=h-We`p)90TAXMW>ddN^R>GPsT{f+0UC!5 zEj~gZ>!PZERdE?Js(+`@f|0>z^H%4pxXZgAo%?o{pPI#mS;ZyH0br%N*0xCV{eqo#&G7k>!+JXKwb0%T(1DjSXX=GV{4p0~l>&2LaDwfZ)A zdKSMFwtwgSn+zS2cww9t;^OGP*>s5R|NT4J&B0t$gV<2okodTAjAR4dw-zOz3M_3+ z!P@0vd@mz;G>5sk<6U%sSAV8wGK%>C!wmWEDd#diYQ8g}boWs(A4VEb9xh$jdq8d6 z+lx?meKkIjTkEfx6Kw`Se7u1&s#L`()MZalg~z4qqUV{`TBcxay{O};iLG24f)0zz>_ zM|U?rB%ZXGR!gw8M=s-1Xli68uk;87#D)E*3;>an4j8D728cuiWazlL5)&au?bjPA zo&%0Is7|-Q(0_b}7|iWSLVO>cPw;Ou2fds` zHJZ~8m(`7Y9E*pI_0^n1gE3^qMep$st#UnXPcop~C;!MqvbD;~1e@&66U!!N3xatO zl@0*#ncK8q7uOKUT}jXaBOBhk`0+s{^Kw906PHHHb*{764w@sF7ZRacl2q?lFRw_H z?mw#1@DWh);oSS_Dko^{53g01c*eDApY5}g}NeCIP6=UO`Zf~JdY#H4JrL5=AudvF+9VOyP*{xJpl6>(dPexlV$r0o9qzl_$WN)a zrk+H`2++ieIa_x;w!dhf1z7cB*EER7v8{9dw&j4D&pN57YF6#TDqP!$0T5YpyKW%d zLH5*y)@o*w8|4cdWgbVLYgSi5Cnv1qY7Vd#gNp;ovstMp-L^U3nNP4hQY+O&U#8n10UuqX2g%eZy@ 
zdJg5D;2^R5q`-`J2^mUbV14d_~-iNPm#>QI|~a!y5DbV;OOb>}9~@7TIkU{Y4JP#gF<0z<8#hI>PF7kbbKbdGl4`G37iXCwC{A2XH~;L`8esi;cWq@bOBr6Am{sg*$O)6yCcWq#1J6ho5elt8R;D3po?daY#!WtH8}}8J z=vI0Z_~-rKjCUpO{Px`K(7dHNR_pja2ls60nuUF~j9{_8fT?lqw|8%EN9n062J>dQ zgb$;M$;oy%C04>+XR(|fq?Hu8du*Qoc^o#vs~~InogNuOBNZP-N7tRJ7}hZVnmd0s zlV~?!lZCfdb~Saygfkm|D?I%J zYo+{`AbPhp%xzu4;Jd?t4uOZ|sB$^@vT z*&AW_0ER_5eOY{>dTB8W-qv_q7Z&y<)Jm!waCZTx;9y4aykss`04Z)ZWvHcGQ(2eS zQ_k%`#$pz3QnNQ+%iC>%Sax3;X+HoPQ*PojxU%(u#>aPc7;SGjIZy0Bo zoVK3I{%J&)EeApfF$f{@!MIJcNkJXTzCYP+Oj3N`N!|BstZd6pC41@yP;RS}vn`*i zLEbF#JXrnj$76ToZiaoIaM4r9T;j0gu%2|7EK@cG^+uB=UeqNofP?$chR3_N8^Pd^u97S! zlwx??POP+)woVH>QiS99uJDH1lsXgw2q>K=p{cf9aA~bxkw$0AcXQY}2|6=ohfn2t zKOiGlmx1{N+Kf-ElU<{h6@wk4R$L)Fj7so^G~sG2-y2eArxa>6J9Hr-0y$%`ac7a#3mCMN{7u( zGXV&vU58wcze9!*Hk>U&%yx~-hvoK$N_EIJs+d>4trN(8U5000 zFCnLeUNiS5)4u4w2$&od9lE4d_1x==>DAW#S=uXL@RB{5qmUeV4Wvlh`%Y;aiBNL9 zjQ9`ePZ7~~;SFe3+8lf*R_e$f8OQXi+g+_w4gIF2Ev?=7yrYN9bur;reKyflsUxG%#`H(_k?OA&?06H z^5={7=monPf0GO)%?yb=DK!qI#ZQb!n3uL<)GS+vph@W=7s?o+@$i}>&7y}NxD;pI zZ5Q#G#ZJw~W9aR3I&mBF8=Fv+^)N4Yz0o@?A-(s8oO~HUxsv3-v=+FnAIb0UgW?acuhccy zH&x7O?FrF)>{L@2PbMBO98bM(k#ThlNP^g~rIM&8aT(#GB|?a@;I(f>2A^g_$QOB& zns1Z`Q(NW5R5FR>XeL6J&XMY&63eGvXt5rkh@y$iW zo>xq3xP5REds;uOcT;!97>#~oFD8Y$4A@z%Dr>}rVJ}78v`F}-s9%1YZu%og)E11g z8P3{vfdL0g+pAsWBCSbO+Ry(73UUqno!{PK!Cdw#@#oQ_;L*BIsrJr+jwsqmV#koO zIPi{?!*m?0!5=6^zbIU*V3IdRWnv!M%+Rk#I1x|BhI5QT;xJ@#bw45>@_vz5*YJ~k z3N)70Ko*;}7U^`=XSS7s9t?gQiG;Zu+t(67*Q;693#-!-PZndG-`&d(`6eP$LiZzdTg-Z;CsA{~yr?@IDH)vNu}J?0SnqPql~Gnv(Qb zOOms7b6Oa;&Biy~BK;r6OPHk<$lAK>|E2c&?Qj2higQazQxzG*JZ0MkF{RRB zqhYnWu_Xg>6)tsv=Y0m32)_|}W&?JClck5LyIAD;kt1!Qk{d`ogymr%; z+3RBQux4O~46Bdh1QtBLYNnd4rLoN?DgdA zdRSsGga1jvw&uRsyC^^`avo4klN|SFZgKI!@n4;hX=N5R z{XzeUV_%wu(PuR22Sc-}eB}beHU8)RARrc9+2%D%KWoLQ z$v9Vh#}JtH|H-9u5`nK|_L+OkmRw2ESrg!286X&NlU6T(vr?8QJ60kvnxyyc8mA!C zrkUFwP;194a!R|l7i_$LFp6e<{o4C)Io!g;u-SH?r@H3JR(`1r%DCev(D8Izw)m@C zm`9G>{~V?z@NO8XV}1VlSZ|szH&XH9l8ZT6d~?QBUheJjR{HQWw^$QxZ&a*6GPjJE 
ze24a~iJN2m-q`+z3yq5D|2{67-zuUK9juI7RpEBHlf0A`~-*-upbCSC$!jrE}TICgM4x5td+%-I) zF*<0Me-Gwcx=?KmmgGMGJM_0V5M~#rVQ$Un&4gfGIMe3Yt{$lSiBlV}bF0_|A|LY3uQP{a`x31^|@?R_9OP2>ZBbv?E( zX$ali6j_YpXr~<=AkB$W&4Z?=`^SS~D&r2)rY!1SL~--*NGV=`liRz(Yi8Qgr_=$6 zw*f}eR}P4S1Qj{@Btmi~SygRX8J({48zm_fuDl{esl1Ujsh8R}jnR=Vsslpc=(cQ( z{ad0rrwiH!2!G!sM?lsA*m$}<)4ma;8UO&i-9T#+aznPd(+`k-IH9{PP8&e_y=D4i z>15y8f9Pv?iM!N;JXha2%u$8eo+Rr(vE7g^~vo1RA@FJ0;~lfGku5z zns4o%|0;0+_pF|SyTF+ev!nN6AO3tJ2=maLms*DnX|vjQ3Y;8Du7U@CQSP(H6JP># z9bR%>$5rdrhEWJ$i2%-B_HEd9_@DP&KBy5^MgYyXBpX|69`J{_ktQYz=Q{!;j<8oG zm_!@$DljKDod2!5IX&KRRZd*EaxD8S(n|0R6_|=VVU$(|W&nSw_UdMh-3lLz47S9} zo^3Uv>C8=rkDs0Y4&T}S6~45v8Q8+AsF(uAc*4DbM?>B`0!{oZfqru5G1&_XrU^i2 zddAGaK*uF}p0M|&YHGeeP#l-{BjfPN*^$8zQ5f_4_jGv3$F()JgmmA5mNV{blemNg z=iN=fSm)spp060?xmyosTyEX*;LIQ}b$GL98(fT%hl?`{B8vCgR4U9P{3RqP#)=%q ztAH#cM8Cm9>^O9cuAN8|)J`mbc?ZCdVZDsl3^6`)2=SUFsH`5*hZ;&V5@(0(8tgtM z=myzO@maorIJC$2OZrRi7a5Vx=Lt|XAgLiX>INQ4i_1Ws(mUwdR$1>7^zy}B&{iu` zft~WU8tRKH9rY4Yz#w1Jb0Y(A|AYR61SD8r9^iqx%JQqq!%yT?>vLO=#0itNtoIMI zPKN`dL+i#avnRxxPwS$|nHPL>&-5ty%}-H5^G62r2QWy5f@iOQq|Y-=Qj`qPu;{}a zl@1T+ND5xcKhWq=_{8%GaQF=Pku$?)iX6TS<+W2|`VTwOtn=rdGXzfHo*hcze+$ev zIh8tD-Su^g7Mkih!UP8&BFx zNB?1QH-=^vQCd_ya}LFQI?@`k#mD9exL+Nvnq9B zH$tD?v(n;7s?%nxSw0ZiYRvc`|Dv{0i;Hv$P7-;_Pl>g#Mc5UfOx%1k) z;5>pSKwWZfk-?j#0(ysL=W=R5lBBXi9u1l&lJwf)>My~I!49;kc1vz#S?t{uOhfA` ziCkSlyyJ{!(pq0rY2}03x)KFeco`n`HawCYO5GV$vI#A=fvdXLQ1l_U;iK|vHY@EpFZeW?8=IFx?--Ts~hw~${knO^Nt6W z1c-uQE_;WFBu)2Iy0dr>0^H@#6NFzW0^s&Fe2V-**kUnPD>F$~K8 ztHq?fZmND(Nd$IIFA3*|9_{s1FV^rUmB%nc1UQ~xm$zSC5LD3+cq;kH|Cm21 zXKpT%>)|bUlG;**Q8gL%OOW=;KwE)buqWnMvJx}T2(wrJmmo( zp`}ua>X|ZkjW1Yb&o$q*1Xj-{f2y$dvFdFNDR?L}57>Cs_i1l$>sjR=np~P>rM;ee zbJ3R6gj+Ks-nsdgx71&JKbs^vlcJeKu#m&%ctM{u?>Bk0F6axks(-a<_{c6kdh-!1 zV?*J8NNKF{UK$@8v&iHFommag<&hqL4X|IIuxk)ZFkTUUlYI~6Ga%Ttae`V zTiROmY$P`TZ@CW55EB213I!j3vdyzA62TV$0el#L2vP0g5>SeD5crt9ijvhyKfH$4P2xD7;zA5c7v>K%y1Cpi)i1|O%$X$+ z)-*u%+P2T6b*~}8qT5qwPK!&F-4s_>-wa=$ASy1Y!yFBT$&OU!$PSw0fWBY0{+uN3 
zEDh+FzabucZ;~TStFm2jKcGPQ69*vJNtS5$!H`6&K2eAJr?lCVTxC&>hb!sLGmL-*N{xBvQ--rJU$j=15Uho=+ zek@W4A?!H<>^A+0Z&Sm=Hz8p-$pm=uUGhl#$q<{6EmrGrwj{ng+f;5&}y?cn6it_hafg1Q{to|FrD_1&r zZ-3}QAZ^s_KN%1UHj+SC#aZQU&VQ9)ReD6w@>Z&6XK`oyd$j(GU{BoYeiCA7g%0j7 zu&L!w%J5kGVzL6bAwzB(!5LwPRD(2&2H&;3sQ$nTd@uK$wv$z71-x{v>cwuy^??@I zMb&P7hALOf_-|)%<@d@3jCDV}%>Hr zN%MCHmK;EGWHZnEM>IpZU z2XkeJ5`nxD{+G`4?X0n{;uNoE>xvIq)o?I39k3OqfyM0+<@F zDVUPTqG;K9to~HaiBkv!c3UET`z{(&ucbf9zwo02e28u^Ne5rG&U)o?QPuB=Fp=YB z$H3V5Vh(@+%ku$85HR={Am425w=|C)!B<2N>v6UKbuo9!mMCZ26CkMuqvAO!pyB)g ziNj93aOXIU&iSkkcc|qW+5P+hpa&<8gFz|;RT`71kB8QB8N{=t7Ak16cu%wF) z0b#6ZwLPm_yKr;h@1hP!uip@-ATm3VtIdpaY(S9Eas}8rPnH1^)ae*bgM8o!A||Kb zF&u8I+=J_xn;4v29RNmC(L{k3CO|c6+^d`eG;i$lPMFS06%8E`rM32OrHJD}CeL+0dKczAf44O*mHj;A(HAnfeyyFRO6 zE$-a^Ju$Oz*2APcw0+zJ^`Afcc7wAp`6H-`ZUh5wAGr|teF1WnS5J8R5&?l*7tln8 zNudBwaFJ67nyg&|49#WXn_tndE{q>B#9gGOUSoa|Ck8KB2?E+I3rh`3Qesj~Wli7! zR3`}N%BH=8=AycxuNW@~U-bVj5efEOyq{J53NW!B0#KLJ!;}63B~Rp!q&jRUihAXv z@^8`H8|}mLM%9;jUZGuE#9CsQ$e(^vue){cj~$Z;8LAH+>sK|tJ-Nk}aEZ-PuHtLk zrQF;|KM3&p;%o)0gYYkZKMG!-)S%?Iq)_~Zud%bHq%o{&&v%-aV0yWS{XfLLcRbho z|Nq}SqCtd|nN@@+vRC%V$>wF0kz>7NZ^|*UU&v0%%yXbd#dP9PFYgb|+{!uAAs+E8 zDlA;ach)p5XovAYb9t|C-C^P~yY=USp_|mCEcY2b+r+@x!ib6+4B0&In3-52CVZ+n zyV^53=p00~B@R~HxluIeYMjUjF-%bl*JBYv7%?v7XChL45%lURXb*Y->Zg43X2gQl zJ3zXAjZqVC-*bZ@=(FqO-?w^dX77QFAo{i&iyjO26*@Phjzat8$S1Ab8C%~jYh z(GBD`N8^LcK1aMsav{lank%u^0{?PnH&n~gL38J_8nNLz*YWF&Ma~FX2W-?znY-h7 zZ!q=v&8B5j%AmfhxuL3|tOF;$-eSa#t|@2G%2ouX zcqJROD9jwyh-z6)1-ydohsPwN3BW4oKMa4hAzWk|jNHVoDB{H`Z_Z$!vr9bQyRq|q zyeB3Tk+`dFO)yjm${ou&%RRoQdUJ2<;ThUpTpza-BIh5+tIqK=@sl=U#c}XE#}Fw` zi+I*SCA(F@SrFp}niE<(+48tFil(~dQMNpJlwJD@?PXVfK6_c9ElP(__2U_aQqdoBB zLmpYt#rG6Qo2@ZWZpXt@vyryY{rf@_neP2ZmeViOy`msBnC{i_-ARK17t6!*%S>4p zVo8cAfX1MW0m^xZXEmb|lkB~^5EDwJ#j6ULi;M8{f^qJA-&m~A4qT6BW?3w(l<^cA zo#v}E(y=J04_PRs8(>Uqhhv?DlZ`^BTx(=GXF^Lxh6(!#(Lg3?%CLCQ3Eum|$!31M z;5+vqVfHuq<-tW{A2OS-x5-fjK7Ak1y}}=Av+L#nJ?{mbNu>sCuWR)r1Wk_8 
zPE@&ryS6GAM{bEnm4ia2Id&>4X=YWT>%g)&@~VzH&qe}$A%kF$L3NrO8Hz+S#q%_6 ztrD<@M}F81tw@&(j~*R0@fH&iLAfjp+s^5pDr0*tfS2j{m~;g-ld3DXMCi)V(DDeF z_2Bx``d$Z1VOwY$6JbxcDwv0`CgBWN-xewV))>GyStKOfa|1(%^UTPq zzTgbv_xJM^V4gXt!~du^-ek4p6NUQa_nKixuLpgxn5tde3|Dt`H{65`BnC_)ns@8m zK2a=j#RbjMrZpB!9aQYO38QjFQuvg@4x7$JeYz2CtQ;rMZaco$461AA3nAA_DWo~V zKn=thfFF!2s=={6rlLb#coILR_5F2`#_)~KYX}{JhQNGnt{XfdF%`M$mZX>5y(`Bd zkcZZ~?qz)1*sW1_UOAqGQ45apni|z&e&_C$i{5s%uj}$hYqwEFTUYmN&l}AJ*gtb^%;8eMZ?8Kh9XV>zW63}lq}4x$H5@H2 z6LM1nyXvK)xTxJ+@K=i`oeK5u;gJyj!?Q{-2ROXndo9$>xz6m!$3n|P8t4}i60Y+G z)e%<4YIIj_-xsId7yxnd{;cfYtjd2ecvIDok{gWCd&{L!`DS)pM1QIe-^3&{-jss) z>Y;}@3T`HK{0i$X2Q$qNqS z$0&YfkOUA$$JQdFsEu8lo*tCzm-l0M-<1c4&AGR*q)xLr8{YczwFSEgB~h{ij#R)K zQDX*qD2jI^P7SViLTieV<hqm>1wRznZ6VuUa{4D201<=9Ss}pqK9q$Anr&o-|qA zF7w1iIE=M75WDz3PDWjiX|L^x;ZxGHk~170Vf1r&T#>0fGXN?niwO-;unt&GO|)x& z*Ie#6Nd;yn$D)N$Z?)NKn=H`c3@#49CwRP`~T!~l$dr2kl$Ql^Rsd*g8PacIw z_7V~I@UZKMH(sV&m#1NLbUuvc>Nr>mw-`Qx5XEZlnm|Wr_36{6rH-p{eh>zaVa)ol zXIsyv@t+*<2pkt7oK5|vOb6|!f-KVeGQRTp)-Nzk)}yE`R-zPxg5wGnE077$hJ@ZB zyU7$QVAQRvrhP+U2#`Y4X4TX{Bh@)poW358r+*b?{e=R`koLYT=wrL0$N>SQlqtyl z^I+092fwwqo}UgEnu>w20;7IwDu`YoFx*F6A7;aODCz5+%&o_Y!+Wm9sq)x#jBDo` z+gkF+n!;naf;^epUi2UC950@`UfM&iioX_>z4z_q!u23BY3a61@_2;Z8BiY|JQ;gr zbBTvXF^ZpuQmp#zmSgGT9fGpD%}&)fw?}8hO4M)etv!cE^=WC9CW+eRn2z;R4T&S- z+yQDl<-;WgqCEAD@eHZa($?JdZ6VmrjoF~K;K?p3Z?*QToa4<4k-_V8MEUC|F|2dd z!dz$Sof8`dZ8h0^L|5dZoG{9}d`}K_dpjY7P zRTkWnow2Lx07NnTAP6!dNM3dSn)j40Niu=PUPes+9hcV+Zmg+iBIMPlUFn z2VJ2Z#hItG8I^!p6^#{kdXcFPItRRcIu{D)8C66k9ON$Ee}cp1%&1Odu5YwV(DQ+#pRnQ|fE3ShIK@!}oXpT#lt z6&UETAraT?ii)fq70y|{!}6pnZ>tz+ra@%;Va>yxR&BqQ&tLK*46U0tv@LK(ml8tq zcdn07NSf5uJWm0TMK|ne2ZMJ+Q5-P#W=sdl)>FUB>N@Q}8%*edee)g&Y~z?(O`g2_ z9z?%B7?Uk_MJ@SApNVyWN)MC>Wgsr0RVe-N3SHR(UX?qCbVuMevbcdRrKl_-_1xt9 zRm$|}up$M3tsmuTAAp0{zDTWF0hyM4g{y;|U-l&wKmvdg&kcf6D|5&oHHn%1u z_lE1q)#Jq*+%8D(NqRH7l)Xm@JnY}aFwz4|SwBbQP4{PQZBf*E`VFv1E5NgN(U62b znc022K6v}}5&H!?E_c$KFRaqW=@Ond0VS`cSPN~v+jOI^$!(;DclWEPwc+}(2v)*d 
zEZ?Yq8QfJ{`0ejaQl6j1_A-L_+s3?i%~xLd+~rB8$1h3D-SIAW5L;S{e2 z!~VY3E}&M(D!JRTWK3GO+VbXSqP@-|+Q}Uq>fq0g*sET1?#NEvog^*(>+KQx)BfIDjP{%@JmqOb9%=GUPEp2a{zfd?cZ(Abs&;ve z3dkf@{=+DFq-T*dA}V!c5BJh4F&%zahrB2y+eTs&EB8JU$1@WO&E@5TCfNr-Ll4N? z58I4o+{UNwG(F5Qt&dX!Zd+1%6a4cD8&R_Z#6cR_6uGl?^ zLN$R&^v)rdNP01Y=p_BLg2NuSQz=RLUwp;Oh4Y!Wr(GDSvOCg8b9KqSBG@0M`2LTW zB_xakPtoO4JJ{c%;)7)O5t}(Pm#)+=Cbse1hWUz?QNxnofTAfYzPn-SzT9#hdwAUq zR8#j0ER+j2JRyi=T&H#`_BU@_C{N9gpv{x7aa&9?fHf6mp!4F=^(E&!!bf}ZLf|!h zd(;(rR`Vl02G+wOc5}`4HKs*1c7|2oNjHV}92-?LKkL^(k<0_rgPHF-gFR+Nx=cJ! zD@nu-J{q|ADlU)KI{DGgK)qSVjn0+c0}h-S>R@6Y6!q%4M|tafzRgqi91;D62V^8U zXBHm3AenYr8E&r#vmHodr*0ffcPtHpp9d@`wa{>O$10}yJS3me)p)+5FU(1>3O{K2 z=H#6XnsIGa7}p{ov*(PJU%RO2+Dg zs7`)Gva~bcYCYmQl9KIrKcymMr55r3fVntC9h1&@meHA#aNRb^=q6gV$B!{I>!U2!o z>B38wj8g2{4__KcNC&Mq(HTZXGSWjxlSx5(NVnn)aj~p0TbS+AxF}`WtdQro+sq(z zx;q>+sGmm4|C&J7-1_9Jy(qCU(qMi(UU12Cu%k+^th6u@go!YQ1_man-~%{b3Jq8o za_1zRqgac*&3)j&-io4(l%{Ehy25t_YZ#EFN^#VQUI%IJq+BJ@nj&vuIHW$>@$S{t zCvQ@1mnYc4A1>(3qeiSd^k&V4h+34C4UWM~Z$|C6TX#43iKN#J9 zeI1}D`s|fn$z6|IRIrsjOYI^bLA$AD-aR{OV?COjXJX^txVkoA7k|~Fbpu}!ZCL4| zM=?f~c-`&^ypX7nkdTb0Pw!JkXpNsPBv3RCh*Ve^&2Bu|hd=v?fYjpc%~9L(uOH}QL;yQ4Lj!;Dkq&_PfUEV%({+%J9XNHSi$xU#WuCbZ7PdxWE` zOp(SndeHEUL6eS=m&NA0^CB5XWxZq+s+Y^7YJEVI#t|a%>D_vL?DuLp_AND26dD__TJ+k(y6r@|>!NF1BiqR9_ z_&_x z8~(L+3$uRzs~*e}{yFFPJ2C;Jm#R6V{otv)BTM-?9HsiD`BlgRDq_=>x_IC0=(7W( zR7bE&CRRfgG#n_XMUUt;+f6-q{}zu`6~=#hCd6@Xb?RXIK`}m9Y=OCg&;w|MPUd&) z4e)z`ATPvzWi}Bz=J8Fc^%;v7$QK6R{+wMu&%n%X+#2OM&wp=BDcW`8X%yNDVPMnKq{fc7ABb1sp4!qkfhe)6&gfrGpIWrgKFX3 zPLtJPbGd=8dxt{T4slJkAubz4NFn6l=!m6jOfDodxP?L4KcEW6?vX^+o6?1iU>Dwa z+DB2pJW5EsyykPsi?$-02!U#ru#EUpM< z%aB$;6qKwwv>H-q*)-UjK$-xH;rDTqSX9*O5O+G{n*hMUK{0YqxY&(JGm1Xck{1#e z>N3F2(uSAnvE&C~SiQI74UKdpvaP*z6cOK?X*i-t%E`N-5qM4t9}n-L0vfrPaI3!a zdknsjr;>$#^0NWrztdx=emmRVtNuhZA~E#~{n$Fl*`(a|#E7!{d!t|?3sp4{sFA!Y~oR{`? 
z`0~F9*gOsGnxqE+5}^FQf98StF$e?xR=N-$i~065Xdsj+3>avc31c0ZMs)G~Jek|@ zg%URi45LfVcF?MiNED7-U5IpV%_uO5@Jj$TKFChqQ}_!B_SO>?BH=t22pXW)0Z$ z0%xXuI40V7O&zoz)`?$?TYO5fBExanLvp@+64eZfemFU=M|ju2+&Pq>W{WGeP>V11 zMO`Q#iW^I{W9ouVQEXO4|_+UYiD3Zm_ZA?HvF4q?C$ZG7e(Hz z0vxfXbeG>P$-bw+a0gbAONc9a@8@lSVefTA_t~O)FJ%@3+*D)QXAdm|wEipePP)GC z0!e`mHS9HD03FZmyIpgU$=`q!_K!_;J&NL9S7o&&%5QewPOU zHIVOHgCUF=c5h^P7v)<6Z53GLX$DH_)VSh=B3l;M&%!tqaeD^mAoxlX8mOoVg}FS$ zg)9xV% zI}ji;$cmJ9&q;ZTO~hz~$u{3qX2k}~SZvLpO(3M#e`lX;%44YgxGz_J^?BFdk3iY2 zU2JmUD=o~9a?b!N_#)10vluagcT~FCRg3pOzs0;mLC(1=^TmUmgmgV%s=vb7I+52j z&}*&FqK13#+`rS{SSi~@?-Yu%rw7I=t9Z~RCf2`t+*2frJ$A+zpd z>Nz=?c-{XZ}&qP;Xej%&YoCr0SWi;b^piWOc z5b~VvIHXtlu|Yc_JzJ%*d_sx2+@z+|^ET~NJNP3=O2{f-hGG0EaU;O1* za8MXzE2}#c%&Lf|=j;F)%yNcfZGjOcjotfiDztR;;h5!{7hZue&@XK|=EyvU`WX77`y{6)FbftN|8E4GnoE zfIW5G&IIm723vI!3!SNO?n8YebX^hU@Gm=n=SVUIGyL(d>5Jr+(K||_RlQhm3D-O0 z%~xJqFhVtu%axO5!pVYSd1FGmUuoT%xm=h|fC?M@)H#d;W~vlBVJO$(+yRW0a^AGd z5i!7Jog<7Bu{X_FXj>xyW(yNFO0xl(^SDyrXztRNF;+Hjl_#uyx`voG4R$Ar7s`=| zBcK5D5b~(jX_A7B3-@pa#qP^@X|AImugvyo6xv;AEH$e3;3$)mllzEax?wh4~CGv_{*H0FWX%YD-=f~9zQCGs!KZc@cg!IV~)Ec%7H^05Z4Tv-L>>8JmN7P@$>(Wj5vdnpcKfRnWflXEE^2 zcWD6}2R2$D_E@H$_z1XVO%>S?Mh}&gq{8V!tUgVj^S95ms#Mn;^4u98B-*KxHg7{7 zPb9Ls_Yrdp{O!xH{>J%DIN#WpX@}XwL%}ww>5gO>>a(@LK~OZb?c-CH1+++Sp-l&i z;Y9N#47l-}Ye0RHz$?cgYgIcPh5bdNawnBi?8=CV(T$J%uzXDpS$%M6>H!h?ZSMCl zy9XAlYX2IB3D%Cwb6Kjev>UW)b>ucOV6?{H9#-qwq=H6A{K|xC7+vsC5b3g@xaR&7 zfzgE-0Wxi0J6_;+VG;7~ysf=lq0Y`-C}r(~VckyW`BQ(=oc;1H5EIkE3HWvVek|uF zD6!&13QE3Dx16!85SNaOVSK#bdl(J%LeKyQysJG-)gT_>N@qBWU0ze8-qnmjSuiAG z9WMH~c+SjmWZ{kVMv%l95Qz($X@Ef?pJ8Z^8E4t3%2D1bCahB)sw*FZN*!Tzj_=W0 zJd=}`Y}}VGb_MdYKGd9nNmJm=))epF;wBs9;6|=OmQ&ATgY?v(BIX@RON1VWh__3; zKrp&4?opW4A$@P>x83%|S4Pl45$A-!=Rv7xZ(sBsK%lYlMjnb0bRuj(l*k~#+STrq zdF^gaI(&J1bR?d~HJ1SA#)kZ?nTI=XWOh z==SQ>>w7my6jUz91yh7GhSDpPO6M|}af1G0$?x(!HRMF(@g0go;?d$VPs1~R@_gLt zYeCFZz`EpZNh@lg8j_ytsEm%OPMQdyCl!G=C)p<=H@o%Mk$#uwg@ zMr+hZlno|fm%2Z{x%&k1>zh~EZ8|WQOIN-9`2gbvJFENZo5Q*=F;~}y{6`{kC~>^N 
zwsYLMwJehd=Dtz~ppKuZ@>yMc5B(BH`ve6r3_YAJY60bjM|#BgzZ;96b2U1 zE_mKGSC~uEtKfMkL=57o=TuxDqfrQ_yH%0ULp|*kfFoVzV)2>XzC%ywD*@^7G7o(g z`FTh`%K}iGwDm|wMlF<$kc1-RXQLFNgRaEdw90i5s@GwM{5S=a#V#i0VhcB#>jZ7U zb1o3+Grzf9wRW0he<|!Z*$VF7Wo$}~97k3frY0#mdK5KQx%`HR;dyTeJuH>BoW_C} ze0tZdz(@F(H@y`1AF7T0?wZi3Zs*KQDaKSH08KSI!S!dSmt{IA;~^_`oyo*5tyt80 z1m4`{T^yjm_g79p0z!Nj2%pDu-`W{cEcnLISIb+~fv(ItpErvj_u?*me3E9iOV#E3a#hGWlylSyRZ3w7RnJ`LIJG#R?+F{nDEN z_kZlMe<2%QGiCo;B+qTxpTngjg$}R@NM2jKisC4ZIl_cwN?CPiw)X+o!l{LDMK)6CQ-}@!D%(g`|2$)UZF@Tf`&C$quBzEk8 z10h!nGzPR{<|O`Da?8#ima)?haHolj`K=P@&rbRin=jV@euaqmvjzNJct}M*mqv`a z4@c~K9Q4XQd&HM|C%C9pI4vYWJVc&OP(o6UBP+DI^81Vf(EREo^uQm`TQa61DQnvg zelZgXQ>&fDMS9%!yl6@)uf*tsOBk*5*KhXFyFDclr`~=FK`Tgjgg8gsKpFpZA;^P+ zu(wC3Nbg;Oa~f27+o0-{Rel-g6#R?1ph4?>eLYGfVs*z4q6vRhka__H z9mFkKg+^l5MBfgMgClHsuxU|wa5ZALu9nz z7C3uofJY${gW?SWNQ!sc97h1&$Oz>Lve0IIacBOC%(FDeur_AsT5CPQi?@+rM)uwpB*8x}7XtG$C7 zNdBF^d^sM$auXMmnJogOkkI_@YH58qXvKIv>l=8)w*!9Jo|68lmU$0e;J=Do{j#lz za*3h!UT{wo+6n6_F!evgvLRK*$0u14a!|q0b_ixx!yeXUA9cux8R?t|6Y4?-h;?-X zUspJoKQl(x8T^oVJnfLt&#wbCFAsi?T)z;T=jj^H))2vvnte!zfTuw)NRU1EW8%TF z*TNej9+J23H+)JDqLGiJCo@w99{^e9r?K=%?hPM3VqytI>Z^Ir`(D_0uiZ)Y@|$t| zzEYn!+MTo>-ohxl&iQH1sPmGfX?l6VHsua%8|c~YyOS@oc4+d}_LEY;hdLp2tQfNf z;jN^H;8z}iYKL(`7Q*giITyEZkod${j**zEuuF4Ch7_<`NJYC2Kn0OjKjb<|sFJuX z8R1s_n*>kW{f)DsQ<=@p%dvoTf=r#73o=lZIM0~rI+|C&-`(eu*Wm^%W-aPb_~sc> z1F4VvaNZhBYU+I0C-_(@k^gDuJB9DPli-y}serTq*39?p& z?{zuYQlR&G5KD3X5{^NXy9i>V9{UFlk_4Yozcg`Ip^%V@;&IilPte_+ew+Fc*)ZfY zmyBD4`evm~KkvH5k@d^x73C5_$XJj{gMQ%-B9ew-GIXQ%@2Gu1+M9YljW{E`+u+OvWy^$Kw1@8(|n~YDx zRy*`hp)-kpqRX)^>Jj^wS4VB;INQUa$22{In{53ULMfXxUXC`55?yZA3l8OsGH(jeye|y! 
ztSl7M9O;GG;p$9>jc=CTbPHF-YP#0PoLS82L|o*^$v=94B#tKb)r+uIb&cw!jWG+e zM)Hyqch)w*oa*6R6meV(1_hkwVXn@Ocb~b`X*#W55|;6XS^m?zu;bD_F=|hujCW=@ z|5zgc%T@myE(}-lW1R-7?3~~W6_tRqN2mF@TOskc3`2hm=iFB;O=-w9r zVNxilV&YmP&cevYr`-DZ%Wq4BU)`^d-3lb-PH=t_B7q0WLm@Na-4%(}Cx(nrgy=cK zLJq#nxYNbr;H50M@YMtGACy|KA=o!QhyhmU(aG~+jG?I@KZRYrF>meKdc)L4x56z z%aJ8u+utRuFfs;ip5-q=3|mKgfM;tRZG9PVwo8c7_>WB8{k1qJ(=!N)**^ee;qc{< z<{*c!JVY_7jSTP%m9OA&uRKj$72P>yw^&wh5%ZgP`;(+;oVwZmRsUA086dvQ7mMmR zu-f2w{~&rruB^7q5S(2VRpJ zO-xWRMn4&Aot66Y`n->I`AYwwa(uh#=5k&K!=rQQUUGSqSwV3ml)*A-RM}v|L z0Mp%`j&^<9twxmQyr)3x>KNl`^ykgwFp~ zCtC%a0&WcBx(z#Q;a60%Zno&$Ic@^xpe!PPuI3m>xnyBy#+9mINdh9jx`cP1p)>R!anT~xhNddQn6bDRu(oHxf zI*qD9hpp+GQF{R=%=E^zr5whU?~Y8gaKgsdxBh!@5b%+;}g-|bA;~xQ7lh)1c0R6f9Lg%-uVIyEMuq@tT+*8OCw>iG1uO>GwX{>#Yw8>+OB_=v&-6<-QfEhSVmAIHP#FPIc*RsxgKuyKG8WS9~iu zU1!B@Bc8PB%LpyLcI;|dO_|n{z2)U3Oh_s#5QW7S9amlptL^Mwa{rt{^pZ$?^4U5= zM~xd@6WLFxJKyvSsU-%ytV$klyd$R6opVOK(8UC-VOI949%;?`Qy&RWgUXR%uKL}3 zDlrzw3aw5%?D&|TqeFPrN>99-h6un9Ahg{XhC>+~5RfehLzTZW=ArHlIen9z?y6C+ z>QU5b&*h+G@&B1fZTt6GSK_#90&Im>^GeW_+olcLckCG@l%i0 zYpTLBxvQ`2abL&rhd_l)4{PN@2bvC2YrT=9q$F+|U(|74iIZ~*NMKr`meIWwAJ+<* zYi)4}X8~2{G|i^kp@k0k3D)<^aa^(epoB0Ssu@a$QUnJDH)#Fh?|9QHkNEmN2+49G z6}faCDN;l+jI2PR)aZ0-6>!LdvASrrnPL=T{3L8Fx+gJOBMRT${pk5m&D-W<4V{k9 z;<@cl?dt@I--b|f<~Oz;E5K+1@B8jA7TSdmqjjNL5ip|g+1YAL-TQ9wH%nbm^l~qG zS|@kiRy9?%kd3~SySCJ??^2vo9p?PPz}lT_ug$4%3s;>l7H8O(?xp4%>99aNMWe+$ zB{=K+jXiFzXYT8g*poR=*&I*r2_lewd!xV^pK9s$8zM->kIZ%6gQOh1H)!s-gt?C!h~a|6|Uut--@ za{H6YY;ZG?y!s92Rf`GKGU+(}M50)wB;>spPgFUj#9$o4$MFpc&9`Wh21DG@@S3Jf zUU3EEie`vAnHXF|UE_E@-fk{bS;C6W!{^>xW_edU30O8~O2s~f&=jFgm_t87C=J?$ z++E&E&GBc6CNxmRbb@4#nf2E#Oo!W|dv#YJBA&$n-ur7Xu3tH5KaY$4&5!rDP^ZZ_u2;jZdy^g6I!|T?^+uHs0uax|k0Fki zK}tzhs;bzWvmt-3KbAGighq_fM(UBY(o;n=|JyjOEE9B|20QX}#Iz(UXQ-aPoV?OQ zC!(ZTCQBN2n7kL7RT#y9HLsY;qd6gphnN_HYVodevjf4z)|zvUo9D!d$g%z8!$e5` zJ#?&~^J{>HEZnOP{-2QmaYg~;~X4v)2&uO05_2C}ObV|NZIcvP<&F_)(WMWLX2rayAu{=vPWS)2t0PzXA)v~;nW5lsXq$U2Tn5U@=ujp>y+s`VY{t_t=z18`BL6xQzQ 
z$8j6Z`0^aldsqxD(<=5fEm)O)%l-Xr9ra&209bGE$MsAIm}>lhh} zAXFw4nL$0>S?0w7KC{V)t9_bnucYz0~TTlTJ3?(H5@ zMK)|w4KlCqdca&@2`#Te3BysaMr^Vmvd|*Y#futv38Mrp0!iFT|p15C<|6!#m1_VNy zJ|W?VDCr5KC6}=gqjJ1-Z*jyr*1J2HV%dzKT?o+%IftlW(S{<+d~u@BYI%b5D?U@1 zDT^w1kfhIQrpv3ZI8#Qyl&?tv%@1?7tUO>=Vv4U6Otdhep2)26W(NCMLHD#77_|87@bhR70xB&?r0IB3qkOlmF@EYN=JL0f&R%b2DR-H|m~&Y? zcdG?=ZVWh1@63k;Us`I!vVHX!u961MjP5ys%gP_izzdM=y?rV&pYb-H0T*_yJ1Q1q9Umoq_CgMhzOe@eM#8C_rviY~iy^w9YA z+RgAnLiyhk+y5f40J(0UAE6s*SaYF|8I}2s5{aC!|7fa0y4+1<1o>wH^<2IK=(0F& z-Z>?0Wv!_h6`~_o<_7{X;KA&7je}O~#K?v?L!$~zXHD_Uik}88j^mt{@4^9dW-OoY zy<^7Npo76OR^Cj2IeXkH%tU|-#V<||{DpqU&#!5%eJKz@U}vTVdpw{eo4#)bK)}tI z+b_QVO$r0-+&eLewnvHUXPWv;!B)a-{*rEbqz4FAw=|}X_GBaFf|TEEmTDcJsoWEO?wDPYBIoH2>A9YgY-MiaiPFd)CE)}0 zeT#FPG=t@2(pwTpwlfQO-=CwwZhdI>j;exKrgA)YN3Is9b+K)}17|LT^m3H@^dv*5 zt?=RBmzKyj|6gov{{Npru44sbpto3W*;eT6*|C#l6nLhswbrpur9TEMihO7=oAto4Ixf?P4qO^ zVN#%)3>T$u-PuovO~Xn)=-1Rg5z9=9_-Pz}xvzq9x)8?^m?GZt+Yb-e!3X>ZPVO^_ z(uzaga!0#bQ%W@T#k@9Ng3mLe)e;aBAD`VYqFm->U!9JzZP7DQ&5-MGTA$}mVP|lA zZM`F=>^bPDRwKIdV-87?!b`as0mfw*h{KL79*{fFsU~W#&!*~N-FEPBF)M2wbDlQk^s8@u7{f)f!(qf?6Ky=- zcCw-&l7N$G&kvrv{-j=)oc#W$Qim-e-GC$?W3cF|x!I+yP=L8z54hEZ+P#{>rdr6@ z`#m%2mpc>lNeWCZe>!)+&B%zEC{O;)VJOn8AhA}1>TgzGpVhOLZIEM37c zpf}`*1UM1}dDY77u7glUV_)HAhG+_hZ6KrpY~fW5CRT^7(Q?tB0qMt4US<{hObm^y zTak=rj6Mqf$VD=w-=zWNbQ4wD5VD=P1UaphL?0iR8;Q;r{f|nHp~+y~;9O0rZN3 zL$MB6N^Gm|8byY$Hkd}H+`^q{np*q}z>jh)YO+0bZp4@aQlj;0hw~2;uQo(=?zX|w zElQqV2)jHMc#=LZzP{i0a<>pQ;j`oVJeO7x-sr>k?WuHgu`-)W?cO&tOw)<*?rVJ| zc)Lr#VOxpr6|I)n?qKvaDr52^QMm-#_gtCD_(t7eJtg8Z1SCIYLi?Ti-F4cwe5gXew%dU2}-kg zE>6UIOBu~YeiFQ-^F7s@6SRtoi%PXV5DiL3K@n;``kt?mD($S=K-4|t+X`AN>fwuy z45^$k*=EW=d*}(TKfB^ODs}K{{H--Uqj!Tp@ua`-TJj3;)1jNfAU=;kI#SS z(danRLsZ|8J9RAwHDKN%G|f>iCmRdl6A+bsJ6Rx6z2t5Ze3~*)H+OZPqT}ONytib= z94lZK(bX-jhbvY%CvZ&>(zi;#;vuo z5H(^Uftk-rGvfQK#a#)vFK@k&koa7!t72Io{t(^WwlIukdJYP^ig6Ey-#2 zZ0}*>V#kT`yaWjM^}h;7P7fHXjT(*_Mnk#o+>eM0V2aAgbm!2&8Lz^Jw|{r;i+(ik 
ziP5vBdixlW9IuUPlc;4xpk+i@)zIWYsh5oz1U5dWpLx-&g*Q;mY?Z=FYyUIru-{rSr%SC4Tb-ldK=<{^S;xq#@8BP zN-W&pU@%Y`tj-|*M=gIpPJ56s-+&b1Nk}u2=Ac0kP*8aJAN-Y7nX?a;fxp)k?KQF4 z=q^KxuiZLrAP1A>z)9P9Z_gD%d0}FW`%ydpV)XUl4G3bZ;&Vkm1Htgc`pDEM_Egcl zrsLF9AK~tWv?b+Ojmb71jK4p*`}$BJgpixW8a1saz&L=livm(V+S4sSG9(>qN=o}p ze{7?}qjs8x!T2)J@MH7!Og|KPI7e0OZV4&gT>JP~=xDK%xJeXtS<^a`Q@hxX8uwbt6{GEnVj}-pTb@?E)jaxAo{hB_zd!Cd$Sv*m$Nb2zjWI(t^ynQLJi~QvwfM!Y-Xe6O zFu$5y#5J)OqVC1+bcFP8!n-d=Oh4RO!<{7>{<*EXIEaRbWb7^Ae*0H(j-qz*`=-QS z6Yi805+whKtp01mE>g?b#Au6CuvTh_;TNT{kXrt!L-b{Jn5)>D*PXXo8UNFLC^+^J?afu0Fh;x79GSIzfjX#rfFQiSDRH1akt0CWe>y_E| z8aZyR435v(!*Jwz<7UfhN|QZW>|h=hQn*1Yx=OVmrNkK*)fKXU^jd(iVHpGvbSLj; zmg`6QO07dzoJG5VM#0qccenn(6}rt!#|t7=ViHUEGL2GlJ!#c`dn^9=?E*>=f-N_V zPzTpQnftX%lqMWcNGh%4d$xqe@z*HjO-(|~*K9ytbr~vyGa#FFiv%mK6qSipPk~-^ z(u`J47nNChIz0I+y70JVssqw+GBX28tV{XT9({iK)9q;iiiV{WX1sBAp~p2bb!U0; z?ddIo$SNNkI7MOhw+*=T&7Mn&cngo?oh*B=+@QLA1!n8dR6BW}h}4G$?gs?@3(*hq zVMIgK|3_d0QZs!-px;PQ`Q_jrIrOY7Dt`j=B90~ak6E04x9q}{Ee*+4Mdos4icJyn zjs2xZqaSIeIdJ-aLW2!%vXz_$ARkuUO3SQSh{pLDnZ{%bWN8Mtc}%7#_^v*O5z7>; zd+X1$@$%m=*F9FO3-gbMIa3c(0hg@K1&K3hP^-=YGmkM8k=<|Y&#_gLlju!9yu#A6 zc09Y06+y>d5H$`h=9skK>_iI&1!}snw1DHs;X%6~)*jv_8#$RO!O6-R@4csWTDk@Z zKmVJb3+I#I=yyTN8K@qc91SAsrs8XJPF)~1mE#zC@vH$963G?76m0^I8*)V}DzfcR zE=+?aj+e6V2K*}!B2I>4Y6R|XH6#N=iT;J!EQ&Ui9;WBkq_l;fompUw0y~wK7anIC zJ_0Z$C-tgJz9M?=6O(fM_8Lt3K%_w%988ZR6o}FhMFdZ@U2nv$Ew*PUx^D^Tva<&b zSnX5N|JiMYd}+Sj|K&@A(}sBtIosda`XOHL;{zFA)#MxfgK_GW+JR2!DZTSrN;>hXJ?pq(u3Nnc95jqr7`+YDr0gEA&e#_Z(+KZR~e*ArL3r`Q&KJj z0PH{t*PVAK^j^Hqs1>j@LF_x|zR@UcEZ?)Z_!D8jk-`jtodHI!zgAk4vEqYio4(&`nr{XE4!^ zw(9OMo9Pn0bO{714Xe`su=p%Nz9sbVEx8MyRu2ihjc-Ad|NlMRM=qW_$uDQ!9u0b? 
zqRJ50I7W;qR!;srj?ywm!7k-`p`?H<2iz)yBurg5(O|r0MP^Sbb@p-uJVeTXEzbw% z4AiAO`K)f{cRUR)GiKv>aqnXD`Ur9{xi_9yB3`{RLF}%0*-#R*ZjNKcQ&HSwA8@N; zP&Cd6@7u3Hfu58w1(3Z_!D9{(|1Ilba$d{^R|>Gw97B;CtOHOWw#=RC{#LMoh}4^MeY??-4fcK z*h0jGUezl}4V~ocK@XFdpjPE?KGBMu6J_PKDh*}I(a|-si zu!vX{wLW{PdyE9762J&7j^ex&NVj@vso{9y5YD-$fSghTz<-_ZyB?ikE+4%L!z|Fe zr6X5J)!FI7EFvEPSeR0nGBZQU`pqA2RKVkJu26<5k85~o2{<93zYd0&GI_O*H@f@a zx{noe_f=QQXxwHZR1EzeyiM@s5w_7Sd-)1N6j-uASKS6&4I?_x+DAu=a+CrB$Ycg~ z@?u%{o!~pe2RZR1MK8S&^N=HQaDv0Gsb4bd063mm1KCQ$1{Dw?w6T7JRvsYOT(Ion z)~MGK2gNd~y!Fw086?qF@ZN06ZK($zv4V*ZnROutfE#h!>#oxfO~NYgvj%A1pF%?+ z%>`K!{lOMNM>-em-YldG2_zj+;Gn`d6Y{7`k-)kzf zrJo+AYs5}(O-9fe#VRp9lt#nbtpZ#@>+AvnJM?-(H@#s6{IwnOlmR{n5KF`=1&>go zP>JMM-OBopu>t*u31R1h>?JYqB7-$!{ipT|8f7*(KKPF~Sq9it;VJ8d>yu$o&@C6b^>~D%@m4k8DcN>@#W#vQ2#wBopfw9koX6*iVeKzGI_NgKCAcO{Os_QQx-^KmK=n1-{ypBs59aBHSktb}Cnc4D|x-Y8`ENE(=T*s9t`a;$y_#J5ddZGr; zQCk4;qUe?72{G*gnMgPcjrnxKXYZ7G!PU?XpysJN5zf%qM04#%IAn1Zv z{N{eMbLEp(>eiF3dg|$NNh|ZV8S01ua0k4QN%SYrnKdA11-t;3d@lK@7=6f8$@2Ed-p>mND?IFBVDW=tmC=4W0j%?lalmvEwcp!?~_$n zYCMG?OU)RhJKmRGXH?g_Vo8yyY9ugbvGkT5wG!jn01z>- z?Kk@v#|3_lSNFmWoOvEh1+b$s?LQZop2IlyrFFS=eEJTs2%#nlhNQpe7P9SYf3V zb##=OIwenPR~|r{Ej#Bakv+#9%QfSYDHDT7Nl5uX8`a7HZ+%vKA=MD&Y9e zt?1;NHesP`wQ56pg!Or3A~@h7rYdXruu3EA)sEPeQx7XP3LpYa$!OSo#8=B7v+(Ce zaeSmPM{KZlnf=>Z2$E5Db4YU?E-O|Hv!`Vt>r0uEYC5XB7kSGtyE zWVo#+fWCfgtZ5t{cru(3Ra4(vgu{D&V%CuDI(QfZ^kt}3_)M&V!3mpPe^s#H!C(bC z1yqDzArk`18;_NRg${eT$UIjG2I>Iyo5Z8+qZA)oP|FOd!ACWaO zdNSy>+ya6<3d`XtTrDV9H~^_b8CgGuTK=E^;E*9!;FPqY3W)T`WIR_t#)05&MfV`` zqJ^Ia;)&@on8#=^`3qRbS?*lEAs5*iYdatcekpo@U%VtbqE`j0zgL1FhXX&%v0_f0 zB>IiVi(^AcUHt_qdh;VA4FI=H$`j*_crOzPq;^Z}aq6TaeRAO)tAJ&<-d@wgI|Qyt zw!Qfo>El+C5_D$)V__eKWP%S(@!F@M06ooY#KUv$Kb6V5bOtqEef<`OnGV*WR9q|< zE=cdmNIA?6R|8}SF_8SLjFA7PW4fsFNOb9^#KHb*k&c_yNeTLuAan_j2prUZx?}q5asj(1&j^xeSw9=IjMGpz zbkoa+QY9j6Q-1bLXr1go_x?b5q=v4WyiHsA|9E@rfGGE^ZP;z2VvRW}7&s`1NDK%H zs3;|&bR(%SFytW3Mg>HrM7p+85;F)0Ga#iRAs{_NgQPHobi=o9-2H6NcRcTTo`26# 
zH>~@&>RQ*j7OZ|$QllUPBrQIg`YPzqh-}EBL45U$K|8SNPySC_tE}MuejTTp=4dnh z;rsjwiJ9=Os4lkGw~%z#|Ld{Y|08m-IB@FVg|p%&_2;bUQ9{}>7cxK8Lk-!Rzc5ZX zn3(7%r9xzT25#6qaO7@}6s)Gh`I1B-zvUbxgB^Uz0cCRH(Z@a1H=}&OOFIh+K0tU%M+|~XtoIkXX`e^64^PdX_Mc$^S zChGElg>fq-jS4cYFwat3-lMXGgdoQMYUcQZ#yg7QaI$ji*&h`gF;s3f(kc>5nN6p_ zsPw|Z(`P|0F|4PptvQY1XjE;uFG>6CsdJ-cA9GIWxGwEDeTKJOBAos7oY)@sAFK$yqGj@3$r2}>Gn;g0_ISY7&Djd(6>~K6t|4i^LnY740 zJ2-QRd4cd`SM>dYn6DEP{RK6aw**91(t%PJuyqwuUtM~C$9H;gz>r(dAZv0U0G2Oy zEBM!L(cLQ-^y|Siz@1&3KOzGr+{k+5DeSR8S z4cY`DxWr3aUWdV0oerHQr*e6JcTR86;k|eK&pciaw>-m`Lc@+e9=+FneDad3({_hT zwt4+gPHh;}71EABO{FBSjf0$^>Owv}#hEI`mTvo{`m)Q|V;$86(ik=0RR6#37w~C? zvu^C;BmlRLNx)(9^%Lf3Rzy>^$~B6!O4w0ZVFVHD;L>lkTAV$&*UgW`GdeEbLFrNs z3@oK2B&Zi)neE9DdXWDKYpmhuT)&JjC^8y$T_sp6(fErQcNxvh%}d*aFTWvd%Q zcVWNoky2?`=~3=wda^aqPELp*2HW_xGy0LDo>C`ICl;Xt>~}aE7B+RbB&Gi9mm9F~ ze}c!TvAZ-NfVIk`iz$E8;B!xDU;h(r)c&43g3P}T_Et6h0g~)%!cw#1t}+SQsrUE7 zWXcdKP)XUdHD+&{Gq0t?JhG^GpGM0|p0MIaG_FyRu_9wYc0Yz;Kg{RD38TRKn`C^a z=rX>yqXd#u^$$KB80)@$QaJxA&CBO3wHNsJ%_#bqVL!j%9EodDtD&a@vxwQL#L2Lobyl`PmVAfTyg{I&SOKTVEvL$>fIa*XAjWg zuD4=#{S{bYd#aEiD+wxUyu?h&01S1R?yxlH!FUK(7VIv+ZOBF-{=aP=!tcMzzx_eH z1v*9(w-nVt)^JoFGH7qLQOLm*9}~dOemx8)sAV|AL>H0qL}V(iK`I!cw_?3*4?m9GT+~o3?OM`7?khxEfBAZ zKRB-iT>Cn%dN_?W(ZBzW1Cv9iH7H2#k_To8?Kjf7Ma2O)XAsJJS}uoB88p(S1u8*V z%2iQk^DL;z!D_|Ps>M~qgdBo3R9tzWfQHczlOji=ga3fB_lbqmQ|d*Qf0ss6Vaop$@r}Wk2F`!79$E(grd^eeA{9 zG-_(}G33|)BIciQ<@^UnD1RDeI%6?K3K9Y^YFBdjUSMtXBG@wd0I!84Pmv8qn*xg!J?tDLwkY7vr*I69^LWY|o4;xowU-0S)2Z12DT1 zA)D(2$HqP@?R@*i{}@r-Fp2V*S2d;Mm9EDbH9d#UH=Vb;OTsOsLpp;}Qm+4TmJ^v4 zELr65(%iJIw8re({f%e#WO+#lRF!x0dHDhJ@(+Tt{I1UUpJFqw=$Qfc^0jfDn6}HD zVMj7p7M8dEi``yj0D|5{&DzE4x!=i^h7R(dK-wk8>l?a^a~J?<*p>IRM{ObCHN{Q8c6q9mfCz{W`iA*-@7O!i(q6CnB`{f{17 z(6RU{F9G*2gTq#c{#*~rppH-hJpO^kT>!Y$ChXX4aNXw3y{dC?6MZib&4Vr@m~Dx- zb+Y22gP|uIlH!b?;%?Ub(fIvcChGS|s^rVlY-~?8%=47Oc#U6R;r`>?;zX5mDjJya z%z!*;fMpX60C~znMq?YnbjS_!RZ=bj-2T8I@m^=nmqd5&nPpAMX1#Kv@$Ox2JygBy znRcf{l`6m`@!pRoUMg-Gv#AKtlyUsvRiU0)8!Ns8(Q-HzAD^fQ^CBS-bFHZ{w4yhr 
zNk~sYzb#Bw;XF~gohc&b%8sWR`g&^-X!FG;myMVAd8G}v9J-uk-eD>!dD>qCf119F z*&MC`*GnoGn+$+p7mQQ0MEulhVzk~!5C3#|x{Ot>9H>E;uOow>--ydF3Fe0Qg{(>c zVX{+h7CHDf!Mz&CQpK)bEme>c>UGTA24&`^|7w}Z`*y1Fiv3`SikP=mvQ2ZYK#wxu zRPe|Q)&mJ6sWQxPK34u=A`HYHmxmSb@Df1u`MoE=dN3(50Bz!$1b`^}oE|1>WIc<6 zz3}1M^-88&zaE5!q^#H*x?DI)avN0XKsN#iZClIuPM4jC6ug`+6TG~XkqHi)3-@P* ztY+F^W_I_oZ*O_cdkIk#hJww`(t2}7OKeB(CVE_g0_He^+kK;00Nu^q3 zyjh+_2oj{4v&@of+169-C;G~I%Y&U$gMc5tDuji+qm#fdN<-55Ihfe%|Hd8uQ4n}N z?PSURj}i9S^2tUXQqg`FKyBrUKx}POhIeaouDwTm3L4Bqz9Ri0`psQOL&)to0U6Lc ztVU!XuKB_Zh^w&3(LxsW!zL#0G*D2uPPp(hlY|yP<|)3y5?aVzjgp6E#gTWzdZ@CI z((?dmnq8&_nW9i|KOx}C8`DsZQaJq$6BC^0_5;{cg7Oj11yZ^#!7HZ80#v6b4$zsZR3)3Hd6iQ1Q(Ew<{i@{2OBJQ77Rc zNjl{MFj6Obj9mP|UQwE;R`)OwZsN!r10;jE53mpwL16`PLDDmg$FDaP)urhl3v@t2 zcnrurf*f1YmD4OtlMlVPK&l@uSu`XaH?ED92F_5Hd354EQ-o}|I;3{m4=a?;5oahl ztLQ_#co?E`R?CrfO=LkW?8)I!_6-~0#&fndu_HeN^r}S{&D5X|NooDunq$BKf3r0#QREO$u|(F93t{l$uKB{% zqb%(z3Jg8xXU~3tocJ8Oy-xq#dh$ou#WWo3f3YcuYy0MONRbHNb#DB6)GsIh>WRa9 z{)Z>-$sH>`1a}&6UJoC_RrIjZyqR|P+n*xF^3bI-do@KCzltJP5qeAgPjC|k6;E?; z7`nfZjSfnc+kf3wUS|twd&-7=6LHU$=N}r}v><3MC2S_y zHPW-KZ{J&9PFD@oI&btfKSPC^9}cQ3%j9nk+k&N zO(dYFltiEP51j$gLY23W7P^Knz&<0`=m0Aj4oKN4$5o?A55wQw5n@MU-K=x zT1vFf5zgQ0aNshPr4+u@-GS=QQgDb8)bby!_YEq)_UE$YPrISNs~Yw=AnAA42{8%K zVOLTfh8qTp19Gk)vk@`MPH_SC&hf$I)Q`B-Jj~rTf=YGxUe%5mc-tVt0@j7(j3j8a zl#iU>%E2?ry+G(hqUJvKZI8YMQu+e&43LAn-{r9!rJNPW2Uc~3!t*`*OOTnDO3IZ} zEb!bSLU1I}`7{xA<~hsYFXD{}o*tiW2s!5TxNVX${CW!( z-6mCHKV0#{!f~xLTJtLzGfcPS1Mj|HCOD6V=dSRVg^ykzs#Uvj>!;Ie`4rc$fAVO{ z@s}(?2W|}HNsS)abzSiZ`og{!jr!UedNR^_`}e8Zx*2ZGV_06EaO{iF4V_5g{`w{2 zcJ`>|%!Ul>zR(OK0iF!&k-5EPrT&L1(Ymck=B>C8bTQFEjg>s;GJyWvqnuYY*q{>M zMYXtRN+Y>rjLhoq1UC6vSG9>`Plf0!hcKjP+zt*~5?WbYyH;h*rkxbAlNOoRJ57QM z_ZA*mPa4=wS*s|vS@W~SK@k_HF8ez&Jb6kdXf60R9F%yy?N|%OxxR4oSe*iwJ~@l` z`H>ndu6?2FNlXs0>q!@Ehqk(?|D-3@m3C*MgDE9qG<6rnAvNEVZ1Fk1({t}pkD(TJ z!xcx*6!i7Z!Eo^_ok>-q!DpHdFTPq^z&w7g-PCOB>%I53Zjo+9dnBUC%84(`N&S)? 
z`GVQlgJhFuF==0(94s41bqMGAR2n=tQ68)HX2~*H_HjcQab9(2G4Z|U{tdXE4Q{5~ z_y+8x`^w^69L7&*tmq<7wo*40mnuVguKSF{WvY79kjql#y!M^EPG(aw&~wEcPfz=0K*nsVAZwN3Eu08rSvoC_C3M%W5@XdGV2a90J)&WLl{* z?SP;nO~NAwP5u<2riUR9_8PQ0bfVGYCh&=J&}}2lA2|1~*}i`*?d5usEraGT{6>`f zf3gkRwQJX`U=@iq>OPg%jh>hg#b%y6CT1~A7VSifY}Q-~EZlpvd?&?$-HrXyHn5d4 z*lGxDl>oLH16#>4XdYA8tof7PTDN58zdOcF_%E7tHjmciThYx987|K5Mm=D19NGf+ zM+?_KpT91}2+~TLCm!z$3{8x8n2Qat(K7*SoKgaV=$Bx?io8jf>oNndF!8srFJ|8;pD&_-hf4Bg>bRIq7KYzGr}R&?}q=^=m8n&HWZeT8h^H zD?h&x%4;m&HFsMkQG-mf{QS=SEw-bQV%Z(5OjJ)S%v4HNP6y2r_{BX68-&)AmZdtk z_!(~2%tMU1@mFI`AG4TJ0At!+S~aHMY2@*_6*lbn=8&raW=rSxZ(j~;ab(s`1Vy1H zWeaXr*jmY3*H<|sr`2%jwSLLc&4F0S4_0jia6^IN*XA9^5Au^5!A#g3TNzpD2CU8C zVCZd3LP3_<8|ni=_)||brmPN-w=?I9;2SR4w&%(lx2mpd3bdMTn#155z79MV>)`Hj!+l8Fv1=ffXS)!yai&*{<8PFH|B+A>m&~ z#~Y1g^Jq9bTLC}YB_LGvjl;Q-@^a3CGvJ`_$n*UoD{7>?Y4t=HYOqS}O4c2-Ab@`sz-u-*xK_pdB>3h}>q+g1ot@#)J_9>5?WD|~ zc4If#0uNUj9`3ro`v4obFr((Ci|ulPI_c^@7x=2K#$^~8hvYOLhU=`V925P)A2;mZ z6H=-eQ_Fzji^q^L-BfTxZ&9pW-cb6L43Fh|3g*_u2j{rWwL=ApaR$x5yd5k;gOh8W zcmj=NP09Tw9IshYfCd(?r5{fk>``t@U9zM*SgX;hPZ7RUALnsbrZrH2R{{TQp3^4* zoPuTYh$rbeO~<}Br^tm>&gM&ph`?oK@@S)f;oiax>q&@5A>q1&1)>lCPD=ds&Mh|% z9@0zArJpAWxAaVQFb; z;k)Td=OEKs6<07{Dfc(??OgZkGhO&*zDpOubnr||_rOyPfv39Z_n!g&mc=GFoZ?!Q zH@cnk$&Z!8?ZgsDQ4S_ndM4#TID?uO+ym9m!OvMEUrS-+wzHBk2_!I~x6xn{K1O#i zLf|B1cqT`Hpqwm=K@4vG`-52IXBk)3BEO&og z|J_WR*%9Mx*wr(|t?v-ZYfe&7Q7JV}AaeN|m-sB)uZE17@8#0jqs%Dt##UxyIs`g1 z8>{4v>HU$x=3Nnx0?T40bz5`Z6%o<#+Yc!lP^-;yY)a2ZWi5pL2sX{`6m9HJvwp&#k}K3w7}2Uq0DMdH4JmHyrHq_s(|v@}tyv#vY(# z{nRr}j_iC5FVG?<*`fZXc$s0a76vpMwvCKk+N{|InIjbfof-s2B+KfpaZ4s6Ud+6j zWj8#8=j8%k9D1d5i$zE-@HA=xLp^xj;?~FIrG;|n;trcyTENr%>D$9wO=>^2US7?~ z`15b4cFY7zWW>*T>03sHwkEmVNsuioBMK4-i?e#!%kzl$`n;oHBXW{-lZuIz;jHiL zc|KKRKUP=@9VC;27*J1fU#8Ke9=ay7pV+Jmv~u>8rl_4XvPvddeiA4EfgX%1;pLyvXtIY}N-E3FZqWaO)j`m z?M!d7j%=c_lT=xgRZys(4xjzIE9HP?0(Y%dIlqVZ4D?-&hGShH;93d!c2C>g4i2px z8~=!i2*uWBilu(&g}bCy%aS8T;nyDTE31ucp;f|QvH*bXP%H1^^(%Tf0cvR(bn>Gu7-;YY=3 
zA-ty#mU>v?&Vw11m%?OSBSmx)NF;8qOrQa^$kZWDR>o*2$BO_g$6NSiJnbs#9U^ zhf1&YQfC-Mf=h-E3iqbJb^>X-k_AgM1@l*e`>|mI0xtZ8y9HR`nga!p98;l=<>2RVq`Rs6F8YIcAfxF zkWFb^-ja@`vCyhJjjMk-eo*jr>5!+(@FVsus0D44Xel>ErytQ-9-z$yZIuQe?L1r)iKm^>JqV-1kET!{a z(dEQ=FSROx{=@LA@hJGSgOS|-!ALY|3+I&Ri*t&JHs=d|PrYZ;r15PcgsJrMr#Fo! z`fwFyvKP#}TODwrugwcw=aY+x1xsdD5ahTF9?EL9F~@5=7`s(OY8;mdOW{*cp8AmM zk(lElt4LHa4`jGQfIs-$ErqmeWBNQ6oZHP5obl3ht91Yp_dcT?vc-L`SVzy>#ROzDL)35jxe)6D0Uqf$f}li3 z`i%oncObo4ljmx`@l)4NtG(Hk_??hB9_Eh;n$*86`YAEKr>93^qOTIg`6O5ZE(=!B zHfyr;|2@be^$MD@-6=O()3HrmV7l=l>H*v*eWH=9jJ%N6oEkWog*cui4skp|%kaHc z^8+}8u`X_b(Of6g=R2)}#`0beg<9x`Mg+5!V&oYOrtDms3^%0Tx}3;s@@$*Jl>6en z4TeTmxC5n7^uhCBbWV$YqA??$1&vy`ipe|0HeWuoX*uvgWdI?1N7@1`liOtxCnQ+W zmnm2Xju7b>49xQ~>({?QH4pn_9k8w*5pFzAeY= zzPwB*x8b8bd-jxG+plGC7DZ9U|NeoXy!@>Pd0&`Wm#WMf^_~>OrO)?KNrf1o@1thy zQ_kN`X}=mbe|M=Fy0DP46!e4T@nAH$ul&iuxIXvUo8m;j06A?#DrJ6<(3jpW%ldqJ zwBW7ueWB^bz}jqUH|Z$N2s?c4#}v!HP~2|ZGnVCw;22+7S-&G9`Kly}USzg57zvGT zjQj57BycShB$fDAgn(;|JfTC$S-Mzm+Vq-drHHamPofl_viZD@mNV+rWGLiKB zb8@>}TTXHXpM!?CR@HfZZ%|~=DNA)xI(mq2=fBFqr5o3i#vljkAXvb*Z&gasns2>E zsqnWdWheA%NUy=|4*kYD5TXKb zW8rN2b88kwmC3~}`Ql6~ACIw84E4fDBa3-%nrN{PhwMdHj-WD_k`9yambPZcaT}D1 zW%mZ{wjYQy8}}434SFv6xN;aw^Z4SzP*zc|cOM_QLpw-S-%+onXR0nCxKzX3EVwD- zJ@!#=ph{MG-VB~^M!?C=vTMT^<+-}(taxhm(rmd7R6aB#C}a3LT6Nzw>%MI$QYsC*JmX3ZAKtj5 z{nQEZeXP^E;X~as8EZTl|5mE~o;+Oc@Jb!wF?ym%RBy=Llr-wehoe3z!~)vzZ+rh? 
z7RQ+pY_zOqj2I(|Oifm)TI{~b>vYmWi2EN=u9AMPswYo7mhh1_LpM$hHxsd0<^>R8 zf9d?W{K$oU5(%;!Fc)jfj|ip+b*3jN$hBlu`li%mnNd92jm*0E=6b1(u6Yih-Qg$= zc2fA0hz_mIHCxnkDR?SPbd*1bxwjy=C%Ttb-QrG`B&8`gzm^;cUT#X&@i%RJ#XqaE zVU6dO?^z@Lt7!_|{qGk7l); z39cM%(&qYa@)vu%eRYiQwJ$p99sl|L-*eFKVO;nJTg&^m-g__tuZ9&xqCy!8vp{wo z=P4x%4mZ00`oEbB!Qb!)C?H11* zkCAlO8byUcDiODq3b*(!QZNkgqpbUfALXx5;QvI0--`d($NPHZ*N=CKFjy~n_-7M| zXkq8$=J~rROalM(=>QJ+Q!D;^NA$a!IX(C>5US&zM>3F1ExS4u>aHf=j&{yMj)c9< zTsUSSWc5#8^;f9se}as&&_X-@0F=mfYqUL&+p?>u$Som0v^{D0^A6X|B%z)A-RShx z0@=<%fO>w#LH_9%|0_KE8$hyw`NzunOB-^>KeEfi4GL7231z;eV_6xv0!*5YM?F!L zeS24r>|9)$tQ+sVzhk40JLSqhz`}pLg}?r|zx8=vq1T)FfoP}N_)7P_rp4P0PXhw< z*V6WJsF#W}JMYmv)N8R;C31^nf8^bda3HTYs|6nbr?OGGdfPbKSi&bn0tsXtDQQL7 z)q>g`)WH;p=faX8qjfL+XFrW}*RUR}QktfmWvM~G>YrCD* zLaV?fFSKjEUP11p_AO3-I0XwcQVi0%-nClH-?q6uQXU^#81fa zeETt`qzksg{zvG-!S}97TX1lGYc|RlNbc!Yz1y4eK-0TOREaqtYroU+4O`)tUj38} zfG`j$(azH8D;lCjqtuI--W3JvC;fYLmjE|9)wLjGaa4b9)Z?K`HjZ}IesQdXLGvvP zSbijQ`xV1|>r>u``1$HIaP=>b2{}$nUo>vnt%>WmSWEMx>9}e!Ie;AV?xH}UlgV)w zTy8k~ZxQHRly=Q&%*p zXU|6$W=4Foj)xnpgI~k|Ch=ygh<~OvgXT;)dopX(@fHz#fmzP=YXaR2%Y%0P@VEQr z@F6}^zfX}DShw?GWn!COT-!QF*I7YpYH*!hOIum$SP2wa9`k*B@*(>V`?P&u=NHvjIh zaC0>SN%vXOTN4bGq}76^1aieFwAg=$vNa8h%1ZtlDv;X*-|D)m-0mn{k`a3}OmrwXta>9Qa z5DKA46yN^CNd>0-KUgUK3==co!~X=BeD8_!t58|ASp%0R)aLZMrPP#irT9i0&qNa! zq=hwBLY>k+Z*9uKB|E82#$~7Ao6{tfgH!liS14cgPd6zar}Ghcfy7HV#4~|0y@B0}0Ttqk)6{$hQz}n%s zvIS|U;p7*o6=6E~S?+W(yMk*Pk1EhP?W|ra(OdcVN{vzVGV<^fa~@TOB~eHEtnLgt zO16s6=Iwd5H>5l|W#KB-T$_2ZXU*&eH})T`?hjw;bm;jf62*6`!)?QOrCakC zM&Fb>o42u=^=DyQB82Fs_Fv6+NX@*-DL;*p^WUeSQ5q54A4{!nd17bvpu)j8=ye!X zN;cHyk z5R~>gVr6ROEtydIWWSL_TNx!f>?RYeIGvDol_U)vdgS$8v`nO2TvrGe`WP7j~NwW(;4k<+`9bZ=KlU9{P*_z zZ(V3O`v!%uvalJZK+;`;+|tNMpC>-((28Bm+=A=OoH;69)(QV2;@r@BYqk5;r^^#0 z9-ro@7^U!TZlFx5c+q}wSh~5+_i6aDT|j6^l3R)kkshgKIK-nL_wg%RpNoxmVtGQ? 
zCFPDMM4=qpF0NZIY&08dic$&=&t6Bx54t?SX-SMUyBFar}em{Ea)=tK%O zjfk%^wc-h#>ykF+7C1iBHKs0$%*Wcvq6Hf9n}oem+A zbk_!3rv-B*ymem%k74mvp4hjPC7iSiK0QKY#mQQ2<>oaEPlb)EaylYX=i|1N9q z`w2(>{FLHcXXZ8T#l)Mn^tal5b}1+Y`1v+Qvu+c5n`>14k{38sm30tHn9mWWUS1ZD z>r(cXn&J!Yu*`|{*s_N{^;s7oJV=7Y6q8@Ws14^XUJ z-xR)lT{he$49Pq7*B4!1;$Wnk;2v3FsHOh}CEDA(K$0mYLJ&RPVp;VhKz|c6j(Q%~ zr-s23gYEF+9sDa~HBnJ*n8J0mK%@)$$#=V-iioq)wDWt0L!HcjGM2NfD3GJB{lAdV zrp7{%zUg<)=%0EdYs29lqo*s&`c@MsmMYU!W8}RxN>+@W*rXX@pq!r+jOB|QZ{33; zn`~PscProx(7{?PMK1%RB1S$k2jS|Zd!*Irgq)RO#bV9#sc5xwX+;?5rqU9VfuGB$ zF!f+3{F0WqhR;fsOIcymk0~|h(ni4(xd;Y}>0?GBlMdb*oV7n9cv zgegAa?aUwury;;Vr6@Ssn1$wEL|AOIy+7-G{2KSZ(%IL=f!B@Qj^U3&MkzV~nJWS| z_n-61oSiTTA@%p!*&n4~o2D9nrhD#8{|0$$X&-xq$rmkWr4B!44SP5x&c7s~E^8JF zjJovV_`2xmvq$#Z@B)z{4xKe^Pw`#M?Wn38&CO|V0=g}5EQ$=hHEtL1YM^CI4Ms$P z(Ziym^}dMcg`F3v)p1x!`{s^{N{rG3?{n!wguz7ancnQD7ldg;o-hu-qzs?tBO0#9 ztBTKvineuo9yVWfz{-671097!Ko~$7v{ad!pT)hr0I6e~ezE{2N2UX>I$1Xrasu@7 zgTVGq)Q`4O)`m;<`^IU({iS2?lS|znpXGgnY?8w{G*%?+wbF}17Au*77+#7xPq z%#i%gNx;+A!q(|5EEh>jV=d|T^AvD41<7whUnI>Wq$;V)m>W{>gf!~3`3mX}h9ss$ z?z83Il_c!)z%n`N;Y^*2NFY?Z0X1^ovhF$Hb9^1wmn^3$o$HyjS{~$}n+BZ^W51r_ zA!hdH301-**ET!maqirQ@V9c?f7aPa`C+u@dx`M72wmUD<}w+>HqYyZUE_)JP!miP zzbq!A1Nov`TQ#YvRd%`_p&@ASidRZY_65=59oBKc_snXb9jHY460EhuPI$BO@!=Gi z)!MGc10`=v7@G~(=2;=oIM|q20c#VEQJ&9#t|q$5E%qmBQ-ODDaz1+sN??rE=ob?V znJL=t$r6%Z12hsiqA}Us)(W^UE)Renn%-cX2229E9v*))JrUlp$XuuNp#={VUm;Mi z-=0KAj5KA4Z`r!FH`R41d2Y^O_Ooa2SP645fP*)MmuK~xatO&oEqCoouK(CTq3C-g z#Xmy(Umu$mh;=48%{H6KbB@id_nw9Txg>=U|%61CZ{cN(?=g^=pvKn$I)7xD}}&EGR>**$T=e&C^xF z3n@>SgwzFXhfXn~dHn$DER$r7d^X+_AjhYJ13nl?g_J}n-cVOZr#wCQV<_N%P<*7j zm~5SEDYx#O{j5|`L`|%c2~uz`}0i7-} z8BI1>ldPbUAS|K;ov`abc#0qw;YwE{379(1-Ao}G&}K*T%P54|fcJ5OUz7T#$_DD> z_)!MdCB0$g=y=)Ly7d{%4$Si*&w&j!qopsKUw1yUC9=(6QX!^y_E<@H$kdS9?Qlur z)b1(~8xevC1_BEd#qmJJ4cdMk_+(fc5#D+shp#o5>4woGg^(RS-)PEnB+oS+dIk8{sUyGMOCb-;8Hs@rGvU>rXr;yV`bSeTe7bVXp6a(jlHZ}TLQN>#? 
z9E-ys0Pm~8x4=645klT~nclxQ7XR4aek>3=m7x>G2~}L+O74J2+z=JSw9kQYG;HH02tG#pO#6+J7(LYoAXfjIS{iUpxn58({{XFT` zB%QjKilPdP3YF+O^^dJ8eILuZW%+{XPe>;Mq9R!$_t`vU?pV~H>n+sz=ocpnJqv_! z3IVFZ#B)TUu3=_aH3#9|^UI`R%C-sFM9WMND|fCkFs24YgAgJ~q{Cdo=L0=ueSiBaLg znJ>5EUz|!d(LNw(bb5C%@eS-R7Ag(CqrR|5?~5=la6{Ju*bE9=prKI z((yj!Ic;Dm3&G4Vx21iyX$kVqmi_c~D-fU+-xX#`1+rn(mxj$<*=JgJ-D79exCI-JzVGG!pm;gw1C{VBnRcV;W6YHRg7%4A>`=m_lrvkDi9&W=P%ZmNMC5^Ff>*aoQK~^Ij{A&WebF0S@%S& zIQ7{;733`l5w?Xf>tbo^@+yjymcqp1ay&c|=hb78fHE3r4+YAKU)-G%? z4V=riEz89CDG}}s%^G(1Qr?z?CT85Oym;N{q6qX&K;3W33a5S2qz8;1U5FZFyvZY&BWb~T%JYpc^nQ#7@Mquu&aCurb4u&DqhleHW#Ml zV>TW!e5X9+I=}2IAnb`!z~UQdJBjg`+;!TApl zoz+!tzr2Bhp2>p=;blm&-(^#I4nItJ6_dUA*sZ|tx!kruT#%+drgTsc@%VT37K4LL z8C85YhoNH2cVO0SZFX^SoExhY3DVzqgcjkVBzsBBe6ELhj-RG?sX>0PXe#4Q< zsl+nkj%jX`3f8XSrA<%338ccp;DyZQjgh$dG`w+>R?JAqYS@gedncO+WmxTJt4e2D z2xUj$6*5z>xMi3VJgN~cngVm;9FaeEX)e? zt*;5)ykC)mFt6D4He-JotZ{X@Qc`FEXu8{a1|7A-yEom z2~bc$1#7TmMNiZoHV&cW<$I}H8_4inRs#6KQuBSiVk=r2KfJs!f?*S~ykj+wD>vpa z9AUSPgD_^~9l<5Qeav!^wz~8IRcDJxc4kLa&-k0O!ItI4WtO5oJScrAm&`Dw9ttV| z*m-I6XYvF?!fC$^v48;s5bhx4@Ro?+S+$VK*4TP<4zsm()CmM8TB%!DDSenA_8XMl zWB_ZbUj3=8u|TfsoB!;|ypg61OyD1(vVC_e_Io6aKj7MWw1zl-zFVfY|G6;c)Y549 z4rr?i%4?z259AB0F1;EL1^(c8Vy%m|qxD+x`Bk2|UXtuhm>58-)F>?$2{>&4DphGJ z^PxxLXIND7Rcv+WSf)%pdPDNlh|2!(AkiT_<8~xiYY^>lC~`UJdD4hyewt&&FZvae zw5XX$Ua|^7#D8@@v*E#XzE#Gr0%H zDhQR)(Ju4G7G@v*^DHv&abcBh`P-89zXjLU)WWa{!`85xW}_$P5T1U3dN_-=%0PGm znk>7hz%AWC#~ubF-QBVHDdo8ik0rf!7cv%^$myOONC1iKg!8 zrgof}^Ik>Y)~f=Ue>Z*1t3*1l!`diX|3$Nd*UFA5VHggA^6A`gB|IpHXaESj?2#h= zu+Z$Cj7od!PXUruu+BU_u?eqg~u$9|wY*r08Oc|Et^J ze;L2az<62~kJ-R{@hrbcZO8h!`T1X3B4MdDFU67`5!{DBsl~>X{#e82apPKDmrd3~ z)H31{^DuL0tKi_kO{;gWP!*$Z)n##~5zk^Q=7)7mMSyN$o=^dZrKgC1CJEL))kJ5m z&FHtLTUsspbypTOQZv-UDVd9qT-$PO`_e~BV0xF(MU9?bN^IJ(q-}FNnWT#$pN%$M ztClD8Ddcd54)HdbuY;a;-xup|hRkzI6U2aB7`&5mB}+BiDvsBznHQU#2_ztlU0a^x 
z-B;IsdyG>80QX+n_it>Md>0^TD7n2#V)$)4=@0f$U1i`D43#gWRs^yCX38dwI@0HV;;thN^jbY>~Q<|h}vLZfT&bXuXmTXsbDq+R>?!mF1hxOJ^m;x{cXZ`gvFvsYsosmU^T!U|2@!-__lZi$L0+)n zoOoi2P74NN;06sRSZ??ccGQpKA`9zi&H8dr*++7&6t5WP)}yt`uRbT<&I&rn`RQDp zimPyEf31a+Xz1dQLH3=RYZxgaUvAho2UEXEfX)#pi07ozy1Fzu-@xV=T)0_tg|R;M zj$n4;qFy0q#U52?e>$j5hZm57{9S#A(ANvr3;Ldi*;U${16#){IOO%OGG=(l-ie%h zCvAK&F>TSRnzPbH8iv@KlrL8%if>QtYA`IS-DS1JOp+sU&8k9gDR*!t!4d%*1x%`M zV&WSvWl44sp0}HKdwI)HT68aOvPKF?3_kDk@}Y_uQb(0pYTLeFX%z zz*-R1GU7EIhcna7TC#3EHU}#Ee??W`kEz68BY?FNX%7oiK)%DwuF%&`Nv#XiJnp3r z14dR!#m~2{L{46(xoZVmPXh40O zVKB)PQ%U39tu@(v{2=s2*7noo8I^o7(M)DpJ7@=hM3C*Wn9=U1cn@Pu2E=r?T%w|R!r8@@`#l<>v1jo!>&AV6|#wQ*hSVvwQ zzN{wGIT$If+-b7XbgcqN-R+2=%S?N+TXOI1{akd6b_sox5~#WK`|qWU3o_}Obg@p* zE3YBSFQC?<<4R=SeQR#+!;|_#B@v5Xg*N_iv-x|lwy{rD;8dM@S&LUmtO@j&WeL!d zhRaIl8p%(MmM{vOCCoi|&U)BFt-&j~my zU&q&<-lusM?sbu%rs{ACIZXPYu_hsuOd#x z_?|u6qvqlWj6Q?b-$oR)8I_4_}#VX?IRAMj(U8O9{m;=OkCL|a%s|49~K7O3lddbf0GOG(~ zji!*!|4~lidmx*DunM0v?xcBsmV}GI?14FCP*HMnv0%Kzizay@$meN7bUT}`P?i!t z*Ydumb)~~WSfGn?bxGcaR%D@RkuT3ru|M6f(`uBz+|q&fqk|>oE~Z_>7dR_)jUL+F zk`nykad}UT_)IF7v3y##%*yh$K0fD#eP!1<&4L+FQWet$3vC;L*Un+pVrJOqkXNa7 z*GwuU?Sp1Qi*$r4$awf^iq7a4{(L+`Cx`hnT246o+P*WV0uJFNNR~5g;XVH1@34F< z(=k`09Pt`mS%)g8L_JJ-aXv>hOsBQX$)-)pp&}yL%sC_%ozdJ3Q9iR;xq}|CTCqiB z4=Bez4L8ph7>md>V#5=)-ehfK7S;qt^g&aC#TQm{+?SlYquM+l_U=%KTzt1)#au{! 
zEv;w0N_iEhUY5*74Ac3&cg8CO4H{Yw~jo2*mWPH$8XM81l7bT;kOG9ppm5;jW z0RJ|{UuRLzofn_0Zd7r~e|nhqu|_ZSR*(3OaWbtZtROYO`FoMfe3SuU49NI%oKFkb$iRh@Whnx^?_URG6TLbfwg!y z#bE&KKWO9lv+7g3GtG%8zku`bw}owS&$ zMm|!yJ8f*I&NRMB0{KMpojG?oN1Z8`*3tT-U87(bv*kNPnePVD-4x}QQGVQmi8nFZ zg!)1PYtwttaI^c&SdKAK;B2&$RTP7m<5cr4?F)*D>GJbaEk{8wgE#FZ-g@lA1}i$z z5GGQ$CM$R*3_(t3pg!>ud_Q+J{XxaZ<{aoU%q^ye-&`*CX*-pP>kqAdeLeVHkr`}U z_GWzg7!0afl`e-tpCnPoHLxN~$z-h><4s(>#7GGTc~5xB&O8}kCilcrzU*Q&&k@+O zo~Z2I$oTX#zA6ed5WXmPH|{#YhoPz(=La(hQOhbqPl6DGJuKEf-?MjsHB(d*)L}-$ zhg-uPFhuau1b^B(Oxk?ODVXvV)9cf9PTk(5cY@*KCT z3pbn4=LZ)B=6aMs4F(3WSxHL^=I1Woi4;t}*KOu}a*A zQeIF530gZkTf-oK@IifZW&qA+Sp5{}H-(j@&CmB#@X$iap~sJh9^Wm+BT*@dg_-B6 zn`s)Xt*X3NjGRV8f9saC(2!+jU&-TJaDS60NTd+_NzYrppar?6!*JAc zDlwvTH!PTZUVvMHN4)eKz7(Y;*+S?6Y^AHw!u#2?XX`Z#xBoBW0T?%&`yj(<0MQKB z)(d111_#wq4}*L)rm*)t;?`~4gNWL}CQA?38|(hHs0=>yd@&y(J^=z@8t$Wm7Kw=s zK$!C(G_v_k7+ZNj3p{85vsOZ-#XJrW5tasv!K9J<`2sNihjso&eaO{XHdZtV>nRD(RWvAQ!$ZVnU-zR0J!)$ zX+S>r>uBCOUW<|8-K?eiPSINn;Lvi`=m^%jc;nWbcGFaIlWBSa0vmxM+i>kQ=-WbH zDstkECc`Ywp@NG&Ry$7#j_Y}8I!`TM;?opm=0}`-^2^OTBdnUhzd5U|i7{*L2`^_v zr$}u`hgoafHm2O09y>Zh8WRHh7V5B*E@-S2>A89Sr$`Nqs3-o}6nzFXc8>Z0FhAo~ zOhly7R~xH9JKMpZqx^1nQ6U4w z-8F|ZPWy|9g2Og3^@$m5=au)Df?!`QWy}Osgbaza7}qr=D(nE75kaqjYpS3A+*if^$O?v6Z5(=U+%I7A^p15}!T z^7HQ9yIaZcVO9TswI>QF%)B!7;F-Ui&r?X&Qf)a40O!yjBq>e6z)?Fvhu)ff$*AgV zpiw0?Ha_xGloq1KXSo;~@UDzVmp&XGk)!@BY&l7Hus4`qR45`-Msk}ELZIcedz{vV3??$c*WzZL96 zd$B3R&b;n8Vh73(cR#&Q27=%zKtv!it-|@i zfUe4Dhz`I$W~bl*mW_B%=ardMx@I@yHS{75sMPYygkBH`J>n`c4(INmTm6Y`nGVwq zHsJWBSAU8MJ8&1XnXkN;ru(J_hz(7cm(@dhG--kv+MrG4_cuJed(_vmJ&9^|^OdpA zv;q@AMdEnyt>-i?oBTyv^EeSlWm_i ziP?=kLcoZ_9;)2jR~Es(r*OHJvS6SntX$ivR#$txNky-&bGZbeSVRN`#1n1DBS28B7?J5PxdZrbnS`qcXpG6ep<6zBbSdgE zawGS+t$quXx7r3MT&GhDOj=@YbNt|f@4r4e6g+Vg5%+}kJ%ydPeV|zi(Z0n2H13-B zk91Q_*!7n+;`X~*bZ2IP%HfL#2=79^%VEPIl+MKdoTPW?4XoFhJj@N)6?|E z>oyP}xAdvuklDiZH?{+buoAUh`g+Ma#aEU+touq3$p>4INuoX$tbEqAoX5 z??H5qWI4-GYPTPK zRDhbbJ;+TK+`k1m;G;+z2r@LA~^YD78t$`tP7mrar 
z$de)puaIe0a1j%3ZTt@O{p!}q#Spp_CfwR*`K_qXA7(se8arKG)~p783+9E#mN6B$ zqa$5O0_aH+h+>q>TECMq$Ra80>b?jACBA2XjqF{!$^Gv-dw;AU{1M9}!hTf>;larB z@5|M3if{?Gfqc&6_8^nbw4nF5EW;=$JoWQ1I&gzX8K7NCWde2ShX=FIb)}$Qr>2LH z#P85Tz}IHL0Ua}Xh)sl~AXb^ck;lpR6{wSr5)&;Pm^YRf^WT8JB~P^m!{u#JL^*8! zr*GzVKHqMbc9`VC1vX(TnTaX$awU?c^IUlD-wYF7@ZM`BZ}a;zbKe+`3^Y>vUdNq> z4b)L1x*=|d_gdovN9!X_dQsN7A7JA-uK_1~@1N_BGe(F0 zat{AcTjHbC{H`Ptg+R#aVqSP+~MWl<7cl-4S^D{DjZ zU-K(CAwQ=#`cK-C`>{Ro)KYM7Ey~kw1khG1!00pGmwv z6e9+d`Ja|x{%7zM(p>sI1j9!2@$gS{Y0p#Bu%LYDPrWA)N9AG%iGaru z^Az>Y^VnuHHk3Q++)Jx86V1xw=VR+@8ZMsMY2pl>O4{Lc=C1;l-Ay2q7C3JjTWQIY4D@7=kd z? zJB?6_#k}Z?(Up?w17s)K131stAgXta^DMtt8byI?LRtM(+k2kd-hO2qEme+ygtUql zstH(*{N9fHH^qy&$&Zz};?3^OxFSrixx{}r3+cZ?7XZ?0gVkDeg6$=aSIrQJ$F)?y zQk3@=6k#63o5}SzU@x(=tG8%bp3ik-{1N1JA#Sz7x1A+w=-L@N{A^zMmqNMbyIptt z%DkvNYC&kZOKtYhTA>%<6%CffmNy<{LZrGo(R`>dPTWK_uS(hP0OfX#PxoO)fbJcL z<`Te_Ft(X=zd6f%b}b3XGi$kFSm>{+XRGkgweQp9?|dgaBjy!IfKY-6SyMZT^||^#9e3L(wsYNghiI>Ta@a zU>$S@nO-UXqKOhAX`q5i?e&>S131^*q}?s?h%avRvNnO`$M0+*$7G&34mQ zfP$&IcBwrI7UaE@Gyx^KM_(tqeF9 z2cZhpDh$WyC<&cz&4Ee-zoze6iRCtuF=dzU5)_0|SyG*>FK%Vo=N#eUkz{^9Dk~I2 zmt`h&$@m73Jk^=6iBf`l;ji&4#IH={OFRX#jN4pqo#(UY={#4aGB3JnAlhhwM%-98 z^YZZ&^C^cf(;YzyPu)6iaf}I8YjNom+cC?%_;C>eN#WJGevZ5kWG~2nSTq_ft2CcI|El&J}L`mZjrSKVx!`Uof-KBx_xEs z-(s=c_9lUR79~x6GU?R=55BT3r|WmyJecV`1Wkx>e`s5BZDTIPL*$CD7G3sLvjgm| znJBT`tp24cj#g^~aFHK9u1)zs|27@0@)(g+IdkN--|1|b9FR-Ku&ZT#2t>!<@07)g zv=zB8WJSjEnKAcH>1yN7#d-%qBz$ zJKPUDuq_!*ypFwPu_|Q^t}<1e+NvgLZ)fnqbgb3K`W5F@tkY@lW8kKL5u~yKWozyZ z6GwXD{L8gV&NjC|=F1usrbfV6)PSDg4?j%0k`Pz3*INX^*|Fb7m&Z~t zs}0b?c6ky7K7d3tfFwCpBm1bX$SH_s4QB$L3C~Y@7RSX@%oe&xR~$m8X3^b&J4N?m zMpZ3X6ThT&FE!~YD}*F|{O zO0ItE%KnU7^et8QZL&AhHibZs8VXPIskN6FUBvk!QeXS*RUib|`u$@c{9iST82|hh zv41vEHuv32HMaDTE7G6Q^*Sin>_5lB!Ari~RA@aY4hBsTKqQI1sGeTo7Y2oCdd-X_ z&E9{aP5;lQAmyLco$_J-4ie)xYvDL#37l|1YoKUiZQAT zIr++nEHS6%O_M8HmHOxpiqXmzZ#rfjCH7KthF|u5+&p_MSi{kyUjylZX^|F&C`_0- zV&e0yF0ia#q|l%%YrJt`7@jr|W$7Xf^zYvpz$iBH7Ppc2(oMwM%2aJ_O>!Js%l{J| 
z{oxaQOH=bKqQgE&oO4=Q{c0WSjlZo`>00D4*QW)R3lWcX`Jcr}czM2hkE|m6@PQSn zaRjWPy?A~vSjj4|DO!Ggkc)u7yHnQsi)BR~a9)grA>^r#&nsr=*oiYv98s6 zW5^$s){ztjfns*xb_^Or4N#-OGYYEs7!^g~tDwsn8#B@iefhZy7pUJLZTbIdYxI8| zXyUv$`8+L&Pxfdt7bcpw>F(#3!7_2ZB2HGB(G^*d6rf`ZAYoG!_j?|j+e-z0IdpZa z&|+U0w3Ew6`fd`I`b5F4qZc~)F}T_B=?=M;2aSO_^a|3KKpt94g=>~Tmpp@ajU9vZ z|8)jWm~4(t1?<&MV#Qn~{W;4jYbl7zc4h>Fdx)u7wM+=7K=BFjiXMwRgN}L2x&Y$f z*RjS_w)(CG{)(3NRGquaCW?0h0pR&j{R5i-zkI?(j$ba_Hk%%7Jed(`)onWDkFkxA zIqqI;M||w*dvT=#i@m2dCXDVw3}_iub=Fe4h*i(s8Xv>G`&ee#m9?1%nATEchc%|6 zf=n2rqZp{KlG*r3F{eSG8}^JK>n9EB*u^a@**4@pLxM(9QYB~;1^}`3hreHQIIA4E zR%Nl-s3D4p6Z5IVK-|(yx z>`4A_#NI}tKu_>bHJ5)idsCmd^uS_7Qmq4QTuS4iuUi6gA_dM9fbaj+V(g!>-;vhl zcQ-dU?LVr>Fcc7}?dpECeJ`TpP7RtL5&T8Ugzbon zJjWlt#&3bUa{&gsattY}O4f*>b;a_hOTc;uX-+QKql!TKMMmeLheO#1HU5O||C>Pl z|8=wf_GzKUgoht0CCp?ad&?lZg4`gip3WS6HiU6V z^83_+b7U1Qr_Izl>2a2At3VB-BcNE&;fTf zK$X?q*c1_)l2S*qrYg*?LgPWB_{Vj3hc5P1d6f4#bSS{gir=jBX`+;W6IQR>C$}oD zprD`*^X~i-;FV1tYyD^-D+m<7UoYL9tt=?K&q}KcoC3rkQBb(1ND?%H)4zRfSfpOO z*-ylae<>yHKc`u@8@IIGuouxQ+O&N9dd_XFmz;Zg#0h3Gm-b0S_<-5fAKJAVM$B&pj}` z5*&0d+}QnH-Uawk#%od%;rZ-)!Y7if1P4?z7aNDq6+ zmu)RMDyzQUo)?Zq-;0M@Y5vp?d7QmH{mOAx{RCWP2=EQR<%(1m!7SDezQo5e$623T zo>4FInio(}e?_)lI> zN#UxkCAr4NMsDD+cIU$MOwjtmF+GPchpEq}jatE}4I1rq-|b2C54XI3JC2@h03Qgp zu51mZ_qpR|aEQrYxLTHK7>HhQmH7W$BQYq5u=+sl`G%okA|h?F?XzGHLGedi0$ws0 zg5ys%2hstq4SIR~K5hQ{^?Vf#WDMhh>=(lJX-Ey!o_vi4_JM;O<_Gkkn`6bWDTGhP zyD17-i@(M^qWmE?%FPfs>!=`1B zOmf;T;ag((d6{?$f9#R|t8;j}sn{$CR6VSFJxy zo0G40Io<8@XkN9r^<~(U#ax|6h1ve{(b6j&uKu_~Kt0 zRGX3t5@D9@ephC$;gE<`U&&Cf^<<(yBTq5n>b#+HMk{WPp!Z`@=wGKY{^8(;@bmsG z$4Kd z(JfU5g|w)3!Gr_b>i7Vf8~}m(qg?tHY$bj(d_C}9g-W{r6AKZ4hCCsLl{#<4^Z~pw z!5dpmPTzU>(NM4ND9B#$s2Di9g8tHg%VbG5&Yh!tBP7J943-{iV6KQQ++H+3`=5-u!#CTW;vaKh5eMPu zi%49sTW%5j2&=fh=c(gQ@S0o)$WZGV&iw0}Tz7nzMokeodHe(i^he)mqOzuDl=B5o zeV_u!Vxf=2RJ!q!1-;0`wRE5sx~3<1mTd!8|LMq^ z>LEkK?lZ=>f;~$Uuu=}`(u0}CzkNyK&s5LA4r7LhPN*3X7qxet$gKRNQ{S(;u_wGX zJ{l-L%-&lFIqDgrAApl-dsXB`Chf*jZZ*zzM+7Uz-ngZmWcS5G 
zqbZa?yzVK!J~>!UcKSdr@+599T=Vc^5frzL+ai?VmB)W$&b(#hkqXZQZ#R>9wZMpi zJa9UVg)wM#i+Jl?jB_~t_*fBU_T!sJlZ{K&oKF)DY?N-bz9i=}`s;2=os{wtn7U`z zmyvvTYnR;kH{J(Lfok1ap;F>Htczmj^(#2m4fh0`9{h&U^D5U7kk2rZpx*eFLMR}H z>>nT2F!|s`wKYxi-~chP69yys4IeutDmc%_SVgj{eQ!9O&^4JzBl>pZ{y~GPFGXqs zpRD7YKdHBRGA#9soTD;sF%oP%;#DC-DxCMy4wb4RNcfVkk*ar)6LdFkDs6lZo*l0n z<(Qpa4=ym?pEr8C>f6^hw*)?!8S1a~hQKG}DGGDPk4bKHRrZfbY;O7EHOG3HW*jTT z(@FDn+$$3e*5TRM3)t5UpSyDfoAd{?9ews<35HopVtCbsh$ae9L58dU_v=p~Rb=*@ z37ffr%C&FtFma&5wmhjO;q#yhr~4$nSVs#ca&6zOD)H(_v4XhMfQ1=LasLB)#y3I6 zZ9b(u-<#5S@Jj?rdR6vx))zgSJB4Qn0(O0S8e}8(XS>gZri!f=Iqnluj88u_-Q*SGRN3@-Qom=u%G_n`{keVT@U1Xm@1TR5q7i-IX!pz zWO9jmW6s*^lEz#~EvYal%JSIx7m>~<2m3_t6*BsZc2|Uu^{xqT7F&+@gfzel|3uso znuyl5sd!yJENVJv__#Ai%f+CfH7>ilwvY_p2KOF>QKNZ^+(zrT(+@F9mGeKdt@L6| zH*gBss7(AW1h0F^DY1b(<2>D&o&4IAH@BTpDwaQ%t$4tdzTqYXn0V~kd*CUs1kO4~ zW-fu&j6N9a{{0q-I-*sAtP9?|wqWTo=&`m3Vov#d6$dn8w1(jFbJu01?V7Lo8sPWV z%s3baTTciO&}GYUG^Eq>o(s29SK`(ro>fZC=I zL=Mi2$#a3UIU!_piKMz$V%^*5Z2i|J_X8eiTTVAJg@_+%lZpJ?ZLZ?lL`w{si9BjF zD}W`Q9j2JC*2oM^oUd->K0JCpZ?r$7_42%?gMXjMy!vv?z?yMaFE4n2zQkQH_^K(Fj{tw(g`Gcx zI0CbO_jc=ju;hYP0ZWMhFH+m6!qe8}xAB0c%GZ3y)(9-ghau{IfN%Nap-^vY3ScjO zx+6VwgBFFNjt6Q$BOst46R@hea0G=C(p)K>TpIs44sKA?RXb5AijRK?uz}bEc8CMD zAO+mWpFJqROk?M_TQONa6#%{bnWf!g)`rc^Rjl@!*Jz+nL$?um+2#ZZgVk@tqMqIs zFqZt;Pg+qSYTBL>NU6*(5AAanr{=3u@Su^p5?X9Mjq1Ha9LC1RYz})+-pxR+89%-c zW%J8aXAa=Z6p`@xX;-AOg#31*?uJx(;szh>lzec?W)Iij;eh_5g+gqEQypi!K|B(Q zBOdsCQZXf~3$--q-E9&&K>V_XkZN-t_1}Lz1F?(>@1JSa8Q76!5omPfk>>neCyWU{ zynVQEo}jS2DgKJ zN8yNb8v>!BOc8(%#9#SYczF2j{r|5&0RzawoJW-O49ySWre*dFqTdJc~`p}3@ut>}E`@Lx#{Defx z^cQxI5tZe)NKaDWyg2edzWJ-4Po$C1Zn4ID=`$CZ;{?L8NG{!4_Zlgf+z(n9-UBoY zP@KrR#UzuN2bR6B!{9{j(fR$`uh-NE(~mzorvDTk5E3G{f317M`0(dK&V|piIdqvI zzKK4P4REYJyl{n$iCAl&+|^|O#Xm-9=$`|s_Kki;YkNp>d;k1A6D_gEQ=b#TXpqBc zz~$b8+V3C9d;ZwZX1^gHBz5(b+Owk3=p&^wgR_92R=TY{hYX<;M_!x5XlFyOfpUsGpmR9ruI=<^>usSaOda}>HH3IP?EIIo7=N#Gu z3DTbnnZ) zXu!aNECi)u0#r3O>$l4-Gh9z1*w;Y-A_+THG}HJ 
z{jQQC{YqCTu@S{8Wbd*VQbn+~#QM;#Qm~gE2h-AhRr`>?=D+J_GbU<*iYf)_s|%)` z8Mj6j)6!PHed+kE6ZywcgJ@9ZfO`olH5vS>)dKG8HV9I{Zr9S%!ar%f6D0|2xHJ;w z3-9))LW$9yrN-d`rr%Id!e1V``P1OEVA33QdbA-l63UiFV6JZO zXB<9!c(=|@l&UA3Wl(>FWs%Z!zg)uPE5q8mo2%bV=z}R-*XH74MzUHp9Ip&RKV^=k zKnW>-L9D-F!=6IElp1zUN_iTOpoSLO24k^Wh4mGFxXD~d0g>6*vKul_j-L({T?!Z* zA74yVLq#0^+2fBI+V9G&o~{_DtdjpOQ)?KccJKgX#$Za}Y<&JqtIuCQ1N{Y3o3Kr= zTf^#3s_kgKT_bB`)Pm!9Q$)@A9Vm7*D=RCH!_-aL1Q7J+;NaN*=Fpd7>jY8Q7tNf^ zB%~{zeIaCf=hYt6+c1B{l5QGVN_277xWua)p%{W8YRl>n{HQPGPIZXTvyZ-CMDa?$U!CVOfqR`(RdQtr$SH zYbQ}fX`o#e3#}2A6(!2%ZW6kr3Z@81Wje$$7(5`;$}95yb;qIBN%NSYQ%`7`vTyk? zhT;iO%r^Bq4W5Tl=K=K{s0>kyfeJ0Z11IhxxYE+>`gi9)w?qIargidfj)JKP6-}Td%;MCj{-(@lN=|Jt_lw)E$9)RKx z^dv0yrCYm?Q&GCEOr6eF&)ugEy=B4YC`lz7B*qv%->W_Ns6H2w@-GdJU#DZ#;h)KZ zEr46;#eV4behs=}jI*{*Z@qT_bT@OGAB7^fP3OR3g9xm#j}9QW15}u5)G3l027w3I z?EZ_S^NUWSF$7Sw*nzScyLt2GDcFXHuyM^6FGfz)IPh(sLnS-uZIr5yt8lSr)5bGe4oN+)hC1((I+1WEhmWbxnIbR#48cC{i#pN-vh66==6TBQ%4dw(R1M6yh z=-ZFbW{$8P02#04na^Wp99*44r90pL>hF2>Eijis&P|bfF_V!VyvH|kF}fhks+SGN zDbrNjHAif@nYYCf3F@@fFcR%@_x4uoZy&WF$QEem+CD-isWn+B6iHcGIXtG1j&ZVc zxYplma#7XDi>lvUA)VEcya4utSI0JwO*nD9L>4Hc*rvqmt*J51?{ zQg9)*Y=&K{{DI{wR+QxUHHpKLwVgJ1f*5J=>lwG6Gjdtpcfe;G5SthT9XFBL5alB< zhlmcOy|S`fyMMJsaPZmS)`Vc@sGYNfYj}=~uInWf%AjquZ>Yw%#rE}zTvM zq4tX&+lN{>3FS(I`%_5>hAsA8u*0+r&Wd1Yt;2%MUp8(&l)F$m=6z7zpn8^qYRVBc z^z*st6!!l0RCE#aXQv#MbK>3k4d3my*K=7(!PJ%Ef{dsiZ8k5J}v^$U5o--ObHWk$a z3KelO&Zi&761&7RZlHB$Q)-reJN@m%y%)BUV_6N&KvZ1Zyn*tzzAU9K<~d{T;qeJZ zjXnDk%x9KWh^V(!9-B8dXl&bX^K_+EP&U;(Moce@p=}t&%ifPFyJ??7s zhFh>7EA|wkNc0D_(VU9`YWO#ZI}s$(JxI#7S{dsC$E`N`x^|0O4QWV zk6J{(;B#&P&6N0R93Q%P6?QO@c&@woeTrDIwe%5`8)7?&8Lko{#EW8Sdr$$FM6_&b zeD<_w+)D1WRX|f)4O9sCj0}A?%4W3wNPz`f$Z1BD_nDm@SYf-d51lEJVrj5xJPISk zq_Eip^42|C%|{K*3;dOyw?7sbfAoP2drCSqu9tG!f9K4sb=)UyP0i!FI^2XEyCH_& zIGKQt%5HT*)Ij3XcRIrYF=Z?R}jYofJXoD&a+(HBfkV zP|>byG|!LHYrU2Uso%CPOYn`q`D%qhGM!ycUPC<~Av$*U_A0Hiwd)3XyHFY5?vK&= zkpR`c$gO^L$`P3>!P1$M(pBuRZ6Or6TLXtsr-JjUN!kc;ywiM(Rqpw2oV%c_JEM@z 
z%Z-zg8z1mFg`dK4`flf;5OoJz)}QPCplsN?6G?KE)Kx*?R0Hyxm=;;>EE#n|dVG%K z`y(13z_JE;U+jW(w?MEn+k~KYJi!nBmih#bG?X^yIMj14GBMwTu|Luw(6;sm`RFi- z4ioRoXPf(@Kg2ufkKk8enhg1s^s-gh@z5$#^W@zP5l44Xei>fW2o1d#?~=B$a1;*E z!}fM%WY*$N+L)}-V3#?A0}w^(lhEjsUCbK9rznpQq{P?@r`vlkC>Oisxp;noasrWz z@inKWjvLFWrajA4I%BUMtP62%+fnoY@$haUOD)tB?e2Do;ynm=X`<$`SD?F4LeJHo z87QFG&mcr{jIQI1`Gzc(PFK&dORd0eyy>?vRT8~LSp9CEgp<0QyqkE+XQ@GtX#YnGeeI~LE~4+!l7I041z&HBh@%LNW=59;6AV17;zi2+*<(3``8Q(VRXm>b$U{K_^%je>|tvaK~FF zkcE8JM)7FR{-f7{b)EtzZ6fmc9}B@z6pxV=wNKz&%e!;Smqz>-s|aw7`}=U>UwJbLHhZEPapd&|37_-4 zQf_RP5lu>VU4tj4Sa1^LjaE3W2c3|o<_wH+f~AnJGICK&0?!K%3>7C{puMFvCI5oc3l$q6iuQw zFp+6ENVrXBxFD&wl4T~MvS(*0w#e|!sNQT>u< zK^5R(Y`yFfS~pt@tp_SLE|VEmB<`lZdUdg6H)`mZD`#hh>ToREPB+nl9c)N2^~Ls@&_={`{Av(KPUDm)d$eb zn8QIEi(HUCuQr%m4Izk?i;JtLJw);HRA(jzj1md2*yv%t5o;4Z3o|1tMa?cF!{fTs z2~ge$OwygCcGd6Lb4>TCwn1oR&@nD*>>i{K7@dnW@HCB=^9y^K}?+pbX=EAq)~=KMtDj|j>QHDs<^*eiT=fM;D6Cw z`*Zw-y0#DODWiGbzxXB15B+d@a{)m*w!;)#8;roFwTA;=yDCR23c7Er`J+&t&MWTB zaRd!rT}}Xrl|*qDc+iXYe6Wj?667!P)%&D67osv6N~^cVxoVzAC!chpaeJzkw%BPv~2Mp!&1}NTS{!)I!r;brnK& z@wK@C#ovn$nz7gM_?}lx58Hi^WF+0&Xk=UxNZN&*$Y1&&VKG@=WVGmQmVG@P(PLygUi zg3MGWdi)WMGBC0uVPZ1;(o-0`2>$*k9~Hvlu+K*ZJKSh69C(}HSs{aUad9yz_u3j| zYEP7+SjI27#`!HwC5Oh$?m&6oUy;UDlZo>n17F-|?YT@Z3~k_cjAPk`!{%UY+sXay z_UEA)m=tj5OCC?oa@}bT!Io7*LZXLHd{4y~zH@-S4}FkM@TF!Dy69+e zpq7*P3UJ5u2P+$?ifk7BW%wOo+2XB$n7FXpDTL~P7{zj0tW`Kb*U^>$S3j0JrI8@68wuhb)KLp`;?>CXc-v3svA^FyDy1P)ekKP{PxdVZ~u1ifLnj&@qa~PVD`&FJ7`(WBWAzLbbGZVTx48muq zDMBR8e&(_By_3ZbQ|7}%cU6vx|0NCGpzpr;o?`beA}248s*8O&fEx0bF;Wq;GJTST zQGG|#sDT$si^NBzHelu1vZuJer$*|B1NxZiL$Ybvv1{*jQ&ZDd<2A{rv5Kq**77g= z?p=a8F$GL08KS2aS3&KCd(vYj|5j+Rt9S-2YgWEY4Y z%WWG){qQpSpTyZc-e>-Q39x*7`_a72{3zfU{FvPxXEfq^lhB_SIm^1ClVv%t{rX~q zlAct$SuP^lxf9N+F;rB@YF$wLW@dx7m`wbGUehHoWTt?^>22|^igse7Pk5Qbva77P z$ftJ_iU2)9q}FZBQQpqUm^4Ria~PgA=UaS!dAYlUUph1pVKqQECEzH3z~JMo{U`9% zp@3&e7*=yXxZfK%SyyBbnnRXqX1MF;&pnR6(@FhF?Ilp1nNWF*2vXr*$vR5kP@f|Ju=P5)rOB4-+TA%=-ytn~-Ao^U{lG 
z3BU*OHD{Lz4TN$G>+r2-5#n%yk-k&EBQl_eIaLIrMR@;>#g{uW90N%}&RYar z^>Y{wQs^)+0LoFl;cx;T+r-2~lRuN^ab0sT)%l6aiJ=i$Z-*tO3s9Dco;uh;Tf=AN z@h!hWeti$zour17aqoIlWHBF9yI`oLJJB3{1{tbv4byl%g=-u2y@{z7{OCN11drL* zkq#^32Cmbr!my^V^3cp#v|^=QZXD*Wg6LLtX<6C8CntDikj8seZB0k%6LD*~)fcK# z$uqte^va@W(4UQR7|W4n{~S9zGAQ?+zmkBdTCAsUhrHcsZ_Mvr)t9cs#8$l1``FYx ztHj0_E$!Isf+$fhrD`t@w?PraNIU22)r*ZMOKQw&_tLZ{g02BRdW--fi8j3~cjf z0BwG(a9JM1;|v7R>7E9O(}Sc{L0X4t%DV*d3ep!?f`G3iss4*p2TU{56?<@_;ffkW z;=C6ZOj;=zXq>+F7^daLgAsM+a-hQ|gv~}+nOVD??JI@z07|h6oLB9vy?G+CtM&BO zd}~FZB+a`=C$yiigG?v_`D|v#L%9R$#a_V9@QK)g8VO=-0A|phR{m2fk~LY3Kwv|dS-@x zL4(Dx0clsZ!}++eGB`lcEN47HUeq#_=V>GAh>`9RY=G~@F^UrQo1GNI(ukCWkc0tY z+a{m?L}Pj?c8XGy3>PZ}xVt8gv*O)T(Wx_T-vH|fP|MR7TTX08P{dzzH;FQ@>r)$CXXZbtPb+xE)4Q2}UBISg}~uhi&$V`&~@ zZ^&?jv313qP_Y2DaRjWeb=Rr-2QeD)>wl)i4Rfo{D}`p#^+JDf#nd#DQ@^68LOpNl zcotL7Qu!cJQQvdJcsY?1yS)Z)_wjt@X4__W^{jvX)M06qcL^vl;%ik`ie8JOJ11Wt ztE72Eyz?s5NRS3a375J7Pf>!>!TBwVhM2(YvK9^~7a)TsuEtRfHkBpHhL?@pg|$#MMFiR)A@%minhCNv`kIH)ved@w;=D{N{^h z1~hUL)IASq;oL}Lj~pnvh9 z{vgJXj4p&)oOMyOB2Cz8CIKl;Z7fZu7PX{)SHy-I^P>g8@7 z>pdJhQ0~=Ji)JA1Fd^FLd(|2a1>JOid9iT(G&iR^wKl0VgQV9r%Rg5QO|=4_P07^q z1SKt7l+k@>dCR^MZk4Zll?4mHkVc^d{R zYHp)7=PDWUQPq68)&=nnyqFqVs&=5ZK2L2(>_5tRJ?>aaLm7~!7#(b0He4XS^$1RR zH<~WqdP&ux5|;no%iB?fcEAMJ6}%Q)Qll>(5}uNBSLu$NGZBrXJC-R7{R-B}TU2~#+53OPrOGCbEGd)jQn*Ihj6?jVbn+49C3yB@h=Xnic%t*6Vt>X;fWDB?8R%LqDT5>HO{J;=uO2i%{ z9ieJ<#n>)MCZPoJIjT(83R})B)TZ5O$$7o3<^P$F_34V=_j^$y@y1Lmyc5Os0 z)Y0Ou`%}~3#m_wU^+E{=tv(-kY0G`6mF%@j zx^$z-tbfy!?7k*3f2rOjVOi8IFLkIFO?9MEc#2Pmv@9TSiy?Qh3 z)UCj9m|@;?6kE=jcle~;rQn}6BdLL(=dBmZng%ZIfH_Uxi98q9;|aW@S7Z66Hfa>z z-_TG{FOq#2BrqN4WECYB*~u1|(wQKr6f=wAAZ07Kiq)*YIFT2dycAujlO9X{GFjcp zwny*D34gLs^Rx5zEq7aDY}`LR!3~lg)O6;!)asU}3COafl1d(Q>c(nz>(V6&H0nzg zMt?Qh7xO(fJ(}FM>6NLluZ)!Z(JQLWygusC`@C%U|NEGku5weW0oA0NGBV6Qhjg#9 zD6764`D`4nqxEq$Rt_U)*I#05bjtKG-dg{Q>c@Ms3o!tXoe*_?g*Z$$#tP=?+0zUx zyeG0`%zZ6x72|eLy^>;=op<+b;q})<*boU{svh+G%V_+S)Rf%wh9GqBM 
zC45sfN9glZvlJ=gWgX|-HrOoK~m52sY@{bCWj@&$*DcO5M!X*Db(J5OHCC*2m5~9di6elGv!_$Z4Wd{V2rt?D8{)EMdyI z39Scd14HYiAM;Z&iTBlihV8i?bxKaPtY_al>O&KoPr3t)Vz_oP+GrgbxKWnkr7h}{ zB0Y5)zlPnHVt66qnDssAkiSO`OBMHCn)I>*l#mIZpS7B2FmE|6LTa)sU?X*%)Vwv08pyCT5L&bPeQ@9@Y>O)R~9or=n ziZf+A1CHO;W$kS?_ZGU`6fUf)^@^p6BYTyR9Ep9?WFvtmIi@qA=66YKLK{ z7oJe9LmZ?eiC1!wik;^>o~Nl!C%{(nn}G_#jEEk&jI=C)i%GgF7~?ogP2yNRH!$6P zWcq5X!%$?ANr!)GGa-bWw|msyq(Z>Sqr?4@b-+f#JTZMkB9@duSwFc_!J{18X=tXS zlG7uzyjGp9uyo8)Ia4{dxmAKmIW|R3W8a0ZY?j0}iy4Dvi<_~BOWrtmedLT4?AZ!c zC-X|ZmL(^5k*_bTO?wa`p zH^;`uH?pxB(ZL@3$=M17FEre{z-%$uuI1Gawn{0Vz=HXJi@yD6^m1PQ?3$dx19jk##l*4N* zXI@RT_^Arm*^Z$_j<)o65k<%?t0d6UE1uirh8|P zj?A-mhwGNv48J=zaz1~4aI;HM=0YPxOGoL>Bo_zF7eelIy6pGv)uH@O7i?_4DE2J!1 zm^YF92_Dmt-m9@InY}5_xXv(QOA4it{QPhw?UR;imoEaDX0jbdHle!*m0E)YOvg&X z$d9Wj8z!1v+t{cWgu3M8vIJx!vCs6#W|%Yx;B;a`2Kkzn;nis&GuCX1>mOE{P?m?j z-wDiOZQhg`P+B0aRNTkNcN^VrBFxL#Gui-Uf`FhZ_|Er)zS%P&@9G%CwQR*pPJsax2kH_K5SO|Y?Fj`-H>1oU>O;F>DSVd>)k1FZKmomaX+Ue; zCC}EYOB_dD%VzYyHpOf@w6D|49#3Y^QCb>_@XDTf8BoEw!t1s&^=psPv=OZ>S0anX zp`}co>kKJeO(x{A`ge8Ju&)oJuihRZ1#-mva73-fag}Smp<_0_FmK9aVrS-h4>0QY zhi9v3W{l3d^^KxS52atwEx8T!B)WjaC$%&TLPK-ck2k6TKW{&S&3LtcD`+`(z-q|b z{%F3c1T)`e?t{x4mo5QJ$kFsTAVu)bAqo`w&(E^F8FD`NT z<&kjRL7-IAGX-Mm{n(A*~P z#GI{Y-;i*adg>-9n#fkt{yG0`14lZ?LLv5mJ8_n*q!cek#^KAcLOA>7Edv}=E7>{o zSeesztGR(HbJ7;Eb@#k%>+F|~dlMVW=!sAXhdl-`V#y;Iq%0(;k@$H`d`_Tjc5vS1ypfBE7qMYbofQZpo|uv*Uty^53+ibwC6xD<|D} z>k2!!-zU@sEJ4ocY!0mcl_Z&ElNm2`e*#?2oKehmZ0^PYJi+g-m((hxrS$~_-U5Y4 z{1rN&C3pC?5xYOt*r&L!YcdJc2~TTUWahY@|lj zlB6lW-oEPdNL=FS?W9t`2PWWf&guOs%fVS;F3HKvd}2pBx?oyWT6)abXm|l?6OqH$ zk^f2Z@e@!D3JAD+K-I-i>;=E2z#ZE~UUN&;=AGtNpo8(IKj-SB%UPLz<`8S&jHoZH zEr_zS{x%9cEV{3culv>1laVT>Qj$gak?&(|)wX>hK`iK}0ZklT1uk4Ac1MrVn|zU! 
z+w<<`#nmKc1%y2X&$r+Ds>i>tgDXmRepl60*LIJ4Vr%`NF{m* zX{IxYyE_kURJ2pDYI(EkGc&gZ>`I^VZNIm|{j5hlN0Yk{B+}f#MxA6zbJYa^pYLL@ zCs0^TLqXIzROS38G_j)HpS4Zk@XFN5tC^i!x{u)y2*IMj#v+)xomn}EK%JgaUN47c*JG_U{uYu>0$F2Ykv!V}2 zdbi!aD=f#Q7Y@W<9o2;GGNJVT$Jcv-4Rh_Wh#xaEbLJy~KuX$Qe+`Qaek`1|#EQHJ z%kThr+zfzg>;M z0oPX&=?=I(ekWY#-}*G;y?D|PRS}D>2W(`%xpJD`HuAiV3Gj?91-}ql3P>bSxru?v zV4E1XDZ_!chFeg<+E@SjtINi9XXeI;Dnpy4bHuB6PT5A(p$?HRRh&V0o_ncj=iLJDNO+Ft@P5JW9FrXa$=-S#EjjB62p$VXS z;D=1-%cZ1QMh){O;NV*>O|;M-cj_2qQEW%Eq%HDB&ifOB7?Obe5e@4JkNEsNm&O8f z$7Zr?3G;UtT5D8$1=*P?Z~0{ZW6P*s6;Hm3B&3tGR zt6ymboOt84gTGRIx%sgkhm<;NBMq9X$1MjczNiP{>FrAcaDYWyz3hDxaGh_dG2PQE zHPjmJ9}jTK3O%lkuo4?!JYJ^sXdZ5YfJp(m(~Cm(Hk{}OP5}YZ34kZ;30Lwpx*vFm z)gpWrxPz8^aLB@g?pUC$sQpe=$uYMrr;FEYCYnz;MFw22uZfEBWbwbUv3~vCIgjdx zyC`CS*wm9k2Cu)bE{b%KrbqEi$q~WE=;D%;-qSiX$t#$jr*7NLC__ zopVxTMMh+k5g9qrky(+MO~WSh;P<-ve1DI}@ALiLf8CGAeOJ1!>-`$f^)8hWkB#bj z4|q!D3!%xiDP2+m+E|`3WvXp-3A6`~?{PoF-KIspPPn^`zf(mbhPojt)Z*vljmzb* z@IcAj0RB6D1K)=<7GQs*^QXWbL%XEP`Sl^s zt+q2=!41*uZ_sVAe&!zn!5FeoNjGy|x_h>SBBHB|E+Vv8UFb9nc1&@ca%Bl-1G&E# zE7Xs3lkP#q3%;*r_+6b|4f}-eS7WPgeMtUIIM8!Y*u0~)8ospZnqxiBz^b?_M>37x z;8iW_hxDMi^4(S6TNMuR$fsrVva6NDCR-!n5Jhr#96Zik8J~re+CQVf3DNm+Ujdb; zW-N!yq4ed_*gb2g1j?6~9yjNyd-Ba}lX=sY0lFGM8#8}mjJHId(;19Aw{BBH6r(d_ zp3MEA8QI<0nCEU1s9tH857@5uQT?wEP36n6;2rrL6ZrR_`e-dqVj zGy@3L!onw@3_cr)EwO)8zo3r&@CZH~ZgES{jCj*AlqTE`0o&J5M@0sAj$408lWXkX z5CJFU>ZHG`g~P^3c@xdqy_gmBbCQFS5;;`6zAIBC_>JKt5aBcC)1^s%pF->g z@jxwdI=_OwOcRFX!+yP!t-3`rUNhCTyviTv0SG@zduP11pRPFcJUvMrhXo?MVpoof z4mz$Y=jHn_{y;ifS3B4-ZQv=NI*XsRIcMXOZxXIYnOrJ^Es7orzzYSK&M3mq=E`_S zRplizvjECAM`^idAjbb3KL1&GWB_>2AL626k74Y}^kDd}FASOwS_upxeMQF4&*`@y z`q}q{Ixxm1!QB9@SWnx+&V6y@xoV8>ULK?CIVSC*ip&B zUjkC84Q+@1fQ;vxyb2j3-|NTj!&u>okk+)Wd zXiN%xeC7vxUDaurvBn=GnWG3(zc1bee%0?>{+AD<7rp092z=L0?P%`R^C*Y)XC0=G zH&H_YB>%o0=AscdB{j0yySsU$7gBFx(&y9!Zh9L*B#vWVq8s(aj5Q#1?8uwT!5rLN*CGPoph^II zms~69VQ?qLf|O95BIoCwJe99eghEE(&oBZzPa6e&p-x@-{9LrwM_JN+tno^c=YI6g z#$xUNK7V>~I`yk^=d 
z(bakfXJCB$HX}k$x`6YENB?D1z%E#eGAt9`s~h~1hwvlh`E19(>@By~VV~^x+1mb7 zOtD?TbZ&fC{iJf<{ab}MrTiMHb_cBOJj#~xK~St9DL6W>r(LK);}xd@_dfHw9=S`;S6Kr;2~2MwLg`arq;kc@7cGRNQ2 z3cYNN_3o!rmjkxH@z>zL{hoE$s`cml4pT_2tSVYk4?3bP%uSUSs?*O}=}iu~HQZa| zkGtIu(y}8AW;fR0rpj_ZXKQOeC{xSb zUwSEL_OVr0kS!l}G`6FZMX<-k5yk+6P}*?aybo%SRp&YCG#0+9*C2D4fj+*_NsOP; z7)_8bSp2%8F);_5=e_=Pb^aXW>w(%gfqwnv6U_6XQ(?Ek$xwOl75&8O1!slbHNyY* zMDCRgJpKC<4jTpDbob`lQGaA;_Sz%+ZbWpujMncyMjO|^0w>rEd{PTV-Pbc-x{cO* zFqrWczUD^ZlgPl=ALK2S~;0(JW3{DsEDO$?WV=TtGXU^I!#O zg!vL}mUZViI2tNePh74-)QdoxE&!18JSx^(d`R0M*#(MqpP~>dDG^GOGo%|*q4PX@&Yp)Q_kA*k-vu1e zXNESrapDyd~$M z$_^v@Wo%KPq!IXRK4?>6px|gF+MZ8~qPc}&TZ@r6b!%8cZ$~l<*-c(zp*)r`XTv~` zJ+Y>^u{i4XBVgl}q&4yQ>gKY|hW2mYrLo{&y>Fh+Pj1VfFJAKtA}_1?7(1woz@;Lu zM?RChFv6BFMZi)Mzoz=(Ct#YIh~{E*d2(J+H!tRJ9x!v8N_e5kWp{!Z_QU+w+_>|n zKYHDt38iElu7zjM6(E1>3oQJXp?)~Ruv{Kw z3V7YT{~g#3+mVf>7KLX!*JSwN>TEqg&%r8&=D^N4NIUnBE!?Ni{Q);!{?HCxVhj56 z0U3<}#{RE=&Z${9!Sp)ca#lLAj1+m#CI4+_^2IKL!kfK&$)n}o9f}?CM*S#~R5f1c z&$$0AFrN0EDKe7_8mHOcfdPv?ciR$K5*Afe&>m>TC%9=Fxng8J6Qqm$bWM?K4ZJQn z;JV<48??Pc_J8)gayfM64qPE0`wL2qC0%Y@@Z2QQAa+gh&2k722$&_++^^`0$PApi zMo%%g@Sh_vT1*CqrmA9&?!=AiB8vvsjS9(Qbg{^yOD9tk+28|pNLoB9R9wC*Lx?;I z=i)kYE~Je-PCivQ@TB^!@2Z`T+Eii{Ixijb+veQp^>=q2{hnByMX-73=XSI1&=5>F zOzBAlPsar5VJ?kHeVC!F9YPdhJOjoQDJV&m*b>c5v-b~re|*l0(Q%!&UTQjW?Pf|5 z?|(f}R1#ydMZmOCU+M1EUX*HNBCG))>vg2gH}rLU)La-mhrVU+8b%bo{$&TohP6KF18;$Yoy&!Vcgl0Yo4Ixh_1=GV%$dR_FKDYVufc4pcYu4kSUSW*M0LZ3R&fs&`;U(Wa z^ZZU2pV35dPe07%M&mgF^S3TTeIxM_29M^U7QDnuEk7|Cto3ml8l^re{uhDbETB5U zmx=mdX$8Bg_tKc`#)YfY!WgOWuO}fq)ry7EchqOBM%~o~Vo>A`s{OA{AywiSHDx5I zQyMsb@wa|W0u7myL}74jX``fb1Wb0T|M2HcbqLPLf4VE5M*6sF0T&5Csv$-BonAq& z2_mJ};tx5~^Xo9(N{hdN3L5{R-dA-`85gI^~M!q)6N-}0t3vKzl$)r2VGS22&5 zZ5$E=4ckFxGdHe((RH{)JcX6ikk^@d(aqeCE8U9o;Vd`;8s%=l{`!*pRzu+K1k;Nv9!o^!HR=DwIzJ~2{#1_qc2){w`d4dgFrM#>>-YN^K}qB`H{ zI8Tq(&F+st(1PEk#Q&$5!oRBDSt1n%zN#O+t)ZH*$(~H>}xDufBMdEr~pv*_=7iMA_A9KrTFJo zO#Zu^7Srf)0}U;!dGmSwE%z@8aT$A~dp>Awtwb>%EqwOtsloaHKUq6E 
zsjwBlmOXG5NYZi1-_>EDCnaKTX9JEP9Y{ZfyGInK`hI;{Av^7|uQlR}@xu8J+YJZK z4@x0}`wBe3JQ(66?I41|n<0#Zhpjppk)Z*8*e-H<542nh+W)X9m0DwdI4t941H!_uFLZnyn$u!4!*Z*!bKd z#{d1;=p!yUccOeB5)IOa+w8uUK@iZPMUU*nkb&5EyJ%7E-qa#8%0<1v7<*c(4Otj|Jb$6y{TqZgG0){m3C4If z4E00#pKw3iS9Ft+3Yr70AHTFnmx_`Hid)Xzoif4%J=GeQ0kd*Bw#e~1O>(GsKHEyF zy!q-)=+xyP@L*RiHmkw&FPo~MnbyWx4}0C`ER8sPP)T(dY>%RcDX2F4HTLfTRcICS zmGOg>5ooq)3_PZ5))G`zl(OMz#ozVb-wW_Vk$@&~QHv!cff301+A4_Kh$#v|<|hW= zpousT0{6D;{iuvVJ)#LOKnq~C4n(wqKt4og4FM78Qc6wyaWEkyAprCGYY2<+w(^7< z8y!6l6+26@)N!T(%1MUS;uQ%w@hjI7Xkl$rh=oFA(&dNs<; z9ex8Rmpg0=(;^gc%du2#4tjSE)CmuU3ibQh>FwiUFp#qEUfK3oTYfajKTN^5qHJMt`qxhdNqo)Zue%--W~SqoNVAK8VLlwZlZ+FD_-{wEbXhF_DXgnL@os z2Tdu3X_*EpZed{D5A_$aDu$#Cw~2cL0BQNa9FEqZECOXkD2=-Uc_}0hZ2EG$p|m`y zm`jOfKWisPU#}a3j*-2&`gv4!i#dTQ~TEmLzHjT zG`k7TDyV`%6#S_*Mo}ESSJ;dYK zuwTv^zSVdxawbtUyQ3KVk@-d@*=^TAbu!l?PFwp=yd#T3mBs`xLp`KJQ=GH`>%0>k zodzu>4Ww(lrL<|MU!fMfe;x|KWxw_I5kB}co;(GZ&A{*M=~e7M4nfwZ$nT=a1OhZ_ z!Z+u9dDTlRZ~@7`k>-(lD;_jGTZhX4D<`GUmZA!}&|Uq2;T|rS*bU%fGE}`S?cfywgo|O&QA@czu=oZ9YiG%b!Ci{DQjctxD%4(sNM@?qgrV(IUNY z#||KZqntSmv=hGpywid<zW5+(+ zt^9ev^my}3)zlIjKWdAbG3>5z(uJu}O;^vYQID~e3To3tRl=vWk%*`&|2B3f{9X4M`#1 z;nISeqvZCy>YErQ`uK0enI@nLjt!=cNt5E@9}S@ucuHsGEfXv&--2@ zr07L5+!gC=YpCFc9p)WKSoSz8Lmk00BOxtnQBOYMkx)OsG~PUKb!43T+%qZTG&Qd0 z_xb?peFqrfzZM;T67rZnxi;pkUTuw{HGwa0{*Z>=DPfb7{T#Eh&cgLza~v$V`L@kl z9s#rkE`gDevwuuWtw(gl;I%GKCSxva*3zXB4KiL6I##hMB;kxi=D@<2z(vGo$^y% zb_2-dp^fTCLz(bEbM7$16iXUrJyEI)B_{c&N^V3~rbT5P7JM688!se7>KKp25hLL*F*s#3vr1;ohb_SFd(ka` zoa4J2$5k|(nd75zhvirx%pDov0tpp>unwV()3e5AUnC}5a|)gpG+1rdrEP~M<_05Y zMK6U;>ZR#G0%rRWKo6zZq9*aDo1{GfyxG@Gyii0nITad6#F1}EtP>!MM*#;^1qGDD zzfP#ZRv&WsFzLFb$OTN%7X`kWeZXZF7P9DH)jV&A^F5Guz3eurvupl%lDssZugA0fH|jeHP68Uh9fU`<5hyNDJNPXqgJwSQyd2dpaA3 zJFYGUxQORVE$jIEJ(;Sa218>HYZ1awS|~7?SdHx!h2JfcE7_9GX}iO0jmG5rtKa!J zTxj@|5v~5;NdfZ#r&6z3ZDdNH`8X4_PLOi$UjnRAgf>@uUfBo`)1%@~<@7l^bq*Q2 zQ%}23BZDkS@gO|xm;fn|m(UU~O>mH2q2K55d$NGU-)dv&$c9!8>t_P_9dH%5cXG+^ 
z4JK#U+TWBtPf)T1IhrogLRi(;$5rCTVw}mt;Q#-Rw5LJbGKX;0KITXUtgR9Q`l{!8 z_5mR8zf;tdsv1}XUZgWGK?{H=C7DFbFxtr~rizBaq1qN_#H~YJ2#IqqQ9ZCE;^AKt z$LbMe5bwCV0T_gufO7;+U8R11Z|BNiqZVw1RK(V5mqG`Kgq?;^3i6rfZ5Yx?9)|VM zuAZ8JwNGv!RXW0h;Y3A*Rj>~T8geX~9(fF+1NqsmBK)}e+EATuxi92DIkPX$Dk9wc z4m^=0N{_gZpb4%$#z@eg473#%SZw)T#+wd^387FyR;g)9Cjg)-_xQ4*ko0OI;cWnI z_9^;!7JB7826Y(n#Gk&kr|L#macKbCW0T$vfW_g`7)&eAdjnIg6U25vd=f##pM*Hv zx&@}KyUsejHt@>!6qccF+MI)#W$tF)zF!g@x79juLJK1F@!cTDR6%PB*OeS#^cSR3 zAWWS$BPC?LM3$XS&c&dmCW`g7S>1V{z6@aMSdud%50}|qb4dhEfD??pISxIUqG0|?n1O{4ej@t#rM3B?i{1RNPn^w~Vs23rM(s3*IS@8){O7r@CeZsc z8xL*))()w>o&V-4mPsCI(0u>Ooq<3rDZs1Bophm5N z{%@V3rKhL#-deF`w~u%pcqVO=B;~)k^kh8=DZ?Cc4-CQqkI7L|Vz#-5Hw)R2h&TBQ z2Auvqr?XDofc;h-wSRh22*pPECkFUHsT*atOG{&#AF#d2<(n7-JYNfVtvR3Vy*!1p ziEnioZXqxhycydW5Iu@|@!jW!e`77K)xz166gv= zwZ?n?#ryDsYe1N|u(+9Q@>ofk!3<0FbT^mq=?MRsD}v3iD?AM4l4jeadQ$<4G<5oQ zN3ZjxG52G%x6T6GYeL0XW^6LluNTIr0?*L2jm`U=-{{g6yaM^tu?jf^-d_Zs4YuFFQK z=&A-Y;f)4#81INPZ3B(9l^qk)PS-6!v+#<+W~&xgYesHgydT$)5b`HaUI@iN$yW{Z z>nDNjZkg58i-9@#Kj4IJhb3*!F$=l11|pBKL_^~nOXGJLFFo9o-TVPx8?teu%T3{C zYApt9e4`G3w>g*rd$jvJKOMrBRA6E7E6uIG`iOD_Ao!#FB8geA4f6E*8P7s5sOg0` zTk^bHP`OR6dn*=(zUv{c1=tBPs!wkSlXG{>^(w8q|WL(%Dww zK#zcxxi{dF6)gVZh+howO%D*+#1o&qya&Wmtv5r8%9xE9wx}yz!N+HxJ)*cyZEj{l ztP;lO8W>$9D6G%!IQ)7A^6)h`tf$u5JV0XeX+00{`xU~dU5GZOC!NII5Hc>R2*0}J zv5BMS9j?Xl@*`U*q9)IQWTPCgVIs(XWjf%QhpxFQdv3C0SgV1%3a&Y1%_rX1dLwB0 zqNj%rf`5YU>t`AY#UPKoovcRSP@MUUFFm>Y?Ps;PE~GTP)#9J{#w6`Zuq1xa&4OeF z*B=l56gE!r9sUz1zy0Ywl-U*I6-ReN*-`%My|L^0(10qn5d2|B)K~v{YQKpdow+V0Iy{9&#vqJyQyk&72~!c^2uEqn zms}VWe13iTIQ-u4J1hvG4+)L21Xw-7E0H_=23+PggeXyDalSD6R5BNl?F?Hkkdq?| zLHSUTY_OXc9u*{cvC8@*)yC7%>*x9T{LnDpvePEah)MU!`nUh3JZ#WSn^yZ^do54v zE;s*becWgP0pwWHYmPeU@-83{-BzNuy}S>p!NgzB_3Cl};$JtP$c@WD*G1y$oTIa` zDu}Vr#oyY$-m>Y?hU*08WLj3>dfbZ43N_AaZ(@|3cCQ1`?;wn}4u9+iK?4F`PeLS; z`&8mZad{CI)$)KISXhQaj4<~fHy5d>=qVxF`8ky@ClhW?ivHGpo4}YB@b)r43-8A~ zKj-LI`adZ~Q%+~cc*3iT4&@Sj`-Qg0T^q;HO9xF|TuIO=&OwQdxem 
z0O;k@ZjtJmRtvGdvg?<`!mQqh$CMWwj9+^P+Kp8hF%qFXy%(FYQWl~t8O}u;!S2%h zf{MBFkxh}ssy%1amQbHz7#)}EcQyZ@gpGmZmYel}#7q&)&Z$psNN%xe;DxXj2F-R{ zA`v%-oBj5zW5$4sS#ShQgf9HW6XCXPzJNL)>4zeVB6#*dMbv%4xxagzZg<>}cx56z zE?k~}MRNhL^0E=9(`|`qi`C$i*aB3v1LjnhN5Z8~m2PnaGHa9-;MC6on2KVcOO?`= zrOefo#}=Vej=2@(Fn$su#KNdjp8QkqQ7k%2Wv@n9WEU_oU?l0|8#Y7J$EXh1&s*}I zI$^De$s!`NkIvKz<9o6{T=ch51YeoArmGu3$!Y$kZ;a0CS32MDL)K31yX99t^+z1N zDovvXLack09Z~7JDt}i1HAPfGXvtT47_U1J5h($ku`2fjc=o@*O%h+%{(SG^&yeu8 zUJ&7P`2sRD3yd>v9s94(-?cWAg)Cj!bd8}-@*VI^J7)sfQt(ocno1nSWb(JAs2xc!5jSk0FNI3_X@fI{Ap)*tF!vN_I3G0 zFCwM}CTM~>wuXGb7ldjHouAwfaV0PQG}yKeg7|@dFCvJSl;AC#A$?9K?J}Z6yytT@ zEMf%cgCXgN2)a{W8IslEb+u2!iVtb7;yEDR@;;RMStoH-l3fB*5bhn}VXOiD<2>RS zs`RZ;tGEU-xC)=eANN4xu5Nc1z|Gnl$%iNX*@=%tSQNZ^PHnA+=!ffDL&NvxNtEtV zAgo8AZ@R<++7%%Wa+-3ly!JeauBOV1yBqpFjKjp`tYtJZa!XSWOrm#W_HT2 z+5XFK&SpU`1n+kOMu_;QsHTUkw_t1>qTu_n-T!AcrF!$ssxHTsmu-bG?(&4CUAouC zd529Qtc-JPr<8boyR6_`tHPKn$a*Kl@*#0D2{NU6TZF88i4lw=wfk4=RukSJPEDK> zu)h*8ZI(Z-?<>0Hv~d?dY*BaLV@LjZT+ABZ>hx!OsQn9RU`6o+W>JMWKGg$2?7(e< zOD@#zAZ`fQ+0l)kx>;lB=U;#66D@uGX4x zd#Uy1sxrLdpGqj9KcU6%*I&4NuQ5>`hRZ+^waR#)D|UUt*#4Cc??0m`Ng=5z@O*wE8uM6mKGML{*Copj$)@ zJZWMx>hQxazUA3i$tjRDtYXO!a-x z@<~EfmJQr+%B|PI@-WXFM|AB-UnZYOy_lcj>4|l3jrj`n2atMH9f;fk>`RlE!((}) ze!!1)*GF{*21Z(E+Sz+cV@-&>GeQbIrORgtc+a{NUhpoO%ydp~3KYoevA>nhDY8!u zq0?l63)zT;Qgl_b4B>h`DrjJE!&V!3{iUGX^5(6+by^{?&QD_srdoS}kuBP^&rX13 z&ZFPMuMtUcZa?JWjz2el$c5fT74Y|Yzi9i3Dr}y35t3#scXS{~VYI_>*;xa@C4V*z z@+6ZGZN<-vVqF>^L9*tc7QF*0u7}vI?AsH^e_%P2?_fRuPM$T7Ab?EhB4RV?f{pDd zZ<-znv;;Gv?KVB4upt!Iz}Q=Bs-p6UUKu_!N4S5A-9%v~bHDB5!K8e!OtH}~BM2%8 zNy(X9>LShHKLN*8y#QQtxgIcan&OpikJPz>j3SfL4^M?}NN77}xeY*YDihZcVH93Z z!=0Ia>p7yy(sHHLI*yPlpv6g_I_-amDYXfy9l>5%>ijEMjl1dznrIY*X`%}Eas{ki zImj_yy$8vU^FsS}1Vq<0GLpeVO6Y2JBVh8D5EXvW(qG8!uEYkeDq)hj;1gc&{vnp9 zJ=FM7<-z{mZU<~`E<^AR6(wQ5ZN!kOxT2eYfU|?km!g>50(moV-UNbh-X#yDjUexi zAM{Q7V<%+?*%)qyL*no{h-siW+Jk<}8ip`ixMN`lpp7PKSnxDGl2!PO^P}uZ_>aiM 
z$|39BjvPD~th8=58J7RF5c__5fuj%x|K`N-<{GTKEyrD8VnJ|;cqzXZuah;p_GF-M3G2B`;mME2c^By8IffrxGi5WIjsb6xRM6;Hhu zlI*5(!a@^Yf=zCSS;sXb(G0_GAyb;sAxis2`pNBg{#%~V!w`Usaj}A{DS7lY4xVxf zlK@j8peD!_qkw**lD>hoO$(OILoN*19Opsp!`va(J*q20#dZ*4lp0% zO+jXy$+~+`+WS4^PFFUJwU&Vp<{ly=7noqDD8h?@;R?JcR%RgQ!9^R3+@r<;895PHvn@bfNcRA{wmZ1l8{BtafrFh5Emr9;O zh8*N8&xq+DwCPtlGwLy!5NnNoJ*9!d2i5P#!f6Xjoob6XSC3*KJWn|pu)BQ^8F*BL z245ip3I3OR8$tU%FW;-rWuAFCZ|!vj0|Lh}lIdU;=%$lFBv4zc)O_#+nWjSRa(Lwe zf@Q#KBRQ)NlTu}?Tx=2z(h+7rzzWR;7caE^_|s}uaZa8j#phrwH7u zl93IQZp97_2&Zk4%nk(k1~|wfNc>l@9MTa;3e$zwrw72`=;w3*lhu7T1S+b8uPo?{k z((7EWbcLWF-{9_mZvN#nn?Y>@1(4OOu{xU#5z$6ymz z&S5vpNP#0(xT@72vpH+yD_E7k{T8Rf*e5EJ^d;Hy@6Hm zj-&13GhaCalz<1E&@2UL*y=gTxz3OdUKx4#gwWkjtlcnD$##=$4z~DE{i*l`(W-ybv62o+=IK8Xr9|;& zQaKFq?V-Lopc@-#K7w)J=4kh#0jExj!*-e(Q6F!bkjRHhI=I^p`J^9^wPzujhT^C* zfI4SPs%A3XG#)F_(jkhywG%gr%|QRXqjkTZ07CGOjpkY+)~2tn#vs_1vqJolUiCLc z5OnHF;CAt3;ztSPq(c! zv~62QUkz<@LoVnWgD^o)%Em(OH z$nDVQ+Ix}G57^kpxslrMuAEccoV1luQ|=hNnPV5wgz3ndhw7Pz@gxVvD{py%OgHS) zL(MNxP5t*^EwY1=Qp1%wo;og33eYdvXUk3+fv@uSeU_$YCrsEiD6NH=iy~?QH9u}3 z^Kxu2LL;tc)9d6}cLewT^9`OjMYVpUgsZ*Pnd=qQMf?9jMk8*atIFgx%TBG5LB31n ztHVSgKu~GnINECT>uZF_J_zLI*sl=K7O?f=SlrR(K5+%? zyzV|KVd$6e@F?&hWL5mh2BDi3ujQJGK~*HZjO#=-Y!stEK^rbcpaq6PkoQ`$yd{5N zn!f^jQ#&%bpS_!F)`wr9K0Fh6G!EsA_PkUdZ z%;|NM2mC&=n2~2+n;jKcUm-dYJFxVuk0Klu^dk?Su-K0p(p0U*FIrU;C=TX*2*Kgd zp<3s-{Fj1V;;QqFi(QduilJvE!I$v)r<{*)!GP{UAe%(Yii>{A@cl;=W;O#+G}rXD zVzM3Z_C~7!5axUOgvQzD-vLwD-RK*TK;=TWYo)17J(VEGFsC@?&OI|s<~-6MPkcw` zM~_*$z^Cq+jeir@;I9Mv=5_%45ov5`)RZ8@_6;GPy)6vJwv7pb2ym|e3@R^83Q--Z zaI_I(03PxZzWVjBMnmt0VNE@GIyAq%jo&FKSLArwIEg0jMWt04k0lI4cr}Be9k->B}XTlN0 z-vK5EHbgHGVZ@QIo2SP&RJ6H{Tx-H}UQ>KaG2uXZXq$cB=IL{IGlU!7G=;sULbYD2 z4IDM^gY-CKtFVI|-M;3vePBXY0zaG3nT2kre@!dTCnhDIyS#VS=OaN%{6U z5p0TRR^|v+_8!+wD#d8y&7{z8Q?w^FsIf&*_M^Y@i_5PhnDz1lRe#$VvSTm88?3a^ z81!fYeJoNx%~IfJaNcfhyw4h1l4^=<*_;~ zGc$dJbUTUgsg2`u>DR8$URNZ<#W{#(cr#$Xop%}#9&$I;z^Mc1cFRC~|5c9yV1VC& zqa(cpK#+G? 
zf<@jPfbx6hf#2}vRdgTv5HSEw86O|C8yM|_yUN6Q@T3S!0P-uaMax6z!44Sd20AR! zV_&38d4liEAm>FwRJyuH zI^q0j9kLic0q;4S8lNO)^=L$QGMyx}50dtG04Tukc3Rpt{`eSy>ONo1J(v*?zed06 zU!!$ZG4FBf#U}6@-j=jS=RG# zPzh_n?r3{j+L^UqjN`K74@gm9I?pO`46fyIP6RD`^9`MVA@C~NFWMbysa=cxg)0lN^qVh)Qr;{ z+HE|82>cTh4Bb@Av*957?Y#C6KPVSAR{hECR^#yXBH*An<0w!HpK-{mD>p1BlbM$9 z0%UX3f(3RcuO-A1GBWh%d+LC~=RLC@jPw!w#6o5vqyDzSz+3VCSH%mUYt`1GS~p+Z zyB@SzJgPIflWxW&hD7n~sC)rZ#Ww2UR({ExS}F{z;7XdBM)-N6@TDjuHf8?qRxHPv zQfR~iH)^C<1)@AXHQYVt5Y7@#?Bs$v+oeJnjozkejrL60Uy7-68}&wTNa5=H%b77- zfqKs(sHLxkM^SkLQpxhKX^3KMlfRw-3)UTio*Iy*1S6~_4RhC7@XMz>zF3bW(_Kme z{?Xihd(F|5)N9PQ;X;x7CnqaXNd3u~i>@iR8rN*Z2L%`+pNZT;1m*D#t;1)+Pj?4C ziT*%pj>iP;)r}C(TyB_QvDP1SEn!R1DB?IU#qw(R52#^BZoy=+QU*Ct%@q(lCbrRL zlF(L&EI3FxlNWqzP-Hd$ThPY^kKnaHlqX2QLQjoepQ<%+pQtc^7IFp(1673p(O>#l$+94arilCT zL%D7+jt?PMw7FhdnkS^MO+z+`EJCY45&8vSY2E#0jEg<6a23HIUAqdoIY2!CTqCvK z7z(#?_Un5fjwdewc|b$T>tpW#4cXesU5Q?yJmNmU$O}cEO5Cw{AxhN>ugg{7%R4Oq z%gk}Ry-Btl<#SBq-4UIq|&vH}Z|#tU%M z0NChb1LY#Q4BOD`ydpKDr8Vs5=yp5LSP}V z!_7y{0m+xz&^xIdksY0fdsCq37^ALY%`CnKi^2NChamPFAcpELBO<;*^s$D@PBSJ_ zVMb9$6i!8QJDuYumOA?;H+Dbc!O9f&r8`Y2VNZaC!dkG#P$d)mKG~<=j+SHhOMqIO z0S?8>CtGCw38bbE7v0*fKg&}36d1lM7734pn{W*Pb(T{cEo*(c;|rm_4*3R2kWh3E z*~!|iS4siftt@X*qQ=S#w}(dEDRd57J)@Im2EV7)0kkTO4f| zMbNaKXnmeMa>w|kXcr{Ef=+WRAxLt@5iVpvwp0YX+%Z_eTzPHP92UIHz%s7`Jd*x! 
zSjod*gmlD*=Y=3{0PU$r(wcGC>n9DR3%6q;n*C8)?jJ#S;*1xb*M%Z@^c7q{QVlP2 zhC{GMqA&mYt1vk+C1^o6x$+AaH5Yp=1XcVl45%1b_IF*q&9V8_@B9o1N6bKKTpEFPr>#_x^;L(SU^fbcyR8 z$U>5UW*%T|eQ>O}V7i<$eVEK7PEa;7xn>{p_&zXmh~$sW-Esx6QZ0Zsv=Ai=wXNuR z@r3+7(tI9YnCC+(8=vh$qRz}n-tg&8#luJxhKCb~P1d^^8)irtK-N~gT_w-=EKXh7 z0T6J&qi*zK+w}ganVazOJAJo>a_zEl1Xvj{-K@s}@_-jt?O%l9=?F6NCnB2juVA`K z9)4j)+9is;wo}9jdN-vi2u@_@fMKcEb!w>8CVVFbiqPr7Y`}L0wQi1!xFRqE1bGRI z9c+37+4t5m5e0j9o5#?+`>UX98X@4-nAbpgl09(F8i2q+2RCq8*bJQB2c-T zalC>h+Sh-A_5{J;`-!6oU;ua|(6Pmd*UO_$W=P=%zxCxa)o_-0)tRQ5ylIgUyjF4N z8%VPJ2K_WjYo06{xhE65oL7F-`V`j+NjL|Z!R;=BRiI4CI<$SnK-#jLWl~N0?H7TU zBX-VhT}P>yN|=vhA8EBAE@Z?KD2b`V{^#+#pEN9HHKmtL`uYc=5#)p)1|iA)3!O{u zmKa0R-EBhpEU&L3)L8Uj>3)=;(b}0C#<`=U>7AEr0ugTzt#d$c7DM6vMd&_KFin-G z3)Dz`I}FOmOnULBO5z|pNt3IS*u$g+A>+t;<3l6@;zv5`b{WUnD{qziDAipJr;T8q zY$6p#C$lM(FXq5KROieI)Xb%X-PF17%TAerZh89`Datkjixm!x7I*0#QAK|E_~-tKS1L<(x5b{GqOH^vRIS#qT?4L z6zgVA{FR9=5H~o$ou17Yms2c^Ea;}N2bjb2@%(Y{<8zS;d9Oy7%s+HvNo?%I^F^gr z&m>@-gIFKANs_aCpjRqsPh{W`o_pSYw|p~6-!G37#10>tC}CT5_|0PM7!DOTL6B6) z`LDYAXJ}>Sx4>NUKkcq$D=IR?**`mGJzNja=^5*SHj`)hB8XKMPkGD$$0la^5RhK< zS4)xDEWvaf@Oovp*p21Nj|$9?RX}=)?N+Qhx5z`nVXf zQa-w-05@XPR;IJ|eFLew3v5@XR)RC&f5ovrfa;+SW_z^F*#>(KxXz~FDB7?iL0q2M z0_(~*4hppwCz~-RA3s6|Hqa~=!{M@8jN@sMhc0N=5I9owe8sD}hH?sv=${w5#@%>~ zC>9S-pv)O_ZzNBTLWlbLE)RDbgRmr&2SXeLo3@XlF8LxF{)l2|_Kp!m6hD{0HA6An zYAg+|*b-(dM&y2*TFk7%q(bW=Y=eK0F~Vd6WEDqFo!Y%EWyqoaAQc8^WS;(o)Lml- z0kU;g1M9x7mOZR;SXcCVKe>hUWO*nZx{46M8fe0+N2>1AMx&iiaHkgzUO}pGehG~D zd|3;8PX2xJ20cj=mkX?o6l@eR zk_+RgSkIFqU>X@786O`oaLV33wLGBke9Ut8!Sl|9v7BXTFj?tx~K)w#J*&}A3qSv1t=>F354KJCabg#54=#L8GJ>r2$1Kw@mP zAkbt^p4<9DX>yc!075b~HKUvF(81eK`S&(@U~Y8zD?7@QUH-kw%>sdE)ql%ASbJSK z{QKfbv2*PzEr7+$EC7y95SafJ5omLm&??*sz48;xKd4e?c)9#A4S173d^ACl8)g6+ z@i;{#V@L*ea|R@wRfZl8UgX5qIKRZi^b3x9>d6<>JzW6%d$~J4kp!0s#>ybR3K%CP z7za~B@ut`-pj61O75ZDcRRWLML;Cir2E@jF zD=LJxaTTVmaIN~x)7+);tT!MEfZcclpuEVLT z2;rS}y8QePtsg)_cbK}pXYN1VQjWJlT!)b?(#rs}kECJ-1*IPmb+9GQsPfU9(+L)`>SDM{LM)N0phlp@CItoL2aTH5I 
zyt@4q7;~+qD~=2Y0Ts6gX(xj~&Wa?3pLyN=o{@7vzMWK-=?|^vUnXEQP)Tc--WAdUE7ZzrPvW$fFWM<H~+ORXUJC4a941NE+IYfF8K)OQ#$*IepOnV#J?z0Ta>3St*QSnEz=J z2tcey%a}ar9@r)*IghFOPd#K_|NNxlH{FvJaeN}FUfX=jhrIDC${VPh&SC%yB@l^2 zXt0JlLc}?*@rh_-ZK>SdWyU4VrLU)!{b5!PL7@1!tSfiR}#B z6#Gxs>;VwQLMoA7e`9`#1(Wiz)nEpX>6etFb7iFWO)~M}UnpJBoalqPJ0csw8{q`Q z?z-6Y7;D=lMZf^t;T?~pLjCz{>ZzbTL8eb4j(UqohJuCd*bc_snN7GW~JB zdh46+4r0X+p;+?gO_Sd}e~YKTiw(fv5n=f6LuVA&*;3&V2{bRe9f72yp#PTQPMwp! z;QoQd1P;kIHj~fu`CF8r2b?XW(^E_<+0m)OX>cVFQ3~LZjO)qv!OOX9`SK3IYA>1fLdQZ6A zk_MvQq&~MfYqJ+%3QwMD;}@r=2jw85>M(j|SHO7r`9-(~UDhaysmm@cu9*t{G9*}w z<`q+d_IE~`FCBkT?E}!x5uuuuIskd!$pYH&9nefoSjRfx)^F?I-|`q&$nukG@cQ~A zi<@?wcP}0E+K1;KE2Ysg#&KlYCeUwf@;0WXI85Zd(=;5ud+*uvtt94}#pWp1?f)SzvTwwo*3;{wb zO5#L|{|~^qbr8lz`VM$MvUsE&uJ=C7KY})g%z>8ojHmm?)FY=?*C-vDeLKK{qvc66 zeSjb6sE7CAC-WpgfP$>|ijAa5i%yf_Z5RVU=go;|rjqx!i}xj9x}yD4a>4pH7+~jb zUM2sf>AMVCmDvT@xsmav%xfoTXJ}LtmhxdY<>u2Xpck+fG%DEqpm-3(RkLE6&@3YB z`NFs3r+$CuWD6=!w}V5!@KHmaawj~b-Y#SMJ9BO=aLRtIu8aOD;G~hB-P!cT?|M)e^yyU zlvdGrX1XHYX-V;PyC3`whk%9=nyP;Y<&T$B1d_zSA1nYBU;G;g!RMM(pkyBXLs#HN zo(l$lYsGLCg5*<_U>QYuMCR5XmN$cQ2{ zLNsjIqu=%T{{B73>F9mm=YH<{x~~yhNm+ZU{{_l$Yo&>)hU8<5$ZN2ls7>MHH7*Sh z{bf8Scr+amQPu?al~pHpMy^?&0mzCP>v?~+*sbaSf{hy=%2un0s9k#%7^IPdk@iLW z;<6t=YZ;Jd+}0|8QNZ641dYT;%|#wv?&e@I3|RarOK^JY z;n->U3mvK%U_PF@SF|)S_(~PxGL!fpHPwEr<=S3*_!Q;z3bJlqZnWS#{P)H%7BHgP zbPvor!N+ii_}B9hbW$+$6qohh=$}i^cJX$lLjO6Cz}}ttK)v!2w4`?b%mC4*{&nSK zb%mXVWyt?zGLK?UWh@Ul3GVwTCQ-%)2>l43?M#Y{LP1Kgeg8-$ZcszOFwP`~ORnD^-BUj45aD+#C)o*% z%pazaf49)?t1lqBU(u08>1$QSamK}IQCH5sx|GkxA_F1!`7WNc&+n=VdjI{sFPXwA zOa!8Jw0|7@$q7lY`1A`*nUSOfmA!Uf?C;wbphLGOR;S^t`!2{>;iNVA3HwscpRAR_ zHyyaTE|*e_w%eq2iz-D}TvQ(O0tf6ExTm)%`_auPyC=eo&=^`Te?^lXhqe?D7mx!BGl* zl+rKa@x*Wj*Rw|n*zGyx`He9Za&Ls0>MsNN|D+Ue>&lpmQGXPg@-S|4gNJQ<^l*`W zH`|t}SdT+sjp=c+Z1EMK5|nPAn8Bz<4D%>A8enUrxr47<2afr;a3xzWAEZ11ov?SOpeHc{e-q9bYf}O@2Cy|b zBYOI&$8J8ReWCXH4g%_Flb77Y6R3o-k^HU1tpqM-Y24o4hHoqQ2kvm!P~DGzq_|0_ z5z^2vrck=~ug zarqMflA(Re&X5*S_lG`eNYQ1+O>4Rpx|wCMz#Bzmi# 
z#5`KPzK-XFcT4~-UHOizK+1l&k$3Oh&{tX^#C&}a&QYgGLAk`6UWkH8e8i-4Tp9N> zhbJp}fY8*{{p#`u7;061216wordKt$;`=+$e4l9M#Xj(7uu=8D68R%U+~OLU^wVnh zHAcH~F^Mbdh=8G`Fby>^$$ZPHWI2nd z0!`ppBh#VDBVS0zNC?{=1RA7l#8TgqrrkeJYuFX$BZM9p`{%gZ-sJ7e){@L0Q;IS_ z(PId_Nz5^Wk9BS+wTS8!NEF#F%*sRf>Rr_nzMWjs+n(qZfnG6)&Puvg%8MS`d$$Mg zp6Ixx_uSjCDVbUYcciGvp2nB}FV=`xEVM<)fOmUfM{&8+--<2;ciS9DI*4+w8^*-y0)77 zyUWCX)K}hY8-#~lxkr}pZL7oAf3$a7^7#Qp%3{UGLyK(OR4tpC z#>aPA5f95j0=4Z zs1ZNKo$Qqd|7uA%rJe$rK*3Dir=hCJ;dT#E=AW@PiswHZS+XvXv4j}VMT7KNwaK8L zCDD|ffl+0l7<#!OShwu2&yT%&Xi6#UpYGqogUjrp2e-flMnXm{`;U5#Z>nvqW^&vG zQ+)4X>;&5H`a3Ni8-5xT$d?9Z!)&3pFAQ5j!&o@C;LU&TX8{&v;zwDs!yf-}SKnmS zWCUZA2Dly&+8GC%x*_H0Lh-+Ff~y5^+S#s7CvaoRIqTfCzYE>@x86ACdu|5^MZ5R0 zEZti6_1B0iMR(2DPYKr0dv!b{so1t@=WFGVBz2LYZ5y~}_wwbdfyl0ZVUU~T;yYL0o+mUW)J3ie#98~tWN-<_OI^zs!sZ7lL zW)dB{IwmykZNF$kYhY@@&&qP9{Qze-Tc54}%@(RfHSKwX((982-=xlW zUP)vAtrK@i9>$SfhBJPrc|3)*ZYSKM-e)Ns@?4j82~FAB49>aokr4OUH3H&Po-qBU zaeKenAUEuJx%dGPBnYzU>3Q^;_XheJzGD|f%~UOoH5dlJPZL)4$$Zr68he1(Q7 ziZo1mKVUxLQ63Vw1QN&y^+Q~GXRMyxtUN946#KsJszf}ysivyCh9jjl1`pLLooVZ% z$m0@n4cA^`Lagc-Ip$M7REz>p{F?72v3Dazx|*pCkB6~=bVrXH`X=;KFVR7;buvgB zFaW$-y99D3;1O0XShQ&CHqS61KpwS%L(Z>{=|PSkI6OVT6z&iyu40uba<=6o)q^tUV?Vnx9kmRKv>N@YHTvUjFJcerwG^nz?!y`D~oY zYe~}`QTd$G4lek8tQ{)F=(Tx0p4FB@9ra?fxnAaJwdaS;q>_tjZEq=CYVQ$n*V~nY zbuLwZE5`g0Q~Am3L44|;bvBE~WqskI+s35=!KaheMGdRFvz>4silQ<>qNqLDjarFW z{Y5+1sRVXcHr(Drt=Z{L%guyYr=XGRV7}eX#l4j|kd#YY#-B6bz>pC^tR3lgDz@uq z8#Qa65%aF6-jw`q-jm=XWJc~mxB8T55{PGsOeQf*gwEg?*auC1Nhs-^HyJjajO0H2<{yD@FIO4qFx4`MeT zR~@LS?`IKo5!ARNsWH~0xfsJe9Da2<`TGyw@r>&+f>3x1@G(m?vn-)Og!)7ph4E|G z-m-v-M5iA(GwmN4tad<+KFtg+oKOE%BTB@F|?qeBwgLw>lwr*)km0`YMQm_D{cWD|{^aVcI__CJlBJqLFmg4pU9 z&nzr{3s(&LMa7e-2&=T8G>?cZ0GU5JP*Ug$OqzN`@9GANf}lH_p(|bV9KNNZSs8_z z0cS`R)`oH_Y<;tvL?6zP7F?mI;v4;~yx=-l4^DDD20?;GF*OiMqR$oC_*Mt~fpt{U zWx){{X?>?I7u(6GX&)rM%6ldBL(lVvImgImBWJ>rAw=~0Rbse#n_Ll9`j$$q@A%7; zf;Dmqd5PMgG^!h?b@ZBQY4I@@964gPej4;hdj`dOM|N>$v771DN~K&@?EFcr6`G#O 
zeTX?F7pV?HGSdg6kTb<@51x8e%?C#Wjq-g!7y{E^lvq6m@#G^GT;WH16Xo=0x5Cau zr>aPmVnf7p=Y+Fp^VO5pkISFaJT#?Y8)?W|Q0!HTb@p0DM?9Lk#}U%k!n>RGzT=Oh z4}K5EVb7Z$h}0Dx0~T$L(b`UVxfp!-oUt8A%&xTJV`a79zdW6^Y?ED;euQxBJcDCq zYlv@sf+E?(q~e=PLKEGg!ss?sZ!eR(ZTj)%O5d~aFKNIfr_!m&-HSG^;lK4%^%!@% zo%v*}sw2z#MGay3eQ6>F1y0zi;1{q1M;-#4h!K7wPEz53fpqaT!t$XNEXF-HlYvoKjat=@odbXhw9gN(Ar{yfOS`e0~bch&W zm<@**Y8J0}i1eAav&ODuWHQ~qYTQjEX~GkDzFbPJKyjjA~b`} z&!Ei?3sr74r1XuO2@+kgwaY&?xRH(OoTxwj>PxeaLy6^5EN0F< zufC@WqQj6V+ykVw(2>D$D>La|CD*QdYg31-=2J$Gb^$rv)Gw!=S)O>>{w9*EE`aVm zI2O(o!V8Om>eD^bO>@2F-&9i0G+KLS61M`j{obriyNcB!Zs6RE1C90wl1g4qikKcf zI^lcPMWMed4)yjnnX?WGtZ)(T2YhbCV&4A-)95!|9(3s>lEb7YSa^TAAC+`o3j@2X zi+QDc0?aa=({xX9l?uLZ;>ygxwkAv}`!^l8{DJFz8X|-{ZpV7RI$VRq^&)C>Y6PB^TSvcU@$y79}6G8mjCWfo9 z>S89a=$;w4{Z3;7t$DnyuS>A=?wTnFDaSW*^M1wa*x!_m%12>&9M}B|=YjdXvCAJT z2~q!{*Y<23MJe6Y)}3nfSMN3qjz0)zNUQ=L@hV@u%CLOGk!nD|zY+O%2f33XqJ#_A zmpjtBd%(|624LwWofAHV7cm({oqC)}{28$T#Sn!vu@6J`v*gD=s6LKKzzACNy{Yg@ zsM=X<1`)K-uGH1^sU_!0+!IiGq;V5g8P(M^lYAMJ#3BVgIO}a(;!-}we=Y70`l8Xy zV?F6AA%|v!bNUWi?+Lp+VlmLC(fhf5X)dSUtU&?*U|gL&y{A8r`<= zFWJTeWAr@k(^&51A9cE7wVSpO=wImkHW8fkqum1&iBP|FD%r>q2n}pMp;K^E=RC3U zO33{I*VGcIQ^jP>kc>u>a+bJ!Y11tRO572Xw*H{iRM#z$PR#(1rQ?;;zuSH|Ds$oE`Gc$+=H~Skp((fI#D!D#BG;U7C$5OpE z9s44?iwx5uKfAsqyeOyGN@2yWlsvho>AUTa9guRu{Rnfa{~(@fIiQ>$?^TmxuCX`Y z<^8&lzw;Vh6mhY0`1czjut!5vuds3Ma9Db_K}LoK>&2b=ZVz^3_ez$z`^7zJJE%(O zgfnxGVDToN?PW*J5}w=9TSRXMnC-ST=b9GXgIopN3I-Ldv6>Y^FQqSheji=a5upNH zm*wdK-R~U2OxgHuIlaRFTtX3Qr9pv3#ilu0fa8P-tuBMwBPpPf@Q=g41J;aLjQQqw zwrdMoRG2PDa|ZQ&CZbp$rb~cx8?Z6zMt~JuEao~gxlLXFo2_~&Rr2(nd=KeHAGOdJ z(AvcVP>H`+M}s0a>3U?I2)Y8d_-9uNZe`u^iLbbIz~^y^%K4DKd{(wm-qQOU7#tas zznzAGEV(1GJ+a+8w=f%TD9s6Po=(_OtKMrxbiru#g^ zy}Mhd)3jo$PSdw+(a!rmJ_<_~Z?YyNyR?=KR_gTk9*oY=Xo*EzD^24MB4kp=$Vo9P zt=?SoQ%10&a#x{qj!;Wf$5KRjWy#Qqc8Oxnsum%Y^>Pgrl?I{`!B zwD)`6aW6HvVU&^+jWfMv%O^PPFqEw3W+3H&cS@+}OP7A~Z>rH{I)&kQWGtMHW<8eP z1R%;K1fte|a5lKZvIY-d@BTH{?2>s1DOzzNMMww?*}?_~2IP!Bkrs7tX88UKM&5+` 
z0U0>Ood6LTTePk26@1tEbf4J}zr=MJ2Z%%)F(ZgK^67x{(fxYQ@u_`H!{hzV)_Q-i zFe0E)n3RLp!?RQIn+x}XdnidTXd&7~b;Dkpe7WDi>&HIX|AytFbVum;_wgUSrKM{| z1lX2Zh9P}KMU45g=4r|1h7AL2y@q`RqQh>sQ!@ov{o}|Oz%bsdH09~WFd83_b|IHE zW;zgV**y>V>kmlcTbWTPdoSod*O}A_nnkR24&B?6_Wi%Bh3_6#G%5d8Y<#Fv5?@@6 zj+Hons8aSVpg}J8r>os@4hQL!Y3-e@7Gz1%G<2_jeTb-l!Q>%A?rMTd0JqRJH!Unm zdl;3f)p5b4Yq`u!dk`@ocD)p7mu>4VE@zu;`yHBIzm)|sVt5;B?s zVs2;H|09TOVRs5@^4u({FTauI<36sw z7kr}U@io3_&hhgKYHqESxu6Gn6H^~2fv-zfcR|!+^5VdnqB1yI%YitKC6?Y&WuT#9 zYL?b}p5jphzmclm;r*S0=nLVq_RuHM9rukN+!iDN&@fa6RP&>YXDI%6n^57><{c$H z=B5?yr#M6UPCjVmp)a4CeEZXTcPtvoT7dck7Rm46+^; z4*Fe5lD5%L<=>m$ircJ8Y0G1~K`~s6gsR@tdfY){DVk#M2Xgrfoh97YT^TS{G^#G) zs-CU4Xpxz5zds`0LmV&mdUHkoMW9F$0YfUNNb)5d!)kp=lV(4LLgT6}luk8=V9R1p z75w^2CQ|PivW*iKFspUHjve5*L^t5vKU4^=@iDSOqMW{-u_iQrxW4lN=q4NKwai-* z<2MHF-L|j<`=kpD$JF)`KG!H`@AuL#LQGm9)ev$(bF{U7oC(Ru>}tE7p)z877+boC zv7A9yH(5?UaM0|5B-osJ>Ewv3#+IapzNQ@@@dk|_CwhEyAaOY5#UxSReYNg?FowL>a@g=m;5o;lH zg?5ui;t0vPLgT&0KQvTuRy=p{Js;CFdk9fhN58$r_CTjDWq0jUfDmq;!O;g*g|t)h zLOLTtt!>EJIq;H%*S(|TE(YY@bz#dlOJ3**`e%(z9TKd*DN0=*)HrI?+x#cMmcp%t z_ga)h-B)N3@MFU$tw9xD=h)c%mgI+U$=;!+I2He@kYSCnM+S7s8P{hH-~&MBv#KEX z<1i{570oKOVJd&y7mybQw=zdYkzDL2t+ASw8?3SQ9_597VSUj7Is1H zMq0_Sdiu`z$s3PBW%qmdSHWIoAqNwFh~HhvAVV{9$Q-rv`nN9o(#B(PPqNO1g;E*~ zd99UVfR>(j3=N_xS^Txv({Rdn=CtMhChvhLnR+-?5ce_UCl^0c>PP+=GW<1^uAYPpwb^#AGq`B&l#2zA8hhvlh1YjHFCF|FM0QPt|I zxSG>zT6VRPbOvW1$56h2<=83x810As87a7bwtOLL;Y5Rg_6=;KjJ27Se>22RHr5tv zCVY}^7MwILonZH!RQPI3S1oG`n%>wRbm@PI4J}QzLZMcD*d00dYW$AmBo3|f{t83L zv9DotERwQzfu#|9=gt2SZgdN!a*x4(HZ|p%cXSywWFyf-xeB0SM2_KMd*rT9G$d{i zWP*x+g5dC54s?TMsSp)*4sPfdVw;qNjS4ZFkzQM#2}V|216Je&Zf{q%v7|yO+PBNA zcmL}XzWV3*iA3SVk$=I~wmHsoLnCK$DIGj7-77ajD;2AM=@$yJUX%2kVwGj+P|B-0`2i8Cez1Ws;&1vt5&sv zfp!5RG^PrAE?M&@e@Tn*qLabk7t_GrVm@u6v;cD)E$Aqx^gBiC5Up=JK^ zET9D*+WYfm->3AFls(yy$>^9g)5zuvUvi-3MM7cW+b=!{O^SLAPmE`C3*y{M;;_>g zM!Q{Gq(*5k@q3MhTJ*w?`!V%|^g{qVtR9J#(2>+R3$q=+_m%$^tTY!ccItRmg&NId zvQx>|jzJ$V>9cXuM>5l+q^laA;SSdc0uA 
zD*u3ExKlnoZ1^;n&bj&jUGY@##P@SrfnLZa3vxWQUj*vSASigp##@8~K?0Ljq$MuV z-Ac%?dN1X-cnKL$#psk$s(8_+9i&9LU75kx+2hPKha0vXVU;10d%g)>wvVyv#b04) zOw%t#{t2ukVB7rj%4Do^YxB!}4o6Pxu1&0CKyOL!&|+S(W0Z+yMA$ILL1)jSGb%Vh zqkgM?y-t(v0Bc7!oyQByt9<~N{6)hq#^9`r4%yM4AS^zsnO}WM6#jmG@Y>5tV`1iq%i)eHjFzofGZewe* z5~=={UEb8sq4+mS=BpUGO&N4|;`kdB4De@_NYJpGieH;Q!37-Py{!3db`8@(m+eDw zKLXqqR0C0&yQo(I*j>G(8=j`XcWuhsayLFJL#8s6`Hf))*&L_j!~g-mDCOW%2@4>d zFt+$FLsYNnB~IX`nf2!h@}m>5h_E&$05s1f2=U1-+Jj50ejxD!k6x7zkO3L=r0!|| zG7fkSsmmQo^47N$2!4(h;WF9Swp1bYUK7}pBaOI#toZHq2ZHQbJsZYhfJ#8a66v7 z@NI&p`mD$y(A(7N#GIDlK;`gkyWcv|PN&k?R7B%gM^x+gG3jkU6(y>%73~VdJbp*@ufJ*D?yJ#hs5YvzVG@t zk1V(2ZAO^$IDArwr^XXHrhy}2>0EIwp!-ZikyHGL5>1r{&z;g1<%sc2fTUSW;cA}; z%6h>H>r&c1*<7Q2E+{~7OuTVO*V}C;(}0y5Yyc7<_lm1q!{i7bX$@O3i%qxOyG7bx zO#9I(ysv}T55OZzyVFL3-%IbNMvlG+I6<^*vH;cCh68XK_WpE;gZ9do(T*pfeS zOZ+=7R5`^@%0aSO1mm-*)TQ4)+X~0&^f!XrRXBd1Vo45fuVqt%g73}+d3~>j8~c)14wl|`^OEIkYcD+f zOeAfFd8*LLL7EjmZ5KiETvu6c!B?y}rJddX8iV`bUk380%0EMxO@}*DZ=DZSpo0@$oK+CtWMtOG4dp>|~LVCkaqvK3vD1(lAtocnfdP+4-Kt-h2qKg8WvEv#2)=QC? 
zxZ_(LuwVJq;TtM6&=TLaBTKJihl4MqC4INzWhuPSmTgaLA`qKqNhn67`)By)F2k?k zBwwDmij8YqrfrAxn4S#QS={9k28yS(yna1i?B+_{etz*-s<@xdOe)HYKlpw6HisAs zY*&GYfQ8c1NP#u1L(icfSY)poHcorkgaw^eXgYy-Fl;_sIn47V^d?B|GQe{riNEf^ zG(MTmb`Gv34dft!@ba4g)ISfSBVjUttHp;y$Kx9ZNpGj1e*eBki2fb4+v;02m#cN$ z&RYhle;sHbtswzyEIS&MLp+-f1&K1o?#pNC0y-- z7G0VG{9vOV_+D%0Z- zwj}WAGx9mNsc=6e8Ja5#iaV4 z>u%VrS%$av#PZTrA8rY?7HrCuDI7~@A+2UCh3NjGT+Jj%sQ+A z{oneww|x%tUbTGMPoUxGtBhtp_v$^Cm-SdQ%wiD?X+Hs5mmc36{<}lAOb{x?r9X$& z=jR5zEbR!Bh-@{vY5A{>wdYry_WCY~)Q6cNv`?=Tn2%dJ!V>}Eg+tUztMgWCPHCP1 z)lJ?eiYwC0GoE5r85#7iUhN*Y5>h@@Yv2vE^Cy|2m<%v;v#YtBpJ?p&al^^Kz-;o< z&h2|sT5qcSK9nXVO+kcBv}&)TaUQ$p$7jDgJyTQi`w5xI5GQ`$LNmJ-<8{1Guz%7wKfG!y#Yg`W3zE>`qVDYy3C|9`5pvN zk@CsG`CEp7Oz!rP?(8phs}_Nuz_UizB4XxlAB8HlX8i_@dA3+1)n!Fsl6y>EWRJ=) z{gwS=jV5j^dN9^}rksYSz3As~ir06spCqc}H4`Y6qp+xNJ1!4r6_Tsc%&78{Ur+91 z%|}2X$ylzxR*0(}{+Ua*^wp((?6+Z_aecNCUr;Lmis)f?uD;x2AI31&k*5|)aVo1x zO$e4bspg*aM;Q-;qdj+^W{jA}ko;RB3TO#myK_X&y^+nP{z4qK!n8_blbhfK(aZ&p zWTF9Nl9RSNRsLV?Hf7%Wqnt-K2Hig%^Ge~bd`94>BKWm2SrEtwEP`003G9Y^BxJwf z2l(V^aYYFiFK+xm904yV>?t3z5d3N%WEYWn1KKFowdz)vpZB{)Au`Npo%ZHr2XOT5 zzj_U9?fN=t+$roYg!l3|B+9s6rKPdu>$Afim@UV8KIX^Xpx&tb!un}+8hAZ}V`^NZ zN+A{r`^~;OiNbq>wK5lg^TyfT*C|pq_3U#_UWE5SoTF=${qKvo-l|+rZvfa$AUnzz zM007UMgIX%wtHTu^WO?@V%+aHV*T|oR=z{vE|29@#=bukcacE)*oOB2(wC5CDv-?- z2EkZ*mXS(j4>fZyt~CeCES?(o=3Js)(K6R+1WC}r>^WKBW5xMD!H<;#eY=3a#3rG+ zUHXWB+98zxX`JS}9GCBNkI^$^w$0cG!n~S(di+nFV-|U!NEpp&n`$PA8b71x3hei~ z;*J4fEx+M21OUY8&yk)jo{Pvb96i;Xq5}Gkpp|X_v$}^FE!6GU&#!>H!Z7r}rxHIO z&H;O>OlwJ!G>t~Fqrv5y`TdQ_qJ(av1gp@wuJ_{2W7v>~aAP4{X^Fi#vH=OERXt z&g1em+BT4ZTdU*OPP~)2e~~f5Xy>dhX6`PWNX?%QYOqP5l$Sf(zz1E18|SGUDKv+} zLRIT{j<{ipS}#{jx6hR=wtz%JqHyyA=OG8W$SLzv zfe1JHgS2He3rQl={u3jACcW?tk40z1_Eq&_eZ22SDO|TcRv)z7&63AIy5B8NVtO5l zQobT>VXcKMg7qGKPhFiqLo?I6|*Y9QH=P-evT9ML=F5`0=z@HQx5PEQ6G zdOr!C6hBPNJo5uJy0H&`mY*7YxOz{eZ|6E`qlwpK#yWMmLzR&}YlMzdx}Hn^()$%b zH$qp8jG5<@TOKBB7P4{JxPOWIGX1HEfz%8pQ#rrX!M46hW=H)y@|Wwe4Cn@7fiLci 
ztZ$Q$p7=2!N~JzR5`_cVpB5UfpDmG+>V~jtc7E1Epqg+kFE2Ll0O_&e8gVZBwV1L?o&3=FRrt-KS zt&}6S>DHTBgtv#LqdrOhkFXTNUU$YF%WEp}1b;^mp&HO_tZ^!W7yjG0g|(esV_Cs} zxyVQ8Ql;ALhWw$Q!-u_h`kAhAF}ALK;$dlPp5JqgrNk&-5DLB*{OTcbjRLh#9TYKG zsPEN`Btg}ucTG87Pivn@-fkCd_XQh2C#Y z4+nf?)p#FdihSsuhecagDL5rBEDRGdbwmh|P7ykI-hWV@RT33*r28-%`rwWvF}QYho=+ z%BW|>@eylfqlpxz+&xjXxKy_OehJQ>B%;oa?Yh=t2pua@B7OfvmBQ-x(8ypSfr<%z z_(zrh4&fZPq&lcb9N!G;nhTq`w>!bckWV!FGKl%a1(Gu&DBcwN-V0xjVi$RtP>t@@ z0g*#p&4v(ri3n=^to@!Q4c>zCPXGG zlF&@*=z!0JsL!n=x_aw7v0P2d#WYnzWdg8PRltP-?#Tp|5k0qE z+yTA7xkjgp+PR2rSk+0!usxI_lEl0%!uY=V*lqzrst5eUI(#<~@Lw^65tczZE&}Ok z?7x^FqrQC`3#F?yBmRSxx~W=!ji50Q7e%Rm$z8w(6==0oKa?aL4qwZN+-Ilu(7szA zBDYT#U`OpbyJ=D~-T@vj(K@{_;Ny4O`j6C1===M^V?(CqGZn4f+2{Z-XJcis*Gq6Yc$nvoptb%p)oR)n`ih|(X++Y{osqN>cgx@Plh$;ake0zhvF_vv$%UG0ZxvI_CP{Cw%h$MUZO zT^j}8e40Mc4X~~yRrjZi??hdZ2A~@rM=cz0s%Z%ECDz!z0ZG+uikal_iW&wfr9nT*0@cdp%BMvVsei^b9 zb4ZzA#HEk3xo=w^G7qLN^X#Z*s-oOB9`EySv`gcagp`?w=MSiTf_pFI%H!43^&e>OYbAOs>L@*b1 zrBUtQx0;2R5x+Qr%^UmU_=dgi*8YsGrEbk*^BOs;6TyYEpnB7Ea#bJUnp%gXumjA? 
z=P>ppqUQ0%X}9~%Aa`(6+)0j`dJn#%SdB%$r$cg9p7e3$!8Num$PRQ!)*Te$p$apM zBoWwByy3!EVR9IHs!EE8zKP>s^h9jmnb#hUoW~b@GUW@rr)`LNnoG%>!}fM ziyVmkWQUmf44km8$&3p1;p>Mz!0_yMV12|~XKsoQ@NM3q0>*Q@G&(hYF9 z`?|L4jV#WE4<8Cfumk zDPlkdnfhHmAtA(xfz(}Hi_uiI4k_kIKCdmVFD+ z6+|bY840*E1|L{{tUvZrcke&0n-h5o-NipX{8ZbtoErTb&J^7}xG&QAUBk4Dh)4dC z){o2|Y^GW%4gUaimHL0@HnjuyHK=N3{Y8$XIJafrx>H2Ik$IFHUhdM5rxQfac6Pt^ zYWzWLA}Z_UpaCtSXZ3m-KLyAtu8n(gw78dV-4@co;xqwG#?!9{cUHqq$WS2+t=;`8ZB}$=fmtE#329X z9@#PRVeRwpfOZurs31RnK$hmkN16w9=}m+>t+9&P*HmYd{ zW<0(7)EMq+J)ws={{Tnq3YWvAFHU%v?+q-1$T5Qit9+EF+w}j$N~UplfV#b$;mCU0wy3RJ z>W|pJfh8|2odiDu>?h{K*1q8E(Qy?SA{yT3x`tcmeZ1nrh0R#lf#KuGa2K9lk!yIL zw6=REaELgeG)nN`X;v>^+t`gf+P}A)|0Kn&wQ1y%=z>VZgnpm_*#r;$nfRvCC3y5- zK2|t6KjCc75ewrStG?JNq^*^qr5CJ-kMHnNXKktgOk584=tnpH)pojn##b{Od_Xa= z=2dLmx@z5|KQ*sT_*Ne!KH4C|ih}|8Y#?V)Opp`0(UsS}Ce>AQd9o06f6UW4#6~7W z3@Wo_)I8H+bNXlRUpeCoqlD`^;6n%(KZ_OE=dxRMVlMwsJ$uB9sQ4kmY`rXY`utNz zT)#Gu;z<`LP+2zJQMfsxxQDYe>2IX+HS74_SgXfztLS~p12`F|k*D|wb;E+Yjlwr2 z0dlZ_Vdn6vErW4#t}>OF1WCB+Qn6~FxY@7@V-o)hX{7a^_~`5HVwnG`S%__bQI zq$%cRCPn4E5q{rj*<)sI*!Rx5Bs3{4)$bgFC&0y7^=}1#jKgj!9gc{QHF|nRit}dt zuNtP;3+K#^b~U*^1qTnHUR*xofZ8!qE=T2wf?zbT4p^p^hgQ<+FzztNkq90Y7&hAq z@9~vs%~=yYr)R7GB}boVSAS!n=n>iA`vXtRaoI}oAPN|9{tk+)F$T5lo??)z;_sY2 zPqFyodrY(zmM7)xNE>7pX2s9Fr8^~b4g=jazqs97#wdS3s%Hefzk6%24GliQH#5G2 z0;7rjyCyhAj`v2uZ6MWzX-kstgDD-iq%!koSEK`9@OE4CcPa_zZbDqt>NU4lhDYpT z^kdajDKErUjfz~-9oN!llHd}0^uQ`R^0e=8J$n1CY$qCqu8Q&Hs)HqGFOc9Mn34G$ z>CZ7Mb&|jI!(92pv{C)w#3pHvt6kFQQLC#JyHVGvIE_PB930f?lC7>V{Q1@|=}>8N zig(oX!o8k;YU|#TX2FxiiPpF$!0)II;7Y3OWKTXxJfN7h)V0qCDc7DM%Xu==K7DqZ zeMk&t)v@M5Nz9?%E7B=1Qv5ftFlG)O`8z=Taq<%{S7I#Kw8&RD<8ZwP4Y?1pQZJc{ zs7jTS?yjBfO>v@zQLRAn)B-_Vj7F5lYfP4tgF<&4uro(%WY56I4wqhgJib1E<#T-3 z;_}F9$VAH4j z#~IcDLw_NkPe#4w%Ijz?Fh1oUR4ogLW?#ez+F7ba!Z>(|F(OH5m=zmDiZ1lTF8y3# zCSg^&{rxCGR4WaA>>9o!z^=5`ZHZ2g{OLVJ;=0e7Dg9o010W>2s3;27Z@f2Lcdhd~ zN^fZKHdap8l7r@NaM1iO2Ab%!!A^V=*Jp2Rj*Lwta!6t zEJkQmt*-R>5Y5SxQo#@nIsF^wEfOk)3~B*FpLG}+=%cPa;*exhp`q8#{(cijCrfYR 
z$Dxlou6n)~?oVw?lpzGMza`*yyp2Wru0%`UknRymlJNLrrS?~$)BBv9wIy1| zFsDVKg}cNZW$~Yqw6bxv#S2K{e5%?GN5AB4oTg8&8%p;Rr>(ItOvAwxgSHIWkBzx3 zj5=Y?;4h0~>*nSu7}<0xFk*$FoMzK}QoVbTmo%I5QoB~l`G9HwA>E&_(GqMEg2OnE zn#i=Dw&kr--}(VRdQ+0Y#H6)t*RAl%fT0JzBU__ODSjs@-$;K;Sl_sD3S4i)Hq8fI zghg*m{&V^$5g{dfzn@c5-F6%776Z z2u9K0PREh*imwD3c=&Fk3Zh%#9!AM(f})F+}f&? z?}#8|e57%g$7C@gV5wNIx6OTj8j)fh+{Mr9(0Q-H#$BX|c2%*->T5#AK55@c#jkVI zkGb@UNWv)v11}OXJLY*5_aCPlX?vOs`c%nlNekaqECM}Nqnj3zMg|@Pqy{{U=>lt< z-dj-!7t^+0V&5FW`jO#JenX9Tz(Q|L%w?{OIHU3n2qvTiKWARNUEb7}I6g2}!c#Y1 zR21_Bmk00omTipQ9HHqL+7F=7{b%GvzlP|wDTP(7=Z2OpM~rCBL2lkNkZej9^|z;6 z<(;FOALvduqgMHR%w*M4y@m!TY%Gt%F6v*3qHg+H?Ea`n;n&h_#hYGr>NQDL6&PVR9Y0fc4nY+ryRldcWV2{N*bjj`tT1;taXWiJ87lK~d zNA-S#={-QXHzhg1FI};5zv^Ndmb$;-OvSjc#cDUJg-X>jS7I>xmgZ^~@l=f`6vH8A z(iePh&ps-!w>rUJDZbKxUYp!9JziBiOfjW8b~UD4a4~7J&1`%pdw}=P9zsu5u&uNG zv3Gua%IehLw-)%^)CoLQ8KS1@wQQ`~p8*2^@eFrkMy`|ipi!uSoqAMu{%fGVn z^_5y_MrG|0;vDWlBrYTk>6l)j5m?bbThS`@(DBXC(Kup&rK1+UH!Yl4hsy^3I&b)$ zKzt}Q9yS9G=ySo;mxd<%*_;Tb$65csbV@v25n=aDD}R!RNUX7lAktc~@+Dx&WMEz! 
zX9t;7e)2G}VNK#>wL4_O-6bI=y*xiA>3FH@@3yC7k6kCZY3RAch;)x->w;oh7>~*d z*ZSV39W!iZz80W~3?ck8E?>OS<{as>UryH7Nb?NCkKo%FU7F5afz!+?K})HMb42Hb ziAvoW!&mJfvGJEVDS3T{TiM@6&y65ZZhOzK{48o#AN+;W^&Sg^7C1A$;TDV??{&M4SLTgE2aHR!`h5^QkU4l7 z|HI3g*FFt>Y@45r+*&3gEf_q>)@o}1$k#+IxrT3uh@8n%CYHDMIUYw~)tTPLyyky# z-FjFd6f)GBi<0`xIC1v2I3K5FN14Xu**=x{)%gJi6YjN$3YvWj3p$o(t)>l`@0_>QnjSFN(TY2%jh_rfIrhxXlb^I zfQQ;4Ux&E!`@*x%fI}(<(5m1I22O_!+5L~8yqEs-Y5e~FAH&h@b5-l@Cz=0*#r!c= z|Mx-euP>@a^Tnhl<7ylmV`B`E8@Zj8U2hBw_0Rg2;)nu0XV=oK%gQ507 zEep_$Tm1ms>9`b9PV>lRzttb&-%drf{1)4{=QfMi!_vnW2oy|ENUV+8v#wEEnN$`O zL#zxCEC_wRY;$-XFkKG!R4C;B;o|#lMH#qk*$prW64q(;99nLA&A8vd$BBkKE|qZ7 zJl+wPc2{lJd-10qYH}#7vtjJo-*>%xM`PGMP2U$6TI;kG-3SyR`itxkuF{%H+cytU zzf>2wogY@Fqc=a;hT>GLBRG!D$3wtBfzn~>mV!5NPW6~aw?+hu>}qjp9uiycPX?Xi zEo#<|pB_f7aHY*T@+6O-1nY%gWtVy1j()DxSBMy89}i0OVuuCgioIpAR&n|en&@v1L)bX~TSIM+==>yaUF-v^49gYVl7tp#>U!wLIzw)SQp z&&Rr24}SG|`zSS;`n!oIwKw>8gNom-ddBhx@bP+rNMB)HL2b6H;+^EGySdLqnv=iQ zc4A-#riESpb{Rjc5Ox1auy}DrPVweAv1*Kh&2wA{d0#hl+#Xm}A1UUc(4=X$0LKnx z&3*j-`0;1B>@_Hu2d!hFNC>R#qzDSjPRhr{=YaZb8E=Xt@bk>q$@Au99wB7sM`~{J z&X^f0oZQ|k*H{=1JLjFxm`{h!?i)xO()6Z;ur2ecw zZvf)S5;12pz`O47+#r@bLlrw=!2itlfLC^7D%WiDwjloRpq~2%D~T9_DLZb@ z=e3?v99Owm&FM_Nsn+yTJ5dGPi`UK^%nE!F8hSOH0vz?VV7p-K?(OPsPip#&Wn16l z)8#unrGw9hPhK~$!3{5uPQ?6~7fGuo7GJQ&Jk{IblB65=8)Q``Q@Wvyelj3_{V?3_ zP~g}lO}H1K8LYzDgAK`FoIYEAI*q4SzqZ^-w{U00^lG+MNmUF8%n;-PBu%7}HOurA z??qB^r+5r9AFI73mO4nVH{lhNgKdS|J}^dh9ggtQq!Xw$_B;Kyw2maqHm73( zmMf|c_L~*afQd)IB_vkuv~3LYs_QJxG7nSBzL(YkKGz|-2Q7W@qXZL!_nMomKv+Ji ziuZc@w|Y_+ks2CWP-zf%h9ixc$L|dm6+XMr zDon^-4#hrmPUZp3t_hpl*Q{`_MYC&^-cWiv9x`SvFl4c>WmTz(;v6Q@c=dxR|1=$QHoNuyrGm(&CU}eXoUexz zbtEO-Ol)Ws@~vxUwMgt$n|SI8_4C5g1;k12MR9$ETa~gG9~a1>2Y_ou$D1BXUR~2g zqkE&bB$m+d^sm@-TKbvZ`C#j|z8OjY;|Y$6H?y4=1+npbNSHQcVM$BNzq26!Iaq^% zwo_dFU>Oqsd5&6)ZjK}2)M@vpgw->Z}LQQg<46hn9!H>8{>;qa&V)0j;=z!A1~?C=;71;4K2ABC1keQ zj}Vr@8XZ&&u67FS4?orX_Zs4?z(AQhhtdWYaWfX{kb}21&Z|B2P;%u}YR9-<#WI+S ztpKs=5km2^q^UiUk}wt@m2`crF$2$#a`~jeebQwFu@tdKEKm$tuipAUmaaPx>i_?r 
zmCvPAd?=r^&ner9$VysgmW*tLQ})PqNJC0xb7!UOowIi(S%=7;6(_r#k+Xi!ci-Q? zA?N*izn;&>^YM5*pU+dNir*C%?D`8+5RmH&u=Q!6W+td3S(>N+Zu@lb>VE`=;T+j! z-T$ek$mlAW->puJlkX9*K6ps@B;W3MUBkm~Y;7}x9l1Xv?pn*VSMl(NnLzI;LcOrq zX;y#!RQ^P+4A~L`P9vFSRrJ$l*dBI{?Xz`K-#g2z##!pHOZ=3+i!9QXds3<4k&Sct zeZ-xF&gXv`6T$jGdHc^tWHL{q)v*9zm>vFV>rg1HKXv>6Lg8uoFOJ~i%|Ab0xiFqJ zOhmoT_UK4b1XCfWSx9P8DKufFB(K8*stcQFN%051{_mRgvxq*%iU>x9A(hB%E(jgz zAitOXxXK>({Bb;7P$6X1Vb{NS1x#Vr!XB_8mEwUddrm<=Si9z@7lB|yGT_3&W-ne9 zzE`lZN#r~A3!<%zKk|++*-Cd18$7}L(0irMax`p0U(XTV@Q~+w^K=zD!cPS&?s)_A z2>$92;7O~9VI3G}d39}64V@&1?6aTvBt^2c&Rawj4PeY-pHK86sY{=6N?-C|%^cim9PJ_*2swdl|6FtY>BWiOdJm7EH7w%LXDR4xb^4K!>~xa1 z17=44-Dz|n18IvI_1|#tmEHYYC}Qg>S^$|0qnk@3Aym1h?TTd7ALt^ygBUvRpIk-8 zyurNkeFOyIYG}FSBdimP!Nrk-su8n@mGXAYdrS9TY0_JPmq3{120l3wKn#ErJ-(hU{Kdr365MxBj zL1@YX%=Y@((RfDx)3!@>@UKQEn@fNOd+^wz?$F$4bNh?XWRNfCyPCW42=0Lwzb-6n zI;?e1o>XQJg_o%f2fJk*uncU2mV;xwqDJMOr@$2?6A6ey_Hb?HcQN1^4y=Kn`|!0) zA21I)%$>R+oyE$MZNPIzr=ihUNnJ-D3?}w$`G}V3>m{>s)PH0C938mRA=yLKjSW*? z)}lgLRDc%5e=#k)KHpFpOH)7Swm7PYAOXhr3(;^92i;k}3rfDcb@{^u*{5!Zx(LjE z@w*q#ZF7*+1xqJc_V_ISylqBY(K{?3lCo)xX8-%a;--*-#x!z8vKZPrZ+})wDgj^L z#-6dOBD$Gj1ITdaQ#hn48CHY8Yc8u@RC|Cz5M?f$-y5p-c)e-kFaFMp%Hgt~6;Lq$ zj4A$+!73FKT#Lo$^ZQ-ivu5dzShFsPBgzVg>;UG_^F=U__*=L<)jFPt-?fa){yll` zjDP&42;q3PyTO~gSpBj8+Yo>ml+04L?=P&MkaytkY0*xQ{RTczK*b5SIvTIl-5V0$ zMQP3e%L)x~xJ}o?CsMcEFe;U z&z1a5#GKL~pFJ~xfiBguT@kj#+$pDp?u=I;Tw{oeM8?;~?T3~SWf0dC1}>3edhgD*O76G0gc;iLiJ&R|Lu<0cP)K# zecRV-7a9$d3OqaoLKl%XEyaHlIzU1(zsO$rtf}?VX6l<`E+OTI`1_E^1wFby?f&|j zWs+2sV^q`s4hH$>7+phHKev!*oZN7|z)Uf(bjf0XV~AAw^Fn?-G-lX;$TIhxIBAB{Zl2s%btgc>S=wHR z;u^>C<4-E&mfHUq2@THQu^kqRd-3@x?FXMeZE8gZf1vqTe6Ui}sIGN7ix02!cX1&B z8i*u+>F(uw@D#6wPqg5l!J--70od95b3yID!=K1Njexmz${!-#(h}6v4bZzOCGoBO zk&?p*Fy4rT|Kz>rd!1t5f=&gwI#{t_xQb zNyb>|)=9>6+m01rkf%N<9avn4_<`s@?!fgtkS-y z}4-dLa>ePhdAv-Ny{gR!v7@2rX52DjUI-5Q(>%<#thu1y!`%lu&)E)ZWl-^N)+?RWG1ItZZQ%cIMOT znb%L0{5~0R0%(GM?7elb%X=_SxXS!^L?hTUXFe$E5?@%o2*LD$#bn6lk2H3tdg2$c 
z!AGF%*|jSdH&ZXV|MC}Ogux`664xEDoX;_R$xY$5@!r)3Tl+?ie7jvc4bu)da;ND3 zO;Z5&aIv!?tK#<;1Z})dSzXVRxCJTzOqEtFf-ni2e}jo7FwqT-8RhEM)4c-bl?@5G ziA1k!n{NWt9{0f5@pwR51JAZY^QOg%Y!K@t;d%6iKLGR9{$jg5JvT^im)*ib*Z+{7u^~~?|swh`3Kc*gmYG(e1 zWxIaq!6U=oAKsk!y@;t3a-fyS`^V;f$D)$)F!3>r7?yfDSD<&jLFA4LSfcH_^vio# zrgMtwdOvppZ5IzssfeEyC&q}H+Yz~$u3=%441?i~Z%bF)YBIadpuA^q*iaJPY5`uY zLY^BQBZ+-dDwnu)E=ovGXn+;$Dck1#9@@I3ablppKt=CitS53ErxTKLIjod{#1P2__e7()E^@d;2oe!>eyKw5+$ zD0t9SkcD=|2Q*G2%W;m9_p9mYVP(pd0b)cyf; z-hKaJ-j6(F4cfaP;T32V_MTm^d>`+y>BD^sG2>$I1<*1Vevv!_`Z8*Lm8Yh|dM`xY zH}5m@U5OlBnQPb9E6JIuUHp91H&<8a?y<-vCzVU6<;$FRtoF>zfBSBwQa2Hyo((E! z)VtG`I*o-Qxf!lGo&qQzVfJdz5%)e@SRJ+E88tYf3c`w2UzRM6`?Ol`SK+X=_}uuN+C%NeAHguS)$Sr_9ftZK(b*)zzimk*kL z{ifky&~-4`z1MJ(4_2Xo^te)f3svuQhW-hYr{;IDdmn8~ah|0+mZm^G`COzcMOiE0 z&syD14Q63pZ9y=u6dxL`gp85RTV$4y;vV14V#0hQ@|#>_mBv@{3(~4^WNr0R8yqS0 zhWuQi>MKs&GMhZxr1%c+D?#QIIsdtLpY_#6-d!TSrY)XjFZHGT=B%#|dpthkF_ag| zuE-Y|5h5JTYUaN_$v`KzQf@X&KmU*FMwc6?!*LQc&d+4N&Hlt|ocl2@YP`44%s0<} zH)?tK)GGxYoL0NckbTdU!w$MNFce}Xak}- ^oE8=T{liKm}WQ}6iEwMt-hAqSTE zbzg*{Qu8b!nYd~T+G@e5ADzn3T7$#PckPt0yg@%2{pb=1O^zeXsJH`Ar=1S@Lw;9k zxT7J^2XtGmV^0PDYg4apk6X!v+OcZY{NgS(MYa8Lj~*HxbLiEeh{dCZYW4kLDtOje zVm%XkG4pcwt2AW|XS-3$r432T#^pa@bITZ%u+3qzd*!A^S}rD&B~DK>@q7vYwsvyN zx{XwDTQ$zDX~BwH26y%LmG z1qmBTgIl*BcfCsAcUZ~KOHR1^upwRJeSX^sdgyYR;lXQuAHaWlt8B(3$i72vDj~wJ zP!PQv6({$P^J69vrc6lfA{;UE@0GH`Vp*IjxGsFv*e0Vbu+f@5Rn-!n0`ZHm>A=%F z#L29Q9;zU<(>`Ugo={f;><)(`+o4BXftB;6Lmr1Baihhr@P@;Qqj6D>pTs*>W(~3I zUOx;4ul8&cZ0-5)`Usyc1ISRtoappPs7p3sCC`QCu6Q_+{|rxK57AMa52B zkUPtastMN|U@eoe)ERYu2`t6YW53#OlL5Pw`JG#|%38pd^Pv0^|!vV6g}d$6PGCS#gLnii&rue3>Zd=*6zG_#gs zExs;C&uZFDxwS%5eLI@a|U zN90R&hg-(%7`>Rc3k!Sxt8Y8G*sm$jx9WW-Vn*dd0p9^;VQfF`XT3x zt42#SwH)}@nr&XxLwvS)g6~3wb$6k7#F2CCk@22+n;N{MNeYW#;1=WgOiv7T;)|3S zx!+xMfV?LHwMPuk?+HtHqkpG}lUwrmZjvSM1t0PFxk8V?h@!Bpu?2#i)EhqXgX__T*gkh)r+zRYH9cIIu==6ad^-p;aQ;Ty;(oIK|xiRrA z>?jHu^RjhRJ7TGL?LmcW=gLd@g-qjB&1`$?nt}mU9qavvPLpfsZ71Tl*8JKPe)k$X z29@+0<$N?F(&|X@YQNBJwh>l! 
zXjcbD{qjAzzOSaz^{jd|P?9RE|J%8ovQE0tr#qx|!H)UXKH2d=Tky;(&r%}$g=`)$ z5!j$R^rm|kUQmxhl`+ma)~fKXfSf8y_3la!sGciog%q?0!)$UpP;FOK#Z*$22B({V z@7GTU%}q2(xHQ$(YK?6mm*#N~+8VvgJ`97y)nZKh?FwpIZkH!|3}*E)T|he? zk+Huu8|d5^CtBM7#-O9w_e;C{q6w#rxofjPBPNiE06plF5^CA$?gH$^krRTJIuXZ~ zJx1mpupd2*pT+S?TXsRx=zc5Y+#Pw`z|Ir(YgX-XeOjzprD~X+67Zs{0>3XC6uo40 zs_xUv$t_ajTch-nY$jSN?Xpj{Nq**^B$liv$gCeGIf=mbjkJq<>T`u={n4nqoF%1e z5Aerh-Ro-A*!uHI2eX$8yhp=MsJ3`>1U@)UGzm)Y`dqSDlkve;Te#w=&(D;!0DI+| zG0ENpS|Z7bW=nfp5nFM0&}K_`RtrUWgW25=0ZhJ4rMpbDVj{x&MXfN}bqfNbdZ2B$ zphsUY&9fqTb=ouX((x_X`rMPB)NyO}Lgne=^0T>=nW~!D+GPmjnfj*gJzWOruiB(; z3LFl6a20O$1@Cw5yEL$F-UFJwnGkIXYlkXg&HXEMz3&E)+Sg#l`RGg#hghbuyXfTR z%@t{$(4z4BO(%kN#jAf^41R5d+VzTc-b&$aV=5~>)kwt?=V2vsZ|rXCnp@`U48C%> zZHW=7C-@G~1Fn_y$C~+Msa_XFQEEYLuCYH!NH`=#(8!4QTPFq?(`&m;@a3I1<*p^T z@d~$LHT;Fn~#T}~=X8;P~M7Pl?;C^cP@^^Dc zNa5d1v;LR*sx^B34lM+H8v(C^Ox$R~_`I8j5^jBMQt{qX#q>^vb89yRWPDecDS?Kj zLf04HeLkADW5?+9)B3)>5M*-dy%{`|dnHRP!m;bLVzlw#MrZmfP1jtvcDan9OJtN( z{IWM*tB18beXNy4o1`-;S){hfsU8D!)3IqY z&i;AmA;`m9m?h8?L=;A$G0$BX^^*hv`;D#{^%f4?v1WScYvbC?;YAmQm9Ufbe-J!c z#%v+cszTA-<-}X$mO20Q)XP5u4?@yi?FOZPwA>Cp()*)@6GF1HTwsFcCxU|7$tN%b zsz(%cXwDHt@FLP+DBg`KZQu?-Z(F2EI`}r%F#Ho=)l0x`uAPN~C>BCv_Ggyy)NsEp1^`;j8zP0ZzTpX+ws^cV zYL8=9R4nUmKAR%hB-Oj<(`o{J*tZVc!{oL2VeO$&*0OY0#I4+NE2=I$Ict~!10UhQ zUJL1oxE6|AYD!_La3|6t?234ja8@zA*|#dB(4AN+4Oc zRN2xv`G)A;8}nra$h4J8y}qTBT(_=-EI6N%o+RFv&BOGM`;pOncC=+2in`U!T41>~=^r+QcpHTnLw|_=7w? 
zWlqKA&ZB}%YBEdGy|><${7JIGl^#|-8& za3taRHD$ObDn{tSnQ6utf+%};mPVF8dTXtgqIP9tjMT&IdSl4jQpuEb_aTR}o;)8h zx9jxl7KKggX^N};Z`+OxZFC}amk+$4aB>;@*mjYZZ3=1`wubb!4If7Hax4T&%Yx_b zCF;L+IOXR$Pt>nKTR}b)iPw<;bzJ=ObMaa);ZxIl&kc9B===1Udcv;}4}RZg;znKK zO2QLl`mcr_x`B*lapyq&{0Yr@`Mic!;SnhkD)}%q&RQ-oQFTO-dP`PO&Z2N8JExTQ zDdkppt_h-Pj4%hj7vh*OFekc~{QAIAHfl;{n}AbHkf z9WPyJ{~gABXRMf_Ec92vJ*y8EUOR!Nrr{IGP^qGEsGlaxdpOVkeC;Z8l6W^icOP;S z1X8XG?eP$pIuT(ZJ%-) z)$^S%F$iWoGp!nH;@Uie6y3&5d(80HN?^8c^ra(2Zn4(1{c7IgPR~ z0&Hw!Sm@B0%A&F`Zyj6UbZdEd2?a*5mj)RQeNN3CIO5hO z*>i5p?NA}t5LRNE-I!W1>ilu9dR6)u z^=RM!(p#Ljwdt8Hy&zT`yCOWi9DtOiKHnZ6^hvlrZ@$AYz;lINven!nZAD`!yc4_k zSfQ5!VAEF4bFs?I<&><^E1jh`|I1PioWYDf>fz1y zMFlVgra2qX$W&jS?U7j9zHn9p*fQ&CS0^1!fq_o9#@H-g?;qbyDddk+m#`E1VZGF+ z!F<#7bnsNsFRw;Eo3D8r*ynEm;5gVtkV1tZ&}zRjr!gJkcRh5R{U4lG@v-9ox%`yY zhQXOcjC}k|v~0KtXc5ga{-s@k*Su5C$o=5%I3)g^|0M#!v^+5D=QhlY6jFQIFALio(Qx@d{6G-#MJ zDZxjD^f)T%UvFw_o@;WCcPPD4a^&@4cpDwuAu`0E9v05KV6eGJWEdIdgYW0hONC;e8onA@Fq6NU&t)9N8*SpR=F_?7v_}0B% z^a4PiN}l@hC`u|cqj=KWl{uiZF`Q_iA>6d10CL%Zi_GPIN52CgM%>8~}E`hax+MXIMCl|n0 zWL`E`W#2#fS|y28M`^~!>jJN2NIV$$Ud-|fA>Lt?^6vEEsH^-gm%Ia*1GmF$?G8H> zf9YsU^`?omqwXEw1t0?CW*X*kp^6DQ63I z|0)9WnjHvOtQIeo0X z!(`obFkO?&9}M~_{#^A1!fqedz&UNxlU5b62ycUM50s%JGu=xRA}2YA^pLjwOgR6R zDUfg@SfkbiS$rB03rVpyB8BzLLKVzXiOa#rJQ2fy#VkL;#R=eR?%65DZRh7G-3J0g zH2U35{&Zn9S(7W;FT>kR$%Z=q|Dj6@KUfC+DP`?GG&S?8E;V62;9w0#vhAqDaxNTH zw}>WNsMsL<@8W{+g|d=#9XvQ`;{8o4?J1mNx#Pe&ZH3>LytR^Ig(F*$AW_SK+7W(m z8K{(@`=3)q#t9mv16f{$ofkPu8dI(G=cu$}C>8^{*;o6ErLQg9ug{RVHhC{r2)C%3Km1*kRe<=HY(D{b7Y+d6I~ z3tI*R8FyoY3t|9>a9M;sV=?sxprJmbAUx(kX-zFC? 
zch_IWt#jE|Q=k_+&;mu1sj>s3C?*=iG;eon-M{_W3L$^bFFUn>Kf`Vv(YBje2@JSl zJ)Y-d7Zo68q}{P_VsvgQ(HntE$ryUpk1ic?>rj-(7&WgUWrs?BePEcy&T`thVP{o$ zgiK~d?Nj<=QP#XmCt-FvBUx;=U|uDE+Vh~X{ks{sdj&90+Ndn47C?XWx_xb=lFZhu zmi#;>T%sN&P|vgLDL>6os6s}(rDExO^+!(m`|;rg!r+IwV<)8;*jlo)qa@=?DI|A9}Q4X&&jOk$JL z;0F$IlMy5>jBQ(~0Yn1;9)z?PofYmFpJ{Trr?jO71ebu4&3SBPP@|D6;lVji2 zWS^O=-v92_4)Ko;!W~wdWI>&eo$z2k>SZxK)_w5Y)?J|kW-gz21j0Sr&s#ZVRXr?5 z)um#uOFuA#m-JZl@xFz2H-Tn6Ee`k^TC|~o5%69)b=xZiJ@82kB2qBU$K85X6&Zx1 z6}?7w(Ztn!tCxVl=Hvf|!e;Zzdh<~Jhc%t;c2mwaYef7h&ruxn6V%(e5_;&%?p(EF zq*@%0QDk)hU|`XnO}C>m#Mxy=C!niBAD~)hGqlXPc%cY8dZadaF@1kMFihrU%yI6i z`!v6VgJ>6XN!V}^1{d8m9OK7XT_c?$Y?B_bobl=!@V5yv;ocMPUrOqr*|x9Pzx%}7 zbHsD_2IC8|TA)}vsGhZu=1XGFfH!dhli%Wns;|eu(T8dLijWo(Z=qr&p1<^7|C^cg zYhv4kn@>kK-_A$sQi%g7Q`>#64#y&kt|K$L`tp37pkJnkOI_qf<6DvYxizbUJqQn5 zh1x@qn}~0F!RH2GvTz0%*xdZ8h03DNB$rcl9dA7ISu5JFaUDnfRO38K3%nyk;BFuv zC$lzKQObxKEgi$2ng5{}eL$HQaNd!&iv@S-giotz*;i5j1=z21hCOpBk&r0GW8~T- z&0af*&}Th2t{%~g_*{C-k-AotPO5*f(71!(YSlaQvpMUas_2TLUc1Tt6R-AYUaz=o zM+}O^9=N=EWz0X#(H{l@spkM@t65f`PVPE4U(_m=h?Q*qwvD-Vk3lImcPQz}&dIOL zW?e38CBa~=#^{M;Il-7x>R9s{_rI&)k?YIL0MwNAC~;9nC4W5u!(>X0X7YEK{(Jn?ROD{YU1 zfzDp}SypPLN^SDW6Wueq%{+0@R-$)cbwJH7yCJApE{6`6<*WA3 z(<(X0Wg@*w*Vf$WUw-F(#UJaf&c$Z65MR>GT1wa|88D!B2*jEcRU)-q2D8L|G@yK} z_?)+k6V|-~7v7e#0N^Fhu}2;Wq;@#c!EKT9^LscicydjULxa0t;ti|kXPJT!_*HT% zLh>H0o{`c$z#I5r5^$Q3N*>?ClN^%+q4ovjSV>HO~J`8IgGJNI^ak(v$rTKpIBlgDQ`bqqmGKglO9-SF&Mmw+OO!F_y+qj;_iPvHz8H}hWYX;< zLv+giWM9;b?E&6j9}Zoabc|*ZkZoXXHqf?Ls;(``llkpn8ZkRRy(oa02`4yw_w9=o zeu^-<3sS)I^O&8s=#H#A{iZx0HkG>qN;_`HSGamI6`|^4=GL*9XNWht%cO~4AyaNq zR#u7wCc>|(;c{pr0U$PJV*xRi#npn*FjZH$?Y#d+mDe^6u(e*p>Vudz)fTbJdM=|KUIse1E#u;M!dejRH- zERksfM^Qd+(5}=!U!Y)B_Ye|TzJhZ^AW7bDQDyP+@gRm6JVP`WEjeJj==7+_%ZzL@ zp$hB<2DKc_uTlN|nJ~c5jkqWv^d^L^v!NK;jUyN(TTb1{Ba+E(f24VK4-x^<^d!S% zZP$7shgCo>N9)uH#O1<|oWgj~!OuIoGEG&so=Fh0xxR)yG%?&~djHGi)*Yw8?02Y5 z6RuRSlC5TP4U@N*ft1KyWL~ao`_Ou6bb`+>`^^pXa4cu)x#=d!wFgW4gt)1@Fe)OmE 
z_U~6(+$>nW)-xFiI*cVh<*|E=mUQioj?Mh(*|GsbJQqJleZ3%@U#7^F1+b75eS9h$scE$I4FEO>5dOF{T~GE&C_w5^O)HSwK$ch&Ao3SVT*tvewz(7Z7B`*yAs z*`YRB@?J@ge$HR16b@9BJ=pDfh4)ok-X^!s@~zHmo@Q_>ZNU<;QipoIRy2yB_R_{w zY`=g?eodCx_MlyX9-^P`u%VXqIgO6Ef4wnTuiN+o+vJ||m^a(8W~H}MPY<@9rsz5~ zfBSm-`0%dUxYRlJxOE04VZoSP) z_~(~#>!{bwthxbZC(hr3&e`abR1@i`TK}DmvDgi?%+D8_iEXEDmXs)75T<|0RdE+& zGMQGUCt`|YME;HAn`oOwvLwQE~Q-9S@F z{l>(*+gkoffDb{UpWJx|fiPx~-?^Tt2Z_%N&ib}9AxsDX(Ca7JhcN#NzaT$|zAqq| zrw3>p+o)&Rr;8|@hF2Ywxyf=4TwB()1BPLCNs&^?!AE`Wl5%K#>t4VFSgn{j zP&GZeNY-7FWB> zTq0=b7RiI-m;>NRTtRq4sO3kSZ^y7!(1@9Qkwpu=u&OjVWg(S3B$Sh@!>OcJjY1J) zUCycGqh%d>kI?1xSP@>hi-wermL8lX3&q`9lb$uvb&;yZnnBM^FJ3FsH8-8W?o#5Jr?9-$j1hV|o#Z5jdN+;j=`uAT8is=eX26 zNvxkn0YUdXIqcYf0(KX;3RNS4$Gvshj?scyu7Vmi2SJ`Da)RU?ZS7U{|8a^MLXjV; zZza1l@(CQj3*kk?dNuN%kV-l-4)B%CJ9<#UqSaD=b(75!DRy4Zz_bJ@408^ed8Jql zwpvM%v~ZGbo=-TB81c%)uL6VTjK(orI5)>FgR~qa@(9R4D1l-gf8!77S3;y2t5eWseWD;}W|c{a_QQs@z@>wQ(;euhRjJ9fv&3_g^IB@s+aB%kUrNHlmBB zZTE^T+8Fk=dPUjAntEg*d!8WjcN+6~B9OXBYarK{Mh8cU4(GkJ5Bgj;CScnVLumG` zp@9RZ-=sJG>;|Y=K&qk+i4@61dDB3SHl-zzjuFJjD=DlMh~=dRAfQnYMd$JPorUMD zBE$E)HLaC~+T1BgQ8R@Dk?e6iX-?`~nHbdFMOC`gmN6I4-_m%|f33>(XjZ`khT(?x zYrUiq1?9zMeA4icB2yqqj~VZ#Bo(69)i=3`+_amF!G%j%qfiUrY)yFS;aXci$|vDx%FF0MNeEEj{*M zrxryJ(hZn#6Mq?%OVC>(WvF>P9JhmdsLp%lsP{y;EuefcOH~_79nRY@j4_+_tw&j5 zxLQ6n=!(?Ql2`bwE|5-|-^AIQ@x3bg8NO)sLzLsRVwNoc?(nr#z*Uq=5N*|p9wGdM zkxim|HoV%ojE%7!GwAuw2P&+rl6cM*(dM&Ro6Lt|_kb3z|C zUt9;of#^3POYL+hh75pe#LHRrhvGr!^8U-}s5v3k{wo>cDV2Qh3|j~zQtzd@Tj#c6 zXo!JuUNQ1k{YYu~j*_s9&?v~77{nwuP3gKS0tMDew(h;sO}K)oaL8^u8(@hPw(OR^ z3?&ZaZ>V_%&GxW#fV%D7>szFpBy}4vJHlGte(eJlbHCY4tv7t>+|nk3GJQbtnMM3j zx$gy=q_+%5`{I?(8mvY}ij0FdiHLUAyxdnyKJRXe5HqoBYw7zm>l~5~9r_1%jOt_E zZbhiU&8EQPFe_L)Ldoz3PfnDa1Er6&?FuYP4=heLDPs=I2M9~L7UZubHyPENQc8O) z&t0>cvrZhu>_2^YY@|lQW-0D?A5a=v%mzcrJ*>jkI;7Zb6YzNXG+z`b=Yjk9-I3EV z?SrZI=hR{ow_^s0TFm#{ct&T(C z!aMDYZabj2oqoPJ(tL6YbP40^-xGY)xR@!zN2&M4;i|`A)r_yS-=AOlRhxMTB#|)5l(pp zSgTkM?|o{&=78zxzpXKN=eekn11gX&vZfH=cV%b^-}87EIsBiZRnoh2A=`vYqgUzY 
z$fzfqdDU*&jhrY6S!@5ei~pcwTKuHk8cfi*ruBDiS0nFRLpSEiIUn(WQy~i6tT8J& z3d+h9ESrRID}w>m*c#<{A7U){H>_FSvv=0FNPmp9`2w@4y);2^PzpFTyMm%r>Mkq) z2$eEY*{pa8;se8R@vP0o49($)`o1LWu3Z++U^@fY@KvZ&>~h~-X}(qko~2mHGdr=6gVk15166(k zRw(cocKM7mJ8P-c_>3T8drZ|;dEm3#KNAR2=<%$KU|ZI8rP2tE#k2wMb|gWenkOqx5Sy7&+0b%QKM`_nB z7tUFBVdL$;E22^UxA8*ZYBSVIYu`Jyn_?li0XhDtbRSb#?o{mo^WflrYi_^v1z1pl z?}eE%<>)2LUFv14!VpYN&>o7w4I#9#V&q&$2SmNe0YlG001r@jQ}XB5(%E)MQ!YZP! z+9=0w%g!bvs?Q2AYCU;SJ%j9t4GZneY6?T&;oUEl()z?3>)rf4LA4K5AWe6GdiqGz z$-;dtrm{*HaQ0cJNQuvy-UCJ{@{8ZOj%L=S^ms=ho@YxJM@McugM0!zcPn-C?SN3Y zU5fB1b0(0FX7UkA63w`GED~30rJ=T~!AfFI4Ooq8@+y3}F1GC%)F;M#&qML3hkYVhhnqF(_bC!4UiEzi+2U)cq@vt?Kd7`zN8gccCfv3i^A+R+ zxA`goa80UbV3{9x_-55TAPx@hyNG z^+&*35vP(*?H>xAUr&JFFd+3E5V~;47B0ETjehhmw)JE}9SY^zF9Bq4M1z!_GL`?V zL2qyoZ`5}*F}gk&QAKjkNzWR~J?u5TBNJ$-!YBuR>UF|7ve~gp5Vdr{=bdM~X2xi_ zsU??|%YAn1ZiEi=W=t)_NY^J)JUZN&g9p8@aEFm=MbY0Ep`3)fmZl7z51^TGV{KCO zxlLuJ*Ye1zIm10EGxL{90Uq#EHsaY~-PV`|tCtW58F(4H;B3x{CAkOL2zfZ-!ytnR zRaGtDubpFCd1t+&k_Ywh3tli0hG90v6*q9HN7)zh$)`t4PD8JiILG^DFWLFHVu)aR zuM}5r{qN`ReNMHGF>Fi`3se!Y)WC(DI~>MmP!yIVIR!6w_ChtieFm%;V$nu0*}@RF zB=i0PjLDZ~SAse7FBn;gRq#jV0M%2$Q~p&UV||E^I}&wr0Lyg9r07J#?+l`bngG%g zV*pbtT;VY$xed2zDTYk*sZ#4;z>_OlzAAgf(nuS**Gb%P+b=>nD(zd#Ms<y|3BYCg?4-`sF0DnnEmufh&it58u zv;Gmq-1f^>JLem|PTHOZ7uo#Eu-KFXSK-()$$J!RXt&1Z<2R3W*$@JxJ6hc$6}+x+ z?P8XY1xae`I@Tqtywi;7WMunjlga|?zLL7IDQ{z^8ut|_fqV1P#e=_*)$&Q>v?^~c zvtMi#%zMO|+ZcUy+a;g_?!Hv>8-8E($5B3mL6Amfuun^b!7>`E3K4J-dsjb9xPBZG z6#p_+HthFZp#JlcK#+nXMi_qP8!#SVyZ2UTo__K z=^h*SPvC06zBlG0f>tW z)J@ksIm`T9Z2{Cbh?F_Tt0qB|qO~uqDielhLQxW(sdc^}L8^Trtty?}GH(}O5y*1s z-KBHpom_X+EKo%tTQ3(h90jxKUIA}k%07~km|bWvzw5OEq2-KuP=&IUH^1$hl=B`1 zs=~Ae&e;9=XsgL8#E@x;Sn7^6+MnsQFtf?Gb5Rew<#H3zxAUz+X4IcV@UNO#N9!4J z_t5Hx?`0(Fm;D?D2Z!N2)b@Fcv=&c#&{$+P5w&NJw&IFzRvh_7u8CcK1)(g2D}4%Z ziU`RP6mh?Hf}tlD>jt7H|2?A1FLj0M;6u#nqxRfK)9&$B!p0sQUYhTZ(jiCTMX7nq z8;zxi6RxAj9d#(7@zpW&(a5Ej5f(k6k~NYb*xP=&e9++qe!!$1F?^zR*#Zr(acNH^ z;ud50TH{?5quFtzYaS-rxU}j;8Xrk;qVtv~ni*1#3GQ>-;8!pV`SVUx0!6Bq?6-s; 
zDY8)lX3-WQBWWO)_4=(su~4wcyhlVc%r0W)9P#dIX!qKriB5alY9q|5$p&$&jtnNY zRtnc84}>=KWMY^HnMlRP%P{$6r(;n&5CgM}Jw&&xpS>#XI7bgnN?4gwqh%v_OW0Y) zE=4YRH_G(D`0l!tsPT>lTx5&L@uT-URRo4^nhyh3ncVNmi<9DOYxNR(2VvwJ^)=xPZ0{icozcvRZi7> zhbjc&V(k3d-Z(2wZGft-J+eFSfyj?*yx*jbu1Irx%lv!L=xz#JiDWsiTp?LhjSHAX z44`RA^skBZ=BR@|p^ zDWxbW4m9Nwh?S+BFD9q5GLnqO*ieBxARZwuJq(*e#3dUsvasGiAh6FvzemwxvYcHI z&(g>oDHqa%lrD+Gf)HD~2z-vXjRk#s6zu=O#ZbcQ!@7$bBSm)JW6EH0FdoxB#+Xvx zspt#Yh4Q}#=M{OaU^)X(ZW7nTV7FR6pcS!C*U9UIq-c!r-*%QaYw3HPHBWv6**ym| z)^1=C{Z$2%{al5mM&~FUW{4xbw)-3$tC=^G6E>St8#k{{Pi z%}s!r%VT*&rNdtAksou-sQR`O!5@|u$R+IraWQsd_{gI(ivc^nWx>WbrG=yKkQWJB zg-9RI|9nI!)OlN>3NfhMt#=eQ1Pv`)6z+qES^$54@IH2JkXsi{#Kr{pWq0uL=44Hf zr3&Rdzm5jeh?|IS&{)en=(A(_w`4k{Z6N3$t!~FD+Uvff;rGa-i_$72G|3 zFw4pnCpH@gg=m4DiQzcr2U;J}^zyakO_vB^bs6PZTL*7T2~K~qg%a^;1`53bV^TpxCkIit zc8~L^6DE$Fc`t$Xxi{*B!&+O)@yMbA4O191<4SYt)-XbF5%yO}i$ z{A3=YkwjbDz^cqHvuP*Tr8gYHu~Xr1lbbH zeCMN0;_lHuq%M5QL7jE{WqkbehvMdg|m6?k1L{QS-tLe{9yn$8M$_#$;iP6nG1Fw+;Sp$P0jO8);MmmDJ}JW=(zg6~oAj40cr9J%6>54(kR<>!lU z%9WRZ9l`*ShtLX53a{n%$3HAt9YLXfX3g1Q3i^25Dy6 z+#4**8Q3Oiw-4zZw(a8+CO_dtG$>1`#qwQPy&Uh48|#Qxa)Cu6?ydRO?F@La4eatt z^yTQ0i-H4cNqbQve^Uiuz_RFh4EG1bS4lp5PX~~(&TldC@6_k+*bKD9%!}3IeHvo) z1pbNmFSh=uwrZWr)kF;=fSJCDL_4GA{qe8U2qJj>kC;0@U+5mx4zqLnBizwEr7l*H z>c~`|JBQ@oV88CVa~u=YsZe!37Q$B~2DNA;*$9r8RDJSA?MjBmvv;f|GC!{S%Opqc zxeM4C0E;QJ(NNz|5wGmq<^{z0oL#~Xs5I|mK1u4VLE}c-G2ki3qV2$LD!yT_ImCJt zF_EoAqEN4G0Oz%eP&7!!r5!4i(I~kodkR&5D|v?=kUh4uy=JXN_NXXw+JS*gZxhfg zheFBYd0^L^vmxFc?dc9qU$kd*&gnspj?hhnl-vV+g1Pcu3tHsyz65HUzfF77S+@pY zr*aU_U!|x=L`x1*7o92U$@P@+CCA$r;a4E+_lnBL(%;}W`{sgaQ*Z(8l>ZGp`uC|r zaL!=eILrNh6NZU*6yumi04W%;m(z!KNH@e5Gz&0J)bLTpIcy$GVifrCEA-lv zKfwH&R^YagZWj2SHd|K-;b;?j?LcmhjN4OW5{;32k6sR9Q!#>06t$IHgix)75{Z=u z$^#p;Yof&Mi_$O{gDbSBO~uVxT#<(zn3_o){@YO9=lnJAz>w_%Gr$`RH0q&2`+tgH z?-a0b_W=RFH(K8)3+cYQ$$wnsHbVVxu?CWixhKda&G**iyOW8a1cH*#>Vp77lXho_Pf1p0(E0!*4Zf#Z#5D=&47-L7d@u_)@l3q&yk*z<+1#5! 
z>Kj5FSfr{4$7i+3F3?^kkso=yqP4+X;JmA>5<9eA7K*%rZ0(woP?K2Qa@ zel$EHZ|U(tv&=>2`kj_aBkqaw4qzBuD`psh zV7{e``k90>`19fjarsN$!(P%?Yn&IDL}Sqe^9eS;&qq8(fUA=13DG%_Rq&T?VUqY| z&GZ&ppRD=uY(cg9>YB%A5Yyd%k*?P^5tO>PYQ}2-d=!hpsPWQvSM7zQjx3MJy$!Vt z;0m(!^bONg0}+>p`50SZGMvq9HZ0XGmLbMV-h0fWpNZ(QkeSnrjEic|S?b8d*jRGOMm+h;9}>% zU!7`XMsJUXBv}ljmQ+B=P6uZ&ci^BB)>_pA*pV}&=Nl(~X!p)6$^7sWs++Jd7`+OhsvF2E)TXOz{u}^uA#@LySJ+Ap!D&0etvw5nlp@-d z_%kI~?eTl){a`L<_-O66AVeJRJ?vPX^3mUh_H3O5-ml#+Qj;|<&mtaJ1i0oPrDdL5 z8ns73Tp3!cr{D=0*0Q5&z+FLL_CC>m#Z&5AE2}e<3}*(keP#jQit*{=@NI57R34O% z5So|#8|2oAcfXLL%XoPhTGWh^zJNINF9&J_&)aPEz@E2J>ZO&`@BeG>+W%tC_y3ew zVX;}QY&KR6-=x|`wMB__X}U;vr5oK=sdmelnsl}2iz8dQNyF4}Z$=YcRMN%n**((L zWiVESOwpB6NSW{RHTmw@KVs(>9?aBy-kntjy!hEu%As;+Qj za+UI zgc&azY}0z-Nc6zWfQ$3<%fNNJb?h6qhwi>vJGvJ@#`jYzyB_tEDQ5yiE{bCh#I=Vt z9mg$hzUC60o!XURc0LOF(5yPi2^rg|Rt2i;#uo6$JBr}&Eg}eR1cIJ>9V&_KNGhx( zlAGasReR1n0zr|T>=fnpM}vn*C%X`C>cdInFauD`2;ai^#50DAv!s$X%z zt<}_X%+Cb>_AUko`%QF{ehvIOA?Hu-?J;DYgbRR70QuJI&o2G>+f&+~ULzS)hcdKB z=ds9prGX-ncLXk)ktkrNP09*T@dX-qo(k~GCuX&Y94BQh$A?YrwDII{Y6up8-Q@t zc#@n1?0{2pn$fGq9hH!LZk|7#!Rse8tVVfymTC+WZj?ebL4OV}f$sNXmeZ|&-086t6{7@8})i%+F z1gPM2Qg552KqxpcUvaP2zeUk-%<2?Lx%VxbIjpLC6#@V-m9tEwehR?_zsQ0E7pxod z-(zUL$!(P49%GOzxV8dREbc&fwXDj(!_=`}4mpy@=j z0RUyzehOW52K!|X&hJ)Tvw){LssUon8(Y#GXLEsej!7VGa1Mjuf{OfY1I8Im`y#Sj z-3kQQ3uA{Et)|#f;TjQ8J*z__7)x<=E-Fi15b$5HL+O=Gjbu>PM11+j6p@bvmoe&UW)eAFj>r4JkGQ=@9W7af}Sq?89 z+HIXqiZzz1jc2UL>jprUu4FX-dMcQL9_k)z+bYo~Rpw%%Si^#hL6gIM8vG%a(j{WY z*fnYYcr$L<=@cI{%T~5#<}2s}#wJKB>Io4~!-H07PG&4A(l}6YWZHDL?Mn7SRV#~P z!64>602IrkcQtr9TVs#O#YhxC)7+v~gVAwUJ;||W8Q9zzkji35M{Qwlv{vK3L{QDL zdLSpkDB_jp{>iFuqW(-J%W$#BnM2PVhBp5;m;TCZR9>}FY=7c-;jNu`~B z)Pe{=Ir{S&1e&uyNo>DyYLijI*&d%L!)qdx?o1CujGMKYP+?5lPg2E)APsB+B`UMa zf@2%Icm3x`iq0-m^!q)~@&%V4eg^c!?_WVl)e|0-M$;xfys0e$k9Nh#O=t05QbtI; zojG$7ulqw(CZTrz-d!FpdBG?)%zz2Q7!sp^rZ>6Wt6FXnEy-Pwp}x&8DFks*^cBhg zf3?ZgnDw<(3vPY`5pG|;d+a}O+c?IURT%==nQr}_5>sg(r|V|-xPJHdL#%$XsXDy? 
zQl1t%YRG4O8{-rI6;u-v%pmQ}X0#^IrXekzNDQfR2%_}`H_u&X-DC$euQn{H4)8X6 zp1)s6(`d#J{(2*@aSW1d6}B{Bx$Y~(6+0o8xNc`QD;`CJ8@U-q;90!e0oSedsgq=E zPnp?tid&K>NI9x%(lp3leEOXI9sPCd34HU|FbEd2WN}`%iRUt z`)(id?y>GEhHtf{%^+NTe?P^n;Gk<*e&{oE7P_Av9FHOz4PeAH=GSPr52dUrsygxq zN;93I@oH35-P%6Zk&Ed{84SOdY{m-`^Ea0S$S?-`VZMN*(A<>rGa^%X zm~udpMMsPBd%_%mtzC)+%x#mui^U{ar-Tdv9sQodtKooT)_$4MMQCukt$x9FdtZ~E zipXK`foE!?O@ z(T|hb+>)mH`ANZLdg7b2n!i0=bB9H|4OQxxuaj#f)!y@b3v|~rhf4W1WQr?y^~0~j zD=}#op5P+Eb3e#y-g3V zp=y^6K9lwb|4`iV4wh-jN-{98SFH`WkygApH&`KmBSmdFBhmL5!yUg1la~S`j|XP& zTA2a`{!bXs-Vl2}j{{@l>#_iNu zjiMgR`f<5G+hi9DL-=9GZ4(e;V6*AjsC`EYn*`ImRp)RMJ<|B3*cs1Lcu2tSh|v2g zUi;a}UR;#fv)75G2fRSv2@yfhl_Jc2a7K|6CZSS6_cB>d6q1dB5tsC_0k~;o(*E}+ z$-rA4%mX5=o$CvvM+AGywHg>GEhjf_5s?ysj#aAdQcU^bWg18FiVxPvvbXP@K3&lc zk}f3tRv8j(foH(cjW^{>0dBUCtW(he6*3lFFq*we;(SLw2)XRY$#z@i1NJ7;Gr#7n zLiOcl+8*uSb}AG(+MA5*kXi56?A9PPr{hy_ru6cATrbUmY~SpUbc=e@lR~3<{^oH$ zl@+_hlyHZ*S3nrh-re2r-lYJZ%Ab2BlYrnJ89s#xQy3E%0fZmg8j9T% z9=c@B%mE}^hrB6Xo!G2}R7&3aWlZbC%L5^prgH(YhkteBebW6p85dz5fuW;#|7>$> z$(lr^0cZvdIyZ|`rW8E2SW2wr%F@>L!2S5OnD5u~DmvXpY+o0~V#+*dlPZ#9q2h|5 zW75v@BC*G)7Qgz&iAoK$fQpzsLs)4Fw8(|nsENY%dg)1$MlSqj=Z1pz+}fiScjvo% zT^|365d>KUoiHCD+;_}X<{^QEkDRPojk!bSdg5h|Iiy`{8)%DcSa~+Y$C*>~u2!d& z4W7I%>(%KvyNB4Ly{qH6cO5so($t&n{F^k%Yy|DYeQp&Oxgpc0yD8_cw$Hbi<|ZWX zc=*)GuRe>ZIE$!j!81xbc8@Ndm&PMu`-W?ZVHre^bv&;=r`8Ku%O4LnH~GkjT)??G ze)!|Lak@U@kTB*s7_>GoTd%a1hithdS{!$y2quh<%Gl1dXZ|2dR5 z3KCO$z8Bwet^W18Di&N59BO<8IoAyeq-*@$CsqW3y1VNoNh7hCpWB$E`vN zD-V|zbrF=s@lvuy=nHn1k4tn{` zZ|Z4Ym0p z)Fl05aNLQWS8~DewC5o%e?_c-bmSr|j5u5x>x&`QWe-C{SphXMLFq#zasm#L6Epn> z+UaA@$Y0YXh#hbPB(={A4U&jly5YBCH-8lqu!0q+{s7fw&)96no&i+x`mR-Ay-3JP zrCyJEb%xblAlMbgOj!(`ItRrD-A|x#Mud2zH_`;Uz#`Ur!8owT|VL~cUAu->g=gd^|o4|Bv)Nq;$Ye(5&t&9F}uuy z>8pVVkWp!Xn8dx9ZI*Y0O$d0s-J|VSBo;=bB~cJs(*H+o1MF{o^HpGxEqWU2{iPAPz|EbST9KjP!rga>{=vF}FpI3Q2O z>+!KHizcYrxYHx;joL~5c3<^r0#YPRa9ZACoxfX94qnIwlDq`W`Z65G1Oj}pF^$l! 
zg=7Yu8=>qn90RH6N~KE{NAu56=-g-EF5u#yB!Ccu)pE?S_Inm&ME0X=kkEw1@*Yfp zUF*FPW)hi()R4Zh2+6hz42$CRve(*(B&DOPqjU-eF*;s^+jJgikxN7z`aB_xH`5*t z6GUOs4Q68abwRU+59TcKSg;mCjE0@ju~v?{&q)BL^WcI=$R%rH8O92f>JHHHxJVU2 z4|p&x<~8z|{{uN_f&4`KNuHF*TNCKIVe`OXKFak#6uEWQW@;m|HLv;ge=uKG`t*BzE$Jz$t z-3f#+$Q!Qmk`CL|^snPtkk90~^?d}rt0zq^(U}*ZE-7ghghV&3=UFPk(yKTq zF^Z^lRESe*KV{UOz|&3KDe!4T7!B+(VFq5iS&?M%>gr{J;m17fGh7C8MA z|H)CAyw1iD@&l#YS*{a9gD8KOCoceR=}0nkOeM=1nY^&c_o6~O{(x4RJk%1+sK#3} zur_3Dr^k|FzKUTe=z;%#Fa61?q?N{6=qDA*CpzTbg&>b4{;3vjz^Oe_6&2K<@6VZP z2ljU%HbhyYQN#4`seai|H~snF_)CzIBBCoOMbrRgW2OwlUSJr1eGkK`_V0iB?}Gfj w8vk7jAQyk{jlV3%->dPL#b8*i6$<5Z+zpc1kbNw#KERF)ztYlX54S+mT@ zQVNkJ!ab2>OR_IF-{tq^O#25?)!RQ*X#A%Ue~#-aY&hC;o604 zY-}7V`xQ0W*f@RJ*cRn}`we~Nm1Yh8wbN;zp3@Qg<4!JSr;f3ynK?O}w0AmbWxnpr zu~Vn5?Co}Jli0Rn%eoUzP7bGKL`7}?{Dy7zrz}OQMDptJBJ2+P^-r_0Eit42&5Kuf zV8u3%jZH;ykJj0U{x+Ah4Wp9^v%7+KmwdbZ>lF@cc3K)*yZ1Q!#+TB#RVO8@agU*4 zy^M8AOh8LwL&sQWe4n7f$zz+>3oq15S-&}>bY6z#{(pOVO=vGWeW+38)V~5NoF|oQ znCBR2hV6acCaz(Jb#!#N@HYPbPyOQ;Q+$8_BL4e)8oTfOe|~*G=f67s`Bk<$@AK0C z{PKD6@7FE<=a<*1efz(c{_C>*Z$bXL8u;`77UX{m@}Cv>Uk&-&R`}n7{BJ@2Sb_hq zwtL-}F`q3wT5h~`M`McNsuaVV)WO4l{Lr8OKCh~m>tOco{rmDa_Ql4Gn3JEsDxK$? 
z8Xvz|^}vBXx9|S^#y|dWCrDLQ^(`sVxFt3!%l^lA{rPW= zHS-rNP99t)$$fiN?$59O_;dg<$RQjOM>m6fI2waNVIfS=Z=*}i@IdMPQX zQ>RYdyK`qHb;y64d+YFvuRAed~wmL@V_3Q7c1aGR1RS7;(dv70# zh0^ik$B!O8s;H!-B&SZaDclF|aI=WW)zQADihSoj^R|5F!&+J^Ox=g}xet}Cx*i?P zj|vn}?$6yc9v{2r@5lJ%`H;}i@}Iw6WZqL3R~vsg<^-e2W5tRUif6J-1;E12J85Ah zM0ApU1rCIk_BUt7?7PZUdtWX5X=P>NVC{a+S)IRCk+t!4b#=FPx!5He=4@-pw&lKf z@gi%%EU2!i9JRDmHZwDObfR{B=obCui&ky(%d%-I*Dt}vZJHpi^yG4tjf8i?)=~B(24V0TlALp zHKtrm$!-z;{P}aQ8SvMgU5AV zZMTVu$s;`5&~3(R@wgHO9pf$kPR;K8q>lnGZe725;i5%-ZY%H$*X_d}V-CkCr8~SW znc};6Zqwfx+S+SDK|y+1*4mFwHsb?s|NV`w+eaVv;lm$vQVdoK2ndYIEgs$f_sT22 zj-HaQ@6SK~d@kxM{I>mBcuY#AE(&v*uFL;kkU5@KhKR&qb6t;&MEAkBvk8SpM)InK ziC@-E(rFmqT)*L;du7F{Rmw3+{=KaBbSIQP92P~FVWAN?V>$2jN!m-Td8a!&0~G?+ z>^NQ;vGcU~E`=r=hDjI+0(`_&Ud=}vF&k1>f?`VCnqOItgo+6`_JIfiNn8tx~z>jee}F=xPL#t8cV(^O>fpK_VjT7^y`A5a&G(5zO-V# zBlizpZb;JA%~L;g=+H=i^Nz&|GoPeKUha*#bLS4%{gepN;+gGyuJ5j&of)dIz^P$x zTDfv%Sx4Dbi*mm;V%GKXxjpy8?_t9@Rth~RO|RhNc6D`imJh$TDH(S6G9sUchmW-Ex{ckFJ6JT`5wPoHyw20MOEOL$mfke=bmejQWu8!2 zsyQ%$pRcc&MR|R5)EDpPq0nYw;VbI$lN(vbtS(l?BakXn{m)p73OvVqHchEiAEx-a z4KptE9y;T(ge|KpN4D#>m%Syc^9lti$^CbO2nJ<8SU{ zyAPk9lk!^f%X`7hq6Oz$KG%57X6ih7D>+iO%CvN6cLZln+Zn@@kHeGQ(G^1#{LvW> zxg2tzewjZs96cv{R7k0va*sE%~vF?AVX(L)TBWA zkOFgL0v|;rtB7jw-Mv0k8nNZ$H7oqMrM6w-;NB!br~1o(?nE7J8fVjprV3x^HXdN? 
z(k7AbGRVif>x>`HigIVUzXfV^x>HZxM1N+DIaXTCb7tafcchDO|Kx|5KqbJsj)LLp z1I-cCfXtp;Dnr=~PPM^rpLr*!aQW8@3o057?U}XEsqJ5UyDtr z_PP^G;^+1;Pq12ky8lMik}uyM{%d#Fx4sscukP);D*f)-Uh}8dzOT)+ta)0!ZBFv( zvuBrQW@bLTyt{X#w?V{nb~w7?hR~6Y_u=*dIql~}dicC1pPSxu8EjAQj&i$y^!>68 zN~Jbwg`rMWw=JXAtTH)E74O4>EIiZ??S*bmgY9zFvcoTOr+Skt-aWm}?$&nZjcxjP zyIaM!$Nid}&3`NQcz5vyq>1%bQdN*@b=X^gA9d^s(N#(_DO`T(i9p8m$Hx^HmdLN} zj`s3IMcHVii(_=t_>BII(9lMw{s}AwJ<9$0?-P?!uQ0I3x-LDjP1esm?)XYgstkKC z+(8X}+URtZ1POCpuK&lIke}yx^7B}WP1@Ptep=9RSuo0XL!j(-oKTy3(H%Q>cn)1P zEw%R?4v+0SUKO@-gK|J}t;w=wQVjRg$tHy|X3W;pHU6B3r0-UGh~|H|C*A+>$?`^q z$9UV>k9pmRt*id=OwRXmF0I&A*X(^q9vt#VRm*5$@}m@P77|j?n;WqA;zki|_-Lx3v2>NO)8UdSWqywp zofUy;XT1LT;)zPTdBT%lyl5#dFw8!=rh?bSs7@kylUkzAAZikQ>gA0tZ)|g3W3wbS zxb0Quaa4R{`$p1?neZSgap?aLLA3(TOUK`xzgT*ml?MuIL+>=629;UPMs?YtQtQz+ zJrrmjubCnCKD_x;1iJ*>{`n}cMbPVD+ikUCzEW36(P5zcZA_Lx?^-PT=@;wKtu@!a z-@Cl=%9ShTCEn~pZx;RI)15Ea-q)71Z@8FmFAA24|2iUdchfLQD zW zl__$Q?}F2Zt9LaGDWa0v(h51Cb}OM2mrULD;aiJCqurZ#^jXmFiw?MwSf3^wGSg-)K*c$0Qg6TL_I0XWPCh zKeTH=tKt=wI&0r<9<5hC{GO}TJD_G)C=W|{dQqVxry6=b_M4=wx#eI65{~Uw-t`mZQi<7E%!bCIsp4a zCvQCC*z=%9Y(9gos1R)pjcX5eYK^|wD!<&Pni?u<%2y*leG-*=)7yLymWsDN{Cqoi z?i`vKt`TI)l|@8EP{ShBKUA_vw6x^nqTr8SsLx6d4n=mv-88v^r?e?D$DUDCi`sY= zO~ZU@q(`M`3@Gm0_(xvUqbmtV9&9px14{IPQS5avTym?4$7n;WO-_q-(rWP&8ZW}7 zM3n;hc+q;*bF=Y$#Ew3_rdM#*(QmDUFqVlk)waFBmFLWBv%^M4JgEDv8>^MUEIMyl z1gNxJXpyjQFUWltbf5!kbeY?}K+GWfq&{Oq`cFik&`B#(IC!KV20Xw6u?|LIx>P*V zB`Rjy^2mF%prBx)A1}fj2q!BOlv#l?U(|n*H{MjsqZF;Im*sRs(Vx zKr3H$;Tw)Cf$~$Eqp^68v^2; zq+0_$M`Bcco5P1Q*f!&Qt=#(nuc2EFxrmRU z`KgXaDR_BC;D?5Q)|F5Ytha9x+K#h!N?{O7j1ts11a`377A#Hs2k^;>(KIh!@D%o@ z{-WuRVoo1kS`T8oMLdd%G~0g0_~{jZZtBf0?%+*FHY`Xw6Ud zMW@*`PCqMZa&6a)f&f6Y3J3XqN%W##O?~~_(Hybzkj({+Ap6wGrtD_*9XD;m~4GowwyQE`9RU`-#Wa z$ZF-m_QDMveT=*Jza z_1ZmLap2hY?ptCO%NxI0%(EJgn~K(rfiIgx_yzn1I<~Z3uiw0>%ji^8e8RDTbMZ~j zZVQly+Q%+~Pphib@;y%nMMlalzq6_4C!)q5i|1x@L25gHUBG`Zf9&Jq!2pXuIZ5bG ziG}qmRuNNCr3?O7M%Wfpw>`Z5R!Dt~`? 
zJV%co2VOXS^w=>04yObWj-A+vMpKc2R6tOmBe|N+rPcyMT_*5Up-8BVR%S!Z8Ordv&A{Y5`x070?b=3)#h|WcI>r< zN)wOANUfOr#HSkr&0x;4qY%38pG@;lL4&g@yx06iDL-HJ65eB@&{*yGgz-d(@f3}XNg_8I1&S0FLIp0EpxWj3c4j*G4{MYV-{E{;8rC1{V9e!DR?C*!jxO#zecYI8Ms(~pQ z*NZTTB)dy*-Ijk{%4%DJXj{FiME!d{pg9MG+ZXNZ3G-x5m8nU!t-!k!YI_VMrP7De zx4*}eTa|06?%#X;`?sH-ED6MJQ``PW9n#3jer+;UO^y zFwws`)3U-c+Jg(#^9q(x$5!tqp1{#|>VGYk-F!U!HRzZHIGI0GwYX1=?9cCC+~(x>nsLm_%Kz5a z5F%Uw2;k6EUs^>4zZcKx?rMMF@Y0qnn+>9(qNL=3ZW9z#F7{-aLw~sH=jUe-Ez(?I zUpT5$0E`Ku;de{QzJ0vf9(yqc)sAF@4|Q<|sPYoKB%&QzV*rVDuweKQ=}?3PCQy(p z0P53Ebye!&>4DfP;lYX9wMws*vg5&bTwcs8qlgbrstgOOLO<2+K5gHNN^kM`-7}ID zZpnL`!vhqdR+OZh76ABkqUDj*$->dbTHv48_Qg!|Xpgak+8qg!(3#2}S-t82NDm+p z=zUfADiPgOKZrldI8EZVE#ld&r=CmqC1;vbhiv5I=rIwg>1{}|z+w+o?;0!xH|PMr z^8b1fS1F|Hdq6Fq%NrCF6rfF6Jl~q*XPe!;iCB`H+n67p+`0PZ)2Vk0GD&$U9B;om z&hS!zV9B-P_zRZSjMpGsjJ`KBFuVmTZjfWg92ic?nP4+-e6Z8DM`S(C_N4Iz5=@m1#ea7Z3Cvmc?GKAD} zQa!Or_TAiOY(71Z6NsH5MSzjSx$Ppc21?l4`W{Q>^}HiK!(e`Nx!&LSq3y8?Xuce( z^3Bi8n-H^!v6oCWTSoKVG&-eHIqvV9`>AA+MSc9?u13Iot{S;3_CZw)O!qz?PwMJV>;SyQt711(usPgn00vC zT!6yt#MA2P)DcICQQANR0C7yNO4u}NO-@9~xs`*}ty1Q5J^Jq5yZRDu#%(bRC0Wof z7WhY>){ZgRYMAXOH}-nNH%s_<=6~}YiQln(qTnEB-`}>heq?!UV?Lu#Go!oPxyg7{ z1@Q5>yde&h?B}N&2=JAGLWmTqU*+1`p{y`_W6%Ml=Vpdubkj^4!m<*%L``_mkX&Rzo6}D=XQ=Ww zLEoErrtZ1?LDV^G)Jo5^F{p-R;3fJW9cfi(-Y)TquYHjax>#ZErt%q$wuHy_uU@jv zpv7N#`@~0(!v}gzTL*Nw#4MlV@NC4b>NdVLHK`w2q`dvvIs3e>(5!6HFPhj(2U)zQ zaK+Wlmk5!enT$SwU!>CaEcJQf6o7jtYM#H>+%!X|Pd{pvsk_+hOt(S>EQ6#r090|% z$*i;Jw5)2Hl$tLL_n#ugIAlx>a+NJ7M(=HEQ_9-=_X>Rdd~h(c_2SOsp({_Y0IC1_U{O)Rl%t)yR3M+ zZLkB3J^3PgFdgMq*X~F$^HSf<+qR{K$YF!%M3GL;`nbTw)~z-$^u$iRd%Ak(siTkV z8S>TAoqj}1G=Dhu#MLsz__5^2{JzxHV&;lMg~sHciUhxX2>O%G{2&Ld3+ENyC#-XgZ+}|3K!n50c<|S{0%x2uv4Dz7KaSU8Y=b zE!_$|uM_Atz;yQWO^Jmq&98S27I0SZxt)YGH(#jl_R}DN1JuvE2qfS7%p_hwY9y7O zhJ`+FUQ)dmj5=#h4^SxVqYsE%y7bJaCpJ8KS$9b*CHT!M!=t?oLVde{`lTm+yU>YG zt){P6TMpT;0}GlM(jJ~cbqc4|I0fdxR^xnePZZdSPj8>}i68wiT%!;OLHB!lDh7*u zZ1#`0lmq>(-s`j2AAhGH@YaEQ8fCT<9U|2+`qD^s*v`|(dGI+Mu_5no;jy!T<3qGq 
zr%$iu&F4bk14j^V-CaC(dr80{f=G{f#lbNTkk_H-rnB2qQOm6_KCwOVEJ(l+o2~*f zAwQUcP}tJm(DTy1zQk6x6?`g*HOiIG0uNfUby@=v>P6*>rkagTD|HV zdcs5}|J-U_V`uG@ZvqNtK0iNfU@&!|W*Chgj7hb}BrYz_5#6W)Sou~)6r6C!vX#1} z3bR9jH%9$@$*;Ca&D%t~^}&`$4i=zHSNP{9D##aIJAMu=xbwx18b6edxF5G`_7HjD zw#)rKZhRJr9Or_?eqgIdw#s+$|`=q)mXyh1C7{xaH-wc6}my z!G~lL8Z*?bD~Ge)Azuel?}=j+wxh{QcSoGoTzfDNB$1$v=9KZq^&P0J{#1M7e9|1hf%nilr#>(nQDn9BhDoX zGY)Z0-O#V6#|A9O@CK>Un#v`I)#^cH=!mP~b%JfAr?7go7y%+2wFm5_k~$w@wb z|Lip_H>!Q`Q1=MOb2fN_etYV=c{!0{z1)ww9}s~-nG&Az`zs;PjDlY9@Bw#3U`pD2fXE} zA+lwQACafo=|O(u$7lQrF2J!~hT~5sVFEvAF*7m5;;e2jpv+Xu^d;-TX03BhG03{| z{oYITUM$dq0#R>0(@X$m$-ahgQvtjcEdE_Lu*((b?`7nPI@Kf!!{o(oN@;BYB-YGt zg5hmqcS04MoE&1u`oqfm@QTH&CLZB^k!XPeB*M~1uw+3W9h;Bc+?Qk5W)(QyZEmK)j^u7pDB2VFy%7eiJ9hQwJitNQ7xikWs8-J9@xeFPw6ad8nnDa7uED+BtjdMV6 zCtwhp({kW-ODBdpE#8(~)Q+;++cQ?6wtzA!)JMBK$6JWpLAFn4cuw!D2;ePV)tZB@ ze@oiY9Nl6O8OEF1Iw3Azg>5VYQf3gN4+dK8{cA3F3veXMe4wWRvuC=ng~AQM^(4es zK(X@tbovd|7y>=k;)3-OFiW$^n3A6E&$K|FWp5&Hj}PAITIRle&i#00l|Il4uOoa* zZ}yA6h4QQw=XCsw!}H_t%WI@T#4#ps$+~8o&pkH+uv3Qq9^q6)^mbr25KwgXo5 zuu^qEJ?c;_x48lYL2=KXJwP-9i+8@eMl$+JlrW*3In<2~OND9uEr!`!-hM|(C~DRL z@>3s9jZd?jAN6SYT5za|$r>*{v>=5J2;i5|;!ZWq0U-NAOG#4llIM2AhI-C4o`5b! 
z&OznaW#8P{sXnhy&5L*SFQp1y;ykhC~3W#k5V zqhsuiec;t?`SGg5)zJzHR^RGE2O81C5*}?nn*)VA8j~d=zy_;QLgWA zQw-;#2eCzRDT>CMVVkV2ksUsmhf***MfR+4|C^VjD9!yEtqas|`Hw1Ig;nz*F;Ali+M;jiPA-BYx6rv9x22LMacTxh!cS7c%GtS}= zk{>&$eWmQ=ZrNpn%|=8{AUOx2BEl0D-}~?ceLDAA18*JX#q^v08(?M=jJerWgk%vY z+K#iGq*}vuJ*wu7)2Ezb=D>n^cSw_{#UuTB_xM-w0${z6e&|cCOzO%Wb&yq>(4L|5q=e$A5&lkW%%j!MSB;UV%90S3J zsIsZ8}WKf@PHJCM)UIax6V3E9-!R!_N8` zvpC#3ZqA|+y2@Z7aW>tS?XrHmjFzi1EH{7v_2}4*q7U` zQ%-H)D&rLv6#@Pr$I1XwNv$z=3p3$Uj$gQwZcm{$^1vu5CgLG?a?6Gd8_u-5cISS$ zch$T#*U_=fxk+0{uNEMbJ35oV?hxqWMv&~+fVX^0vt`HvTL`oz!5mq6ejYvA>=^LU*(ZoNhIv4 z=(zz%b9y=^{x0cNM;fN+f3bzmSJnKkIq&LmfaXlt1Vvn`CrsXBN^94|F$-Ey>``gd z>l;CyRKPp2$q(o$fSg&B`Yxk7V?9_4$2Avmpyl7xtKItH`*y$q>v4t#FdA}FsjV%) z{51bahVpt!L*db9ss5zMK!i=%Z?)KZ1TdGU=D}>In4tDYip`V(SVcc<2ks(pmgKAk zKfILWn+3d18YL3MB|813bP@(3KHq+L25P}3j#o*EMVW)xK%PqHRE+j)?x~ zB+ZrMeRg0E`h%Tjs#Zvpy69wdH`{wnzqwIZv!O^{9n#W4a$iH_%ZIbgfhNuw;rGCc z)$X#`1)H_W=rjTt*!_?@<`No>Xd96eojmyH)ghJnOM;&vcm;}c_t!{~*NA*bijZ!$ zqgr|c*&hyS6!jxs0@<%}Z;I7Q~9U{smte{de9zfA*~T zcWGac7t($w1@_X4ri9@XERmNMa64vcS+O;zbvZOVHI-stUsCj;$X2Ed;5&!#W_NDi zVZ)d95R|+Wxw43dvnX{jo)MFjpWs z%^*wI>IY3%QhCu*-0CEx+Rs|hnN{_Wx&fbvCE1`V9?U%ssH<`zQ*sOn8fXs6ss#ko z8a#?Wvm{>qUmdSclKkTcuvms#aoa%+uRH@f?2B%YzF1I% zfs(-uUGkuw7wG+w40U?yNFg57Q~2%tdezDxh z%MY&}sD~WiqK@E1hg9*TTAhS8dyXM8wRzF@nN^b7VOLu+fr|w>WD}F2=$8`9csL7! 
zR}K}`x}c`~+*tn_g~?~?nvH39+ih4P<^h>$xQ5q8cp5@3O;xLo$o7HP!2{QEDY!9I zptc2VBo#=6lwrp-7?hG`wg52NAEn&J`pMl{-iWe3LMBScv|TcS4ciccTRZ;K)Vg&6 zRV5HaR#LL`1(4C(Fba_XQ}aIlm~X>-C{z#Kj}#g9K5|<~hLUKvL@E)_({y7pKhCd2 z`E9P^s9Y!`hUPzh64)dH&`#oG=3QV`S+fCR zL^!YVanr}JiS#Wawe3b{OIo{n9&t#SdJWASda;(3x<+RnZjls z2s3Ff402^9=WwWmzY9yeLTC2eXm7|vdDxf;>?UfZ!D~OIpzia70HtL+Pz#(Zr;L%` zQMphkys#rVfS&dC9YGhDFT_hB}P;-C(Q2I8Pgg4m*j@bzdRP)uYZ{9II81)H&;f>M`KA*g=OzU_;y4B`@zvUQp-^qH0wmx-$Mk+1n1Lx^vI!| z0>|8JKL8x6O}PCAq7{Fbyv0&H93#6F$@v`t(jC4xyxBZsC@%zo!+R6}M=MbseT!Hx zdmli{Rh&I-*P^3z!UdC45HY0graEc`5|uD))E{Y--NERi@877`>-xONe zCk%Cm$fNftL?!J6(~M|-N(fNquJ`6a;_8SW3H2>-hNKt%2=uocV39~NdlQIIDOp2= z#h*MAegJinmdtw;_+lNXVX@ZZxe#5GH7E@G^%Nj^t>3UAriJ-M*Ep|M16zc%ajp64 z+YY&du}>!NSQDaMIJfh|oeP94E+h6LM3xJ%v%Lkt4Mk4P=BQ4gmjq=ncFK47ZqTgj z<_;EA4-VOSkG3Q7vJHEbeNAwl5sCSuqi0DIH{~A>L(s!eX^&$R_PwA}4Of ztde3RYeo0GdQbr)V|insOmBQ*fX-cd*m#W+l*eL|1bJfPdC^p9#jNULt*exJc|0fH z3Ju=wm!CocZMl3}q6GrZFkbRs-@4?UMgRp_dK3|vo><`;9qzTCcZt*vp9K6McB||1 z2t{bCDa;m>p0WK_H2{8s$pX_m9WBU1#il1W-zD&ti?<}iV_XRuxJUgMS*q0cfjP*m9X;`y0 z0W~7FNj>AXWIfTLEE25f*mO1Tk8H_(0Jy{rXSA!4+AYefTC)i}VZ+LE`^5X*2C)4O z>Gi>9Epd31-*}Co5Ax$y(!xLc$tSk^&bG7dq4o%CUj~o9F?RAzwrz<)#yP6(&Ff4||Bz5z; ztol^+mC}o=Ojc7mywVLC)|k-XPSi5*EXodaP1|nPe{JkM{{(TrFY4txTGg!|%>JD+>Z3)Y0Yc5DB_0-t00>QgHhKf4sq=og-IZuegs2p2 zdBJ3+WLYWcFo;YZ*3(-}!y{!^SLwf1v_Acx^e_FU()b0?BF=3;zYfSqbgYe+&hjR%!?ie zil*s^?V6fdYYsjd$bPfwyPx!-{$H#RG|WVg)OJg~qP5~gE^?C#_a@=2n!kQ%hAE1f z^*#Ae+g2-hy8H1f$gf{dsno)AqgPN^T-meK&lp4tLw|3)rt&eY{eJ|Lxrgq{zu zI8xkeKB3^-%8CR<7UONHj;oBlu!2Mtay;>hmXFQ9Z4sj9C`I&Z8BRa6$T}4gY>NyP_VZJPQHGMk!JF$@_kA>Y_-u!6HID^`H*;fU(6!e$b@LJ5egG#CsX?PijB z`Dx;)jrE3!`x%8GDq0Eh7ZxlDfU>jeBe;m_pd3yZVn`;G4Oq4e;`mio^a*n&0Z{BI z^dj7L^>f2}iRfgPW2uvsn>TOXuM*86QSX*=@6I#Q+MHtz(uE2#&p^Y-#d+?aQWNJU zs`qdmfi?SL&i6PBHBoZp{#_~Zn7%LmOU>|@PjAPxu`{U97SzlrN5)emnzPz9xDk?@ zUx)9S0KH_Hd6bnB8t_2oq?~4Io>q_PeNishg5Insfix@;F}%kdlq79%nef(zpLi%I zZB?wLI`(4!QGly-Kx2OQC`>V;{i>Jgu{RuFdLV-=mju=Vo&E?_=jrq32UbUDwdbj_ 
z$Rm8GTtq;VMw1Qmovi$LcS$1c%}e1dhF?tiY9dF#zdo*enL1j|30P)-dgkWz1U&JHxL)AXnz6hD;ynA=HFB<+tm zVl{6rZD<}nV#+wMj`vvOINR=qlj&t#j&sa$)@TmYi3G=DDv4m0%bxfwS+)4f#-(-Q z$P#jJiI|*}(PTb;(ZGH`K?Iv#TmCrg!!FMvs4_}`#?~@)EJK5~L?mqQ{-JqCZuVZ2 zpemDwrN2;~h?3znL181KgUbXqurdm;hdRKaeKA>+Q7$JX)#F~Jt7Ln0uXw%N*2525 zrL3nX_4UK+cJ(3mj_IhgteDH%K_<#>IojsV=^+z@GE4X}}|b%?jgvhP`>i5O zukf66%IKw*Yg7C6_Rs%A2pW}BO0faD`How13o9uLCcw)nc*u_cExa@ux2FAyw5uQG z)TkN^v`7#*_|TMQin+PMqKYpc_u4zqhHZn)j+VSF3SQhjUpOc{Jp55RWF&ZL(fcoA z4n{a-4k8OTT(K=Vl0W`(Ud!)BJxXL>oC_RwVOe=>QQ*iUUF``Eku>nmx{_L~2-;{) zI``R6zfHi*w!jPkEAvS19hzl|P7S%Sc-d*fF-DMa8+z08RJv`7ifA(+m}lhy^jr%- zWL*`aialYo7cKiTG{jO0Z_w+}Y%R%kpCWaxcXV`2AltBfuz1cBvkzVgwb7TmZOPX3 zYq$X}usQG?m3iS&w|Cc-?}4K@qQy3y%~ms(F&!R_gP5_hK{r|NI@df$eBvZo_|C*e zY4((V*)kltE9j!T?jO9poD5l{S9KE@SU;9cL*W8}Vb}Ki(TG_JCO5p_A=q4FN0ROr zJdBMkMAGGBYwYMHu0P1YoeT=f#*PAAHCC+?`p;~RbH*G-(t6`uP;`W4i!fWk9vzMN zSkjo$%7l2AeNCp?$1r(4{;@5_ZTusvJ1Uk!TH<4cgg|v!>Aj0B2zYK4!>)M^1b9S- zaeq@}+T>InWCM&HZAuYEAckgxg=<2r=_7B4QM;U6SjivkaB7DrC4pYF7Sd))eeodZ z6Bk*!=rD0kk~MEeUJ}L5s!^CLXdFwaLL1oL=k~>^W@G#D)ytPu=RI(yX{aW6>}QTF z_$dgzMm#hL|4Au99HFjTP`&fAewx3a@!5A2*G(BcG!~)CwfM+wiIsBmZSLJ`54l}< zza__>pA3c8bA2hS{RckDLpo5MMPr&NKWkjLrtxLmu!O1U+TOxY+>=0QX}xG+FTz!M4WEV-eMd;HWTlp5(fKS{+R)PB zYA6-dmV-R?Wk?#0l|@;TGd?Zr{(f^o{Po{Ct4)uSd=S%hk89A^@zlW<3nmo(hkjK}+=m&N^j-S{<^`7mwMnqyk{GC+n9QWZdp8A&>EQ)f}U`7xHHNo}-w6 zP8aQWWuSz_4t^kQt;_Ac>fWNJ-#L}jLfhX zAnXe0m(N*aUDfaD-isFzJ9l(>a9(8WQzWJ1dU&{@Ss;ypo7tku(^<2w3OxwEK`K*2 zSVAg|3Dm+J@g5CSn9Z##4ElP9fSae3)3el?-Y`&rlEIZ&D@vL_aIev_dOB`^?!ei2 zygEYP=KEXP4KEq`S;r`Mwf2fZtqY?iw6c3E<5^9O`r5Dw7%Fk@H7y_5v(%9Pde|&v zSnN9M@Eyb93)L;5?hor7F7=@nZplWZd{Kdga)@109ju-gKKXrc(6g(tE%ZHo-3SnMH-g?(Cli&=-MR9AHBLY zJH9a@p^kR4kakUGOVGz*39Y@CzPk+ARM^^rIfG6L0TQAVaFavHqvm{LYu%Fvm>u+brvJR=mgY$1Qx5(|V)TnJR&{jpO|%TKc^8WIm4R&GxpbG`$}@ zqa{9wD{3NUYV1pe1CRQEc~wPcXOf`LzGP9ATuGPbk@vd3FTUFG^$zo2 zH*_mC=Of#4Jf~G=As*q46#ccP^3vjsX|U*QkI!<|_I~JFzd=NbtJTZk#+ImH2U{h( z;1j3ojIX!d+q>oLhh)9+=DFpm{xAAZIJc@@Z?ekKo2W4@ZwU-yMk`fUNLdfK|Ft95 
zzr4@$XXjr}NTuXqB(i)z;J~aQiG){On+DJ*G5yC}>E&ArUY@eZzv9{e%lcehFf00e!Yvocy3i_yXEXeKZZ9Y+OzGq%yeH2S?F_G-g?;ms6gsp z0hrdmp#n%X{}*ghn5qmP1_}tJSr_oTYxr<#R|6M3=ZJV#T^%sXoNjuexD2*1!@Txh zY4Ny@KIuM0s?3<)oQHfdAf!6p2R!||u16V7oR%!r9if&`c7`exX2p8KNYjyM7t{_i zZlYOsr)X&E+s$kdXwVKzxVuKm&gkOclS&E)c7l2B*P|Iyl6A|c>$9=?km5v1fqW>B z`b%>hiS?P1;`J^%wlsoWVxS#mnI)iIW9az-@wbC|hB5Y@};*RMYr>SB${UP_{x7(-V(k&d2eYz)eLJl@3Ny(o>4XkWE~24hj2Kyr3fS#o4pn3-il4XMOP5(tW`T&#!0n(( zUssW9nw8v1l*y5>ctg>9M%b)7!Q?dT*}ZVWJI}u#(!l4dEFK-NQ2$H_gwquy@Y?s z9x`L%oae_3F*kWWZm zpFH&^?MqC=?JLAJ1e3kUh^B!5)9V|K6pho(OYmL;=*|qt3<0XVN5E=O#6^B5g8{n7 zD79q-f2<*28JVgl(imk4zBCd66`*0^FWi^_C$*Nn4-+YY@MKa9_Lw5H?ufo$frb(7 zK>9E?OLCnYbS74)fgA%zK;d$l$LY#ju8-*&x;}u0qU{O+1yr7#kp#rcIf49OUKWl2 z0(cqt3vma~9cPdW_~@##kU(R^T50}<2KT&e$YLN5>zS`}B4xyS5xL`Wdbx+4=DLZB z(a^==lv!??wU9;RW+*e7nT%uxLX^}jpmqDxGQf0DD23XlDOm&X*HL#s)E~b#)UXC| z3n);V5KWHCnbANPnIc7irOQ)q194HE-SKSPr5nTkSGT#3^$HvFy-kIKq_|NiSgZKbr-C^5$Nd=xF7CM+?fzMXstTpi z$%xyc_wcs`HZ(zncIegd^P+8(TKv2~K@hpHa-_a)RMy^YJcesW*w{wzsszrzNJ)*x z_fGPvy0P0v|9W@Lj}{zT|8TCw?b~I0imn~R0qmqqDNT=9{a8gD8H~f)sjqP>`NOX< z0%p-|nWFcUhrpd!U6Z##t$Nu0+xsEn1yVMRqKX0+v! zKfBWm&-EvzLUmJC&?-s^h0Bu)eu$v#3y z?z56>*o9BVqpWbd#TDqy{5&~u$|+obFiLhO^eRr$1v4^Ai|5*Kye6QD>32=mF zf9JrtjV6C(4I1>PvNtbOR;7)ytNdG6Ert zT%`#RNVr=}i)3}u4$bU3Dsde*!yq-1m0`M%!j_cukj0T@OuJJ(ibmiVbz@FZk*qcv zAf~`PgranOc-2A2Dv73298Zl-eR`C%8dSzrnmn-7j_A(HrA>Pd(s(Dt31X3e53zOA zsGVoXoZN>c_J7~50c`*PYIMQdy-+`Dp{v5X@FlYtO+e(k$ITNHkR9_{e~Y<7ULNcu zeY16%ilvatXtr3TNvo?SSDvN1)BE{)mC>gQ=MzzbrD!_HcVdcZ3MY-$8zYr8co8=) z{r;8(H3Ze3R=@*>4QsprL$+6`y_a;?Um7?6&uB}SxE1RP(XQt=D*!HnFWqWT-0Ny7 zKuwtrDkTOO)3PEWeQzcaNhtD&((bDHOgV1YzfC8KoZQnmk@2(cFA5cfSJbp5NUbdOMh1vCd{kP01NhoBonf{5kU_!;c`P z%D7ME>*L|J9gxAVf;8r-R-|BDhHgou_#y1ydyPMzfrQVCim@d}$8?_KGFFwK#q0^A zD{X)|ELce;A8yH$dQ^KPc!vzB%^`pjSJ&5scm$ydWG{7u9ww6h2o*q?)X5#kLr9Q? 
z9c2zBj8)b}l21_NZ)_Iz&=~czprt}e{0}RkCk$|+3}BDmT#}}AD{`g99BMXC*M2U{ zM!>op)-hG z*uAyO1|X#s5m-vy-x!mFU)Blm<>$M9iYY)NSJVF%raNC>`~HxTh89qL-$VYdP|f=O zpL!7{J7ah3K9)se5Lgr%G^0x$tH*4COE22|u2KW@U!}V`8a8O7MC;7Jhxexo9FEDI zK6R>tpd8D!?OQYCeD)Cl~jm;_1;nN6t2%>udDQ5MkhC3;^$ zA*T2&^$4M`EzC$DJXWp@YQJXVFh$eo;vwg${)DX@Op`b0L7kX<^rtLkfs1h5@UEoP zm0VoqlqI5r?GdP%swHQyLUZ`DQ%%L?m}BH4dGBx<7Mo=|Qx>S$qdE2M(reh_2@y>&Y@g#?XgHc4+hMmK^&L=>l>)`JuI{N%`)BicutEiTy- zMm=k60twYPrC-J6&W-pIWS~0@Zd7HbqovR=btl<8{kWb+sHYiv9ANA!a;a;96mN_i zbfKg^D9zWFLX=&^vn^r+% zhW;{BuN+oJlN8zQM@pN%j{{6PMV&4H(I)Ml*O#UW1oF`OchZ19MHy+hPPIbPrf~~h z=(t+KT8jrXijhRd>RU&WsI!h=1m^LG!R3kb{_lgYr4G8kVf{VyGP*E^dM@kMrwiJo ziPEi31T=8jMsR8yZhc`*o{(#d){AQ)HuY>FErzsC$Zaubk?c*9KsPLSg|QY`+WU~Z z;Jm~^K|z|%((uKaEC^Urcucm1JQQo?yzKcX!bpcg)NWAhUpw z8z|vHH#r8Swyl6N(!|B+fu|Dz!3fUUg+^@e3CyXlrAs-06t+;OA)IerpNa4X&GS%n zb!6a;6dI;ULMMSc5W}rGk`BN9vbO4mFn&7={=wy=iDsICP=X{I7dVKDM)w>La)>|l z?+g+oXsi|pW*1!-Rf=)P&N<7$>X@YDH0IEt_r~4dntikEt6$j@^$ss}_(gx|f^U*? zvKsa6`|9nz46<6RQ#f@D3p)z40#Xc;vVXo3Z@bT$cj*epfB$m%@Uq+g`s!PD6;H26 z9lNW)f1*;&?poM*DrKs_CUacA+ts(jpuiB$W~V#@w+(bs@)8Gx?mmee?**xvL7)%U zxl*c;U<}3DI{}riKogP~{fL$1acRZP$YK>6nYWD$5n}SCPyh`Po<-V#b;<+Gva! z1^Fs|451o-weaP0lP}8O)6v6>XBk4Lt7$v|BUH&PF>&49XrzI>`|JK>E7{ZULZ?Nj=a(C{+9K zabge+CSTw+;@d3um&PScOiV)Lf(tWoH$`$<;J>m$wQ)CRT$HngwxjE%8Ztq~-Vnai=^vEZmc|;^0`R3fVJI_1a}qF5 z>t#wQ5l_VS4l5~VH-5_@V)xPnCSmifUao< zGGg7c3|ooPG~kG3pl2~&bF(y+U>|K^gAGJiY_aZHrZ!GDH% zDuyyM)bbJejQBL%0>9OpilZUr!T1;exzMv!!ir^5Ke;MgVS67J!5MDr(KBl0+eOO_^!i5XXk-qgs zkE_5XiYu^z)e#X$+T)_uq>!0NpMMCiT>;aLg;VL{6}eBokGABNvhzcmBVUZNmhk%? 
zPIfob*be0$3kSHayDHdoGTnA7vPP(BK3%mtJq{zdJ1FffY(q@GA9q@FJbtMYJ3T&k z`RQV7=~4ci@7S)r^}fywcgPjB?W_4~^!!ftS1yMc1s3Aoz-2pWkO*gvbFGuzy`}!> z^o=&sM#i{!wDh6b6KYYIxHmweeg%sT2=7eE6o^Rc z`8T^qC)&i=UdFO-n3h6#uca|s`RzQ*O$mQ}<7^uIl6I}DiLiuZm9KsH728Waa>PiE zJ7!CBA=n(>w11m@2yEr$(f_=Z^}?DB#e;8Es{ab7@KV$U@bR8-H4!30MN)mD#KxvZ z_y68{NVWW%)~^!P>6JFF9O2*Kw*_cJ`-m%F(wwt$(CIH;tlo$4v#SRv3*vFFe|f{k zCeBMz0JU^=)<_e#19vC( zLyywEz~CmpBlSh9uBb*M~Uy7UiiV_*3s-+p9M;Lwv5Ri?R@06-L_2* zA0rUxeVqahO;44_3H?MWDRJmB{#&elRb)<$;V8vJ;YM3LB@l^HvGqM()n z3pLMhu*_jE;bYbQA0O-SBWg^+`4~Wl1lb#l2QQdxfudAM2w*}91y-U(VnoC|t>BGA z*|mk_b{2L1*ytryUOX%^l)KtnTgSKzqw!2%LleBUqHX`Z%({UPXYbrT8Q$;MKy2C?o|v`)p^r020S?r zDh>s4+mXJlc&4>hIpT2w{2k-4*e`}-ukY=?*ZJ4apCMm{`8<^qaoRr_-cxGoE7cj}+X?eRn|d}9v0uEaV9l5~5h22)rT;$bw&^XZ8G*IQ47RxCXKb|G)^-!GR~6x-XO-yjv2e6zSb-jJ&A13EkPEY{w>r!L(0 zYxd0t`9Vt$_u2z0yWm4Al2J3-8aM4Qf<)uTA1{2%T^ZpXt$=`Hez&mB|LS;u5}KAz zXB<9lTsw1Yt@0;!*@tg?nWF`6q(*4SR{SZ!lkg@uI zeaJy_9?r{b+u^A=@VB;8#vNAIi)xN^^M~?j-2ke8M*I9*KVmy{f^faXMDPy68O}A-h1=*91@(ch~6&DjW=7971{IK z{oFCpzXn7?*I0V&B8lY|KB^# zx4iPb-pdTTatapG?g+$HwfRL!vUn)9Z;qdB{I2cJ~UfS{KzTcuA zp+K~J4sX-hZ2LQzM@}C>sFLxoSSR!ToS)slyz}~=oQVS3cyYSYsRqAWsW zPi&RH!%0N&G{W(JQ8T@>4)51^eU*WcAeYowTonvY$P8aUE?AOC&8v|HA z+{`J$YQiPgD4%McWbb2!WTE@(!*e`ypulY8vgYwPUv?MC_@oAC9zT=O^B4Sg(_9K0 zv1}CHy0^&8CpFC=bWL2($1eK&W2Mq_g$|T&%Ieovu5Ocyd%n@}tq>^GX85+cvD@7v zKf$$%@wPpjthsli_qD0Q&ylkPeX-}oi98nLeSd1>{^I66@Q$hcMC|-vTsz-oYGZ8{c4094jTH5HEnjIty@Ui^;t!wqu+S;qWwAI*6Us@=)kpc?9z z`B{OYQhR?npxAu!q<%_(&7g_fk*5pV9{7L>#wje*i)Y}k%S`_eVmsz)FVl)#DD z^*&qM&D~aJ93Y~Q=6p0xDG5PkoC^1!GH{$uv3V7s?k$eF4r+DYpv{omO!h-)D2H;b z9VGnTJ&&uV3^2NNH*xaV+LeV3-Ou4@JGLDFiNI&nm=#oWsY4gRK7BBvX`*%r!o*7e z1fF$(_-ae*o&68L?=Sl_ckMy6Ed$pbnutT^;)k@G`7z?Sd-RL$yGF*Cvt!UURHy|A zwHujBR-|Q#M|-9=o{Eb&u2M*Lu>tJ%Z0Zp>_2$#50a^gH%y_uf)T65<^AHu3Xf=;i zM}$gE#E|Q=y6-_pQ*j_(UyWNhztRKra=C|$SgW-^?4#ej1}DgM)dd%gdenI(ebVLP z!Ql;+MwhTm55!b&YI4qhaIt2a^3hR>Z%=7v7LR>ly%wt!rwJ?vusL!$N22MZqq%kSY 
z7Ki7;gP_n6g`!AiwGJjRxm9lcY_~2^PbViT5zou_IUksItl@5&$+=qD3C8W>cRL33zrsY4 zQxGWoDSp3tvQ4V#jc30y1^?XiKOz^gdCyw9G*3%r@B6Up7pHgI&kvve-+un*V)*|P z7DvHnk_|-Q8Nm^W5Fxu}ZU$gRA-vFLr-F}+YBu?Z=m1rMv7b2HUg$5jjE)Qx#jsML z3k)0Y`11hU%T+WtqMIB&t=MTMw*yT3OOTB5gTUFLK7iRat+U{0AdXmioUSGu^8>6< z8y=X6-&;~(M>>R1fDo5tiNH;ZX-g9o6%}DcY(ZnviU}Lh9 ztt(-tf%07sJDpV0-1#$fbYkComq6qqNR+6fw}3H^S=j4EeMEOfA0pe{_oG+n9BE~KTW)6++gs~|9L2cETV>sFt)KGZ(~Y0^hiaV|8vTpUZ5kmf-D zFEaPwKJS-tQ!{ntoP-fH{X%s(8+FJTpIJa$0k5epp)fsusT~n2AeDDuJE;%lnBigHl zi3Hc^YpmPi4N5*8eFQ5ZNZpN!D}q?;kuEQEOu*Ks3+YglbgR|sOj}`aE!c06E4ou4 z69H}%oP)4x1XncJo(1?>B4ihVQ%oZgtDx^K?okFO;L&h2w1wN#DH`eUw03>HaXebo zqb_?LSm720q%ajYeYLqgFnwBuk|dv9)d@X)GJOTvjiA;G3J}kNzDa*v^-%`N!yC*-Tl;uFZ zMsb~e%+}it;+NP0NNh={<7_33+=GAp+;=}}atdWMavKMh3`fD*YfhmEg9hoDw`~?k zc=RIfmrn%cQMf)kM5>@8PG{rgQd=FaiEYX1cwy<&R>irN5@%q;OQ0B}yAx(q>QyWX ztcITbD1?;LvCD$&)9#& zG+ZRxAj&2aV_axkwIMjx1Nqd1MgckNaR)ol+6iGR>{ct**l5rSwyZFC3Ca&TklR7r zbV8t80_UxZ;7G#cJ0>|F9TE;(A)ZNwG~F$;O2|~?L@jombZf}{U~BafpsHscP`UF2 zE@v+V$cG}5*P&P$1!bE7(clxN2||utS4bce zf|_u=t`%IoQ=rvMA^b&8aR62u@(phPI%q#SmMO3Wq%-~W_g^`$152eHY+QOUn5?TN zi4N`vYO|y5DYUnQsgr{j%+rL14LwO|dfm{@&A<*j zPM+&pHcH5x!f{9(Zu$!|?liTxj6^sqF$_b9T6${SqKll42F4p=B@J@WfI`9skxmz7voy(e zZWEr4?4X=_s@X^rf57%_um*L=FCo4fmHGB|HpM~ETb_7c-EOO?4N%i-- zFg&R#!KmAEu4vokqhHC+!?q6kd}6eWzu8Num$yUS9vab1$7-qgy_>Fq$w>9x`YaOd&5{yeTK9Vhp&e#SM#s?nXuNiRaTUdzf3v4*`{YY&e~b#UEWn+riM8=i}j zR{PRcp06pyoVve;3>``dIhECkxPVl(Y2G6+8DlwaMixpksW4jJfail=;Nd7nK|1ke zhbb|)8(;(v#VawfI5AdjqzPlfg9kepfLF9N0m;~A44)#|sux_|UcHvLXcOD8?N9qF zJCR=_XI$V~YaZy2#7%Q6?3#q3P6c+9&Z4XdjL-sf l-2ziF78$4jKf2#mV?`G43 zmas?zh;k)D4*gtfPtU7RC5)peXip~WA!ctgMzZ|RWe9~#_dBrmAS?Gr@4?w(b%91e z3M+T^4_Ij+sEx%6&@9o|x^BfKz4YP=2;B zXcTGMaUl%VaqtaK89oGTvGoyakjgP#hR|vR4}z?HF&h|yoHKkmT8vhG&nzRrQ@nwO zUpYd6InR25CdT6sZG83QIJE~J(pUg!!v^O~fT0bKXM}*58;GI4s`!SqlG7;{z+i%d z(##2=5IGju37eyL0=oMa1;Ci$u%Ge~_TD(ADsW*7?L^7TE(K4$S|$v6(0@hvU<#U7 zX%1)o`RF|j<9P7!hoAA8foDwLG)nW~{YD1Foxiv7NoG#?vi0s!L2p4YnHo%zXgHp= zzwIt_3X)KS&{%I)b 
zfdcLWI2EELO=_}0O+me9t`nt&q}0eZlWw`xd(cB$w_=8r)Q8bE$e`&N;N(O{5n6x= z90eqPw4#wMDfgtFrea)^zn`CsVH#2r(gCO#xfJmD!MJ3=}=G?Vv5_o zs^!b}-;fYrs3+vx{k6B*yUunRYV-4g6(VoSx=d0i*%`z@rz_RggF>;w$-hkonLz^X zprIOJI0>0QGHi1!+8yu}bb>AfuNs^e7##e6-JVt(EGb;Ygt#odHE7iTDa_MuY_lS#qNQq?!jK2f`o?+%NO5!7O?ypbeYoW(k?;u$3vwuFkViuRlK%<0F!R=J z+KkzfJUM9yNQzXT`5KUvZt0SKgWnicFlFSKzV9Ag`=zb3R)Z|d41%j3M3 z6&psh zX|hEtc6ctoDifJ zI1w~bSlZoKP!xjjfGSvmZ>LM9(AMXna~R)B zL|(Jw9CqNe5N&y<8bS1>h}2s{eDLVg3;qC`MTeOkr>vBH?eK0fxkiFLGn>k96X7u^ zLxEaI^P1dgTfVW{F^O?7d0is-PLtNP$mjL&ueT-h}T?~Wr>O%bNRYK zAZNM=Bg6|!J4Y$M`|Im7KHl^9mua;%fy}VY7}Q^mVG*yp8=ZK08C5S zlK@g#PE{nhZHJ?P3uW){4?)!$a}gPt{~`_AZwN!WacB?ou>z}4A4CeZHVQ6KR{LH< z7=5dYFS-41U$P7JzFIX-q%bYtCEp)}c@*K4^Pz6p0Jzo{gds@>rNzt3OUekxE#2;? zB_wf@85H7&n^N9xS?lnxOH&xF+QeKdXbmb+w>%BW!5r4j!BrXskxIlP5#QXeCE`|u zG=G?qO@DN9&_XQq3(Ni`M`4yJDAL}pir5z!W(Y9RE${fTV}@~B>jm{CmjCn`ol+r~ z0jG-3X#)l8E^&kiLD7!AYnUKWI9PbX)DU-nJ$VgT)G<8CX^SF3-_;D##!TYCcnlUC zHQxT+w$<7}SvYb8FIu&*UpXs57%Li5Qi|M+_3!XpkQHh-!ef;`^HBRo$oz z00>kGyyYv5hSlv5tRJIDr4{-Wld;mfDtXHNlcV2sQ08* zb_&SQc?wNe(tszF6C4D>k~&Adkc` z;84}f6b#eK*M>=7StQ(Sav!?C&R;`{Cs@Kz^FvjHT4pWyp7jZJc3}kxUxVv+I0^+F zo#8fCuMI@`w@Bbt7vGd6)(m3q93C0$wdjLdIK8@qnlad1dR((7!x-^2s!dqNFwmJ2 zyavTWVq0-t3iz6a*DG>2XlbPobL+4C?t9w`#Zn&bv-~^MW(n6jvG?`<&PdUT(^nYB zgA?AF*erD|el0ZMM3^ENmgDmn*ql%=b4c4b^1LB6yHK#G0*mYZj_ZQY8d!W; zfgP@-cgb_M!_z~U^#fY6cJ;Lz2z>(8nO41kKc_5g0bea?nH}i8;|Zr>-p|Zu9z-^w z0xjox23c|NvW}!cgb2_?$KmG~b7*P(?eU%8kvCdxFEdv7ZD-Eb_U-;GM2_br;HmDM zVOnr~I)@JPOh)=Cp4kRjk{C@SA1y*0<-`@rWnA9eColo{#*856$ptTe=X3-@eJdsLrB~vsW_ZXLo z1H<%q8;5maLjCugI3z&Lc!Fm#7JwW~!n$mUDQ_JbH>tFW^z(fNsKFUNeiH^`2|Ypj zq|b>M05X=~NQ;@Dg!6YJhg>2h#Om8O69+Z>-Llg}4pUC854`xp zwG?yjFrEOLIe`SI-Y5?q%d~l6(}b~*9S{V{pkhl9PLHEdkIuF6hJukP#0?mKp0U@% z-Y0|l_jxS5QAo zfdp7u*K#6}6VR)QPFGRZmT)p7oV*)DI=wA|gOFlkxaQTI1e(RuJqyk|dY2_x>2hKZ zCp#R+d8yf!h;7dM?Y-;qp+f|<^*)vQu0#$|h7hxV71Sje&-M)+N<9Stju|9#$IWRO zNfjr8`6S@Xr(BzjmoqD5jzM9A>s!i@76R@M$1d>m(=!LeCiiM#RSM2Om;6tZnJ)HJ 
zx3NEgpdrZ!M45tr2WopUiuak<*3UNnI^ntT&cQ*zdvPp*V~lDcfd>vnWzFdfgL%sMS0BH8B@(iS1Q3 z=A0`Gw~W*NQQ}3h;TV>nG!O)=Kvy|d)qqK9<(G`^gvOk0MtYdCJ_H$|4IO6C^Rm2y zv)s_k0SmVrx$sFSF=qh}i__4!OfSfKEYFgf5{ zQl!Zfm6eq_!WDuT8tbHDK}+x&ISbaf6cLY#L`DYk6gg#x;uny`^^t0D8iSJ)hjf8~ zy#&#V^jbl4Lh%ZC^4I~s(H-!Ag;RBj6WNZ8FKlJf1c41vU<4f0&Q?IXHu|a@<48Ut zk5jfDuCxsKEABk5+6%S4W-g^bkUtqebr$yxc;h2d30T=u*ERgVE#PjO`QiL8YA+^Q z;pYQQ<49~y@&x39?J6Z~|dm3n$Az&TnR6>rML&5+8vIX?2UZv#y4)LAQKj zgW{{6miAhpynr2FkoeZ?CH6zF5Q_Jx{MtJR;g4Ou`QJx(yMzC`A>D4r|5iwk+wt!= z#ECtb?A7R?3zH(4hy(8$PL~M|^#O7*9QXn&JDN`-Jz0h%Ts%O zBf^i%SAB<4ZxwbV7{})fHpt(!BfgIZC&cj2K}n$~>&dMGye^D{g}vgrH+zvd!t&x9 zB~TKaoM_`;MyqHiCz#fyplw`EiyX}TmAhNp`#UF3op5rh0ovD|zdWTln+mo|h~>pb znuWAfV9%AK)y!+7pisC0FXl4jkS95QiUqPNf~>~1DZM{8>YE8CjY~KS8dD}b7eC|0 znT2hb0n&&t?4Ih~D*&?g)S>=)$6}raGe{z5vSIJq+%uB`vCkT_c^<$)B$Wf5*0hM5 z6P2aaihgZTY?Fckr4F=Id7U3wPLEY+ysf=^f6kwI1Dt;ON*j(CB)+jO%1N9?4h*K> zAvYv5huytF5^@{bI+^A|Y*`M6laL2#y?;YzxYFz@W8ep&@~JhJXrrV+%r_F<<)Fu%y+3$^ki`vaqn9t|h-FFR+UU-n z46UIAC9}2=6_iYbCgdWHF!pa#srsVlNIA@7^Q+oPZNs6cUET5V>HguXu6S@6SDwRR zsaShMUl`VcbOLu*m8&=d-BYu$QFL?6vPMEmYQ_o5POg}uO(Qe}dD?Lr;IxG?P`4d= z^~Y~zK&V64IrRQ5hM>8wR>yq9#8LfXkl;Y@^;cfTDo?pts|)S+V8_hG4L{sO#WVc; zG|Tth2vY#-g<}Ben>cx4BVc#UPaJnDpAQ|9DrZWIXZBFhAhMwg8vWBMw_eo+DtP94 zCuMy|Hr*96W6{JZCjx;)QcK4d)WpBD9L?4L+MO2WoG_@hc@dwWOE!@e@qU8SfiJzjNNesfbUd{aQJ#qlu-PkGfTpR zPFSsjYmWt&RgOmGN&M~^iGW|^@5Vzi)^_l2p$ig6)?tiZ$Ph;o1VS&y{9ppUT^NbS z-NY`exO`uzx4t2hNOQ)lS*tf9#)4JCtoeh%4d`N7R^6ALN|p0?<;1^sVK&7df9*PL zkLvU|+I7m{f82P78H8hyx36qjArf6%4KH%BKvYweTen3*WMmfB{gZY$HXLg(3;R

zgWy%QD6ndDO~^M}g6n3jAU; z8?d}CUHdxRl+_wWovK&o~;796y0@8t~7t+i&<{|zUk{hx8XM)^# zBgck#^E(hfB+>e^-J%6FCUK&$c38%8D3T7T)3iOB?g5YMVWgO0777*yea!Fm*k}`E z@l;Rk&X0I8Dla`foulZ3V`4G$IIN)+tkoe1Hz6^xGACenFy^}-RoY>WbpoUeIg`Zq zFTr50(W>J_DH`fGB~1`B;`m8CD5-i>g4mhnN+5t%j@r^)%)8YQUG`(sWV%c>yfu$aY=W@N~AZO|QO@V>`#7 zA`Uh05#ld&f;bA9+}s0a6VJeZVh>9*mkm$XObN_CaLenDF+asOE!W^GQqhzcPrr{WX`b?+d-?w) z_nFAi*pml2&6lvFWwzNdnc!q_@I?id0;^5zvgrB3X~*%8Bsh`q$}a5TMf?hY4yphu z{hmvA+fpLYRd)nee0Fez9L6R9!(E*pj5BEJS6$*DAE&)^iKTfYA&<6=?xR0$Y45@H(B7uu_G9nj^w(*114wY&HvPz}_90!E4BjoSf zBOEVJ0N0Q1_usOa@FSO4S-l^X$C2ZR%+o!--^DR#NNi?vH6J$M zoTh$7U*HVb@>$u1(QXF z84@1UKVAS&|MjDW$OFBU3i}#lfriFLjd{P^mxqJ8UlW=J^7kh?XQPumgdR}r+G2pf zC3+Zu8pLOo#RT7#=|!dW=hnc z|BH? zVl;MPgGARfu?B_~kp2z#p;KgQ!)a?f5VWYOMnuUwJgbHdWe^4vW6Dm&sjaK6mIzi2 z1YGg{%X`PEm}6L?D*Ccn3H|)GAow7O@U!_?ZF1pPO5aD090^kRIIJ(2LEr}sQaT$j zO|s4+U`4ZmBcg=7gEoggkugTaYnXRFG_RZuL)vZL9-a=tY1Ie=rx4r>8pq+p9$;fz z9~l%7-CS{@s@m>qO=6|XYn!ern=@Y$yhIzu&q2!;|464ADWe>_%Mw2rSL8gGrb8JA zDTEz4t;zN$;}o+mp#@5xJM_%OTduB%$Xr`!=>Vn!#~Nx1F`2As*;)@|hzH9!X$Hq> zZfqVFOc~lRP8`i@LQMj6B9zU59_#+|>OL1Nuzsm`0lCd(UA_pH4w%cWR`&dV`s{z` z#O^O!{bYO)hnunpwa`eg`i_-$)LIJu`KRpPUUKc1JY_u=1OWNamIb2H?9eTWU2_Hu z87kDSx=~WaMj!9bC>Qw4pp&xqO)Sj437PlvUHMo;SNHt7K3`M<`3%6IgJZ`4QXh44 z=-MpIeE_8)zpRMEIc!SXwuV?fxv^j0&2G{}IG)u<`KsRgISoDN>sF=bmp4l;SrTHc zWocs9q#m`S)CPMjO<5IdHClX#JBZ6WXXPs=CAO-|A4b&GeIVp~ZB;EDw7-LO;0V-_ zQ6+QBO6$ttLMO>KHy!B*+-@-}kUkBZ(Yj-66^IfunIe;Zn$RA(9rbuUMkh@nsfNXQ zTHEI;=Eg-g=Hj9RfkvG6`}0UBs0E74+dT{eRVLNr1YBf~fjWLO$JBa~e9y5X3sH3n z)JmScm+di*O?@NX=q{o@2Nz(|Wie3{5^C?-krO77Z5THdF zL%n<04(3jVO$TS>TMil8)Es#&x-DRFL|p>5%k-|XI4V}^&Y3xL)qH84-|xzoR<@%4 z(+sk@XTQv@-Y$u(dmIHM2yD@6KhFqDPn#QPxn2bE*}Tf_b)F^&G6{Bb%rQP7LWt}L zs#gz&h1A#Sigx@A(1SGfWql*wcj(?mr&Nr9MQH5H^Y0$}cDlaZjOSa17UXdN4f2Gq z>C}b(-co!eB?wfNfQ{#bwZ+8R^}7K#v77<1#J@wYU@RFHcD8;_?^;aH4fjv{T-225 z;QEoO&oN~>5?{0vnDNlH&MfTnrdu~V{6QB#9EIBxW~WWQ9w&leykbOhK6${+NLF-f z-r(rlAh=kNhc?S%#q;C7TV&l{D>_|CYVrSY8#_Gf)V8>R;(<#)>jSp^s`-&q>&)ze 
zYT`reer5Mkm}fZXH@awHzzl71*kXc*N|3A#NLqqzA_hh&FU4`=Y}xe#0>t+8 z{D^?C|Am`&3d|19zgdN1r<^@$I$i;gkGWFu;>A+jev)FiNtfy8ilbBAx& zdtWk#A~O_bO!4h;SYJ`bcg}_V8csgN=qrmjXBvkXO_Q9h(dv+)rHD`~hvxXmnBdq@ z(=|nnMz^`2qVKUjqU8?#kfX0~PJ=qSNgV2|4_C;OUS4x1DPve8r!8`_)FFiIv%jX3 zRVNIr;ia{{`$Ft0mjgDR;gQ=lSV#NWAVJ2bk4;fJ@wY#)*uV@Lg#~LO_)0NYAU6#h zDc)c7&;{GM-%XUUXMerTA)*4`3WPANONpFPjw!+4z@AuKd-MLKm}L{1b`ZJ3K^i1* zgo}7n4Z{4{eh;%sfX z{fBA@4DgG{9%O4(%Rv(q10Q=wmNa`KpBta`0QVh{eLUidr5FBe!0l&=S7`!Di%f@( zJO^+mfNxPw8e(ZYlkAC*e#2>wPueosA9!$6FwxFkIA)PvoZ#qgXINQXimv`ljMoyRSTlM8xuw}S(TIjykL1BFf)7Brc{gyBAP$HZyCLTW%J{X>~h>1N(F zBiMv|d2;Z0EGHfo@=u|99Rz%D$Au+b;m-%ck5PR^1?yD|^F<+t8j{0>*DDO~ezTV+ zKm$G>XIfCOaZTo2hd-d{WCn!@0yPP~U$;)RJQ8K#ySlEl%qXVbFEW zT^w%Q{z_Fj@Src!w0E>vzP1%*2ZFGfcg2p2#HKntqHyezu#)d6(MKdRv(J8)pU}QP zcP$r?9?iGX493CyS`A#FPU|!S2);jlU6lvCCl!{77zC@%gq$|;Zj&hA`fpVmSZysc z3~8yL!T|KQ9XPaF<|r@{P=1xDce|);0CL4WuT_nJ6nO64 zYQ+VL0o)yQW2i{wHu$4r0Tj5Bkd%l>mYZG#F$Fr6i-zUWYTeqShnUVUmk|&cNLYj% zhM+j zTQ@i&8lbSXXU|Jj2S|7V0zH^imm06LVXeYZ<4BjK>b79)VSH;<5ZbcN;%;fOdqjYA zmCHaME~<(}Mn{?iD?Kc1=Uo(0Q6iXq;I?rl#h`$urq?V$ZA0G1#o*w*$8~X86lc>e z>ay7`1^PMyiAp%!bXHwqI>|TtCW#z>MNK#Fes0!d3<}aVjXc{FsvfBL@iIUsNDQZv z#!i(YTG=y82MQIR)<%GVyqhv>Na{##e_^ve3HJ-)+#4mWr^IEYLJaeAf~`z4&Hrcchr!j4-42SHy^#Dy*3%uEUQUlDz4t)YH9}&c~*=t8At2l3NZ{=fj$DwnV50-@O!mq;R`=UNln#E8^$gvVl^d%efBD=56cMGSMpHMH z5ZWONuE%g%jKC5K%S$hUk=V4=ee2LrNx0u5H>y%aI>*|W0oP<5lYsJUjWxyXEr1)H z@R*|0l>QED)`gMbHpDA=7U?7*aT3&Du1!HFk)$&F`WSyFbAdLCpamX9XCy~)?H~k- zJ(UT_nORyfxhi=n@3-S8oZwMLf36mZvKWhh$A)5jqmR92F5-K~#yGP*EW!N`>VeD7 z#@j~siyzn6c)HG+e}fZ%WEje*1X#M{4m~nKS}!Tv42%Vi;&CM{d+(zV9YXkACkgeE zYm+#@ItqVxb}SDCfyX!jea@r@|G5J~hRlTEz~Lse3-6#KCV?k@r7;-E zQEHC1p7-1P$ge5WH}~nWW@O@l!J*&)FkVkY99h&5mV{>RJu-N!2kU?qV8l)Wl57{$C;BP*;Owfw@83+L0yCV$YZ{7i3@A#zXgcq-3=+~^p{WI?E< zDcnA6Kt3c__>9mS6NOwfBXsLaePnX&sM}5ugd}c-*(HV*g!D`Q zc(tIs)zX?8|67Oz>=o~OE6Nuetv!N8B>O5J4Ie*$Kwg5eT$9|*KPW;)&abu87?{+c 
z_(38S>o1%xsnMjFP0Qfs_aM^wH4)UMa!niiT?4OZCX>Q$2l5LrXIps~9}30>f9u^1O9`M`Bj{AMS2lq@qEh8*dafu{jFLDEm-d1XhIXHaye! zGB@>$MsbA+Jf=8int<#25=EWwbaCI-E6wp1R1Lt$1A`MV1op#InS4^K-@IF}!XHU( zlXF-3J^U=e92AQpXa3kw6sCfQIh^f`%(_{J;0TW85gc+5JBhSD>ylpst|0RmVAM2p zh&(8Tt}R99L2nJ)+B&suX4BB+?&|Dh-zwk;alovDZhTl5zbt!-a_S`4<0G8L*1qhn zQhpH~dwAc+PY>2VRey(`4y{ZMO(QF^-xH^R0IWlfcGy`%wKz7QYlbJ*#ELdefjHmb z1K?Xtc_rwc8cNdMA82{%V~;lAJdk|~zn4m+_u@zC!bn2?{7S#Ggy>&%a8Lxf>E+Ow z7`jaOc;E>x?7}vJ`X~irtW6Ms23;GpS`*BRp!h`8Z!87VUII0}gQQp^H!Q9b<2E={ zV1*5LCw89JPQ;B}0(H}DYaHbP1U(K5VG+Hy59#qjLW$R5Lrj4`oi;dchUd0I6Y&hT zI!y3558*eF?Wq1Jp!*$DnaXBHmNk$E2CzZb2d!R*^)M_K5*KgR2Ue&S%8?b|)T@#L zW=NX@|3#^%#`|8AgOe%*N6RbYSMAGQr#dB=l3Y4k_bciry6}u11G~SV?tJsUzxq~g zowSn`E?|SLtdrTM;$Q{JfRupt@}#5D3Qs6>!`Rlw?&aie&WWHk3N`4gNy*AYkKu7g z4|<(rzbrg#fjN&4(99k9<(JX-u1%kpN>7zJ32)cCMX{nrma17HM$`r9Ltg9}V-P!L z3qy?&iM(y-a!=RXDZ%5Rl`{7^eUw^xK4OyTHFH@hqFdCGiqTv5}2PCCR}lD;pK7WH2epqQ#6!ez`K>Lb(6Tt zfWuAHB!Wy@B{NBb_W8tL2p9(jI{<9i&p8dK5uW_%iAw3jClo*`kH5oXA~fn2h2)uD zVcxBoAUd!wq|mhd2yW>#f zmeE15$4iC|VPh1`cHe5n$t!tEsdaaV^%|foU`(Jt( zq~JNMs}V&)Ym|&^cqrSD%GYB? 
zp|MkmIdiR2E;ex@90I~~hD2d>#uRiS9$o0OA3tjU?LqQ)$S*;})(f)ZCP<$Za|cu5 zhLH-6&K%Qmg!^RJSp+S*L{kS;IhPML-q%7jK?`jS1%{;S`5BCcrxM%0;L=NC7CdrB zDk#xuOy(&Ej0ml(k|9Q)i?Y=|LH@n42mGWhWr0DVfz9s`Mp9^Ie(%hbO@n?@B%uVG zJ*@gz6vNUO%&7x`Vd_GTJHgn_qwqe@{&uVhw)_d|ZizO88(zx7ABRHIOUq-w4u~o-0aBMF{_)uQijK@>1n73Lk%D}(zCmD&i?fugks|IG+3;vaLf7I8R`#Z*gd$+VK`(W%=23Ja>S<69G~ z5o%_M+uB<#wUS9U$$ffok3ziupIw<5-l|4h9=`v)YoX;Y=?N1rI0Z}1{=NEnKk*;Z zE{^%Due6cW!KwWp+?`dptM#gRMCJVxt8WH8_DCD$T^8Z7^5?_bzYqQ63%`k<{p`5^ z^oy;vDUEHH9&cZm`C@ivL&7_2@5c+8I&6J2E9?^1c)uB2>g?>yQ< zGP1HuDKX}pJs3~i;jYDs7RFp)4~FZlhT^M% zfyV*^1IgAQa>G-5{tH))GSu%UQ7^>5{M_-RRI_haA@-!IobgYDLrNirFf0CB}zc% zug@1e(vy{y6^=*9xJ>ifIAmareZyJF#K@$ie#lH}-()CDfbfD=Xro1ctv*_lJF?YSuCQAX#(%e?U$^#9s2R1; ze=0YxM+Y32{ONPAx{Z05c<}k7>}=N-MSvL2;FUe^7#tge?iB~`6gHWBGq6AAOTFwnowXXIMq_99t%kELMhsR-9jh@;ugkh?d+Mv)65GZH^ui-tWtAWe&ggDq z;|JA)8ZJ%pHwA`y6lx5*T{irVA9r_2&v^dOWKKf%*cn6{IuRA#?X!y?QlKBLlCT?Cd2{ZfWUT;!IF~ zbK=g4NK_8&8%t-LKYzXqMMk>*JUq+cy+3u(YK&zpZ=_2K#LzyCHUme^bYlPCx8mvs zDMLfU1NNtiu7-p>P!N1Sp$2)N;0wn>rHS~x#=Vc-;)iSp!a!Bb0MF^(`-Hty^nn@d zf?et4ik7?1a6=?&llvZsO*!=Vrl?7Gd2z;@R@heWutRtZ{d%3+7bJ+AfH1bptAi7& z0s;cOiVN`g+F!~i-J9rBI3{vr>ix)438I$0ciL`VsY#G_TFTeg+ z{kUa9^%mMZy?k@IuV{I{-iyYWi$?$ZB6iaOoxC~#18Hb;u#WG|Z!7$H?it}WW@Vk( zZvlug1yIOa_pTx!^*5+Pfhob#lok;6+!}wrk!e?ZP$#|rz?~pwx7it>_NRf$PA`g3 z5?gb^UdlYC$i3*selY`Te~Lh$k}T2Lv`K;%3DOzBe#AJptHlbFl^Qr+md&_zdU{Ev z66^y0Hn2>0)24FxT?$lC07sGbdXpwv)RjmA;G;vIpsQz1OA8xDUd(CRjh0UIVl;ct z1pD=l_8FcKUc=;}wlUt{e+s%2c`rJmqN2onzs4dbacp+oG>3?eS{SOX@`%WcjQ+BV z#TE5w2ZkO$T!@9c8B=jNpqwM-3N#1jiE$npVMi9)skpBNGk;4Dh>DUWjzQ?1Moi(r zy%$dIEBbcfCmxCBKhXi}tk}O8l?V@^9e+oMsx1443c$_Ilopf+qsFzCUCwyr+o{~4+e^TM8}r)jJ+bWgXk${ zzH6r;&O%V2y-V6)x`%32vDwOJh5tot-p4lgk`Ob4BQfVM8OWaSA1IgeE>LTUfnVpl zC#g!KaA;;0I@4T<7L(Wn8R?yvWc)5oJ8_s^d)eqo-s%-EFp%h*UWq#nNc1q~Vw%&5 zNU(v;tm|+FNGmB*F^9gDcgafon*7LVbl3`RWLWWz(rj;`+a~~zo5P$B>mtmPugVRc zC*i)uLT7K$jqqaM>2v0s0uMk2w``cEre;lN(HbCIHvsi34j(R>Ff{F$WRG)B^y)my 
zs^8&aM}Va5KFK}r;9wvDHm_tsERZ3(MV{SHcBuLbzwdzL=_O})D@tcjgN`}A_gnKP zot!_hsI>crGk5_eFqP#z_t$fgL6xK#UfI* zk~swkVbvx*K0JJ`51L?tI|}`(wYA;N^TH z46VPq#!gD9>FOGDt2J_*heqvLbvy9BjKZ4Q=X8=pWK4RY11P>suk(donf~~;jVYz( z0L6KflI=xRBA=j6k~X6OTp7xF2Jd?C5JW)%+yln?piT=NLu&j^BFZT$)gN7Rr>i}) zi;}&OaN8Cu?ZTlnmUS=Ske$T?L@rA{m2MUPa~`DRV>v3u&)IbE2~Lb2#PYq^I^h@{myt5#cY>s3Ni!RokM7ASrks7%-Bvm14VDMs z&FK)EVNXyq`?J?*M8h>?r&mXXL#F{wu&LDsnP~U@E^VN0`}Fhiuh!Q8h*O4rIxI}f z9UY~9V7c6(V~j=Je?-f_>=k47IkMZ3Qb7}oy@k;s#Wq|9^a4o%;ry|}aOclF;Bnyh zxbj3KJ=lQyN0kc02SGiYQ@wDL)sO3gF%6hqmfgE|-<`k4f4zWNFl2r+J*)=GY5he(usq4$y-!Y2$K zwVe;9?WQu=CKzDN3ZZ(*Rjr@>A6Et^-E?xIZQ#1~S&4y1qmPLk8a+$z00J&~W_#of zmID$*X`XX;TQlhN3o7I69|LRu0LQu$G&@E3Tew7e>$Mf+MT{Ed1A7JXE2G*J^UooX zUc|>0+z1wDS7pN>k>d~Rdm$D5ovNy;#z3xD>Bxe#;pOWR<&MY7$q-I^>~BVUD+PzZ0gHj638lRdm_PgMK%&sc!q+{sT);2D5TVa0X#}l`N&8vuvgIe4@a7>MeIFxBqa@O4E`p-Aa6ePKY z%a2a;!OeuN5vS7eW^z;0hTE$)9^K5yj1oMMWUNc~$2M@bU`*QK6%`$wf_3Hf*hVCJ z|B3K2fqi%oVRm{&X-;T1$Ul}u!YrF9?h&Vz;kR{6ucJy^r^0)Bi&3>SiSumoN|t!c z;jC7b=AH5VY1Ax)@ z(pQTdZF{G~{ayZ!{weuLTt)*j)lt5QPBeRUDLMvUrhMx~VWzma_`D1#9rWQ@CU`e>NX!D)T3G7)gK6qqq?LcSIKKB0;j zALl#H%V4uLzUV)qyZt}0p*@5Rjg*;P4}*>()PLLI@6oof4j<2SWI?)WZBB0PE^qKa z6@cUBT}{YO>o;j4IkQ*7!s@b1HtWD|&=xX*`wDfqQAKEZl4bo_K$SgY=L6-HtLu)?%ea_RP)J>SeDh`%nQAN$k@!hxP_{@B7JVE0s^=zf2niPP z*~i{+puDTVF2h0V>bfY+U*CZk`m#w*q!QXYq4@kEBHTPBJ^WULK4aesBCNR$etT~& z{}na+PH&p4HxAG$YK{du703y_ONvJ_M~C;#5<5G)6e}sWV_~*0W{^+**|chV zDssK*=%<;R4=S8@qm4sQNW@JL(JF*1aUH4HGof&?VN9coD^^WOLws!P0(i!br;&YW zgDrzw3x4Cqds;tp!$3hAnK2CxuV8C#iKA=qMz9T2^2t&IGvYXcih1QS)z?L_$n<_6 ziYop=YX+ECqp`52Zw#n?&GWP}5oMd4gM1ydEeEW_CLvxOvyc#7nDmLKcO@=Pfs`Ip zx`hTb>Wvw3M^;AW4)@OfH7HvB)?bZ(@w%4QTfXn_8ytu>g~*x-<+b8EWjqbzbNg3& zD@3Jx@Kf;A!{LQvx2w`j!O+O49OnPk4c^VWO-xRDNE@Vp0secFH@k`ZF>!F@{yccpjU*+#Q2Bp{#Hi}@DzGTTABzPkbEea|U zHb{+xqAT4dqc-@DKfYm$*Q>_J(6H)Z8y>9={qc_@#3uxuruu~h=inIn^5ktkxEfqx zv-i23w$BFYIf1yn%i9-SV)|iKEK2>!FECK*avOHU>Bn}l*^j#H`-n!Y?CkQD?eVPF zj`(j#>oCsGV-lNqFh$JZ4i3o1GtQwwK|#l9k*QMrhMCS 
ziBb-8-r?;F)@Z%6sqG}3g$+J0RL)cs4@V+z^%P9_jsBY)11a=5H zXUD5Fpd1A903t*K`$XJs%Pyb?^vQ52s7Afg=$as=ZhM!^*L5b2?d{K>H>V;q)nRpPbzh&6o z>a|h+ev@q$sTvs>Wz;3WmUcm@&T>=^>F<){9de5ul#m2{;-I{NphD68%~D?!O9&DR zr(T@Ft}V*IITj~Qprf4gLK5lretc013Ilv43uurHi(hQ2Adh39QI@_}r*T?!yJX!V zRbg{SMBLKcMIYYky@*r`l3K7%U)UP_;f*7bI`>^(i;&|0(Gmxtr>}Zn(L+1e5xtKT z2Vi{!1WH@5*t8sF%yMyVTmt+cKn1hoUR{hgV3Rr?oRRwqVId(k*(EI9FdyMTa3j6$ zn3G%y4Xw@Y%usmlvN&Q?RtQpk^c)Xb07~8>fYkkDAP z>v|={ro&MznjYK=Ee%1lpRyi`LDre3z_kxYw4@0OI9uJk8M&GQaEjbN?7#Zl=D`t?c5v}2`KUBGP-(%%5?(DA59 z6z6`w=}wQm@n;_cq$Tw*6kdqEFfRk4_flM3+@Du>hcWqrv!$nbiPoM?q1-`<4Q^pe zjEt(4A_;f0Cd1r?nt~=G46uN^gd@8gmI(_5oi(!DJh!`B=AnNY>`*fv%p^Md+_b%P z_3CumBF?T;JNC8RtHqpSa)O5N$q?q~11u39=b}$M&b9N24|rh+vsjlo=&hp_#~EK? zO+5n@BZ$4DW~P(L0(!PZuGJ=;u3SX*?I?1>A&UyGvwfegXVR$k4gbE50p-@&d3l!a zQykh;9qs^&i;JH&o%V6G56yj#@hF0Me4pWcL}tC=Rnf3eoSc;C1~(FrMQ|SY`?s{@ z(=p(Qi#b%+-@ixRC=6#orgXnG0}_pUSFT>I+jsyv3dYFVl??Y4c4EFCgk->P(+i9g!A39E^^b=KG(BJwPu`5N2G=JL#!|Ar3`Xi&3YT7QqY0 z+xgsXo_!>b=u}WpLxgND*gjsRTVWGoTe(v27%Jef^u7=|9SP`Fq)vE-*yd)Qi~GTi zKJfRKYXXCU%5ivF=DugufT${Fz@apSGdDM^3$$)KQp}JU4ahS@W%WdgFA#0GT zF^TaQ)xh~)PJ~qfB0co?Ms2-TtJi5E+7Sb~7gTD~A~L{c91r+l=8qSX&VxWn<&>BK zBKIlIZG$qt*#lnNmmVabo;eB&oQlc%n^;0vBq_o=<_7K#p(n8l{L*?*IF)LluRB!Q zRx+BRHfaA}&#I$II^mIxNWm9k8MKXf1*T3iJ@$PeELCieoUDmztO&Vl{ZC)pPLMM# z@oGw-F$x8Ml$C?QuaGgn`~>IikU(*Ig?~wd363ejI~~tpBQzhu;J0xm3cQOuUFyd7 z7hRa}i8ZtloipGZ!HqwRxVlpsi5x=TNgljjc_@}j}q%EZnMC*g6H#6%Wh1pjB!m~t8OfvE61Af`B!-igBFfGRWD-XVsY1*u0Wb6qDV zO3x9@QYo86O$o-#v7!*lq`}wQbgKH#r3Ye5|_dx1KAoRX~IdWkl>^ zUw-;tBw-v_jtvlZV7}<7F@zyKVyMawlmo)i;CFL-a?Jt6^%-7cBm{$ykPuqKd=_vo zi7YmyhSR5>@6|(&XZupx_b4T*28w45St(FU8wwxNpY2U?+;6$INhFHxjr>gbg9*dS z_>cq9KZwB*W-OFQLPgehmBtvoceh4!Ue8f9dCd1@QXxq(f$eMk@#*U#o-<7H8(CI; zn0e5i?dyssNwkhaK)N8zkr#;?{Co9+9czJmq_I8fS~u#oQR+#_dZ6D1-Y|H2hJb1? 
z;=6Uba1@h&$YzCfh90U4ihkL7nXxaTy%jm*7$6}>Q5jC6kv-D%n$FkczrsrIIHU0E z1NSytnu9N#3@7^cgRjypMFdyZN#;EhFeWk!o?WA%*VlAnJW+WyA!&Yxmk+ONla^Z-?+5i(~DG{5S~ zrHhcS_Dw1+IDaxLYr|i44b9IK`nV{w0O>K#PY z;qIFcsD)n=iAHFuh4;(>c_ZIU>(zbaAODYz#Jlj{*Z+uL|GgE$Km7l4d8o*rRlj^( zk|c<`!yRf>dq!}x;iwZPw*Nv_-M$HJjlOMN?Ja!{we?EhiX`J11wUzP>)o zp(pFUo!1k-0AoZ;YZ>N9Z3ocD;J7`NH#crK8Vv0HF*@QMqZ9`(?Uj_q{ei4&CV&!J z$E@J(M5~;!M;jlBP*Ety`cXqBS5r zv9J_3NNK*li|=?7?V^4ejpkHjTP(;>`&~r;6LsyRiF}NhIWq=n=u-6+Sv^5x$In%e zkJ8Z`i)lMFO9V2xl7PUdDtOPfScuO*v}vXPdWY%zmBH&4W_m`MNfIt&Xq%pgm1!h zOysKxPr~D#Gxep51(Xtx{1*W#6N(J9XXvb7|8>`!uZg3){B}yWD;NHy?b3vkpO?@t z3^u0@(~oemhEIBN9}^gcGR5Q{TjeKV!N8%#lw28uM5)OHx*p29$ZPCb+;a;21b3x+F{ z<)Eu15QGrTpe8~#Wx-zmc7XL99>fBOJu$W#KkFFL`xoy(D2_W#g$%?ZS-Tb#v5C&& z6R5&6?4A@$dK-~_#a=Gh>hIkvh<>sPMDvyN3}R3Auq24^} zIOVThHPnxyu(9U6j9o(6DwUm@ zNGfec3q#V_m7)-7kyP3<2BnQG5iQEtN+c@U7_FAlq+P|8M633_?&E#dRad&culxDq zdENK(dtTFPzOL(A=khr}=Vy5@$NM<0480r^_#NH!&sd2>RF!#~Zv!VAa{CfoTd9Fo zzK1~Lqjbt6(&GNnc4LK5esb~pVj%$ zkfV4OTzsTIil_MrP*#-HkQ0K(kX(*6kKzG4P(qDbDiK{o^77&x?4+cmXuH_5b!!q5 z4}z2m+N`Otf}jkPcdc(S7|RrZkf|CZO4d@$?6lUW@VkMJ zaG~fg@f(ubwWtWA;sf5PPf$v_#u3=pAAjTsujcF!hZnHZkm8`IJa3TbX(P@c z0G*OCZM~n&3R_AjKw9;PB#xG!ASARlUmZHKzRKaHDpOoe4%4qhue*!bC0fBhGeaVw z^QezmOMk{A(?RPYSwsxqXywx>c|2KfY7prZlExM&#(%&>cgCIlg)kq0To z>`9Z6z&=fYE@GErIt_@^I%4@ElPolf#v1W#la$Z5yor-|;itcrD@5%jiEjeeSn?^3 zXZKc=JyE#LYTSZngxbOG+q9Clq zyWbEDmoT3s=}a7pO>tBUCLZ6)qPog6%c?auZ{AEnvD>SL6$BHS48F8}igQpQSU6yS z0%6U0Pkeu;LFG%ywEin{rwkMQ4D#s_3Ob05XlXT!c zu}uK$qAAraaaiD6VLG&Iy!5gwpCTy~`=KyXn9q|Y(-wG=l+ECyLmLh%4VXW2G%qvkQ$ zOs@l&m#+9MESMDO^oh7eydZ_wvD+l1=P&)CQ}KV7F()Bk(G|ZOycNVcX<=mn<_Jo2 zgf>tX!Z2#+X{j5iE3gN(bfklW;8NB#0@M(^!Vhm#f!=x6wV|iQUZP0{{_Lt2KU&zz zty`ij;0wqm3|(NK>(Tt4A#>!TV~H$+Lq{F0omQ=ccWL ze-;d<>k#lBkG0RQ7Auzoo+=8tdkJeUw*^}M+$J(ca&}@4z=8bI`K5adt6%e2bWu?vU*E;Zs^Vn4c9sfWie#0m;5-CV6#1-^N3{aZ5uY$Ys|5j zVgLJB6jqy45#c^R!YaweMFG7{)VGh)M~|Z@hFaR+w_)pfffS#wWhAhK@R(lb z->2eiEV>ct>Y$y9KpgeQpMCHPCsS!5WmnB}^6~_TPR7Ejl328^H4rAfYTAz{@$g)w 
z;Xn)~RKBsZoT`Hf+_*d50`&?hu+#i46)|SXWcR5ZdipzQbo`ONdi4X>tBd1SDYiV# zP_?kIxZOfBa`BCSOQ1?d9YJCkv6YQKu=&sx3lO^y`#=I*LhbL)JW6+h{ggBx(9MI! z##P^Ih_CmIV&e_!hxT;hYe#Ig^1JEK#>j14J?q#AvggLlkXle_Pc@n^COZBK_)2k& zADmB*i9l1t(d@*=ghR9cx<=?MYy|gOvObEz#Fj#v6)Vw|HY+vo*mp0}gjY?O7-Xp^ zNl!JTwJkMeK6-)(sa7B3_Eh8%jYZ6~4k*FT#wFO-;Mlc0ZX5!nEWcrSt=5@Dzss%G+ae>@9=M-%65 zhBP5#A^~<3eCyUc6tQN0nhn%8rRJf&>NpA68ckA|NnP^?muoD(wDERU(u85hir`5Q zrvay^TVKM)%f$uZAd<$R@)*K`9fw@x^jh28m>{^gA9tp}JmcjBt=y7U6KAEuW91#e zZB3VwJco_k?%29Y|D$`)VykgK+&u6ayL9)n8Ujk!Nd^N9B>by}`WZZaTZV!_=yIK| zKL)PoxBr#dQZEer{8t-1&=>gj-|!gJ9X$UHk3s$M-|!gJ9sdoF!QJuS@EF`5|L?CJ z-AZ;XLi;Uu&v)#!JTv{u>OEJ6O)YZ^`Z|2H$pZ1Rb?*arIR$?4%@5aE?{!#EV`-7} zcFD9AhfN}at9M;f_jx0KZP&BkKfhS=c*V4-8>Vy)yL?l4(uhCbJpc7Wtkyq_z z{(PXO+_9$Ae8T+bfEI_6+_FnOQ{;QSy0yQY;L7tc>Bm}l>a_n3<{Ucz^~Qg_0bA_< z6y-%DpM*)+Zr502AQiAoVVPxDaM}6k;g(%zSpnMHh2GBBo&7{ZRKGfOW{^YE zzpOPmjjk^?hq<)#@&)Zre;eM`H1h9wt#R$&+cKT1-tSOqX+G%iEq89Z+PEu6)W*rT zSLOZMQ=Aw5@r1YGOf?}Pj|&oKHQdS*3pI;*dGmt|J7!DG@x7%x-Q+cY;xY zRXbiK#l-3s+8>efJ8rp8J2y#td4`usZRgXkvsZMCbG;GJs)ph`kFC~S4#vAXQi2SZ zJ9MNtmVXd@a`H^dJ)zx`w2O7N*hl*o2-tpktEI#m5#D~=N40RV=Ec@|R}ad43kdGX zyBpr@6y|0TMs+(SP- zG%#q=l&Jl~-T1jyK-E!qq`OO;o`&dV6b#)Xvc67MST%XQyAdlDuc3ba8HGi5;M`lNPHAi|| za2}N=#_p2}lf%398m|iMJRzgg{bfrnX9L$tv-j?Mm$@IH*nfaG*8}d18&0?w{A`&m z`DnO#bm!Z3<$8nCWz(?%avuQX^h&^($}N4jKSu zynUub;G*6EV1@xOW8wl?(cTL)|DM~xZpcktjWxjn`7z9u9ECLIYf8w@{~t#-EX+zu z{cV=%nQI{iLe)tb9Eb6w&F$~P{T1zQ=UrcxnTGi~ggP8oOS9n!o;mNE+}A$grd@(p z=cvt_z`r%hE2n9$A!owHdmVcazAt>{sG4cW^v1|TgL>a$PpQE#?8o=9 z+P8(HDLh+#?tfpN%=`NB?-Tm}ewORsr}h8+G}pfi_W%9Q-oM{#$$Yn?e$Z^<7<>CG zDw0x*=T6vuuGs;Ld)#Mh&Y+?9-N+Ap!%jZDQBhb1r+m`x4DrVZv_*J+ZeY``uQEsa>9r$Ne&R0b1SvcTui?4@O5Q zPbQEPr7=nDuoM)zUR1CqZL+AF--5_fP)u5pG`i&t6KJsGzDxK!Ak%E?D9nu9 z1@k2z$(u+2Flg>98E(GCkCbhN{?tZte95lI3lKowUK4Kno^U(jNuQXrg_{yn>#;!n z8#`!zVs|@lODvw?-INzPMPfov)ho7|#kU?H=qcncAi&3R`qY~jzxSJlkmMt4*OyaoI?#T7BA(M3ZhlUm zNG(CL9`(V1`IB<~c>klS@4Z@`1+L2I=&q;=jy$B2$*F0nd^G!_9Z#Sk!LL@v*LfWE 
z>V5I9o0RnbWKMcgWdNh%tbgASWUlKSLPJua6skJzKt9*h+k%znaa5GgUJ>T+3j!|7 zuB5dyGW!4fBZok*E-mcVUtMNQqHFlU z89r>p&sE0`pcHPuuW1ITNfsjV{%m8wZot0@D5UG(+sED;+?S|h;ARgRxRW)7J>Rba zwHM-n%wwXPKCxw{`ko9<|vKAl?(dXn9`0pN}#o>QVrxU_c>^TcaKBp=PfSR1^f z-1%j=`ORXe80gmU+P)3r?;bvuzN{tvgm8xGmF3QT0PTqf0baul2X- z^Ljh?f1Da{Vzq580%8_^@zt1wS`PDOv8EbGX~Z^h~XEY2;$IdQVf z$uT}}50oOQfPC3mRtCrFNl8i);3KfIi9?!+YSXW7xHWDw^Q6klp2Rm+ zdS@i1M)Ki(4L01w_|Scg5W6K+_;=qTNQnql2idPtp@tvjvdN-_8!=`F8*APhg(oa} zNA-+5P=6r)zzbSE@!wBLTU!7{ zWYGnId@JTHbI!I+3x8q-{`vUHlO&)~S2+;(kIa`V2DUc6%fbJ;7ccI06q-`WLOs!y zdNYiSjopd)dq0uj;b)N-*Lxkpy?%>rWx1I{0ok&>icoWN)|_svQD)BCbRx007dr-x z`d1jfQv-v8MTt*3@0n$8LaY>tk)bs4^{-QzGq+5Wd{j;ANxau;A;pA5PE%3%uF-1+ zp6@QOzivH5^aAb1D8?kPaNgs)t$L4!e_aT^L_l2hY9s@z zs2Uec3%whGtJu^G!HDC2?XuG-s%IIS^E^rJHHDdYPMRz8Kz=J4K zZ=v#+ccoxiV1W!dKt{*qW%Zq1&EbwU)sn5zZVZ=rPEO9`TF%**V2o-h8UaVWw5I;V z68=PF=?4!VE-Y)Jnic}qEZgvY1##$P8d4RBu{0IFR42LeM1}xh-15u0Um$X}2e{!- zW>-@7dflacSNcuDcCuka?j07=-WEUQq*sg#3B05paO>&jxWDptAcUhnh;A8wLT2ql zq81^M=HAsORnT&{EyS-tFVSZg)9f zCO+-V>vcbjU}2m_5F*lr$0X|;y>@+*Y%Pp~4@s;evWH3rO-*_{-|#& z>8>uB<{%BA1p(NaU-eewy}LG}H(oJWl<&;zkkbwn%yV?ICeLKn(UHSXOG&M{PLUX2d(o1 zzEv^BO=wOxa|~skEyMfW0`qXa5A-$b#r^x2}=g za7AVYEdiyGg{F(!JE0MVUoh|8`6fEJWXXhnno{>h;EDLrJ&RN@L^OhRZFRGVNK}T7^;~IqA>lJSvns z>+j*KD0%$VR4pa_-yXktDk~=)y3kR2nPlJAZP4|h(BSnepWm;0uouPfz1?w{?XURe zW=Oj$D$ba%H|5&23|Ef~#YvVglcylo)OM|N+@8v}g-FZ92$!OW|ZX+thDBGlw*v z-k=e%qv5%dYKQNBUS7#P|4f(s{x!njWv87%>7S#1kB{2hSeGf&7L@n+_{P|-&$D^Y zFSU&ZbAxm&J3P;Rxe(@hf9Y$3Y6(+!V>|842yV&s$Xhog zeKoHNUVYY`S)b~jM+vBitIygGQqsj_2Jd0RqDj>uKGPItu5i%*esOV-W8z;2uUc3b zJ36eci#YF!sF6~;m*0%a@?ydJ5?#*{cXp(HyX)|NJP5Y^%{SM@^bLeN0_^-1{X*I3 z@q?Yysu%O81R6-VpHL5&T`8EbqS%o&YHmqH#;A>6$!Yva$%c+ImPT~X{XHqQVxp3? 
z?=45!U3z{q57)#j(=W_Hf?iNY+*dPJu3&rItPn?gt$k6(i)tz0vsndnEGBJllDNT(XtomK5mP1hwX;s${L}nbnoD(c?*v#qQXM z@8xsTPdmhh%x{4+$A9s7UxASFTtt8Ro|yTt%c2K-F<+;kOkiijqJ=FNqEsqF zG>tN z<=&L!iU8f>zhyo+2P@{6!JlsIR7CghVzJXImsB`?j-z4f(XPDWf@ zB0GVy$d%JGI5z_@0c|G3=G&av{_lhJ*ZWFy0=)-WKzGSQWzuGQ*+JAT$l?q8s51DNY3L*07fez9 z;jmy#Uj2Q%tRt)LTy`+N;$)V_4!3UU&%V+Hg;&7w7h|%R8Xdg=l|C=~&aA<+$e10R zsp`9>^DB04nrYe1;jEkNTr>#{xqdxro(@RoQHHT7XG6ltu?ONQj zorx4O?Q>QT84~!pq#+#k4@7~*sEo2d;fcGcsjI8sHjKzm?>VPB@4`Y`CscH~3n?j5 z{VS!^s9YVCTaULN7zhSB`_CJC15Mlj5$ierC{QOEJVsliT#3K?<8R?SAMC0H{(5~N zVs1$k_+#nMNG^h}fe800Yen?Vk~a09so*d7EoYgQ0y4l(5i%cQ28J|-Q`m6I5l}G> zqw3t#UE33cki3Z8#DA&zJbTjfyNeEL5knSj6iCSWMc%;60RQf;WMVHd{S+!lC zU8@JmEFxK>Y(f%3>C=dIoJ9#L)b!Onna;c$=AS**?0X@RYF@88m!0Jz^v2`^h?CcW zmnxZ2)&wiLXwb1Kre?_>`05~YYET`(SNmqxN#Oy=#@f5)&rC-rmCNc?M%&HIUKmV zM@Raa2g2~x)zwUcr~`;%nON(IP(bh+M}gTcEu_LEqCqEWFYsiT zK1d%Je>|&20x zMFu6j=%@(x3tQx?tRsP}jyB1-)YW1@Q{kHT-sl#!PgdjpSpnYMlFqj!i9`cIbz7*V zOF1`Zjn(1HROJmdpN0fQu#3tm>&Ze>hC+ zGstnwL)GBKBrqE4qaG~gr_Z*(Bb5(&?|C4p;DfTWt%?e9MeV)SxRA0SBSb3ANo6Mm!<(oFCQsFxiPCNR#$X{20%9-%$0Vx?YVbPZ_kXni zwTOsZ*Tz-dwkX(V<}pc_>b1Sv&O(X$!VI5*^+GtCK~F`l@6WB!Iv@8DRgA~6Hd)SC zSGgp}im2#F-(098FD2;4jT`c7YHuMgpNQ3mg`7D<1jI-s5a;6#%nucn+pA+gWPreB zHr08C>c+`YiRY#LjsqR>?>Yj_na1>o<(*&6pOiZmr0&+EU&OWKB6~z-%qetZ&}1(v zkG88KT~T5LA%4Is(4t^U%!$5QrK&N`A|d@$9Z7OOdNY8(Q;h;`jajM`|lT!*d-;HBv`SCPk$Z6lEHqb2BqD!#9idZS(XT z&C=|#<>fd^V}T8S-@{!B2Iy$Z1swtLYj2Z@J))#NCyIq)RS(n;&$l#>ZfOrmXb)*^ zj!KxF0A1Kpk{*e7==dVa5ruxZvbW*e$9Ej`wcVe_MqtoF9=8A&MmlpwRFRc7m?t|m zsll1IH3N=%WruW^^mI7YJ$c};lo6YOCdg*X682z28JGXosM-Y#a>j&^P}=caxQ}uP%iRmf6qo|glOxY1}K;tRHUe()>z<*3k zc8U`wju$Y+XI&E{Y*w;6ca3luKNTLA3C^jbdRZSkjB=EE;ma+EIh7WlLsm z9_7DnCj_5uQQOnL+y7t@Ze<_l1Q^ur&Gl?^;75b-y|kdPTC=!$^oOMEa4&d&>mAv~ z-K900_V-#3B)8nopOVq}#Hz?oDbV|{<0u;{w@4jmR^fc2WJKAjJ5f76v#cZvQKBgz z8df4?gszfK4KK$sBT*Q?4m({czb$C<`Fu3QZ-|D&&C}C! 
zW5&K^S4lF(zG8F0*{qGLmcqo!dcAqTI((iE4EUNs2I|4#=2H(~Q6>hmjRD!%1F@Y2 zcTid@y5e343*;-VE-uEAtwPGF2O{dgshPw;jXq6u66mRy=$Q;I(7fQ@tRd6W&I;l8 zEgkwWmz)Sx+CoM`7D8hdYTx~SW`QoOG&iB!5*z|n9o@7i)cq~tun?VeapKFazd!aF zAm}8w49DgO^CHXKBqE(OWm`;iJB3~lv6IC;cK#Sp7MBoVS|NgigAIIgmtWlgE^`wv z))BG?p~NIsP*~Vt{pGX1xt~LgXE|AlpwaPinZ%K@6_y-=kKGc91ix}Fsc(pk*&C^I zed&d?M68>+)pjNLXeG5H7)xz-5&6d5>XNkz7wk$XM~)X>)l95reLoMQ*vQ&`MAaP( z@u|dvUWY{a=ino}g(FOWY$K{q8E^*2IJ6MK?Q!TNG*~CB91~@5t|m^&>iS%3E8424 zUx3_trohL(1=wL04nYWi6P~nv4tW6fN2=q7MS-XDSVu8%cf`HDKKawp2MH}Ho*6{C z0WmbH#kaN-AGly1Umv14E6yu<#P8{SEIZ0&ci_Gpd;8C+v2KbTsp^XVR@>ELI0D<@ zwqahx=8q=WGQh^iFjt`xUkx4xp#(`=f?$T7;5DvkjLSEcrfh2K``e0BVNz5#+Yj9N zVGR3K`~Wb<^b7lqsL|R2%2_77yhQ9X>)5;1MbDOFp@%6I1s#bFyba6>8PLg>BH?`& zIKbLtUx=}ZMgqZ5u0Xf3Tj6Pos!A*vW+aj2l>txGvlRntKTlPd@9AWLc2_eOOz#g9 zRsvOCuu?VgoOeUt)Y>$ia)`Sd&M0;6y?stwJkbhV6vsN!vB{>qy0o~VMYU?ynQK7y z*+VR(aP9;j*f$-C4D&Pyv>$F3Fjc4Ch$$Ho%jk z2lyH>Vxs&*ZR_rDnoehP^_&z$tUS_HRR{zbV_5=)nX`)8vB&Fb#lrS)+L_S-vPkGu72odw^ld5$zmcZr z?HCaeqMtH-yY^6RY{SMSpkpRNE8=4`fS>{Kz8&wVjw0rNaOS|2n{#@?>=~nG9Z@Fs zA!0rNS$?DVs)0UTyXpr-IYkk+4}+EFrdVkzTmvJ#sO$;6E6j2-omIg^W8INXhCKoR z;p^?IyquPFk6Pu;!V2O=j3jJ=J`zx$`_i7+<wRfBm-_T16Z&8u~`EhC+7776kfTE&n1KhptOZ;P)T#dfFMrfZ~y)c#GIUi zA_$|4o=F%&lS5VM!F8ev^it|Rt~e$Nj=`h!tf?dtY6cl%NeGjE#ZP9&Q(*o|iGY!~ zteYXwC)hE7zi}>}_rr)fHvs?z=L9I--1r&+Tw76m?Nvj6S8SH5Dk{>~iWW}E1=BoK z?mkqn5)}a(Y00-r(1?A_aj<(0(r2Nl4@cL_L%(M#?-{o|K~oTFca;cwysZ98JD)v^ zFMuKxjjzfFcKt@F-m$Z#Huy&PXZEfZc=VUVtsrUod5S|MoWr7cQd*6Py*xH)Hmrac z2R)KBz5O66D$2TJqiqK87Nv0R?d8GhUM4ue3AD$H*?buA_w5Z5ZEO?v&McUl&I!hl zs8tQ3MU40OCf?qsn@=K~$Ej0O@dY-)76g*QmZBrDN zk_4j49YSZ3lyPc0)KPxP7 za9Kj{<74sWcZdXfSO#*=&+K!7!eQRn!eWnGt^r22Xg;ZazgQTB0-HDuAsHCgLIv9L^Upv6WE&zIYRYt@aDmebF z19OH);Z|E>NU+uSBLviqHOg0iExKI4MAISab4%aS9k`5M6xrzr+6CReZ{qW?kn{*i zl_9q*hLn)7+bz4gpX|}0_%h`j90DAgzcu^C&4Fc+dS29^v$akVq*eke zO?9Y4#WDGhdR@*%qh!i1Ze}eHXxl(`V^Mr5)hJwtnvr>1nNJGn0nhq7fXanM`w{6b z6wMxpKm{%_x~7(RPiVJrucz?DYlB{ozFF^@@BDZYrW>K^p=ANf<;|#~qbH)Ugvu2= 
zc2o<1y~wBL8JPF1ZI<$%#fHWbANH&W8U$ug6NUUzjDw*jD8*YhN`j6jnM6#jqu77hbz#wf=HkAo-yU$5b&=I*UrWcI#N>@3&hzE-DyETjHQI0P|Bg zJi3h;jJ*Ku270XKc8v-N5!2?rfm7Buo~A*9!e-8-v)Eewz2( zxg{l;nY<++zx>@_7O{$F3VPrGBxWNKeV^73kTZB+ZP#*|$leJldj(Pws)?UOM2G=0 z8l7(^Ho+$gs}RY*uJir+0DyFGJK&Q|WdW<^0mQS4%A9dN9X3Do7^a&hlYMS^osGyU3ZpQN}dsPjbh?^`x!t?D^Q%5!srZLnt; z!%}E@HL#xYD8RcNRKpYah`^c8ATy*$G@vOkK1DC2si=G|HN~i-4TT8;5$jv2s8HM5 zSx24^LkNMF7IFga5)8T1PLA}xsxs|K^f}WIwGR2gG2dhz+8bSoR@bq+A;7vl7TYz+ zsS){C?F65Mr9$7Y4zw3gMJ4G*SBe}px8;wm$hh-kCZ%mCeJNqQU z?5yI57B~E_ve9^ORrC38g$Z9T;5=na#UdIUhrVnUCpqcN1AS4ah8@ItNevPwoJ12TXm=9J*JhF9>~NHlv8q* zaQL>qj>4+gXcRkhreez>NgouZRSG6y=@5kQ2kfzO%vz#Q?Ow>fKmj{JAc%=iO4pQ@kG-M+H%oq%n!2D=!I$2m?Jju=I$!NSK&(n#dx zxDionPgPuaw6XNH9vd{wzQ!WJjnW4&qMMI<5~F61%_>Jzpzk4mQ(M>7>D1gU6??C} zxTmLN@#xLJ1n=Ti*>Z2okvkdyX9L*BmO<|Ap#sDm)|axlY9 zqqT?Rcj{p4CQ+tFBnZTJneeFdAWUiiQ5MPL-#;FN%-4n34zhxHdQ8}7e?Txl2 zy0J3kbzCop9DwIWKe;mVq3n#$b2Nla$DR{gM}Dn2DD}&l;)rUFf+eyUP)GkLAGUWh zsntMO@Iu1}r!@l)(f7@{bFz!Ug=)udF9bzAw z{S#}?;s`1`fr>M+Udz_XlHCX?L>rv}8R=mP*|8f@*fbmBcl-*A6zQ9z5hWuYhMzrqHV9Vy z&J5=_TS0OUPPiOeFaz`36o0Yj$I|^g=cV=P?;}e__05I1b2YNodW%&~*B*%n{+$@X zDTyFun?d}U=3;?4!nj(wYqF@88_xv)h;d4?B~GW!&$uYLV$Pd>M% zhcH&ao%FstOzM4*cRJe;`bXJQnr-qnq|GCTo5eM4BdR)&Zi!*d71@hPBzm4*IiIHd z`3B?Eb92t$It1@bP@`GVc|4xtV$)9td7oL=3WAdKwJmLVQmaZvov8g2-|}o*#ea$a z*dg3|ufV5|O^f6zM{A=bChwoONXTwf>S`pa^K?5ot11DMe3=D-71XBilGr-P{(h-; z;8EU<4ts0M*y}c|7`l@4eYe_^y;pSp;w`o=hc9wQw_*LBv5}yvZG~PjKQXr+EzW`v z1nFjAF8VA?2Xt^_qR;z-Q{;VwJ=!+c6#R@OOj7Yaj^TqGQT%MK@jux459TT&XO$ikU-qQuFhF`mr_E`OiPV{QH= zaq1P)qptgWK~s9hY3-F`ayQ9%E={3qFsho(8vXe;$}o{prlU z#=z&u--N@Ii8_>o;%#p{DA*efGRzbb&W3F5zG-}E!6n_zQRcAuq-`^J)o1Y%zijZG zI}0>J2Cl;XmIq^gBil?nMAsRhE)YpR1FGl@-dWJN6#5I`jeka zXp#H7p1k5VDtv#}${CUOE9}lN>!r;4iR8G5dG!6~i1|%|dv4O`4`)y*8A^GmeoNtJPqrqH<=!Y!+6CbfalD_0SPZA+Ut1?=9KdeC)AIJvrjxU(5%5 zUUp~kXskouV-I+gUVF$_&0Ea;`-@TP=H}*VA*pAG2_7QZ0(i%7Z88ukpH9J>$7Usp z+!mjMM(Fn-WOByqmK?h6S0DSAy)AJGK;+sL^rhEZNK%D3;J?Bz=C&NXn}tE80>;*p 
zxkyMB%1g}Aqmmh_B{j&tw^g>bjWPX;R$Z{-U#a`n4$yg= z9(v>H+bfcyluPej%B_$)q`jVw;`$UD=kA=i=OwijF74LZc2<(x@*SIx!$tMPf2*Wh zyRlKKWk|s9JXjc6{j-u8o`;xI&AxZhup|ywkKzabmSjM@yt4J`H6w$lp%%r%Z}hO3 zd<6Ds3Uh+%H$8NrJcWm_WJ~2YfJXPBR*Mr-*E21hwrHi`;LKW|${N z9hCBxGQbNgx65?7n0Cfv(0>=j*69H8LczXVw?qvH12fBRj^w&RZohLDZbkDWqyJL80+oE2>zg0)O%76ZV-a@*BIc5a$?y>t1@)P#SfzqD-`mk-y9@ae-)Fub_5Bs|hSD!*; z9H;kv-4jPwF^MXQESAy1a#2bVOmcp-{$MFd3_X6zPQM99bjw`D5I$qg;_T*>y5EOA7lrlb0Y^SVA!SkbzI(e z!FQ$uHm&#sE*IA3!HcegNr0_oyuTkTcN<+UQ&DOZtxAw>^~LojVY+zxeQB1zvC zF>q)0H|Kd`leP;(rmAu`h5&L50bCa*S=zPHw?98H!-is`%Vmj^;>V3M0ZIJ%fpt}$ zWX5{-Ee1ZFzlh*wRv*k$)@D5}ZbDGtz{8#}$fgs;rLw<7^iAIoKdJ}6GtLE1sLrzD0=zPK6}-<;=R#_bUY2YVh|KqE&@#`>ZtY#p#k2co&m62@A)1;5HX zOJk)fHvhLz#MaR&N}{9$&0WrIycz>wH2g8D;ZsOh8uC=3PcOqOgZApe+t(*UP|lLD zVlPjz8#eIy{1&YHO+Dp3odK=z&;>xL?=Gbw%5qz)gMEU;yvzxv2BTFU8~JpJq5c7n z^hN9&m5|nWpQv@Yw|$=aPOr*N5skAbEb zc}uQ6GR#u+$oNyNdW|bwj0kB@T&3X3_g&l0M}`I+8|X6jw@s;MA92|?c%JkB{z&k7 zGG=1sP|*(_<>lz`Uk~{nP#C(V^BkOqmVFM5NPpGqM6++z`}xitYo7}zoSyx6qmdLC+;Px zSkpfTsc+!x$cDd_E=-JyZ@N#CgjMhvcBX$YhWMi-NthEjEs0;x-Q^DdX)a4a#blS) zC#lGQ!VYV_Z-RiRxdc116EMc+Ks(W^noNolz#Al?H-kb$m%MJ9Sw^4;SY2Y2w;&h7 z<@C!VPn4Wg5~T}``H&@Ya!eW88OmctC;5y5zQjQudh&J1m`Y)(@>@P-dzYQ>ElGsk z$RdE!Wg@7soEQY^nb}r5uuIPk@yZND6oY^j>Ou}uw>Dgz9AIC+?roFN_*0+Iy;2&% zH*bn<1kRXJfnyf^#bs4`yINRW^XG8_lK?>RBux{FtpZ&#{xr*|N065!A}N7J`8UAE zx2upsDP@PqI|gl2!dHYqbd}W6QpB-fxYs@sUKHCV5HW$N0KQ*UhbHI#^vTG~?as_oAr6r>hA`cC zvoQQUAoCTp&T8oUd4BmfVet`q?f?3)=moGye~XgHkX6|tF>d>+ut?KA_pN@dfr;q> z7B2}(Vffmeb#pU-|KtXdKSff&N%FzJur56SBUAyVJ{TcOtu0%oklrGkb+{y|&@@ei z?K_Q-m{=TSU|b6<-X0bj6=yXm%vLX~!O|L9ED9uAhg|k?i0GPBBn;$J`)5U-{^-R0 zcSvp%a`4H>GYb+&9BML_87>eHa$FMDZeMh;(09dQ{TF|Ztb?Ma*}q|y0ox0o)tVhF zeU=+zSvl>tpVclxLz1teQNZO*X^%J&JuQ%ur7y>?OvKO@BJeWduE-o5`6*|7)QTzY zA@{@IX&cP2I}lbb{WtUh!Mz$2EyABmU%UtE%lY@;fA62k=}JAFgs4N%Sd>!PR}>v=V=?oxnWC&OxDERz9BZNSF-0sPq57*&|#wxc>vJsQ+5cF*N_bWekTY zQ;8#jNRRx7uDv``qHyP^;*rz}4XX0OB`vbKh6=s3 zi0reN&pa@fj=U!~hM#xEjp&L>+=0NXN#-fD`N>5uCF&_OWuGjU^*WCS=N>E))@jmg 
zY7$##6!!MI?DAyK*)vB8*&oqC)dHc>_Qi7X+)^oWK#^{&vw!e8!ZJDQ=5pN-RyY~v zuwcwP>rzrsU|UMs!qn?MT!c({QYnQnZnm5hrEglxB%G#0rnF}|ua?Bm+gJUvy=!wv zXnO$+w%MeFW`M{BqsCU(UirYVDSS{jF#pGS7rJpR%oEm~V9&E;7-HL+2Hi>{Nw9OAsEqGE#)9U}PFP?+$Ho;IC8nD>z@ifs7new~Y>%fkK$tQkp8;?(fPT z`%m_SWE#c~;+qjxPa!BB(|77h?ONpKU3HKidf5e2kn{>Hll|yCf`tr#3;eDn5SUkD z+6-K>E=QXmgoe7G5pt9f1kyhbc|Luho+b4YUC2=NSK_j^J+?=7Ca|mm@n{AHl>jRo z3!8N!CTtLLCs-#nri4YJ`03};dk-9te`MST=qpbq$!R9*(4s!nx^kYTA?!K+-vp-~ zs@i2>t&~WSv#!zMb3VLw4HnWJI{WwUC-_U9U#~cL4x9VX@0OnJ0zTdGw2z6u*CUrF=YOa+A4}Cw%Zu2Io zpG24<;yA2*QuU;&92a%DKjSo`7G-sl3!=!(OYinp7asL+dMR1$cNC&864sn6gN z#9Vq+d6s2oSFMv5&HsxFHzVfy=*1Onm98B+CfET`M`eH_0&-hZu#s%C(5n`vQLCr- zaL;N6SPjEB2~Elx7r?~?-!<=qj6RKgcudTu<7CkxV2PD#gFyJjHob0x@Q|6UeLYm{*664zz;Ih;ppeo)_;BC9~WCw zQemTF+h-YoqYOZa$v_Z6PN2niR=dRkIXk)(AtVvNH)-c~*Wq*UjBB%)&GiZjrl^2{ zoUf+jlB0eQ=T4#x3}7oN$xd{^ENWspzRIw4$Xn4i{@f41Jl+Q9uKlI9!pUQ66S|R%%8+zQgYU*vPZQs=#C?QMXa64wa(MFt_Z8-e3WIW(bGT8vkl2D#WF8Pt%mn|_)D2bB~W#5 z5f;3kuf22U)od>QY3gFwB@{s;>S$NK`N>3naLD(IdqQ@+eb5k8A{ABYYA_UXKeNa<_e+MYe`)?@s3t-kW)|hTW;Cvyd5x1kt?M*X6JpY zDdHkC=M2q6q+b z+GlVXyV57nm-jqhW>0vzRj&{O$$+*d%tgkH&yTuW5M?HWv`q#B1RS^(^KBoXBRuzK zo<ICnzriD`2@!)HkN^6q4m!y_c&<>M2vsQpHDnHzo$Jq|y63U-6*H z+!Ana5dqnQCqgYjj|;s7!ZQ)l$Ck@7`c4J5FhOGM0pIeVqk)aM+|SUOyO^!}gEbuh`Io$>ZIRJk|a-}39bIYd4ZTVn&v3ZZAl`;VU`^XslC zQ@>n=p-tYDiY|GBErkVqhw1k!$|uY_5sl)t8!sXjKZsp+dZLg&_Y+~EK2N@iHT348 zUA4C#KbG1k_w8nEtt_frgHVI}LW4jn#Vtec$zTtJ357`65X#QEQZkgLByx{$I&M*7 zA5+1|>V0t&d?istjW4xm4872%J7|0=c3Y!v|67OrBB(f{#KMSX#0n=p>s*BXy=!H; zocEB<0~Tf`{8AMaZ?P4Uk(8N5NbF*Mn4eX_LeP5iBl%n9)+#cs1x3^QBF7HB3?&Lr z?7&7x$Q;-<}Mi`jIraz5$E?BhaEFK)`ou+h3R&YyPGZ+mPJb{DX+kIdS$xEay z&Akj%)n^+n`N=65Jj2N%Kuo_W47I$nJqAa2SYsW4VQLS!=+!DN=HhRnzyxdy-I4jR ziID0x>r9_UelFmWReiq|9*p#cu+&|c#r{5@bI|&qAB;_0kpN325#x1ohC+)DWe6ng znyV1;JVa$q@V;z*dW!R`$=Tc6M*9MgE19BDSXbL3) zE;q!JFNKY~6nsIhWbHHZ4wSG!_RXE@M>Fv%J3WS4Ny78dotXeP?}#+s8CMzL)NS+g z;Te|?uZKa|5PQ=c9%m?cS=zo_qjpKp*nZd*(PTaf;)7@_KJ(me1F8rFtAQp9jrAZr 
z*v}IMKO`<5`WBcC1}U)67s?-EZ5o#C$a<)w{rmd$xK9{iUV#}+$X59T^XtNf7UM98 zIHZ8ZgpcO&1$inLC7P(#gr1h3WaQ*#B_$8le+8Bh2F zE7}@{KG2Q?#ET#9g@(LVc}vBn4*)FlA(@n+|Ho{~DS;{?dvuzXGfD*frh4#FG{K#Kq69d1 zLa|h-`|@Q}bAQ%2JCY1!plGlymZp8rhKn`){U$Pu1&dB)+24nQ)lG*m&v!KnMR z@eM+Y9*3m`{4i>29p%xL43`fAG!(PBK9!$8g>XVL@Yl$?rnrh!dnkjKM$k(t&5Glx z`@TwF$J?ZWlwhPdczF@5!*0OGV~6Z&>D}A@uZ#=d-`z}erO8Uwo_Agc75zc=$_4lf zRG_(1;uR}=DZ{3P5u9$j&4Y4g!0VQLRiN@(LX87`7g1bp3FDt84#CgT?P!=&1Pm6V zNAb|b`S~sK8)xG6NlPC2`209>P5e-s!Qj*afvmDX$-A1J02>3ixQaZ%K%#F)uGgjz~TSeV`|i zlcNI^0#nqABK+LHDsq}A-6WwsBMWjTt;CW7kIk?OV8To!>O<$7JfwPPJ(NXUFUK|2 zDTjWovy%$qi4-HivHj4=asnQc?QYhya;WM5@CjKrGfe@+rL2vMi|?N8mITSUB4=r& zbL7QH%Hkdg^lCg$DC_Dlq}bB#Fw zWJw5I6&kMsw_F?r)JTcT=zFYf2I+9Y1T>ADt^JI)h0kdhEc~6CLi9WU$J`T9vK$#{ zGGZ*BJ{NV{WLPlwe?zKb2@55WtO1D~_XbBP_oL zWT+y{E>{~Ghe8dqS1CMm_>r}l2a%%eh&^te-hfqQWt!ctCq`3I(f`Iq5rIfr*e}Em z9l*}Xz>8fWLw%K%@sz0!bpc^F)*|_i9f@Jz0Qp2R00V;1Zu>GX0n@F-9WfGlD)|OS z;O@{}PZk-@3TtnNmlkYUnRBgkqliBW(yto=h~SF+UuasfC7i2rc@lvmRDeda_|WNn zl$k{j;FHvW-$58q{O$86A-Ou)=hh*@hfyrz-<#`bGBFbc(Jjy2R{3+~j(wd3Th4vL zY&jK`M3gkLu*!KMmQ0oUESeUG-KRji!N?z1`#JYrOa?`Qe%+%S(Y-2f2I0KQcY=8? 
z7~4@=@e_ap$T*vV(Bs>uWtDn8wigBoPxdz4`82xh4&hm8&jWv>p2#|Hx$ww}O@+_q zM3uevNudI=R;+#TFKX|0eJGH8rjzSMBsh5dmE9w>Qn2l@5UqS;Q#@-}Jjf6}k4scQ zlux6ZOM1S7X34c05_W=joip|ytZzp`JfpV;Y(4W?`pfNRrI;FvkbvC|U8$m8?u6R2 z>0J4+%oiEi;Bs22FC;{S8zSs?hLD=`oaPVheM>ygLmC2rPY|0xlm3xxqm*K!0JE=} zYclS1U9YGlpp6dXue7#&?z!pv4><9T`p;cAnF&-`H&^C9$ZSZK9=bKYlX@RJ2Dt7z z><}`AlPAc$m6E4;E7CJs zuK|*AA%Sm5>z$ubWfSl$$K;W(Vagg|hgFNx|8AcCEaf(**`#-QC^OwwulHJ(#haQ zvRv8<_)rQSlUYwo_7bJ!ZHBnFhd)J^-Zs}H4>zwY9J!%Ti+69^cDcdWA%>%oeP8n^1kF>?j%>?J1*(3Eh>Hm~e1K-*$G5s@=o!Bx zu6Bz6Wr|uxA$rERR($DFlSX@o!l6gz$s=~oQ*kde)VMMB=FPV5lE~=j=*)&DlMoXW zB+6!>kYf^x8Qd-{-fsH6fd>UZT3U}y3Z*NjpQ9`bmAw7mRVJdIVx9Ls1%nW1RV{tf zE@E8i7H#5rYh2=e;x!?IS)*Crd3-kZ(>!plM)(iAdJW8!OHe#mNg?WYRrW$Z#g9b{ zeNW}57~$4G2TBwlp~mhY;37H8BgYIkSN6EI#6l6nMK*)dLx@jeo6p==J0g#27j|ko zPrG5)Qj>|jD%23yRgP$In1m`yDtkOr`{&28@e}*v^uA>8NBBR?hVb3ATks}4{2%O& ziOesKBS2L~Zj^}uikCnyf`;?#DP{VDan3%wGR2uZk>VWJ@)p~vnP=1*kalv~0SDg5 zCe;tpDeWu^jp<_v>9(YHa3=C$Iyt;o(8<;v01P*)G~Y|^oosZ zO{?;0x50FjE{qwv2+_mbMhd~hUqU-FG{3lLhp;L4@kaTSHo_ni_*clO)0}iqQ9(bA z>Iz|$A)srviU3uyafjExBfw$?|I%IWjcdC>gLCPBjbd6mC@}}KhBm-Dl$<3ZIDam8 zk`7SgGjF4qL~GZs8Au-+e~%4!Dg0tom5+dzStPfWXe57It}bWL+p&Aw?&TF#ggGRPDiBl zKHsUsqxVK$o_u$)3V_ycrn_{2%GbBdk9vWu=JKap(7nnH%trMENl7nWQME)M*#rrK z1eiAJ83K%eC9Z$@1>3P^dIY!|lIzP}U+eUPDdP&mlN)GEISa8wWnx67er$-?kH->?LhB8Vz4Bs4(C(x@>uoJ-_JYCQbf2ry5OF;6d3BuG~vY?q~!c!-d6 z2I|zZzqV09#5|{R6tns_gc@e}X97g}Kex#%i^HNtsK~4MSDosJP{+JuRLAbGVE08H zRTAZRA`b-%)Xe{nt(<&?8p4r57&mq!PgrC@?s~wga6&k;cTDv}9{hExSSDMC$~=ps zBI+nBNg&i2@^&D`F=(qV)7rKzN37NtHM~G*L}e}=`f8<=@aRWju;q5fS|gh=eQ-(dVnr`Zj?_CXz{9Fp?Tl*e2l z8Eu3J7XzM{g-xQ4iW%WyE}0NDQs!&m$r)X>$6+k4NV5at*Z_8%Y{saGg0m3A)~M%V z49=L3um~v|V!DY0eaWbhfd#?f`@zQZ*QZF~xpU_d+Vgrjfp;k$d9BNz$)DjF%H@R+ zr9O}_blq?sPthN$O=QYn8d?ApmZXICf<^TK7V9nkT{mA9*{Addk|4~1Rg#vW2e*x^ z9@z|Q?5s$9zniBt6x8SJ5xiiVFqVwnwYwM`2{|vBuqeiD%lv?-052IQ=?&ADA4tRl zNVm07wt6-pZ_^u4M*L)>dtc!^M@Kt$}sD(=m$Lch7em|haH^YaU${iV7i z8%GN91Lm@_{VAE^NFX|;*~BBm)#nEe-H7*O!P0Xfh+&%oQ?funUIb94-xm==d#90( 
zYkC79!o*ljRJ#ayJSi@ROzjdPZ6cB$i{i}yP7te$pTHcB2KRk^`vA4<##a!?T6x?7 zephR$GdynYv6Fs zK6|gd*0;Vg=lu4qv9cvi0~A}Nx-1Ea4v*dLe6vn5aX<(G2}zm{iM5nPq7)W`Mn;O{ zJ^-qc22_B`vm210JVq?CZ`DYy8hx=LilH82f{OQCY@G4jq1;xu?&hR=q7fffes?h9 z$Tp;fA3gJqL9NU+o()-|^59M?%}Ew(POI$BwWxwA%d@Nm55F2oYd0DXsztyRBa?+l ztHsLCqR>sYHB-9r$*Z+SD@PB9hrbvN2f~vz=aR6WT5A7ye{{)F58|>S^4B}xq=SpH zMt~%4H_$iH?de2RFDUu2_39BbPJ8EXPWp!<<$$0Efpmmb@vN2Fo`21ah#;6HA|xv5 zC=WDT+qHf%Xq}Y~lSWtHk8B0#rvVkliP}17>+OB(a@2|F5>e7rsr9k51*QCCM;lF9 zlFcWnU(lIe{%8E?J@xfI#LK!V{T<*<1s{wAZTO}yRhLM!8R_t;ga|&FN2&!NJsM=2H$TP^wR9{$L3dzBF#P}^4q5IkG9bNNm^eCHon zcnVZj-HeA-x2Xy-94wRQE<{8G9_&Mw)Rp?&BMexr&4rz|#-EH(EEdA!zv^MZ0^3+f zEC~}6i*hFp4q_`-kW|fAiY;tk&AdK#Wh=$(C6!zT@X+_Tsa6P3fDySaF9_PUd(3q1 z`b!oUBR@zqF$7JLJ)Zto{f}vLtI!`J0`fkY|HLG-PXG93?fW?%8)tLN2O(+w-Epzx zn@hX)_gXdD?fR38{)~nI&UJn1m|{(Kng0~u(N}BliXGHGwV2EBW`jrwT;-tp$+vHK zF@efeh`VpKbAj;t&cWVs`QOOgJa_kC%71%4<~IbKTZ%;Dcm3l zJj=Pau>61U)NF;0CfTg|cY$7S`b7rS>WYM%OG+lG!4M{hSbyT?gX&biYp{pWLQ zg~~e_)%=s5*YDaA z5{#7b;s`|4FYC2Ss*Xh(Q&ffYGD0HM0%_ggJ>N7}9=wv}USMR+0yM2&S;bb|VaYd(EP_~8>o(7j41cN zPut}{9IFISjSA+Nl->|4Q}Tj;%*$)OJGQJ@H2; zgdSZJ8t!)jU=`hVA0zK1JT@A1zvR)WTON(ozsAabLCGYQ3Sn@kqrN|-CO`KA1J?i@ z$250cZ7RZvnTtG6{WruWp^kuxzy0>x`aEkAsQ55%^!U7(OH=ObPO{as!647xSanJ= zr3rh-*S-j(K~e95o0nq7$%7(Ja>xl>89hD~G-Xv1`m>Jm!TxRTtn}?34 zy*)k<2M$|ka!(zFe35x;0qv+$DE17(%Apxdii44PT*(RTY=I}6i6VBamUYOI=~C2|97#5_ zH3v5(oNXir%{-J_J6^@7Kc#+fB%~tsWO2yc^e&JP-pGU|YP#+}5th{9bY^{0c^gs= zHHW#Vd1NYJS`r$nR4(Y}G-U{>fvt0TmD&kp=_;BIV!2?qUOAQv`CD;-*CBfvbWt$f zR3Gna_ZdHad^L&Eq-`xFAah7qP)!-MXji0|bRCUrxBv`<6u{eaM50`2$&EKn-C#tXUZq@)dvBPE4+3UcUwbJKS!1eIN&|1*|OoNPvQMYsp9=7k-{{WU%7bkv46iFIEw3i zOv>Y<==_I-LQ&HD_p?(WeZ3DLXz_G*Xx5m^H$yzHjf>CCW4Rra$s^9e3l~p`0Y-0V ztf*rLzcRO(2)3&V@v<=eLkvEhS+5N`0Kf9SQfdNEq+Bw68mL;Btr{)0$MaqgH{X(< zsiEf3NKFqW@~wTZ`#{4R=?QZ99A2+DA_YM|O&2cTY^j8ACd=NFC;5XhVj(8jnk7=W z8oNob5u&vd>D->D9X(GZ4-B+=q106Da?QAzAP5wbU)lapSUAwm-GQbB0oc|3Mo%O~ zZJLCL%+-lw%)4jK;T?}&6{e&><60cp4$qvk*~?>6G33I+!d|=7+1}e9RsnqN`uE>| 
zd**O)sJl0TFHHME;djwX)UL!*(gu3SuNx%fkR>I}<}KKd%=$qQG^ueMy^MwEhOUsr z7m{nj!qYAWwGyJ1x)&VvG62|#^oiB&ymmb@x=?i%Fmck2_AnP6eWpwUEX)T;UK1^^ z=;xxKRFm6SD61AZB(fvwF?#TMG=wo@sLV@1)d#OW zY2+C}0~EyofS`?uaRF3siD9$1guyPRYZ4WWQ5KIZPY8Oj_i@)iXNusDKMu5vjyCP* z?YUn6-D^`))MqlE@CYJ7m)RpTRUKtnv@>gFnBSBxgf#aIL_j7J_@)2Y&`dLx3TXsp zp>!Fc&lxVRl}e#c&^|c@;1zVy@yWIC@#q`!h1PoZMrDkqK2s2aJ*P>HGP?X_Uh zG8*2wp6d!LE8&U+8wes{6^_a!$-aYIxKAN$&TqYj{GoOA`2$qILV*)Hmw)CI_|!dHX<8j)6Es zG4XY*AeXpAV-E3mX=6nVkLu9WFRAoGRiLh^TAg#M{`hA$UxLr9pGVTg9w*}$L!er4 z6Ow5nc3dR(3~l~Xr2$UNx&_3SzYPUh>~)X!e+)VLcKIRc(CyJSYPIGN`l9}046dKZ z)OQ=XRP7aasAGZ|YNBF5?F*Rx0EBEImM*+2yHo^r#~Xv3lJ%WaH*Yf646Q38r6fWC zA^~d+lKh$+E@6oMe)M#>u>Ij)l2ywfq&AsCQs*H}MksU>H!BrYWnJU}qJ&SWVY4Md zc|KSTuaS4CiuCC>W)z>XU-MgYL5Ld4VF-SOD@M$0DE1^4wb9f>zE2OE8ftk0@ONmB zq11(&=;%zIJrq?#XDnT|%mpAuVj!5w{XE)WOyT`8jthf~$9_B4fNC7qpzJdD=xZFU zZNpe3%Zs|Lk-&zp;C|HGKN_J(NIfS+K$7>I8k z=N(rQascxo>uerek!6~(tHw?nJ);?qW#gwa@jZFwKUSZ9Sfe@m#{YC96QH&ycEOmE z<74WtN{`Ok@+>&GA$08FjD>QEH@|Sn0|`CmDoQeytNMr#cbSoYf3x9qbjl1JFvB)9 zV208nclT<+6eT}upy`!|D#2Y#E)ywJ=X=vNPN?1{R0)0wM6M;(Ny)q7%8W{Q4us>#!T#)eZ=lv>u_8I1FHL6N82CZd}xrU5{c)6Ceswd-%VjA7&=z%eq#mtVQML z_K^50NrudAOpGUF2&!|`afnVNwsNIA+?$_oZGRZZE<>fC7T2}h(Tj&?IaCAODEqsA zXUM(4-;5>g>6yRV zd~;)9g^pRsAAhV$?uZJLEamorsmurdiZYJ4@b>I`m-M|l*H_%wo`zl=bHKniPUq%~ zu;Gjxu(=miBde!gFrnb#t54FcnIhF73+24qG*xkVB7{z6l3DN(6T(|z=<@vOP`nb5SaQ$tWvB*KfMY~$`>3fepu2jQUdr-M z0WYsoPw7#+mcnII)j$SyW(_%W$SJKNW!-b|!=n$WJbIP0UIiSiyX`ikhwUxcrvzQ2^qtf7o#Q1J ztX41XbV?mPDLESwK`yCZ7m1!scq+N(=T1}B-pBjS=N`}GvIG5@L8%hzs7y?GlKM4` z4Kx7c$`v{J&F%tnl-DEb6vu)Z_1i)ya0{$fdwp>RJRec52v)a-Jai#8g(n5euG_RS1wsG^OJxkU$RQjhN493>S}v9~Ot=(-4$d zNJeyh^(kx=2$Yy=GSu1D4YVn+Blq*4G?7-YKP_Rx;Et2S; zB;*UMub{;zFWi;#u|sLmqgQ165}Md$*aY-&7lj*Q9d`1QB_^pYs-}k6Lx~1B!JT(j zFC;1;rKku{7%&N*BA9ay!{Tf65>QPShBX=#^q`28-qx;=K^<$6iS zHnw8jA{Wmo26pcnV%wh`stz@14zy?%-{|J}Olgn%Y&Z%8jzm8X9JKx4Oy|35(6!=iQJNa4oV`J5#r2d2scfJ&BJ>%E$^H;=m zUhO*d>e9i^vnG{twr8{Q?(2EDndZAVSCvaPA1$w!Uw!t2yw%x;IZ`UIaTReRrn68n 
zCAlDcyt}^su4!zZb&Z7b+)F{nfSYQ89&#$vcw%2gws?n> zp5D`3ZH)_sh6Qhzorxk>@{wIvrFDqoUeYM<5S=}yM(uwpI4{9T%B*ah>Tgm zor!K&v`iaj;6MF+1wK;JTh7)NH@RfYlrO@2pPM{)*}Qb7q}Y~@OKt2a_0hpbyZM{` zY7=zHJt32E+arCA*rZzdyG17BS+wb8hYI$)G_W0>>Oc9?+f}kyA*fSg#F|wk$I}sv z6Qo1|r2oqRvF{PB(m&?2&fL4#0X?(zKETp+OhGa&t|jDY%EB!(k@k+gYyPxa z%HfJ5B9!`g_em$#7xa$YM1{st32EZ1dmGE=pU18!)#Uj5XnV9{VG_ zBH2?eWg9*l>Uj6{Oxq|~j$(j2CvW_h0YMrFYr%V?#UZfr)u%IHD%ssK(4-n$o{U9P zVm5~A#g~6w4O3|~ARl9tKuKaW4UE`W+kkDkaKoj$zg(j98RBpYt7K$=Qy~~0_|!}i zX%+KF5Ek&Ybhx5V^W}Z-*X!f_VE+J{|1`mgscIKqSD;@Lf+~~G>j?J{zZP&lFTNGR zO)2bw*YV~?n>K~;Xdwj&A&wKwNk2LM+esI|lbPnz{oEjstts(*H(V!j<<95LS$F#V z*+C;f8e&lOI(y0B5VB=uP!%i8k9U7V>`p2V-n}&Y;6-W8EjyA&deK|gQ@S7UxGe;X z5sn@o4&B9)AEpVK|M}h6O^Pc zuwo1azCQmqvtsh%M`o6{&4+`J1`7ubJl_FoX7YIs5>FH;6|7%`v~)%ld9@62167LN z*O3G=mqsg2YR(>|YbN=_SzxV*Lp=t4h85eymWWLi=~W;*&@b=rEh||+(&=1q0Xaa7 z(GeJ``^Hq!oyZq4cBcICvRG4iTyq>*Kn_(?RnLdrS4+TBl12-$A+B86sizZ7(W*8b zX>7GxnLvmcrcU!inh-=?9{U>8roU>PsLQi*$UFHl4Qqw!M<$B4I^8Fq%;w#A9S5p7 zv_U?Unud_j-^BF>P^4qHPsLCl|0Owz@zwT_8SXB>kk$>ylC9&>`-JTh|PL>iU7MlcqDjza4i?3YVz77 zrN7Qaa;J~B9e+m+I(;%;8v01`#ZS9=%Qk^U!#ffgbW~`buQqw z0@P|TsCu#>WI^PDn$>fXB=d0BByF03nrpO~;8Y%aQ7OBF9qjTw|GIG?5#hZKM!^qR z1Ngh`VGdclOMz#N-)P`qlKv3uROMO$z3DS&Hho5?KB1U@ zwGJEpUOPG);XpT!>^WyhLxhWVK2Awnwjk9h*u7v1$PL-C@DE zcunT9_g}Q-=KM*w@~<|J*amp6ZWnTMaG;#c^ShEuDK#7mY5pp(`4%v_%EVT>1;rOauoLi+dYtHijzF!^ zG~Flg{JCPvznbwyQJ7bqT)8-FnZ(H>%|bY1)V*gMD?|zrOE2Cx5cw{Op4Zx%-;HB(90?(Z%yv zfJTM?@+LGu!;{EzyrMRh`!>CZl1Cx8IVYHDBX}i?+eSd|xjy(Mwt}++ZB2{JM~)>K zP-fVxJLFdEdc$5oy+4n`-fo%9=Vd-}8(Xj$`DncTu8*`QndJ}i{_^W3M?XwFA%J5p z9L@Du%yd2tsh1b9?S41fU@oQvw(pFdS3B$iEwl#5b(9=3J#oBkrg+45&Q3JqF39x( zQ{=erksIWc_l$DNU;ae-4&JXbd&Gp`S#X6AU_x6u?6`k^?+hSL4i}?5ww%D(jL-on z+sx0{vx5)CtSAc85d^%2%+^PQC1>gi7Lz3;^e_?=>A*?yd963kYFC2Q+WD_Hy82h? 
zOjVealVyOeFydZ>ZfdTeZf79mfC+^>kdu;Nh>H=#a?>bSW%|=%TXRFHM%0IoXY0QY z)2IJb3FTRXW=GxBn6w1aEeU7#V?(C)h>XZ9Z5vQs(B}ud$aT$Pp37&xj_S_Y>+V&I znnAzOoL^Iht&Hh&N|2!h{!6_9l#e5M@7TW9Y&|&Lu2yS#F3b7|c3D!V^`OTv7$rK= z2^C~@h98&z^S$ZiMYcV!eA0GXD4AQ~snu(N-_O;}(JeeU9lh%`&V0`vN zK8aBkBOf|*p0NX=@o->kBE8{JiwT+U1 zZJi*BQ^oJ>_+`N$Qw4aBT*(Ww1W z%_*aa4-8mFm=ZjAgK~no`}CI8R+;(qY7)Dd5yb-knGX`RSv_(pvMrmKIzIfrh5~%u z>4!9Cxg&>4aq5isRy;xpbfJjq>&nfjS2Hl?swVtRe&h{ZSKZ|ZB$@(Zfj!L?#NeDL zKZ_^R?(5tb{si%}B39gn-_Rh*ucDnu{nTmBOWqDOILe4^LK~egDg*Xeb z9Ic8okb6Vv(zP`-GI#w_M8ZYwK~u~qgbahG`|c1-<~RYwAV;Q6z9Yu$`cY*W!QGK{_pF&qgvpql+SI~;!$i`ejr(KXumJ}+%>ViX+$-Q$uwL!v%b8_Ha`|6 zm+R&s_wNgn?cHKi3Fv4<=)`&JpX{mpk>`Cy4mW?3ePR&qd4q23${mxp&7#RZA zM1~^!R1Of}8?okikZ>c$NKjKsrFrK2{1V6VY?Wt{PumehjaVp@#}fLBwEu#Q-~c+4 zGn&xnMyS{?pGx%jDQ^rQO=ByI$Nv2^a`=J@UXJ!l1K^WAS&O`TPWw;quaKM*Q3KrN zH7qo%soX(GJ!TEAw}TBL(B&p1>%Hm;&xD?)rjK&R@U%KJ4&uX`qbEPSmYfCog)jATffI9 zKaC{ZXa+LP7l?=mf_j(*8zm%NFpMV3r;suafn!Ov05TEk89(w&Jy~GC$FzE}RQgM` z9o1taZKtr>%a+zx3~2RcxauT}uQe{8QhBG_>tp#ti!V(t6LOQvOD)`eyjmkEMW(r? 
z`u@ik{u?LMD+K2zH9D7O6kL0hZeKno^+7LGjyGEtC2jt3v+0Kx^>ldhPK)hsPhDCR zk{K^G<8f@)XJ}y)dEk_WX=g%4owRROK2px!mi& zc=X!}0G~k0{4t$#Jk6UaUmSle%uF!Yq^YUNPhul}ZtIuGqdOiH3ftC*K!WhIgTpz9&Uk#V;W4*tu34#6;3~12s~o1{(1b*K%%@)jdnUQ&^?yNet%Zpc|%tx)?_#N+T6_c*;t`38v2$w z6ybHwYPI+GpI2ag_tBUi*92nY%|M&@>8QoI5RL>KQHwfHNL*6h0Wt3ufM}~==Z&At z2^!d_eTzV?q)3Gw`S_F0@7C;txu=k>SWhu``jbZf3qM{kZPKbuU)r2E-DMAgvUJ4gi;~H{O^Ue$PY2niB_dm`1&G)L$P^(;~a=zq7R{QJ_uo&LMOfp?q zNHv1c6kFDrq45Rvu^=951F~YK06?d+d+pd^dvZf*?JU!H&z4#AqK{%sG6$g<_MWT{ z{pz{{32kO_W8SCY4)x%LVGQBZi`okOXJEwy?C|yP3^we3_RkRQ1p*rD``O@?5Oy|$ z3Q$1&DtEY1UZNP718QAF4ot;E_7nx^JRU5bghz&|JZ+_3@`8sAfM|Y?`u6EyV(_CJ zujvlgea$q5$=}<}LeT2l{y9=8;C<}|&ZALOUA%blv-Y1tF5>QgrH&9Q&_9t@XHqZK zxo1lr-$(M=klmR^T=}b4uO3J^dx8kmR4m4t)s)3$P$RLUat0)1MHnhtk6r)_G`LUW zCkJLM=*L#6oLuznU2bBHWjVCD%&8ooIy-7!l5G#8!j%- z1wAF`N6~c&)soxO-$7H55g!4yYf(n1D2kSw6gnht zVW|UVMiNk&@yV!CQS+*~Xp}taC{PxfG}RD_4~+J+7P@7m2_p9_)Uoi6u5^O;ZThk*WV_r9%7#KzE*n&dyE60ucKE)HbSVh!lv*U|k zyt^6<6iOE~&qG4AD2n>BuQh)CVgVObQQxZ=r58j^4mKiwHj+=GmL`CLM~1@dA~5E) z#L=4Ugo?YYh$Q#3cu;L(L|M%lUdy>4NnSjGTk$JYL*NMh@y8#}Dh}=5&9<8geSo_S z<)iL_2hO@Mu%M##ZxCd9G$aXHoP`Ix}nDM+XVXWRrfe8hM1@a&eNd4uvkX}Hug z6FU{+K4-hNkP%=EF4*f4O(ZP951r+NJ}!*%I&x0U(Z^*I)zR9qp)kZ771Z^2rRafR zWct^Sv#gB`n`kv**u88=hKjf>(8m!g5la&-kLt@K;WB>8n#uaiT^<2o~zf z6JW-ewpI|0a&&N@E<>np-G^I7D#Zsiy?~7q3e&o3nvVmiY`y*KJW1bDziUl9 zWXwh$1Mb*`<3J^6dKUo_Wf-b7`k@@2Md=1E#}aP5Bc27^Ih5RanE!k8@Nxa$X#8(9 zcohG)Qijcf|Bc45!T8_Y7(N*P8;$>ej>eOzrO^+~sJ%WW?ZLLC zkdQxdu=9!0EvKN+Iswr6O?TdjSaNgO;_UdC05K@(o&?^Y6piO6SI5;xA1(LTzImtT z>veAO@f%SvF{%pBSX(Sp|M<~amvPh9XJ}c3I751PV-m|<`o0zarQ=^KTQ1p6K#i~5?LH_PLd8;pCE_@~p2b;^jdCtheGk({L zz`jnqFh?c99D(oaU(BBoQ#s)Hu`)#(GRQLkOn7*(bLK21+=yJYnU4`)WmkGH&^je~ z$`6HcO-*P|F|VpMTOFN!iUeo()<`9!{yxsDx~E<}W2IeObVbr5&zIsWVttLuY&v^x zum14VO7DnHpKb!W|HfxbQ!#cLzoJVhD)XjAa!*pRI%M|ER7sn$=uM%k8o8VL;PJm_2t77XG0FsHS_XStJ)D5hpYEEPy|J@ zp10?nf{oO~Ba%8-ID)kEi#P;|j0C4GLJyC%Cp909$;e!h^yJWay(7>_J_+QivPaQU zIQkdUGMf@3EtVa8`N!;%a*IyD*!gB&O6YA;0fy^G@spZQt4=lfeQF}WgGT&`83V(m 
zU*7w0YVADRnC$l;zQj;2EBZ#-D-+py>isQ&VquvB7U6#-jmfgMEa_J2B@g-M@iDC4rj|%m+nMu9&$s})IK4U4QmVe9Bdt-d z?wP9IpC^CNeQ9$|mq8BAoH0EOkd8THCrXR?_x-Y6HtEZ!V3z6?C+Ep2(x~^(Qx)`@ zJ2|EAR8bNy@3bwu93oRroh|EmA7WY@X)z+bdojbr;mi>>t2E#{zp6`mwzKn=*`(X7 zd1}-5>7hbej!s-$MgOo_wvC~?CZ!J6A`CRm>C_>*6vl z#bb*53hOoX72wVmds%C_2&`SdqObpgxJ~XC%4@HXt9mKjJKi9paI)6CVuiCdNvsS( zX?an>b?Z*r^>o}mQ~6+PaoeoFMLO554xMYJCL8}bU%EawvP?;$EaQdk#`@tk+{^n0 z*^Q^S4MQru=qc-S_fS~7Wo;Xu1fjYnI$i01*Qbygzr23#bP?9)zOjC0rMcoY{^Iz{ z#3niET|MpF@Z-GjJl>YVN}H$+;t>)`UL|j5m(0k}&c}c9RQfH?SD&)5ZOlDq{7!G8 z)6+Qj?aJoE6CK+a)QDox!eMYEL$9o*5p1oUbaA13cML~f*juCYjQnQMf}!{O8L2Cb zft5VYtqvum)0HxT^0S45=+aM}=(d$m*n0;_>!QtEzd)Gt1uy-T9j^2X0x(l_N)QIC zu3wY#s`I(jh!m4~Ntx4Etvg*R-hqP+{2-s)nWWZk3;46IIfIG%r$lDY&a3cJDpf1G z#AcT*j_W*{D5ZFR+?7?SqF0_gx%a(-(~}AM(ZAWdZVK_gZhdIxdGVX}^Bt!y6h0j8 zDiR`;`IhH(EzvTju{*Qc z@Uv6(eZ3V^MaG-PdO59eUuTiGn58x96N>MEqXkxGF}2GPyE!XfdkTY&tjf_difK;& z=lzN{Q<(?$0wBPuQ+KWtjC^`ttzci`>^st)mjAqwr+}9snf(k%pU9+J+bjhz2LRKI zUu>aFuc%+`!To$a>V9Q3n=W8-@?Oe*AnP6-eX;KcOis=#G^&H|ePF<{u!wT!QHx5F zVSCZ!NtRUar9ZwX=3G=kt+>H1gMs;A$o$P2;s^3W$+edSFJ8|MYD!qLw!g!mo28-Q z*Oq!GzbxD&xNqQ*?v6VtM(*kUv#p+Q&W~@NA71xsP}L0s!Ic)Bv#sAN-M1+V&u{lz znV$Yga0RRA*GrxP=O@|d1ow9@4{vH|zFuD1(c!0;xu-Vm;v)eENp^dbLngboZNP7` zqnqBN4F6gI_rl5A)AoqYE{f>TIK7t3)ZXLnK5yBV|bl+TzFJ{T3J%f70bvr!xAI*>-sNw*X3l8vP7o_ z41w5ne^K!WFFeW3w*g{V-aQuSqo1)_zZvkYrT&!z%c7Ur9CtUG*#Ab)r_)aS?j<)D zeW_ipIVo(PzVVW4GqjtNmS}YLUprvi`(yYgb%hmX#TP$He|l*sXki?jSl@bp4UlPv z%XxNH+3YhPdv)747N^}6w2Nz~_|C3XO8k95n{GLq#R~2jn6YQYmLjVcI-4@W1zla8 z?WV7^GqT!bG0=RolT|Ns9}gg0zeFOu!5`yLGZEt;Q#o_qqFk3oKJ|9m;Et##u zgDIkWBw-q}G6(0-^y1vv2eT{2rbn1~9@^s)-1ujT!-p811AAPI-}-nRG<7llsN=0; z>Jr>mux>_1W`}&+o7b^j88gx{JJ!`^hwFD&Iq4qhwSH?9-habtcb%=?kvw>9 z4~8!otGPio^!mpg#X6M>8SEXMc{)n_TlhmC{L~g(=Mt1vF_`VOUo&hKZ@tD@Q4xlF z2ArP)W&YJ5{L``mhmJ*4`IeiqeJN6#N{E(z3Bfe z?tZf)f@mWFkWR)u>vdtuZ5b|I+LI!dHv{+Iux1*~ZxtLA3EG0c>WQD_H7 zna6kTkd@1CSPZ?4&tsY27d{?pDrUb%(xQPc`=sX*=Fz0xRoa+>Yrv%jm<&xwNzTBu 
zTRy*?e}(-kMrO28ifZlknMwxtC%UnhoPG2j_!69j{9s#1w;-y8V$+#(ZL9lw`zH=% z^=Tyx+aJy_$jIGd%{I+Egv%?{+EW8~QdZw&?B&xhGSg;3`;eX9eHc#{oQ%ERn5UO- z)*NU2i=LOfN>3cYa%tTAz;fB4n|9;|%pSURy@D5e^z)$PO^4#&shMYo9*>aR&^+bUOa?$*+)Ad6K=fgag z#l$1DhIXv2x0t!oAhc!w*Atp*S~sNW9+^KWM)gXKX-ng*TXHK0KkLXVQVK})wJ)+7 z8fK6zwC`bDDcUx#VGe(w;YoX05k^S96~azZ?l<`M-;|RW{PeiNPxr{KSMj`)fPK0^ zfwR@AxCCz++Ni7I>IQ;nuYW!`#qG~XuEh-tO}e%CZ`r#t9XaLr!_X&{hka7l>R98B zIN}8jtyubEA-Rq2aa)FH=b;AznMN1iBkQls=Sg=p9SEBtzk^P~v$Z&IZ0M+I z`p)6X4NM`YRqoSv&Ayy~i6k~?3z%`F4_=N|_qIuV4p!kVJk5oRL1uJQX>b*XCKY|2$7!dF@gj`IOPcss2bYJZb*m!XjuzaS-sZeYto-Q!YI81m zeW^^g?U2i`SCsQ$KhcAI#_>3JqqU3|d=|}3w{gk4mvM%>t}V9bc~G{kD19rAy$gqZ zavR6>+1-?|QXz20;ACOe(kG)6CufM1Zs1>``n%-bdvDzn0t zG4tg0*P+If(T#n3dwlAU3AzxYb)W-mJ?6MPm$Q{0%kx`o2S9rOYtR8L_+C6>Du#ox z15SGHFJCdOOYs`AK^gV~KV*F^K6>|~1NL#9|U z`D}GV6F%-B5$s#nsPKGLNAGglX@j=4n(a5^Yn-%j#U^6rRnK$e6> zUh9%U(`1nxnKI;AfXX7N&&*W>|KHBl!L^Kb1w8iGhs-gnQfO$d9x_>;g4*I%pP_Gl zbLb6OA-tq&!ML_qmQy%S_zg|?y{|MI$%rWG^fa?82&g%WH_apC_i_DMM1^=cxI@aj zcN;vs~(H9le=5aUr@*_=i?Rs;H9ctcSW^2 zh}~Wo+ww^2W;qKon0o0g5>=s#nw^XU5=EnLS$6k*7Qd8H_Npcw5sfQi6vq&KZFxZ{ zT~Et~7anb0?-LZ}ELUEx9|pVzJu#YljkK5-4=%~%_KwyWzN zFYqcwPR@ch;0Fze*aGh~*LvS7C~CWkaHh6toqpJ$efzrZJ{=$QQf6{2@LnH0`rZ6v zQ`+B3*H-2|PPnx$Nz+Jx*W{$Yq3pg^Ddx-5Z#h7}cH#Kfr~R9UD=M{r%-4 z&V;uSIPc!K^$wYd3hrj&H^Jor*_=}|t~bxta)f`@Id%%2i3{`u?o`~R%b371EyB?}sROjvk!z|e!2_w#zRrq(OS>BG-HCLGP zR|IdkluZ2KledI(^r~M?zEy&uEs){t;UEKXwm~MFR3;q5=wMcHji(DU>mFQxi${6>ToSe-)7r{zlqDLnz&5~_O zval&3TPps4aYA*NZ|wv8Cm%5e{=wjm+x_{{C897h&fS4O{2R2fIBdS&W2;q7e%Wi! 
z1k4GZ&AZ1}=E<#mc3a6mrbhf^YHUO&;ua?*7vtIsY46T%r9X6kn`Kuhd}u8rqRCdR5Nk}62dN0#){$)rY)C?sjzmbQ*Yo4to7jyWAFT#eD z&H?>AocDpsKbQQ&3HKZdsSumWyF5ue}`ko$fDkEe|h9Zq%R-cSM(P7&G z`NN9;?^Gr|_COwu=X-7?fgZYBw!P#3d?`*;krVpr!*Jqnt2_W+s{uK)T&A5UZ3~nm zVSM#KFK5Whq|FZ+3&B+xY^i|UdTKH(O$rom|NOO39wy++-93JHtL2*Kfc`Vd8G;H^ z)wSc6bd-iIv|wjmQ3lxasV+o|#SL<^X3HlozBt+BO?Ab+jeL3)iNEAmR7|<09WVT* zluz&P-)&iCrA0+k%xcbnR=OORDyP@+lBYY@N+}^a>um7ZwH)TOYa&J~6y&cDcW zl~8EHNpp0PS!ZFgDYe94GJs8EGF+uX9WM#IIdHbwG$SglyLPfGt657BEZ!CA?s=6m z$M&*suZxs`nx^Wq121OiWz;m!lZj4~J^L}oQq$^~K@(ZF|EuA=PB|<}T1a5y5|RW| z@iw7=Euf;wbhU(pm}R+La(K8nL34>WQLWW%rA`Vuq)VtRy%=K`Vl$?38VJ$u?l!@C z#d+!O&4Q>z-E<0TaW!!XIM;kDc}-$flDbZkMHPhfvb#rPE;9q|8ijjp`n`dM$m zlsdJUrLz9mVb3#r^?&&!;%4Y9(_|QA7-o6I%1D4t?eBllsRJ|TQ(z43OdjIyQ%z}( z2WT!QdxLFh4*l+HKy&R5j04%2t*JX!CQWCzu4GST*vjR1*{j}1ow2aU0J3x%^_nj` z^GZ3tsAy(oUyYO=`0sM(G?0p6ZG3Zald9@Q&2#Ts4Dw3Y2W}rC(X&T28iyXK9&I$#K=74*C zRc|6gUCqOSop&U&wz*uk8+No-4f|?#s3>%PX#UDbSX8 zTTt(!tK+9iZ-GMzF*T$7IL1CzTr_lWc8e1uU*~y>iEUq1XNp*FO1R~97@71YiM5@P z7*<7ICN%*=mw_wE@&f}yst}LVJ5|HluqmVt6RY# zX>4}}{BB=|jxVxXU5txwV6WS1Dj-83eMOy@lj>wB4*Q%Qq8Z1a)6YTjAkYRBU^=r3DV_-96k8@7n}?|EJdF9Cn((F@<`h#Y zZgRNNcvjyub{=OF6#Z2?Ks8T+ZEgKtM9`5?iG@Reamj{H`YIXM@)Qm>>d{xeP+B41 zQDtao$jRC3=2Ty8)}C7F)lnW{z;W%O>$9p($uJuprS*1S(zb3%yN#ZLiC7!JW|1P-;_F zBCUDo&>;_i@uiQ!D#&D$#S3T1Nrj??xOVG$s<ekyCf9tj2d?%JYQFr7=eg@Nl=;}1;vO|lFHhl>a|9` z?M_}Q+Ot=@XqQ>PzDlmyQYEoguGtY_@f9c0jTBux|KyWfL4(dYUR=`F@L*5g<2npSD<)2W(S_|yT@a7|Lm&HCL&xNGF1gv_)HST1<)U}tQ{?YUf-i6 zP=(Wh&215g!*(}x?3NoA9y|F}zy1_%+TzaCk?tR8JJ4yD0+=ul&p0E5$M#u?P_Q;F zR8#*D>H{?gq9zJBKZSEKbR0uTZ7AipdzptfOY( zY2(I?fMorP8V>j+FKqET?B?- zV|ZfMH$LXW3MOgA@t!VEqSNT~z9~LkybI#~L^BceCZBZh2rwVIf{(_{%b0|7)v#KR z&~nW3YMSNLNBDMvEi@xK-W?nIG<`i&twX-dJt!|Rp2;|Kk_eo_l>HZA=v@<_3#p3@ zhNbDT=Gb1sdfQkF|8o-G{ZVUUoQ86GV>$#6beLhJIq54-$x0D^r*C>W7F3)O>Hi9P z4_hq(=CkBq-@jd$N)>c6qGgnIiy{&tjh^jJpDCY33 zJ95Oc$K;U{9>V5L;WcFSnJhFX#}JA@?~9<#))FIJTzg13iW04II7^%noFM0zq{b6A z=jINb4R)_ctL?2MBU17Ux!&O%V~+$Ms#v#@@Y3$eyC_QtRnkl;i=x&8lT`-uU38j) 
z^9z^Wd>=Y22Pt;3=m)m¼=w~!!jXSK+$VVgook~fI_9b#-Mn!JD>&ZZJ7Qz*J(NV+bxzK_-t??yyCu;U zI1QhxviO(V&iBnLH|iGtpq>^TwX(L^_2M@*yPAh8w@UcLKVN>O_u^i&Gh()dYX|Bw z^gE-I`ajM-U0}&9({gtIt|u;Ceq~AhN`h18W$bcS&XW_+Lh{0}q}I1AX*o-9iq|5o zqXPN8ZRb8beB@{TUWSv0h0l8Y^-9|o=dvhjC1lY^#D3}~hB5LYGa66uwbkaYZTFbbDCxe_B!})w7vE-RXdGy$fak4) zNxbDmMdCVdn~svQc0kBe>I~49@f`8B;f7j?#U~_mcGbqm$KMYMT1u_JQmOa3gV<(* z>Bn{@r1U1tmRgZQRivS#K#)@z;iTL1U%W!J2J;?+KpR+g z-&?dCdvZ){$(793s)X&g=&#h$(qbexAr{(wu_=mLa!`jh^ssOtQiMFT3o<^xds_Ko zSTWxI&9LmU@44v2K9AQCUmhP9#A4&I+DA#6hLUEe=S9?ed3o{jwq)xJtlkCQkrY%A zHWGQ%leEiy*xf#I96%Z8hJ4o#6^jOq?PJi!=0-sbGoqfP;-~@TSozPto8`KN!pVFNyE1ksiE`aSG@FQ;&6OH`xeZv0;31j(e6*xgAQZZX8q!o2@Me> zswPl82@3DjU#==_pf0Qmdb|obllWnIQihj@D+@IS^JVbHy z9Vuqd=bV@RwU;j}d%cWtbNqj518iYu6HEsk0TBM(?MG5(5%utJ`hslNqK|znHC#vc zdiK(vV-o&4KQ=>$-LAv=2V{#`B$rbSzUZ^0Jknprwhi{0#U?D7v|5Fl+j^&q$=Xw| zE_8*tS5^Prm+R^2>Ba5rJS0#fv7LQvY6|TT=i%vb0l%+{D359H4oG%I#nS9AL{WRU8eN;pG$ zT<|*{-@ppo8Hq5_>a>@yww%I~JpKpdxX#V|&%*JxPdEwysGK?aiEi1H3bLwpadS1U zu|2y@NUS9R5yO{4svh~{^OnPk3kD2I1hD*Pb7(OOF z@-$yLhhimQQbUdty;L9pA7#&tF;!3lE7McUsdiV8?*NmntesqA(A*0}hKqakFvV96 zjE6o>sml$nb5|e9G-~=(9Hv99j^xhf)zs8bPXV*ygoK1@D84m^_YPaeNyESabdt;9 zo(JrKD2+{y+%(ahMC$4IXtPN9?!L1&Xc{1A1?xR@_*!rZ$_&PbUfj!!yq{mSWG6HO z!ZVtCHH!PFnM!r%Fek<8YZ?76p9iN-K^IT63VK|rMomx|+38+Mowf5zO70{iqvg9m z%A%ofMR;PyF=z#~rrnEg)rMjg;80BBCG!Y}H@U_e%e^xzDO<||^1(@`RsQ+x+Z>~x zzT#Pa(VLV_2`oAod$>u7njX ziz+HA(vBN;!=Gzx`t!q_$r<~7u+pw52Tt7FwXl&9^)REKZuH(vp#D~@5!ZT6hy9=- zo>Sl$Dy1nVfjT*t8li87AKuhsg6&lxr4~_;F1X-v^@xrdT3EyUucTphXu2t1Z_Kob z^$iU!E!7#dp$dTc%Gu_DTOHBRgOzF3H*CBVo2PQ}^FupzfCYKdw27BRk%~pUdAHp- z_Le^IVwWb!}F>?P$B>x{WMsqJMq zt!sNOQTc_kz=MZ<#n%GKta)a8D1Amp0UyOFaP!XbpU#{gS6{5nI?>m9E5-V0G3d=K zNIfsQxjdV_qxbxz^6``oU5g|+nc*3?ph7KgnRB1QA6 zdCljPNMzc=2F*k$+$Lc@5>%g|)dq^Uxiy*{xHVhXA|2}mUMZm9N-5se!)BP;XYY*_ zR4k6!aixY+>q7~J4qBIWnUE(T>HoNTh*CN;(*Eel$4D`Ep{GK7;DItItyYt!h`UXL zUQ3fjiVTVGMRfAZS)HLK2}-%chUrUav;2S-gIriO_;P>&rC@f)VGBt^Pls95G?gSw 
zEGnUDQXgA$*Qn~QQK%z4fR7(PI$(j)rph=5y|m$~W2>7Dd!fQVZ0>&4@9fKy(=Sf@ zr!Y--RJVs*!bEvcQXTkC#?B`(&isL%>&!u^14_G5waD|ioC?gE1qM_r9cfOaO zUR8i&SWma(v|TQ9GV&Y}yc3t6Jon|I^ttzc#T84BP9M&>m0(fkU}{=8M?LN8j2*fi zT9JC}q|Q7jczg_q1rkTEd})0ZrHVu@x;XG|+|pobV!$%cw5cf+|AP&GEPA}2BKtd5 zpC&JF__^{dn8C zs=)D4I%ZzRN?W#b8Sv+-73&tFbSjzLqod>6>l2^SppAm*GfVy_c^l$xB9FiIC(1iNfCx4-X%^E7l-`-)!fmO|nSVC!L*a zU))-CE6-{94xhjdaRd;`(u+IftFz7(*H+9>k!K&FOkACdf}BY5`VT>-S&LKLFN~K` z@0EU0E4uUJEQ`wiA_@Y0kIPQJ?|>}fM^U%Cv)SGG>|k;m?U#IB=5W8x?$0aX#g(@6 z@6Po=$}iyViSp7us;vI_-g3(3!qvK(i66>JnULk@EiwVkYk*Asldrg-U^+FV@+5gp zC?%y)ms)fj{JTCm7jiJwL;;t`l@>v7-xuSD&mJq)e@`ZU$S#y0Oo)6zi-xU#Vmqlu z%EdK>-J9Z0rkeT)9*8~l3sia+++3+ciZt%%tGx+=X?l=SZ7GULmV_TY4U%-H_>qkw z9Z~gOa-@b1xwqeD8u9vPQSAKxojZ4~F1qQE8pUzJVbgu^gZ5Islw}xw_->-o&d9@e z3j>oF`EmjG&J%OFwhY=W!qg#0p5r)ovau`IRu=)h47z3HIV8FE-}&D~e>+b9pO3Wt zdKirk3Qy?xxM2`1_4O*h{2mmlF=MhqqIkO73CB3IOKFjLCmpj_>dfJjs>z7l;%4l$ zG<5b%zO}~gcO=wP>k7-`Sm-|JV%c_aGmEtmJ=vVlZS(|SF>mJT=6dGNmWy9_G)IFM zo%0r2n3$QRy3Cm;er&JNbB~zSXjgipENS9I-dFw;-GGamQmhrTzA42{uc^YpAu>;3 zwyOUGH$D^1X(-r*HdIu*89BG*3C;x}OdcwO(@L|~WQZ!)2}&58P1kcj;ndxxo>mmn z&B_sZ#n%S0f}0?vC1q=K_S}G#-_F_v@4!|=J~_3a!JM^Ai?Gt`u|J}qELAJQ;7T4pB zX1NFFs>p_;{J3LBs(EqaJi88q5OiD3uDW-r^#?uNrCIIS^l^(OI6#+UhG}o(hadFV zUsN{gW?lcQB`z<-X{GhOpoR*GJpS3WJ_>72CR?6(em5$@n7?TyJ7W2YxCast7TD-% zH_-a}YNdv=!a%icP54HW9KDwh2K2)?)}_E_5U2R{aa#TTF+a!j=|qvLI|fUfuasN2 zIUGocj+kANBQ@E5!}+<$C1mLy(aY#?o^0)lm-CfkGM*f}k(Mc{BCyZYrNK|IF+h4t z$DO3W#dsfP8!gixxVw}VF;hZf%b>vgMq29^~X}{u+ z1JH@<>v-UY1;a}E_;nD1{q-G+5knYWjyD)!H{#>p!lUiid~202^A7_c;G{3VwZn>g zzdidP5a#Q*KV#|+KwY?p>mACf00It20Gh~teFuJ6@Jn+NUs~qbDZt79EnAa2Z+}Mx z^LNF52DbV2y+Y&IUG5th+~lMC&#OxevW+?8RtFuPukXSS+mPQ#U~rz(jRbD#Bu6yN zVg792!xT-vzEyGRK)Vj2m(+JZOF^nRTNC13I0F6;_TDors;g}q9%Ga{HtZ-W7zG`(=ir;UMhZ8&*XrWZ~!-7*9 z^@Pj!eSbOM_m{g)rSKZeTj5_f3Zy?45hE}8L+_3RS2*7L=^rL;F=bOGs4~+N2TO^8 z3on+vJA=D$InzZsr;gwovPMwbPvlt1s{KIvuds?SffuP>=~rgz8Zo(iVIa$?{oNp# zh35f|HceSL4YVZRzj%Db77@|29D@p*iqW7Km&dptmS%Nn#)Hk{>+H1Qt@?$UgzA 
zfgxc=pCH7++FGX+#RSN!kayqk1nU%S=Rc8obyu^$pRq!bAD(%FSgBhuY%XeP>ezh}ym~c1}$w)_EZY#cn>k>cV=uhOGZ!6PGZpm}z%*9yRr-k$pMzH%lVQc$47Oc}@?qiJE$64D%41 z2mK&)JNP|2wcoDvdz+#BAcdJ7>ufpd5loCU7{PIk{hmp|u*P<<((n)D#%&bFOkMFW zOk->{zkDxvFJJrp&6%MuWA~QdFuIir%6*1anRn>>zSN%lTxJ^(G@b1`hB!vqfIaVs zcEr4jFM&ayTzzfj2SVb{gSo8QqyJ`DiTV*U%tV%t-8f+Nwby`1k`OX+W8EPRCc|tz zKzy+iUoxLO)Qd;muFnlshenwT>)Q5w*{8jB6jx57A@O2a3uN&f3w*)vg-oa()t3C@ zwu*_;9lLxU3C0|IEe9o>c=?aC3XFw>YEaQWxQIB}$F9W(FqX0K8GIc{Q#F7i*$c`xcl_~S-S5N)2>6=f{O&PMuG!OnnG)}Q)F6SMsFS-xQ}Fz zO**eGiRo&G`EGn_pW{=y7!^L&*5zq0Q17fVaE3^9ytVuoa^9+wqzh?j&b?eb5)o^| zULHRJvz5%rT#Y%wDi-yvVlFw}zUdb^qDeLgO%h{3bNzdpQ#6t@44Rf$4BOc?g*Ks5 z-2gq_UQYX2fI!;KH&yZdj81`YjRpNJC2sK+jCy{vx{w>6UV9(!`T9tvb?|jPe@lyV ztbZG)zo%gZcdYgBSIf)@-7-_g(5FtfZBCm!2dggNDo&Xf1=4*-c{rB34);>??=ayk z@-=;V9Fqb>7ULy5mExxoHY!EE!S+91y!b=IqN1{0&%T(u-Zw@N&SbgYit}f$w=mN4 zIs1=8Prbd?oR}&t+26YN4R7Row0rSVFXAH98yeN~S+12UQjgO~d&tzxreUi{YF`&-SoD!ZT~6t0 z<t%|D9Vca8qFps;W@m)1+^g(AY8e{s(}_wk|-0{z{0G%#2A12Z&^e3?H{ zvXs9CI4%D|;T-#uR71#+6Ky^X0;6K)xO^~ChM%_Ohfqv(GT~UucThwfnhx^QJ&Mp9 z_eTYs9~T!CD9O$p@kuHg>qD9)#&7k68N5nHCLb|%rv7Xuz;8__e8V|!CbT;}MoHu& zhT3_7j7NmQ-fj2NbG!ty{Fq7#>BZp#GqRq1<_nIB+QL-q#9fMl`L;0`bRSBMr7(8+PQOv^eq~md!Ise$Xp?1piOFdp&q*@qhLwmwG zzF>$S8_SX~^1DOdxEjnnAY-Zo)nbBmM*bMP%g5Ew`Ikl~d-UGy&IEEUz@hDGFF)Q$ z|K6Ak?7ES;-Uv7Ldgo=olYOHQHdz8jS_V8CKVO-BA2~R#kH*}nMk?3vj>8fi?iDk` z(x?l+d9;7-PnR3GOr%mMchAM_1;+0!7B)dLZ4mk{mVKm^{NT7`=oZB)wo=UsImRTG zfU~2b;rMBu*atJ|!GwrFbWq4Oe)(_gE+1Ewne^$0%U6$Gz8r!YQ)oVAlshaq{-lMQ z#@mbt){UJgo3V0u^7uHF-c8u(~_00-`6J21$0Y$|D^rm5o2q z318O8JT<65!O z<_WQJBftM}P*^8$+@ws17NsX@A<;3TFXFo&`TW0T;^AEo7)AVHZ1OGR27nQT zazdEm=z|o7)gduJgOqs1xq~^rQVcPMElN1u1YVuaxpzby%a2cUF}}`ed+Hifm-9-e zGs0YM@8v#nUSzZ%8Z8Rq#I)n2R8nl7ZPOVsbU;rv2Hz2&q5z0&VOF)j&pq?Lj5hI=w z=oxmcyF2afZ^!cLPZ=0olQ5Ii`PSfZSIl4=rP6qNuRNL4a82R18iKOipBt7&v$$s# z3Vra27hh40nDVqcYcGFJTS97Toe_*@y!`y3dZ6=a8hJW(V@)ogXxhfrxRX4#>s|U= z%hph-mzYnd^caZQa#? 
zZ9;&m0%0%z9=raDW2zIS=misG?&!zW zBubT#I_xA0AY9EGfhtSSCQoA9-W_<~6b;lLdw#0mZ2P8x-sVhO9ftzYV2@NCDV&J|kIaCUoy0jC*$2RLHO`mnK1b-9N~kZu-X=)vuJY+n@^D@* zeTTYg$#vf0W-~x7W3RZnPV@RJEM;k$;=8=GUza^!zdF#z{lHVfc?(3=3L0nXKK(rV ze#V;01FsFw?KX%goTIs^Xm{gXiMaW?rII%vT$sK^N&cfxXTOfXKG6t^t2NDo(p7_2 zgO+}r*7i4f&4v8rDo4jQ_WpGX202L+=;?xjg7!pzPUNshT_GPy!*L~fv~hAb;CEAi zWy?z;L7Snn-msCPbVx|r64s}GMhCsPNFZ=ANT&d|*}HzfJt)`&c&oII_$8~f&JjdWVRD5)(asPXb_aNgd{pZ47za>^-W0~PyLs3mAEgX2n{9ove*QD5%0%l?OU>RcrP@|;uR zm(v0YEV+KBZRBpd9FT*$VYDZKz-O&--_@n{fWV)(pt`F;@Rlge^QHWIQGh|x@Y^fH z@|WweeU#h89HwqOk>N@b?$b+fsrLc_WfEf6NS{iRe{+Es{Yti7ra5e{)|^etZ%CN_ z6OG%Mu|=t_4f=24+cf7T4^Y`=AW`qTh``}vc4$K*( z7n2K^Dw=a9bDpqRZ40=8gsv%1<9;#7&&k6=3h}2Jciqt z^&H3xVbHmx1`c+%@irBel77FIh-_h#j?C31$0jJMqj%7U0mifr@M{+YJY(D_NJv&$ ztdh;R)lNTU{FFRgTvQZQT1$@uN2x ze|H7DyVL-H?#-Jw@7a_zg(vZlYKp1#s5*UqPxpAk(NCu~V*8AKnegs_f}<0@j(l|R z8P}~R{F?C#`^KlJ=ZQwq6(D3}%Yv(@GR7#L~nj*OMo<`dL`(LO7~pH?z4 zDWiX3zAKGuvJ-xAoj0cGO!zBJV%(lG;m`lyY%GLDdO}CIR5~V7y-jSw1C4y_k-G<~ z$mXT{|NF`LU;BXa$IhKMFSedSbI>X}z*_3*{dumz=D&~h<-BDK4mhmmNGTV|alR_a z<0v8LJI_0Qlghvila=YI0}twUq-Wi{Ynh&NB<8ecYH7jc9qF~&iPDu;^`Sj=Emb$f zi|PAK&Yv2-|NNuk^yBhXlT6a&{O5>-**&i|94;PO-}c0k0B$$;-;C5L706zf8@(|q zB_zpfLBC6>obW*AU+qmdwN-qCwyn+exo~o6V#%lVZHbm9c?TJXe4MY{ji|bFx6kXT z$zXVxi#Km#Y-967nboN|TTiBG?kiv5DRWl!+zyLWRmPe!Ek;kzOUg~nc=j~_7tam1L6M5BGP|dUq9WfI`MIv(`V16yxx-HhX*?Qn-lisNSSI$ zcXb?G-+M2!y;#clu+Lz_SC_OHm%j5MHrBdJO^W8UfA~zgKT)|dQI<&n7l zbiu=(il(IC%~Anv4n3d5?H0w)sNlbO#O4p_?tgR^=&5_Ff4&l4IlJGvROC_lX8u;| zzC_2!zyoXRc*W#qRW*Ix+!Fn++GB^&)6Glfm=wR#OD)XX?d;T=xF#|3o77=Fmr};v zyOtR_FTa$Cr0G1FIr{$bveXHet;aJ>6pw#-r)WKY zWq*N!oTF~IVAK8^&8t@r^A#H&dG}yZX_C)?$ogkzj@o<*68W|UxyEZ=_EZ;XH>1@@ znOQf(D|?D~QZo|ml1r?Yu8jZqY`WFbtMSzh0(dh+@nR0-SQ|6sjNY3zZSRc`Bp$ro zCxaLrR?m4G*UywuKI;I|+x^R8kes0&|HERy5X|hI z{bGlhc+ebs>Ak0V<`_cY{=tM`n&WE3G{@YpAX)*J%!g~mRo`i!%CZ{yg1f*#W9bvn(IETnoL{H&)Nv7F_}-xNpmP< z9X%g2<1KS8zejP9(YPXGDUd&^jpS6iv(lhfl&PG#Qdw+v9Ja`btXlt%Z# zdYsZAq>E%b8{E%dBjU{B`fGQwe`G*kk{T^0v!CEx*&ajUr?TGnp!|W3?vA216b5Fd 
zu^zoAd6%CQTJ~};&Y18D?j|15w!5kjo!GDKrG74wM?tJyrudASia92c?H2G}QW?@S z@nNZim=o_wbA<#PZEV(&iPa25O*)4|@v^nbUz z13r#87Ri=i)y5ot8gZ7BHE(2H^J%+A$2O?muVOQm<~ogaTm7Ve8tHxsyI9p_og<~M zm0FJ{A!^%qL%|or_<6%s*Pqw#wI-bQOu>H`BoIG!F*el3B5M{KP;|v=i@t}WTt2dG zb9^pJ7LBDm2>RQVw^gy|FmZY6^r`gd`vks zZG8lTpzD&-A)_B|m%zmFnfy4#(;DvDx?GDMY$$vNyLC)Qo=dy;QHrqc+!nX;-FWB*zVj<#l$)jHCt<`M~EA~fTaq%_B$On?7LM{cm`Jq2rWni!`rrX7G#~Y{huhUY9I8B5!pEIZRudCXN3Uu>E z+H3UOp8nZgEd0ok|aZ`6G0bPtrBZoA9WuqC@ot2X3raw-23$@ z5?B4+J&+Uc5Iv=r!lc2LzC}P%E)-H*rsx?KE@}O( zU@{_ZE}{UvL3MOlWss!x*H?qb`@X#b_|Q1LI@RFs_+3{G{`oTzp*veY7wiyENw5q) z-dUFw8Q)gPO)?vDX}zBxd6GV-sYGSyBT0E!?Q|6Z-xat(042l293wAth^O*<&TtwU zR0Ql!;B%m-q(E+WlZvB+*_E_hshG6CxWaDKW{V%jcDRl@)jN0J&0pxL(Xm~+uR&^o zo?_s&)9r$-6xU)hY8h4iDARb|Rm~*gTDVP+IdjJtkG6KIBH3aX8N+ zsjk8;z|JM3!jy6m_{)oV4Dy?<2C26yw&3`^C1O&)+gwU2b+Y?J{SW)0cPJ zE_>X|wAS0%_@kct5$VpHw4S~aZReP(uz^=PX)jRLS1K*&ux7h^cmj9Qux)Mf`>V2r zOFcEa?>D@AJYRdLL!h8wf9$p5N-alkBknm5cEqGTPfwhS)Fev;a-*ZS3#x6D9%|0} zfRe6SP8_@;OKk+)S0HYM8@U-ULeE$1Vfy+A zMUiF2pBI%@r%I*e*;gWmu^azNo!$<=)C{|?mv22ke;DAhRfD&?#`3B{sW@M2B`c;!j zva25n6%Fw^lo|<|`IeOn()fV8AWABvkCE*A-z}fg1*Vb;$w>YKE1Prp9k2kRq>|oQ zDK1sJ0YDY`i;D~VC;td#rXCXrA<_sm7H-_|&69tOlp&2A zewV0mwIgK$7+1M#d7Ku8hHd6x->`R}o`ZNs_5TyM8W~U}#TCJX(H39tz zuPC4Fu#uHk(Ec*5vUPu;2DP+}Bo2p+{R8Tq>Iabs)civ4-1$Rr4Efy^unXvHyP+#B z-^0rANIf5Sq@5>P?U!+PKh-n&2gzT@dS}+z82gEQAo-L>ObqgU>{f5g#PGm{N2yDK zcAAxx)28j9?(yzF3}o;;TC|wPOlJJujcD_;?E1T?6xecwYZxzk>%E#dVE>NA*f?l%ikYgmeXu{N3o=_W$|Z25O?JrA zL0a%k~S%NMgbQ}>a& z0oDj&>{(f%Qyd~5)Jpr8b$xbUoudBSZAiGF(tDKV#%k9D*>hJ(Tnj5hJr9lV`{+?> z6Ro)uWgu`hV$6=hmP0FyTWQ#F==l_Oo4K2X+3?W(@!nssV^d>R8ssHsvGLH>@QqrS z79o3A<4yL&mKQSoQ$v<`$ibI1tYDenBP;)W%&zd_2_b%omDA=~s z56S7uf(_@#=vdM^K3M5WtA0c;=J$wV^j#DRfC-~QNhxc+cz4Upa1&t5nx9o5%^k^K+yfx zR&}VIVTSaQ<)0t$h7K&IF$)rWKVNJ9TxRipy`)sUPTGxM*XyLYGGw%$9lhWpX8;tO zjGxTlw_i>AzO)S|hI&D!09f?dj>xOaHZ7keWhn1*5(o&p_RqYGrh;Z3>u=6w#=~b_ zPOe;$I6Qa@dSKnXIWm6cE)gxZF1fj_r;LQVdODG}`r_s_&yd0osU5hgW9$8#NQQId ztF})EX5=`KPS4M`d;pY$kDQL-?78e>T 
zZ4~ze7)^_m{5{lW9#D6_EL2Q5DWU$849$^XA|E1Erk`EkmkSN|uU3=1OVZu%b)Fkl z#GM)P0r;V(@UiRyDzy}Hf2}_%pO=;T!)<7mU(3zBtDipJR7@Z;S|BtMF_xWmREK`n zt=Ie8lH`=bwFWQBnZ0|or#)k(OA2yi%_Z5SKfl*;aR#n#yR~&chxVc$d!{+aL0PSN zKZQ$UwxXwSh4B8DyGRl~6;1QQBsJc8i`McU9H7X2m2kz(Dsw!6GKyp6_kUB}depp3wL8M+@7=sN)Z01v99OTWJCDlB;zct>N zOh1+FvZ3ICPS4j*I37vwkqj|Ji!MphWV%Rq?4(jvBk}Hli(Q@E;5)fgq#Rp>)Mut` z!z{)!n9DGOQ#PU$z+I&KwHfO4cOl8E3f@L^4c|fb#0PY`%ywt)1xRyY&-2*FbQiT? zM_*m)$r~QX3#y`Wks_w-^}FI6s{DKXZMtm(a>@OR;XnDR^&ya^|!=mqaIc!N#SGc znP;G{@A&22<8+5fldInjIzTM6z4~ z))i4veb&AKF>X$ow^yt9#}$MmzB7-`R7;&;`tsI=4@NYx(L7H*fWs^`04^3)km zdIn;W2G1n)H3rk;SVv9+#KBc(&mQ`)KXM0dCAmKj>yb2ITCn1=ye@8DzG9YSLqJfT zr--)JgFP$_Xxva_W|O?IMfC(@c4i^#{GU4elZiYKWyt4Zomy{D5*GStLTu4!m(F0w z*RDO`zJ`=}A)i)_{(1bJ+2C4~L5Z?WF*Y5y5$j&cfBAE@!-X$_p2v|~b%B2y=25JC z>UbX`p8`y<^V_|v1|BtbYFbo8ZE|91Lfg0*50C5=!%>Z$BsJJ}Z#HcZcXH~ifO5z+ zWOI5}-TCX*D=DA=s;`|{+j1i+YvapQC$1fOdU~n#B<jEt<#xqp?18`(8OKPj_xO>;(^ zBX{8f6^%LB9zwxA38A4vCv)!ZXE$IRnyKG_@|6yYZ;cBQee7qY-;9)kVjtL`v94Y7 zfbFeNdB=_&IX)7i$V)7c+YI!L$v?lWwh3tS+H7(>RZk>o!1Tsai4Db}>WqttlRxf5 z5afIcDhK%2|gDy&ZRHcnJEIf)7)Bm+C*I~$8Mkx_OLp0%l{jrRT?Ss>*8Y9c7Z zijX7ENy5;>1R6Onr*!1yhJ<=8TUT- z*87KVKDf^Pcx8fR`z;rnn#uN6lR2f8+Dn1m0!5JmR*`WriQ}kx*Lm7J+VZylm)Jul z(y}Z6@jid6@ciHKn3D)f6nXkSBMpC;q($GC$LU)*mL0n7yDRVj7=6+z!W85mg@%S! 
zQs6qJtput`qivjp2f~ZniLpgqhMKng-%b?VpF|QRBEz{~km$ijyRTyp6`f+;jkIWZ z_veb+4-LEoEeg)HX?N1T(U~Wgw0#NdQCGT7pynhAAab~eHRe#+gbBnk@W9jU>i^Qd z5!r9;1dlumXv_g>b5Un~o(uA^a-U(t;^5A64UlR}5EVw!+8|NFkC2~nP2aaK2OqEj z%l7C1J-yzd%GLsTj(ULv)igQTMrYg%4o{T)@Zp30%kdD>v7=&tgW$fA9M}aA%Rf|9 zR8qZUy5FxPhX5(W!#|A%-i}>>q`CwUkEBlnx;`dpi=RUla1`b3f1h3rsJRi!DLlL4 z&HCv1NHyf@KysCnRQn_!aHOfS!!fj@Z8LVXSowq?iILGcv;bZ@4W(6vZEac%w4FS)te#fy zMkOLdV1zp(NoxtaXNpGj;u?p;!OE$~e9c8lgQR*PF>@NpEstD$QR?h&1P_O4YiLZR zBZ0>v380qAqo^n`I1T1=EIljFc9pC_$Cs)n+1dIa_h)ECDLAV#zQD`-@LsCpkq63*U~ z?#QqTXFXN&GsW0=SaE+g&=vgElohjpzb;?RYQ_3*W6Z$B*MFz(zq9e*lQMA+{C76~ zI~$mR{}RTD+u*;m@qd_&p5f*Si#xvNzH(m~LZWMb{z+lBYvsd5zZdR2yf$J!mxk&B z&(ygo3l?nKHfhfDf}el>WyOD-9Ga)jSDCV{M`8c1b=&ye^8cLu)1Pza{`AYQUg~!C zHPW75e=7JYtg7jfOy9a8kDk{_H#?tqkuhjjsg1$({b!2F#4(M0w*B)Giy0lBdfDH% zus}qhW%AxK1V+M zcea0=k^gQImKpi)CSjS9|5B14r{sVCCfTo9Fr8+VhD;Ntoo~eTQ1#2Q`FVN=Y|JO& zAp6qLkdb_Ga%38}!HL^@Hf16<*$WBHB&1qi9PI0Aiuv1f=8um|lV3fp`Y6A0a1oUd z0#3|*qILs4M%oOxZfa3RFblQpywr{9VxAhN3aE#A7e?LR{83;m0%co7(hsv*=kk3! 
z9JOfA&JmL;salB|UOEfaW|jD=Xj$^ZApa(5s41xqX#sK&*lc~won=Tg!C$7BJewPb z@+O_nU2Fw8;hQ*$JY@_G6kln5>v;O)RTIg9<3I7vL&h{_sSBZyJ968X3r}{ zZTr62blmpOnTK5TGlE%zypNtftn%$Ahg36C{FFq)|E~QGmA~x!W9+k&5U{QbC)qw2 z+0;tl7>O;nY6^|5b`;oPRQ}Sat$m;~uO9BvoILxXPGJP{{P+rBxRcJed824U7c`SV1b5Z8-Ze*{H26c`|1W5|cFC zieg=@XFc;%UhVn_%ULGuh;LiEkaJ?Kr?#Etriyd#Qsb&%ioQ##0iifqW49Rjfyd_S zD}F6DaqEivwk-W-Ldsbq>xSGpYg@7h;jZ-@8tfM-u#G7*iuZvFX|4ONaBI3&c)c}- zIJaWnk55Z;(0#CcR^Q9%A_>pR3=n9k=VP*6S^Z2@U>pOM1v>__7!^1Wyzn(Q!5kAFf#~r~0U${giCs?GDx+ z45IE2yzOvQPTnGzaETMpD7*?aq`d$T*dWKcRu0<^&gQT^MAyngiWcm{Rs%UGCZ_7nO0PAm-tXBx`Bv%m6X-vtcDx1!b z$XrQWg)|j`Wd`919lKyqEqRc~Iz30XOsfvy)ZgSyT*)yd%(j~OC>qW=J=ph$eb9diA)0$W>YFJEeW`!IRTvoPD zLeUNG^+wU9Co8n9aipzky;!eE*pBDgf|^WP4PDOSl`mEN;-B>+@5XI<2b)1@k*yZ+ zm;3X>>)b=&+n{%pca>--l^H~ye3J#gmUV3`>n(Dqi%@_5QJKNkD>D>g%cX|HrI0H3 zFD?HKy1T-qxmd1ou$-T=g?A&!h*9{=Ws)yS{NlSm7>Io9`=^-Izmpt6_E5f)NaFjJ zC>ZtWC4N$;@&nL2GN=IS&(b?>ib?)zI?8ERXyKd`S+&GE#WDVIWHX;f&Xf7Phtw7^ zC8XkyL7rwIy*}5s%pi}|DY1O5nD?7HN3Eapi;&!+GHCNp#z1-}lbRJe&b{3f2ahEe<|C6ww;jB;3wD$?Er~hj!@{zu34WY5{%zk=9*B zLu)yYtURc1&7G z1+N|U#k;Tce`rSzXI?tuo_YC55&n12Bu%{V6ZJpU>_10t`B~O^9a`r;@R@h`NP(i| z>)^14DYWT9MRA<>gNiEq?aOurM&vgwlli8-9R#LGUkr}99jzSCHf9CSR4q_+SfKwt z;Fy5bFToGj9(=^7Pz(mRAi?c$S^oX$!wjtp4?3%k(~^UB@uURpUihLq<8t3r%h|z_ zC`9>eo!gY>LANxv@HBLcYJx#q7ZS$vO#c@AnfYZ|0#=if zF9j8~#xF65QVW-{*v~LBFldU>V?D9~*yIoYTJXH6sY}bc_3vrb01jEVl#P9H&z;i#h(1v-6<4utpyo& zTG29SxzX2;ujt)|-`|^j!y)m1QLJWQZr$=1msUemz3lKn=X84FGjmoS$Ft@jY6i&S z7=`cg-X!F_ePyj_9z$xcucZ0?60T@r8f{_ABX1+M0!7mKQ3*H75ga0UVhi~mypYT* zJ+ywwBM05+p}r;y>rk*INDW`y>t#_Rm3f*XFrKa~Qd|3n*WDe2(g3jX>G$w`_clJI zD2=c_8+3@E6b>i{PNwp7`VhE0IX|dgXcSJb6*B(fjwtFMUthVYE8qlG#bt4HSroeh zl?5^H{Q8>H!g>(p+YDUf*f7s9LTIq}7pi5Pud-!y^@y(Ceb*JaU5vsxd1VvQ{eSuK zUNm_X5Cs?H0`iTccQ-t6sVmj`{WO`&U-r@AbD`B>owspo zLA_A39Z9bSFJ$Haa0Pmf1pUKvAT^Yxi%e7IYX2M*e`OejC&3}x@?4K~y7Of)B|F@+ zYX_Q=h74j#hEeLUreFte%Pl=ph`B-w<#@9JfGS&=$9nP6Ph?uof4s`>51?5O6jIto1ieIyjmRS~3}cHdjiw}zXv8CHP@GU20* 
z`2!KlKfaEZ;()s+lN5BsA79o4=50Mow&x>Cu$nw?wIIT)4sZVvQqj^#8=#k7|5Avm zo)_4F3Iyo~B1;X!8n)4#&Xel^>CsmF~Qt~*MNIlXdsfUz6JDIUeawD)z*@`0cJ6zf}CeJIy z!EZG%&Qf^QJ&y(c_^33R;7p=LoZjRu@#)Wb9T18CR%uPbStjE7>F5vo!CJ9$tXEBc zynL4O?{ZjO?2Wq{xt)39>(wheJb?JMEIs>rrggI)>qUZ#aVs}e0I=@xZL?WTDJ&I_ z0V*PjQb%u|y(|!E4fl&!pY_>BB7TP`2iF2El8J+aRL1Ed1=bwUv`AUt#i&WSsq(^S zEwx`+Z>tESWWpA#-Fhev{-ThE2__J%Pj3V2ycX(55deWf$n))+N>AKQ0IA7DadID& z=U$H_blL^ktQ5bgkr}@cz z2y%8Tr_qks6fGb@!VZ67wV_@(b$!_W;FhT>C&AYxXEk<31pKADbsNtMzQuF#7nymckLpnQ^x0RKCLecM6xbF(gwp-qRnY2?jV)h>R=yUNT*nm$BeX2D1 zoTdv^+Y-A|6gc4*5MPlS^t7>Y9vIC%K!Ze3Z@t-+^YSawgMiD5?58D7_>TYzM6oK= z3IUmi+-Dn*9FeThk~1&%ll@5gN_LnOYW;rpG+dh>V_umjilla=Tz70wwNoeH5V`|B z?Fw$OX7!H%2VG+LY)dh~GR^UAiKV95LZYYyT0-X}H@5YJq(t-ZBg{J>uVHCnNmCaB z5Jh{5ekeWg^>r^Q%e$$CcE`1;8-zxYp(H@F%n^=nb|V{v?3%e-sb{cNt(QIpLIcqA zE`m2A`wb6c9Mcx891OIjc|_(a?;6`SYi(2~^+IV}8%rS~qB;cTOH7YLOG-}_FF(6g z3pnRu1IMOXQ;9AsdRM3OrVndSfnHq}r>%aEu@x~==xGyKN1k^zpl_5Sh~-@+W8Sx` zzMnvDQ68JOlc4bk1f@x=)--lOx5_$qK7~+a#lQn!I;JF(CuoT-6DE~k+!8hM3?L>aIw2{ectMmNW>YMoOEbi zV8N*u{|uHaX%(7!*{Otp1vz*w93||WXlZ+|lQY-wYm}N$k-quUsw#=9SQul>kchG7 z`f9%nz`EiNSt`Y9?8OErHXs7rLLI)V5e3_qWHO(|A-6OaaA)^$+ZKaRgVa|psRnht z{}HZKS-Q@P4^Z>mO=X6+Z|{{{i#>MnG9{n%Eh6I5w;6;&Bp;Bdy>oB_4uB7Edu5hLauc z;P(!@3o?XR;r<=`3QevKz1oq_`sZK$c|P!99-X;TA}E^M5LiT{;b#wPwvbxj6tz-p zhT%cWV0O;~-oaCe?as!0svT}GbB_&?+SUXbt>CsR@GXCW$W*)RT2p`unKFl8$vpPAvjU}c2^=a%@3i0`FmEAZAagr`HF^W_$@fI2VztLc&UF!wa$^tD zwTv~}2Fxu?s#XXDDF;j75rGQrN9M<06?-$O)%=B!@J{z|8-9Wh^*r{`G`76YOfU7T zvBl-r*jUS!Eh9ULz}0k1G%YlPUJFszEu%;jv^q=)|J&H5VQQg|z)f7Y&dU5{wT~+q zhFc8`Ss9**-vSGpx>BbN#6vezoBU7baDsAhAea=vjzvI=Up$R4gd@AR@j!f3TOx#` z+vbB^rQngkC#*mKc{X1Zwe{&1$1Ihe_eQSyt0|<@T1ucJKtJl?$XO~#i=?L;Ihh2vDPy?7HWeT zDk#@sqE4Sl&?t9%2{fh%4hASv(9k6BbM1am5}xlz+w!gY8qquNQN4umL+ zl)DBMd@lAGY=1Lf__BrhhMUwMcW&Z#Zl*ITk!RM- z?Zhp96OdYBo*EnhZmFS8+w90Zhe~#Qgi3cj&h~ z8HUEAeg26V(x+e-%ubi>e*RY-Wfl4s0;zI6Nf#_?*-<0RF&ncKk~GvuNOK`S$36SJ z0$>n*aPTD&&|%Y>G(1$Cho^3uow;H%H!On)?byuZmjbSY^;+#^icCi&66$5z6hMqa 
zl?{C3-l{%5lm+Ou89=D3yu@v!?5}IwFF3`g;D-PT0uY4_|J2N{9=pg{HGEO9Jr;AyHsSZR$k*GZ(5G7EBv(`t|e4$Wrd9VMY3sv;h0% zI>IG6haZteu3U5;z*6N{yIb}1OZPwkiq*)4smq_h2o=;~vzwT*6XEg%9pqXRQ)}YYx=ngxzr40FINUEZ z`olV6X_iH5g_AH9g!Xgw9-wROi&A@YW3eb@s3-7MWK*UO$O@8&qc-pqiU@Ag-^g7G zt8*v8(mquV#)R$!rExpA)=^+rbs|u?tG<9;X$b4Jmd8pU_C*xgo5_g0-xNRnP2DI~ z7eaDE0#db}naRt=Xp*ie>z(d!GSUmi#LYyWH-+e@_t^J*V@Gqft*>G=DGRP)_D0N0 z%EE|qD4GmMnLMzuby?X4=n!NTr{4(|I<26AS%>)-Wv_1oPAu%oaprmILW=oAVmN~DM8msZ^E_0A46IsG>$rL73L*-+>}oi^?uwrvSY7M zJC+g_kKV%Zgpyj>e$%1`KA3NI|2Wkt%fEy0_MoY?eFOwZfM9`oWBDi zP_rh4j$XJsye`j@e4;oN>>Z|pnat-~KJodAZ}XcV2tr|l@sXE{)>Q&`9*GBx@r!yP zDc3|qI}qpgki@=5&0>#)P4fqETI&!$bdpdn;mOpN431mDsdhv`!-SUn_pWNf-T68r zysHPLnxAcTsoz3k3Q61?aZoC}wN)rzpT6|=4qn@HUVhW@6v>wyW&YSFH+atNA)QWl zuV}_{mL9!4)1xub_v)n`E7coVZL`tlaYCZ>}i$_L;N6H&tDRBkU;X%e~ zFRnDMi^HJGckge#+MOXO#W@9>NjsMqeqxFXd+k#Vc5irYvGtx3$;=m@{!c({=j%_MOH$Y?H@Y4CU|m!%Tu zQ{mHtX6>n5dRPdV4ja;C-CENJE4VuwTX7@cl=v7h)S=-nSmK`9^_e;l@A2wib@L0}Ydv1(qq=v0sW z2N6afX?U=$6C8S#g27!9S?RFNGll<4G+!9xXzXko9Ef%9yQa?MzC%L3vZMUe^xT%h zgV_6#L+&m-b&oZKEix}V<}?uJ5xg~(wR+?+zTH^C6+~&WUy$JY%8bQse43_#{83Nt zLsB(dL+*5RiYx4^@_F#{Kr?r5-Ble_)VKojC*jHpZ>LqksPTT~#o+-$2XH1W^QlDNx>rmsl^U$4x4$8OpZKS1Az1vG7UqHULjSC6x zZ1O{pfCiLGg}U0EzB=-Hl5uu zN%Ss8O;kDNw;l{O3yN}iZ9Ub4#;phEau3!Gm!9-KL+B~peOVVucSMChSxxrH=9;(6 zCy2O>`AzPCg$i`jq?7Zb@R!+!7W^ilXqrOet-ud?PCX!nDnhMy`^(CJ+Si)4bsvd= zWd$5Io5&?Zw#HEm75Np`0m$27pk@g;GkHKxSlh~w1rd1At5|(gvhfK@3(O0h>5*x$ zL>LQCT9zZmlTg^EJ_25l;at~Bhq2)#Q7I1YlPTioB>M)|9t@%L6{p>A_(3Z?l(_hg z>K0jw?1x|Y&f0?=#Dd`>H*|r>k<@Vi9)elox2;!ay&(?wH=D`?i3~v0ngdVoK+s3| z3X6-@!oj^)FWs&|9S#CSD9C!k?VR@0)>jGSJXX{f#O~_eiWXJ)(Gp4puc5o>29z zuQCm`z9zbrL-NO0m(3vjG8Np06GPGseNEwcF`#srR?H19?mgOf;V3G4ON6tJm=sg9E<+q2jI+!K zWTC3|Y^EQfwjZ4-TUW?&GJ)sbDiCD`#)r2Me{hb(W6Dhx-2!?4YzgBFL%3$wlr3|H zxE#C486#A~Q*mvXWK+m(D8Z|}7^+Sp+>W1WfOmDFa9AEL59kkKmZ`d+;ljzeri1Oa0Cg&&8Q5ET>O_R4Il zd~4~igt%-NSB%i5R@v=H!L6^*UOzm9Xy8Y6mlFn(n4WtcXX>vU?O7J73seXlgtcyO 
z>tJb~od+1xd9SI*;svh5HmVSj%rz->c58)2l`rV-Z`g|}iq($w&amgf>jjRpZBPoQw^qcyRI*6-v``3N z*nfr~NT~!DLDGdPwhDB|Fx+?A!4M&ZXlzPij(`O;%CFjDgKpLxt!29?i;|zmj@;)D z`AzW9pan=G$4L|jUCC!h_dC{;0LRe!g{3Ok;~UzN9bnt;{y>QU$_ux7bby_#U+r`D zcQ@Etq0c-lHGBjCrF8h(xqaa$HKvW#mD1@`M`yyhQf(-&ZaLV6IAd34)nB zzP;O54+%7zm{be%G&NdUS~L?K?hZ$GpHx^zR;=UDopAGW=U@Ti$wYk-V%|Yw5PsaU zspErR#^r^N`NB5eVj%W6hd?B-5IRr&<6i=L;R@68i=j#he{xzsig8MRl@~^ltsFlY zDeMgJempM}Hw|IpM}jFcBjeUeV&E@MK4bEm?sJjHWwJKJ= zpyV+h7%eBS?ArFn z^U7D3IhM>Ki>|z@vwkgc6A%_xZqxaO5b#d;zD2<}V9&&ryWy#Xzt%ejCShXFP;XQJ z(D7m->ur9j^Wh9yjkW*D&^OteJ!KM>lTd+PN7cr7tG5u4#oDD!YSxY-c3j3}bTZ2_Cl5;+P~ zkB3mdfVOpFp0zy|*lGLJ4@qdW`%eZ55c{e=qp=l^S3*yL5iDN0?esP!JCO4e-x>-a zbDlOZP=uABX(&~03H0zzKSs?z(?xc>;mB$IG6TDk!#viBSbM&z-dqc- z0SXM2olTx@0Bkc4W+rv79rZKsV!AQuVXdGNr&Lq~L-P7HJn(d^a3{fCn;fo;kXP#(0hm%HH;R#=!}1#1I@J9rx+DafW2 z9z{#2O(X(L#{CNkrgM*2e926-J>OD(JmW&)ll??8I2fw@4%MrsbvWs9g%3g!^Px;&aWjM& ze^yJt>$@{nqjhdIS5hQ_k7ZMKguaLsuN#1_h3saUGG|8(iYr@@<8nf)ZogrKpq4fy zr`ogIrk(?ojZj<4J7V5gDBPxcU>nayo1U*eFnpArdf@!_)W(Gw-i}+Rx5A=n)9GE6 zgW5a?*122pbE2G*=fXAqFsKtr9g~xf%|ugPfVJzglHt-kxBt;@ueXv%11L&(0$`yx z1Upbqpdg+z0y9(`XHc+J0+JjUsF%x&uj~q&vFNv>7cUEXQ(7O7b@Ou{!iQm&jbGaF z{+(k8VeZ&>5?_-ii$eQZYF@bi%0=vAhr=GyJK?U9ud1t3o<#dN^VI&@v+#q=3A5mF z6uQ*Lw-`06V0tapX=()fzMxef1xpR7K-Zflo_Om-B^QSb1p0OE#1%b&WIfq1xvOup zgr^KVfEcOvO0^VP3E>IlwUuku64R_5_;MW<#D)a&Gc77GRbby6Iem3<_DemP4$R#u zB%d`VPM=yRF5vg7jc;G1uD#ym!vO|#hJcOTPI&oAB|&aR*{6C(fiU6_`;e6`S-%A$ zo3+6kGONLVuTZ=P?Z>U8dpYPg5uPQQLL!OP?7YA8TM-5_UPO&F|U}^;jv}5k;Wb~UHq`*A;#+}9?#90uz8NyJ)Y-{*Avw@K; z<4$g|apAN4`iet+iiwQajWSKjmeeGc8VOg^P}}g32oZO0f7=Z^*wAXf3o8I+go502 z=~snlkg9|)dyE^;pCSx0Abd?$5elUF5VN%equh)jw}n;r)&CydoBdU4IH9u%M+;Vq zFq^?9?L6RVT}@*X3RN>9WGv7<54#(wuSwxZU<#Fq_$4yqGIp3-I*+wr<|WH~AU}u~ z78%?(pyP7PF7#|ktRAtTeZC~h?r{$Y)f3o(~QfrHh*+5gyceuXYfRc!{Jk$Y;?>; zraJ?Pr~FT3cF$hmmGAoN)()>cn^j16kPJO7)!&n$^!1-1qNF`rM^Jq7Rkc+b0)HOFUS{fMke z(jwGBoHLp-q!{6so8-?s@R6gp#n|4vChDKqYKtf^x>^vy;ki}3WUQ9!Htm)L06?Pk 
z<>wAK=p^7uX*|%Od}2we91Jvf&qSB50;0~GkmTa1+*pNr27e~bE-^LMz%;(wP1y3C zo0^Z?%jz}^fynSgZz94IWqQnu*eq!u1_zTAP)6am1@CguKQC*`AZn*@XFZWbj(E-Q zH?EUY`iN2Qb%VVHN1ZPdHj#TUcUwR8J2+!7EUH+o7Zu(pYu_(IZTFAjI)}r~#7?a& zOKhTqTn5_4lUf7ptSDAGC>vCM+)3o4HR-PM}hIoqGg~9dO)O?rWNU!*3`7 zx2MV6&rsyC{Z>FRDm<>IT4-_B28b2_3nQl!Sw2GLV{xS!jMw!-hEfF?G*KYE#`mohO5Tn4j*V-=}uY6)i|(6kpF521SUd3n-7mJI>X==HSk z^@KL@{`jpg^S8bt<^|9DA6JNOm=6mJEniCc9M4931s^iQ3tcK&utVL{JxpJs+bx%; z*5+^J+!LjCX69V3W7hi%&qMJ!@%EttJz@w}K3ERia{5=??^OEw(&V!#fguo^aGDEm z)wDsUU#nrhDE8!p13NTN7G()-fZ~4x+FjP?llm3*CWXk-9J2T>w#_=4Q=N$f)#vti z${F-eu~E<-zAp!6snqb3{91cqlOb7bO7SHUQXBI82tg_*034{zy%+sPyrD!O3yW(EB03SL%r_;b$+NB{ z27(Z=!x3}qiPBl>&C>I2fMplBPMw3jeZTY65GGF|P;fN)qeBR@xL-{iU~NFusPh~8 zw)yiBEsI##`M#JA`)#mS&;Ziqz}yl`c#hZRNq6oh?2_`(5|o#ff2ev7c&yj|e_Xp%I;UuA-?k_XG#!nvaiFoL6Jfgfx8?2k#IUKHWL z;3TLd5FXBjo^lPISkMYTWC`1a0*z3*MZ%ZFDi5FnK#v0OVl_ekxj)j9S~$uQh3DpN z4R*~5|MBBT2Wsj1X?4W"pT-=6rdm5zSt2@TV!CjtbsXt7Smj8 z!Ss6lIn78th(e^aA$1{qP9H2IV$p5_S%KE45HKU;0@Mp(F;AQB=zFH^XadQv2nYvk zC5I(Q{2u|*M0~nn`)ZEfS7TPUl|tN`VH;g2+T*Z+)(sv#>zTZHHhBgds9g9M1m30{oY6Aryb{1=v_b7?c9d0z{km&Gv-H|XX^V+hPqLFi1T1+tarQ3#Z}y^K*3YC}N`~TGWne{7yPk&s$9$)qPKI)A+tjJ+Nve(&uf;l#_ zbx=Dq_ALFc=G30bA|?7cD(sG|wlc#wM*?u&Ab{U`Frz-_uz zJ*f{sMjiQ`{reE3sK#5bn>SVPDjvD&1)cUa6HRzk2PW)mn>GTSUSBPwVMM>Vj|uYcYbEy;r#`bd!gbY zLw~xqExS$vASU%pi>D?<^j`O|c zE05R~C%WbALdy;bas(wblvDLXSgb5C|CSy2wx^;NHK`Vauu^|fdF0A1hd|Wkkh`V- zXhd!zRfr3j4cyli)JM?!ra18fHX>ryO3hN7Y)v*q0gUcNer<{JQ!N5-^}g#i|d9}rLv?3tCUi`L6LFYxc7k>jy7!Z2duQ|^rnJkY^WN&=v+2V%Rk?xm0toGEk-XIxxbx8rb7Vsrg6U?e00_@u zw3=ar9*PR{a5_b38R}$;V7P!+!0r=)`FuUgAQdC^#68qKvwr%1%hQfC#h9*yh=F@Z z*m9nMM?YjTxUo6p6~l1rg^^E<$O^J}f?hWTfrKgT3XIr-;x$5UC?q{mSW_ZTnTKNO zQ_=<@Q5hjm60&IURcvpjZG$X_^DTAijOCS+V0-0@bnmn z?Oe3J6G*$xcj*lV>my+OnRaFCYIEL<76!~ z-!@WBdU(T&XZ^&l=U~4f-ar-vO0F$KZ~A)Hlz44{{uu_*5}D>U823!Vn!VUG3w^|sn$TgGBS zy_Oo`we9R|iC4%TBveV~xz_1^-2~JX@slS|{#L{45O~Uy7k9MX z*d7xnNw!nA+MAyv>Cvvt&gw31pIDT`;$w*@Q4eS8gtyqTiFT^wvOU0aXf#tVp%r$Z 
zGEfw+f9jqP3o52cY4h=e60otMC4j8VaoDP0sjEX3BvHB>sZZ4*m`|r7dTYuStjtG z9%>{QZ6iz!61$2DA`lk-*RevoN&uCZ*&G zEXefs1lY()`h)AI(nF0G_u>CcA}+MMzXV*?`HF1$yBo;G@Q0UMD}Poei*5-3_?rNr zGGYHcBuf&~5o%XCK${U;URVcG`% zFFxYcB`ol-t*w0vsbBRHT)=kY&7Ni&yg!62=C+;84UAGgQC(;9?PLIO*5VB*LQ;EWwX!Vb=6R8cs?ujZN-wtHhL7iXr;9 zv2{o+&x6BbI-$Q?f{B-$s^nV&2eRA#|6w8E5MM_k*yw|zRGFf~2{Lg1UZ6bYhHK)| zF_LTB0Owu_bv8I3=`KNneI)0>AXikLCFFgFa>fk0)uO+E36t=g1jrp*mHjeHPT)j& zSy=P>pnzFT2Eb(F7@fcI$H)dupiVWo7&jA74QHP=RS@xu3~YBKqACRHo3=R~G{64H zh1_o!B%7hL|B{wQSJZ#;4u9eYhquF#UUY%mi!M{`$NNNIMF5-W)Tz zvbl0!uimi!2Cmv=042A7N4*?ICMe0AevP0rwQHJJO4}HWLjjmlyy%adgvDD{{EjS_ zS18D#5&aFZBaO%LOR&lP+>%aS5wGwqFhW>?{UdV#Ua4jNXN!PK>hv>iO{t&O ze5T6K0WfcB`1@md)fCR53pcsoyGj0Q~ zV=H0uLcvpO6~b?WVon(ps3Nu;4YdWN)$90A*#*1 zz^J^bbq8^3S7lonhlRNLcmfRM+LKXt8;^ZGi7hzA3(}q*g{Ydx8hh45vtGt zN&%YceV=rCGqtqW%%zxs1F=Vyq4JF+aL8M{77ADww3ZyzJa(gySGHn_dDsoiKS=Ga zcJb?I67G!zMc+n>@IA|{{V{Q)tNkhMT6DOsxOjVtop*dCim(nrS?aqegQac}Q~VcP zh@Q6~w9Y8qScP!g_raOCne^-=k8Xv~{clcUw7v3iqB`2P4rWrdw`9{X^`Nlm#aZhD zl)x8^b4aXC>}gW>GSS|9|7#!s38RkqKRzR%?9K$l+Jdq`{PYS(z|2MGu-K*vZw_+y z!+L&*rKpz+;cpA|$VAsxmb;35^8~qTkH5`mF&`8K|0}H2oS4z%Lm{!)@#nV+(aB@L zOQgN|?e}qrg}3HwpEXJxN`T>w6GR6SR)S?lxz-d4*!CMEi$cnPS%%)dDkoZ)ozIN~ z-Jy4&OxwTG>Au71cd}xT#U!kGOx4tIiTT!)rM0pS?z4nE&u)i^;z=|3ud7IZ+*DX$F0epIwFH4a zf>d51a13;KLE&6otqWP_4KP_JSZCh9BTl+$RAQn65Q|{u)stJS~;yL7>N%<8YzrB}Csc@gKCG-Ln zmlR+{s{PT&V=l)YfEV?mejNs)RaRS`)D2SKMoP_SBj-nx`oTCC6D>?q*y1+oBER@z z)J2e*d((mt1-FwycgEIiYflWzd!_56@r#wPGqXzUO9Nz=$zZeF&aE6h%t>^+68Hkb zns8KVN#%2v#w-^z7Mv%?MGMTl=GTSH@RVw&gNq{~b?fNbE9=C>K(zTa#^mO;&Qn@@x zu3%3w8dJj!ed^`R*B55WZ;%OhQt_w&ji|ldC6-@?v#jFiWYMURpk?c zcQH2+-ffbdC}0S>*ob)>(MEX_@L_aHGSo1PI@g&Q;g_W~XBe*ObHiY3ZbA|%vA#q< z?!6RfUIKstK#p@8ZJ-;Rof2c8p%MG8#@`i>%^c=1!%sO$_l=SsPv@M-8K@-SITM;N zPE;B@wQp<&AUkD=Ocs)P*JMQp`h*j_a`N_sLta$86n-UrN{z-)F!%Trjd#P115E`uUm6|+#RP)sV1$qK551=F0F33cgF zJ>B9)Zi?GqmKk{tLXfoGsMaJ4#l9&{Y1qdhJZqq7Y7>P5KS32Gj)buRUE%A;;_>_v zAwgZ_(WVJaLG~F2t)v@(O3)TnAkHc-)9GW{ETAK6N5=u@`PThfcY@D#@5s94#c&ARGkO0?iA# 
zYMxzFn7Fr_@alS95Is8#q5p|9 zDzQM^v=$G-YTSarLofL#{uI}1XIe8ZzeXAwmhO?*VhwDP>`_F}B;!d7vg10f*}Q{D zTMU}?7-((38WwLB&;Zuh|M+$(7)qBt;VI`c=w`0=9vs6N8 z75a#KTLM~bTd1q9#&oVechm4+bj0zL$U#S#S?Y9GKNn+u-W}tRl~ZSGJ}r@~?q1#4 zwc3e&fYhXCEO%6oB+>GpccTx4WR>h z3bXg$TU=tjX{k%Gem&>L+P#9hKhKVv7*%S_sp&>zJB-t!0;pi3bxECY+rnRfbcnMi zhL*;i5yTtxwff`SX+nMWD;V7S=>rp&pnNBZbIKKR!I47_H{O=~$}(L|h%VpVwg=H@ z&81VpT0qL*W<1_v+za`#P{?5PZmFKx=Kyyh)~qH&cg%sif$iu2#zk|v?maO75TIhZ zUy2~q^{!awEr!S+|LEVPjBO2EiL>nOj1OLVb5?@|T;a(`FMZTORDtu82e!vH68l#Y zA|@2~M}60E`b*Z^+LdG9;@Qemkw;;Ono}wlur51>3px-RR=`d|c3JXA5HS|5w|4_? z7|Rm_LWlvAI9Cq+Be0{*yzal-g+~#D`kvaQy8_ywM8HLQgQ|@Bf=TCSRL~+x09S+{ zsSMy;ILU(0RM^nqrs(yFeIr5sgc}ELV=UQ6MJ_rHqRcRGe4|J%!fAsAaM_2`5~)V_ zn1X^X;Z+M^O0Tb!3E1Iy;UKpz*@D)0%ov}l4_r*_`BMe;Dmr>AzEmfPGEWGU!dS%y zJZPwr$i+?K0;1X&hBpq{#;M8bc-}NLhU))^NAi0sALly7@|@em@XGN;MoIjMug)L| zU-QSib_&Haqv=eHKnZm#hg1Scp=FbMM2zwPdz%8esy>l7mW;z}XdHraz$7O|Khk2^ zE*6e8p;duNZoQY(+}E)6j1ME~Y5((!2pHf(O1(9+UeP+}UsrI1D<^ibs}WW~mVC-V z(X?@SwDzp_Ok*_V{s6?HbTS1c1n8zBi@(`zh^({P z0?BeOl4%PiTrxDF)STwiyb!N98*j+$b>t4jI;h_8SHh%Lr2D1Zr_txTcBP=J;1ZW4 z1P^Mlku2H0XuVKjJlPM}!o#;Wjj|O3ZPKKG%+BOu5+SB0hXGYaM8~Sd_NCCi^vdOB z(h?+3piZQfrW4S!0|zJZ?Xmzvg0x^75wh2di}P02|2%rNeg1$qe3yKt-zhy2ZOK$k99I6o+# zuNrFh61sHi+L%rc%4L(*k`RIk?C+yULlcJEMVU`vLh+=l48aYMksqxj?*BB0+5@(c z^b#5qHXrnN89SO2WF#i92KfF zhWen*(JpDC$KrYb5FQklmh02|9(IvX-lTBGxuAamCWfG(z6&GLqd@E!@{idADijKN zJZNrl(do%DZV8@S;()G^8_*yG(lI9lakgJ0LG?oeGOsZuA_x_9F#lA#q(g286>~G* zUJ20kl-+|8Meh+X8QmA9YjS_H`nW$%$Rgvy5{F#AkCy2ke3XcQ2Q3EG<W?<4$o3CK6%gJx8{qGyu86rMEk!b;V1Q$qL zii4}{peW5QMPYRdDz)|GTj#h$k0V$rhii_)9Of)?N!h5_*{5pJ;A!Y@?J-TW4WP3s z4JD|wbjxBTW+XA8@Cr4dhcEGl?j~T>R%evKD_EVZq}CIr&AWQEhdpcx`>p+m0p zvTs}S3r`Xz5D_|ghd3Nl*UFB$zlHJ>($%$BW!A6*UB6L{cvul+j-gT$4L-rGi3PR2 zmllUVctgZyq#$?L=gV+5*$wPg$!{fyC#F~9ggwQIK4Vn`jmh3p{_HaVMw_&9q!n}+ z8>o70R0ZoMQ@mqhB5qHh5eW%rK&G4s- z(O!tr4G`Wh!V-g}qNtJJKLcC_W4O}208zW?9!p~WAcarq$sI8ZGCWyG$>5%JjtE2f%XSjkIR!ZSjim 
z7maEmG$O%(#qKC4rf7RZkXeu$*yZJ4^^f-FF&#yJwuXbnwE;YheT)P}^;9^W%ZV;&1%9-)RAwDQR_l1xTA^;u;4%?Qw;gg6le;TNTBn7#tp#|kKAM$?S%5IWmx6(= zbH)4@3{0>I$fqHG%oo48yVo2;g1eJ1LIzk9s@c4sd?pa!+#u{K1VmLwn&KnF8?!J& z6V<*ctrK!&fbp~q@f2dVs_eK5guFVUV3pau#Qjjcsb@Tg7o5PNKj028M?cJA{fwpU zQJ>*spTVMD;e)6*s%zqd?K7zAE^4HwB%MthJ`gb_h*i2mphJLxj{+rbLWR?!4KkE$ zBseRN7Gi3pMts7D9&*@^4kUR(DOny^i6YZxF$Jho`J&uaoMt9yLo-L6Jv)o{5mMDe zH9})@NaUk8n1Ow0j{t-@EHI!a7?_c-0QQJr8fHATp9!ffil>&y3_;B zSs|a1cCT!BKV}#p{Sw1fX|$5Tkd}{IGY>{lCJ!1vGl+-C(?9>iB4goENLz~$9p6m& zlJN$iT_J*zgl~nGQ~oXZ_YD*xo2gRu%Rb3PkM8L*l^Yj`5+?ANf+!~gD<6MR(}l-u zY||lH`4BQYvS0vE5Q4R+GB+Fp4oE^yivYs8x*})Svqui&<33$E2B)mz4@mLq=w-7C zN#;2LR3S*o{qr4qTTcR7E<*%O1w5o&o!KOpLaj)YKjW}F;rcdI&r+0k(2ZnZ(CM{; zHGo*rqGg}L_b0gSz;MsI@X`e8VNbicxe@ehf?;C`r3oN{iWl+F^ggObC$ziBjLG&!Wl9kT28-s&HK)R!E)5Q#tVO$-$4# zZFFjChlj%EQ?<8{d5Vq`{MrI{-F_*_29X~lBy5U%PpOB()_lcZ_&_e-WQaNtKew~% z$fW^jG&kTG;YF65=E!qAMc7c`a0WdN*KAhuy~~(~O$cdZFnwf8Vyc6x9Lvtpv>F13 zK&tBBA#-%j%d6lk56AK?1K7FzCRqX5L7GQ>B)5>RolqMwt6Ovl{a4FcYymA55^!Qt zf3K>{dlqdwT8==zUl;k)Z`Hp-S4#s!-CM+r*?6X?y|5nQq7sBJKY=%-4?(8$SBi6k zokoE3$jc;$vXX^5HxK_Ym2bz?14nmne=<@u`;Er^Oz9mH)Ro?>~n(ZQ<=yQa`Xi zO82_X(T7uasP{kOIy+5Vc=@Dh7Ei1~G*5eW_G~S&I&?~w9WA~et3w>RtMlqC zyL+6JD^?e(Sk?0M*rBF*py<1>(Y5qbQlV4eI{k&YWTL{)h&G2KBeKm6q;s!_F*qI{ z)&<=gg{F`a9>uBH9~&)|pWN&3cZ9mk0KU<{sGJ%6!SY3Q&cnSeMTLd3Uzh=l`y{r; z64a(H8fe;%?%_2oO@i!3v&vO7d8eMy(9mdh2}ijrYiee;qP~DK8IFI8EirooP9F}4 zaBj6fw(R;>Q0Qx2W(w`oZ&OWFmrPt?z^%ec21$pc3Wr8n$Q|JqQ#4Qd5P33hJ?GSw|A zDS3yCvgzoHnPleY^!BYvv3QqmotYSi(`Y+Tf6py$Y?=`{@pOu&_&M*>FfuaINU~1m zU<3XG=rp(Bk+^br8?(BQGR0ronbfAAU3J}5 zH(6Xcw+D~6UPD!tXU49HSI+M>7Vsj(O89h=N`hXRBaG$`aJr;vvS7*ynxCTCRg;!K zqQsrDimjJnds}Mb#^Qc;TEScbyO@)ByfikR`2|zX`v+QL!?}`GP62^|_h3=f!My$} zdg)d!!H3ASi`8)M&%Ell%nMM2H2XRZdHxeP*Iv$#a4GSY6N{Y;50Nnx>`CIgcLy^Q zm1UXAgiF}jyv3 z7&_1=Q3*$k_Zz)UH2ljYT&T)vHeb~k=oBS3dzPmfb2(hXA#~%rrt-hM(K8|k6UMu^ zLz2WD9_oxX2qFSw6dm7;_O9m%rrq7F%S52&_l}k!`g@fal|BgSKpK zT`}rR@HY(O7^qe2A5CUvFfE=s(JZ}9H3o3d+vU%eSAVD 
zUcYkbrel9B%wLk*V+mylmfnM8w>!+`ZEsnyZR5g-1$=|2EV|-YWhOh}3%r3w>D06x zw)^aZak#3mf`Z{Ao&AX&f+jon9AJhl$)tX`#5|M3lbshF{RPqhmmkzx8hSrL|M77= z<(fx-dt23NquC`Z^8?f3z~fbkxA3wT8Qnkp%s}gpsb?@cj->$h>?cesckxG8b_@LX z#5(J9^pyj}-=MI+w@yF(*vOBr#&WFhHWzIDbn9X#LpGT2TTy6G)bSM>o0bN&x%lTZ zf85LdevG|L!=QkOI0Nj*ethEGf%Zr9`0?W!-OG@iG&KjnaUY1&PN+cadfxbHC{6b2 z-S~L%F$^OJG}Zn3_Wm{M6r1T9g+K3YiiR2!o!WseotnW zc_&`1A->SBXG9Oj%m(un9Nq9f$=G%d75 zJk>gQAjM3DJ)AcsWnp<=b52M#iz8SiKe1ZQi zM=QXr38*2It+BbFC|Q`hWvjm2Vz}wXFzf3zwC1rymA~uh+8%ZPew<#~@uIRao_Rk0 zy(z0-+C5j;L0`fnDF`Zx%S&d*AHo|qYC_WCg(-j=6AYhh84X!tb!A(o^w!{pY?l-& zR4~;+3m(0@5JuWBw|nttqf?y55&jli=4vC?{3ChJ4KaSR`;e{Tnm?7A{3F~9;Y+YL zJOnoNHB|VqI^u%JP`ZHzRRPwuKD*Z}m2-@79qhh!{er*UT zC%Ya%v96@lox8F&y~QQ`cx61wB3Gpxx4}w`@_x>damX9uWT0IT6B62synYDlm6I#z zaLe$?Nw&t&Ur3I)FKkU98}FvPX1;*S`z_4BsaDleIS`dKj<53b{E)u}uSW+75Pqq;0%WS10ZGd#rUg8W|@CncxB=<>?dEVo@k3FdxNL*{#34TWn2l{3&~N zZIu&(`f`1}?Ji*a!sltH9KN+PvUKg59Nt9_$vc}kMw&p&tXOA@gU#T?I!q%X*TfuCif%S@Y; zo8XAyEsI^wE%>=Q!w%_7?Nm=|$KO|FFOJbr@a?h(oP?Mm6kY4QaU69HWMg-R``S8C zDsbo9V0&eCpEf>``@>@!|Ekkyf77gMx0srmwnGaT&6>EV$gqD_b}q-VYAugQ!V3d@ zNCjV2s>Ry}?(DF_WLyO;We*HDIR4?`Z7{V9X-LZPx{OCrFIHPJcjndGyuG*)IHd^N zRFlVi_nJS4bCak5g+%Gy>)X51V0byt$rwsAH#hINdu?k4CO*akmED2drc`6Z!e+m9 zD*;EbUl`n!dA-dAMb^ZU?{u4}!}#`Kd4uPR#e&ckJCrylW$s3?g%J+HVTu|GE{n@_owW2Jh=P(Rks|U5bXsnx-a8tm z^UPkRFN2bwSjm2P{8i9;Sir)I;O-inz#qR$!QAY}kk!X5EmTjjXy-yc%OEmsKK|}C zfRj@?l~mS8u0Pq15Fe=|b>_!lgJ%-tc3*yhCh!jk2>9cxvfHlH3r>b6om(|i`smF` zNBfSP8RmLAbrcNl4eX6*j1Ti>1}UMay4~htbfd+#)jYRH$o~bKTe<0hsNXe&MJJ>v zs+K9jtlw8^#z=Cw)K>(w*~{cNiz1W11Dtgmn?g`Ntb+fzNY^^A6l;B){PGS6@*Tv( znX@UeoEVq^zt7D2g+5b)rZp(pFWyiowkHK$|wyq+9FX zb}uklsd0KMukp8qNKA7CPmdS($)h|gPVPtF_!YBd0DB zyHfjnPD#o0=zI5$!@crA|8e&ua}|0eMHl&AtK$2-ME{-olj;T7_6AU;D%Ct`tOz@G zGz=a}rs76uLbIWQ+7NFL>fbAXY8Idrl9-1@v8q>uub?Dj(h53mo+fZ0j`%7rYx0>(X7ULK_c@4x}_~pk$RiRIBwYRt5bn4sTb?7ZB z>UB5Nrss2{8wSzj2qIiQwlOp{F*#P7ZcSyWs;cUcT%VqwU$BB=Zjac^$pc-rhcNCe zG>T&`ce^u&>5VA&G9-pt^@tBd1(h3*k8K2$>I(O 
zNe$Q)-%x_=s(oyUJNpSaPU_)jXTN0$ZoJVuSR33EhjI$Yu?pbJ>eU_*GuDnj^uoM& zUFf(1qejD!ZESgLV4KPUGR0FXN9&#!XHnrF55f=~+Ub$w_haMZ?f8>P>APZ@r z`JPv+N6!$YQ#6j8P{(MMNa{g%?n?W9F4^o$db0lu8Zv+%uLpPT1m-U>+g5?ZyV<^Z zp9gSoA3!Yr*UeGIbsHyJ>*w~wAy3HR={Zqo#ZE4$UyJ?m*U8S;6bC3laJU0rVdZ_H z8Pwq0<$N0H)48^11_cG(rzJ)Y15evGjPD7|C62iESj(!!#*Y1E8V^m#B8*V`xF14_ z^HtyFCEc%9pKydS{R^at2a&OO3PBNJ=`NQ^X&S+(4mAPC~SQnbKqP) zy;Lvz^F;J<9TdSc!&thDka9;NVDT;)!Xp2eg>ovts_;*#Jp(|cHazOmIo2>^8 z`;P3u<{bb!tx2@}rKHgP-$Ql1=dDac)$$oK` z>%I=}ac3J~<88LC*`yn&Wi$Pv@>!;ghiZ^Yx0n*?7!nefKni|SM+V^9bD;1SrlP{m$poWIp5lsD8fLrobmJD8) z+S|t_XN3C2{gF8+1afdFV@QHeMJ0QW0z55sK)SQlE^bJ= zwtEeAmiJU|Js?B*t9kQ_G)B!Tfs3`nLVkmVPD&XiQA1ndvx)oS3n(&hM8}jm?alE&WrO`dz+jl1;>cRNZE_|>b| zAOzn5-^js{2zptLvADAtr>+b=Gx2j8gK+G?A5#Hz8~{jm=S@sI~ND zrS1bhS2geKs=96ci1cwN`yb#wGexVfl{-Q7dw@Ej>;4gwLaYjt#gV%nU|)Q#OsGAg zFNS;fYO3~jqpiAKcjLJnjq&NPTZ)6cL${XxyGP{Lro|}zwy3U%p*RJWId9(tz2dNkGw)=()`5_s#yJ}=hv8NsB>alz zeD>gG)P1XFJ|E;O=RbeAcU^g3s%4b~UcHK}tSnH`y2mEHk|LgqKR6lEq*oHZ=7(Lj zr!9-M6pl*S0=hT7Cnv3g16~B>e}pmW0@V30)4SGhZ2$1+E?j^b(f99rhxi_8eF0kf zZe3lSy%xRVXHN)O27)2PA1Tu15doAXkq&&Yy-pwP)q8FtF1QLFj*xdrCXw#>E53r4 zdjJxkm(l&Qd4@w0zU>grlzYCXWT<1EQMQin!gX>n9V_QJ*gs@pI-dr3VY9_ep)WDm z)CmWRUAcXjKAcOv-Kr*)4U-WkIVB~3fvt+o-xs}laL65ILMxHv1#4#k00gZJjtud8 z^hdP#%j{(~?ziMw@{h6KkspG~=lHelzIy--k`iqnsIV5@vVA@amGcGey0feHy-I2N zwfTarV4Ku%@Nmtb$1RBYZpt}501VQSCGlT_TlWOGQz`(Z^)J_toMBmj6#`fj6qO^Z z$}U(GOO+~c6t!1gCFN^w{IkWnCJIXWj>v95tJd-GHVae)OBGyGaURPww%hE_;>DcL zGQIL@RMpU22HO}xT)J29gp0O>M zc|Ke+I)XvE*{2?1gv{OZYoz_I0Dc2jpoTPj2*AXxC!x# zzT3WSFDiE_DR0V47Z(!`_KN~JPrm-20F+h)<_Dz96Ih1gB%Ut4C-2v3o#d5McLX~- z_S735GO$w|syn+xg{>3FXLJIDo*dxe1wY=uXgC2hWrjzD$#;)!C9w!&2^8osb|lE( z*u=b7K=aNZ6&;x8#8ccTBTnBS95K{P(u#navnm_hdK3^xgc~<}hOJsVbc;2|ep}z| zohVigqCf7c%hXJ__CAxBIfy3c9(eCB*Qb41={fpnp`?+xMNg|*%8HcOw@!u->R1AI z^BDQ@9-y0}&i_5>+@4}Perxm{$N}$ubk(9{%6Rj}jvsp&?=BXv6b9WJtXR~>03ws3 zMpdF7>4?j3sN#)hD+yo173l>h_}_n~``_XY=}O`++ovsSFggaL3lzctGNlF8g-=c< zxKeqY!4(sml 
zdsafd43o9ti?O7>#_gX7ONR+^0ClOStqy&W`Xrr6d!JHc#(~zQoR%kBNoH)z%*nUx z{#^nnL}|!FYR2J}Zk-!cz>&@w7r5}u^X1j$1E`qRS`5}iD7gLjg1K_9frUn5Wf^hi zR|<&ma|cF2cV-OX(|wZ9>`LE5U&iHSvVMsg8Yjq{_T<3xJu5`rJ>(ZG!k5uPfv9c+Srm=d%63^mvwFKun0`PFcpDm7fVv;x%HR5E~UB zKk)YGr+&$*UmtsBMM&weaQ+s2(wKTqugt06N zIRbjgCYPuVK+ksZT4=n`>AcsJ$Iz0B3J63SbU9IxsJ?0Z8V%_T2<#c5D+Qj^ETbwD zWqEc?>tH9Pf1Fpv+>)xeG?6kG~Uk7vI|N%)ggp&y+p=f?b-wZzA z2oZhVkN;j_by0xWW{V0Imj0#+bEX{Z~Q z!$-*$i59y=#gajWI)>h68o9LabycVAMT}{5KKkPTTch;mW?+{>SC;=3$kYmvlN*24 zpej-q2|3e%g>8X3b-nVEa|TNPEp{FF8jFi+{JZez&QY{Xnsl|u1M_{_<;&k6v}z_G{iBb9u@<8dse^`dmo*H6lhXJIomx$nljZZ< z)4qoUghco;=qh@EcNAz8?;dao-^*~K>N~#0k~>*cpg5V=x4;pVq$QX!M3Bbm`xiJ8 zXaf9Tp0tUeK2u&F!%ko@!C6x>yw6sjx`_f3BRvli=ACmsoh3Eyhtc>VHAd9RZwj^&$sJP0IC-iNxXwx~|^Ck(M zn>B`kgCEYj1#bq^1?iNdR)qj}KZ-+y!-&j({Rpo&>ja`<9dSO4p37rr@m5E}4%q(wj_xlcpp({ZU~ zpe+%0h_@izZ%}`}xlsf#0JM3BATWM|03jf9kYJu&G`n zU=C=!pvTGs?t%-qeJgIH`^Bx^A(2b&&j(nge63O!Kg$p}N%^evbY#WMv)iwJjzM+8 z;STDVu_F?(^MY|u8lYK34DCaPP5~^Dg}#M$VYX(aCOTjmE4m&xBm%A~whLP?iKNY? 
zZnv9fkO2^qHISdEJp766nr2mOmCP-L@=1`viB@k$bEbbJbQaB~9KoXfqIWsDD~=!(WGc z5%ATvo7sgXdvtYmv(P@7)~1urJb6U%cObD%w~n*%2RSgCy z0IE1Yy7_#zotF|56KluVL%KJ>IGiZhHi`y4CctqV8fi!G3Qg9Mt=nc!{=r;6-yh6* zqA~CE9@`{P&L=$q!I2DsH3yg*Tcdhz-p$f5u2=nlFCSe<06q**)ys3@@TH^BkHUEct6$vj9uxphC zMue;L1{7bqesDwer6oTg`uH!o(L0CM;5hU-1P8c;Qvw8(wp~m+{x#4WQK&c!UnYy= zrS4MrPgNK>d4uI_|Wcd|@V&bVB2N!kZv;%&DghJveBRA!RK>bzJ zKbO(WTF3~l=AS+(9L$jc+6{RjMLbac*X2%j#vnEe#gC~c5-z*FSDjqMUP@S0bO1pX z96+0G>SWd#W*tOi_UqDNz-6*plV}c{L!mQK)Mcv?`dLs&sF7 zcw`rI3D9FQ8UgIk3zcN;kAX4|P1iNFA@LX;yB`$v>qNsR*gp`^6t`lIt(n<9oCSOv z$ORjRr^#o|rG?G4eUhLggnwQvBxR!t0JbbP^1mPPCUN_v_%;r{$5&zzi-Fb#by0H8 z)ZAQXELY%&QWJXJ`&h5-IFU%$QJogCmyfR+jgC}r58-4GR{{;uTao)=cksDIgJV*8 z&c}~GUea!MJ^=@ryFy+@e9vF%_Y71tbST}- zLm;<({Xzca;Mh(9JBR=T?7SJFGyYf%R2+3MlDxa8vW!i|HMXXa`*+0zH?g-oAb71Sj|xNMYWSqzU)Uz%dIh zIwGHJTKXQ3(-8yk+5y3?emt>fg`cK(J@mJRYbOh+p;O6*moez!vH-fG#tktSMa?p= z?RnIw)yqy1@b%_HA8rS7ICr+eYbgf>(1I2B8SXS@h^OJt0o=;d)h0?!IKXv@O_4nB zGLh2hnhs4qOXdguu$fLkAYNDXhT;7d((*U0UM9ysWT|qf!9|T1_=fG~efG0*6YBAW zfiD!-xbLT>i&wF5KRb*?%pt0Ew$6OeP`5c5t7g7?e8QNfjkrRaJb=;?{aTY5_b;S{ zM2v7%q*6b4K}gQtX@jfyO76l!u49N2hZs_#K_M0sZG1Y37}!Md;tJs2k?8&6;7Ulg z4qa`;e6yF#Jyuz|_1wklw}Tf(y8xtOrbA$|KMIi4)J>?B`Lc{5LU9~Yu9i7PeYX&fX>cheu>|a}D*5Mc zXRN1$6+5lMc@)T96>r}{F%pMYzpqCBn^{pKk?1rn=!j?eneG9!*MSUS2}Kiy!^%X% zW!*=78vY^(tu$ogUUcpOpJh75C{F<*Se47NA(R0R@;$oQUG_bwwPR5J5{EjEvuWhg z*;O(uOZr{)H1$c?TwbCr>Jeu-H*e~73YG#n+m%Ha1AhF&tKW%$q3?Ke*BkJse7T=> zEm%tV^-l-K997o2Kjae_>}Q zc3h0M72Jq=eUSh|_j0Z>2wvozf}@Yt*GVtiQaWAZ?xtn3rF#zC{Y1uB{9Jd(0IQAQ z&q*Kx*Pe03v!E9tqe75g0Dokkl4gF3D?pR2)C6DxT>)TTouL!GK_o$^K^7pEniUYY0piufG-6izT^ z>K7rhk$7o0f5pZlp5hC|-lvs5{7y*#RRp`}SL7MSbB#BWd0K353w>uCV_g2Xp=jc(Q9rI4Q&?2Qi8?3ORb_SXW3s6)=E@3^5X2Qy8iAG*`&ykZt~|0uCQVoxUcBP=qyF*XrkDDo1D81l$w~|;`(s}_hJ@YbqrgV(dl5*nUqY+oMz8&pj9TbK4wwa9f1vJ~`P{0dsynhbqr4*Cw`7q*Z4Ut8C6v zmGbI)$Y(Vn*fj3^?$J!A=39sVx{i(sZCaJpGoB+IzZ ze)7U7>7sKbV@N|p=0Mqw0(g0r^hAce&YD&{@Oh>3y(XomlJONFjsdvS#5H0jLgH5& 
z;r{x;m`arRHqG!6*f9D#a9;%mi&RL`+OQU>Ys$mbnxmfuwCd_3K}uM{8wyG8YS};O zEnN8jts#hqAXK*bv?>277%&d6jn&5pkGWH7(-TlQk-|w=I92=2N>2A~3{=-j?D4%(5~>L;Ke!OWGeX{A+$wGFpFcox1*+J9~>?x8-7; zqJ~wL7rpmh*_)!{zB)O9TWf)3K)cb*^T^r}w z^8@nO&)Edf33Xz}!)$Zr(!4lQnU@M>@a9)H3=W_x=|Hjc4#hGlA4vY1W?7{TC=pR> z9bpZPmZ-0kn|T67F8M{FLEAH-7sK8+(ph?_#;x;O<~YnyLhM=IlY@IRkO!m;CyG@r zrLtxX1j-|2ra!m{qSX%W@P9GRbM^9`y9AVh?Ys{*RQ9T-U=LG<4o~pd>i>d_fBr4$ z&ntTby7E@pc(&MH{d~T~mKHeb76AeCT@0#?$Gjf~nLtwu=+cg`p_iStY_|&Ysv(qP z3lyD0p~3zQP8Q`1)it5cuE5_9sCEYS`V*|8k9#S#)hPAi)FQUZ0d0K`kODAFceKbA zJW%|)9-&B>CUYCMw&;UZ041I0NAQIVp286w{1s`;Q{_7R3-Qv&y)M0Ltn8m90wH|G z$mIUp#Aq}?fa^#4B~d}LOmcZi`QN-LTAMyQAfH`y6c7}LS9N9ahMm5PB< z7ar|?gofa9-Zo43BB!|GUK#$gPOL?%_QnN7!|sq7#t&K*)Cd zrzP-#JkxLC&aYftx7h66I}U16r-ZR+1IY6iT)agtZgjImX%Y3U1MZSbyOFDqQg5RV z)+S93L~2X7hF|7n@GO^$fvn@mYJFWGnhp7#E751JmKwQC2)L9n`X2mj*QMkG9p|?w zQ(14n>Igq54c5;#J#5zinEaaz>K_ z;!Gg+l~?#vvWDQ!o5a}cFx;9M>RoIu?JooWNm15-knp4Oor*B%1#WznIkIrR@ytcP zl7?%j0bJDmx#Mw47L4r_3nk)i-1xcXdL{0E+g7WxTvvYr265Jb5L&v z5peiC`3Z!7-ri~xY4qSGbbRl#UHTB62Y?U9p!lW=QERv0V-iS5J-=Yb0YKU8uCg5`bO(?*e|IsO@!(lVYlWC#B54OJWC_}Ce9f^Q%Gk7#;> z>D=?OzzdS-6inF0lD%hF%~bx>G`sKIs_T#m9ME&u)f=@e?PzdmBPprusEZ=Z2=I*z zK$3}~M-2=lhOZhPLODgg1TYjFz0(u##a0*w$Af)s<@0>UV5mS*nZk_<;oxe6Y+NW9 z@v*Q8>N@&=7L1KdZd$NnBX6ngz|GsPu_S@G}q{ zM~$x5)#Miv5f9XVc=!w^tGF+RZFbh6NYB|-uQBvG5-hC}w8k6q-{j$(Ye!&(><6b> zZ~zrO_v5~jP>MUuD^5nfrDm;l)s1}xv2x$!@oaV@BWX0o7h?(ac>lgxExYmK^u|OZ zf!3gJQe1OmeZTaM(_jvwMz4fXU>yG69UDWHOwA>QG9aeQY!+_*kE&Hnbsad4P-sYc z%cz`b=HGQ+k{tjHq$Q>VAbZaqe7&y%&czuJA49wRHQ03ZYjawjJHwfxmYtii6i1NeU&VpnJ8A}l;3E);AamU4a^drJ328lomxck zVs7bkl`#i)VN^b#)LEf>{-bOZQz6;@2$$tgNV-^jhCEeIoKG;SQk*A5L>N7~MVGd@ z+ks`y!oP!x|Nghbd!V?;kv6j(B0>;7{_``RdyD2wRj$CvU1_$BJK5X~fI~P@pm8+N z`e(BEuFcM-zA8{nSXk@ zF%RH5V8{P0)p?$jyRlUNKlZ*nuI9CUH@i(FWfMY_ZAFr#P?3tZVU;13XrQ7Ll@cM1 zgiIM0l_3pV5sji$n#k0kObw<6l{C;K&2_H(yVh#kzu)gYpL5RNr_X-(;AO4v@I3c( z-`92B*KM!kSN?LxqXE=-JHX9Onz!|Lo-#zh-^?8<%k1B{c0PD>f8G_^j$Tk2E_M&Z 
zf#NgSO`;uShv*$U#r$@8gzf|+p#Z4to{D|^{ys74hvN;A-a9OH+n4KVE+8<@f7YT?Pqtz|95#wIGv+SLZ0BUbIU1+t z>Qhs%3o-~_pXBY4MjYg?`egq(0^f6^$g zIJz?AZJ!2JQ-wCjxJR}Dll!l_sXisBzhri!cGJg{_dE}_*+cs%i(>{5>e5@D1mo#t zYC(^=2cgi}ObPmAnPK(R&x_Js;X{({67e9s^k9*(QjMsX0N*vhM^IF%YCiLO1-!Ly zMz@9@#*JU#$JHmGw=S7dKl~ez$zx6yt}bqY!3i7q(J1r%icf65q~g^Y0937U-Eu(h zy0tNZG|-Le_5^UAil+fjZ6HZG#?v{Wvu=QMQFFNcuoDg+0vb{mVK`wsw1G23bhtIc z#r-H20ad4mH#XWM3*WwQM%(ef{-pjlDrEZL@v(9Kq&2c9E*Ms#oD2SQ&!(mBmDgB( zaK@v9n-T>*Q95s>cXUQk60>28duTso-n!J*SA>A+fv6BMRrn0kM6gaZz4W6ph?lu@ z=ejHW^Y#N=_0m2gL3ygjKTQ9)+&35Gbs9P3p4)aHcqY3Z%a{lbCyS){ho$un9!w>x zndbf^huQeRNq%y3=ES`1kp0#z@*#;DH;BkVGjR~~noHPSYUgr-KIc`EXc0_DO6=tC zOx^+{E^hVDy9ZjfQupYH<-1VqxoyE>QBQO%--3s1fHz+RdJ-t{cCj_f0Z_eL0|fzV zR^@2az|cipM*%nf;3)3Wzc|c`AfHg3Q(|vzpx6hKVUA-M#Q

;i(bT%)07R=^Bh+ zF=p-D0CCTAmi^P4xe1Hv{Mo0jvKJkUdyqj4K20Ms3$S(TamgIE)noWFr;x zkcyyI5C52F_CY-Qa^FG@b&)|4BVAeB!-@78A{@JtQyga=hxkA`g@f=SxCbVh-8FH2 zZ^pF3w!P@`opCW+=K3n0FbW+JbHM<*G#F95L(`(z9kOleU1O%MF7DN78Y1&uAHh!FI)y}ap90uK;KP?1mMp3 zuB}#>laaZ&qCse>G$Qb*pcxfs2L!bA9|?`;XCKx#quW9cLw(YYG|{f*^AnDEuzUV6 zUPHqxYDOnLH)~eDa(>w*h0Up7G2YZLIWU1;O3R$jod~EGY$62<~jmFkhNf1-Tw&NF{LW4x@%hG9t*!Gk}bV z#f3yJtX;&$fmj3sEFcGyZsCB8kc-O$kB+9qm{|rlYyknF`OLag`R)o3TIHu$#1~K_ znG2ktXUWJc53cn;RkwI!}$}Ut2mks=N$vY=|r^L9=;>JRk_ev>AOPX4~kEFU*|B?KL-3VOY(JkGTVXmP2IbY-Y;ikpG z#y5GVR?b6na+F*pJa)_EFV%s9=__yuVwV(Qzf(>}s_=vW*;`iGSu;*M0`jGfdg`(} z=p|#h)(eupqiC9(=*dk}_xU-ee{Tgr^4Y#(8zG*QVZ)(@rfk#CIDG_e5&|P^ zu0`KfWmYVU#Ke|$CQ5mQ8vUKyATxKPCad+NpQnaa8jhRotV)fYfV6NFAf4A&Ly$*W z+Xr-SLwSZrJ3{uwotw!`Hwu&OZehoBE;`L^OJ|X47T^C8)yFNg4VQQDQ~z-V7_|~X zOk$D)>gMteG^hZ#h5{ULToI%wDT`Yb`giHKF^99n|R-xqGhanx52>yj(McSXP{jps3M-N!&K)AnBiW0usl1)>f64 zuc=jlOe^12K2N99c=Ye+%;|OQ?ZAQc?%g{$>|Q0!`)p<9Ht!PDsfek7V)tV92FTLw zQ=r*E+#$g+Oc0~M=Rc<|Q@%J+PO`iMy6N@`AF+B=Poiz3&|sSyORE#}C*+naS@LnV z!OMOkazyhw)`%KT)%~2hx9)F>A~-E8uUsBS8H%1!bZByB0{+c=jAC{I5Ssc=5AGu) zyoHxs7ht;UeE1Oqv!DB+6xkOE@q|hG!9)Oq7cX3x&L-Ugb+e%O9tbTler;({;xhKw zA>D+2k9m`I@1tEGr)V9dtwq38WY|2ob7TzfFjSu|CiltYXFE4h(^}pN$!`O!o?g9< z&?VUyUQ<{_r)34E*xBOpEcJFR z+^X}MM2EmDv9kKSON=>|6<`8TJ*?f zosRT{>3m0p9B%feG4ZmBV=z?)>@MRoIex3>1z8(cX2rX%h42dYCkWy89{{FV{uCc@ z$LS%DO}hj>LG?HbA9*{oVZGufrMU$NZ83Fy=jT-i$ZUMsoTQwW3#WR?C_2(U{yXpA(aRMaMD_c5wFfp-w@MK_AjHQ%Fq zFQm9sW+uoR2Si8@w@P|>wE;R!*cdmUs9jvK;HsoNWKP!BFxZ(sB9mvI(wbv64f_mB z;|gYAFwCuYoWX0bnRORSILTxSgC+I`wr0{Nj2RhU&T^*fP1FRs4-CbWBXzK4Bvpqi z@;@Ityk;~MJNw%4wa+apq=yqJPi=Gv|0AVl*X>Y}nK*3W%pp=IEh`S&&Ey@k<3{md zO@Whpq}2*Cwn(8iMwHF1r1H29!KVb^GnA7DzqR#K@4Ciz{m3=}OT2Jih`l`UF)SCht0p-F=Ap5=BP*KrwYOE(T$+v~ zd2ifoqpu)lMAjAix305B6Prl!O*Dr?+oh|2;NYndS?E%~Q*K#7)?$*lldgcj*vAgN zsAvtFG)y6RA3s39I>*&_352Rf;D*)ocJs&-$ep9ev^!q1z-sl-rly_@lqS)r1-DQf zC#NM`o<@m!`NwT2I>5wPK3bqF=P5OspLHZC3#|xPD0HIShnInGTypS-9^<(&;_|7c z9>fyFZC^OPfk%zsxFOO|A^h?|f+-Ze0O&LSwdK&%h8-dw&4y5muoGDT& 
zBq9+Ch0PJo4>+q}nUG#+Spi=zJ5mpb_W$k=yNNBl27|F0dll`rF^+Qhz%2ePOJB~H z{)T55Z5Nkrv~)Dy=wCL#?_^~9J}}(XXZfkZ1E`;4G1=v?0U zIvmoC;X-F$v`apFwb9%o0#wiRw7GPdXlCBxzTyqHg!<GK!w3$odQ|F z7LTlEHmjT8fVRV|wsu62_3Ppw^B#kSjJnmuCWgJ9_cMUt( zZ1yYKd`LB<^ZlUtnJ6C+F6G&|R6i%Lmiuvf2T8BN**pgQ(KXf8vJh&FXku7hPPVY> zd)inL77N)ruMZC$Ie&Y;ghP48W-wS`?xTM{`rFUX#|}|{CPp5tlKZ6Slm^%1osWfA|qPt0(fzB~%4?RF1%^BpmZ!As8qjfkk6uqmy(eJgSRf68V;nkxzCAH>BPC zfwl`$hu&2(55YJbs(gvz=Svtw?{ucK^8)9Vz# zv^aaUdVIioRjIy*Mak5IU#AfkN=Sr~l2Y!MQIykW4Uz5JOHBUld!28LvFgI%hHwjS z3Tc2y)to#|Ln_i)_vlD&^r*|u;=F}b(Grj!oN&Z&C7uXUPk8V?L8BOg)c2dZ!~{{; z;NZc7m;C{IGZqnE107x?t=G@U?8cc<;|h~;?%VKP+FKTmw3kYKoQhl}&`4$U*uyji zq6Xyk)L{hMbzS{1pE;+t5^Bn%g zTmQJ`HS33mhf>%`ax4d~HSYJN=oR(-@udN0QXPy^UuGQ=l#$slp<0F}bFfHz$hbkI znM-RqIq|?PfphXRq^MS*#MJ}oOe)3`6ye}dN+~DNrzRxc9K*=rv3Pn{)b`v!j5M3k z!4(Q*^*?D^W0K*X0z1$!a%5pN!AG*?fNcG%dw)lWFZ*d=Hg)`w>LSh!gvkyt67}$# zqqo-og5H|eJZ9#H*6YVYO{kKK%KLw+kg3-Q?JVlhz*AE_L;H(hRESvxpYa2pwAwE> zGxI%6fjs-WY|uD-FaJBioHTsL1mwUY#XBryXKO&zfh%~C+sI>)N;BaUM!_C}yD@=} z#}PYdX&eA-o%Sw0fmEQclp=PyF}M(0?gN|ndc#@UU#g5hMV;5A7#%yzagDU&F&~C0 z1Kz&5Gf4ZDc|J257q{m;7?zEh#EB7y091lW(i=uO6^pjS&NyT`i!dTP_s8JXb!y9* zChm;DOQCK+(2&3duxkrjJj`K3Fy_Qp@b?6Y$*^(WK<8B=A1k>wxBQuvXeuJpb@{x% z84x1NlAAAZ(9?`=1KRsE`zvhhzN&Qw^L$d!FIS98Z}u#&P4_DpnH#cwPk!8$HqIB= zWjtH;qh0IlyTi&m#Bvtl3E(H*k*NCihK*M2pzwIv*kn$!2Z;Tyo=cQ6>;T;fX+g;} z&-?V7&mCt(XDq*3S42YUS;n-wNleSns)Jpuip7f z(K~T)aY36f2?*RsDhuGdtkR6mQv7_DTt&1q1foDBS!3Qgss%Nqa>6l7flMqaNrEX* z;S?rKY7%yp>KSx(--?P&)d6LW&hJq^yJzi|D0PChqHypc7dh>R^!rgG59jAaa$}T# z^uT!RD|8V$&tYh@arE!hm?&1#%Kfc+rw_~Q1~fd!aEy)khA}hQP@ca+2K)%tD(Q!7 zoX8gVz_rc>CQyZq=X%X))g-{3h|ns8yS6k%d9lMX1-t92 z3`uJdJxH{48-9xL`5FFZNudFYQ*N$~{%GPjIFBs2jD|oBU)L)jhjSU*dZA-kG_~ne zP7~rl#)FKS>Am1HIa+E~x#ffHk|ad2oqxqiF(StkdMr3@-9yXzkrD$2^$ty|noCIw z%WtAU#@D}3?ZxvZBTKk0;A2zKtWMqgl)pnAJZC=qFvBdEN9Zklj9(6oJ8ErAN^3GB zf}TnaO_9S7H!J!8rY4gl+PIgPCUT%zy&%zpX6;uVx_fBHG~aT{o3e>o!#IyeD>A=bOAW6PEb6ZqCpTNm2aP|xLypJA$(BcF^=_c5 
zo^@hGiOdc}R?3yad(VALh;Ft4+$HNQn{4q?!y-$-Z=&*w!7VB4JGdpcJgLANBr%qi;4y^yKn32;-x2kB-^!C;T@?1i$@Vuk1+2zaL zG9XiIG=%R=9`R;(P;+&ekM)*y=1Sb>I~dAC)lbazrO68*!WbwE|GcnH!hFVZ9=!@;d6iQSR16_*Lf`Xyb&qFfA`AX$%aNON96ng?qQ{KYdvZDeQy^8$K3$@7 ze$^4)y$6hIW?U~swR)Xm(U9P<_@kn0JYquJ9gBy*(ULQMVw!97gkh@FN#`*v)6PTF@Vv9sGCm2csR8HR=uB#rEH;y5GD*ZkMd;@WV$8 zOhv6^lasSFHNKML0kv$wGQQrsgbP*aO&mPyFAIx)2qjY?fLe z!HwHoLQZ}+AVwkZRp^={FG|O2`wNY`ukyT+&3jYUT1>hBlfVY-9v!#n zW!1r9-T=0tqC5MT=OExegn!s^rg@hB1WdagX&Z{%sBp2$E2nJ%#vTNk@&gV+4d@fU zh|1^>+Vps`!L`&YdPTz{mGM!C1rPkXnEN91{HNSjxAk)K;i6l5#@v=0yY4-X1UZot40ftGP2B6}MYn3Z301l6onJV!x7RB~zFwx5f|KcdNj+On2! zDg5XLjS3;Hq!hNV@PZ`=Ih@Lv%w%p$%UWydUpy_O-uTbH4g;`buOkOc=xiv1MYR~X zQ=-Ib#f-Kb@Cm2`fVPY6Aj%1ON_elNEJ5~jv6X*6RDf}#XkRekX&MAr#>QWkwtzHN zc00$rzst=139wK2B!kf}B9;b$Xd~<;VT6c6#4>ARuI7dk9P4+(M-BmGQhat9&`R)pA_bU!_0`1{)H752Bc%xC17p{i;&J;dbJFxQC8@n3-O?(|Dr0W5p zGh^B$i6MY=v&LI=qHF<5d=fqgKoBpqSm?y41}frl<$FzbtJ z%gyHT>gQE7Rt1JrHxcfq1Y_=_& z#6RWGCg?gfJBxXdp}Et=_+n4lk;_L3l87k4n)qhy3`kKR8BBm%eEF2-_PclQCPEZL zH(Nw66iard#}70itq z?r!n9kE=?>X!W zFK$1~Lx!>vY?cjh1Ns3a4&yRbP_VXCK&OHmMQ$s#O7lX!Uc2N;Nh|a0tQ7!XVT$4c z+oPRa!ck_CS(e!VaI0b{K5s+Frt1C?h4vez@~iTZ*fgot9NoQO{C!>j;3HRZks_{o zyD=EkftWN@D%X(4qE#JktvnLSet_-y-x9!Oq*JXy{86L4AlH( zHl4SABO9Ol@D3HNqN-HHjN`aNQur3dn5xuu$mS@<&)T*4%cX7Pv;*p{!n^4L%oeG? 
zn`#z>i^&1mJCyal=kzqnyE1@wuMnuLz+47-ohL6Ur1sS`plW zi1CjCyA12RwT~2?dUvGi_bWOe83O%;D?w%bsBli6u+Xj--ti6}Dns!%Fl?6zd%&yi z4Lx=kZo?kbh6>Z=kTJ35AKPA(Y*_37 z8yb5DFl`^kP3S>lL{6$ysPAZy&rx(@jad(70ZJ0`fxC@l{b-pv8*&mm5RC9s<75o< zyMZWswi_x?kYUu&HVQNW%u+?o$A(C8Pm+rptC#jn2x;r3O#pA1NW>3Y;5ywfO4?-W zL6@d?o@p=8d*T6$O-4lmBm;1blWnnx;7YjY*uT>U&|?T`3Rym(vk%S%HDA8e!(HkX z_SHRz=oq7-F~?3;Y42CQXd1U=2^o#zAd+tp8rM?N1#G$<|7*Z3mJRfrQ%@bdrZxp+ z8(#88+#zYbSbuafd(iu~uljLb!h6wc`=)(lE`}xSLOSn7@nIHw7n zWCq7k5;x;mT3lExvqwU8@nTctuP?o_E5}3Y}epyb2LHKS*Lc9k!8)IuI3Zgw6z=oyPcXh8Whj(ak}T~ZG|0Lny?yRdnxi_o3Z2IJj0pNRo4^79Q5@)98&Ay$Iqt>K(eL!XHW zqRKGE@nM7*0W}Lt#zwZ&!uODvRivvq)-gsNnK3f5aLj*oB!5OM8JYD=)r311Z4G@L zV{Oj(SZgh38WFU{SuNCzg)#%>Y!Niv!lMM?gVS;ll04Yoo>~@nYNKN40Fv)^V17HF zoE%3DkP^3hGNdvVGDb3tNj>sOQW^RPS5P)f&j*%Nl_wU0*G9h|CmcDqEx;hA?+)!d zPD}XreOvmU`_^8corU(DHg#Ix{{s#7E@R?L|8T;&X)PT**gYifE(;9#Sh+mQ3P>~^ z%F>L{1-%}D?727d;-QoZ~ zm@(DE@nr|*4c#SZQXN1pUDkkUfRLL*<-8j!oxoefxPMwH(Dt$=UV^+yC7%^qj)pVq zzrz@WM88zd!;Wi)rEF&~W@#-qx)?6t z7iTr*zwl6%(vFNJ1#t0&g7*o|57F>ADLG8^uomp0QbEYkhcw^ejF0jM=U%;*-xkrx z45q;`tSY4P318tY+yv;jgd?NNVnYcQ29TkAOTDE9o8+(LmjGGF58fXs!v@HGO#Q8$jn#`UteHL$6v^l%=~h?UzWV27$dfr{& zp|_BbiNYs_{~_79|1QKr_@em>M_(if>?l=_$@Q>^(LTKu;pIWV2@UUNw`CZFH333eODEVZN z(I*BOMUc@a)urPV%w*YUoj!Nb%S4x^R*FQoUR z?SPGt%VzLe%u~f%zcjkF7D*I%mcVeODUU}T)`Ge3-!PAw!G?ie?`5^b?bz?oGP1jU zM>7xO*9U3nB-;tU+E?hoTQ8e8;;^*kDpk05=3klz`FLn}0^U0;;2NDIO0Cv&|FEhm zFGL+!y$Qc!xD3VU28iPD054v>I-8MmEo;ue6GVpw^N!rhO8{j}V_PpeZl~dH`zbZ? 
z3=9O1JR^l6GJZBvtaBF++~&RIlPmc3wHjm{twYx3la#EE$3m3iR8b-_w1MG!nQcAcS8>| z9bkZpx|VaV{~UuvM&6@wAJ9*%gptykM|w`{XJ%cR_7RQtWIK}ZEupXcueKrC_~5CY zqt`cUpP)Q5qp8i3D{#>oF&MQqFiQz<2Dk~W22%Wje_^|w`dQT`1yR&( z&+SR^5^)TX)h8zlpRqAElK6@Kn-1`4ek}hwApwL!l-w`}-<5ofhFmR84qn9?BRYpy zOj`|}41tvSVL3A>k&NVT=dN5KHzdVv=M81YZj~kR2jb&a-i5B>R@x&_u%*$s2u1Nm zSju_Zf&3@Y4*c8CT^tzV-PB{9Kpr>fnj%B9%H7HmX*f%vt!d#oNMtP?+|=r{U}&@o zxgK5m1S= z)pgU4fJt(r3GyS_t9`-}!*oT*~9XE0A!V8aL= zvZwWAe8?Me*#w;tNcKyFDPa`+#fzFhKA=?ve9f6K3+4}b3-cVRc-QbmTt!^+_d^)& zLh?i{f|aRTf!qT*k3wX$nY54UnnbM4=6UQ&0VgT9`A$$Vhq9ZRtoM27>xgF-(50QIIjfviLXBo9!B7)AH zHIRMOO603aKm^iVWbL{L6-b~fg+^odLCTEp39M4BG~qr8r{=+hnba9p0j_=3`^6%X z19u^us#2adND8&`Za$0j1X|-Zd;~~8l!ghh*fdmtjz;ny;n;srDAJ4XYPP^8r{=Ur z&+?b}Nb9XwaSR94#w+(3-dMor8|1Z`=}SBN^xK7>wuy@&37i=G z#yF11P+gtpN2uX~R8(@G^x|_4k>cP#F$|F(xq(pNu(a$#`wovx!<*6K3uq@ESf`xT zgO=qwjGTD|9DNqMvPuR_#)z>qY~k}z>4H#Cs3JC69kl>Fu45Ey0-^PMB(%Iy;ZI1GA>_BZw;&^RMCY^s`bI11t!Y(U+M_$2WYUZlBr zWZcD{b64PPvizHR5bqEp@fT*(6IMc9Z@@-(pjd~oj6RMMxXlQXE&+t#dVqm%VIt~u zKxO!0!Ksm(F>{98gO?zk$7Vw-L+`nVW+l-r=&~rLaX(Pv-NJdfJVWU2F)DMB6%iqX zIvV{j*+UtLF9~4qVq|4xYC?;71O;@y}783Q#`Y9WQ9IwcSsMv z2ZKl|r7`v7EwYI}G@yfZI#4X~wL3$qKMgc286^?6Lt{p8APJ=fc#Il)wQ@5Sz1UC) zu45pNAn&mHmtj8ipWtDktVDWmhuuc16l(8n{!0Ql*zZeKZ80+s-a>{%q!OdjH;wyR zwU#d(#K~`y3q8FBnm|Ctbg+0nl3xp-tCu7}2L&F9K-PTRD3{>^2~R7?Gn2;#I$k5U0!gj7Q@r7c1Q%_|BoGPQBE;7Osz#NV9tEwx!!npCJM$&aO z?-+<&^m)$}1V%fUZ_-|YJxnMK;tPpevu+)AQS8N3->;Y~Yyf5=Te3z=xTC5>5epd9 z-1baH=1_zY=SJ1-`ROX)@c)>;!WK4ABa97uT`<0KJ7zD2|JA_Fho}#M8;hiknbo0s z`g8VA3TdbYw#I_*Z(jG8&Tl?jWV>GwM#a+;^7b&R39aI zG=K~UJz15LkV(G*!}kQ>9t^;dBXF+?wP zdiP>(Bh!zJ{h-(jcfLxs?VU!sV$Z954^|BX766y9QaM`=JK|Ji5O0wyF6$@@MA)c< zKphKn-{$=xaSIyecVGa5`??mvCqUmxl+x&_DkiMAdNbQ*nGs(!ge# zI)wIl;VI%42~*fdGqBLDX0t*2yunc(hjwry;5IS^o}$}jlW~cum`~sW%O1#POi&^D zXY_a5k%$z|wsr?>1^5s8Hvd6;TV1EBs6z3$7}NrNE+nMW|EvhZ2fXehmk!KWts*rV zWaYxbdDD8RL!B9Eg?_=281#tDQz0^0Y{q9=2G6ITeM^3Ypfw{Qd9Msr__%e{FgUahiea*dHO*2p_#F0u0{}%{hl7O85Rxsp 
z?f#|N+w6lp>IUSDM;>nHTl;NT!H*u+hnO@(TVBI(pwO%TI*25;{+%HyEYSO_sAYgc z2&x~l;-P~I@g@6Zd0N?#{^g^FhM>`eCh08#8f!bJNB=GA3hn2_10j~tNq|4K78_Y- z&6*9=Pzgr&8euX>TgiM*ysL-1iz83-x08TY(Rz;jD*Xdm-m~7E^-|Ypnx(34AN#E* z13B$=R06ipU@;Pdf5ih?QbK>G?7PXIfBagmBZYuV7H=bGF8PQX;T z?4>ZDgU^pLbyK1Brfcb_p_!0ilVfK)<<4fA$+d6+sYsZ2@P{qo&0~zY)pz7U{xg?; z#|>d3G!d6dh;B@s8M}7f`BuR;YzLxWUpjjAn8HzTaj$Xjtl2HZXi^6NftdiUCU}k> z?e(eif-z`?>jYcl@E_4+%n7UDZD}ZPR6W0T5(*1&WN)#t&C!xT!@qeddczgGj96g9 zGiv4|DuTAvvhh~O4qbm036tzwKr3&f{g15R(XIi1cvv>fMtE$5izGa8RWGky>Xb;s~` zUOgYe_D&eh6|_a}gsI5{vaKb05o6ZOF)|MOcz!v- zn-N=R={qF=y@kwH_%31^N zSEIot9~kb%$BILXu8dGup30k`@Se3`G)S!9+8CMYP87Al8G%1YM1m6YHum*D$BKGJA5n z1S4zTC*HlZdnqpG(+h-m?>V**eq4t62WHB=0tdKaY#QdN+Y|<0R^7d|?8gli2j@NR z?Q;b^f8GtT2BGB&k<^9gNy4#4QS5}LFTZ1yYKwd8g^<;^4BtnZLC^dGsl5${Mfd__^FUZ;l#j4FiS1$%1#=E#G_fKJ&`=)%X$i z!qYjoz9|zltJ3bzkMo1nlyl6F9o{(bn>5?=$8{<4K!5l3sryRphewx%&HEa}RqyUo z=RMS{Gd4B3{#{oyyROnOdAlFmu#;(pe4$kxz2zljoUQoRYK#W)gc|1#p48^_m3xp3 z*MAHLR8U0%BmV|fB4`ZU)NsM*zub{*03m* z7OIMCu~h9uJ{S9L?dfO=L3$d!d*2@C4N*m`9BAauV>p8fqP`<%{;@-Li6+=MIuZJu zYE-PxA*M)urJV`~rWc@qlr0|}5f?ssF|X#LPQ=r`_G1HJHJyQ>)u3Q?aNp&25*Ok9 z?Xu@NPdX~%t1e>_CZutPSjN})JLBEj|5SA1{P_N9G0xarWU?>FWb&4w+$?cnyB`PY zJ^rSJN;o6LO9`)B>z4!{)U<)H^Eq1cCuL`~OEPJQ_mpeQ=YPs!mT0I^R1Jp9<)V_f zU=?#SF$mK>)EeWnQ{~I+wI70m$-|}JMdui+D{r)OjFB#$+MP}LXqViBvFY}&CkRX# zHFSdT(ZpkiVUUr#`s%`%^}Y=3W8oGd6)r`Id!uUolCz+s7qBO#b6D?y{q2G48|z)K z(9pL{8rY9*pPD@P$AncJoPtP3p@C7IVayZQ|j3)z?- zs66_#yul-6>{?YsUXi-iNk8%8+(7~qVvOm>=S8nyb4+52B}Vrnegxr($>*3)wV-Rx zdisVw%)%}n`K!gq%ZvB#%B^wMiFiFJ4o@zvZjGbP>-Kd9#vZbr7eaciFxmcsNld`b z#P^-QNgEe6d(qy_kqNYSo`PpxvNO?8ZXAObWQnfSWWujNY5&`vB@oy#%i<$qm>>H5 z^NLm9;F_>vS#|I6sHZq(4e5F`A>$kj6f$3pz1^`z_~4zRGl0mxxf0pdnuP6pG7-9rqGH;AT#9{*GgabwpRZS^uR6@VUBN+4?%J7XQl;w+ zQ)&4^2-`p^$F0fTASJw(gAKYcXwTZ45qv`KFn<{~W$LF2*<_O=tAsG<{)XQ^yy*x7!l3MkKF#K^PCUj4l* zG9${|uoY68GJ2G705n39xOoemFG;yZm)1pG+Be?3{Zs|2kw7~ z^$Jr&hzJ6$>IwmxL5$I4pXXQ>XiKSu?xk;r*zYr^hz;F1`1F_apT1|bhKM`TE80mq 
z9D4qAc;e4N)XCAmbrEk9Z~N=By*6SL{?&k0w2_x*W`BF!WHGT#9*Uj&GkZ3}3?w56 zko+fo?qxIY);lJ~ZRR>O^W$PU5U);W7r*-$qPAx{CtUy5Ub{I^i9Lso5!22dL@zjQ zCKvO5iaWsdne5v|^)qU3i49$MNpTvsRgH7Xyo+YIC$C_*BcRvwHumZq_9QOy^g-#L zw-94wAm^U|05Ax3Mi(t+{abIp#vx8FNkJ1!8@j)5n8e(`dN7oWh>4jK6BF|(L)r`Y zMgB~jia(mlGtaxVr(URr%ApYA&NSmC(s&KtQ;sr$=S3C;o?S+cUpu~iP{EsU^Z1&m zA%Mmyv6<{`dPxrqql!c%RTV=zAJ5d6^Dpj92-}hezs9@nU9Ag_9aVXPQM!BZuw&+l zr*)6ry+u>J7fSJkIjKT7D2gY^@Q;h15fKqGXb7X(d96qkZ{pD-T-wR!WWou@8P^;A zYMU`e7oknj->NO3EpA)q7Mjwc1qZ7ed7WVSFshoc+hFXxl z(`F7V7c3`gdiI)VKg_v3SFPUsIK2zXG5RwHng$v;Y4{?67g~esD<3(W@05L;aI}!$ z1iE6Dg~dPJ+Dj21FZR`hTSie`Z$kPnvOX<3Zfc(8Rch&5Gs^Km_M|wOsHytR5OmjK zb*<&>YGTtvvKQk<(f9Jl1qDi%k&9y^?1Atpem4$cq2W28<-84ba`da?O>5CaxEC77 zJ4P9}8yFCN!8)%`Fr$Ob*ydVdv7QtDOrnW{`cfMpw+I*>sq%C2HCe?sct?P}CI!ph zPvw1^p?l3=QFvWLoED?lut-ynRR^H^!FPY!Qib7(S2DY!GIcl$y4SaVgs;67$No;R zIDJ?WZyR%StMJ6c1r81lw~BW99rNPgNM+@$^Xh(+h7R;kTUMsO%fz7^*YYC}A^CvP zniR9|uY;hNq0_7c8J6bKn-DsrfJ#$3&|IPLf?BcS7t{&WF`d320%2zmThxZE-|mN5 zeZ6+FS8tV68wQi5yz;G6@Yj)HD>+v`$7%jF-np-ktpo>3tlFo>_!NOfH)(xj6>}4! 
z1cNI-SPy{i%Ru|1TTJ#xdcAW(=$x6e=;{D=@z>V0hVEq)ofdz)XF%Dy z74~scYY=^?=>>jT&}Y1{@VcSgf^JQ0>YUr2dWevev*CcKMtbZfn7{Sd; z{!mmus;|4RXlLU3m2Pfs9PD}*-S6EK{*>yi5W2vEc}YH#mKF}hXIr@a&41%wizW$g z*&L>4EAqME%7WQ&5crGp-9t|cp8;3at596Wtk-7Nn%!FjoJrbwbffkXOg^79Y~*L` zWollo#@2Ndsf;Z%o9nORiWIaJqH9F(ho8Pn2*=i%rS9E9xmSaUsp%US)9;$RYU-lB zwiv^H+8s&d3pBfPwK}D-gRl1{Uhfyk_f59RCU25oILxU5Ct*LF;A+eJzm;>sO{K(N zzJGruDJiKDan&wuiL( z*IU9FB6#6h=D5lF8FM|X!Xzf?sn2hqL5}v8c=c`E_263)YjaPiH$?}%EVQbJ zOUAhU;-wcmo^F)$e>y=FGvaLU8}Sz>bAF%Ds7V*m7;RtXOUJv#9Hvqf_;9}fHISEH z7OI6SfHDFv<9O!8y6I_!={<)<*dCA?)`*X@{B`_4$4*PAH_n4{ zIQih>2y|GB5v(ghYz>558q!1YZfX&c4XP50_5jIKL2~03=wU=x%)Q<%<6*#IF9Y0Z z4`7rus@_wO+Zo(4ELIa&M@a3(jx0pu`h__6n%P$uh`3{Vkqzo(Hgc-Bw!JfRK;d5k z+O~wfuwf(vO&71BOi#d~r{tBMQ-nSnXVMSv@3=3NCbJ>!-q>2!k#S#ls(3Sm%gOUb zo5_mn9#5Tmn5BF|`=M*JF~+Fmo!2`(B%C1H!u4ds%@l4TI5`*hsK5rpslQ6Ie>zzJ z2D9)edaq0M$x<6=RWMU3Tw$qIpVMwF*q@=CliH(0U$FQ$ZX{iB^omyz9<&Q={|n$b zSf>t`1HZ5e)E5Vwanjp~v{2%y1^~YePe%nJuLCzb6rj3Z2L?<5SjR`mMA=45M%{?@ ztVM0z@7^9|z6L=k#>lja;p5Q+%sloylZ|3@*@kj8KvoK{df(tn8{t?oxwpTa>K+p0 ztPWD0muNBvfJ=?D8hcV-XzKoqwrgFjftf+j5-h-hV*lgxLRQmRfC9a>Ui~)jKYW-R+rEMJsO=bP%Emd}1(&gzMLQS31i)V>;?>JI zF`<$WLu{En?|0m^#wLFOdE7Z5dQ0e-L{&ivVl7=X&a!KeQ2mJu-<5V#!}cReD5P;< zFBZbLmW^}zRp*}+9K65tc7^p@H)%jeI~*Mg9Z|TdBfC#Qf|=l4lbZ%#AY&X*1XRGB zCIb3f*PajM!GK!kqExcK?)7?u_Cs7@i??@?o%@@sWf!-dQJaC*yA_l#xp*EsI0}(B zA38W^EdE;LXu8OZblSJPq_r>A_jP3*TmgjW=oMHj!oy;|9U@SDjTJg+2#9GzW_{C4 zA#Q=V*+#^Sb!XWeXztb6uW?%bXndzkMsO$t6Sh7%xuDFzD|dBYAFY8{>>2vMk?lO$ zx_hKMv#9Estt*iFtUfyq?+{!_9_X_mDv_AIZU#hU+kqsFP}8PctQ_=lTvf?v@$34! 
zQu7L6`G~|sm+N+Q_Oi(uX{IPM1`@x+xuDjg$<3VR0)qsJKvqVD6V77_)>IA>JgE#y zSsh^(i@*zo=C+r~`nRf&Pe1dVSeywA#xVMa4|8ovDil`cgV=VRfe0tosyWc6Nm)K} z=iHiB)H`tlN*3)yD!aP^0s<(7n;_m!>CfApiKivil#tIGV+hP;ymS3q_cmxSdF+|B z%mNR!i_sA2#Y(XOK73Y;o0S?Rs$DB5LtxFfX+x@7Yk_#;PJm^D%fOy93_SBBH zlTG&YLN4^fsoo=`yIe?YY7oe$aIA+qaHklSb4wmQcxdu+__$eW*p~DOKLYNj$&hbA z`(avZQx0?UzpS6nsd}1$Ziz=`UVZIw&eTmnVMOR+wrBg0@)FH=+uKVT>dp0VRIu1M zsW86ph#8(GEtF%L_%N-pFBxp(>!hfTbrc+Xf85l30XjK|us)=g2WLce7#Vq$HwgIv z(YjtjM>gdE13%UcTm<}Kc};CGIKLn>j7qc<8Cud{+!x#JBOvFWJIN;-Ipgk({<0FW zxlMQo@bcW&8aL1#cW^1bIZJdYx1_Ofv&NaPb>X&A6ro%gv2!>LY@l(5yX*Bs1Kl&z z()vdpEiyOtkmv-UmrrZG4TFZWsR<@D=PQ$R*asK6vF?A`XPVI-<7f>dNvd9^<~2-c zzJKo?i+#N#p?k9nfW%jUw=(JNsN36EP!C?)~kvp3{zLcUb&sxZXTrAwSGj{;((iYG~Usl7% zoH8b?f56FDYXmBr_355C2w(CWIEDNBVR)yBwgWP#D>`uSH1}P!F}b)v>47M*>M}61 zOc=C!VOiE4f>ZmtZ$j!pu?W=J`nRI7e>DNBUSKt0!zl0in2hD>95ZRrn^$_x4Mh!} zeIXl5@TRDHs;eQhzk#5Hi8#8p>1~=z;kh)JmjK&0G5O3etPrgK-bNeEJctA+YrBD> z3RZR|zD;!FNW_$Yqe=q3c13>h?s!p%1f?!od${0OsZ*5LXPVT;ph_&ihhBT+l&VqniipPOxd%yGwqv$qiIw?0XE5j&DzhA znwesfRH?|M`Lc8Z!K42Q#2YuDV4et1V_MF1iXx39;(JoXYJD#e@~u1&@47^ z&N(Hmz_J2cwHj)f0(f~`+n-i71ti=$iV<8q5wiChF|ST-JgW0BpR=xe9l&zHETHcl9FQoc=MIg{L+QI3s1(C)e`iKFMub=26SdiuO*Q*_!X1l}##;#NZ( z#4$0cU}RYm0^g$p^)=sMZ%Xbfwe0Hf8t9;aoC1)v4W6RWy9?{JAD|V{aUXJh3bX(! 
zK2#LW!*vJ2n25bdN|-P^&W7xK&~#?oWH*^J`*}TYaWt-?#WDDkq=u}Aa4#&%Y>;rW zRK`b}Dg;t8hMHY-lbytYZEuB~2U?^C=D`ZVZpPZ6OW00HyUmf(ZL{q_BV)njvgTu4 z^66e>=Itf+rSbF(bFNUtbiJnikj+(AE|%ma9aBy)Og`@_vKB^YoO8p%+l~%3Y&zg> z*b^;yMeZQGUR(wHwPxzDwFytpwUEF0kA{-D(46HXYnvRmF{+5b6oDqnp22CVLa3d) zVpIc$klcfUacAOAhWsiGs}qX(n_f|T6#?smKczR`3c2%{8(j_^@lN(w_R3hB3kr6v zjqPr3BIX9bEL5lS31dRAo^InYI>k(Z**)-X^v4$;xbEz-P}~gl$RuF}abY!Bw2T@` z5BkFr{!RP+mzxHg0trF^u_@v6WKH*fJ~8u4e70yqf;&p5vy)X6K$7UdDD^dwqw`Li zMiuER1R^aZXyKz%_9&)8zVBG>ms`YCFvhint>pnPx8dG2AH-<1UFg_HuGGArtno&& z;;rbTvw=K={aRv}!uV5c4y7M02RBAi*+v@{l?BUq!8y}HQ&c_JItqcx*0~q+F`I@x zsp-Q%wa5HnehnQZZEG$o$zW9L21rzkpPNS&rB=65IXHjpf4F@cdf9<_c;CQGa5*mrnuUoHqre;?Ts?r%q8lP|c zdH1O|c%Pqo=Awe<|9C548*o^F23K)hMAMMXE3;9mrX~j9JhDQM2BsoyE}rz4g1^y{ zydD^+!DTC|Xib3uz6+!?QfL4V+hkKrVixwR#CxLDJ zA8&^h9s<-egh%?g*mp~VU>4P%ocY779{}Wi0Z*{D6YJdnjL_fVS6O{<4jmr=AWJ01 zmx=QcVJ=9;yi8*|*-o4z4jdP;)wFl)b@{jZ7lT*uzZO{4d)yBvdeVUyvGn4B#W=tT z_NH{;z9=rly#sx}gvpP>3Ig_p`jMzYI^3aPb=a2y<$!f>XA7HQoQ@sNt8vD*=HVV| zt_BbRiB;#?U|OE~&Xr#7l|#Iw`ixau@tI#DsQpk3Z~u#;F&-%D0v4OWDfyWA-Uh|y zdTL@hEirFvj5`V>%mB|vYhICc3L1p2+$~r%n|MdUJ{i6)s)$!eQFV%bnm5+W_a`m< zPm601+s24EzY=T-PBkj`9T<4@WLMmEjniqs%2fKzywW4Ai{k78^J)}csBt5-Cumdz zHaK5~qm=p&0QY}8Fo4h5!0`7f3q&_gUsleqFg4T&916qaG6r-HJ86Fn$4$LSe)9iPqy5tJ&k9|Wn7 zRIdexNVF7`)+L>cSC9QV{*n@>+HdgZL-;@N@p$0d(ZD4@FyxkicVk5j+y4OYZ}1*U z*AOpbfo!fpPxJ!^X;&l~YE`F|TVI{^oKH@kmKOL-iw8V1BmVlokDD)Vh3wY>vu}hE zh~LtWTs#JG0faL~$9zFdUYD>_wzFOE!PO`;g_-PN&+fVvXOL(iuut=q_4OYNAPzBGdX?ZF@Wy99^^!BK-!UW5}AHrphU zHRe+hTub~OT4{r=iu9a3oc8a*_hY~GLSI{eIUI!P-$7IaicVP!%FmU;M;~pkr7zTC z;I%0}|2{pxh5Cr*)@pR5+)GRZa0&Da2s%h-2q9;%r@A*kG~4}TTnZelIV^1CtEh)= z84`R4=?gLq6j?wyX0oT(Lhqntk>&lO$BAwvdO7oD-!|{)TRC`R*!&+rVqJj-xf+rf zXhC*~^kU^BMMYJ#Iz(d$8k%#^B*78$Z1zE-xZ|Pbni&Ww9BA4H0(@hY@7}9=#CWGKoHQ^Gfb5UH;ee+h+-Px~}&n838!s5uigg?PVN9kce6Bh+m2hPYZW|(F8qA{xZx8uUB&FowANK~sOAo;x7agXD_y^MZ{}5gz zlJs~<6REXA;yl}6D2N{*->t2dzYUH^L&Y@VM{psnaeGRlq|m zHbVbe+Li=vRvsCffS7p`Z?>eG^5$Exh 
zc-%Oh@T?i=p&l>0y$#y~QV73k72OnOftW#YdzmCDxD?VmKu{(ZLgsQ7E4duZPa@?9 ze=Hs({daunsbW*TU`G4H3ag@X&Gm65MMAznC^+Fz0KhkD8LW360UOO`6oNd6$->kI zAj#lQ{HLh@j`wU!zHbz@VAzq3Qo^yOeThXdn}E8DS@BJ2ZQPdr`4hPh-}+wm2ST~?0NO=m1gpM?{Fp1g6&K`oTPfuO4Xyp_aLvBiN^2ukTUMsq` z=u-?5|KZ!RWRvet!vox1#A4rTxAVeo(XCF@@$G5?os$Nc}x=06MWKfPzH zfB&AL54iumHxWCfqYEfOsbw#sQ$Ch=`q0@sx6Hh@SDz+axIBfaL!ehzpx3)cYtt*{BxyK6YalILCXEH&?Xl*@Q^j*3m-tf1W4BPRPErsSv=eJ%ewk-n$*V_FaB(N zp z`(IXFJeV`Nt|l;fa{?Tp)-{#?XMg>&l(QDrA}@LWrz?^!9Eau(=gkH`@7yoHPirhJ zD$5s{<3M}X2Cq36w|>$$0t9F{?P552@Xy$b{<1S|O@KpxS6sN`b@;Bra^o`vI|qv& z?=^(X^MBSfpc8nVZeFTa(Klqbd0_M&%;_LD3T;kt%9uly2{^RrYtl#lpAcwF{$=Nd z?m+K@B7jX38+#!1m(^XJ@7GYL-J7zqjDk{-^=}Zd98&&yu)BYGuv!L_VHNFr2M^!H zv<&Wdcd|FxEExQ~SzmsM2;MjLL8o;SHckzv(XlXRE{4$r)&d6IPD! z6mlIc>2iOKi1SmBrs9A&c#Tq4G?%30?J2`7kSVmhy$iJ0CJF= zhY*H_6vy!=oW#;ViM)Vs$!j2fF`Iu5BmN6sV_}rolGQ7HIicgmN%qEshS14LTvU0c z7x9iFQX1;?<>(xWY((px=>vEE9b1gN`%e<)eM1G$l$7RB_dGVa1J}SBfSX=z0Df-i zI+LB5alFODMcYWR4BX%WtMCixZdwMrk}HqV*$0APO7Lb}1{!-9AqwKn3o0sDM2wZS zoY`MZigV;IDG@s6d62G#P#y!(`hEx;7Znmi2#etY$N;XP6S9V?Ef~x=!$z%kx>VOH zNa{o8n6)1x;Dg=}uv(jU3D(zwH$$&5e(sha;(9^lufx>)RzT`slB>?-A=A1%cAfg} zJaL{Y1z-!8Ii?Tz4*`h#Xh@?Qm>r~$x2MWaWtaXa(_HY$*E-T_m#86gaYp*LFI6&D z84WRcl+mt+4il)rb2Mzced53GJ9yK4qT}h;(Y^8ANzH5}ff*pn26C}8+WMNjG)Z=I zLr0|ygzaVY^nRgRqKOmj3cd}iprnG9KGwZ)n^Yc#5$%CoZ2#^E{zYmjljw){clBRQ-gld`XY13m9<^IHp+&UTw>l-Du`tMlI;WjQpj)kP z0g)n73?wZO0)dEUJuOckMJK3;Lcmf?l{W=rB!SQsOF@Jvk+&kyDnwq51TcgIvg;Eh z2K^W0^cN9J?)(1SpO@=%UDtO*D?Htq@U!Aq!%j9G1AlyJbi_rq*mm}ef5xdbc13M zN5l>wRk=P|@tTm$fg zCQ5EA1p*lD$I_M2;-9^=3wI#0y6Z z)hvP~)12K6ZrJqDzwm{r`kB$8c0siF=2X`OxdDNcOlMiAS2UEN?_>no3rCQ>7jM|4P~h5A7}w6tTQ!4MDFuu>uVrHsNK zCwqig*f#h`y`v43)Nmo@G-6vg1{w#*kTBbL_W|wQ$HeRVzDd&wlJtUjXcq4FmBB~C z8%VeRTD)4)k6|pCfR5mpZ;@al_NnWG3^#T(D;5+1;M2yd5~4uu*Zh0qT!1)(qb5`< z-Qgh9Tg2;;yfR5-E$Xvse`6nTY0H?kMAu)K)oVG1!0FzJ%FmT8f~zu_we*DMul_#; z&;;Ut2!w!Ax-@5|-fV8C!_$Q7jtbl;OyO7GvpY=%J>taE#bCWK9dvv(my1xZe}p z#joDqkeNYzZ_|=^g&9eQsayB@M9%$mI5h~4|EKoDC2I~T1qx-@oaWU}9 
z)^Ck)$2^PUhAf}1IpHxl_*{4>UHgZ=_#spVDk#p>eHm!smXM6b&$I8F6s0*(E8Ib} zr6*L-!tL%8MJdCW!Bw=`P98?%kO)lqh8{ayEbsQFMG@ja>=0U>&s8C}kVr)PSY_=S zB0Ovx!g}fS18BlwAgD^U=zsJzM!aDJE5&!A3#;PLJm`8{ zlFI6IIE0FjPXH-SVrrD8NF$T0Rf05*mWy>{o|ES~6!g~K1eOko`)w!>{nI;S3_8zU zhsu7TYMiJZp>@XXGVsAPG|VIqqpwhRz6B2Hn2rW_=3c;YOQkm^O^I;OzxY`Qo!hqS$|wIw9tJE$rwy^ANT;4vy$JLXk_kB` zc43Qf8kh~=VG%G5Kh(=PO5Vl_IEktNHp)$%ONm4|1b2zGHt(Q6xg}Ch`Bw#X)W3x?i0^3 z{OVo{m-fjsyhIM>e}nX>q@fiHSI@vx5F`mgZ-2dM;7A1x2EfPVUtdq5eJKA8Z#+%K zJ7#nStQ6i&-DM!+F%ARhy@~BSN686s0`MM=@~cZQgD1v!Xvdu1h{2DcG6>-k+yI7X zV{oF#>`vIFc}N8+(8G8NJY24MC+S0xn}$W=DVnm8_Tm>(ErLMZ3qO2Sa){i35rn9p^F`I^}byKvT`G2$pS(qJ6~V}RIw+0vMde{cWKKNT7VqTmXTSK@AIgOM}} zFoDiv9~iocoZAvkM;;G8euJ82yh*EY?#-y@eIjQPbcaic6{=_j7@p{pcnXixm6oRI z9eVJFu$d4z1cBRzgDd5Xx|WQSt9HaX*wnYHBHN_p>o}dXPGZp@C~Uy^$a%V)tl@>_Kr zj%Jnp-3fm31M8GZ&$NB8l0JJfcU27k=$P{(&U`S)erje|OGxWh+pDW*fqu*icsUj^ zS$O%*AkB+RtU>r$_{zPjpczZmap~+*A@b&A6%#0 zSDWhm&mSjr1;q_ZX>B1HhI1fyoA24L^ON=r7`IxlKQNE8=-5+agMAxgO4RL5_DI{n zc))g)sr6i6$fc&Qn(U#~<6vT+326)5@t_wne+dqpHQhIW3)r4##kmeSeOPCM)7i;y z%nXq!3V7WxAsof@_~b=l`LK&&ijt=uhd3DFF~EM6%3hKFD2CTE_4w6mZx$t)r?RSd zVE>qBMN|$srQ3kR5$i(cO&|3G%@H>)7>^aE^m(j(QyLnRHYC z&LHtvQ`zD$+KQ)K4F9lvw9zL}*oExonc}kZQjX7vH3V4<+xRFt0$k~Exg@{hmpQ!N zbzVU6xy8YF6XH=4Sx58`SfGP53bn*Hlnh&l+MM>Cn63sJjf9W*-6sZ0y2hGfVgqJ9 zWDkV68Yyy)S0#RMA7-VFowvUvAYa0F0boJfI9anRelMECx%bhqL5CWkrL%?oiC`XF zo}Vs@{AvM#K?@TGZwH%D4F^WUtXp5FWfo1`6VX~(xjrV5B1C+F;@4)Pt95wR z%MgZ_!7--jgSR6dwK)gl+On#7KqOGgC^xCc*>H?NmsQU@7%dsb1RV0Y_NSfTTJq#Q zoglZLSC1ne+MF#62>KO=oDSOtfB-_n<7922ze}4HYIc$~IQF7e4Xrzf8%Hh`sNNQs z2}@{!aKGr4#_a+Kb_H)}8$w}5sAmB-^1DxKa1cOb-PQsfyI|H+qI2ldGuu+>7eaU- zt%@rPXh5lKaV}fom)RO_!eCa5XhJ&xq9X$`SEp*iLY_1;UkVW|Bk*XrT&ML-1-l=- z;dr0qVGx?mv+cW{#@?5o)Ui}{{^v6wMH zG1!Kxqgbs4Ttw(IU${Rcy>AS|KP*fVF;JB^#DR=MfFo&l+P+O_A#KJ%?E2C>%jBet zMnMQl9wxGIe0zK6DRgmswiVBUENa#i*%6Fp^2VAU7pVKppthDt>jLVa?L3LKY;Ym~ zGXrE(EsQ#-R7UV*OVxP?uyCtQ=o}HJD2ffr!-&>T7Fv}+l~6Hf~{9`sJ_rb zgeQTRL((r_*z5d3Bkz!ZV}+(>Qx1IhK^seBgy;Hb^i189w^I}CAqEhpUx7Q67lwtA 
z&0Qi)0->NL47u*fZ_-JNMb4|X7qs>&f_U^S40qr zNqfi(53j|7ADtvM_cY?WaWZxznCz90zQFE~lcsHT;3c%TM**T12sJQA35Ad7UGVOT z+xvG?ML`Rn30jDOMvSK)V=v2g6}Bz|45vcDN;YsmFJqfZgV_5FgUsfRN)!r6q=IT` z1|1VmHl{02pcCTVnapQ_HV_SsVT@1%7v1_WZ)4kPct7WAgV1Vw0jJP_@78+f_wKba z@aRc2pCz_P=z*9Jzds(Q^vGB1DGDtUJQnG?JQvYQLXp!utl<=4+rhm1nkNCE@OX$3 z0x{CSiLQaj7Ie%m0h*&GG|o{_V`*r%hPh((ShFi;WlGB-H}+tcT|*xRXl$(TA&y&7 zQ8hy}^KMmu`bAF;2ceIW%%1bZJannPD+QmA0Yw7ZXC~gSK*&XsFo1_5mQ@Tz8t{}( zkfJ5meu9*+h9qdLdyuRSfaEXhA*mZ=B*%gKMK(NTh@9CLZoBy`D17hOLIcD{r}c>T zco?R5t~Wwx_`JnE zKer)f)!>?<$d-eOOv4-G>)8No{)C=d`m*&6jdiujoA0K<8hUThwR`_hToQu!g(?>y zV!#0EL3?#yrG;)V3Z5JiEELZ2RsB-t&YAw~O zY|N+5XB5Ewz-ERT^WJ=m=G<~wG4r0j@?e@@!t?GS^`QSjT^bSL1BH5g>=F83QF3Vn zZ!1qwxd+Kf38+EC2{P^mTfJ$SK#6|FLRi3?ROIupeH&_)0Yh6Bj?!s~2eqba=+M8hYFmB|Tw_oDJhX z6X8Gj<@+pm2s&s~A&Vsd+%m`kd81CGXoq+?yV9tr^nt8Ie(r`Z=`KvbtD?Qk1{Ezk zZ;*PNSEYw%BE!zkt)2*51i%uQ z&O{4Mno^n{3FyYqbsoIIJ~X?e#)R&Pgm$> zC5IU?^O2;UcHlU$sbkyWUpxtUBuQF4psEpK!@;6Y1$VL%OekRayoF4K3K?$VQC$V% zaF#TG#E*qv6cb2h;iUCa4msPhZ*s!vZM67qJQ>aUt5hTyvk{3bp>>YU# zB+uQE7eVseEqReip1mc{1j&>+_Tf(YnZ5t699tLrhriGKyz63H`QyxYPrqub1)2Ho y)|YLhPBY)MTmEYpCOr4`n$7Cd`j>~kvJ1Pl{y_6fN((Q1%O=0S75#PZzy1rC%R68I literal 0 HcmV?d00001 diff --git a/docs/assets/images/mii/roberta.png b/docs/assets/images/mii/roberta.png new file mode 100644 index 0000000000000000000000000000000000000000..e4dca962c362e601dbadd4c0dc21d1fb29504720 GIT binary patch literal 641018 zcmeEviC>Os8+OJ!3??L5(n6M0XizF`j7W=8+E)pwM;k?2%?N3w4Mm~SzG%@xWzu3T z(n6)C9;98`_wP8Lnek3D-}e`MZ@+ne6I1td-`9Pe=Xo5*d7RgKX}_8x`&{n1Y;0`o zN_!PF+1NO}*x2Uf{4k6D=8a|r{w3wGThHNu?NNu*W+#rY?K5+*J8tW6-15-MQ%6pm zw6wL^x=DP~)(tCt4aRw5U;FyZ>oP3|WS{A%qlg>c^jjgZ zM_*sxE#P0jkXF5Z<3_))U%!s>nTM}jIsa^O`KfyAr5qg0QoqDjrN8d#+rRJ6-*s_* zau3J-dp*dJT@PbmFJCpt4zOefWZmr;{+DKIb)2_Wd8 zBz`;20#SV>GLj$H)6mmf_A*S|r@?6RL9G|p{}JhizYKKLmQ}{6n0M7>ad5njRth>+ z8LgCg{M{C0i`CN7(&_2x0wN-b5rW^dv7NQvg)0VMy;}C^Ww`m9yZi20zPbB0$KKp^ zytk~kw~SvAe;0)m^g73$Im#FE9j9OL)-S>$B0k^GTT*;? 
zU-%v42d6Z&v=-y(2V|Gv2QMqw*w`%Q;@bT@bfbBRH`hIfw#v6@X1nmO-FtaBIIb8s z(Q5zwt9s~+q>+-6QfJ!Z`L7eSMVjsKUtgU)eB{XPM4jYA7kN*4VX5`f4(<#N38@~^ zJIwY^n)Hfoqkj6DZPs;*jq@BX{rc;#qkKe-;%?*H&sgf?b(un;8}v1sn3re#^PTRP znc+S$x_;9p|NGYUb1&lN&L;2JxpOD-`ZjMLAM=UPfdSe11FHYzU}8V1T;1fi$B%or zhK7b)z<2n(=)L$>%&KaA=q6)>rl|cD|NbmZ0|RbGZHk{xvcYa09i2b6Ui4mki#02$ z%VI=jW@Z+Y-R7I)HGP@R&N@8MUd_RQSEpAIM9UXnlcb;FHT-o6)!(;v=x&h8b7)&6>)h{UeEPk3 zdd86;dW3IZRi$C#j63*^E8!jL=dp(mGJ?XxSL_QDyM)W_VLkIM*ivM^keVJ~h5)GyZs1%(5y@qtcc8 zzWaAAY?_@x>T)9jKXkv8X!4b4a@~se)!~w4aHh9pDf3gft$%i-bE1x0O|s!q(ZcZ) z568bgx5s^!MYwb?50Lq~*Zb~&{{C5SM?(;2w5g9g4;_#t;o|bTk&zJ%BO{)VA3t6R z3DIt1?q7hHT2(3C^N^Eso$9c_Vwy~)V;jFCm82S3qvTfeq zFQ1kr{6~R*Qc;oGqrrNbrxg_eeQt ze){%x_QwMzCVU(m6tMsNy%QZS$Oq5mCwl{OM%xobChT6NA9*f0{QEicCbuC!<5Ta} zZa>wzY+v{`KO{-LEGz9KgDhdWkq<%F#2Gp-!o)?ecuRbiY)puXoVJ+`cC2HirTZoY zLr73(MP;VO`|Mwe*REl|*n!t`!((#9{`2jNd)(dK&od0Nthka5vb4+sb8! z5F%ZSC7Xs)-M_2yF1hhKhH zz4p`XZL6mTB3AN|jv_wG+p+U)FT>0(-+92hsHiA4`OL=<-FC}7?~u^Y(zREO79q}) z(@gg7Ux;nU;Bo!(o6tK^5fPDD?A(jb&0<$plK=Kn{`%&v!4FQZj#;f@JkWh zeSi0RX>V`uc9&?Y2Sbf6%z;|d)U?o`AkM<6i5}*ybMq7GZ~yh{-~N4Y#Twd1{!*W= zjq=^rK}KJKAap4jJ-^gsJb}Ci0m6(uYvGxN4jS+PPY9oK)jgpB5sl zPpIF7{9B*a($eCD=v`DJn%`gfVV?Wfew+RE`G9?Qi2m$?q+0dOo)QE6hAne+NYg6s&ZHtX84Yw^=J~iGd zzgpnVfmk*BuTKTbgkoEF=>Y)gFJ8RZwYMo2G5`4Df&fNJ?m9C`qb-PkZ+xKCh55YPl>ez{A3OT5 zwR*ecL(LEyTUGu7`SG93lC=|cOvjss8cwSF@_Y`3vzyBIcE|R`NYp3$@%9HBPF5Ui z$g!V$H)hiYy|k4V*jMP7(Qfo<%y+@l0S<3R@V%Y25wy)L87PZ`qxYix_^L_d-Wb;& zp#yh|u_t-x4(UC8oS43D!+A)HCCGn=P^RzU4zA&@mu4ZWFO#%8_#?lZ3_fuQVJb^M zp_Ba$q`-TJp9TsSPD_DjN3+lLeH!KaCHcn5r0$U5;Nsz4CbMXssPE(X(ibu3^uF4)U^h1YOZSD--hI2)sZ*z@ZbUe@$LkfkyA?Tt6#GX7$Zba@WL+pk zM0cH0_|>aRn?`H0?_g*2c4kzR0>=e)dpucnQK7Z`ra92WJ>ZV$zWF?oKRFL}F0YWE za(xjYE8SLC;5N3W(0$_2vtS{-1K(%{3IX4_*$e#jGme(b6McBu!TRbZ<3m8D_w1UB zmPwyFUdfgT@K<_8k#G6)tD*s|5iXmQm6aEzSYspcI96$B0E0v+1~dx$D`E>W5z{Jl z=lTtD?97Z#JYC!SAxORS$(7YnxYGgIMW*)-m3TW1RO^Z)vGZ=v^pC|xsZ`p*^Jv>K zVD&i+mCz0GUj|I_ol8VLMxMVl50Dj3vaqlyn=jqD&?4f@(HnSKy&h9T9-B{mcoJPW 
z>;_!b;xFCpHE&bSEob>1Yo^v#EAqQ7ri_tfn4MDRHf<+vc~|PU!W=+eLq}%`%AsR{ zhnkw2%s|zF-rr{PuQ7U)Y57L7JBKMCC@9!nFxIVC;CiMg3z4(=P_fr~VPWsF?gyfB z<6js>N@1NJr+35Q!%w*v?Y`O8lZ=inxzPPUd0 zY?;|4<~}|=H#GpES1LEw$*jA*=h^e;oHcS&FJ|oMT03Uc>aMc;*|TSduq5|@^qt2{ z_FutHV{yFA_vcVO*||c(CaGrh+yXht$uAe>6#zp*nzp(Qe|gJD&2M?JwZeC|&yux9 zr)!N*mw16O)RMwC6Xls?vzL0R77hW#EQ z2$ZLV&tI|5et*h-YGO!N$oDvcG5*A|cS!8UAMJ8h?Yf{P#nWBiG|BO9dV1Ucze%W#C78A%=uoZ1!Z_e?M?#sXFdJy61dcdhu`hJ z0^;SFRavPyWLRJZAxZ=ErKfPp-7Vm&c=$GJZ_uCWp=I^|VO7V&LfwY#O@$r}V2@X> zUe)b#+kU#6$75=&iFwW3Uujr{Nhz``yNOSt!ERTUa{v&XnKkm82FjzX%V3g@`rk9f zS=DSj;P98n3!*fl_Gs+izrTs;x^LHyt2yTnU00Rj1CO0J#3u(M)oCK_7$>23ZPk8g92IGY#NpY396^0okBJ_+_3eR z3#$bWEsp_iP$Hd2g$vkzQ(0@^9; z>K5rXc!^e}R}AGg4;1(9EOD!4Kol$)>4Ei6bu4LTbMR_lafK z?Q&j7^KCu)VsBn()pLM%k(3+(e`KTR4!P0KQOmX-Rps4R5{l?IFY=tjRdeCT<(jP8 z>bR4#ZyX9_NQ%^l%eyp33zz(K?A5JfUhTlL$KpxMozCy= z9BV)DHRdu%fn{2Pmga%B9bBP>ueLA*dyGRxXJ_YIhjR$>o6Y0oB84T{3hFN!5iQ6jVRH|;8`%A5} zbo=-$go;4b%b#5L-8q|sieA&Z2Cu^S=^wDQt3Er`VPCe*d(8sdBeL>X)PG%=7YzPN zP*8^Ug)4I|j;Y?|Sh99qf?T z56|>{u8KdW>(=UPB)ig8Oh+=rDGClU+tZL`5B=L@5Z zk`U{Ykt-cKYBr7zcC8re$~JZAe0#6lVk0O$Ze?R5h$@Q&U*9UIlVY@-^g$$E1<-7u zWCf&7Rv`eY-(BX<8-)aQ4_s?cq@3Fw$ViTILp3Q$NgF74aSE#WqPmvN=678vqMPFD z*il0;N{msn0r7b8GvMlv%YIrpU-I{}2JeF^Z~v+J09yDAHF?Mr!=(?WUqjueU!zh> zT&@%0DP9+~%9z2W4|V1tC_dHik_SBnD+si&RS7LdY1IVoQpeweHl+omZgv!p1zRq_U ziY^;x2FR<8P6|di32Z7H|2mh^8tDPZQ6@iG+OgH5+;^;}P`=ckcLky#(V zW40l{*VN=tAro*R08~l}jD{0}#wlxB97r};oxc2T)n#mm zH<0*c0A*!2ZZ)By73<}L?7pTU3iQp?#k#rZ{AT1$CtxRYEDKjbPoGC31q{&bLSp(3 zU3KE+GJP*3dy6?&OJsKoY&3D{%F2O&K&g?1M>^yd%p65=HK#K2IVxa}f9> zKfb7lEoWeX_j#I09Eo6@9PnN=Lq1ZXC1YhI_>o(3BM2HLIVPaK9}tUznVDIoJFyBG zXhPZ%1O0E*Q@_@wDm&!Y#?;$9*f@z*)6si?x=9#%td2oD6&wAh?!u`_y*vlY^F1O6 zC=$p6*Qy+%J9{*9fdZstW^|C6_sMc8L4(X=SJFq8ZZrt~=|AbFh3*8OJeUKNUM8{L zWl#k5+j6-$HXnbRW;MH5Jepyx0`abwiV7b#-!;f-MJ|R4R4Kpl*XMrg=L2_q;PWzYmEEO~!8%g4byxD|uG;EL$W^oU! 
zfyIx&E1+U08dT^;4%O#rzIQQJC?<-O@}S$h?MsW=JzK}B8wne(=cKR94ogU~O&2}; z_j1H`_ySRn#ChK;MkGX|b;*L#qKO??(n~9Ba#l@F^mxQ3S?s1dSOUqFXWo(xarOaV zlTIYKmMk^(CFq}$KUnwqb(G@VSk>d>!@XPua)+N>&g>+9nu9fJ87iD|tNaqJfKXgo zYjkoS0(NF^;nY|m33g(tBQYa=t!0k2#!CU360{?iZ#`P?aov3M#WjKf1hH)jMm{auc3e-{*;Zwp+Kpwprf!Zg ztFCbgLFtDmu$83foEC49L&IO1EwBBQOBe?!jsq*z`(s#v{e zpzv;X?*F_@*4Y6VNv>e*eNQRC&Xr$tG;wM^kAIf7 z4HekaEYip2O*<~ zFDeUU`}dCzW*1HfWTtM1%*~|bxX9ht=>E1 z*uuQiJtj@7r@iM_JCcoZx0ofb`fvHxrKlYR*f@}0A(HxVQPbpL_8ac`*{&mBC82hZ zh>)cHsyvi3h~(rjlL?BlPrpKe`bDV)qgLm$fFtg@781DMz0`QI#CwMWenrg%R^>OO z>>)yzt`3k1ywg_D)_jqlXntPJFWRPAPQ5L|y#XFP@*_{x4(xS4{bS9cXp7`ZB5|9e2xoPR16fIS zBxM}Jo)^02tCiPRF3prE9NDLw$|R|AQ_kF!Pwk4Tnml%}C{${kF$y>p}^| zl<}NYRX*2Md3%*?#;5X|D^~CE^+H_79?PpbfdwwU#K{D$R&BFLy}s3=6e(dMc-8@% zw`r@&rCM(UyDz!qH4N^%1klP_+2vX3yjC*=bdy7p&&RkP|v19uIBw`&>rzV=FlErSsA>k4nIfhaRT&I;g1l@-j z9~+VI(bF_x8mKS|j-ZgyX)tgo#-$ z0yy&9WSplkHrN$~LUQHiM!q%GrM}BK3z1#|Q!LRX1K8Y5Y9_S##1lP0q$Sn5rpEOg zK?b=ej?g9{Ss=5EQr5=&FGch0;Z7-wjdVBoIP_;5m4x5d(8AYm*x(D?idxX`e{qE-SyL~-X_Fe!^*7aTBPux%tlCk z+(RZ15sjSNVhGmQR-svP=+&*A_OEu%EooGibDYO>b27JG|wX%uh;JubDM3v`-*G6^eFh9<-nbfLMyV`_rHA;GJlZhd3O zN4`ryaZtHMNS2aB^=6^c zL~6mC;5E=1F;U0V^2(@>Kw;dgVe|Q0E|YWe2DxFkYXPK zR^lJqVmK-ebvNW@h4q|d@4D^?!l2_+?xqb|+d@Ef=6uPOryA_b-lB0JY?Mu6Ep*8i zG~N6FouvsYfRZH-s+xva4%Msf2U=Td;$(I{GJn>O%NFjs@Z+T!GiNEps@*JD(vS@n z!n#qI!TdaskClcdW@Spc>0r&^O#W$A#4&NzMX>Sb z51E_YMjd9@4}5;3F7F58yT*}TuQeHZ})0d#n^cXAvW%}qQ# z_v>w_y|CutIw;NNWxS_-iDQ;SrF23w_|S(ZSGX>`Qcyr6t(f|$>aqh`MC*=~Q?GxI zKhJfCeMnJQuM{t$EKv8*JidQ?IdGT{sD25k$x>ZoNA1e7I>-i0e4LNZro1?VzD>Z; zmOto%i8Dbw17sB6X&*--sOx=ub_dzEhGYgX+h zY(CoQB1Rr>_DPh0{_9#cUHC+jzQklcl2A(+Bjpm%olNr>KKOy;r)%adkU=j!4H7H^ za9QrNR8%czk_u{nPm}yd55=h(Lt9ez3I+}EB-An5l=~sj^R^MSM+Up<%O3qQlX=Hy zlk)IL6h%Z9s>^Wm0uMb)$%YxTc@`o8*^{KvmUrXEjn5$RL<0RmHNA#6s16`*d_2&| zyLbFuI`_+WuOjpjaz4ae&GLywJU5-(QZSHgyHO*=}{QR!bweKah zNl3bZ1zVTkbAE>U(H!tw|!%6Ej8t7}z5sjj9OsT9k*THLq8?aXXWUc)?L- zFY%2j6$2!rGZ1x>ODD2rY9GK4qb@(;Km}3hoNWO}oX(Z-a6>a|?8IKgt)OcUEi~=J 
z8^D{|jiN0XmxCk6KM1m*Cj@?j<}l(B0A+?BBeXZOKtvPB^Bs_Lq4$jEx59VLms-Pz zClsE&?I@Z)UL31ifOowiNqKeBRlDh7o;GB>V&;UAJHlg^mvO@T<%8Dv%{-PA&NJDngjGPUQ;sA4V=&yto+on~TtW zkzLM4G%0nR^0yT;x)O&x z=zb=Oa|s^nYJaO67<8b!di;x2a9z(|E^wGO+6J9}Y#omleJ|lkpMzx;Yv&#DRD~tt z;EHW%>Ud@W2bFqriD=%h#mA85G^c|km}i62ddn;G?++64Fn#99+&P+TS||r*RJhjo z4g3q-vkQ;MBSo_G3G^cmRQAA`5pa>;UD9jFmJW3|X#QdEcb?tyeg0g5$4+x#qpUlbxcP@R3b#f71qElL^bi=3i9PQs}k0J`mD zAMhNYZY7>yH|ivf9@(JuNy0t1C`AZ9fGevNbrO5PR}A#R*lc!!tX`DSXU*ri7U<62YcST}G1(XC zIaKadAt(ACu}ti)CT=Pe_t^reCjewB%AkkgRgrXfDC4-SeWW!8oH+2nrDw9HI|ZHDY$EZdWgyVC!VYls-I@NI@|yB`Sbb&me?-D=|9_% zl~WMsVr^lRp4Ym3{Pd}GV|K$LRAm*VLNBkTiSE-NMf<(1*y{ zewwsS@)K3Mya0x>YAD=7>6KbWYxB9=vzBlWgvxF>p;9Sc2mwKJN%W9*HM_bsUW&p{ z2JHiMST(xdX}+A;mMiBrTCDCd`o>^j8JNzGd@_<|ZY{-^#M{F++>*q`atB3T#~zWa ziau3?6VvZC>|oqOXGd9~RD&Ci=D$xBvaF@VhmnsC{73~!L7X-9S&Xaw=T=TbDc6~X zfEYR}h@z=nw?hsl#z26QI%buQ)X)BL#kLJ=U7+eT`H=x;ZN7j2H4w4M^3k(do&&34uYd|%Xs}fWz;RtK!cC?vtlhJIqv8FeqT17-{#Gw{kMcOY z+9@<5Jrp79QVJR2TG}LqO40dwcNg0f^W|rzyZE9n`9SW`2FTENKDBZWA!lAZic0~0~hWAXia#3 zT4nd{-S)qIzqBfELDg*9|2gDagK~Vp%o>}^9#6I#;e))=|60)=9m2K7Cvo2~A0DH- z^S7_ITGX`UE!1edx~19avO6_eXeoQ4EG1WF$&bDc5D*njt#R1~1&vH|216qWihO6* zn01j6fCToKu_LHkVCe_UlF_mNPd(5fiyUv-+(GRy#~uahHy~9xD?3wU(f;xrfMlvG zviB2mkec4}=g$+xjwNiLFF*d}?dc{r&YDOl4wYyBLim_2(B;yu1Yf9ndsMX7>o7e}ghqMi4s79vB z!FeIJb`^so^81aDk#dUaknSnbSGQm79NPhXojF^6cxTq=+LOp(djJFv)GTQCJ}Y_F zx&Z>+V#pPH^&+<%Q6B%SD4$SsHMU}-QO-|$e3x-R?b(}i7rkepu_GLRTUt1HhM+gu zu-XR&!E?&a^^;XnzLNTsPH66lxK%*nU4d@GA>~1S=#yfg$h6O4IB7AR z8r3ZtwT!(Pnx0?K?zP(A3qH%a43-euL$WAZB*IBCTUWA-0m?tK$xEO=OGIW-O&Pj{ zF>)kzkM~swCN1C=TYtaFeax!j>I?XmZlv9ZeoVzDWHb}*q@r3*Ueop(a>7E7Jn(EZ z%c@!!4b#9hlj@TN3G|!Gemc10*{JvjhBZK0Z>5Up+9ZC+;;&JAa8WDvk?SF!-9cy{ zt``!D?>!#h;vTLiT#<<8-rFNrjThC(qgY!Fs0>b+QHvk8{SG(ULP-~D>&t~U zUk1;!a1yn#m3FCGMWM-Y571p(FR`;OUiHqxcmO9ka%?3wK8?oyf>+;Pvc=1D0PrS8 z+3jdW1TP?sik>{MeuA_?3|U@4YWL|y%A7fYbq5J5s%-)*xrUwPcXYKQq4VG&)l`Z53CcsVxHI!FzyBs4vEuL1arVXQg@2V`MYyj5Yew!OKd!XSmqxZAC_)VL 
zi0l}e+UX`7PamJFoRCO@B7BL^KXc&Y^Q&b*fK;OsHJTJ=*n@}1YZNAYZpb}p@av`d zXzP>yA)Ew-OoRpjU}#cuc?d;r4ugOx)S&7^Mc!OMu`rr5BgrmeF`59vQVJQw(5wsl z+pln<%k~~*&RQomD?#4cDiAKp*hLjTKxo+v0RlbOnVRXfh??txa*>QDwNT66EUFSQ<@(%U-}TVK9{TA=PGvti<2n3EM#GA8i25yF3Bl%WI8yydC+#N$K*Ko_r)7c3uMr8(J{D!i>tY6 z$V5)AG?v*4a#5A!{>x!PFbRP8#y+%#A*&OazZa3KtLK3W99Xh28$wHLbVVy7CA3rj za{J2sx?@i&l1MQDy)-hj##W%dSQ*OoGK-szIy7IB6_&M*f(CBB;Gqp0HmK^=V>l&b z46a^5Giw?(A=d>XC7~TR4q;4aWBw}Mbe1o^Y;ze8CqdQ27P?)%X+Rr#@MB`iFVmpe zPLbD9i|DC#<23Q|?2GFCf7m>N{L26qf>KsbkcA!!oiAQgj2O?RmW;EvwZdgxP8HRm z@&tdKC=k|v`yP8;Gb(~Sjj+$UF$V~^k;V`ep?$@l?Yfs2geP(maI(^@vI~uSm?fv% z<<{+85~d5u3fm?wvkY=%;H&I8zHz~XJME=TH<=t>aNEFTr0*ekJ(7V1i)a0`B_h%^n-U=&Z> zKNLUzlizUXWYFI99wkGxYBh=sw)aR;XT#8T??u*>%8L_d!|Z|9%?wo8bP0lH@L>7N zmwqret|Y(#Tb{C|?X+#$ZnBGk11Wj^5gNP5?o@ZS~ra@`HD7o^F&f`{4z;&BAUT z>EaM|RQy^KRFC{}0XEG`VCB&aa+p(g;GY^UQ>rC%KjdR`z+?<>QFsHvm}BC`n{o-> z;0N1`0~_AAx(Yn))~~+OK4z)#IKKvFX-){LN_y%Rue2PGvsWY$Gax(xRb);r&38fh zA;+pc+@7rQE9`FDPS^xrXWKTB>BB(I0vTij?~wQ5vQ#*Pp1B3SKUtS;YnpsLVRdzL z*kv179fmK^_J{h%TR8I4mL;DmRp27hDS7P)(Q_K7t*aOhyDXrf(I?)3uKV?y+MKO6 zVNFqI7jHDk^g@~61CjF(38;1V1^>JwrxKY#9Q@1YtoLdfaJkV?L2C6Yd5;U&p?&L2|GhT zfSTH*zL$_via`l)WJB%TrCchlQjtlgwaZ5m(8EwwicGbNt2H>XGl+0{_g>3;;xZXz zo+rVNT3SgbdZ^WsQ6V>0Vo`+n*@K3RIY^h!FEeM~6?;@MF&N(7(r3Jn)ezmtJCsv_ zmY0|{;vL9j|7|k$QInf8M+4q*FAU@*c*?};)wJ{A8-gjufV8`PK}S=*zC3azhlhL>NHfQ@)O!h&3dF&0 z=Vr$qO7g5+UL2S-1QG6!rMn@G&m=iumY}vn(jpXU6_Raj@&{_T-*?cM8anNp;Yt~X z4$BL2&EP?M+a5~J9vGGr-v^A&<4<@dWy;ldt?;%9Bc%EOubROD!{=K&f)A)&0%W6Z zX2MO-p?i34@<>qbqsBKGuE-5xUo=OMjMs_pZB@Fn*%j0KVhiN1?pU{oY7|j$CJZt5 zZUI-8pq{!kIK0`{>ti}DV1l6-C(OkhLNl18e+&&(FEzc!En&k$BR;TOl|T#N2A=kV z3TqcWX%`%FlQUAzUO(7%bW+0Soz&|SpLc7#%C${eHdg;O=ir`nwb!nX-o1`)U3KqM zby?KAM!~z*)20i~K0Dq;9v&75PgbYfTiW9(v<-!6kXEb5m=j+f-6Qa@%!BAcPXY^d%&pJ?yVk z$*OaKDUc-_cB8Pd`raUvzEELm5@N@!AMe-H3``?zm)RBG{^-zN3D7<*C1hxGIHESg zQ8CIF147zAv3|sKB>GOYB;(F!FOevvid~0BKAV=_ z4X~IueWxo{7G~fFwKI*Dx`q9wdx3B#i&34!a%91<-YcXbHnGd>qCEQT{ z%3Sd2YJ({Ydk!!%yNUNy;v>KMG 
zP3bGr^{bLEuk^Nc>JdE98~jkoxCe~TPo?DJi$|7lM+8*GmEB(#R53DIxj!`vUE$~f zh|1Jgv=Y-5w%%8fluC|i$|7W^Am1{%KPYNED>9!^=Yx7MP94eFE!1XBp4``9yVWp# z%nW7qX}84XOUP9_jN^|;PCFrJ$2z!RUg#|SOKQ0#jDWk5=?)RD7>x@}#Wz*u*jmD_ z`JfEc{m$$$H8s-@;eA|DTcvnR8(IR%zh?OmM6w#AAQBT9SOywGl?uANIWlc;dtxeH z-qu5y5~*$bGk z?Rq)0@l_GIYDwtR(uS}fME={3qn8)&UA#~NQM3J>2kvH|@&R~RNCA0P=JZ6uW|`D$ z6cV3w8ZX(A{MrG0vUjjF^weHn!(<&HV-9s~A<*f?><=fI4|I7E!FA=<`ntn^J}oS+ z526RK7a-a5Cdle0&^t~bw<+2TT|SR;sEJB08yYX9c?o6t9;lE!kWT#2=Z$gLHdOy2 z4cgb*BqVS}%X*2Me*w_9?Q>{4`M+QHIoLP;CFKx%vJ@Ropt)i!A7{;*<;Q}R$rKBS zqNBHByTh@YFMdK>HFH4iRHX8f`3FM8L*CD1M5Q-sU(de}9KjD$$8q9)y_ke6BT?ux z1~@8*j-#vM4iA5<+x-ih{@Te@P7%PIH5MCsnIu|Jwxp^yjHc3} znX`V3>vto`a`fW7q59ERpemkO4kp>xPpF?wK>1*63%VFi2DV^hX-FGZ;<5)Y1}BlN)Ifox^S^kmu(x2Dnzf zfociAv2uMQ7tPzkjIij%PKFYsmAYcZ5P=JfsQQS zN!%Rr*h|9QK$W8^V0FsCCXCWigf zc&aEll+X~8{OG2ltQ;s8g*JR`aL;vN_7Q`6bP+gfwg|1Xp{J??IA$5`;1Ka-&@0@2 z<8w!Y?VDJ97ONFTlHXmZlV4?l1y+zd>)F`P+u`ItD<=H8L3er@O4I%}A0UIimeN>J z=ped7o>>r9b|aVasvU5nc`IoBT7PWOmo(Ew(~-}{*3B*S@p_918A9uod|)F=di7)~ zzae>VPsZ8|pm}O&F~klSOQA0bLdQ3!0~|g6MC#$+RG)S?u^XrNW!!~TGWg3$vO-gD z^rOpNdt740@(klwuLZ6LoG_?Y?uY6TZEf@S! 
zlwfrDSxL6NQC#L8;gxndMFnLs4s!nuMT`J|eEvBLL4%)N?1tPjPIezYe$>`8%s9Fa zdGfBmU_u1>w76z{$F*e1I(8Z5i&_`&Qhd>Tp638Gpj2fyqRG^}EOOpnWCFaoG*LHI zO;4VbhXRXzwCC`eBX$`oJFxF|UGjTYmhvh0nkpDBIcrX#L=!e1iHNTRha&;3?aVk5 zvyRJshhv{Bk{pyz$s59;{hmE=M?!bD>w731oHcNgg!hl>I#U0%O_qC02{o&)f-u7iG|(>0=*d|N)FQb>C!!{KTF2zLS{0W)jJWcJ`UcaNuHpiYc3$DhLF zj{G=xNu%fIJa`gg<%}el(}q#j+!H6e#hB(go&|9)(7|0)g9-AR1c0!k9sg?vLTj(X zDa;GncNxI#MPTvp9H7w$_$jqNx?^%yowctUAfHu-iC)LPEV^Pk>P(!Qtv)j+%^~aqQpLbCt z!p<=1dauHJ-)Xbjmmgu*WY_(rxz5n0_x_fZM?8RdiqXe0zR0Ql5)4<%>~~c(IM7(i z;alfD{3dd8F$B^?$4KhJ(6oT(0HHP~HR$XdMs;pvj0(lX8-0tLA!+xKqH=)KX&6GhyO6xg z@R{7n$;l~|y(LHaZo77$U5>VBgTBP-X`JCIH4G+>WQMkhy{x=kj+R> zZ=nuGkuz|*5r$B+=4OjP2L!a(f(ff&yO)4gH^k1iCqkd&SRnIdN0Mn_LFv}DeUN?1 zFq^!HHN*bVgw+3{I?N5ERW~J~&-Qf~)2vqGlNeqpCO04Meoiep48^-muX49!4d$XY zXqK&-?9@fI@^WP#a1d&agdN=rrFheP^xblhG7|eE(24y+j76%6KJm~h974R2Yj25o-kv7h0r^`4 zFZ8N`n(zICnz&K*B=lr!s2u1yDr7=EhmzKBLj(;5MkqRpfY+achvY*lse@-y+{$-f1& z6Zaq&tS@SFPF;^NbbVM6rARjQWT5N5Y+`IE^`)0%0?Yq~(GXUTzeWPKXad=^%f;IZ zW5{qaG1SCQBr0*B;KCrw)k#Rs9w#J}kuQSmanC-{xT+Dlpsd-o4(uBm?u(Y?uAB(9 z#el3g?H!0+i&CzG|MQ?FNz>E9LvF~zz`-27RR`4Fx&~-|&p|70De*D2AO3+z=VR}y z8Q32Ci+|6wYA3@9E3Dlp+rrdWiryh7jbB!q(9{v+0%D63kt6M6nw%C7+l;mh%U=Krnez$a zmk$lUk-1rT2wc~OCg|ZjA79;;*W=dXqEwNpK!$pnu!i_jhVbWt(kBmJAXjyFUoK5m z&JnzES!_)!v=Sxm%#0W)jnOky$ut3VNt#tYb89ajKU0n&jxl?p`UtRu1}1D?m&mBl zc8{jcFs*o}^kmJ&3(m=I-DD0U(akJBI(iKmXlVS8iT+Z`Yd(T>tOg8}wBhPhcP=ux zXSs+)Th!=UO(^X~4zG8|^YB)(th#5DBc~rA zbFlu^H;$!<7Z}(*@2QwggJd_{Sdt%u4+PGtfjfR)FVqYL^y1AQot<$H8k_=+x{;wO zBxE2f3t5GHg-#gWF-Q6(^Em7U5@f zoRa$~>@yVZ{`Tq)pDCIOfe&odZ-`D%XoSU;dVQo5Snk(QPmZ3LXi&Ls5Te65kl7pO5 z6?Nw5_#~RrD;O+*=v@l%z(mK_7kMha(v3AJfxUgLz4uKDkO8S~1krHmo;B<{$Hs4T z!@Wms(|a(Hl_I=&$QfFfN3L4*Hy)0B3jfPhYc=ts*D<|{q>VKAM{}CwSRh{rbxGa2 zB5J@jfj`Gf$F?1RyAS}*hkB}@YAvV|-pIwCV>GTu(-Y_*w+$Hzp8{|yVVzEb*`lD7 z`mblIAmF11;Ek-E@nuhtvrV!L$2SWz>D|9#ndF41FjRPS41yBbCLI)j1K^jx26emu{K#kfsrhV4~ z(^%9x0z?_gs)dW4hZQGiqNyt=(CKCwM_=9n6#}?i!ZPx+CMh~$^vsGFkZu}Lqkaqx zwMBQ(WM~e8LjYrQcZr^ZiqK#X^d60m6J$ym# 
z2TiRUtI|y^LBT%;3jn>kznv+thJACQ--WL)l;&5{egYY`A2`|dIjv`TRz$8?w-N`4~O{L@sR;7K&B=P0y;Ze0p9 zhR#O}B&G%sjK{cY0+(Jj@CJ38DW8ci&!~fSgQjFQqbW*Sk=0_IOk}(&rzsLbs0+p% zk`QCr5Rh4wYH0!#Rdn&ZrJIs;BpbjI3V+zgc!ECWDMWH9bF8quyeK73D zZjgb$C$fH2@RFdcpx2dBGlyqA5keC zPrz}dzfT4EU3uud7M&e#Sp^ZjsQKHv;+wO{2D{k*?6viOeywD2Ho*!*@-pcA_hOgI zBZk|LH<3!0cNBz#tDpcls|=EOMEjM54LDU`5|y(Q(pPAD1hw74WlM%&IS(H29o``G zncTd8dO3nC8zZG#y#8>%qG2B$;~|U#wKl`f89_D!Ms=)Vwv9bV!EO`*O)>pXW1!Cb z_s^ct!;m7=w`(uY994tTA){&%7)?8{-e#}206cnlNm<%T~oew05+0_o0 z;JGpF?ka~cQrC-S#bzqIbo64fa~2L?bngpqDu&88S>omniBM_if-0S*Sb6MYv3#r; zPm2A}IZcUanP+BM^ z6*UtNhLINyT#1`^yTjgh#|?!6GFbCoDCu+*N>E?~d2nGr*tQ8rbkRVPvh&eAnxbWO zgQ3=%Cd#3G<{NI4Cu9R}-IJFu_q}$wzA7j+>lx;+H&F)5flO9;LYB(AJ$6z}p88$6 z3<0^Z<3mV95+ss^UPtK?n!w==oFs1$I;nQH-Id=I*gzGGoZ~dy{2IpB$P&r69jk2G%&0U`|sluEhOvZd<0K+a0#EKW4ZOdf?~_&iS4{;}5r{@dH*U}w67;NAp9F!ZP6x4_5kp~TWaG`Vgt86Ro)iGW2- zb^+^{I$B@k#T$0ka7psOfJ$9qnnI;4-&R-xwj2fMLAIm{;W7=O-Mu*^58kiz-4G(Cb+@y8())vED< zH*fOdR!*_3_jFJV+*DjdLcWdxb+AsJA+X@MC@>42`C??57TDB%;2e@5hc;EQQ^07W z#Y%!V7t?uOct(SYYN)l;8x#;|0f@hZLsd#iuP3iFBm;wiZrCJPgOpMJ+lAX?bMKND zm@EdwWO{K1V<}Bu0N46cjs@X~!tf^fbD&KMH9aUG&uIkMr4uq=3ufoMfM9e+@@Z3I zcxW?Ct7wmyyjpk!jRb8{HmMiM>KoA+i8KpE$BV`3LXD)gv9HolEWv#0%}S}QZPy2K zk!Fj*y_e8TI-UBpPW9SCXlp*CE_4{g??rB7B4hj0_y{h+sES(EWUG|o*inNcV|0sY zoK>`7_z*R(02jE3VWK`N0yYrjB`+=+=vn6uz#H@#tsD%f=)T?hPO2Wv&tYQyesg_D zH6I6L|HBqt*;>ek7h)uboUx_Y+w;_eG`O6;2w1>5Urozy3EmxI;+?SbONV24o2vGt;i4RB60}Iv#F<_ z7<$?Oq};TkIM8JkYw`)Hi1bqN`S;GC<=z7RJyqI4s@a*ZA3V+0#oN)mD+~$3NibM% z!r-A6%S~hInf?$#MjEXixIJkzc-?o9V`+W}^KkxXVeCelp$u&;uXuzG{78NHRhe}V z2;6bR_XxcrVp%6t;cP@u7ay86gd%kZ0|NIzZv@9j-ZX=;OtmzGLrtI}PNk3rlJH^( zvk?@*fMFCwcXVEt0(}QiF2sC4hjeG!uA49GE+fdTDa&If566-&Bm4Pz9As2UDS^dL zXb$lIa>fShP*j&eUlLErUIH+z+!_#gg}W##^aXJSB6UPhF;L1$&%-66Q;f$48tn9G z-2t>lN2{_{8PMSnxST&iySgoDVBkf9H=WS{tC2azPRQ_Z4Ub3XhT?1t8g@ou`s?gq zz&#Ipc~a+DhoXjW6zJ>Dz9ctWL4nT7?8R|+k}+ew`zYXPe6Oeug61MnT#NVfwJwcL zPDbus_c8ByS0#V;k8G~H_7yePs7@&+of()j!=8Qr$fuWALM}{QRLC3sCS5(6GVdv8 
zW~OvZOcfrzw)~pe+dVThW-Qg(+vWAEN^VTf*%g-^UcA^MGkN@;nxIJbPf}K=(t~1J z!}Ska`rQ3BK91|dukksz?|zrEKIQDz4KrtEOioG)R$kAY^(m}kAopl)lWn$5cF&Nr z$EUtCTOd_MZ>8gqws3R{)IKcR`xxoy5T>3t!>+9r87_2(+N__%=GStN(D`C>SuxC4 zK|Ml-G1w#a#T<(Ghv!O<(DI4Z4GOK8Ph8A8wE;t=BN3JHj_0RRvSRMs5ujNHHwHsdr{PKJ?0d zs{nULRNT}nqkYqlZd`<{d6!0-Yufd7**DB!bKs)Q0|)zk=leL_X*bZ)GoORc|I@;@ zayRd#$GgR1wg%paGaRt0%72hk(y_L_hl`6-)GLomwA8yPf_GomiY<*%afc5780NH7 ztUAg6nMNS?%lg39b_U$Tk@kB2iuMcnx|ODe1}s44;X!C znEDeAAG{U2@M2VMVAqiMs#xQtjv}Uw36c$$-&nvG*6!4Eq3yL^#=4T2zo#d*9f~g^ zA<3-Tw)?)0JTVl?BWiJzv(Q(>*3+N7$s#t?O`oi-{)!{>^D@5PI}p`0e4x1b zt?Kli9Gi0}IVw@F`ed;%Z^Aa4@9D&mC8WCS!K=F1L&pN;K<8jWYI^3h59cAfHqSHt z{3T)T94C_V(>nWCfv`7H*&8 z+h zd<`+0*s_x2JG3r$Rr?&xKGPT>X%zW>{-vFq<@=mgS;k*;(5Omw()ibd9-K#hU^*gA zzTW`7K0@V~&ETahj$OaU+}!*@w;?8Crh29(3fGH>sGQieMh$@bYjnPS&Tns0y#1ap z-Q89`7Ap~0T&25FU2MLt-u9JCw@wp>v5mH~1kelY*B`JOR$?-XPGRZcVXJ?Qjg^NzT`;mwP|N?;nprEU_|CqXCt|r zNGX(qh(Oq(JYuGj25PcqqMEMcV}1F|1=RPfEep0VGdVZ^pD2b~dFGzVsJE|Jhqj5&#`bRbat(IR-SI zeA0(-Lfq4##ZSVEToagFT54_MUw4F{D&61U1|?DwI_zQ9$F)GETR7L zF`krLIqI8^{_{PvommH3e&+H%o)nvp&o$0ed=yGqqZD%brRt{nx|&On|DTko(v$uD zYh?x1j3e(#n{~zqJ8}7IEt-K!Ydulu?_pYwB`$9Wvb=@l^XHT36gArzE7rRKXwRbQ;%U*v~RwR?Z}Zt76c zw=0n2#M&3MmDVDLi&RQhdDh-opyEAit;i&@BfV|W{_8LaE|_;dvrtE|{?uc0xoh7n zK#pZ9v{H`LpV}-qnZ-lzaCE;vy+bfXxgS*e&q?wIc}#8z(QlcqWj0X$4=R$HygqqZ zlVA+`1vg(=eex!G|1t-)V%99ALg?w3zr&>67NXX3tvU!|sXnidmT<5CjMMB zr>{NzaCV*eLB)5vqUt(Ie)k*m_xy)bbN=_f!w#Gp->irMS`llpo7pw9UWdmWSMgL< z+CLI#yD8+c1ZPsmbIqNhbw?~8PV&sBk9;P9wYN*nz5Y~Nt*XV^6VF$3YD%?Uyc=kH z%wgrE9f@Cddo%wXoZmmp#{;=$%4_yccx>scHjUd7laFcq<}MXx52!BVRJ25vV(N5a zyTgue-tGXcEFX`|!W-Xi%8N?lN12ASUFqHHc4z+IPj}sFT>;MA z>q=^X&>;ufZgH4i6Sh6`NVd1(>P7BCyPPIlE8+Ssy8b?}l!HS?O~WF&=AD|D?Qu`! 
zj_<0ktzP2Z=KB6amV}?rot$$b*7}pa4F03&AhYOSALVh^w#fXjX3bhS`{h*TapBFiM@R2QR*0e8Z)9+V*;B2H3RC=Y;!fvMIhxW3Eb~R(VmHyookJ47> zxcLTOdOtPnbM$P-@w)%bB}pCUcWr8^)z{~j`gyLeKlN@%#Y=3q9?$o)hxVToRVt}) zKdw;y)JrskU2S-p-u$jh&(*s~hK+6< zy~2;TjGTxTIOfa_;9Vf7^QidC(SBQEvMn@L;SI{{<|A%3zLr^2gV?gB@f0q8dv4yR-~9dCmCP@It-0mRfu!Yyk~-ThLUmpV%=$eI@4Ee) zqrk24^BR15d^NYb&{{97Jqj?3?B14!v)(&@n}NE8bb`9!15@=o#qrdq+-TkU#-`R1 zRC(%5Bsqw~I)pe070z(Y-4%rKByaZ}$qT$-*x{WyYv9#mwarF$v|?p5b8}v3r?2Kz zgpdDO zzPA!tnd`H|h33O+JvvwF@+@R2ksSyTLtC8a>I7|(1K*t< zIv}+)nWcEyN`O;y=%XdRhfIfW=B(kYEpf=8ivkq)((Vyh;4W0|5_=aZ?BvLs@a9d~ z#jYQ%ENhMo#G_5gkm`x!Oa7#Qr7m?T;X`CJm`J9&oKsP`JiAV#*4Fw>YQnNXZ@E>$ z0dKdRwn(Njs_)>-@S{%)pDdclOn$$7+V5!w#qGUeX2nqhAwNqTNbL}b>v$v7BXjmV zW79y|8gdGuJ|7sA&)5+1x-C_p4UE{UT%KE3B9biPnZ$EBf6x9apArVKF2%U&#S#&d z%B$w*tr}@dth+?=*)x~rBAFv9em6uJxxiy-xX!2d4rfZhr`SxleX{GCLmqi&M)!A= z7b44@T=Z>I?ST{|n>+pe?{^w@2Uu-F$N8%pr*O}E}`^R_F6NG3qMX=gk161I zZ)K4u0>n6=HgHg>o^5cbEsF46XPG@&Tov$RPMxR0-pCiJj6|dIa_j|cX(miL+uL$Ebogjc6B^?xCs5yPV@)`y>|bUmW~Ll$nYBtyOu5UvH&w`P>TwJWCt<6KYC4j+2;MtNV1Nl2r;kSekCGf9Sr=KuBps7j5 zqKk>NO^W0i^t@@n5&b=|%y4MX(YOs)4V9(n$J9qqq>(pG7J6e#LG4flMir4VKz(pB zl4Xi5B0NyBMZ4hMJr6jFQ-KA>tN6Lu{7d-|Ht_(M**-)- z0Qe|xZEd|-Ow80pwF^Oms1{i;T3kw!y>vmxP!0~Of6;^Y^U0x2?U8v<&R+R|oS62q z7$IYmXLfIp-w?*5UVZL-+3M+&k~y~!BVuoW38TUC`=4!MKx9B?Sx6{2&3!<}H2l`` zUY4$cJVC+K^CxB--5|mdxJcW^82M0-YC50QKKCS3Z}Ng8=#qk0NC~dAY{|x8gx8YQSp2JJWbdjz7m>D zhFQdQ=ueON1r9OE99D){WQ$o?BiH(zVOl#M)N9Qu^jt^)_MoeG`{T!t84=JZxoCVI*#S6T z;B;3b-H+>N-IH3=quLS;pE!7OQqOY^$>PY^8uK{jKC#6L(6P)D>js^~#cTP~L~1-S z2Q!&#gbutitT)yl(xO(`CGRH>fjd(>R9hwp%oMW5-{L%q_A;^XKt${IJ?uP!E+Rjf zk}{|f{R%O~j!qYUtzIjL*r+=m8wEKPjS0Un8Zq~{X*{V$nOG9Ibv{T*krFp~Hr%TM z)%Nqjnz4`OV+%>3IM*vvjNRYhn@c?5GJa{mucIcxWAWZ7q zSpc4}g)`OmzP`ThHvKUvGN2>d;1%q4Ge&UmR!}a$nz!>TxefNeiIipNp#OTQ)#Usy zjsklot6wmm&BY{)fo=PE%|?c%)UYW}v9uHI)6wUMOc2m>TAyZ#9tMAs1o1*5Lb=G+0A}=7>(I^s4IB z(7!ou_6mK@vS;}&kO&F*g*Rqdw|9ghV4TTSmRtT?nizvd>2c|?vRv7PI7W5DSnJ`< 
zON|%Oni5Tol1DPXLQD39MF%ZX0^VIWgwm!s26?%Rxl4E&S13JPPdoQT^)p6z7Ca0Ho|n!bK$_hRy8AqLv^>3nbM z(rU2jbjP!a$+hJFieESHq2SREC0f<-nH@9q`+r`+T8Enp`&|c1$*9y+r>Jx@^h>)z zgp`)Wrq8{GugqAkjtzv4Ee1>5xNi_YATsWxgz4GICUEokpw?j7xe!Z*5J~c1T-b$% zXe<$kE-c!(g8HjmT<9gAr+rLJR?+|!9baamOv4D}`v06=k=l5Kf9MePhiO$O+E+uB zZUB!!YCh7OO7iNU=9oq-F=C0rM6LV>N$`sws_qI)Ng3P`i%nqGooDVM3SZ2J7xYW9 z5r{Ox7?Bfh-tBGfv=T0sf!%}PsikC|k?fks;TAKV<8F=mhq4-5@T}+bu60u~sh_;P zy@_a_O$;UQ0${qh#F(sb4y9QJ z)3|l!;6kE9z~0IRh|fdoGilMtNiz^lC@SW{x3T3yDe-jZrrT5}Z9TEneJD#%mWzx| z&`)SYKQw7v9SnVLjxqH_=xO+)>da+Lv8m#+1a)ERJjZIE}iH0O`ZbpMJye({rf z4)=g~swLGcOs*i8Z9ry{Mv<7#Du#K709@JFMG5Ssfaj_isMPZPAz-*hMr{C>>($be%P)B7nhFG9j3{V#qr3M7$!Yw z)=DbL<%LYUR<2;o#iL4xaSkxd+rlm-O)F|2`nr4we~><#&P_u0$#)Z=ws~J5LQoEP zp79lpbVNL+0;A&5h0KFB_7-o_pB`8i<8@ze^V7WEk6Cqt?KZn+2A(qeYlf1#b8!z+ z+uv`0e|qDjxgO1LE9P0AG%`95RyJj3CRNFCx~ybu6zXo5?-{-r(Qs;PALg&p1vj8q zZz|^2M~olJa4JpW^E7IEDJevurokfTSk%%i2XjB->XL{qr%OPzybhwi+Q$x~B-NBi@AkH-Ev2a4HV&n# zNzHNB9{bQicnx6)AvOn1bkf(Poe_xX33++hE3>b9^*W!>OOv7GZGvpXNz*=8CMSlv zTO+rQEX(>s5lG14yXu62k!cG!rcUpK^iT9XK;R_@37hKJM~^l&l%sB{h;e*ZgcL9= zCMKph9%K+fYLp0me1N6^d~IiJ3Q1b9tnEkQFv*WT?E$$SucPrE$gMikBPls6U5VuE z1kUvZsBcc{>CK618e5ZgNgxK8?3i28L=0~d#47U&n{g2&Kza-fn1+KQ=j)NVr^_&b z)|<$LzlWFW<2F5{9E({X`c+uR>fA%N4o!w=!vK}%IG#OZEy0Fk!kNQIun-LwY-r;o ztplk8<~~86q+iFMO9+dg8{5Lb-xh9|Se^ri%DGql$^kY$b6H8hUV`^Yoks#ld1nlIvG$57I3hNC_tW*Nla9V}eN5g78 z=d}TIzFTcCKJ}C`8iVIwn{*qi4#>2kAa%vQ1sOcz|0=kqsHUXc}(02usvcfvl zG_f2lXE^qR-@kufm;Lg0GO=tZhwa+wk5v$18cLI!cz#G_fHR7A)ekP{apl`30)erq ze);8o>8xvC#i=+0Sasemg9z1aPzsQ8NO{&ZIw%5k`d#5WP;4L*Q1t>v%>!4>A81dHSX zT6(J;Sj63LC?D!F99^7M!X$Gl3IzRDm;bRcW2y7Gu9XaIJ+;4a)EK!&M@o8PqBuVA z2-!4|2%W}u(Adji5@Z4V(8#!hZQ_FTF^ASUdE}!EvJLo+2y3+<*^%a9Jw>1C7zEmR=#;t66Ws7g`_5>bS7ecWH3H4D4Ehx=p~#*LLP-R);dJj=QY)adACY zV?8ptxavB%nnrU=f?(P$2>_s2ucVS z|4>Rj3u9wpi-hnx*U{H%8bo4yhC@uJ)t@vr zP8yVDGOcUlxJ|0iJnzgazQKXbvPJ4Cxpf==E{MqevcX_Ff++ae#~sWCG-B10Tb0`*98 z$U{Z67!n*ia#cc7LuVG?oDDQ%#b;06?j1XJJk4kX;l&1869FRYMW>`w^!q#yg9dw^ 
zKTni261mcIqdBe~h!JXgG3(Dx@-*aE(D{COwycr=;nll5j)vg!}j z8}r(hW~>di7ck4Lw+E7i6fW@Wm55_t;!LSH_MRZ~Epw*^p2aK)Ko!J~*I>JVifRC< zuf0d+u_#az8My>;jrwG$JfOLvix`I?x;FUgFtjgp#a|7EASr!5Xl!W;KH)&fA7kCJ z-*Ggj5RPU22@O85Auu>!es0n?{bH@aR&jwP?m}xiFHbp4XIsNkIu}*_rVzN|Z$*r# zv}O@wj(qk<*rH=P2Mvn>F)<1=5~9sqyeVP_n-UH}WFTeX4(-npq+ug3*ZCI*G-#=) z1h|IyS=m#)smp{Y8Rd@_$&Y8mc-430M2KGI-6g^)@iw82*i8N=5>nhdl(wTciSz!7 zze0A_jHwCEEn>S<83Xt#(SbCLL|KI@+ajt{Q&W?Bteby~3nf8K{bN#KlC~a21etjg zBf1P}DCV0)ouDBja(yKt34f6$*ey zaNcT5IF#i+SZ}Kw);T=59v#w1>=s0junNEz*5F$U~E@tpDm2|$WRc1*d>5GaMeGYyH= zd2%|cvlql$GT#g89?lqT%c#eHnw2Id5*8Te_?UPy@W*hmtct#OZ!HZZAXzvKUg!p3 zr0DG27`p00f9r~L|85g3Z^V)nQmP;B1KPzA$j3ZDl=laTMGOowwG838ayW&E zGPF{JYcisT-U|6CHZ^>CjGTRPb}8tDtU$ATN9H#4)ZvhP?!EG0n&O2844^eJcXi8e z?<*A@d!;Tfub_~vQ7x!$L=*4eoLgCQ*i;M9OBnm*ZR#Yd9XRl&^)Sv48oFWX;*1e! z2AH+CnDw-X{ae-7FzbHuYE5mUy<`0;>MnoJDYNxY>xdIf3#078Lo2r_9hQ4C*p{Xt zzJMyCI@PtcD_h%?a_fM-lX_R>L&%c@YHD0Kc5HKc5uAi;8XypOWoLtPXs3#bibiF# zUMdyJtEnAx`D0Gqy1mH);_E}M6-oI`YILl-Fov6353JVJ6(2$7A z)H~71<5AGFNOBA5EFo^S`Q;-BaeW? z=HWx{>8uGHXM2U2)SoI=t(jY_n&*?wzeVcy6@e4zmapP=k6N_&tWOZ?wTOtD$?G+S zTP;CuVTdVDH1?GVY2?Xn*saLVb?nIMEIdH(z}xJk0kL;SB6ntVM`oquntQe?#B1N+ zHh-#YvbE2XS9l_$^=2fwo{;LvRIfMA*V!bhQw)|18b?kM07nioVDvsZ)c-3fr=ns~ z?q{-9J zk8u_H=kM|>ExXu#7mcCiku1+UDX&Tk1`g&06VABihou93yblCsjh$zT_Xo~@N2;+q z>o1J0t?szkEHUbT$ho#y4X0_eeo||()0p4H)!AHMgF^`DluJRQ7|H}M99!naBQ&_- zf*}lyLn|H&UnWO$l;I)I(u#`8$`-*O@VQ+&dORk@&Z8Z*qryMU&{kC>VO{!QaqLcA zU0n^H7FlM|EG06|B}v}AV+-6X5AYGA%VL#Mlz#7C)M+_&i82E(nnj&V&|^G@mS%~Z zvtv-{T;R@~s$r)lLiO!%NNLM}qE7=8t)n`TL|?xSVf^^krYRrb1m+!Y)EGiTVJUeB z>4N-g?4LRJs?Dr-U`z25_%P#&PKg*quYeKF;tPYttlqe8vL;AHT0jZ!n_P0Vzbg1 z9;OEZcm3V%H6$;9Mt5by0C}0=4A6GYpeRD46fZQvGRKf0d1{>4bX57FuppjseSGMM z$qgW2be0G@xcV_?F9ZHeWGVY65FH0y&fGf^3vWyK%NMUt%K#MQ;zF#Lf0s@?gA0Pm z!sTi3sk;vobv=whsY3+<33@2v_5=4X! 
zml~8p)r+8hd76F)XDapMB*FgNq=)>2l?6U3jr;RNdL&DOjW7+N(y0dFlClkls8skVdE;&#rEherl>MsdGU&t1j1l+V1sD;pN4dXRW zq|Iw-kV7`q^E?2w&3m_?>OV>osXA(GVHBKA!)Ca3t<-`t&e48h%2leRnJxnaF$AR| z^E0&=L561l`5M{T5SU7>Pa12_oXWCN)0r@~gDI7r+TS1u%^yFRpNuzSy}dN35j$~< z+?;L?3}(OmUa%MlIJH1MVA$6Odm1aU{^`b{bTsu2D!ow>T9ROltOY))!$9+24MaeD z)?dyAG?$DVQyQo#sKHZ7H;EDMJp4OTuAw+mHI&p!=x@{MMty7w&OQ&H5U@cx;{#iG zqY0Bzi?mZgwXx`VF`1I=JD}Q0cBbm;>XZ<|BOv}Dy9PJDfjgsL zVdj#}cX#k?JqbX9hM+%4w}B}l;oyqcXzt=SFeja!xg=ITeQ)g%Yg`=}4sgl@#bi=m z*gMX@&M%R1Q3pM=W~J0e_sF-Z-GA$5*Y(c<(c|?c$s5UQCSI!25_ls!br|b=0W-Fd zNOUZm9H`nlp_c)k_vN-a`M z^x0coVA{(XB0_SkQ}uS zXi`&F7eCiVviqj-kJO7DuA#0xA=XSwhwM$r=zMrF7Z=ULBBkD)Jpps!i1FgN=rcZA z_W6Vm{a;*uB_$M?VMc#*)}Jz2JrOi@Fv>;Xt=4d6&0?P98= z^jt&E5S1iR>WSk{$RYKqNJdOylNOha>F73*#&++!Cjss^`Tu2i+Gcx8_=ur}uNZbNa2A}|@Nh4UozO>3n zzXdZ78m09g-O^Rcv~B!`5o)aM`|iRsy5IU7(o!_%SYhlZ}%D2uv?_#ZyxHA5b1WdOZT zFXd{#OTY+VL%_IDG}5rJUG{3FujiqBDJXx3=qTE12Cl!j;>8WOh;v zlLrg7?BW$!()OBX_sK_|nSCO@_n#1fyr@+p-EZJwutoCr%OV#ZZJal^c9+zoOQGlO z)<_X~)hB*;HmKw-d0Rn?6g!+%_kta?kTSN%9sGYD8)fU$}>&cHBCQ&x=21P_=-vS;gis6KUGm=$< zk>;w#tJc0mF454V&@yZ43r_#coSe{je2U2+-z+H4QP5;`JX0_41~FK9mz-)YXMzbe z2ohERL>Y5&+59ab_?gGJg2ZGJYrXZ$I9nu-y5CqZk`w9& z5Pjvu2+!P^&4Tz^YtgoQg8r9~i>1b!Xs1=#w<97)ERxTJN($2HEY-WK>Rfg&WN!;* z*N9CQn)=)~e_56*Es`seLcyDFc`m}4G?*S0js>^#qi`csUC^0LT_KX)@9 z{`DUdc9B}ZYCr8B31!w z)#vK}%CN!qr||HRu}~F&RkXcljlo=>{!`rI-f|e zS4XF-`XbDH%UH)7^}92QtB^HOl2vzF@1`0v`iChDSX{>RZv{R42fhd(n8Gru_9Cs( zf8xC!oSX_TEei{Dzm{T=xQM<##9fFY zI{96`&mSi7%_1ij zTT_GK=&kYN^u;>P(x=iSzXB9n=>H+nIn3@?0e%c(cF_Bu zi}rs}I!$~o`18ZV%t@kJbcs>qe0vTR}i%cP*=n@!a+U zXr6Hx$YfWlB(TxYlHL1Bz4Z49&Fq|aUPU~+!k|GEMBiyW-^Iy&C%nTpPg>$={7pKV zp$F;h`Rx#NafItoXhR(65?4b&`CnEz9q^xz-uv1tZzGdk=4(7oLYCOQK8odA{|Kkd zG!<`e-PZIex8v>JPs*^(zZ{wwNXoD=0Zv6BsfF%Qs^)9%Jr@_zf}m>VXeobWvVX+< z`3rOImp3U?-`CX6xcN|XrgTqRp<#a+DcnpPj;LwddVPviVK?`2qccD8=Vlb64P0VX zb;0QVVPV8Y1n&hCvZI?3>$7hQL_?iH>G;*@W9$LExhyriO^cfp%dh_!*Z8OLk-vY{ ze^IVC@tyJK)1O#o#uJpDe&_zu!>#=!oAdCpQRh4FRl=lAQ(i5wi`jwLo!A?@UxXRd 
zPoq5rbkiDwa(RYHX>{v*3&~LaDX-Mk9KU}h-)cW2Zhr364}qR{+mUxE;Bsw7Hu1OVD&xB;tpL$M#^}GEJAZW@r$O0c;o#Jcdk=ebS`9hOS*rB-%U%*HSH#2(FZ}dIv zmj6ZHBQOgxf$WPHvXqYs%<--6s`IgTc;lP|CpNn)B8YzarKdJX0+}pU8#9Wy5bhe zDOWZqLSr78>?d%rMojdHq>j?z1A9`TT~|_qS~I(D%7dfCRR<^u4rVQeV}le;y*lSh ziuPz=c+HZ{()9A;38aK6?^47>#{-9?w(KB^(b#^;E8X7)?=+^RTN?2W{ z#&DS@h+kViy#ch~dvy|?z8LKtPsb(^haj0;!{}R3%*?xxxgZ-Ea0vSMx5oZ_J)Tw( ze6-3!ZNg>Zk8UBK6ubWjp;LC^T=w8gUI23kpR1G8gPWq%aUv(s=n$2FO{-#{r1R%~ zT#IU#uT;lUCCadNsD^FO`sQ9ox}j}9LY39(Pts02;3745c|3{NJW63D(srg{7NJEl zoT_HL_js7(fqHM>Bw21D5588IJy%_Q&sj$m@$3%!+{BWd(8MxDIYeBBA7;L!V-dCo ziNQTG*#47M>HU%5wn`D`5uIW~cnA$+%Jmw(2aufxqG4`DpN8((XZeGToS|~Q2m_2d@gfQeiu^EOx3{u6 zIL7t2p6I7m_80v|LVt%NN-D@6q4Jyy;hq=Miuzr^bZRT=W_|x+wSp)Fva!kaz|k&( z8mJLD`c8jz)s?2aP&rp{X{}rcYEW07CnhGQcJSZ=ngvQM(;(J#7$B10ClRH|f-d(q z;*aS3Mn=la=bc$7a||d7w5x}P3Lf|`^qWUn64g7W2Ok*SN2(O84OQjbvWfMOmBTFWSp;}B$&_rQA<$r= zHx6$hD^iloG_h{=o;Z2Z6AYjYd(k~f2Bgv}gLsq@JzmJdDxm~fmrkNZGTZF>(zL-@ zN+cj=T1+Oga)UGZgNPRcVlTAFLe&lL-{wrHq=rNFeow1^z{B#qEp)Zf@u%v7r8_>J zZI7*Cbemr~QVd&5}IhHl`dk5;OG z$Kf`6+?WJvrXh7_K2$E zir+anhBq*A_{Gx==;W6|r08KTz=y-$GqZa+u8XujSqc+YjN2X3ZfWoZJKdwx6OQgSW3EdxW6*js@2#NKM14H}tKV^KV-o!;l~?W^ z7cyrBTK($pePU$x$(CX0Xcem5^B7J(K_Z1qCY5EsC}RvoMQhuJv=95G!F%zLqgn28 zaA{i@%MJEDS?Iei4MuymeGZM0H3%|IJkgs%)0{!|Q`ds#yP6SA?^tM&+%s|~n>Y-n zE1qjHxx4eH-2K4DmKEbI8{>oVL=@HBmNE}&_C zI0Rb+F^-=^diGyP-6!d@{Nx-{K0=ZuqTR zx3b$nRUp$YfsedWv9L3igzOG9DzL&;PM|)uG9Txmfh7@5igT(kdJ8s$eRx$Hi6-m+ zZM)F2mVc`lt&d6J+UObq(L|FZ_}(j?^l@-<4SI&S5%9XTjkE&(R|XbGhSY;oYSQZj z@v;Ma!{!(ps%-$c z%!~#i!y`LD(9{tVHyf@EZCei=JkaSJj2vYe7GxAC+jGe`3p<7z`_z7P0SAZEbviwe zMru&E*}CnCn)K16Nsd5^hGXUSBAVZuQl(BzM~@mKS&9CkeyKzAE%LxJ0TYTZWsF7v zlIAta5RU7e1!*l>XwUkJv#>Au&_MTuUN*^Llvw4^U6;3()66}I8oMLz65`iR^wQ=b z!pb~XJxiRz7Gx`lPMpDWzg3H4qY&=EHz1B&1C}}&1Tn_k%KC%*$sTA+ZTUeA2^1jF zK{oYKsVmxBftTF9f8Ud2ad2l$80drcwhVWiyqi5SZ;Jc_EoFG%+c9p!fV@Ym5Eb&~ z5-g54pZqcI;#?yq6lA5F_>XrFsEHr{8CX1vssV&A$E>pda!*V&gNreq5&JVOSD^ic+NLd@*GEBV@$-X!04RA|44i!zw 
zo8sbZYBSReD;-SfQq$DD7JlV9Nyi}QSsFds1cH|GusuzWB+)r-&16_;CoOU}Rz}Z< zBtRC03f$SmRv`9rMMcHGOaH}@5K3p}{$DrX+tQKA2^$)H7`NPezDXnnrKWn&uAckc zk4NSFM|U{N$$e@J-ntieOSzi+>z;JWzlA;+R9FSAs1T9h#DAq+?O=wkP%)ZY5L&bB9JO1CTD zQ&}Zb?&LzslHIl+B~Dq3!3rU)bL3d}D4!@<=!~5|KmdT0*-l{N=U+TMzLQ)nO0TxR zFDz?J_By?B;@91K6sw+k|A}@Zq=yC%TZF`e4?2fPMg{-L%KAR+W1_ccup^A4Vq(mC zQz4_o7f&UsvLxsSM4XOugkf$x^QC7=RLy~ZxJrAv?sJ;ffH8&!!!SQ)!Rf&5#=P_2 z@A?zI&Ri@RwhsL@W8cP@yMm#sSudl7<_EMKwNBP({ZhMkcmy&AH`GI1+HN_?bZncTC3TWOA11Tb6BagTG#m1r4?_AB73<3{CktJ9NU=;y z3QoYv;VmuasdaWI;f|#iDOVxLGeyyXS&}#^J)pgz7>L~vox%nmk!1C!l$av07dl5oPV)5nGW_H6Nw!cEshqWHOzYU5;?OfG!!HvyjC&;R;B$3DF0pYaQ;0FxEzIGN_3(VF-hf z33r$bo0&Eex`9TsJGH8ZdXn~&Y~!KU+~e}u2!QmNpsqg6ORj>-pq?f-QeOO|f0fm4 z+yNi>-kU=g-v7Ri83%#h-XhYDf|EbCqX}Vv8GeA

<<7j?&;ecVp^`Co=M2pKZL` zrc>A?33HxWG8F!A9rxd7T;yBtNA<{rzy3_#p9rQ*Chrel#?jimi#NXsTxe6B1h~rK z43TMaeLtaWZ+=c0)X8NrFjJuqBRXECbq4zBX~jnSLG`3g_u;OHHY$~anrHyc38kjZ zFgd4brS5vriFi^XN3PI2!3T-b*F+~5h9ZZ^?YbLugDo^t4X+!d8K!lD@KA&7%V?3Fu*xL2r<9X4{h35kt z9EYg|pD{EYTBDZ3@tTX-Qd2Gn0owmt-Y;-N7$F)Ar;`@DzdQ<_LN^E|vfvu{Fp4_rYCj5|!EtdB()C9+ILNZj&X&F2wODtb zjYV=XTbHlcOKa@x0jZ4frkCv3rdLtXKUo!l6FuL39yikA&p*u;ePOOU-^VeQz@1`qe>v72MvA4=c}#}tLs{KAWd>W(jc`d z?llF0dK$VBR7&9n9@hh*gg9`@#iG7Spg+xOF&Rvf+Z{+lxCMjSk{aNv8QwC;=A7 z4nT3vjYV)+E6h>iJ=tl_=X@8qABhLX#66W({fx{=_gw$at>XASg-MNMB23r; zlkHLd`hZ>7hlFHn_t=Q*m_Ce8?&r0Q_UVs_f#tnltN6(**l(vpOK8(Ci=3TNR5uKe z&K?1w^Sz~1I2M5TSiobjZnB{o*st?%j*Bp)apM&N?}C^Xd!@Ux7#mcO3!QarbxVDm zg%3TvS)8%l0P=FO^OauTN~D3$dIn-tBz~U~1J-i(Sw#it+3yfen)h@Q`kbIrknz(O zaC*XnYd(KZU3+ZHWt!;>;}J-i8O1ylbi#TYa4`riwpL4Z<>4zzRVX7ShaNeo^dodo z^xXWfTDy?lBypZj$3lcHB0E^Qew|H>Ve>GIq)AcIg+ z1~9WAV~bGW5+2~Fe&T6ycfDVKbanZ_cMGQMw50xjMJGXw+%f4r&mxw?(g1jI406{FCLo zw!m4;4v>bnj5Z=YjyS=E5pC=7@{kLL6#M7|%ogL|XhH7Ne3LOp`S>_Cn?$X|WCa;- zP3`5=IGkcUp-_&>W+tpb|G9;rN$CfzuO^Ex#$7>`6m2kIcWH)_K+Ftn;B&jyacVvq z(No*l5=Ja;#AYrPv8YK{`7&7C{17AbMs}cLhhbm%!eYhw3J;Qp}q@f`W5&RmuXi z6PJQrKLLBP^>@}uXuLh`UlzJrU&#gI24TB>Q2Bl#gG_*0n%{mps3Djp;~JW17Nu{C z4M>QtNrpB#jE5US`cPseBqke{;1^YW3WVe|mI9XauwX#6JCDp5^v z{B6}ZlT4cXynS^Psq2?;xD7MFRLVIbd*a(_k9?md%90i(J{2D1@K;9X?e>OKvT`Z0 zCUSD?x}Hd*w{UNA2%g}DyG>96^TW!_Ge)#zC>PC%eQ9MwBNb5T%`4wostM^Sqp>5Z zD#j=Y)mlnj@pXE;a?6u0NXIsXfR3sRfGM3Qd5c-J7in~hC;XjzK(2L*?jcJbJ@}9; zj3p1GZXCNCXM3fbgQ;PVQ57YVmw6QLGd@`ilH|uQC`7c#*-YRbOf_@j0g(cXV@x8QuY|!taN6eJFvlh$v;4kN?Xir5EUOI;AsE~%&791Cai?GGS{u9VPP!v74H@75juzbzdRwf&Lik@O(;#T*DCe*vZtS{v*R{nT>=o&-hXXnXc zrQ6wx@9gy1J|rfmdN_;za6_RWB_-IVVl7;LPoM50xT2vPHIQY`D<^wENDzR*WF>nQ z2gktWpxT4~DlmofR5nr5p%^f=u)+Pw0Z-E3{b2QuZ+ihJn6`%Vr?$rasic(8ttO8% z*1t}$$BPc$Id(SkbIUbpQKZN?__L}c<9#GR+asqRO{fT)od8QRPy#|*cy>9$<w$-w_Q?VYp~LY;M|&$7anU4?D`g|3QA=wQEGY7GwLq5mD;S)HAyH&V+Y zUQXg*FIk!!^f_xP<|)rWfco-t(>(x?VAPppZrDAOq7L;hOd*+RPr&jQmXd1;IFhH+ 
z!3MXIgF-L(5{2)kf@z}uzC(=&nAX2en#tm%(0HBC8A+#6#gw$5c8reh+IxAYK zdI*7JEQM8xI9q{7J&ARL9_~Nb_Bl_o?fa(7>DI0iEcLj!6xh~?9VUp7yHMeqH$8@n zKOGo(ebUWjL(pr}g|&V|W~(W@S>U$-i-h^tI5^(=0+JG@LZ0*h8mxRsMa!&9b=CF8 zZ7rxe*8g>|PmFDEdM6`gz z3D>l=-8kvwCL;dZHW9&{JQ}=8n=ofkMFq|U4YXNgqO{4T&H%;^10mamXcAOwFMy|@ zYmV%3&--9>5lvCgMvtaC%FDwkgC-O7Ff5T~9cB_aW_N4LJjLLsr^Wm;BW^jhANa)e zfcbg2;|p<-6wn*i3JVA>;n}qGSl@u!wSgs4Nx#)UItmY(w9{xOf{YBQ>DOg=@%)!M zM>M;Rj!e49(=uL5?>zjEWn&om_LUwr#`G1B+IV4d(t1-`+Vr-Dx~nt5|yF>(+tk7#V%glgRk`(IQ+Zxl~3u$uT4QWw*Kr>gm+hHa) z;ToxnADdCLNuDMXlL(~u;oUavNsS$lvNXr;_aklAEb*Ao~liMtBJ2~A}pkQX1MQbAKZ#u0^iQ4SaaB@!ByjGnI%xi z2J|wN+Fi8U(9Ix^S@|w|L|Rg0Mj4V&hEDLprg!9<}^DHHW!c2tE8j7i{>gV=6>2kQmQ*Zsh0>J+0fD;hFEx1r|H z*abya4!@Qa*O=V~niPXblU^yG-7UIeMJjj7b=Oq>ugt|x*Rd@fBL&t@?=4qwTS#{O z9&>x3i^tC=(_YSsF90{>l4MoRwT1HCHb++c<#;QqyeLEc=+J=L$eVU*U8jAegWa4_ ziAlek#l?4`h`|_@h|ysPSZz6-Okk0~ewh(XgnSA6zc;irMVTo%jw#(eH^rOA9(v8=T)GNvjs^!%7k`l4oU2b?elIu#C z*Eg>tVIVqb-S8-HbbR3svdT_qTOnFeL`Z9{V}Rem4X+74*x=lvY=0=h8USr*cRP*U zg9?{yC_KGn*Q>WA;(Rt5ZsJd92T)}?`YT^>?$!Ibn33RcO%crTjs5zr?P&$s!wn<* zh$g`OSd>%3sOWd<_{sMh?$GSr%Yy!sAaR_$vm}*+9)+k4SOin5CgN+A@x~ zr|4XXk&up4rhz2c~7h*g53754&<0!p?j zZ9dXw)Rv)%@a*BL|Hz*IW6Nv7y|M#uX{>Q&>i}ST9%h&jjof<*y8&G#WZRG*hN(|0 zA0EqLWkPsm9f!1K9!$HyJel2f;CgSJ{g%Lzxcd!3(eYsbH-1A8nbn~riZEekr z>OEg+5#DFqou3cgnlVm|AX+mI*Jydc!Gn8J*0gG*UZ;&QbSrAN6eo9GS^*zy)p6@T zpF5+jA3LSf^HcJN#+q)dH!ZSKN_ptNu>8(}InnzP*Lv;0`tz+BHy z&7J=~%DeMJj$`}Hs{C1Zk3Bqb%jD#DXYT)UX2ox}AN`O2+Uh+it4*&qvfYz*1>GpP zq0=O#H0UI|wAXItfZmfB+sZ!_E~UaXJKjW`r`$vwFef-c5oMM)mpPe^>52#C*w6yALA@8q8 z758FjlvL>En6297pqnly(e+VmKtc{MVm&_|Avs30_lS-*>Vv^X&dx9FM?F-XR-ogZ z;#8aOGV}L-OEItlW{4y&BeKSfWYOSrvyrnuR48xaRIDusmef(Mh7;N)vC*!v{I(ij>7HF=W#!S>*yz->>dxcGV&qbpg=4<_9SfzDF?kTv4BQ)~ z_!`*lS6M?xdLL!C%o4D$o~;wA!)x|O z!QBAq_&K6V+w&rXVAchS$>)WLe>>-)_wal#3$@~6vc*hDXfI<|l%S=LNtitP{eN-e z%<1m%)jW96+SpKY7O&)~AUQKA<$(xiA(q-m{*s86Re!so91l4qZ%yk&YT9ca>nNE_QVLf>prCWZ-F;`uy{X33`F7lFv zqv*rTcei&@BuS=NgF`8AHTu)>X)0o3(=z<-vh8pI?l%e)UM`tg2tcpB& 
z>-XF6BB|3f^`7LN1dmqo8=IN=5b8k#&BoP^g9Yve80PEk=a(@yR7G>8Xsj`idTwAU zwNG>2QYV3CpWSjp5>cR*r-`>}L4N&SAHjNJ z2qv%=<7RFyT`WU8fiQi>$POsd=b~v5{0hZO1AL<$+zn|k9Sx;jyP*|#*G|wvvI5$e z?+0?~N`~8u*6_sXqw}tk-sL+u6ys6HKVk+5e|Pl=ktcaJ|GE=g6HpwuxB^U+9^6Zl zUUAtY@N%r@)xl$G>_7T~a}}m{Ei8_Fbs@3w+})?_z_N;Dv!LEbzdGhVY>}AnFkz7G zw!KvKleZKWmS1lm(T`xvX`V&)oXw@N`fwGNm(L>hjIHPBaLey-#M}Z z3f3FKTrOJV2Ka3*F5>n#v z0+`IEfIVCqeY&%h7vk_0;z?Ki4t6F*e?C>{qpjG#)k0DVaSH6l+Bd<;Mt4#-5%_A~ zC-h`?ubt}wpWy3YLB{`D*iJf}oxaL;%kMou8g{(sN?~fNYFuk>lcKizj(lZh-Mr_I18@zAO%7f1=_vkom`E z2?<*}6-&!r&ap!^Eizs9s#T|73WHxPn}xyM$VJRN;jJO?eCi~epLjkiQNfa z(TD$(8_ecdCPQ{vm@5A?GoAtjm}fyfVd>^0e`&A0m#jL_Iell|bIk>qDRx)(*d0k; z*K5L9Vtf{oiKVy=Y(@SnhB{W&N8T z*1^#UA#6<|Ef$(y{_N-`kLq5uQ9Z~I)}tmqic{u80a@5wG{U*LyC1MO>89# zYBKaC=gou|_BxR2=oBGi8~*-3rq9h7qcg@bbV%obhQ?|abFc!|2!Dj&__Z3Z$O;1- zO%!?MLtw34+cJRh#{RtzzcseY*N{-B+$aELP?9u}R@_ul&%Sx~9_Fz5!-f<*;6E;dC_v zY2yZPW{W?kac807p#H~|wAa&R-=9fF3%G&2D6v|%QYL6*^lYT7+V(iHEKtwp5T%`L z5-?18Rq8@m0wZrcPtMaAud8lf!h~Q$^2$86BkmnoihT{e;R(lCBvFa>ZaW=fEygKP zKQBHEXVCKFetv_~BoHX0mPp7DnaAK&+{XUq-t0QNAW7a1D^VmY6amW5j}V#X%PH2F zarBGB)HWBbq{JNI(3IO{qRpB3^G>D2h%!-!3tl2AYeZ6-!wgtuzs{B$K2^_N|CY5~ z(?D0IoJ@oaRCmcEhh|(l;G8N+=$aiOW&=9-M^dr*Q)FRFddh?^OIebytc6ZF(ZDzU z`qzKMOd(SBa}wr=fV`uF)`@klePQb06|9qxnB2N^PR(Iu4~=Xf>DI~go92<@{Nh}f zy4#hA+k^fA^{0m6HN-c^{fHw@VU23>m0BffSEkGkyeWi*Nh(E?*sT`Hciyoe1^n=q z+g*F;Sza7+T_~`s27gc5x!)qRr+W4U9Lnqd*j-dqQiYj|BI+d>*vmT-ad9?~FY6`M zpQ;a=5L-#TMq=_a9+`1qomRY*<4hXu)-DRzbw+^Oou~V`b{Nv<4&VfEO%`fK>E!{-&azDYjh zTzU-@!t>`nH|FO29nJzb#B?HJjW1TfKIVl{w1baus({D;^}SC;xj4Bbh?#>_eBwEK zgw_sCn#3p4tS>P$2P~LtSTIu@eL1$e7vbP@OE*E*4S~|`>0KJgJeO&2lsu<0gd?sC zElS5~&rA=mk4n`ZUWulrDm2`yC#Qbz-lwCZizG)M$g?lSoLe;ma{eo`Y9l#>lyQFDIrimDcbRD2m&#v}-G=6)Ps#8Ewtt|o^z zZw=lWwDY~SH0D2yTq7# z%+|dOySmNicx{!CAYFYCH*Y*JV&#OV4L6*s;dj==X_7KMMwNugNFN~y2hmux<;Bla zMCEhI6=Lm641(-QTpah+I0kS$m{)Lcns zxy{}~HBKm|MIDgoE;4c&*k~cSV#SJYR+PQ#2c>}=g|^NNaV)jtlYK=w-1uNadeIHg zb3dp6H_ArHCPqXz1Gsfoh)SK61dg1`P&oAIax1o-6N9tML0#|6;v0i~Mu#3P`yrM6 
zU&zBE@?I_!T*6+nuB(hUOteJ6OPg{BLYY)BQm4Jlm54heE)EEm168bB4B0IfA%5oWC+ox+5H zGCkXGV^aKtm?Y(A&Kom2+O{H0IQWu)xv_D6cxnxQ-oLh`9UgHm6~I(V@8!9=w0M7= zxOgX2_fUG*k^p7e0dXfx%S^=dIz{^6bc;fYYFKi{huIqW@lG}TBZ}Tah@Shl*n~;) znzuK2u-$K1iAHUgvae&vN7wJ&UQ7_T| z8pgjaHY-7qij-efzAmz2b}oXWDx4iEk(dJeKt-|uY3wtNnLfY1PG37wgNMu5dd4IU zleYCWP{%=l{liRetb-Yt9E(hFKd+A|x?E|Un55-69(SH1CU0JU_+pXNPrpuhyxoqT zHTRGA{BzLx?y;D8zI_Rzj)-CHl7L6p)&kRR7l+UF89EB{9*6rmD%|czc(CG-XZldh zpYguLf8qW)+S-;&-}Q|eBqQqK#vQM?@WD=&M zl)qoJ`4(}k`~3oA@1-{1Knvm4N*rlphWW~F{`EL6&f^>r+6g5`6}fA;#OaRr<+}ZY zOdHav`!L@1DE86^@iQ85%h*=*$(Aa)p||y5u3`GdZ9jUD_2Y?e%dOjbGtWw>yZaDv zhb4WvDZNtdqZUJlm=7_~PuC-xB31A1SYS14T7d-xlAVM79xmT%r>j`6&#!b$<1V)< zt{7`Pviq<52X|i4i-a%I3DP{RvpsZ!KCMm_@b(f_HlE{r;;#0!V>lRA#!)@+wY!9< zJ(RChoH8-6N~_qwg~GvB+ez}sB>LfN1kn#!>E&c;>Mx;D{oE7$KBKL$;Jau2wg(aZ zhx41V-(COnuPs}*-d9>E==gj=%EfkEDI_9Hy~wLY$7(k~6~5~b4nahx(XAMqbr;>@ zM}^LvQ8Rs^;HCk6Of<&dSTP?~OhG3~&6!=Fg>b`ju>*_&?{oNmsZbJjaS2!nKa{zq zMF)d(gC!V-RY7=&n!Jd2axbd;cS~C6pa+umxJ3_205K0Mq4*Iwe%KjX*NbE=zyZ;2 z313zP7Tb4tXD@`|Jj2z2N6OThc4dk@9ZsY@l@?fpRf@yO3!)kO%v$#|%Y|DUk;Ss$ zqpz)Cm^~g(U3H`%J9Eupq+Zk>Ydp0KqBzCkr{;?l$F-}##^kr5<&nox>PX*4d^)v{ zUXZtXeqfv!lBf$y>X#4S{XYYiu{hxWn%*(DGD*HC91-&K+PFXL7LGX~qB$b4488O} zR7>Xuf-;?ZKgA%Ab73yx=T6kd0y*=&@T=EQZ>#aqvr3l#dnpAXq<~u`_`J3Zb05Ai zjz0Q1963kQIPM9(5j!Y*z?lnnI$D7OWl9M@3frb&V<68!6GG~Ov0zg~vu8dr&1@=! 
z|0!1RtdBV}C<$Omdn01pZ<1JEoGyDNOw{<7rgx9@P5jp@`A+*G%l|55p7I_XUXM77 zH8Gj|)N!2d+exuA6>m&y3d!L{B!@@;UPiUb4VBbOy(>I9O79DuosD_7=B0~N#ur{U zM}_{VuX{ulvDm3&tg(A=mE8M8;puA^Y469mcXOze(EOU^<3vSDwa?GwB^^G7^LV~g z?pmy;3zI0>U(OfXdC^<9x~kY*WX~ML{!KQm*N)xQKHzjQI3O&3F7l__q4ty+;Dm*S z5%Q-(Kh(4Dz;E)lKea}zf4|zWSS&_f3uI`F+%c}hv*J>Y;vO%QYgQf4sZU7h;3rRqK5TR4<4CoSQ&-C&Nx?qD<| zKIdMfA@O{ctF&ZePcLw!qu&-Qr z*3@iez1hzCNX38w$5}B3fR=s!tM)Q$hO5a3;RoMy03}V{_3iZA0!v~XWH^=6E7Ldj z-s`In#;(7;`eJ&R_Gx{x_^s%Fd`GhT-uIykL`TF8p2cuz(QfL;=X&L#90pha)pWGF zkUr1E$;p3JchOJ@6@${*y3Ez}TBFoD-|Lgf=a=6bh&hP ze&FG^PJHaBDZd9DZGG|d%KlAA(df*q^&dpBu(JJ4#57Z}QEPC}1`ghtFiiCAu`(48 zW9}eMO0pBsb+*WKe~GdWUt>HL$llbK)q!WKRECnS##76Y;SL>eJ0k1#jL1CvcuDch z4LiFBExLNpz;5EJ5Wg(?%yR0+S6_?AVc(vriH#SqK6NUB0&!CAHhVP%sq)JD+1^(H zqeUAB>h7oV1eUvJdI<|Kt*|=y*fMvNt0A;KW6*M zmqgS?db87C&-a}p$t-u@ zEm-8<+Gk`S$93<%N!*RSsk70-4u0hED`X>fAP$`9bo;pXD7tt{Fk8Z!w8U|aQ~7P# zd(Oa6H-qPO9mMDyy@Ks?kthG3oSvWWqlB`D|n3|E^hV5KU(;2opYZy8#o@ZK?0Vm9}pkk(e=cz(eF*XxSi$c$Z4- z$un+`yW`h2#Y7@NU|8v%>-D+quwB(2_k~`WilVZ1FGTQbL!AvWAJ*6Cxv$Jl>>2oc zcXwcEO>z7|YVNR8+18iY^Zz5~(ltS{zhAD|6I}&HgKfFG4>Z`!KN+DMpFG^Ry^ zlKw}Ib*$oQbFVHM_d@sYO3)tkKZf0QB+;X> z>zu)sagt4!>f}#oyjdQ%QcXPcnXTbrou}!pW7l5&o_eA`ddP2MZVDxLQm?2L8r~c7 z=u$d$Bd!$m2F1^vcYf-j#TRa`7dWbjb8*=9eIP)JLEqE<&J#{qT$MJhF}GvwBF)G& zPWIt#39H@b!OYMXtu+C(rx1D37BUaU_@&u1Wl2Tcv9jOA)5ZLI*1K#w`|I#(&exGH z^=1+EN%|+2d)BOt5V2r?o?9)jkpDcMksBJ%9KVOCdQ7VWv$J`>qEewCjND+Eu{S?c z`A=Sag}dD2sX;+!ow z-B}yCGIC*bGX>=rKuk&6s6W5s4jr^QPPmfUJMhFn_sG(=I2mUl5+>kDhb-ywN>< zC-dCW*!Tey=}Fflq=4urE1?@f;t;G71P|8YKtvArcd5xPJy2$6gtKZpvhkgHaUWKGsqXj+`Eh28%k5o;sPW~In?;@)uY<71)EQztLo>(ev75V7P;{ z*p~4XL@fQ-oG3!X51s-8iMS>7=`0jeu;9MIpd3=}gG3&!m+dPp9<6uL2jetCK9_eS zkm3)~?9Vmh>q}9L+x4Gsl?PhLC2q%1B2sIs?W2mRu*FI~728(%p?qVF(AE)ye&%{b%4joU*|qeoplD1^U%h zIfJ94!c*k$Ez-I{Vt=SxXkb}&QbCz(+fLrQWs8Vd%IQ-{eWqrSA-SU)6OC1Yhe&gk z4N_63@j5i_3D%J zX!QHf>1Wu%r4tL3KF{CwHJmzz%3(YA3~n-P8@{Xqmh69@FBgaH+?pxYKVC?KBj4EB zPTta%M(dk*{zkI>q#_}dVn@>UxJnWjdyI{P5N@5*_SGx}fTNkr*L*%-?1v&N@{8<7 
z88=j|>?`}BU=h0IE7lrBJbH&80#=2Mhwiq{{<(uDWnBio1tDfY~1o_ zLPx{q(pqB_xYHxlCWybp}<8-K|5`#1V(A$Ws@%B7*H59^V%=OM_Xs( zPsuX_XvmDloCfWcmI4L#r`LngX;GH0y-9mBv>Z2O#LJh}q&$(g-GD@He{8nXcB>IdFf56>kOKwuj@ab-t;XhDVS{cmrG7lDBZR^c{0f=kkf&)oG}12 zq(*Nc%s#y8dlOj=z0`F?DyfF(f_-0oszqPFD5PEJaywi>kCYXBh#9?uM*3~^eq7%O zU%_ihy*7%gx$02JRQzc$H-4z0vr7K#aD39ZEAB_nBj*d_xYhP!3@;DEK>9b zjNm|G6hN*EXFENgGGms!oegPyILo=dBN)N-Nqyc#j{!$Hg`j^yfTNUklcgiZJMIYh zlTAfftWVUrVBICNwW`apirATpD^3mmI&R^p;U(Z9H8O_o*AEID=h02I{7 zMFPeEB=s`GQ@*@~BS{KcfLHB)dL5E((uZkUbV!dTir-4q}&gw^I*!5v{|DJBfqE)7Le6 zqjj|8%d{%T{`k*5pi+OibNJKZ4Rj`E5W&70%z)#T>)?qj@gex~B&!_6I_bVHfB+_g z(&sGm`@1j*+RwI^;c!+YuO1@~a~XoYxX%)gNdym6j1fnXFwvQN^i>)rx*4RWLrau4 z4w=dn0%>hfo+JriY!GCB$mepfk3A>|eU#_Dl0e9kvG{2yBvipK(V4LS0N4_;8Mwkg zOioq;hy#q3EZMU=fu9;6`EM?ef-$wvgladI@mNPmUq~6L5$Bfp?s#pdlkk? zINA)umk1@O%#8ZB_>qBN0a;FWB0?}QLAOgq1;)J~m-k;(*Ujw4pNqnlsC!WD>zdt%jG*2~G>07DgZnk>%i;HA%gk zSW<7&;~$|Q0GR`N1v4NWG-GyqK!kg%yH;0z=jZf|4% z$f|pWqJXtWWN4hIuQp(lIB}xw`T}TS%bD-STv2@i-kJBu`q4`T>LoAv+r~qlDSqBX!R0 z2d4eS8x_g4q)~1ixY#~t;{c8S<>K2pvK1si12acPl4q1{4zwPo6CQK6aZ<~sa?UYB zLwBNRv;fp51)Rr(bij-|2hDc*pC5U+F!FY1>oKj@ zE%y6Gg@-Ej%l#3%xYj{#)IMNAK_=B5=<9CG0cy1*OraD+iz=j2z^y=i=i;-&ttsMv zb~Hq>Ga)tis#0~Gw>uGMd%H;Eh&YA>ylBk1G=}e0lx+9A%M3pk3@P;odL*Yr;fuWe z0y&JX9<*{i$se2?f)ty_vo5SdbAak;?wNd`K|$n)7ylV^&no-=%OssGk$POaKll0P z^TdCkJ%o{`!s>icpF*bTj8>qW-?PJ{?P*nZ*_>n+Wu&npNTBODr)9f{K!~q}T3?oZ zpHhzXnDh=!0=`lr|FX1H2C1br6y{>zgWGy_!Tuq7)8Rn$>LQ-^L0kKxxC; zy#D>!MHvAbCjb@lDQum>L#EpF}Jpsk}?uFJeYLa#IY8;$$3;2gpFnsh1`5;*upmazLk_ zyYD$?1|(UVS4>pF-J#_evipI@3vcn|aDXgss>VTP0_Js1^5rMcWXra)=xfPR0xC1a zUw$Z>kZ36BcM+g8cHJgLa$=Yvul?ITrlBB1K1~%oPd|$f4rt1?)2k7F*zIKKHb}#pk zsB*%8=2wl??)tUwlcT&FV@?KWUu6fbtRcw-tpqsJhw~p+oih`PbCE0_(~1P-t8Om3 zWRV|Ja{##U5?z9MG;e!I0mj^B?5?+?<)^b{+O{VW1I9vfqSvxj3>zJ0Kdr@^EBo1l zeQ-<1xxfxl_U#GhNFWX|ffePTg&vs{jv6~PeMyMVE8pv*I7D?xLA0R!#j%zEuF1`| z%ir&pecfpi_cBN+Yp`2>a1p47(P^AJcke#4>NFngG-k*!GV;)-ttQ3*dAmyYB{?}E zH;+T+T@9lk4vsf9|XxPCjEP6At`#kI`2ZHxhUJ0~d 
zUkhBnODcOA6IW0pPo2irePna{;w4&x>gwu_dWlU5`qW`)KI6Im_a8GH<=Suh)W%nx zmCYbguf8_6C+J{&?A6z|6Z6Of)y(ar&{}J@b-4fujCxg_^zBB%EsRBxV?HUpJAnX(pEaOgkvu9-eM*9pVAaECx0g&I@9w)&u zA2cvX;=`wa0vyS|OjhaOyLIc9$?F~B8Dzj!C`n6jcS!v#jnw;&HJPM|S?w%>)G;G!rd|34fFn`*4S<6$^7V1%KBa$MEu^(| z?HAS7aQ{K5;LTfSP?+0PeSQ1wZD&`$Qc+j;ZfkJ(fzJsbeoM#Z{YdCTd`!3IwE0bS z!oG2ZDDFIDU02#}kW>~C_HaiSrzi3We`!sQo04pj=IL^@?{ZZ1_sTw6jlJCXDHNr* z9nh5`Ju)nZU+Fk?n1}AZQQJ08Xh7}**i^;rV?oew&(86XLs_|(g|N~i5$-Hc=c_6- z7@Pi?3iD!NPy|2buNyQ*-2MVY=vvHFfVf=a(x)>M;T6vPMQ8MCmH& zif-jASFQxd-Q!3=o=rD5V7+r$M0~vDY~XW!YW;iOgiNx|m-dBl04kZ{%AzNKjvd>& z9Um9HS$tf~&*yhz#@hfLMiKX6&d(mRjrmkS6{_Y2vO0=n^lekkIJw zFW9K4E;XNfkJhyeMI4gM!m*j%jU}G#iXL7}?`cFwBIG(Z&bPu@EXgJ$Eh;6eGI14y z74xd#o9K4_{Q0CN?<-^@mI8r|S{dOr$956kekBeId|ElE;Pnb5SI|B1(aRDv+aAa+ z)(RKi2F5P&`U8-oZzfS`)@1ZmFsk4=8@&oXMmnfQsa!M+$`C1f^M3SQAWtzNgHpS}VvYzO?$El6bGML`i<7s zRwG$LMpJu`k4>JJkbxA}X|H=(c4m_b$?uU#HZh-a^utMYPstl^5*&NkyB2Me z9(}XS8F~wPlEUG0N4G9zQi87HhB7~6~kqMM;Lmq}KpDuCoP#{MgBD+wdc+qTMl4NBT zxd_SnL{uu6WAf01l)g6QxxdBa7wsQjOOH$MZ}GHuBmHz_So7vp)yfZA8I;CFmYur3 zT?Cxr1D|}8i67ht4M61vJ0RON6H?GeuPv&38ASO+Q&aFAn7xt#V{H_!#Wd}{s%i7R zvn_g6n`=mr3p%y)YP)wU2~APawmtP<%l3so{5WoNyB#u~o$Yp8K7RDqM(VMrD>Vw( z`xwG%mpW>?rS13cV}Hi@V7|`%$^W#VwitTU<&{O!y4p-7a$^IZv8F!zPjy=b z!w2n@faM0<=v#XhKrrnUuwCyO;G|YGRWrI|R472pPElRGw^{QpiW{=Oc6WYvH)^pu z3TPzV_FLPnK!VJ+>V(?jD(H#`ld4erb$))-_z|QrhLoL8a5^y=$#Aqq@AWCP0e$^E zXRckkb(`yDnY(CU{SBUB!dBW?E@87EZNtuSrLO0?tL^6CZ1lZoPOmxQ8l)4k;q@#o z$u4}=70AdXy59ay^43O{2h`PPp`F?Isg2wRro##KX6?lgikmG}uFGp=jM0gZAM9q| zZ3LxZTd8G*Eu#(cvKM>bl_~hq@RE{Q4q_USf9M!f^xrlBg%2rNOrlbo3&yhEYBG0w z&c@!Ci{63u&=$2V z$l#Ab9gIxAJ%qg19N(h>X@r-Bg>l(gWUnXjL2fYB`?Z))M{Vk@lDDl|nr zkXOZ)aEjwhbeZH9#j#I;&4+E;E~xHlgNt})*U0DtW8Y_0pC7iZ&+qs>s5U{m%fddn z+q-Z~mS=$4Vy0z= zu4m?`X&M*WWVYP872hU^l5%UGmF?Vs7|eMvd`il#EUJ%4^=pOjT)7ex`>#B^mVTr7 zc@Ww<&Sg;sh2<*^dd54Kc~ZxKHf3?|wpu$)%SXHFn$C@+sn=)@=EKHlzo3F=^l$73_CWambBk}he zk%t~9khXoIcU|)xx^S;w88W=lhojhM%5+ULo>Oy2sz=^t~Ni| 
z)X)bde_v>B7F8CJAZr-fk$d)BYF&zpH=p3=7+J37>4LLfT|%D=hjjCqGHjPU2REe9 z367>=NKgEXSoCeJH*@A3O>=( z{=wG|QVQur$a^)jbtRJu(RLsOud6~3Y0I(dyszEA6dm&f^He&?!V^B;^Qi?74Owse zLLBa1VrAk#qsT)vwKhK;gAN37fRUM#nw{eBW*Mu{Unjl9oHoiaU*=!ywf>0tfQ3-4 zGp^X;IEG-2&HnYFaE}PaBwzvojZ>$!ume!tEs4-kl7&W{dV+FyPM@OTLdWawvS#xV3O`dFQ4lXd>le??mb)Y87Pspysj#S5 zHBRY1+E=F}@8W6OpiBM7OmHl#j~N&=`ICw+bvFwYB+AI!qKMyo5*xDD1q{C5JT_!BVF# zr)|xzC*nzRom#92q79!&Ap=wKL{)4A%aDcPh$GZR>B4ZWRjNzPIYuh_TNgQmME zSg3oCKYZn?5jRiRpgFhQ)WY)t8X{SqcI`m~WvJ*Vut2>SiD4Xd^5)N*hm(Mqu#juU zghP;8q33_B*gKkqNi*=k_~bkdd{$i{Xz;o~rAV?J$IX~T{^e(Wk9i%Tor=Uzz|;@p zs8o-u%T#X1-Fe^vqb}l<1A>Y!V;9MN!DU`Lor)g2N&<=MhvlgFdU#ijQ@iVU!N1L|_k7TrD&j9W_MN-7M5QMD|8x*8*vp{ku_aR4|w*N@KHJpGOU zl_JZEhdMh0JX@njHHJZ#fjVV1G8JeAR$th_x5Z?47F2J@tc!FMEl&@isFd`xs<}=* z^1oUiKItJQIY9vc_gY2?K_NQ9eD+ObQjQ_71&Mz&TO1CLMMF8ciiCwoAYvtCO&}C* z$B(;GpD(L~)Ky8)M3JHEL%5&jM*?clrH48=9Rzp%P9I;B(g(9zuE>?VNnc-m2=(;4 z92yFuTeq%42B}NaH8Mq+Vx7rnjq*|;Rd2369~4T}F7%2V2UM$N=V1FzNj3*kmQN3# z$@E`MW((5pCg?L6wq+$Z)=M+qKO~ybRa}rVm1Y{G0YXP89V3i<1qSXAQ2r67aWuei z6Ovv>dS_JP!1g6L&AN#&9rm`${&KR&F8%6cFRwxgM2gT=P26>h7aDzVKRYbSd*Xg;E_Qp{t#K8QAJAL&XmHmM^!&+$*{1*)23P3R;5aTJSLWpwqpOFH zHG5c~!M{0CGnKViiZ`vrKfE;_N-Q@cHaUbJD@lk};24TNwdMJ$bqT~)#z;jKRO3aT zIXRE{o*#~ito)FuQ~lcV)EUjs?U_gjj}*qm#kA>9kX!xZY@4H(crRPskDu4|SIjG{ z_YUJFHUc+h84ZJarT+t_0cnuJ#a|nL9IOJ@y#79G8e23>?v+V{f+mfi!~t{-poeYi1SlQ+t=S z@l}43WUZ=7^~ppZTm8etB@$pcXwCT_9bZ=AT;|tgw`-Zi+3nvATsgT#2ZZnzVyrs# ztpplkIiwN|`D4tCi%TMa*Flp6o!T$onf_F$xXgTU*1$b5_pc=skk)_#$fG=8F;|>f zo9!%$yXM2d+X@v!$V`?rz|6S(fhS7aK`H-*cXc?(E#~ImwmYbT*Pzp zXXiIYk?!9tS+UAL>3Lrt=1MWtL`66T0B$l`&jHVe)LNFA#0~TcO#zl~$LYCWpH6=v z6)WnfbJ0!cS_gw(P^o9PXdMvh)&^-n;)5b^m|tBHqfW_ zZMs{Cj4+J4_*QdhBR^x*0f-~g;;gz`P4*sb`MOcuq(Gi6`8bhADy+KSTAhi6Fa8mv z>Wr&cMU#hcw>t>6qc`v35t?HO^iz>sBR1cs>HxUMohY_hMP;OyZj`~J?i z7^6(Jq6~l6V*6g(!HNM%mb7qcD}JzX4=kY7ql(xAQ! 
ztLFjcvm7hNqwIuz+Vo_1iPC*wk>ShHzrMIhdfr1Cu zWQFEh$f~wMQ+b*-?Y#>ZpiMwOL2o0eaPQ((L#GMhCj;&SI==K<7*xb4d>m{>35EiY zfLJG8j{t8Ve+0={3b_S6BeHha;i0&GX^6(E;AQwv3B~6BLRy>DmEp5B{vx6#t1g3H zituC5y2--7y2v`tCaxJPVDslQQo$?Cw(d4a$!o1Qbu0n>{cC=*c|DXd-QWR$`wAxn zZBH0Qpp0DB`3qk8&}d}BWT8Uo(e48WrqW;zcsm_-;AjTb3HsiC07ws>>Wv3UX%*2u z14b7Y-)Oso$jA>eJOU>F%112R#-WG9nm*AT6EB8O;{E((?1Y9H09e-p6i z1h930$Q;c+qCMFd5Z?khIG%+Kyl=u z&V2b$i-Xb5oi2OE9M*+|gCe~l8eF1N?ar)C#2*J=HId6!(p4!K<7GZR`$N#9eLuQ+ zbpMUls5{>3l(t{U=4hz3!GUp-HRs;Yk+K?a(%4h4wV4gTyi>r0rJ|zl=(CYQijxyw z`53M%0@IibTxEs7A?wL+p$<8^E8 z@U=5kfzam~o>n+9SeEvm`59s$N*zssg!|I+;RUltJOocGrz4CU!~r9)YH+x=!- zzhwH4*B(zeb8&m>+1>JkAD>=Pq4gu30;WC$TULSQxe?R&O(iIG3$F21gj~Qhlz^-NsB^ZmSzw$lK^?XP=ky;2n&N%-Mo-@zW&6zEM zAGOZ|AKJZU$~G5S<}!VGmc#d-cjK2IeEHL!ltV^5ayQV3bi|{5A`_5@|6G=7H4Oi$ z!UU!~8UEvcUwYUT`S*hS|GXfi`NWb%dgIM7WzZSk+0y!ad6vtRWjbl+$1}Q(KyV|W zAixX6Z8M0Pg$}2-AO=;0vPS)r;Ya-*-beB!II4~zms*Aj0G!o^J^($P2+IYR0aGn| z82@Gb<|uTn>WLG0Kbe6WUC~wcb0SN!7PP3BU{sP|W)-(JqoexMubyoQl_r;u5RF=%$8aKL=c1ppPJiYPb$kQ6xi$=5xYE~ne6v&6C{)7gRwy;?;i+T+8JyaO%n%LSWO{}>7$sKsqK=}G)}G; z#9-G16TQq&m1$#{{G9j0){!?<0?m?4Z~>-RZ}K8)1Cf7&JCkT4LgtO6m#Yiq`@6DS z9$TMZ=ZrUHEf35H2z8Gc_2$ggiFdw_D4T(jg_*=A=NVuOw_nR=scf|9F# z96xh1$WrjpSeH%%A98Vqyz{E5zZ{&1dZ($~4&@#L#A+yt(gpQr+Xtl)*M^l$y!Ddl z(iR3jfTEc^`FT+~`mLr~DnovFk$`+;1aG-q1#MtA!Yk029k4TYw-t@6G16NA|A-(| zrW57A5xu{Bfx2j1sWHD`z-V3G=EsO}g8bb}oF_N>*nsD$G#ocxhVmFLlv$)PPG&o; zDOp)^osnI+j&hU9llB*_B#GMS<;m@P7z{kAz*ju`@*ZFR%w2?3WP!oErx#1o-}1pO z9CWnenj7TPx4AwhFV%jq>#365BoLvD$o9dx)C^*|?qoY_0@PkQO}Tvv@c-mph-6j9 z9^TbOV*G+`{0Bnr5*povSg*ugq!o(gbQLIE;^5G~IsaK%eiw~x;Hsw(_?oC-Y5D|3 zLMz^$TsF4!b$|WnU;dJ-j;~<;`pwJax7C4!g6M_=Aq}_g8jQH_4&^9Z9nLL*U_N1tYpsBl`vqPi8NhH1 zd*qUg-mYDAeWYdWVO_pg?H^Vuxv-B~Yfw1IF85Wk(kyTlwzqYhG5x&l?u$R~dcYbL zapy>I;2%fN2cC*bh<|4%t0iulD8F}G{WYtV(fYySk~{t=$_p)xtezULv1fyZ!p6t@ z6sFF&VA$E*JfYcg=Ao1yR>Kk5<}WLmoa^&#Dw?fQwF`UGCH>MapWP8|3~1_(euk&&0-9YRCE4rtvf?Fjd}a^>CeubrWX z0BN+WZOzYL|Lx0P7SPu>{CLYt>x8K-*b7!SMs9{nXxTcfi-Jd37f5F}PT8hA@xKh& 
z=`W)^^107|qf+n&BF8%L_HB{+9tyHqy!jNp^_*@j$YTOfLUuxU7vTs3mf1bys}}H= zPsg&LPWB^bK+}~VY@Xj?Qk|Jm7^&MP!TqP3d?_B<(j+zs)%@`A{-5?_g7a!%=tjY+T*W+L-m&(2z4S>29TTTwmY}M ze^1fG#KhZId-raB7&jVP$i-KS=3d|^oR3x5Q2xV=^6$r6O*njd?3#DfbN}{eg!iP< z(yJ$$S4z>K)U=97mO#X^&z?^jy(($c%+}dYKTSRM)Ot>udca(gW%%|uwiDohm*@<1&JNm(YY~vAW#8XH zPRKzBAN788FcYUUO4=Vn)S>zW7B-g>LMJGv$ToZbu3fpeYbO8$jIjUaw&jIcT;J$I zCD&K|_Ks)=_$6SumCrYyN%#M6%)Mq&_@lvFy&s(0vp|oV3SXzcOEm88TZ-XltA%#T zlxfpqIX^AQ7XRTRyb!#-|C*Mu$ttYS^hZBt&)&P3xH`eMuZ2EwA=;1$u6Vi8xml4F~=|BCBKJ9WLZ+KGc-(5B0snwL{)eUz7GCS#+EdgVEFmkrK!P9zRepzZZR_Ki@$|2Rnu^_1Mjwb2e zhN$XuW5u4n9=YXmS5o{7CXBk>U~h$ZeC?IJ%Fd8!kl87~voz_|6|zt^DI8!dS%6VC zGG!Cfo1o8rA|t}RgU1eR{!L{zZQ1Bn=D+hJ4cqY4fu7B`_H@tqhfgknPae}oX0yaW zMxX*(gTO@+W5)D-8x4HDMDc+ie|UU=I;GdtjgFg=Cu=oaWL$8c-|3;(J?v4p0AcWs zCCD@kSOYIl<{U6+Sc4v5>zXOUo{st0Y%DyuL(niht|_7ZMrqr1wv7me*RwSfEwcaV zTE?S%oJeF_jZ=C1FDi#bHIZc|fzNhpX!vA}Oh*FDaC__C5d<*~JnkTX&+GjFU`waN z9Dqlq*^7AUS^AI;IRJA82v`FyPqb|ptc_4>!n%_5Kb*+ko2(k~UFxVEFLf8Zl8o+J ze_=ovB{JgjFgwilVp@f9wEN! zzhR5#@)Ps{Nlo8$*=@7Ow&_UHeI+cCsCvdYaN{<~)V59D(yi(fs z=(e?%79&1M-BWyc(*QJxfmyf_)kd_m(0*jEM)?D1ko!tS#n;9t$~BGnMF-7|6mmvB z9%tbdM?NMo42j`CpELKLpB(rgc{Ho;Cjuf8BeTTX&9@V0c0#h!@K<25X70|#SvB(U zX(iVDh=;=;%p`jF&o}?{?~}h>lYg(qw`&3H<-co#>vH^iHMlOuziWg0V*GnG{=FK= zq5eH#xt77dSA*+f{JS=|FUG%DzV`KSN^q<7WY4!4dBy=99{Zjw<@vXcq=zV#rtpgZ5GljVpa}hgYm#A9ubb);Dr> z@2qmL^aMyjhT46QhL}l=UKp;5G1>xaPwb0o8rixl%=;;Q{i04NR%MYk4$5;2zyMjnhfm_KB*B7j0z=)~)h15j2g zKv`1GU?HtQTQ;1$+GjzTLDc1mVd8SHRhH|fxNQr0jrJ=^hLS{Y99=1px17n`l}K~z zyhqM$=K!!7@H_D!$%l<}UPZtGjXGaBb8OUF5p|jHTdO#0EVM;_Dk$83on8AGRX}QG zn5hdASxdQgWQ*_VB7FG(Qp%)-K$i`F-zU89`P4t3Pd%o8j65k%ytzZzPcTg9)3}}( z;|<`Ay-c>|EEW^k^+H|DcQcIZ1nvP({>Hnhyhq+~W^@r*ikE`vAF; z0Vgbs`Kfyh)u2o;?emUJG%Astb-wD=2lGDMGS}D2(xMDck(hlz6(7rgG4eAt(yhKg zV3L%<{^l$!6AP(vUfrSCKP~ZWTQSm~BC}~=9ZVA)G}MR z3J$*}SVHspTTB)hmN==UJx~+5cR)i!kTLwm$YLc_+!Qyy>FI2oMKnM`j3!OMUS+Na zY5&pSifF7(PMhp^jJMaqX2lI{$=xs^10uFF2%pm1rz7P6+33?BZh2LKz3EO8t56D? 
zMD$T;2X0)yemw@(^M8as39g(I6kQn^z^*qTa(U&tBbMP~F^mkP9B7D-`PNML;I&hC zOV<``AeIz8<7#f`lNPw7 z-$)by4d3b%W}dHHwfhmMAtX#$9ei2YC0sKxrtOdTJXja}1!*K`lP#pL9EYH+)7emj zxp?CK1>s@IAcD5M*}F4Osq#RW+SS;^k|%`O!I0;kXH#@ODGJhPk_@R$-d|POeWf+#3+R2T4&;N{Slx1Cz{$MUe zVxh5R7V3mTAt_R2ens}oMu z$Ly_W!-dE=;-z^&c-6?$JLUk*{^M7Ie?qethL1gY@-cZnsFH=p*W6$LTjFKrdr4)q zQFc#Y9+YtLVpE%w+LSN%GMbi{xXvI&KoXL?xb+r}VNBn?=on)PE=^0c!;M_MX&OiD zZnLgsBfhzV2b{j890=s3EZ^Ubh;7uody*c+2T6O9GI)Nmhl-rU(u2LZEWuzNg53)`%ySH-^9XTV=LlT-7eMr>>aB`Bi_JqARo zpnU!UE$j17_uSpr5M7|P(xRb|@LFy&R*OJaa3W3g6SLps;$Mz>ssrP$9f&<)tZJrdSrfodMViZfqHB| z^+pnwl-?1W%y^?C{RpiGjH6sRt8(TIh)g8m9Huz8Hemorv2-ZIOe86}DCo}(dpLLi z_#JSSF`b9PkuESE^o>Yi)!OsXYvh0o2|2%m&R?MV-c+?kAhi62w-41yPI%ctLS3}@ zFl!b~8FATIQkTwk&So7_u_R#ynur1vPeny7Au2zA6matzTccw-xgVpn8`m5SH`8f zdk+x%D34L~SeUfyJcVk3R6TquPUzw43j ze=YyMFXwiS4blW1U=k%$d8i%Cds(dg6JaDb9=05kWLp6_gZ*TfK}>;!dG6dfaM$$B`?JL> zCJ&+Vxvx{eYhXfR_Nb~}U;B1H(t^AD?(MHl1<=!^`@I(>F!0~?Rn{x>Rt_PNk`~$t z6pII2ga`Q{CaIs-Z$qs1=Ly)i57xy;VMUOPJrV7L>YGys?Yv9Ja=#(-_rytOB?8KU zsDTi$$FVgNRq)QVXrO}^l2MDa$KHeFA0x3RdsdWsoHIk9Mj}R-RAt$YH(k(Ynnlv` zu6l2Eaf6>H9DwZRQ4UbFV@4w46$JiM<#2{nmQZ>aajceJbpf^`Wxq)rRUFWynuu`Q zp?CDKj){9&$w_U%>NFq*8F%*68 zNVY<;Z~lvG&QYj7NZ3W)rinnzq?Q8ULyy_(59a+U>q?rQ*^orTwB@ONrmnRg8bBrU z)(}Q|>Rnd0Z;p}D5HVr!LiqiXTRA-I!a|EM3k%XR-Dgz;ns?9h$wnK&V<>h^a@UW} z0J>M*rc=nWl>fQc@(6kBlEoTn(y>>Iysrb)v=Lv-hLc#~P8gxxy8_ILG?1Vfn8MBYdC|m^YpK+Nhmk0i>tGIb7Xf*dD1~ndN%*5XjS&o zVnfCD!B=^^6HQJ{9$$FhU4$ntT3k~QoXF2lYZj;n1h$TqF>WN@(p5_5DD;9Xr#qH( zw!Kh0>i5F$#Q?2cs*fa~hp8^3uGax+(Y5jAz=|=K9f}zws^H@4TGC)gH=u6wy@Fqy zKTep6JKC~nf~TF5=1lc+KLLF^OI1b99r>#h_+|_AdnPZvr;T=JU7ODn zT%~aq!ZV%39O{dqjL|*Szn7_`mM)DMlf2PD=zKj+pY47!l7zjba84{qsFcl~AoEGV z2EcE3J7Xzdj}oIqQPMJ1Du9l?jJDoBRn?+*XE;q|B@Io;6hsQ}=|?dTj({mH@>ZuC zEMAF5zHdxz*e7Vj**hS8PbF38ev{PlF})L>R|j)1FNJle`xO;Q3~nIn`e}8=^xT^p zba7gXO0D))&LZ{1u$<>U{>mZquWrpCtRmUUw>Oqp-j}7+T;t?_N10@=El7uq=$=8@ z>&>oCT@WOyYRM92FukHTBBJhvhZ~tDKX}k+r%wmS<$CEGVK;H7j%ccb@HlSfn#CkE 
z|GM_Q8uEDW%qHaIfpEjf&RJwR0v>pF+z@UM=X#L+%MJQfCqLY??~8pTicQqv#nXaN zP&CLJylIHA&d=WheNclA`=slG^UD!1)og0027;o~dfQS_oqCB<2*(P_!!hCErZiw* z`2nilN%cOZpN6NDGjTT0nOLx0+T}sx;;Q_a@#E4PHv{JO3(TDKYS*fOu&LAasnXD5 zp!NN80(SpjKRcQ;vyopmAMk&_ly4e8iQ^E9)B6P<*PVGBejf-*Bt-e;y5?hWlY(D= zAbx0&J_KS7TH>iA5z}Lp#?nN%q1z-qWqxAi`B{u77>EV={uiAyWRY=40=Qa-6Wg_R zfxW4!y)APv?tq{=;vI2o^*C!)=(_vK!&7KFq;kE>*BddQPacTmXEdlmZ$**m3`bX| zLO|+cd!I{jePmVuVsoSHuOTQ7(@`4&Pn{r_kpF4Iv>1_z;t*VG90<^<>HAqgBpt;_ z)ZSV|<3*JGlB!m$cO>#nf_F;1yRbn`Mo$l@#y9WIfbHsCrn&cW|AgC{8cxU5V55@| zT~ynp+i`Jm$x3tP{*N`{sJS=18W~JkqNVYD6Ye~r@K3`MfxyPek2oxaURr|r(f|E@ zbZp%VO7P2pR(9%=7!w;7Vb^g#TdEmyOzeah2+!7xFL>6t_(@EVle&JxVmYtpfG*-; zZeDiYitQ-bw>pUFMw!qZfF|{Gk$dEN_?mA zN8*g0nq8=hyB+qd12x6EL~E?R9pTN#*3o;s748u_b)A>JYOX*XyO-hDeyjo$BGr(i zia-H?Ciyg=z*^V);^KUzO;Wz)O>IbBn?-L#Mt#J!xLgkoflVN6jO%7|wPF`r-F@J-%P%@all zcdw5sCP?+dJNVmUjy(?OI;8da(TEdkdXq>o562~wdKgb}sjo=2rz#;^ZuPuCsPnvJ zi$i-nuubYd-tS1i<&2l+>0!|~q^v)^{jZ_La(ZEN5mA@4{YGxl6Eyv^mhgNR19~%T zxcq3?5s3FDq2Xi1Q5BhN3F^bT-Xu3PEH}wVASSk4_uapDWegM!ql=Pv`1V7LFEG1_ zPGg6hz*~^GS)U$j0}HZ76+@ zz5O)3>*}o5l{}?=32=z0|1wBy3+jL26B3SmZ%pPLHfYwoOtftFm8wUlvZN_$`{E{G zvCG923Iwr__1jC&{xCYG>qX9qhQi1l;cSRr*Pe|VN(;dMr|T`5%oGe#KbT0Y`kZ)d zx~IRT!LGG&cVXchm;dI*peub~~%>J2hhipHm)3OK^(oz@^l)+zasOy#OFF zFa3s&ScklF$m;pjT>0kDdMF4BFX$?%7|5^V9DqL%w~E^M;hK;$9Jnv-hl?<%grDnMfb^ zbDBcOj2%nr?F4(T>mkcSYCcnF-_N10SPQQHVNGL0nThn?QuobI;iRd_tn8F9+!XAx zwOD6+>*2=v@=uQohI4+?dvCLep7!Nop-ha`kZ`18lUNvkAo+|*XU5UY${yPf(z{pn-0@-m^yM?* z&&N54qcNt%$3rr{8!fm?_|WrX)16B??^m8Vy0gZgWKEe8aWzS@zeo_7SRh3@Fw~@| zry&7(Nr}o0k{lj+Yt;9k>z$k@%@h}EAvhcM<7~Lmv8+{MiPh_K5*qB^m0dc_jMybk zT%Unu0LDS#RwQ?DsFFn|Lrd6MfBMS#oF!E@*6x~y$ zei3=B^v93iU_xX~I8g-PV^P&Ff!0b?c*yGy)ON1QoMlW76heNn7;O?^pc)-Ac0$O5 z2P>%S8Coe%S0lg*QA>Fle7Cg*bW?6~cTWTOslRezJNnwr6r+f**Hy%}n~D{GU%O=8 z$E(gy@d3T0!49k8nA4b7h7* z)J?(t5~e|klT(Uq@20_leqC61c@i_0LWDJ^Nix2d^xNihfiCwt_uGx|M}w+edL^9q91O+<2*K6CZa?dSyGPR^>^N4;-m z6hhn)qC1D^`IE>i4HSO6S#gF|(41*KJ5tuYQqOlzYl%E3#{2n&6e$2HJR}DYV&70J 
z1~vHse-(_~QQdnsLxXbYS1xS$@G)D`J?)l@O9_HnaC7k8VXs9J3MDkOBk9|tY(Jq| zA2Venzp+Jnl%O&UYgF2kKy>@N{{nrs+-Fcv>Oo?ia7}SgH{t{xL{)REdCy}hrzhy| z2Fz!M1JL6&XkKY*M4o_TeMg2Fr{Vd_tQt}dr@1K#5!Y;Qai!0bH4BioDVjH6kx)#e zp_sKN6BmWl%C2>&7TlQSSknKlB3_%jsy=bBR4PnQ6>6RyjpPm@Et>&o*N)Vgr+Og_bY66b; zkyg?3)uuYCbKclzWLl-N*}QR-k&DOk4^3MH*G9G3j=gou2Yvndxxd)4D?{}l>#^8& zL61f*eJ=cB!{1*>GZwq6P8#gwsTL2ZEc&dz@nG!Ix^j408)5S4FK# zMr`0!PA9ilxU~>Bl{(?n5yp7mI0~&>va2j_V!q*Kmw_zJS$D-WGb+2}hpjb?w;$}i zwp@EQo$*x^0pJi}5PeNKq~!QgdUJ=IW&8I{y8m~|)kF!6^pRknX~0Ye#Fy+{8EY1#}Ue|SmniswzeMW z_bD-(=~yq<`m}UB_bfpnimC8h$x{;S@aoT}#b#h3w%+=fiuT84xRK!T$p2b;WkR#r z=_~SbqV^junW>lCBs}Y=Nv}KuPvrLNC$;!OhSm%7bXI`mm=bH&994YDs!Q9%oN0zP zZyvsEznX4K?D_hk7mc_^CRo;6-WVOcoVxzmeIztJ!uFa5T3x5x3pr!0ZZO%-&?~YW z&W|akmo!T5$UoisY!PzOysU~>AKDL?#JwnvsqY?O!slCITCWG&z52#))rwxg7u#~n zx;jYj=)2>;etse5kC^>54t$33sWJ7^yg+Sb`a7HOyq)mt$g1 z7P6hw8PcG`;CwzMU3^a0&EoOgb0LQquq8ntB#jLWjXLJWxt;$l4xN`QO2=tJRidjn z^!exF71ArEBFr@ngU2K^tg2sPTJI*}l&)b(Op_a%z&OrSSo5 z%8YCO4{cu_Q1iOB?Hp&b*<>S1qoIgU5iOc*A|c9DC?OPS(nxA?Y|R6iNm1sZc82D` zR;{HBsSu57q0(e&9@bjl_56No)ylTt_k8d7z5eJpTeW_}^W4vUU-xxgw-L38{QZ}Y z8yA!nFYlc6a@K}d%5Qu5l`#ijgm>o$y33B{WxBe(K9wnDr^wrW^^WR&ALjNb-I&Uc zc2g|V7huY`_iOu2ic1Iku^-2k9S`H?YW%rKcpdnk! 
z=8E(d-SO17ZfezUzpc`+Wrl>It}|ouWKnr%U{L(0bu=kR+ioZBcwL+h)}U z{h8hJUK_3^-VLH40At0^b~{U!TmSiZy(yQS$!wJ!W2e~l+xobz5xm3yo?YNk_*z9_ z&g?wJsPU(pUydCimf<`ddxSA@@?@0q_seb2;%%5&k?8@BG9t8@(!^Oc_{Aou&nG7cu$?1`~3LMIfI9J z7~c%h^sSCvH_Wp9*B!&&mD$c4H*DM<`{N<84%-8Yag>}=H_WvL}s|n11f|6S#*oos(D()coHrz2mZV`3(D&Zv(Rm;$FQ{5idj4 zB`64-6c>{e^*^0jH%5(mPZJ*mJ12#v#)dVqo7OxE&t4|$chRTL293p7*TK0PtZE`H zI8JWHv)-IDnrt6bJm5$QUV?aFVDJzQjPd6D!P{YoX#K|!@|F&p-kB)T@g&P?C4S|7 z#8pM+#P&)Lmy{D9Z(Em+?8>QHy>LOvnR|CwtH#`Nyma10F7|kjctw{-UoGI`Ns7(2 zwae3}<$Tx_nd8r!I8OP@U2fU&F?GJ-cXl}D|6vmol6C3er+cQhwz2q)b?;Ip6*J0S zcd5Emij`xF7Es%O3>urHSP4M5TA5l=!GR39B{yeE6rXia$=w;4Wqd*IQpepjN#SX2 zOVi&_7c zQC3m9ahX-|gJEaGSFw6x?AWZGV{_MV-^Dw(x%F?9!~Wgz3WFk~eLw@E%z5-X{~R?$ zmk(kDixdGMO-_i}Mg8s(w;bSCx+}X%WrL|)EQej-R8sc4ol*3^lWK$Xy=Pvx?5x_k zF4yo%?Ci+OZ(apGy(hDLRu@GaE4aUHgIqkz8^QUbde>ZSRWhwe7d0+5fW3qgjexH8Bpgy`jx~9&L)verr*Nco1 z#JkhUgCjQm%>wT$CjL9(rOEtkr1<>GsnoV#bTDLYX^wYov8K%x`qP)mg-+TT64aF0 z{MmI%|L^-S5B{jNW@ctmWInRw@MFJ)b+onRFnb&YvxqIb#%5}9F1q!yQR|aT$N91h zonr5+Z|-k{n=cO!bczJ~*ZZuhtttAU*Ttt2#r^hA=f@Kr4?NpaO{VWk2B~?s2)O2D zn58XQY~f-ohc5Zd8tOW}I)>_`+x=ctkmG%QtCQQR^NEx35pJFx?bt>mi)_rcD^Gu& z7%^Ldy{+15#FhJuh$I#2!P8Iw;vzW{)U>eyfU8CZW{(TXp{gu0yVe?KiFF#4E*!23 z(UZ_>v{wu{s;zbHyx|5p&#!Nvsy5B#2D`-A=a$U4t90z`wfgKT&rRk3a%SX-Z`#KJ zFjG-kry^M$#;*quc)2=bXf=>^&q`@<{oG z!EL2XMp>|`LqNt$-8a%k|IDe#N(S&XGK2(&qS+}M{`Q`gJRXe5kD1?RD zYbjY6MJ{aG8f7Ktu*2oytgY)p&MOr%9$3nS`iQ5I>OQZ-+6H~dZja&@F$1;I>x{XT z{8DDnjVPZK{qbfq$8&7-dp3~{1M$KjZXtvYMm&Sa&=OmOe zgHLBy1*$2mPCB(7p;YF2rryE?2mf^y-qug9eXM(0G=u_3)xHhTDffw-AHe?71&P5|r)k z{2_)IF~H)HE(Cfaqsag2M@rNBQSsx3AKMvcRO6-)(B=`EJp^FPaU?78qjJ=0mKIK|e@RX<~iT&VxAe%31Z<( zZ#rtLR;?BN=0Srz8&RfqlR!X5XkhkU?Wo5glD?|6rQ^g)IJOl@1`u_!qBR!FZ3f%z z>e~l4X0gsm-DisKXq^1lmkXP#PkB_TNU#Tydr*>?He1YR^$HMpNRWn9p=UZt=cu+QC{HA6Wvo}nj z@ui+;MxEP_1E#K&YkJi&pMAwI;pglS8+NFKmnPYBs*WK$nydU7$-|f2N?5$arGfX& z@spI_or@;NPG6nl#?P~jOkGaJ1<5e^ki{zqn{PDz3ss0mM$08n46KbY zQE6pY4e_la)g0VaJk!uDZDvxf-NR#7zGYa{yw&&jEvh(ft2}*IV3zy>NqJ^RE~Aad 
z#9ld2muBM~iU)qqz2MOXbE6pu?Mf1c1_oCg7VBhw>WJ>>nidm#OBo4I(BdU6D?1E} zA7lyKMurq5cpAS>d>I^iPd3`4a_s2Q1x|Tt+0&otpWoXLa6Gp2rIHO|b?0{|&3xuE zoRYE*Ns}ttVm+?OQ-S*NosC9vv!l=LRx-ja*7YdOJEy*D8N$P!?4*1Zj?;~_n(MX^ z@3-RN6h5-H#^SiX;FlIz!?h=OKQhG8oLz;ENkvUmaLmvS*7W8Kjp1XbvMSUBZ|?*K zyEGU}thylQ z<+!EmcC}O77k>Xvn5ac%o3P}YHvHI=<2LZ1v#g#+{d?MGEH8{dCizyzi{k7PH$1M% zu2{RNDJ&~BaoT3X3Si0z%x0DQ56;^izP$9loA`M#4IVovk8mb+Gc8plqsYZw?zgs^Ws}qyweQ$`{CKOI`-z*uAZB z(qQ<=7eD%h)YNARs*=Nr@*w0-n!vp^_hb7fte43WhSQxHzLRHn%eXbv{$Y5&DXgMO z-*2o{jcz^m)EncQPsFNw;FtdX*pLd}xZtu)+e#HukMC&gsu=F9*gbDm)GR7ujaZ7; zu!$2b?-56(vu?WD4e#;X_efc8Id<%-iH`f-6;>Xd6X`sd+~?OXZ>Y0V9W+sfYpuz9 z&bk|`<&O#xBd`_?8NaEUfMWkr<&`Ghj!1NM)iDNX`Odb?S%^k z+|qA!H&p;|UGzI#2wBqr2V|?KnOU*)Eb3mjDaA?JNE?^k;5Pgc?N;z5LMnW%c&+l! zW4_C%%*XV0IHN3H%4h1YW=HR{mTD(fO4?_(b6uC@zPZwDA6gOBq|koo%TH$PQwLPH zw(&Huo~JYiP}e{`j|&^LNNCSO>qYk?r>DJfrLYuP5i(ywi&vO$kRNGTHzYh2`N>Bq zKkd4Ywp$WnW#$@Pw&%(?Glq;79!P8YmL_k){f)bl^(C4sB5mal%uvvtADhv~M15?x zlt&Wq5ugZfC7PRE)aYhWS#?)fOQk6EKCn&s5?44<*(|3ba(h0?Htn{&EitcNSs*Dg zsa!~H*#*eXk{y*U37@#|e8fo{si^zw$Qv(=wa)CsLm8#}PUVPO%G22=1QNOLa&M3Q@TW}!1`<%~v zo3XQ%)#SHmeU$OLxRAN4N!AE3=n=B(_Q-{noSG+5oZL1ly6T~p(q^NIgB^Mb>H6im zAKz1{LyldvNAoDFH@t!rPh{l#sqq5zd%V1uI_WhC#cp=;$8eSLLj`{rIyyQYs^3y6 z6SIN(uC&dFSJmcLDOZ$E&XY*&NH|?k+_n!P^cEs>mSX4}iQ<%x9We)*qRX_xU;cUN z;{Fg+ifor2ku4XH@im-NGcH6-N-iDdC%-MJ**3m&#^w-U0};cjEAyJlrd zrRVE!@_GA88YR7fcm4C)cPUlls$Y-YQL?mn8RvOB<7;_o%%^*bH>>y3k8seSo5p+B zEXpLAGC_-Ts#jJfZBdZ~1V?=TFk07p<_#(_P#=L*a)XRt%#dBdS#D)+-md!Y%-g4x z&V1s752ISXX#bnyrH)mD?SZv@*C#AE%Zr>AoZ!hAIOU@<$7 zD5f5dlLcyV`i3VU-kWT)Xthnb^6~b&(zy+ruJ^=TNk;6rYF+Ou$I&}h)^Xyc`0*h6 zJZWugxbs%MIaa}x!6^oi^V!3XEq3+rVCS7y<&e^@nkewHj!{4IaHct~I}-_EPL(u{ z=1J?v`?)Y*N8fcWcYy!f(|1mXg12SWiq|N&l*t1H9Q)av6e!_57qigTH7+9fKuEgVIToJ=t zR$DxrzbiP%z46_-eUrPiaeDdJ#H8zQmb&-yz@%FUm8y8miFqNMZ|q!YU>oL9EGH3bCRNWUVp z#ON}osSW>i>U_h}>UpR(|A~rwzKl(V5tZhW^k)@|VZpX6X7}SjM$?}03Q!GbrZ@Gv zPRTVEKOza7XTWXOpthNQe2_c0uwu>K6g%{yp8VL_n+iIu19%53)ce6#2{vGLizf$5S` z5arvFr#Nov_>2ntFIRYk=M}_9_JbASNV-! 
zM$gw6G-$*|Q@N&`ERxz#z+PPY;8M;zB%2{l3saKvU;h#tI=yA%$CR*#i{0Mc zK(H9>yBIf;4Q!nXglR@lRzT2O;of7nKGVC13gSz7NA_1gdhzeCT;4YB%T$ynIAy9| z#m<`IG;%y>hVasMyD7}*L5WM>1bUOwFD$a|r%Mi> z)=M|!)I_ui3V7ldxMpZpsvG~CMuJk-#O`Ed5;OD&6qkKu3$EPzL>ojIQUg8JOY zb{OnMQunjE5@26$DO{zLe7D083vo=W(i)`3`Dzkc@e4}E$K3*UyeH!QY*FAd6)oRn zeH0KYv_!qS%R^{@iF+dK(9XNlsL%a61Id8Gy&y!km?(zaCw!ink;TJ|#=F0-%q@y< zirt=nzf7_1-dvRA1`87_m710q7-+uOBt7ewjR9E&Yp91lVv?Q3!}>qTC~4@lR1&d~1vFd1qJDLz_VqOIZmX}hu;8J}9ot4P>YAFaMUnjt3u{JLMX zLVDhq>bPrGtu36unM+~l8bG;YipT866mY1UPU;{#X2k=w=vw8exfM85P>)DqM)f$yA?J# zQ{Yijejj*5EoE}JxQ)dd=pUedJW|w`_jD!Y6&eOFl2B{54<8g_1GGP1L&8iE0hL7m z?O%FN_f=$hyTXDL{s-tM3q?D1XNyMWO8RNFy5A*cA9=`XF%4xbpF3<%Y4?6 z<_r1{792fem+_yqLOE zbC$nFEiJiNdTAX=xjr752HFa-(E!A4jVX*RWm46OZ}Y$P{MnCfQh5v=Ot3!tKAJ_) z%MD}v>bnXCqojCk%W<-A?ld6Xj)H}^w+pQ zIVnBSNS;nx{=s`?X;7(=#;KXkNu=9Ba)^S(x2xY3 zY!91Lu9*JMMVt*+t}MW$%ktlG1e``%6_};rk3cjdfQ0|s4bBBiQWJo0H*Gzre{PMO zL-mB3Wu@VOk3=Yy*oOfbKpv)X3fKDb9`n~JIfJA#?^h_&WxbAD#REMy!@_%2YAu2O zPa}WIuByeo;m5Fivy$~$slC-sb7PZ~yBBt>w^kBvzeO*Evv3AU`^j9-AMa=VD?uRe zOaiN;%Pn!t-N=Zw@zZ^_pQ-tBU)+-wYwJqlP0RfJ{m*IM0q72_pFF{2(J~cXxH`Y@ z99kmQj5I{PyJCYRS=m=f;@PNmP1dLhft-mVrc9Aqlfi$#CPfn>xAopptf!B;LBU(8 zghsEwKS?Jv!kecMZL$z`I}f3(n^)2kv+K{>2G4H#M(U=be0!4^v6WQI#jnTQNb3P! zFa$k2i=`;xOofjKeN5T)Ecx+s2~`5NlJpBl#}+R!nCz-5}6X1D;? 
zedWjPgMGemZZabun`SyhXm6=P%e!sth7yO9D5W6DR17i%CrMxI2xyw)K{M z^*pNIEzKFzeD&pa$;Gxsi0+W_xo1>oFQv@-lO$E+$CjXa>%(y_AsjHdDD?lAbO*wO*K2H&!BgJT=9vu_0ezo z5lZWuK0tH3kgJUz%`<&SuE?MMB0y6XekyZ(+RT{qpjc8rGxkn3+zCQa@#~VIj=H+K zytvHPW-2N#x<{%IVMVPOF<`QdllD{eGjF%gm>Q~ZNUz)^Ja0df#BvIv`XzTA05_aQU$(4{W33{O z3x5t@0IO);bu^%k6;MKEHNmIMY{C=G3nDbKpK@V5xD58h~5g z?4va~dgch~@=&F!Mre_Z`568>mgUs>5)J3t)>y08QTY zNSS?-_G`)C$$s!7gl!_jGKNOimQHktC=tVm|J^qgBF2=!i+Q|~INs(>a*duU4j-DM zFu{k=-zZucrrNVA`HhvVD@*)X`#dOx{~=;w0qLT8Ead%^N;zkT^V4GOSLB8<1Bh?F zimD=SqU1pHSL1h_8#S5t7vgksU1Bt~o=4y|0#32tj>MC^{?h7v&$7bNe%2F7ga&jo zA58?D8p)$8M(s|~KjTC{a*o-DqfHtW#cE*JXGEjjh zX7U<4p+=L(@KUZKKa1&4j39`M#Jzj>tlQ@Jt_L1N8j>6))|WhV^NtO$S`lzLE37KH z#a1)N$6!WLY*KX~HFxIZv^A?UsNTb6%-~!d_BO{3t!+ZF5Ro?4kCB>1)hRjy85>HM zu&)DpAP`3Dos`Yg3ZvR>B*o}VUBkN7EGYVYrSLo%#L_y#xH+2Y^ux9M;315;7sZiL zqb3B(cJ`o6Qc>ag_0ORk=oNw?($)L?{QOuvbe{;kHVsHvNfWuSm_SY_=tBtams({R zXffvCB@VFB#G*4qQsOs~u<`0T76hFlX2(}D+hbNDe zPH4<7K^JFL>esB?-QjV_BZ58kF7#Lz-ffS-)RbiMRo)xdd(nq?0n&lKP4P<{7aeUIh}c|u01PAGK<0?w29^_R?F ziH8?4>QduZtFhqLMOg9qyP9!#l;oE*Yq=l==3wwu7Jk;}mE!NRNwao1NjiMkRA+af zynd?`;Z3CR8Xfa@QT5Z~fScl&%T5}H)S@e+wWnECH-DSJagz72R8-@)Y8*+DA%{6| ze^<^^OL0)+2e^MqXM6N*U_3_rZiqq$Ha8Q9*A3}|LKw6ZbAL4F&tHbwv#?oG3YA+h2C=Z9sAz0Y zM{^iNJhOcu&SinZE#)`rEc58Q#}muA-dUHcw|*z-AE61CV!xd0%9jQElQFBnrD`_-Cbm{aOqVJURDz_-eFRz|^umSl;y` z*b+U63pbqORF6e34>H0(t|a+?GcV(| z6xwdFva-^_+N4_JMX`l(S|)WZqaMY#aw(%KyjvF3DHXQ1woe~Y>EE`r1#ipep~=K? 
znp=^m@nwt6Mv|O3WKIVm3s7cq)09hSJt7aAfr=*}%k=sahTJ1^O7)k$`!_T+PH3N5 z#4Gr|nUR+%@Z(woXL9 zCe*MBup4uP!#1qI19gUVHJK%0Co*;$1SeU4=u+bqs^yR&C{UMKr8Rpu3)z7F`<#<} z>*B)Dodr_hXc$>B?@3*4wca|lq=mpoPt@y?n0dJ$RYl#C?#h%i);dMHD%#DPNWu3- zjEuH?A<{9WK3bl-`@gC`%=-xEry36B1%yFp!Gxm`0_!cechVLH2X5Mbs5#@qx77=k zeNn9MGasyb^M|&kWO@b{Q-faO4!oPXw+%m-%E>fQ| zPPl>#ati-ULX1@S3UEQhvInrPwjPBHW4vPbE5GWCmA$(U)vdG&B+d1LckdYcW;$$p zJ!?njAEmbXD^BaxJkHmJpn+<$LkKyA-x(%gK$)MW#Ehf2K@dO!HNBSQPp(}(IZGl@ za4{-OSSu0o+Q)Gn=RH};1?WBk8CVhD;e>Yf;w9ENl@|-?PNui1I?_H*TB@-mUgN#N zw+?ZjXziXY_c!+Kg>56qnYOI~sH`XH2N5SJ5ID3mxFL?&?Tw`zCL=U@R8a@*>aAPTeR#!8UFr)4fmdJgQwSWY;5m_BrO2N|tP&ZpIJlgzI9o}sjErfq$?os@Q z4La>K1&ScR)|H1R{1?N>FfEeqF!q03PN*u}r&GcxJ0{OwE}Mh-cSf#m zBL#z%r|hw{^(U5wWPV&`>&i??;_{S+cNt7VRt;?+X;Ru(yC$W3+zsqbcra$R@@WDK zz8&eDm+H7G=~UW)s5J}a5lL>3fn2l>=V^=8Ci#(bU#`|VWiign%UAcAE0Ze3#9zbK z0?hx5PEXe@DJ>nU%ZKEKa=;`5j^q})sz@m6k@UF$MGI+}vREuK&71rWP8hWH*%NFc zMq>ot7wL406Lq-6bYP{5Ms9VGA9|*zS5!ymSR3MRYlKm%IjkRv2A*kP0Ma4QN$_{M zb4c6CNbV&Muak;WibZNYXmkp%d=LjD#N+SD3?s6pBfJxs&s533E!vA(cj@*g2F?tA zNFN^8lw+en7Wq_*l``*}!brZA9bzqPsc5c>vINhdFvZnUGPNTpi#rCNjU1Z>UG4>= zktUXVWu|}wOZ|CVJ5J_83yK=B9#*V9zmR#YpLP03#mk}EAeLP+dRnfo_%r#SWa_oIG8|CIP(9TdwFD0Zs)iu$Oh>_0>&{G{^|> zDh~}3_kF1Z@N&PKYWMB|wmAw6QW~+&Dd}IF#y%!Q2@*q-rwVQBE{6r(Wu#b=39-$YQqjT0u`dTJ#jxZAHysxYtf=WU{qUr{Bp7sf`^6+C93X=H+M zb$Ysl$43*_vUEKxlefif4OH00`T*l>I8JHg%8rG1-61o z{r=!%Os-rpr8`WwRUi?_Cvg|yi(W~|Uwj()qw`5iXQnw>x%KZKfO_lif75O+t_}6f zrx}4LP4}*Sn`DbnJ&X?Lcu>LcTA|+<$Al{Mgp4YJ3ll7TW(S*EoiQ>*hv48GMX@7K z7=V)pO~D{~ZOkAEP@|b4V5>2+DPkRdrL;fJkWTLCy$gb5aZ zoG`($p|$RH9f8W|M5=VhQ{Q`fysZeXSeIxluAb{|?-WYtme2*vnMhglEM zw6Br&yF>H1PG41b~1fWnc&xSz>YsP3IWMOY0zJXL;=XUy{1d%GhDx@#On%11A2i8# zk!4MD#yI94X2|!#ks8o}Yy*%v4 zSgCuK$en1G;ym)B*rWC~OY7G*PiNghIwV%%92W%yPsmS+P9ai|_r>4Zu;|Aeu)=t^ zB}j}ZKF0}7^lqdmd|%i{D4>~(8#JEu{JE;bu=#6LR=~afhFf1Be75LLxkB1V{Yq3f zb3j8d4i%@4Z1?REQlnTP(jnt3?pspmS}-~c8wuc&R7!yFfiY6FUK{(uY#bImBrXJa 
zZLdg})14A&OghjNieOMry|_GsDn-g{=u(8B9pb2tpI1}DY3IACAW@4V5oZPGN_2}xK*c+2KZ-<9zD9BR)`9tCuS&ap(6Gv`8jg>b+#D?Fd*&l^SPN3JH z69{!HOFL+RRV<+r5tb*FO`kA84gOiqPxT{;IBYk&c}F47SrbQYF3EJKk(?;O;VB3@AX|eG$NB+J>2PcRHBVX7EK}v`>Kt`T z%0ix&D(n$&u^A1=P7BJ?I7h>J*qlNAHFW(zc7zNa8?C4+6mh-};h8V`yuEn?3g7iP zM}|6vq*d#U&1YzVCP&%|=rWV)_l+^4EO;W{uQrPBfEad29$5mg3#4lc)ReA)F9Cwf zwEmAo2~WTJy~`5*C#(i44qY4&!UnP$)_$okJBhS5_SQ9IQZ_-(*%Nq zOK8@({e8MUlV_p_%$kSKME}r>>qHI;Wr<>SURwd8+>_HE8=n@XE@Q~k6a-L!R?`$9 zq@d_i{)u?97#Z)%4mO2<18hPF8fF%L1IP|xabC)t(@#Hov`Ta!mtiXca-&y{eFIHd)0RquwSp$ldF663M7_|DKf8zr?(syYGT++~c3h{u$M?B2^ zJ*$pMv1o_<$95MG-%wHg-^p~KDj9!hpp_kjx*ejA{0 z6RY8HJRqeu0y^R4n7I&8rWx9ADKy{=5F`7#XJE*r*tkHnL+}7T{aa88g5fV8| zv;VLGLpLQgmGaIfGZMJ59_gz5Z)$=$67maA@o74Bnr}bHUqP^;;-@Bc7;*bd@otEE6%<`W4VsHL|Nz)%u#BH$MDfZKVZF(9|1}Pxq@=?=1pVx#eX1fn( z`QARH1us>&`xfnmOx&LU)F;(6?eLJUdIB-zIYhI!BQ5W&qLqRe~o_W8xh^*in7> zVY|)K&B#9)5C}xOeH>79qbS;flxmPzr)ep|+ap|9Qf+h)rL`apOK*w-1k68OzjyrT zql=wKYcy<%HxG`CjO^?#&Qze{Fof>hOn$K-5Zs$qHA(_kHTOV3IH`?01)RHjMx&m_NlKw{1OvIdV3y9ex z>He0%I)|I1=A$t?r8fGU`u4?}(4Qm$^6>*sm@azI+}t%jbBKmPvF9Co2Zk$IcYULl z8tXJ!qWy%9b?D1|tc3;@neZ)0M*v&Uy!JZy4=fhR1B65?XNTWAj-D4&7m9PO&QVai za6LaJ$mf3SD{46qgC8#D1I#6+dSoI;PnIKFzs6H#_GDjORS=yp{)h}r;lOP!n2b6i zB9M2POy;*@Dtl1h3egFL9t^1Kg^UeTR@LUSy*`@xmZy4_6)r%#68RfNC|^DU+zmRh zq~S>bf93x!;=D)M@SsdFWG-*Q{xp(;_CIHEul|vIk<4m>Yhr?9rGiy=;aQ4J1J~(O zIUj}$sS0I|(9!0#lsFo105FG>43%acY)Fh^nBS$V_eVDveBIk6Z zBC|ojuN25q^L0o(A>U=!OK5NB(rI~pJS}?7TmP${S5;NqwarF5ps6M2r-M@Rmw{cu zlLW_ruMLRu;4~6(1E}tO*qNw=Fp(u{o3AA;s+r$Zx#UYcrebtn1b?xU_#&>+;Tjki zY*KRgz?8uI_XHPgUunWD!WMY2`$gbNwRBCQZiD@5fE6XHShN{$YS{om$xWr%CF-1p zzfi)K1CL(7@*X}V!a4dPxc1@_Dni{YSTf2!fKtk@5N@!;+&H45jUXYni$>Q;Gb- z!#h!X-pu&qiUfez$>QzE~x~Z+0`uXE|e?@?%w=2BFSxQRY zRcmivL|;h;w{M{2R})EF-Yf;3@w(Mc%`4Kot<4&VG?ph>D9pI zjnWawVq`<+RsxNuC3>bpo+u|LN^gA8>|Uq)x2~R~QJo80J$%6YzaHnZ25@l7^)r-~ zLyT}KQq)Py5|yl{eFNeGP5H*Va}ubrYo@MQ1Po*zY>=lYxuMBm^@HWVq5K`f^s~`u)9FjSbt(R+! 
zl0K09o#=O^CxsGF$Qsky=92*-?2ox8TYPeaYb0a?aZ-~pP=s|#Y=gSeZOOeI)*w^@fZ2pAMA*hvn+v7A%^<*I8 z7!x4N9>5pM);E)l`vJM~zf-!Aj`;3JHMI8Zbpc<|s3oE6TP+qnB6#qc;BkH{rdr^tJLI74`?7tWI+8agGHi1s6; z07%q+Ak9l2nM45Pn**=qnjM(@8V^q` zvfZate;QQw_-pB7_Dam1^nuSNoAC4RKbsvC)sr0c%$@^pO2XRlsqyfcws7IX?kUcL zQ9Huyj;y-JNpm4dhAti51K3W{ra-rqW^b-^-}qNP6tRRF^8+pv;g};O=;xPb9+i-ei1b?`JJ*TsIf9b+hBG*;_UE^9$c&lT?3urWQsXxQvXc^n6oaJWRv&{TMP=1F z7#N8_6sm&oaf7uFI)kpum`vic_CoV^wTM1)qf6n}Z^Z89;cU|JrBHc5?EShP@Lvb} zx?wm%61+mIiHHS;0bIox^O+etdhM@1AkNF=ip?>{0vytf2fULL(>~4O;=7V&+Ik>F zecVD;!=M?D1OrWv0wiTFST~<#0$-=KU;Vrj0_~3>wi`)jG-;vPUt$!+XhVfwZSQp{(zH#asm%HJ`fuKm3{5+38uR;?;XHu6fN>#bYUk>-Wqtd9QcSc ztWH=2oCJ)T#i|^lr&snKZY%Pt8}IsC3EHf}F!|W*0Pl`VbOUwOH?Oa9PMYEu&1u$~ za3e#ohOpj#fTd1{@ZlA@b_FT5&{+|xvCvbF2%?LgD*=ryK!1w8M=@y@dc}p`rC2A> zWF_trf#~{*0TA$(fKw7NU7%>jledPZjU16V^3b8A2LQ`F;W3~c3fT%b-N;h zL!T)Q!=k8O%^x4Vv^36Jr`%+Jk+>t%{{9X7Gw`Ujn`q+`N}Bn%5$3-zo!b_S9m)$N zu4c!6<0c+41!(teA2bWj?{}Rg!lamc_h!9#H~jRFChF<3QgNWNw|1UiWHFK)jEP+r z7ze+0k!t5h%rfwERN<81{j833??5mkfdq{vLa3qn4L_D6_w`0#@WKgXhc3I~@Fb}# zg}MZsM5@T@CXF1Cb(-}Wmk!y{q>A>0#2jK845;rG?X{@B=o-kFYrujOv$FvTW#`47 zMk0Z8;exYDo0p4&Gj+7w=)YDn(;Yq|C5H&wwgMz@Vp}0eyi;ncVj1oq7ZDZ~@jvKE z#x1L*3v6u(8^RDipG$nUW=+rh^~OxW&PP*)sr4db_#o!Vg~50)3&wb@5eD&zcl< zC;kdIKgugoLaW~^^0c*~M7E37L4m)(Ye%`!G&Ypu+-rG})#O(&hd^a!9D z|5gY{S`I858u<-hj~{?-8zuNy`kv!7R3T}px|78-DD*jjTcT0JtZG$oDf^P-#ZzTf zEpwrpiswbkW*wHz9AZuL?oCOF3~3OJQ{9-2A%JX8+9Y8xtg+;wt$+X#K*E*vWBg>N z4|&|V!N|gu_N@Am;+62vpm;?(j1Dg;xbikYHVuswp?)k_-SgOB|=H zH8X@4lon1Xtr6x}!)aYF97ET5e_6*RDV1J(e3;N!CE_p$2?L-b8rvZI2Fb$;t?)88 zeRlzTMw0a@GL&>b8H%)wXAi7Z@OTr@_!p%>tQ)kkEEzn5Ny9xT)yFv_2?}eY>*}q) z{VyqK3ofjO{efqLZ6C2%U5{$Ha7#0JrPjJsY&>jQ`va7RRgsxA1A=CFf{0P2h`5`0 z-MU&EO?}2A-B3nd#L6)PNuNSt?RV({GM)6!=Tj=y317?OJ0@bj1OJVI@NyZ@VQKvF z+KdpwfrnuhWC+p6*5|~>WGU>TIeB+rLYRU2dKmHl>u65l;cK(s z6}oLgCNn%AhCNUcYW{SC{h)uH zQGzSH>_^v#dUx6xOeoGFnwv@a*H@h%LnMiXx|O^p4}zI3iL@l54(ICZt6tRlSgnK; 
z1~)5m)22;8!gCKY$+Em|)lMWpD+dTf&x=G?Ad#_6kb^Hw%?Ep9@kT(orGRmNM%Ud37Lo!^p^if^+efg4dBI>?Lgv7U@$Q zTYN<@WpcEMN;am>DZrM|IAafrD}uiAAWf6=xqrhAZ!Dh02xUihDzgXOwZBbt{L&%n$5fG$6d3p#VjQVOsbRL1B_*POu zM>CC@;iBvKWdUz?YQufL)!C-*)XbjL*Dmye`}qg;{Ew}6e)bBs0e$VMnuJGTZb8C6 z!sK7{S5mj|Djwef6}Z(4l3cjB0^Pr2QX3Y32}X4qm<_2>IuBnvuUU0pLwUjLLD7+; z`}MG+B*k#udhrJ)Rw_DyJ>tQmJDDeKZQ%;h;!R+EPG&a|Cw-GnV40zRPNpt#tRGD~ zlF)Hlc_CoEQGXTv_tYbyoyVzM@q^T8CT;MRa^&Of?-Bk1=Mu;zKE0ePnVUM=;xPVK zIZM{z8KTN*Pb%&`4uBK&ew+x;;G$FB;pzGxiIG&+{YGSDsmO?g+~dC)?%>cM$jK2t zDyL|gdiVIqU{jS-%&Zp=_Y@U|O;}cU2I_J=zIMzroJyZjEU=S4QDj0fYohhU_~!qM z>znX`USFV70}_X#Q87pQHSe!3p-{$W}@dwhRYD#X_d9yB^%Ol6vVE)(w>J5x=BS z)8J)yI(unQwF2iY?IhRTL$W^zrSCx&5JX7l-<^Wu-i<%@j}sxfkCqX*qH2mAu~5KW zp~@^g9`;N2hI~N}OcIHIhZXOOnRR7`H$3KSY;0WJ-Ip1VDc)m9{|Yiv_%KkFH-^B= z5XKQ^x^D2+BDa$^{(E=tzj(Zh9OOgknzM=6$&MQzzM>!ALb)f#^dCOa?rwC{d#VL9Q~ya@Y`6s zc5Q4sK<@DJnKnpmGvlE8^T_jZ2&5%wGY0tE&)c&RG;_lCK?aV=3DGniJx#v8Nv0h! zOXw_i8Eo{VQ9RVOG&vb=C8DT5hKl+Tn8If|Q-Z%c1Dq*Beq-61CKt|PcdU-m^Pc(n zbp)AT%e7&)+r)I+UATCW^RQ?&1>*Y21jQ|>xxZwGp! 
z#r}ZmJDt`WW%e19eOG1UZ2j-Lfx@+Ld=>5IY!i84H#uPY{AdY3ji`W8nd|u0PX@hE z60`Wssi`GW!;cY$4cm4@W9Qq%jB(6w)0p1^ql(MQSA9OGWsN}pa*QZnRH zCTy6f07bl(Jt_{^^x~CH;MVH`TH<1`s1ABm<^?|FdOj$NcvqwJq6X?VIagg~8$CR( z^Ox#I%=2$t4zaXa3o3m@X2Tn#oq*7`_<5ApDpC z1*!FoSUP-_a)#V-Zm!L0L0nMI+05i-jQ)uC?rD!dr}-4wq^x0BMr%m+;EtsR&m$Wz z{;W#4MHmKJuIf|a1&%cJOvo@xGCE^cP}WidvbQSGFJWWgJhkyh%FZ`%acS|l>bM9y z0>bfV5k3L5!WZ4=R4wk01yT*@=0e5RhWC`tnG9Y@gH2wP z*zAe{_JcP!|7vb4O|OG(wrynCghnsoZZqZL&yx2j8{K@cIJHn>24$z46^ThDz+@uRFt>^CvZ86PM4uAsj5w(ag8hoHuVNN$AN?pks?C zO`fc9;NuDM_UG*`ZOR0(hu@KDm7rRE?dSJ+&7U<&H~vEC;iL2Qd#gr;EtJrDRll_( zrL#z%R|Ja^>2?O*NpVe{wo_%0|W9DR_WNUhMaYrUq<2rpZ3#l3VMnRV>)i6`t_bu|8&aM($x}^ zK&G^pbEBd=&KnM4zHC_&+UIA@*iNBl8hZ+_g2?(yG-iq{v0}&UBg>!A;k#l<+27q( zKXhr;E!@PF>lT!C7_rv{_4O0gii@8CWP=2Q6EKV-5a^Fx zU#!{$k&FvHLO|^Bep20DTX|NZoA=nfY;L~JSj@AZ{mau0Md)_!X#4Vp82_JYT;TBj z{;u5i*U@A!jwhcmjtGun5n1nn4sG6I=3w34uUX0Tq@J{m102xQdS^o5eoJZg!H;z( z1n2qKJnS$8r)xp%lt-{76ZS^MEGUJ-K03t9iqXfzTq#k!@D7$^z& z0k|p!O27#Am;cHLwofDF1#4$L=cE9e2w7N8LV7QDM?;b^6xzDI;Vj?&(6J}a^hYq% z$8^-jgs~iZS&rGW5~1?~*`hId6pmmrx|A?&+?y+OYZnY{f+ZfYIVbtU@luz}jbt8u zRwVGBSzo?9S@t`C-(m^nrH3yNYwS5N0qV@2rdOaiM0N%YX(;Ax*XN6zfB!Pu->v>_&Xlm7 zf~m2F3I~*6VkJe=_Oe4QFUYrT=Qyzy1^C&~W2dct-W!p&6BF63b+1%nG%EWbRpke( z!U|QI?{vyJ>8*0+0Q(=2=9aeqf$);{B_PI))Ubms=zmc?5E+~9xgR~>=y;q!a@QX< zscwbELks}y)9nYVQRgBz0&$BAERY#WNHRzN={)-A6lYqCK{ycc2UaL!w4skqW z-J8nnXs&jP^Rgr**Y(wxN)#t)Lv0|c$=2w$ttV;{yF-GWN)jZm2o+pTN!0g zG|Tb8$_)aAw2coF5+uA5KoDiPH<&YLs*ge`krjFA(6`)VT`E~lyS~dm%h;#oTESoK z){@q6-b-Pu-5bb4PeT?OgXt4FWQ-1F|Go=l>vz*oa9CjeO>|?iUL-NS`Cil()5Db) zTDCzdBC=HQF5NWjEcIRh2daSqd0Y=i8=K&PgnCD^#rOoym4w3MrNn&=CZSQ(m#u9r zBYIN?Tf1iIW&mU&CY}qsrLvD#V64puFA$95Dc>9^CRfVED}xxeA^iY<>3JiS;(wT@ zN*hlSLLo+{#E6mmg=S+HF$50K32=8Z@CL|vK=&RIK^Hj|CjZQZO&bwGI=0MnZvD_g zhBf{m`u?pCs~BJ7@Cn+OzKt;A3C-yKM2qZ||48JFPUP6Im+X(}lI|=%<5SMD>gnWU zw%cWnC#>T!eEFN+7*>!e=jN7W^^>-H!_&s(_NH=KNRJfSdwW}WmnV@hvHt468D*+G z;6-L<7#1CzQQ7<+ht*o)7H&X6pprD2W{8PFWWJkFow|&{F+eBq)(WtAB7rr*I!l#q 
zNqr~K`$9T33Hx9sGMK7_ge26cY!@c|*!m)(X!g;W@~t)N3Gl6?#_^)9L?P!`R@jKI$-HQyrwa^@o-`!fI`ORN)x)Xkrfto|__>_Gb-f zI?K0u@({m@Wi6)NhX!XkgH_G#%4h14=I3c$#jehKmEHHY=Yu;GmCXu~RrLvljVl{z zu|`7|DLa>~TemJJpE_&fjL=L0kXZIr|9NgFoKU`RpY<7)Y0|W7F{u$iOCr1{xmtO} zx?r<`oe86BwE1@5n2W2EFfwljA<(2YXJ}WTe_{xWzfCH!;htOSiUA^%$>&K8_n%P( zJfUHoEq?r};@g=-!fKk+&?SXt1|dUzjuW8MPtoMbV0Awo!7;1OW1X-3^iFlvle?nS zcYiY`)bNvF$0QbO04sVgaV;a9GvQb#r~LASIRLQ9G{R*tk^aQDji`f<#U^p*3?s@_ zT!=#-_a|~6<0w5{IAXCVaSY{;8WIu$rnz5mPRQpele;rt67whGtxNF}A&oWIvZd5K z<708KfDbR?=jSx6-?colD`u5Evf}{9i|e*i&~~3m;3U#>MFc^@~HHE=HNMsRG zKESZ!mRrGI5gq%m0ZnBjQ^@%rM)4==@ws7aZ-Ks7hhh|__6qoJ%E^UFyc3_ljM}0( zas9D>&fuC~(*Aj$YiPoA#W(WxqYTVkB^4_Z7U)ry~m$~fTqD!qmzdLhv&&reY_r$gs+ArU)yr(5uuXtK=Y(;b87RSOv zg=-?q8)ce(eZDQ1^i}8SVRmc zL%TF?tcg;f25^FtxvH&$I!$Z>-M>R5eZnOJb0n?|U^khJze91qJ8YLJVH>(jUVf z)tK*~a(=~rAG!C*^R8QUUJo80e!W6hdo65|AI`p;EhLjriHy>GxM_Mm+fp%x7_Ls9 zydytSmz$-?S0}d9o}bU#RAb;{5L8@nscioThjIeD|%7UCWQ9N^Rh@IRCR z0y*}$YFc$1Yw!)fy~C>cT=CR{K9aE&CIJCEuU@&Q+wRkpBFS+|eytV$e@OfCc&yX5 z{kv(Zr)irONl1khWl5rBnNlJlmzQ6#byp{yb6 zZTTJ7_j}uV=6T-t{rvnf^O+|1{avo>yw3ADkK;HYw%CE790`m)!&h!4oPqsAdEBxx z_AO6P20Hrad#&Zg)8Xb9i`I}62|xLm+!+}~d&ddbPd_ZxTX~u`+qJ2*Qgmn#5A#6b z=gk~e^x^X0Rm^ufcQlect1qtucB`UL71$`qhKbk+laC56PJ2{+!?g0~j)E6Dy?$`A zb43vWFQtf`Ab5?~nDNSu)R)iU7xyUib=c)POG&Ts8mBe}B&40OZsL5N@8xqt4{L_E!hKIrc4W;m4fiaT7JmiH`Wr^injl5@36JTtp zV(Ij7t-+0f9rrPwP6NM%Di>oYieXuG+NmTe68t}=gBt4u6TRm8d>JFVWCLjzl0bcK zxWCaDg-ZwQ2n5p9Nd-65Z0HMQnb`A{Wwcqxwmj*4AykmwgEBSu_i3{r_ekOqje7`v zB8>dv-VfPBBx7WB>rk(NjKQqg_1)P@|M;)R`odV(1@^u(3qBngdDEVIyY=Rw2LIi} zOExcD{h6Bwl>P#u@(p^zv@0;IggO99x8% znuHd(b=k!%ix_&m;x+xJ%|(7nWOS_o_v8F)?%(#uY>XZZUU^XeA@kD*}}mk+KVpZ zv*n)HTbC3C^UOD(L}(!JsiJlM`PAd*p;kLkQyK*Uz?mo8XXHS@WsFb`o-we>h&)`P zf6VO56WO7ts5scxK74YpR%G~)VDGY*1OtHPk!@&6-Eq{)gi(2@*O|=fjE|51pC0M! 
z*RQQ0bN8{5*oQg7Wy5;kw+!!mrGH-#Jun`aOQ<(JiS=SPnD;gBQ$tp-&zJU!h2FD- zzuCpC6fm5Xm&qHO|LIi!LMyGri%q9GyG5BVIKyLy{IT}t8_X*;`5eai#w%z0xpg`> zi-)=Psc|J_bqV+x{wcLGeR16D)G%#6IA%E2FSKeYKOn~JY|dB~_w0GuU!S;d7C2^k zgz}j0D%#!gZ2V<$N6~< z%PC`tkq+lU9*=p0Gk6>ckmpo&WT&rOYR5`F(S^9X|U!O)Wr_=FF>oZ`e{)aw@Jp?JB&gQ`$YgRM_dQ?aQt* zn78}$XS3T~ncjHVOZCG4JaJ%jeH}AxcX#Y{k}8$b|D$Z11c#kkBsXps3zT=HQsX4o z$<=uN>l?yWymZfz83_?#28aI=QfSkmddlg0TlxIgjhTgJw_nV5QtQZ8#fL`~dHhw% zx$>_p&ba5#%eEdYPdgSgR7<_IS*1t5?DIRzB}^W&S5KqssXEJTw(ID}DBpeT#EIaR znMY@XHiTn{XccniNajAhFaz(4?DP&-$@CQl%!;!7zb?)qgxRH>oWvp)JH0lkV40_d zQ`gOoj~C|cIC``(s29(}zE--XUyX{=`yL#*GxWhjUzi=oPi&1%9Ad+bmYFKpEB`4Z zXtciY%$ZYKS*iat#AMBJXpv;h!f#uukxd_QKD13NQFX&y$q|_p#cg`_3Rs5m3HMT> z&_FLvTE8-VS)689zpg^_VKW@MWRX7DK-TRniQBKFDm4t|CG#HFe`x<9I-);K1phad zI=B{u@boV%YR4CZxjyQ7&wSjh<&_iSzRPxC9S%=i0vM3KyDeXvhw}`Wd z;GFHAn_>-67WCVScowdcJd*1FklWF;KuTA@$%*XrQb&_$*p0^NZfpJvYoNqs5&q#5 zX_dOLv7iG7_I1Sn1Sa{t4b~?gieXQ4T5kN;IfcDUT1$0@y9 zgm~3?lHaS&*gWtg?78Kd<4b$Hp8njZ__Fp+tOcL6+an8QSaG$s_*`N4zdD${eT=fs z0nbjU3v*`A&MlA|?vOfV(lX=B$+WLgsTecAz?mdZLbA;AqWo#>`l1a?>gwH=*{$v< zATQWO9Aqj&;*CY@Ns4y9{7LJ6z~G=%y7B3a3ze##n%ivOZ-rSD!ajd>cC+nO+H=WK zG53?y?F0kLA;e2(4_9Ho=FJ80CF04HdEgP*t`jy-?fc*{y|j(>7ftT`>5wrhg_GYw z9>nChn5x3bXDmmU{@R@mj}>*q#ZL(zOdv!&iUo*vaSk+ zBHzF6TVD;@xdZto9yK3*ZQ!9a4#pf%D#DE60oVJbab4$czVXaa4O@6IUQ*}KDsuNo zv{a7;wM>!k-V6Y%r8+}8)Ul@=l|p0f-?z6}0p$78-0XSCs>RLW&9x)BEh?Q6JVUEt ze4mt+4eNP3-B-bz#7lkq(M6FXeG}pc9aENaqI>~y`M=y$K(J?*{iQ?ST4VDaZy*KE zurf(7|61|Iw)UWv)Jj+j6k|~fjz=ihUC)*MyCRVL=3I=tXV!cVzjx%!T@`Dd^5e22 zmyTteARZ8S63EC_zwq#EXw2I3^+9}pn_eZkcCoQYXr?ijM4V-=_A7+H4~k&Zd8VhNg^+Quc^HI z@6H(>q^aT-@k z%UvHGG2pNgmiZKu?17E84{1KOIT=0ILojU3q1F$l5 zKi3g4Cy)5ADfYc>QVxBVD7+(9@^F$DU$|(YcMh9Vt8d-2!h2#ve@PyGc=_RkLL$#q z`4!((EER=>OxUf4p<4r?H!nX3TtkCy;Zl}ca_*Q2l=oX5V`3bjAe~vu(wX($hxgw9 znio2?)-ssf7S<$n235uLmc92Mza=fn@~JMd&1l_Pb-k7>fHs42 zpM2x25lR`f7~F(c`sc3xOAeLOW-a!zy$9PEU0vPnvUG*A#y|JdT<7L$snpg4ufvH$4!g)55`6T?k07K{Ass3wc>MX6P7<1%68qAEbok&z7R=*kxD|$)jk-TMy=j 
zP;0A2*Af%6mG5h{Nc$tE23o}Bs@&Jm6`89z69t2y03s^O6IxW32<|Y9WQ0RVH^Ga5 zJ_AhVu5b)@nw@`m=t3vAl%>%tGt}5@%+%<#6xxN8FL1+Ln>dk%NIQN^gO}k#lzD|M z-IV5Rd~^(gQ@OsmUESor*CW!pO~2VzTjRw z=g60@&CT)e3n;W@KYW9CvXb-J-_IOPy|SU(MozpT*(3i`LuTDOWM46OLT@JRZqtf# zpwI}AltU@0sAMg*U4@iBoC}+`YLnd3iV7jIfY}?8ZGaoZ^bZQ*GRM?uw>k-|i3!OElBXe=0ou+uCH{1s(HCi(lzhBcQ4;jNCyjnUCuctK!y>tpYnY}=TUs^ zY{`jzC6H+h~&xQ>#uB>o6=tG zo7=IC`G8Y7%0HM#@$5be+n@FR_%ICCw zLYdympiQTbr~vVcQV$F5Kf9{!?t{XmT@`-3id5!G@XK!BCxGn466?GAgXEl@ns^qH z2xEGc1?_CIO06#7@fiF)uiTqo90yWPP4(Jeb?-o1t!HPI`q!r7M~AlYAMy9o&*blM zY5=>LJ{QEa@SDc!BiW6MB-V^YnzNQ44*2ciO6a)j8S$rsBQhOU4J$vo+LuFDn0-sS z?cAqNRky8DZy!v3_wF5q6_ad$3p{`>@5n|rq>ia&BlU*E{N3{UP+dEa|7oKQtTv8y z1Ev2L;9}nwY+c+=+Q2Vs`Gg(_AK+f+83?5;0moiI_TlMYu-$%{v%>O(xI38TFj6fY zo@J)KVSwv_ARE&MN6E|jn}VaW3vR87`Nja>w=ed=GzONqlEQB~55k7llI*}-bh?d} zaG)mwBwiVr^)_$IH~I9Dp>}q+_$h(wkChJuK%Oqwxjv!8;iS)d#+0A7_Ha`qW@|Nfr`-S zlnU;OILj|1x0xq(4X~2fiIo}u5mv9M(Y|KbO+Sb=SWPEHE6d zH~b~1^fC@y^sza+1}Jqtv$9c(c@8gtC^~(ZvZTT^<)IpBrd7yswY-@s(jO!(V~$*` zZ${3BOy?Nx1BXl7QJX{`6gJ3*uktl2YCnQ7x58T1cK=R5hh{#s0YSEAQ`EsUtg zROs&1_R}BCQ}RH&-rrLI*Dgw!4DTh++cF9%Zx@wA9JlM| zj6e#6Jp0aA?M`AM>UN}*xW>r)w3QC3EF?hX-h9?FPnIq19YCUoC={9Mvo?7h8EShg ztzi$|2}wBiSxGf2sw%@>q4Jcms}L&#iu0^k2+kwARl>GvoEBkKSbNLp?YK(^UAhutM#S2DCtJwR;jjG z!|cURjur&9=aiHI)g3FG1#uGqc3~ZZr%ZvB2(2j3`Fv#Si!LV%KJT{z{^s@0?-7OP zGJ(GV&t0#TYIgw^GXGTEu-1|eJGGVw8odJx$bHFRjk`OC_>tTB^{osV1)``wLPB zyU=rdBpjXPRXUKAX!FVp$x+4^N1N@!HCiOSLq1wBrGGNmOvPG!^B(MKaQS(!E~M5eQj+H-Azr(71mxRf4;q_p0 z-{7BNuo8Jze~Q8hnwn)IbzjPM`uXMp36O4M0%WEvFsNxHVd&gbXlKNO2M<0EA=Mxh zdjkgo)l&-d+w&Y(BTr1?>G5!X*4Wh%?K}{TDLQ-KwNvkK=Q20xj=bOk95_c70P}e5 z{*Og0Dqz%_zu}M~y9}jcFwgFj!9_C`hVVH4Bwu>BdDwdm1_2$S;sgJ_AnUK_owy=Q z@L8pb>m_YdHx?(n4BE={@vU=gJK=0@{N?(VRf+-_!OMqqp5sECS0bFEs-`S$MSvI6ohBfH2tcL@_hz1G8i zS}TK9;GXhc$05wG>c|Y`V%GkQQLZmXM3G0SI`Wopdg!PfXBBq*s7+-0<-4C7*Ih#C zgmeYS`UdJe7OvzJKoc894|0?=5!6Dh=f#j1LnmVXk@2A~%~8(18wCb{%Sa zp(iLET*5ZPM=?Qo$YQ5qEK_Tr0nM(x48wF#?FkbV^gIz;``a2pe@K{^7l&`}C@*r_ 
zNK9(pr&!b+A4vm{kYAl)ztHg&Fy+1+IHl}OHr*X=NHY)Q$+_c^(`_@%M;bDz*Hrl2 zb6)ouI?1G=ls;nUZKRshnJ)BiUI}{`RB7`}l?T^C}yk7YKF-;SXV;7EZ9+XLIp%Eko%Lj`RGxO{)jW+e@zR9_0QM zU$Qq5(_+&a-mTWpSb-cKXKY=cRg;4@)j)dGBh}}f0>#{-BJs(=WPq9K>O8l<%27fiRwX<>)d zlREP~a^`Fq35E(@8BG$KcE#Gh+e2uQ?EWE`-|cQI_cP^6xNUt9Ina6BD=8SUx(@X@ ztE-3$D|eq;vH09y9u^xrir@NpF1~h;v*-1kOO6@)UPW!)k-2Q8+Mr`eWl9y-Tw}Gog*A z8SW)j8A~PM-v?o5YL3HecgVcTD+UDy_(2ZPOV?Occ9Kq ztWH9Rx-CF4wI9pmB=VUdeZ_;FlKHRr<7@x!Q4JWU@G7e%y5Sz^K_A|WtU38 z%lcY#3wu4{D`z}dHZovXM<_z5H1D_d-w(`KdouNtD;d{lT$N@`;G-+=$Y*6=j~^{w zh?T8ZczWs=0t(VyJOYMZLoLP|qIXh7tSw-dBTxDD;v`IRq02I#Vm@!rWtC@jcg%`) zx02;iIpg$wCS!Lz65y&-^#dfSy}B$k+HAOqZ*)vv?A)B7;B0gnV!`)Ue{ZQ?(ps*-YHQ>Q`XC;59iBcm^>ZVb*OzE|0isvgT`l@r023#(E@w z5sDr$bWG5ryNmvtAk{KOK0#8?%BPBL?xd2S2hapB6Qk_G59ADEdNX?XgZS`6` zT_jpYNGEUsB)dX1S$U8vp-U)C>hNrrCRX5may~K+WfR^R0wvjD^n<6-TMA}E!`&l7 zy zvnyu!PTOyIn5=tf6$ii&EE)|oPU!3BhY|%~p6;8}FE9>#d6F--$>C!Z zy7X@w6+Si1A?F3K5N)5F`uxZFI&i5acz_PohbFbv+>=j)fC02lj)ku$z*rjzPA+Ac z9@j>-OtqDA&_ap2=IhvE-0>}EUI8)#OwbE_xgClBNT=cOE`7+;te*1+saW$abXWp7-Ej;8DpdI&4h4g7j-& z2A`z>S?2hPA|%c#mD>HhjKX_mcNN}8YTq1OEzvgQwgX!Ct6eI}bBv2jSXz4|b zi%RciF|Ugy;wS`?PZ$bVN+ljrm5LFK9&W}pGFcps>LbgMP;F}R)j!}!a-hr%xl3V= z4HZ(4jw*g-w${j$XHm@MXMe!;zBb%b_!5XgS?(zLo~G2FdQ6mul-uH96^d291Zq$>t=ds@46_#-DMYfF3~vRMn!rI#0h(mr@-(V8LVwI;s=X)hBPV1PSQjTNT-cU zsFfC>1H#|=#Gw?r-xPZp+pnv}UJ*HFZ0^qW8WW)1_3Lrq;Ej+#@`Cchj`{2BK8AkivQfHj~z?6*MzmT_wlL2 z6z751ANp2TS1TD%&&2WwRcc8oql+HWlQPkL#k@inZRTM< z4b7RCt2DHBw{S8!|0S?lu5%sY?R3Oj# z+9RE;dkaT(MYb@-?jBF@fpTiq=c2GqUIVD-^h&PMUwF+FmBBkUSw_3hFIXKazUo;h9NoXgm+nu>mxG12{~=I_IVR)7VWO z`mZI>U6$Rh^e;E0#r3J%3fQ)M?rO0OKikChatQr>c60A)H&M#o$1t@{}h!P4R{tS)Po- zGw?xKmm#J9DfpOP5>7sq1*NGoVs2Rf`&qbdZhKU)=R=P`nsCijBm zliGJ8HPX+FMVk zhO(d5t@fu>hAz+1jJ>jY@en_?dE_Mo8xJ;(f+M+~ZW#9DliypEFmM4!zzqt99!>a9 zlkBBv;XESC$E%D=M+H2AphFO8dG%fjbedz{O0vXBZKx0&j_#waD4zv~G1gv{WK&&+ zp;WwU=I+`b-o;CSmXU}kpxmI_xw+fh#29_7pwT+a-F+09nZIq=zP5!u?5zI7*dhP7 zeGL@TBpLBw$wSCKo`+JeT7GN2tjfAyb?phlR>ky_aysUW=e?UQ3;a@xzVE7D%IdFC 
z0RuT4Ipg@I0vaJv4f_C<$Q@Q~L>LU~B=Z4U1OsI|^F5ZkPCult9~{v*&`ZH|bu~S9 zUxdn{IK+`h=cZySxO&}vhNa^t1BgdO-~k* z1`fy1O`Lh3`jD|HlWyar*4tVC5FX@93q0br(7;obXbT8Si9vKx{%~oIX3Hj28=$Rw zA>LEm9dK}Hh*ErJkuO$oZ)03)7L7F@H*>*Z;5~S;Bw2~Bs^v`o#g1=iV zhpN9+LdSLd_0mUnCH5mG9T zmE{Tk(qH`OjQNXM&$S2;7Irj-BD}1>{8G|3l%&)~KaI{_i<@v1X;UdSjxM^_6b1Y_ zYKfbfI|$_(aE{jp^H3vfV|3L`}s?5&fr`QKV zgxaG-N(?=V*C;KUX@bfrrws&#&7oje3e~1&AW^XRgCmto7^OmZ>L~Eg;ftaef4w9L z0?{oVq!#)YSgdYoD<>{O#8-jskdmn5=EN{-bJcvf0k(B(T+ zL}(*cHHm=q!to?Xn<^};{XN)mc`w7e+2t;GYT-tM^v4!U%nxKLj^oG7d^5tH2$!}$ zRP9EM%POhLn0>s2*ZU;NWEI{13Uc~%bN&9E6%(4@f1&b=!^(VC3TNex)3{@N03rGp zgbtrCT4SnpzpD<_)Jm(}Ejm`NU^1ZliEX`hoMkhSmr_>Kzc5U@B>WGg!sD^U8f9E@ zZ%d(AV%Xr3?a}s5uk5Pqh5jVP(kfj-zNCJPYJn>o8|?ZnYF%yfKH&&~FTn~#TLTOMsyd6dBaUuZT2=Ur{9|NWcK>vEOIt^bs?r2bpdA}BdmagOpoGwcWq zN{ETnRmZGQ;)!ZQ_Z>77y_!scg3O?g2b-?t?b5Ea z;f;!fOsubXt>Rh&piYegr{htXAa=&xCHV0wf*+%HL6m8wWg8t`6k)iHR~lIbDqW3j z&0CAaMIlEgWY)$(JlR_t;cE~}<_2L#9YtBZel*l0I?T!L8wFT1zM|#&2hPatI4JFV zy?QU|t62Zoi;zD^$ai$xax6Nj&7XSf##le%&ATT@6_HfCzPm^@dB(aC6S9DT9vSot z?+3IDN}<8Hv1@0QUkG{JK&RpeL+a6?3ymD-&Np%_xk%Afy%G-IpTQ_??I_^!RpfEp zK#W)9(2}Z!ZM8QFNMQi;%GHvAFY}CjWj4;F`}wAjw(xIJdA-_SR;4PV^=+|375jZo zeBMYjF4g?+oXAQy?@zEX+RsW-$Pw4GXT$yELb=LT~3b4sf78X7f z-{@-O*9lnT&(ZPwgliVq%bhl-DJ^&TfL#3;mpZ#58aot;(xm(q-|l~gqq}Wg>7Bjx zKt>}6Xq)8a7{kG9Rg0qBfaV~X1I9@REEtAh@vSxQP};oT+bfcVGQnr@wQB`SXM1|n z?nLG8ItruH;iMtzOQ)7a6&5+$p52Dq=!!r$TC6=JjDU5M!ABNvg~Srz*>24hozZ@V zmcQ)~QmUegt<7=+J_@WnG6BZ_JWCmOF7!M=7u+sk2!4F%*!!f$o2ZGsh_}~#F}q)d138DJ z^8`E!BqSvZ1VANQvSi8X$7i2TW6urD{C;O<4VAFo+5@5lP2Hl{+}}hc0O_;8mUx6% zvZ0$)ES@t9ed&0276AoWf`Y>2-u5`m&=sGO>zVLaJm*ei>_4E5lbYMHowb-UfNUMP zLf-H5DoH-H;qSLH4aw+cKsi9*A{0sQZQ9*riqJYF*~deJNBIky zhij9P0Qpm&3{)Z93*s{1FRHKLwbVjL;1|g^$z=w3(WZa25k;joQ;NEs(kPN)r>fH# zw#h$rJlyH1>!nJ!Xr5R_CQwa}RJ!Rgb*wcXi};y(OchX>9nvDwU7i$g-$8)66JAPx zr~QdDKz>r#rGKpg2@xr#I^=kQ$0{%?r2nXPG|FTm|Vl#!U-&6hp(kT)jC->q4Nlup? 
zL%9MG2Gp0KFFFet34fa<`-ECz&) zY^hV!n%)e4lXB|lNe$wa$MwG>z#f+LFPYpdId;rUeN$TiqCKDec!q@=0BTpfP>d1x zZZuH+>=L(|Kv$K~6m<0h0Sa|Bt5+uYAJQvCO(QLwl>9Ix;U?1;zjt@fnUx)~Vf`^p-ny%RfSS;({!z>Bi4SzX+Ygv8|F|%ZPO9b?hwuQYcHDr zyiQ$`&f>|w6pcjV^$XjewAF2z5&kkNjuXl5EE(w!WOuZ10ZoPjam@fF5hW@h9c@-p zI(Tef`UYSzjibGhDnIlR{KHaK1M9*YM;b{rNFa`-;iMbT>Tq=JM-~@b6}BRNUvExI z^wM4;g!rEC7yBO&^P{f_0mj92H$|_nCq@XhIBJVitx`u?otZVYL=HjPgqVAv{21Z~ z*P$(Rn~)5RBcxFkYPudn>mgI_CX+5B58=<1;wKlH&LJ80u5xR_=) z-afV}o`{UHf)dC<$a00~EcePwNyJ)$i2xNEl|B4VlC2dWu>@ z;3Y+RLkF>EM=D|NDp`Hbm>A|4I(#5Zv8T?<(wZS}PXhh%T)mLt^?pkJ;u1?i@G>eP zDA8_Sk4!jNxXbvi$e$sQu%gk!3HU&v_mR4y)9}lJbag~SqGyFd-8=1gJK)F@3#7Q?v*_=ETnUY@STWG{cYOOs)wf|p#C^z-CbX3ya2iYU)yzv-+#X|AVkU7 z4ET^SiMD5DIIEhb5~v6T4l;4H>+sXBO9|kM{55*0e}?w1{|wotR!WrxHQ`yJxkfSB zWQW~R4ePw=s{58A2@lfPsq?C@0V=jsdwB2LrtK0h>-7$`A5Bngx1H{6voCK;kvt|u zA;vGcNt!m~2;Nm|CUD_IR<~Z3#Br-sclU;dhI7w%87#2xX<7655o8fcdnvap(SFt= z*f+-SQ*UoqW3`r*TAftpkapWRhWRGBn+VGhzoXC^##W|qYA=}m6YyoEs^S@yt19iL z$fX`cCXXHChfg4?OY5&qQ22YZWRepSPQoIgn>HnLz>(7ew%5K>;ZpWPe zw4c6rTWE=m+diBG`v|0~l>vjwL2Z5nt;eS zNyiNZ z)mT&Lt?b*v#XbL>pZ-G*+Q>1ki-Yt5S0*)$rp_~~+UE|RMv*Ys2!dy7Q(XKiV=a%z zryJ)o`=5tkUSFseWkSdZ5F||FHf>#k-yWv)j4oCrk)4nejEkCb<=6iQQ4@u;-F)Hh z+0$aTI{s>8%OtU7?EilXI+~0?*$@ym+nEs$5f=#Ex|y9Yh-i4Fv|plfuXL#MaKBd| z^^R0B{}G)rQb8@Myo{`^1foTv1^5A z0!k|89(m}aObCr2yZ_Rt@Zpy*LLk$yr}r2%^Q4?fA3_Rz;}3}lMBIuP3w9cwDpVM% zD)K^%%qamH%z}`|N{_igI#gX9q2n*5k2E11z~Xv+rHbixX&}^m9z=GS2!Q+VlIj(0 zIk(XLq3?#Uv`Dj?L_nCU=(bF3{7`K5RQ0cA5*jklPQv4Y?g1KaKq5BUDfrkI6bPE4 zqj46&{&a+>B{!Exc1zQT4;2;b4c0$8Wc59(rMZ=x`H9MQUBp%2xW$6`djnrs_MrJT zVUQhcq7(^pgVz42r{a*DF5f*SNT6${G5o32t{fmc0u?#`H9-grk&?^znWB*aPZJhv z=6|0lQZIta10*0F^cv`a&Z@&d$=b$uI3%8vlT+M)x_(iKt6ieqqe`nSDvkhoX)yL* zI?7;V`Nd=GR9zzi|L#3pCxb=uZ`+NE9g+Pd|ES*l58|qcdTSz2U8=Vp(of(+n|kto zo~*WR6CYPwqc{S0XtZLT*xY`$gP^lfm**A#kAE$F6v=e}KeObD@pyIUT=GOyU|9y_J`+ zw*qW@waPy;M|z{Ly(6%^AJaJ7(DMsWYg1ei>~Q&eAOhm}kBvQEl44_ReK_6Z#EiLI zOS)U{-Md#l7k>j~*CE7FZ@1XAhO%=Cc`RIYN-_JM6Wx^x%K1 
zp8?{b0)BRKd2CaAMSv*^r}DD7yC0y*jYM(@07Z0|a5Y|P#N|AR*GzlOuf6Y$edjQ3 zX|>EOcH!X2?c{VgX}{+LayoqW&g!DUifqWmHNL>aEXC zu8w7Cw879ac(f|d+60tOfZ|w&qa0PTt%1%5po=V>QW->Ab;FGp#^u;=>6iqQsZmoP z1M>Ibnn1eF40>v5IP@e)v5hP&GsYJQ_c&0PY- zIznkFy9`T|J++`9eZ%cBCeEzqJO48i$H|GSI%(5QkI1&%#tUC_M}3R_3(J{};$0rj z-ERLsG4^mCWJgj#suo3~CuVI3tW^8?J)v=pI1!WDYUI)y8Q_;nWK&&5a$)^ZiH@yiR_z(R z$h|nb2TFAfLpoNHJ93G=P3Pv{M6DG zqSF5WVF@JDBMX&kh?f!=?QXFjt|h42t`Oy**zbrJ2MZsQjx97(FzuA{g1AT;9tTlh zYzO_Grug<4zpKw;yJ*O)lY(@BYq+O@=orXf&Ivv(z)VrNGzo5|p!0EVYuGrqm1xjM zpAJ;q_V2tnr{1z7)I2&kl~5I>EUThSHh2UmoBRU(fGhJ$vx6r(k>ulzloV5j3S(u%uWjNx=-q#kLPEA6HY+@{SKModPGx6*%&L>*w`rD zZTlS6n>8}TRz*E94eJ-@f(9mH7l6Xhs6Qs#YhCXBvmmiz+VILLNve_~0gymL#!!)R zwJcaXh>WGjtBeOOT3kYNDfV+F=-WCQ*V}?wD>I@Gqpz?}`|n?#Bz})$>GX3QqYOLW z!RJHuQuDL@Slpm6p{U#gCK%Wy-j6MwV4&#=I!j}5ZP#70*SUvdU`bLb!hK*rp&AI$O2H7B!+VGb)asS(fu=DZ;WVlDblIIHZZEx0d&dgu>dFmI?VdSGMXMNkwg+pDw>Fo(Z1J^3UucPO zrawBiI#|3X@i^#HLK_u82Rs9UUQE~~xu&CqJn8Jow}Ykh+v|M$g2Q*f0!p_pSb zC;UdT^vf#kYM};DfRPDIKh`&737zVXT3Azg>z)i2_>#Qyb;DH}0seX0=b>MqJP-Z(@Gp#J#XZNYmnWwbGNvY*YZ%(we zQ}AHBZlJ*`O&{hh?Xs8-UcPVx{8 zA!8j{NF#PQY@~6-gY@S#*Jgg>Iiv2>5&%{y#W62g(?=b2!5^i~#mD=P=Pym}I47%w4qG_)X*{XS4eg==24`M}@b|tdEscJ7`MK6fGMSfeu1%j_Z$A34 zi{0aoP$y(D;W|~Z!9{MqSoBZZVjt4J2o-3f45w=(s||8=UX=gd@-k7b6<8&(#ankqn*J}`d+}q4D@MpHPWa23c`R9F_>(H9Dy{OLjU%9)J@i8nCya!C1bPla% z+w&t4-f3BH$CjxA*esM=qXtJ&4!8Z;K;m4YA&%#|fM@XIj5^XXs569;e|m)F{2X2@ zM@PpF;+{k4R4KEw#NnpId@@!C6Ye+P^Cf;5>T*M8s16@#tI)2FFu9$>loA-}j|{NL zf)i35v7mU7WkxoLiy+9)tGB4k!6bx9oz8!m25Y_r*)=)ubdkzLC68K5#d(U2x)y6ZJhiyfkj)+9N?38k&~_Vfk9asTo8wcFUn#=F zzeY}e)B)pcuIwqr&)AhmRGO{dF#P!q2Z)i-4wN@koBI8Kw-3eTdi zqppKk%tNHQxvv>sCk5%fY`XgMQ384sRU0%_A3wOce%ygJ%P{c!Jdwe_L~;qvf5j=9 zJUPa0+>jp^208o# z)N&nV1<_gVvzcnV1~xd>LUXx%Z7oI~Sk*-LSC(x@*pj-(lz?eSLuS7^F$e-gd|ZWZ zoAtVjVs!)`APR%eo$Cl={h7ogul`K*4JaM&Uz@H5@49Gho*_}%$nk^Q6B2B~dMnBa zDHg<-o-lBaHCnL*HD8CyNE2NuHA?^hE{DVd$@QPlzJW1`Ylh>O5n0%~MmVJh^e(l{ zy3JEZq%b4Qk>RX}+fTFTG11`()j8xqovIe=TuJrV=>LCyqW_#1eRGJ>e;r3fcQniEPK2Rl1cCn}2kd 
z!{qAQ6YiXX0(IWfwzthaOCb(1~6X@((yc;^l<9bOeF3HS*0uIMb80WyIJP?pK-%dRS z0mri;^MS@4`n}SI)N7`y>j3uox}+Z zjQ1KI*X3Pz40CT+$VF%;oM7kuSx(^AL`-)Y zdzYuVHIL7*6+NcJp*khf>a1pAS}un}?MAe$d8H}v%!!E#qBAHwQb{D*WY^HFkg9$|6tV1UOV zC_>lVqk3d9+?nrP{l-_Tk>CQisYbJ*7vy3d`n`*&(Mu$?Y)KX{6mID4?hesl9y;Y% zE2>%FREgi3x4q$%&L)5dW6j^WQrswgLJTwbNuIzVJWcRvjV41>tUeDcf0hFE-nJp1BY9K5%bFHLtQ0^-?+-3O+U&%?FZThS%2_n8oG0rE@3O zmUEWxnIT@;-fR~E>>ijeM{2&411d`2>2bCm^jBXJER0XV1RdC zf$5+5iH~o7JjN6fxjZIzsQuMWHM-zX^>(ab%aC%MS!Wqy*ss7v5*_`k3zi>^8MWt; zS-1xVa;vPQK3)NkO1V04F|s*O4HNxVK6CIw6Lni%JeO*-I|!X-tQr{?i|!^Ekt@W; zFZPh}Hny+LY9;W`KT~+*oVM86*@dU1NW#n|g6Q#Y+z7vO=dk}Z3b=;XK*U&Em7%6i zH&W|~dZt|G)~SCI@X@5}>>&6~O<_Q7M0@w4$tB(MeIC+hRhku}#Fuc1n`(5?^y!P> zt77o>twS&~g0nh@82<4*l)ebY(O`<*LV3iNIxyp< z7uMWzN5!xTvBFXhIhEEarY=kQf%~~CSfw5p4+ray;^a>>tZAuX9>Nu@@c zJ~pk-Emxq~;YwTZ;lqP9X;w$niH0ecAw2oG0nLck414SEHgW+Z5t*iI3i;;*4|YnJ&uf{sv8y+!s1}^vzh$j zq8r7hJj)&1DK5W_);9U2n@DqZORN2B`$hq`Dx7_W4V$C>_=6VC`cMg#sr&?wl$i{9 zw(G~Z626%^bCDzm;K2XnbcYR=yL}2HhK+c-0hjX~=s8^^BiT?Ga;wqiE*>td^Xm2~Kt z1^XL2%Mj9{z11F7#izPEo6FT8T5 zBu7wy7-GJ`_sl0N11(~Sy=VNSc@JAd|T4;h$ z=(T|cMHhdL%C2oU;s}U{A0yNGINHvzHenbKk~U>)9)>p1Xv?J&45StSJ`6{J z{zoEz{B~Bw5VnJAitK=4jax)USeEjbNM0!+o=tHe2ND6zL?0stGeVN6DmUl$9{jj66{Mk}LgV>P0(RcAVpD&Q%z zp{ePm{tSk^1W-JaGn*-4>kr5cH@}+9{@fc)*ZvV58*{OQ%5K}}fkI!9%TjFr>~qRl zmSB2NO*>c?CE%3U(vUa_s43v-xOk2safCv-nEW`XZ^2BUrDm{$EVpF)2A0;bVhDY@ zG}O`}o2xK6kmxhjyVP>(O2XeJV{#vXk-V5TOLY?Y8Vf*oCurY`V=InJqe~tOqjrMX z0aZ3pX33av+-guk)6Igb zQ5GX@U+m8|nn1O6>qYS3k%hIEM>iH9&)23vlnugmNF1lt#+4JUr0VK1<=$4nbyvos zeaS8GYQ53JoTr^ab%_`%s|?*|WXA&OgIKu|c8{@bz<6%i0d?~KvLOQOarDeB59keV z=%frQ_T4zfb#j>5b_wgwJt&}RCxa2gU@1J%5y;`Q4H`fioB491{2cO>y&73#C}0jg zHiiQ4D~?!p!(E-0IA@!XFQOuynKq5Bk6Yp}7&{F~3s{OksNHEM*Mjqe(t&wVm zLA|r`YfcH>)W=?Gx^a%{WRJ*@8ZBeSH>qr5$&zbjRYjbQY5xMF_2X48n2KRl8Jj^k zpv2%;p>5T`N9GwWMjLZ7>X-5U^e6s4K#EFl5d)dfJy3MK~M%%G~ z0ZzOS+WH&U6M^~Fw{QqxsF9=|fU^z>LQd6XnzMc`mO^}Nn<2G3(&VpJv$(z{EX zA9iS0E^qOe&fymwg>9dLzoa`GFPh=wJ5=K{38$6t;i-^oU~YW1De-r+*bSqdU6T6h 
zTqIx><&>Musou~PauN7V1AnCyhMFVhXO`dfaiYUb}G6?t*xyA z>2&?Xh}uXfKFh&P#<#x#L?b+X-~s0^Ix1sCvzm|!nJ(JH1Vl3v0urF=-A)Nr7>vG6 z_!{NruU|dX9cmXE8#wpBbS$8tWboWPN$PVMA{l)4R}>^_O*BeM&X1pn(r8kzk4)yE zddYw|W~gj}m*{Yp97-WLc2bu6R4GIeLkDi27K_#v~Hvw!``N+SXjQ+w~6O@w!y(C{ao;T6eD@2IRSIbzNYmpfmmb~X<% z)dv-sewuho5F<^?m#Lelbo6Ro44KfVnw&S61B4D%Rl@*GDJqn^pSsTX!K?#xoz&ZUt&k> zq`^drPe>E&?03ksq0c^fiOrN@U@B!8_$%UJ967QWJ9n{aR{U4cW$Xa; zA86ICFL1xGRC50r^Yv|3X*)bCLI(S`C6<09;&D&g3<&Vh$A>$*)!S`p4EeGg0H)(NzU>YFX~T45Ak~2Ut!io;`r(_ zekjxKJi;`Fl^|Px761a9lPb^f!v8buNpN4Xx6^hbt>DaFZUq zXYag>!1*6v5p%j>`*wF!dsmmt%;p10f;g~-$k7S08u3*UjNi+}lyfn$^*xo|a#+Ok z@XKp?PpZ2URxkP`AdR&`Gj)!`cxBnwC)o;2iFwGf4j5G>k$@GW83xbYRfA^2 zsYc)1|F*5NF3Xd~?59%#^pa!*P^QYOshWy$9E>>KEob?1?l4S%yNzR)Bu63+9IT(1 z*<^R#;-a)qkuV{5@gvIz!DrkaemNl;Y$J?)*3WWBG=V=!o8x{wC<5zc)Z2_5rwQjA zK0w|_4{*`4*Sm|DKBIv}9x1kDi!*((E=|1TL-{0{oeyxo6&WZcT0Ang2T1<1E=TqJ zSYIgoiNOob^3}FsD7*HY62{kSlZa^yz5nRchhtpzS(;kh#ED`*ZT+r5=MkbRNnf6v zr3+&Db)*9fzaq*!j;v#e{|@)mAj=XYNp;LJ5H4ufx2Z;3X>57%ELlsNG53d6i^kYO zbo7`yy~?ro%gEEZ%F~XGYV;$E*+?nwoT6K)*%{Oo;A|o`S;Z;NHm1F=iey9@64LpT znfOu-8E7xPLj{&CcANU~;iRf>A4)RpfRn^tXEr;%t)^VS-0L#c9u`uvTmp=w34@Me za>-7*^mXif!6n5^nTY%{ROYi18W0Vjhv^yWX9ts|*;JmE4U6V1QUJY;au= zRM%()vio>S$ZAFg?}0d%SLZ9Y3@EC)aeH+JX?v5wd&FqchlhtL_ad^M3>eNDh|h}a zD`0I})3=n0A(fKXvKGJ`i1}$pMf!9tOsrlYlGad2kTu$vJZaPS-0~4)sEVo^-+P5H@-VQD%&^}ezlvAEC45RQ%#2nz} zF|M}lKo(#{2op0Se_Coc?(jd2;qnWV`8$#^NV@NtqU?g9p`rO5Td$a#EIMg&*Zl6W z%DtK?Z#`al_n2+3r7zmZVb1A7?w&40QuHxokvWpH5(BFW-)B97p97gDlF3VKLc;+i z`;%~nA9*dH3~!GRioxit6hT!Y6(OLf{5>G8$_}t3^Q5LZ4i67&=2%8^UIeC(-281? 
z+1E-Q`rFU_#?rZX+ugHzIxXqrRGYAdkOrcXJ^kMCv~~9yEHd)*QH4TzNBuwjOR2#G3V&)z(S%-VPH>aC)|06OWiJ~D?K0aMX^xfRjXBCiYA|q>Yt{HfjvIERb z=yUQgEsV@UCF24KPfM<)I$~NKnaH4v6bfwGSd(^7!@=5;5exdd8KuG#I^eCLtPXhO zS=#rCQMd&GctF~DLH{&Qas65HcVHvp3%|exM5jT)*~sO$!sw^rZP@UcE!`#B{3}v# z%Z0&T4`a;-cj#M}?oK*Z9RAeM9$c|}A8zR#8SSqnYxFxGftA@;?CZ3MTs|cyLqlQC z^m^CLPJ`#D=00UOncjIdQKi0@-qeibgb6h~xdRvX)pE$+TfY)&F6_7-Clh(-SbuRe zEU9o;RaJFJI(JD%2W=VS6NYiz-)Vxc-NP8QS_6Pe}p`K$v?SA2~%kb<^f!Q9$SRi z4Jus0WWUGyYxN$j1-W5ZkmP87zRrZz<2r}4)_Kr(WIh!9Z693&@YS_slpcB#GHm^Q zthYx0D{>doqNGLtW}A7*DpkJ;=+|;Hf_I9byvuQt#iSVQ&w_g?)l0j(yV+{2inp`p z1!fJ_li8>1*Xkhp95OOmPNs78Rg`7!>0pH+);ID&|Eqzoo4E^=s1;14Hg|}1d3VaL5 z0ADQX*C+516Mh!Hh2;T`j)nfD-7`i#ef9I08~f`MI2d)5Dbvz%%?`0%*2gf_fIkIm z)rUW;*FF~rqx_3=s+}->zApf1YF|7~5T$Zf)=htDb{~LF;3@T;sw7|FJ05hp?ORxi z@D}(LNiQHRHSnK&A*xgyO8z${!j($jrI(k_m8^5_?jYRGQz5S-u0a}+YvCaq1>XI}@Q@%@*V!)Pog zntjTgZ3fg$h#Pcy*bLAB$^&|PH%|-#$s{-EQr8ejSv3mE91HVASz9IRP)cWgm||BmGC@hY&JKVsR<33ndFaheerBz4#UD#v`zpZz<7NA2u{MdT(;c{Gw>E~ z(@`dA62YP+% zGV2_negU2aCWhZ_K>1LsZ1%P6h9Si}$*l`tng8Fzrzo|b#;dJIQMT80AE3D?Fb>e7 z@Oio>-@0@d@(lL?%|u3C-9l5PwA)%68uTV}=kdeB$qwlKm*+@(lvN2Zco=3?R7!V4 zDN#~VQf(fZH;HltSy-+%iLa)CYjQmH`rC1X9J?py9<3N{x4M1p%ekNd{?Qi zVmX^ws@vMI2reTt9cLd!>n~3pOlNj!D!tnTW2RUej9jcxc-~wehB!3iOSrCvt7J`e z`0`&`05l617hIh9M-O`zraoNm)iB-ynLfT0GAZ<0V;*JDCdy%}HKanJIzt$R_w5w> z&{ql*w=6pXYmn(b_R?{JL@GfuF5egyok{uX=HAZc-u`m=&J`o z8HCvqg5O4=V}&RgvlvDJ#N_7_8p#wGgyYp@oxQtt{IL|R^@_GaaVa@>BGJabw>BPR_|M{pIt;{Xp z`f9e~?V$!@>?Rz2AhKZ}a^)ve0+sFT?3C67X5IHIyziF>dF&_wWcf;_LO@n*C4pG1 zEBOP7qwi}P(9F5jt*wz?9BHjNmt03w68yx1UB-E?ja8Z}K06FFvb2#~Nr%buK+vaH zw`9G^XnJ>`xisCMWYR-s%2fr+fd)cSAkXz|zS@#=pH&fveP z7J6$np)8_RE&XV)8gV~%=EL2fj0YsF#Y;=@tjzA)ADb;Qo>i`EXwaM@E!kSM^NWT^ z(k45EiJKjEolK4LkBp2Awp9D~LO#hQpH1Af+>6%E`%KF5_1+va`LUa)=b!{fdv?N@ zY7c`t2ygX&x=qaEeOGW()K8h@tup;HbJStGi+ew{G+%Jo*1m&p@9$sPhHePxm@i9F zx@)WF|KW(@GnLx9fTJJQIPk1sU4Alr?{Twz8~80^1$e!-{{HgX+jYB-S@3S%y573L 
z>bLAq&4W9#KT#-e9EmSJrGAabP3n)RaFU&?aZNI45E75tvtozlhhOFI{HQg|#F{L%y>>i~jyv|2u25=&zUrXx+Z}_xYa{LqK`|{O9=f*VUOZ zjckt1cvS-!=;cDW4BHkBqkQt3=ckW*d<7BUeG3}mOxc~?w9h_$y%5$}^kI)n@Q!%Z zsaw%lyg%kku^0U_aI7N6y+6MV3?@ezBmxgn9U-}&|9LBU*$t(LZ_!LeO&2?+ZU_ZU2Ab{r>+qz39#Uzxt-pP7byQOrYodk08{}==Cn~ z(&umE39HvtN;zsM1|-VUalg9p$;o5R^OXFe8^tJE{N8a&Vf(`+u#ENr(a}E(-?$5g zmgzeG)1frH_)D~(Jt+4TIS1kjAuvsUs6@IxVzALL-|qYafA57<{*a{*iFN`-?_bIP zU!WKG`akSvP8?q#+81B(J>R;Of@y0;1n>{n4WD&;N+F#~$I9`?T#Yk)u3h+OOI$%^dzA$rczQk^K0;B z%0z>wt94zx_6Q&(k;BrRw{b%m#4~+$PxjzD3e6$s7mebyu?egQmnx~%RNz8TU3Rk3 zWaJ%hVBt_pw)jlQ(ksAkuiwE|d))-ZZY3}=r*s?4ZCbk)kCLhpr;O`~zPiHAv^fL6 z$GSP=uGl$sPP_6wBWOG!56|b%pF6Isvrrmt>L|c>bcbt+|;KbqyrK6m{DRjnB8C z4w5CXrONKp2ITxW?Q8d~q1cUFRdZr}aa8HC2^Qh)`7x7OHMWv2az2ax$JV{u*)zCGC6O?0sv}G z&ouanYz>BvhQL*?(W5hjT0)<(Ij5sKY*`h)g}M0@NOIb%TqI&JVl;)2P@q7mRZxcM zZP8}KP|Zw)c6>hnMV`Lj)sx=KeOIj;DIb z+~d2Vs=yxt@(YGUiyJK=+~7nuJE61S#kt_dk+1jsJ}$%XLHUdanpaYtis5S1R`Xex>;zQ209D5KOq_5DZQfbasG= zX6G{Kb!{sT(7bhGS8yI%UY#zuak=kFoKIvFzIgK|fBGANd_)WYvG~)l@FKpET(+;5 zjU+kd3Kc-~eaTU`4}wV^UP#$#N$0~nWuHhc0BH-w|35w&{n3AZ+oix7X^nwwTR^1o}B4VE?LejyAGX#hEhLUCKmxpg{dl#Db9rZ;z5%vxAAHk_k9@XLC6=- zbY1E*V=!c9bU*2!73$&*vmJ_HZu9YRSEy)kp`iKC9S~kJZ6;7D)wQ+aJyn*V2_`2e zFGF8W6Ch9kt4j=Bh4vDzvqWqlI43*C=wu3WD*weqdpKlr_=oKkYQKK=LAsga*U?y& za+hUW*P;dJL$MFwgV^MvB+l1FQk$VT8@c?*(?92CSrWhnKR6b`OG5m)wJXfE3%ts- zFa8<5Ks_gX2H~Xb-FmFkNr@_>yWjqKakDJYC(@9sx@G2{m#ub&;|}(l++KaSbU-31 z8|R$upww@u5gQll_My9vNmiCAf?WS{$8trWoG=!3LdiK|U!8#VQc{`)@%9DsBi}dF z$a!bbaFS8(KKBt{<3i+~BkK|MPKItvH0Fmu3$IJn6V;ig4J8gziO^N!?$AfJj)`H{ zUCxe!0y766bQ)I6nrV+aTnK2;tMH8+wAtjgx9!92;)9q2hw^HcW8(dwpUYY&dC-%n za|dy>Ohj`RQ-c{O_!MYeg$Vh(JudVYrh+W>AJ5;rw=JC~JBTk~BC@}A50ljNXh?Oo zU6V{ndolOA-QBwno9w_mgwmgAmE5CShk&~F(}bWRc**i#?WSGjPo6w^=KbxR)gL}Q zuWB2H#4-_hU_79(;jt>ZD@MP%&Ee@n{CFLvh`{*R80YMLzLD_sGCiXqK~F6Yq6)Ga_`b&V%ytTZe56s(Xh4=*51%uj(~SYG9%%FtnnwXp>n$ zZr}dM{P7wM*wz_%A_R((yfnZ9T4`s@?KpUMomrHdLjv(0ZdF4#T>hW&*4<&}t3ZyIYL%&fTIboZx2NhBGF8W(0 
z^EjTZr^+olknw3|$ron80)TnG+!VNdvu8hmht}2(g{ZzgjfOpt`)W;0a0=pRVP_fo z?X~U17b-!#eOy~^Ji%Cq}okqIF$@u9QJc-7be8y^CU#iuw zo{qswnkIc3KM!C8s$QL|2%2I{Y$G4r&I4vTV6tx&$7Qw|Xf4_RB3{Qedrat!A%;B? z-S6E!CW3OYXl!>~{1Kmzh<>xEPr!}7mYFa@2#-pG;?ly-GH%L;fxd_Hl4SU3!q23)<_M0EKTviRM1=Ou5 z`U`HDq@;U*&U*0-XAlK+v@tOK9T3JK5K*Dpc-U_JSA|HzX>qa_`QqQgp@Pdh=jj$8 z;Pjk-(|!AY_i@m!U2?1chm9VXEh>hvh{XN8tuRwXNeODs{<19nCm5gcr{l6y87o#h z1qWQJ{fyB`bW2RhL)B<7)XlZAjYGlR73q8IXc!=Tlg2hWy>sfUI&|)T7jg-#xypKE zrqhzIWPn)nrCjVseJ*b8I3(yi2S`;3vORd(=+wrkv!4K>KsJ#5Ez^fEH(|p)CMao$ zevCjw{Nu-u9bHYwd#Vv}#6MX98Fmo6+3nm4aeOEV)fjO3VXJ9B`ElyI=ZOXT6!^=J zH=0X#_icb0@-90V`@2l5mS#^1y;<8v!b}3BcNF_`g!VWBlrG#>g(b|6DCN_6X0Hzf z8E{&Uo|95>Y`P(#5`3jKi3_SbP~~*_?;dru9Suz?9bLO%eyiqe_V9Qmn~@fg&Z+&v z3}tEdRwuEEB!kd)_24?^hMKP(wy_+s=PF|)On*bZFxa^_E3vZyRlsG*jSL@-!0iFe z?-Pj%?!5`LM&myo)FTK+^EMZUZ&I?+GlQ(OBbur%)u~5_wzoubxBXRdwD#^Q%oB+D z#A_do%NxTQfo8q=KHbxK_|9(qBlEpgf1C`%TlT!eHroNZl|KWE^)&pF(#zJ|T?HW+ zQCH;hM~mGH4>x&xZPor2pNa#qQSZ~k@7?o>w097EPDgklOtk`9xu=h~jKu=JfRw9K zQ^>bg?Buks)Bf4_wt;P?rhR@SVM;pP!jWQXsv15|Co_u}&d5 zr~#9n{(1|7ksIX*fTvigrm#sN(6LD_3VfoDHaavKJtJ9VB*0X{jjm0@q#tW))&uQ- zsd@}e1st&fU>&sx8flPA;jZR8oV#$ zX%(`ffP_*$31v{c8-tWB`RZXhi4zsOkY}TO5ub6#QF$!Ww-%=7ovhS{N~(o(p*h-M znEtK;vbF=<@&g0QvMS35877kU4<) zA4yAgvwCMHoYfO^nm{L>9u(C~d*nL*VZ4E|Vuuwd<1g~*-u3IbbI0)+ zgEla;1IdRauH&V-eyS{t54nj-OCSNE?`d*?tNXXMO(=API&u@i;Sre5g@O>2pmRLe zdE#c(Zi5U_2)>jq4Ol?{iHd{(LcV)jE-DYW;tgTa$5u4<2s(U00U8IE(xd7dnGSW% z$Z4nfx7m)vt#OX>U=v56%z(!_d+^n^sBj2LM1W89sGQCb1eR@H%IxKH_d;&j&6_lV zk=rC}z>bRQ(boMi23xKO(&jp#QNiCR1c>hnGb?WQ#B#@~_kuf%d9W{B1-CJZuEmmG~zFC*+= z3YQDyE^C#+PH!wj)K+?{vt`7^_dB2DP`}>N4A8gE+-_QaH|YIBj@$QBusdZn?d5|p zM^X}xV9LkWzP_S%O#FFR#x)6#9&JCCWUv`>!`Nb<>1UgEPJ?KS5I!LORY+Y+(sZ5sk<}JOp$)VkUNl+XSEo z{Ys)`QK6Iy`O%=jLIB1`kJAO)ULR;^sy{LQtPa=wcWAO7$oLkRKJdWF;==3@KdWCs z=lZrkG3S9G`@(ey>`+%#6gFz6E)L>EkzVL2Xv4};Xb;hOK$fXOKo~K0U>&fY+|K66 zgdr{v*LnMB2e`?}<05o#+k*ur%QWA`SHE&Te{iNO8K!DJL`DZ%>s&k9y*-ZwQv>Ac zYk3$1j4$6)Qn;st^XN}pTb&0C{onwHt+}Td34hX|stpReU_^*(C0h)>YoJi-H^qC5 
zs9n(yw&12DilA9P(5Q`r)an~UdBY(V{wGF_K@=we=oy!t;!F+B-ICQKb<0mA{_dd! zmg@WBFZ`ejQ(SXlXiqvtD%~t?J!bun6z1}3L=sPbylhRA$i%_BGY?A@ z0AqBjBTP?)QpXDE9`Pq&ede{6;rqT**nu+tx+chR!uOF{LK1MCaFL05ds zy0}M0?K>j&Dj@zZhK$*2H4Vsw{$Mx9;WG@AXZOOm?6`4Ub0M?qj#u)oc6-M#TOps8 z{^N?eP}XMy-5d6{QT-92cwxtHivNJ<(;XfO(EP6TePDsr2^W*Stjo6k&FEZEGHQd~ zH8Ny$M|8*N+C$I}8;U9vtr)I*nUgQCl*LONCp8eMGhpiIPJxNf05QEVTUiq9ThOWc z=y@=-j$NkJ#NcjyEcJ6G-rx+j?TBltpH$>U(- z#+&_sFM;@E0?NkkhL$e>Q!vA-(WURC2lbHVhw3%KYh7Zh^IU48B`aOM{A%&0gMRjCTjDC^SgK z)NNt3qTm8_*bXQ|+U7}}_@CoE9(SaDV?^;tbD(v4%4nU0BsBf~+bC!Mg`Cn3j#5;9 zkj7A=KW2w+2M)`&FQ*XN(Bo6Pil=S~L!C&pDMJNeSu0+|tl?K688vp@JVIJ##Q!YR zB72;KGD7-Sp_42I1xc+g+K6R`wu>J>dfFTY`2?7JjqNwZEC#pTr(?>QG*M|81Izg0 zq}tn8iP=`~#*TE@PN+t1sz_zqk+Tj{1M_Hpx>OW&lYK?)ntXOFI0d;X!ddbFhRdwC zV)zspERrxR5~^1(nMq5T&f5t63_X5lpmgie=uF2pEkFJo*VRd^nzA*_n42pr)zjOdYn0d-W0i44x2 zaJ(0v;fsT6^?6*&#+MPWi{?en`)l;K zmvHh4cy?oMBDJU1Kp zl*XoyEqlK@?Zjuy@M`*YeOk?Gbl}*@%FL6XUhhGmjD8_?(v~PX?>_4$*fzoop#^^i z5|DRzBY^ueA}*1wBoEVK{HZR(gSmcrEtqzJGV&hYs6uBy78B&Jf){z6PNINb)pX zg7-Sin?)g}Dw^xWP&8iyPVjpOTpY99URcO|=qogUjj5Ahu>1ewgYo6ApZu#^UTyOa zIvWid2x_|_3;cfxW<%O}Hx%=N5p_o(%yrARz~9;r@IlQ2OGozrFi&r>3JiW282}s@ ztmh76=lvV*#XbI&INv`F1k2z%+npx>1nEijdYR|CoPg5rQ&Ws!*$2>WNJN{aa;K}C zQf2;+OS+h(qrYWcs5icXFrzYJ4CxPckS>X;IVj_u2Sbb;g%7eW4;Fdfbf@s}>2H26 z45T$UND?sB&~-nw@y19dGSbaRbPPC?Ag1R!)PX(>9<-`;ojGD7j-hBC{Sjm4SRIQ5 zRm6C6u3=HsgRIWJEw`Xp1z`4iObsK=nfRi^Rwi8;R#O}~HOE8_i8ASVd7Bx9y;a8? 
z2xK7GAyD>~n`dD(e(5E0YV3c)+`llimB7t5wZVWLw5UIjH&8^3-gFO4LAU}}vdZf8 zY0RDQAGqwytTd^6<)B3!D9bX^LpH| zOM)pyD6(x4#^9acTdX+N!+YkqR|5nl4N?EnSG6+)wlig=5_n29wzF%O&)dSC2%hg^ zAg3Kahdwt8EP>n0wjN@nzXTqC=Ot+3+ENBLcnIJ^2M2tE(JN;^h@|ytm$LOG6GX)~ zJT~*e-hp}P^4+Veadr;0FEY9m&0B>)Gz=gyeeYG^?=q}hJ@;j8*rDG&_!t0PU1(>Q zdwa8|C1k4fKyt`4U}Sk1;B(}x8bKTSCdg(Hd8S9afL8pRv#+PhZUJn|lVM+=zV3t64Rh;lo@=v50m21lahA#cpY z&^hQxr)Fh&2N``SOKTdJ@atu53&-ZlWs4iA2Rr0tSbNphKvK`)a4b!ix%r9_ za9kq+Wy48&?r1paQD;u;j+OGP`q(__a3{u&2XqWcNgwU74IieeT+nyaM+^faajZv> zwzH)D)wA3^Fn1v)-HhbrETVXXk#b)Lo8VMoyTu7%1;N&gL2OLFzggQP(d2Q351^J< zyMnAnK!{%3{J0%BY}pQp%;muvs+X-VS+$xOYw zR&*Bw&|Z`lk&e3}MYfz}xnhy7|g4E%^^OPquCi~z954@F<9v&6HI zpo=1{x+%awnZgF$bN3>cA$K>83*aC6Z3^~@Y*7wW2hEu0dT!idBVk4KNk%FQeDgQp zn_Fg+KYR65nDT`Qg41TFkC;aYFLp9m7o?f4g0~nAu1Rw8JvKS6K?n>AFtVulOf(A# z$W#a+`V?3ZU}Q3;lMy0REQ?UqzKa9>DL0QiQL{q&ovm23@m^eU@vyrXP-ntGnIHoR zONx4c(|=aS0NBj$cA>O5RWN*CKulbqaM>PAo+#-O1Fx-cJglVl#hah!Q@tBZWBl!1gkqeqM_pTOp z>)UH=!HJu^Lk!?LgNB|$P1Fp>UpTOsZk zQGC!tiGiP953l_1rAbgp=}%K>GIUkgcr(Z=M8EldbfEiw<#_PR*TjIo!I3WP*YoLq zihyDF=qn(xpiU3-= zfbZ%(#4EE_^*+m?I5Z0gfKv2F26D79eXPiSf5e?lzyW@qWhc}Wq3?kcgu=-8x_5zl zY@yBw+4=~{F-eUqkMd4rZi}dpU7t=Ym4p=s=yu>Vbm^9Ngrr6-I|jN=P#2mmjZxaX ztop6XOnJPUM#n;ebU3ZYAC#zowTOEj{A}D|@PGsp^j6n2*`XN2=o~ukoYrNZIa0$8 zsKZt{ls8b^1O9LE0kImo{0D|R05<+WBA9K^_|@PAgV{b0%0Rh_rgg?|0Gd1Ws|$ll zXhqAPbN+A0GhB$aevo*XW3 zJ!5*u|5%m8s{stHPeF=m-UI0S6fezScj`if=SSLb(96oT{h;bL-eXcT*|GV&w9T)! 
zOae5^vu;YZ@!qXq99I2L@GBr~fx0-^y0M~L7;+ppm)1+};F!0}iZ2-Kmjq}!oHRIO z$r~dT9>kYS@088ZWek);e3D)Q6M#cs9=Mw7u2HV zAx0zUJ^Ef`n>jw>UwXS*z&Ep-XMhP)_#WoK6ll@dm-p!xCSGKa97)9yCS8TPW`-Lb z<(GmS7O9}aU@-<5mn{Oo9^adB|=9VJIiQeC)iPdnkHea%|vdQYC<@hR}2!Bz|E$SOzrBX3x70d<`75^tR zR(-J*@eV=%jMZHO7YM@8e1~Xf?C0vh8Ue*h9{CqrVrL^C3JJzb4gn3=K2p|(?A56d zr{GKY$yf#lEsg~(oL2!RI}VjLMEX5-zod&={VhJCGrk){NQv9bX7w}_(r+HxTRA_T zI_(WzjqLK)0q*6`W7&$b6)@y5KwfNfMLwWn&t^zd}s_7ay;`pe+x4?}FF67@{nDkNn8zQ7D6d@p0-Q zH2i}tJ1OReYo2Utj;vRQMWmVk0w4hwe-s0W4YqZgp<3d%`JFBT+sXeOO?4O0n$Ioz z^lNZl1Ax{%S{`P~QHEVx0XO;g5OytMa_W$O)xCoMvU#pxipKcX;d7P*$^fmf;a2i98~ zl0Xhn>Q4MPS(fLNrydZG?xVd?Yo_Unf6gZUA5sw2RF(w$AHwuS%qg^2tu?yuObzUg zs(>)il4FAukS|GGn}xRv!EN^K;9^O16oFuoTGMn#*0#eM03vW!?x4szAbDWne2`>4 zR7D-EinUj@L~Z95A3wG$sY?}j8DLr*)B_6+&Av7}1H8YPIP)oXWQZntK>=Ge(`fC* z-^YmJAo#Ea?zllHzruFsCzz}ABUr|=@MUOqyyQz5ze_ZYxHx6t#uFMaSc4);OAg+a z@7^QuXaTth9c-hagEc4K$n5b9|4keEmMubfXble1lXBderM4bn>@E$Z}s0*!W5zgF(Ea{XIOUp{_&~U>(x#AvP_O2tIdjnSKlezUnRdG z4(sH6(oU_qY)}Na8q4z+0DmlcHuy0ob%_@`=;gKi2)p{{i}wC0BQjVGVWv=Q$XoRn zV(oQKK*YTQQ9DY5NqpTk3V9N96CzamD5C9C`8>fPw~p517aM|M80^F3Hqt~Ph+Bzf z03ox-7?Jc#j&nkbd|19GOKAil5T4zj0zD{7pvh@glXQv3_c}OkLb)@JC2_-Ec$4Bs zK;kbs-ov=Dp$Xlwn7CLdm*9JnO{t#`;^f#m)oI>G4^p$3IKb=#f%l-i290c(oeeu1 z;gv)2QS0<(zxfQP(COWEHvkmPnS#ECk*pavcn|x&k}ON*Q0v$i>3oV7y*JUE(n5*h zZ|pI>5I^@|f$3C;NjCgu!&ayfZu}liA6R^>S@-~rY6kg)$kP)b(RX)?cthB^)SOGL zZ?Jc>_VD#~TE>f({Jba)g&nFY9P#9W34@EzhQTz3BP<0R_w@MZc$v$l4ko22cgQL~ zZ~H4so7JAlo>Z}uYDX?kyXN4@z$j(p^@!7vS3QG$yJjw+94cwTc|VH_>L$W#!ofH8 z(*l8%Xr`AZoqOntQcNXtkWEcX25EUiHC7Q)JK0b?Q9beb*40Q_4mqSpW$m4Fk8-C% zZkB+vgEfm+tOX6&3A992P94SLT2<&JA}Gna2HLE`KLor1W5}kRQ}z0tuC>8STF~@b z4ZyG0e~zczQrIycgI@A32AF9vMH*cJGb@$`Qg1`45Jn|>Wt6)pn2YkOW{)8|kW*p~ zG4)H1$<;s8>yWSlv_G1}`sh74?63d4RGVzAzKe+zd4AZt6MG^OMECi(tDt3;oC?z@ zf&y6d4E%aeq7{u%2|y*;7(d0_{j=?{kX0DSHj9gN8Oub`7p`mvlp*wtR(`bPtU0ku zB9*f(pIDwkT3P!%|6&I2affXVOcY2%=^>N6U=ku-2TL>hRP|s|DIcZm9zksvq>`0q zOnp|(yYmy4(m#RE*h7o~0YzfH7&isqheV29v=?9Z7a+z)O(3bVzqN_@aTyj-tAy(l 
zb5nC^EQLO`3x<%t9tWCY$*n7%SfCfOnvB6PsP%2>7*+{|$NWzEmsT!Lr3nrJqNlJu z9;GTg$Fd_&snTk}&Y5z{if@`TKm9XugLvLaL`sD1(=-EGx-LFeKI)DYBZbA2-Y>0i2_4NuyZ*kRC*lb zHekZi1dPH94z-w*!| zgC&*wW}Y3t_$GL&ihs@yw8zxv=Hwz`vOG6}*Z9$P{*s}V+&vaL!Xt_2bJ-T(f=3XH z>?^8nN`YCb=|F11C_q|W1-?G#oxe2%1CbK&C~Uq|dnn~nZ;hkt5EK!sm*1C>x~fc0 zUVtr0pi(`~+ zin}W0l-p$RXXK$U*Teimc@bOQH`AQi(IH%iR~F2)Q}doXxdd*pXF?q(N`Z#)nR{;S z`l<@t1l#n0>YF&tV3R^Nh(yAr%fRNwxikbNjG zLr#UReL%m)+~Dx+x1r1=U7j5=WMSFk=Dh9Bt}sq3D>;1W_T&-{n*rgO*5%Bc&|Qg> zJyx~5Lu9Ta?edT`6|;893(ZaYWwtBAU02{TS1m#jdq2sH3bxKb)1;6{ZE=9W1*#w$ zQLZ!m`{noBTXMGCg;boI4a_$vYQgx7xyPC!g`|9f?S+;s8+-<|Ac$sHkjI7^ppxZ9 zha&!G@@jK@)IJR$r6Cdkw~!7~pf-VIh+c4Pb8|$cz9Er+-)8e{s>$3g?^0%l0?l{Y zvSp#QZcBVtMpup{KocF1ltoto_stiGdiz@eUZC%gmRv$L>VKyhEl~WV;DyszR|eD< z3CUfBR-dG9>ST9yR;(nzkKT#IWLmB-DX}bnBfqACw11dlG4hTC@fomFXdS?RT=?b_ zP&X)LH*2pPjAaUbxrDEzQd3E!(63@ zv$t9JZh2u<>|=V_Z+ZI&$&*9lM&@vsOQL;S!P?{o`-#BHlS$L=$(HBxp1I`p%jJ_* z!vX?p`Y!jD9wgf8b_ec!E3otW`>ZywVG)paq$o%$(}R8i=WLjX5urPnNL^Q?G;l}M zx2tg`^>#cy{*Cg-cFygcUm8c6mx)#yR1m|(0}GUub{QGW4`u)UHWW)AdKYlHQ{uLB z?tF9<=6$?VH7&kE9cfb+m-RVA4Wx>_@F|$c{!+s}nL_pzRaZaVQ}{=RZG^a>3)HgR zPKFZLy>rl1WaeeEo~~|D5O1c5#97!Ul`y&bF~QO~Cxz<2KH&B7OHKjzE$GOFCgZlq^PJX&(JU-1 z)zx^OTAab5L(0siOFZt^nUH^8yWO{7(9J@Z({;L+eLS0>bMN$#-#-<#-ym4nbGD2n zSKE9g2ox*se8H-^Pt3nSS4Wr4{b#-Or5>$a$Dc=+nK-sOnL#1doy$ty&@6tR z2CFBz-B-a6KfGD*ZFO*3)bO0s1#;1=+B|og+FX1$ws|u%<;#Q1mum~0lJ`u!f3j!z zhSNw-YGCo_?vKTRLlkZN3JvWGWzWT6F_ol%zo%lp?^nKx5F!9v? 
zG}dZovFJ8dH)pgMaUnu_#g@sr4xbx1I5>!R^sRfZv)p$nZ134+#Tk%Vn|p{*rQaKFSk1a4ZcOJKKb`UhNdY+wMCS0^}& zaxA`6OeIYEi4Zn~vmS*G*WrO~I}US7>%0vLenebJ>Xf~6mJ-i7(|TqNs~Fm@Z;URt zUq7B|T5;YuIU&JE*>{sN93p;w&0<;ZkVl>m2{}m*eG2Zc^OWJ(?(Zv1xaic{zXRGy za5af++V#$`dx?7b(p($&7y$iY7};ruGK4ihP)e+ zJ5ai`%ihC9WC)mLy>EqDgiazFM7ZvQ`wAg(U-BBPgzTNR zS2^qj4}Uj3bax}~3jcMUM8gi-OFQTkerDrXVayt$ZOERpZwGk%f{(Dzt|V}#u^Eam zm1M&P0>F@pP_x`5no>T3nhm~8ZUc*spQzU(z4fXz^bFvB@AM3I3`w4x%ic&hwdaP~ zDv3JD!V>KCWe@?Mc!6Tkok8BAhyBz-H9?WVZ7g}gw{OLg=XQ6e{6n;LXlN6n-3yLwymxRJMvEKuVJsm~hpCj8$;_L@`0~D87a4!+)uSGa zYyQ&rW@I{l#uXC z`<&is8CKkGA#ZEwa5eVp4I>3T&HzX60(?^Z09@L(%LS_4&-B(jGUN`IakO5;vG-Xa zZ;TLhMVFGhcyvvVLi9D>?HV#oj$N|HAawR=cUcFKC~xF<`Unq4WZTX2oI`!pdS-G; z>Sr%edXBsIkCsEX^c&;e8-*(Ac;sjS!G6`K$uysYKK9A#J4mP+VUu?hj+kgmj>aU3 ze2BAI6DBHy-qwKA#S@$^E;jg;2dg<}L$RH5Mh7^pUwh64#&b}^)~KpJ;hJga76<+J zk#Y>J&iIojzGFKmnK;hw-$55m)ddl$6Wj;u}Td8~YbIcR;D+-Zjk1UHw* z_zW)?0@@dAoc|Y{+b8DOF_nU&-Vy1P>&{Ulv=aN*W{<2MHcF4tY|}9zmLGw)*I2@) zQlg<*?BQ!8Q2l4==hg}#P=0I4gY5h8f1}4ha;aUN4r~?eF-Dz7OZkadm<)87?5wM+ zi@F&L3JN-()`<{AY7k8rmg^t(9!$1j^ODinxW!YS+?zJiT-kkQ_Bv4m2h_!@MKX(!93eMk$~AzvoOQdzMMAiG5L|A@+8$^jrE@7 zdMasfs$O9(P0>JF#(h`!1K8OGany4WxEdCg31xPHqGQ^+95(X%c(ZX49s*{f$;9P1 zYm?n4o*vJzJsnztI*+r9n;!I5RWbvSy#rRhQIp$u-XI+jRD&cOqvj1ldy~B?Ql&y| zAD%te*%cB7R*!iLw_Q>cD+|k5ce>do&!-7K&<@f3C&StCPK2;Ywe|9obk8@N1;Nr* zF>g|>e@E4d(LSOM-SG40bO&0eirDuIim2m`ym-;=t-QIoUC`v+wNJ|I+t0>o+Aw20 z#2acJX%H)j^+pZ;Sfr>c-^2D!wpCB08Th0QiwQrTjN!#>URQ$ z$mtO!fB6^OckF)=?>^j#?g%JG4`Hj8EJ@T$J)z8mS$+xz(X{0k@^OLmNvye`ln6Mj^?UncPK(7}fvmU;FL}x4 z(Fy=`pj$s}a;8xgg5~JnyZ@EXg{=HaPSm9rN{H`IP9!8GAYs$d(HVamFMuX=D)Wl? 
zWVYScf2$q*=;Q80FawV%tuLQ>9$0mb%acRi{4%G^u?XfjKK6!l_vtU0!~_ld@0{mM zqQ;krb8mYjF z+uUxI0M5-^T$xmD;|p#x@c(FB6mCr^HcK`TV*s@&Ppxe+tMF~VVL`Oe8)(K8>sVjw%u!UqQ4 zgbrW#>S$y3Wa9E6me^5kg2g>ut{lz8;!1O0KWS!;_-gb;XK3Z&iz2pyq~-jXY&hLL z!z)SuIE|!Uxz)Tn;yvZH+Rz_|JKewVk$#K2s9+oXt8eF5b@3^1;GV?9@K{(_bm&!4 z?OVtG@-EOdzHGH7*WFs_a$HJtL=HA!Xu1KQm77H$&s&Pt;lr(~H|o|adlzJ~IS)bW z3~g=g>ZSr5Ke(#U5sg{)n_}qSX>IVH1JG;&z-E`A{V}IV?)Z^pH|m4^p2fqnmBY6O z$d9}WC`3ytJF!(@{P$NLLT2!;1-mF;j08m7_x}2o&o3C=w@IHOzM3uPudk~s zEMhZ`AVnZ});8IR8&9rMt>_V|ns(e$R#8emO==Sf4#m1SV~JfiL4=; zYv?eYe<#7j)@~$F*k0fn3-BUW&d(0U+G1#PI7WzAY6eYLf7zTVU;|ykyHilBMsHUB zju>BI!FDhAqcBB8eKNPExc#ssfO=)3U&yEt4knTfJ;2ItP_Uso&33m@s-RJ2- zi_Wsoo;=xMTFJSdLn@CeF_FyMHdxcxsBrS+jiQfdZ)xJ88kpo)g2irL$uthM^AwRc=G80cj+5R!R22cz$K@%5 z+)2GWR)0>(1VQS2>iX(xpU+(m(IY0#r8 z2dQQ8@d^Ha4rkoCb&K>*=Lb@8c>hqjC{uR}=^yB1k(};nTL}RbT*+W-u1p^2DhHla{T!z$5vdks{9dFJg=CXA-0Lc)Yj$JDC-&XU!mGL(VR{~0Hxk(kP zZ^vw@Pz&=pa}qQ6dr0M&VxGhC%w7;sa@?l*;%Kw0FvcvW$d|1oQqBc1DfBjRZC`EN zbDgF7=!uec0m6Br?Q}D|tf>yReiu$pODisJg_&v@fGL^!XK%wi=H^7>d`Tv*3d0&r zD0vg>^f$LGa})m5NVAV>9|Lb>-GC7bDZC`gQ2@-9`xgV?iJ-sv9qZ`Z z2jQIImQ);qQGjZ8ag~l)UwVMgcz_MZg3}i@&Av?tsL$@_R1yFU;_%$iaF*;OKVg&~ zx|4&wsy=F8Zx40-EAY6iV8EqMjj4j&nE)?j9A_|zQsBZTHRkXP+utf|e|&KBhNs5{ z5AEBy#5{lD+{UD8_5>2xsCoeCO8LAB*3N#jO0g+xP~WR00QsG-rTYfc+XA3r+Ve6K zXgY-lpX-i;;)b)86L$0V+z`3Y3zm`QY)OGi$RmF?seRl`hvESD0V0W0WA|*Lq%#-3 zAxvvOQCal|6a(T^6QQXGYSIcwN>_K|YBpg%*yMmeo_txT?|FCB(OfhkKZ@hCu ze^yQ`58w*0{P)2`bm}VrumV4{nY3le(*e;5fC_(a1AsSbuZ_B-`hkZp(qUF}FLcwS z9F?6s-oWw%HNy-b0O^gk4S(bI#6=U#aKnU2C`Et+11kBy`yi;Z#eV>pAOM-P=pNGowBtpPyy zGYCU2ZJj?A;&j8{f=d88?1UgpaL<_;d7 zz1FTWuz-yzx=#5bpUk=2BAifHw2=;He1;fpTJ54$yZtmEG_o8DknQL0A_iphK0FjU zQ-No)Qv#Y5<57cK{zW_$|pNXc*I;ejz+n`9h^c9oKx zLAp9%Ce(YUS&%y6Qe5Q@5N8F~5l?*C#`H^0*5DiH$jw$5s1FG}xSVHaOq zN!_?{BS35(o~{axEzoKgh_OXW0+Z#=GN5s)N17A~rBc*Z&cT9X5&a|C?nG8bf30=PVhS5`XkhpqTy-@WdFr+{eMr~{-} zqOGcu(m;!gY#u&gsbZD?8TM3&TkCmWsl7f0OFgw*esCV&=_eNs z@f!l&b)A^^%KJ;Y{^^0hY+EqaM8}dr9F-2tcJ@|{BiflI$|#Xl+yh8P(#}j 
zN=g@6X8I$#cbrgoe2AmDf-X()2-NmEg15b4|2axeZQjSY$HE9AA{-*8xy#4LJqOTTFShzag^9?j_7N`2rs1-}xvu!u$dn?qJTvXT3NHv?wBRVS9$JY%Qnl zQ$(f`gEeLz-D4{SQ}oLEyuZsFQ4Ss<%l(;tx-s98Xo1LP5@ z7>lXv($wq(*uBpKTd=C6Sxey8ly#WMgSAPjPSW_<;}L|i(9uY{xvwE|d96;j=4M;^ zkB#?+TU>}x`zx?u?ex2H+&6`}@B^G?Q1s_vS3XQ>9WFw?^o9u+=u4FE1u*xkEI8P6 z-KaBGOuPxMsp6ifkjk1ZLFYkF#bi3!f@;Okbd{=^nFVJ+>n%VR6#ZHgt%>M}?#JG~ z@)LD`19i9f_#*Z7uYc?P^298LW|QTzRPuwU+f>}02hTTb{9Q<4quWfe2z4e8H(C~T z&Y>b<%$>iIT*xM#&7Hd&q;vS_(~d7?nyU-(#V9dItWK+89hUHw2!4!U3=hvgoJj}g z?Fuq~ly61Y;@x`9HJ!-x0}Gv(@io}}Nf)&AQk<4{m_0t8_T)!^!$bD>H$4N3Ij5aY zoYvIZr?D1al^<>1!3TXtG3U&Sx;BEy3ksg;3pvWmbEZBa7xcrudlsYI{J9I{@q1aO z2Eg;c_nz*sJqPih$^~JE$WXMb>_Vg|mrT`DqIq8O@>}SV$y1`+0cgZ0+qWYe*ix#-j859X+Uds*efh+1h zF__YoDcNF_4eUEx{&6h0J%-;n1CJs+g~8|dke?#@<- zWmZz+tY}##grm7>v9SdeT7L;iC6`0ajq8VRckL(xNDjNUKhqJZU(0|Y>=0d4~)mle#%l-^r(C?zZ0^B!o`T$5JSXqHvuMe2KK4C02B_(TR>NbA>>tIYaYC}|0suf2YiT#CB z90G_-;vo?%79v?jaA6gMJun9Vh9to(@RpRxfQ;U&Qk}AZb*%_Ma+UdG+&$LmM1bUAHI9v?aS@63ZS%eOgr_GC9Z7J)NeTmNI_l(Vi^#OXZZI=F7eQp{;V0SK6^&^|iCQ zDnItss0?WEH8bD%O+I)ke2N`gNg2Wxp-192On-}&=~^>yU|wiG0=Gf>DA9I261JaS zo3o_ZR^daOLkP=l4;|N#kzB97h0PF9=hXNxVc&4tt!GI7xUl5L*GhKq3Zo?=ohHI) z!4bFd;6PJpiD)Qkn zTnDibTl{g?m79L8zt5^ciqHEGh7GM7Lk(=zU|*NsT8Z#xQTzLw$X|il_29iss>5nq zJhROxa(Lk4bFO4O#8*%B6KE}vw2FaXI|Q4fJo~hfx5|nQO%eUwN)V|{kK&K?D=?H8 zjk!CXrnY#WKD_ufRKb_mCa12cz$2K3+%bPSe)=Qy9Ji# z-9iex+*|17bv)gg@E9LeTs-HLtn7>czo~&F?-&j4!w?8)cttY}^SbRMz^d3P`7M@F z`W#Y(p04%&>jfU43%_#?!XpDFO^YahMKfA3U9tUDnpGJDG&8DD*!*D`KLDT{1b818<}&GucdOuo;_ zA-5TEMX?)AMek6hOLP)Agc%PPUX!f6Y`kpt=jc#{YbZ;6cIc3-u}}mTZ5&5a3D+g0 zL>p_m0mttfrNQ=SsHH#Lu^Uf_cH0WRo#8FoVx-Tk5(aHl8&Z{wLB;H8DXF(k-({O~ zS;_^b!FujS*gLeDlS9-RftTzVB|LPQwfsltQnsUqj zzWTazNdO*%AFby1Kusbdb0v=CA;ncF#FH?a7@7PSB@)g61=w$%qMiY8d8D}8@i^@n zgcRdzui`{h#H$>_GHWh;u|#6QCYMU?*cnlgw2EReew)$6gKpZAkZFo>?c3W9=%D^- zuV4fpd1wZ;grNLi)ymk?bC=JXi* zbAvUsbG+^RVc+6rv9razI%4SmKlQNqiY zr*-Z}o;pz#SJOWOKQ4eeccb6YhYTsb-L8&iK3tdAL0j`~aFwZ#Tt!@Ju=0uxHbCi? 
zj_#^)CEK35n^!tN{6OB#_CsTBWdp^5rU=i;-TqB)kFl4$CqyWex*FM29otLhqgUEh zvGK;4r~9|u+jH!aH)G0Y1I z$V*P24-HLe=94#DGC%|dfxVP#(jZW`lrYD=g=Tdxg7dCgpH^8ZQzY{phM7%lWv|?p z$$OH$y3H0XE8K2cml;P&dqKUc^g;hBa3$pVS9j0x6bEv&JC2Q*u0Da|bH!gRx*?O# zN`FiE62Jl;-`eM1v%aFTlB9E>8TDPWjl4$J8UJhYgb+;}v)cl7LbF|xF2?5-i9g6Z z_xP}ynwnYj@cUXN+`N&h`~0C&JAH9-eMe3Qm(;Lo7(qc9zRgB?LN@V#Y3ms1T#?f^ zxi6>xg1VG7cedN$o^+b>$R5zU@?5`OUczJAYF_Y14DTiwr-WdU6>a?#dow; z_YE~M+M|4M-Sp5QuHbFhOl`U@8v5OOyjQL7sjO@zyGnOG6d^uGUPLdtAgs*oHef95 zBVKGjjyO1c$PilYDpsC|M&PrloE-TMD-F{i$JUsIHXRuZp4`(RpN0o|vER|(#X-Dq zfd|p&*s-J&I#YkjF6alsNCanbHTndigurrbv$bDoQTExfuSOB~|FpdutR4mQej&8um7uzE>;5z{vUjbQMrOGOb`1wjx{#)XzHgQMbI7?ulsvY(;?z0#r-Mrv=+$N6 zk%7vKM|Awt)n!41;p)qi;q`2R_k$j@h%iU|XC|o^h6G(FGI|YczTJvD?%UeXQE@Q# z|KvxC(L-P5OJy}gT}0w*5~aaNO`XP4CUeWN{pK#>EGgPWjj4&Qaq3i|aKU7|B_Jt6 zdg$rWQA(HTWtw%k6apL`n<4b)EmX~Ma+giKf|PBTcUm$dHtx8K_Rl#I3WO(ewlHN# zvJX6@B@;hC1u>q`Q?fR1Ig;RuypI_O$BBVcj-Ch1q@aJGv?p~~y>V%ZE>fp~bp1RV zx%p9e@oh?}o91~wb7wnUUx|+TExe(Ek=ZMU-sD6h8}PPZkB4b_EIi)wZI!jvrdgh5 z3GrgUD)m&LPNs97w{PxUa$~Y$9O^V|&`3Vpt-!byeaCU`qKzJRx*KwEd~SJ4g>EH; zZZwY#0&PBGCS(4R@8Vr8m8TX1R2{9gwi0rbrttMhmxs_cnI#24c0t2W8l#_B=;hji zRuXb|)QT_+9nmjvssCu&DI^6v{d2An*Kd{HPo%y26XBtM8(ff^=VbzAn`42RD{W(>2ez0YG*@b&U4aL8+V{iIwEULOm^ zNR^TUEqNj5n?Y%lZ>yf#eP0pxqmAr&O=Kh)oga9(GQvEny7>(sd-+_47|GSOBFm_e zD+;puZW!eiIVC8`w8qZ_fd{g^_Zv0)pp=O)f z8k>0W4F=o@0ej9Z_$7Hsk^#Rv4Zjd-kT>*}x(Zr@QsV>R`=DrtUtk@1W0Y<1SsrYUob zvUD5Nd`6^IOkd~V--8*gsL47V<=(?tYkevOmNQ{a>}-WaMIU8}%k zqIAZ@B(-;trG=)RADrDebcvvv{!_J3KDnUkI#g-fT>E&W|M6&7cEF<$2X6-n1T{r>GF#SiiHXHWl)^LBZDm`+$6IdZ$FH(3`#$+ z40Jw(V0dK2T&nAVti2Oyb`b(laa8rZkw*WK z*|;!`nGI(nMH)QyocTO>(># zA?zuz%PAlygnZ;p`!kOKV37FV)~6Yv?FQU}cI^9F>)pG#yXt4Oq7(7>b|khBPzWvn zI4{dN)00VXsqCBs?~z6(=Kcw`8(o*1D{*8#OQ0}x$SRYsMZS^R31VPt;8XG5HpP%* z+w9suOyM*%$4kew<*6>u9K;IE*=Fh1ePv zb|WcE(+Xk47eU#)g%ZEhPK4`hTaKd{9=?I%GbdEyaJn>1=E|V977p2+%>iwitJpf0 zC!~pB;pQZ=^^@NRlD%%S%|{CSymilKLd;WgWn8gmfjb)HJMJTqOrZ8M_|%b0|p 
zoS>(SL~}UTRs6!h4z_b~WhE%!rM9HZW-%<06BDVnQ%S|pVM5Nc7^3p16vbkNxUh0F zUaTj&%|?Ks(?TYmDg8D?NU6GcnJBZMkwC-%7|l^ed3sAH!pU+g3T?AYeMkzyZIIO7 z^661hb$~fIzFd&8A3o>^aOTbeL%mI(ZF*~|1Bo!1MD_;8U~NXtX9@V3kM24|EpoxvcGRUr*-X@N2`+)s5N;&~tQAWUZO-Z~ zi9<1)uh3qzWUunNbrr~B+bz-4aWm=EIfSezA~M$T+ByOEjWBy%Uh5a)*xs#E!`8#C zGeYlweX|oa58JDnO|G51avN)@eT8Bwz*2xM%f*8bF=EpL9a^$wLHrSlQpDrl2kNI| zsNneE%|PIPqpTXo&nGK2!`7c9ips;{qMWkzLn~Lt9kooF|)VaA>-94q17(hqzv}x9n6PTK}4jz>i~YOAuPy!cf;;Qp}${z!z`lA zjymhWztDG zRR4a#cjfO$(6MQF4O#A;x`%B-Nn(Gt+xPJ-QNr zU4g_z{p|PIEG#hbqB`#<6ida_MWhcn2iEjskQ$8zBC7y4 z><4y5l}+!T^Fe;9fij`SL`3%K(Xm4YKTjUaxT8Mo#Wr-dJ%x(HAh+oS{8AYhA7FML zKipDU>ETY0we-7CebT=@FmiIG+3N^|*ne)3{A%0vS?K-Q=LBlMCo;9duo-hllK`yi z$)rKIntzYc_lIVAp62)(aG1&=fKfbDK-G_XpHkPS$;rV4*tefXwlM~7DqqViG zsT!!FPy6Z8;IJ6N>yi9_;yW@YkazW8G>C6E0o-8bjI#C+DEo5l0k({wL)fX1jq`Y0 z+ic&0Dj<8nm|f0cO2qMU(?Do{MUWud?%mD*MaN8FF^6&$?9AZ{k8NtRvG{K^j@`L$ zqFdE`sR~88F`*bxjPECOVeW*>lA%|~Zo}^&NOvA`wg6|tp7G;HbH%^oHbtLPlb;BW zub@gl#e=}##uNzga>^R9uHJQ0`CG=TQS3IFtOMj_59%fWFYr+ zA6P?%7fGOeid7$@-adNZh?3oR}V?gkA{j_qPh_%t?2z-o4>op}F~ii#w9AE1;Y z4dR?+Pd2n{0b}nHc)4~n8VoO-OaL*?`{ssLBeWp_HZFn-h_j7$1SZTV@F6H?t*~#O zg7%fyx9@uXB}7uge|5fi=5J!?N&;@KNnvJ~s3n-q`h2Ur>+QoAJfG{@DS zp8%?0YhdSw=P_TEFas?2jMb8ob5@|6YGCVWhqd@%b`0&N#@XzUty76>F8X#cO+!b_ zBep4V&%cBrAZ8!ekll^ovj-$uCssXY^t56Nq7#lTnV{ejB-@WgyRhQ-c6>)o-4!RH zuGnw2jhxL6^c9H0y+4HP!A?amBB6?!P8hYQpK+ecBYvMi&QMB9}3lu>`ZGiL@$ccyj z+SXffqb&-{k=u;uq3o|tQ9;;e$IVkoHUR7mWc5^<^yWAixaiN;mO-UH%%-3w0Nl9@e&BP_j2v$8!Jm@BgGy!zsg)ZXOW}&c3|(2ACKxkzn>u- zJs#{h-Uj@sS_<^Lg{(+=r!ZoQ#G-xp< z`trNX5jhu#2<$8j`UWli2jk{$ABE!!h@FJ>ziC@qOzc`Vh9RUG;j zqMNETf?flbYgFB(M!;;0nFa<+zjFO{u?ol-qFa8ny42L{>~0canYMO5HMw&?_NV=+ z)cR$t{uDuUuS!Kp3LOAnutajIb?;ht{k}1GnG?2~$J*TlmAm)Va@AcEw`}aJz;2Cl zw>Q-POEYy>7uE092z6T>!YF-45yO;Tm?*n88h1iVGoQzL(> zRR;WkE*V`k#cYZmO>3xJI8{_*NZ7!w@ZwcE8x z8{M>J6;nTmr`K@2`ASRl5Gr?%e)7%;;bLBw<@J)f9;2GA)9Ok?75O1~F^s5Td;D-# zNNO0!2U4HGSD?XE29e<^SE)KCR$^u@CNM_|qap;D>w-~=2;*wZ_yNk|x^gvYwifv- 
zgVG`B&`ccUI=7eSFwXDalSn4G5j2^8V6n}Grq2h=p#sJQ1=*P#9M&6H$t{y)O(NCAso~ErUi@oO(072d z3gl>=weQ>+zR=-bbQMt%k&e7^2@0l`Ps?oNPCU~E)?yvLk+$*kmb-__t7u@FKUvv&T2h281)!6N_`l!g>4t0{IlGop*G0QZ4bw7xm~vT|QKi z6ST@?JJD}6?mBTgHYsTlwK?yoqvy?G8je@cR*Zzf`pD@~5~h7X5ZKi17#Ij+tnLfoN4HT4Nx1(uSg@gsF<+zfk2QpE{~RTw(e>bc`1K1>`+j z#hv|$dj^L|1A+E_|0Z*GeNX0~t4iVuH#Gi4$qqH~hxE-$NEhMLCqpJ>!@+TsDf`o} z%8S~G`W-oJ=>?ukTPtY)-KzdYWhGm#SNRLC#(J(MYDRrUxoZ~tu2JE+#6Sn0jrPYRkR6jEkX9O7%YQOn*A91Yn|S2Kh)PzCG^Y132b%w#s?JLZA?U) zKN${`rcc{vY-Bo^OAS0OPc+#(s*$-es9xdqy4E_ba?J|NyZF-uo)UCVvYE^|>@>&* zu%D6=Gi&SKY#zD6I6NjubU@y-a9AvJS7bf{hGul)~VAFa7o^>j4}zQ+bX z&NYiy@z$5Fzkvn>+8U#qlVc+>+ROa~xGgHB=UA?aK9e9iw^_0=;P^4oE39E-B<%DD z9wUWLQ3k-*zJ?Kg=lQEI__>)MxC74BvwZ^x8B|ne# znXf2+XTq)b_kR%O{EQ7djyUv$(OpJ5onBsYH)x7Oh(rkOtJbaiD^zvO_p!wsp~e3z z32bFQ>x@E}!C!C#ayY=St5<8St=DKG(y#(gZ{(lu!XKR&L^g$y-&?xO*g|F^mu0RP z3u>Rw5%WtE7IQ}Or&ClgO7R1)+TL`vO`TRWhtDrNfS$F;iTb}sl&_S5 z1v8yX=d&HYe!YCPEP?1{#LVE2?PLb7nUK!0h8932UJfCX!R)onXHp)OPlQ$a9b1<8 zp=X+SsAx_!nAeOy#%=g+<`de9rC+6L8lzT0n{LN8UNQ8_a0%-iv?R2!w;ld9rR*Dz zDVxm9Cd#m5X^d;$sqbKT67_2~#W=GgI=HJh9K!z0OlTJ5rZM|K)&-+X&mo*>DCR~t z1V`=>DpR25*#WS=6<8g;cV>aJKa;#9s=>BeGQqs?DUtvZ;8SCV+86lqge3eA4y?O^ zP{Ak$+q6H%Eo)2YjRbLnKZn(WGUmASwE1Qrec=p>DQAiV38q&%!8vB^sc!O7TwTuh zG#E6uqt+Q){VJ}={&tZFZ$`Oh;Dk-2x61_Se|d!A|CMzc;Ey)aXrt-)L;^Eh#%(s>oB^8@REzPhd8|@#X$tgCV*@3y9I13^|3IW=cbqt~Ek@l7C<^)Qb_071 z{(N}yuLMhJ)lR%#4%-r>SR&&?E=582>Te=V$jLcHIu!BEWr?-^(msugNjJw0#q+BK^(~=X~lR?NO=24=SIFTSAFR z$oBZ3vS%*R?m+qKzGpMgwjnbmbx)^BzEeO51kKs>W7WgKF&5%mtrQU<#S*yr=xC2BvrFgs zbaLL1$BaI*Bbi!2=sC#4jpkioWGp>3$-EVE(s{eDx{7pv8kZGTO73sNHyyMfAbJF6`Tt~(*0MPL2CFQDpZzuZOfDUqol`Tn5K^IMRy zW^{&{$~gKfkcvS^!tw(}Fo2edmgt!24gQ5l9)w6?Zkv-uRha%J)eAK`;j*x~AItT|bxg z&9e=vKTOe+@z&RgHuL7h!&=vaDKeZ;Y4NLe8SGuoIoVEp{HTvuts8^a6`>wGT>&9j zf;FW4ex>o2LYCy-&y@<4Y(14q2Cjzezi@x_~!Q##3 z=AO)Tn3w8uqF;=f^Wp}uAUI)TMGeB*)HsBno0q;%isOJA0~VVFINykm*PvyIcmQHm zrI6TFI_tvhrPn~^p@B;eSNSR z4;`$3MqU!lIcO2QV{Bd+PO-mtY<$&kAgl_U<_=t*Sl{f_+}oJg{Ku 
zNnHxi5SmUN)1q;_&7$(9CqMFKU+0ok{nImCbMF{kH2r(Aw zBvb&VK)Nupn7C~nIKsR({N*XOV|)0sIT0S4JRBQmi~no1t(|=2@@SV8gbRQ;pahcN zv5!^vwDC@y*ZWg0Khex=ILkA!EuoBLHloC5I`3>JoQzH^#!-ZF2Rq zE)1&v^7WV<1GlWXDs0!K48Rn9YBPlSyhx4T?@BA~? z9-gh>K9ydSsxXIZxbl=!-k3*PYGrR6sRZS?Unp-+>gm)os`t1V*s)aXYAMfQ(zzu3 z%&L1@kSc^Q10*;rW;VWLzmQ!|&MKK7VOC+Qco30r^*Yf)e?5QxV@F^fns1je4N(Vw z=yB?v!sl_RjK|cZ4-K1QwjmHZ(XUI-BtCRS+LINvyYC^|F*H=p4@gKNqQGjx{U2 zDNPs|R$aFlcR(z%APn_VuA{KWzjr<@-vtMV4C@H2b6#pSUs99s{ai#FKFh1wgpenO zfkRh?vZ%=vg}AO4g!G(s!C&GDrKK2a28`MYS$g@Cm~f78co!U5gp$wERN3WiL#O2H z_?3q@L=3h!cMi)6Cx5w1xy(T|zXq;W+)HY!OZ>Q|kEle1#3p=73;VSovj&>^p{`Vh z?U3O}>xd7iz>)HiJC}Gfp6Xr8$n6bY*`C;Sr*5N0t}P5NVmdzLaLqA#IC>tF-(EYy zu~hYcXz1Fy*feyM&3>SukAo!{UkC*M_4w~feNr`o}zJrTGc*dF- z(6X+{tI{V1a+LIW4?qQNU@fRBbRQ36c<~WAd%Xu1-}}fuk#>hTo4Kb9((>(gXjk-4 zcIa)O?-=E;?)IGOT!f1^by9{#2LGz^P~N|?$wzm<`+mpk)Z`C$dyI-bZtgyJa5PwU zCs6J%^W{O&ZsnC#Z@yMcaV?4Ts@p_Vt=4sm2JysVu-P(3k=|Zjs6cZ8Y>%7ou4>=B z=3ku?oPCO-D3jAyLV3GyRWeDG7h)XJ^Xru4*YPjftBksMgPNM2UR$0J$UPjp>?MEt zjg9HRG$fIHKY*a{c#*AC8fWsFzr_f#@j5@dg+%Z3Iua3eOlE=)=%CAuhGf&|v zp5~JB6MM~By=YA1BOLyu7u@5NDqH$fB`sAiK71T04!z9Nom7`{6128U@yjr%XY|P| zE5XyDhCATVq#nRzQ6QjUGANc5Z2>k!SeV{HCUvjd^^3x3T88rdqjb=3MwE5uwDWoO)h@#KI zb1fr`6z#+h(-OOT!)H^nu>Yx_Au4iZ{e8q2h}jW!0J-~2NMzA!m)GWCG^P^me%C@- z4U_7(a>pa&glEGpVDt?s?kL4jJ4XQ=3&gR%{%?VB6D7SB4GNd%bV&a&L6vFELqx0% zcNZQidiW@0t5-VNKZS3@KQ*~!k_JmNy0E9LH;d;AB*Ku5Ti2`{TMYW?1-iOkL- zfX@?lZud-3ko|GEi5eCn%!y6)dHPlB=f+>DIT6R(%~^MX=9<98?jbBwkRA`l4EN0( zY5Nq-1D~N@uG1L_k{^<>H)Oez`0QouiTRN0aF7Hd4*l*|kY8lvsUySINQhEM6ZmxIqyI-jDGmdek$|- zYc^&VP~B>jIn!I(xpJ>OKBd8#*^+ugkc{8t+AbU1l{wL)PleP##MHKK)v(WWp?PR&jR72p=sOS?@6_#Ns)P%D7Uqo%Lx34Z_ z#8C_1F;Wc6RC5F<@pfL%oyM->jTIS;yMNZ@PEtPr9698^+aU=1_;Fu~4 zB@T!@ZfzyOyVTO z37EW)CWtZ}`Zq~|#Tb9aCW9pgs|kbgo?8mUzy_+vxDr&*^bD!utjzOPyb6=&zW~BE zO$l8~7qf~-k3GtEx!Fx}IHu9CN5%wy z=!C6C5kVXQiH#M-VbefBF&Z=k;gcRBs(o8!2?lm4g*Vis#AMHyvOpR{*XX-ZJMNu{ zAeUHMjtmFjRnsFfBm_F=*qS9kXqr>> z<9z%ItOPicE2t1}-?kd|zUWO}Gevzoy~mWnu)thtAROaCeG%Of8zp%k@ttiF5D}Ew 
zBka}%h}=@AROoKCuUPe-8nb&z+>RmmJaL&*Q1MEau7A&6(Aki0#t6QngbKSK=Z*u=|M*TlHRH+WF;V<@?hTEX_RR{;YA$x2q`io80jhHF#-V z>X-AiH`qqxkpN~~R?c#fsSVD`{BL(f=J71_eW0C6Y~! zV!ftR*tZ(kWKRk(P6<7TABb_m1A~0*g7S;x-X5YNBxY!s47dr@YdG9_T}5AN)3d+< z)piE>{6h0E_pThHqhV@X)L<I5m)Ae7|D9 z#~OygJu@e8$fdRX;K$|gT1YQ$cpY&qf(I9~k0L9*VA`nq8SV2iGcIrR_lrIjsaL1W zL8sHD!p*?|A|9fPp`>KZoO}gKNOKe#1>%-bR<>>AX5%ObH@I^ez@3Z3@kRdfNUz*gcVPrIdyf7DzBR&|rP0zpUzNR~j2x~`rU-b>T45tvs zXt=Ep3$Di!{9x<4D{PPt$Pv>79E>qEOrrLE%KB~bjcJN>idjzj&|{Rp)%U582!>=uG^u+Mpvjs?AIfq; zV;8WmJz3SLGJzptv*;TcN+9;xZD@o*D}gUZNHzJ`W$pAu{E*4j3GW~B+)``(AYrgA z;nTARgLynZP6tKX((==77bGV%twUf`oof@Xoy_z`bWn0p>d#v2Jdc<0n)hsmwwAYV z$P^YU+aXUI*~UVOxo{5XD=Uh^*sSq=7@lEi#v3Ay?q?~)ImuXS4DsG_VR;vjMjupc zi?xK7GPR`d4<94lB{FbPnaKeJuBS&|a${O4=v_il&AKoX6Q%0lW{7E-g+)2l=_aOS zhZxQaZdL7pkvO5~V6}?@hZuVgP%>QZ{}@W_+ZAfuH~YvBMrGYgDHoPQL_Kb@X_$3y zgX%tv}U4TSkpAng0Od&>C&90h7zCVIHpbClMZd?hmg6;ckViM5$Obo#`Th>-c z&6aI!$L36DN&(5%6mtECU9-!>o7@lw|J02WI2tTSDFULB(5o4p^;a z66z4UeD#_=S*A)vH1R*22nkhA*-X${mHmkN7xMJ@BWBGheL<<=n(G!hHxOf&ts_a{ z?~HW(Qr69_e7-cKykB2pDDjL_s4~gRB!Q6&MI1-I(Rt0{XYU~GAG-7y93~`3T+-T#axaB+Pxg$W<`SAm{~-Y_5?hG%aUatG#jko7-BGf4M0-S#%^ zxZtO&UAlm{|K=Lj&m}*Z*rZ%F5wPsLRb$hlYg62J$RL1q)dO?SR=f|i%E+%6DH2sx zu4Z(7>h-mFz^H!nY9Xc1^OPE1LHaozL|Y#~v%noC4_mbPHY-Q3-rpC;d#K(csI**L zsX+KDtBGnQe$ABsXuA?02-F2pr5%t5)0%05i%yxs5LFmY~J;+ow_qt%CNGkerOx zXl+}EgXt{1$cK-^eL<(d=CK#f6$uRua%8%m(oB{JqAwz`cc*Yn%pAfjKaU9>PJB#H zOtJUP;s2CNHSDmtp!-U7c&O>N09RT8;=GITUvj(4KXaRF0&FNz(zM!LZMrJ`&Wa zk@JEclEu91*B@j#4}CZOL+@vf|F~=IFeldn`KARowIwzm$_uHpdAHmuT{=&|hj+hR z#n+M6GJ6SVVRc8xT|3f*g<3k3boO`bH5Dt`UBXn2G;CrEaLhK{IAKx{KDsc*WOg_A z7t^N0O?O{ZmoHp6R@$Po1DW5q1DM_f_UDd?@i}n+A4to=&=GT|XN%waczJ=lgI$J< zELR&*DuBOShI$iNj8$)*_>(mvn=YSs3LXiX%#k|O)|LZtVOfN)$=k}qYmmBs^}RT3 zFS3b6aG_8|S8^V?-L` zGQ!i3Y7Bxlag6L>8$yoJ(v9fNVvqPZE_uzSjby6?MrbR3m zntg!c$_LN=Z_^2ANlilE60ZK??}L1D277F%8Z6o&YE12P+=1w{t5VQ0?5O#EOP!7x@RXcvDs|-H(X# zEm3%}3947yt=nQ%%`cQ@wjvSGH5s5cte;gr<#+H7! 
zRLHQeV_$3qoj`GjwxeBNG-yRLy@N4OyZdewB!pro52e>o#PE5uKX@JdQsuiC(zR;{R#VsvBsG*oi754F4>eED7{4?7q9IGoMD?U8GfX3C8Gwo(r^{K4pIpT_ z<2Qk0=|oM)WDVV1$T0JdwBA%!fq-#hHX8&b$_C-@Eoj5Zcd>i~@ZH%wM_K8Mlrq(J z_yu$J)BSd75MnW}tH*j`wMw-N8PnN@KpE39hxnC|Vtg0#YV|00MBs`5*fJe7HtX|2 zku*6}e;C}kD-D8y#TOP?K*_F;>l76=!~Q(EO%2g=Mri3w6tZ)Tu0sG*11L>7N)n^t zs9?->_SH=$$oD=SXHdC!s09%M0xpLKP5#@M0M_Iu^%>QNGcM z+*Si7+pHK;*>6s+W=U`nl3@240ZYd$6}|-CnN688Fs6^Y^|YBjWV19LltEo0{5t%-HR%c<^Z1p{k_YWUv&5PG~xU z?z9w=VAfNkQ{#HdOASHyEtj(L_QFCj&52}!gf>xq$@07v-<1oop$EUWKfc`WewL_M z*}n5)>N``rCO+>cjb>-)H8Jx`&D>LFtZZ;aP-7iryAa3VLKg$*+RM9f6(FO#iRxp^ zTYq-Mh&Zt@V+vf!ctiWMYF`vCaAz*QHux+H)z9NS!K4#SzBc7B6|cqa?SLvo7wS|a zBM;;x($S%M9Qw}_Y468IXJMo2Y;lh2WEKPA8_b#t4SMi!+!4L1n^}4{9BzWp4-^<20Ro5bx2pR#b-7y=n0@czz3bn^B~-w8{RYUx(`OmIer+(5Au%`CezXwCn)ioG)?V|uLU}erHtsr@2s0E^Hz*@ z9H|A^u8&u5eNFRep!4^`(r_5~L>-*?yni_ioPAF8VpM#Bsp;6j%S4V_56BWASEtY@ z!7Cc{P~%Y-lu-l{v69KjvC1rdl1f?%tvNLQzW-M4y;%qu8A^yjlror+xWh-N~2q8w{8?Sp&!p|a<%D#ynBZpBmd zFhG;NgH-F{$}NnS6B93QIrR&fbA__(KW|bz`yAcy!w~L;+d z^uf+Ct7!d{yePznPB(Y;j6Ps*Yt}r0fifGRK6q}7P!A{eGCxlVWW9S} zO^`c~4_P4#ko#j2e9QxiwcAzpFgg8W()P1WrF&kTTBFTdjsZ{ERZB{@R8n>d!~lgN zh&9eXY69#v`(JJAKlgo?EXMfH2sty?=*VdtDhL0?36U7~eU<=VEL&Fpp&a!7s6EX{ z(}Mf~gh^qZjAG*48?QBbEt>H>dN}3WYk_i-c^Aa!g;kB!H-spJLM~*e@uWi>HS9H) z_02ydn!Ef8hvl3iyKBIKh6)Vcj&Bw(+Hc6D_F7Vg1LHXeVGybR{e^m{&%Ut`_0c zi7oyY1!dF$ul}wiW^}Xiyu0zrM=S@cGfKkTm&2^71h~c_You%B91Kr7_5QC3tUZAC zu0xmn;L!lbDuTeHLr)TG5MrM~%6yu%IP|zZSvzRw zBW>DZC$Xrg3NsZ)IzrIwM^$fG>3%6*tjyxem1p-p zG9KCl0zAxX??LAT<2zz9ihJ&0k)gALICT(3;K@-la93qjR11AzQcLr;pCzkhAs`v> zd~C)D6d#2gFA~=_)*@L^5-ErvN}}>SBpX?`F(kOZk3R-M7M#Bz)}_DV>h)!&ou{n9 zp|N>%=h>dkjVKP8gRk4C_J>ZM<~vw1CJv87bW3+r>b!IVO!zgrvoW#1U@{~7L^*XX z{A|dBnqqI(XeV~UOhX|^!P(7(pphUBW~2{%i)l0tPWi9ZL`&W)jbbCq{VY;4y5rR6 zVkoO8K@KGWj+?+cexFyCd#pHN&Wx+^KaH!m<0nJ*K@^Vv;Yd4+Ic`JeGf-Y9B0_F| z2(t-YpgQ#S*=!8l=1?+ykea{>yT;=$(lUldW>YFd)QE5p34-xh6w`QAJz&`SD8bE? 
zFBxO{xFu0#-Ul}Ka*8iC)=XwhH+ka@87lQT(dMN03(Qo07#7SAM66kaY3;;-OX1hDxXm`>q4*(;m4OID zlER46O^5x}x=-(=OqO2ogsCxjB>or;9>}$zQlB%2Ong?NnZ$~g#0GV2^6DPbSKVXe zbv3!mF`St3mH5G*6HFb{0PlOHnF+`xZ0Q*yh6QjQnkCfTTvI7Y}0zrihIW|KGY4DcS9H9B!ie3;aAm{x265aWg( zIQ~Vwk0C6Hjjb^AfNEgePu`p^oi1O~2hqWSf5B;1?<=Q>&D_C_W$r*+{(d&oF-|WM zFh?xA<)n$w6I(zp)!l5=AC+!#G1<{bp1n407UVkpus1avY!mRF#O&B=uAk-}^P zx_meZY?2#DTPR+L84$d@>au*hBJ~Lbx)MNLYI*SqdhT&B2P6~!0y)V6>+%mbUBESf zv-PNJnw4<16*o6ABn%xg#+6K)Mw%qNF^ir7>`jGm%t!YI#J(e9E5hJ>Ad}LI2p8K$ zZgPf&^7!2&|7$kjl0E`5%q_tu!$LPyk=fl6cO05+$xIgKBRy!QX=YJ^+0Yppd!>fz zn*U6BOWhw$3<@`#YX~BA(PGJg+jRG(#2NICRf?FLo;fuSfjha05*?-?&&5W{4Bxn) zEw|+V18i2d3j{m(sHmxhD$QZ}H5T-=uND_yNUr4pmX0>gZli2eqjvA;Xm7XeZPeZo zv0x&HNjj%4hnz3z7BGiB0o$3kAxKk~FSJR2e`@2kc7K!?-oL{YhXXamvf5y-nW$MV zk1ZwT8pv$%j)@v70n>lt5dEF93xXXeUnX+3(zxFucvux4W`1#SHsrUz_l-p%t$2hnH38(bS8l?7m!MEltaVy z4;X;Y$Gk~-x%Mv{xx{1B`r1aIG1SCTJHtx1B*VL5m+c)F&kObLua*PPj+Wmp3tgd2 zVk^D>_58vZPCtXpPOeml#dB&xgAhTFR`g zl)$hKTGL-ShvOS3s4Tf@pNK3`R$GdBfYB#pAJrjkIZR(@sM!r3JajuK+?1MSF9`ZR zb$wtno46o%ydt%Kc%-G$#%>qgjmF5^xbWhJS+jm$_&WaBE+2jVQxkVjS-HM>w)o<( zSmBIqM_#SEP{I{`YSg(So z(RkgKE6r}u&`T6lK@+oqkI z(|qlTcJ(Et$2M}B$1Ii?+|z`O-h|FiAz9BydKhJ{^ut%@UVSti=*;NMF%KGeWcSK*&Cipa6RQKzWGpz4by9FoozmvRQkO(qD_!0m6NrqE zc-ND6Mo?ys%s{rdax@CB4bmSg6i&(Mb8vgACW*^%YUr;{27aO|IQdxyxdhck@2|ac z>G$f;hPb+gx$WH7TZHrfekj`bVF7J0%XmaLMM3uD$Cr;Ugm6B#32oxiDNj+m_r||; z^Ygsf=ljJ=PN0^}m{5nm9u1ZgTTxAM{Jw&`I{WKoVl(ki=e7#X z{Q0(O=&C)jpg;6*2O=yI@zgqtDdY1oW}Omt`_Q{cM19CBO>^hw&6nJJ#OwL~4wFm$ z6df*~IhpOFthj?(x5unYkXnjF32*_M6g`#<&C%`zaZG{-xO0FVioZ z3$$733_o$+-+D>(u}yriyy>IV&|kJMGxu;$jEUE>agP=io11t*hBM$Y&!z*5z8zyc zX?~JbbI&*L6u!LH#`GHN7uY+V-76t?RNQet?Ubh=ISiGRIW9Vl#?anGjnt(!Rjl=> z(*D+*6#tO+QRs2k^!w`f{qGGAKifm&n>vl}?v^tHZ=JDe{EI3S&O4puI21D4vf+a;_ zs1CF_n37eOUA3;Z??(18ci`+xqBEw;wKEnEYB)I^|K;&XUhL^M$01Iv&_yrA_!arr z;hNX=+M)Y(YU(Y$$i$2e-OISITh+AtYLq}>p_a!>YnhEj4NkOwwym-WEiHM!$IT>J zH2n6#4+t%?viIuik4}mBb-(&z^<6Jw3ib&|-n8j>($v{$uGl>@&3gSa=lo;4--plV 
z^n^p+f{*~Yio8LO$b!cEMncWZ6A)7Jn=}GXmV5X0sy|#+Jvx?n+?H~!u}JPM`>JfX zOx$*tp5DV`7p?HSb*Gz$ckEZ%J7Y#+QMvpb(~$n)l!1?CNs==pk0-~!D^o~&?SE!p zoO!V5(Ynk9W?j<@ADBLhiCIdH?We%&ADzy&e9NF;+qCJaU3+qGJ>Q>6jG9F24cDDU z3q20&-BUcN_L;}Gj{zJUzg~m`q2{rp3pjbz(rae=$q8g0LiKilHZgJ~2im-kc5BEe zxKgNt3Ze}ldn z9`tMKJ}R`jP^?t0LV4t?viR}Ei!~$jPG!T0tt6_6nZWd$G#u=GB+KDPutA>RN0GOpMk#oljd^6BK*u-DZKd*f4?@>A}@Atkm;MCn-6fB{O^T@t;gB9aYJlM#Bci}3=)b1 zoX%bq@47zLV&OO%eK2iTJ(TPxq);w$%v4c~>P7rVT;JqBFqpUReA=A3ojNm8=g@cu zXL-KM#tQFBuURoy%0@1Zk+xajLi`bDCvRUNMW><;T`8OVswxMCCsXR=6&ycx$XsUB zUFdh{vk8RF%G57noV3oNnnj$n-k}L2f$25d&fVNz{>&nhCgYuU_3GB0+J(0|_Fa^V zqrEc5jf@HHdN*XvLPTFEaV?ggPVdj-eVB0Ly6UnPxqs5&GhA>#p35fwK%j7N`(^>j zc{!l{w&Ts`?Gdn)$W9W@7`8E4iPm6c+{d1zFq{0$4IyGX0e-t&-s6NlpTA6Vo!y)M zVMM8GQh8es$b36g72#A&;}Z=}aGu&lJnN~?{N-=oJqskOm2#^LwoDwY&Ncxt@p<2E zUz^azLg|lX`>wn^_E{Zgu1D^ssEn)@?gdh*IR{f`eGEa?I^jz4R$*lC7U4#ddAyZF zg*jqLFt55ONuC|Fj~m-^1ZD=K3-{)X4=RR)PN9F)8^DE zbpG50IUzI3hjHtS>EEh$uM2Dw#`dSMh1^hcIwHV$Iw(g7kJM&~ zC83pdk&~GN_y*{9m!bH`2A7tJ=c%7FaGt4Wp}yJt5lWgX8jzTd?hmYn7ZXckPOc*ydkUwEBn) zZ1mEk?(RHD_F}HRlxb9)400iDjZW&{~p=W z>ldQqBAR_nz>=05*wp@j_H=DaWJ>>)jHJk%8)f4wO&7)V~$XK;+e7bq`%W^mh z7dpk~=JJRo{E2C#`wOuHUhaPVZbi?Vhg~Xnfjv)7_*2DQUa5BIq{iigO-I)6^0#Za z*u^kfwI$ir$r9;JM^KRfL2*m{1)5}AYBm487Vo>)t@T(^ za(;&{{@tSd>~0;|kA*i@q@0>KSd-79&9>$_Gl#SlmqrHb#AX+ z6Jaz#F!yi7m8l(b>n$lU=FKCi0IRe0=#KERwxc-UNZAJ^a1;hin|iV*h1w|;BchBj zGygT{xCwid+r-$OzK zWX6Ha+&jhm;dSkUWQWAWoWYVeYzj728JJ1!y!fc&9=jc+lFIS@^$pE<)v(%na$;^v0zyZ8?TylpMnAEYiId4OQH6KZzJ zMJ6(wvdZfW{Q*!8$#C+XKb#=->z)X`*lkQqJ$0INQI%3Kbp+FNlhSJ{dbx1sYVRO= zdnPt_BtEyx-I-;SoUX3n)cn>#LrcisHOF}5I!RgKadbf-ykB#+z@QmM4Ba6MIwcR@D1E=V(ay9Vp`8pK2#^}~Rpy<>as5NzYDA_97zOV5BW#0$ zR9cbC{;E3Xh{hE?C{eat`&+byBr>^*?-N=4cp}@~vgJM4PWhY5HJ6D+^LtJ^GfQ{6 z!W7ff1a+M_eU7eheM<$cPS#Z)g`599oRhu2gIBJdjXBDbH#>ZVwd+eHY5#qCzSyVR zkIO-jAgPL{8C%F+zwTA%eUEqB#Un0`G<=uXj*sX*b389fa3pf=R@tpPH&4r(?fw|4 
zO(}1_X3&~BEAwWmOe&p}@?QFM+I#6=QR<8?m37QPA<0h)IceT6WH?SJQ<#PT13RN&u&%cXemc23q!^uJg50N5|Y`H}9W-_`%wb%Ye+0+K&1#G{Wl?jvN&%V&Tm z^2*P&u~9^`dz@AP7iw$e?2^cwufEJ2fMZ|9gZYUn{_v8631UPDX4DaY~OqT^?+W9Ua0BCdo%dMR$)nWniM9EFU0{LIsJe7Wk3y_ z&~epArslXI92}Lm$FiH{5mG4a2p;>TSHJ9#}JsiypAL8}Hu0C2D^!8Ulu!7q>|-^f+5wov`1(=#$)$d(ywPZ3xy_-bWfhHb4raW8lBFv7e!?Xs+N`;b8-p^KJnS& z^YAaT`!P;utur$^N|;4a*Yx$XX0c&G#gxv=Q4PKPRyv5gO`+i>Qq@EE>!NGU{j+Ac zbi;B%Ytd1hVd=e-zq;U3ea&4l+%An7zX`pCb%9aIVgl?*MdIYtp%WU2&6`x zqNr7Mx4gcp-Z27w<&TeK=Xe`C?&C>XS*ct`k9pkDy8&T^?nh-5jiOf&>m!F+e-w_4 zb@L_=8}bKI^CWbI$qV|)z%_86{jQ9RKm>X#|MO{#Lc`(9B9}${@12}LGtI&-E{`?1 zi|^WXX8u7|5wzgR{^$GrpMw|pcrgC$TY-|2gPy`v+9eeTx92nr0C~}5IBBO3HXY~! zwZzOeoAtX-3BO-uz&VR81s4`ogwGRFYIlst9^SF@b@IijB-@}md0$X=-)xcQlP9XE zviC&3=Ql<9-gD>UBV-)6klRmJX&~pkmmWRR92%ke-!xHxG)G=YoOQ#r&$`ibH>34f zmebj_()AntL6m(n2`B6Wph#U|AIib)6loPddR`F7mMo#*IhYur2>4m)Zzlj^EF2-O ze_bT$?D*{;ie0L#%*}dzftjvLl}V_LmBL*p`nSm+2vKJK^RqJQ?<*~0Whni6lCahF zEWIDb*uMe51BN=_Zxg@V!D>gw&8eF6=uRsF55{Y`Ez`m`i_Cg?Au=E#f(v)70D9ah zxU;{;0R zu=?dCe{c*zk)JrSkZuOvuP#I$q0(@~{t}ceSG(t3Oj49;w{cSpWEi>Ir66lMq#vtT zGB3x@@Ye`kkOQQSiiw@e+;o3TLxDh$e@~ymI^fFo=cvt72CIs0t0vtJ+!i*dp`qnT zRrP?S8=6&Wj|+S{XdiNLEa{?-5r6j=gIvi=eN?_ZZ#RpR%bQsM9JolcJa&d&u<65y z_cm^VRZrq=CXv;e&s?nydGcJVf~IR{<)({jrlrwvw)ny%#SU&Su)3uA|4921uo~0w z@5UCQj4eyb5?L!kX+?}8qOzwXL}^oLQwK9TnnVhfRE(4?WoutKrLk0`WT{RIt&~(w z+v$A2=Y7v_jO%d%w<~M_gU`yx81*^VzT*To3DXU$oe9gV}RE-%)&DqE%v*0<2-92Cbo<=RX!{iG={1Voe+vut}=uMfDA(6M>9rs%JKY#c> zbZ%N`VH=v4cXz4f+(kwMM=MfSY*hD6Q$}u@fx7ou?|^(kt3rB)s#u1TC+m0d^>XOU z$!b2^a-Z?J1OS-(&7RpLS49Xp)D1>3IF+>UGqi&;%J%FINxTz5{)p zbR;&0d6C(tXUVo(227q?X|N6Vp`N8BoXbKu*>M1pE>HFx7ZA*#$^2%JfZb{gG_OxO zxZ^s_?lYRF)c8qzCwA8UyO(z=Gk&6;NnPq4bW#f6%nktmo2l*X{U`pqeS4nANc}N5 z*l+CIVn&MCB}??X+X)Ip&sZI8gtjL4mYVP0C8BY5RXQykg=WFep1#UNSuCz1{fup{ zir~+o0w-PObM^A zuCY;zu90^zlSwA-s-C)NKZ~AI6#<$Lfb%poMi*caJy|qFRe@CH{bQgG$JozZ$1Vs(vO=^QMInD}D2ezoc?x zk{J2aURFsosztSl^(TibWeQEf6pIhY#n*KUG@CvD1v--}q{R#=w~_5&;&LL>Ht3Hi 
z_ZOu1Y_iGLIJBW`^8le)qt%x|K68v5ejbqv$D=N5bk{xzw!yZ-=jkG!zrN&J$BkzO zxn&_T}F%N?-6d12z-jLp5y~RLk-o*c;ul zZ1BAl6oEVA3h-i+pI~nk3r6-ePQqf>xiw3sI+Nb0-Q|AAn)R!_r4Ke$n&Jq#e%-|K zeTKqG_L~Je&D=s(kX9zZZ0MEGHcOjE#P>?DCIA)S&?cK}Rl)3}AwuaQpnQb}@$syC zPLskfYOwMnbendrO-tY|dWC~4*i+lrgPE%$X@Na@e(y}|(HLOTF-nG8_FsG>yK5fE zwkdcaeE9el)-E+pWSFh1q|Q>&!j_HxL(Yh4`J%|UZyO&GGIxvgXaaHE{PJ!o?ugGe zvC@%Y($8_3X58@^4LrJK-vp~q&s05V0Gfi2So!>2ZBk3^>f>rS4Y4$}^KLw!9|H)* ztI>a0+DHkhyUMv6eRfUN{(IF*RBO~FL$x!c?EtWh?h8MvU@_1SBaQHAUn%tKH-jAp zwYU%O%7f3teaPBI`ca%(H1O-~wRFiy!hou{e7ix>Ps!Vxq_tC08h}&YXBceNorv1- z=AI4YIu{-v{sWv$9JGf6US-MXszfIfiWu-&Z9^V>Ft9jobj_3HIi zCa>9qY5-8;tSUbm3pN+%zU~X|U)j%bZEjWiX$SFDO{?ZS*fc5i4lxvjGqstb%h(!V zFLvw*KiKxokW~Mqkzkf>DT>F@@Xq;ht-PUg+ulOEM`yq~;-bEAVPDOwHKdgn0Lr-Q zQmHPZWwiJL*_`Hwvrb#@wa*}s|BOX*rfW}{U{lh89{bz3&x5uMj7Ime^Rn5{gDCQS z!_dCm$$?_6GCozbfqc!)A~$d6DxCM%edC|H*t>`1DLnvG>XM2@R13vgwBFA@cK>&r z*e5Oz*tg%K`J`O(okAm(TleqJk9jyYGL3h8sg*=t&Fh>h?>`h`4(gxVb58`7ZT{D< zZR?`BWm5ZVER2($L>0+8Svrj=nOA*=KE8^?ELhd{SugJ(32 zX4-uqwf-ruaDV^!>PN@_Jd@?}PUA?!5smRBRH;@6#2GNwqsmVJMdk$%0J#arBYqdA zDqr`?7IStX?rC%iH#fJvkF|VjKS%3CN7L@ZEE|I2po-GYiwBuBg5~zvbeTDYDZhQy zWPY}QRKE{Ne}FCq1;Dap00qGxJ=tRspbUDB-yg);Tia6o0o`~Ml|5onb%3B`b7kOM zv$VWgxA1GuSwc*p<-l5@KkCs%ki%`XZTzusrXgi9g*kI2E=syR)JkhFnCRC2+WID- zEN;2D*^^G0@hZ`km7tlA7&)@J;olpioY{5JDV6Gu!l0^Q-5_m6{ReYE&iz~anU-(m zmw?;z0w?u`VB7i%z_ta|G3rzd2d~|CpV3)bDyC=e05Ub0z+h-1YCY$2CLA-7s6V%6 zNS60+>Bi`?>NQbdyr^9sx12Q-aU3Bd^Jv1rZKyMx^sP96J178?D2_`UI?AfORudhDb-$ zU6%zl+;pvJCXS{VI0k{&aZU=J;<5Ca>KYuAcgvrQ^9{q^#6NiUTqY?*q+~&0N(Lx5 ztWZL+ZCQvJ7|VscPb)Lhv-bul^EZmS&0@3@a=9A_O1)!L%w*0cE#HSnon>u9DeMC; z|I;Tw)C7V%1%&bkPE17g@ZW@%C(@9tQel-C_%Mh9ALgombRs4Gilt>g! 
zQgC9fc3wTdsaUdD$4*Tuc_(nc7Uv2gi)2>0PC-)#RtnH+OE4(Ka`q^pLEWKwn%2nLsi?BHS?y zTq7Zi5^0(weQ?Z>b!ow8qa+Y@O;^Z$YTFHH$ZrbBVbM!?ePx-8>l)u(1AOzzo_F>D z7@)1k;BCixJ9%>NaiQJ+n9AVhn_IRopdDy74j_VlyJZnNfk+xCIw3$GBuf1eslRlO z1SH1BzHFKB_En3Xe}PgFAZBen>xNTczZPGrNgd_1rT=}bnpxIrKlX`Dy{gpR!N+c8 z;{U*cdvyRi?HFmywwi-=<81vJipmkut}Z;=LX$Gl>;9aEDs+nQk!l4+#*>z!S-`_NA$ecoswUnU>^Zr?-}9-cexX6W zmhVz)_5Db9-ZAM-`1;P3&#ZsAuZaN4Z%WQAQReeeOqQvuq&_D)QI5+i%uP1hSzq!G z-bqYZ;K;v&y_a=4MlVR-V%*VO*qr3(9?_y3UAXa(R$+Ghh7Y%jHI5Ay8hQqzA@0Wu zp&;C~^TOA!Rz7lC$?)aaz2f-P9ry)MwwLM&3w86Y5(DYSoKM&NfC#x6Crz@p2G?WH zdmC%H&A~xm?ar`zWfx_@&fQlz_C@m1`*jJsv(SK%ZY%Qob)4?w`r?iQRis`9D;C%A zamQgC(QPP-(ex8ir6MJBwi z0E}qbD7g7Wr6fPCI|SL3=+>517uH7jD@)eRlx=@D&unN$+vn2s6>E~` zRLtpu4vgmWrU+ydyE@;!WHa7?p}Ol6aD$C6j*jZzrzRoOZjgow2!|DEsEVu^u4D!Q z|6}X2N&*OjV;vaW-fn!f7)+Zp(gXRB{BPia&sGc+8nv6iCUx>LAL7Q5imYsi`YHbn zfMR+Oc>JYDZ&YMS=m`m{Ke|TTNppZW2y`?Nt*m>0_qf>J@Rbjz-c*rHW8puAdHpV0 zZu5wc1HPUL(hnFyL&LMatA^JbI~act-HJhnV(!f=+?tHjB6C6=5<@D+*lAoHDaF1b zCDC~t_`sSE4aN0^pB}II84Jz&M6=K{#sQ7KS+wKe0#TV=M}5lIIl;rXaUk`$Qy??R zvfh$f^Iof3zo`fXRQIq+*6*Ke14+qtejQ95$(1`}x&6glymB;qv*~;NCN?Yd5wm;Y z5X@V*KWYZL@9fQPJd&%*3Dw^}gi$5(kD^D(%SUX9qin|a%x7}7kIt$GeC@hdRxw%j zi&WD;O`XYxtp}Qu?Y2Skh>r50dA$L}crOLU>X@p3+t>GY#`rEoUk};~V(UJmTXgwmeY|<$T&QXeLV1%+) zw|_bDIkPNLvNkWh@we>2g=->oKX|d!S`6+@Keus@$ccMDm2MRcpF^0l?BFGZ?W(M2 z7o2e;JXX~d>V;F+EXSTp(5otI-J>HUM6P@S_Vm&lGVd9Jx*JrUmxT!c=#-aoje{gW zaCpVWiju~px8ms9o((6ZBa=S3HRvAU-MF*BUTsJT`c7A(RQ(dNwA|(;|Ii-y^zQ>r z#X*~Q7~CYZ`Ex}^O)DD&XG~-6QghpyJxjQDenv{2kFOU-1=$A2Znf6Uygp;F@5KGP zc>xo^ma(0>Cb!o{TyB`=r)0SOo%E6=BX8dMGKr8}1bb=Sp=TTVV*Z_7iCcX0-z)E3 zQvcxaz7nNd$41g9y~vHWS8eTwGR>mnO_ajKNZFvQ ztj;gwmDf>}7SK(XZ%_B=TAWCVHT!ToTjAsS9R1?^%GyLbg)4r2_Ic86p{kE?O>>Jb zKqUoes}|I~U~LO+jRTreb{MxhME{=T#5r!j-x#=r^z51z?{c+-uk09DDM3%FB9*2u z)t(}@Oi(IYr(~*~1LmG}W}r%QV30X4` zfs6>Q_P(mv++fX1>MDD{yTT6r-jR5#BFeTxort~K`K(SMci(s8MuFd-TQ|5j!v?T-#GhtX1X5`tp2n)%~K^peH~S`v>21=H%veU^P%aRWMQFS zu?7vuYF(up+t<@ycgE|4EkB~9QWLK`y+fE%4|OZita7Ekh9U>=o5xxQiibx?M5f$^ 
z&1N#WVNNcZO|0kLi0Q-c%{i&7=mn#kPXB}q=>p7jgYuU{f-1wqz z>25en?~2v-DYi<!-6rJm5C@4l9{3zXGh#5qO!cF zkUqFXS`^s@QCHbh+26E@o(23qC}wE_X`m%|yz-CR0suPxJrR>Yp9ej=v^I*^`+(=R zT>R8t^*9OV2?Pv>EhuqO1))VEnxa>gQcEMg&NcY}X_s&jsvYz}V1&^FXazdPLS=R zP9}xeL=$a2XXm9jhK7MylqC%ta2Uf-4b_aYw;FlR*BRR9K6f=d3(SsG?Dxk?n-qE_ z;Xs^^k`m{QwlAQd`%81>-CV)}++u|YR@P8o(zCj$y>pK5R@8Iv=U*IO67h8>@L$q> zl4T7P>ey!D1c?7TKvX}j!NQSH8ZGo%KJmXOvQKrZ_t5A_)E^HPi+>_C$`FU9cvqK5 zWRljl0-BnzuldtyXgF#}^w`7bb=bo~G?4bRqe0{5>xg&$c~5@;kH0QRjJ%5*F(qEN zH+0O|p5S{of&DWwlF))?hW6*B#--Z1N-pMiymXK1`Sm0Qq|qNb(jfj&#cKJYjm!yF zC_wtrpKLU=(05-}Uqi+T>DW_`YAvC_s6~!rw7B@MW1UB6TLCK{ z7@xFul4FwCV;L2l;+OK-El2MXnG34$?v~d!{_#pl&CV^CjX#43k^zG%AnBiYc})+1 zB$J`g88p^t&Y{C@l8fJJ)CVw_QvRM}Ysh_r$a0F8(>nJ9pAO;ieL{pLaJz;>J?a&! z_MwuZ&S=GPJVAj{_upr{2wI{GJy^#X2v<7RIJBGxRT)iDl^t4qsuBvCK6>MZwcf0I zj4=N`&G9!K!FHv1M8%R0L+~T~_5Zf3^9U60526F_z*?ZKr^&j4KDMaH=TcDH(WQ8j z=YbT(52xT@O7iCIYUO8ATT*gvufNbh(gptyMJ^@lgsugr=SHO&IufG(L|{Uw)Gty) zXW=}q47CDL^Ej4f-?0-v<&6K}e`@UZKAbyYI{4aU!t1+q%1)88?Q(oi!_u`cr-c~5 zehNFutJh_CmfUL(n*MsF>=TbBdz*L1{J@k|z7HK6=71dUxqb=dOWNGNbQ>*9R?ZCr z^|s}Xi~hiYIavevi}2VjW`wg+n}V2Q`{E`W_sMDvs_S+y5)<#G@05%R`)j+npg6bT zd!HKj2%+-{Qj}8IKHT@?_vfqd2#Mb7V>|Unbg4K1R}+m}kqoVIdkVru#CBd-#?KHP z2oDH7&utag*id$1`UM<=1DRrBUx%B#MlFmreB$!}Z9VDH=N24V4SghU zeC=Sv2Tl#?98FU`OC>orm0VzdPfKM}9-{6OsoUc-b7|yD$FN+qk4kCVe8+n2x{Cnv zr)A#5)5h%;Zw31xs*pRXpCG?`3y&GtWOK?)S*-B;p&&>js>)z2^DZ196t7I_8qxt* zl@Q(Pv!X*gP-Zx3FWQeD$5DZ z>(TEtM(IGA(Igh6?{wkr!S37l=UhF=2@ssbzyqY)(}cc}yYKM6RK32?+LIKI$O-Jj zBf9nQh?#tGoUFPSo|EddU4cy_GRpG22&+ziXo**%eFoM9_GscvP%((cxTtnD^RVXS=EZrRS8v`l%0-!V`4@|)cT7HPyMB9kA zQo=5`vA)s^j6>EDfeN2+5o2B86E3LWXaxokhdvrQQGLdzd0EZPuchP-M~+_7Bd#XO zF4Hdc#$7ljwY9g9-tXVAQ*-0CfG8VkW+>q}UoQ8JrWjP3i&67#fds0qX#^ZFH}5Ri zOKDsM=i@PZayCJ>SX7kYr+P<3Xw*@1{0+T={R9+}Bu88!kUdR~y@ITo!XK?@t3ug- z05FbAD{LBKj#wUdZ@Hz=-W%8`nUb0sRUDc>ZX#wj)Zf-b?u?dH5sRT)>=vIJp?)Fa{rksZ(Na(Aa6i|| z_h8W`@dp;aChkbE5WpZHwVvQX6!;TJ=K#2{9X_43*Dza(O(}0Hu(rzm6TU=33`1@E 
z5)DLD$fBIJyIE*HQGoPZHI%gz9vDhU^ITPPG0YgWyQVus&@qU?{B zk4KFR@h1S-lM0W0!ytx4@jGTTF1|B-(~kRuEX=at&LEBu4l#Rfd(Ab4Mqn0N8e(RH zjIIwRO=188VU{I?{M;k6_Cs6#^lw<8w2uFzor|jyYt03wK`D)b(b-1x617m(uXP!z zRnm`4nd~u-Pp0UZ5TF$O7rxA%39`p}G+!XJp~73O;-YLw?8Wv+LgaS{`#vD;Vl?C) zx3?$%;hPVx1`fQR%c}Y73sOfYZtoMdH9UOKJxnG1)r#dr>mo$-jz&c?y_TwX$8Ezv z#hkUJ1wZptpmOg#?RDncwa1R)d*yoM3%yvDs}xBbV)+0nL@@Z-j@I8R7M&Xa>B3A= zCWo961n5Mt#@Bi#ctAKILtJXZl$B2{3k!PM^~y0!*Viyq7<`~wOTJF#o)IsuKPD{x zNEU8ME#K+qd;kO3C>=kU{@_On(j)SB;=f|YHN2)7UL=1IiChgT!oGc9TE(}WM7`5A zBC{>iH*DMI6X?z^OO!kilq;p?uL{-Ocl^^=2f{m6mTpHsCr2oqiBxk& zTc-X~C$<#~F4djI#`>U@fH#{1qmDNu)|6Sr@%j_06T;$ws}_2vC#xq<7WOVYkwMC| z^kfKq!1QG8n?4w55K0k$?32G}`4sV@dB;(*I@hjaJV05rmdsD7=&4^-!OL`o)pJk7 zDVBQibJx8tfl-%bBT&^1OQvsXfkeBz(V--VU7aS9OjDkg2DS+X7~pqUJpJ}f9Y&B~ z5IDUxr%Q8vf+hoVdWhFASa~P*lFrUp_lRUvx9$?A)7{e-1~ThZ@?^9pIke5b4lCOHC$3xECo8G%b_lsDP>SZjGXYi?7TO-$ z7QOH%hL@xSjq(xHqs+*{`9?FeYpaz^7S!9lUhkKm_c(4`*h;bmZ2zyd)$IUS14+SB z501D&iqHq7>7ea9_3eR$9psGcr**(3Olw#Z)EMaufDsFdcj9CNxMVqHk&(z~M_Qpu zUp#t_aUCV0pSuv4}Bli3&a10|t_o2L&@N4wSkLPa_-(&QBD)I~a9n;7lt>14mr!9+B`J3E3>6 zpX)Ibb0`G^68m{Mu0Q@>6}Qu?<3;NJSjhNh;`<|#)r5p;@Qkj*rLjj~WKYw9sa3U? 
zGq1=A5FUE2_G|`s|KCxg+~2m8%R=yVa;8YkCo9fdktoBK3oUPG!xQGm3*k=V6Zijd zZR4q;X*-QBlY(%yqknr!HPl6B{Z!dQ`>V1mCw7+vFhtoO&1hpE1iIzMNr}{CI%$sK zZ)uv(g)yBYnf!u9ZwrB1$T=Q`qx8Yu>CP8l?Ujs5R#iD(06Fn~0w)Yr637SDACj;F zT0SUE3U6AA{j|m36I3o7rNr-rT}J#i-O!QHj+6s2c;#?0bOTyMR8oIWN-Z3uRe1H> zGN#CVab}&%jde_6F>3u03PZ{8NNf4-mD<`D@yFTc)Ay9VLr>_*Dsw~81?5eTNfa2h zScCgEzLJi64ThyxJAC518|&@^Od#AoY&`V#1Z+pR9R-@Y3Y#*U0ba*tpgXFZ*k8CU za31d7vs)67cvV@feG(%fu}0O>*B@!0R#V3fDr=$OP0z+b`~31#3deM~J&|!4Q)NA9LgPa9JK^ zOT)+^_;cr~!c1b_huj%1C%zV9f``Dl%2)UNKWVoiPD*T5{Z~~}p4QR}Zi%V80*Do{ z$%Yv3U=vg|q-3sxgodgM&ULXHfJiW{{q0Z1`LH!;5j(cUjgnO#TOz-rC+h(_L} zv~;0>@3nm4PJlUZYcEtaqYQ$d?nOjc+kfsoxJLm|6of{MQ8N1DAzYTeZoVMz3qIB3W-VtFfdNW!- zNgUIHGYPRG#gqwV^Q~_&un*Pf`TRxsXV$^Z2Z~c$4_^yt5DdIgRVStpyQ}&LO4H7?P37An9W_B@gM16T1e8J(i}o~@@-M9$l&ZU5*-AebS}BF6 zbZp*4nqsutPFYL}@^uhuKzRg-ejTE3n9HAd{uYq6+Oc8QBws7MjbEs^Lw_(B5fyje zZt0fGKWlNiKCsUr&FPcB7)dUvP>2=2nqAQ_&6H0V|XIeSnY*w@<+ zZ{$OthFg);_w>iHTi6AFmOv|Hs%k=|2m4x0z!_oa`>@V&^ZUsDp@h@~VjD+QPK=d= zQesTVQWeeWI_D0A+ezv@LFQ>wHbs?`tM>ssl?*BGU51+8G-AsZ70($;9lsPeNlJZ~ zV_Mo+ni5vY=#a?u+@e<)U$p@n?A1a|kp5swhEP!=@_j9=ESr8&BBV>Q3V8y8yXP|G zcS(-@WPaBC!jp##g>%xDB}ZStBY5!OwxFerI%RQS6}A)2^P4w_4(fSUL?BE4Gkn`5 zQ3$kwz+*);uctfydWc*V?L6=XaJvHniHc?=0USc^0Q~N-2l7>r6TBkgZ**b~6wC%? 
zao7MFNzfnSHj+N*)S67;(EwVAZ=QJQD91LEmYPYE1cU_&hTcum+q06B6^HB|&Ft`J zcoIE3dR5<|5MjRO^<%qg09wtSbOv*{Uf;_b9#PuqYoO=5&tZbBKsP>9H16;|Nxl+{ zIy;c(TP$Wl0w{7VCy9SSZTN8fThzGp>8_mCvil*zemeI(wjP)zjP@L2Cb|K0_K;_L z3#$C|%@IF|+tGg1dWZ1-q^Tm$_()?MI;#QI6xO2S5jn5YK+88VsaQ(FZ%TL+P3@C5 zwa3MAgREyILlcuB^!I$v>2FrT=bqRNOZ=o%9teU_5dC5+ux)QMOniIx`KsXtbO27} znksB!D6mM*LVV+QG1}&iEE%d>ryDNhM>i?tzx@~8k_1|=DY_8a_UGaU5H7wKQp%T4 zcc*wM!3M2RtU>H})OoBq5I*M*iPpDJQTB;RY8oNvzIriPMDe;b_ik^+3kuU7_~D<_ z`z7tFf08f~)4pm-c#tL_)`6125v*ZD;ksQ|pWO=A3U#V&-JO}ZuZ|Lp#l+Bj7Hb=+ zflx$6Wi=c|BMxRDsbM{|e}@M?a>Rx5T0`+V%7oW+@1?$wGiIrr*qkl;n}DVI<9U)e zFFOAP|K|^hB%=nbmEp??sZ1NwUh?$bSrPU6zs!mN(B|oNCdARb;Soqe>>WboQwS1u zr2-}BR;3Fz@rjSUbS>HtkoQR0_#^@NSd@gW>qvG<)x?!W!-eF_h~k-&WxCLt0XH~b z1WS~QsUe@`9e%KrpnH1!bH3oID4Z^=ef1L?P8PP4wy7UnO1Lg6Y=)xvX{jIx05tlsBZFiIYbz<)yd+ZnYvwC1NFU@_aZ)ru z16?(_RM%Eo+@ljDul$#}KS0EQ9MG~{oX-?2DtMx!h#!bHX`oa^gM?h4fd5de2*qEb zA7?r-WFVu906L3nzb#t}+GgA7plRd&cgpg-OygN7Uj+txGtF$swr=pY8zkL%{QaA->hp;L(U$8bDmXQbfeyC z<#Li7_T?v(eGhp027kQkvnkTxM)d7Cax_6~Ml)WgPxhE(DeQ;DXROcONo>5YF9{yr za(PJ4CQoz-(QaHMrYN5dvnCPRNvHUM-A2zFkhzg!FkbG4uE`* zA*pw!jw^BxJg`V-3dt8Mq#7sxZd2e6?~cFtynuu(;RPP!=!A}?SD}mE5STksURq6> zlbdqUBC@_rFnkBu9>fqZzDq|Ys|I0}z%_oU2V6=}XE>drX;He{A`Oumk*OxP1w2Jz zNK5s%Z@a${`dU9}%Oa({3+ffRx%%Y4Nn8+XA5$(hj$BBn4~EaYJHcsm&QOKiW6<4F z@i+YVsUO%b#S*hQq*Jn-ymZ%aY|#}IXh})>CWasJ`VAblBwXnS*MEJ2@|!jb0*{Xs zok)@x*@aee_;Ni?6{M8TvokbiP9ZK8qMyBL!Z#A|P5Xvig_gvr?m5QZI-Ii*`RamO z4F^u~In!;?gNKm5Y3o_k`4q(GLidI{E3cdJBI zp>_Ktqlokb!g2yrEJyu8d$Q0V@p~zG2fk&H5L>%>$bp$ght+Ve{oiZLou($wMLw8l z<|$3#3m=FQo+!iVZmGVhZ>m?(nqKjl#vY{U;#<~;8k8i4ObIW+<%pwT)DoN4b`nN_ye?Cz1+sQv)lkYVK12e0i%b~=);~% zc~GGd=m1Cy=27-lopK(s@(Begsx;~`nm(krg%hgg%>*Qc%^_O74F)hK&c z-|ChBOsH5gVn8lk`v07uP)}GFiRq#752qp+{?SwZ8K9X62QrC8(Y_|zpaOaB zy$OuKuOFx@FBISNuPAbUO_6HNj-XuXh^}yJxs~zjP1Z1gT|rit z!Y+l%Vw4ac#73=pV6mUGhd|Bul!U}( zW8IOUrkl4S;NR*6rrtUsaobp`h?ga9v;Jho;421{)P!Zzr}Hw2r=%vHu3;>gqzWLRL3Pc=u_7yUS-S~#X)g8u;+a3m?3=#92qL|9WdOoB=d 
z*Uw}z;4a|A;c}?J{n&L3)Xz_XGYR9p;ECHxfAv$$Qt}lDSP?|Uj@s8>=5}P9wR=2~ zWTP&YRW7j$*OsV%c~ya^#rICs@}*8FDn6``jv;K@a1Sw*L}Ey2FLX&tGv;g0mCM}^ z-t7l@6-^~^(0{$A?JEfSqp!R$0nph0U{4*DX$>?VL{YcxD%fnO?L5KhZBtOAl4pix zDA0c>lU2wMuiF6%FSH&?>D+W z=blmOYKqk5T4VmPN_%lbjs>?pPR^ApV(hdtFmQxw*l_A~h;HTSq*#ndr=!bunFJ>=121J4jlRz_?QyA{316~@FoU5CBQuveQ&`DB-q5u@B(>fv7pMJ`;{g<07dc* zr{kE_P?Y^TxvZOWr^`lJTf(>Z%`LOw1jUv?58EAq($*T>{<6XCUn+hL^`lGOf_hK; zML=A5_75btcWI$th$Uea@KtE9?JeJbn9~tdUfa!3+0xTqLHIi#yo&POVn(g@7lGW zpCDA0WU_#h_=TnTETgf4z{r!RG!`o81!0h3jE*=3XjPtc$^7WX%`!E;*VO*mOyqXd zWK%-z5iY^4kr4+?n|%>9lP>$14?tEd;$IO|{4@AiIhgMIM&um+58aviNp@eBv~M~` zul!fK52QmxaokZ3XTkHIx*LV~dn&;QeITv23?QYk+q+%8smg2$hyjTuu`PYAy)%J_fbmBOHerVp-N&wb^Y{#Bb)i`Si zk^9+8M$zm?lTC22%;`733B|k6F;8p5FC6HKX>xcSqgeBSrgRkOckO3mv| ziz=0_CPhb{ck0m&{}vFi;faiQZZ#Tmfm;s03wOo7M7xko*?+@m$>s8Xv{$l^S$IOisIuNqoYN>$;i62FYaKnX`GlK&q!W|)PT>f zT7o>3tM}{89lG_3ZbM=#+GG@Y{LLnK=3yd`pjPdZtfv*PFIZoQv<6}3C+XC5+ldLQ zl^we-U)r)b^5E*%9Y(Hu+h(}n*RbF!INojuYqXbkbUm=E%B{I%v1lfCv#+D+@rHJ- znl<@{&Adb<)UN=bNbDL|r~r4ZLMFcYIVBeu6X!Xr&Iw+_vrtbVL*uzed+ ziKcX0-Swo2{c@|ur=w!fCMpsqn>Tw&>lZ>VkyNQ3vlYqm+oiS-sphwcgOH>-?A!@{ zmI#yz&E~0Fs1cFU%>-v4@Q{@vx|jaQBiI+;M@$msCC}uV4HP1$y+p8z;|4C5>+&V0 z(7yWRH01KPq}i8e3=3AWT`8j*VlaEG-jO4*!$m|k8yeou%hTh&lH#7TO01dnsj@P> z<6HFFiSP8!2cCY{<=%2n>-VCIqFMPHjw|7JsGb>(jsH5H9GizG$3Cl5?h0L;W-CP5Uc~`~L5O&nJ zt_vc0w5Sa%Y~wlj;UBcW%{q+O=b4d|SF4jKynNIF8APXX6HTY)z=N#cbg{(6%XNA% zkXO`EFU_pFPS=qb3~r0DILQI$&-1XQ6O9&iWOY_&n!O}KG~3Vv=Y0;$6%=@)sMjql z&dpjseBlUj`CgeP2xz8*ZAa*z{q|-v(S*4fkQGZ&Oun-G!}2uZ|BCZhkBx zS6!vSOW1R7TVB=EXV3CZ3>LA^n&Mdf{e$&ZBT1>ehhGYc^GRdWDr= zL55^?^Fi3z`9VsDR>+UGam;o0ykk59Um&x04DgYv z5S8^d%HA|EvJH1))Nv)~>hyJk^XrS#W{mRR-#Z^ekeK_Z#(=0o93}|j{F-ZYKTP%z zX%cl>o4??eK1GkmL%v5v*dp)fELif+&qpZ_L9UwPB{GQQ2NOY%Sv;M99yom*@$DeW zze;?aFH&&;1|2@^bnUTqPlm2wp9(o*0)}}~M0UHXriqEi`wtOF-~xmJ+I&hyqmcX~ z?=xp=(~l2qZ#>JmZ5Habg~eipjgQ@lly(WIEjDRFHn{ZnkNqGDmk zU=|6q&Whf41?J;_PqEe;M*h7f)t~OiE?j6_k!-)nlf}ghnVU_^BT~D>jS<|9nCe3k 
z`F^_NU3ZKr`i1rH=v0Mq-DX#)SNO;^=@{~|)=5~w6J}32Y7ylIytKw+vQxMg7$)Lh zBqjWf=h0`-Kx{jmrW3{&jAr+aeVXji2Y{i>@$V_f#0Jb1ut_pOhIXnKnwUqJ4aXre zXyEJTfK8lZ*2Eb)dI`K!j4g^q^6zYBYUNYvFIIMM^;hM@zOIFNV~4|J#iK5?Rjl|eeCFyc876$gP~zbDO~pPrk@*KXy~_s0$WymQb~nA7mT=5L2PXbyRW6*MCqioCs=f%n*^8#`H{fYz*nVG60-Pe5nk5` ztAzUTgv}&vqQ%AKnFA>t&t*gO(7)=iOrfkKQce(CccE(w)e;O*R!22o|!OuhF zdaYV}wBBwo0OK1|lhVP&C=u%xhDaFVg;-g;<5|&0Gm`t!m&_}b7`=XJeG^gwZvKSO z3+@?>0@*SOH99tshFm!5Il_jfLUc`;_(Z5w122OX+feR$!ii9`JYo=S(Vd~FJYwti zz>0R|&tHjgcjQRyZhDpyl~@A3Y~6{=jGQ1eNol+r8$$Q86csTFZ;klsKome%ll{J>@c}buK0PCWm}t~gEcB` z&)q(?l%292N8(LJxY4z$HCd{$dv|z-d8O&lX%!@Se{iq94fAr~)PGVY>AZJibn@Xp z92}C*U67sCom%BEXLMBtg{mUIevHFceJG1SrCnbw&RE(^QmdHr=6_7upm!DSSkN)} zJH;Q8unO!Okmxv_?B%;69G`iy}X3+8VApk|4xjxH8AfEVl=`$*w1yOhmH30?lrwPc4+N&n@Z6uih2 z`{{7F9=WGgwm|Ng?gaqHQPrK9;yh$mr$h_Ln{JJ~&4tj==+$21Yr=!9@}yAs)x5We zE?81%c4^%>{){&NE;*qs z@bC(}Oj^Y!A74uANUG>o-8NM$YLr@Z*vr1Yb6Ix4B2C|ibs9;GQM?oqVqRBAA~~F$ zVVWSd8HhxfXv;1pGz9xA^H9#G!wtu7fQzsw(1V zq^GFjPe@PESqO!!DRD*Omo{zSOwgl3Ic(z#+3v*)N5R6&DYA=j4%eE2-3qBizYGRy zrW?No{B^2jMCuGTEIlHm57YKriGycWk4})BIaDr98u!D*5PERf4+(^Vw;M2(Ql?tJ z3g=n)Aw0gx2GQOw&6_HBb>>4BSgxw61(DK4X-n9)fMnnI&CTaed$0+7{CK5X`Iyj` ze~+8@aQCADCt0S};@h`xf0muSdGVxz(9Wl!)pnsBm)*OXLT}xDn9^ZYrr)qxWOcYH|LlIC7l z_$J4dKaMW7?TZOoxeN2!Htyf(*2?m0sbYlaQtO>A=8%O#<K`FHq`|g6WywJ{A(W0>JA5sGD9%l&J7W#jRY60Zd!$%(7!7NwTLd?VTiwpr%Po zyK?@hGGa5un4b6{<*4zg8FPl0Xj1JVbUuIVa(BR%?h#l$al^znY3*}bTA^JJQi${UQGwlm~Y0 z{}?>|pjx!kdf|+kI67i1ka>$AF&4T{2gVGF1{O*$kegAA0m1QyF_3Qk`()<_A&f3( zwf5?3?S2|NML;)`zc}FW zt;_N5+&9qfjG%=mw9202Xj(Hw;2EG@}{9wB~E52%JAoD{K#~{xF?v-7n4{k0K zV{B77^7$p3$7R<=CDmixqMvH8+>9BTF)=aia=cGz@sf_MUro5B!Wv(j%^qpcUT5>T z2|_2HBN^{O(<+z(V!0fi`+H!Hn5)>ex_lAB_(+%j@Sy(h-BH^xdeXB7&b|7bQn%dN zk`hTq_SEc6WI$VstOIJoLsTOi-Th%J&K%0n%7?A^c@dfNWGoSN{fm7*x*S=!49AZ% zWtAxfue^F3HKB~;9b#}Aw68thRKnB|p}zD%aMieushI<1#TY-;ijl~2p)Y=|XW+~3 z5lbzqepW``hes zzn&xz7|!bb^V!V#Uj7H8{9czL_gSuwjwRGnl)GFJBepsJ$ey%?p!x1`eaSe&Xp zDx;Eid1}I%DL7c*Cm#H~dyn8|)jj$r 
zhkUDu3n5%}Z~YAUxe*Tr#6owSQ2L;Y=gN3X~aZq}TYsp0OCd7l_7?L|zxk^h91?L8*;#dwh`)b$Xxv zQBZ}iK!+o0i|>``mwZ-aCP+RT?KBQ>hVlsiDLF{gMy%%)4|EQ1u{b9sgMH10xA=C1{+bgQg&|Kmsd6CB#u5^bzFG(DOMRmnaTG-!rhZF z{W9;st$&6#X)S3ife)46o1mykPXOv0u?K>2%f*Kj7cx+;XA}@MSCD>IRwU3p z7fn6#xBAG)TkAEY+I{Nk(%Q1E_3WliD4Z9a3^W=LYrD+_t&^Rd_z6ky4w>{yMltq@ z>Oe}s%W1V6-RW>`GOyBs9k-asuMi2UfnUZ??j44}qKDa$K|cKxh9-nlbBFknMKa2O%#Ql+ znW*q&X^FEMsqAgRLK8A10a`f9XU7FoCM(sp_MnXlJA>9pe)C(@!@oi(o` zFz6?a>@l3}xcV^Wpr6YUvGf1UzS7rs#iW7qWXMkoJueasJ9TvH`crBw;|M%law~gDt zf!l2sl5h-zl^x7q6y^k1{5fgs@HpNaNdHY2hOGK?u8K-nQbVjbXIN8Q&hn(H5X0i5 zcB1g=uauCT!0+Fd+&Q54`tR`PMxo-t5mFkR6@TS8tnXS%^X-GS54_`e*Yml66j16FTLeVmZ3(Ii)DhV7sNBEW5pfr zm%c~LIPr`tVT`iz@r%Qu^NQX$8`DRqDU1bDJX;cYMP)@mzn!@{C8G@aqXl#_MkOkD z=#e8S_6pzm8p($ukh^0mY;A}sT!}W`vXC++fX`zjxW}^ zk?~5e8=YE_vDJ(e-D$v>Ggh-*yq3*M`;QS4f>(Z`3aSj(L5PkL)ZQG0^_6A4hO8!X zK5(cEP7KGR`}~66gtR>UWOS8NFZ!|K?ikg;%N-QNY=%%sr|iPxi~L@F`*v|tp`o%+ zbIGC3KKHA}E?g&}^(qFnJ^Ed0kk{VT58LXqQ3P#=f0qLt$ z0?)89=C#|FOR7(n0}M(G%7pJ3qio$K{9MipfOSn*BY)KL)T@>5Bo+5U3LNT1N)=t~jiK$Hq?b+z{MoD}Of8HLXGGcT%WG%l8VU%5NaAn|hFc!TSB|7@N=KF`H({JFwp+8P!f;#WX z9G0}gA(J}0!0Z>|Qsis6Q^)S)2@_r-lwa(&!u}UpWd&2WgL`ee_@&gP<4Y6BMNE41 zHzVe+b12TkusQb4lzKMe=47jX^SB%YA&UeUr?ps+kYD4R_1Nd5hn>JAZO^}QIfbE} zb?bPJM@PM|kPz)!w^{WY$>`8%eKg;q@D!)1!^mglnJkD!YMa&)epj+Bd0Juk_gj4Z_t4YL_`1#fX+khi|Sx*p}j;QCoQ*OI`_y5 zxJ7n}IUGbJN9BuvYd!M>PcnKTWYxoxkNDy=8d|f0~)x0{AOe5WDv`lkqj+wCVWesby zMPi)#5k-A7)rCrH$z09SBwm{aFQTqasJe>$L=aOqaHWh^LjHjgJZ$^0j_Iq zku+;x;j&k)DO-~pJ(_qqfE?#-{dW9?0YQohP|v4{*U@$$Vz2sfm}t=6MCon2(GjS= zjEv@G7GmBOx~9p(Eg$_d$F@(cn4P%r(P{H?hxP&QmNIC@g6;TeXn zXhb{=SX9I+ze#!doO89ip^V?OYyFF9o6hPY(Wy;7RK@=kY_=agRFE>cR>)-aR zh@`GiQHDLEKaey=<%O3P!?662Ql^j=AM4|HsOE{FvYWUOnA1yZvsA(D2QpnvzmCLu z#nW4pW$VW4!1mug-E_csq0(D3)|{zMs0y(=>Q&5FE!((G@8YO0Z!A!9@N?qygD9;0idrfKx+vmG#30CH0(r34XE$Lx+4 z?d3T+9>a9b-ae_H&?@K~AvhN-D}F{37S*%rF+w^{)L~d#U5%+QIRj(k*zQS-6cqHG zR!`uEk0RtsZDGQpnm;<$%zu(SYA=K1m~|)9?f!i--IH>8xz<1J9ft!|bTV3Je|R~q 
zw|3oua{Zh%)<&?3+0C@j0UPJiUMF3FKz>QRpWm3ZyD@5!1WbqtrjdsDM7=OkDz~X2 zk)MdPmcpDUYJ^oi8Ad9D;{j>gPt-6~8Bzz|&!c_9`pu+*$8-FmKLRO-GJ%teR0;6V zU;05h2Qn6DG(ehTXA#O}ToY&(VtZ5#@D{jYYpjQgIdg{5w#)7*L7J@H!=p#Xrf|PI zQ%UQ{XVQ=zGsmVW8`Ds1o{m)93($%0)1{7(sh#`GJBzwJt~ z+(yuIPojEmq69TQYE5P_!9jRxNJ0ap1npiR3xM4og?FEcUu5BcVkb{WE+<6o)*4CY zRaH65#NNpory4T83Nw5j-+Jt1Zro!u&$~nvH|}PeN8FsdmCU2%Rw|mckcv)NBcLU*Uf+rXQAIAwMTHW?{xkbD<$j=cj{?N%mD`u;aa~1}cP0DUAn|p3}TN_41I7#7( ztVHhUx8>FRlessL@nkhWl7n&q#^B+T1nf&>b~G)z>uJl$+$t0#=$`vWj|xDhp7we7Wzz_On-5-=VV=asF8>#i=pDP2o7VX}t$H%M)uV1< zD6g|>iN%}SV|V>Nx+P-_&L>*mWO0t ze)u-lG)o(MRscWUQ0F8JIJpu}+6R)7YI4KVp5(l|W$UQXx5bX^ph?#Up5FrQ_1WaP z2;=BCK1**meRJ&RZR;Mzi0=4V>){gy2mpAcYt-fmkApN~r@~CLaMh0N$>t@_<>IBB zrQO)M)?adI7_-j3)>9wzXL9(b$qaYxcgNx|pyI*>yvwoDckq^!sA~4pM9&Pp)%XF4 zYMX&+L5{O;pQ!Q0;X-%GV72s{ET)(R%VT7eRF`4A&DE|ZMpte~*X4cBFD^^{p2p+w z-n_LjMPZS5?gmNdT4aQgwADd?GNkJY?p}CrpVjv-*#&Y zwALrPu5lNiYZyEyp2H%WVcfiLFg?!t#vS$0^=co5<7e*0MuCcP(!`0c+M25F1S=jm zaYW;EriDa(O6$8(pIysRXclWj96LnuD*b7fRb~AZR)Jw5OZ4x(%`_#SJKuF0zqMzI z@UpD$%X~|jI+){*^*}oOGX+W#oR#MFC$D6b#Y^dN9z0qnQGdtJ8eyCuLalj6D)nqG zxlU{8_zP=*c*tmFRL1I1*ZPO>VA*JdH8zYyC5q6IbJ7m>m+vke;Yh_<9bTxEol@G6 z^0SClwRun39SF^$q!^!dCC<1$;<-)NvW@pVsu9-SFru3wEn{6%)$e55cTUN}AwjE4 z%m1Ote|?!|ZYd|lhE74IGe5}8quew2cHlUZZhY{lp~K{#T#-SZY|dkqhUALpDe~ud z+?FvKlfTN=4^9^63@ph^ImQ|O{`T{PrYif&V1-V#JmJnm>8`dKrMrqmoa8hbgzHc4 zZtO5)(ZhiQnL+dv`ogw>@6m}jxNFz0au0pn;QEM60ArWijA=9SX>}dgC02iOUKZ-u zu`Y_!e={Z>#x}doR$H~^mzv7%MQyAk{cxREmDxcT&XqO9$%TS%PwV_7=f+hi(g{}H zOx;Sc8tkR05%{A?M5WYkzm5oIT1M|*ks=Q68xToBzG3IS{8g#qVWlNv?ipoao^Be0 zT8x~C;xSY1GGl1~VerpPQQ6N;Uh#W=N7=5W6*i+aCZ9MemrOKbkq5ODi)J|OjF-?tOjnscsQ?C{j z79*R4o()UMMko{71ETW60&7_c8-!-dD7jV6?L zXy_azTcv}0*kX~LDy$1ggo*Y%DyJ36QwhVj09dJK)Jdbx)n+wajQUD4s!bg0X}1Rk zKdKY4o7J2$wT+>n)9ieFYGa2{fU5KT7Q2usYPXD2zpV?N(xC64HL2g~`PSj?bST%f zwrg`-MO|n>$1uy*)*)PsTLVA8OZ;Q!a;MxAt_B?uF=M)xy_EZLacLKJj0LTaR^O#n zSLz7Vd6H2Ge8%R|sZI-Rca!U%N)H{&yyw1)@3GU)cQ3R+1+$~hXT|)9@Az&xj;EV< 
z?i5V6PA+rOnLRcS%dEvUjpG`^OLpt{zAQEN(7Q#u;%!zG8Z%O=G_uaKe@iw%CeRem zy`5Rh*2)~Zpn8L|l9ARV9EqOGUoQ8;;(PS@5~7v{56(QfwAQ3-r80x~#dAN8VPZVEs>DluKEiW~XkS}?HtudK65}zwt zZrxJZ`fdG(F>UTz1?-6@Mlq9|QaxR<&bTUJM5s!{XqW!wXex)upddWUtzMY2u+_r? z8e_gj);;I1RB>ru1JhGoZj`h;Tf{o_=uV5Yq%E5F00@S!4v)nAglAAV5V=%yP#IfE zQbj}A_(Rvxph~>htGp@SMSijb>1j=m78>sa;Ij1SQO%W2Bc#&GD##d=nd?8ZhqZc! zj&zc%7f$wa{jK3s6*oO))chK!(8tL4azB!ambS7igg(>->y8Y}uESL7?$)o?|9b4h zvD)R^Po^o7^Kt7LZNFL9J4VkML8zzPoKTJEk&~BJn`v|e*4a8ZI6!r+e$m%r_{o)j zubMum?LpLjBW5I&^g73sYRI;q*DrY zXHDv$2?46x-_XZa&K7d)#P1*{%=fq|*}>)UQc;q4&os>PJ7?u`KFactI*H=GI;+7!uV6{6BeGMXC8KOYx zdf~LeGpPl;c2qgFf}w13i|M$PNKO8Nrp?@fy=D7EJKf0iWTBFlLRa1n-r;Iv3FA&Y#VS7bWT}xrS;u0PPuz} zQoK01?&2GP#5Aj)EOb^<4-GcL<%J0Z)felmFq8O0f?YUFi?tq;XW7ZMo=zFifWzHz z_Sm(YnK~09bE^gjMJZ=zXA@!aXBQm@hZu8nb3OajcmEG-Zyrx&{>Be$YHBJmt%lTT z(QYYAM5t-OP^MHOOA84JA+j9pl8RI$TZ<)?rL0*iNkxpkvV|h+vBbeSoagE_>dBm!Bc+^tEl^Tli)#e8YCy978hI`EanmE0rKWmX@p80oq(;=&3#lDOkQU+b zN8%AyaH(VBKOl;fHv(oDwSNrtAfCdJJlFq`V%#Ecm7LItY%Z+PBL*sJZwE9FJcR}~ z8ULfdUDNT{W{dqILecIKD!hAf79R`cs9Kw)1cdvBIwf;#tou~BwyL=#KYPs6jY1S5 zb*lCClF(B5%&sXF2acq*ueSP-pfUdf?Ii4L&_mRHu*?RURSb*LcZ<)543=YR-8l^b zj#U(D0&GN4$&MT6%*pL#WrcobE*h(T^77>dJWbeLL-zAL96^GFeLlG}np*T|2N~&Xe*+D#Exf$qS%NEFf|2omSvk(7Op z>;CXL&(#TnGWs5Qd{BSX+C0kfJc#9O2-e<1BWpLf9%thJ7`*jZWlc%GJ#t?$p*k~oT-+(mqR zn=@I}473k#dv9F0Fv;ru<4pkp0n1XCJ2||S6XuFA7AGM<%f6%P=bdU zpA7Y-pK0KiuMN7(a936HCI8y=VE6jT^Ii?If|+y*6*V9T6E>1Dc5a_m4ngTGKulEn zIgSRM5+BPTkzs5_8{Std*!!Yo3AKi7wY*?+T5WK%`Ig^msQ^cUHevrW!T_<~eD(wi z#*>7)I%`Dgx=+E7UHz+V%3gg4=8<8x|2HZgYb^`}j#B9^`3EutbaLJ23v7q5Yj9Mz zz{=IlJgNmj`Izjoc~e20ACVA{XWe{%KlULuRm<{|P1SFwVA-uc$%?~dzw<~mawM?- zP1IdtB$S^bo;Y1E|C+7XA9#AOe+o^vC;07P^9t21OC?hWt_JpcafZC|`n(;@4B0m{ z_LVOpXBxeRWA`@e7=2RgEPOi6$Q;H0c;fZt23LXaF(DAA28;L&l>6Qth{+Y#Pf)K+ zvMLGF;D0yab(4zjt=eHfN<{1N!w%?d zyerW4C%K+g55IZyr;Kg{1{eT~CSvG?k~fXOC^6ETxAMmEIXi7v8EakH3c4}~iYph%MHw%;+H_;L?KeoR~e&5D7TG5VRK0V{XVp*QQqiahgl}f2C*&jw3 z2MNPe0%_OZTeZ3#N>V}SCTb^7;*a~>DVOtpW1sPMBRdhOY+7#O>MfIce(Yyq1Uq0V 
zPhV}de0v+mjB)wy-BgAW!+o>JeDgCtK789=mQBCDKz{G18;1<{Unrll>-XQSV)rjO ze91IORxs(fK=FBSY({h)M@8k5WTrV76 z^6Xew(XO>GJ=dL>Dm|-{{cP2g?5YO=n4yFX+4SHB)s`nu@~w3n^EMf0n6V%0>u)w} zQnR)7Q&iHN>3Ie#5S*)PjUTo88iBCFL(p$CpBVj4?2wJN*sixPpv(;$d=K5o@dzJJ zcEdllCj^v_FOAcmYhh*O;I?-FJy!%V4;??w{?q6$t*u78Tv!^|GOjdy!ip_zAm$ss zNOPJ8pODl+PsMw0^0EgD9N+9P42~=)xa!;#>~)#L5uG&DYHS3bbwq_xnoV`$WTEcF zUrnsjyE2X3uG1pM+C&E)nPkjlxhaTS)vT>NL}&M?P7j)LU&zSDTOoWJa{%X+uBN8v zXUFsQU?S1ar_|SrtJW+oo+l5tlFb|dc%Lhz6ET)q)9tRTGZ+sI%)!rZi~EM#KMXRG z6pfHV-QKn~YDuVgul-`J_IF=aw|on|qsOx<^oRc+>ezvu3wNAG$?-W4&A zhL)L9Cf_1WsYm=mvNOYHwxnj|VQ@%M(5$Yu0ce#W~^#?;p)>B2xcH_B6jRd_`|z&?S5b* zl?9<$VSRnugB7n=d4wNxEG)VxOdP7NyDcK9{fq0C?-Dn--J6HZQm~8bitlXNRy~wo z-PcEAW}%VIQGtu)7s7h^_SJhywMY%uOW8O3_AD`%G)muH%Wjn4`RAWLFCE9nHqI<4 z$?@`+b3AexXY+sthY{iubNG!V{@la1HXr|F581(wmUSzP1KfIR!@IK+w`_?oE0tBM zvvifH+eT|%Djh5p+^Y188CH(vv+Ix8^p*|Uk&wrcdj8nK7YeeX-k z>uu~BO&-Kw{Y&#t@KRSA^9gN$^;xGZQ$e1tK@D*Ud7?&RReP_&r-ayUCpAWTcE!fU z_z33?zsUaJ;N+j_lq+P&u##5oW2iG`C)#kSeGDPJGhHZ$5+Y>L3v-G=A9 zHc#}7$bPwVww%4|D+}lY@earx*bFM~DLA|Z;v`R3w4=tXqwLM56?0;VW&i76l{dr2 zUfUMYvvBpVB?m%X^Sks*28t?O&Z!3#9-C8xJhSBeb(1uThliTL?k!(2u=0idp?l_k zX&bS)kYn{tS5e9L17kSc59$8lmvt)dyn7FLUmx0y_pJFYYh7}V+c}@|>P&1xwrjdk z=63FbK0W58wdE1LyQ!5e4;eB$bemcp7)$I6MLf(D&jOdTNj8s|ZC6;mbketp#9vw9~@aQB9D zIlt+OAJbRuA5;5k(tT!)9bEzzx1K`(Wz+U8lf2hhDA;;(0=9(y?z(K;2dn^zm9r!p z3rO=pTnmk{wf2-{BI9fEaS1)??AD^ah|6cbtI3sJwTA2)MdMS=l02EhujxRSEtOUG zO|nulwGxiGv*2O$o{3u{vtKpC%$jm1eE|uRDi+Gzi@3IfN-rIc*CF#P1!|xdWCvX! 
z$^7Je)fRG!cr_0J4bRhm&$Z0R_x3kZ)COJdQM2~bp!5Bn14G^5gLb!FOjM|fPA>RU zZ9?IU=9SbZ9rLjB&4iH4-o|Km^9q|t;?A2D6svXO+LUyay;h-hr-$Bkb!j@cMl%yl z^E8508N>TF?Me2taHWn+xz)#()j710T~=CBxtqP$V8xiAZu~tkl(mfE_03To*>bCA zr5TN7urb^rlEu@=+0zYc=ae5l{fRH)Yu0cDr#O@|5ZdbIAL--m`f1@{UvgHoudlCc zrp&v(m$MnIWxB!cPUC5F@e1)pJb!p%Hd>Se4Y51*UJKS&pNDqx>{&evt2t$Mh1bKo zf}i{CZcBi`V#8QfA7K-`E?BKsfBaJRnl2HK4UHIA+Xvpytxj%J$!$g8p_SE=M5k4S zGXhMcRnJ`(G}jxDe&V|MN)h`vT9e@PW-_Lr_dI^KC!slJi!Wkvw*h_&5=GqXdySy+ zMkA#_A;|voahu*2L`GBWaRjEU0I=1}=ay0mDmqS=yr%Kzi(egB)j4>gBYj{iKHKWF zCQGM&HwmL4OQ&gK51RIRR7P-ydX`nH<<;MBg9S$! z3NtAz?Au*wc5i94k61=;IcxB_P)$fTYg>DJk9|vNDT_ZRM^7VY9R6s)!BoNI)=tR^ z!9T9&_toJH$ej>A_>l2gtPG z)+s9fk>Kobl4d>K{IPXy8l)5lbDqz{=nbY0$2KY@Wdy;!$z{J1hxn)P+1v^i_Im8m z-+Za?)Tb?4itd6%CiIO5H*&X0(D8~dvK4WfDscfxfM>ZUc4DKS9KPrM2X?RzjeJjH zF3HCtqpiI`=HNvie-3-S%ID5&&4Mh)zAuTV%fSYWEt)niLifK z^w(cLs}H}s)zxL2Yx(BQn*_@W$)N%Ud#gw-30pqVO#Fo;W_oC)z^ObeDt$Wk?1v|I z;%?T{$}>7oxx840Q`<1ua-e$t6_m3**fh6YO>G(G%jGM?Azra0hcya=RQs?m6)}Zx zumr|OYOTEs=NN8u0R#p0bLp)re_5^Sm|mP(S%D0#4$*iKW>oZBN4Gi0N~@_P7#IXF z4p7!#jKAcYHZtVXUh)A+6I6Bmy{S+}Rr<9(-9F>pXJ=!a`{kR@R9RQDu=64Nvq{qz zDV1I!oV8JrO$o!Fv@a_S+cB$idmCt34>bKHEpspH#8eEw4i&A)bZfd6Jg2*XgV~*L z@3#B4DVb_4*`v-I$)=kveRT__4zqv3lCt1Of*u>S`s(nGYWOZTq|bP1LObFbBsHF| zKaL=+>R$*ynhD&#we5ZU^k<;3(?yOB%#sq&cMsImu>zrL7$*LkqHN)kH6#72;bPkq~Z zzK@F5nFA33ex=M0AI|T(Szo{3ja6A&&KRm;gflJ?j_-ya*FiukQ(%k7nyChhdk)yx zEO4{#+;EtjSzavh6~+;l9<2VuH|g*ib7Y~KZB0U>aaCbTx=7Z_BSa$F-cFvnr9m?f z3JX+_jYwoV?#YJid*F&*L|$o{0OMwN4ZbmF*lt|*ofm7zjx&i4 zD@eq2ip5=GgK{|Lhw6MQ-3I;!1I*aeEA|%Hl|rN1K`fFwFm@I7X4KTkY;MAptOLkc z*Wunl$6>?>b9#@Q+B(F!@bQ&QwN((SH>Dlv_U*x%kGAYfmkhh@7xS&_B&?D_182CF z^FWH;vxXbFksdnCe~*cI+GYxb@qous-(LE%G}G`f$TH2o40yngXN6R zGc~BiQ7PI$VE290E4jazU`&{6z9$osv?k@R?H=|fpUyWV!Vv4i?9M=R--+foq!bLC zc0*k{F&hFOvF9qHe0x)gY9XbiJ-N1gu(#C#2K`*iNs}ghjkb2v)btbAXiy>=zm;2?-bElNwT|h+(<(ok+xa`h8Q*A7e|kVy$v*5f^svHTW;*bXJtA&jQQp%qcZ!c zJjz@)F5*V!`D{G<^YrQ-u4M6(z6^69i4vWsG5FS+ez&)^!qUX#Mb}L>+m$$X%lBAs 
z;az_4P$B1d4TK?&rE`Y^H-2E}a(2JKFHBy3O8s@v&)>Ag^EGZ>hPaOR#khVS4b<1+ zjw@L@2}_r2C_0x8wC0-79~v3%rT;1>vHI8eRQ_btMY57hW|u!Hj)sh%mCB`zt=)w2 zQ#?$mlLeugIKoU@P#0ldGYxP)+~yl8v(JL0hxaU$UdT1C_1sNWl=3Wj`k-TM^{fV! z`O#@^!D8l31*;y|hJN_qT2WC~eREGa%xe!~3a}!9+ABe=oZOC&5jQ64UEI;rQY?PU ziUO0+`G~k2!xy1Ms0pav|1f6P3-#hwtTDP882DhfctlG}%h!xm_|N!CYA~L!LCdwr zI{5bG%iCGj)68X7*}iX)HlWjAc_(|yzAAw7q7@o9Nk2#uQ6q~6XgirFk5Em7V~41W z@}%uH75HesT4hM@0vqYZ%eD$qrzX;wc%hmbpqXi>#Gb_N7`V# zDobXLvU6$IO_;yitDfkD_PrFB&I$gR~Hv-B0z5Gqy}8g9)C>pNCI zcYPw=Z9a9h7}yZR8eMXAyaRVB;&kncTZPA-$U?L1KJY$kM{S;)PEDl=onK&UnDe;b z-u1*Qm;2Q%|6_%iMs_x1kKwr$k_YE3m~+!IFq(uxXJDzC0UtT9c#~j=Sx1p){1QCUn);oZGp4bhG- zMU&CdnUE=4$)$W?wl3bO%${KB>;6ssU8}E*_Y!KFhA|sX$tNg8#0^CHvEh}Gs2e>C z<4Nl<)W59V#Wb`v5o{L@?*P%Cm!*m;<=N!D@i%X{f5|sYV$W9nJ?8>tmh1@*4kt&` zCF9?cF<(t?((rwgXNUU#`)x4+Bqr*S#Ka$lxfh#ch$Z2#Ko0|km7?nYM>3nu)^|rZ z&(#_=C`t(EZkt+O9*u~c9|uKhG4E9c{5fQmfzD200BD-Evn zQqF)XC%5ZnTiao)+Dt>oY6o3j>w1e633lhiVAVqH_8KykUB50953;`5EBoz=pvWzbR=#41NtY4r`uUT3pnT!hdfCo9<~y&VZ@@_BL(a6P zm#FE??HrB?jpF`(Q%p|-1FnTgE1dU>7PD^9s3QFH5($1D?Za(mjMvSE|? zS+dr|4SzZ($K)iL0SaZto)C;82%)0=tD&p*c1MpU9LN=DfA=Vx$&7dB&}-eP#TUb5 zxMwv^IE8!~=3g7Er!#+vt1z+~_Etr}dMM}km6le|HRIlM*r83Bf3$c|7?nx(Aj1ut zOdw44a4L}-nXwpkMs5w-S#?g)RL(;2mei!F%4Ha{x^V|vmQ} zd~1k9dN~fv8v1r9-&{#2ym)c2`%`qnWS-MlxUe9;)@0N~*V19ukp$qr`HwRPsOcF` zsC*nsQ`)$wHW~4tv_Ib_zV*f0nQfLV{-1xIpP1|~W;+dk$i&y*EhTw#?RGrGnCLpP zbM{B}o~C^YoI6p2I?eio{jN)u0MS~cu_q}gA*pcXw`P;?<7QH#Rl+01tM+q6KZyeJzo-e z&=(&DATE8?$`+^SX7;-$c3OZ7m4hdC>>o?&tX%TLD`NPW==P#w3LG+oxIr{?=7bM^ zmcr-Glj?26nPmK7Xi>FYt1@l{+{R8H&p=F%Z$u8{_Myte-dZbsFqqbkg&=FHd+cs{ z3+6v+13+~ZRlomY`{1~47SJ!+rcHiDN+Z49zthvSeN(=)* zKtIQ+xgI)hPk=d3s5ZX-PJk&i11?Ehb2kU8roVwgEh`i&9xq=MwIySiYiIKZ9c94? 
z();~kz?h&k-iU@6rw0T=nIf2FjRIwm-x5^&W>TK3CvVatA007_kw~JcLT|+ZuMu3@ zs1<&Hh5iuIGQs6PzUS`2ZI{Hh46MB|(hT4l1I(s*D9PAvHk3N{q^822787k%+AmHn zUWkQnYfn1@zjh2$uG+)W>T|$Pd@614j&gl3CzcRnHMi<)>CIO7MG~OV+WeOr6u*kL zJ(|VTQ*gYEs=Nr}`CV8Ds8?b-Q{Cg$#q*SmvxZt!ah5cayw!G9)D%fdmwyzCds0zj zS=qv;PeVf6$@koqsfBWUP0Z^3(PAw6EeJa7aBSLq<{mq%crrfWrGr7~md&mkP!|)2 zJfZ%-eOKgn{5X^0GMn+yrZEFt9GssI6rRCDa(V9O*<|&{w*hK@@$n@&c`8+}eyL6S z>4adKO++pJHTgPa*;Oy9%(lU(rS5wqb=gCRGfUs=-B%YQ=-Z|RdhqXWe%{*C6Z^sq z#S|sx{%Z|GyY}a2*EBn~#NwZqQ+>l_?n@NCE`DBK2oxO_!$)?&QhrA6EB0}1F2W&{-_S=kNI^Jvaj z{ey%8w)jQmN?o)(_Utewz@zEqCIPAGLAA4s_W@)<^J-Im-FnL|-KORuHBZ*Kw*Xn9 z@RanL!kFy}G&G+7`%W>v2)gB03G|OmI0K{|DAajgyg^jG_4N`cz=9zLCm8kQp5ZE;#R}hRf`*27WYR7L`d)TqR6=nx%W?1?f7*ob;aGFXATcsl3;~KA}uN7gve!6zyY| zS3j(pr_s{3)?BV5{+NWSPk;a}0@0QB>;ZL8=Y`@HMWw>NDI8Uhy{kZIn6rw@Zy;X-TGrG=YFtmn;8BJfBBPVjk zO*Ed|Z$)=i?o9MqgSlTKUGU@K8)KSO7rE;A>0ON2&a<-;-`4j|*w{hgkvo2pHtn zG5id5vAmLim5C+O3!crqIska~58umk9fk*21<)So3W<>!diHsgmFREnyMh7Uvet}` z)fdjLNT3g39}!twsytvq5KPjs@X~J1dl5E4`$Zjj1}``y$M$^AhK{CZAg&bH*aW^C z9fYeRU-L{i-tu_`b!DusvFVd3t`Z;hU6sex+8ag~JvKQPY4g&Kpcl7p;wp1vHO6QWMF`2e~pV93G%qS_b?nBp%oSWAYx=BueDR0TyJT_MFOFwH5U_PtGk_|JYfe57)n9h4@vmOCW_(`Tav_f7FY0TA=jKm~smshb#j6x;T7V7tKh5G&67N>)ue`5Q+FZUy^XnG8#+=J)_z*5 zUOWP|>e8)$n@g_+43eU-g5V8}9}Wg9xI8Bu#ZQ152b7+rK z3;#3-3h(GxLuO~bs$0~f9J=q@qg-uv?_s{}kVBmzuQPTFoiFkhu_0TeHNZHOhKtqv8g2hGI%oQXML|rg;oV>0LFcLwSBz7w4|KuvlshrzRL>KE}#V zEHU9CW;}|a$q@6r`t$)+fejQ|-!zHt(gjp}lWci(#@YLA%Oim>fu0efz?t-WN$uBO zZ}T~&tXg?MLLm7EzCocXXwCbUHt{W(Ml9IRQ^02#DQNrdFghYHAarMaM;cX(aRJ*` zA9a4wqH>yGoR7Ifv(neb`G7>LJ@#a|W{54DrE~xXx{_i)!tO;ZM8KJ<7netelUw(Y z0Cl{t6NEbew@CWc3ii-JL%*KYLFiWNJhZ>CkJByWui^vjKWnQ|{O;W%e9Db#$Jd13 z2^0WGQi(;4O^34IO!37ue-#V%P-cV3GlSj|Z%SEtBW}m2czX6ludUsC&N8a7tFbW` zOeeMEJAiiOmrZf@UkK1CdgNe-mRz(2Mzh3AV149d^Z&q|*pHuq_X8NW1_7Bqj!z== z9E%^#wr`lan2AH^e27x#;fij|0#|@0&xcqADW>g4_=GIpv$JdWVhs^m)?xf{)lh~W z=38P&8~HNWxlO-LlRS@J+`W|xLh5ZKD{TNoHx+d0qy0shXivuD{~!X5=*vRJ~@MQlu3ngHXXmqRjk(Zh5jYFU1r9NGo>{~ 
zzAwx%h)EN`j`x*f0cfL~I3M5Zk&vVA=f}DFk@IBfHhMj5GQnBnG1s|LZag37@2L8^ zZb}l+5&vBhKn5rScf)r@f0jU=xJVC zS36;x9zhV$yv--<-MV}W(IamqEXh&;Gs3M&=*U1DL+?Z~kJuwvHB)9%LxQd1kvb;ADYioJ|8MQUHYxKgJ0N+!qcd_R2Rhq zj_llqJAd^!KsWJ2{bhX4Tt>t$<0mZHj%9&-e8o^*I2Otb zARj*`P7p0eU)@%11;*0j?>&^9uUQ}Z>2Ifj+^S~w*dvScO35=+jE7LL{-(H&E|-%o zbGub(RHZgwqqwE)=NE229l~Sis&@{UxT)~SUMI2rsancAzdo}Ma2{Pp@N1%j*Yth) zQWEkVX;?4p2+wmq|2$}!Gx5)T;a^-^uQDc~qt5|no`of83JB9pC?33Mk#zR8zKq9( zz4{2pOtOm9x{M1xAq!g4&SjYUS6t${g9HigLnBo}Ey>0|B41js@!N3uU`ZXvW79A| z9=Mw;YOE>opOmm0&~gk&yMdNpD4yiU!!}B!6WMwZD()czS2Vz|@aA25LgFQn(4Sww zugR)5s#>C?2A&Gxtj9tUuna5hSmTT(C#T4HSx|chA3eIk%P{7OXXMM+=$_@cFd&+| zl@dG&7Z+>d(JS1`z*fj~ah*`K3tjqBd8MaMGma}xPuIX(K9t#hUvCHUh8T{fd3rgy9t^>cadnSu0Q!NO#4AhIAZJOYKlqm;zy1y` zpkz9?#oJFFn)hb+@X-9gl<=<6<^^f6zDc?&t}(ZwiPLr8c^_SSxa;_^Beg472FBW^ z!W}wZs^RBujwgGVBOgj+?<72^WfW`u-W(z)LKBO#`;D{#-{-Sv-s%*|Txd#mfqT6i zzaOu9FbxOj#e$QiytcT&8yW+Wq%H%bA-P6z!<;#5b^4=kM?7;zcb0TSg6zgd?Zx9c z0?FWaj#|1x`~c)we0ZyM?O@hs=|sjN2Te+n*#-D5V> zYpUI=gdGhwX;~?Ic)X>mM<^hvfu<%)*Pydc?BCh$^++@wQAa(-qyk6BuTJF&_g|CW z41+%=KhkjIa(mOcgMAwmrH{VAQ({|`;WBZI>CM;d-<;gm`J!ALV}!)k?VGfYK3h=* zDMh*?-GQ6{)%v3U;vPO!-^*1*;P>ASna*Lfm8zknF+VX|4_)pqrtmjjpT%EvD2cWH zqaILT&|r|{{G;LGwVo+a^cy9z0`g^ocF)~$E)Q1>mf6MS_GaSm8z ze1Z2jzv6Az+t5rtUm8-hn$gR6U3ouhujc_iI4W

04C%gcVJY@$qcVHF8;Kr zDQ5|#Dhm1wU8c6#DR~|xDI#nuu;xa-H_pVK)AP)$C!|t|nvN_*4J2S5K+l<&I6#(; z12ZLy^$deGzb)1it6wI$+`aw|k*x4uU+Ux~p?K`2QiU3%$EI+bl%O_ItKCo5<2^ZE zjHUv-x(?VIG91Ex0_=v0y^xQK5lVa5F7xSZEK(2B*X9E2UwZAudOQ^?CD1>h%k&l7 zqUhfG>=2QtF!Tnjz+`~C;;H#CzWj>k^*6q!Q!at_8~KWXgr^6qRzeZadk=@*RQyP$ zlp@DIniA(Wi2in(^M8?)9HlU!XstWkvm4-j`DzQ-Sz-)QG6~xmKH2XN-|-V_?SOL8 zODhU*qO-qG&@58jSI?p(wJ9fqaY!nK?C29d*jTUwA_9%=0BI-{ULk>&*6&Rfte~E1 zbfFRwuA$GZ$%Ztb^077{tE&?=kk71WVHG-mq=6X22WXHt9}QIN_vR9YA9-Lp2BZ)wk5)LW7|&dSfgR&+d>8w4|=@2W*@F!gO_ zpP%xAk`p+)6#f=G_H`xXwrFSoZHQ^$hr*nRTuTh0q=qSn0qRau8{SHY7GxhphYEK{W^VUOajwCu`?z}v1|?;@CTQw6L=%Y2G})Cl2Gz^{%cHS-xIc7vtAbG zjqvD+MUK9&3a_h_tLduScpmY$GybSw*?Ab_5|@M^s??^{sytZ%8tO+G-p$`Q##Q?T z=>Mss`CdQ1$x(k~8M7U@(ARfO6lPqt&6k0co6_5=Tj>(%fi1^ZmJK`t;-j%${Mbu< z(T?&xX7}!%HUoopq8XIkC=MTm&gs^lAS43jlH?uprz+S(){$++sO>=4@0NlI^Q9S{ zj^BQGI>;NDS?Z4jZk-qp^x);w6 z5uxUc-;c-HcZad6SBf@Y^logpHYLCsw~gV-j7HQisan;=(&YWN-hcEtmZJ|$4VZdG z?9xSrLOLY&hQamx?sbJBwTU2KUK6r|G~9c44W~U&y@(EmS=aol+b^+PM{7?OxGuL1RUK|{MK2V_9=?)#vSPwR$?ToicB%UUeoIc} z2LqM-wB+`ar1yi+x~SxFDXuuJOGMD69i$BM7N9FvXrW{0Hdpor4L$V8Xoq!)t#Wa9 ziL@8qvP0N+Z({=xjoaF;1<}=ss%Q|z6Ln41wpH*Sz+y`^gBg%rI&+Krj)&yYaXW`c zr%#wjA0XvApfq>nD)}x$N455NA?x9DF;SeCI-5Yro@`SREx`c((1L<_ z;VFh~i=f0W(%b8e9#m09Oyj0~#|h!L-d(oCE9;W4lq|&PFal^H0+)0O|LknZwdFv! 
zrdNwxC9baH3X$tOgx&^V>?^yLor#8{e*gBBbYQtX7%Zqp^@Jfiig)GspAmhf&d|z8 zBv3r+sz8zJCz2|!WcS<9NTk$MAlG&Gjc^ce1m=SN&V8>+*#M3}6m0Yfasm~9tz1FS zPjm_TM2!q~jVFQUR)wk^J=wBEhOibIVQvG^a~FYSld>oPLG;KE0D`FGNd4yl2nOo9 z7Ik0~xC^GqRg=~fcqh?ja69h`LSh2O`T;-vuGhwKMrcgp)v}g?O~g0M%m3Y;pY~1& zwf+(NOm9Yh0c+pZ{=$DJ?9IYVeP6TB&)T#|L(N;!eF)tsgtqyDr^2}BFFYxjH`|qw zhNoi~-0CdlMC>ooIKAL~dMvG`twP~u+=3xizev)#Fq%ox$@;te+KC#2YVp4}ydguF zF2Aer5WskQlodw7z{)DKwY8HVUppN41ou+pO6Fi00;X}F08X#+he$kX`m)wn3PO{GJY0qcq&F?N{YhZlfd-+M(H-BKTQxzIL)M}5 zU-jHq@GtHI_+p^8RA+ttzEmsam|w5*(5`BgA>c*FD7BO#9Q&Mnod zU-|ibbB?Y3_PoU`f3u<+jg`LV;rB>^8VaY+HKuQOfpH@)11;D(Va=@nBo6yX0q?d2()2OwfEgH$4#3 zfC37~7d85lyCXsR-b#<*T@Vvt`6@t2rtmPk&})aWc0T&vy{SEq1?5*Yr(Y$SYLhbz z!-4j%XL^{w0KCUkt5IBZx4>8x!jn;aVzrr5LgO~W)K7)`PZ=|RUg>sLW{6N@+^>~x z&>8JWyIYQ|njnYImD|e7j$=g&^dj-V(EtfSzM#wP|58E)IR-QBmQRDCLx7pV$any? zkCV#md?3#ugIzk#-)VXF`JAn-6I5TxD^K%{Ycg{|tNDYt2n+2;w0`6NruF;!@icjH z|D5J=a>@6h1E;_JI5H1wG`#;?cb@3LkHFCZv>$veX1vt|Sw zz$E5&Yfp42B?+P(`3F(iS2z5%mK`O=^U2u^yM-sMHimuSLl!3pU{DN;zpU(ug_Xp% ztpby!o_b87c}?cse&nbnF|Cua7Up$9(Yfoh#W40Gr4NkF=Lym!$YsKzg;(^%&6qr! zSE8}p+}T&6XvS3=?m+femo`fDAg0k&Sm~9m%yA7^Fm;w#$?eu0NlV+5fG09#qwCL8 zLDNC2QN$31yHbk^@%>FMQx1yR{=8aCL3Rfy-v}m;d@eM9bHVt%vqq#ynT{AcRvN-? 
zLZD%8%burV;I3Mm_C^fz2mBji8e9V|@m$Q33c3CA<$h%jTW@F}l(RWD-M2qg*Er+W z@t()MWwCx=+&K(Sd(qiXWjBm|kqO0#Q_Zwx^mH9?Q~TXkBB0f~#8OX9&0i-9nZcc% znc_q<%MnMX==9@v6y`0L`Myz1YTBL{i_}6@Ez~~ z-nE-2$Mqtj35JLeksl0IGt^14Uaj@{Wnh-AT#9`e#0ERA=@R)8u>&~iMTFe2A5PeV z7JuQwGbi9U3P^=y4h5nqmpeN6{+CvcFe~J&#N6;Bt(2L-(!7ab+ffZI{)D*-w_yqk zDL9k=#L4r$AjF=~r{Q9Wj-7})YVg_weoCJ7al^8pX8Dol91qXiJZT4tVMlvz?A)9V zjihks#G3HO`EfXE^#8%@SakPYxkGpz|2oLb#55Wj{G%nIHS-{_{|Ebu#R&QXMq9TM zoxhx+f8F_P)ACU|2K4^OlA$IHdVAtz3yYiW%?37(l+Yc0LPrviu$qjHcvZA##oOzh zRo@Y>ArP2N&DU#ZlMw@HQId^z{zJQV?QR4*rW&eX@F$UUk2q{P)K zuS82sTkv+rY_1PNZ^w0GXMQ8-JK}$6FnI*M*zX<5|EgG695j>I6ttknC~iWEU-PRh zw&h_b0Iq|rtYQ7TO`EUYq!mWHY<*H^;C8Ud>?=IOJIc1~Y<8;r$xj%#@x6(3SgNxU&Va3=ZzOo%9k*{L~__V)yI2+!Kl4}vM=LStXuoFOlC&y zsE*vhil%i?u6YGL%CG(6#V;Ds$mxAUN-8t^{ z!;t1U@3+~Mt;}Dj;Hnq|{$HL=Xs1luVL2#H2`0~`wNj(neR3{x6-YF;TYZ41OnJkd z8^hCbMCnQefAwXr9{T!Ddk%*=|7-#v!fHc@|7-R})rSG2avSWiDc`5tZVK%Il}XHE;8}|u6-`6KhOe!au zm#-+X{&pVml?XzMgQYT-!}NHr^|LdZYyE_rCI5vmRFu$g9KgudQ!*ZV#d7B2P@=>5N8H8xeRf#j%>Grb zVF7<)ITQmt+bc8?c_A@l)yf-;3{y9uFbjlH$QW`|7F1tr|o9d)at!3IUd z4t=s5EK|yQ^YscJpWi00JK$Cb``ae7&R5c?{y1MsQGcSK$6lHD9;~7(wD9KYXM(VI z0hRxOLH@28heZGe?sBFuMdP%|jQdl)#4hwCoNK-_3ceC!GxzK1SEO%yI$g+qa#)7Q zi-%S&0h~+;91@Qf{Xxl_1eL$8w)N(Rt&VDIrwC+1PWYOAFLp_zj{xBTQ~AFq9{}Uo z)tEAz+8MK4KU~dX?xVRx{_*_`BxC1+0L6Hc>7VflhD~QrugwcwJ091fpmXK`B>4Nt zN$LgKJWyBVRgYn_uXK3{`BbW6MXymOk@Jz#gK5yz^m2&n<7?cLHoY{HcHP-v##4{V z98IC?!PkSKHyjIeti4@kshEa-M(PQ-b9~Otx>C*F*24$fT4`C~HJq|m=XNtc*Uf>d ziF&Gv-3uLPU$*w|bCfSk(^KwMre_%P2_r$slZ-$r>3f*;wi?+C&3CJD0{B0LJr}yA zgF^Y`T)j{qR_GRvD9|{ryO;A7jyilk z-v80K3A6%?i0V+u&m>x*fQH-@3rV9nxSQPpNAZ(nuKRF&Frw7iK8S&YAQm@1!}_MG#k+{F?=)X`}8h6O9- z!X#B%-k6ptpfudysJeBKwGAKHny4m0K6wE$X+vmT{G}KlPS{n*e@3*l6+TdTfTl@J zZFE+gUImE#2qU}Wv0VZb#ALg%2AnHgDHqb^&`Dt?emHNGV(bBYI8hk9fmd$G7Kb0? 
z7S%6Zg3=a;wNT?sMIx3TEY@tA_`>uCmvMEhag7|2eXuiUNfpSgo#+b;{uNo}7pLVb zt3rPd*iu#X8i43BAY99^B^t4!2+oY}?ed1JY4UHlB9oB_6pXz)kQtoRL%nY9(M5K5HQstRl`BV9*~F;C_sA@N zCDF-}SU48F_uD%n8rzZ98GrZM2bJt5RaG5~P0O;}!(zCZ3!(S=&$K6>=W8hBe}-f! zPjZHR?0zhmOdTy5ZP{D8qgT}OhJ%4e_Rp?6eS%yCg|;pG(OCbv)ZtPX_f139A+v)z zT=(HxLwM68@HsZ*QWmU#w;O}1OZ?8rou z*ZkT|81C~*Rfn2FTObX!Cw>vvcFV}G8H}OJb>==7LryD02Qaxwat8o40OzI<{6rG( zVV^IdS$Mscgn&>%>f`!dh%r*50qvnuxO(wm5Os8+2QT`SRl}_L9I)UtPQ6+Yw>-ID zK~V31R5JKX@Rt_@=9F1HTjB?^xPbgO{|?VAF7(=ya_n)(>fX}K$Wl%o-RTG?O|i9 z{*LjNvQ#^q-fqc}Z*G=a8|_%@q8pf*tg$3ldvW?@Rhe;nj1<7kQyz+lRu$3ONHm>p zDeu;4dAoH(xwVB^2kPTjPtDUCtmhteQy#^cwIJBYzSC{e$L#Z?IphDd@ic!_BQ-(S zc*tTRS{v@bouy)4NrFi}L&j~F>{dYOA-rABXm9PUwC31Yt;NeSx?{s!kscI#*8UxI zJOEUF5b}8JN%Nz<`C2*RZJ_W-k=b5QDUptCWZ7#zBx=OmH)r7|-{h1-ri)b1H_0cb z5bS%6^1nERI>WwA`HAhBR`m2OVb6HI8dA{3G90=k;Pc)=j<@C%ZI*Job6uiPL=e(Q zFzr`4!3yQw?xr>U>hzeC3ZN=_3PZ~PqD(uV7speSczDm*_4dw9*-XuMUlT)Jx8VOD zb9Eclmq!NC7EM)gUHXc9x?vh5mTV^?ii72ho3PWCO_F_nvK2;1)qjj8Kh3-MSo#NK zN_4hzBYc_ZQmJW!dzK${0Pk`fH4i7Rm#?)b79DGcDMwHlPpSP_P@L*@(_EJ!TdUL4ftDSnf|aJnA-4lp_(5)>M*lXknv|d6cA%{796P1dk{Se`&U^o5)q+=zg=p zS8US)3S+PV9BGD$W%s#kIfCirm4br~7$imo5+6k*QUg}^iDonx1ll4EMBI#6@&&La zd*7FS9~94T6AFNL`ce>lUjNZP@iI(?aw@ab8tP&u=P3AxGVrbfl1<~pdPg$0JWr)$is-yCD75ERZWKXU4b;NA;8BSm+#o1NWy3lG zYP4mKxv_Or6Zv#%Ua9&ElK^M@>SE@_pPQEAYzJ!Rdda&d$47t?qwvfBAVv}X8yQ7n z?GX1TZLsbUzXwtjh$`5&%x8}RoBLQvEaFc`tAh4%r^1SSm9bPI!I8&YYv(;|+Gbqbzx9eF1U8jM z@BE1EdeEpNKDDsZ^Mdv}x|jN0B(Lb#!U_({J^0=8ZsSQ@$BE{1qY4U9LT$AX;>4o0MjBGZIK_A>4?0e)BCE>FbRkI1p{ zV)E2@(^eTqgphat|BD#b=6{M|>D?c+?Yr)%BGjV7tt)l}7?^`ompmvJDGR>Wr;Ti< zK=rGbc;gDWmIr{(P=YLm@P=d7&*{{f`X?{Rspl@Qx+9JVl;ehNMpwj^O`8Ze{VyIo zkNu-UVjvm2a!wb?KjVFogqdkwICm(156OHkA5TtSz9J7^Ag2SVS{VR7l7`cwDfFZx zUFHr+d+dI_otUMo=r)q0+j>c2o*->~%aI51nfzpn@*?NvMSgXs;E17w9RMFy9V?qe zV|yQK4PEj6bzGiU$w1fJ)3v1)stmTAiJ~fe@?~Xm6=2tnaLtMTDso$mq#}%_SHl2i z|486gu(D#{n2SWb;aqoig>%CtOLuQ%Md?CvKvD%kX1B5-UfqcR0SK=XtSXneeOgFj 
z^@jNamPPb5yrwBfxjVJd@O19eMEOD!DPw150XU2LD1d;sJhN6kQWI|jF^mTX}=4nis65r1?l5vx2cpn?}G%TP?=(s=x&?qm2 znZ*a7tM^a`v5j$KF0fZ*c#Ls>AsoeVD=TXmqDOs@;W)a-&$Ia^D>F?wbWN`=8ER>V z>RMA}BEKM>C9Z^&D=vPqiRai$`64!F-sr}Ey9Tn^NaLUH4R<6|h^yNa)AOK7{AO|j zT&(}y!A1t#}tvaE);oxOtNip6IWyogg zyt5y1n?S5eg4?FSw+BoRJ7;cv3(h3TlfM==su-|kawfQk-gu*uZCxmMP`we;KX8Ha zlb0rt54hxSB=&Hc+EurdmX>tlUif?NCY9obluoz;32_Z`mXq)GA3*kn-(ghg+*nf! zoRR)UM+GlFGe$W_{r*2>LG3yB;2sj#%C|F5KaD5HACV}SnGw6;8RA$v1bxi=M}DC-6TVX(AB2!L5*{{vS-}7rlEtCT0O!MFhdJk^TYp zW(Cjer}22D$hBhyJI^Qu1r?>G`BEvkW$M_K-pIJrlocyMbBYS@3vD$QyPwYASxNc( zLr44j*?*E6f|nPZHeqNNa5-qAo1=I2Nl~SZ$8+55CUQcs9KPKwwH9LKN0CDcP;Z0P zf-Nw|LsdlS3u4_A#`&XX=i2s7X-EDgoK38knZ}StFaSmu!`PE^2$v1};Ducp(7Y}I zuCkHD)r_&(Fxo2jfQs-z(@7$Hph;UXD+WvcC^fTR)Rpt-(zy|81>Oj*fN*9FX)iz$ zk3OAdF<_)xuR}^BRDAuHT;e17mbyn&ip(a#9@4G%Xf@^TC=3Ao&UKMY`ZvQ& z3ePYTwt*+$-XyX1-lztHs&rub{w@B88<@dsGZsSy;+eg^_QjI41&uh~}SH_uafX$Sez}E2W6-yic-XQ?%7vj~Fk}KxAS*@FePOa3Yad za44V+`EH;=y56SG+6G_Q#~VFrxi&;%Z9EE9T>uSkdZMZ%aF_&^2?pPlaDSXbjYE)| z104iJNq$Y$?Fqdux6@i&4OYR!c@1( zAPMKH+c`4bjSQA#;?GD1AP0f$7x6d*d-4CMBUjbp`(XmGDI<=u*Z^^zit1gs=Cq0- z56SHq3@L3)8|)2FF;RX?hwOnPNj21zD~}mHQT8J08Rieehz{82Aq*{SpV0Ij%A!KE zWVr#ZqzNG{f}bBWMq|K5{nIA-Eve782$EdLqeXW*tkc);SNXYf-^Nh{ypA-k*X4;d*cj9*IJG*nN{o?*+>P z=%_!@d4rv%Vlc#Y(sKCloC<;gXx8d{iLUlvwJ|J&fAO?2DwH;6Zw`WKC=X|RJP)Dg zop+858D=UsWmEZU5@ItvPRbx=GhrxBf{d8$qzR}p`=JRS>_Xh#+Yu*uNCm=yzi-x6Dc4M{wrRpVbN*94|~o{KI@ABt4h==Kk?nqxq*02U}#nQ(r6Sf+(L9Z z$|C&7i8vtWFM`xH^x|Cno@Eh{#JKJj6B~(BeZ-S8iy1{a0c5cj?;~yu1p{6?-#pnk z4$F$;cebYeNHdUNnH2ZI$jJowN3#^3=WpLtZ)$2*$;sQ0)#0^*l5_U{&k?x_NZtXQVP?bA18_kEKg~@m#AQ{wct9f@k|zvZa_4^Ur;*5%5_{SJue>I8fL%s)pAqOLFr6#mO&K*yr$*c)_QEDlwhng3* zCEryG&Y~mM-z`pS3WwlWkZFTzxd*Y%x5Tg4+hHgm0mYBBE)k4?L;u3)ZA&#n96YLy zwQ3ZD?wCnpk$}EZ&LJcsh?$$bINk?I_m2|*vhC*^PRxZ6Gy>hgHUwj!lZ2>jutgU; zHAetsmQ~gq`}5v1Vlt|}L#!1L=1LBhxDQ8>0Bu6V-Z5rbuP2Co(}7(2ecKA=rzZRff=jN~R0Y5T7(gCeZv-|31g%qw`MTM9)4NJ-M}!M?}Q_`_4r7vyHm=Te{pUL&%>qhzPT4~mLB zd&6^tnr9D&S)mpj$Xz_J<1>FZaOJPdDt(hpTaa;bDbUalhWjmvie|&k6sf=2H 
zLIvC;X)>%74#UX*pKcG+UH#GA#27tq{4BGmG#3F3rUcddTcEVzlSAQR&0K)Mh~+X& z@6nZ1D8q`U*m`xUSkalOJ z&LhyycL3>IrYZ)qPDBQ+SYh||(^aA;uzq)fxXGxmr#t5Zz-^_Xay*Ft`CBVUR9~Ju zy!pnu(35gMNLM}XHib^*u9!JSF2tiY=D*%$W@b*F9iXt(>|0~>JF+miIi3Z`bprGK zKeN&P|KwGB@5rRhR8_6oPsO?aJ4|6phll5P-voEg8j$y+=nT>%d#~LdrV}aIoXM&w zzrA6)b8p$?iJUdI^>>mEFMA>)}{>} zJkNb!_jR4ud7bC>ILD5iSH$^27aFilRMFBy%v;GJF{lR(ZXvP4uj5Ch&YK(r+(GAe zBm?%W{|g@=9}s1%XcWP)UkrG8l)`ix%+jt-r=5bNKq-$1j+A6@!ML{fMNa~FYs}IP zRv~Se%9$}>P?rg7M@lguQa~P5Y9NldJa=Q%#DsT^MMK@$p=G~{ug{MG-$YiW&^u02QaMG2aj{^jyuR;EAOXcZ*K)$?&+pLiYh8n=r zg%ER}VX)Pu6b8S5YSkY{EP&LN*brBnfMOu7c0e>|F$@Rhz8E_b0YyaNwnGQA3B#QK zEtxjAs(75Duqv4bT1vnrgHb2~D-ZN9z2pHRNk6;_k)#Te0%VLbFJ!U4oOi{gky@+Y zo4dW{kaq$_5lwA(I~ZH)0JWnuei)xYoiqFxBrE?nt4IJe2#O`YLnH%fFLD4l0AK@x zpY(Yf8``-Y`1Z^Iq9VO7+l8s{Izc)G^cGZ(A(2+oM%yo-{#CJN1nI$xv2Gw`K5@yC z^^<}oA~5@QftLPCtR{Ha6jwu46UdOKd-aWTrLE-FaMu755Ql*KZQ87M7Gf|`#(_SQ zJAC26KLi$I`u_isLV;WpwyyvPq2 z84LVu4DB#HhQ`;yVgewkyI!y+k*3SWyiMrJ0~Z35HE(RdyiqV>P$E>EE29-4j;$$R zbyw-58;Fu4b|c{s#^--f91Z2jCCnm|CcSlvqthqX;+ZcV1BQ_ybWj><)O5SD+Mg$> zta_0|1Q3W;n!G6r&O+gCfg}LYUPcXfAXJw?RJp??7}$0xvJ}StP!G9*d&_s7Acxm5 zg|82@#_Km4ri={0+{%bzih?D(@x$J7(9%Hn9B=*r+2(HK`~Yb%7+e)_Z}|srZbguH zVGpDG6@=^s_5|d0E8GCYuxuv%6XOP;*9Gkfc$VmONA|i~1tHTUyf70&TlyfkaH)bf z1L-PtN#D_(C9O2S!F$^&WRI*(6|p7JBXtMjf%FfP2S_5r5Be9gHkV3?|a z)YeP_sAfP|xd3I&wmA!;$USSs$t*ezgVYv5szKzv0U6lf5?4VY zVyPCS_pi;V8mYCaCkisS{h&ntA_r{_RzOBTvKC`5{Z3^6h^ z18X(lSFLsjz?M)C^Xd9a3o*!Hkpt!sr=0!Lh@DG1yt{XI5LrAZxRj=yCyt9P)eArL&l%nDKVS@V>p%Q!gX(RJ z^9Z09s=wi%AWH(!D1OlgrGcA41>gN(JkRgZO#BL^bD1xW5ZzBm_W;f}q}jB5G#7Iu zs_*!S{gY^sGmH*c7n;b_1U#I;`QpJ|B)R%~{_qdt@nC>+L<9kFCg>e33-4qLpuG0y z$L)Aao9lY~!Z6Bbml=n(SU82YKBxt;GuSS&YjpHXDe>YW5!F8i4~wG|fi>sRz5wY5`2}p?Yf)TA ziiY<;DMG<6A!&2N^2~;MO49LN*T0uY|^{VYAC0!P`TtA5X|5JAy zqze99=chSKWfFuHzs&#A!gY7W97k4^sQx+!R9pxE?)VJV?pi2r+#7IM)^*)~26H^N z0W63^(rEk@hH+qQvR0eI7F-))oNd`x29*oy;A_sIPL9vA3Mu9P(jEZ1Z!J(Ku zz_Ln*fLH3llmvqzHXw!3ZN;Ut#jl9H$#+^l@X^5L|F-eCOnQQjy&uC`!En^j#28Rh 
z;0&^2LUQ96HV6fE#ark{;`9HE-u=jI7#W+PK^*+aZ>YhArx=#{uIS4Ra^iiqKJ{7y zl~)(o2S?+i9)MjHWJ$M|D`*rMp*4Qpc`)Zt3Zz4*YXLeYGx2t8t?8qU7eJwMbwEQ| z_(URz0^PL+Sq#u}eYQLg0M)ItQ0prqp?hlxecxXr1s66rb7W+;qccZQ4Jd=?&BRWz zD5dA3zT9$+i*H3@R44>_nm}ilZ3r5>DJ)tOj~Y^EF5eB&7_-A&u0&0sY9BDiI~Ej~}5ahP8UF_$f1dHUz{F z9T@&`zWn6Z>10Au;N-T>cyewBA0L{7Kz?a=$;CmXK}zMx*7~d47}xYY0oC|E@rN{M zG~tzGB8Z!N`;J5m;Y#hvI|)Fu4u?WdAmj(tJngR z?C|9+Cj1ONIH6T%x)Wz-2meDhY>$IpboBefxCaui{{{k{JBeaQIdg>+ze z1eaJ)Q{04>cZg3@51n0 zVWgS?#*qeF1hsQ=_PWmV{4%-J#I!oO1PDAfwuGmUGr(LSn2K50l+~mb5baoXpbP?z zCY<`nL5SeXK@jR#6^Ftmwm#b#TX}zzc)BW`XxYWQyL-j1#BvfsHNrFw#QY1_#(FlIw<@%TOa2D|j z$cU+VP2n*=4jUO_gv;#lFBSeC(8n>c&R8!tm*{)8fnxO#M<`yZ@*gniLuoh+V&=0qZg; zWe3Hog)8u>*5umyHXbEJj<|SNEQ0uO1MJQJ(Cfu= zbHE9cctAFQfD>Y>f>$-HH!hOTpa-;piAJ8&nT_&}W%3|M;P4{>d&dy)JpV`v|BDKVseS{4Hkj2HP*Y16(y$T)M)c1t_|y4D6|`Kb=Rg zBCw`G*$uP2gsA|eTmfyU$Y8oilz?}Ed?Lss{DR8kr8m=rh8G2yWzFHGo1M03M&E(* z_#^PIq4@*-V)6HCwIi!eiNi6$+FaUWUR_M{tpQ?Zt0!hy zOTLCh7cBL6q}eZ)_+&P&1zYS) z=Ao9gW5;N?SSCV-CQj34jNn-f+-fezK0K+{2u-8Fa0bYBe!E9`2oM|~$1jUF$7y`1 z#KfWftz9|3SUcx#(RCQ`_XVC3zc4myH&&0Zi+rcm6hB2_70(~{uXWZ%zO*1t-&cJM zbcFEo2#3gk_qR~i9H+oG^8qv2fR4SopHx5;=O-#SmR1CIFn`YPy(gsPp;HFtG%HQ} z4k6$>#7^C$0bDyd3t1UPFomB0p|+Ki4n!fJs1I9a)PVeqD&^cxXTk)A^VhHXyvGU= zq~COPo8gLq7ZA8G2j>=KxPlD~l%3NIi-~i7L@KkB9y)Q6%xhDSzFP+`fgFp%g&SvR zrmTB@#^=B=;alIuQpvc~R`bIo6gyNmzf8N{`@Es%L$s+}qfJ~HOK41%Th@!= zwosXB+jXiy!j~@#rU+qUj)rDeKtof zW96D}E(J`cTLWteo=BOnMf1F7L{16M%s7gQhy-+SLnK=(xxKC)DYn4sk@<6xt*g|m zW!o*n5J@DOHw9m*sn5o2I3cC$-d?+>3QyoODa7N6A=q%?bf;kbZWq5GY_UJvEJ=2Y-witpMxM9UC?U>JH5&ODJDK2<@A3 zoaVt@fg*6n{|99kCB)CTgZLRkoO>w*Tu66Jg5`HVR5E}WJ^(k2=OJcaAP4xuxKCzW za<=wo8B6ll@};>Ya^RSs%y#Hj;U8s_yphUlnerX@U2Wa4uOO5*HY)U`w}yoWOiky z9xpH!e*c5BVLrNK7IZZXr`np;4rn?wb9Wx;M2-X+P7^Ul@Vyk|;m0>}z+DtMXuHgo z34a5D9&n#B)EGyuV8c$sb~_YqU6z9eWhOYy0{T!paI^jZRr(JkmVtD)hfbd)4 z^ihKk1wt@%{!n~XMfieTQ!s&&)sM+;V=~jkh~3-GP0|gnd_#_{j)C8;w}ANbvQ%5` z=}?4Htm?wZH(gz)L^`*_s_Syhdr%?;xk{u&+F@be#@wn7cG`$|+IV%_I^rpuqX=(@ 
zR+0JQtX%D|3;kojFXUw9Q?|4NiwYYm9;?TrO<)Q1%J4n|>TWiG@8{ZH2&^cAO-R2bl<(yyVBwOemwiR@Xnp$4ujlas<0WJExfHYWPzH>`N$&@G;*QO+Auqu z#E;G_poI1nF{uWOE5U5Lmu2`GMaUEdQ*b`(je2wQJW?zBHL2#SaggN{?6<9nxdi2*O&LUVG zzp3{;+ZI3qisY)`(h6C`uV&v{klFVIQs+4lyhH^u^+O|#P)5M2Y|HP!=Egol>4GfS z(5x6hN-vh=Y(YaCG?!hBh5-so_MwR%wqv!3 zB{a&U{vks*wZXYAGPx{caN09vy;cxPYf_UkUd#vgd%_?&aAE(n4C;W9TfOW#yk$$P zz58azY|V@t6_$)(|8cDBSDWc4vnNjmJmQrBWr~LKbP*H}YK9!gGD5y7wMM8ov+KYF zi8lxH{3MKf_VktszlG#>9FeGC$_uRD%#UYDE4Z{7X3n*w&lG#S3RZ|R+u_F@EEzxvWN%f!<2){(O955|LzgjaManAZ6E%uA({ z4%dLLNdFTy;qORGmfqNHnWf z+N&gP)5>6%g(;Vb)a5St-SF% zLEG6;{=-|tkkQ!-T?JdMK6@6;*8w~&G*=%aoxSz(3`i$|Yzd$4LRPwMSmG-AeSY-Q z@J^RWNf$SmuM-)>1676Dp`>gHILb^!7r1J?J9;;ZIR|m}Ke@@3(NO)iBcB z-{RXG<0x!9{B22Ku?z&}Zx%zhcyJ~M!gBax@1G!J2@YA_D>{^@!wa^#fm>D-03o(l zZ7^1jn!W#LNgl#jgAk^bgg3i26^A-dlM#-gbW-b>iT7x<_Lr;;=i*BbEZ(jFX0P3u z;1abmafj!rEoW?NLgA{TE|9T0Zk^{Wwsr>KIQH$5Sq!!;fImC&!+*Y+>!;3IX~!(b zVZrWN*?{Q7e)mzk@T80LCDK#>fY)xj%qJBvU3wVy=G8b0Um>{tRrj(ye)uD~wx7#Qz+4UTi*w<#RxK%NuT`qW3FK z8x{hI>yzpa4UwiZXscrxPx(n1yymj2^+M{uI1P*6Z3UsD#g=LPrsayzDO`2m3O`lWyjC*8ewHy}*8uIGl-x&=xF?)H#uE~kx^Yb7i+!<7Uz?j1* z^t_gZyKSc@9CKV2%c9=qIbFL(0M0Jh30oz@Fa$x-r=U!j7how?loa99>mHcmI09ff z^$=AnPOj;24oGvf&h*-iui?yh7u06io^73L8Q%Y7OA+eru?)RmE1sCw_V?dsjLeya z{7As?>f6vO$i9iMYR>qj;YIp@TfW^B290<%J3zq|svA(zZDQYrJ_1PS7ITV2PKI=c z(+_xBA#H@MOx6W7GcXkQH%78cDO<;>oupjPsU?UoVaf<3BP+E7Xd_zh$hF6OXs0dK zHreliz6Xux&!T7%?o$I9+suIq<6!eInqf{bZ%v1a62|^mvPECW*xO%P>J@$j{kUEK zwFiT_nY{ow(8X&FjvZ_KG|{iEeGU0X!+0)?@sLHqkYOfH1VZXc%^!Tc`9V69nWE@E_~lQ zuleGCr;$idQ(^>GeEEaTGT@B>BG=Kgv*26w*Rr4wbf1Zo*3>EYZDB5J=C2P_zIZ1G z5G4)ExBELp>-r}r!z17H`(RSAcz9QDW%;=mwOV@Mol4jzXp~F@IyRv1m`{7fwCHl8RLOoL*OXoNx_WC21l?A zsaP&YF`ZmnLKY--8`5ZGtK-zAg{Z3L4Tx|gYTnGo`I{hl zLy~2ZNS*}OO2Do}!We{B0C}NJE0Agk2-PIyTficjU+BUJAgdge z4|Z|bur8jZ(thv*@*8)E-8cXJ){I>!**o%1ZfpkmZsap7RTG#`>N2#xoT! 
zO|&?cBIhTm+w_<2=jlel@&rpYY>qK}_Ci+8n;_{GVWUrqD|ql4Ioc$;q+U=u)gGHQ zCqDK9gctgTUZ*Dx-Z6!2;KfCkcV9+oM|V?bW<0)n_i0+sXd|$|_S1Ef0-%iyT`j}c z!AzFnq14E881)ml-Wr_;RP7rs6bv9NkUb4P;G!@B`7v=^=2Orr3H*tS-U7Aw<7OQe zSvin%EsWBojdcmYrC@$ciNny937i^a*^Kzl=ja&1C=B6 zMn5~~$}plA6tkFA4}QQeI3stdwb=t=EvN=)dgt;1Mh}c~@GrmNA8}B6IS*NOctOA& z;sabH%<&;u8Twdhy^&zILUett(0^8l0IwImFNOix0Q0EB6it(rKAMIxyk3XR)Pem3 z+9{jbEv8L7J9QSKmU5X)JG3E^xDhY_Q{#r}q)3)FZhVR1OeitVJg~-B7)~YqP*Q#M zrSb=|apuB{thweHcn3OpY=jSGcIHcM*~bWqKWhvrykHP#eLm9rugjRL(9G8G>g2@{ z^}Z$XY&|jM2SV?EEL|PW>Ui^NXz^c{Q zDo-8gi;1-ea7&`2@X0tyPoM9t9vnZjBaFW@_x5(deoF=}WlQ(={q`M&_wIn2xARQT zga;W-$34!kR||Oxh8#==1TUMnSzI6^k(Be^?mkx_x%6sGYjgdvW{Z><+U(;(mu5x6 z{;=##qEm{B^`PYZE0_DFqjoBP ziAk4DC4gAl<-(W5WpGYpe!3tir0o9TTht9l{vg=)a;1iYcKVIQ)FO);(m3iw_MqV4 z1Dru=DRm)AjnAzceM8J<>GORCEAAaRqD1RxVk42`1h?v|#dbq^y?kr0RI58VBS+dS z>JKOD)fN_)cvTGvv9xH;o8>2^%e9*7Hdt0YH?CcpoV%T)sFc=b0bbd6ofk&W$I4X6 zFj2=E7sOgN``6~><}gNT@HS8CRld--%-cu%(daDi*R(^o+JyKl2Vx5(#B$|$o%|{+ zQv41J-mibS`%}W4S-QEH*!&N5VbAaz{Y(?H1Fym)^5E;GhL)BhZAbpZz54fea`dU% zEUK&3a@q`3d);z0rwfQXJmTp5t~Fcdw8@{cR5Xbf7fEv$-SVQ!`3;LIl_39w2V2%F zsZ=xm!GfnEeP^svhKl+*$VpW0tL%yzv{4filMViGB4Uiv`RA=tZA*sBXOwQ|cvGwH zy&B%!e8cSo1GP`^0V)#7#wu7m{oxrxK20-)5Bqtp2sM>IJ5v~3tTbU?ayhR(G+5gpKpYs3-??0|d&!bK2jf)@OsQz&eRCR`*Q1LdEtK&4tcaun( z9@qoa-Ga?JdY{{FdnLWG&JlN+YclVcjZmt627f8=)jr_QG-X)LKJtzHt=hwDs_=MV z%wQK8SzKn)A=r)|=n80Sa<(Pv>M3fokFFVyPdN%Bk;anmuakILO;2upaKA2M4lFQ9VZ8kYz6MIbU9Dkby(~z?+x@^lz zTBdKQgYbESIBRqN-D)e}f9X|rVJ&U`NFP7OCKQqNhk&B8Krz1de&+eu5HnoUNCDeD4p9}BLB?*W8+R$I>4uxhVc0DY7(!q(`lJoMnr@kFm4P+Z1#_v6Gnz7IlNVBS zYIa)A{2Ler(P74dw|V>y#mh4S3|G;W=oyMbPvw4_dv2nF2Gx;H8+RNWISxw;F`IMs<45`B$KbOX*Y?SB zB|_-9%talmUSLK>rl=9Rr-0f&)gtAF!WC|>_&INHXU$oCE^1bK-LH>hEkFJdb~{J^ zP@9F=g<(mvT9@>cp~8OM#mX7Wyu&KF%1%jVXHpwtg^kjmco=Bln)>4cw}qN9CTc3} zrPy1LD5*uIKH0#*I^S_eQCTryH28Vfgrud8Wsc*er2J7Akf@tMwjoqmBKwUv1Xy(r!ADgV==oz#!D`x)*GHCw^? 
zqJ{2QpmrA5T`wyo`71|{fAQSD$07fz$Y#q%Q|9)Y#5*jm!dSDu%?T}yC!>y~MKi-W zI9Pb%U&GX=Ud*uT$v!E)^0`fFKklUPWNY?OM)`q##;RHh6jIhR~I&!G=%5rUPKoL}?p96wo{#$@4O zHHdJ2(VG|PKwmK|{Z%g&PMzv%b1?63QF<+$q5@}|)9ej9$J z*aW{JQE4*mXW9q;x-NxOJlF9+(UI?j)ItX4V*gFs!+o zj11id$RajxCZo_c$#~7TV?gL+spHR#EqCUs_pzqgmz5l*ht=~;L~v_foX`BlTc4d+ zTbA0d|L~SX_)mP(X8i#THUk~&;mrehR|l~mNibWcR( zex5h)wC0$cs-A=d#9M@aa@taG=P=tn4-dXgP+o@=iIY@>KRkRq-yTF8!IK! zmF}#UTbpX(k||}*GCGYb_IdtGqejQA7*vt)oW~y9OhvkOfjrTG>(O1 zuY%g~oFR@hjU-83=qs6Bs_hu?O)J$J9c@B;dJTZ~q0_QKuvubk<`?!~+1DtzD6 zyYD3lqzN7iejeXu!IjyCiDckhL#ylHIEsC+g zQeS4;)aP1l)t>L3u1Ug%#CaY?nlTm16H?`2NFRRBxB~CaSk$-O(x=O%3eQj+H0R^f zcJrd;n7{9kOouJ8*9CERnOdFCpQiR^Tsc0f-o8@*(b<`s+Rf@|KgIzSRsUOJ-7gP? zC>v?z6f~dmkP3ca)bnq{$1VN1uM`g$JDX0Mj*iN@>A~H66&>#%op~{JP}+`T0#a1P zxPOe)qL`k%63C5yHdv5iDafgDCRmN$dN!pa4Nk>(I(K$Dq$I^!7(J_!8*a`UHXC;p zXWUEf22ZH!g3X~I2y!S<=nfAg_AowELPHyLA8y+V>Eiag%uOL?$K^EXv|KWWs3dn9 z3QPE$_;P8c?vhgNo%f}EvArIOP>^kRSPTdG(EHAS8I*D{9z9ah z79~7L&d101xy=JI zN{Xadt_Rs2b~BAK^q|>o`tI)M=b8|!dnQWU{)O2wPm=CYJ_-(Uyxf10)W0zg%u>|S zqTIZh530e5@E(3!6W{#;Ns?jx5Q-(Vo?LJ@vHJLj_(aP;Mdn_}z)wiDECPJdHH79jBTeb49D{rRdJ>3}o|eZ*MYM zAelHc$I#&E#o9+kp?P7>UbuNuRG=b>i&5i4Y(MEiN6263BibyWeIO@0FjWPt(lPhNSw&Os&q~h1diWlMrcTy?uClyMO7TY%EYErW< z2KGJP*?3yrTyvLFt}5iDq8iNB)F$aN_+jU9HjQSd}~W* z0h@D$437FDA@|Ss)mH8<-dU&xsEYjKcW*?d;tT|x$3pf&)pG3UG{+9t0a#I&VYLH9 z@qwlHc24t1CC$hhG$x4-He!a65Vf3MgH;t2Q@kl<-%~7p+<^kxtf>-k7n{C^d-gDn zzbU9Fzm!LFSMP`SVNPYo=Yx4TmbrpYu}S5dIrzB%V^GG3<_gNj{UF&#GXC%DD`XC$M|LU>waLF=9LtWWbq9V+B0iQQ!~_P;--^-pi!<3V@V z<-s4d+ZLomeb2)V2F(&xUxC0?=R8Nke(kbisFUp!iaSj%OMQ^CiGC@&w_kBTLSSmO z)G^HJndnmb(DzTY?_86heoT>J(>#FB*Uu}QQnd3>5`_}qyFaLHcV#F!>xp70OQ(iX z`x?b=IGsOl{4dkc^5Ig@xxsm(9hf zFgcnYnPl+o`NQG^2S}#oI$Y=kOwvTf#4;w;`i$?wJ`J-`PR0$T!~*uirOcSqM!z_x z`}OYKFX#q9kQimwR(|-n%wjOHIO0TJ^k*;jMv7&u4eiCt-nr^YY-z6~HLNRKL_ZzP-U zP}A-S2}=$vw>i+W38^qzAdNBY-

%!4rWCfK zxYz*S^)e3+bjw0*YPz-gXl?5C1@$He;>?8INVuKrpKM#z_aHiG%+QkHG?Xf%+mgY5 zMV$A3vWcjz%s$$nfxFCobUj?y(J)y&xXt&gJa^8Ydmt^k`@U?57Our}ZdjWp$L$;K z{{3X^uqzS%rR8YqbZWfZ=nqR(lN`5d^qhH}9I}tH+R7IyFU)lp^i%{v7OS`~p)Q@{$h4tDx%e_+8teLvv6@f0mC&q_N1Ta&LFybZUG-$p-EOZh_UWXBg~O{EV^ ztdxXE58n#;$GRTnhw??!z1h@2(dNfc&2-`Zvw<|Gjaq&B?| zs8AJXmi|_oGvJ|Y5WkH$grEF-Bd4$M0G`OvW-+MlG<)Blsdg#tAYZ5&T>=V1X7khf zHf~B)X^(0xgB79{E~P79r4Ip3bhPAD&i&L+Ia;29AYWt1+&WRo=b5UI>nA{`+Mfo2 zFny+L-=CY*FF|N}zvRp_S$`!QFwSNhl_f&ice3R0p~kg~fas~*E?Uobr~8;8epEzI zNLX5$q0c+p6H!gt&cj7L1X)D>A6u=B;CPk=pgPe4M^CR?{&apikIiAbyK%wzp=e*o za}0$cwaO7^lD$Me)l`*@V(>oluE-r%kF5d$2y;Ek?Dep}tJ{T=B)OQCCL|%{^vl3!aBJZSxVcv|cZi?4aud=(f7mN-XCy9(C_ESV7&NkxCmuTtrO*xF6!#&h`jW zcuB~dC^V}Nv!?aL#Qnjj5bJWKv}f>!$^|0nif5h0;WC(gaEbNEA?R&+|XQ z6om|2^ziz}gb9 zQp+ki?Od0|2#7E{w@=NK+TGP{N_kSEg5xl2`?k?l?7H=8n!<$kshPP)>dnT%O?_g+bUnTSn! zu9BI|XFCwscnsD18KDw^el-zr&?-eudZY8raX_*9#vO(TFpd+?|J>#m zhZ-*e($S4-#b*#ibcM>knG=`7im7%FyGGNcu55sm`-O$#9gF{J?h8PwRpmk*PYIC`0_cvDe3{c=ftP1SC~yl6@)- zg==EJ1sNC-E1xKYQ##qFnV$Z63XaK#kI|~lV&?q6!FhEH`4ZWz3wWfLumteiL~W1U z%1|J7%ay~0QlS#@@UR~{t5+IYP1;0&83kBU+i@*~WJ4kXh$ z8h^2=+DQ?1Qb)DPN?}QYW{*v}WsNuFKmCCDgf2CY9j0#|Ee=Fb)6;|P?Ou-69R>z( zpc{~TD4F^}yhTdrGzU%#;9+QC7_VfF*duY}=?}F^%cIb)rY*hy=J$#Nawl)m@W1#^ zV~wIT+Aao1d}jC=qz_H=AN(oTnc7{G57FOfkd*NrzQH7TrBq|jRBJZB+fJFxp+oL_ zdBxcc&p4%zdykHON$GjMwB`K{LS~hxz$Pnn%j9 zQyXQW52s=aLnrk6l~|1pa82Q5^HzK9rW4|l{}JgkE`VgG*;*+a?Q20FxBN6f;?coa zNgUQ3?Pc6d94*#tO@nNLFxL*%p_0~Os{eSy> zHSp>@nPv2tu07o1!N0(wM;5>+e*5>*z?G!t^E?l=tvI*xQ}=00+Jdp=8i z-thIvXV{-2vO$$hdX1Y7Izc30W>IM7K_}B#S@GuF#HZGu+tO?Y4%77~6Lsbn+@i+NOY;9}DD$(3H~=>iV}^-xcQKtcUg zyErSF198Vbx1mOGKjr(Y#O{X$f7z~9fAJF~O8>Q_VXf#3&yDt?OPqO%f>luju(IEQZMU?V=z`z@MrwV5ju3xaZ4<3_isAnfdor zsl6y-_OKj-t-i-jm>uO?I746mz9w7P&CWB$TI^O`!49qI^n#>IV{P#CcG8`Mo(4*Z z;LzW)V;4iiy3`8}sgm72L=47vztcSvu9x1n_Z)?j6p=wXI@$sLYSA8b{^FVf$7;!w zOxC2rDw&cEq1I}sob;{D>H)s~r>6r^EcaK7^TyK(g4;dNg zqn-i;THP#&ZV%HWNc0;h{gt~eN|eWLTRbG92|6T`qHt7k^H8%sg6=%l53?I)p2~-; 
zBIw!zw2+{TB+qf%zQaTsGFfxy*((F547!gp%Vah~Pb|?`DNeM}zp-*f?bW`aSn*1aK7C!5ga1qoZx9dwPXrxh zVqysMg}sN6>t9J;nMIE{ec zBQ^FO1}Z4|?n{c$b*i6T4v(KNy*s_@oD#HwZT?Y#4MfYo@N9p%>1n72r?mQJtI91> znBxX05nfL#DXJ$2We+?ilGz3kGBV4lW@Z8u5Z3s7WRg1Ls{ieroiu8IfPC1#rn(_q zVuLqqqPH#Y_&ZU493q_kMzKJw;{}unnoEQ2ZmPW1sm*W?Bz%Wba*;8Z{?<`V=Z$ur1_@mlAX^^qUR^U(v$|V4b0ocWG&$?_5 z`vf0|3AtpBx!@_Rw~rVSteMt^r)Dnq*%&%NZn}`YX-OO9UD?+fckXl#;ZHP2ZEQE% zKX;A~WQUi)5fYG@1P>daU-%2u@bozids=yxpW^cO@-9fmBH?-$IwD+NXe2{zPAc>` zEw%zxK&JYeDP(MrcsBfz$?{FFkw>2TJFw-3Z4S$P0!;5`gj zHhK)os^WeBgdwQ2^J{i)!-K?%2qoX?cCWio1;dHzBQTo>O;{8;l$)O6BwBDBa4^iI zYUfYg+h4(6iW9|YB=OnS>gf$1azrKAkHY5&`zPd<{!Ia_=)SJ|bJ_|>MrNI}kJx9T zi)_qz;aq$sDv&!pg2j#QG^V&>;M$02iG(KA8Uj>kSc&Qp;6#UIw@Ve?Ueo?~yS{eA z-sPz#s)VaqB6Ra+_u(t{X)wic9F}%BGV)SpIh*V$0J#i{t5NC9Aw7{{3cvj`$d zDSwPQwVg(79Bwy9TWR1hJhP@Jx29n00GJJ}eKS05q63cS3^?nj%ykUczIw%3R zn>g64Jo}Fj5dQ7%hfO!w2DTz_yrIK>ez2f`ins-? z4VG)IcJ`^E?Cc?k84x7qCPKrSppa^b{W=^O>78{&IsU=QAtn{tFmOE-izA<3-TQ(^y4pRxAVO3(9^w3Q z{Cs$N71TokZ@fL$a>h0TH>lz6{6?!$sotr$S?^1Q(mMV{>d?obO?qG0m2@ZEzFF4i z1d1=2Ekw8^FMg{*v=cEg>tDE1`$$ykx!Np@Yqq%?O9Sq-_&ba_R0XMp<3_1PXp?;A zb7=X=&(s_P;zf=tO?zDTe4I(||Zga2!Z9a~z*M zkf4IfCV;A@?YF7T?4_GD!6QvmdRfp+PB08-xfe(kJC&S1Ct`)im-t?^vxV zG(drek~`UGXHaeFr)f>Rx)AU`K~e*~(|qXk`i%@OE~8GnEJyMLIkY5RQ>C}o^+2Np zPGd;I@AV#lBN|#$h-oJGmYdq=*V-Sw=;=*(C?0mFSz5FLgq(P2!arDB>=k-?p!5(O z@RrlHq-ED}P0-V_1n3fYhKHakXG~>*JoT49u1WL|cAO@U`AGW*aO)jwjH0^Id><}9 zG!zHX3`$Js3_>y8ROlye!^*@NOpmBLO*nvNAbor*=PX0<%PT@w&LRbo=j-FV`0JMT zoBnzwZQJBtn|4B&SJ>J4XPI6yi=Eeq3nd+~3af0Ynf#Aj`?)X*N78x$dip0f zu4#tNnM&cBDkSZE04iPlwpBDja0O5aoajuft+@(e2v~Dzq_-s+6KVt4XI>RilDhY| z2=bbxJWIB~RjzbGS3llUKLlqvPNt#OK*^qsZ2_W^S)?qu5B_JJ@B!trgeW2otjua=e(OSL zimGi8Cb`b&0otvWN|P?3Wu_HsJ7qXB)NaR_)NDFywx`M`gj_Sdt0wIfJg43&l{{!B zlJBbh48X8mt+ZWUNO%?{;@N&V-|fncqc)ozUxc1wpH1Pt|KMSH>I~kEqguTy_@AEs zODJ-8iHieE@J!i$u2%=4WI3R(4-DhhXEkL%ego3t)a-tKzu9%dIei>HZ-=)+i$Q=B z!U-^j&zs3v(XkC64QOs8zJ^2|k8naB*=!bSqX&u7tZgql#6bFHbJ?$A2g;yl 
zV*NYHX-MUk6BoqwJzyhyBk>oI;i!*?$7bR$K;42QI>fAR4P}CXea$OL?t+W5MRrmUvlMBV}gBxKElh+=(5d_;mysDUHik5F8wD+j zJ)|Lzq3rtHHd!BqW0X@i>J#cij@q8^_i4Rhp(xbnY-Ut7Bq~1x`&K&H(q8 zYX$=!>gBOR9>4G6`73@Ym9e|$DghzFY0Cul-khx@zQg8AzcJ3CJvNZlIb_-SOeMc~ zOxp*d!LCNr1kJ?3BYQ{!GhqX*XL%wf!2w5u%VfWYS)%)q3sY=&Z;|w!pk|f%VcQ2H z$erwqPe;(#MHjlns#p8r%fS?-Oa*Vou9%Kb;WwTFltR+CH%o{i##baY=MiP|OEiIk zH&n-mYlMN)sNLV~Kt6S^NK(P4^iEhu)7DaN`8#2{(;wTY?rfIe652S161{*ZYg;r} z$31#&to=i}f|ngdb_hJKD!Y)!woMM4CJG`KG;jFWlg5smyPTO5`UC zT#-$FETDqZ;4$cX_c2xz=<0qv8{EnA(}eY2XB^iKU$b}8Xn;n6LXvi>76Cm^^7!C- zhqxG^&Du;yGw5eC$n7J)bZI|ek|Dn@CnAHV|FLip2o+&3I9eDCG_IQui?{A5^g=|6 zhQvpRt_QtgHef5n*faxUu|p-h@7o3Om!7NbyI)W>{H`@zUb~B8bLGi0t%#i#ujiOs z7cba15rjJRaT8yPnGdtEMX~2aY#v`>lSu9}P}v}45^}0Lo|AWAOMZ zxm*r?1@w@4W*wcKbMq*sJjf37=K}{yccgt(t=4Or1D$;swY8np<UmzM{e*IEdzEV_mF55><7 zr5Bi`50c-xbB9IK{%20d?LG1yCA_T}lrk$a*()zx&z$^{nCNsrn3u)nwYk&LQfS|> z3~RH9exsSrGn6^ig6nyDt9`aV&ex{DhFj#=I{DrAFf>L@(<;-=?-7L&$qYR z#J*pcY%DnJdusO{Ci#1scFi^s7RgyobNOVF)MTVW)Bntm24>%b=k%}HpR|a7~guO?_Hms#1mx7)Qck*JjAoO~{g)uE|&r)WwT5HtV?40H64npTECS z|0w*K?qqYRe`@+0#(i3y=^)9|un-x-pt;*Qs`~y)-rar72QulFe`V#n z0Wlm?Bc>u&Nm(K+J(1vwsGC4b}V&Pk2 zCM;s^vuDq8qzSKly5%1VC9ro2)OP0UO!#zbt7NwG0!Lc5;AGw5&IE`T`Bdf^eiaKF zyb~POqIu{#5-lQTP2Em!-aR+H9`*{ke4B+Kazqv<9ST^@wWU7!m)XubF0Tn6O|w)RbD< z$*lawAi-&641M{MPxC+)Qcm&~LUU(=d>B7>y>qrvqs`{p4esVabT=E`>4<*O7Z(?o zOKOd4u$7jU_8NPXPC%pc9L0lZ z(mv6fX-`=hl<2D6|FRL$l&Wdzzm@h=doYt#>=kKI!b_o7#ByxveP?dZmQ%0)8;2$x zjGlBhJr!bjMcj9WV?xA9>KsM&oy&cZbiB6;qO6IG z(A>a#50ARb{&H$53?H~`{srFc_P#&i7VWOy4EG1F4Q>uxm{gwobP@E;rhI~#uN5*F;>8!IkT2qRTfR<5}`xA5AOV((tA`mEnp z`avqFeGJ_D{?=t?V>3@!aK3n211=@lq;+}MtbD(}6WeZ(3>^!6tDT)yG&y&%k^J|D z;Auy69?|qQBpQFwk5P#;gIE2Qlx{cNa(Qmc%BvB6a5cep+G}PH({HaYqJ8$aM}N&* z^$j=LzbH1mlJuoL7RaKR_MvDp9ddyGiVfF}Gcz-*r|lx*irc=2F&EZ{v1UMEAO(uL zKf106iH`fa%ToT(*7ON~IDqin4c;NJdsgAtD(iqEJajC?mTP zvZ*MtyAvUMM#H>iRmdpyBV;6d^L|dx^Vj>l?|mHiad+Gq*YCQ{^ZWg*?|B)ha{S-d 
zMm2K2@3SYB#ngr^Y@zlBD17^j3SX;Dmte^~tV4$t;P@YO;N_^bTJzekM7I-_eA8h%am)n5-^YKT)*{FRMDFIlVbzmF<<^-)Q7yG;yy zd8za7FNgky`dJ0_lP4*FhU!Ib04ULmudaa#nniz_B?FF{lBtjx;+)Q??3;rd5Lt*-I3c^S*!p4{BZF9-z(7@lNx@l zER?TAuXfi9j_rbnZw`e1f4ijSNMTQKGfMr#I^^QA0!M}-v=sb3mxfGsM~)VBd+g}^ z-(F#UkaRFz8yg#rn@vY|N3L|QESZ$9EHCV6Z$s0Iv^PMT<+$;7&5ri}g)Qc?Qfq&Up|J0B8x! z*rTF2ul+GjvD~;b8d-D>r!KoJ%a+uKLxEiKhx4(DHbidw&$S+}k4$;^@L{siiO$Hy zrWUMRc4i?BzqE|vJ)i*E={Bohc0@?9O5mirx*=+K^3xs`)(bbaF)eWl2L)(V%})&( z7T#R_x-UX6K1I+_RYWG(L>k$v{f9gd2#)vdS#04}klRd4oA%fpBy4PPqOlr9pl7Ph zB-25*)275)?(bXb7KPQN(Vm)}Esh-m=_Qw`mO)=2g?~iH{=&H|)8l}j!dDt%cHj)| zi^KVyHjD{2xx%$mKVx10_}4}Vs{j=|eDvrzSV|XeG~tYCt9$)O(ich1qYDp>SW(Vc z9r`}{6Cey?Q#$a5GgU@Y?DD@4&jCE?0v?9)Dp3gCuqqkJZ{S`1Wa)um8L1jE(qey# zhca%bN5{o+zqvj2;Qjme=fE%BaMP#ejli+YK)ABdJ*5(p$u~9~ei#|4f{v=JjBd$` z&wuKvZFP7#CUKT$v<(b)yoj+ssl0LR^~Pna_{vuXITTQ{pa%~ge6=lKLegg0Zn$jW z5BZcdc|Le|Pl+V%3m=Fb9*0@m1*f2B+cKGAP@nhD38&$S76{Q1V0&Rvj^D8`TCR9| z<_{;?JG7Ax9!sM8&0<$}JOA*eL0=|Qux`4rKNJD&#&^E z_k!!sKh3(1AMDjCT)xVNF@{rkuE6F3p_nE`HUdYPAr~UHlZJXy3WlJPnE~ivz9c(Y~1-3B99>-(5$8cI*73 zOr9O^S(KO=pRjPXO9H2=TQxXv)|L9Fvm>^i?1Yfw3zUIHl-=fd^eNpXqh%$<%~dVi z=vl{pIjbvI2*U;Z|?9Or5wm9-62G2OeBqk>dG#nm!Ra0Mo%Ort3 z_OSqyMLz7Cp24)tV4P#@y5H#VQ}yws>M5Mc5v23Vupq`F# zRlD)Q`1x>=!pmbJo)uk14rX6pU#d!vf0i;*@TUN4SVFmeX!FeTHBax8b<^A1TcLOM z!ZvCe`arOgtid&T8f=GeT6Edh?axAQx@;9q74*n$g4ZS62sgC1zuz{O-de}H+YZ0B zr9K7FCFpU9_Ij!jL;DrpqS2_r;D-A8mJlnNGw#*3?v)+$PX>>7hPiRNe4vx6K&{$N z)~3jXVuNSPqHbv72YV6Ou52s_xscHted|-InVuy zG$Vg@9;2Kq_izWBkiEZU2h-BRObBX8Nr(Er7~yzpb^0giDJdzIv`lTB4b&s*_9faA z9ubi{*Dz}DuUO+Xy|+dBcz?sQo(g6yjF&WgaTFED3UAH0fRP3d7jpNYDs;bpp8QfU zM5lCdyl(8rU4|;Oq#cYV)11sRY-CS2DlAx$-Miyz46i^2-9oC}gNnT4xPfky%_?Jf zzE!U;eK5M7Yd23bVpuPh_Ny+EREyH_fA?ltwP&Bpw=zIYSGKdW8_xZ#+5EQYd-^xk&KLQ#b$V&~ zq-c@l4J0r*(46FRQpF*~CE{9p_T?zV{jXcW);765M^k@rent@UjR` z50?GI=DskUev6&zNatBY#cZtg&6_u;3K=*7kayU--_L*EH&cW*a4yeMPi|##A`46~ zO4i8+MMvl`4!$RA=KCAh>NPYRTgjgl+~;#I*4E~Ba21~%fONuPU&CvMr9kU{U3l>J 
z>;D!Z_}*W1w1Q|w3gZO>PBNeTa+h#$=sDXCUTp}(mKEmX!*wR1tnekKW(X`V&|pw4 z=EnO?SuOg8c^dDoiBE0vRi6}PW~QK(utGsF8wUE=uV?iX{>_{1qoos?L@*--4@5LC zBQiRgy?eGwic-?OWHVg8vYb^b6g{AGggwIY*iAg|SLgHg6yN-PrX}Q+OQv_e@UuQz&Cbs^C4Jn(!-M127i}@LNgNen67cBRKiY9O3MW#O zLP~zS`FXW!-KAl$$+G{P8$MK7zOpp>6?;C-I8be29706}N;LQ9e@+FU+*DtUbI)%+ z?C<(H=|s?jXO11agXP9+`b30IrDQC8dj9;m_UY51<2e~(9T_(PH7pb@V;9Z>AH{jh zUwdUzt^ky?0}CLi;hC(;%JNd$lwjbsf>UUXsc9Jx+KgTIYaafc%y!7~%Za*c3xn)J z8OBHEK_NoVpyJ43{KgL5q!P-=&dGM7jzV;zU_S;DEyDlg##;_OUgn2HITV*BKhOUC zz|iyiMO^UlfbHZv5>e_Q*1*~oy-&}%Q~vg(-(3a;X>td?q6w!dq}g09=pBU^@;$(1 zqOUR&K$3LLeTMn3I*RA!=CrZ5rn+VS6q>p9uy^}Gy3K4jsznt)lF}Zs2&7dc^P`tvH?uaH7)?JLxrB(v(^{pW6r?=Iq=Ll>>YBV#2B-o6XP z5(F@hI}jNRk`YG3l6t(T=~JGb8!F~=eSUi&H9kIm&)bA6sxhP3^IBB8*o)+*5}hRH zO8q!rkmN3H2K`K9&Nd^5Qy-rmN*m--qO4i|_uqe0eXpZ|BwcNcwflO>w^F2Jn5lcQ zPs(t1V@>Y7!p%or3Ig{>MBhw)k{+vIHcaj^*QYG(7ZnX@R9un!gM~i(Ic}K+AI=|u zF!j2h@`aZGp(w+<@HHs>sGELJM=Jk`w%HjWbm{@8mKh9SWmmXg6Ocg zwZ{7lvIP{DXL|;|W|^U6D#t0fo2I4bKUiK`sD@hjZI_5h@EYn3y6$CV%=>ET1qB7i z#;Uoi4Q=0Svn`vuLUzseedo4v9=_>!O0r!5JBW$U5uAQh5adlwqhB&=&a^*w|Dnts zOyy%C3iG?RiyCtfwda-bo5KJsi6B5k5~yix3_au%EqxA9WU`{hUiY$WIsTt&W{7UIga=j>=?zq0XtjL|=E8}>9kDfB`gyqSyZ5t_$ z9Cj6qI9kB9qM&Rl{qa79Q}Mh#EQ~93Pet(2_GXrf(;pt-nR(U`-+S9`ajd#}ac-jL zKD&({S#M=Sk|J}<@5g`D^)$$i2JSd`r2>CW%FWFke99{&RbXkgcWksmZXP52Um>z% za>1^-z}CdQPSPAuI}03h7MP8T(ACu&4fI29DA{b<+S;UGiCui__4QWUg(OhxmidSb zsT`B~QS21OK&aU~%Tt-2$I))E=J)+uSC{0uvP^WlfzAS30%Nhaw}Q&+#W{T2)Fjy= zSKbKj84IP}89kdqeh63u!I1<-g6py{GJ>(_kl%}&qN3bg=ANYFOP?Xu8P$7R%#Kn~ z2;|ETfI_-4Fy@2MLdW<%X~*~Bg6gD;5KKkj^yFX*;x+?Y4XyL^ppm;IHBwJOFYvnk zMA6ej4&n`Vn=OXECgM^X3@DfKGcJ@=5n&v^E9je3w|v=Lz3uIdvD+WHeYm(ZKh>ON zroy@f3CTLqat;ErJ8_bJYo-Y|gog}=C9GqjwZ_<0?HH@K^*D$RUgCGdfsegKj)uVX zv3|uB8M1~`X~dbC6PSjC1%yug>gziJ#OF2<-5sc-C4a~f$~bPlqldTT+tH|d9UEeE z7jS?a%%easQ(2qxrD@XP2qedt?fj-q&t#o~>m@t)Y2uEtNm@nyo!qCN6Eyp6)Pprd zaoM>PcS_H_qN7!de(g!=z9=(F{9XFg&1*j`o5$%gTo7Dq%tZZ+k=?iAnpQ{byxh%4 z(hOF4E&2$CJE>$Ezge8dE%(Jcl2#Tx$37lxtUwdv=Sw{muf>Kjmlk;+=m+{s7v5ON 
z4o)=b9kc(TL~$d`x)An9+MCyRh9gpFz-t3kxZNcPu@No(d#q)=m#6SvZ*kb-Uk`kA zEdNSTkFfwY8GG|Qwdo^{YGuaz_U{jJ{Zk|y>(ckc87hL=hx>FUb8}qsx0SIT7ytSy z213dZDq9+{wavB8R;j1SBK6n#*Q2wPCs$+r!^6Y<{4DF3nU?O8?U99=TYZ#aM=8^o*Sslx@^d|T4aR?~31Z9RD9qaU%KzI!Z;obQ(=P_Ovb(00YycMPTgA!4Q# zetja6^(kcx+A;;X+}pNo!xJ#607PeJG>MhHJvF#3+A3R(7*QRxdneg*OfK}W)H+1- z-K)r>`5mm&ln?5%IDAzx;A*Uk5J7Z|c_BJhI5X4k{U%|YI>B!WB9l2^BAGanVk>QQ z%7395%genB7xi9%tb02^q$-#<80Nv<2&fY&*@wk)^%9-6E~Uf-@bqJh%t0>C#WCmU z_N!@+U+l~|*%kdt=E@9DI_sPj6!V6=D2! z`Zw07C`d;VQ3vepO-bjA&^-Kdk3ev~a1lKiZzcJ;vz^_`+i@l3l%<*O5NNsjK?VZL zP67M#{ZPTHU}12Os$qp8yZx?PLw(TxsjM}}1yZ_~{y346fPwg)M4+PmM&eHm$1nD# z>ZMU;%2(V8Hzx{0YCv7ZsTaM`TWjw!)<&42@7h)nxJc|x<-#?wr;A(aS7vU51f6_JMo(0KH6a=m2g(4hKz>MEC;z-m@8#(KglLL+=T6 zgt|$$sfc@a1}d?ofLEwUz0!Ua39(eSP}uHK3A)Rt4W2?f%{U zqQcm9q~*QJ2_?b7V?fD7$X=y5sQ9Um+lEzIw|J;6I@zD@B7Itg%f+0-@#`ga_uyeW z6xu2Xd3Yw+^kvn>$0%;y)+$O|#T{-F^*tcr0kQI~`gx4GbpG8an`xv)5CKI0I(2yC ziXjf594_jQwQDuFs#l`*yR<3)OMh4Btdr@t-eq%$Vz3d#Jf?Hvh?D%M{-wd5Iq#4D z%)(WqeXBp-=kGVTRPEP6P0hTbsT!k>)qvqzJ5t+ISVMinx!=2j#qS>G$}|gp;gURiN{qqJF$$z(9||Oo%I& zP#8Z^;jaE)zX-k3Q_OLl`6FieB}E!;73qF@b8QZE9D^Jnb*(+AR~C?zRGA+^}#Tb5ZhF%_(G z>Ls&4^9UQ{rrxl{udB;whCTb-_}oP}vvx!21t%)bhUkFcMhEI_)pn1eWY>uVyokMR znKVc1??H(_dKX2C(eB-PR$bc7U(Q+?0+WOlHtJ54G}hM#(Q_WIsPJd*0VCE`#Ll}- zuzi%gtD~ObX?^|eM58kDnEM3@Aih^DEiJR`urZp<>~KyaAKz4z-Z*VW_{w9sDjD;d zQ0bvpITYFMx!?n}`@-E!=KY0Z{e)wk`C@MmzD)gfXjEM|v=02s!nI}4nu8ae!N4It+b31t9$qQXERa+sDLO>@n@}uCJ)Ps!X~VhsW$sjPTF;9q@ZZ z$JG4`?S_(3@&^Y6U9Cdp=VZ)duz9ibtL8o}uHMy6njE3|0A(k)qt`OqxBEb?yYKak zA|ZlmNpxcihxBV7^%D+hBd3N8ZHg`v4!T;4_+k!r1oU2PCw+&go>Q$|*o`$E^IK}E znQQMxgP|bKcaL0n^h&&{y1n8!ksHZnfc@}M%9m>9*WDIaew-r9$Viq6iCEYTB=}0U z{R};-^FlKSqK_C~V#k!}xJ-OTsPu^1^hDD(#uaN}qHz#Zk4hvUY4vq0aK?v`oJIw* z@hhN88Ysa3Ax~asi%~qw%6r++C&JU*bJdZT)7~n1K_{X-z zv5|L~)_q@t!ctE_5A-k~H-uWB#huSIvM9bI+-BkW^fjeBVfX=_TDc!JO zwzZ&a8&P=O7CO5#&bJA4wCll_4IJNkp!p44JIlc3_Pba$H%OsYY&w%X9v7TP0W>$i2p>{f5E7!9yppe&Ab)Q_RgzYNVqm-?JOde=EJ7v88&qkY)^9 
zh}^g;WSTGYOgR8lvL}LeZbNsr1xVZR!XjzV!G;i^PoF+P-Fbju5T-ATp(|P4G?@n( z2tnB>J|2AcV=B%v7+cVC8%3-8Z$-h~m5znwkm|@g)~S^u)4zZ2xnS*(fNzC0gy{by zfpE0gfXNl_-q&v-jYyI@dG7Vaqsnem?!TW-@oTd(%_MAK;Nrm78yev+wRLsB$k8Y# z3AFi=3=vMpZ$CezWbI(iFX?g3#&ix!H?cKH)2bATw)(_Zed0q4#|!7Fma{zsc7UKY z%rfQb)He2#_|#Ype~7MpMD)OfU|+FdX58XjMX`TVGl(m3cqwh=%eNpK(Em|4e_4#V z=~Nn8&wknNm$w;Zt(C@|rwNKA*b3gTEz1ET!yD!mo{ikKZA|!!8$?~wb1?d7>s?I7 zDmwx@j>$8@>f5Tg_>-Y0B`KJRsf}L1Mv|yiBvGX$H>M1WLlM8_PK(ply)K`OOxt;V zIhe}lTQXBeLRy83M7qG9Fe&}&PDIPFijaU-DVLOu5eHD`DQ6b*W2hZq@(;?v2eT}i z6aBsxcAv{BkCImT@{0cu_ouGQkr&9X^OK+H$|<4`*iwW?Kp`X+-AUKAhVQ7VQD>2f3ac#+d`Yxnyjg- zOPl}BJd;xQ@4t!h@jSp(6w)D(ITN3s9${zf@UhZ6bB2k8U+}WS7Q)_im*iUxnOpxn z!e|nFY7t0U(o>}F0=hoAqO3vVqxC!?eqU1*cMD{U9MC+P4H`AOH0sGs9s^;Vj9FvS zmD(zt+pgk#smYPfg24_QllYNqHAR<*7DLP$VtzRHKXXd`7NI)1*TKP|=dq!UKX&E| zL3-aUFZXhY%>9zYt%wE4Abj^b;Ac?PE9~s+>^Ub@W2ql4wpS<+7EB;<((Bi!SF*RM zmx;QQO^ot#KJQgNtI_er0{WKe>^pvg6F-=yt^uG20){C#Ld_XyNsCla*{gTfJ!S6M z39H3VIThV}%dR22#XQBcj6u(OAi0GQNI-gl<TjY1&}m8mdN*Ut(gRBGS4J zc$%GPb1j|=u{kCbDTKJ@XX{LsnMirnQxtcRlTDm1+JC5VN9}}g0h%hC{Oy|@?vu^z z>9o2~D5xx$&zy?Y)VL4Q1_%tZ51_P z>#_Z4V5;r@jNZWtkDTa++;Ii=AjT{+&Q$yeuf!eFs+_Z;PPe7f1bNbxdD-=)W{>W2O-z0*iDSoSTrLgc^ zXXtSv>=J2jRfHO^6Fs~1CZCnRwN&bfg%s1&#on4Q!f(ihuK!@3KxA)bEjdO4Ehr-e z-5G;Q0vG76SNt&?-`&UXAN=Bpw(73`~$E%BBtCn+4#?gE-q*-mh`CyS`Im|Ad3R<$V%WKA*Fhqj*6*Ms)sbgqJ~Pb ziz@{qjrauUJHk-hOqK@Ej=?+9dm~ZMSLjHY=bgieq}_*1EW<-N&U-Y<&kzwLkIo7&Z#X9 zs3=`n@VXb7&gI6>HIc4)N(?>nDZ1s5&=TY22SN$}B6YVM>j9+Ed zfFplKpmKFPWR}famI7sqIsNH+$ZN-F(z=(y{c1?BV-NaFAin=FA0JU?rMhn1CF*R( zntHz!OB$$Q?zg7+nIqVkMtnmH^V1WSYRri!k5*&pnI`l4$ybl@btUl2iSp9)(D(RG zTn;Siw)xiHd+_WkP;Gb0j+QzAqwpVg9A-oQ(vPgBRS_F%4ku$)vvO_knsh>r0AE`c zY1BIuAZQcy#}pP@OGf~||HXGd$7k z=BpNlpX8-*#0klE+~r>60qHtzov1P1yYN!yMU_+8#Irud13LwZ7;psFS0Gc?)Cn1U$Hc%cKme4M)!GMzC4{JB?DBrB+VZo!F2mO&PDv8| zOikK-Z}@Bm=@*bn>`Sp(cMsu^NXWW)*+~Ww=+7a@iU>#bo%OV)c)EH+$(yOw$~^Oa z{}WwBTopEeyFB}*?7^K$s`~&^0*v3>u0*mMdy-Dwqp9zz;(`HLU@h&EG 
z%fNWb>fqd{|H^kT?p2QsGRI*oxu!3^CpNg@mW(wiF;tp`$Zsc6hL*ge6`o4T*A?y= zcDg7O1?_-J$blFTp{}njT|0bQM~T3KR&$7m!Q_JTSlb0wY{wT1pLSm#S6V14DuTm? zBX#Hk2q7a;IRO#c`e(8jKkebJzE8)VD)0Bi&E#di??L{16_*e8um}jm4K-pB&7zd; zWV{ukgKlvph7z~3c$JXbEO4&#CYDXGdY!>@aXjDAde4jIA7NB*L|$+-z-LJqe11Id zf|7u*+(!R8<61L)*qe@}c)|JZQ6-Nd!|n&fJ|JcId|a9f+?pP&gdm9Exv7d_ZVU(Q z{+0$vO~?w>H4D#-*!W~jfoK9NP-BU~q1P67 zNQs9jPHXreY)N88uG&k49vo+(r2HISDu@6z4pV}t zcyKuQibyr{@cH}nlrRt)yLZ2e8ZBT=7$JU`6T+U{axYLgT! z8#;8o6^AD*PdMY&rk&DP_1Ku1lCq1GabFI5)h%3d(yPfugm#dkJOFKvndtirii-z~ zfy6CsRnla_#I@r@;9C;gA%Ii1MX=3>6=$qOW%G!_xJm~8n;{S>k4 zOF<@9XC8=L4cT!p$iLRxn>dLSa;XRyNOfj?x=|}7R2{g3nP3Lo+?v{2e^6%P7L#rX z^!qw<^SZP-s%NNxdy7t$L&ohvz9QH!<sNKPr82iQszo zxOOiXuj+)z1Q1t@SjPmFRuM-kvvm9arkG_6~wZu{mD=ZN` zrQY>zvNdGRRS5?cWZDI8dfaaEM8P!LPcXA%i%BtD(2fQVu6}gZcn4B2Z+G)iz;Pdt z8_YMkqcgiIhTY!5fi&%9csY2tOGILBt^{9dc|3w!$RUKNixer#TUIi4w?jqmSuHcT zZ1yQ?rWFNJ7e7XcHKX*VZi=W~iT>J}eYuVTi1>gY(!unRTHA|SRuiD}IOkkgazTt5 z&7Zdvl~)wVw&>xzFp?RAvZ;V~r_dZnCMLZTY^<6B%=>`ct)$vVwc(jKgFsV$>Pz8| zJG4(JTd#TbF3=I7jo9Po-Bp*rPeZ|)1&QMF<#aqp1aMGhMq+it3y3;+Rr6!Cl<4we zzhbp7w+9M6yYE~F#Gc#|`T}Tuq#OwM1Z6h(9p%95d zY$eh#!AI|XXw828`-sr$)T!{HGp4ahrX@(SDG^i!dun4FTY4911cfj)dwxZl1aI4` zuj@LS+83}pyt*X(Zcq4%RM7?E@KAhld^29NrI=xI~rPQz(Pd^p0@i9*5+r6`5gppPVGi5NL(tV`**NZ23L_f%Fk!5L;B zEPx)CFMY&`COR@-;+`(vW+4uTk79aeY#u$Al3hKeA~|XJ82|aOv*82t5i#b+Ysm=4R0*6d{NNY z<<}!;)ZWy`f*JDH^BHPIJ7OfRoeAWS5yx|QisQTm=N2KPuvQ#wl#8&BF$D+?$aEjM zZa4eWm-yqv#mln)^)CroxxvUFVGd{L^`@B6)9chyX!{~LbC?hBooEasD){c|_ZA9M zJ6@=|c^;$0yQ}_@TJ;4Xxa!Ns35OWm!Za6QAJS7_bj;p*t;xd0>(%=#AQeV|n-SOz z9cByx6@6NMt+E%uJ0PDGgwV1{--vhpoR0J0rqj!M+Wos7QKRwFB>Jphj}7(Z_vRo~ zMMy(xU$sz(p-^{#s+CiS{Inkoh}%`0&yca%+)Y~YQ@SK>3)oQ9-02@K$^y)G%0HkU zfBQq9YeS%Y zqO6rlSr@GJFMbV5TnP1)q=0u1y$5nznr#m8U?CP>OY~T{u-KOvHq`l7#BE6RQEB1$ z5qtQSr5hx-YZ)J&dt2iCZ?}GbsVA@pH~C-x5Kcqy0fn{-ky~y?cBp1@Ca_96m$5;s z(7T8=(=s1+i2L1HSJ=XvT(w`ssyUeP#c8)(5h5Ymt#B|k zZM8JO**T%CJES5++A+~&$w**gVd2-eS7_BUXxmIY-Q_TLNvuXFUl(S-&y=xNb2w-a 
zI!Us@^I^3N=E9zFBm~ytb=7*QhJ+YVY}xUQtoxFLIQ>T#?$C<(5&Pl1A|=W!*k3#8 zL?t3GJ%D-L7UPEF6;ix7d`Ic*7hS|_VM+v{QYLo@3|5S~gHi-y`XH+GOV#DUuD>w8 z_mWV+>I2wh=P#KAiG3A&U5U1C9j;bgLi)54IE53``TDDKS$uq{T37b$+ZPzOJo&;7 zV{I+2(IiLXxiaH92Ju4tyXVSM(D%X}jAMD3?)%|{D7bI5c#f^KTFS5tL?QB@o|@@h z&>*%Hj>Ij}V-}9+^jD4!m9tq5DUwY}U>}A|^ILrvEbqT1Wq~NP$I;HBdtR$0pb?Oi z!)zSX33Bb}%opGjO%h2C#{!5V*`_ls$cl6hQMS5ne4ry7^K71cErWxgr=&LbeFknW zG9?NWP$)zHB=ZJIexbwgnf4T17RH(=30Jx=eCjC+DVAxc9%z-Z7LitoxCXoMvzuoo zhuTDmvX}Ra7&ycuhK}#IYTUsw_e+r#_yZ3u5OwS9UYhhel6sqMZ}iws7X1h#5F2 z&WpQT_n6EuQV2Wf&HS=P zoXS1G3P}2S(6qHE;p!s(n?8&)~*Hy9XOt)5%sk-PJ2}~nsk6c#u zpZ-N)WETcBn-i31rgp1i{6&I<^=2WQr8R7;b-&XqbE?+;I2PhdJS>luh3@K=#l{tK zuOy2Z3=sAqV2%WeZ`laX%@|{)6000lB?EEYzPczU3^KL1k*>4FP1Cp~|kHn*nCuZMV9-9c>D!iI@ujA2u-64N(Rokx}if#kD6 z)`Iy46UO}r)^QTDLsze{$lrn@!+*OH16$^lXJ|6cwqAJbX0uvZL_O!!(?v`1 zR*WaBZY5FS2-zdXH33O@!V1Wh+KUxIGu`Naj(G_jIkTm>O5qN&4yYFj8POc=|!R*!5V{RARsn^r&A-4<} z5Rzp=^-Y)js#d4!CJfZ5}7Ii*k8(hR<2sO&z!#7)iK}!P zwJ#PGg>;0LY-M+1L!$Kw0A!Z#+PCl3{2$9{LFR4~LW+nTKq>&-rtJjjD=X)1P3UbD z;`-u7@`gnJ2awM?Z^2tX3WnlOa!!zry-#IV6t;YNu1;Jg9PFHV$VvMeAMB(oOi`~c zG$fo(;-q~{eO0fPaPiA0hY`XMvuKJ>Ej|O_LzFBMGKW&W<1Eggia|nLuP(e%uy*DE zcI6n!Ev7wgE+*7Kq^LK17N(-!2yZ|ytEoQH`E3~&2;dt-Btmw&+jC|1k5f5&r&Z1= zGRHg^6W+zGN`L=>pEG3b9x?-uACqY*lU_%1);-lBAq0UW(bzDxJ;H>tKQ+BH4^Uxu z1<5Kw%o(MSv3k7zWXTQ&O%*5t7t-6`2Ymef=XLQI0iH?x-fHrqbo-^xr=-&7-BsLN zmOaTmBO_NtA0Qca2yAtynfvdMAnR(UCRszqsyBvvdsvdkA)*KqQ^zmG!?p;*wcX!$ zG-sbtqu>0a5yQ?jM95_pCYP-wW)uJ|z>kPJ|7vshTZ4v37

v>^ck6qUIVo6Y&TmU9XtG( zvaI)e1rla0IyOszyec$Hnk}pRm%TPN99~GW1tf4lneJp`7*c7wH70Y4veDcbN>hnp z4`sI%3wZjEAMv$<89@rJ?|A)e&4YPOH2K*nb3hlMi;!sR^gfa8_7~;_mGvfSOY=B| z{|fuFVhy+O=MazD^wJ_bI$J=1#zm1S_+v&u&xWcxw+n5mZ@C>r?$CJKEs zhkGTrYNUEv-S6N|V^NNh_f3dW-r@o0w}Dg;bdaNHve|GCvO@O1R~gakY={rfN;|JPJMEYbkCL zlgdLYG&Zkbjhj#Vc-#J00hBB;-^%w1nHCv?gswVmJ!oN*V-l?|{7*TmVurqg4z>3f060Q8`sXbl)77Z zbj7za1@Z#@Th)G}lTGa8H6F;L&CE8MqT=AQHo(Z@c0cgiQ130M>E!#>H-mKlsqa^y zI1W|d8)X_8L^Qsk!Tx2kL=rm%lr`~cT7@QSsMJo!t!a4Gw_fUe>`HI^BCX-iVRrD$ zP}XWfLvq*`7GR=a{|?hL*QPsyOu2U0Xnhq)PJ3`#fnv0??d-Xg3yS(@pP$Zu+lIY6 z@oPbOs&F~Ppt=8rTmL#mJUaQ+G_HQq7^9%nawc!{ABrzov%~*6bknQ18FKAtcFnID zWENV*`R;kB8xMZAEJ4J2t=e~Oa6$4vSqFi_Kr57~#^d0?6}1c4sFO#9Z}7g!;JVYs)W(JNyT@Ie{-(aH_7C8<48tV;duOqdy*JSKnX!kX^hf4xr;a zXVa3B_NajRa7O^I$O<;=*KqFkso0a~%40vsz~iinXjX+ONVxv?svnu~&DnyAidxbv z#7S>271+K&AC#*gs$b#;c{LsA;UOtUIJkp0UBbDx2F@w$0Y=~Cq4;Dg-!aXWNyVtL zqu4hV+25^y4Sk1Fk59q1o~kDt0#M|leKPT51}X9xRf#Hy1I#SWt3b^HZg*dg?@2Am zKd(QWdH^I5r5X%}A6g26NBPr29b(y+Dze3aY3O59&TDHEl-oqnZ?9H-%KbTuPN1by zaX!y&qhE}Q!`13(c+@WErpbFv@pGMTz1p{T0pQ^pV)VgObJ8&;TRtySEk6E*Ro^u^p zRNWexHk- z`so>9gjgWuTx7ov%dbh*pM43~ekrKnEgG{?V1%b|nAjG}?H1_Fr;QzKu7jqY6IdAOEMvG+WFW;bVzMq&i}Uwy@GI zszqU-`@aJjAmvz@P260u*?#cS(BqKK|i9zrq=yYO!|YS+`{z*mK4A{L6g$=9|WksDc`qTq|K$0nNt915^9zAOBM8l~yl7emB zF)~)=1KDbs+)C~hA`|T9YmEoUEAOsz+L`9i6w@-kL_vL^LE5^jY<>u%^9)4ZgsF>A zz4-2Q@~hh6*(i9I+I=@|t9YDl?MAjfxQ;o2h8lFcD5z0jzK>j{|J9mQMsC5tMFz5- zV_R+w=ekqP$;yt}s9|LUo8*cd^cJR@($Ked$E;^US;34H%QwT703CKOQEH@pLlo$6 zNI=}?s`Vw{xh?3fkPg-qu18OgNjs?^&bvOU7rm@eX3)P{U{_)!N;wt1Vv`xbm^3?4 z=4}Al(jrIf=*P5IiqEzA8i$P6yM%~KQO|;~SG&Zd27i#h2&1u5#kC~(mrFh0rDFC; zIa0AG!+2z{0{9*>Z(F|ir#NynyzWz{u>kX82)Uz?YS)>5NPXTdl2xSCtdC6t2zd&=%iZs_+w`dks zpektZpmfjk_l*x)-T~Zv@M)YCKEB1bhJp!Wvex}i1+BPuMu3(EK)nTN9KkU)?u=bx zzZV9AHRY$F`;AXA63D$ASr2?g)jjRpSf`Ktf@jEfg<<4?0br6DU%fm2O-{O0UEuA3 z&m0~_=a0NaaFCu1-v5mS7oZk6nMn~IZ=Bq7D=mGo%)OckZAo2;DrNuNnfsvEVu$y)p}Sbt8|rs;#AQtyrJClhz%` zUt|&e`##Qg;x-p%?svIY=mE!^_J@S{kWL-YFjh_)NF9^bRm=K<5F@O|_+fpo&37)& 
zi}km*j*pQ;p=w3eRQxt~rw{1QcP(+1n)KGr{e30^V|pp({W1q81lod{KLEhBeH@dDM0_VRYK|~Peu8z&11nSe!l0H)hC&8Ju z)O$Aqn_8h!1LBSE2G1;3aHCTZIqwjX*Jozwf+8L~#!M>K7<~CIBS;MQq za<*b&9m>BQE9_>K8Y~1R7Nr}>8c%t><2ery9);o7KFRzR6i`*ht?}r>NJ3xmD)_Hu zR;N>v@8!*+SxiuRTfi6MU8N<$Sx1hK--91E2sAwAJZ1S_Xnm_H>%x#VEYsaZr421Q|LWGHD71{hLefFZv!P$GSb?5Jnb*~3F zgiHqX5s;6EB<~cJxO%faT(vRi)Q3Q-#z*=oTnw?XBQ}+xwFV?B@Nw9tlg{TacAxVj z>Ds#yHQhd-@#t!Zj5HbT!*&_hq~x796t1Z=4XKNvHV&5HJ9rit@x&(Nfeh-`D%9m; zNfk(%oT}bJmT={}^cEDVoe2d0vUdf)eX(iZd$e+uxNc1NkgM zKZ@a~TyOt6vikc?po@0 z*8=3$5Ncj`=y1u8-KSS&c6h+;5PCD7%s5WAsLqp<6gpM?T5+=|CEza(p8!Q@o*3xl zO0Qw)2z^@8Y^&wT{lfI60yPavFJ>I84VPjI2HnKivKl7=$I`aKOaW$ql0}liP zaf<|paSwm(7^N=T4$=t(>A+Td?LGCQ$pozRDZSR50)+4xKoZtw`v!1S<8#X}i$2S8 z7!5qIn4ii$OswrS){s~kG9JY7%Uyr0zS?0h$wYd$)tQhLwy$U~dDnm|J%{oms#Jqt88%X=71;Pk)MIqHMkdmn&ksEXGl zjp=qp5kT{h!l+ z_Zq9U9@uQ%6&5`Me#P7${K|3MS4=*C1t^c#upN+b;{bi+36^SZ>+!Xb5Tk{v$GUF3 zSj%8dTyOy=et)Zo=J{aN(Y@3g%cS-l91n@b*aMzvvh2mbEv_@awT_ylmTXBWTR-75CnFOWc z0oEDxzYW-x3T#7jqEYa7DT{k?c>&iR-TCgP8e{cd*PZM1uYS%$zN*?1R#gV|=Nt^A z>R&jjDl_h?zhcO!e*pUkxA?9qC_)`7>wA3XbJ>Oo6>!NfTi))^IW!*jQ~=YZV{_mq z`PKdcaN%wl;qo!BV;A)yW&XZi-4}2H7%oBoba&UvpPA!F(Ub&RUkr5Ety9!GZf%Sr z_pvi?+Jfu6Ttc~YEl6WmT{Ct`3iQj{W2A;p&*xI{KUi}v-4)_1jG|A*g^{$(+SF>b6c^#e8X`CtbRZq9 z;UY(HCayW-7S3E?6Srj5tg7U6W(+v7PQ@47#&iO{PxXz<+dv9UPizt4o&nZd@E`>@ zZFipoPDCg(vXZH`ewwxaT%ic3med0h1tTnL&JK6y;~O+7mg>QNR!g;Z$^P z0YEk&_bBr{Cz%kO-@q0V4*TLHOW1SQa_|D9Vmegxq&`2O=u&iE zG~$dnVYP_$*maHXU;h_B%r*ppr%vQ$9I;zFxH6T4j~PB^Pf-14$`{ne${t3c9a z{PhPTXt%61*=Tp0_m5tvBu?bO3!%}!;2~H6M#3ltBalaS!H`{R9bMn9e^rCx_PG#T z#p}8pCS6fKYEEeZov=M@(AHLmm5U!er&!IMn9Bgv3w951Jh}i~cW%ZP2XoKDy-C$u zNH#A_U_^{4VyJp^1uwG}Qq5x3bpj{cUsr$LUPb>@ZVm%LUv3;jR4|>h5hRbm44!)q z1=aQRJ9shmBlEp{nAslsPWRleNB7Ou}{& z)QnX*F^?P2l%v3WXI*BSuFKyqg4sSmM?g=Kwg?yGzeg@c=)jUxw;VyWTpDXrIb}&^ zpq`uJkImZKbQ*m30(S-Av^)IU9B!MKxzu|Ogq8eo_vC$P%(#S}^zg?U1vU;8{(l#* zuT3?(dDTP)aUapf{?nFZWK}AOvM(Y@mXxF+p5B|^OIjs6kRU=}CvO}Aee7KpV1L?~al|BV|pTZhAmb5BYHlBMexs9b5hAQcyxMN$!ItjR0!c-ld9@KD3; 
zKdz06?u?D9ngkhrXEeS6eKI`UW$inTDC33ySX>o+c;Z5g;p5?ov&sar5rrWxA~~BnU3uRd0f(_qzM7w_ zmarxYeZ4a#xTnu?6ChrahtO0%W%s)PGyz=vPAv6+g*5rturYl$rjzC69G{3Y9z{KZ zQ1UnSllBnFtIAZ6T8(Q{3J~CsnfvBvw2B?iL5+jiTAsjs;}6@>^?7SS;*U%M{n!6hS?bdz2vi2V) z@ou5XDu7t%-S+fU35z4^Hl}x`_?SzAmd~y~;z^i+irP`W!Fl!+GpzP64%TBOFaZ zTZWCUz98W9uy|ep4ACBErtNdDK$!clR{SJ7$BW>iWMJXc1_dQnFaPDD3>oL`Qg)(T zczwJfL0CKcFF)~^^@5EfS@K3PZD8R!wq4*!Z=Hh8SMEv{mIByV?yDj94*yGX-Q^*L zXFlu(Hg5WtTYq-);$qElex<|g_P6y{kN$#7Pac+~gba@XV+E=U7=Mqh&(4{V^3#Kw zb!|Wj2xOK?ar%T+3~XTkjZ3Jn$3<5Ek}eNVQ^VP4Bta8u^-%(d5z^ti_#GQPV}q+N z0(kwZC)5M9f&Wst&Y|}jJ)}Sd?;HO$gA#b{Q>L$5Z5q93QXkrJD#diC`-!2?n|H(32$@!INpLao4wsIEh*M0A(14ol|5x5%U(F@`M`cd;h zF)nN&L3CZ(SadZVSN!_12+Y#ZNLHB~>-N0#QfsPeDs;#XtklV&tb!cJk=4sY?$%5U zb8TDaPGEi9PpBd21RQ-=c|UMLJ+Y9JElVd?4P22nE=YyBMe&!E(jEj4DlYTY@;=#HS>sZ(7#YvUih2qRBptQ;Y9N9@1m9zK+(VTbyQ<*Tn8pB{+zG@^@_TtA~g24^)r%C z)F?ZW{4+O+b(h4``EiKLl-nZZy!i~ejp0m612l>bBF59xdI3}H5eXlb7?=w@()Iv zcoVrUjp<832}hysf_)0wk+pRv$2^!KI598y6*lPmrvS3%w@e=d8eN9!6)~-%$C;vH zNkc}HcD6|gW44~NL50!)`JJ?T1bhTCimlCENVvy863XoZoR65W!F#k)zc5L*aTpA# z{wbl<{S|sZUjpO=zYObn^;E~#XLimrTLG7I`2(6-)lh$~lJpB*?gpQrvwtB~M@lVO z+*E(!7~u8J5&zv4>)aIOvmZ-&eE)Kpkd>(qR?mi4c7%MyOhl(-A;A}h?;LWA|49Ds z75u{tTFoiveAA$GX~oiJ&94_Z3YOnC9@mG2)dbeR{&=IP6WXO8^!;1!B;KOpFWadW zv5((_GmK*NKX51_wDw~+9_MoRe7fuUa55{hEO))h?w>MrY;UsB*mE%Y|A~H%0$CLp zrrAl2oKq0MIPVYu1*&W>3NF(zKB^xu0?arkEu$BD z4I|YiurFLy_2T}A#JHt}lUHGnqPirdJJ@&;%Fla|@;`g}GlK-Q%vH%m)HW2eD?QLy zyU}T$u>q?-s$xZrC5B!NI(t&n;8$xr!M`Y;QQet>9BB+E zzFM8mw9=LChuweB+wb@PWYvo+c`qF%tK4!_$MP31H3iNT>qm`bKU{>veOIX$5BlFy zj9j8likV`Q%1l*rq44`&bS?#BXU(XLak>c{i}IEgIl(ZKw9#rmRxfM0ef}3*6SNtd z^w1U1N5WJN?&nE?x@J^wh6`k6MQRP(o&5*7g~7pDxq@5c1CN#7dPBAc_(-CZosh)C z)4KrghYmD@M$&j$?d{q+r%qa3h*5TDASHa{ac}TC0&djEyX%SHfdgT(%RHdE!|xMg z#DiQac(=@dwe(pReuNlkd0KBaE2QMYDv-uDLIK2Ji_wm_#aQv8t zkGp@Nmo4=t{`y`f?^1!If2MKp^@kY4WbE)y#3IG%7i8qLe*g+9+A_cf01^f4RmW~U zfnUHn6L6}K7MSDxr&-713T_%nDbQlH)vfV7*(*#w+P`ZcaU}nL^230Z@+9wtp8W-9 
z$@s5)#LBN5PhBxEg07cz__Mim!_G#+iSqw{14xIv1shc!TT>oU5sPiXe;`XBWo_;G zZ5&y%#=N|Q{*XO)l{e^Zk8>kn^A3)`p;Xf#kDr(td3*+Gli1l{I5C&cu7HPZ>kkc* zp7!MX({*u{r&P2X2h0tJav^*wKE3%3L(u~&?IlnNai|_gdC+FWYsyfdfGWaT1MD6y z)c+pzfWB5D=rP~TmE@|#zy+&I`*DH|qDh?P0d2vDw@R<-@qy#j=ib%>y)aBa%6s5! z(67mrC3^;QEr`*6O)!(=Psv404$QeS`tzNAuKoNte*0`&f4wiubM~7Y_O$=Gv3&wM z6^8L1cn{c=@?+fg*+2XXwnHug^B=d*-_TpbwdDW7&RVnl&+t1ef1gMAQK@J=dV8%0%chn2S=sudRL&J2%Zf2iHH9kyM5Pz<`#h*1$Ypp*zbn` zJb>aHh_`Y6ZW(l(xCjOWNT6kS)qreMJ0^!KvHr<>Pzxh4RvS`2w?7rDqz{4yn!EYI zAL5nYq0avfzj&8;V|!g|?GPMkk6HL7c8v|8p~XVDT+s60!C6Nz7{=);$Z)>ACWK8b zi&w^|uP@=B$}%xih|y+0Y`yx}S^+2CFT%jzl0rY=WUD^1M2Gk8F;U{*e%Yb;>wqhG z1z@_nR3N1Lf@?CeYy^Z&N@y!hckJ_L<7kUb~4Re*D(SYIQI z#ZbHcMFN-`oZY?^H}pv=h9-CC`ftlTsqGa2kL{mRTTR~X#X;D=uin;>-`+!)b=dWs zc%aQQiJf_c+8?JY{)ubv?qvwydM^3_sA5bdQG)cqLgh*DGdPPK8G5KX|4}iw&-3=J zjWAKo|Iv3-ZHq=;n}avr0CQ>W-b)4xrfkv+U*H zz29%JaY{!q()YOG*x`pi$OrvL1UA|2%Kx1>qL&Cc3TXV`%7t z!nu7!1&4Mv#C{tQ-gtGySagc0M0+rJ-4y;+A3*8F!q-Ke}>ZOkp*?lbXKg87wb>|2MxslqMwXxB)MWFWOJ-d)m49W-;ZiV-v(&} zd{1X2xAwYZZYb`|e8W4?0%6k{e5Laz1*#CfF#GpwbHi(E?KapN)Sth14d^=El!Yyl zLqrAb{>?Yy1kQ{2dx66)8Zx;d>Ghlu>~;8Y;o|SU>(w3NM$b-&ZE@LW8_a6}1~Xr6 zFqSM&i2ft)>% zAe^7pyZevQ(r3Ky^=byNGW)FqS9TrnjaPf-0fpON@k?>DH32Rg*=&aaM@|-Oek*js zYaYT@;=*h3jnowaxs7DPFI#+dVsRQbmA$y(msIvHbb5y1ezVvDT@sF&L}= z4VOn0U5PbL*l*xAhBJ9Mo&5T_pU8j(g0!k;!FWvSzx}0xb7ndkGG52t&Hoo!)M~j# zs_Irvv$c0|d8c9o*Yx^dWK^>ff>h-CK~eRte@{>UeUa$&c^EQ0E(Nq8|F^$1P_uEdr6Z&v2DwD|6gaqhp{TQs7*`!)4~mV-L(*JMX$Boh9`E?=a4BLTkp z3$)T|mlr!WgZf8~rdeWQcq64?`7sB*>8G||pipq@ZpxJ`dG=e_d+R1H{W0HToG=Cx zaVJrm>@<-6JDEzFmagfU0+MCXVJI8;$XZDRFJ!1?vYO!B|lu^P@XejBS{4e1Y45VqEs_zmHjL}T5B-e5nC&0iJN0D`m29yWV3voQ>+ zx%nTnyTNH$Zes5Flfpi%_wnJ%X&FP{%>9)fKX{6eZY?STV1M}Xud)*t@+_A+vKtb~j`Er4SF z>Q7orV?&Z}+mEeHRLdetH^Y{C1XJtt*w}<<>~GgarU--c)PbJ>ot5h&)WC(8HO33_ zK##rv@V5?K%R2SAuKGM3b7&it-YeRKYrr)AB@f_oMF~;fzHu_`(3Ok!jwYfXv;siq z!zS%k^|GEFhB(eoE3E*Ga@-22XYYFq-k?g0I#@=`D(j-PseFf7CUgjxT(VJL&%WW; zV>qV4>ZA{qM&LV|4z!cqR^FymXEyC{l18)x2a|*SDGGRdJCLR;^c&!H521Qb1xM#L 
z&b5tE2n+hkh6VwL8lX4Fo_p1BZyeEh30^snqAoQtpeu@$o|uH)vyqcU^aCdkAb`C_ zrk4%L(HsEhdDB*EHB#tDomgR(Wk5|?9aHEHU3Lb>R85EnHAJx^kftLHCRkkqc2%}1 zmKYNkLsNZ9I6jrZ)K(fn4m4w89l$Azl1r&gZJ&s99gPj!+^_>Aq;r9@pN(d*%^}kp z%GAOGywZ98M>^5$|UEU(N>o46UHtTd*dw7v5jt0(3>AF(5RMj#e}91 zXDIY(W3GlS+XMf~AQ7~)*=mzRf<`jf+;FYz%oYw!xm~pshtStg%%i)o2Ur*la0Bc1 z1d=J;r(t6O#bAIY;fw^wcT0!TW@GP3;IfwrrImKu51w}b{)o~9uhgRju2$c@JstSZ zL~n3(LFka>W_d&UrZ`ooBO`duAavQJ$;+t%d)uutFuV{rB9OuhoR~r2eg2>Ft=F*i z`3usaHMkQ5-@FX?D(<%RfA!V>?LXYNv5ubZ;capYZ9CH$*ETLT5$x+tdw~bblBciA zv@`WdBGI%-+|PNFT1XXVbX&GB%hk0uF*A#;6@JDnz$Kk_8|$U_ z-IjU}xWR@Cis`GA=S`~K_m7_1cFNGJt!T|JJf~2YTg!RXS-M=43M221==DbTWkZRF z<9%cMFjdz-OmkdNf`|_4y_~{(AKl(_gg4<-Ag3GrraCymV)A(Wfk(Zi&xSN1-5pMW z;l$@WC!Y*?@Yp4qyJkp0$tvB{XIQ3sKCK^3k7#o}h&FUxWcGa|-&zx978PiXVla); z-k_g)R^A^&L6huW)t=YhJLxNe$Hr$tJbDCPmt`kmb4>{=n*kJ%a|uuy2)<7Dq0z9~ zNecG97&WiS%B~ez0kW6Us3Z=6BGJSPpF4z6pyow_QDuUQ!z zpDzi1j$YiQh*S8D6HHs=rHVCLENOH+Krcxd*p*S})FCXP7fv7)PhhB0D6%5T3gRzS ztsRhty2p2W0-qa~6(E1v-tY-dFx6s8JeEN?V%QnwO%8idNvT(sE6cz_&fobY2`bAu zzFu&ahhV*dH~!N2sraX7jU-kXx`aj$g(dQS z+6A+9RoY#J;8pM*4;;A;RN*yD++9q&?G>0A1ayU4r>`}Sz2MB=eCM9o62(QNdZC@j zT<^l-x&kR6Ec9=cvxh$RPs%dd2l^n5$zdM))y?dEtb^g{iW33_hqfxUeOW5k;}vUa z91U6L*?K$JBk%S8;`|2DK)L?1b0x?D%wWrejfqckZr=RV#2Bs`@fVwr2|U#OHoD}m=! 
zx%tMjDrGo1)9y?L%tLE~43zrYFgiPr2#cQCs_=W2o3SjzS5k$+H6RW!SEJ1Yj1e!n zY8T~<$m>v1Wvr46UZ7qgXSlufPn24l#t&77}gJ7*G`_Rg=*Rr^%s zpuE;l^k~La=mWVPbWFGDvSpd>V5}AI${;Kaq&-diwv89M@xlDdhz>K%QDwBt>$=n) zLSGd|wy|}8FM%L7NtXH8`8cX!OxSCDrwOInr#H2z(4``cz6k}UP+}!@Pcd`PZ z`hg?&1T)dD_xs8*mOcsUOl%T)+cL=e6N2+qa}}n0s$DlX_g!1(;3$nAWE7d%c&L6o zH#|+1S3NRKU({ximeIf7Ap}Wn^}*7~Xt;AmF%um(JN9U_l2diTvMi^qP$YRm+58<} zvW>hFj$NtnD34hsB-|2REA>(@ zV|u${X+7@w9N8;xrZElcKxAsvi_jHlE7Nn5#tznNJzSM)X$`l>>CE2T&e3Zc=;IAr zQ3)1UP76D&-Y+d%FzP^kDu!x?>M&%!EcsbD-1O>f4gRosn_}G94yBn?J2ff1 z9U2r+x6pxC6j@fiZi&!xsx4VCw5(Q2-Wn9sv00mU^PW<_AwA?}#UT8&OHEceY4PZk zx6foON#w-vjAPzKbzhF+l@_G#=xB&0v>>6vA+_N$qY@=G{;{e3*=>_T`y&+|6r1!1 zLW^N2Ladh3gxiycZLg^7)qA~5n~}D_?0LRTf30`v8fuf|4XXttUy5#h9h21lx_AC* zhMRt`s_2YuY?}qNp*=Cq-XGfTO;ggBK+>_3L*zAVbk)viS%OWvx_G8^o-RAEZn*=` ziPoiB$PZH>ZPCM2mrXJ{r_Q+35oo;=jY(=SHj~crO4hJ`h?LjTg{msGZ!e%;`RQM- zydFz#+@0MP+R&*TpNa7Zm%j>ZDk3>;I12JG0+63nay;3{Miqrg3AW|Sh^7ia>BGy| ziB{Vt!PfBeW&%qKu??EM*>uR`O@f(~C>;?Ijx>93`J6V%D~mz25KZWQ<5bb- zjanE^PiW@8YqUF2av?8*9PP?g`o?*p~sdVgltB;D~ zEov48){~`O`O%RA(2<6Mp!xAhWJDK36l}*1T5=Q#)+Vh?<&R*S(s?IRg{mJy|+=TY{)#wY!S0 zbfHjCy`sCKS6=IV{JV^1+qZ56o5x*;RQ$Utrd<|XrmENWD%pd3$&8%?#?X`TQ%g%W zI`IUjxoc&ZdWk&u$sc_rtF!>yo_y87)OEKDQkGDWS>;YCYOU44-7#MF{#Ysc>CCOv zdLqTs9fz0?~8`gkj0&#Fi0K;v8dp@NJ&Rt=mZJQ&u6C`4U!{8VSt zm|wd>crV3#i}OHPhj|Hp(=3BiQQT~b_q0cAzk68nlR9VU3vEufXwy@Vcv>fY8gjtX z;16kAOil};4xbrcv3!<(Ol(B4T|C24m%3}RZKwm<qYO2jy-t{K$Z#$J_d4*y$W! 
zBUXVMG+~}M?OQu08XWE-^X(IqU8xu_@eNurhJQCypOo(tB^$YEe zl<(*&eX0>H|7N+e$mWD{X^H0w%0$p?q_2Ym<1R+oEm?PLYBYt!)kT~ipbFwFE zHhl!>Z+DWDBYCnq%jx9Bem1jt_q8?iY8n==sVDM-9z0#GfC*|zD1g_O!w{v;nlQOT zh1ND{ZcdMM$~=!O`BX`1QE)p=0q&S1=6QjpPw$Gh@0&mMMaqJ^B+$y-wJ zZYP_eE+dDcXe3A1M>&xg^;EYGG1hG7oMK7YV3B%;yX$J^Wkb@&@55JuV(SBg+xFU7 z1?UxN1SAfu$Z8phT9_>;<2g-@7i2h(w;1QSK-sP5;)NxKR0Dgi3$iUsB*a2^iz;lE z?_GmDTQ(RmJB4SSlxV{ z0-2D7=GKFnm1xq`zT2i8IbmuNZS+>85R&za9qN49L=I^Glb%|1te3hKm3SOy znU#slVTtgUng~k1D4O0tXW$w89P&hq*?O5fF|a-Jw7()|Hp(%p;&|!`1JP7xi>dN# z1;%j3J$3QAda8p}q&kaErCaIt?Vbw9O5>Lf89VDJL`rnra9Eo>3BH3<^eBKTQYe$* zO+*nE!{cAG->?V=Oj&BlX zE8;$`jqp)g0G7N?dIhFCzTV=z2sCi9o7w2y97@41Hb)^ud?+sJi<1STwYw=|q1Y@hKJ-P-XGkWRMun(xI{Sju_D^jPFJB zdRq$(oar6`f^Fj7;X}!Wx(aJq^IZIDEvwWSqdfJ-oHz#|uXXk^F7F{!y0m(*i#{HjE$ z;gy<7Ip^@En1o3~O!`DX&XV zEJ}_|5OYpbkK65tdFhJnRdiZ#$iy2cr{ol~d`Y1dkE-da>Jq=^snQ}FYLaXQJE+E> zHAAl4lzg8olhf8>fTZ@aG>@rcEEjrOKjA95>GG^Jj0LxDu4)E`-pjEl^w2`*u*h+x zyX&`F;7XMho3EODb@W%<2&M2dq4Q&*{s$LnRD{WeBMODwyLqYTqx+MiB25*7kf^?w zI#pg-3RJsDOl!_}WZXY*OGY(WQ@0^sCkCnN-_t}Zim;UAzoCH&&}*--ndO(2PGs+p z&g1J{l!y#@sS+m<27t=EUPW%k_|rZ84ekuHK}kwlWZYFAv?yiE)=AU>smv{c%qYRt zRzS83L6>wObYf|BFMM|D+PXOC!TwS| z!@SU8#G-d0uaI6{t3(4ii~lBxobqMVcHW{v?h`#~g%go@qnh4AUK(|`WdehPfE=00 z#?-Om{tMGvxdCkUfd*njE%x>W3-j>YEt5Ki(R*FXmeU6SJLn#24&1>3Kz!P^ z`I$@y)cgkV%N@pknb*^E({}1T8L9iw$J!=&GAN{b1_xWW&o?1`s!u6V@v^xro(YbM zBAI)hA17(<`T$|GeaUJlajd9y5kULhlfrAEXmE`Ewe%inJ1c@H1#5wSSZe7o}PXgW#mH`u}zS=s5QZCD54^=m_>M+xJ0BF$$##L2mCn{S zcHuYg6BL;})yXu5QdTIWwlQJ#it5#9Yc@E;Ug?8TRd@mLalO}dtK3R;B+z+%kaz}J z|FpkfxV?9fJ-I2qzTjS}1*VRy(kR~oyw~VzzHR6%QB0u+*(~H5RDL2Mf`)n)W~1wH zFj8=9fh&6WIojtw-~5f3LpDNd%|$a~Ctw9KT9mWO@g8kl0qbilpWeS@j(FP8u^tb=y2mE9hzgjAtX4duyFl1d z)2(}heZ1_#FbVJCz8KXxonS$p&PXC!=z}5>KCAik)|(Q6t)IY@aL-9l>{6Ohn`&>k z($Z^XcjoLk_}6?WnM?;!IVdZ{+N(2L6wZayZf~#IL@v&i4XF-st5TMut$HUP%afN8 zW{FG_YmimS_kF6D6{w1#ti0L4Kx9rQe7DP#Q&-=MT|hgx^DIf0kGmUlV1Rz=GjpNb z0wGnGTPv;gKtwR^;P{x~8UW3edle_`rPZY8|j}vf26YHVuP(8~cFCHv^o> 
zg6OR1_|t&F0u1dDFuNj^*w@3aD^k&D(80#K(-qV@6+~U63t>g=p&E98u0k@(n)`0_ z^gKrwW>}v~`YRIRiF#?F6YesYJ~&TBBl@;3%-_mxS-{MwuX8;#Z686ueGEYP*Da=D zhdk}KG+f#9ixR?2(%`L|8nCf9hR+1*I;PBZ6FYqY_;#0z^YnyjqMh-+@dgnd7()Mq z1n+tYU~{+jutXuX$!OwcvwEl?n#ndIQ%k_=3+go8jfR1x16PUxi6N=PE@o3q&@<-N z6%>$wq?1N4fIiQz*{C99Gz=3e6PluM6$>*yUbNq2u1WzaJH*)WTtR%WJBpo(-pY6>Xt7Px ztbKhw^*Iw2A8kbYBayq`JP^L93A>Cg4n)9K(;>A{DxtJcYH;0hJL;xsUct;(XrgwH zZ$9t)OXgnW;^ZlriXpBQ@D9A+d8Up?!%|s zk7+4`v2JE_!pm9Rovkr~d~~7yx^A6|>af{XPmg!k1WF6)nVe+0sH9hB`R>?2hmCtj zX7jV|YlK0jGHgu#p(~Swi4Zp%+tHYvfy?dkp!z^A51mrA4uJ0$i|`E69$Zi93P4Xt zueLIqcijxP|1c@EHSV~WZ_&;q*)V$9JiOJ9dHW)7BBqV<uu84q)rrGPE8+2wAP&6}2IiZbu8 ziYDf+hdvUSmtJg&_!?-_augE=Gma_8rpkU8ZQi_0W3Y7+>H>A{b>ZA((IFT7G zeND9~o0D!4Tx>aPdWm%UgG~!gJ+C4*wA2^_qM}b$(?m;UtyNM0}W*;lkNmKKSgJ>nZn`>>}p$|%1oqLpghvbHjKxlo11!8HF~x( z)(X<3%v{Zc)35KS^o9Cbc+UWxy7I!w0| z$*cS3Bm@{q6g$ z`f617T(;nVOiODRl1#Ts(9DEjqdd?>sHJ>Na#X4 zWNRrg$ksKcsxeb>o~nA02nDy?o_Yy6S6?*jG}9v%o-|51I4KjDs6=*`Ycq;NuBS5j z)P)lJ!vpT^jR(V2A~zlox<-*-Xrd%e=V<4Fp1Cs>7y?&wMSU5@R3lhx|m zOext+m?3^zd8y56Q{VcD6q{&U8Rvb^wndC_A&25TlW5|Qr(4EJe`RzIRE*o2OY~eM zZ_2aU~7I|#q3GdzP{JQqts8Qj`Xvlsl(F4*I$41 znf7&|&xKY|>3O-S31?>atncUmYp`Y%K0d9!mH1SBn#o^jwL`g|Jvr&Q`-f>|8RGl3 z+7eCzt~%=mZMlCnx$Ehn#7j!pp1yq|qBCE4%Sow0kd4trmZQo$Dog z8cRNvDL(&%&ZvzXa@X@R)eNVU{F|(p3H7~i=RNrAzQMbs+Ye5zeF&_S?1mImVZ4PL^d> zEuRaa$rg60sa`((@ooM?>dH-_s8$MBIHx=hX3l0QEBh>b9 zihYDs%h*r9^`{FbO_6k6IT4x)i;5a37@%>QAHS)I=*j@#Cu3^5{Z2=eULbDaOWbWU z?u1S16ji7^O^*X4o7iU=b~0S{uA2c$yd)l!Ef~F}=XSu(Q1fbto@*jLL+ik6(|F4| z7ix<#4h7L(ImR^40rVN?t+u3YX*B$l#3;L+k-D#A-b3p9%OxHCrDN+_$0}vKBh@1v zH_BP9PrC;qHeP2xrAQv~s2g;WhtFm~^W2Fg$Y< zidXtFG^?6#QXM)D*Hi~%avb}76lG?5Y zgjweR=0`Upx!mXZMU+ZSA(gseZG85ibU&deUE=JfnJ9_eJKLBgyhL7}0@MbatLj4t z%cea2sy)%u7VG}QjX@0%7-~nY`<78(jiDA5VVyO9glx5!sa9&i`%VgmNb!@X1tH~3 z%L|B=`H6T-SXa)fQz}fSI9lMvaz$@MY63HT!0MuHO4_<`X|JC3(+%EP9kNO0S+eR^ zX88V`84OL7(x6nG4_0!ip5$@VRS4!>W;~^8 zSndHm!TNc+jBTpnt9?y|dQ>TA3VVw}Uo!KAKI+Wfs@bEu7e9YMW!L&zz@t$I>v 
zvvu`LRXounsrzm10M@=9vz=A%V5Uk4tgG!dapiYQgF}oR`gPkji4rM&?DJ+AcgRRpm>%BBqRDnCU z_|jnMrJrRdj6dqrnN%+yy;P1SCj7F&()$@YXvf^-=uao<+e`!^hoLM~xo#J_%N{%D zMMvRf40f6fHgYw&n2ZXZc^K2#xUToH7O)?;r%>&%%kDFWr4iemua|Am0u#=;w~uxi zhN@}GHR(gMw-@Z=YGp{2{H9PTp*^)Zt(y@yC{wK&xrJWctg#mMLUrwGi0(#LJHfmh zJ!Yoxv8wOzb9=o63K%{~UZD@~uQ$X~Bz7A@*!FN6z7g1JEj5#4)e3p zsTw*j-q+m$t;KSQE=f93+xSZwT9k9tT)MZXrJx!5QTlZjdb5byLZNu}#?e8CXVEiK znLV|&obQy9QT!_k96cGa18&=hOgm7(ZkDC>CaNtFu*KyU>QQEu>Qp8M1cy&WPpZ-T zPJI+-W(hiTA#U~Kz3K;l?N97ler)V8HTuF;#YF2VPj3&=t#oZI(N&=h)=Q#M!o5XZC`ySF-|$;%=Z`IhizT6j1# zn&x9CwqFQImH{^??LCH+yrBITm5@SR`+Mfx=F~^7=T?`sJ_&A>rZGo+@Q$nS*}A{l zRC;rrlA$!FFjae2f`^Tz;&j-d4*$p99o1s^7*#1RHKr}WB%S0#p(?NqUv-;xgRDZQ z49G!Hr%`swVwlT~X|&nSJB5_z5>)cI)C1j=^d6=34^_+U3aaY?M3Z`~w1qaqqe5uu z>%wR>Tzt5AF;><0dV08&g_BKzPP8$K5+|zmC20+DL~RC7r83dX3V4@_1V^5u2tVXI zvLx}OAL|k0hR~XcUC`EQj0;7bC?VSQOi5IN!tci#fJG#Xb{{HaRx^rL%dE8|t#F(1 zDUdFz;JdSqZH+AeZa$HGYt@M+&6u-Z6!s0g@a1 z5ADQQ6cr3-&+mVqePptLcNw^k4?_2B?haep*`0m&Br4u4emuZJ|K@~=){>EPVH%gx zL++t{W$v$lqpEkL2E1utMqm&n2l_|ZMScDul^dtE%wW0 z3d45l_8&ZAsU4dA|0+ z{zB=(K{1J_J#HN88oI!P2ZBDGy4(^yo-AedYB|gDVR8LS|3}s)(NgwjWCFTt5q7;Q z^%BMw?WT<>hcu|C+IW*2)RzdF{D}*rn>{Xy72a7msdUDybz@H_`CDi$V%wX!w~JLb zi;Fdm^-+k{Wuc)<$8Md(lPXAtr0(gH31inkqCW?0R~pV8gvb3**ZdXZEXpk#{WSC7 zma=y|;dN34uTi?wY9tJwC_j07-8)U3a?0UG{;j=UE^uAeTXNL?hgILs2xn7@hqU3} z^5kOX3@2gcf4O?0ogB~PB;F_L{k2+wnH7Y26#W+Jf)yG;?{4lX9+9N1yG(VZY;UW~ z<&Byy0v+Tbl@ql|u*Vl35lElDu?>UY^tT;&LK#pUZF}eC*lDUen}|mb_Bi<)zMA@Y z_!_zte?+_PqXR|&|>y4tKVi~W|Mzs$RXCT=d7d$MDX0lh{czN z^`^&sy|a7?&+BI7J|$)`x8*qeaWU#xt>sy-zJiC%-oUTdKyd`UI#R#}ceTf)r9d`fR@N3Ae@b^242#&MahzsypM7 zZ#cwUi$p~{mwZb?$pR2)VpHyIp~ISbH~Ld8N(CpU0y8P~m?dvohL3FBc(VgLUi1Xh6R2>drfybqS7F`0qS3N`{UKO5_)w5cpsi0 zg!t=I3HHgT)p^i)lqNl?O|cmh7jdUf6Rxsa^w7H#kO3{Xd#ueCuuXt7H;aI|S;c3} zJnTe*+sXE)zGxeQCIV!aOpru{(vmSToGIotec&TvMF8EuPqokt?)*xXv*LP?f@6pg zPc!D!Bl}mL**vYb-Me)yszpY~hFdRa5)zJNp~!H3dN=>5Lfm)-5AnoWPTue_C!FA~I= zWG1cyT{L%=h9VJC=U7vZ+;5Ld)(&Erj6w=LKN&V~xb6S6cjfO;esP-+*(ZvzW;ZE& 
zWQod>os!5lmSh|23>suBCLv`V#@--dB$TCQ$i9y(BU!SO>|_{Uy<^n%{`UR_@BDVI z>pbVo=RTj$eczwwdYtE*13ip4SwElvY@wo^Ce!^m8c^(-1& zJ)+Tv5PE}BA2(aei<~4s-V`m0;un9+8#+yfjW%-U(o0jeGuxPIS0remP|>k*(kiwS zw+m`KRJVF4DP!f|BAts0ITQ|e%}kh*L*iCnnc5*tE(9)Hk0=DqdLX#E0h!GAay+FO z{xcx1bT3#QH5d-;6m{!fuJP;wAw^JhIenR-!NPd+GDOiJ$;J3~2uGPTnWv)Q4T^SM zMj#gC+H>`Y0bXM}TKt`|%^k6C?}N<{pS!3;q-J7=MyhL+a$SPWP$OYtcmlIcP!xq_ z?zwe~J=L%<5Q(nkv`QXeg3mJz@<|E27iXfmf33!{4uEC+S&}U5)#$;+3kO2MY|xfr zA5Hi(=r+&xdSg0?OjL8@Dd{7s*)O|ZU{Id~${q}$VWEMu7f=)62g(#GPEH+#2Jszq z;D~?0s33zueFrY|@HOJfV>#wE6EiKB&s5&Fs~b)#HbnqY7g(W{v8XzxKxXy|Ee@0eZGm6gARwn+0_Yff5uK?>JSe z!8lnr+9zXSbOU*%7O>;=XskZBJ5*9iQ)&m%dz&y$ShVizF6zo|3spI(mmCIaUKNu? z$6JX~j_(=(O!4b?7M=PaX{P^Zmf@+KMqs<0C?R5CU!FrQ!o+Yjb8DAxVet!@OInk( zG-x}P8HXT;b%|l@-X^pVN&BwgwW)wwiauZ8;xqpt)xU2ejmloZj%>Od;~-Hbo}0q} zJHrAoo>di?11UkHDGug`o;DEzZqo)O3cA>gjmp7mHnHF$0O$`6;g!SjTbzc;Tr~@SC$1 zqnKi=;Dmh-3Ws*~)&cFlCk)nxi%~Jp$VH46_O)hx`=z*oI#CveIfb=aD0ro|wnv*e zPjJM$D`*>)%Sa}P8p9WFKy8VPr9g%!?>?&lWb+?^Rsyy$&8>0s492H3sl+`UXPIn{ zYPxmT?~`%LC1}~vE>|5)Jf%_8uO^}IPxYL;$3-UCNRj7R)ZjMr1G7$QdbMAW>kWn$ zG-g1EbK+1C^!TJbCL{r?+YcWrn7;dT^+sH5OdDt;`Z8HAeBV`WO&|)u{?P_te3=0ztlz3>5_BXysDymoB5 z)&HqDjd32L{4#Pk6en8fJAnY^G(cdDeILkgmwXMe22;Ys69#3nfs{Z2w*VuWodE`~ zgObq?WZhhO_9QCM61fV!_xGCK=9b)pM=2AZ>OZes?mVs9s=JGHg!_>hD`1GAZ*zwS z)N*-JM}0WKy$6ADaB`2NZcPdlG}_Pf5c62;jH&bKmQ{Y>?2P53MC%W=ZTEW0Vz*8R zzTIRzwi}G}oO(FkS?;5Trzd_POXrzWaN>)GsS zodn?+gNhX^q^qs{i?cZ9PXg1lf;8`aOi@CXHto-w4QXZNu5Wd)%LeP9hsciMQOPHH zeXyOdamN6^r{Wv|v?@nV+dx|bnh962Czxte5$nerV*5!fmxin#Tl{BKV_@UC;^kpX zCl2b6KmAtQ_Jj3F62mez=#P}6Ncc0Ld?j|T;03Y$l_$v$%yt!84Ti>(_in6xmp5W@ zp$BfyQ#d^tHQR*^b$+#)=^pHXuab6h^j3HJDikoOdT7=Cpj{%DFR4fV=>R*u+2$?;y41sz~g4_GTGzzF%JOJCdH(JNzF`oumV?u;Xe#PRi-}K7&)vU8nRf zA|}@$XpZl`9ySF_<877?r0Cr}cikRtINDtMBKlEGs#7AuV3UeM>}Xg>mi)ji6Fs!0 z0B2_XZpp`f!#balk`m@G)i0(Z*x=8OZIHHB-CpYLf`tyitugVoP&o>z&mGh%%~U%C znS)cH)%>`trCQNOmB|V1hL=$D97I-1G3cw70`6+f1pna=B5-k-^`+nMfa2%JF00sj 
z{cSJ^QYtf}d*$Wp!;?l;;c5TXBfggGuWRBKGp?((8fI{qVPQ4R6xvfUH5$kpSIbpFvQue;B~9?>?VV%X?iAF7#0}pU%99Y|k*d6Ruvg*CDVy}Wy2d~4 zv5OX`Lz19Bvy*v?I7Oa0NZkksrm^!iByQ#wo88FRv6hD#n&x5_7IvVmH?CJDPrR4Jk?K);6%a1PK`idYIm7bVi=>WCTkk7t2BiV z7nVFqn7u3@F-=%Ml1Ze=d9E>&JJ9FM?hjQQYtM)<+Pu?8=q>!~xg&}LzG-_GXoh{; zIb(hHKTi_*Ie7dqY2@M?VR<`65_fHsz>us%o9k#Jw=Z%7_hs=`QpbU?jLHA(~19hPxBj;_g4-}c zH)FX}aSmwyRvY`&ZPgSlU&S> zig28iu-chCOxgLNO|O%bKJUbjzfmb3$)DQx z?N8AXGReQgH`iS*D5xhyE2yD&;?0+@1-D;WJFq6qRcX>8b5E0f$T>sSqjzi_8P9N7 zSeC`ap;LBW3j}d<^A3j48~qQ59DkQDmE#=!U?nNUhXUCLA<1EjeP>|L;c(fk6dN3k z&sXYzy4E*wX9f$o^pxb?gK4>8@deE%y}xlKfP?JH6NW6Axt8)I^UeWgOJ$R-GCmX? z{rP6EFl$uv0NwDy{$7;H#q~H^GsbzRWirnSiqB$yfc2%NT9EZz9JDX?${e& z5zHEF9(Ji4Jv;B)=*`88C_ayO&nXcoxMt{#Wk1nxxUE_xj*X-u_}WC3Gx#9SJgWmP z7ebiJVag6~&jM*>-ACmPjSb?Y~Hjlmv2rDBqQ zM#eAbxmG6qsF>H@GUkqp@N>Tr<$V~AtKUBZGl$IO5#PYt^n=RY(fGC~N%4S=#E{no zAd@=zl+ELQM!o~C2D!e&~eWUd|B^V>vq?zDm9W{lq^HaQB6KHrO2h{nbQ-|;ZI`xKH1uoc z!o@t4aP1$m*zu6X&QemPN12KX(8^-#!6`WH9zK<*`8ikGGnH+l8t0>DlizJ zN3P!tm56vRw-C;i27GSFx-FKn9wRCc7M+FL^?BML7F@4g8?q%%92eVy%TC39$o9dy z`o!;pBVA%;mBsQzBi zh?<$vTC2*9Jq!``p^AfT3hhnvG~q0FPkXbrgJ-uLuz*c1)<5_@Bp2ED9RDT2s+hKjwd5HBjrSn7vOg8F(=>Wvh}Ad$L*XGMTzxfw9WbdTm3(P$4T;kr-j^pfB2E$ Xjl8OLK-&d475S&HW1@|@=@9ilHOdAW literal 0 HcmV?d00001 diff --git a/docs/assets/images/mii/llm-latency-sd-latency.png b/docs/assets/images/mii/llm-latency-sd-latency.png new file mode 100755 index 0000000000000000000000000000000000000000..0632f92db51b3897f01993c2514320a63fe2b3d4 GIT binary patch literal 52128 zcmd43cT`i`7B>na(z}qLAkvGJAP6WW3Q|Is-a}EUfOMoIy*B|1MIjUcDT0b9MXG{; z1nD9I0s>M(ui@L-L67I&`^I?RA8(9z#yJO)ti9Ik^Ec;QAx2wMnTq@jIROCy6+-2@ z4gmqBB>@4EKPeILPE^4mDe!~PLq}PjpuC4=9(Zxm?wZCm0)na}iUTWR;B|twx}GBN zb7Ep*dV2cPr%!!-eZb3W*RIj{)3AiF7#kZ84-W(H0RIR685tR&4WI@7fA#8B;1@^; z#Kgn|_!R$=K8PMz4}6MW3H);Z&3!wRopiD^um)H!n<5LWyzbr#EHiyPU}IzBP~Z>| z5s^`sQB+g}tX~QgS*c`kdl#8$mr^unR$78`}hV2$0w%VuWfDX__(pT zwY{@{2zNMa1{RauQ8D%)Ah_6s`;U&7hzLspNo&uynRWW$s={5{advI^jlu0hFX}2c1SVru+T9>1$ptbQxc^bj>r7 
z0AuLKF*NkH1CB@XA$_)^akT%ukLVR?;1%4vEJsGDB$d+bc2)HXD@U()cxCQF-2qM{ z-!w>JMiWK6OzV)}OU3 zn+olAby)c=8Xw8$8xX?wZNsL}n~dkfJv#`M5MJg%IIX*uACfDX?byLRRA{{)C)q4oT`wlg4lLZCycFk6d1di2=z3(ekB} zNh^YW%8MeFf)TZr@T*1w&6V_Kj|`4!B3g|Dw2R9Injg#vp- zG?P5SPfDJGvY&}vbM!!l`si>nHezrdi>Kc0c)@ZEUQ0nNMoPMInZ10-_;+-b; zz8oj%_bt+K-@5fNikn@zv(N3BVV3X#EHfh2CDXvC+ho~wGtd7sLB2_LTQXl=?aes; z;r8~Yp>3lpvjnFFx0YgU1 z9Sj(C$`d89^RhkBb7-axGMLKiT*sq$906T#6A7j(0vi1eeKJc~xQwxaS6IwM_$v+- zdPKJcsOZ|4&Ja5cBt7{N53W<(law@KC?P6C_f1#N| z{vatTaN%)=b9$o-D|s`wh{05;f=2c228%)LZf#qMC=-K3$%V$s@^>tjf>`bGtc7O( z+7AN;E1jd4L>~8aD3qcmf6fj>d?A6H(;IjoY}H5pboGvXLk^mAF2Ffwt=xcEWbD=5 zZbjbSCEZa;R%+wOpMriD1lODdcN%@~r2LpWg9voNo@Xqms?S|Ou?Xh#80`%ZPiAkdZ?ozN(4POF9LQU(wTfb>X#9 z^}-C(B_X>hi!`qk_Z#!W>biS}wkFYJ66O9c{0ek>mi?uSb4o7+>n~*-p3l*Gj5!e8 zj!D;R9eg-i((Wm#iYYmaj5wBt2XM`JAffNyD$*wQr{bKXXh3EE05{FB!!G}&*%Yb_EQbYa8mB|?m zO|7Y*&DeL^dq$+(gI%QtGWAujR10oDpPaIFUe`v9yizRF zV>Oj~^5)xeZxgY0y~(8FsNk@zHYdzRyVqRYLvk`#mC=u6+uvHu8aEvBvz&r37aTL= z%7tO4U-_QR^L&)8RMLi3B$+wWOfh5oT#e%G(GM@RSbDclcS|x6Ly#mFA9R3X=9SA! 
z4DM;7zj{T|I)f2*DO?CY_CbNASla$60inRcg!8Oai;ow?ruf^k8U=&bL=Ch&65E#D z)87c*vD&^Zb0Z_#O(dOpF=s1(N#WX+W=(o0T?=6gvwBLQn3foU3y^FvbfW$}J)Iq- zszU5|*SE&yJB52DC%SC6yuY=)+qvcXaQ@L~0q2|W2j_D%JEq?Yo_l2aoWB26OlI-u zniHeGo12fEQsfTJo6$N=YH0_RJ6}Gwyx%x+*-C0O<3{~YG3huUX;W0OlxB*(Y~$eH zS)961>{g{)k*`f(Fz`8xW4T8Wbw*E7nA|M-+3O4g%yr|k_UxIs%Cy@ovC5C))YADy zAPkk`v39BkCSG@MobA?Q>bYs`4kS&@sZL&632YS9Bm^#>6p zMKy0-y=rdPnx*eDqOk$tC(}|dFgEwx97kMk{-)RNm7NvXs=$r>YV(${_r7N-MO}Th z3IJ4eOg^3LjwN?<@VHC-zySX1Q{2;%nzm4H*EiBJ$`V89wDB}&*{L2gN0XE)Jb@*s z*^eTS>f5iCq{Qqr5=QUWNL?4xk0{uysj!H#+dflJrTzwUuAJuOb04#}zL(?V`{ANi z=6$bznh0!6DHd>bQwY(tjq8u|c%EqJ({NnW=Q-mG&UY|2nAl zXYW`Zmu337IaJrH3RoMw>as4j*cTj3ajVGE;vGQ({X;xh50a;u9(?7Xx*pZnJ|01T!2iulbB^tn0vTP^Gcg0 zE%J8t0F(0z?(|ofoKF?B+Y4O$TI;EprUQ9YahzQOrR*b#oMrRKPfJ&`*O$%Q=ob$G0_d{h*CII(msx8){j9;2nC&s+#ly|(K{MO9TTOwmOGs8A(%)}n)oAQgHN)+Z|I|U1syv;G2D6mouxTi^X>5i!84T^oo}opA)gVdZw9hP z@&s!X+qpE@7Z**jY zI7dq~tZv2KZur#x)E5;v?*06cg-z+TDNHs*S>{pP=>ItX#?q*yr<@KX4 zO{-f3Znrpgr?+~TI-&LGrMqs8DT!CP@tt!(`cxTo4}Hbj4kKh#2NP1md~5u>p@0x1 z3%B!w6oeb1MB6g57K**44WoV6nj@GnUkl64ZdM|Ib5PQhRD~=4nXtx zS>fE|%ylx+*-Pl2D?I01%q;xdxbD6=?c=`Zg%~l5QIdIW;;yMPGva`}>B0OOPr(}l zA&MR#$)DqNsw=E9k1_V>x-H&s+r>EfW7dnL`?Y;u37`*v4kssw&!Ay+;jK(6Bek|) z=6aRcPZEe9W!oP<0u5o)sbF-VGw${*a_Gk`P#Vy?8g0t}CC^t(GKVZg5f(Swva{qG zr%YaIm$rG7a?hcRQ-y~7d?;YEiZ=dI=gxx^ym1Cau#GCwHAt@(mMk20cV3$6*(0y^ z&1jDvbMWAn)oQ`xK@HQesSsyeTbossai$YOGEY4Od~hmBCW+z zs#1M!Z>@HQnTY9c?*`SpY(X#l^zh!M$xA;IZR`v8$Cu1?N6WvyvZdtpIa6TL&ZK{N z(cLDNYQ`_o5#qDe9Lck!gh;>Re)Nlr$H?5vZ_)kMV}R{Kp^_@&(mQ287JmLPy>#~m zD1H?a+Vr-O_A5Lh_1G=L3>CMPwb4=p`d5|(U)$HS6iY^*12~q`Rma7e9(*coPp$o? 
zrjHm)9~+FH{4wa2IeM_}y4htoQ`SXA!Zb8;rNsQ}2o0=+z_R)<_JK=dR()xCXRf$( z-<~1_QsWlrd|5U-pp+-g5cIiFRJy+Lo7-hJ*7y8&nYKkL>K}1+_w?*JBGntKtTh&; zyfe~AV|gT~+c-T6E$u?H8V^i`#g|jO@_#(xU)Xpgn)S8L#iVj?cL(QzlTEh9BR7`3 z%PXUcrYTyV``%V_YgY+r_r2=+2{7nmou$PbGHCfy`I0o3l}| znYwOKWs5wqQ+>;qY^2^8rx>+d8T$`HVid+)PV1@73xB!k<#vN#E>>E@e9(0?@0{sC z$$rb;-YAiD<^WF#iab-GjTJ46!X>o?lsWqOkf_7+R?X8iW@i2`?@noIB1IO<>H8}( zQS;oh({4T%ubkfjVIjfZdYV=T8&Kd@WGp5uR*?Tj^=x;|fP3{F_iBz(hA#KPnPueZ zM8_eEE|bf_8>*t?5wGF1$m}F#wN2|(ATAr4<3v(m^G>fmyOn50@zt8v==GPpc$w71 zz23ZHTklIjB3w}~EJQl}t)OkW*?X_%d}&5FK*M6eI<8(B9P)nV*PY((xw|>lb5U2_ z!ooHG;VTmtCQTRpn}N(YB3|i_+%bAvS=}#qLyqleW;kPb3PL_w?jU1yv8{ghc~03x zM;o!T8L}TSL>x|(DZPu*_+sxaK0np-f_QZCo9~I_yo;n)4TjwJqAXdsybt% zRQcU1xQd6ohN9nOE>`KO21+gWzLcV~6!$_HR{Omv=0EQ;H>CDv_~U4^BvbU+D1pfz z28wXy|kKd&lj0#J~`EI zUJO|Lyx^$&XrPOG->KcRu=}OdE0^Kt3V>IZax8WkWmMmAk$T~w-)FTnWiilj4I80$ z<4WVQpql9TpuVn&pWU==l@!zVqmnYDx7)R6dm4qF(dvsQrbbeOZkP@7ILf>R0njJ7 zVb{~PDIuY^!!dvt-AZ$>ajK9|ywcp`$jszZffQ)_vE&|~dAt0Un`J%a#FhKxk0`Vd z86uf&)o-Rm&F|LN4t=p3aXCxTnt&(mS>k|N0g?XY`I2D-`e|WMLd)oL3obKD^SAe9 z3a382(o5UCb(E$sAK>A4w=HfQ5inl(oC#8cDVZmgHuPF6)VcAv81ofO?H8?5URaFi zQDoLUFn!Kas%X<$W#mshqoOLBN#`?XXr3cishS;3X8-oODGOrgz2*xyz8L|-Tpjhb zuEXaJWjl4ddg_Kfg-XS`A{A?(x8E*kBNH%BuMVQQ{H0;@<@Bgq#4n9PiZZeLug1ze zB77z_u7VJS(u>o}@3O4l8;N9KuP`>xs;VAH9*8vt znOj#%-M0wOzBx_nb?dn#67dq2Rr(M;n`!SooK2&x=aukVpbg7hOcR5ghIPV4SCZ5E zZhJ%&WespfPmj*{B+w5sPtAuMnsb?!7kN0CX$wpH)RxS41(v%AyS^x9i_rX_{LFzD zIlgvj4q}XS`NHL>gQ73d^-7pt4zP4wHhDB!+%HD&_LecXd`!P{FEh5U{Xoa-GE3RFmN&IuXGObX2=d=K z4LtsO^TXVjj>P@Kz24wvzdKxNI~g~c>CWSESSpWB>B zyPk))jrGxI=(2&?nTeZ$H`iYKJ~8)yPCuab)b~kH%8SAhHb~?vxs2Vzly&*LQ0Y+qiDp|cu|xvE<_8b$%#S}L;|Cho=+$VZE7~Dkf%h9Di4Tw?%v#0Yf)h<;5vjP7Q@O@z`s7K-lYw7R z@%B%Nh;dx#D1nCqpYr1m06x7>C*=8r0M|?J#BT`vdh0p`E8kV%t<;Nn(tux?|3BT% zUVCB!4hIG#g~1JtjZy*t5wfRoCw>lTu(?@fnamFG(E$>DrrXvt1!@EEGyfi>IMSWH zPNf0ChkND(_{`OY7!DpdFe^ij_^|pwt#NDJ@M|%OCo${Qj0);jr{<0}!gK)6{7&PT 
zd}7r~U@cbrAzCyb5_gy~J5G0J+E2?Ku*hYRibfJf0`fQ)fs>^@6gfmpjl>G`c;IL&pOO2kpC%G)JK0Y>ixobon&K?Nv~2j58Q{%JS5{kk7m zJ3onMp=Qs2TsM4@6Bu4_$+Cb9M>i|yjgl~RDW3thM03*ORGMJVnPnu~kF$W~B#`Rs zx*=5uSHC3TIXQk09E_z-)9@01-iUqf?4W1#_#BhbRd3u(0vcGnLX1EMBO7IA|XE?Gj5)(6c#!X{XD zSKK@2z;~Wx&_!Y*fKzfpKst>u{*Z^cNx2~D4OY_JLz2*}PS78s&+J2uK1W{=AD4TH^o z5m5FUZs?U7uMZhq<{3H204<{p4;dBZS>0O(Bfr;G{9c#5l-bOU$c!Ke+YH_P33aNNX8y*Ay`Ov$QCP0{Cq?sm~ zrIH)L+gcDz4O>|H$inmsw>L0{b6^2!u!*Hf$%h(+k2_xf2B8l+i`%_~C|-)Bpqndq z>^Ey7U_=lv+&JJkfXlw&J4;f?0(T>jY)aR-L^m49T6KLF*>lt9-ke!3`BvX#I&4Eyw8X)A zB03I?UaB&YvzrW&LQlF7N8J01>6)zYI&MSTRd4_<5(;TXTE8%CnDcjHa&>^o==b;kH*m|HbsJ(jp6)7#bSBDhILIn7Ym*`f7G|ved(&RIF)d`$!m3_pMRPFDL{SrdO^0R+9Nb)1A81b71(kcZeqA zOlsn8dXOI_%U*IGD%zBMY72g(b=1 zgQko{bzZQF@8Q1!ct6cHq&fV@3d{3@_s_VvcclG4BBw;>gQo8He>(Hn?P^Y<#D8d$ zpYlx_>~z#oGdyeLMiHzjS!tl1=4dahrCQ?Jm0nHT@TdDG9S~9K;dm)f5x}hc*bt}$ z;@`aV{)Hs_*!bdsLa)l7c*9ZuPFBX~0@S%z|v0y}z#LhIt-jTq0cgdGlS za_QyMMd|Yse;nBw)PH`gLQoZS-!KEC(u_dl?{LC{zgO!x6un@!{6WOudqp5+&$$gU z&wpj?n)+#p{~#pcLSoDN-W$HaWVb$hL+W1kh(X8=RIe7Z<9`Hcr+YupERg^3 zTZOn>U+eum`uH!2)AWc6U<69{s}+JNG8mXl7SK?7%XKzwngxhCnnR2cTVqhDoHX3zBiF*6y7hT$JbCX5t+x4{~}DH2`>wL}%2 z3;Jm4!vNWNtVKJ;d?J)*U^#}zXaqMz|N9B4AoedxTq);tP6i`M=;u#L1Jl3h@Z1#> zi{*}9&BI*=%|fBXm0T6VQ@h)sQ&567A$eyD(sRaAW%?0kbT=>Kz`XtmID3I~+=YWd zj^qy|mjR#u>PmFKEGKoVSLF;bK0lNsbI8fdZBmX9D4ZE$xpo`-{vA(~eOOHW#O({4 zvHfaJIWp)8J=M-ko_qBS7=kg#3#1gM11+d9g-1#gG7GbzJJ-Vju_u%U-K)Yl$4WHW zH@Hqp!aQ9ERN)e=gVCIUi7aq4-*YKC_j_+J zBUMs!j)TkucN1yjO-`9QQaYt89P^xIIog5+%B+`9h&Cj1$x`5BU18`TJWS!rqrW0_&o1)p$*Ti(tyv6}19Sg9!0qu}f)Equ1 z-XMdm5yjWu3SkdS&42Ke92PGEq;ovV$Xg99K3F+02JqwjQuo9Hk3HX5Ffx*6{q8gc z0xgD*qkO^Bd3%np2_rYi3Z>Ywn{kb+V~)D-=p?5hwMRQcY#411?9UcG0AVtKv?n+F zh@dCfOM=XwBiAy%Sx_Y1bqA?E5!_dG=K>3U;?#D$~k#8T59x95G_>uH=pO1^cHmG0|4alxKJ96r1njkgqOC zDAmG+AXR}5$a+O-`o=7F$3LDIyJLgZX+t*Cd0#%P*!5)aJc*0L%3!Ybqd-nf1htt0 z2qg)R!2tmB{{{xP?9?r9e+~-VUqs=c86QYu%MNzEH@5}JxVgDLmy4AHfgm|34_Na= zT`h}sYhwanfjHEP*NYN%dVK6!`;bqpFb^JMCCP&rD|v-YKAQi-&Mp?)?Bn%y4uXSL 
zT%d_P6@S{wlxu;5uPLDSDtWNCuWl;?lpkZ?Q#Fh>2T*g|3Jkd76l25P&{b#3MAg(9 zbbzr)&)KYG6g5aQFt!p23fo1WzLk76RTC5n@R-KZbqK6I`3neaRZ>wto|o%X;zhZ# zkA;NZ=MTkVfb!J6_-u&EIIN`~J=px&oFP zTz#J|5+w#whO3+A#~>ZS=VPEP1PW;8-$Xs+Z`ygYCc+i*@3$XMM?un^Z0HcN$B+MC z*Cll$!55^SWbR^jcQ0?M6>>dO#0ea5VCKHIa3a6Fl@);}AA%dkXccrsKzWA-YIsBj zvm(&a=j$Hax!TKsE%SLO=pJAd&i?EysNCX)2V7Gq%;t11B2}rq=?YX}`}B1P1NG0l z&7SXV8IhklxaL`GQ-E^fS_GSlqG;0t+xT2xBXlyq7VMn3y{wq#fhtC4hgt6d zw2;g?hkq&(MJHNG9oZ>M3`*gV<2};H2jT(;Pk<6u2Lc{Imwxx@N$6yePS0s{<=>QR zFj|J^YR+h>=|4{OB(GKh>a%G15IV_Og8Tp@RdmR|)&c=c>&YV#3#g^aLYp8OsB-e? z4S^Rcp`l(dy=DGrVIG#x{h!z($!TB<6TCAu?9V9hTT;SMk}ZD=w@{ly3CiB^0IuN_ z!u||`UnOyZ(vIBWCS_UJQ+KUvo}!Z>Id+c(3i+p`+aWW+jkphmT-h!7a~&*HJgzfX z?r^(082!DTH)!+3twPetQvf^E;I7GQ@3c?E?{dCMJ=OXb1u?Uv`7#UCG_ZqpMCu+a z4ydoF=A&W&DOr7sKAWoa_u7Z<;i5<91%6fWBX~u@mHbCPV+c486jyfVbW$V#=Il*F zV{lN3Lo*39)ujofwQ$3{e#IenXP5ja_m0xPL|Q+XnOl;*xX3aYqBLWl*n|Ny;Hr$UfNmM;U^Vf>G zXTha+TaefIj%}!T5T1OoIXSZJA0=+=B?TXF-d{^P+L5IGsh!0UbPxvA#u0dNBHbG7 zffB668e}^bYI>bzc$E!v(ALDxjlhQ4T%qNSXY=No_XfRHVKf#X2<3Ln+_m@NL>G?Q zch%e!0(@^Agyq)>2FQ2r$fH@04Iof%AxZ=p;*r{Zto_!L4Ag?@b_|dc@e&QN=2{(6 zSQb0iI33;y<$WhT+3RVJBLUZgJ5SQtkm_5&1-AgVRW|{4piDpdy?Me1Hf4lsu`kJP3qaJS7SCGx(~5z4mDY~x0Y-=uRqG( zyh_)8>EPjg)IQ!AQUDNOi(kpnMnp*BIDe8YZIlW&+P{<|j1kjGU5*2VcYX4%g^c*QlI2(DMbimUpG;0-sWIW&amW$?l4 z9lLe(ZaqO53Ck}bA1_2SEMQq^-sOQJ(Bz8zVTVF(H{(^w&q6BoSp`|3GS^ECWw+aN zSL)ezbsG|~6K`ITIKluPwj7REDejaD_*3X?LZXSzqIWM4QR?&(DkbuxfxBfFu$!n{ zZUt(DIe1F@k9C*%=P)ofDw=I7vkWKqraL z?8L~lo%X8ojo(p{LwKK)`nJj(4R9yUo@1%~%jQa%#$zir#VkFVrW)RFHf|j&O!}qtY%fa3v=V?>ekr)$m8>DM*T*Y)Y+-x@VcI= zgHJU@dScHv?YJJ1(g0t67@ojSH=l}4(P zJ$r%*7R_OTvUSB!6x?s+Z?>05kKc+vlBx&s+s9fXf?GqW<`05AlY3sPWH-)b zV{;S9Fv>cbbg9cxcq730s9ZKxSqVF)t`np#2;Ou)*8ayzz+93#@r?iXSosjQ>Ge46 zlaGypT;JUdt#AzzT5jWA#1}QUKt(8>j`W+Rm`}i~g4pSZj6?#p4w0LQissH<8kwbycvE>M2d2w<^9-L@v z#MlWJUU#z*cE(1YBDo*2xhJ&fIk;~~0$p-tAMu~TcXQ-HHFGXd!5V_6Y7duug=CMT zQsqwW9ZQ8P!JDZKl}9HT>q1a!!Q0pI(BUxK z%4+L((RfJ};2lGp>W!U3cpGFb9)_~Rhc0raBLaew8RAlFv8=CxKKiql^-Gf+ 
z?xa1-l?pY3Shk{O&a#Q_E8%ZxZ0vwGoiU^kN z@2}rw){z`aS*c$hBK}1T6(t! zQ;Zz(&#MB5d>E5kNyD5GT21)C;4s66)5*k1=-H^$(>H#6&JRC3KS5D}QQK64PTVxH zn;?TtQB<)$_s~3U9D%_(;pgJMG*++3>yY+WQaFnDIPV!?K`AcU-)piuBn_+Ztd+IMH(bIURvN5hEVrcjeZDrXdJ zD8ospE-A(phSp2etJ5^pnY$KI8Dwd^+KrZh`s!7;P3HOn+o+S24ObkD^mK8)geV0`UT6v~l? z0@l!Yf|?7sqQ6dJ^7kfk+VNWDG|GQ6ztJ{8&=?i$Lq=YNV#tlDIG#T2kVH_`ifk7A zRnDb_;u$+(zF!h#l0@OS9u&}l!^MoB>#Yv&F(!UI_(Z4Dg#}WCv`U@llg>7exWHf)Og1e$d zxLwNW8@Rxx-uzRL%nWQcX7NzG(SKoYWs5O!SV!_7oc~?YcVgF?U;Qj32*$`6FZKL> z!fnZ>Va=Dpvy?>_=HKHtu()Cs;3Mm29}w8~@cTi?Qnb+Kzm7*TShlXa88H7C`hpeV zmOTwZ3HtmYweS>4N{l3yHU9o#K(Rc9%9RAXoNxeqhYBhUZ|XzQ*%loVJiSbd@2^8_ zKrr}Gyr(@>g0j{Lus{B0OXWZQ3sw%r6=IE_ruSiP55$BEVybn9`BIc##X|4l1D-WL z;5}>~d3zH2EMz?iKz+vn3I{1Sv@s+{LkVl5{A2!!v+?wn;R!rfhd;dmzg}Vy4@#SB zlJ~OJlX?uN>*H;iw|3e-P=0wp$y#n?jj^sL^KY9HCM?#nfWE0wT+KI0yVj4eFSK z&Pg-3=dhbVcUS9g*!^@iS78a>@tvmPl(I+HjYoIny7|`)YAB(Vxa;XCH!ukX7Q${$ zTrf)UJa#&;Ua<*G|8BK*IF9vQSX|sMx)OX4n2@|6p9k$htv?-J5Yv=q-o|}#2knG6 zGp=ABE?LN5Kvj9#C)F^Be(Qx(1NAxd60cg48rnOcZ;h0rfoV*X*$xOzA4D9Wp!C@C z4!PN_KDaa}@y$Z@DgdW zY*|qpbaP<#+3cg*T!!VsI-s86!^fZ+I!*PKJC_!fa-PHE? z|CA@c%c;*sTSS!86#uiZOSm=1sRPG;Gj_25OOb!}wV!^if;jV6;dCD1XNdR1i%rSj zDF2br-yhUQ!Litij}u3hKJ!V9uG#PpTuYY!xY|F$0KUP7yj`M2emb@BPT3a8+&yO2D-*UeJP(CLxcYn+tX#~B>gN%<{? 
zWVyc6Y~d9*4kDuHODpu$JFs%Zw84{UI{X~V}k z@$Hfo_|ZW>^mjcqpU>JxKL3s|(&ev0^QVJ+2nOg1dV3&zzy<^=$soAHT1}>rk~{Gt z;qkaxE&HCuR8aaJp6Jh)+jlFuAV1Y@}Cc^++bYpwxgT>bdyoV3{daikyNi8 znz|f~3%Dw5znU}oryl-D+M8Xj|=9Yhv^al%bgy z5&nOC$!N@n<{kO^zo|t}DeGynrE20k3k+%UEzDElr3w*yNfuCK@!5Yo z=1Hb8oM78W6>$aMFi>&u-r3zPchfW+re4u^0p zzfiK64RtS1jIJF2%t?-Ud|YOa*!>>m?4@GW!l9=^bD{ZtCF9Xv8ymmXo{-Mthp1A# zth5;o%a|=?%o)s4PdEa<<0^S(yhoG6J>0L+P!&tQjzwYja0tMl`t@=UT1Hz)Ri@;l0QgnW3gPdDi#bRvXM^nWCTU7T(?RJ z)K4PQMWdktHn zSQ#tKH7|$5#AmOK%|r7e16ru-@7Q0HpbV93N=8ZkQwB_l z-8t?x(kScQm<-REbrMzB0eXVRjQ-PGIp?y8lerdCkf_T3eSfd3spTE`>?UPcCS~~h z44J9edo6w$tNzzU;Z=|z%^9}wv@bEB-~MjP`NrYM@O`nP!2hb6)???ZDZXR;r<=w= zYa9~hmqCjZ%yo-;(xMDFGzfJN=x)4IJh>Y_NfggJ<`*)VtDEXc1Um8WUYr;$&=f;g zhZEQwIux%8Q8w1`;=65sI)Kb2AOWwIw5_FoC8I9jn+Z!!fUHCRj!$QRF^&&jR^=qVwKzs)v@2LT`mmHRxQ{y>ARI38m$8+c~QUHkvb2A7TA;{o;5#| zKZ8el)jI^!x6leJxf{CADk?T~`Evu*j$7a;2WxvaAFb++wcXbM&IFrHUBR1#?WL%q zZ)$+cUBR=<_la@o6K&ABHo!{ps}g|YZ7(#hR=q}9;3L2&DQ}h_Dh1C?H*S+flomoi zAPAUN;4w}ifz34J9?FTul^q{gWJ;3NO+?@5hCVZoIZ(wsR)z|D*=V2)5(3CEGJ&4R-<_M{sTz+7j4 zPmPojyIHYhG$|sQtb*;SI-P7OBHSmmMzpfR-J^!S4C>a8cjFtddJdcdDLfIOjO-lO zjX5ttM0t;f)=E#bqm^`;P{;FqB2d4AA>e-;CH;s^>GmmTR+sgoy zOeh~vZvND`LQeeTUNaz`!|>Y4m2f(ilD0~YY?YX1gOT%t!=1&C!wG=Uc8o9Vxsbuw zT?WaWAd3q*yE8xQfx%5L2!kT8;#B!I7hbq}FWKsXW;EeREk5hK`3b1Qs&l*mK_4s7 zny(WBV|Pk;Z(pQT9+m+r1`O%r$^Ifg(RzcnIsA|yj6t+-W95Lag<$%|LG*o;G#+Lk zs5vQNTc7=LlejS4B#tT+XB$4QqBnWMa}R9zp?|KHj`GVs?_Ro-03Ny<~I>p*pw9nzTFuEl! 
zrdQ+~cYsPKsRFtl^w0V1|0N=WVKSJi;0;9vkI$32BmNJdR;zbMnR&VKlh&= z@-adsa&A(-Xc)Sq&^_5I+$|WXd#9=!7-fQ!#{d1;De}iaMbCl7W*)7Pgepm8OvS;m zT}ac2G}XVt6}LI)>GXVdpo?3;!zwe6xN4BY{qaX<;fB3S8Za z!e;VsV2sx3p4B|R_KD5VaW26(%YoNr&e-A4#`sQCbfKt^=v3tQN%xSd9opq6CFnoS z^4;KQi|r`7?Qj~74dYzEd;4oB&nH5quL(55XalL1+=d6uiKo2|L`lx?(nUqm24CD{ z;7zt2jaKd~W6&*F|0C3w_*70G^^KKlgf1-{UVU0pZc|wve_HhWtF56FSfNPjBTH9Z z)qBAQXO+FqfKfXL%^7VhBjs9HY|UgHbBhVie(AoHjXFgD_F)^tZ7uop$%W~E?tkGs z;x;h~E3d-XeO2H0hU+A+J%rQe;RROJ6i&47{;O#@BbXCm8{QWO1`_x@bE}|SyUJrL zCua3lBWpUZ9ReG^{wxN#&f7-HCnZCTVxW8}L{LQPu z_*ay1zR6aRL--OO@9>`so_Lg%A+=SWec*Imlx8e90&j~aPcTsbDwf-8O`+l{Han0> z-|7Q{gWWovV!Xf_NiH_^DB9;>kQn=Y41!r?_KUyh4>MJN-yY-T|G|iNBA!V8;~%9Q zx0W{or-`C_183E7dDWl#L?|%1xs=2s^=gZ4KHy-1c#Omm?u+-&F~n^kzc*TTRHI1A}I`i~bK_tKdyf8n#9GC9!+>f4AUPV9SKGV1bz{#_@? zyn_BIXy8u@AT90rTBXay1Up?4nJDeHVc%{e;8ec=$Y1XF)!}2HU$HWV)vP zT}e6q1PAqPvXnMBRw(#aOqkP>15vywp5^%|$k{d5u+FY+2aejHbEL#y`g`0PTM0oa z;yP2Lpoj93bNa4x<|SPNg_sB>1}4#Ku18C3?AkxcUi`lJ=N;kZh3!ABw~ zf*zQU-vY4%8(;iT?k*X>p&Kx-IpU5W8zP48K2#i~FkPyS-`IDdk`d0w0gCw+{l=dKtM)!vyrou_FKU$~$&88=CKouMSIT}HM_wTiAV4ji3pVQQXWX!jZ+UrnxFLM1&;1f;s5#$4cK+7*;t83R&22b zv#LC2PTOvanlbDZNcVwO&4XZqE_0ubM0kfSi5}aibMI*DXpe1V@2GJ4X1u?H*ttPD z98}3VcPn|9qpWYFEb2&1_u2!=h08p@Bp)sk*sSAjTw@=K0fT)K5HXmXv1#5hQG%ph zu!~)$0lvSnk>}o&BMEn zW56w-M}gxtq7TvMfcX5MZo(eH?i%!eB09*}~~u+WyHe7WAo zzXW3F`TjP2zj#<-@H8qV=*EZ5{`Uf2Qrd029%hxe9slY51GmZD`f2hDsZLEfkjiBV z$iW-#F#1doa0XK^OcZvrSl=&VyqR<7Y9`*ZF;hQ}=6Zdi#e*6e8m3>=AM&AZvELQ* z52r6ZSl|7Rq{2B$3M+h`FDlgoW0TW9PUDJ4;Y1J?V(7d$4S<$9r^$)pb@6@{;C6t# zBFa{l+Cf_&;}G3e)IH8Hf2~vI))ET5){Dm&;#5ui!0q$JyEVd`Yp2LkQUC)6Pp&}0l5aVy(goJA_uhBkJh%!04F+ZY2~zDhyOWsKo4ZId zHOAWZRoa z-#l!p&#qr&60H0>&ZGa-=As&yYvg>Y-@ni45gn6743MS4^$hjkp6}BUL@#WOtvOnH zUR|B>bwIdVGzc=HeeU9__>J56o{B*3)0vhMIhDj?TN-t04i%Cqwr^$72&sBg7T+s? 
zwk4M-ab5y53ahH?ntgE=bztDpZFRZ{xFMEgPPBUevVn>8U6bJ}a?KX`BP9`lqX`G7=XiqjYbp{zpk{xpZl=DPx=<>{ncc`6rzp zDX$P;0-P26LDfZ}{bkMw6e$4FQfh&w==h@9?@|{XEM(-bAq8W<5y8cHB78?bZg*XC z_c7!nTt_`cK9>H59`eIyVX&Tky2C^h)j9v!8!(dhb5Vb8Ql_pqQ-jjv)OTweRSI!y z2JVy*Gaq5tg2)$6fhE2|6$vHqC~(pKcj4K0VCp!4M~l_xP`um>dA2`G0R8Gb9 zS?ze+blkS@eNG1<^+pAD&OZNV_YVX%_^eRn&1MCFQ+!iGVWR*D+)ET5ZQR&U14JO` z`jJ0cE5ks2Sqjz}NIf3{Gc#+R2vuaK!^Dk{NuY;!yz;c1j8k}3D1I^6kQF@zN|F6a zKag;@SWiO#>1=%@wK)sga-Wfx&>bBS(66Ph+noWlP7FspfMqmLiEw%0OLn~j;`0Hf zG+Da?paCYm+Qqd>!@yw9nkNa6GM|hbOY^il?uZ5bat_CI0pE4eKVP)rh*!r(A;26f zFaZ0#>SEKd0*I|@!L}`w7%UHH#~lF6j{;!SI5R##{caYVOY$;|VDP()XQ>Xy619eI z`xb2BCC=ARtmQ zNSDNbk}4$~Lr6<^*Z0f}y6&#J`@Z}BzyEh#dvUGG^PF?;`*;8DIOp6CTkC-ZDg|lO z60QM?lbmQgeo+9T$?#M&)N;P!@FZ3)P|6iXzi$`$KH8anl+f4ZQR`I5LzOS>P-KDA zo($-dR=N!pbABzpoV~02`Ndfjj|<)Q1B4erD~zi8g$`q)#7h!~ti=*zpcf}>0W7X} z_uhEmahdb$d(KDe;`?#`hMtRC7ciF`f+PMp|7!Bv$0&Q~xl@r6jh>ehYHd=-Yf}c+ zR^EV%U;pUZljAdLDZ7SMPu6OC+d@Lhk7*}wGtTxVx`svZQWmMQ^Fk{=fOVh`K0jvm zvgByR?VK1*QaX>O86`?2HW`oAmx)v#;?Scj1i1g?gl3b@{nEq$l=co&B%LdMP~gp&aMBx=9IC>a@bQpk8( za03v4-M`XIZbw5IpcU>&ih@FD&9r$fcqL)lfB80z^}any(PnllJXcUdCU<2>02ar1 z`r`HR#8TMfaL40ZyQU7$i&PD`L}$5Ogg8OtkF0ES5u=}ZlSqYw!QJQvpTg<1_?$u9_8HwVA5k zJz;bmExOlVfKr#^eWADaOX$89a=%eVh-~f(8ejP@sgR7z{PvXy2)H-${rPHtR1tOS z`cd~e;##zj48=u|Tx9h?=>`aSX8ZUwc6$Gn18f+OpM(QfM|@GirBIze6hK|$l;V{- z(&kHtq)f+kcwg=otd$W0{m2oPjiz>F9b-KnON(-yUBIG7q2OL%SXHl#II1rPNHdaoc!Tcg_M6yLhZRl|?)QJpH<7HIhT9 zTG(^wH8fD}t^@cLjLyL3#mcyF%w-LPUYqM{!g7~H^hS>^w(eUaRV{E}`n^DXEP|gD zp4<11{Caf%d{T;)fYNCcDuni$MvgXey$%u*qC{%?y$%j=og+jJtDIEZ(8h`uiu$Xn zPdY7>@?^&a{|n5XQ0J-J_$X;CviP1q+6B316Y9`CgSN8RCYV0f=Se8qdHno+emB!A(nPu~DAM9Uwa&CkByGQ@TKowMm-VbPkW^A{k#c{+s_}Itf zw>QuDd3vvC|Kgz}yvzGxl+w1TN>#fQDtREj@w@&!`!BZ~zSF--M()z`7AI}yDWnF! 
z@)^|7C67vjbO7fsGAy8ykev3?Oz{%L%P&ual0-veF#_gyh2A3`=WAXkvs)rCY|nT*}uAKUry?8Ri-IDqcM zme2Rlt`y=-tlu^A76Vp&v;fp|1%EvIP(zn@CCq4G1Lp&k`ME@1OW|@OOSvn?->JFl z18{Pm4_&Dg;6T?{hWc+k$fwZTU(q|PIhPj5nTRfuJ-KuM@Ux&mnABJH;^fH7Jd5&K zt?a;#-U?8Da*4#pozwjBN@(Jy&WKztev0Hqh#BY&G&^ zeU{iA(5u&8ct%QanKjbIFjuhmHNyv;JEY*e*HfFE1C23akU;~VhUp4 zan#o&f@T1^yx?c<1!N36+*eNH*;#IwzuGSv3n4VxKUnewwMqWyp|3&O@iyNv@BRMH z!-Xm6ZQ#mMEjlbkxc~-fH(sPvj<*&5Nc4XS!v5ep{*wnO4EvG6IN~@qL)J|H#Ee39 z1;|jc6zwlj^8{D*YqYqI^EPz&_9?Hb%4U zpotf4Zpfbk(jA4zyW&7`YI}Mko#Loj7qu(;i?Q$DSxXuScqrbtLDGQQnEcf&VmsCX zmXFpaUV@LK9WmB;WL80j<_YA_Z$Yc?I>pNXrkAmYa(3eUcznk(8FjQAGCaJ1Hb>;o zZ)v62`4#c)Qs5Gc1T)HC{X(SNFY)B-f`H?pp|vM!)dp!41%`ali7N8vSG~Q!2Zr`| zpmc`#kTMe$vixdoPh_+W9_0V6xV|yQuM}{f=IwUp2^ti;|7samswd#6O`u;*EYzM~ z>9@dThOMy%4oPH&@&j$o@YK9kvGl6jIJnq$M(T*6W4xPDQ-At<5WoUhxdpAL@RR(b(rc;D_M##x39IFOG&5b-+c-kN-@ z3AE$!>bgNji{Dg5rn7<+={aQV@SgaSkm7^I*W=VzMR2aZb>U@(Jkc`!lWorMq-Pqt zk3GG05@DV`AX6GYTRso;Dn)Lq8)JU~u4Y2i$9$5qd`D(29nKQBiBIC#H6hFA-y0;n zfjbJUAiO|(QOrBY3=cg8R5>3#1jm0lsah7;gQg7oa_ZsU$dO3T= z@iAtSQ*OXb%PcM#GX3jR04Uh5{fU6qNds=a&}ijqdP3bV=cMB-&4Ee`uAW^7ZlbF1 z$rzOYr)8AN#0Iba*(7uGOP5Fd@p`<@Iisac{GHrpX4}NGfzxL{8M9Lr=!3v|>d`ja z`4Thqk}#{rvjIfq%ki&;Rr^a z7?@76_j_aVXEM{JY2p8{MSjv7=-k$+D3uq8hemT263q_+Pr`xg3cf{B%{Rm<%k1AI(k^4_=-Q)7y1%+`%CX<$3{mabl>QT<%VX#&g~phd^hrF4tR^ z8$X6*&AJK=2C&Z!JyY)S4abatzvYr3b`vwkKfGXO7_I|gN%DtqlsH_i?^TZr!@$Z3 zKrQ6p6R;cNL1rDg_MXG@`nPw!ZQLHY3WY0^w~jxFKR1r2?|r~Y)aV7-I7eI>+}C!@ zk$=IP1aFPU%4xk7KOJ*J5`Y1>j-Wq!HKZX|{k$0GDfTdNOwM$>L;LMdRQiL0h9aj> z(cWJHLDrA4R0glMSW-zfl3fF=Zh8$iv@Im|o_Rz~tBkmu8i>yBvlhaTeul4~a356s^1@I{Y=)9Nb59X=A?v^R&FNNrQ@jQ`Pg!Ani zr#k3aR6;~;{S5H7-+LUss|~Gfpz%joC(p*$Bbir(obSEsM9d9JovLqqQG~;pek20q z1gB7u^AF692E*XybImEmQ*)sP1xUidZMJ~iYxuz;x(ms(;09C->MYO#fvIn-i#`{D59B zTyB*%cUWQb$KwQh)S>(XRS?<4^;KD_kZ+)PQE^_(7U;%tb_DcrV)Dv z*EMDu+s^Gus1(93LP2D5#*7c>reZdBRv=`*qcNoka?B&fsLlt{~SsjD$G#jx|=OB!{QvF~ zbHn#X0Eg|K;`!A^(Eaxoe#QYELnx~nB3nQJ_GD9JbL=Y607&)xnP$p`kdVnm4fJ3! 
zXmjD1bKav`Z4{xlX?_qXNkdR9<%AmO6TH2Og7LD^O(bCc4v!HS(u(sLhohnEI~;gp%$H+}r3-`!8NDwPGA; zy66diLFoqN#nFI8TQSvfw(*y!^4GFL3;G87yw6|#;4w;nf(I1DA3(MMGn}4aSxEJQ z3jf5a8HOm3Cftis)y*x5J?HN!2rYgab|8neaUQF#ru?o8m6F^zofNeYt){ zVgEoE%RPTdJO3K8{t~=s=zchMta2tKr^aoL&BLu}pD9M~an-MC& zK#LefOqGRrdfr~L4@S*W(OU+84GR8|m|49q+NGQ(rk>z@YkO%6<4^U0Us@IlEpN%< z$NaZXqyP;Zgq89Cg{t`OjsX?JGyeuM&0n_>|2ydNmEh~|n?Jxum7ad!?tje}D3f(DCGdGxL+`Ltfy+`f2fCT)v%UW@ccL{c zgmtnW=$|L>7Nd=0-&^%((f2>#rO(+ulEe{snsr}*)-ve*!2eE6TM`;d#Co^?D@Sr{ zi`x5vjL?J<1Q-56@=q$813% z-l$`aKS)Xb3Ce?j^EuUzR)nd3I|pU>C)jwQzs(=A{~Iz~x{3-zkfHdo9|Q-})6ay7 zyF6Gqpg26rqB8hLh&@x==sLln5uYe|2$4kjS5!2G8UfiYj^EPt(<{UN?#B)-u~= z@}x*d6sRMe{9(Q9`{3i|yJb7d9ju=8>9C`r@AQM3C9>CRCD+$0BR?ns{AX{<--U=D z5bLY{6BhXgf}?)>86u8v8F_-Ho~)Yae}bGPus^f1H~v@vo@S)TXu{D!+W%&*X4E;c zR|1mo)PZ)=QJZ2fR)i1Y!}sO?2^aspf{KYXUwin~&ZH<;6aH_cz;+$>1cRreHt;ha z&=p>7@YEmcM}G(@&}{T8D<4$H;DXjPrWoE|iK4RDpFKyMGe-H~kG1DNL`#25M*_eB zRgPePTplRIW0ZOE-T2g-d+74df|c*g4{bK$ zzthh*3qRqBzH#?AUiv%X>E-bXhwO8R{!S0yO#2_Y&hNyACP(}~u^7_4Z~k4fhqXKF zIsrm1N%7O$uRQZF7RvD@3#cEzKnnfVsib4x{)H-r2ec#gdIMr0vPZN za*c%C%U}K1&sY9AJ52=I?q8j?8T0E>1p38TM513Q@x_aWZ8ToL&)(}tg8iJ0ufmPZ zsa-Gp+aI^7&_T(6`O<{6Y0wIA)FmQs960CjYci^SkqbERM?Rmv`bEb3pV!{&{jNnl z?{9!d2M$P=;bKPp5(AgD%GH|7hq!uQ!3A|*^5?-n3LhLA7ztlvg-2&^gzTTW;PKUq z{Ky<0@NJ5F0Na9icH5_oepbIdc5-JtHvo^x>fQR)W10HEe{)!<#8PBU z;ykD5d;GsmhESNS)~9y->Os$wZUbZfj-nru#Bd*>daA;R76|x+tUG<9iwA+oU$Ozyyp1jSM;Jm+9<9*t-i=(rkI@+vbz(dV zhqG@NeRAffWrXiQgjOd(zD+>wKJqj-!_9TwH`)QTq zJ2OCA9EuarMD)$0Ueu;rxy2|4W6G=I#dt#GJ`l!Y& zYo>CKk1YJGj`}+}p|Mqsf%MYf(FaoZ=t1BAF)VoScZJe_G4;R8>i-3X{x0_Lx}^t% zEht%|`3`MT-#G;&>7m8+yVF6*=5LGE|8EN0W)azI@#NC8Y8fCj{_|lTbg}y1FdTwU zNxWXf2Z(F&e;C*P3!K%Z}LKmEtUU)(Qpm-wX*_O+kKAlphF*&+8j{)K9n zTY+A!+xVFeev){bU+*T|C=|Jd>~nW?3c3IDU3~N(`?iF7yRE(bH}O!$u81|cpH)0A zWO&w9@r@vx10K=*+aZHI<%||_5e$|4exe~i;|y+6Xy`_ZLXqW_sKY%ks(9NxFu*%^7P ztQ~A@L|wUn<3;on|DKWW%`m^VMEY+UR@(|L1TI}RU;NjD%C*;g zIEQBtngcywL|h~CQw=MQow6@UY3Vmbep(5Agy3Ir!V)X?yJ?BRsgn}^d^do8*gvaD 
zi1lOR-7RcEL5lR>Lh9ZVki0lC^{M!G7T8C4KOIu@VBKP+Gu!xovrx9+GN2taI6gcV z%2+qW@U!fa^dmDR#<{yA9m7CZ+(Sv!_UV7`iQdM?Hw^m7ii~3_u*Ojf8oTT^-lO{2 z-n2O*(4}_e%-jFXf^FEKb>^eW5AsQ0ik$@-VkQ6R$e3?IoE9&-p>5(*FB zgzJ?ME&y%WI&{2G7;JRDZaHBj1aJc#@UKB{S};5q4~LmbQYNctrAzM# zv*Sjz%-Zds+B1h!8j#wx9Y~pRN#r)zN{3rtqL10lP|r%T7DshTmUi>zoJ!n%CQqD_ zg?+;@ns!Jp77Xh*A>jmu2ReXsr1)TrPglHqW(Cj|r6#|zZ1|$h3Y|Y^KX{)TjGiF` zn> zB5oc$X|@Acj!V_psp%vtNw9QBt;bOvvB8S5@P1_0p=iA`O#dhl_;$_@|66Yu7{RHU z$VDJOROncIRUsIf&9>oHM`es*x?@V(fXgf;s4Ix-xz930=ie#mn=>c zl5UK3d)z--DFNm@%fps>xNoWjS=9`3Td}w?ryPI_X9d3dQ&?4ttD6zkKGmjMs?ZAYOrTO<%|!+!J=*K3}Z0GYW1H+MkAOus3*?q@Bu7IRFRawL|z@oX1(7 zW2Rv370XT&RT2A@f85!nf7iBkMcVV|j+p>V@AC`EHVW&07D4y*r>*QWJ8vcgb$gT?{kz=^Z4CctSkGuZFCCPFg7AF%DcIY;%6r32x zz1oq^X*)V6E}LW*Dwm>L8F!1l*qcC3hc{Z`q!~k{lgLN}@YTL#oU$#ejo2Lq5pqWR zX_iy(-O@^2tsfGWVJYNrFi=_9T^%|@ZG%_J(Je+bAsW*9*kax`S*uxROxdQwc2Svk zMjgY`>+>ba%DK+*h3hG55c5HW0EQcL8uGpgdgbAQ97h- z-u1oxce~me?xm>(&U48M1S4}F8W0J=F*lSwF;fa9lfKM%_?@Q!=F-UB(!AvnM?W=C z!e$%Z2S z<0sFZzcR9(q8V(^zGLV=IL@A+Wf76c%&^FDV~sL6>eLE1BCBJ)Y}4IwmOCE*rdi01 zFR2|ic)JI0hYqtcwJW8k^Qq@m#Y7#jdJ)ylqguP&&G6(j*t)?IgnPC=i{!JiipNTX zDXhs!8nbXa(TXFs<{1)Tipi(aN9u~^ua0VbG0e}f!?t?ebTuFWz;bj#Re!a%{Ohjr zn4@`l^7xonxP-$q3zQUlpG-a45h)xd5zwc#-Cv|Sc@!f~_(O9?w$ zn+n&%RmbfGR=^QXbhNKH%*%3dFAaY=pd8wqcwH!|x0u?ogjasBx4vr|5?w|E<{Q6# zUCZjIqhe=5i&V0vBe+E#4~JO4vWAecj`VdpENVHp&E{T3uOO?8=sbiqkUvt6f;G5i z&Vj2uTYXFIjr+`?vd4;`Rn)q@fWh925I@2Vih{vjvw@}dtVQ06E(H7Yiby#($sG<$ zlT3Go#*WX=s_v^EbZ7H!=uYEij!86E@A&m^pYPnv+$ftIA{S||2!pU0voh5>t4!}E zvNNcwAILi&k`6mArs2>XS|@jgaWM7k8$nXI)`_S!w6=jlF*PY21u-uQ6=`Hu`u| zr3#I_u8b*%%yfs2(AjdK+}1Z$kLMmfZkdI3u0RTg+#>Qs4muB_c@3F%gb(&55`3`w z;Bk3LWOM_b*B+%nEN)OKv{qBh#+@CE#nsB^E-4+Zk>TGGSb9Op=v?thM{H|ghTf_T ztL9-zZmT(fWlS@eI$QBG@0KC^nx5H1Z4BvXz6!pY#)?lVwa*f$ojqq^l%AuJ>rlfj zt26^%Fq^#kDJK)NCl@k0;Lmt4j%35cm zqR$GB80nDc3{Dz;Rjhb0l!fWuy>C0(!Vk@eTkDi zBMjPq^;IMMd4Yg~5-S(O&>Ow1PP=RSHqf0pT_Zx!y)qcmR^3tN7qL~an3!1wk zI(54iHN)3MwB4@dKr_ZfROY671CLngli9eAe2nuimoOMWwsbJ1*XC3n@MsJ3a3|Yd_0qy;DMC}Uzc39 
zuiBTH(H_v1Q{FS2TziYR+WojLRgi-B@j@2mNLo+D4qJB7QokRc1lPmffmaR?zpMCRuix zH?@Msxyx~U!V%O|rfP`a(G0o6np^&G{TU2eIA5CIn+LbeEY;zMF65NtvejL*hC{zhXPEdJH|5WIftVORCrx+yi?PjeWJ)4Cm?-h61oOb9J~Ia+K3>+>;QmvNiVLS(u14P4+4hhT%*{Yk7*oDWKhW&H*2H zl~4CfWyMbMnyqm-ZgDT`rEc1Sd4oa=u#{m(iDb+oz$?)jF8mbNS_L?wFY@9DIZCN4 zYAij}yRsQ?Q8)|PA@o{1@S_5qiE%^tIK&i4FVPQz97e1JA$^&9Zk0YV1H9wWp_Ly; z2I=iG%7a#e1fOQi7F+C<8!${^`^_1H>N;cxOc|bXgz?t)9ID|i+0XebXbpE{?r&ql zVsl+j)h;&j&WcvX!m~0!t&Xm2%-^w7#yDrbjH7o!4-x&oDf?!k*~Q8Z;lWPD>ep^oxo z&a=IVs<|X3o4IZ-F;e6*&J3{_&4O!m;w`| z`-H&N8lM-*NCRHCrpMT{aNt9LjfJk-y5KOOkFXH4tcx){Y7~|^8c;>E3Jv+b*gc2q ziEN9HNE@&>_A1r2;HpbD-?D3e)mCMQ0b`s3N74fg!M}0l%(J zfuXhVF%3RFCVX(orkn-D&m}1&%WB_12q0xaZ(~{1WJ`+!yr_qq@Y$}UWXwG(#49X- z%WsgBKt=)s7-DkspBot}mq2t9V_~HX{+W-r13+U~s+X{ZDtcK>iKY?=-O6hU&v#A(}I}tzO-TI(ZMuu zM+K&5OpEjzF&PD2r@8bcK50I-wZ?g{U&|CbbG_A|+=4LZVx3K;uE3iN?D;!2!&fZH zNjgmX2C8SQqGoff?ju5^G5Dt zbv0tztm=jYAGjMj{>mYYWgfRWe&3Q#&?nqv| zgMh@tH@i}2_D1F7+8R_y{IXgY(=&P1$3E$#eT?SQ=u2o0NX}O07-Q4pcpkkM5$Y>8 z3RbzzS_vO-bo%7hZQ7v?VCCNUb$5d`6*8xpv2Im{@zJ7ZE#FqK$y_!r(W7eNInp&& zHg!R(>Nc#l!GvyuM^?je?oCDenei9R&PDABGJch|?E5OV>=(AVgsIOS>~~nEhL7b- z#52ry@yWR{Ij@Hq;_AB-Kx9L5rryvd21~MUlweJ>n?wxD&9{L$d=K(En=i;o;+jtv z=EnvKv4eYdvSKX4L2I5Q7h7tQVW}D+-Ev864SO5pd10>v33<12-5k$lt6R|n@tlMk z87y3+SSbduOsCbcc)P`fdnM{0JZE(LpT+sNj#;%o*Q%z=pWiT{vpK+-Rf;~KowvcJ zT)1;BbC2|6GS+mBh`3D0=6r1EkZf*PK{3N)UWGpM9N-gpOIV(0{zI(Pbp7_rc*M};@sbROJ8INI|~2Sj>SIdw0yRK@vKqX?%TrKn5ZLExNR!9%ewBtr?v=h@`<+vVfHex&n-{CC`S)ny6Z5uXhMA(&Xs;_m4GAAMaC5-hlnv2PaG#Vy$6k;K>)06_87jW)#pt}=_4V`=zplBJHzmzfh3fH$ z(&nB_+xzTHawHKfi)Aiy0hs#2j9naxR`j4*!NkWsS-cVS2P;|wGB&qA9(?$NnSy7T zotj$!Gc98d*g>cu=iU7wdv~rBE5XnT{+r$f5SeH3`ikuI-I}LU8)LHdgP^9TT66v- zLmmnAE|cy$%d$;N-^$;As<{;k`zKq=V{C138DkdY3b&}V~R1zctqwia)< zGBL{&&36Ym)p}1AEVJkx^~~HQ0cAY*=Ckr`V{e=?Y-uMGOABnh z?Z>8IVx>dZ=Zns~=D3|S@j^1UakLD$r5lV246SPp@Rpvwn&}(8S(twRY-e5}+~q^` zPFB2~5$XF`wF?8cZYlK@IZxP#?EgjRbHG62hf|?yGD%uZF!9th>?z)KwBH zw@shBXBf`HDtdz|EXV2a3QG$$+&-i?$NKMgo38c%fg#Oi?1e1(H;ck^z?YCey37oU 
zdg4+~$a@|jBg551jmT2@0(NEOBzwh-3ECR6QK4Z#f~p(K=-Hhej+vO?60bPd6urjV z;Kser_PKgfAsjWEtAk)8wXR`%12dTYb_N_*-LR@2d`+`jR4SGO>~t0Q0+>*W;C*;^ zzwPHbt#Um3&Z@7HMMJl~YYU5WR-O5-iEgDq?ZZ;fa~s6TDy+L|argR|dMIq2QfF6V zTG{GiEZe*XGk*irkH|u#v2n~7^%4ZL=4enEj~`gMIrixW1Foc%WPK{Ny^kmdLy!v{2FE_g-n%f9G_^fH1%Yfa&b#>dx zmmsSRP-R!`D4UHr3lFD|-JTek)96??YbnYb#v9T%Nz+%+A}ZoaUl2LO2r+9_NzCK+ zZjhT7^pDo$tEuZ`zwGfWkg*5kd*>cxY`3IbTYgY;LMIaBJi9;RA5XJO>nAMe7`Jgt}OCTAVWHflC!$NF>^DFSBJEIsX0*y{rNE8_AJ;zt2S+m~0%)@;#I zYHr@lY3b*Bl3OGgW=-r&-y%fXwxCl;SVvhnPZ*!TMrlkoD&_QQrZT3r_%1{(f*-sX zbFq#6qZJ3!JhzX{i1aR@pRim1$r|fLgN@CJ0Tg(34VMlEbQ!%>Mp-LX?#4;>wrJ(z z6Lj%OG{19ZuKbXAgk$$w*$L)3IT$kq{u{CR;sV z$3DeABT}XW8Y`aHk?Z@=;s`JrGy~QX{B*i#0baylIuIK-wh}ZI;uAfd37z-o6ME(M zM8|vK<$~Ii13#-G9T;2)$i_4hvT(K885XAX1`_MYCvRSFakkB;ck@~~FsZoj=6FZR z?dCkY>w6$(iUEQ=c|)6EDE)|Q6qW6MnjDWl2>Qr)cvZT7LHR_J9Twatb$#wIHxwq6%%KiMPwVhEs=HYS* zS@ESTk^ag!mq!atHVvafY8wlE30Xt|uKmN8u2~IUUP~OikDl|NQ*N{K}PEK?4}wBS7NJS2{dMKPMsJkpbhQuF{SwhQ~zLkP&*{PF|Oe z;){kqD|36?|G6#${@BgC&vQ07x~_J8GPjFtE>W`_e<*7`xBRh=U}>oP^uBsKtnS?8 zL?_ns$=2k7MxOSHikFfZH}KuhXgTZxCi1dS)(AnYymo=0q&j|0PSR{Saen>C7N+~E z0UNBaI;8SC#zeEsWE``EX}KmKx7i$&s~ALWP-+zgsenVYmU>DZhtvm*0p2NAJOc9Q z3bowoWmEEM6|llsmRbgAOW6`iW~QdO;^=AAN_6R-cF%MrFzHV$CA(_fS~XM3g4Z+y zIpoe&I9eOHs`ESz`KW1<*t|t?&H}*~exqiXj68Tcr|^qX3&u!)ih`rF`Tfjujg1&h zc-O(u>FKxc#_qB(I5+f7W=_6*1rLf#%ID3P+OsU<_RpB}D0H#NELWKm*Kj!HcS_}q zbMEei`J!Q3@;8%gRxvp(T0|c9DR*nm%<#(pGB>r%o7T=i*by{J=&-_TH{c9&oEW48yNuJP`v&D&+3YG@s{D3~Bg*6bPYDDzn! 
zFNlq{QQ^jE>Teqw)@F{-z9h-92qXBjP*(BE;M$`oUf8H0tJo_kV zx~Oygs)9@B$ndx|&eA#Qa9zVVM8YE$(7Hp${~%UTa5K z{5O@_AL51E0t_@ zG~hElg4W19lUuk~q>Rl$57F~yA9l8KGiGrWkxHnEeLuzFF#6HUCgsb;sNydZCq-7Q z7h{%^Br9v2I-IPJz?TzPM8>fz=nM)A6^n zsNm{On9@s|l<7WO&J>+kg%QrOHKt}-;72hBw68oD9G^wZ&93pJjey5873!2>4Acfm zE%9l(g*uInIhEFZY?$#06!~N4Z2D`v_!=*WecCmR9o&G1QnC+yY+r#Z(plT?N^G&r z+?#P*6lzeja@_b#fSQTh52`2?%yMwWoU+-(e`p`=la-u$oH7 zm&K{6<7K-u3+2>I61?Fe)m2s1x%WELnnt8FgPuFgbWW-Dx(2DPn$l@T#;EKGA(Rgv z-wq+zt>2%j*Im5^rw~!+(Y{w-ppq>9QrU%ZPZuO@GYm7{qIeW}bh*9OyVrDW-jfH* zKu<|Wx`N?CFyeZ}peyKUz+Rw0GA(b?1isN==2>y6_ftSv;8hfUHlI{ynsY+ zR&7hRsx_#0y17NHNv&N|suB35Sz{HSu>0~9jcHjU66}TvRk}(o(_*XcPGPF6ciktV z*>a1j3)yI0HO;Wt8mDOIb$I4Zzcx~zd3MN|WbJGnf!I%kU@*qP8M17GHNp~g+e^Hh z;K_CFL%1Eab{s??hHR6u`5VGFBI~9iCg#efi6O9C_#OFWstNvd)%S5C7OxBoj^MM% z$PY@Zjma*>0H0Izds^xIh5Vj!=fZ2OUeT+(F)$T|cxKupzv4WI)o_9By&(S?|Eg;G zWV6V%&UY;AYs@Q`=;!dC<@8m~wzj7CK`%Ndf!#)7Pg}ca^tK9oIuYv;!$G@_d#xk1 zrTx1@3)NzH4%M^6x$%s;@;QfD36Zun+y#D}Rs60I#D!KGt?q zVzj&8BiwP#T4(Lt>)%otc8(Tk#PJ2~W@=Z?mW>_qQ%AlxWjw6%IV#1H3a_7g&7v}K z4Ur-Zpg2iZ#O+2^WvG8k+HO}_x&>AcsBZIND7R*g^7v$PT60FN9dGG=HOFOXe7%>h z&%lDjl5?vC)H9o{>7P6rGbxSjq?0MZl(8bdg)U3u^L$6fughU6Tf0YvA%xjWrIm}n@7*x$uc~PC5csfIsR5y;R-UOvhec-C| z@9Ka-)7qC@{AlxUr629OZ3P+X>F-%Iee^vjYxIFDPnZTsx7AHl`4Eb`SNIOQcU$c| zv4$>KQt8)>i29G=8)+*%)|GNpgU&<|Yn6n)d{Rn|Fkh+~+DqQTs(nu%ZoDE`+VIq6 zj8aTidyXRb9W{QuEJ$95XjX~sO)W9R?jXU(gPQKBix+29`SG16bi84y7#syVGAp+o zNaSFqX0q?y-fTi+XH2rrT9o&QCs(O_bULMHu^Kufs)ct4Of8PDJ2?!IA}oN482i`< zDZ&F1OqwDy76k)x2-zohn_?(~K6nBmFR5-w^lSkpkKn81jW0#b{)+Z~M$T`kW(MGX zQU5E$n!us0tsBA=daIxId<={)N*6dLkkuN${w(Yp?L3rkCgZS`Y!b%RH4fXdg?a4{ z%Do>ky+2?yT9b*HUq>HKIO-G=P^u5jmB|);xsbrtxs&Pd_koh8yXs9>B-?7X`(PR$ zcWRYw*y)Q}QcBI)&}t!C8MW(oHZONJC01t&+44kC$^P!18`6j@U9GfmByN@_2ZT>#pxrbM@r{hQjkTrx^k1iIrC3UEyGcI{B%JNk7sW(N|be1a^EUb#4Ebm zjV(cApCooa=2hI|QH*$lib1zdXDJYrq4jE$ap5Z--!M7a4{ur(nI*$*_vdbi$rcNB zSGB69#q)}tDanafQR4q#dehy8Pm+;2>1n-bzW20nK9@{vR4LWCFfa*7b6H{Yq5Py<_u@~^D}HT1)RTeNQV#gCMQLboQ<81(G{ 
zdor}Em>T*N5a0evuB5lAbEvTLNQ$f}5lctTka~K(EiH3~^v*Vb9&hzHT|ZEw&$vw$ z00&=`KtI(_QzBT^4VT(xgLRXhHHOUUNAb@Wl-VtLlhIqy#4fz-A(r3`qa7EF8K(t# zEG3%^!2IU&5MbZb56&>tirod#cr{F)Ny|7aoSLwiq?rki$|^VD`{{dpE%~ZC->}2J ztNqrU@Q|taym<+P_fCe<8L{$N`{}#bZ@lZfSrMx&(_U7DYI7U@EX5G}^E*qPeu?mhCR1Ut-bQ#|K>Y=nq_Iux#9 zTA<<+`m+3asQvKs)z>4Kn_;WIs*`7kC^o)WVH3zVzBMzu@!BpA50Uy9d*}miIj&9b{ z^|MbN)kc(Mw0XjHUFAMfTy*Z!w$9Mgi?s77Q12o|(V6zNA@_T~rK?5W7KK7qI>h(l zu%1Ww2=m4-q(9@GCBT_h7L76;eZSrYS$i}`P}80sWgcY}b1#+QEwF9jCzjAON-tMf zc-Ml8jE2&Gpk$PQNL^*MK<6GieApB}JN<1WPV%5_37d%%wcN>DQuR~5EmzM!ddil{ zSTiTDrd@U2Z*MVFUoZhi#cPj{uUQO~eZ~7YugSEJokqe9Zh6+f{%M|&jdk!%EgxUY zW)R7msyL-i_4@rgivVg^%Pqew81QW_J2IZ%>uw?K?B2WOo}s!`S5){$jgKKeub(g+ zC#l}M+K2FrdU2(R0Qsy24xf26iA8`z*)4|7p2I$eCAA0Z+<|Ps+iY#<@|y}N+V*!S zj^3zV*A;Y{pPl4CSDU}{c~iJxFnE<}JAUcen+m04$?XV=J?y?Iy#pb+mk!r<>>H%I z_j1|KbxcAU9~zFnsGK3KM4qOQUk4Y}*%)tivTbQvQv-OFVwnkwtOWa(_!u}9$phM=W5U$eNb zFhX$^lSIW%E$iG&Hb(mRjui|o?_f5(;VRWTeL|#+H7g6h)tW{w1!$M9C#scxqD{ka zcd|-}jkTJ&gr>&9oaD6zL?M{gpmcF^^z1A7bAvpc7oL|)t&iIBbB@4P=rPYJ52z|% zCuCiEi{?!X5S6Y_W#5=#hSzSx8~9ndcr`NLA&YCwLF%PCfM;B2IUA|_2xa&#?x|(m z;2y~B;^?OdaSO3@Ngo2*LSI}T3lswPlEfe)BGZ_lrNH>^l*a7US-4_F)^D+F@F8o?JMI5m$)A%y_2l3qFn*494 zzN&#kA9}DnwliX1B;7IpTF$JjF4WeDF-PmQC+@g)>#bAGXX-p$Axd*sN+=Mwr1+{& zn53VYG1F+eO2i{`hVz2>f+XfFX)ZpJ4#Kd`=WFGJ>niQeK@e8JOSx5byGoFN)@2G)17;GN0}Dwx(`tf zf73bWK)`u=^X$E0z7TNwl3u%73QdmR`kINxm5SAqG_2di6#J654~}US ztCvJCnTb>T?yFh~-)N1cN>B+0Hk;f`{DPoOqrI%=2%S^@{k)x*;kM4xX9{g!!>pK- z)%b4p*@X9^qnvaHr#`s|{o zL^=Ui<5@KC+A5{2AC}Ap=+WDtgs7CmP$_zyU;>g{7C6Pjq5guM4Gd^wrrMyQynHgV zJU`CEs-ypkQUWs=V2FskMpGj8YndSVTl^*bw89r7_JCNhWr5TNceBQHAkJG=E=d>1 zl4;I^_Bls|K}Fd^!!c#u#5zM*g3$75`h6rPnbh}Ua1e!@E)BA)xIn6xYEhS`WmS^K zXVqw03?F*XAVunZZ;yHN0d{*6y zyv2z=DI2o9IxJBFNxJhA0i^AP=|*dkg|jg_$D+hP&wL>X@j~0!fuzM!H|J&o2VteW z0LAn87yZ%|;D&`=ZI>a7(wq&jW|^Wyu|Sf=#k(I&M=#N5;a?@!ri@5@g2G^VN=C9z zRfQ`8l!dT#HS6YsydN4QFsT06LQ539IP{Q#a})^m@O5c=jMfbI8s6+`cfM$PNDbJd zn(KyjpK0L$j@H~aJGXT&;kh&8ROsj>RFEVi z&@EfPpz)N#%Gz*^2S&Q0OI 
zPg9CW_|UK?f)e3NLNZ?ar=6xSy|t@O{?2A}&rPmbbGR#1J`eY7r!?R*)8JL}WRG>8 zix%gUbYD7$@Mya@esi4WO+1*EUTTbq2593LV4wEUd1r>?AV;jnseVh zz8C8RC|BiOr9*VW9A~IJBF>mv=~`c%&^2htDF-HMtAT+L`7^0QSmN2JbcMr>pj2$wb|rkCHU_^dgRtH+7u7uRlS zce%#^TC46L7o=f$QQBV-Jot+F60gc=i;4GXGNDSUTZ2j5E%{Ex$%AF^xQUIXQpBy- zifLBUV8K3eE1~6r=1{lx=m65}&Dy5By^dv%QI3d9oh?!-O=i>+^8TejP3xG2(FP=3 zokJwElp$ZV@*d%)0G}4Gu9{S_fO&krFv$B+`p3owy~>5rzA;#;nISShy5cmU>As{E zFGr>4w7Ln!gk5iPn5$EAcVuYiLYr{?hCS5J`d^muIy)@731QH26q}rDN|EawV0A!=YdF82 zhfSdr6@$lS5iuj5S7?lDx)0-DxaW#m0peJT@U$R^J6$@)t#uWs^Ejc>5ay zbp23?m|~dYrBOHKsG7Of7#&!Dm+SB|-m9$*(TuvqKtS$?^20ws?9|IjW9-T4@Zngs(67m|w(+7}Ms9qGOjtJyPYix|4g|(`IVs4m!xV2b zuDk(KUEU`?=QH%YY8XefH4R3~8p5k%sWvq>`FJw9toq7)TW+4<(B;@ZAm_*#&^GRq z;^@|vP^ouiNR_xjRx&+zK^jr+%U4#_7y7~iF6apterm$jMMozm$3Wff=Fq`wP8{M+1^b>sT?-uRN1?tgLkqj? zB2FwQ{<^DU27S8B0omXYYZ^E#o2l#cjaKttZf)k!l@aaBg%Q2RnS94*sb?5fi7pl~ zz8b(bkw2OMS+LV2g~TQJ*ji`GR9L*OfB02ELoo?#Dm+N+z;4Wmt-&AWiB#7}U|vEI zJPbB@_5XBt<=<@W?OyAg>Z((UYo6X7TGI)w*4U!08eSo0YDlOWN{Axnik?oWqG^mJ z97STN1d*7dEv4o$lo%RAh#{ykrrcETz3=~U_pf{H{p{znKEvM6`mXP8X>PP(OIO)p z&mUNzRQaBRIp0DZML(~ye_@x6BBfD7f@(_b&2Ka(CK5QC?Ejo@1kuPHSO>Y@xgV() zo|{jcxqs~!7}O|A*{@7g{tB)70QLLT+vd^v>^OOUCT#RsSBXo)$^2&j^yfqi3LpE?oR=X`uXF_4xf2vU zY;?D$^Kg*Yn`UKC-A*JO{D(9CKhDwe4pl>c7t4R50RnxVDgu_PSOxNwHD;$@3mPIi zc&XZ2ifA}D!y1~+rcTuJ9w(tDa7;6umKHpBFAE=z*2ux*G*+s{{4G`8!bvMk3M?99 z!C96d-w54=#u3_&?nH^5#9K0Kl#%zw^QvO`nbO1>*f(>Rj$SCK!y$Z(c=B3WjQ^`~ zLa(j%C}b!B{prK_A4@y6Xlm#w z(O1q7#K!Kv{Xl+Vx4Lf{L0KIr9CP|+APS)&41wtT0ThqbM73TRF;Lr%5_a_dek@{F zRKcZHFx%9|?)NW#F&ai%8A#A9dwQzLvpq@9T5PA#SO$8kiEW_?`+ho}>$Y>cZ7^}4 z-Rmov@-K+|NAoVQYL=NOxaT`!fe^f~Y`*;A;EPTy$%Mi;HGsL(Dds|MRb2BV)ZAlr zwI;}6Cam1Bt~MC!ml5J?g5ld9XAUqvOWJ9QA3FlZk36tztJX>`P(SzbhVBZHVfXJ{ zta-#tJmY>t@{lS27J|&=NqeVc88g)#J$b3NtZ0qe&x4@lX?qObf;?{18=jQ)R#J*w zs`JB8Ovqe&*^fy%+8&*f=5!tFJcizfY{`OFkvh66IP6||1pUF>*NcX^xANX0#Hs9S z$?4Uu04;jz+qcpS6BpqvKQ1MhR<)(>t&I9^wTG53mFF-%;Hv; z>Z3fy<}-a+Hg9&L%)&CBS^*6jQw~2H1hp34ChxtDE^aXBwy_Bos`D!fQCiPn`Rj1b 
z_#8xlhGPhEV`oPkbOWmdE`~FLYbUfq=TAD%dJ?5_^tf%H$}d$CWv>n{SP@Gf1YxRe z7*AW8X(jQlrA!AhM3%Q{+BA*gy}UR3Ui=M{@^>>(O2@PFyf0ujw;N$*Ok6M*DAx{9 z7B@13iOSG5(@m1*i%RpucjE3imOuXE5fPd9`hKAMTFq4USEd$xBO^h`#N4ZXsS0lQ ziv)vyq{=UcV0e862=|vIsEfp>1-GyFDM6waiH7DnhT=y?J^hSax4_%H)I%%94&pX-Ifz^0NYBFZ<7GAm(`8Y7ujkmZfP*)1)MYQ#Ds42a1^BjFm(1b5* z7>APaVPee~3aGov;ChN1bD0f31Q!}Y3yuD}}ur|s#uIP0I0iQER-HMaoLbh^BW33N! zn4wgAB^MK1P-^>cdme~7jvnf#aTY5gdXv3sRQ-DY=5J+kWz{I~c^S1z35W3Sf$O~N zpZ%9zExw$zyxUictzZ1eh{`mIg1{TXRHQAyA{X{lPQ){aYT!%$Hg|NpH*f&5QW5KL zs2kcT!`kTUtK+s~&IOE1YsXNB(^8Ofaesv7F1-)J?HV*FDHwP7%Pcu3mFiR>p5~k(1q+3dxzw8mLeMp-#VV6nm2g#@2kT4ti(S=r{`e z@bYLzzF|T1n4{6CKTBHL>hz!1UH3xgimc}2D7Ah!x})et|Dl_IN}h=2Mubm$2z!UX zR+*a7>&Nf;q#op_z@ZOQQ>6c?M%6H)Y9};*3G|t>vk9;pFz<$_-r^gV-(Ope#XSPq zH^Iv9^cGfUKimkSNo-Z{8{Q|!CE<{UaR%@{@P{SR9L^MN{3lWLJZ7bB`Vxo5C6^T1 z11z7Z3|UchP^&5e9%syoKk4^eNF%u6aR_mZ`U+gw^J*)-ETn#thl*NC`Y6$t`w}-nIZPk|MRfeHhH~EO;~6h(9DGCU<)nKXhv! z6E8%`9ubZ!AJ-awwF*!YPO$o4j-p;xdJn`k{gC*SbJq!rHHt3Ytas(Vw%~DlEBal; zPaKMQ&mIcvyKsz^#R9d_X1EK7Y**vzAwjQUZG0T5Qn$itCp7*CQsX9xrI35=i(M({ znbQ|!V;xw1>Yp^u&gcBaK$xE&CB(XP#pBDT;I(S-+@1hz5ORp;ST$(wO$n*c71FDI zo`SyS6lInnR`Gc|TK)6^p}$E<(b2!0Vy}a53CpT?BfRRI=+vb7nO98}|M)76ZQMHv zJzXYQ8JgC0-7d1%b3ws~+aY4jok84Iolg=H204S%vqP<3pe#BmuL3GMUrf=bhDcYa zwNLQ#C+<&Om1yMT5#K^LNy(<5Q&(rWr94o!=uZLtM&z#qU^H#LhUP9gQ?|x9Y{So- znA@~xVywS{FHdXiE%-|JVKH{Girc$R9W;g6ZJEcEsL0Bh@QGjVgWoEp!Ic}2A3B9R zR4KicQ8V1tX_Em~Lo3qovc{^X zkTfQ@^SarjNNw@wj=Mz?l?Y3^{D`@WH9ItY?OIP+aLC#+!BrMe;}n%c*Lz2o-%KuU zD*fCc6q}_T)LLy3zP!YJ!xP%?!JOSK9Hp0xB!$%1MLNM`5&}XLneY7h!U44d<3r>G z9N{RfXA7m$8`Rc#@B_ zgN~pJ!W+042HvpK!dwuta#xd*F6t~1md+?;@FHp?{PfW}=fHmh3erK+#$am@4$(26 z_0VhK{-Gw3>1mt1i)%Is)lRf3b2ymAGVR8Ee>ki59m1vAJI@cQm4ne_Od(B&GRE?W(@8Zk8*=!{uHV zc~P-}XaV8=Di_Gg0_0mJ?lk_h(9(U`MK&*h^8{qEE3mQr7E$ zt%#4|gE9+o1}QvThq00S-%7P!qVVfm?VF1->4y}zE6yQ#N2BMCM`M`$k>B|DjQ38R z;rdtfUJh$U>x|kzZqa-FtQpIOyaO65`yq5I((SYJ>9c#1Hf!q@I>%xMr>0F0xt_^>J;jCDfz+~0%N$>#)IKVV@7A&f 
z+Mvzu>l-`Zapjnilq)}L!*6cf{y$FbvWL^E#}=$*)KOeS>Of|o#VR5qE=0nJf=DqUvIG)BY%5g@LJpkxK%#Q8uAOBnp2CIlbe z3;WjVTO*^I4CAQ-?}N`D9oym;V`Q}U2K@J(Ix5RoQc~z@OSyemg~2^cJHN2 zDLdfb->?#Xc=om5dxV88`V{`+n#bg~MXx=H)$cbO=7`aebIJFYzg&aIx!B|X&%L60 zjQJYn=XAdV&bwaHMdtb6?{HfVGhu(0{HHs%y+a1x8X29)ubT@inNh--|NZ^{^5K8= z;e}ny*=b@$a+u{?L0k>1#W6eKhd*PkHopsh06&%orRaF<0m5GI&)|>O^PjzY%jcC< z&52gf|Ka03-|$_LeB-UxEN)KBL;62P$lt`MbAYU*ho}UM|td^`zyOuP^bMR9CS>u^zt9 z$oiz=jo^QiwDFfOxnX&8&OBeAc*C>7cP@kRTA#AXXYd_HuEmy1U!!X9hTYUls-F8i z|Mt<oSZ@?L%C)$;7s3#B*b%zy2LM<4kRKD+8-20!Np znKkEA&YqWxIr{j0!e?#hp9~AHefDnaYhwGtK?A?fpaahXCojMLzK(+DzL%$L4Nmko z#MF+c*SLSEa}uy*nm)hD-(SD&<4ez8-DWej|MlB`-mC}0-UqMp1XX^k^)(6PFMRd3 z{eI6MzRne|v+&!F!M7RVg&(H9rj~Xa9=u|Tc*5q3*X}Y+?tA^NTnc#CQG+#H^P2qO^>?{eJ+IiITR0u!y`TRu z{7O%ZToFLOnm&i$edD#BG$^1v9eUuw+fKrSofQ`7HmH z*dAOcwREhmerbr$@&$l6z2SBH=?qK9qpyr~F7lN&+e@ONx^rNSjg-q`LhZ*V=cXa)q(gi&xq9~@$R7wmDaIKJYO2=LGo z-Z>@?Rq$e;DOtrf1}G#!X^($?{o?w}edm@6hpuk?0$leXi?nIGfrl;7dZ8!0=;G$R z^uP~}T6rx6APw6vdLM>%&ku%Yb{o_o(#C6YGK;Pf7zo5C32~1m#cPkP3tv30pVTWN z!K(tP>yyjh^7vI=pJc%+7Itst&*wzu{*do6|4jGE5b(~5^LK3GzK4}4skZ))a9D>< zG~`<0Np}xSzX$c|JyChbtO2CUW<4jpg;w%de7yL z!AqXqWi2i>*dWaFY^zfS2Fb%o(lerl_8T3=XKshztO0`X**cs%Bc?YP&wBi|;TD%F zIWlU7t@d8_fMZQt!>4wg+5ld1W0_}a0ULHl3Ak{UZ=RVId<&~Io3^ZaM!cmTF~F~U zSDZz^jm=xLZW#nTSe}nJ?0}coe!8Q-Dl1-N=lVCp8pS|TfI^n>;~9s zUUBP-6OoxNx&MKDZ}^bV+qRrr3WdZay!#Bn672d9Bz*mJe@7NhqA#s-$J|GE5yJOsJ_lU^IthGp|LxUPE7!dpEv3MZ6Jd$oV# z4h$+IG+h9j;>xoJUMQPfJ?)JDjluq}IAKlre?^?vf1)c?x+#Ks89~a_kIM=A3H67C zqzS14qrWbh74Xeu>us6F7ueC^|Hh7_)>@+_J|^Gv1n%`7Zhd=w@>_ju7ZIhGEL zrmQ@`FVw&LEA9AZ#Y2}id5dmbCM&4O54aP%y(31f7*e@D+XyTl%LJ8?|H>jBTq0TS zUq2u>4Z5jYV|=+YyR-bzq8__^B4@*M zTta{NuF>VJK*ct`+$QSg+9iq4kKV$c7bXT`7ws9Q@P|rUI1kN%$1GNr|FxeGj1(z+adD7ykN0d`|L-t_lfz zeMFr+_h7Pcx5>@(?=J?@q#9pdXpGum6D&lesn2r=8A3HWdeI)K;%dHi9#xG0WZ&%f zZE}*=%;V()N09s9IM0MI;fD)IwI@o)r9&yJs1(ut#BQ(Dhe;f)5Effsj z!%q`x(>V7qb}AvrUpf+-#)_vedyo!50!y>|zYS`zF&6DHSyfCZ99{j}M_ht9{uB>^3^b%cLVCH$B@T=B!}Yxjv!tPGZ2(MLn?v 
z6AN2K%KI%H)r<<}W%Ys)zKFAzzB!c32yxzPr#9&xw&VN9W%-~uSNGWUhdN3krR_)# zon$urFo9kWuO0Zc6{pzO!B}w?YrTbjA)!eCTtxOOdTU&CGHPyD!1p_7 z%6c&qf0K@{eLeA{& zasLdBOo5VNb0gZ~t~OWhRrqMHR4*Y3{F6py*-P<iPamh^xIR=xhwkC$t`2I3<;GuA}Zg)Q16D_}d9+LnYUc;t;1OD?AK17}kdiVIZ! z1dXpiMK)TDBn6Gz|5^-O8P4mk>W2>st&R2c`)9T?QH5$dIZ+P*R8iTgCwg!_|0e5c zsgu8(w(DX7+Dc+aGcjGT-b$U@`f;RCAW@K=5Q8SC?K8lo_7EoigEEkI8O38$x zRBWmW7b2M?nfK;^29!*lhD{UNcW?-kSQ&n&oqChQ-T`V4uApkKiz-m%=I!vLJ7g-U zk{2WOA$lT{x_KeYsz&81a(IJn#=8jMH-s)AIr}qOE6h#VLHIgqO0$amy9{lw?$JE! z%w}E^a2lWa9x0BXjH&tSq@F|2-(*2IoIG7`EbqvYbaP4`Ek1p1h~sursFwuI&Q|tw zSs~ei0;Llx$vkUb2FEEmQ&}DnG@BBuMYSa}8F}28d-FIE)fN+W;oNsAJevwrHw7_9 zjT$;~fx@|d)b8NUJr142HSYDX5hdgd)HCH(X3j(7``1qagnf(vP;ubZH}GRIv)!%C zb?l;jbrk8N=j+HFVZ9+;DfP4*WZ^|5FSlvq-(;8u@3=G1bnZ62!|LtN2h_jIBRj;{ z)Q_>ovrEV}h_vN|6;A{@1wG!>tjCMc?ZnvSAdxk$M(!;?vfEDIDyKY&;g0X?Xq|U} z+ek{;K^O0)*ZU=|7ABAcg2san_t67IwRp9v9=`&qyBTtRSW9KfDH4&Nod&+wB`(V* zRA28^9yeB05@G|Xk}HS;Ns41ApD|2o3)!ZP)|L{`1CdNkk-R;5Bsqsos)+3BB>z)mzMzmpe zR;%lpt!JUmDZ4b;l5uwzu4^+%AA#Pbw8_>KCmrJ9f7ss6)(<_Sou8`wWuc43Pe!f0AjV2G5bT~#*9iZs`Rn5>Fx`vO%AW-Paz`x=O6tPV zVCM9;xhO8U>SyiK?&=;6aZzw(EA`5HuNdxP+AT~iipk#E z+I?RpX$9Zuc{hJx2cM{qh)@Kdza|~DVf4i$Gg&lJxSnrvY@42jyX@8fB)m{UJJ8(t zBqW=RjA)*8T)L!Mx^pCMONOh08@C)#-@zP>GC`NxwnbTFX!l9;vV-D=e(9uedsq(M z+*q>vP~|GIzb83n;ArmyE3xlYv@U$I;dw!uZB_nyFD@P-ui((lN;Rf;ep{vu4yIYc zg(TD)QCiBe5ZaRuhc7;QF1<+MH(T+c@nu@WSK*dt5ko(X;MYV)-DL@;%q5Cu<;r+e zefNLDi=O1wX3OCwd`P zUt7U-7R!9F1&bs6F)n-il=nZ>ofb87TV$G_Uz|jH~fD}aiC6f(FVTdJUf^=Q!7R}RjSc=q)*>W|f zO6n)|yvrOt;ri!Vn*r^M1>b4eSAIxj5%R{+Ix$Azfzh*)(%d1j$f{x+myBA!UdwGa z+5c&H^Ss_$`o$5xsm5zq6mRB%)YyBC6O^X@1Ks0oqU-g~gpKUDzUZ{VUhQ5)^a?Ig z(oD_d9LazO9yutn3c%yOHNw*G|jn+4MJy#W?S2=1Jo91=XEraIn z1mrt*%K39#gZ#eFR1$(t>?{^<-ZMv^S|5m5ef#^Cy{Akfj^MQo7t(9u9(Kq0SKnec zajKHE;ToT8f&ZNGs*;!$0=jy#p_4F)YO(MqO6#=`Ra3twCwAQ82tI*I_gBCn6b+|L z(;@W1Mx^!0yyl)C%jAvnSQ@q)m1Ff;X~?BOQ9&DB)oEw$X<)&Kizk=9()hA`h?1nj zZ7kgBJwWq8UF)6@s*a|rD(n9(N+_$kcwbMMCKj8zwT&}+$kj_vJG|!Bxp2*`)WxG# 
zsi@R%S&P%_hfa?^BxF)6T2lb#E~KWCyG;DidRr6+=ic%Dijl!|m+k#@3yEDfC&T(q zI8hb9LQww@t39q8;S;IqtMMNfdT}k2nU-U@g3o9!@r|4eCY-{&uGUFq*XACU{LA$7vlqv? ze{#|$JW#}F+Yr}G7WDUrMSZ)q|4>H=b)|;AQ5!solF|HkBkz5M2qedPl@KyGMR8h~ zk5ctdNW#&Qh^?tA^R)ASp}NzptTFP^V^9FS=`i{+X2tx2IOF}l#J1FpB8*m`rCzbg zE;-at;N@w!N7qw!OiEDJSMQ(3vXmIn*?EV;>E30nxV-~G(@wtS46mwDF2yq4<^tiNNxHlD(ixv)e`dPWHMOP8R<66-O3M4-SpW)pvcLdo-PWq z8zq6c^#zVMZEQpD_Wn}`qowXyN7taue1(2**qZhjX6i>+O%A!YBYB_+@9B^cn^xY- ztU6k|T1i<*CIsHz&IRm>Xx9V*$~EkSZlU=G8{yCxmwRl!vr5%P`o;P zQuX9!A*+QcFU;p6kWysZ8qe-=f(uz4K*c(97_9LULZl0`>FyPEQXww6MMwuaPVNGJ z+ea1HPqhf4F=;{hlcD<%+Tbg^R5)En7;2r*cYI;u)&a$ybDs?OhoIQ5GL-lwE@ji# zP^+bEgNfTEI0LvgcWXTPg_@Ii`kc=|p2pfW?ZKWc_Elp>!xz8NmpKt{LR*oiiMW#7 zSJG>vQJR?7TPaE-JVSSWGgMrmy0F`y$bIxJvD&JB|z zLnhVC(P3{cBV{e7+P~jQ>_mA!d9({#5S!jjSDDyrYqdGU7HcM>sAiFXF^UT)g=32L zNpe2{JGDsl_?$4IL$Jp#JdY|IvNYLPh5-Xzg8na$$t68%(M;An31 zs0$sr-C?r4+)BCX7Ge*aUX>s#mrC2=V|0+9pzT-on6qg+DF2S!(iMjs;d&2%(WwR-| z-q}SNZ+&v5@yRxa^Qa884%$Oq!7UwbxuQ1fPF#J73%<-Fi}OTzN>`L;;=Oup#Y$$w zNF^q76mkipZy-yWrLCkOd@^EW4NEEA)3{ki84G0AdNoQ&u?5nnyLnm2a7|S<=6)8) z5U29$gK{5@QyfIehgW3eQ|VhFTvI~J@o<7LZKfr*sd|Fb)BW9>Z852J(){(RA>+$f zO|0oMh4Rubsb+0-*9cV|Zy4dcH-=vyQLft8&B1Z#iWU}?B%jn#1GTJ05M~c;ubuz; z2tMv?9%BV(y?D1~#PPgdCs!0Ig7W6R3>XJY(`Vg0V= z@!klgYN$dKScoKV!badYwNskYxX~@b@%E6kByD?5b+irDF*+F&lvQ`0;hmuh5CwgK zk7%cC$*5yL^kv>pSEN;lDmKGI}L;DF*T+D_^MfeJp*8t5|lbE3z3{4&hqK}Cg|}>f)~|ln_Km(HgPCc7~sJ;9gsZ&6tWSE6_oS$slJsA5t8F z*%(x@*h~{P;=t}`GU<$yDK4x6Nh80INpb09C65nSUmEVQAqdNLswWud&n!!)G~LJP z^5+X$<&i^T(IGHh2yz~{OE%L_dUm-)r7KI>5wmewB4|cFyMVogA}$M8T9puO6YF~> zocnFP?p;D2jW33>V*EP`W^hSi9 zwd@>;cnSGo88f}Vj8hiM9A_q>)29Z*A#j8JFtV$884HKv%%q-wD;i`>m4`N*%h(`R zwak9d^-Ib>@<)+-K^g4&+z?FY!|#w&kt`){6iKefi*q^UQ>s$3O%R!aB@gE?o|+53 zp%0P6oVO$8HX!uyBIC07+XU=@)4S~)jySUORKMG#p8REkkAYgBFt0K~81>s%u;&SR-Sc$g2y%c!7G*gD^p#)@&=+&Q8!pS8nRl;0|C`Ac zaer0}j^TR6tikp`yf?%NuTaM!WfaRqT1I+Mcg{Mit&!y_VxJ6EpRP)%j7GQ*>D|$} zPQ28NOXCjbB|;v4!%djO0o_V}N156jn&U4}CJe5Embr7XsQdwzCW)z7pvF3{Vjw3M 
zQi@f&U15+7^LlcXfU|B=dw<nhiAu%o@ko2nT;ognt< z&c>tBeSZk_gG5nH13Lt{a6BN_q7;42G%W_fp~Md^$?Z;^E}cZd*S?)( ziw}-_&@XIJ<-Kg#I4ni7B|1r=zS~nVZ2B-)6sf&LP}-6ALgv>mV406q+`i+}JnjR2 zfOYUb@9}fn`P_z{{BQEC-YZ<=5YK#15N#3TPm1Yd=6}NJV$|bH=-PnwqiPdn*1pq#%ImC73XWojEo3<8GHKc8dj5+2T?c z`w8Z7zpP2s=>&2Ww&-hgGetvE9S|Wm;CRWA5W_=2QoZoz2eCFpMO~ej>nhK;Alwe$ zs=Hd~?i6%)GF-Du6a`X?D#Ds4mXQv+qKSZ;|4P_15~K>XaRu21@r>G#p)fn<(HN zzOC37;}V)rVr*@55hhf%4fh1vT~rRq9jL0P2RmW?=fBkdjzz+n;`Z|<*YRPlR=zFO zmKhhx9nWbNb;;{1&$;9h)64CuNZZ!pdB-2W_lShb3g}K$P;Ef|f(juP5S4|ixRn>{ zx!9u^N9+{o%0i?>KnkAmy+=HXxf>PPzIg(PgXm@(Ulb~&y>T=cR z^#XNnPLc}h!*$k0kMR<7$cmucSaF*R$WcWeT-OFAR@rG6LEn=&_SMLm_T%CDMs-tV zjk3{&?RZ>@{j`d)lHotlutk&X4kI80bUeIpFXlYMziAGewmCGvUcU>0OXoN->+$61 zz?}08Q6PPS!gI*RTMSIK z++6b{nYMSt)fGeqvSn}622vGMuoEeX6a0kFwe8<5@6hV``rBLj^=Y@3gQm;Pb3MjOs5Dw=I+J92uM&PXmaLg z?lrdIyllqQ`iE`tzJw2sQCR48ddX;p+Y9f~^jF(68vMO*3TkcL?`mr7rH>a~fqaZF zpAc_jgVv4oLR4z8smfMemi)zM$Yk%4b)$8M!@b_aw?6J%e=#nHYIC-rfQw6S&Ge}C zWUOW-Ls%x%=Xf!p!=aN(d*aF1uBj}KA4HOU+cP=I-$AN7L={?^z@+lrJyoFtVdXs0 z)EU(ymZ(l)dvTQpNptZ}?_N6zbu|-MN+?It3UO-cW5=pgBQL@NiaEf!8lWOBk*1JB zOvC9x2w)rWb4X(Uz=w5JpkPAc$+()qrNXfAAGq;*2}F5qvhyL@hG7l%-egD(`p=a? 
zBl{=b)+c}LzEHW$;~a0zu!#qMPs};SuxgJ>kxNAm52SGO<~#di1?vw-A1#lsTr935 zkv_VCSr)=n96_=J63sb%*eM50Pc;go0#E2Zp<$C5WC?xq2 z)K@1^Bv+e-h@w!N>2{j~Z;>>|ZpQ_jQ-=F=hMlcwnMMk`q{Njhm-3yAa7yf)BmwSt zkES;4Hm=qzL}(8QqUB~l9nKZEtGLWI)}JyIBQx1D6Sv>PUr8=f(%@l{!QZV;o2*1( z=O1i;uPdi0%N@tq&Sjp>D`;N7bL#1=GmSj8vRWGhi6|rXQJZUq2F9P!T&2#<_-7Xl6S8ZV4dBqjszkoU zVt)|pRP1%5JnL#D(JT;DYrG)l+5Q31mLv_bbZ9j@&%ExnnXU6ZDi!GJ(IHI*upFYfCNs51{Vv1wIC}Z==4(~8rbXX zZf60FgXSY~S=qT(3+TbDsiz-RLNP&3mz#icm(COTfqH?o-0mHR2>#VjAwKFjBxo3+ zTo9Q@_Go#v0+fY1Euj*=bcrfhnrISU=-Ue$lxEO_TZ$$B{#0hJ$b-1J=vWXDNLW?Z zk#?EnbH(I6MJN8YV2H?Rj^7@x5mYPtH;gWHWoJ=ymqL-DYkG!%EQ6;+i@inYsS5g4 z{==z<^yg%~8m(VOP-i?+Hf_%^{_V+V&sL)oA-dac%k4X4-yx^TJP64A1!?p1R_;^n zVZXdDx%u~+!lEODf4s*n%8KzM6Mw_Sp3_xWsa4hO17~h7Nv|p4*{%@$s`Q|wwN!&t zfsW)v^(aPr0d>5Qprl?0NqMLoS7b*1Dxz@Id^k!%2lE4`cnLGx?pjzaJTE&Gl_FjP~TSWp~RO9TVhxoQ4O154-{ zfnGihKrQK|6Q3*mD3uq#bc%MvNcRCIrEl*<;2H0+L1q{N37jXA+G|j3S|_=Qx}=lR zQycc^`5#zLRJ$x6*D7K)d8-22n+H?L*hr~CIPo6?Ddcns968Mm)v*L>wb+jn>PfCw zU}>Xu!!^Wu{MAsV2rtL?bM?(GT&$p8iFX32QZAU+FxlvVt+f7Rg*KI-@I`+$!f)RY zn(48^+`Y-o#0`pvx80Sh=Nn&su--}p+D7sBK;&-lHT|YHvMMQ3@jFa~zg3kIHc_i9 z)eafEA{S!@TzZ}E#BYlZ!rq(5O!R^{iQ=UXNf@Wv9K`%Qe>N{8tT+EOJ?`f5^7xi$ zM7N7S22$dxM^1B8=8{%o^k6-+;_&BM-cty+4lIWtZ|qd?yfK!tAzoGyE(K!Nfev6D zFBgfitP5qvJq`S+UK@ktD}Wi*U5PKC=rw@vdYw`~t{>;e$>dAU z*jV8CmPNQ;;U(Gx(t*f$o3}~CNTzT9+Ifwk?Zr>mEgTO@tc-7=NckibSxdwu=HhCW zMd;h$V1y;_9?&&*NwaMg5+;QsfN65totW$%qydjQjg|gHQEhC8eS-^3JwP|3Ph~5i zN6;#drlVk_TF)@~#{B!+i_0vGx5}?>MvUSPMxCiX|AL0O=RKkc6JQFGIZA-cUirQe zTa#}(EMBPIr7xX=AH`#4jSgr%msr&(i^c9ePEyjDI)r~UHxEl5AMhCQy%stvsr@OY znZo-{gq%NxUK70Oyl`6KM z_19fKur=X=-t)Oi*S%|WLUR=)#FozkkA8dpE*6aJn6EpUDX%qojiMtLzvR{(x^lEV z=S;i%GC>aD+Y-mMcN;^QkDmlVceP8bX4BhpG{9UaLj$Mp4rD0O<6AeGzMYhwW>-#7 zVLZ+|T~an-wspiYJ^2!#c*$XCZ|i{OGO(o~r$Sie!Zp6^+9!?*o~DS`aIP4mxNdZK zwU<4+iT2Y+nuY!Sgx<<68LJ>i`gCjZH__sqZumpA6ugJ9zr=AuMG|poPVYbHeJhQ_5sV&Dbqh8Mf zow}y}LEX0b=;!!|hU1T+oKX;+9*fyXn!$sQd2s2qFl=DscNdEbzt9Hx?Uw2SaCjap 
zE1+?!KyvdoNwr~_5yEvvDw-G9^BnK`WL>=>4FR-e(LW&-MQA;Dgm(1|nh0x1zF=+u zmwU%X!6AU{cWC1*1r6^;nwFF&JnH^KiIBI#`0yhn<^G7SR%D*dc_@r?C_M|P1S9Yk z=t$r8oR4auyZlW9Vb!6c_lDqD1-Ls;(TS4=pQ+*;1>m*$__x2ubDN%BhGmF~tW~fD z`$o;Bzof2~MB4xjM*FotRc#j0R_;;|@)9Yl4T`O|Z2gX{IKsK%|`vgZo@qIRE^bPNsk(Bl5te94&!i>r}h zdgipk`Z9;FWCpqK`zL7$Vueb;nw1$nXEma1tHy&2+Hk7XGkZcLn2|X@(IbLGL85E# z`l|Ep)eY^o`t>&M@FL^OtAA_x#GBmSN3@DKlI@cFX!xo4aKGcX)KXi&_{S%uuW>`T z5Ll0DZ)Z`ccqt!9FCKs_{F){n?hx~&C{z0T-YQ%YlhV=-^})2k01IN?0jgTWUxgVR zD#VbPGvJ;OYTz&LLyYuP4vf%}0NVhM&;WhWdT{&kq zAHrs$hQq=Q?V`#!a0jl#cx#SHw)3h~AJ%wrkZ0;M<)Cl)&hwhxzIWf?!|)t4w9D^n zLA|g5YzAPF47(r=6?@N5-0MS&=$8Y%b<)k+PQDQMlqTZP1pawP%l+eTgv zI+>kB|L1-vLpKx?2zZBx@S7X@FFKXIE_j@kq({vrRUDi)1H(Qj6-B0(vD&RrCFpG@ zWtm_yZ;xRg#r^d|P9U(>OgK72s~HRHjSlC)0v`neU-vClka$z_)MNC82LrUiu#nF^ z-MS3og!MeuWdb0KPCFvkl*G5JTKy)eoD~42JXN*jZFW8+UErJKoeq24HrN${2bIEV z@6OMKofZeDr&Rm4`xUO6Ohmu5;k(;N45S|KUiMcrWrgv ztVoZ#bCS)wlD=3h+sFLlee-y~q=|j)L{;k_)D{N2$BFluY2e^UP|Jh^H7z+(Ui~eI zn>=V&F+QE`B%r7SFEx2d5Fc^2>6AZg*O*^HplQ(ixDar}1~IhRH_>IG|BHUMJjfZi zsyl@wlA)o`?ghN{kDR4nl6twmq_(fT^~bq?BJo3*#0}#xiOyiR-fK$xibS@@v>zj$ z;#KD$=O1j?HW1M-yH0}577A&>o|EN??mO`_=O4of9D<|upL3MoBXCmY&amaq6|K>tpgSRdx?o_ElfL`{zlqfkFt+XH~A;ejS`(rwR*81v> z;3`MJP1F__EcrF^0C@ZG4f4K+$*b6by1EQZ?in!bOMU$b2=n9o(-*f;}pF1cqUcfiAo%ebHW> zSTVC~j7?30gvnG0doD8-i8Wr5({ecK_m&eO854t>1IhLGub6>>%$3|o2&7sKVLcT4T}@VyY(O?Z zf5fvEB&BF-IZmL{Nw1P-$tQDSA`)A*zlF@~-n@ao)H3(*QPu5x@W55l8^JwfQO_5v z$hW#h6Bz!Cmrn#<-eK*cCOe1vW#*WOB~}2ZGFGr$aHAk|xE|IXwF?u(KxA||@iH~y zG!#wajN=@G!6t8+%}G!MssU-wPQkuv0XxJ{Fv017&9H*cA*C1hNRvS+tLg=)s}K$4 z0xac?^p|o$bXT2Lg-RC`Nq$EbMldP&7!jW<8n0k-X2u`ZLZ%JcF5)G~(9ywo0BXoKH-6n_az*1Xg-5(jtNHlQi2 z+XX@?3oz0_ppq5IR3ryi4QclGX!dZdUujx3(6qaH=E>;_p$LX2M?40?BHT?amlm0N z%1vy|q7EL|hBDIJ#;InFp2>sv1SP=pKGWNC@=99jiJN_1JN> zIjH?rhB6bCoi?z15|bHW+!#m|+@8tk#+`qqV;WaYQrC-tD}(Q@fh~5|M*H z2)7mDMAI%_+JR&e?h->>j?06_AeTA>dADdXA`y|(xk6l5$_oSSUtj6Ix+)b;8dM-| 
zBw#u5A_wY1wV}ePb3(}mKr)Sybl3BDA&Qu5BKV`f6h@R+ZHC6bc7d-G!YvG+2+8o{0?S2eE2RX33Ozz75lKda$8Q3GcPB^CQwlCP0lHjrWPrP!%sQEw>7mr zP4UWrb{^z1sbR#Hl)p*p03m zTu*(4DHH&N8YUcMvmN!IfL05qMwBNO%X4`CGEMMVh)jS>DykN68#~}ut(SCR4u<8_ z$x@&XcR09gW!w*&+0}0YykQ|hsYbYVZ!h8XB6b<81CI^#Q#erAa@PnwKY;2)h5 z6CiU`q|;-MBHV*;P{|Clk@#Vtxx6BNavuy(l(0^}o-#z`0TeCy3Z7-12m|Ac7%NqJ zLcy-c+QXn)?!W9tj@Xj>=;?c^TD^&XXPw#LT-0F6@(ZWhkIqt zf}9Bo;5~0VotG6;#rYmKd;#3879;nZkMJC+A^PJAVn2a8lOmaI>Qcs0%9B1wwojG- zF}84RC<;fD4Cdu{7iJq(XvW@C?bSZxI0<3?k~3G1==)4SEh{Jb6JT4AY&%!g^eD{# zaMipFlIX?29pJIe_=~YGgUdV`&LRvf1DkB+V}48T``^D7fB80R4QKV^3;tN1e?~*x zygLJ;Yq7*uHs||}HK|CB;(PxpB%P;S=cz%&DKxe6PXyOv?*)saI4$f9>yu5%2GncQ>x*c2{Orj3syN!{g+-Wet{V3jHzBx=fDr zGtTiG3%YOnX;o_`ioCH#KzV8|9n(Yu{^3iKMxlKunO_r!$hjQPudI1O8HKFEHMW-$ z&BJijf{+*Zy;(F=T4aVFG9)X4d@6eMFfe)wdAv+jS{?Yt?g~Si*>x5*yVrmHv4cN+ zBfVZ^8dQu2nbA=PeZ6-D*yyv_RS&!O_TbEkK`K$kV-HMDp3fo5*c$CL#$&NCbqXEe zVMcmVr_lCAaMA7urwKrT1bA+m`a;tP2j2ljiI7E~mU@8k+91{!?6rqy3D4CtiJQGU z@khi<`;eQx3(VuHN9;2M?7RZP!Eq1aJ&Y4KCLJ*X43d1cVM{PmHU{cXP(Su8C{pu^ z)!vXP=$+!pOH{H<5#@=aJiU;L=6=qJI{p~j`Jy+nD)upS?ci+;=&2|qNKU;r5A;No z0aB!TxwwM)<~bmS#Pn&7)g;-^{%h=-TOm+LWMIO5|D`og5|yLopi6sL>{bQ|nq#Ms zsSebY8Bubq4dqTIF2-Y^wAX1c7vK{T5ve*}?o78;TA<0R427EK-w8kb0^|f*aPGwx zF1-5;KE1%T_5>(h)y(~+8_bIxvk`ub`P}wQz1Q1aWA$lrkhk$qn^J=p$Jn*2ZPS#MI^jCNIWM)(3@@ ziW}BX##tyPb6%h~pHb*2F73HqpzQsBBK*_&xazufro{7P5*}L>xO}40ncXE|9llEx zsQXwkL7EmUqp=OI{sa`$p!m*YD0L^tO^`hkTID}|>~T(>3~z=O!*B#Xm%-9+I%8@L z2O;%(uKZ`x-R97>qko(!t^)o+QdJiQ(lz9qBnD^onJP%dR+y^; z(u27Dr!fKP<45GOq1}m7v1gSo0v_;8xWmXhJXyr&iJc?+d>c)7yLU!Dj$0Xl%%}xZ z2Qfm*YE8PceqzB$0`9C!;Vy^LSTA>1seD1$UbA3{Z(89eeD>vU;Yw)NBR9h#o+uWp!qphKOFOFxW$%%&2wKU7T!a6XpU5ciAC~HR3jigD34160j$KCL!6g! 
zImXj)=8VhE$=|_gAJ`(p3=&-tuj}Un7j)X-g3A7?TpkUFiPyOKUpjfu)m3}cjzr<+ zrwU}An&hyN0x;A$MUl49uo2<^(v4MWo{xe?eq)D#piESB4mVeK`P!v$~Pd;2CwCi!}D$VplsPPOx9?*aEhzrT8zb)$Z$r@(&JL?8I3eSq;4Zh6Kd z;4Ewn)%qWPR|S7O%HgSPRxl%bZsrkpW~_Nc0U%c@e2VV}<&*Bys_j7kyu)m$tj0Ip z%eQgw?9mp+8hQ}^IflbhUiUA+*y^!aUF|2I*JmvCa9^p{upSX`5}&uPa6QX^!LOiN z)1LSKvJr>+w1AfT{`YE*k9V97sTAIO-+aUw$3WLUu?@T$db>t|RrzEp6UU}~;I2r$ zFZ5&y`Bdk0Ci246Zz2&V@_=gJhKG7rPaiol4H*lpz|=Gjq;uW1bGM$8k7B26$+$O< z{ei6xs7{W_BCwXE9ixa!P^&v0o7pjGU~CKnntt1<#g(279(}$ zjM1q@f^sS14af5%+@Jv?#*YjD_U)?~)%tvr)%BG}kqB`E!E%`#oPmkp>?nAE8jhre zJDIHDQzpijlPSkDZ&!3uRQ5&aW?r0Mt;(SRlsPI$nhEpHnjTHkbyngi?hRFK+F=aW zc1-7w!|8_UYw5=F?O9+;3D^nE3iq4+l?OHy(hrU2&X&Hp(|bB#DXf&H(5Cl1Q)jht z+~Yt^l(qOwCZ#fPX;ezevLaLCj%>ylWP-q%T&Pb=KI*C3I}j#$$(wE^&IwBl)z=Q4 zW{vmSyuqPaRd8YmEXa^M?s%=7I)jZo&TSWwbtClZ?p4XTpt~vDf;4%+Bfqh?ncW2H zp+zFEF&makjHiaS)vGC?;}+%54D5*5NQh zTf!;LL?F)z6sjNv&koZWBf|67O$O06zAP|>9@sURa^u;C)zf-441*2W(rB@oE*Z0SFG+cS2xKjmnix|L;GpBxB!So{aTitrCrMev`b#QFJLxqOHB^rDDxi;h;tHkG82xw?df7aFEnvQ#~!Jpr1*w z-YGs3>^odDcWcJ0A_;Zw=mWT{*7{wNGq5V-=~D&XbOzxG(54-m)Yt*5Vw>SY!UG{IBdv1AVG=7m-Jno6tp zci{PJCl!*RQjCe4!X>g{p>|NR<0Vs`rR?}=#^{FO&2n!QRr4xBfBABK{L!H(J6a6a?o-bsSp2O!kWg0=)~qyl2PcG0%WP<7gb_Zc4$KG} z%x#GX@n|{ACBxkBy$QG-!S|Eqr3cKW)IV4*mR-fNS06n5IW+gG897vlyr1>@lHO=H zfCCjS&G3ck{`Y(_37vPI-nPhNVW66yYeGnQ#(Rrx`9;oSV;jOflZ()45#^nO#_0)oGzyL4@dz5d~-`YST#-rMZQcd;67)*sT=hDJ3ZE*T26!; zS}89ZS}y^k$GQ4u%G6iOJy@S3w6@EQk4fJG@pmE+8~oOl>)TSpEpWN5W+Y$4$x&oy zyv@^nO?KTbObQ%8Sjr5)*|ychOo3l|@5GSw3&eE!!^lWQ!7l1ag*#>-cz_|Bzt!XU z_uA3iEp3mM5zcG`Cn^hV+|;X1u6zq~d?J6?n5p!DRi4+ga1N1;3J;iYBk{X zlh^+mG%c&_G6^|KjNTID5m{M5J6udg|IPCVUlk)=cY zL6S~+{1ij0Uv4GN!p;Olcd3Y^skNV6;7B(VGr%dOWLGtgpbPVzj@SvhkrZGcy>BxR zuoQXJUzL#G&AM{7pZER-&(!x=F8rNLAConee#dr>;j1=VKe(8&2Aj!cEv`bO9%c76 zpAM4;*AgoRbCEzh`f>Et5U;Kx;6utZx-l~P%L#gKWH2x@-l`Un;o@0kkN&ullWDw< zy~iSwdF3aPDC+UT;x>n)$S17eba(#@NbXqGSR!1oq}t!%4w$VSu|;E;P-84Hyf8?5 z3{AV`kR#45)?&eqPp~t<8}B^>7XErp-k%@>Skv1qWpC)bZ!Vq&C(T4&Y(b~(yS>Ar zxwN3Wi*d1#f8_tL_wI2?*xZe2EO)w@l*c$ 
zg2=WmR;G0EQh8yfcu9eoqIm&ZWt0~vRPYK*QxsBC1VluBFVybsyY2n{{rUN4kL@wQ z*X3NE=e*8&9_GDMYwfg58p!vvOMMCn!m+?XImvwJ1}87DX`iky6H(pQStrbt22pC; z(ItT;quh2>CejfUni1Aq za$9p)TbTr!N)GWnb|vvC0w+%gTmT&Q4+>I1<8Y0bW<&+2+CzN0gHGVLRfd#XhWu~G z$|LOUbKxU%DDexAif^9h*VUA3F1Os!kt;rQys_9{Tw#g#4Hyh%nU`*$2YHI4#B!2V z`OUyI##)NRR5#1=s6{Faf*{vt3h(eO^*Tlli7TvlR|yo*knwx-j`W^PW>&>okf@!w zIwBf~3D20Z7$1@>P8SEfMzi2Ztx4~pSK9sSi>2@i*S<&MLsbiXPmhifq47o+!h)FF zMd7+sSb(VeW=fcnamUV%nnyyxtxs~0>7rUTdbEM*p781^=0o1YcflCYh=HDWNnVLU z_quyOdpI>?TL7VLs~4*F*F++xn)2x8HLW8TEkt4IRvHPg;qf7>@`{wO&JoFF=HXF= z;DW3d8EHZ1ql+cFIvBlUG5?GF`)^o0=EyVMSws2p&-}~GJfjoaeckBd?(-B0sjX)? zapUv#eA(C?FGC!*ms8#=kJiJ0&bWt&4Rk6Ca~&0jv_#=jke=vH|v8 zzdsAagCoD*R(W5)h$Va}@0IKLTy$+l1QTswqhxxPX<@(NB{r43HLpEkQ{OB+x?#l^;{xH70b)^lr`HbeS zAZ!BU>t`04T074$&vpw-4%gBm#2pqR@=G;pg!u2V zLgd?gNvm0r`=aXTf0Q@uhwT{IhoCQA&6aPpVtZ&oe=P28CsnXtZs?6|kPYJ4F=a+N zJwvi;W*i!H27lx@)$Hq^tb3orzB)TwWUmbDEO`!yhffXYD&&f%5K zXBdkzBY4amLtUb=n5a4b`gfnke?Af#d%)~G3ov(JzxL~hxfq1CIvPKE2(IJ(y=$gp zOi*r5FlJ-6mPJg4hxyU%LpY>vP5yIvG25CEnT{A1hq|B|2>giA&^j8C?>b40AqJyv zjI~lKAM5v;6MU1UmUukJKwIp$~G~m1@Lng-0%bw-gUp;sKLv2=H-Cg_lTmD4(Hj6NdT?wGH$fbc7t4qGZ zjJ#!RToZxmLx_E!J`lk}*iq?s25*6`-WV9tP}AUnGM7dxdwB;wCu$^!-ww@r8q`Hq zM!Z;z_b3)k@E_9bWU-0q15XeyTSiP#<;=M1#!AjH1;t3_-=pgjjq_cZ1m2IBZ^iFZ zlTfzXzt72}Bhl0@x_$@$+}9~Wzi^ZD1=aX?Ypv$$2S|u%CL$#3ez)l{sUqTW2XVbN zLKfY!jB`X&c3r?r+yCqiFM`xKYnda&5>~8e#X+pepW+q`Wl$Mo-WEq1B-f!)I^03= z%8(LJ=q5r2aJ3A%b`zKM7sY+3zv^TA`Re;kpWLTQHNGAx29hanOgS^#h9Q^4ue=f- z3pds8laXbNd%m@U2PM@{4nO(q5<{3=4krywHYGDDa21SO^GP2E6~F(i<-@zR22^oD zTJ-@7oRR_;$rLd^bc~899#(d@x1Qg=cLE&LJJk|Xb79igyiaG6IDVC-(AWxzOK={(#A?QB;3Gi)5$Rl=I>qJT`vHg zW$islLPrZMn#9CWMsL`JsPd~s;q!H65N5zYu^_kR5b7dt=sW||n{D(O)y)nb+2Y6< zrE^qwY~d;@a*|Lv5Rw9nNgn695XK#g3P_6}Dq@ax&4Kfmm8sgAkcKIl9Ue{FB9NLn z_74ts=Yh1zn?m{G=>cGWuK9qhsTm)o5}qEVpcZq4`cJ#p$j@YIt*PE+fg>+lEKb$3 zC}q~vtoV^0s}Vm(%3U6<;C7#@Rx&dgCRWn53<}b!-uwZ^p-XvBO31(^XL_^`6*7EB zNs(R8FNJqjR6cezq6SmZg=m*;q#%{`#=$5&mf~B(5Zc5moG4ryG|9d=%I9FZq0PTs 
z^pD(3OI}Iju2?r(rmUl#hU9Iqksk0pGRZyjNiCOiOO7(Kgdu9e0IgxZY#G}r_Hvn` z?VdSfV<<^O=FRoLHLQ)RHWORh+)9gS8=ue{r1l|ldKfKEuHHQv8;^WX-9{@^ACNP3)^gNZ5>=N+-gUP zR8?Mt_b-%PzXcz1P$Uad0mWTfVVaSHA_b5=v0@41y?_an1`Q<#VRUZD>Mqd7+siGv z2jonfM2W(R1v>QsCwlH`#YTI2Mqyg@_oxY03jdmP6T!EXG=L*X3Lg1x#WU+T#C5vm z2hGfVL^KDPt)p;Rp0K&fj%|6A8#2MaZ!~0R&mB?8Tz1eGkUT zWqKtxzr^lY=X?c+^EeJV8c-akN^p!%HFU;hwUZ;Cov$8j=L~+uZ?Bpx;YLr%h9NP! zh{77SJFuX}%c9S_>v=epnNj(8LnFtkljkM6>>p4AdcNDl^36bQ3G^M~oflT~>lOt3 zC-g%Q^p~bqKM!BL@K*3`M{|4_O^FOn#yfJ$IAr?cj=k!`^qF@_YM9&u)>Gm?e3yo( z_O@V_`V`30b*bPe86`j|IPYDu({vjB-xdZ7mDxYj{+L;j!mqSN~6;j`~Kfck-(cUct@`w3>8|Y z&F}ZkRRX@QzDM0R@9++wsuyv@k19rX-}qizKlTl^f?d?;MHckCp8@r(*Pe=%AXISJm*H_fc$9HK9RW>iHO z3mQ-)lH+Y*>DjpJQbWnmbu0XWlK{Npz`tN0b>U$f3)9+5g|nIP+e?zYJg&3iJw9b=R<00QleiPGm;oWTp4!T=+MOPJR6EEbxirO1cnI> zo*(vISB{IZMm(oqKc0>jO-yfIIIfA-vUq^WMd92hViKE%>#>u=ybjK!d<^M`6B`V| zD_IqjF-5LmQ(dx$_o$sQjd=onIAFlaCJ)y2SbA5D6AouG2RQ5+JepZ@z)S(&WJu8y zM1@|o)rC@cfvkNiGjv>5X}`ZSif*Rn?Xm|K^TLx<|= zIMM)hwO6RWn?RQX7z&7bEnZN~Mx?+MP5?SxfmNQr=md$1EJ~Zu4^r64qa%3n&rES0 z$rds@QAJ>MS4ud(zWJ#TCeZN}Qev7r$|cvRfPXNYpmgZs-w}(mfU6vgj-}EOnLVBV zNE!%LQuqW%H9=O@){GaDGn27~aFxsCh+Zvbtv!EwLRfY7(1X#3yW&0ND6FbtGBgGE zl!s+!7-3o8%f}889`;Bw5nVZkGn$?o?W0Mu4_Rq9Ku5}ZDDrVp2Isek#DEU9?Eq!V z%FwT)Rh5rJP>P9uj_e^z3W&%@NxS9kJxD`bIo~+3KaDapU}1p292-yejMRQW(p)t4n}DMPj#+gZQ!6 zYn4D~Iq1YHuh4&D1H)76R^?M!!)xq&rjt9|r|Yk>_}lh6>(#XWnYDP`V>;c?R9D=k zY4J^?(yRqGak-pFTX^fvNW~ClFs|B66DIVqeCXy2>!7nYwJ!_9&;&MpRTygkOH#NC zs^ImX|FgsDf3zz9<_mnHq5U$isalMeiNJQ?O1CbuURn-Ta&_G!I^D=rR|ZznaD#rx z9Xo8t_2*d~@RoSvs-mu?$sUq-JK!F8x@BYlt92J zjHCYpC5!Ag(8yZzsx(%VZ#Eq30bkrwzG=%fjMGUMdRNnh^AmQpbcm_00xZa~eWJa{ zGPW~P@TmK&VW#jYht{vNx@_2V;|-Z(iBcr9s_O}&eN7mM)~U!l>ZPZkONAsiQES9i zlG+wjr;%kB(CdcGC{hb@C5Kif4^=`t$e3!6u{l_Io+{iTuiiO*@85={cu&teVzshW zZ|sqSx*$GVm_qUy&@&5Q(&R4>z zGtV0<(zJ*S4UZblir}$A&%?jC4JuR?ZAHa^Q@Uf_fq$^nhF$r36<=Q&h0WAJea9Y; zcZNFw`pZ3j#rr!&h2`M&LKd{xu<7p*Jlp^1r%!NoaLjRgP&i5}pLt+^6gx31|IX(A z!VyX3fQ#SdCRS`jlj9*UWoM{&vC%*I)s@YlO#VuaY{N{Sdi%X2zn${olXrf*9JEPN 
z18@5MEz%or07T1fj-D$3mF^l$$jr;$6X^m~cZf>-`@&=JreeY2O}>1fPWEy(sLFby9#e8=3 zA8s-JjO&%~#u?%Gk$O9S0O58(ZsKa%c8sa2SM`xqO4Vr1g$~~IT)hvG_w;?dv)Dgr z&ak(`?wwj~mp}B*SRO=dm-|==Co*KZv%6Z}2+%fj_XyiZP%V+E#?}L3NOd`A}j|vqU>I(T?;q+k~9TeceU%B?m<4D2Q|G zlw2@7TGC=*6Y4oisp+R_ebx2b0(hp$e04><$QVq3xyG#XRr4^I*Nnz6PdtyfJbFEu zc?P_68l66ZS3iqRO_iR>0jaIx=9LiXp<5BvbYBeTNUB@><|`1sv7o}nqH^<3gTk^} zHWI}bZV_~rciK6E*pA(fO!TnO!Z=Uq>)mxhkKGUY}P>T2!XD->3B{vvvfi zOhFY`ssnJb9@n0$ZtZbBvuFO5%#ww-;BR%AGVL{rg_Xf$r#6VVZ79a*{2%8nr^F45HeR*ZyqT<7mg>dB+G%sdIl+}!*J zn852(!_z-8rSW{C0K%{*z2wA~ZRED^_=N^ow+@J)@WWSBhMvtU2Fx|G#^0*8dOHWg z_Gd%;-2#1Hf+B5g&yp24jlQ_xqh=Ug6m1`03 zR2SOD-fBJ@6oDW;J8pGQ%<{LE>GMo3Ks$&|-M%}r0UPAZ1H9~iErAHgs6ibEuq#99UeG75n%1R> zN117eqsXAr;~8+djNOxr#l{xV2kWu16HtK< zxt>cG_%Q7Pcod>}tcbFn4-?oc2q{GvC)=)d<3-a@)#V%g1CGEM2$@qUCGkOtCciX> z4?ojB&iU;Eb#XtaKXjCkB;V@GYp(2>FC+GTrQSl?^cE%`eKxJh&JvDFR5J|Z9Xua( zRg6QnoY+526avKb?r-IXZnm&~J>!qi^9=~4XCg60eh)s2C+t~^W>6!or0HTdCus1q z6c|3rhn-F#$alF?~F>ch|F$__BfEg#}fZjweezFNG+B7NUy)qD9=}l=#u$-QZHZHR3 zl`>9{H$Sccqzi)uwYv;vOgzqL=0)*<=B}w1#b6*Zcc;_x>N9o;^x3)(T5{s~qh05? 
ztIh$oLrz$@8>20L8;YauBIv;1+J0qp9jY-30ZtNW%Q7&V>}rmMM2hy32UUb!A;KlKv}FwbS4b7$?He(}M3 zhGfw*_spX9d~2s#*3d7`K!E0;Jiq3fH@P=KOAu-x!Z49`5=W`-h%iS1En-r8E>`9u zgcP=}@lKp^RT8hU;?|!FC@bStsuIk1O zN3z7_D!Uf5us(4wj%HN z%Y4OpPOpOui8$OaawC_FI8G3J~MD zB$h7}L|Fx(16&}Qy8wkx?Ax6dU_8v!8*O>xE`uMZ6Mo|jD2=bAM!!X+o>|SLXqz7m z33UMFx9XgJ_tq!whp*1&F*WbI$ zhmuY`Ia`RBa%{4n}oz?0V)oJoWw^?4-@!*tI5U} zo~|!1oLNYmwb4$KRq8!>t$-~t04-V`kRR7mIwh5*3@~c8AM({Qj8iJA1=EaXpsmYC zGhcULB%iSl_=DA7rfvaI2U>jMhO6~ny6cMa><3Fs*5%9eAKGN{mcDK0WdMV?A&hI= zgRasQM;-lYxQIIs`#I6|lJ!AWs;~(vM`+ZW&guy&@Q$O%>Ps$@507^We8eLnFD81_ zDzzPH)f7z-8l!8)cToR8g0!SY22pWmRu`mD>E6(MH=6Y=^&Tq-u;I$&x?<=M$|sS_ zq#`@fsv%-zBOZ>=rX&e3Go0{mcuU$evZU{BF>_r{`Nc!mp4^Iz#dTyD&)|j%@55KH zppg4pie*T--y&PYWqUx%Qclm8?X4G|L~A~=Y)3VsMD#^c8axpeXNMlj=0j!fs93w9 zKTK@0aWE!b*jg1LrS;+8Y19&cNJquP8mLVj3zWNkb#3}J)VhSb>VlQVSKPzTez@f) zGDd5d!q#Va3BDzJJ&ur5vt{Cb*~`mNW^Xb~nJ&S+zP1d?LP7rgrd1)O9^t`*TOowu!&(nM;1!+&Q3~s6tl^O6AQJt`CUBH>B=)DN& zBu(upq^2J6^5_M+RGzLOeVJb@jp{oT-DpGnTOG^Rasw@`c_5K4dru!22P>pQl0{`SxGg-+ z&dyo^D&7Ed_(O4V`5Nmb^ro%b|Jom zMt)t&4k|WmH@#LI0p!pR@w%wYaw{lec-M^v?86zDfxw)xp9CB5Xx>L-SK6fHD8?(S zG_3064ME9_!%mQ1gd@LL7Q^wbVO?iu?grfOh8;<=AI*mVh0Tj>s))xP<|93C^G%Ha z=4@lJ=^eK*^<2b+{ad)UwqO$U@L@r>3Gi9(ID}U_fqF5Nuf+-}(yj*}(m!I>YLSOe z)$1K)gK^StncK8}xPG@y2ruo4b=XZ*$fEBo$XqF!3Fw7pF!r>r0IuTtG8|`e7(rwk z(D%x^ef`o*MAwI91;67DUL?-qk0`Mi{!Q@yPU^P>Nm`^_WlP2AG+5-KSN#BRM z5eT0o63pZn1Xb+NA!8t3;Y5D;%mtR#<|{%1vh3vJ>^+;PoY*qHZtIc3Xc^bBq?FeV zwT+DhJ@)iWVON!zG+lK=(fza?Ih5gF%&$mgcP>ZpmOJvt0!8N7eSd5AY&YHP*8$V^ z^hkFO^~_vbmCeNS?6tTZy|@*$N&#yDH_`?4sS5y0`zcc?Z5^7^{YcrqrMtwJ zB#S;V8fqKMHtJeX!Uk;hTs7%30VU-wcI2A_#%zqkKRNPOZpMg7>3tNP*09@AEDvi z5wM$@o2F8u|JXwykCsTbQ)k(FX$=^e-|qJxK&h!4wy39XF9R+AZ1ko)zHZ3TndkFXc%5FiYucj(0e<1v9_4Ag!7FzH8GBr@u_kgfmc}v- zvPwPj9J40*wE{rF>fcs?Fm%XglaIcX;w1VHWQ@FW9#;%VMK`rnu|b5-+Yb{h3c_SM z))_a&qERivCUp9$!KPdO4=CV6PV`;3jReAWPK=XsgpIiBmd;leeo#=+< zYTcf$chIZ4R~RUAWe}hdzB=6x=nrPtP#cw#)B7!;RmQMy_ZlHtr=Ub 
zCzYW!44=`OtwYK8{fYXBu5w4z_8)}`><>mJDR@-NEGRi!}ebVJLv8Q_J=OGuF9!?*}&@6OU~ux4J4L?PyZ(BjbAUpuHl- z13-&D?^O8mP#YPel?HtWIE1B)Glc@LH+XdGJvaAY-G7`OLy!>KFegpJ2kPT>Zl#hh2{IP^_uOB zt$!flelcds4}Awokbn0Z-c#Tw3{63`?84DeZ0n=-`9kC<=KArap$td-=e@#gL6->t zE>z~}ktG3mB87phm*@_Aa}H!8>;K^n_GqZRCQNku#Pf=jYz;FMC;JCQj2$>~U8K+C zv1VoU8Zd-`F5U~r7Ug+2mEuY%GbjS|gG~ptIvMA99oYKs)%4X!|Gm=|y$Vbfe$E3N zw0j_4Lqv5F(XQFc`}WC2d%-4}&~Ty^oe#B5M~Y&=Y&$POKVNhMDr-v-cBvUTMUtv} z{?8*pnkFLZ^O*u`H4y`V1mPVCA4ZZA}QimKeM@{hSdVC}i z=!OV+wwp}s$=RwCIZ#m!239pw(hSK>{l2d_);{^fBY7$(U4Hz*_Y>I~7O1N{PUiiO zfa?4+)@rinMLHN>64cfQzev_{4#EJ6|$f%Va47b}^4j4^Fh&Rco^ z?gKVp1Qk|Ap2nMoZ{bZLWJBxi?+%mHM7!G85$0hHboNHq>x~-UXBw)wU~nMdzp{iT z4|>^BgV%HXCY05E7M3^2T`5XdPi~nu(mIaLi3nD=4ffA4!l)ZjXR=fI=f{YTrF%j? zQ{>Zad_Pmei2{&ILnp3Qb6tWtAJcmiQ2p6l1tgo~BEnF5mGVRN=a55GIxNdF4#M$k z=EKQsOj$3gT)nk_Y_|L^?{ozH(|I1Es7M&7X;uap5%&0nWD04@CA?TkSU9-u~*qDKwB724>6f_)^Lfe-N|S z++=^2d}L6`+aegh7=vR_E^g-+)&QuVA>s-Z-a_k+Fe->l(K|kHz#sEvY_I-e|HFS& zZPX4huUtZK7g^{OM;bo@2W{FlT$vtf!%V2mT+JYOli5hCo7&=JSw3n1%(K}$%hoY; zsuV?YPZqq^0{knmnc}8_=~Jr#z!Z5iIw0DWk+O*2xr6sq)^N@?cCu^(b4Od475LOY zT{tw|Rnzr_r}vW`Fu?6Yq;J2yWEEtIIr^79=F)R#HuPHJU_7AIh^mf=04rO$3=A|( z^k#;^vKhXlbVn4;E4k;QwHejp0}ExfsD>X}^>i_pdF?yk9^khkiPLFg)di^SrsDw+ z&Gn3^Ae0R7dxD81Ky<{NV!5l@<59&r80N)%+4bLL2W;oY`Thu;+D^KcuS$)%)|BipgzV&6tGd<&Ks=>P1h?whXh z8D;GU%4#pG?fG8%6o-E0r1{mdsa!qqaM@avB5H^oVDhjta$OpMLGdg27^oV(0r3_E zj`DxJ6%i6r!6o{Blg%h;uHyZZ=t|8ULwMiUCS;!D?ciDtPhNaHLq6c_UK34`+0JQ)JE)AABH8kPa5KYAV!hZl!(T|_qHv?y$_ar_T-!f^Zf{!WKKaDkeI|=v zqTjs-vfs0WM1Q~b)@=&j*N*`^bhvKYIzY?H{YVhZe znY3nCazVW`0Z}Q7N##cVPp1RFu~qe=e~pa$(>RqrG<87wc6gO~>Nv>oHSfanQSspy zR^zI|Us%b$dvioSac-hhgj&;V64N`n+KSIr>}^#e6CV+g^W(q`O)!s)ihlOrpO(qG z?vT*YV${{qS80gp5O@l-W^3JUAbP$i<`;8}#(q#hT7W|sCDFOEKoQ0ja8s9#Ztq>D zqwHLd8y;&9#^}iy!~GSFlp(5>?9n4cy9GIg$dNAon_&3k+J#&-*?xWqt^eG{gF*3o zm7IE$UEI>089rl0#$|>L4YZ0Sog&~Wu2>g)7kw#aa`m^Tb~XWqrXX!f;8iXnRN7Pv@CdWehhIjDxLeq8`V?4 z3{R_G<{oXY3gWTJIwXrYM4|hP#e~6}bp9-29~QMj*8ef9P*gAt2;+pGlUz!B=gIhdFy=T@*n? 
zGjr^RXLuWsS*Nf&tL$_zxL1dV6j8Oh$w7~6I)Jk*9-H3)G|?X^<^*6h&W5(yj6knH+wgbUW_D&z3#Pm)V*#dXParSO06fMFv*X zL|}~4(2>q$;ocH|JFU>b0$Jst@dzbYZ9heYi$q=2_yDQ3w4W1JCNmw(VCFL)vQ2eE zaoW_(0c|LlkD6ffc45-?7UmgO6&|b3-o_{81p8$ z7GMzr<1wIAg!kt#6{ZTf1|JeFE?h9@Mbbw*I6wAC_Qsfrj;Y6bn@kECsz5;o2$iF< zZf`EG*3%RH0a_q*ahRi$XFq3rv?p!m-3l-dzWZ+V){plW0cN@?Fu9QPT>aCR`LkS+ zlKT5O^zGT>C+<2=1#urVs(+-c->s~27k{@nGyS?y&RzI~|sQa=yq!TpJ?bF{S z|K=Jy@##LlIbX>SE7Et^1k$Br^j4&Mp;KUBob6nwdui>H(+|foMUjF0Il);@-9HcJ z1ZX6;AKByl-6FYLB%{Kl=Xyn%$l#!Au4&_f&-Ptf=$`POZ?k@0F}Hn_LQKU+Fs&6d zwm*>~^B<&2BW-k9`^%V~h(QVm#l3U#>lQo|E;{!V4yw=g!i0h(CA#o?6nKk~0qrdgfnj@7ggt>rhMFDOXh- zb}BH)>8M-r&F@z=|C2Y14wTAky;V)K-SR(|NwiOIV1=F)=$9lT4ez^$-W8mx@$!p0 z64j;TpeJgyXnm8DA*DNGQ~F z0U~A4ChQ`#>v>UKMILfJF#^Ku9j5Adi$<2F)ZsaeiCeT4!-vpxJy=h357LXZs{8ks zpa(NxKf(Xnc+}_FCuNB{4G#Lv-p8o74Ygc#apNGI_3<>JyqHwc0rrcwFigSt-&3PM*sG2w@ZJVL}g=r02PQrSb zLj4{Ym}1F5VV_wwwuPv?``QPQ#flJyFeT_A3^|H&*($S2zgcaI8f0~HTohloI2~Qj z*{qX25#9CNjjSjSG?7^id5uKtX6{%MuqgD9n*X7+fMv25Q~iru6`H={o5C$K4>xV| zc73`dU}Y%u1h6lq$tp`%tk3xNUb$kI#43uKLYd zr|`hp*#dnxxuTTUnVX20u1}U-pNzTX3lCXx^O(5=IxksvaDn`6>7cI(gSezBa+&NY~T`W+8|$I}VOQ@}VP4A#aHfYW1SQ9#Fn9ZOsd_}`WnwlrF=3Q}CYhoEp> z3WCSM|DxZ!TT8p+GC1rlm13(k6PPcHU!sVJLzAT7Z5swXZef*KkC7ej-2Gh>Zo*Ef zp#t+9dV26IRAd;WIy{FHB8U(i)w^7#{9z8+G+LPwY_m_;KFX>L2HR-{^{);RGH){< zH3Sp$%wxI;_>SKwa8$;KKlsD8uu~l=nv3Gj;DNzN+hBFSnZA4pr7CjXuHp@+rc1}t z8b4}OoQ$e%j&Hw7*$#Gh)|a_5NY&bUX=nRMG8>^|a$;dQ#B}yIiP0H1)6odtD3^5- zX9~+Rz~ZHbWVo69No(2ef$9rO6}7ym{fVh%*udu)3JmdL;sO}(>Px;_MoggMZ%Q(u z3JTW)SQ>pUy+6X4WvsirLVBtgvE`(W7w7lp%1NTk>sA_dWst{lDuZq9Uk3RvZ|Wx8 zB7L(#ytHZ9Vc=SJhs!+EB;Q))$rdN>rLyHW#IUB=-4R#Kat3t~)#N)$QdojOKO4$f zP33c4*xaYa@_j!2yP%GtVN!BMyNyynWY^jI1qz=vEQ z!}hnz4w5|D{a=&>Q=NtF3!n-;+EL+nvy&|Z1Mei2p4)Durfg_GFt9Y&ntM0Di|V`Z zg8R~$s*k|gSwCl#UfK=;lY;Hr9R}nC>-JAL$BtyFJZERIP7=mBPN0!nDFh34=#2q= zMMQ+VS@iDqq2kNWP7~-Zs0?3WiAzVS0A7<7VeQMwG9c^8+Ds+U1dQ(Ts-b`m9|<{4 z-I31;`Jp4ZuM_K+Mo>ua-~rpoUM(yqrzZ(YcTj&F`@L`(!-`)j@|`2==|?1VmMfgw 
zQai5vR|1u=@i$&X!--wR9U;;c2jn$PCkdb@w4I#9unLm3HXQziT_KI4b{T6fsM3bU zBx=ktcvz;lgZ{KfEKj?o4{>9iZ`W@ zA}V3~ZF#&$O5-w&a9nohZ0}fr>OM!OBlv~##g0B|Xxd-+D+y|$06tR-yJhaI7ddAh z1pHA9qK<6Jzk4E*1T{_B0SR7*e$%|pu5m)U3(sWzy16IrbMsD&cPgK{Q zojwx&=Z`?ub`v=^1)Dn;mUmE|V{(;xUN2Dr9jk)9mbJz1wX`j`FU%eMdX9xO**6E{ zFtuP-GvD;2iXaZm;Mq4;pTs4^fI$QmiFw`%(PIIsm2m@4P!*SnHZs+qv4XuviFc|A~{DegSoAb~nG z#O#q)3G{Kc<%o1%r|-G|JXo&#*#ovxlezANY3c6rPBj*tm^YNB(6uX1yCd~g0D8}c z7sz>7UN$N!D(N4KG7B=w$135`V_qs8R`kqOE-t0!DhySV)6*NOu|G@*w?zZ<1}|elE5%mNQ9NkB7JCPDUJn znGEi~m3GlVk(6sqE*2YMVPJV111Jovjb(Ofk-(t#=73|)RxabH{oP3 z52kLM!I>1z0X>uSC(HI4kloD`uDG42Dp*2GO$K6bC)T6#U_cdcH=BaJpeJJY08t7} z52NNu?Xflz^JHaHETQsoeyPiPA0ft9v^GQ7DtDrieRDX`fgfxvpuUw)#ribyz+)EL z`+l*nHZ8V2H@uGW%kF)G5W7 zHRg!(m^2CkekS}Xtf!dz%lQ;`MXoHR3?2Afa*}?{>aIy&tI(Xn>*rg94nk$SyM8** zo|iTZIu9N0;Fm@2F-}qLd_Q_HW-JSqjQ#lSzuKFdji!kLNh)CIp%BEf4>5 z%YTqG?>l1ccg@izb6aY_s(tMCv#j?q-yl$x@%=9cwyWJ%>DeWT02Pcb1OJgr*u6-7 z53`q}Vl=bwe*=^wn`KQHY4@cQs}iB8MkxW~u%C0}g|~WD)&~-k%OFlv#=Zel;Cl6> zI!GOt3Mt}{7@7yl2|^=vB1D=*#CAT?u$x9WrKB@T0Eqs=`TvC|1`aA{qOqV})jD)G zo~HUGPNr&5Z4Sl`5@cSeTOyAuNBv$0MO4qFKW{kYz;}h^Rl_V6ND#*t-irA;z$U@0 z7K5QW4YwU1d~3t3XQzGRW_L>o=XaiZCefWUw@yVXj9}=QPsT1LyBp~o_@M(#mVx$m z0-Nv<=F2NkEZ~?7?vU~$IBvdlqZTJyu=idJ+vz3&3?9M+c4sc(cJv67RTvLpr*+sw z)_KnQvmA#OC+lutioswfi`0F>%0^=;OwT6mvJJUv8-;YZAb8vH$7t(b@di-5Iyx3x zA9}K-=2x?vJr52)QTW?C**i&ahhsBNXfgpKLOnMDiWQGlLo>bQ4S~smH`qGbj zi*H8Ecl4DVFphAJC{aTWA}1iTr$P33@Q06wtRVIP$(Z=1r%@T;-^{>2W7N)pHn#;uodg zpOiDrMd9h(AW3x&HyLf##Uq4;wdu!h3Nq?Lwes*ev z58xSn(7fRUBw@gOuKNa#^rhH9C4<1kuw-Fml+*mWB2LBZEQhze&(?|`RS)XAmE-$> za%W3W0g9{3mmV_!y7SO_4`RuY03=!UXXu?(+Wgf{)p0w8fcHY}ck0Z6auf?Wm6}>q z)lu(yUsBHtW*Dd0bP(W3D;QR)$cPqcQitO23!Y_{DvdLGOnS%vLiFoaB;_CqO@AvX zMJ^bbx7&8WyKmdP6@(tWj)P2LbMDYpBz6*+hU5m=K3#OBx2M+GwuSg1iJYyK1LN%0*%#;Ps(k(w}hECwiYzPIi zR4H&PD{S)+#r@+8SjAr#ra4<2{n`JLbUj5KmbN?j;G8)za-~^M>~^U|t~$(pcA&TY z0f*ZHf~p)}y@O^MgorL&&~(sxlD=KwkI&om*TIL6<8=Ae zEn*AGT3P+Uv)@5ana3?XYhiWDmE2Ls{CSPfn%5yNFU&9M$3)of*~mB&i{?Txo}HCr 
zW-^!XI=MqEexfQ~J;$)_v&pFHrB3sgoa!Fm@`$drqtKZ2H z)ojI5#E`#B%Nig72}|*;z3Eg$^ty|5sMO$18}juzA`2?a0_2&5-$@CK%6MOTE6+ zpzC#@BmM0`L*1&0ig+d6NawR_RWEG@6^C>_yRNJ>iL(Wc=(<9yH@!{~Pyf;?xa!Zb z$Il}UotU^CWciRZwfTZ{c-NX`Ra2=Vr>i2>ZxwVWh4r=`RA!hCokU-r5yfnHT1n~^ z368G2KJh}6#x&<(7!pW_wpf?1jjr*9oubvPXyz#Ix>zTbZC{xD*BG&^^TsI)Uy1hI zWAmY-f@{Ad5HmfPEOV#iL?kJ3>6jz^>BPKUR0dFmG{%Z=ygU74N0lxdmE_1&Mr`b0 z+Oi*@$c6rl&CSGl+b+I;5CEWHopBnHdgGH4*lBp8*~!@ZLbj8?Jv};5wB46n!4@`} zrgM6twYA9;ZN_{~*w``!$adLt!t#T+Y#7|XfpatXKLo@9nb&AqSe@`N9rAosEMeXQ`-GfK+?jEZdiFC`u~#xj#{rACIQrvioxGL`Mxp;edhQ09 zj|JNt8Wt4C3f*c06v9vjZ--q8UUka#L})LMu~8|yK9z8biEoq#xdzl^Chx?jcrE>6 zneRdel*uXA%9$)&YuWbiT`PAmS3ELZy1dc=faPDD1ks`t(b@2#eO+I0FU`(6Teew! zc^s@Z7`EFU_*SefxX`jQ-wCU)&Y0a<6Lb^!W)a%^noarbpH+DxnkuxWxF=cN*ime~ zWZqc+aNjn*1j@HMP!=%%aZ-(oB7xE(7Wi1?6>N-V48-I*fP1d9$|NBMSTYfmAcI;+ z6wX&j7SnrbkWMOJtWup#nM&N2w>F3TWWjO$BdL_RA8~P~!9e4@T>~>w=8qk$&G&t0 zXK`I)tM*nqujO2rFT{wJojy{)UeWwy?VOt+dObD09q)Ixqcd+;&`1dopa4h8?8RU2 zIO&>IZHxHXzs5K2^S)|a!OrXh%dDYc>BLOVvO#=(?0!7Rq_rnx?3at#@?775SR#H7 z$Onb*UV8f1cth2}lP^R3{Z(eAQ&|WaB{z0?v-zA8xwCKA@$!@?4}jN1tgI3LUJ++m z=@RibbF$6~WAZtsFvVCov?927Ji&)|b3%S1!*O7H-oeBjeeE_KN?}LkL%*~USovsd zoMLr`>}4$YF-)@KRD3hls_kIb0cOr$NdMlewnh=YPouq3^`~{PzKt zN8nEjj~`o%2cFa3>-g~>2+fE;CeyQiV;O(G$PXW0l(j$Zblv2A3N)-py>lH}c~)2~ zQF!9~r`N)D31J(F`19dLjpWYkfy$JPp?c}rz!_6%9%~0Sn9QA}gb6`XwUI0DDNC_J4V0mdM&@7QRb zS1dCB z;G2X$7DKZ~bsc-f8%3tdE>{aKlyVl_`|{IJP;L1m$m<6iObhnIuP2FtMJmqns1KbdI20sa5adb$s#G=<9nK*hwAsGt<#dNY*T<^63O3GGgTb?YY z4s&~yR$V)d09wnoHoR<2_aT9DvuHa_LLo4;hrX4#y6Ki?ct z3-Su{eV&_`sLvvqoO2NIwIGMn5%T!b-ITL|(hfm1(H(^&JSz#Dzk(P|*4ehLf9t&N zC8wdAfYkzD8pxfN5hoxck8%Z*A;+femZQ^r*yy3XHKhf!B#yHeT)DqTW;JJFAB?>J z?Cid4$ONcJ-PzqRxy_1%MErP@KI~G#^RjV7irNbB^!*eP@35CeDuux5B-8zOuy62Y zAu>89L+dF6!zCU`zJoP1Sn3q|zgtH5|JZx;xFplPahRFT)SNRe&6zVT=F&7Zt!8Fc zs7U3ssHIMtxm!79N@|5W0(G3Urb$}daw8{^T+kdva{+9eWOAcW!Q3znNl|b^Lfg zZLVe&W4aIi==Sw4xdx!(uNVQ|_Ra&8(*$;>&7YlZ4v;GAOer6w+v}XXAgSSOp|RLI z#!9YzmCEtodBJaaAnkoI<|V6m9E)F*?pcAiv4^5t1Az8Az8Xa*cqDHVIZiLz5pCSZ 
z+v9JlN~_a-mVs)E7K0huesK(ORA#HhTRwvhRBUUe`IB*mm?OpM7V(wbvq3}VQsMZk zT+WOV?&9+`Z91c#PL7jy%W(+47cp96vS0AjI~Uos)}_>6TCB{h`s~7iDo1B>FSP4? zCSZ_yOTGij%1rz}gwcvR?m)YiX=y{d<_pgkqK#jKXp<5LbqJu61RpIuhd`s7;mMptKGc(>69v+t?dm68yNdIavngO)7djD2}X@ ze2Dr7lD@hViJ0E`r)MiNg}vsVK0A&83^`ioThhVbUK?5cF;G->W+T9S%B(xx@P}Er zZPgLVega#LpC#UqQ@)EA6-mJj);)lo zN=0lzqWSuEe#O4Z8kcgofc5rt9Xxk3!=hp$B3`bQgl)AZ@Am3~bInKbfkWo@eDl_6 zX4~vh5GS)wsR>y>bW7{N(`QL|xy4MyW4|rTy|W8EHaQR;J|07;U(u(_w$3x9T$Ay| z^}QJPIi&x0stnZ0gcIeKg&TtKZt&n}TAhdZYd}p$h2{;Fxqi*7AFrA=uvB*V@AaD8 zj4pnSqf8oNit{du$h*&pqK?v1D#0wEE(_w%bJ$MlPQ-_D>-hpVRg^`M@F{XflI%? zIaK^-?rSf#!14gp@`$<5#pBzUG2Kjv{I9IS=3kZSU68aG1X37TBh>xjNL&}3`l}69C`P;L&sG*%F%+uH<$iAuaBpB+~%Qgp9-M0 z1Qo&S%;G83RD6@Epd&v_2{;(qI7-)9y6=}aS($nhZqcfW7O7XP0YzFirCco{_y~Hd zcnR2B9J%+bj5;iPRbFIssUgbI05eosUL?*Y2@qjL0Yp!YHx*>idJ9(lpl5Mz?H1#8 z)-&@ros$Dly z5w1a)lF&$Bn(DJJ>ULndrt>B4`oUeZtKr?@0eqTN?+m<9zd|2L6PZ%2$s}!}FpY?1 zP4l(E#hi#zK4$>E=L_fYJ>rj-xK&U!rgKrFslzb?Kko zkL!$Z03J!_TX4RWGiCn|W!M6E0$gRj%+$`6fRXSOt0!IGG?ni+z`>kXxh;uDdJ%9r9mG1@S+rIRS zS|0c|ht&xz4SSBqxA?rL8Jyd8n@7=Zogs`e7Tiwoi*KjE!(LpR7-!55w?V{5#np@C zp}`d5ow}fuGR}jR(}*IT9+U?zd_s;<6=H)N!ThpFQmFV?d(|F-F)d*|rB|df(ox~3 zFDW`s^158YVBgy(;Wxgrt5%Ji{(4w(2VXQRiPYt!%w5-zQBpa5Iqj_ISNUeL&dOun zK!g8Nz1QU0wi1DL79jq$VIf>U^QS_JL#CrcJ94r0Ol-d+zOn!vI&z?Erjw{ZKO$DYm1E z!KSs(HtAGkcC{h0wg^lUmGl!9@f(bzn*CVgkNuF+N^ZG6<3hEsth7>)6I2<;=6-)DKy3 z10HT9-W-r~aLIQN3(n`9nK|Am@FGpF-Xh2ZBsEM+YIv}3wbl`NaXa8Nx7e7iq^pDp zTx+>ad>4`30kgFz=1hZ{)C}NP+A3{m!#b4*Wb$5gVA5Ojm_#&-+<+U3bSC+Y?veVV zhHD74TmUW-)1J914KvDSd3QS8b!7nuRsqd)fwtIf>!bj8QTMBl&`I0}-wG`Z&H7Ap zrTgsRy{Uk(Oc9`-OM(DG59wNb`o3S_5PW%PAVOSuT-f_oM#sdIqj*d?wWZ>SJg4On zmN`ZLhN}@&wa$qv+(8?opp<#EdGaCJ0Gpb*8?_el)!CagyTeOB}iFo+~5%sNhK^LXQv zgKcqd?0y|T)L`zWM_0x>YO+>mpb9^ri7y**)G;r(!Zq;^L(SEpt1#WYYZ(z?&AP$^~ zif)Xhxqxmbrv}ob$D^9dcy9J01&JtYD`~YxgMzG7)>Z>@*&8=MV&i5hVZHY*U}J|X z>X75htbb7p;3SWm-8a=|Z@G6zu5_I|nY3hyUo^m@>_l+vQKP4S^5JGPA2+c*ib0dD z?d~j0Q7G`R94D~KfesXCjAqI7xe-Uj;%bz5>=kLsytXxI#R+ia!hAIs9WvSmWdON= 
zL?PApA8C@aBwKX81-S;Hc^)=8DLzg-JvaBu!IXiVh7yc+PZ{LE+eRZC7f#xD=^7hU z*^&m5Jp?Yw?W4|iT@`@_1&Pz{>^3ZQ%FvFv%3>OqVW~w0Q}|GrVOr~?;SuI0s_1DQ z$Xr6n6K?0|Le^C=bOe1fu7Spv!PIFr>Cc(cR=%p(X0U2it2_gvL;7jxQVZp)M8mpO zv?3d!l|UzJ8->KA4tX!`H~grWD9q+i?_*j7Pw0Nq-#oZ{k1nGtdb4_cLv}awK)#&> zczekBM-Yx52ksH&y-ze*bZympmi`P-uF8e-OaXT6>x0ZZ8SuPzU!KI;lY-SQkM^P2 z348*vI0{SAChf5Z@5DWlz)7RbG(Fg`>Dv>5nIXbXBTki%eZsvoGcD?y$zTIo!V!*oN;fdb>lJ^*#s!UADAWVGoYj+xRvurCofMpE))KDpJ7itOv~uvtYa zb1cXkpn6Pt06Q-_v8A$2ZzfgmwynG==)%79fv~d|7{>vnar}EGOEap-Km+HI?h`LxEt!c`xxog6j2?v6(#XJ5oM4;}>o6i;-uO zZ`10;j*uoe3l6F31az?WovBszZ^H~c1eH^UsreEQP>kcxmo#G1W_8@x(OO(6T=At4 zt<2e1D=|P92F?llao?zWSVNZ6JL%dbU%ndM>4lBIahut?T-ybngL% z?+Il(;eWZJPUlN>9d%R`sD1#TSQ#KSLO+z~H|@>!sneGyeRY*{k?Df+z6Z>|JQCXY zn08DKHZH+nYLlEakd#~GD=Y<$oTO=^VMHw(EJAAE8L=q{e-zPDnmBD_*Jb1kCx*^J z04u`>3=7UMz^?+{CwF%Nf3~ez74_;P&H>pvz*Tcwf<)`oNH5h7g>as^bor0-tUb(R z0UEC`9VAdRS7e*7UlB$mZlMjj2v)UiR>PrGN;6%AgY+^%oInPr`@}S0KcpDo>R>Jk z!c?*C&FihmBamL5_nux&I)NECnR_1ZtK!92nS6RL=gTeyv-`Z7_$@Y(u!Fn9M)IO{ zDO!NtCm3h?1qAjzG%qXr-tBt^r}kJUj^R9Q5J)2fNtx&&WoMuwhm~lQcgkg*0AgLV zzMlgH^~)Q7lTlxf%ih3xvTr$fclSL2fiDzX)hnc8jHlRxTfcoDE}y!hVI^YOpObfo z1qTq@QhRTXV00{g8p5{tE&22y6drChPACVUgx0N1gVgu_f*J4&JYvy$5A`t zk;>hh7@ym*Kk0;kfmrrwLY{f3lbw7U#k_Cl3D)V`b$~J0+Y3&np%Vfa*?vnAK$f9J zlN`aFa!0C2V2=Mj*5LIkUX&4BAo~D3AKw@BKot!a-ye`iGLvpjr6gY~sdorf6gpEoZ#I*>j$`LKp z#Ay>_S!GaWJglbZkr;_6%ye`JrQwyp4@D3D^fU)9t!Vv9uy5K=w zYOla*_Z6p{QO{eDK)9Nq76^5Dbvw}jz+xhoBX_s4h|lvtM$ndq*^zPOl*tiAr8c!C zO2pzrsDS7hs2^KC9?X>H3^X|u-2aZB> zPm-VHLV>{igaw(M!3qg{@%5+~r^AWWEkA!jb-dHM-Qp1Ibf$uh(mqBzN03^P4eJg% zbIbjQ%H5%vQl}LFrqd@_>yaM7Oe?kzJgq z=_h-RBSa}c?C0|RO;~g5bY7X*sgr(~ zGMNRs7!m;nlt@->_n0s%aYeJBlQ)>0OiM6xg)=LWd+vr#1IasEQcSNv!psv_=<|+K zp8l|Iv^1RZ$=tKMpQwdIh&Pi2;LsA)0yzq!GtffZThpIj3N6Z7N|}%C?Fquiz9`ta zi%qEM8iCy+Ph`^W1ZGZl6C2e)_?a2ObRJe~L0~?;&~VwkUu%zxlXcVtF+)fOf?>`3 zDX>FNzFGQUsm2U|J-vHPfoij;@LRexA7f`qG?Kx&-PG;(Q_o?0 zQFe)6B2Z@+KwurdJn(r~x-((_qcU-&=L9+#>>nNE$t_O0y2&=ZK&kqzTKworm5gVROuej878W1cgP}^?GFw)IK*xiEYZna2Ju<5As 
z_DMZE!EukvAtXiopcWdW9|M?h!W_EDY`RT=+>qMRwbq5V-5knPcr0v8KW=iP41k7L&VkyDzXcF0C5yO_B{$nfEs}K0$ALCHVugu`up%w`oIvm_Z;4YZ zh#@Jz&IM*boF+*p^5@}S@62Srpj`uRazUMinXLs1iIrMfdo3s0$ukN17HxM8dsk1JFk8QCP27^{e=nTni)_*;`k`lO~T8 zJEviIxmZ?Do1_~EM+-@3InFiL6_-v3xTZIigb!w{k!`gjYr2MEX^)= z*0?BP7ptRn4@mT`=z#={aU(Ox3k+ESggv!igeC8P^!2iN)X%#%2|U+HE&gwh-<3rk6XCu<`6^L|~@0jRo@m+vGrNrgL|K zZvIyBY*Ra0(P+t%@9cT;>%%}=xmWjatdp5?blZGgiiLI)?o=k z%W%iSFuX5H6C!}sjt-lD+Kzz*-d^!J6W<2e0#52sDa^4gKh|j@;09_-z}e=T6urHj z6IWbOPjV8c;fFn~ia|_}w8RKR!u+J?g@zu{z7<75oz{Q>Y7W?$yA}ckvYZp~qWftE z!~v9SFk9MKQJI?W)s-i{R;OiSTjd6wtt$wZff(w1D4Uvr+8>STc(C_eY7Q{lj#6GH zWC^)11**HMHVFAaXJ&P5o@`opcRoi6ut(X#n>h1O| zlPfgo$uL+jH!s!+r4jx!g!DyT*LTC??DG6pSX(V*<+tk{*yJ7fT?}d*Cq8|ZbWA(I z*x;9;_7$wHnTG?0{P#+F+17s27p782A@1lRNn?g_M*wP%5-1XbT~hhRQOm?~M(B{0 zJ+nI1SfkXc{>g+Pc|! z?BG1FUm)-o7A(J|-X4PX0W1L+NB*xSv$kgXIg#52N`o>%LkxI1C(P%v38eO9q6qTQ z%}MGw80gwnplL@b@vjJ#2aY2dXGme*fYhi z-Cx6wN(A{DN0_TE^t;b%WA)JnMPbXg&^S&&_bmg6uIz}`I*_?@EbIn5Dw(7ALuaw1 zw8Xdb zinDd&_DtYVDS-6 zJqYgjk+BJRVOi*6d*EBQ;`#s@FqlMNI?7KgD{>FG$vwvYf(23k^H?W?cVMA%_Wk~V z+b>lN)&zHkIH2{b`*4(}aU97J02CH?HF|^!a{Dte-2}DrpVIxAv-AW(+8uaH9x{yh zGJug6?LCfCbpbI&Ia} zuz>G@+nQau^0$f=Z;2?UQ~9C6AOgPj2#A2+n6wjoXEBJ*sB)B69K-usOe+I=?ghwQ zgsTLGKcu07;o3Fn6A0L_@OZhsJcaY5oSfWJ8akqgx*(u$+>i&%zb8+=SelK?H|+mj z!vh+dU!tzgw*9Ply7@Daz+w<_OxrvMJJ@Cs%zB-`5o%Q4;xf^_1s@1TyK!o6jybHD zMdMpq>CVRtEJx8tL6i#4{rSSZ>CC{El2#n00R#NVQHHz28Il=Mq@BiF41-mGRNCR= z8jP5$doM9H^5+D-A1oHQ!Abg$9waR1d|=1ftsn+fsV&j}tGkBRp0+^wj zO`RO29ieX+5{yWArN?pi0}+|m51ggMu1>&kyt!sOM z5G#V}{$`TXOH=8aZnhB&8a5Tb)GP0Z817^Km7g)VO+-X1$=muvw2c66`vubCA&Z#o z6`##Pzy*_O`5I9te-ic+Xu?>XCl7gUPK-v%*-CUETq5^-1ZZQIr02VBmGvZGg{7ii zR;^6V8gTRDK7=&EE$Xy&DA3{;>y)Bl53h~(LF_?OH&^=w_5AYTrX@~GfLm_6H|q9- z(4w!>IJLGQo*?q5TS7v67u1zU`V3jwF?HOJ_K?q}PYFoQOD#R5XFILUC9)i>!)99j ziww+bFVG&L+wuC&XM;Chv(Hg;MwoQy`>JXIWs96Rd zZ7bL3R52Q&rq8#Sr|%5=X+ny|yDlBti%yHqEJ67yV?>g-)J>-sL$t8E5*eZ z_Ut-I_~)A>cL2BbJo0Oi6ya#+54p z>X3V#(bmij#V;*-(|F#@M1V#&CiSZ~R)jaN@7V{b6?(Bw1%Q323Cqo>&ErpuT%XTI 
z?EMH4vjyhPK(0IW761=DTNkcFKr*9>H^P=pO*C3j!U|} zcw4d^6UsEfbm;Mr@xYyd!s<$9UTwYAqf5?`J8ykQLr^y8xU;9eTFSmQI`lpneYq@q z_fEAb9q?K6>O&rWm*z6K-aD6@>WADy8?4b=6+LGRKh1zbx6Pm)$Dwj5T9i`fV9*Id zT|kF+7W5AErdF*4n#VEitp*t1W`$M?E*R)x zG`nRtux{0*)wH5*;YOa1dOf2ueQpZl>$M=S(%1Ty%S}0(xjk#!=72HHiSij(+vb5> zSog4a)E=5w=lk3o)?I^s^1HH4TF#LfVSLan_kzJw5?q_3<}9gAUwd-hhzo{gnDr!xTINMGt;0v4N2|AI|}|Kc&8fF()&< zDR0DNppWGiJyV~#EU_VqJSr%Eql8PSb`&9Sf1j`>gk~rb-~zbxKxasP9?IJ%LWI~Y z(3e^c3f38b%*BLC->Bkv?)o5ZAfBff#EIhN29sviVMe*w^#SdWo*DF-*Q_Ch0g3+g zEzDtRDz4Bp;0_kmak?qbW;xm;nkLX$o+I^~MhqfIS8rS{G+(xJ5Tx|iTTA^Ce5c`M zZ>*7nhF-&|tel$osa}tI(ZkRTAL(;bZiom)oPFhRBH^BBva&&T%X`vqt6wxOp?KL&<7eN*`aKOT2_2QGBUipRB@%K4(7Juw=KPJjt zVBWC14o>#d5~NQbOA2beq|P;Sw;&&nA_mASlZ3Yu()Fs7?E|sl0rI@T?$1bhtdQr5 ziz8vF7>cLBA`GMh4W!B=S|qD*Eo^08o-kuNASvOyG&%atv~dzTV-oJzgGaM$?p7`K zA81CoJ@H*pqI+vI{$#mvCi<#8^|Eze`FXQlwwodM_4dW;ysZ!1jaW;T+0>B>>^- zC65bGvA)LX`_YqBVAvy#ZPSMozB6QhlML~zWpyxF#-Pgz1$m(8XY_ZLOBN;kpY`|OGnz(iOm`)qP+v%yU276S~hI6#s{ zMVaC*r)FccE_KR+dpSdaUL#L{OMHALJ9P}u2}65Hm+1lWx(2GfoS60%2yHBuaYtJ~ zIrX+x$@BIBtQ@46INZDiC|Df`B z9rlh&j>h&@daD5GKq({z3vpf3kyE5n98( ziz%-Ox2QMGobXwn=ebzX)EWS?oNd|E_Zi9`4rvuoz=}w}xY4;U4!OrL25akI?D+IM zJArA%FN$-~s|qBQ1aDrJ znBRu7Y#Y!^@zXbyc1Y8Ku$@Rds|xk9wZQ!Qiz@l$JY$#Es-;HCPiS%_AZ#J;GyFKk)VUHvf z%45G9YP_JRLjopa#Tuy7fhJ%B+g`sGSS%p4bq+|n`8s5P>VUogh}6}kFxV9eNW^bF z19cTK*5nM;KC1!Qe2|<1V*_0O?R|pVa^7nM@PCOx(6@!Y*Wu&;67`|mf)~3y2#0n- zTORUQaAkDl15cgtZm9Sj3gcS$!n`010=K>2Q3a7%4?NiGz3t~6KQK=y`unUNd`unw zW3IV!GYXi!har&IR#D!y@+c&?Lim=0`Y#wbblZEqA0(l(>T2JYWNyRqHlj*`-weR3R0R84z0jFC~13|E+`%X&vC){|(kUC|TQv+zMd}J7}?F z|9>Xo|NFuV(i}inv;W16;_LsdjN<Iow- zwCtZ5H-8{t;Ah)KM&FG|)M8!IAW(RR$cIev>P95VV0 zeC&Pe8(N{78^bJYq16qGpb6y8DNqLMw%h;0^ntxy4?R4;dI|Wjqc<=el_mTB;CF&n z1DP$b2lu9U+AVn^9q?JJFyI*Z*E@z?abV;3>?K!Og3pe%sS`>+STLkFN`V3k*6ayf z9seWN{JzNhuNDkh1w@V64e75SnbO{e2~qii(p)1x2`dK^sZYR*6u%+41&nptrS7Z&ipg&>2)`orw z?QI<9krn!L6+j$taZ=)d`z>M_^nY-H=BLoFLKAIxvj$nAKTm1Kyu10n+adJoe-=D{ 
z^8bw>JbwJaBN$9IW&7=Zm;a#0fAIM~9OFMM?mt3AqoP4F>pxNtf&#eyqgOy$J9PaI zK?zN16{M7ce*p#99`Ve$BNx9E=xZ%&7qzt=#@cYrXKdN+pv@O}cgy?lZeI?}8R!`~ zD3BEhgJqmp)ag!nc&%MKe=`@PNx@)y3l^2p+OjA6fO(^SzX)_s16lp9i%RL0fJ;}e zXx?1!4MKZh+;!o5%4uu=#~;ld~8@ z{b~NmL!BTD2ZQ-1Eh??4D^`B0w)P>D^}o@E8_vMyH{2QMD_0<8KKh|)ffXSNe*n2Y zt@mJ~#zHQL7=!6q@r&?Qzg!!p-Z1y8#WPgDez8Xl#IPfaU{b&CwMTt02LE|HtoDBf za0HTROJ7Ca*EeGw%;#PRCXuk*UPloTi28ns2KzUMJQO_Mu+g|WZMCoh%qbo zz^(*ArZ+6*i=}tAdmxM#UmrD9zdku#j@z2ZgZR(Fp$+~&e?8HGws*?%*9YwU_LRl7c34An^4K7RD6KdE zTH7sr9#$(+o4{RvU7GBgN7r9`JxufZ#B|gjw(Yf2{V2rgi+L!?*>^X(PNffXPZ*jcCz7>?fZK+w<-lt_amfTEy&X{NoJ<>Km$lxJq{G zSv-io0QEs|`J+*cFB?K)7il7Eg_`$af9-2s)872<8(>*Fn%8@SRUaZpBq$>Qk|^vi zOPmPpjKy6?dT3scw*2D_Y3es57WzNr!`PIXPK76MOWJzZen1me-e41_upx>u z76M2qZw%;Ozi?mQPVh9~!Z~mTlEwvKtiZzM6$enq&DY}yG)O5(?eNsRb)^y7ybx4s zfrxhz1x#;QxaHz%^%n(vwdlH?e``C$5(-U>*W=`w#ZZY^`F!5WAK(9AKFPv!VJW8p zX>n5iFz06B?ZpLsFji;b?b@>dGQc0DP9}aou&w#PI|zI@k!IoUcYFG9H`C9@3uHH03c7zpv9h+bIt4!^>uJPCNs+3y$?B8%(^3Y0=#`?NSfmu= zF;a$#FOZt3BC3%pVRev-0DdD!l_RQ&Ic0muh*HX+bq%VNBTBvsUD3?zQONK!9U{C5 ze)=hoHrsW!>lepY75WeLg|UA0xkkQu18P($HA&(s`}7T2d_ppMtH7;=nt=vGh721p zo5guG6|=kNS?ZNrn`3P?$;9?IZPWtj-bY(t!QcIH;)}8T4MD}m5Bf<}9Q>#sQ85R` zg((`1V!~C!hqHR{B3+SEK#UAmP2j0=qAw1o8s_y#@K~I50@hDV85jw^S0Pr3_iE+Hl2A{ola0JuF9#u| z@kYfGQCbSh*E)9g(gH4Q)acf17~2)_&031xABIFdPA-<`NtIa@B5P@hAi@OCEtQ`{ zPS1(jjqqT{EU{9|sF5>N;riV#Ut#9986xjZyh>y}ig7i;pOlR>_`4w;=D+LIugUnW ziC=Ak1-t(d#Ln1$DMRiXEoAZ(Q-^`deDl*$n z43jkpnY!~&9eFhzat&I#vATxyz4=xLJ-zHew{c+CzI>y|IDzo#;+<_54FYtAT#-)IR}${PC${pR0Z+DhLS7c%`E?121v3 zs>aJqtQi{1q}nhI1?r7MZ@B7upuRG!sCo8uCBw7JG=WgS7Q;t1JAl-EO(L=uuPK@qGZOmT1A z{DQ}FE(}#1SpD~pfp;8)q-Hg_V8QmXQ7e|nN7)OV+-WZFy3JO0j)>Y-b23GNuj=>P&$R)H7sL~K@XFU;MEFLIq=#cCdsZ6J&vHZXdt0G^ztMHx;%M@ zT$d<&T%0iVl$R3JXnmG<`KQ;B{0@kHByqPDd$(j~*u%+}gZcW6mC5Ms0&9?An$>`@ z7t5B;O8f!4S_x*Z4I7zA!IYSJW^!tByTUUYl4;8{dEh{*6>1@F*>(vnG-px7-P28* zU6$3ukUJs(w)ki97<0FF?7J+Xg{egGvB!8*dtXd}EzS0WAFfO9i|7-3ou-Dod6WGo zjjSj86*HfVC2kRy)Nm)V8Zjo^2VOUM^y+W;or$Qa*L`iGP7-SNPqVi!vr;gOIjuIM 
z1wFST!(OR545&vW=dEq04}VWj&R2v9K>+5MAk7-I)I>C^Gjy*C$Dc>w#u;NH${zfi z@jcey#MhkO_hxu@Mp3OxtWo_s5VC)kQ*DA!hUbW5^n+)Un7qP7+yisQW$i}lHg1_$ z*X3J{)a~3kKgY99fznbZ+&2xZL$cL*$OYB@@t;#>DNaCO_fY|EP%2wnDO!+98_Md;PH_)leGj4U-R;2j zUVSA{d&!i`yAc`Z$?6dq0E3zrF=!H_?Eg&lJS1kklBbvx@mWZ)k_vWh{WHKQ#%@%b zSj|nZ^o^cry9r2BYRQ-zTg`ZF`$ZBar+X_uYqQj$GQNTKH*v{l(wWi<`RbTC_in{e zj*;X1vfQ3kf`b7X;tbR{l5(|GQa%`RHL#L@`q3_@R?NPp5R+jdYIc{|r;=ToW_~RL zvYYrx#@q;~Ab>>jS-plQ-y2oqrLO_T#LRG@#UwDZ$-Xy`N7yCUv95>o35dZ`y3s>E zHRr2|%}nCU9#){DWZr_h65Il8Ij#L4?SuMgQ-iBoJeI%&7Ypi~{GyeIvhzNwC?j-! zh#d%x$|O@Xu4IXQ6(oX|x7TCGZv5-l6}+TuveeL8@#~9w+np<=4G1^Uv&)XT7|Gv9 zap!#Ln~-%9=IZ&)JCskWMI8*1pB(DYtcS&)PT;zZt^U*MH9lMLKIQJw-6aDEe*F}f z!#Nex*g5n=u|5z#ox`Zw- zRQfDFq*^{vr{SX5@q+RAZ>vK2z1yVcD^D_pls5^|;3_i9TjstU*~<5?R;JFY;kDm! zGzfFb3tC;tP*tGu9515b%Hh>NrU^N5N9q%WndK>EK8plB)I0lS(iYfmp@ zHUwFXmk9c6f4d(oRQAM&(B=JgWWa*Q5V)>;S&@80li2V0Ste54?ESqbfk;(UP8c}R zKO5#Q*kDIJ2Gj!hN;!m>-?fJA^8YmO*69p5!(cc$?h2G5-a>pwtB;#JBTSGIfRv2s zIVGQWtnPEc+jJ3QW-KVe7dInXi5s{VTOkljCm+$tF2^Ne|a=2vIqj?Lr4 zK4&rSFCZfh)(Mg5H^;ay+xD^3V)vrsvZEC%@+o^jHhZXYkv0Q-RnGWnI4ozh%cKMWeWT zc-ZmR&L#lNG?QM!9%@cF-(O%U>cz~tZ$UZ_`3F(Md8-LLgKF{Z*{}sjhrE?&L@m@5 zo?_WLoj!Z(s9-keC};!fNP*^7O!_Y$4$zd8#g)!CE{n2fdq>Y z+ysLRN^)AaAU*m1g>!`@;?wp{Y)YayaaOYMRfK4b9<##{!s+_~UB?u=AE9;nj%t6O z1iJ4_ifTXy@qkZ%o^zMUsd06nLkfED$+|G>HlbcIHU>MUFb&VVIU%T%ILM2=g>!=$ z+z$B`_Hc&|Zr0Zn=?jx%XuUKTyug9;g_;bSx!9ejLFQm?F0xWC;-* zQ^B5a9|~*WqNlf`S_WSf_EupeHY|~^^2{m^H&RHI{MQ9KU4^?_?j&>HhP=o3H&Zkf zeJ5+#%tJ^}^XAQ!HDL{El^Qk)nZ$_u<=*zIKRzNaeATx02w3KN8hIzoR8nPpmbanK z;B1$9^XisB8c$t5*p)d!7u1mb;?X~DAoASEKCM*o%}}w)Xd)Ps$gIq9pvA=t+9w6y zc4_Jpf?t-h=(BRhxQK5Q4bUwl_p58Y*a^{Rx;&gTDP9~UP(;SsbskcyR&>R)6eIk( zR@P-{IB!3{`ccfr_Tr5i*usLFN;LX?)iyB`g@h@QdzVauD8f(T9x7 z?bK*C7wxm~(>-ELxO5nAuf7r%aWlNVrSb04j(7)NinjHQpsk_daIe@ z+@1&*Mb$i(_8rm(PBS2oYIj04-FtFJcbj*ECX$DZLHF%wZHJL`vyMxsSSLg=x{409 z`v&j>9o`MuLI>(}ROaR8c+RZ5NQVA-zS7dMhpXxksL(8NFIE6>lW{jrH+p&;tSP4J zrU^2pbL`$!&K5QIpZkeSRAeD=Cr9a}4G||5dh^1d?}q2BhDOy(+6pb{_C2s93K%04 
z5kPd=IfC@R`14|8^2a&V|;Ata-r`XhT&&CYimNHk`(z5!pxU~(uTQup;i>Cfp z&a_b1EIp`$0=+a`6ViRek2!LvEW9=P*5#rhH*}mw9G01YoL;TNjcMpcMY<7C9toEM z{pyRN3P4Nn_O>t+(C9TiutjnFr%Zn^{=yt(%WMtP)Z{oeLm12fx}@@`{NIzG%(TVOIOm8dl&2S(m6^%X&N#R;RsfZ+r1= z=SFHH9(s1}xX(Op83#tb1p;k_P7QrsoUH5#=0s6O9q?!Gz`k zM>=z{L*sT7^fqYVhE6)#Q6CH$Sy<(>$(1GyDKOR;Gj~4vdje$1)K$=DfGlAQR3L4m zy=4xYc-nl+b=Zi|5tC$b)hu=1EUEosr$8ew2cU9$@r5tL$zA|Bpj$)$`(i?U$t~;mE^hH#42W3v{je*=RGOwwUflxC%nI-o3SHoD-0Qv zKFXF^^LcJn`-SPekhuiO)}=?gE(BihOWl7vi`s;!fG4a@UX^xt`hH;3X2JbH(5n$F z@KTe^QG4sCb1coRV!X^u0NM(q5Gu4U#e; z|3sBZP-))N8=Cg5*G>7UuQL?Uy2Ql#jOe0xiQb{j$Kcc5}Mu=;h|M-Q`0Q0CGYPjTfD z&F$7xKHH0zQ3km=^;XIedV09wie7&U6$z!YL)3cZXm73odA$ zQC_^p)&PwRxz_Xm&^(QP7&d?LvMPb8mHiWZ(2e3@V>sUoNG!y-fyP%SSg5L;WIxel zG8i*PwdO*t2NePw@@QDX>8;+mYBcMp0%IVoCxK6F<-pIXFXS^l0wek;kScU(rthsi zppUmk7V<4OVfSQqi!4iHA~?kho|mGg*I-q^wXI%N=XTIBdhLGDWXRtbz03fp@>_-x z+e~u>@RVqsQFV7(c^PwuC)AT`!_sVz;;5#5PC}f>{FIA@1RXSV$rp4HGHahzmvz>; z(j>1~_(tHe!*Aa{fhXC=>-m>zz9H|q}vO*TjufW5^vv z`vuo{O3*$sNt`=jaDraIj_9xjZU`Cpv;gO+NSn99`u;MPD^@MRgKA1eveBHtsCV`? z5v!8vDw^=6$}eG$ogj_vQb4*lXn)?lxLB*5i;MMKVuo0Xj`oSqVx~DamT(9406^uz zvUhq!dH*YaUxfm)*`5=&^&v|LzzB-;cilFNY!UjquQ<2EGg5Po_! zafO)*x{vBbyGI8(BZ_TUe#hEpWe9#%MXya|FJcWEj09_=+gl3!^Cvas=AOXsz!&4g zr4y<(D@Kian4(_n46w3sGe@6%wLUtNk*w z_ZtGfXzZ3hIKdLWm;^+`%>lz0C=$b#v%~(mlAg26?a*o`Se$*d@RBl-Xs`zJEJ7+LdR+#5?!6%iG=0_2n#z-Ds;Pq+^}82e|~DaTn> zOW7faa84bcGTh|GZmT3=kE@sGzkG`j^cKZrGZCGW7E@LnHwuQrl=OZa>*P_brjfm5 z-epxg5U@Rs#A7$d38Rn?bjD$c)7{`l6PYLJa&|;N09QJ`QQ

qmYGWGl;b3Yy2euR7HWv>QxG z{;kW3;^2pAS?LDX(Dg(_Oe`3O#wI7Mzp}8s?+jX=rx7uetwB_UAu_`dH8LG0f;bWH8Um4q6F>P)U1$%w8 zl>d931^(J>sX~?)^Fsj-i^=3nq#eU*oKU|Gcpb<{t42?uo?vaj^#p zBm3I%k%DKsYF^BFf6THAxb$Zvd8278L5a@GCvR>@M#ZDGZaT;8xLqY|X;X68Sa;_a z(SNc`o3cg=Lf62x%NR5@0H;yR{d{Mc(7^#x3VNvz9t$~Raf!@-L4R<${Vx&Vhm8|D z;M{q!t)@NyO`{AQnr?>2*pXhOUgeo<4c8!TMI{?2bFkamua1t0o*3$Bjhzq?jTL)N zA9VV3_*xF@<=#u&HL;Q+S2lpRB#GCJJVImx10vV-`^-_{`i?Bt!(Q?l`0c8CZF^Mb z#*4ERCB!lhUa;jEQZn9rCkX|#eYNhOa%~*-y3S>yr&~f0j>c}XT24tL`xyPB)paX< zKSscEA;nwj(wu5pM1TME$>jQK_H?_HK^@K4j~TTit@4)-pz=T~oW3DC7fR7MK@;{Y zkxSP*O|>?Ccp_HxxWe)%hAMJFcX(ZM*5p4={clcy0x1@e;HCg;QKrZS2PNWF7$Uj| z@dAnPL|kdU$$Zpk;%|TJI@NW-j$r!mA?fYVm={KK0etK3weGYJYCp>Xbe_Ue&C>;i zDdVJEJI}zOTP^3Q0^f};UhyO|DjFEoSo9m9@4H4yQOJ?X!6tq7y5eap4~`@Fr@FT0 z18hl?+9oXjl)f@I8o(7{9%>xPFTb7@j9>7U=UmP$;=|J2nDD2*MQtjN-v*6Z67?Or`z|ru}1x* zZ2hF&ZMq`XL+lX{UhfO-p)?cuQx*d$&JI%u=(si9U#YfIEE{tQVX{#b)m!zIzib9t z*OQc^cn)BHJ2>rp@{iTTl|}P_RP>xSpAy*ikoxT?Z0+>FT6vt)p;0Umcmo!V)QuUs zdq&Oo9E_}My=WgiDG5XdEWR;PCtSCUSl2JnOkxy~jx>08o5_Pz)Pw}Z8Del*i9T5L z-KZg{XR>7OfW7HOT$x0gO0exmsTiozw^h_BiSs^y@?zp~z` z&@oWet0@bM`dfPUA5W}&vlL3bmc-kGmW_73OP-<%B*alki^$f#KSba}?7yD`T0_6K z`xq;l73#hiLEx>hDWI*Jrhirynnn$s@egsft|PPVmb_T4zHLf+EtvTuIYkvU`tI@> z2yPGN<)rqzeBT5}Sp~%fZh4p zYdpIAA#dP_x8<@rTOO|l`NUGL7EWpQHvqalQ&Z1+NPyp8+rnZD%5y`7I|7s@oHl!w zlhN|iN$lQZWcDHhBYDL%WyuD%{~g=R7i^Ds3JMHuHk>hW zrQ!H!kRneuEn7RX>(R_x1lv}NHT$OY$LD$+kE+VI(+=q z2ry3(5>d+8O0M*_X>)Bx*}73?i~|u}&x>Q;(fbPzs%r1Y?VN3Ic*!NC740WOLZs?f z{hOufZ+1bW3Ti&Pq>70?@Q_H5FRfS@m%xS(bOyS z#wTDsSggcHA`jMn_?L?lXzbuIg5_ZK$*nN}m?9uc_6SSm+%18+1VcM=?5*w0fwiunV6RIF*HJ+n;Zr7))jg=MN3&%eSLjQToy z2c1YMf-W$M9lb~s)JY6V%p)ue=>cUi{ILjkrr0+?xdY!z$cOBCwS6`(sPG#t^a zwU5{Z+@5Tr(-GW`QrR5lNS{t-xHmMBrXKFQJv#hGoGonG7f40$pA7n)uuGO)OC2 zZVkg!RV!M_RF+ggVqzi6Y9?~UMr6Bc6`g<28LF6jf0+4RoQqS)HBiH00D?tk`#8Nz${Lscf-W@wpiW!zM-7MN z-#Cfq>C1U8HmRkiG>d!sDHUHj!6>pSX#ZLGYP1}SpPa9?FI20cK(*`vf}#gs&nd9& zTLQ;bdDc?nu#HgD1{uw;AhpHqPyQ!pWaeC#8YN 
zxOys%`%hlKPQ3cHLq#Vo@vBF7z*d3)M40Ld9VeQDMWs@G*Eg$OnM&3A4+r_Ja6866aG=@4s-K%E3;sr*C(lFR!CTzwD}q5e zyP?4v)Vq=rj-9x?GGMSmy;oiUosQCR&mPWMR8tvgqB{U^yH&o8p0$rL=y73x5#cZA z?ebLv>^C56IP^-GsuUg8`gB6$sw_`dkze=+y7<6Df_gQoAagsZuzR-xA{0ggq?6o!8TWi??Vf{_!7A?e!Mq1VDE(+^5 z3hUyp)&9UNT$+u2Vw`LUA8e1j(s0oj#dXY8z}F$~U)*ZA>_&p0oZe;eq2v{lR5i>y zYt|S)PABO+_B+~>x1O2Gmt7s!vbvlm#Vr)+!8>#icW+wm+G*I7_kX&95GZA6GkC0X z@u#D4bgD;4A`CDr&4OrgX1m)JHx%M`5R-qmkC1I%8tev6dk8Kut~eF0S5BWehh@ot z`QcHW9Ja=ilG|tH`}sBC?g8w0F3sANpHu7uwmduLf#RwP@IBny0t|LZ3ESZR(J%Mu zRQy*4QtivBLsWNL8*wHg;uky;7d#V*$GT;`=P>oF9dsId2`sgl<5Gg(DU%I3C-CD5 zox|C-?`PAsk9gwT`Uea%kz7o^8+r=HF0YCB#_l!~`15^XW{d6X{EEdmgDK!do2;-l z?+Ft^SuIrP-RG#F{-$(Q_Np2(l6&|q2@Y4y4xa86iIrMx;rm+VqCRJkraaa>9qqN? zlkHyM{~ajF!C#xJe`!vkTF}KN2?sSQKqe^jKJIJ(BjZa-F448z=zbPy~B;BXZ% zwd?zLgThT46>~M^V+vihRJQNoM_|`}K1bzNk?Sdl%~?v{BfXlI!0n}ZQQY_gE&?Db z8zdPc2iaga)utm@5&gOX)W(;TaK@FtKSu1-?r(S1SIEr~ zS`JY$uA8I=n`WX2DL>F~z$J1dZmJm>C5f>`evIO9dr=A2`ZSOgIEsER6kU|F}`t) z9sq;RVK-og+rDEwVpHVwC&bj=FA8qREZE0ip;Eq=861H~mN6d4}C5(4Pg&Juo7`}RPp(Ou?OsI13a{%9N+pwe&6&H%Gs7fv;ExZapA*!q&i=Ghp#@A^mMkHFi~KdA5VM zr8W)#-Li&UF9ct8@YEzcTQCHcq2=EU)aHMG4ITeaJ8v9O6w!YYL~{f56fn#wOV#4s zvE><|g@xx-dk7i+=x2zMW@g!4AsQwP~s)mM#@#Ir(s`0NBnP68)W9vBp zyV``?ic7CNf{BUgz-1GOBY*GD?SU!+0@vC9k{M!n_OH7gLWv%Q$L_VM*Ke5lj?w;I zEBJVd*!rCO!-u9OaRG1c*GR=#kZ0n0%_HQqb+KSF_;T}`>k2JFfBtFY$je@y=$=^< z7f(O6Of@BUZ8Net{l;vDsh}_t3&O za(OPve!9Kqn7GX0q#L#_BqJQ_! 
z(6{R>b~_`WSp80Qeu*m&^!fWp%sV=Uf1k*vG6S{l_mkF!r~H%FPQJEqcJ4^<=xcp% z?G_A;Fwx|a=tx^E!sJ5Cs)HfnoQ-*jA9k$ECt4_r`1f?>5S+_sh?=`S7%z|Vr6^lSd6ai>9r<*3YNdJS8>k` z@7~pIdDKQhZYmkog=Ic?6!d`I=uy5qv%JuN8VvCA3ODSwu53mI6RI>5TeR@rnpt>K z{1L)Lz@KdMLtJI{X=Vg-rE<1TymI)JHn(aIe&3$^ubbLjpPNN)4|PUzl`Iy0A(3!< z+iYekKdT6&Tok;UC5PN9wZr1?BoapUu%&1JSNZ7zY5vCAeM$Ge{i|Q&iYX{3 zxHp8zu=rztg{_7_HajP)&|^DdEjg_FSu$UgbD*Q4rrp-C zkbw7Bps3MKs$b!0a*rs3DC1{Z2tW&cKV>?*@3NFcC^j(^A{KE z582n#h(YtrAkRs<9fw0)e3lhtt51;m;1kEq39Zxldrtl$!6hXnppSIxRC6q?kBrk^ z7M&2)5S--9MeM??fxBYO9R_Glwai=>4ls!Wv6fD*cJCj3$Il}zyxXB#4#FUb9`W6^ z-A;8FgudBDyG<{<%`jVVS`#wITlk(Yc^|@1I zye=xj1&6ESqh=}kKH25{v^=G`-9o6tZItbZxu;g8k;{Pk&fI2xVnuD+1U%7-4Q{(F z-V>sy&>ig7EA>@(5Ctb0irMu|t(PuwQijA%X9dkzw^oojB@T2XE!zj+#(VK)SIxc1 zI(Za54EQyyG7>CNAhewbe>0$xiovJo;3Twll&-F#m+O%Z$1`Rg>&p^ua+LKGp%~19 zel&e4Lr7)r`=p-?TlW6Uk~2HAqz8WI<_b=!R{2e-R+X}!;@cVcp*H@VVYB=fjp?1@ zGXSJ6kJ0!2+g%m?E9v#jegfm?CU@gJ=S2!4MxdFw`#a)Sf zsKR!l3dSN!{PGDcpP@E3ZtHAM!J?pB)Rf*^vv$KD%=2U>bj!q9&=b|d?|wVX>NQ<9 zM0{9n`*ban;<$KMfPMUDB?!Lt$a7Sdb}cb`NOT_dJXA8s=}waGU#3>MXKMZO(&fmC zD`PyLYo`=o=1++q7QjD6RwKDmaHC@kbM=9XO5LXspv~)7>8S*7<>D-k0peH|p{?!Za2;KFfc+LcjXQdu7(~s?hYi zsr^*C^`GUI@4vDVt6w@kgW71HCK6KGAj3@VMLzT=5>cXc<7L{QbGVAt2$s-Ga1|J2 zSeWSMj+aD`Co+S2Jonft**vJXKe)DMWBXJogYVS;VWFR46P?`e7=p)}<=(!9=W#Ml zUy96Asj127A7m&KR)`Mt>*x{o{WB*jDrOb56Y!_Nl?ATz$aH{mXbbQqFY$^ZTAig! 
zxesKP?PbC5U_e$~7+31}u@l=&ko0c8K$}u2%VHT*%j9D6@dNL?Z@seKD}juI$!MFM zsiUBJ?IGZNfD`c6SFCsa;9nzD;)@N4y~%bmCak44judni1yBZzLb>>VTfZVNcS(G9 zWo62>`j5Sb25-NSm}N<}PxWntyRRF$!Q|)_PV3Wl#p=>5aigYHz=a0Q;Gy`Lnv@n8 z>~Y*JBGwwowy4Kc>G^^-C#*9cN%riM+&T1fCe{XR9nIX< z1yV;$I4>)28TJZ$=!JvY(CS0p1rVH9O>MI_D9x^|NKC6U}> zq}9+sPQGu6&;amnwX7z#&@Gj$FLF8pz!1Wd)5S}l}Wn$Z^C^}EX^>?}&bvRA~7P37sruTUL6 z?BY2+-|T9{cuFX$`T+RPXtAoH=~~XuwY-nV|H@Pdfb6vRClC(Ca$f-O8@kNN@bE`) zM3!2y%y!Vt7e-A1n(Val!Ap!OvTs#tPzR;%rT#?Zx`3>X9^O zjn3YeNq)`LT9&7uqvO1`yzyAN%DR}oIUwwcoVD`puYKIbTA9U%58vIBp>}Z`6(`@m zQktnZefe^~c`kmBR{KVuS!n(Umc-n-`rHk3lk+s9|&uSuKLLsvzdbdA(WkS>hN141bU*Q zVyX`ctSt?_0BwjHBrPm?&3t-4HU3o*_BE`r9kdIZs*M-c9sDP_LFf<~yOra-V9%cu z6gQ0C3 z7c$pTS0I5%BpkA%(p36qiH7+t5?YUEgkom&Zguu9t+*GGO5H2~$x*;a-#{{>A)b@CnYD3H zTHSq9dda7zk;|1oSeRf*-}|#g6hCLd!s3vepp>@TyuQqyUiF<$8R8ozsKl!3A-B>q zjQf8)Z{VbQuxuZ5LX#R3BW0?}SXg<8FLU%wcuZ}Q{L$WxREpn{G>YEf$#WG|IT`sW zKK3>afxjAT20P)tHlTuoX)Jri>jD~l;NYDlrE#3R? zU1k(l-je<@KO-ky)|S_1niMCCueW7s zUc<8Mw~ya8c4bm#Q&sZ0n+{FbY4zKBIPOj!+Py{8ArY*|2L!3MFnLnFi;qPVRwC*v z5#i{(%Z{&#K*>fVBN35EmtO1J^lO*6*8X^JYd}2FouwaY8U0eA+l(lJBYp0jt&w5i zZeI2-m3V$n83&!tlVY?XnY|fUXEEKJo!(5xq6AAiwpm0(yQ2NoOB5R;w>Q#nbmmj@ z-7 zPp|B1@8d=RZqL@MKOG)m*oC2!3k16c+vClrZz!t9+`fE)`*WMq+Yr=n@aU=?SP6^q zqbCqHvCmt+EkewsMc6@Yd_J8#tgiZTcd4uDf2)^#RnQ8kz+G$B!qjzlnl)pneAyor zYaKuThQKV|a1(1b6yC8GUe(=8bsCc5A=D^VXrArEin)BizMHwVq`R4_VjbP05N~9m z0L{Rw1l1N0aY9fg^Wj+|p5J(1+dN-qr_i)cMOmknS9jtOXa<|253)4g-y&fYc=?8v z+@$gkNWyo^muQrckuga&y*V&`1z|B4W;HP3`Ze0= z$I43;9De&@zSp=&Fe5q_T_} z7YK3xH~@m&XM$EL@JCG0US1PE?q*Xz#-`LT%GDLz%j#*UTDr25^WQs`#27N6!+lyC zzb)<$r$_l-q7v3Oqtl`S@5o4eYFc@3&K1^ZAdv5MD9|hPmd1y@DW9UmpzPA0(IW@( z3+s2d{VbJt$~di8OXwEdwJO6eE-RxktXnwRfysLgm>g|`O%LBqT3^(0`^stc^+*7I zde1i+9`u8hEpF-_cZ;00S|Xq<6vGhJ1Eq0YF`tZ_uB9c&{QBJB>P9)m3Mk{W`gAMDvVpV(-ODRDLq6GwZ>G1;V}=}s)A6Lg~(qXNH6 zBUwv?yzbhqXCL0M911y}f3LZ&_xk;?&4ZY7k@D%?%hCHanRgDJ#;Cn+yK0J-9K0-* zNOsBvw^i|ZFuVM0BTb@uHN1=gHX+55&i|*eMkQAwX&)qPzQGq*@ts``h~{dgZC~BFJp+ 
zzy}T9@_&+|Qry6i<#lQ%eDOrucegh6UM-vN0)zn|X9#yTLSV-8=^A6_r1yu($bTrV-%+587e8tYN7wa~;YqJ5$vT$~VIl-T7DZbe1^Vh+dt6(+;6#^5Wpt zszn-ky#+b_y6gbRe(Q1y02PhZvZAwe*n%G#=6KHcFjvt{OwtEeM!e*dDO=}NC^9=2 zeR8(rW3i(}!szUkS6V$1w?ZW>939Pk`D4l(!{osRTGel__e^cS*|5oB)%=@)J^%(d*b>Vl_AfB)c zy}@?WErpLy9pQtE#{z%&ocY0mKBeHdl;FOpd1D^_tU5lblTNQW2&1Tq8t%<*A``eh z+jk~h7vCfw;mbf!cm?v`Y@*)}+-~^E-S#W#H zIGN{R#%F}4!+a~wnB%w5Zr@9&a~W;053<1bZn4ZI>n;&A#Ll<+sf)RJ&bV$ARd?ObkF$-CVos!(BMRFQ>DWv~#=SwX%-8OTM`k zh%?%1AqY$p8#SYhe!RKqc3zuCx9lo~M8@_!{}d3zH$)!@n>d|)nNYO-)^m_hmMuH` ztm>eGccE5)XO$93Sks#cJc7iVr(B4ByS>+)en0?Jr^>c;oQ2O0XKFP5Gbg9Jt^PVL z8U!-Is&MEy+^#|oceDefG{N@*_I&K8-ircC-7h@UoL%7Qf^lRubI$;xaVsf+Q`}CT zgC9rt;_L3xV*L-F6q#;flIL(%B};Mz5_`B>P)HsFf7`x z(M7jNSr4RiFlXx9qdZGqqGB5vlOn9N74$88jfP24sezs(5(HJ$a`MZxWXbPX9_c?D zVLL5W=b9kLv*pOO!vm`{G!8m$dt;TO(HIo=+-&udOPO(vW%%yG04Wa zsFd&(R4dPH*j)AE1aJ^$g}-(R zMNzrz??nOHiasHkV;c(Ddk%T_lI@*d|Nlu2gOeUjI8G_fG?hzyq-X;MH)8K{87-j9 z4>FByae~II#dmNYktc{A12R1-wu$w;$v6R56THISu*!gE+tE@=O%^yxP-!9jdIZVR z!9+*MR34m-E_he=BUaA07P4efUsWIyCnlwsKl3=ad#<#!=I;ZC>tQEL+;{E(zk_x}ZkkM#5A*Mo( zN?xMO-E>`}7nS37T@curGhJ~AtdloJ4vend-xh2>olW$Jp*@c|Bpq$TXvY^b>FCdM zVriPC8%29FGmw4V=xMO(UyV;Z11t!KTJhXNQC{7vR~@y}HJRRR8AmU?_sJqSisBZ+ z+P+;-NPVortJSj;Bx-lF$rd69$)pDtgjdcP1!}?hC1a-ZR#OxQlP7= znF}}_?IwCuXpH=GoNCC$!5x`8nN5Mf95r@&=ct3rvA%XjYCIs;PR>0CZksC~m-)U4 z`U)=}7%gIt-^+KWcv3JWdYG)xo zK2mCnA{CSP8kNu3YlBoa?Rm1aJ{+jbh%hU*@Jg@@4k0eSs$M2pbN5vF;-y4HMKpgn z3<6AET_B@Kif#f~q?fvfyu)N~L|8my^3{-oaWZ)EscX>Nc5>6+#}=+2rP*7-IO_TmEK!*uKg?G>oyOAgmE0Do+p3njIX8<|LjVE;}2Npkgt2Vr$ zQ|_OGIJ%FPu|K4)OpJ^_m2zI-qaV~3(e=W}n?@nV?x%>qf zWLl9i(ybX-|4|<1I;f=TtXUu)`U?B~^@oq>*R|jK581#}nZeq2IU?aX-!Zno~q@h$KRQM(P9QVBw3f$ULgt=eb`^Z`Y8u zV4uulR?~G}e&`=zM5pSJ`dv?p6)$e*th>Rq+t<~ZM`-^BUE88CrAzUsDqHB)H6sZ{ z6EA#d_7YLRSRS~A6E?T?IY1(9U6*%H*^Ql3sYm|W#Yjwct`VYIb4=whf&s>TC6dIK zqs+n6nZ**MzK>^nfj!n;*l^CcqdoPdh;WTEc}kzwIZAfm5>t4;VC#i+O@^owCZ7`Z zgrNR+9Q7!*y+>~@#X=3eQp!LWi$^Dh7DyO>f>g})!44@nqmMGuC8JJEm17E4&05|* zOFPa|-jP10J&nEkoBxuV5q6OSm$7I&b=3SBwV| 
zD)~Uvv$|46@H{P=SaCdYg7q*spWxe7!g>-&d)Lfeg)qo)=)NG_F}?n@BJB4(`5jz{(R=4Z z?vAVriRiP9cWw~etgr`$ay2*K8Y0=?)Q|+Q3*)EdrSY%aKQmBpw0vU;2rY$}1?LDj z)B=uyXk+c@v)E&mcb4u|{iK!^KtJZ9AMJ$BUp^kbUOay844wfb4i_9-UBwX3v( z{+)17Mt~7k%96ZrPua{RVlTPbk7Vy@Fc}dYWf1XfO`tS-Fzj6tkY+>TJ+cu&D$Nn9 zI0mIK8$$Q%C@P+dO4fnGSxQFDUk3?G2BMKR=N|9RX-ddnV2p|tSRCmbY8Wvyge`_9B4#4Bv0aEh)`tv>52%>rEzOX!BGW=i9}#=$H7 z>1YEDiwaG1@a0If-bZ-^^AMiG{>;Sx ziabyjSHWul{&W$cWMydj5R)46?YYhg_|FQPm_e@JIrt3EF@^?obTR7 z2kCc+@%-a(UCC*1vVA%Qrstvl5G=2f+$RYAh6>K94B=Lt5V~*2^NJp?k4ysuF;G{Z zPwDQc0Pi4f4Nh9h;3PQoaZ07wkt9)+05~g4<@~fhYq82)1!KaXmaoXHZpiyXyv}G4 z9eEAAmZ>5HI#6C)R%h)ewcQg>F5Hh42xNC?u>yIGnq72k_r97}iW&2o#W{JOw);L^ zS5O?y*e6Vk+bHnVv`8A>kU?DtR68R9ew`Kqm1W0(y3np80*HP1pmKK^)I~pGQz94a zw;a`#4FvNw7=N_8Y;~ilgQaGgd|z~x=2)H?WCCHy7eDc+rY&qB-(XlB`{bxiXim@L@JOK~w%g&9ga9UVW45U&KT;i zA9v8uC52n@kv#aH{Uu-KEapw8DDIFeIr>D5+Qvw!V0~CI_RvK_KE;jeHA%Dqk1}B6 z<<3uUI6Xak=Vw=!%CzGRwsA4rT5L`%ps%gYYEZBKjo zW?TiWGua_a{~_AEi=fY^urwI6*phw&XJv8MkdmjnO2(+;j-s^jet8O8BFVF4mT7xW z!7cjsrP-$`14peb;Xc?UN*5xmdd$*j?2=~Rzz0#uvOn)g;Sb9G*4^amA)T z`YFV-C!+;6_D3JUvXFZ2uW1uCgu=E3*I6Kh|9H#Q;?Xqrwb;*fD&=Pw3G41xYM+jV zWiG8ClmiNddaHDVQeXZ|ys$;a&2oQ7ntL1$lxur`K})#1GwtSzcS{Bs34NURKf$#t z9Q)uh0;Mzjs2t`=TCJKYL|azyWv8@hV3)&G9u%WO{{r3kk`9W$kRlb zwSP1|$S$TGh#Qw{a=Q|{cTMpD9vHdC24SF!VOcR_qC}K75OMF>z5Q{49k0>k(TCv0 zJJ;_$>V5omnR+7wbHqNArpYcKFLaBI*5x{z8}Y#P=1nxt`_XV`>|Mg?u%MM;EsC&M zQS%t{QN}#VsMw1+!C9*3Rgd|TmGzTzC#RT;>4!L&qzQt<@oECP=z%NC=^Si1m8vh; zZKgj`UPZ@2{R?l3e-Vs|JltTcT@BiKUQ(kELNEE)WK?CbK zQl(2vk#SnZS`Q{}CBT*fCGR}mOjrPd^fZ??>~nQ9@UFV>CN}HN(uZ2VO@pGlBAf6>Iw219srPJpywns*m;(Lgd{DUn1Eic(G5Sg=TO$ z4r&6ys4}HQtnQAP$hD{g)fsU%f)pW7u!Bq&u^iSs3$%nSVxa0)!jad5ay0(b^5=Im z;85*3k_G3ZI9JX3}gPn2(7T3kLE%B%>~PSU$$@775E#KU&OaW zYWtgyo4|jfYlk}D_KGN4VHu2^;EyT+j%7+=YK{$dVc8MhU;E?!3DKQ3f|9%RCl@Qu z+kvgEBtVJrnENxVLxom(6M;=nWLOrj`w96y|Ay zK!3u|ZK2CGvPjU3vHj)Kf<1ET=7BEpE$16YnVTs4N9XUm3Ix~5&!!H@*l6`CEbtGG zV87dvKPt66uiCT4b3)84*&^+76ZJ#}A=b?36LEK*`Zm$m$mO2Z8M?~dNYT^wCd6ZM 
z4OjLaX{?uS`AfmKShs>|j*nNAuC{lEA0!V8R~eK&yZtQlN;&HM|12>Rt8)ZDF(ewG zBY%S>UFQsrVeJ)#n}L+2DW^?^5kP+FsKB46?t7ea-*hd7FXyb}23M=YzzrUpIBs-Cq@W{R2!A8RKz8RE7B zgeP74XDB>ro&RibI<#|4TuXZ&8Qgl)lQAbv)eTxi)` z0)7&-$v)vv)fa5srQAF`H-+qgP=$GOtSq7RjF?*NZkz-8oGiL&{7IUv48|>cBTl9+ zW3>>w7DAQ=Qtc0}mZm?a-&&4j|8Jxeo_oX@M%V|p3~~TS(?n}R+F}^$a!?-Dzx{Xx z!2R#gLhy%zodklJ7+8O}s(+jdGU}?RVmcY0rekmwId{*R&)AQd$;`!j2DXuW`SHGm zJi(DK|FL!EXoXB9#va58-^J8Y7qiI5XensQXvpuCE{|`JH-X4AP!NZe;+AkV9Wc3`*rKF^=U<+aQ0tl6V$H%u*QP^L-d+87xR z^BRu@)ecu8)iy_rhJCe%n!TpPTjs2-uCh#_SjWdH8O%B`^1pi&SQ=n`I)~gJ%{X-Q zT|l(d>ZMn1VlnW0$uj8jykm+Yw!!J!*!qO%T-$lrdaIaIJLo^o0I2!jV9BX18DN%R zHxC2?8h}N@6)z`3wQ&d~1|ps+Lm=NRD|i`FGO%}ZGsPQ!l6=TWr_ONco5dQXXWY9_ zkSsC6m-m1@oFkMs8OCZ({)VXBZ6U;V@kwO^kW64y^Z?lpR{c3Z(W1%pm99;CI^mm& zTZ9K&4Tf@3JdPS+^@u)t@7S_ViteX&cI>Mp6k=*R2B9Gj(Y1`WuX&FVDJhrOE| zI_^K+;$YZa9wz)mBLXQ!Fft6|4OH%R3gThBAM}f=zI@gG&AzWL15`TlWRr$x?9{{i zyzCE;c@^_A^3)z>LJr0Efum~DVP5vAEh&DfsdOlFJBLj2O{P%CfPHD9cb2ttZF$SO zxuaA!Gqtm50-v)xJ`H}1bLnOY4_3R{JUyj&FnF{nLm5A)R68YZ9q}pey4re>4!S9C zif}0_FtLEtCI-Snhfx+nXtmd`+c#?qbn09)Php9Fo3nE?U|S*VKD#~U_NlAcpk{#3 z*9KU1*^!UE8oa5F5SWEUn&yjyOOthjwmJ=&yX&`tP++45-{z@4My*$UPuf1@T)wJKFUTx1&z%6=G@TutVpR5h>e4;x3~lAE@C z_JvYE@}bQ#6=4!1K4@d|>bCOkPCV=$KE2U%Y7-~0-37nFTg5T^K)!?tUErTREq`+}qS@bKqckN;TQ;mk z>tpoNVSc1#BVVB83a-7gug|mwZ%}ff=w-H*tWs3Z%jXqh9Xkg?Bp4nl;ZclA&~IPK zoxI*ew}Y3XFc>0Gm5~%fKTL|*5pa=J-T7PKc;vAw;;kgQaFgtSyd^NDWwh|F&?Kz5 zFd+Ba|4sBhg^h-dr66lT%-aJNZs1lJ>A})BHGl)~aW+`wsR(;6VXvDYW#T$4NVFYQ z&fh|mw$c~%|v;&CGvKqE!4O$81+i^HvWJKI)nBr z@EjB5VYf4~MZtjz+`Dg?(H4YyaC;NUSI69*LR&f@jKWpI_wUNysg`qK6jcmR#6y*X z1eTSc4d#dsX691h#tTT({Xe$;GAyg@`yPjHMFf!$=~OzUySuv^=?0}647$6!yQM*; zySt>75b2P3){UOu$M5xj?ia`N;&`34_g-tRImaAh%*>`!winIhSMMAi9^raS>FviN z{JnU7IkKud6+{gI4OYG*`ctJ)Ub&7N26F-*!jp?wfG31EisC9a2SqX*)6(-TwGb*F zEd79zAg#A-;)E?$$>>j`W-gN0;h+RMUcqq+5&w@tg~+A=CixHN5)XkhD&6Q)J_MQG zrM=6cdp4*dV0rkYYrYcln+7`hLm@7|3te2xx@)5U#E+``#sO3yCs@+lO2_XJBU=7W zq%r~w&v2#?3l|BDk;Cicng$fp(|0yYLvvJ26|1}S3YstaN=aQVNSXT)GAPS{fTJ+t 
z_7(-(39EH}W{=RoM8$guux1AC&=p;}xPF@j;l?!?ZTEtJQ=fDRBsU^0@CeK!_39P* zn7&*g|6@A*`RG8&4(vqcf#?uw(FY`|g*#R1SRm)GmnM9htj?OwX%2XEOqRa-(;5zg zB{_O}nfG7*++(W@SR<>{m-hny(>p1`C}8ac2rb@gbzQ2zJufz48;$f)ez5)o-`h(3 zhJdvS7=-9dSLp-#5KuW3{TOn;W}P|)W{`3~lbW#Q0~HfP4Sj)YB-EM|M1%O7lLg?F zdto{xX418TWO)qWQZGO=3Q4%*0iKo6@&UjzmMXFSD+( zq_p(fy2ELk5PLH-Ey3c~ZqwsMTN`vi>^IbZWZTtRqrGN3pWc3S+LgrSmXopfj4q6l ziaSL!&A7sRt~~@_NAG%L%=OT?B@yVoM~8|%nQ8(&DI}qdHVMY~+$VLAwgW0KfWn&@ zs8}<`fu2<=HE9|_k^dpACKyH$*Kd`APN`XbEoF%j=i`#0?3qY>;+#bxJM{ksUOQmBHu9vmxzwl~Z~gH~^sm>d zfK6-Z`{_{Q%US^f59RA;Wg{`NVVCoCF!Z3j>sD}5xsw?B(4@qQ z0??B2D;4#(vL`TDeySDntSBJ01n3BGX?$ZLq7Q6RyEJ(3xNi*Z_NqW1j}CZr0G|Gd z*H#(O-UD-+R&hPIJFu~!ZhGLG7Y_mDtp_L*Hh3@HuX+=>&wZw)`KAGPZ)X)am{{7D zcsQ}%?^O_~3sKG7`?z5SujPPy4M?d=#g83iZ%_yQbBVnpY1py5OM-}ta$3IVXP`}n zPxOam)c#@vRuVO>gO*+b@_wdzrsj#jXx}@9NDZMUBI&3==FlM@l=lD^1%C>}@orq;pS6UQaPo6|`MEow%~~JId`R z47gxXMGTEPf0QD7(c%f=Fff}3x-L_VL~KkeGRR#dMtifYi$ES-rwVnq!tL*D(gB5b zTTnin1Y-hY>EqU-w1+H||DY_8>llFYgL~3LcQ6yc$agWfm9pW71Gspg`~^PT0KWi| zm&yDD&ppuQ26;Wy3m&91;p=xK{BD1nm*0LAA6t-%j}0#Dq0<$(V6{s*_^4e@Tsp0e zjl_uS>bk)jsK@Ih5v2!Wox>^?uW_VJFBjw30~e0+dY%~FT+^g2bn+?WiIYD5`bg@UV{7GokUub$H!}8 zSq%)i#~aNhX)0w7hF;%qiB#>$1BA#MH?7*~RbW zBaxu7SWxrBGX21LRY>8%KfG`V$776B)#0N7PT;5}`sNngPFQ}YjU0>FZ3-znT|kHm zz$$3{2-L&(x>&H&EIWbuh#Ey>OTlcyHtj&!wd;|88P7X~8+xuB8lfCE!bFe}Qd%D^qLqtbJ0^D$0Y)%1JI4-;BdKTT%=kW37d#si zqKX%8jBOWFrcQU_%k@kH(;`BYXknT)u0)=C1ggx#?sR~GB8XVCn>G(jjT^9WDflJO z7GyiT&Tn`3o6b~HRc`tz?>sC%FaRB!-q<*OXUp^qPk0=+^bSjVfvmKUcZ`{hOaYk{l!gHzj))CC}J?mk07H-ya)mh}Zk&|lE7!`9BN>VWe9wCvt#*?Ar7 zz_{Pgg=SR=rPYrakX(zWarL4oBbJoBw^kpn;;YcH#sxLBMbOV{Nu^xXZ=x0!Gx850 z9Dc*+Y^}l|OI{BnUH^@E*vuj|nZ`5X*n{dIZ!Z`ZEE0V6YMn1_asAnD9;oPnK*(66 z@6)?#3tj4VluXlqUKip2H3)|g{^y#>Ay}|5NaJM!=&i35tsPu71VsB>sDYF9nClM= zLp^i$2oeIt71mQG`tvv!*x*WrxmQB@;!>dJ;u7tZ-6cTdKs>bp#-i{gyex1`V1C{O z^fb+qU>t`=BrztC(<+CN6|IG^RPiibkat`VbzD$QXvA1tTWVy`v}0!;LZOU}#x^9(wT z2h3}>b2e?)fv0;%hRrRLJ=PZWI~l2V^=%7<$xg3T`1c>0oQK;Rr>dSAtD#&vqc8$w 
zz&MT$45-wb!x??M=xo1j~;V70&8Q>#9I!>rNW%cl{xYdOSe0ZCl;3i+`==^-drN&#ntREdOch(e?Rn#G=DhfV>8&bP*!PQ#9 zXKS6|+vheDN5UKpE~u(3qq-lSlsc<;w>V)CyiPol~^%kV>{`^Iz)>zi=# zX)yBx>y&I6hJ>g-NtJ-wKHyZswg^LtN*}K^bvmHue3&Z1hN>`9^%m!B^Mz5xS(OUk zWQm6SlLVR!n}MtVUz%muI(hGDF(ROz*ZVSkYDw}4@+}g`lJ=n^yJU7KA3LI=HT88- z8MZz%*{N3*HJ~dQ4w{fkG;eo7zB%L9=>nX-Mb@^`U%6A->455e9ZR(=86VL#pqD19 z16M*g1MD@Dzzoolf5vsWLLO542K*Pe-!>rqWa`GLK8NzaSut1v+$J#1@MO&)9fUg< z`jxCO^Ru9nyJ*2i6ISG%w7g*luGrf#Km)np8MLEO^?1JLqYLkR#1f)eJ?t|EIw_SL}XJA}Mm4;9td=&*qT@;QU z4+vHfnJqH?pz))M3l0~Sg`q+9DfhYG&*>G3^Pa$33E;2$2|03GasBBpiT`D7(0L#a zUH@}v2eZ%4***vXMy-89$D7mPvwQg(4gW5?L1=J{3v3|yeo2}l=Y(mTaASWDpl{xS z4TeYSC1hfrc-mquBpx-bn3!f#VeaMwRSAQTz21Zy)2t~!It;9)$nipyLPy3!F?gAM z`k%EUC{lK*SwL$`D&U_$VMW9D#JiwF0jPiOjTD@4f2U5vb-WmSPBsT}&-u%JLSjCl z^j+Q7!;Ebcu&u)bG5Ag4X-bqMPZLD?^&O}NCYCN^pln`qMN~c@`f+&*5qJgI5G+-i zQ+;%G1KKw-2d_)E5>ZX~M!SL&WDe*s$?!l##rvpejBxr}rDn)rQwa=X0(gpTwQE%h zHUR}-Do#@3pxQ3ukpdexAToGpzT?vBYzZO!XDIM52zZ|;e}C(@X zn?lsl!q}rj=Rg(_3#d$L>79YQQHXvoM75w+OLM(n8- zak(X>8ZYh5s4X?k09n{`vxN=NHL|${8IQ3<;oiI;ZdA2hE%=V&J$>Tqn~5BU8ZE?A z?K@Vd{e>Kyq+(|~v=bzvj&MI-SOUp%%h*UuryM1$??2wQzaYRT9{5i4OM+JG!LZ-8 zm-f-pJn#rp58UGx+|4Qm?(&*d3&6Fl476om+_6JDFn};^&Pxdl2Y}cZZPB042-B`s{)pHNv~RlLiV>z_@Ke5tOBYW`OprE z@D|O(1olf!{Sx)^^O!Qv&A59%z^HmvjXi7nqTA<%wpthh!v9S8`sNCVXgJJXS=_Yt zxlm{1w=fhL5;_||kaST}DYrqRdXnP-79-2gj-mB^f_ux3Rt}yap{|P}6{#ocfBO*qy+MA>y#w==V!iM<9k zfa?WHxY09y)MECjg68uMTpWzQbHPC%UI$2WC}IMb%K3t+A6V3G9p;kYK6YB=tR|&7 zW+yq`6hS4R8z})?Y>y9{MsE#O{dZ(F8e^B|)F?Ltg{%%Hl9<0Y^%-hZ;IUlEuBy2WAx!#zjrgM#4N@S}&# ze7fQ;W|{btqO_8%&Jt5`l}eOTNrPk-Y8S?G)ga9D{oF&&SX|R@6{3TdAC~`&D)8L_ z=9`8<82ZyuBJL9~x`5J1xMxa%Zd;r-uliG^1axqlz<&(4Zw6 z66UUNK>m$kN!TWXHB9+7YAH*0W~}ZBgNHciY>15+Jctvr0P4o1#Ub5m(((`7KI31t z51>=1|Bp99W-^fMmx9x^EmT|!j5%hsM2wI2Mi$M=#(_V!&;ruG3s|Rt>j7f2u;|ny z*T)aybSC1q0pV@K1v5dV=6!JTAi#7#!jF~IR0br1v{E;-4LSwd*ZrT_>;kMN;oeA83_;5oo!9n_c7&0$LcM zAy7$C>2!p1USyh0=JEZ9>qM^?XU@FnEw~N6|(%6sN(>(k92@?pnq~5rx3A_2b-kgWXud3`bpsa8Mp9eP)n5mTez=^1#|?WZJjmrzzP3# 
z#r!W*fcSFl)}C#_Iv)!)gR;jhrQMnUbTlc2g+xUfWcDdNL1LqQmV1tRHo zGw8g89_ot9nE_D^&w{ii0pXGf9}~5X+zvV0lhvKH^_`i4xQxx6%~~NWw>bMDsqwfR zc*}xo(KukUex+h}3;lj9|73rV@FcKnxgRR!fe8f#`)UK=RJ}pTld?iht=NC;3OexM zFM!m5dZ?xJx=+*@=(B2Ud_h&=jF@nUS}iWl$RidClDzfJ4O@`wC&rP2x&TVAWT~Rn z1>PB+3(7#C`iJ0`EMrtgOmMRIG}51< z1hEh?I4VK${w#x<89MLx#0KmkqkHkfWkmlca5(@6yEnkb>)y-`?(ehUBBu!Zz(N#u z>5dtVxcgD7fd9^+W%i+SG$Z?*Mx-GI|0N}GF1cq{O0&29q-xuz0t`zv6I?LA7?dLJ z2G-BlT%b9(O@7sc0kALmO^}^Sd=^Pkic8*#{}Vmf-c?N0t4!};lj9o;p9JeT@ur`% zm#etiC0?%L6jsFCMfoF@;$DXa05?2adq-Ej$vDgpUh)5#&-PXOgCm_DI@Ck}k>fH{9ge(=!O*?mfRS`}mIBbnV5 zl9=+;c9PO!s-8m6ySSvIR^vlsb-+Obr66BRjKS;B+F?D`+|@q&p=!PG4CXMtv9;$f zyvm`U{Qm^0p1ofP)S&lW=Oq^e4Gs@fKYNg}k6uOB7mV_|SAsm;(wMnnM1_!(b zQ?8+g6BVz42p%*y2+}rU4%YyhEA+m1pSJabsPv1ui^`6Op8y)nIXvn?1cRDN-&4TO zl`qFl&cqj=%2S7A6!IQZt2`;kr^>IU>|!bjz@F`j#vuddQEJ{-5|*+`xsn21N{57- zy)bh}1CD@pN0=YVrv6&DcY1?nQx7*(p8 zwk-<-E(6{TXxq_dAAxx?#--Q!1)vYEF&7=2DrwTP@qHShky4d%y%`Fr88?93qV}pz z$1)5zmvHKr%#Q3}6mYDOpGZuxKKX8*E)VDpzB%j6xDeOJe{i!KpR=Ej%8Qxq)-Pxl ztO<7Y!(^3>hN77#__UcxxO=mSsVRe>zI?dtHr%>6kZn#TXEJY`q-CkbWswrc`RR4M zq)~_#oo2jfNIXV9s_nbX-aF27y+A^C1-h@i8QZ7cplNx|xy`e^IIx5yPUM z@aC>kdE5VnTq*>+8dth{2D-WzUf2~M8W!J6n-c60u}Z|St(dwzG!2d7FIcoa4Fc^x z0zv{eem4qoPWO)|*bOF2lrBrMmris>ZcT4aW7Ng_CQ_0R8;PWrlS!9{flKTT3^kp; zQxpacph|RE__KRvPahy?Z<*T*hVruEo3m^2gaPv^up&M+MOdnp2XMLRm9SsHnC!C7iau<(pF@if6+ z$eXp$dKbxyeE6ZG@TAaQi&ZApGNACS7HX`QoRLp>-n0b(vhNb}%~d*b=~gt^nF{)& zbff8xo^PHiRvY~ijN!@F_t#{W-+%R6t4n@xl%qyDvjCkAnVo-xjanuvd_Teh3C~{p zL7Z;B2CtBe6y|ExG`Hsezeh}4@3}a75jLN?76%^DS$AG!MFqDGjWFfG{JqCNh&pVW zW?xLtIow7B6q$6&d+l3*DI=bNXCxJ09}^qi8Xi?!j9~)RnY$3V8tKasP4;mURX(Z{ zg_Tfy%?J(q9fX!twWH3p!hvI^vU&1g3YFJ~k5wkuFT;6}*L^y=5=Aa&%#RU+U*iu^ z32yl)mo>4d5hd%4L`_EHv1Og{&Xh8Qfl|vWBJT?xjfOKp*G7PbKhjy<7oeE^78DCE;UIf`7`4c*@vY zCJr^d3ElL$YlP?9{v3}_qC+M^dFeEhwvRA{4%Gw?G58PtAGEkVuRqCuh+M(vbA%Qq zmER!ZCmf4d&%>yKjua^hGx()-2(Kw6K|HYl_QymA(NZeuO8>JdP=)*c2A-*h@PjbvGICd;8| ze&g^PT1gDc&m$ctr}$GiU$#E$*^ByD_Gc~XyoM#j#p1ER=dgfJGUt4kF`q~#HU26l 
zMpQAHj75!_T};|$g2VL2J*mqg-M61I;gpHsgnCw}EvNjYS##1&@9VkdfNQrxp36)a z5RhIR;vhjr)$W&gmUj+vpX3~?ew#LUiJVVwkEhW9!3ac(6;j-~=(@alhR<8j?wjwY zaBgeJ=Q}wRCiCcDUJN((d)S9lUMogLK(MFFyfq13OS}y&a4a; zQ)q8dUkxL&$#Z=dfM#=*ME3!=?fzRa+bp(BVLh^#>>i?<_OA=?qT&BFl&R<-tDA`g zWBs=nc+?`7rM4L(-7)0T+51Za2ZM&dOH_}akxi11voqKky&zb9q6@F1YX{X(IR8*G z{xHwL8m$|7Vs`gr(JjMDD^5nNsr2GYFFp;I;ae@2jo%zRPI5exiHw()a~_ZT`9%n% z0t_djWGLi9>jMGdXc>R)8G61-TGLa-qFL47pkhxI7;G|gq_rHWOKF+-PP9J%J{PV z_h5-NA2#cjHb1*^+GKi?fEn2!9uQ53GAt7iEe||@VH^}u-ASS8U;n6IJ^hMep>&Nx z4d0}`qtFq13fZWgJz))~Db z@Ofkd{GGSTB-bXabLi?7vr@WO!#-~36B9w)I$4v=vLr;s1+EPiHS5{jut)bb4Fob@t^9rv8IIT0IN@W8Wa|dWhq7Nm zaimm^+KkU%31=UFMDYIV&~ZbAjk*8RGi<(}awb62zq#VVD3-^sHV7#dhgSRqvm#ep zRK?PPr;+^Bc5E5@!QioS4ZXPMtI(duA?dsu7J^qq&?Ne`_I*L?^#c^zzFX4RyaQNp zmljrap~3UP^YL~i{5Fhk6Lnu8f12~3pPcN%AJ?O9sVbUgq!U*a$Q-=trbt1H8PcR$ zt0G#a`ZhKV zlZa$f9%Y|WBITq?+-}qE@jHT4Tw}4WJ}Q_^k0i0U2{a`oyQ~k(Y!3#GYm`@?4FqWO ze_|I&BKWhM!ux#aPoU)>4^j92CjIfc)`psehxN+%}C&L`wR=S;6gFI|d*S?^Sm4E^*(QQ16 zzNvu}YG1#1&y@Xh(Z7)?mrl`8)mTTgSfgKLh?EWrt*(`A!`ugSq2w4yxz5~JceiqK(M=v!z%6~zWQw?G@ zIPLSqJ<#Y)!)AR{24vlfdxkcs5HPL)T(Uk59giW`6~gSt*$T(O@R#*JO%3Te8T2+z znF|onnI8w4bchxe?DaWY0AMc;Ij7%Z@3hbv8~EX@v&4E~R`K^JSf<6paK>jWPA-4m z4=QqXqbY3I1Z@b%2FC2m2XV~r33PJ?Zx5MjB4~H$YtRU?s?=^`hO@3DGEH=GZNuea zUzmm5T*~38Dk$=Qbqn{hy=-6Y{^siD44+^Lz9_`|zFYA3x$vKhjU43(BTul!#bFKW zOmcdMetJCtB11uVhI;j<$@=b&TFx$-I@kv+e{I0vnyID0|D+-}D59pcn!+UiLJpT( zmL{aS{np#D#Plnr{7{Tg{pr-zvWcpoVyQiXnkZ~Z;Ku~qgE%Yq$sWbUFhgo2_GXW^ z3PD6DxJ&QoZM6J56 zg~o}|e=gB*`JD^5w$R>HZB-}PzWMPd$9i&f$rEI6OF#L4a0-8c7Q^nfxVHxG9uA?X zcz-c2SdPCq!o$HL!L0+5p_1%DekXP?jbw9mtMSWLpWsy`Npn{7<$>{~V1A*!@qtT_ zRszqr;`SGjKZ?BJyR30kn74RyKxq$F8U{B$-Hm9?nxhj6Ng_v zd3c=soWx=ge369GSG>8^M`>+>vQ{9bj502k$vzP<`U54@(r*VOw}(ZlizOSpt=6ZX zYfosQl3b3X{V9t{!_=9Kx^^RT*choTTf?F~h)1mCKA)g@A7efY4t&jXH{y>b zZo`;sn-_$;p?!qxL`eB&dNUc1xvM?K36eAuTzZ03g;=S8DAVxEdP=;$o}OoU%p#jfe-(>05(NkB+<>Q zYX>oBc#|sIeiTLen?k{Ro&c`>7LGyeS?OE-LN^2hX6VU*yGJ_ze5cw}@DyjT;@2zS 
zJFO(LN~S=3h5snwGN)4Uto3j#oWodzjfNhy8cVe_a7xPHkrGOv+L;87TOaxnfuszX zxw${e>m@}qox0Z46R|C5DBhUFV0oj}g+q%>!@Fjr<($WL$LCS@#{8USl^Pt2msLdkLu`EPrSSdg{)b_e?_C)N2l2 z?+*}fXZJ{OwK<-~*dbx5?rdw_?3GmXoUJcDE+Xh-McLqVPBL6&C`lhDTixn|BX|UT z(ZB9@$e$f_AzP66aAp$rm6C$V`c~OoOnR;*!W-*9cc%g!5_`qIt=^0-`6*Mtc|6XO zqqZ+w9TuG621#wJc5tA9abzBO64lqbf!7h%6jk1&EjATqPmp)f7x4gNM0Za%Jl`60 ztAE7z^<`7`Jx4S1?@rEodld_*&1Zf;)XTw1BN~gK8Ml5JG>jA&8eB(`f7LW$QQn7M zPab5|JfQsHWC_kUUi>R2#Mk+rJo@Lt_~t{KxQR9MGSJY zzC1`z5CE~hJz7V`gZu(sm4M?sXq7XVi46)CT(6C?o307ruI#L}544ToPCD#HMa_Fz zU@PP8C%hO(TMj=n-TC{FnXLTohEp)(9BQLYyzpzQ>D%j*a+bldTNaT~~Jded<5%51Gp)v!a}@-6h|dYkc`8 z_+sSsp_s}0N089ceS;US<}Z-#B4qt5F8V?R+pZ;r8^f@#SdRqocgeQQCyx>jS!%@eN6Cj@_``-gWd?X>p#M(wpSi`)&?*>tAlh7{Rb`8GM^#TD zkbTS(cTh?*xMYEQj`Vy+ye|ehc~(((W~qf$l2#f^^h2uXO% zhzq)Ch(OAl(H;wZbkpv0mI9>M1Jhus00ULRHv?j<@{8%o6ne|m6zTmGuzGSf{g)jRrV&=ZLX*Oh9|U)n}}|T}Yl9GXGJmU??sBP|TlF27N{(sQ^}) z54t9SnE%gvdZvJUTr>rN_;Aq7wtQ-K^KxOPmNPw#%8p1(qy^RZ2qFevO{i4B+m}7F z+vxf@-ONRXZ(3RyYTfFY^wBN5&mxuKBxy3&;?>^8F2*-zG=F808uvTuR8WjHmXu`> zHp(04RLOV@sSP|99{SF4{PUgtDg5Dwq(q7lP9HLsgO@kxh-0fZ9^rW z`-HH`Q`A6vyO}`!3jX*#{u}%g@{r&jPX88J9>`!98qY44g5zaus~F!c*(d3BKbE9vZ9VjSyS3h7 zlqiELLy$@7YZh)PGG4zo+)3k}m$yAVlA1$SO&r{*OZKq$x|;NnH!;9wU?3mN2w z3uGk;eIYYKC7m`sZ!IP|BvoZfIT@szcBwCmbs4la|MO_@nb!ey;oso@KaXMV00<3` zGEix6Wi$=gGUn%0IG$s;AV-cA1Sfz&EdJZh8=eKKRmQOaSKZOd>fbhN(@zgoAeD(u zje6-RWf-Te7z&KkGKcob*ikQimIr=S?{;W%q;gQT-;7-ZOfs zr`R*wF=%uKBtt`uMBv)S=ND*6rfWmbW)r`VE7;kX(!^JjjCt>NEMXwEe~?x9 z*X)k$1xV2=@CKq%Eu3hlVeCq4tuLD1R(5VaiDwzLRkNk5dpyEbLeO1|7_}g;H$Dh1 zp-06N?>AW$O~u}W6v)+~;X*3-hOWaQTdlx+WKM%S=M!$7#p%MAXfIwlROGQ5*CBy0RTff1yhZx!G6QO zKe+-aM?P6w`q@M6VgqfhGG_b}X~xTGv*{@+lM*0W_ z62rXa3Rla**;2X5oy8KlF(k9~4{|l$+e_uD;saG9RLcX=D7n2SkKBwxuSFYFr=%8O zAu56ZqBAG{--Y43LW(t{*>dzDcqy)Jv5&D1JR13k{xr0BvWLk2xlGD_L?~|Y8yN$( zVVt=rNATlBGMspfB&4h;v7jhVGtgW-*5an_1gb;fg$6gPuirK2ogd&so11XlbH|e1 zW^gHKLW56*heA3wOjeJM-W<=M9A&Sk(ue?S$!}K z(=lk}QJA<{j)i|dfY|8FP%@LMQFD52YRCuYa7<~$)5XHfziLO)Nk{q0kMF{}fB1M> 
zwu5sDnI@Bot1q}Mm4LQRlOXAecXNm41-3CQuIZ*cIcbX8Ts*1%;XNk92Jy`(2FHMnIOR^4YhM;9x+^N91-){V;x?uq5PpSf+;(SO9sG zqep-Gb?uSldNl=R0PFHvYf{6l_nPUFQfcAgWjHXk6ZOBCfsDNpOUD~em3Z>P0|l}# z9`N#59o1B$6d#{{>IIgv^2TwIy;1(*2MdjMSzPb+!WE|C-J7(vo4V|qpQEl0RC+sD zF{$RK(5jd@=hLBmHOqK9q?tzQj|4e5!l0;?qLcV0636jDmEFOSsn zDK_Zfat{duA!|afzF2rm3!;$kN{Y^fIPn>?a-{Q~lz`8&La$YPr=t zaG=Aa=cRWFMBwwzx66f%y=OE<&+(0d`IOS`5(@BA5Ie4nI*HCWMDv7IxXSve>z3E3 zchn>3pSVe+ugKC>(f4TOjvLJFqg$#j_%pdlAO*!2nm_(JX1JXBS*1C7EahL9H_5oUrY;qYVervEAkYpH7Xg3NecQl$g?Lbuyf6(upd$ zt;wZWeRkVgu9y*g((+Ji=ce`A*EQlOm!8EG+DhL)*&f^AzvQ$f`yr&6YKl(wq-#pv zzgw$`&8HaD2lmdDmpu|SW+)bl>--6L&!zwtNE3sRrYDOZ(za&2Q2{So$4OR4(%=*)0I{A~QE zZL+IKct2%0L5f$5?vGQ|-+EcON?9dae`>C8RC@&|`il4<4 zwVI3&Zs|7<(0m+^Alw8YbW!WOd$FOcb% zNH~4cc_~NZwieGvHv+ePck*dH;}>$JZO~b$`T*vJM!iT$-6r=rSD1WF*P0`vO(Z9i zV*bgeXzKQ$N?VSJt;Y~HN2gdEbP{1h6N6}4+}D^$j`7CLQiwAMiax}u1tv~Pr*E|i z%%#_3U-Ry_locx1;wpS!UVnyy2ntRDY0YRKv4KlrkuqiT2XPqbBU$PIyZiDCApGU#)$JYvThVR_KYlLv%{x2K)T$QZcGLH3(QzeDr zU~~A;s21RAAni-nXqvCz5w80HB)hx~pVWY7v==M5as_k6wr zE?SYNKlaqclaii*=6xz>=LshNrO-ux`h{H9bl-%-)}ZVviDc>c z#81&gbx?MdBS*N>6NA(E*l`6sCOHQ|1Bow|mZ8mCn#Sz8&-fe}4Q=Wj8d#xaif8*}HK$Fj+HyfjLQ)|N5{W#S`zh zg>r<|hX<}8pj)rCfPRu#_zpz|fvIOC*(WKGO_y@#KIM5aelX)3oIPDSUBOoHDeqIF zl^SPQ#YYY9uZi3mfqX|-ly2ngGBD;Ix2Pm{I7VWfNhH4|$HWmsv9hYF8&8Gl5LwsV z6VH**UzoYc`R6Q7Uyi0oA6f?H*N(brq!~$%`+-^zn)Aa9PPTD=VLU>KnuZ&(nt(Rp z!0(&-Kbh_p2%bsw=tmRzgfn7S&~UrTL=B}xe0Tsf(7wJdpdJg50Q>`0jy&UrLI&O! 
zc^M1ey}y6>X?;e2AhtA~>@V#}0;XGXR%ZkT$ACD#{ZYaOXyhr<*vl`5X98iSu~UucmU~cQ|JBs{`QmVyv;Y8aKY`h3LR8#kDQ!}iKBa0juNns2TkM20StfT;4=40>~U9b%JZ&Ql561)s!4@oLm6gpkZFjS##Pz-O}y$h zG$J&3MDslHr;Em^0r@P$N-c41Q-!1?q9qp~(GJ3ccsZN1q>a?Vd+OVtb6{0KIJ)Ob zMK6iFf2F7fD5NPUKz_n)({m`tz8;d z^5a?V0V;&$`$m!Eb6lF3^`Y!rB{#!DRnq=qw9P_#lqn^i0;calh4RBhoccWjUo#!Bz=-bfdx^vBjPj`z@*J9;FqE65Bui^PyP{E z6$uS^24ycB&BN`E!?gP5m29v96-OYmiC8E91hYn|lN$$sIR;C*`(=pVEMdlCyGxa* z(}Nnj*p*t?bmXMsc|CeVk~K3S0q`RiO(Ns`l|B+%vAUiC%-e0zL7NTHbPu> z9I7t7CBltLki?#|_B;;RsOt@YjPx*`fukWsD%3PR{2N6-FU<$8CiWZO$X zpn*2lHA*zqo4VM8#LARq-mxK8a)Pyf<7DjxExDxU^n`dIaER6Av?&BNU)Gz{Bkyk6 z?cN^1aCW2Q9}4C-_(TToQjMV0{oI*8Ll*FGv1cz)>6L`z^)~hijdnqPmSOBMY}A;K z0fyG$MN=#vFGS%xZdq{EHBe+8zOJ5VPZ?YnL4J*1v6@H)hFWtUpi>=FL%tYPgniFllWj6q)!h9 z6RV5$RR!7a!&l8D?QymUJkyYbtkBHlkt$F+*=rV~#WdRZydaYw zhDK^$WzSEQ_l8nf&(b}CG1-iTIrC@%9$IJ9EX~>wN(8jk zVY`YF+Tf^$^frT+HuNdLZ}j~?fnOHNNW^+zF}a0aG3j4d4S<2)`ARF)&gzy%kQ8vM zJ&FdG-n-V$79ztChMh$<*5~_}`)6OsT9{W7Eo2Bt4Z%+C$o*6Rxz2J(B&iJ`tIMb{ z&onXcuOS^!ic2u^lWCchFO>JX?ZlHh^cQQX&9FLE5tOT&aP?|nq*lCbY<6>IhM{f` zk1TIHnsQX6+f|=6r@^ZM5xne-V09byb%=PZ6%JGKTK&_)mL8S4g*X5P0r%fFX=7{bn( z!>C|PR8o1EMOO47L#F_w0<@Voq4+6#*bDk{*e}bniE#{dKW#fkZ5IwyoEtsLQ$*-0 z;0rm*xl)jV*$>cC>;TlL$y%lk>>2%QX0h~z!52HjSxor>GC>KjHzLmZIF_68gLJ4@ z41b@%-0I&az}!h(8}MPVq~JlY@0}e44aUEOy@wsJS*9APsnN7jH9YnH&3PEd92+)k z`|r;QUFu8w7}X-48O3ZMjcrBQnxWW#kAPeh9SaTRx;nf~-VU&GOpoIkt0tR|FaS>4 zg{3$KGWajQ8g07e4*y-nEDRKr{@a7@iw@a>k~pK_`&OKx;o4B7O0R`9KEJocafdW z+x=8t61E4y1`H*jiLMGD+4UAnnM?pjJumWh^Tc4;6I>gPXYIX z`4Qv-HbMS=nW^Ug-0kFmAop#+XR^L{2iIos$!d!1pD^*bk{Luaj~Sxg7M8`qpCk#n zeRfcO3^k7-=XbWlvz{7~DbAy(R2lfhG46`?58f3SRpQ{9Nq#rMM5-LbwvIQZw2xu& z;vbW0tFd*|%M<2dkkG*(tBb>*q7pQ9EluTD*>f?S!D!mG*ZxMUNX~m1vuZf}OmlK2 z8s83-kX(loP?Q<^Bm8%{cjcY5C`aSD7PnovP35;S^-KXLMtvW>M!6Gd2&E9TSPlWyF#8P9#a$m#i&xat z+J3CKl~jz*V^I7X-ufRvZbbp79{QUH?+|2#e=g~mx*Xr&@zY54 zN1m}FcQj43K4DL*XLbv3dbo`|;YuWHe2xmz;}{e!S1a9dR3>~!erc10D!Us^q38`~%dw|n;jdwyx(GXc zu7Dp;p@}v07k)2A_-~);C}V+r3gzkMv2A^%q*?iqqTmJM6bnH9%xCDHFf*fduCW}J 
z{h+Fh+^%f1WUs4tNx$KWH1eeCTo)EuNk1|}R}|cRYM)AAeaqM%>$S-&EGn6wQ_N6r zn_JR$Bq=$Py`jEq@+bu8U&0ynZuKn;PBrSs{xSS5Qi6zE;`R1czTv`K|mYLH$!t z<n_Pvr`hxu`1FSSI+WHWM#uO*w$ zANo)$sfzGl`s|9G!<UbemIJbNvmeTpq5zqhAxEwP7; z=9a*^&9_GiLmz-2k?J(UTI^(Z$CXm}*C)Y=Ge(L+N4zX9mhgMNV`g{bkzk{uy+9d4 z>77>~zDi`eQqpq*!!s7vBqEqpHhKmvsiTqy>EwLy0uT&qbQ zSKApz3p~^3`*57DQG=0!Plo6o%{uY{3Gxm##)o|~r-f0%SW&jk*+6BhciYDW*%>`+ zB$7DB>Ao0{c6_<}iE!m`;Z!fFc}B%M1p7 znuDKSGbky&Vyyd=!9V))gR{?7(+O5|)hQ*FSH8SA)2L2fN#3+<;VYecTHMlBz}G0i z_~Fg9q;bj-JTLaN857_~h0}CSy40JVuK^#`F&13M60!G6Hp_X*Pb>47RNkGFbbEc| zKy=4V=%vH{eDT*l9MuDgve@yt)a-Sx2j7xNHeY}W4ZRC$&w}r$tV$z1dZK#`hx9^_ zeXRv*pU1)3$t$z|zlmS|v}nH^IgquK-AWHY?Bh5OC&6Jc?X}tOvpK2xU@_^(S^bYx z#FC8vd^^GRa*?Zwb3%M%vaW*V_GxdSIYPM+vYKOf8q_TKYn~*12;D{O)tA0n2cnA@J zEZpX{4*;eNe&=Zu2%u;lllj-KscmFZEFbt_>Rf8#e$V&ZP1;KIjq!3^_a^$1V^jTj zaEO+zh-uYEj#&ge zFPY|XHKu{e70d~EZ2%Cj;e~y$EuDeD`=K_Q>(DM^abM`^M!#5JKj5B(G7(SBRrCmY zK7{@>40I@3`*HH&Dx;jW)$s@>X(HO&NvP+rqFKEgEH|Bk@taX#jmciA80c3TCMxwGV{~ey{qd2RV0*c7MDnqH33k>S zg*f~JXT4MvEfMfJ(-;2OY%=z%IogdP6WEoA3b+q#GhCL2T~Dy5WJ4;C=f2n>^JG|h zyk}JLjx@g#&B*cHD}b!t@qJD1r~;iOJJ_%^&v(xUhu;vZJQ(qRIBaBY!w_Kucj5H9 z(n*NU-Qc$?7fs^fc}q4Fa|#$LH*UU})C)PiJLK&`shw2j z2*3JCe@M%wyp`is=M&3>BbV_V3yScYOT6viY^3zR+@?#ebm@EVK9bBCT7(U~#dgOE zuhB=5pQyR{?zz!}kl?I=n2a3k-B?!d;p5kcS*Pw%6_qj8aTd(_dLu; zM`&3X{P>LovBrf(-0o}R)( zR>B zEb854Il@h3SBZ=JTFNahKnxXUS9UGe^;_H$ANuHMf$;V2Ag1C6!>b?Hz8G*Kpx8Z0 z0~!Fu6vEG3l4N5dk=B2t?{xhhr;PaMRNd7P0#fH`mds4fQehn^kr4AIku{^?iyTT&1r+2g&&VKU0QA*#vo)4VA@XUO{US-{< z_m?eykCbgGImRPsK^C6NVMI^|qD@9-&eyy-xVms<)xSUC{5xG$-vi8{_i+u<=98b) z(y5e*(nC>1!NqQi_Zv^1&sa7neLoTNxd@cg-FF}F{UVs4)x&cF*w|G)J@tyUv-fQK ziP_~bxM^-9A>W3llt`Aoy5#^vZ$*ySOQ;31J>SAT)hvXf`b8O=wyV`}RtWcun+E|k z@I5GFL62EWY1Grlil}}51*{1*z-6-YiZjg8UuVYPV0~CtzJo!)GW&6H)gZ5(MNsZw z_Qq|V??TNedsi~xF#XUzpX9S4^};D9_>;T`S!eM{^3hw~%P~A7#}-Fqs@dGrs(;F~ zl*^GfjaVtY-A0b~&!|>xl>Fu6is>EEryIV;`K(T(0w>(7buDB6h^yXJTOb8f(jdj(}t{MRS17G|@#1!);HIrex)7r3In;e(-t_87^EIGi 
z1G6BZWWc1S_#Ta{%9T51Wfe`&7s#kz(MbG}W}KqE=|^q^V*h;0udA#^O|mtBIvH~O z+@JKlpx+=dRtDamWxrG#%6|hYCD+4xT_onKtPdW`R>;?yx*}A0%FoZM=*EQm#1nwu zFX#*x`IUCwk&7bGL8ZTheQg{5%@y?cRljNg%Qko8FdX{rkBI$V<#`0io>B=`jjl-5 zAdu}lIdCZN0>uDv`}&*DrfbQlgkG}Zf5{!L-OzY)slUfD8(M1tUH{sbwSKs80QOP7cn<0k+EM%VZ`&>^DdkvoDs1 zeEbq1eQwQd`}OcImV&esFurC8&Hb4cqX58>cZf!^G4VIwQj+R>S+N;UtFriEq{dw^ z4CZU8H5qrwEH8u|R7k#XrBZP;fHmgThfBB$wa0uR!E3u&GS~m9Rlk`eFXcQ#?qiMc zT$U~-GVK;&dh+}54`m-Cq`Ww2$_Q~%UNc%{tTvABz<*+Qyc6J@#QVc>k4d2v zY3kvaSOw5tBHIX{fR%w>oebT58B|oyt4|h|u3x~5Z#E>B{nat#Qb~UesS4QnZ0(V; z-$kkP*#MBwy^2Xf;Bs@in?@x0jHg+&W`gtjkT@w3hw?dSL1Xln^+DDm=Fr@+(b=d> z8=74taIq{??CT;)_E-s3jkpi4vgO2SlecAH z8!7#`d%C9H@hDLo>sxVL-%L#$5B4eA;KFRZ1QE}gOg^hZBU^DU^1+w=45_ewUpY>q z{SbP|_DApa9tp+S-;-o0#KMSPoee~LJN4xvIs`jP*=u4JcSsbZ`NoiR`Su61XN7^U zH=Tg5E5Hav(s-FRvZhhzprv0UhC*OXMCzD(rsgjak?)bZNiDbrB2W=DE zVKR`6D$JVpYh>;M++$y7mi+UYYlXuT#ic0qbQZMsvWsV@BQ5$%8+|y{rSlIKix1%# zAKJBsIQDzuWh{{q>>X$t8CD=UwEsF`X(A&~TuVk0xIq2P^d%(v$4H}KGX0Ouc1KgM z`tLjVk-yE*0zKcj^H#4Bedq5K=M;xLw-Pd(|L zC<99o#_Ig}h=w^d^m)MhQnheUq5H0nar=wTWp$O13n-#8!(X4S$ZcHp{7(r1l5Aw* z7=Y$&JxpVCn$kpFa1E!*1PAI$NmKDdaZ{)n+kCR?dE;ap+H>x4o)(Ik!*(?ec24zKdk(_( zjQGvXK1{YWc|J5^yP=V)5;*f2_?qYhRB9cu^|80#uRe}Dv3c+RG#>9zst15|0f;PI z%a!~>{F=Aa0k{^IyDhEH1+x?O_h} zADSR)109dDyU&+oJe#ZQdE|a}^YY&n!Cu+9?YG}&huxw88=k=rm7$PiSqv(pj$=*y z7{{|83E-rg?uE0MuU9gjUl`oU>q4r+>aUbx<65M*LjANlsR_XpGoZPsD+2rc1>6@E z@y!o1WGk$zO%T&OB4a;XsVO)FDW_TecW2$V@am;!AMbcKE?HiG>2F5Rmh3*j3NBWo zz)1lvv)WpfHMH1r>ca*;?yz7QCv#~W?}-K!3-)dyGu+aHmx|7m&hEhOIB!UA8` zLEASk$MQd z)sRgi{k{mf*-0H^@Am2Uz{dKTw)*yVDg{@B0>NV9SP1+!?Scv8egcu|0I#@4_=Rt93=)>Ubkesz^33-WQwZ&#zwsX< zj|>R{whs7tj#uADWr77F!`GSto1}Kcg$s&r>ZQZTjJ7T{R*zo@h#^}yf0O4L%4aQt zt6(0}Sq^ms3$hJR`^VHRzs2nf4WY(PYs-TfI)4q%_SFE}MIBD#V5ac{ z)0Q8}2G_%umI3irY5z8&X(&wSFKYi<*Zoymbh1T7o=@OxC*}9)FQ(o zby|@C`Y)7wTIlWbSz@V^Q|{Z4CYK@f6Sb>?6d%F0H8UR4YXqAoL!T7vcvd(rE_+-N zo&pd}XjqVnh!E_vzyHkBomhs@g$UV--Y}x`7-}6Tw~ly3!I=wu*MU={%p(od358E2 
zGjICmdS_lJ8N0Z;ev8pBit5jNu=}{nNqxcrkV9$$K2#2_Ke&9;5S$%4D5tjAIAbh` zBaLt$?5*!|&ktMyFhZT2yU)`ID<|TA)8#10AGqiO_8mwuI3H}&P z9KAi&+Vu}XGo~>13?TCWkFA;b+s{yzlg;Ic>h3KY_FHZLg0|L+r<0_|Q0k)vfUb!aW1G z3i*B_`2H&!OeGuk$)P;oY=q?}>t>s&&5lS8{=CUQ5LO_mz*ibV_9-#~`UKc2J$d~% z_Q2Wj`K!CI_2y8+7sL7-zWzaREJ8`}$I+vLO?PRazX2&_-DAWwj~^-X9yeV*DY(+W zVq+MWGtAOt!(R+V^mv}WaWBW#|2Qf2OM9%Jw$^}4$P0KByXiRg_$eP(ppoVx8;d0@R5auDx9$>SP z6E0$TI|n)l*e;9ALfK$;W4uKO&j^_#vF(@i6;HvZ|GX>1`L8<1aUKsg?}GYP)G-O7PT;D-QPF4I_aqej#%JS0CL^mKAKyxb z!c-<{b`;y2P0;75`ie}miG-e^XO1`hZkFzt;1H@Iop(R;SZg*;4+>hy@cyo#%hVo0 z+em}3wPHRHK(vJ&LCcS28H0eW^*~=cZ0G(i-g9$hdio2#gwY<*5J5@4HTPxQgIC28 zZlUoXJUdNNGZJHm_z8#L1{?=ksXNl7q61JbJ(qDmVd~H9MELh2%*_8rTpmJF@GYw4mC_^<7Pb^-YinuGfP19L zu!O9tFR#)ye}Sf=Ly84v#F@GKGjl`p)Gd5O9SnZLwX2&4hU7iT-4yu&l{^lva4nR@mAzY zdKb}hfsBs(NSq%k6ePj5sE;*JO@7i>TfI&Y5(a$rIp)p9iXTWtANt3}?LYa~yXbq7 zch#_QkANbCJCGTqv0whLGebFil@KSz9ed9jOKvFcaaYvHFWi;EME}sOf;mdd-0-gq zQiQ1-2jz$}lSv#CwERH{zA=sN{=lsK_)HxE`z3-Cw+oZMRafBF{K41G-rsZDJ6%{_ z>5LMmCI1f!3k!o9jX`xb@LMCUel=wULi!B#Wl#CixZ{Tup}CLvh)rc8hO$;}e{ z2J+m|&{n(VPV<@`g2Bx@tJPO{g*vJwP5@^MN@FC8tL0r6a{m~wgWDGnY^Tjb8hD+m zp=K707S>w<0R1KVX`~FQ^#IZi_;Y%bD=j z?}>lv(Mefc89EfCWDxVQq*BJFk%6yQ1*<$X|a_grezc_f&A+eR-8!RyXqur7!vBa~qG4gwVvkdf?o-%!|T- zf!g+a8x#dTFxg)w~o+X82@S=0aqO!mkfSZ;#a+-Kbr3|#U4qphZY%O>Kvnh zB!L~dzy-Ph+n0HFpH`0SU787E`70(?W{@z{S{u?!Nz5C`%XkHDCBg~YeFFNW?B&EI zA+9KtE+B0M?6yS>`inrJNT=O)o88^#5K9lm0N9=$8G>ywQlUUE59HUs-ld>49h?`5 z^v)qfQYgC*0LPbZk52`pJWcI$T@a*5cZxKuU%hGkV@SYI@LR^wE;0UXX7$c%SC z-(>cPPf9#A*a2EFpn+0)plix&Sf+{PBO%^O$A0(jL;tL^CuB>IWFPVj5Rx?HPd`0o zZ8))SC!f9Z%s->sU;?Zk$*>^LP;i?{Q3=aH*fofaeRp57vB$6OqXFj~`~`?EmxraX z(at3V3dQ|4``==t|JNW_g>igjR!0dzVgcN=0IYERPw%NW=YtEjK-5a7C9wM=JzEJl zsXnaQ*Z*GY>)_SGswpY9tpJQ_XfTAZ-S5Jtu3(vPyD$1@Q)hE`3>zY9kaa{tTdX?& z=KP;8@>bjlD3NCXgNA4ZNw+QH{O0~oksguU!L31Zg2|If8^A>XLf0R?g@bjEcMlDR z%65AUB=Lg01YAPI;==SR|8rEqk>G1VUlt^M+gax=1V6UJ2|e+jl6JEhvcb?BL>aqg zQhG>{pj56DY!1=ReWcr~_RrzD0_{asN*Y^G^|y(TyfTt*Eca=EgYe=%rGV!a!01Q< 
z1Lm))vRVrXG5%6w#72q#_bR*t=L#vNLQWf;RLBV->1sQua`?{)^;CQJo4_B!3Bax# zp!1N1gF_l=jLk3Y3c+5*w$%F9s{gy%0a(kn`^?_smOuWE#|jKBl3hiH2fmRhWLV*d zc5N3A?>w~k^h}O|ibam~bvh|Scug-6^L`1415+wz|%{TLG+<1`pSr_XGO{g9$N z#G6s-6A}EaL(cZ}B6$zks~LF{Ow(`v=R-r6mmLLf);7m}j3;yBi-rCfQyV$Hac(1e zNd{HTMyz9UHZ`A*G~_ND^0$0#K6f-=Iq`YN(Q@o98M_MNA9#n!Zcbj zZ}>wgYi0t`7T-YXlhI}rx8MiMHX`mCyRF_h32KeGTFE3K_4(OAEu&2*>tRi_8=(P( z5N5&z_1S8}2~E|- z{yA%5jm?HY8WWZ-b)~JT<@_ZD#kN-J-^PowDL^c6wy(_1$aj%=zm9SbcQ$Mzbg${n zQ0V%-%I#et1Ig9h1;cS!+S8}Yg=?mJY{TTi+)Az_5{lBocnp@Nw7weX4Ac-($%qV( zf3zOzS)NSC*ju-_hX`*Pta}UkuP?Q&an*;yoU2kSNRxsvOnHKb6q=2SL*=K{eix-> z&-|Wxg$}m6HY_N!*Hr)i-_2ctF_+Si{hGC~t|7eg?zp=?*m|Uc;Y{q{(fET^_B3`| zDXc}vF7zJ2^;Ur1S5nM5Yf-Ei&bXC`GQla9$W!AV*cOA@nos_odV}t?k8m`WhPFz7 zW{Wd*=5Hv6V|>s3emR^D5G02Ozs6^*CTB~@3DBw+BTxBpZ?1Bq(OULQit^;DeJ~s9 zmdP%y-0a7r;fdhw4)Z&&IJKF{v5vh{q;U6&ur_&$;&Mme)4%WfM;N&#VP%DNH|!w} zvKC&g4D{o9VCd^2;p0liT2$DOU3Q5(rBwo6lxVmF{dLvV^2Dh6=>k)O)*BPcHJwIh zm6f>Lh$J1G2gX-k@W+Ld1trydEZw`B|MbIO$D@sf4nDM&Z+)BMNIWaY3v@rlYjg)S+LC4P@GvVHa(Vsy` z_Q^2W)IKp0I358e9f*v9=sV7)9QR?;$NEoSFpt&fTn@lYTpb7Y2kW%p$Oot(jZ!jy{BZjAH3DM-=AGWl+xzy9QZv2_ zMY4*bC`JMVX zYv~)Di3rC+$6?%%aQneccddJ7qdI9r=%OcMQx>3n4c(;1Y&2;4${2#dmg6QYr+^UkL4BZ3fMmeCgXOI1Z4<#{m98& z_wMO+xnVr@EX+Q!ck9V~$xW@=ptjbw9u) zr-$3mcg<{`lI7)Xw^hC8y_P8GbPJ%8{NazM@!~Vg{wprQ(UYw8t^~}^36oxhUZ0$R zGB8t)2hPS*+~}zdnb}JCZrvL>GWb=$!tK!jzjRq|T>Sn?_4Pr8;Fm5E+exThx%x+K zWes_54VuRZ=8qTG@GmQ@x}FTzD;e;odDyd9l6I;*@hbQX$QDNo_@k%V2Px#xc=43N zdqu(}c2Bw{vL@%VuJ+6imHB{zo*a(Ktu{l8xQ%7;C>r#~Z@2SHZLKCr?VC?4@`QA#jXZ2aPxG^- z-aovLWIOoW{USYVAHi*I9!{qV!FTT}KhJLY(*?7=x?X|BwU!{vp0;w3Eq>Ka3Puk199JW1}tdW#(nu3)Fw*chDHx)hu50UrXmfK_;$qM zl#}?O(5)`g)l-Y=T;yD1A|aqv>YPs<-0y2~hAd%wH1E#zA5})zIec3T%mFJn4Xtw; zs4MW*sWeNd^$KMGZnWSV2EQLv*qC|(4cIcY91Ndd)koPCk{?bA9Bi%+1y$s^7F%;z zdGGN52|Ax#2hWbemwl@Lw2~1sx+TY<5{L;j_ZGw*`VWAUVjB@p**ntsZwFGJQfXI zG$u=>X9bL96c<4$-^w3sKwrd2dg&M7LBfV#UF1=DcqKO)vk|^p<<={dZ9nY@PA%bD zn{!Fee2@HT;_f2Zim;_*-qnPDA{xouMPWx%J6W$)`=uTLUlmwC*;S}ck?=-C4jIV1 
zrr6nkv+(Q>H45^ZQcF{5n=4hOGjj4u;#a!* z_cMSS0L0C1$ci|O68hU4I`~kdrcyeV7ni7JoaS0SwW0GTW@E^RXb7#P72PEZ9q4eg ztvLOu53y@%%+Ho&q>pHs@K>9bQM3crRciM}V#aT;o>(dL zf}tIiwqHtF7EbHPMhEK0CncqJV1QRyQz4_lJJu>%c4lX6pt)gf6}22UV>2rYInfT; z^M!RNCDw`VK&1qo7K@>3gu-atjBXYWOrb~ipoMYHSx{WRZjUGE-2pGGbJQmx0WjHx z3b{W3Cln8a@HmC=Oht?#dE8leNsqdc)3RUM7PCd;FKrg%lK{PMS>?D?nq_r%9sp(0 z@+^3%8uQM?b_Mig?VIWrF2`zV0;@!QEf9qy_ukpIlh&Jp>(dnJmlCXf*S3PSiJG}} zsBKI@Z-E)u9D8RC?ArvlP5qUyaWgTmgD%fEAC+jd#Yq)#S^j{HO!RxT!~{d@6CO8J zT)a5}r6?^V33)6n;fSZHx(_3AG?KmmgHDt_J#N~b!B4G`75fw%fO##As9K6n%qUFx zARBVyj*F7Dga>;`fi;9q5k?)*ld9>&{gsjYFxG-h?e-NUMoeLDbO8xnrH>pihCT1a zOoptcrJD7wFF?T7bC--HGd#)7TLz8^5sl;Omq4_wL5FJQkzrUah?Bh6#0y1wtvg4) znlDm^WDiZ1YpG089l-HQAo2+03ldn^P(vc5$~71~pcVvpY+nlk!w@(x&Sn^5 zKpO~9T4+8W_u-~K0>52(zFaA9`}7Dh$pmWqRjNl2z0HLQgD0Luy?S%0=b!ntT7n*) zi#}gI6okuIGWa*pRGY_%vf*AqULcIPuV2QFj^@mmbXmY%gv5`jCMl5!_9PDH*D#P zNfKrN`webVod?gPVb9air*-QvD#&aKVn)_!oL8q}zs?#l^C}6 zU}Aav+RPx%lPfdy4_=QJe5|$XVz6>vWgdl&MK~FUXCB3^%REZO(=lkLqSwx64?TIzsWA!zx7hnV32d54?xi z!=Ew%{OVJtPGb$UGY;#UELso0H)R7D8%kyuiZYg=RHx#6OI;dz8?Za9)|3$gE4LAxtn8`Va4M30e zG`7$fuWW>X``ueg*nvVndZaKnqh+H3-s}(a3|@}5Am3c)GY;!6pE`iAEU;cmV&G<> zr8+#ksVQ}*X7!nvsQ{>_o22X1a1at;T5|F16<;}H_?PaDelbOLtq!PBP}^-WFGg~m zf#uLs8BbGQTwVdEcR4*RJISyCSa8|B7N*=*G;uj1atxjDQ^*kM1y3Z9uXBtia>C#9zbdeT5IDU|Uyf=P`B)4VnU_jR)~ zPv$F~UpjdB$u)uaYi5l^p>jSTV=C%?a@RAnc8Ef0SzH4jXLSw+`yw#SmPb?kmQ8z3lVEtN7>zfeFjLWVNNJPa_ zC%0>?&UI0E-JWkDZ>~y|ic)E|9|@hdEt<4Qx8pUrRWvVC>>Qm}35RqOWE{$^*1&5f zr#2EehC^)mSe7nZ_!w#vLU+V#3z>0Ft$kKkIP28NOqj$TZub`3m3O1WJ|Q8MH;wYC zq0!btE^Er#(z?cs^HNPb+3V)PadQQY1aoF2DOhy(bm(!62*Y%E5%`4Q^rZuZ3ow&* zpowigBmUOU5lRV;G|OTQ_=n*nK}nm=EIcuw~)Y{nNRIehch|md3mpMChIAFm@5f zl{m9HFnr)&PsxWnO?`FBhDNsV8yu%-!M`;Y)Dx zAp|_Bwen4@P~1?G>Wz!1mL;B5C`A}1|5>*9Xs_^4R_-fq;f3YN{IoG)=|ZdR5NG*G z!~4T@xcqf*suj~3U^TbEG6Z3q9!yInLI1_hocq$EUD-uTVEe)$;|EZWMc$}>(ta1B zu`(vC3(j*Dyq2og_rPOE%1LKs2E#EZRQPssmof~oy&_3Gg4K)gi&s9VjoLC+hUK+! zxW4MieekFzCWJ|>>yRKwD&gZbk)o! 
zRP1pZp?!LPT4#a7cmr-nNgGG7cjx ztJ4@b9=|{~ABi>1^+#cq;i1r-b$x}bs|yCmEgny$?7^aC2h7Y3pAwjwnBs@VaCSe9 zEs$+i8-jMC88-tL^P%6qdy8aNZgRJ^3wb6WFq$@o&+RDG0k=@H@5kx;`6E8lyz;w@yp^&d3(-4t2)-fP8Tnz)Q@QJO>pzFD#maS4(PJ zCtro$%*Y#|o7vaW{N*mLELBpGF4|FgS-@BYhvyuO5J5v~toe0|Zec;Nl<{{$+?e&* z7(^$+9UiZ^v99vBBPV_ZXF=!;+MIVHm-`5|eEgg57~+4e$U~g`Xw#$u2nH~G@IJc& zZ^b7eLJn&Oa~Hc&n;|c1krCtV;v%t5!Zd@p#qa{nD*BdI>UIUCEpVuyVatYqQ$`js zHSekA4VPwfEl~2x+mpiE^$vZX>ji&D1@lW8 z6!*C4J50>ByT9U#-BR>ZJgcXk^mvwcf`A+o2==!qs}&jGI_Sz$MxmcDYzVVD@7@#_ ziP>CRbTNYjH{uR2Q3;FtE6A4{!0CYSGQob_Rg2cUB>;}Qkfn7>tA-(jLG-TlWcLo(`u0R29+;|B7M_>nRg* zQ0j-$3fCdHK%Kp=p)uSW44NQTV6#(8ygSCrG|!oQxq4%UqKjd{#Zz?^bnt6)bez{R zfa_97f4-Lb*+a{$4(r7=sN&{$DEP7;XHVPI{yo)s7kYk#HVGn%Bah1x;W+>fvK^CP zcsV&n1K|KcppS_B>8e5(7%$I00Ddq%-_!~*BAr)5!NG$6bScJ~uiqi)xNfKNZd$~( zLv3cFh_7q()Cwwss0u*1gE)eo)q)p(q=b?Mb=@%Vm0a&_)UAFF+ZYE?6w1F9`tt+_ zqI-FsbcZ6k8lM3oS?MEY;lxArBenu~@f=}%a}Z_vS(QftFjr#zft=G4hsI&6#!#M4 zPjcV4BMs-;%~bD6!|?^Em3jR&LayXtv)oKbz21^J_Deezu)-bH*++8=aTy~Mkc;(j zHV~E);7b;x;S5>_zc&HM>-;%k5`1_vP93?jlJayqLH#?yXr|R=)&kS-8011XrC~LF z$E1#ItrT@*h2Hu1S?}Lnv1^5liZCOaa>(fH*~msFSkCp{g+4&RK}_W2(|RBV+g#)s zABT7D`PAF(i~^*SP!Q8PU0I02bB&uyOX*oG?oG>5H0mx`+^j=&fV!kHVlWKwL9Hyk zPC-Kqs*p67l7XV)c_LzLQkCUN$UPxR4?d&6&Ah0v4^59N&EgG_7?oyUpPGm*a1R|| z7>L3Ez}%Y1FH>yKAmB&6*E{2Q3w3q(p~zUQcfJ`k&bdhQ{bG5sg3D#=hzLz}X{`@6 za%$JoD|jMF&A*?F16ZHhY4CXbqEJa)n-9!)rQMLil18N`{qqyVC%rGN&)KXON#L>zzS;~ z4TGAMRa59v9Bgql@co6Ka%AX^?Z`}rwY8^yMGoFR|5$#(YRlnBwU(sM4~j3LwGEoB zCKD7`n_5I?>I3$K|1hXXX6zLdzeCca=2F4$eH%*oE11-mow=KL(dGqTEF_#-?4TVG zIphz)AAW+IoScs+#kNVU#r_fC;Y4*WZ(5dU083w%`BUEb1cH}?X<4`u$W;eau$W)9 zuc;Dxk07?hJ){Z*6NMNlKolSw5I>l}dfTt1pJ}-e_kt(WH)SOH ztb1qP+M}T`f25K4e6!oC44B4*kF8gE+1ZxC-E~VF|D19Eu}%#XEweEn#Lm?asamCFxn`Zq|MD6lz#p8+hG$=3#3{&|VL1{S05%kK} z=@UzHO9W=z?w*E6BI8|4jWvj{+G3^0fQC~eMFC5E9~bhtm^PjFd6aig>-RcyEo|w; z=85Nl(-pXoyE+-q^)%=yWSiiX8(gbe<7{^|WD*k)U%uCQR6*GvU^jttG=$Lv4Ka?U zau%}e`YYDToHUmA;pOIbA0#z~6QfREqyxY9#JuL>B7|{Iy-oY;YLR1IKY~5X8H|uA 
zVi}V=qkLshxRkM|>r460Hf(24-7}omm(b-GPRi<^Up%z|VNZ_?YHdpwtuUQ3OLa*` zzI;5%n53>C%LEcsAj0BmCqyP>g`>x-JHWaEHkaG5x@94~T#znM%C|B5(aeCgIT*r{ z!uYl^u!z(np=^)VQs@B9z3$%4rQ9tjJcH70Ne09q9yKFuW%V|6qie`{Q_DwgT=6&h zVKZejYpnL*Xu=d!U^}c|U`;?j07!e8B@}z$W3k(r64{a}MtD;rDlsYZk`qg2@Dv;x z&bX{o&U$Of-Vz$xi5DC+CvRESkUi*4N_B5h{E{b4&AVANURiHD(=;wK2WnS?^XW$2 z;LRjPD@>1ke5@HzAq@79SE_5gPGoFU8)Z^LCrK_-G|mVJr}|%K!@)WGIpH;prQl$S z_@~tFX3N;Tp{CT#f?!Mf_OHzsVY&&qn-CR(35QHcg!zPXvsiE*2Scd-rp^dN*U%E_ z4WzrAxCM!sW~QLRM657bk~33l{F8!ONRh4KbM^WtSq+)33+vMWLpRb1B(hv+S&Uvr zYqTiF6pYeaDt3Uch9J!PXtm}qisk6E@>YBQq{oX8T_%b~v(UFtHs2xN>am2tqve^R za_)G2!s_}AgL29$cLhcwZ@B*|p#Znj^eM|%_Tbw9@d1Tt?-+k6Z+hKW0EEoNrG0(2*^sliFmlR{tEDeu{i!ZmR}a>d=pDF{9UjY z*_)#=>k%4T?Nu`y%ZrPkfq%Df(!k}02*O#B=mikQ)?iw4IJn|e7J)<3_Evylf1lIL zT#tLX8sqd6Q~*Ft_I1AOKs!ia7WzEq6&H}=2+^dLlYLZh3*(GKG|KsD1;0EXSCgWw zkMB4LoYxHweiN}ZxC9yi#Bk+l1z%@t{|5S=hl>@WDIZH$&eY|EBLV!YiDEOFlhrA;5952B*o>sU*bSirj zY-1{KoZ!5$rC@TZXN%<=KxYYz#RDWX@rKPo7)PCrZrYKdH~@!NF_S%q?kT9y9R96a zJT*N%Z6aqRc9k^Wf?4$~%QX>^yptzRM3Wrcx9^wqFY&zmiFM7Oj*breV+Z!_%k2>qR%`1ayUDIKRo}Gj{s$m+>0B*@3i_!gIy&tEt+9; z0TK6&Wbn?tIKf7neCP$5SZ>v!?Uxn-1(P2ek=)lV`_5qxRQ`ts9xeOxM_GKYgsoSk z*VkHeB`}|2@Azn@*;#$Fi??LWyt=er;RQ!Y^zY^$6Z$W;6As}Sq2z+3uf4jYd2*cK znTJ6UW9O#LEt=!uRaY76(gN14idfjL)<%$%G9@6Pd?l~6U3F25N&|N{_YTJe-Cq=Ej!80~;9S^v8rRCy6La>( zouj5H_+nm)ll%7%e8p!Nf6YMg_(<=SFxOLrV>zg7v9JcWu34A96pB0>jPMlkBu$9F zin?WbYD{cG;m6GMn5g`eq`0VjpF_ffrsQ3GvuPxY%pEbDJdXt1>W#;}Bc_5^yZn4u z{kE)%rsWPS&npwnVh&M$$;zd|U<{@v^=HP#?0?faiSs8>ba^I`N3nU<6kKTdu$&!V zxAMd{PU)x7-7iJNVXqGN<%9`3u0)*XtE?3^skxm5vNMYz5GR4fdGVI_cm4U>4pwFE z6O3jZDzuz$aCp1ket#=xseT>Rn^dgVo$RcZH7b0@(oWjT;vgUx-F4jrK+B=nB5o(G{Q`low9T{AkZpOMokO zerkNm{cW^>gSgG0^>9Z>Q!uhc7{z3@bIV6O1oLi;4`&{QeN?^p_^VjUwd$Yp^A2wp zhtHtnt3OcC{tSAlGeCNAIKYs9p5l=1(et#Ox%2Od<}~#roJ?$)Ck#cEZs|B0|6FBD z&n*mD9M)DbwouiMvA*Fpo{OW+I{_kI^Zm2CEqFFXIbN}4D{|J$(I{jL& zKQiGLZ!tV0hR6>m%x~uEqQ*erk{&Wq(VdAj=R7xREtbz~AK72XJbtv*>2~h04sQI# z(H;fC)4vDLZK?!YJng+cd%E>!dVnK3`azLm%`=_!#LZwoB{WTQ@!>bBMAv)-3*k~5 
zOV6}aK8+{}&bi(>u-amtp4ZHO?gj1! zZjMrIS*>vDC)CSLn$gMk{rx8kbQwCQS$d)t)#y*M(enkY)gsDi8@M}hE1_n%zO&v2W+ z(RZwu(ec6RD;k3FFsq|1iAMKnj56LUa2xQuJEY542W**&A#1}%KXjtx3PsU9p$kn$ z#B~$D_LsjobDZMzDRQBwHy;tF8`eHP@}@4xx%zE`3%%`Qf2O;`+MBOey1hbQWmNQX z)|scLma<;L|LFMY#V^Sp>|7x{r{+NAydtxOW$3rqw|+nB*~SCCaRZekGUvN*w?%RJ zsK-~gD2K12^KBP&PhM)P=$aH5zGyI(U}RldFW60g`RZ8~exdy2$WN$*l-FNU`K`&w zsl|zNYwAmdTLt)G8%e~=?H~F#FDxAY9P&de!St6$EmIbX(xUmB<9D+tnoLa{{fAss(-bGL5 zb219&o##tGW8CLa#jxmA`93{wW6DWGXy%SU!M(X+WAfJ}cV0YX8l_=(u#Xxqc**|T zVF|>{iQmA($s<>;v9UqWYL$3HO6j)O*B*buEQ3CebZeTICw_FzCk?+CPBz|nAZZxf zj&ZD&CObnZ@sqxoe^fU=9|upJ3X*HC@2nWrxJ$fIbj<6BRE4mEB^^J9 z4~1PH=3VyzoAF?N7BSuFdLEzFMh+*esfgQ@t!K!F(hKT%drJtz+A;S{tTtf2fEM5zDMt^mk^gI~y zPD^`|>%nZmb?iESTequ1s`mEz7F)o<^abU9+0}5P9~32HtH{ z7>Ha+Iw<^^#;4}T?&Qeb)o5xKYH6UqeBr_cV+}#>3Dvi^>hj#v{IY|-pDgV@(fhVB zGt^b|-5l79^1@Pf{cD&uPV2r;edJWFMIJxh4a<4z6>8KjZ!Kz49sF7U-r-Z zn+HAkZAumQP?xJ`h)oOn54e5~&Ae$tzC1>}peXxb+9?d5os;SmR;bPmX?d+;wfc*0 zxsm%WM+wZ*zNHNJjWQEE%gPRX6nnSg*T3(Y8RdRddrHbfoFEqNipiAjJ#3FJ!)!D`?tJZe zr5j?S;Eb`O3i!`Gl(=V_607NJBiSEbZBr(R{&rtoDZw=pJw+({onn!qh;dL5J$|=3 zXXb@GvuJU#nE!Db`OcK#jV;ne{j|%j+?vg$Aq>4ZL>5)Krg!GFWEn5g%QV-|H-)i3 zX(OFdVKy8let%D?t6TV6I2VOGo+R^Iw#8L*A1*=sTZlPuJ|T3S#g8q0!HXqz4fk;T zF@m0>&sLtmsn-xME?aEBtMA};{7s#9TSUzOO$cY9F0JpahNPbv<;Mk6X-B!Ut<#rz zVt;>-qKv?2^GuZHKZG%F?@J)b<`*BncI_3bm7$KQ?XYDxes151;L+y?y~zaAB?9DL zA{y`Ec^M|*SukDYwRSn0k`?qx#s(7u7BM+r4p~ke7;qLc`o>f8MwD{RCVxc8_;kkV zgQKo>CmF2v1(hmJ9(dz~>+N&K z1GP}dxGhlD_M^W=xdqb^XY72weo73pD0Ifu*G=+kp|H_mca*^tx2)cu(ImdhN-hA|3k1y#1P zLB9DVrm@|r&@}}F=H=U=j31R{S<+VNTbiJ-o}aSt8Ox8N(Hn8PzNM0!twfZDSMX6v z=aRsdqzqcbNGW0C&M{fr+xN7?3cOOBjpQC3n8`i7e)w=mSN?<=-*38^+eGWQZbum@ z)YD=p>&;3-*atlveqe6=#&{R6ot`Ca>uss(T}0z=r9E3N>m0G-(EmiB%WV2prfYcd zc;y?>J4bp8XN%6!%hMDTS^&?aAd3t`w7U4SBPjS(2vz{>PDBLU4pLbWKlkd3UnM1} zg9CvK^LwuOe)g>|hvN^HJW8Ladz?$@`27E)>N^~6_Q*(PB-xvg zz4xXh^D#5Bca$Bn$q3nd&k(Zr=65~%obx;1Kj55`_jBLZ^_utTcF-dH4fojS!`K77 z69A++_tZzLPTXLAKgG{8F|{&YBRNUDkz;Rvpe4)5Z{lM1c4++Q)AcnMi<-M*bEM(* 
zXC%5RR&`d2zi)_XxRX^Rp5ed=oVVOLHLb{uFsvki!#tTl_j6MX0CBfg?A}!de?Iu( zfU^8xq&zd!Lf>F{RpoZl`{g(~L-Dpjvc7RQfi3k@?L=pcA zS<*)h!QSEcTsD4wZ;UY7-d7yHXJvCy;o;$bn!f=NnDZqgw;C1|d`%K5%hM^K-X$1L zPfG7kqRSoA)pI24K4f|2?p=@2)anGDWyM)bw@xVSLNxBYC_+VY(C`6nd}R?s3{i ztGpptNx5L z$*svb5P!C}utaP$*_ip<iX{W91a_5OkYN_<*@p2fQRa0Bv&i$MQd|^lJ+rR z2IMGW_O+3H7`z)KOAbUkd9~;DUq0VJ96`JtKbZM(T*uq$&W-H*qBZnsUzIv%pGvYt z5T1PZVp-FwLDfe!2`3Vh)FBX2C@OLiy}_p8?x?2iLdp?DN6JoGG&O6T3Dv!BmD;}~Iiz(`p`msfa~_giVP z(L~g-I$tFVfT-b$%_;o7&s=yGjW_+KL++^`iYtW>y_@Xk{w`HP@ysP@xL?crLoc;^ ztz;{`NXhz=;U~@2tK;T!4$f;_H}?7I=!!%e2D*gq_|4p=c`Dt5`4vB=_ObQveU;|F z+}&q_=wrdCHzwIgUdLt)H*s$r1ek-6EC#DEr3gMVlpe;_PKuE)Lq4qIG@Wre%#V*6jN$%U%X-+Ri8SW z@Kt@4+zJ7gE*@Sr-?)4o-xA| zFOhWpjC#t>R$Yb@caDtJ!)cFy%8t>jdY2f74n-pKr#yEtnQ6{V#$<`^A{j+TB=G6! z>9pDxQ@?#GSZr}^sTldD?uaVCR>4P~rgP6=DzGp#pb&os2;W9%+99sHoOAsOjA0Fw zHz1jKB{YR~EgaLCJu)u{j3>HqAH5MlBOTRM7i^V;c_n3zyHlW_8Cg-#+t0noM{<7ZL)ymm9BPH!TQoo0wsubexO^@R(nU)Cb(tS>Xyd(aI zgBoT3n?2_>(KvGB_-yWPk7?k1z3&ty6Sj58V)y9D0vw8`Y|)PAHkq6dy4RyCK`geL8Ene4dOp5_%wO+%Z(a(zYD6RSLccQ(twHOgEXud5vt zYpMT*Wuc^%h(4i5kt*7YE&^IR?mfaQ1Y=uKm!5B8+LDQj%P!uQS)hG+pHM3_ z9_d_uim=CRBmh7RqgJ2F>66B2h;aJ)Nj%_OU%RGUFT~1~Bq{3J+okYA!S#WD^Uds6 zR%2ZhChwz+G2JA*@J~B?HaBN_9^Dz3n9`D)mVYUitR(*~SYew{9-ZNJyp5Ra zvGwL`k=qRJDb+W<;1_C?x>76gINL1s`ft#tZU^({TPZkcKD6Y!_&8f2(S=6B9(SXf zYS!a-Oydom)ge!dnm#J&44|PnQ?qhs{jGw0x1Ol7;TpE3Q( z4M3@w?b)!9BZw{@;wQ(AR)0A>!RjakBXRcgdt2q0y2*l*Hz_>n7&!GB>+ihp9!|e} zqRDkIsdu_@g{2>zac{CAS}LxF?xB!XDo>UG4^;`y1wkREe>Y=+{)BdCof>j{JRQ@! 
zXY~<8f?Dd_O}`}KbCyOAi6cNJ=iGtkE^|%>|LSTLem&a*Xt8jGJc?xx%F=yXgTJGG z=zpU{QOkUnT{x1{9DEUS*em**YB*wQn6KR>Icg4ZsbP z+X{Hn&527sLZq*VRS3&-rdnI8z=w%QVjg47#kq)@tq>;3!7vY;E8&0&fEXSnX9JPZ zA83#m=4#g!yk@j!NduI3B9ix7*;^m^iwp)^ER=Ri3WG=q%u?SL^ z=&j6oy5tc#_d|v_O8ux8On5Z4TpE4_Pe&0^U|^vF_j-@&_a^3}Mo951cQ1eW@riZf zr~vD|^ED=ovrqnQaJ(#2>GflL$?nybzIr7thga#bVL~8!0LtxD-Hert$CH;NF*j~$ zPK{x*5 zdolmT1)xtFbPQi&yZI2??p4f}Ag9dBKV#SL_75;rNeqrqR&{yO;vlsn*138GZ&l`H z$05SZ_`xt5Lv{O1k2P>f+371GUCs%K5-|ON#>suF`YPEm|DJQZ|FAC^i;WoeLO?r% zfbRMU$UKWG?E)2=zXKL%Hy0HcKdmlDeLci+fIYpJlG3CYL!(Z9qzk`)kTl%m-B+Ig zC-L6tDd{ccfI8N_Kusny&UGG~gyE|A!Ec`*Yu}d1Gdmx*^XXihtPgO3A*@hSh44u0 z_7bqgzmUk8|C2L28{RZw%drg_Wk1;i(Mz?ic$5Bo_D4i0Q%28$H=eFH-Va|9>-f8}VeTwR%Ux$(~c$Fd73SBI6mA|2%PeJeM zi`tc`cw3BOW_5aZ$jt|xjeC8Hu;lP7Y?8mV1Gp$BSv}Ug{umR{J({=BagVbzTQD33 z2q~wvgxGrgyVltyjDteO3{CS#>rAxvtDyLTWI-gRB;chZalP)Y`fst>=vh+ zUBSAHfI|Bg^^&7u>98^iFErNBv_|zS(+OqZOx`HE*l99 zsVDLjHkGB_o3^{R6hsa-ZR)s_Gb0k5?BE}&cr&}uH!=hM%e#-77@{!pU}u6B*nGz4 znJW<^d1gEE1|eA&)1K`xpl{8*@6 zzdyJqg|l|k&)MihTuH2KNATThUE8UVn>YN>82$j{_V9WcD)+t&$W`q`I+MvS(ul|w zHNoq&N|PhL%`+GFWjhyANbnH$_y-6=vK+VU;c6S&;4yBnnr^{a#&f@Y5YjpV_8KT6 z2nM@b$?Mo34Vai&hloX6F=d(fvsaE`+KIl?4pZEcOXMuiEZr$M`XNVf6pd$`tWMFG z{bL-U4@jGnjq;3tF8*AA=9hDe*Z8)~LmA}%?y`%VsIxWD%YPIT3R?PQg0VWE5Kr9kaeq+58d}Xw+ zH&`*?}msV08EwgcEsbFvTi`jsE&7b*H^&fjL(oKA^ONZ79DKDHiA8gPt=h(+r z&RN`fsrJCY@HZ*%`ow9mqGhBaIo}*t=2ZqzyO6A7ggq_$>)RTF{JybKI|^!-&lyC? 
zI^IGhlG`6hvY#r4Itmq>6ZpSFSjz`Dc#qKG5(Vfe9WK}ew16I0tS04BPBCINV{yqzzjBl=4UvHym9S7^= zsPL+w0j}bJ-GgZHoQNIEFQYd-y zjquwwPCSY<;^l&FCitg(WNpy4QKFjD(2R_CU;?QFcGfsixJ%@Oict>rnKKhb53(k9TYr0=~7!vnfgI@6F25 zR!M`!*LNGFNd6&zVmxY!oFQ0G*ZKYjRJivp`pkoJoltdGXM95N5l>TIdR4a&msySX z+S-56`CNZGx*hwzkQ)$QC*y>cRHDBc5V&G5cyIhLi2Bu3aWFJFo(#xmg4VR&hDwl} zt`3}aF!+SX`lj8+#JV{Lc3fS7&;rHR*D$gYa^&UbaBtt-M4fwp=2S1Fi}F1>yRV=7 zjag)*#`@ia+*fO5pzs2v=|)S;=%x2LMc*Pzu^sq+2KxGi?{)4eTsdCd^>{VS_uLO0 zPtG55%B<~Q-Ez)}{e$xsj=TD$@a#~fW%ewnrfg#EpfF%dec=1vQ9YjKFR7FUmV+asK^VUkvnitXx}aebqy ztZpR<*uLR8ygCm!zBy2q!A1Po$1!+`3Ma6BPTiF|I!{ST`B?OwH67O549(XT(1w$= z=}!*Xmi-8%yEN#!j@Z`3bdSK=hb%;k=VzWA6h&Axc>qdZHMgeGfk5w?pNbwrEJGjk z3t`RdCR;V5iDiS`H&R8+l$R**c8eE~Sn>v+z_R({+W;SUQs(}UhO;`RnyhkB)<{l_ zpeOQY+OQy}%=&ktjF+$F_G`}{E@72S4+%vlV-Gw8l!Dz*AzP7CCPYn-xp)F!cu10( ztcMVuH28T0RABWueeQLSKCn{o;D7CmTbcW=3pnl%#e8QXoyXQs+^FLE zx1V;Im@j8B#eChH|2jVFu`I{%7lY$K-ebeL6JifY%W#F9(ca~Z@M?? zi$;OBvE+s$wRf=W2E=4dd9TNxi1J>iag}&|t$EQW6JOK_pLQV{z|R!j=Pvr3PR4@v zI<-)tVretBpqxH5M(}8gWUTd)WM4PK)&%+uBF^{#sTY4n_r=pSpRkG+{WnsI;#)<< zWE)m;7e-m!wJI}1Cl^){7+b$}hC8aI$AljFl-;LSNd7!UV^BZ(EY8{Q(|noZxrMfc zy_kbO@l?;IAlEWs!wG@^Y|WjGpG9h!31Znd{Cv&gEIS+h))Px~N3KXOOINS`n!kcM=3!6xIXzaiEV;Klo_!LNEc9^l^lr1aJu&y4U;ngrOP6D2RB*QTnL#Pd5F*bLjnN+cAr_?uMOfB#kP5J%h>>W3jp@S(8LK^LH+d@*;T ztC_+{o5x>}Wv9G2hby|2(lh}#Xvyc9UqATJwjTn|_DY^QF7(10pCd`2rq&P0Puk5< zap!wd#&ewp&Z~_fz3IlE;7$f5C)875(}I4xGXz$72^b+7@{_Gc4h^I4Gw~(koV~zD z`maZ>_4Yp}+GJhl{yuzhtJC~b*m*i@gR}b!WNQG+1xla(c1)_OfdLEt5BzYa#wYa6 z=SG+Z>O1b0@MYHAUGf@kYH(Wrns94u2c}$GD@;WE$aL$?3z2ETucBy7^~-fNa(iG! 
zEzQF|)S<8_0_~+k4nQ!v3_IQ?Z?1_2-aILz5dQ zWeh~piV#S`&u^7(K!dE%C%@ENN8Bd|;9B-hkEhj2v>(aaKQKDHnR6R6bfC=MS>koB zN}&e`0O^!MNt(>Xk{wMw0@a0pg!+Y~FTVE#NBsE^9YgEUgTQ4up*GtAtGFa=-kS%RU>Ql5@}MuQlsxI_ocmekQb30mCe zjJf;^ggzG%6J3v9{EM6c#S2a4)9bItYpebY5!HkkR5!(=s}vaVGRt!eg2or|u;z=O zVT^KutabA9X@(}85{$raEZapR_xa{w6VY~f05XIB0ckt8vRVy7HeUUc0QXWAiQZ!{Ld|B-n?|8+HOu7J6yqb=G^M+4ch3 z=nNl@A-Ez3xiqu_h4B-ohV5ByG2S2Jy-CaZdrO#ZZH>G>ID}{|)lfJ2eJwFxb2sn77kI#xBjX?+T8LyXCeFm?SbeKRr zq~Xem)Uqq#D0PT+kvO-08JcV}s<7zjN1#{$HQzY3pm%pd9#WYv9;`XoQ8!4jiCXeaJK~p_9*r__RW* z+4ghBC`3ibF_*o*Nv{Aw>30)Pz}l8Qg0tdcA20yZjkEhE%|bN$ryHy5YqCt)U<%Z0 zV5Q?H!wvA|&KKI9(6fwuWLcjE=ZLV}@=Ao0a6=F}}FkMtd@p##i9D~B?ty)+a^ z#38SG`lBz1m3MjfU=1Ux+Ubw*el=@bSt%|o)}FX&49I>w`K1{*exdH0S@P-UkPyDr zSbuy>IC7WIhrA0d@-K%|Kf&l+*arII`F4rIqcno_p^Wnd6veP{rS9wZ2knGzf~S(X zcrUQK}Bb(2MA`*D!HQs+1EF|a?rji zbKIY?>JeMoUls^@h6H157zZqeV7uRU%HuV8|1eHPyXCO9C^P-URekoB-L%PTxoDtB zu(dwx5rlaaC)FtAd)GylmhZOliJklD3PIWmQ=lyJ6m6fm98EsI{zoCm?XmkZH^WlG@_-}UnY+FDKN;9W52gw zb&ZxK{E>${QNgWE$Y!ciSlFyfxkD&$(dK(}$T|QExUh9&IgMaR4s+8%1`R~Q;{ERo zfY3&MLJV_Lxnm@07A_+gU!?!wfb+}S`5n`^8Mr{1sN#c36eP-M^=gC^&yV05@4I&K zW0k6@{z~HF;&J0uRDD5Vwp%XIa%S=u+gBMR5#XW#o#(~8nDv{ljt8&^2_?~NLvtZ! z_vY!$WqX0Vu+p2#%S-E^?w=bbGwaV4g-O;y7DbQ1Iz1!qaV7XS5)iD>{f}fcx)TZr z95B6+;*AqYuV`3Y#B~}wt(z6<+vUg^3RfED&Ro2FWZWl5kFrNi-=0Zm7V_Vx>$NvN3z(D$RutFHt!Hg>&sQ4nm9G~iqFZElcKPy^ce#p1=2 zEm78dNiI+;+*tq0`ZUu~w9 zy@3ehfq6fj2g*AY9NT@9s_GiKsFtM=IQl()ci!Ft{Wn1X##U;+Prw1#EJq|M+n_ZL z+FgZ!VnRvvDd5Q|m8+d60M6jn9ImoCcJ>Bbu^K8RbQ3>?L1*{{Y3sF4>H{^gOU^!x z-$U#jU=700WpSpDq`|hlIw)M_XD9Gq=ZGf$)UeFF#3G<#0iq!=)tvQ?ZZ+@EJ@c=l1? z9CQ$Pfac3a&kl~(HFdy6=#Jx~{R9z_WspDJ2yki1>7JqeEv405;Rr~6W8t@o79e;s)IqX#hiF84kfqI)*vGuZzL-gmcp z-yB@Qv&Z!e0$!vk%Q&z7t+M*tD8(aT(VrpRhL>Ci>*;y?zrHB^+F?~w$Xgpv$pGK! 
zhf*-2mx>OOG<}AG)e@nbbFeYLEdB!|+|mA^$VouGhXTGRxR>xnXKYxa*`H)0%-%@Z zxjBJ~6bW%apxB++mgf3vZ zv5?t%wMrzXIQrcMztHOy9aoOx&!G#T-5hF{$5Vi)N%WI;Cs-`dkFR)x%TB1za z`fFjr9^lA+XSz)t!6yr_yHP&?D>c5GWdZB^upI#4QAfv84P<4IKd#%WsQr9xxDiuS z&sAl;0*T#den;D3K9qPO_FRinw~gw>rNP({Q9+*K9aiyj;1II5BaIlCp@-rI%rR_> zJ2?#j6O8wdA#LblCiI$XUSoes0#Z>|ekFr$V%(T9eN*s0-T54a2VT{ir4GI3TmeY3 zL*N5uJOC!gzPHTPlk7UU0;|CL2gkdo9;@7X(V@X;)Tpmbwz~)@gXVelhaJnWD~MD(G74%BT0vs$p!q`wevp?`P7y5qXspQsMx7wp0_lvT69N>DSn zJc~eIvD`7eG`m5zasF4fK~6HQ4di!%Ok*7>C%+R-ZSkU&(=)l_o4)xE=7A)~HaEV% z%~GHQsU3X}-cMm>4JJr&9V`#XvxBp%J)P2#{bLJk9dMJ-r?acKD00r!TlO@z#}H6gW&?04XBaFvO)nFoH)3-XRrAb4587_{yE6plBGxO%I+>%G4hZKTdD|L?msto zNkf|`fYl9uAh-%t@qN!>w0|46hts^O<5{owYqLLiM=X=z(t_s_-z|A5(z3n#?TNfH&Cd|I-p>~ZKW>8QgJ4i_=yvY~>6gmA(l3+Eu(F;P zUx^EuQifq``h$Az1GzeNg=hmVD+`1@Po%{nL5YKA0)!LYA)~afK%$qC>gMdxu0tQL z0C;qD$kr3N!_aQpKFGhX9`RA5E1zBcp8Df(TcI(}z@oGAO|v)MsGxj?*^XDas<|z} zILOW}tHdpS-iBzof|s!bnl>b5DrmW${=#_C@z`{xYY~;`uSVDEMfAV7d5-l74O{wB z;rWlU^NMlvf44&12W9xr6QL*qAW?tpHt|#e^h-8VnMS}-uwn;n(Yolq=#kHKd#8(L z5IpqCDbaA} zzOA1~#gNfx?rs8tqg|kZ==$UOX)EPYiH*tgCT{;FSXz?4OV{?VofoWl7Vwu zd&?lS^<;v^vdWj9`8f|gE7Yu|`;aKX8>G16^;zh&Oz2D*;wlko-qx_5Fix9Y$vzy^ zCWT0IkMk~54rSLroytzAqHQ&2Q8jFXTVrH1*WrFnKn-#jvIr zN+_|t^TKp_$q-HiWnU0i&RBXGkrI#Kw1YZOD2glu`vrNvzODfZpHXb&Xk!pYfl# z0#-7>>13VsxG5FfEarY>sn2m;YWn+3aD=IY-xEQC zvl)zAJFu*PH%{OT6-qf-d5Iq9srfx1eH^@631K%WO-Jy)^p_;Timi|xxO=M~sLj;* za2;ye(g!GmCDqpD^?NrQPwiEq7&(jDpW;O9BSoUb-oBwh@Oso#yNZhGMDOM_ck;GOm2xSno z6fpTFSsn!K@Z4trlSaZ2cMWEu`Dc!Fv_=%~Uw|+hVzs5a7PT1-ot@{Qz7R^m9LgN7 zUhoQ10lhCH{OpQW~35Y2$_M}lz^gJdYc z06u1l)*>94Fn%Odf;ECvKOhLWFRnzb>az&>QLj>!n?H&CM4X#3A-{9}DyJDV$7D5{ zlh(WRO+wU*vB9RWH-NI1V?`hi+gzHk==o|2drv-E!c zr(kn(W?l|0U$JWQL`=&U`%`_7nCw?;FH{g?ly6u=`TbZQn&%VHMCAeM?lDLG=l!(dfQ>$e{D^ecH?kpf%XJXI=QyMf1l*YEn)a0|!(Gorg%zoe z*MV1alH@1ZUax z>=>mn?nkYbL&hd6e*!=j{Z_TTKxQGHw~WkyDRUz0dHYw(r2r8^8WOyWM_;UwAZ=Uy zJ+vjj+yPLuYMM@SVJL9#=p5f+s((4P3W`8L;m{e0k8h4}w@X|uLg*?%(SY@vk?@6k 
zkp}U}VlAEk4@ZKEcGfzUzgJv%&J*MhXPe~=%nD-8TcZXwXB`!>YE;fnX1uT?9maM@ zWI3K>)@D-i6^}qECZtwmj{jV-WNVn8yy;oj;VgKD5Y9(bY{;QFnHRs9G7SHC>z{Ez zuC+2f_DVK;}8L=k97@JR2Z@>Md^u@abEz*0r5 zT}=#N7D$;3wBQNwcx8=xQ!GS_+2BP$B=WbpNZ}<**>1RZ6h-?Gc5ibCu`nOdHep;* zN4Uy);w=X|s4Z_qM? zE=l9OOzavjMq5GbGu=bH6Rb6lxD|l>#_;d|Z;1lA z4f44mcmqH*?$dtL#5QC-V3`ABypFdA|3HJ%PS{&1)!!;gK7B6#g6);8p&cKoK}MKt zw=Pxf+c*dg@$~wz2cWJwRfQie&CEa{*v;Q5fp7y}gLLfiCOaO75ShBYS?wC5m)As? z((#QOiSIsm9*ZQ)y&s1Q0_tGa8j1HET@_-cJtk7-`k(QNt58#(Wfj?n1k!6JW}5j( z2gqry7w_+4yp9=y%)&k-8k+a50X)KAx;k7jLmJ-Ob3FW&2_iQT+=uKzk~#&ug7oO+ zO`5q-=~0xjf=oKT;?au@M$Qk9$jrDLQiI%d3vwH?z5=)soF7VEhM;Qpnp|t&NCV5n+9>eh-oy zXD8J&G)ZH^AOhS)Ild%+ngDS;g&Da6?W?_I++Qq}OY5*AG>0eZzqB(P4!qGm-U>S} zmpg?;1?ebr((Pu*If0YQb(5npKBf{Y&o%C3(i_5h? z*pC_F0B^cdTUxFdb0XEo^PIKOiCfl-6R*S~FClnu;UhHMx`Ll5e>O3Ny7G}<(l~rm zqS8-`P$;6g`w4ys0I^ZfM1^_)81c>%PEA5J-?X1~w6d>l^y8<=_MtN}xuVar&_Zzi^CaCLS zG?W`*2;NJC9|Q!In++qdO2R^bcvl$l7~)ZDyw(EXHttShopzPs3`GQ{J}^-P>mUxE zqRH}-cp$n|huBNeM$k=9*1yMsz)C2Q5CQj9t^L?VEQ5V03~E)iSP|9G$^nfYHcox^ zA(Z~$%R@E&kv*nF&WM9JdKVq|xXlv>IW>Ix=?^J)LGpx&{u);)_2!7f`9U2tCBR)k zn%a1LKeLhlRxGv{^xc3^y>uos*^%I4?!=PxW?JonDhz!U*^N(==w|bPU>{$n!B^pA8*_xNFJz}7N(!?2BU71i zFf;@j`hERkvk)&Jm#@ieAZSHVP>gh#p?q{P@^GC(V1$_OTFqOpkyM-W9j&a2WK_^Y zS0OkCQbvVeX0#$V+l^WHFtB3raM{V6nBqXdh8B*0_xX4lBiN?pk?>oP>pbZEdzI&f zR-tVNNRX0LOQv;sRX3!ddD@41!9u~Qy7RqMGii!9TU=Zge!Lt^MB;Hi?0@>v7{e)K zA73wsF0D+wiN6uc66ks8S`boXbWPuLDXI1?xUdEO_SD_Un8f^b>LCi~9^0eLd0>Yt z$LPThBkW!)b=bQfjQ&&gmqH^5iV5m(8g+#lY=;1~jUf6_YR*vL2lL;`w!QGKQ@Z@x zvHEuWPZl7*;kU;^i9QHoJRBI%o^0FAIz7^WW4I!pA!w8YSDiP zi-~jdPU_~>fD&z9>kMn~UyxiKrjh2ee8;BRC zdi_~$MrCg^i(^swvf>UWpZF7AhUN7n8}m4 z1?{JWSyuN8s_%qL@3Lq!yajXc7hjqck%%Uk|BS@y*u6y;P>+B)4Gj!GOFmC#JXytzjPc9}}k<(~s1 z9sU0ZIbkfOtAq)(0|wBN;SHuMKLC@^MtlLU4b z+`i$69cUK-nI~+tfp+g>_SThC>-L4wknt}?x2G24mFXX)89+X z)g!=)R~qX-D7(r4cEF+ZX+N|JVRC@Ly9=rEkpOrnVf14xV)JW$1*q0{Yu-k#5J$Pt z^Re@=vapts5VI?zibB4=bT1T|QJh>~(=?Ek2U&c$Y!!@p7a-(HQyXq?V_&Y#r{`4v 
zi(96U4{#XdJ#K|oleKw&Vn`;8L4^Zs6>XVT;pD;<*`gV1z~kWc0;hDf z=-7<^8`chS&`3N{c2W{qfx0ooVT`W;^8{Q2xMB+;ysL{)EqFt2Z3;2Fq%v#6b-uqs zJk7NX7^RVG`S;Vm7GtY?&bBJL1gU7F5pp(P!eRiu%z2_=5tUYz)j=*JzXGM6kUqcs zOHqn9X}sD`B0$GrhoG9bS|NFzXn|13-`z68XSGwQ-}3 zN^~jVkrzHlJ=|xms?B_7(_fU5M&_r@WVRQviNJVy1aX2yFOnC@9ta`yJERb!l$qRm z&-tP36p%{6!Jk$&tq@6;dsU2{Qu-QMP|Sw696*mGbR#%|nGKbS$8-18AtyCC3(^k= z4nh;3>Q985p_t=Yt)(#Y5xeq*=ugX>xJE;z6G-ktgcXe5jkVw})#Nmn-op`rEAS>+ zuO$uWmrE4Ev4a-|q9D!xyz(qzXGDG(%DnzAt6HHUzn-+!#D#(QYyJn%Wegc|BLDon zqZOx!NEAb0>#o;lx7j$rmoQdGlD586rr$sIrCtCz9h%B^tL%2CPbfX4MGc@gl-!Dd z?(e>5mOP)^aoA?KN!@7HV|a_io`;e~@ZKxp^{bCS(DGjbYOPp{Sp;+5JCm94Ffc}(mHZ}fGQ zoAt|th9MTB%2$g}A|6+sfR!8bES92{?Tht&Vt9?=%B^qTToRzq?gIG3$n-j1QypYP zcuib*+0y-sl5Xi4nbzl}J{BSw&>y&mPEYXpyoaa^_+;F;3(cPZG+Sq9Q;O{)(BxYL z9-j)Nxw$(M8Qqt$x_APzl%Uze@J9=i6g-_k3}=UL(W{95yn{^u*I9!`|7cuJq=bYb z_U~&CpzG;;|4%xE!e?R+S)8Jw(rRh$Gix+75qk*kyNK?NLsXQMZ_bg1IllNQIs;dUW92p4 zIldS6G02Uvlr8IIsv8e_HF%^DwHmwGlpbwoeb*D{R!rIcxJ%o!nUB4oH$aA?@gwc! zTL|;h0ed~OTn-Xo&=pC~Vqjo`o=P#_(c1qCW~nxK6CgWGOa)(w)>d7{iM1ILAeprY z`qD_r`T;Ieb(z>1bW9*B-GqJhCl=9NuOo;iDBl%XuZ#;*cr!BGE3i-hOc@q9axEO; zWH}~ZscXly6?u3HwSeqYT6K6L1d%jrN00Y$cC2%%u*H zD))8IAMzZ(4|`2PBuzHw8E%ff57x#?+~bkK^M|m+V98<^x`ejOkWvx^i#U)eJg|vLs=fO95MG)A16XsbsL=-k z#DTlDYJ*9v$Jfv1u#>(?_umTloUrokl^!9M&xd@!XMd!*Ri-9HtKR55s#YwB%GNps%4QUIeoY zg)iUuXZZm=P)MeSMr(v0UpAL&V>o&W*&+Gdwx&} zLa1U3LKkq;bWh->Lr~bAk{zta15AXFcxGDd4+w7(#V3oMHt)eKH75L8^WI8PlrL?1 zUivd+g)-+=W>zyg-K{_2(5!7>{#mZ@rIe+dDrUHYftiJ;4C!qul+f{8JZHd~Efoc` zUawfKkh=k><16NYmmFZ1*_4!gy0zALXRf#%-r2)e1KyIU_n7ujkQt|>}axKe}@+_caRO<~Y9;ndsA3Qm6JT(?Dbej0=*7Jjk+;~4|U z8(zlpjtsI6@<;ct8bQfFk198ZYPvhx2>7v3d}BW}yHv%&#P$0>0WNq?0s_jBU}=i! 
zJ;)WNhg0Ql{&^SDv>Q72xg4=WiweQ-qd|I-;JgCc1gjrB0>`4<28X!*)jI z4Mw_EA<7I60Wh3_p&t_(mM(daSA^GBx^H#PU%_MFXf5LA1O*>TCYXOJYiZKFKNt+$ zBUm{SvtI>AVJQrnMW9L~h=DH$57<}<5qmaILnp#8YT!`R5W=qMD81}piav5g9a)4_ z8>D*JgL2BPvZv`jH&ehXbfJkj*fATQ@z~1CN97J~M{yMIksG~1H2%>p_twCNHVTzzV zd6b@M^(w6N)&H;5z22b@N%IA^J zhRpW%U;83!@92|^% z_yrCySXV;Jq~##G1rY_;c}rQktU*H%h8KISx0Ke74`X&UeOkjJ8UqPn;HQ44sf&(d z^do=d5q8!Q@LUIME&xzqbb|ukts}#+75K7b3nHMq5JV|QLkJYk^dYkd@Zup8M`le< z%;eYJlg^PrIOK=JMKPG9|l6EFQwRWadKGZ^;b( zl9%$l&URNH)lx>G3c7!gj`}M>qI_Ke16DRKO!&K-oa*`hhaE^Hm3`q=)d{JPlNWI0 z=aK^a2C+N98`M+dyb}<(fea@^3O_*lpM67ez8h?LAojVUfy}nIUWm&JK$?D_QgtG- zMbb9o4B}Yezr#~*qj{&(7x31U0y@L%4v*={TrS7w?MN?F(EN>=Ph-?DSMkcG9E4)9 z8(?}e!Hci;!poP^Q_g+S&NDP*6c?dOXjkWrwmk&|uBkPk_RT_(H=6q6ImYZ1qo&}R z2kOE50@DJ@HBd$oRKWR$Y|DdV3oqqBvQeLQlbDDI`M%HHm>RjG zi3}=oZH)r^f$sv;^ab!LsMbYyL!7$R!mS|4!?MH91^}AJ`~yMS`j$&ax(l?Q(?6~> zp3#xBj=*R6=HVYfZrOhbqCN~>vjuZ_!^vF135Gx&^wB_Mfd+x6wCDn0h?eLVJQie* z-b{aU^mY$sSJuekWrSYGJ4lK=VuUYe03~Z!kK%HJ<)QrB$cRX=MdFgnzwrT{sX*pS zx$5GEzr6o@c#8@V5GV(NBk~EE5Tal$V@p@8!LaqPs!wsZTK3>6i>EW-^ngu;e7AK$ zM|DB`S!tZd3`7Sk7lp++P9U8k84n?u6Vrk_@|j@>nm(Sp%Z5sB8U5g^NPdBKlZ2_5 z?>sBvZxCtW()xayx4RKW#SMup9`H-s)j}<(`Ot_oM%Sg_(T!gVw#?D zUGDpfvM<{NIhCI`Lg+_WRlW#avW;nDby=TWzvTXP@bI9-b+}M?^%s8nP)T0cw{qOX zQLvr`JCc#D4^Zn()fmZG4}oaj&JJCS*pvWq*!K*^(fkae6o`p&%^aRhMBz#*~ z0x|-)nKi90bfl+omAMieFm>om`S&Ac%l((2DxIDV`Ee*4_twZ*XhD8RvI6wRLdNE< z|D=Ej|NqqT>G1Z$e+I8fy|!jug=e(N4;D8MAYu!EPP+ow)&J$-qDsBOd-O1V9pG zvDV1FIDg@EunCYTfDY{yytuZx`4ROU8*1(SeMB^*_93fteuQCT1ldzq` zC>&C_Mf}?$L;e88bFoTaVSqr9xP{r%TV8wIeMsOGijTgsKsh{?`>i68 zmBvxm;9aIw2_$mc&5X?7w84`OkdkD7z#~dTLP02`n$sLuDAu`2rmKe7!r{M-{=|Bt=*{^zp)|NmRkFrpL@Nh+FF zNLEEkq>!D;DtpV;B$-7bD>90V?48VPDr9DpJ+jB=cF_4fU$6J~{k?quflt4@&ewTf zdOnZmaXgO4<34V;>-}Bp>gJS0@L>VYJ8N0Laa?<7f(DQqwbUPb&xZ9^A_U9Xq-FBhr0bY;X zy>027_Y>FHVSu%?zajDI?Nj0`WJol+6@bGy$tR@od?6pNWZ)hRWhNhJmrFiBF6xXt zTOVo*f&q>Y33afaVCS!7Z?!JF7r9#=I;ts8=P8=lr7*7Nw)4x%VVg+uPqL*g0yKpu zf<7bVpsm8P?1{`RdFv#)pkWF0d9E0&rfi1FzzVmeMLwiy=&hINkQZ;6tO^!-r@AQ4 
zoF4ZgaTlNVW1r_t6ocyAe_Sv4aXK88X~jwUfR4?tK7NgU)=g3TZ>7ddX%Z4En8C2u zqrc=v9|7eAntZ4xq07`^k~ao16SxIAESw+qD$`N!LsztEO-gQ&Lyi3xbc<1JC~eDm zt*m0AUGarlV2CYU+UjDY zv!J28nK7kg&sk^ZNK}FrO0*IE)4q90#e)_KRPNsJYUSig!M|YICdJ}82yBu>1`JJ_ z2A-58e8hGKm zh9Cz4;xk{O{Ugo%RB*E1;P0*A%_7%;&YqF?vO4t*G`!mckEPgEB-tSJho%eKPY%PK zZ5|Mu11!vF8*g*}e&NYOl8RLUCGQ?3kp?APDr#dpk{Gre?;nM#&$$;9K-J~tiuxxC z!gnYI>m}scwh2-OX0|eS;<8iPQNF9*`jw-YgeV|@dV&Xmsx`c9gbee0{gd#nDKMV7 zEz3mK@;XAoaFM(v{!lk}>XN~!joX$FK?~7T(dVIP)&(U9N)qL-7s4vt#0Z_W&HQF# z5BgmO{Nd007^xRPF54Ne5-!De^?QodpJFXvFdiJNepNK)b6t(k=1JsMuCf=$TZ8u# z@<~{ejLbI+#&*tl8ZGM@9LFP4<9UwFf1*HwjslrXwn`L+d!oRyBpf=<27M< z?jFVQJ5O-vonsx*iZQVVmw=@Q!N^pT4Xb#W*hdikVZ42tmZ&rGr#Q=I!8@8&Dtla| zOl_6$3z6O7|0sx+##$pHhma8^*w`0)i(Br)tTXV$7dS0s=UgL`CbqZT7ZE0cj||Hq zmNS|n&}gAKpT`>ZC7Q+FRD@{OEiE zDvN9OJ3Bjn?wM)G<;5JAEx*@*FaL2cKI<#6n^*C>=yScYmIxwW;e zCDv0rO0-%J6Ha3(rB(QiqbHUC%b}k3*gl8XZt}=95w;^n%{y|eI{ME^b5N!q(4mKi z!g~gjd6&=Z&?m?wR^+J9AFxMz2X6ja|Du-nxp13);nfA01_v&kJmZdCD^MW^{^&u2 zAM%uE$-tt(jhZpf&&)4QLlM{uZY$YErk&jfukTWo6>N~PvwNL?se}j-Cqjt2Q(J4S zB^0=bffAjOv4b}ILiPqs63dnBO{6GA@@cO`04wM=X&>Qd&VY_B zbvY3Z=Zp~P^Y0DyC!baDA>E7AF3MF9F;7fqo95@_tXf9{aU*7EoISKJkbGuqOMsdY zN{i@@;0(0X1GL2)`D{Z@f_N~8_cXF(_*sD2-?hW9vQ-1i5r!x<-KFQxwqvt}K88S9 zo%lI6CSh%mBrDTtL!!d?NM-d0^iqfQ9^lxCAG@#BvFM;QIBG;s5<@zIII}G=C;=!6 zdQ~!wE2)+2&^Tg?(J}sHu*|%UP+$3Y*tzVRsn4U+BLM~6-@*m z*{opzFoRi~^WrGqTd1~?ejt1!AjR1?9$eG2*?xs(6{V8|twcF#7^O4_e$tPi zQ#@4fmWqT)rloN(NUBS-3Op`|j|xYrZC>w`g*9l0Dts|NIli*}ssl3~{o9`9rrpBG z77}mf?|#3)q{_MKz=o{M{=e4+{9;~zqEvG0y1FKps)us1!0RMtpAe! zFnLtN>(Y{~uRlm8UGehNc<@veQwXdN8OWAV>m-E5E1$gp1tx2Y>HYTCA!D-jD*%GL zTFD$JL?AKqT+nQT!2$<-C{zIQZr=tT@)SS7xwT@qkr=aWuOlPOL}p1pLZf? 
zdWoBVDb6}2Ogw&qNNnSz^4TbKHP5J5K>kUkDQ3HGxQF*O`L2rss~y65RKNpE5U z0h-5lu=b&z!N|@hvt`wLq5uX;8-H$KINDp`{g?^n6ESil^4hmWF>6V12Frhxp*8CX zA^2~b2nfMZo5c!$3g8uq5j%xK30?*)L2?(D0y#$no*aRm1A8wJ zeJGY_e-8*o_c)9Ld|>E0X&B0jPfio50ejLg_QdOvLI~FQpZkNYya&dxxst746+HwJ z`mpoBi%{>Uf!>2zMqn^x~(_&hpQTm0uOt$n*aOey48Fo zZR4{ZRMxi~h8AWh`wO83rE*4P(NU*T#BKj!J(FkS$5>+aH!;r~V5k~@=-}j)E!A4f z%bt{(=x)ogbJWER1--7P1yEifBo=}C77E9aHisL&ZOiLbkHPN^tq#h{Tpv#T`eL7= z;LOARH0fMb&Dg0Xr>mRck@;&rUfUOr96NbXybFHg(+DL3bueCvaZ-YekO zVxNa$!&6PcI2Pvq+mytNfOZR(!)P{%OGo~HtpTo;`igo4QK)0m3DB71^)tTHM&Ukr zR$Jz;Zw%2#f$VkwG8%a#<`BxNGZZ?RI{KAbwgHVA>cg*LbRD}%d9yTFBt*x9DJgP3FL!wdT z4cAEHFlwt}#silmd^3|a(nyT^c(o(|=fpRySD=3aSzCuIu-36pD zR;q?zdA{>`f7OtGQc2tg;VJGoPK>K(k~Or6cL_ywmliQ!^792l8F5rn0FS^C%^dA1 zP{I}V2Qieu3ZLpe>5mBbVVeltuPbG@|TnUjSh-Xapt8Oso>$8=<@%3mt7p#Pbw5G9Hgx3$< zR*RJN5Csu?$kI)k>~WPG9B(Ba0yX;_=a3a7r$RcSrjuLO|Jr{Vclu)`A}s zBEsLOIdTvGH6>rV|3XrH_1N7q$9v~rt4gW94TQ5mMVf^iDK0cUf|39{w8%y=Un|4$ zK_hHM2!J!F{VM(K{pF#egbNjtZFqQBopHN-W;LB7QLEFC+l=Eu1|wro#zgOkHbt9I zE7SD5GejT&p2Q(NL9eIKq60CxddwzOd4K3Dpg8%J>;Q))eUH}KftwvbMA`J z3Zq2T7E}MCuGp=zaz8_@h5E6FELN1Am4%?;Vk&g(ZSY)i-LSiRKmBdWH5(Cz z&2l4>gm`_f$k2e!tUa-vV|to$|G;~2^MECSX>$LwCA71kLxt3DXyc-iR|^Uf7%Y*O zp#9?PB4jiK6v-dKRdKqv>-=ue6;Rkfy)I`dF&~Ds7qm-lzT+e(x5Zo~DqtGk!*tFU z*wM)jc;V?*W>~VaYz6%^$0-bY zSNS2tl|P+qI?s?ZY%ORXlDfT<_x}MW?fVA>wwj6(&O(To5lgmhlvC780DH)SQgV2H zVW}MU6VO4REF^Zwl*kT5|ClvUW1N1*(z0MWt_|7;DDGF0BbC8~K|$6EA!8iC1ZiE4 zD|)(v6P}d>n)_x@fAJWiFI1A?=jgs^d?-BnGSJ6+I#2#|+o)kqjxkX4ed5^A2Z{dS%kSW)H;_q&eYl|K){k@fMPFD|FsT8SDO-|5j>Y5X zD+@2Zb*vW*9Opj(-D4kH6zQfubJkUxaZ3hr%xx4ia9INo4t~Tm$MUg)0v5P%(0~S+ z1ff6q$!+b*sZA}1KO{#!vv;a}7|xw+IbWnXTxT9I_2%=%R}##@0JF*QrgE6_e0gJL zT!#_@_SsxKelU0B6StQhjXyt0SO_sm%m)Fo@tfkn<1{YcsCID`Ta@sdPUs@aMK6lb zl%#ChI*pR=TCwsC7(I)dbP$@m>*Y5(96(mj%nlhJ&5hH>HsQGu8!ztRnXRebH-kV@=I>NXvvz4~E9A0?bmhzbo*NJKwSG5FpKrcaVA8ij-+*l@@AWal%Rblxr*X_z4H2r< zaxH_1O-r=hiGyrLLo82AogrSdg6dNCLXQW%{KC{_rAs?U!Ihf+YV3;{U7Fd|Rzbl@ 
zebc{yNdQ3($!?3}WRGtH@K@^4nL>sMi~N>82xl>w@27>hq=g1CxdGiv^zOU8wfPfX z;-JwJa7H2Q_UN5t0CgE_Od?io)H{UW38Lu^1dPq)$WM~u6@%VAH=x0X!f|%oODMcv zuHy(&!SnqVIhK0*TWv?fwT$n=SOE$+(J{EYHT*yU)Bse~5VYcQj&tfMkvdz^+!?lLiN#P1ezUSdNUy#=zXKhN4Q8M=GLp0cMep<4R;J+(O3OR#T?mIdcmX)pO-PzEm-pti{%9UW z9cYER9VZREk`BPHhex|k(m%Y)v85VyEwl;#PA)+Kc;kPVe2_^XxWXhGgfkGyqYE0l zU=R!O2aF}Xy8}J}*6h8tH=^slZTN@G^SACveQRvaL;w$M4ne-?HE4xi2y+R+(l%*V zzmQJ_?V)ZkWq_mpwsOxF4+3Jii|6#DM|Uj7ZFAQBXlC-gwg_h#QS)i$BK!|HN&#}Z zp|-1j9StJEG+~heMcIj!y(Hhi@yf_tiyvl~z_DIDO*XqLL9Kv_ zilDz;F>)$nD>2zj5Ww!6#69&`&03{-XEw_HT6kmwu{vVkKsrsn9tb*M%0(Mtf_6lJ z?LrCWtnAHn*6~|TS>%ktR{A8e8=gyI->bXE)E@}r8&JfxClAG7I$_T9S`46&Q~hzb zk0}jWKFEdWXy&Zqo%#_~L8}f$ht8Dn8B+I>_Q|}(QiF3E#vdUmAk4AhT4T&Os#IH< z$D)L%tfv|Q5}Wl&oJlT^=)W>@(kn3p+60LVx8cK_BY@s->QI7Scm3-%hd&ZuMNSQejY;P(?~HSQd?fC zgohsW2>SCnnS3=#IJQzsh~L|lx4>|anijIO#2B@o<Qb;hn}UuQ-Adrv}lvX>ZwWm?ymvQsHPV6mBb5pPa@rMMw@m zB!(bW-_>3Dyn{OOnL;6+5F+lcxDTlHlCt)K7}c>!sZxaL%%(%JXK%w=aP#&X9ubl} z3Aa_7=S?bET8&5HzZAjlL}`lOK*Vin2wQr9RTa z%MMS#`2k+%P*BAv?jLLrt>H4Zl$}5@&6y0fg4)BJ;UC`mnmK<$lz?XmTghrY&hrm;7^FQYR`T!U?sSH79u8QP zNwY_jOmXWoFBfy}Kk)`QIMKe)qiFX8yoplTVvGNJs?}D*)&9jcWwHG@l_~D9opAmzfki6Isu}}UB?S|2T|WujE(+bSFu^wTw!B7=Xk^+^ zIEnVp>-)XRJimX&j+dB$hA9=#Hn}VoM;8enG{ay57)e{UKTWu-r$Z?%THjfRsse{@ z56 zAuth>bLQpgiD^M((Ri(5J6M-CI<~68;Qby;^qdlGa)6n3Hb#P~EW(q~gJQWM4d=)t`}*9A$yypKiz zfhPPCVt)TCoSRtuFou6R>iG&qdX%%_Xvb6U(BU*<*uu1a#~n;QCC+a&YOue^99Xqq zhb()qCnj&7*G(CJemqwHmR-9G3tSPHa67@Zvoc*#(z;Y-0rMX|S(V8{D26_^aoHl- z*3J4T3$%cm@D3YbdRb-WZoC&KXRZj}%KId30t*G?@8}>@*`0}f7zDthQG>A^M)-x- z>%$3fNa)!99@EGpzv*d@fuuC)3I71cfQ<3MMhNwcu34a4D<-5fyq=^1fURZZUUrtV z&%aE#6v84Ow^jE41I)hhw8hnQ6fQ5o@h0% zL&vbU>UcbG7n}pTY|=*Yh7(PP8op6_-=CPFsJpHz4jh!@+*&w}&Oe_GIJK|_M}mwt zGre~gY+zrMb$SU=+HlF29s=@iBfuU`phiRch8MdCf~&$mGc_spN5HN-f@wuqz5=1f zzMxoPUZO&|GBIB0UiM;|HB7c)0T=HpTN`lAoYL^Y$Pi=|fT^LHXl8t~KL^!LL;)?b zcde$Uh-r%8QeA!h7;ZtY$&H<#)?)e~W`R>eed%vYh%9v9y+>R`rwfv>s{Bh(2MnpX zy@H^BiiY(%(d-!;V4@41z-|Esei>{(mF!w%F(VjJR>S$nwNP}9q3*Mjgj>yX?FLwa 
zali7ncDi7xUAZSFdhjUV2H@-^yK_9isc0TEJeG8Me-dwz3^cX`R>RjXsa*>5w@K3f4G4gb{a~S;_@Njawf@g^U6F^h6IKV{>w-$#P4FPVC#zLGVTRx(g967G5{qB(#uyf8@=o)Rzm>GbL@pQ_Z+*lx1z zOjy;&;?8vKCFW;sd}v3$T~t+-Rl}Ntn`Zr+$whT5BjIxkn2ZYp`IR%--GCPpSw^hb z<-Xw1z3vbO?6~$R{BP1poHSmRXo`E*bP|A9O+R@!sGz!s&;F{>t<47#>pt?3NM=Mu ziRS;5|K6_rQZr@9r*B-zxTSR7^a_}-8soY{Tv!Grtlc0uz4YK<9K@aqZ4ic+8XrC* zjk(Mv7S{0igfN>m5%sQdOVm1ri^INU^rS$*>GE?nG(VcN9^qGF2r*G`UYsje9EsNs zo~gokI$^cPl{M6x^hc-!Vt+n?Cp;(jc74>P1FI3!JcAbEHvk{HOQ%RKfZyOQQ zw&U*VVDDDA*&zHm`;Pz)L}eLf?d~}se1)8SZY6<9cT{nq=T(DguxXbM26#xe_aj-C z-FNAs*~R0@N$}m6JkK<`b&56bTro>GsbdI~Omadl259C!*hW&Sa54=ULq0hkUL+S?D#M z?5)54zzwsX!6iUN3+Gr4N4eg72Ln6-8%WPdrk8w0&FBoUrV0sSRo%jwEay~a+xFU2 z&K+~Qpsf6@dzTM}ziZcV{6<zh3-#lWzd;~` z0&DR|YEBIAu@n)IF2I41RWlsH%oWVc!%EBW`*X_|MF2?fFx@ul4DSx=R-k5Ug8@ww zFDoj4SAL79CYI<53X{z*uPM@i8ehOfVz@QI%^>-bJFvFH<*m)fEG-n zW`H`e4Sueik;=pvN_z5~uaFcH@viJiGtjAx)y$j~F3iJsgEh@kFl#St{l6fOplkhY@r^ z$9jC9Baiki<0DbA)9-qE;6vt2Jxn|||BXa@AI2G)taa~vs)F-auN0P}(E^eG!8Eoa z$z{RJ3)`q>kl~`<;fNagc>FiZc&8(l=@-lC_zhaLZp$DuM4=VaV)as;uL!4{+bn1Y zNh(IJ*$=HO#iu#$hjD>iL?CL)dVSV)v=}OS?HFk3=uo?$;PaO zB<%7&4Fu^uKRDzjTX}S80ob`X=|Cl|=Uy`4Bi(lAHc}H%1VE=oLd0;67>$Uz&F@sJ z8t-X-hjxei)`yr~MD41!IE1QoT;=gES090a&L2?A(Y#73KD?*+G7p-$U@gE7YxH5` ztH0}OW!t}{(NeV`1Q=+bW1R4UTI~p?WTUnU`J_Y0XYQyc;vb=K&p|j=dFd#|0HCkj zs)_}Nc>OBOW4Jg(!cpe&Q=lgx7gey@!muEBV9+ED1!NPAcv!&3xWq|p7c`v-OD`s| zmb9^o!Nc(VEYlPMtEsP3XC7b0#DHRJwx-ztXqVa~+M3Pz6C4d+mJQTGfCiZCg15phu@6!m(1+rBCieU7F_UZ;N?Ug47& zCnTnZ-x>waoPotmYv{wHBU^GLEci~G4xUb&eUSd#=l)QvGXg&idfPkq-ig)+93Jd8 zh&*$)7j<7!3pI!!z1Wp(&x;-?@<$H00{@-o1=nJNAnKz*Oz=_Ft0Hwp(+*vR!roQJ z8PN=Qe5D&oGeog0=4w)}jY)h%mS4}&$o)cZ0!P@DoCDBZaZ$L37VW*0bG~aaX3%%` zl1-@wn*A?V%Ws~lg>6&{s4}o&vqsDRUhJiZH*HY+W<1_Ec<+fuUKIujxB$5(xMx2m zLWOX!^9AWVkv?!fI4{6uwnSpI(GlG(pun(~?5VG4Y#e_IAUd4Tv3oq&3K<|yU{Z5cB-$VpRG>nD`85I&w*Cm^955dVl0JNx z;=DK-E&8Lswc0foY_X!CUsXk)5l5QFVFJhN!!zH5sVyULH&vM?4d%b58j64QcT|5mF* z0RW^kIF#y6#yX#g&vHv?*e&gv2-xxv?vPV4BZM# 
zfiz1wc1G7kK}wAMBoO*A&Jv?^iqPRbtSk_bl@`L|I|0Nqt$@D?4o_98m!>+xX-(y4T zlUdhf{SJCxm74;V>BO|lyO)6EN7 zJv_+Wbio`MSCQC5^BU&90`F|`z9x6|r$*BQog2lB?=a5zeI`1|jm-9=|BIkL<2m6E znY06+ajr7+75bvkm>}?hY25N|;2u8hDU1U|*;IPos~xpVM1aH;4u}U(8XqE{7D-V? zL#-EgEef7)w4JYC%DZ=E%M8ktSoQ$%V}xy^Z8tk~PO#ws-~txI#}|o6g6kTKlMMS$ zc%K5p07KwEL>mnZGu>)fNkX3=)o6@9UFu25M|SEaSP@HTM(#_r7arZ<=foww`7ky5 z{0KR}QdGX5rnW52Y=357pSEZ~d$s-Zeh*U)ZVObdpktt$QHLW~aOE{~-148~FTK-- z5Y0ZM0NZ*e&(tF5`ubLD`A|BU%T5BsJQ7`sDA`iSC>zyl`nl|>k;a5&#oigM}VROY05T!5-u z7CK{TbFrk!4g?bM<$uFm9@DgZxnxOeqlXhkA~Dyl0y83eZgVIK?BnZrfd}ME#)lugNh! zj#56_7aZx)D3*hZ<+lJeZsxUB!ZiQWv(vMj_qul@t>3~vU2&lpt1qKswbSv}I7it_ldc$-Xp}~H`4%W_Oz&m!! zlNO$WhX6z#2`vrM=7en!Xj(KJY-D>YpE1E~aS&$R?lTO2wbX4yhEQR~!R?!9xH)S7BlL$tOvsCty=lEY zfHS!u@+A>du9}CG)_Wj+2(AVRTyr3CatG3?cr@Ujaw)lIa0l1o*!h6pCFbv$> zU5uJIU;$7%8h_G4k_74K;?k6m5YhUZ->g;W;QV>hh`f@F*L`U4*00Yy9_X{Z-yhn- zS+OD$s)?T}D@rJ#)~FFhD2qgszcrw|E&oCC+Oq&{2J*bHvdz=viM2Yl_zUPCOq316 zg#s-YD({OLNhh90qJx3@G{?*{)VjDQJqoK~HbDm!@e!D0kO=?+hI%drHL^jtju#iF z2m#I|qK8*kz9Q1Q;4=iCqG3IU(Lmy5FbOeZQrUusne@1@6>A$T$4KTKC|t_Lxgbn` z)9hdUHSsc>GClqDxI^knQGCrYPYAOyxL8fa_FD;7Y7`BCi}2Tj1L{b?MKBnqlg0!i zXgJ)W9B*xD*b`*Nc4nvj-46OEk^|I%Ap8rbWm^t)J;Iqd%L5~_4Ro;guBPKvk!5U|zLuz09KXICYZyoc= z9y?0E+!2Ln6B_Lm?A^m;jmxH#*N$c`jF^CPm6eI8Z{(w$@^J=&OiE?R(C9nLgUAs8=^4*O_^SnP)WfW z36kgH9V)M(jh#S3dYwYEIP8hU-Jxx&T99wW64O;~r^Nn%hS|vM7FWQ0w<(<{hXzOp z4_)U?CfPF+vs3A6$CXWUG5GlaLx8S5e=$Gbz;$zNBt|x5N>)RP`+eftmc8Do9L=#y zmDEd>9YF!hTg=-O8D~0a^ZqngR~$0U0?IFFq!A(7Wxtg4$Lh4x(ksXMITph@(nfU8 zQN*!ik_2__^66)K^Q5cO&#N{feJVX2vs;Q!WUj6KEg3KWqla#lh(a#WsqyrnO+!wL zUN8(l!25#`?kC6ls-C-}58*WdZxP=~fTK`pLcU|+tim-Xn63Dt*6n>J?wJ&o@4mf# zMBj~GIni<2yO1MNAlHouu}1c%2eD>bsk(z!Q2LGS+B!d<}S#xLy~qUKP4FjbAYnC2jR(z+1`%wdEV;q z0GqJ&b%76eA5i*nI!&V-t$Y!OV#&gLjG{iWIK8&B{|0dwBaDcY@2yzcn?URHQ8@== zjb$2mxLNZscbVg$i%b6xz};`=A%#(2#pq_ z4#g)tw<15hrCI+zE+bs8^Q)5#UP7vUKoV8cUFuB0YT+O|AXkPUI_Q--YbUD=NMyZd%-nomvqxbrzX4Dw>9X#9N&!*-2r#8%HRk4I7!?baYiqH=*!Qp@2} zFRhx;|8&%O*W=S4B7yjq1Y0*->UQNEC?Lo 
z0-cYAa)=%q23?I;%8R_V46 zy<@1&7YF}UWI*t@g9Val_bl^EaB-qaqN^0+*HATbs|oqZ32H9U`;t{z&uYGEkmSAJ zeQ*pU&3EH<0g+%!Az_8^*3dDQQ`n0JEf*GpUNek=GZVi{$(aoR^^>q-t{04E zgFy@I^fk7z8^>jLj8ffsv~yA#G?SOxHPTk!{_@1l(O1~iTBOlUl>QpBypD#jprb+! zWTx#!O&>Yl(`i;$Ps&?*Mqcc`ey{^>bOHuK!OgNG0gk4C`(lM!%+YPilwKhZj<`LP z)*BuY%_aj#K5PNrKYdSWVp@#y zCVf~!&4bau$Nz&8MkBJ_>)Z!Pe`^PH>F7Tq-_jnF9)^|#rJs}9-2*nqS>v+}ja_o& zwvWOuvd=7hlOLu11g6jP-xpfb;vTD~W`bu7ec}>(#<=9jOK^>&{!V!^xYaq6QH6a8 z=;NFywV}l>6wcAkwvLtIjkw<268u_aWOqOqkn&3_5DXD8|L|&RhP!yjASSFFb`09? zkW1|co86yEf7?1L1=5@}$#1X$ZadtJ&aV({^}|57`S*EW2gh1J_l71j?ea^K+elg`O&0m@t?i!S#Q{6Vr!R%=+U) zmlr`sa))MS;${U!M_|8R!HYl)A{RuBsOx2^;c*>1@mua1I!tals6GGyX1e7{BX%bw z(zDqGjmL?E0>!Wv61jzOAwavB zVbD9BXF(C8W4y2SczQnu;6dvxS4~p!b7&1%&~?uE+&PsKS{}5IT&_2?JSZ(3Mo|H0 zg0}5mZ4n1shZF|tt`kyG5DKAugxn7n00Jqm^Z8%kU^r@!j-h~ zoN+oR|FAOwdggqv$G4uF*)r^C&Eb}3yVld#yDY;AgFg7f`+1j&ma%Zmz8(J?ZzQUy zl&~HI_d|fGDi}sH47S_4bF^8tkvwC|`%zRqk&+K@>_o=yWv8fKaNwxNg4UtImEY`uqxn>rN7aADY|HWKU!YVr+FLp@BKBcrQCopou(eVRa9p*Sb zye-t?BQO_vI|G*urXW(}T9G_%gasF3LsTu_UIGPwcBlPoHRVUND{$`w?g}p(CKRG5 zSORz2p}=u0B{VkG->M8jjC?*d7m${7coARw`mP<@OV+7otdJUh6eiPsuoPyQk8sS~xy!B$C?g>;K%E9_ z|7#;9@xGR-W@77yCP28|VlRjM&yj7{T?3ZdhiXm5y)?cDkl+1^V*v$p`*bM{j8H&b zhA%li(J#zB4;nhiBTE$k4N@$X+Y-1%0sk8k+wJBzTaKr{=^D7UW(u9vna(UJ2i_RS zn<-+l^8>pRCAao_hiT*qM@QY+DDWsRm1)?Q-D17INe(M<$`hPVn5$i8pr)x1UmG37gp->v!T=acM`iK~klD7V z;fJSSh#(P{j3fXe&^chzmsv~A`AIu3a86Y2UKUY3{fRD&YOwsu4Yvy-nY)4p@EAL3 z9!R*I#zb;^+wkA2ktGfFM>`53%ZHxpyy}H1gq0#*&&$$jY?RuwR(ULh$WiT}Aac`p zS`e*(%2|1XNPwZ%>S7y4F=++RxU2@wUMm#;!hyO+EUWt;GNSZv2VMxl zNToS57nZOH9~jQ@n@4oBF`EXS55yO6#z8ACH?#qXQ?M!7mk<;hFz*ByTg+x{$8-#c zj{qFO>~4?WXeJi5_1@pi7f3?kI-OM99Q!Jz3HMR^C?I<&0ewa(biyID`-GEjRQ2{1 zY$lqvsC{7t=~xy~x7A3SdxMtQ6gj%v5-<|S-x*IY`_Cgidn|cM^6IaSbUs;zd2pXV zsU5C0El!b5&iKI(&%|s;qY@?V9GY}RVs_FJBiPxej_oIw=&}jKi-LgFk3+NgByntp z!2Erv2(~3LB$pMnB@P&yS#_6acog@act~R6;j{A$3U@!qn4ZYIQP7x?F^IckBQMgf;=f= z21o1PMdZEE&}9!uefvK3`|m$43Zqd0*SSk1dc-N;Z<}uGdU7(%Y1-Il;rRE3#Fj_f 
zB$`JLlS{3mpJ0=E{CJ1-2FK4#-I5af4=zqJcTmnEl;7~cQCfY3XDANv8fDx10RNPR zrjR}X&%gx!B%R7w9xlDpdfKCo(~lynFqs88pk5}nRRy-Z!zD0!VJ&nD}bVfMOu{vI_^TU9>aC0au0+Sqq~}9ZM@R-xBhsDNig_I)0iTURkc@ zIe&DSL-t$gMxD6jo{*c1-<+$L^7AGr9jZfw<2V;R+H^dN0#$e3s=SppEchqNVZzgK zZgeFipmg6t)JnXz3Hyg_vBHZVlUAzL)pNrJ;rNjfhC#yzK)FLC(8 zJF`^G>Kip1r9gAF&$!Rq@q@K(t&yvR z5&N`b$_mr+m-xlVve=d1)hlx=V|j}gSC;bb3pMpc`fvtCwEt;e{MbITXJwgbj_K-3 z-PPrDx<)-o-3u%#iXe>|2q8#i&yW`%<@qCl7VAqsI{b#Tq<#gEOzWa&R2(|qw9og%FU2@6el}Vu3R(0} zU;LW497XXpZ(Pz*c4etyxtn>Z(r95PDeo{}H23+16ml#R2R&a|k`w*a^YlwOzqRUS z#7VvlT<37;JGpOJZKblkDcd}~W|FsW;9!5#z~fyFa*j0?3{|^X@Plh><#0DE-@CHi z7zv-~8WyLTMKE{`bEj{@>lA+|B>*f2*7rk464J{y5$EzaP8q|K0?wivQo6 zFflvvW;Ac%r-epyo9rPvC+{%N)!mX8xTl#j(@#eEQ0VB;8GK&#+Fr37e{4yWL6BD%Bqe zN-5_3L-m4(E3%xbz6XA9yTY}zHT2KS8%qZ6fwhJ0bmZ?<@Z>HJh$RIu%OuW5juk1i z2k1W-4~RRiGRq@K9~D_D#Y>WQCubM&HO0k4T`jEZxtlZl3cV*AMg(t~72GlIKkjEJ z$YAX?#8X>F9OO4bJ~D8F{fTJ9X%6fkxkpv24>0OXkgk2*xt|TI|M0yu*;Q-r3}bxJu}|x5WS8U2cW$4yNWvZO zC#RQlDoTAj&u4XMU2)_-QXVqx)`06Cch0-8op&KVUx|!ubFQdXd6mY+Ds|hr`Hx2p z!(_PH^6Y*xY+w79Eq@PZ8I~FLR|gDR96xFGahTn`#drGTK1=r>d!pi7P7jpmb$#Du zf9KJ*gvQg-OnoP|es0XO5&}wlh`CMPKw~=ElZfp0H=O|<8 zt2=(@0-1A|A^&yZ{!0!uuJ8Y)vLQrO18v zKH=W|H8m&7=PO5pzD+Nvzqd}L zj;hrE$e6ibne+D0^hx#8Mw`}ten8{j2RIgYM=8U>G4n<7VCkG*`_hn2N8LwK%8(r6 zQ8E|z1kDW6H+~d@SK`{NyQK#`g@0$h=Dr(oo22?-P_w(N?3DP-hK8q?6Qjcg3r?O6 z&7U4;Sn|#~==5@rQKq3f6O)ib-LE9axrtXnB^Av&^9YoznAB^XN1nE4oif@=Je}T9 z18d2Ma?KYhoMYt0XVT(=Lo|ka^yD+-6Vpaja!ppA**da*fBHN~ZtcQFcKox@+vJb6 zTg-FTp3IwD?k;*m{#@U6c5M7x-P@$zoVyX3%3^OPjb6OTSS-ofzPTkqQ|6IzpTa=Ej=Rmgd; zDeNiXT;VO;;F#Q=KreS=^z7vcmcv;dH;+gw^SmzEuzFke``9C9$42VT5Tm#a@6)8i ziaC|tb_=o_5`9V50|`~{s>VjTj;hQAo?5Z;)(;E_BW)9XM$Z2E3#~R&brTcTHod!z zeStKYeZmi?nriWsi1RupTWxMFbFO}~6TMvOu%NG|UfK77qiLvr|^cJdit|uz;hk7a?WI_JB#oA`Nbc5 z)a~`^C~JschwpgNj5{=El5hZbu<+{RwU6KL^3RWpjZxGbsQH}@i6hrF9~1SzE~Sf&VK28%4BPa!j;Qw3+Z#gKO4Y^ z{IO5i_R74u-6>rvh{k*~K)@)HRcZHC&<0h}dY5Q`Eg%j9>IA680eZsQ% z8Tz6S`&y2aJ+ZM;uYKxR!dk=?U0cAu+j@5BNx6-ELgYL8|LsDvD9t`zklct0ne7P 
zL?K-0o$D0sxUV{oF0;3(^nL4#zry)He>(QQ!iAz_Y=698cdY*IB7q}`q9ls61w&m^ z7uR`H1ZMm$_V_bC@u&H?fTw?9LE73cFT-LQneshH-gdBH^IHy?^M&7uCrM2v+kD3} zIbijB$h`k~Zf!RbV^#A;P1Tvs(vOJjDGkcKo^W92=j{8WtT>6bYrM7(T;8(37`p_X zdpEw1qt09NfpAljC@W2hfz^Dy^QEWcU-VDYm)A6;HnMzNJ9^J+{&QQ>)^1BiJY_Js z)o0f$>mG4d6bkbC*JS6IZ zmB1uqdrgiwqbD7cnk=Ti;2g;zR~(<3JL-G!xb@9~Dq~B-U3-*=zb)n{P}0jO`;x9Z zburUfw=nolKSKY~s{gpc)5CoIxYP(%L&zM_onMvoD{@8bp=fzUF-}5scK7BA9@B8e*f3kpS z{9JUh;de!*M}ou5-{aMIF29x3cu_RC5}uQ$YnDgu?!DO#H-3EV=k}NS`@h#k(o4Gb zkHwQ`=KIbZo=OrGd-RrKs<+kF>bufgpWh#r+Iu%6@OgP%;o&8{Kz@q8w$QrNv+JDZ z>iVd7b+0}&I*+}-wERErcVSPi5Z)Bs7eCl;h|(RBx;(&P+(TI*wfAGMAt{vCAja*F}g$M^UAXZnvejK0yd_+2kZ)=g&gCN}u{`|V2A zxZ>wHN+Xri2Lv~ElwWAdwa1g3+)_zToUA`(dOKMC?{$4QKa_Hv=A`kGSKSertd0D@>U70?Il-{*%cAlW&<|NN!?&Ui@eJ}Ti>bv}pr{aW0 z&FzM_O#ea_o zxbZQhx#nT*C9|mbUB8(Hh!$LIzqYbR_@ezf?ZZa$esm(r+86&EB<*XAu);- zvKsenUD*bLd@h67{6L3$t3-YGv#G0|-jDUvNAQ)SjY7w-Q-yZ(K&Uiylf>+QHe zSsnf~8{Pz#H!`@--&#M_=a`*m4=ecftIePJi|tRjl^-F`AIO<;Z04m?U^42Zc*VST zUZY#>`Z?3a%)Seyxl}v<4`Xj04|U&#k9Xg7D`iR9vg9TyWZy!KJ8MGreapTt8Dn26 zMG<8;BV;$$vJGP^LKwRt+aL@^j3xWh_dQC_{XF0A@Adlif4y|xpL5Q2uIoAnx^$|5 z6QG^tcdB}B3wAFYyR{d0w1P<}jyEt@Bffvx&oqgqB*aunph~Nl`k+o(k{{=k|Hl}o z3ddSzpfN6=vhpz{99=$;Dv&-L0HTf4|)5DMNjaJHYMWo2@~?A|;48)b4%5tx8fTeN(0Ya?mH<@*uuAlQQ*L_a08< zJnp_M?s9|eEN;4Dd-esG4$PV%1+K>0?dd`;d-bypl{g-1_t`gO8Ll0J9qH&q{Y~rq zy;`!<;Gd+yby(lDveFC5$A3AEYJ9%ybH@7}MQ;z=6=ICANErC3r6xjUjrKs%#z)MJ zwgB9|RA?4Z27Rd`_K#8ca#rl~ z=j1!Zu;zD%S_aoMgKnj@pb zeg#v+tedEyXG8Jm-AtpTPm&ZCA>k-*34NJGvUsZ5KORwep6j_SYt{AVx9H2yNuYxK z!wJ+Rz_B;}JPur4k>zmddQwMVpy;h4iCP7|!y$k!MlG6mJpa%O*>>|TR-AN{NtBWs z(5>wjsdZ@m2&x0@aCrqkKlZ53?(eJ=S|(hoBXVApBXQT|4T8zGQzPG?Zar^XmwpRP z+hi!28&f5GHF5^V);Kw-W%nFj=bU6^G~pZ9@EF{%6~KbW2-q}d>7nNbUGs7m&80>ZfaHMF=l_q$$oCZq>q%TkZ$);3Rp|dwc!I@E6=@M zGdFy+nvNT_2~R#$xX9}Bj%F%BFNfkWm{vM4TDhS;G`UxA$cVc>2mQZphuV79#An>R zK1jN##;{$R!C?{d^y9~?o##dThtKCvgnZQNPyVq2f~k2NOr;c-#5~$5Z)IeM1%5fs z!-;wr_rR!zN~o-efL5p0G^H_)^nW$f9`;;l0trh4q|3rR`t!49!vyc_fj5?pMNQQ& 
z?+Jy|2*jXRZB5H?%tSHl3D=nBh#&S72`EQr4F?7NPJd%bu+6V5X`b_D7y7y9Js=F! z^vC?~r9bS$Sd@Bsv8wEN&!6E5hD|P6;Bby}iV%S0=(lK6PL2O#x2)u7{H=0fG~x6K zqu%Q}6iN#AFNMj^H>qwgSjAAEI3alBxG4G=H?LI?3mjzv_Gu3bhm3XV#`m_uD-ug! zJ!^>@gl^D!4XT403|4z;T|eCF7EH%<7vXeWacgcV`>H>hT3}1N4V^IW1z~y9=Dk}@ z3RINo>(I5YogR1_sU|SLh%HjIA7gIs&Y2>I?cEE+QQd*igttGkbH18FGesKqw%fCf zsZF%Z=bz(ENw1V!TTswJ0*u+K2^yXrVB9wYQ&RTkGjGj>^LkyK01HG(>6o4bQtuBJ zGo6%o%gdb`>3#UiEZ~C>Dod>P+~}3t@SnV=&9%2ud-c7I|X!ua8`wr$vc`2g}-vL3tFN zb!O?>MIGsv2L!c{xAQ)V>Kmq?O4-VL>G>%l*Z(Q626w2SMZ{cP4wIZ}sgIN?|CUzy z?$OgmJjD7<*Yz&uQ(D*>*h1VLFhb-MkqNM^M9Zh5{hU|DB`KbASrtD|%M26hS7pXm z+{rGgY`*Lu_Qwq#@jUBn;|BQB&_M^OI7^it&u61N^u&zEdg^0!3LE?z{_D#JAO-PD zHqB@ALuXAWRptQ?=Rb+jjjitjc)3>PZFo}kmy9THZpVG?O-T2;O<6!oohO-$xfhFMYABTaLON9!-oNM5}!{EMig+4=Yoxp zWdpTp&7a2fyTpbLS`Mz^Y;w#F2FB~EFRZh&x}^ATCsJkJx(TbG%Y8J-{I=_;E4CV7*qKnIgK;Zf_mRX^G;K5Mwdgi9i}SBIht_CaY=fCe7uD^W$-Ekq zcc}Q-(qiy0zY+%&zW2t@j?j4^E=mtOSyMb6uxAy|^|aVMw&!*u=XK}{o^(Hlpqpy8 zw&&z5p5$L7^oQ zVlR+^g@nCU&{0ooIJ-knATtlxDDqx4534ZLri>^@2R7FjT4~CI&p4>d{iBV<3s7}A z*l77!r54wW*Z^;gi0iDi`+KR9vZ8p+1MW^)lqc6Qc2i6jDz>PZLz+pFYzn1JZU2gw zz*l(`S1${(N;-{?CtnkK^U;E*e?@xM{Dt(;`KoivoL!hMibjrv1I-^4h$ zi}J+|DoO_+DA*#^XbAGSH1!3q$Xdz`Ff?-+{+)uFQZI_ltC2bj!|E;N-XG_IDugA- z6w?UM5xY-O`hN7|#W_h~UTwO735qNpF3RtaG21n?+v5}OPL?cR;UE@l0DW-}l)(bW z0>QFp>|5TZ0{ZRU(KXS@@R=Tje9CcUh?I4`g?LJe8JE%08;pZ~! z`z?}qE{jAdYIVhde5SVZxmOJ)Euj#->asq-bLn}Xez`%4_#f|Tz$H> zLBegKsO%(jp%FmsfVyI$n3g$m2v`eC+?Q}~LueTHqCw8!DuMn%+aX3VbYMyp4L3hHuIX+j2ec0Kj+ z%3sLNYwwow@06hR{D3$RTRKdJL~fl+#ZFt+Q`P3XGON<88%gS$))#7w8}q$OMY={? 
z(u@-zn4kG6wnE4i{^A&A&&cM+rheT#_+_oiJvn2olh4QAzZD^ZA-}T@_3f4|4%-e` zPyQ0E>&CH#!_RXDqY0az7X+Z+KJ~mPV*cZMgX&i@5Hl@JUlZ~{r4jfPi*Lz-pv(wr zwrFI`O|Gu@@64Ly#Nv`X^#xkrk|^?Qj>oYBx%J1%6jU_bL$_FU|YU|PO$Sv|D0H&<2BbS!7-U@3x;P9!0cP*&d zZ4E1qyV$qsoFqoC@JYs|J1`Ww9+a{Vy02;}8BXN2^=N2#bUu5^W}r(Yl}n7+=ozu1 zQHdDG0A6E&kY>$ZR2RM}FxF1Bde+}VSf-P?scOR%RCdqcfL%qa^F77%_f^@S)ho1; zwn5n7qBGI!xdW#IIWF+8qQUPk?HyScz}U0=Ve!gDqorC$oy%L@=2qdaSF_B%fv?qQ zW0`&3tluK2+^xg70`UF{VlY_Lxs(>k!ywI*ls9cvRjMLdF!U`O=XiGSI$g-ZI?jcL z-n-gUq?YS>Pe1>MqF4(hkCcj3v0}&CaR2_D;Ndy)8~ho?M%9c=7C_+<=MJ-t>MZda z``Y7htsLgLuKUM;jwsVSbqcjqi4iUHjSbB}KMmciQC;4VNB5gWc@HEP8+7zvU6+bn zJx~}%ui9Ub6wjOC-B*o{)N_-tic`El+rMPAC>JT+5v$?KMWW%+$HaED8(jx!*hUTH z_PRJAP&j@&6@Na3b}Y1+PoY2{b~E*{fR^*< zmw4A_k^JQy<+Lsh0|8eC25R!8rk-?=w9V?1I+pyrbG5J`U6HmJMx*h%ucyP1s!eWs zbDvK6luYvW#le34#z6nF_ayU`l>wXK=qnpL^E zF-|AJ1R4Q$qWVd0dVJ{o@1$;mJK}Q?Xen<>N-FX_m`Bc&qR>(}=IC?8D5i`>5aqmF z?GFJaSBo6wj~4K3^Gr4FjB6L2)nH3QRG*dHB8#t_b$A=(4QMa!f5hVcX|t4Czz7o| z^?ic=I`)G4#OFw7WA?e~F#KHh7+_uo16pYB_3Y%s3CU`ce#j2Z6br}aJ1+xLQt*$B)lB+;qi_H!WcJD+kxyBE|uBlbrYjH z{XKq^>kfs8vB9MkGP?1bCfqVg%LTSv-8s@uuD)$3;|hLH>A_pMa`5B`O|kOtW&}^J zwAVexXcs#B>+6TJ9h6o+Vt=A2KRH3V>%?TN@b^<4=c%w3-cON1zGhjnPc0J;u9arlF;spB)l6z?d7nHIK(-7|*{ZrJTEQ#&WVkMZgLXM49Aeep|m=g=y|!4>`jD;ovz}CYTKk)jI7=<{V*&2g56C+$XL#Q+nbTKk=6Oa ztWfTf?3_bwz(M8G7pw+FNqV+K9K;-aKd_*Fnh9Izz9Mj8wNiVzQ4|~@=-M;z^+OH$h@A< z_@5c&DC@QQlQptohEhHNox4tb(HJ)RtOU8$>pXbX6K5e0vPE(oQ}tZc>B%{m`BzAu zZb(`BVA-m+9iuE6b6=?+k9#Y^S<>7&Bpdz$U!TmDXmEB;*_Fol!0o7=i#F3`N!$fR@>ortMr*4d-@WQD8FlD zE+l(NQBZi1k*%o`@$$$AjnKBalc8I;wkG!FPcPNV2Qf*x7UWpwmy{LV`TN~6?&OAf z1c7eAPHQW}#=c#%%P=>iX*rGqy`=f#7L2m!es1&C@aOo5n35I6HAQAFw9Pg*s%F-;Ykn^Tt#pC`WY?)j`~lL+%HzpVnGxyVJaY1y=& z@3U$v3?sddJPFI|IZc!a^EzSwRC{taXZl*LW{yq!a8s^+?}_;+3G&F3j*NJLh#r_E zP%aQ(JwJVceSdPO=+c&Lz&6F3nr%c8`+?DDmz(Ii!{M1B`hgIdH4(S4-8{fiRtk-1Du{B zkz6>e3mHO-;KwQ%thRxDs2lTjNK@xRZ1Q*WbJ~B!myNvkT*_&s)3r7_cd`GYVc9ui z&R=48Su;K>Mcx@ExT#(Wx376z>4%TFj$SHX$DN{I~1_s024$cBFX-u8ds&h+&SgYL~ 
z8ksc<*E?pkbj}TIj`Q{_r%v0(FFEn8udveiS^!q+4%R{4Az#Q^kD zfNfs|nB;bRSA+{9bzc&mz~Zxf&z1vyndrEdSTaGz|EGAfhQB!Z%k*pkpWIX7V0y?y zAwGp(@L_N)&;`tg^96snuztyNE(%IR?7_C;pv7REuUV&h{?L85A#-u_n?$-Hm}naz ziI6<5hW@@gIa!BwM9kg3b}-ba^O3?5jetqB>k6*-id194+ikgg@=dQa+zI*O=>4PFPz`Nt1(mh`kIw&?F2< zeJW>s90J|YcA2!h3bKCSxG_$d5~9U{E8wRw?KCdKD=1tW-&wtU$LIo9>CV~MoX1<_ zmcPISK~3C$)?2+s41!l}pG%G_kcTOxx@n$#u1oS+}ihpJ~uIgk4E= z;QllB8IC~h>o^Wa-U+O$l#mB!a16Gq0}_b1+xN|Sz0#VnuWehG*3$isKLe`^QF61fX?(oS+)*5lNBr*^WV7bxc5LbG=---PPs z$~L>21!RiimtcTz@)%W|?Ek2z!pXZV*#z}S zpw&d8a>6g8XX4Q5(vI8%`>MhvQ?Gp5boJNhqHB>=0hyUgDI@-B7Zn?iGBII8JPtgT z?-t4BzO{(WO~2#pzcPfGd;3Q}9r*Rq0Wd!a+e*PaN*?q-OcbVVkTCvmGG1`WhG>#; zX53)Z>>OQHdO|*X51VU{d0?4LMPZZcAd~b57RxvW-GSj39@K6dM9}W@BMbI zhF8SS+tDg#j$l*Z8|5MRgFao6F(!ony?+y@Czsk2I9_Y}X=LT@!rP@R6kS<%Fh83d zsilS=8*>8A2>_c^wAZs(RNiCtLnfa@)uOt9gIS4*xoJ}chWeFW0BvN45!Nn1mxuD8_rgFD z-@25ew`Z^&pSGz?M30oy|j#L4R6ZU}tT;SzQN&sW?y(giT_o)$`2iMP6^G)9R zkUKc;Vq(onzSqD(SKy*D>FcIB9A(v&TE{MJi43I-nYLG+>;jmuGk+^!YY{EHHMTFG z`4r;dg)7R>teRH)3Sbe}V|~*NF&;$RHsI!iBMU}R?uNDsnf~v$>?eK7&L;i)i zc|k0)i%-rg4(`36iZ~_GnY}{JTvWZch_kzpWmEa!FE6eCN-V3XXKT(Ibb*g+6)1O# ze%Rt*rqCDgS9Q6)?0oq`G%aMNP3fAdvb_0H;! 
zb}#@V!k6pEmd`9B9}-LhdMI$Uwr#2J<1)Ge%5TN`-Vkj)Q_|t>t7mO1)iecs2=6=& zf||l3T|r}_(jkoniLr|Hx8HcfQleaA#0(}8dT9eZmY3JVeq{@gCZdeO$Ef|zRWCxe zjwb0*=bPAW$1|YZ3D$oItW5@*ME%vDDWi(G%ipi{^hR3WRd?hZYxQY6H?_Tm7#l!H zHD#?bZ9h*8f#jh!<2%Ix;|5h1@fiD(98IDm&;t_n-?p zmqum(!1f%$iR%s(?(+C~#EG<8pRym9=TcPU&7KkFR^fAFv{v{fe~pFCio=yv?HJsE zSK{3bt$fb$ps*~zp)SwF!UV(K;PnU;@CdA5(qnU0L-4q5$l;Qd!;Ewa+@}vO#@ceR z>bqd;8faOKp1|P}!RnF2of)4}lqMYul6^BJ6tHknaztkV$c-f+bdK#^qgg8xiB?@t z^}Pbp*(j4Fl+8C~?%jI^kypJNOb^{Rs{#DzspmQeL6$ZdERdoNp9WLq; zu4J0eY&=Zlh!(6PGdN%YvIaY~YiV?=M9Kf_9hjyE(8(mX&XxF#G=4+8DVAYvp)gF( zXkPrQ%~ya9NKTxXjqaK^bw^z%YzK&LbXaUIt>IqP)^lacdhc$6_Y6btzS{aulD}b8 ztd;G#+l6`zZm){ZsfCE0?aW@(ox1=iM{4e#e8_pJIGLy z=(vveIngEUfEusdbMGW*9vqd1Ddh zvCOWhg!6~Im$0d$MKjuX@4L1*EwTKntk@p#988$^q*6t8J+Yb7FX?Ggqf)9UAZR0x z&4;1hQwpEKQAOVfG=N;ngL>l^A`h=P#Un+Rwll$EN3SN(^uS6{Adej@?# z4UUP+{6Q%be-3#9cN+-%?OWey-CDmv$q}e!u$3X$=5g0-E1iT(yI?95( zfwx{9M?atUDJ_>8=fB$hnzs^)?_sqjIW!9j^SxhlUQygQ{4$1C z6|(J`4UIhnAdbIx?yk>YVrQ#Wucd3jPub*P!jppAMVwkiI$iQ@QdQPfzJgA<{2=zs zFl!ugSLi6vu6q%x+WPZus)&EU3&IRlGm6&{=GXgX1U>BHcD6GulTL-6#mC&wsP<-t z-XN7BvhK@48@r$qdB0Ppu<(`w^Uw3+4UCmyaQZ`ANWe{Vnn4p;%3tEEw0)M@4Q>aq zN#pLnR9prg!q@AIIN-^$g%_RY2avm9f2I%;uJ;r*_gSQ8l>DXn9O{GEM&;w6L3Uqh zTc3s~lpu_4a#C=%$YFoQSbT$gLf+;g7C)f!MH6_}+iDqn3|7>k37{r;bknDxNe40d;;NM2}59l;Emae}B`B+^E6cJcxvdFD%&%t(*Qkfext4EBnRB8y}NFM(2BiuxG6!sd=`NEE` z&>7s_K???w72EMR=8U~O^dR18=6*v7omUPtfHodN9-?HrMB*s1af|Jq7m1t8Qf z&ObxjJ&+3HhjBNKLT4_`FP-912k_R@Gq&*KpF;zq(!0Lj=Wlza@y^j_E;oTTTq_pI z7L-e7cA!IM-1(S#8iz;SX^{{B5b1mCc0!Mwaenc6tt8QCQZF`yh_xVL5;*!V-F_k@4rf}1(|9COXW?grQ{>k-uW3`H z@B&)lzTbL*3A#DyRJ7_LtYE1*PO>y*@gM$7WosrDRrp39=X{~=x=JG^NRCS~XE)kE zJjUMmoc6OW?HWSvHh!Fy5_Ejm)CYcpfN|xzV~lmt?36i^quHFDEML$f)7p5=xt!$1 z_qTyfi#YTNi!5zN-UbiAjQGt3X}z*2wp^vJSl2E<5@H1vemHd)2|3*ePuhtK%u%(g z;<29n3OGohH@-#ivq~}7TVnjhnnue&nJ8)Su!oCrNvAd~ePCL>1WOvAO*sG?G3C6Mdy& zA-R2w^KK()b{7aOu7VU3oJ%Ty@UdzKnmWb4l}K@lCY(iV<_MBE!c~YV*L3_{V}?uM zuO-wqrM%zo&_i4@v5)Y=yDaE{9XvbUeJ2!1x{oM7{<6`wP)`FH95`tEAA3J+0Zmx7 
z^v=J6{3XwrQv1_L#}ME=DRu_!N=l~nuK5F2y1)K2DlFo-tc;mA>#ulW@U49gzKgi- ztv|Vtd#{gVosyRsyQ^eoqz~PSINb)=lvTd;uu5yaGjtYx@*eO6+0$YU`7JjqmRpSb zgK}6%h~|nlYzS2I0E~n>L$tf8vxKd(4b9OAAeA^8=s}9IxTqOvMR3BzOg_fn{$`o1 zzQqMj3)^#0;x}C>YMcwtZOkeuzU~(H{g2{X#!AIm2ZtA^U!Bml*3*%|2&uxagemC` z+lZ3exPrgieyaq}a5wf+N&+w%E==bOi$0T}@sN*|lW7QtSJLB`+6Ot$SdS#*9k}jP zy{6;=6PY{DKet3)N}%kry%)IBMI0^+z}=?$#f<`ukgjDz6F0;Tw^i*VkK!G9>?QwC zygw$!>(YcjmdO)*r5moHpjuifoH9jH$b1tUuY*!f_k-3E*vJ!2L17_%)BlDp>$R27 z^{(ZLxV64CVd@L_&i-aVfUNwKeiH~vOhIB{x2oFm_I#+|yP2PkzG`n}`0tOJ1n@!b}qz71+Eq)^q^wP>qPKs)~O&oNkkX-^k{%HvWyjeoB} zb23+{NDShZ<(>5upIIZR_r5A4Gx67}V$>yE; zPk*F@u@9Xf&9S;Av_cUCsTC?N006|=YDp6<6nM?_pKp-%&tgl}ftwM3B%){HQ{rum z*iBmt={vticVg(tYVIgt$Teqh(suX@(KSr%s-_6Hs2oO-IggXdZV0`oJ)><0XX92| zznTvF3zV&qM<1`9^RTb#mGmv8$i^#KLP>-I4A-5PL5`pwUUhb3XVxLPEf2lB>Ma{xuBly*GgwIR9?<%(?d>*nu4Hco zlmwP0;`4kqD_She+9cQX(lmT#{?)Uiz9-E4G+fYuqgqvkrZlc~wDvUQs3=gg$-u1* zeRFR=<}D^gQQKPvK@DMPcBvPt;A{D(FF67RmNa7jHroy-RKlEdP&F^Pdfmqu9Xd<~^LNyMfklpP(x6W)^q~Tmy(v$Bw3Co6Kdz9Fou)^5*KWWV6whp{Gb0{FnM5) zUZK%euXS{P*F;hRho%k&tfkO?RNk(Tq9&=B0_)IvdoSC$|EZ=N*4*8UfKeI1dK#mN zyD4pZR(}Hz3*6f5B-?6f{@(pnNJ&cc%>AXhoZA!2BC9_mxQ>(acjW_yI}pSt<{C6= z1ku}GHYxIh6YV0<7o4$qPtds?o){&k(KXY-6W%$NqxPB)5}{d4ZtjN;&se1kwsGkzyLODp`}LNfwUGq?EcdWa;e=r z^c?PVHjp%vtc7Y5h5xT_vdEYZL8v)k@==f2df~$bEv(rx^za!E116AJ#vmn~1IEle z{A*x_bz|%1R0_WFvta%Q27JiAm6>ehjj!u5Y*@m9MLs|`aQ}Hu5Fjz$3Pt^>jp*q; z;z?h9mdYbKq#L!T`V)!`0MP zsF7{pE#dE17X2ZwJ8DV6S$7Eq73EV~GhL4DIsHfM1D>Vqfl*V%SPeYaoeMv|))&g= zl%39Eay<_+c0#-b3YL}l7dU0PB-;umg88&`*)9A<>iVSB1syyWHPvGKX9ZjN7ENXW z957GgoA-nBA-HrZNyL%&Uwss`HM1(RX&t zgkXlUhrCCMY53;jD;y-|ZTd{Rt^9_Cz{y&xUaYwGAM8z^@^bGWdX3;wE zJA)!}>X72|3P1&-g}w|Q910hwB#^b3@FtFtO(vq4>C57|U&VABm*6$GqKf_a5uhfD zfk3L>ah=@G9WNiev5PYa4NO!rf->smJNxBpo@Uo9gq#k;ndJDdavEKUu_;2NNw)O^ zD5l0%I>4spClIUrnX^?##^X=%M58S96Y|kyxEfNC>v2*yH+1;TSath4U%-gsVm3Fr z7$K=42{yteT#EY~4M0>sP#jwhgBZI>V}%;RwN`yCbic6imL509p@VC^$PIi=d-&bG 
zt4RFl)m$1}Dp+yYk2aneZPHC#yKYqR1;o_b)OqKlA5npDg_qpN~OSw1m5=DB>j(H-u7XPE2<|#6+8X!#g54}>3TEFePIXI z)uxHJ9JW^O^*KPsK-^D0l~OTMYY{GVgt8orBwg9?dK;u&ya)L%tjP;YU~Kr+`Kv`` zh%(dH14qpr1k=3am_m?2m{ZPX6?d_O3zaB1Z)_mrEM4weu4&3D$sbiAxMu4vKkF^sJlOvjx( zw%e@w8`8`wVn7Xk_5Rh|G@rq~=iRTyveYhZh39!1vtG;y^C?WOA$!$|q_u}cI-W<& z&*`73HT~O~lba&=Q^&obuDwz3@XJA7CE=7joxf%@!borw&0Dzi%A9@88>y$4c$D7X zX&(E!zF`S{`!tXMc!0zOPT==;xey1p#JuUC`$l@j+jnOsvqxatm>v^OU+?pUwU5^uhUL;QB z7&)x`lD#3r0#@FmmBCl~i2fKy`TR%^PWqYtQtQRtjCkMk>jUqef42ZfU4LV#X`6%=dKyUMHEX<^vg_ASINwXlkZ?n@v=WBoy}OOw9kyGx z>-Q`HM(up?_X6N_A98a=TeWV3MNR%KWzllnF)m2Qs6F9SCDF_|i14myxiB_6K`ims zAIVlf@o6U}r*{2RKeQ>>Uoia$Pic#dRC=-&Oq_YUBUivagyFn+XhjRYSH4P8>)+{UIlDn+t4M6b=>t)9I|exyn8ed@@wG2vG0;jEEJw5I4!Tl z_Pp%kd~SEdAb&goZ!@b;g9$@lkX&D<#&v@47VY3I!Bh+1`=?Kz=qdtr?HJ2qs&p?e zvJ$Bq|2=b!1+R;QgtYhX47ycbc&6&1x*@Ip0dGL!5sLUh3oGL$-bx>VN~i62{j3RC zdufe^;vgr*0yQq%`sj^wFTs~fpZ32P!5MhZ`CB)HFuDTxKdfn16*~Pw>mrH)Gx8t? zrnhyl9R3q=y&40=Wby7@t@O-fYf9j)A{`b$4k^vZMoLk3mi!QsSMqE%V}t9pw+sc+ z0_)D&l(z|%=zIgq(kuu;LsHe(4sX&O@nj_3_$NKtn5Z7<Mfn0Yaz~Kj|H(M9NhppGWIzsrd*`?+98XsiZiiq$i2BFnuefjK_1{S3dzcp3CpTo! zlFz@^gS#jk`LE#K_eGDX97h+FjD2uL#b1uwBY>?Eo!T+Za3Dj_`>RuXl*6MiPEG6o z_t$J+5zy{`Ayl#=N(aR!KH-f^ZcgmCuh4X%y<$d9nsnNA$Ri8DU6bVR@OO8z;Xj$k zxUD(u@4d)wRzq$4xjm?;zQ{S&L$Y|nwc!B&uzY}YLsExPl2C0lARld(o0Q!`UF&)| zpIg{6AnpL__qgD4xYXsS!s%{H$Y?+6l9|#Pk7pHb6H@wF9Wd$ADAN4u9ui)?@#!03 zfh(sSaElbDQ|2ce@~AgTh}wItUzt;aq$G7;9Y7nf@~}B0vQ^{LHtuGzA1em#2tSE4FpLO_Vj}f99`V z$DIL^7u-{WSShseR!>^-PwYl#xz7`1MN30m{uoeHf z)PxU&97p(9OXKx(4dQQ}>-mJ`Jfm}9PY#mEKcN<2MJm!!JK3SML z3@(c{doZpfbW`;zTTrX+L08L>&3u|LyPs<#=rcIf;dj?;+&BEUKL8oE;6sGP6^t)k%&d*|v zMElBC)b9js_BfH|{z>ILL9RO?lJpSW%*d&z1Ia(Vt>N&0K1Y(uirVpe_$2yKK!5T! 
zoMpE%SR4`?0FmL0{`OX`3m1=E?bi25vmkOP9N5ee`TCjbY{W_ExcZrl2T<~E&MtTe zJR=FLU;Y*oi}24`#gFf8KMQ@_>1M_|M+o}ImaqBVFxUY;1iN2W+pL-!RxdLUIEvFZ ztJNowMUvg!Eg_GIs|tjcM_GONrGqKczZ;{Crzz8s7~ zmP=4)HM|ZvPPKHP@hcI&b^j{m^i_Z`YM^(_Nu2I7R!TD&dn^;C+~aKaOda&gf>ac| zp+{G+|1+K~i)2W4KAuK?aXeh9-Yyz6%6cC_+`j=s(5##EVnPwP!gz%NIs<8f%~g&v zMGFuT=iz7Ex__6OxLjbjl0qvFJ_s4`T<^0Wf|K$ym+9!fEfh}N=e7jlCvhL9E9pHG zttL0>B;GAP8w%VGku&_&LIDBPD+nnDEi`K3_0NeL^~X=B(sM@*0VnL%TWWxWK-c5Q zkUbONJcV-~Dtmv??*J@Uv(`ThkiTLkGvHw|Gg} zaGwJ4r=uDk{V=p6-Rg8p9AH2UvZ%${jA6XCgV6z~mq*VsZkt*s)bQhNO|mPLOHCFx z8cu}<6Gg1B{6;^@EVP^-wAzlb)A42%{OYo_ewTbju%B9HHzd6}cMV_u4`7VA`(@vA z$aNWlPxF*2G%Oh%XF94{W43sMUwM)y&4ec>kuCCRSK0=2mIPO{5DznTPbTS40Swud zPe;8oSu*-sauhn22+pLv`}_ZqDx||gb?V9yRS$nC1OB-*aAJ+LDXf@1U-_=ZU!2tR z+l<-*Ls7c*l%7m3Y*eJRDpL!qK`8CNhfD6?QdJ8Q={w-^6+A!dpw*Ijy$R|=0Ta|j zGFPaTF5?1d_{2?lJ>uHn_^o9=j@soWHkS6;7a# zS@m8<+Bl}jkDqPe0CdPwWtRbzj?xC_nx_#=94v1U`?_I8I{37Hi;^-1akfR^lKxQ7 z62_k{r+4%IQfJZJXdoB3elaB;_hx6s18muiBpXF2)88lRGP@7!%AR~~YmeuDMT9^EMH3Xp4&L(F2J-d#wo74j>XFdX>yrwl<_u4(6pz|>F`@j z5Y=qYPEdYy=j@uK>~fMz$JoE8D)lS6>;01sF_-Xma-x6O!?2=0j%Q$QW^AndhI3>h zt-q$MZ8AD!CHyn9{AxGtsl{B>}z7^NzWq%26o zN0dCE44!zTamDCJe~Fdf=zT`Q&!HtU8tq`iPCVd}FY1{X$+xss@(S2H2X3d5PA1^8 z`wg~O^1?*EnJO840-JBLmubLar*R;a{!`%+NQeHfR^|NPW;9-`0CeVgwDYzOl)!UwPiU{!M!#%?eTorX5 zEE*6^SIo;{zKOZdT^IT{F{<71XCDK`>1cGhUXA_(y3ut7wy76UiQN$85G$Vp zrMAN5gJe=sAkRDCj#W)C;+xWC)*ijs$%`Or-^&zAhb9li7j?yoZ4as0Y9W*NxW6|v z63_71!3_E*lB{8%#0FhwH1b16X%^j|j6=Nk|1qcW>P#D05n-PH7dPQiF6EEI=XT%W zGSr2czc0>I*3Pvs598ou(>|ao$`(9+>;g(2CTziIOpVLB23^h%RT`fwf{~j?Ybj;b zw4mG3*<{z46|fALsKv*JC{Nqu{NcD>O)LV>9nt+ucT?OD%D4PAWNRTIbj2;;CBeW? 
zGUlo547Y$2Yu_aRr3sE|xn0KjTv-{82dB>5Hw<-Xz~#Okx-r;ySxxbHd4UeQl4WuY zuxbgUb;?&Vbihq<{j9keZ>Ed@s}xu^ac$bYg*BS{le(>W(VfFcfbc=k(c2TeRi9DtS z0qQr^QtRKi4FkqcLvF_XqEnkn@Kt1}Mg!1cC4cKrF%G}=zCV3ya$oG+FS;lNBpKd& zfwb}1$((!#=Qs8G!Tep1gf*GHkWR9Z=6nIWxXPBBUpM5`__MOrXBcQyvn&Ces?Xfa zhkpc#`NIFjF9e9D z4hLccJZQmYvC;NQ!RV@|w-U|cKs|5Atx)B>j}j)OYu7T7horf!U6j|f^!1;M2Kf2# z4_tQm*NKgl72s=o`{LJNOiBDDrem{|?H76s*9-ZT^0ZRY<}oI*Mhq||u;#q@30>Gv zTD1sog_(n^Y?v10g3jEbu!yJzE%n3Buelt5vfa0fFIfh?KtQkZDoNlyePw5b_ey@y z%g6fAmm$e7vtQZ8Wd2mqwP2A^zsNC6xKKs*WA&noBj)R)5OTS35s#gPyGpe|c^KS! zzp=gbqB~!$O=`yyMA8jQ=D~cgqp-h|cJ<_4BU>5VV-bC|CycgKnO!y)d|be_)4Ft8 zDL0&Mqn1lXv*K{*dM#ixumk=QLrbMek7jM7mQ#pe|53A@7;F~qMy9Iu1VB*5i7=s-I19;W95&3bEEL_(L2$ShD_t!DPxI-7- z*OBI5bmyBj&+0^3l4%#*giL@7d{iS{wNMWxe5}df2yl_tW1qm|#KFCaY6F=}CAn+$i%X`1AGuRVFinpPQgL79S)2ZFrBiTSi}cqG zTcFK|1c!3?7J7~#v{uaeeaDykL0bNeN-xJqjk5s$<08?KC@IX=uOl1#cPPB+6u(2! zsGR}$J%Y*eqN2H4PlSp0Lpi=T`0br>z+Z3-qGW@UF6R}?j^8eBV$DBZdeN=5(8~Xz z3wfLoI(3*PFmOigkWwmfSmie*s?)(TvHyL`rREjnK3YOw_UfC?TQX>jN9AJ-NB#s1|UPaS07HWX=A?Nqlm zZU%JaY}7Z?592%RfgTQv#diwdRHK&J3;xItmAt8w%^H;3qIGch_pPIDcHiKaM;Z&U z^zu(wCdAuv_GFq-ZUxZX&;sI7FM!20_69%O#`B?E06x4%{~F>EeYDuFm>NtJOPOAR zu)ASHq0IO0%E9>!bGUN-|4)18{to5V#_{LcUG+xjgzQmtu!o8;PKAxOQiO5tsbj-0rwD+tUYVXg1z;rg<5`TjbN9Q1wddT!zZAXXeqQc z2jd6*O}Rp^Rl7&LMb)#v4y&X%W$w8<^}O8Ih*lwE{8!U)i$}?3L{S8J@1(HWwAPDyEcV6yhv&{`qDjV4><}>nU~6C4z#ky#el0|g@l6Pn+lBB5n{*ITQL<);FLB~o%`^lHZgF68nqFFDY-SgzYknqo*jJukneFa%MaGg1(0(uI64Dcq=o z_kP|La5hAia-`hE-vHqF%kpw`CYy) z%Qxhh^1eZp$G?)hbeokMO`H>zQR6yv`8BM?N3rXH3EC40X$q7*e%eQFNOA;R@z3GoqT-f47nSyg7nPP9_i+f^CTj1lqA^ zbJkwm8At%>BNQ!nX+_`;sQ@D?#NsVEc2;!Wz1?@6u9j5P1lp|3NE%PbYxZMiZ2a6= zP^vfkEN2uSm)?F-1;UzLkLi;91GbE)^GP8Z$bHi+0>5Ib-T`ABW*_;R3ReBpOk;xx zE(`IQ)7__2A5Cl4ma+pjHj2g`LbceQuo_M5Zath=#?bshAeyKp-M3?^IbAh1%4f zSXXgtrMj19J0wy$xV^0K*wV=X3;Jn|B2cpneCht!GOhQwi8Vb|vD`8+3N_3}`fZ!N^RCN0atW)DvpoNq6f{&hJ$x2gUbp)4$>8AJPEz=_t2OyGYYVzY z6daC)i+?{FOyxYwXefMgRK-V9x<;AovXgRSY`h?k|6)%I$1!aGySx13S(R^=-rDe} 
zI?60Q{!TAMOamJ(%K~dql-UmmepnjRcNkZE0~G(7m@69$vwETKpR{L7lSj^lK7kB* z>jLXQZpgE(D>XOn_Ur<}>>~pj??Inplp80oMZ&3Uv^82Uzs776Zf%G}rMy?*fNT;m z7*FkHX#G4)_T97`s=w*h&CzV(>ygT6Zg0?Ac2V`X&Z#Z*DFD{^ANxZ41^{Qihk?1 zpcw>$H%D3{Oaoj*zR`gLAhX6x#-6EKB7>zl_(;!2Ax@8lXcom{1CiiTA@a%*SQw@t znGI$W21D=E4*ykAy*oIi3$jouuz zFkWI$(+ojp3gW5O#>c#|6K^V!_)`dqgb-907=5>jtn{=;(V1BU1s zy1^AjvP?sJ99SaxhLZb2;eMFOP+>vHOCUbn$Xoq+q~I{)4LJl4FWMM288BX8~Y&5}<=5lW8sk5TR%{gB-Zl z52s;53x|W#CUWlJIck&ZwN!Mn*cH4R6CmoC>KkDw0$&ehwWchghXa+`@my^Ie8+#l^G$*y|7~A0FAOOMxF<4iMmn?Cbtx?VKduA z*j#|?LJ<~ng=@jLSX)spEO>=p7LmVk84&(aU8f%~7o-O>m>p?buLTGd{Gi9M)PyBf zN_HYN6J}N`bSS_?Do1PS3kCJ&B&-5nvlXBLOChvhUx-KC;`f51u~<|;@S&@Sgzgx9 zNxzf6!$5qp}0|wJa17~!?1aFxZnh1ds!%& z5B17w7W^vwqq_9(sz(6-^t}&iK#Yuxr60AO=u@(Nq0;-oMfMh&1+wzTbW?NA!vj#@ z^_q2j{#f|0id#5qK6INWuzLYKASu+`*)VGI2v{9!#g9kh7mgl>=AYx`tpWC}|4jN8 zSVrAT?x{8wf;= zy!eB6d$ey01i}?OdMKywWwtqY)z1X&y&9PI$u6y~el54I(Ggd_wYB9~x$PKV_Z32u z!AyN6MoLFTHK~xPVM-ycFzimg-Lrne%Wp2ny<~z#&Ri(NkzT3%1s)pb8O#N8> zfRR@6V*imuyqEGS+@KH7EUxp=PcZQEo3$7vCs-WP!;DrKgTd-N4(0E%C>2weKN6ZZ zJkM!s5gLkV7fEB;Sy5+-jEo#JlMYtJnzj$IDJv_7BGKfwLN& z4f$Xak-J3=-5}JG6f+~2y!6>pYbTb#L(!-3P+y^`9z1Z%)YSB3`r2_N*z&T!a{ny{ zN5@2r>s1-{7&5#}wGcmPTT^K{jAr(soS}76>|C;BU!s30CFnIv2?$gZDFS!zd0gcq zQ?#|SGx=Fv1RJF_8_Ed1R}M$Sk!DjCM?u;<&_}A~#tYF=Eij<~VNuaOyvCu=jG2O= zo=L(Zq{K~P2vgFX#8m%Nce2M|*J!-garFb=KZgee5MG*i>>sU$LB50J)YluLoKB{R z{q&OeqmnA+#ZO16kH1Mr3)J$%fQ`|{X7dPPkJcDKr+0qOYoHV*_iS6d(4=Pb=wSny z%r5{K{DgD%2{(&19Q3!z?|hes2}$#rG>qUz8#LpCcqj{F56EStpWpQ~Vx&Ub-o@PG z;0U#|^pB=GX@cdP`e`NNq7G$1&tcukWO(KM9IL3S6n-S8!QHt$<=C~o1Z@glRlY{)t&DB) zjuL6RiE57`Yx2B=AZrq=G1wmTMc+;370|x)XC5XK_a8sUO|vF{o8C|;hHW7`~*PG7gLV**NP7~xx1V$;wVsbOgD|{GsL~zrV=*J;fB;C7} zEXya&PxzllA~~fPF#ji%21Hi26kGLp6qP;ZpCE@om)&Y2KZK<^2PQu22^eAHq@oZ z!=Dsfb2|{otIjiXpcCCtuHEh=D^CF8=WH8&LJTaf8(|u>4 ztra4KL~U+vjuV5V?*gle&$u0FTDN?}$fss=qotpbZ>w=LxjQ=glY3v|CmL{HRoQu> zPKg58;jiUcS3{h|K--P-gSg-H$nY>ny~a6+NQ#sSfCVn{TL8JiDaQZRi@%&`aOEMaVbvAbVF}kM@H=6MY~b?hi6X? 
zKEz47on1NEf9r}G-NrauRSFXWn3zYw3ueo^1z2Co-lhYkP=6f#gw7ObtM;f*e6Y(u z)^@N!zF@1nA}9tZKY+W9iie3l49SRW6w2(3$Ko*3cJ2=^o&h!}YO1~ie~$mdlh!kQ z$IBaSc$D)mf6ZAC9nk)qQ`(I{m1F5vs;bRBB7loN4i^;=Z9sLEGiNXNo;^iestG=j zGPAa+AZl!P+}n3F8bfy0SkZg3dj7V zkIvo9?WG9Zq|vp&qw78Mia(sT&M4L9}?B~BZt%%!b6C}}HhCYE3ya>+W+qpooHvp+AcNjq?1>f7AS(e62!3N>o+ zssd~AV8SPe2W}xff2iYr*0n`6`8`-J%J6g-|8jBzKtpva#8*y87esfB6g1vEQ~$>7 zP()Jeui73uOKFMjsUaG0g-PRBQ+*gGj^j3Rcq4;)Jy6hk{w>n~%HQ)0`8XlkFWPY6 z>KAa|2qyGORQ}@4zZk)M8>5z)K^7c1il469AlqTej)6~#51ApmgJa$z6Z`xVi;6?T zDQ5-1Ja>(a=0Br7m)8=pu72l!rT?8EvVdc+7fuO!04SmMyA~k);6I>IL zI5ZdqpNTj*z30m0+E6AWSZ(2X7O2$AGOf)yJ~1(YLh)C!VUk(onZv0Bl48vX$Qo<< z$EPg)8M(R63#b;acLK#Wd&8DQaqh>GXz$1p~IUFj!+IX{#6vFgYR+`<5o~6FN?@f57TSW50)lS#Pg3Pq)dQFC#*%%ELfWkXSSIz?%Tj zYL$l@eUDp%9%gg!^w=mK(oUTaggs<6n>L;Sz?aKv0PPjDYJW@OPfA4ZY}wnL-NL-0 z&^EGh{ihBj@;WE^I&Z{HiJE;utF=b+jcOl38yn8ztx~PJXnxeLcm?MQJzu3}QSZn* z;;T)bee>yTF=B6pa^ELyUwdCRO|(QHu_It=soBlrcx36plo|~N?LO4SlRfi!hD%AL z_fCm2Um= zx|+z$w~1AJbkE$wYVF8d7XCv=^H*}TRf>8!eFmuAgNW?SCsLaYuq;Cs2bTTDjB`%a zPuVgq#bDAjofDork$ho(@ywY(2oUKUMy?uV&b%rqEO2ZzGG*1{`36b3oH2Z`@5sDV zJ~uj=qj&2LBh|FTolI%iyGqM~%QVib!snN1*eTD3%GU{N552O7czKWjk^F2{uQPLO zyRj6!!F(OoSp*Q&-kt|d=diEn2q9p&O6O2Fi;erfDo8|Vu0?mR_s%O)wR-*do~=e9 zn5fBk8c8+)0nQKOB^^JG9FjK?x|h(&xVBp5Wq5x|0o=vtFrwWfW)ZiUqo`!psC;9u zUdSmvP$c1o`y1%bH-=^f*XewHef^_C=!XQrfce7-Wi%c$@uW&Z`BX*W!z}k4%JcDU z6$>EB+KN_KGo8JgeherZ?*S81_Hxfxj3QsZO>E$^)dbkD;NDatg3p{Udn-yrGF9sE z5R)`@;3F#62lzye-tyG??1io1_`o0Q6}AVgf<@j*Bt=BLsc8J5dNsj3Bq<~D3jKuD zfCLh&b^+AIbNgLsp6St1CdRDkaj|3B-J`p)P)Dj5%^lsyL|*f3GKEl0NCf=F$dkB{ zu>F!lK%xz}Azp=GSsw6~Keub>&w!yF$v$soBH;=Br{Jwt}lY6vt_4J`OQ7gdm^cL&5hhllJBWO7T<3+8dB5R zadxhddGjJGUsV^uEI$W-jElGRlk2sD}L`p?R1nI#-k551FDTO@2;T>Y3W&@F@S&TEy~CksP1t;&n(FeFsf zlhwY;J@d@<(Y=8lCm)IFnR*7Sa;hO2sr`(5^RsO$_uNXxl&A%BcvZePVkdnh@8?^M zMzO{_ICKsZR=!~5glroNXb)G_Wx?#MI-xsUkS3%z@oy~CBTnDCRc5ItN_NhYN!WyW zPim;(XO@s5!$M@gRr>% zDi40tW2}o>^OjLxYF1hiOdOCizMTK1+14X!&#;=#yZtR2^L)!xM+iI)p7qeRn^waA 
zNibp3%>gV^kht2wwR4YgoaBU)fF+f~Mv3LHDp5~c*b>>Xb52gMJ6UEX2WGwZ%?V?tw~bY@ zk?kq({;2p#JhqEWg72I8GT2du0z7`E^2Z;B>p zaD-B$+}tpsxuM?y^|*c_YiF6&1j<`I??Z0}!*gln4qqc~EGbe>UrNfReV~mk9Wq&( z!|qL&{k*YaCbe+NC&W8mZ^e!I{;sXo1j#ij>wgs?_F^XyOmoM!x7Dx1)OzDH@oyHg zZY6T-HguZq@#F0YeUu|`C9FR=TXZmn!0td_kkNa^x{+g?m?{&c$WgYcn#6*Al7g<< zlW(iDOwe`LAVza9?IRc2`g1c&gcG8LcytP`+d9VNWDE2Eti=<0jqN{!F@yrC zA}Qm=WNa;YXg1p~*8sRtlKuYk zUd3dw@P>Kwo1;zDz0FEC_FnB_`L9|Q{rKmk_I~W!?EuHDg>-e@t;C;N(u3}8)Y9Fw ziZT^*jN{TWO9hdBa;t%Ru@D0fe5mP=uiW-*Sc`fzd7r#lHp0b{G@wphyHVg_Qn3!> zZ03UKwEFIlz;^D~sAu(SiMgfXnhC^WkmkRBKUm!3 z($*i4G^?P^NYO;;bu(P-%OWPvqpDm1JZZSs`jQq%Al-`$u$oYDSOBR<3 zQ5l@QCEsQvPVLk3`Ec^$FBC@$Z=)&mh!~0eGuL4s9-t}k-il(25SSB8a?;;#0q4&; zBUf~f+vD-2fu|R~OJ?Bl@eLk4?MP;|QS_h$^8Nr#ePcJ~eE5d3Oi;BF6nS6$GW}@Z zI|7Mvu33_U+eOguc)22+PT}+DuwW$RV?LepGD>M9i>J{p<%k=X_zE~N(b!9lwes*z z-+Phxo@*XWH>pw1?|#e8)vK^ZRCqo5pvfJIq~8yaxd% z^E8RLLY(a*bI&7CZmEf&=Y1n$n=0apNxA*$inBWcVYa4Iv*zJYovX;AR+!%UHdPs= zf!;^e6Pw7#Ru79Pt)530^Q7o-%8gvN^;htAUv~9ryJqT6qtIqmn=21z%D@=re?DtE zjY-y?_8PzuQoW)HUq^U47u-ZiBfl?2E}m&tO_NM*O3#~slgSn9mH0Eeu8e2CYz(_0 zb5}XIYth`u|I}l0r$!TzY+|-QxH9k2O+pmP?ty1@AG;ic94OPV4ZQfw5?V#e8P#)xhk6n0;=wO!H4ABo%G}M{?Fx!rHcO5)W%n@EzOw`VgZ6 z^dw1D8#8`+dUJfTY}P6wFG@mhJ*JyV3#uOluT#bm2k48G+;K0Yrr{g7I5(%cKcc27zl>St zbDM7*{$}`IC_&wjO(Bn<2a{YUL|oqPfs`JZa3*zFG!(@?9h_Y`_7822@29GNmef+K zC@q)>tAskRqv+YSEp!SFw_zfer$07Y>Sx#7ZP~w!T=%xQGi&*&K#xXHtbCWra}>pM zRd)O-06^5~+8)EkfQOZu?Sl$6?ni@h#PyC0V~R;DZKj>b3?8EkI`FvH1{_*iRyJq5 zU66)?_8~{HC)7W@ZX#qL)Ret6Rv`kjPMg~5} z!>YR{7yN!GCh2);>vuTAcI$P2r(NjbtFcw}$QDKl2KaDlZmTBJBVQrI1P8afc>gLTQ zq<|~;Onv!}bfq&FPuCqjzT9(lwA`x0ke71IsVLB9~2#y4a+*G1csTE{x;pn;*-nv-0c#XYt=~(( zOz449?RFPBBcEU5#kI7DT%TH}p$ktDyj!+krj>)E0?MrjXe6);0Tc33UFe?DH-ns+ z$~Lh&Q|9wnrZYLS1b13-*O);o59ft{$?^J*7D&wDb4$HZlKbrq+d-xoRpX^aw;&q= zd-H-+EI)6&FD+ZcYeupVurDG`^3K~MR#IZZm{y`WSbau z-jn7{(BHi6B}cWGE>*x7sJ zh?E$n$xc!Be8r+%SMg{z@m0u>G+OEguce-vK{i~r;4nB4$oSFI(+BSnd3!r6z-U{N z0P`cE$FiD%HrPl|$~Id;38JWzfAJ&tFMfo`nMb8-rZNeQ6EnKg875LRg#8zK^t 
z^lvvF+;osm)twGX+t>OWlok|+K@5%W2JZ1w5cd*AwJZ{6LWauzmSjS{Di8JB2m^Rz z23r{`W6kssbx z|DjCd%BlfcTBv`og%dEZ&+C*I>YkIqKrkTJ^5|t_u@6UwIiSysGctwYTodj3LMyR+ zmCbEh$%a<}t*>4-k7b^zc|&Awym&WykP6P6AMl{4SN1CjsT#``V&QUQiFc63b*%5W39MEn-lhtGi zfbV@dv!=y9bL>hJ%bBZq z>&f-?ylpPA5u<~2Ob@jrF;e4dygyS@>%esy{dGxJk(S!KQaR~K2ADQ7I8gf;f`UQe zgR7FS)3&06qEf*vLw7j{@+MF2NJTBObRcpqCoiva(bhx-! zKCG;rd!f>Zcz+p^D>559>d(3Tqh|IGAgj-2p~WM_$=L$k8bt#!)b_z-5|KJnZ4yC$ zjJEnV#Q7Il%IhG#)V=28mF?%+<4E)WhT#!7D3SM-AGU`mk3ah*5{b+$A#hC7i8Lq& z0%LUwIbBdw9f=Lq_ic_M>(R~eM03&G<|=Q9=hsJ82t6;`^OlaAl?lCeVhl%UN4v<6 zBwKLVTm%2sWYD^%VB*PvvpxngC>RLodxj4D#QyGltS>FSo68~)|9-ufNnwte`fe`X*|9&-w;N^45SF5A+*$_bpgjX*87j}Db9@P=q z*n1;%eooJov%7Ltw-6F}tIt~hpzc&iNK35_0Bv&GOs*%<2ascj4H~+pE`g--;k_?gbZk z@NauO?@2dBx$7FQI>^shQJA`6UIS}RE4NIwJl{4?TZt3cmX~<|*#AV+iRovusp)_l zB!XPOMt%OMU+^Ctu4b2jD2;f;Z|CS)d--AhVL}HFrC*c+EpbF!|JS5|)<+L5Ztz^c zTi`cH=|VLu3WOOiLW{T?>CdA>+{pCd7JNI9-j`Z47lzuyi{8g+FY&4IF6 zl%`$6?mB#n1z@?~#8>%4*#tb2yjb|lAm`rRP_CBqrLgC^Az@#D$8DTTB)S%7O|@~x zcG@oYph4yu$soAv+1}wK5Ows<_Ltb)VK>ssR%r6QjfVM2 zG{)`t5<+vvyTx%Xh&np`I@JAjM6Sa&5C_sx=QFIXwjA6qytPM~Po>#9`V=KvP%=fB zkX5)9As<<8O)t&BWTpltmeGc}c<$Y7%Fs+*7$yHlkISvC4&8vlm~K@`@+%UJ?{DfbJo;Z;mxVw?x!)7X~? 
z@NZZ&z9E~Dx39x1C!n<7#F5xSjp#+RVO52cuh^-!cYLDr!-ubLU!06fS`S{tl+-de z$mV?q%#^OCE|ZwK(a;9L@)e*gWqr~D+YL{UJyu*Uaf7u2X{XWr*YDZagp>22G9S5p z0>FfR)54v;S8Ow1@~?1yPBSnxOa@!_yf@eo%$U5LfYsLH*&Dq@y^ScPaOl@syz({E zu%}rNnygcTKkiD(1h0RIsRhpHbv?yqz!A=)P&V)U^7K=0h|HCP6op^NIRNjoFGcEo4Vv0e3wv*^hTMLa=4NW}34TH>PIf zhGB*3MAQuxud9N?eBZB_k#CS|>8~qj*(B?dvvgY=M#?DX^#SvG2}jqRtP zjF$BwuOLxs^pHzkP1h6YZUv;c=Z1R5b7F>>-NJH~0@X~YGqmrH+FFPsohRJSU+LPbXK?R|ljlvO#phF1< zS0*A?=*rDs-bQQcIA#fDKJq`Yr)!+$u;%*2?qV&#iM_QEqp@5_`8h%8F5z+^XkHg|*EDN`(Yd8= zxR3luK(Ks3ZbgL1Xz?zr2t2o9P~%6+IJfvYSK;2qkuoXt6nCe8Auwk<38fGi4o~q= zM~|%g%@QrWN?L^-jGdBYmUXmdBA#+PSR}pQn1g5S1UfWFced8&;I5n1NNdwL9Gn(C zd4oXUA4p)Pgm=hNe_^L|s3nQf;V%K6Pn(@zPF9HtsqW^{y%w-RqMP#j}_4{@KToou|>K zds)I%D1{|n>fN==)QX`mw_Qreh+oZ25d zPcnW?ZCFM(h`lX-b`cjT6#_hmj4LOuvw#&(%>jE=2h1AEb9DbyRZ{BOZXN5pMZpKq z6iw?&HRRONI{e`Lru6(BQafJfN13kN$B)OAvT8j-V0dO1CU`UxCNfz_y(c zH_fxAH-Z;}koGoj#g}3d_0k0AI;E_0at6SeO}hp& z)l6#?^BNT}!O@yr90R7?GVnPU81@ZM(ME8#zS-K`l-W+Ho%v?abD%!<@1m~5{>8xB z+HO^bhVa_PXM-+wm3l*LfHN^X`nFSQnOx8ox&9Q$ihr>@qQ0~og`$%t(_c#INCSfn zqKs-K7%^9+b=-}3d1}1YTR*zl$cOE(v0Hkasu~Uii zb%`F|7Fi9lYW#p3X{`b3UK6E{VSe_fR3PM77|e0+5J34i(i z1+?=kIu{My+q}*lL(jKE&MHl-F%h`0Y`DJpo3UP#3-2##1zl2b`L9KiT#sNTgnmE( z8^)AP7th;e?V| zp@hK{PoQe1apg;S6cFSJa@wW&3Dh>E-rh0I=%0RCjb@88%!#KbEtwF%b?bM#c8xf(hthus>*^Q(Mo;0JEde22oJzD4% z2B^o_4B9j)*~9+6fOk69FI;>H$O{eZrUdytwUkk^^v{r~KINVCXP6o;(!SU-o8@X= zh*WfJ*z-UuUl_iJGaFO*zL7WN)n;f*i0k+>eCSpyZ9i3fvzp}07mERQgKDW7^1hph z+%Z)wXrms$W4#4iu{x0z3qGT5BEe5-LiR>&=eBf&y(0lZVHM~OBsi1RlDS?;^K4m4-zmh^?FXNxTb=A zT4YNmywNO(>7iY5_ru;a?-xZq?5dTuQ{X*w%#$F_6N`J^z0>a84wEFu#-bM6NwIH- z*g`KOBtq5MW~Lwun@UKD&M6Y02*8)N&kumEO^{(TS%P{Lp$A?FJOWG+?-@~Wi#vXp?1M>g*WEqM;ehBKvC0-AH=?_O zVF&D#W4bob6hBV84Z1yQw(ywV%xGTB)mfU#e64Et%o3)uOe~sb$$5$e`9Vu1f7L0j z(Qcz8qu_qJ+WkegvZ^6w=NVv$r@hY?ehFYNx zXq(kd7?zC+ZIkInFh?IHPcNx$CU*#Tdqcjuo40h`tW{c@bYn>|xxK$t&8uEw34K{A z=MNRElnyGJ^p+(T=vf~@e6}mC&~rsPkC9an?o3=KIhm@7%&Et>pp);QXlR2LZu)lU1&Q?8Q2lSWxGDmof zf4M-6hA%$C 
zvlV2SEbcp;k;DdtX6Jc_KTFN+e7X^lg%z<6mT?tE+oa@Sfb&m2mp=_U~sA$)&A$R!~Qq z%hS{6-OuFB>&z149INP`xEaivQP^9K~yyuS1CUTwH%8=xN zc!#NDy@!1^@WG^AI3*=JVh_-9rTa_bfKL-I;qNP2*FGdlxezz^T6f z&bYYOx^^y|=*$Bla@3Ip0tB2k<16rJct}MI(+%%el*fC6QG9OZh#qHhwtqSybeP_uWr=)__IBsWBRM>_i+&o=%UYoNRH|06A?Nzu`p(Yq8JDfEZ!;JKx;uAEzzjaG zH9JBW47k<4VQU<-yd<1>fJT5ooEucDK(+;nXZc)_1eEOrX_qy>s0jyAn~Yx4zx?Z- z7FG;LfUD7v)@e+*`5_)?J;MWS1#~lkK_<_y>XZJyhk_V=PLT;aVk)bt(Q6Lw$OD?= z4th^Me(zJe57*(3*8L*_U{nz3NdA{N{{PSaUz3AiSsd})+?>sl5zyTLZa8wj`U?ZV zUvxruO8z1a2EgjqMQtN2EL>Y(548Spvr$^+RNLCImof3=>HN8QIfXh#-`38~Me|JZ z!5;C13b#t4sNgMrlj48+FbD3T*(u>Q`wTQRG*BpXdS(X5gn2w=%&*dfG3}2^B-^u6 zW;jr|5rTyu2K0$nc94etoAuEX+#z z(7~Zh^D)q`XRqk}i<^FZr7iU(UVn9cee9}WZSI5r=|wHhT6@HOCG(C#-hal3(N`(6 z*Uf2fZ_mrK0J1n23q(`znO{{kD9F;=x)Ll8v?&U%0KIaiaS$*-DJ}Mne-89uXNii_aylvLR_2%u&De`RaQJW=06CZ~5-uiv!5E z`zI?UyySwMD35y{ATpW0*awH>5=)f&O@Qtc8wn;uJLBQhKHN=O8X6y8-%Q9Kp?;eO zp3G0xeC+<>g}tIQ*aDIZZqR2j(EdoW3E0LqlMf2Oz(WOhG2FrOOdBXw6b&t{)y+)$ zKSQ6ZE9f!-gq7u%+}^77y#E2Bgh_3jIN3g*_;uTd7V6-xuCA!4sM!tt6D+p5sR?Rq zj8Kfq^(Me@qlc8wlf)hLYNW?Lw5kFfrPCK(%)hR{LZ*`UUO%2XIxc154XZ7eXk7n2 zvA@j7Aj!(H7ucTiKTmuJCFY>?vw!xC*Jm;ho*JkB zzM5THj~xQ+yF>v~x(X2Ko|#>#p0y+U*CTc++_iOeJ~F9Of_ibp$O6K%4w%tnspy=M ztq9Tbmi8i_hBhoaQokM3nC-ruYz=bB=Pa_O@HKcUtxB>=ok?r zwjw#LY)QvO{-L9n1s@*;=W;YC zZ?!osO)Irm8$X>72Vu=q-_?)Xvy3Q^@**7*ge44mXH1Uf|09e$aPryUq0d+uvh6@C zZj^4>^?!KbCB#l5o*3w&lz@bQ)zN<@TrLW5pfC+xz04o5j+d{g17yIsb4#l(Y0ud}j9FQZ;`R+_KVe_Ro!V#qI6pJg)(AkZ9Fe6;(YqL=ED) z;E`E$VGrf=O7zXmVM;|^WOVnlN3*hQg-b_EeuRZxjV#m+C8Zx*9dFpY6M6XaP3MzH z&B*AeD0|^V37?-_j7@-_zy;}o~E>m?h8U>a!_U`s3hp|e& zK9-Dj7atKydR$YJ!1GizBEpuoJ=rAUtMa5xF-mLl)b31?W5I%}@-zh`RwKc}s#Ja1 zBz6h>3360@V=*~QLHtXG|H_hwWh5C+D#0q{X@yg|SZ4_}n0 z^x^uNkLObmInL5S0%Szy3{li4XgMm^|KsJqenl=W*W8aXflKNgn|M z+?y`H9+ys7dt}ZghNC6_qk(GW@x*|&k*M6_bWeS~`?o;z3f}XB0lT)kwKdW7(K4Oa zq(QsrUQ4Cxm8L;AU@!kXY{Z>$;e0s2BuYR^uO5#*pZyo$|L#<-uN{l1j&+ow(s4Xe-q5r|1bR&bbJ&HbTUTU z+*tVqnTv4-fcl)7uV0J59DNxZ`|cK$P^U`zk6elsIv0C`isEQ}EileesPm;IGNi`r 
zQO}Y_v}T2$-yVmSmh>$B%nQzd2MM=f>g$(5+@*z&f|HVxfNB+>wRXpb(CC6z0_%YP zx`A(qrKDV$cWL75SI(J-9Q!#-1~}OYX^AX-5fVO?W=&Nr>$>j zXb5}#^ta7|MH>N)6-w4s z?k6SoPzlty=u`{y=kluT z=F(w)*jbV*O5|Ic<)_KTcdR%P^ycU!KKYMxDz3UPB{u#}Ih} zIBlBUGKBFgV5wuP$|$4kmUSKQs%`Ut<$)*+u2Tz@ZoE8?dnE}oFNoBp|Httg5B{>) zyHgonSgBcTE5TGRjL;C}OFuo);vj{6j|--b$p0wJ{Kx5heBvlW*ZvdPs|YMX?TeOn zvGrB4N|hE|NZ1g)j;W|MO&sx;w?IJY*Y(owFnlG~x!pR~+j}4p#0eeKvgSXXU4G4W z#=2q^o6Z>C)$sHQA#=&y$bXiWB3k9O*PI!z;YhC0&ko-|_f|XU=yx3bS(9wYDmeUH z^he!lE@iL#b8lYypDq~IRD&;jU*oVG3QtU_2Pjx63vc{7a9)i6#ezA>q|QFqTk#aw zN+fk)Gw*dVf#^q#yX(6J^>1bvQjS~<-RHKA44wVX`}Zql z%h}>eHW<=)5C01OIdOHR@-2YmMyv`m_33ONXNC!a?LR94=P$7SQC z-Smwg1HS3MHleu02J?BS+5Tgb`<)9wWCa>5b(@YFnQ(B;Z=WB1H)dH=yEQh+sqX%u zUxZlb_J3Bel|l2!Z>jL7!4)twlW|&dxAakK*vg?HKP<+@=fOcamVF3vGc zQSG^^-po>Hn^@J(XL~D(yACn>DS$Wl_dbEf<%P3OX14^S_@=lu`k8kc*sIU(Cf~>g zGA#c>E7Yi65;-gCeHiyXQBx(kU||_O>MQo67$1m6JfZpLw5RPcYP45VA5GPyp5v|f zot8eeTz-D)1?ij&Gw+RE#=&5G3;+C^nE zmCsM7Yki(}_Kwo`QUxa6f6v;6ajTkRjNUp)KH7(AZsM%>zS8$DXq2pSIYLB!JY4U2 zyw{s`WU8fD>HGZY6n<6^yOx z#w`66x1E|@fBJ4Mwja5;&lG-oJW)S!?VtFDtcU4Yf7mkRRi4bpsttBiM|BD=j$)mk zqZw)2cZzZXaRc9chpC@s1^SGh#wwC16=cNo16h-hv=uUwhPyvg1F3PNWKXd^9(}~B z591V~*$)G_k2g1RHv;$$j#|b;yh}LT`pEZLZ~MEO?XBVH=MRQ~^~Oe!uGJ>G@g|)1 zeyJKpF*JTFeXciSxxbg<{!)=APB+c)Q+l$p)nwojmV8o%Zq9eJpfyk;O*33tp_Hd^QzcxP{&8hSyth{}+4j z0o8Q+wTq&TG9t<-ARL#0IJA4BdvUS`LR#K26ClQ6Tn{05|NEr+htmKPL zy_%EkYr41-l%3pBa7~x};!C+|?vkx~(3fv!O!zS?-6XVm2QCPDP!P=|InO-D_iOdf zd+~UG4RHa7+fs5-%p862hk^FxG=E6F#iXn=qaVE2rjWLGRkX`}uI24bS#N(8 zQjxn~cgT#YzevTti=23z-@)d6Vo7Me1_B$&OKjm2%l%_?LUyAc_2PV10e7Ki2%OPO zJYYUqlDdsVb(@<~jP}>IR!$-`_vXC7?LVlg5FDwnI5Dxyx`qsz7iizr7U@&k6@$E9V zu=U8T)$p9F?V9=J(Z6xUG*sf17^X~K>qRoSlSs@g8h$&fv>p6Wtdg>P#;@bRVt6GX zTs*DIm!SVUFbQAXCNA)Co}8f@#-`H%V_0s8k&XXHbNJbk?zrFXkrIj5c~l_JBcppOZ#geex#h;GV9*R=cd|=-ITE z9n|F|gpr-vG=LcOB3R2=ZKso;`4ks$zASH%C?8P@9Ctkjc2n2{vG=PRTBp;BtAcAk zCNR7j;xB&Z=)?0r0B!OJy$Q)jxVUzQ_^Ah$wTAl#_=)Q}=}rRg!Py>`(tFjQwa9O; 
zde%a|`CMgK?gJF|q95w*+geXHyHsek%~!STmI={dq9FW45+y)$As@*N;w08y7^{$2UgVIo{aM6#%v@ zpcc+{NqaMb)3k-I(Nu#_@rwCH{TcpMc#4U1DaF*QJ@s;Vrtsq-gu3X$W<#i@ zXn)zavCWoJw_;_2EAY2xWv9(6WqXjGQ$b6>qO4gip*R@Gu9ExM7(-iXgXuRIUqd(x z%ii}uR_{xY6p^h|<(yi=cHwA%6w>py<;wfDl=4jC$Zlu!`zH^5&*g-r7T?2X+k0I3 z=fL3ed?BynG22|eZYFrW@|VpxbLv07z*$r8y5Iix+IyHw-~;?%?wt@PXX4E>=D3x} zI>tbEp>%K3g0Nt!*GDl07Y#@o7;f%zImse?$k+1yjOM)P9_iplHtn#NikrF+uClxE14UiMlD=ZXHjsv?L@#t z#WZ{@S!$#pdPa>IdLFqS^b9K}8{-^*T0BL1t4vb0+<9QYjhuq~>+d#xWtV#c%?d#@ zZO5&@8-z_Fa|Gn(n@3So_}K5m)do1ypq?w zHAsaDLl3OOg{mb3M@My)X%m#DdEobNzo`WmB*pvqnLQYULh5%*5$xShZeR`1g&!Cf zec?K_pvjoJ)25GCc7*nSvDtVw!2*_LNm-KKW&f}Jv~YO|X@vz3E)9WHzk8Z^mX%0F zQ00-R9@r{~I-N?)Rx}Kv7B&{yU`q>Sk$IDM>d)0h*2=9EZyc3qP&&$7*F6Vy@z5R9 z@4junF~Odb>$ouuy}*{lLnNs7Yw&$kLzmrOw4L&j!)GDY`ON`8tVSd$$}VNB?nVK<^lSzuwWtZJf`>bU~0v zQoXIg*CI1P*ovVY?#@GWwiLj3)IHeXPyhyU$Cum z6`wdcs|TEz6k>J@?NX`fABFXlLFfSVJtXDtIkfa_&g#$WNBY#xKp6s-B{vj)XD6TA z1Gwm5-lb)|2UyIhaTssueY2?{mJy+8Qb%Dmsx^XRcjpx(`!a>!&g4$3B2EC3E8>HL ztD;(q2YX<(Hquc4y+6xvG0Y0`>s`lkgsbpm$Ygu%!4I^w6+o zGs^5W<`EKjP_RuvG7cn^Mq|v9?95n&^XPmuTyPshs5Hbv5T-T#>ytQK&_>EWn^DQV ztc?suWO}22#=LG)V4~N) z9Qw}1_HFm(@}%;qI%thv68(AEE(YZGi}cV5k4kUk%_zx??{v8=w&t(VGQ}*vtbx1N zrAMn9h3;lwJ&Ha98_=Yxv2BIO--n!oEx&|TNE#S#tEi%@wsTNU(30sa->sW+`<6Tt z(z8$I#3)e*&GV@#pno$hYhN0%-;(mxCXU$75=%^tIS6A(dQ8hWlCCbSQ*)Wb;CMT# z;p?oouz{NO#jAUdTsN|sCD<4>kkc236}+RagvK=wIMH}cXWWI$NhcB$(OeMv2b9E= z>hB+q43@>_$*+;zqMscb>EvQ$Ok4JOu$6S6;5AtXob`>>lrhmEIjCPJlO>c}<%P%j zVuoM+qf1ru$L~&iv(=AQ%Mpf0n=NM16uAPLt7$f#pv3*IVWI7Trs1uyal-yl$=)=! zovEzc8%729@w%)=v){s;HdAW2M5%I)gOFYiP3xAn6rIq1&JHDvOQ>3A*}q7!RXs4cSr%uO zeaXbvQXTA}DPdMN>}Tuwo>0nm0`ar>^^m@Ads#Hpz@ab1epJK6P0gBl>m3{-81M3( z$0fU~Iqs+MYJ(sW7Z*gVm2xx+Y`wG56C3%E$h(m>jS*mym0RoCUhkTXKaG*aLW^v! 
zZ{?em-pVB(Suxl>j~&WQ2Ga!UaOn;Z%rp(*{XfoU!D`S5RcfKkN^II~Tppox{#HsC%=1s4T-*VMu_rnNUY_q;2+nY56v|EL- zP+qn<-#{S3pB5s0+LgrSBsa6lm3P}36g7l4 zQ*OxTdxO{|_Z0e^>_|b!tBF|R8RZ34ac5uaZ8#2wM@Ss(rM-^N|F%`IyJ0#SiH>be zlgIVw@o2d9zm^jdmd+J%?GLCBJ`!2O&Ez?i2uZox+8r6J%EnA0qNr1ebKPKxDTvFO zSdK)fL#V6w^EfAV!dtCVZ)(THswM{ini(OtRfS|w*#-=XUbwtBB-x|8%^BVzGU+Qu_-pC)ne8H9%0PMI=H&V1(>Ju}fEtdi}6 zj;rqdtq{4v_!cu+@71ISRh1wLzp&d)lPu5QVQFt7=oZ>1V<_@mVFz6pE7^1=wyM`! z=+{*6N8UtiHb*Kl@T>W44!FWp;^#(*t;Vz4-3SoM5eIAB&&ysB7uiaOc>qo5v> zfak9_l283CZN~=9&#o~$v%d?Ez%2e*JX4+ zQ(r}~fe7jQC|YhW76y~6yecj8)-CPBuko)`h0g|57Q|$i;Tx%P#E2K)Cq{F&!p&s| zN(3I{h0~o>7~j=R<-Fo$_x+hODqdd-Hxqb@7TA=0YNa&N9nvVjAZYzgaxZs^G>>~$ z9nx9G@mr+dI#N+R@CI?WG}2o4zVEPlAY~PQqW2ph&R-rA>fmfV$#4jSn8{}nw#miMwzp+ zlU+la))BNhO2-&#`2%z(9i6qA5!Xj!1GsFA=S#K)$g>oq0nwKM%qe>!0 z(muClIyTDa#3FG z3e&>mfPAQ_R-3C%yEuxg%sc^_yEaqZknZUiqk$-Q>p@(35(6-vL8$(~EO)Erq9z>E zdM74X|Mv;+wjt@pXsV)FD`={jI21n#4AKAW-7wra81q7RJicR($Z3p z>CE9(=bKHm#Osa~^#J#|L7oNnPZ-nll*_wL(~DbP$7})cB6FqdqxgqD$D`>tYa6Z#j3;JU;nPYSWFcI!L#P z42-~qeYC`g_GO3#Y7enf`8*f1pOwkx+ofmV#MyoDtnY+xnQo8mi{=0*qx9Pq-P}}* zU~tN!Jaa7LUZ9K=*2hRa*!!D0a8izUe6oJTzU$(OvleuxuGBG$TEJLj3+)Eb&;lFc zfaS+ACdm#KkJtJjRb5ejVxs`$XRRkqK)zI)`9|0A{)fDOEzoNzOVX)c%@QCYryU5M zb}%a8SkJ2BfGFOspy`=AwjRgC-QItW;F<-d*VGla<&~PO$mc&b^u_o5={ea=aJ+w= zhv)ZrepO~QiMAGO;Q7_*q1p>qepXI>aoA8Tw}BX-IfOsy{;1xpFGl zdtK03lgnI#{$^9u!A`x48*=oH1Xw^TIb3G89L4*f5 z!D>#F%_w5eg!I&|7?AVGfA>`8y{_7IvS4aYMgbUaJ$y_Yxp|KZN7ZWSx;ZdES`bX< z^RAwJP?TypEc0%KZuT1bNn^D_PGsI)i<{4184>!kq3e5>Dtg-gMsW<@2uo8VCN26= z>*dV%ordY!^XArRJMmHe%ypgNc2$qhQO3>G+`Jyres#SLUTWF|@EDwe!F9p{NR|w& zbjHW=K3plYLEi`@`_l_1nt99}<%8N1Y2G_~0m*)L8l6o!mK;~Ar=ZRG=BXDH^3p3Z zS%H0JVewIv&qz;a$5w3W@Wc?$t1F~=2t-$5Rbzx8m<%Cc6w<`9pT^AWc(0#6kZ1G;igCOLC}ScU`mm^t>xwFPLZw zHJjgwb&a9|K^UwCaUZRDQ?X+KBGwFJ?#)I9Vk=hgZ?}t{DaQM`Y*ge($XNH8lIcuF z(4qJPpS0WX38QyeS;|XtK*7cr%34wKYngwc7QLwMKly5XZuZI&{*jX{NP1RtkP6wt zCTc72eRT^i8yJ)d)a97~zdxa;Zcs%a9I;0^tz==QsSe${x8CTT_VE(ee#4xwzv!by 
zWk=JSuCA^#BUckm&;pIudJ~^vw{o^7<-+aMm}?}AzQ2Fku;U%vd|*P33;Ti^zVB@5 z1tb%tPwV!?^Wy!~leA0S`){O|=l~Rmw)`fu%&cUA%bq)MqiH)FkIO1as~^8oz03q6 zmWU{SI)YU^x8jjh6!Yq5-}!3E)-zG9R>adO2jLcyrtt|r*ISnj;OfVTR#c{@jcLY6 zV89$xZs7+38)hFAtD;`MAQL1!Ucg_x`L!OB8DJ2G37PKdu_d45ydbdDYRdqA!d4#B z+nVO-M*-IFE!O+c7QrU0;UpZfuA|L@HQXhlXY4$J=yBGzfR^Fv`W@?cUmB@&Uodn) z_giU{;ST)+fn2Xx7%QgQ>I1D~kvG|iXFDpUG77Cx0`BE+!6ImF4v;TAV9LEy*gFP5 z<`$Z1CjSB*mFo{;$cU}Mq%0<{ ztoUy?0SG`mbJr^_`h*L^@Tq$9&zJpOYRiNKu87KQjDMg+8Aa^QSSPLk06vEyK&sLs zx-4mBQ7HGGeA+N39IuqW!~{%#X+FhL1ETd*)Hb+7pw#GRKp;ZP!lEI7bbJtrK}00? z{78#GB89ARNe0#8)*6(?3mt>b1yM#pf}(TkTXm$irxs?OL9u=Ph30L(^&Gsx9&=RB0HmDy8A8v}W9!P* z6b-f*IQ{(uQlvj|!!`E0y>n0F&IK*+3eOsEd->$t85J|Jj8CbOD|)awgKIwL?ab#f zZ#mVZ%8jb!yE|VoJgoar_j;J$!4BE!W{j&Lpxx@==yOnieMkb|BN;lQDn5X^QO!QF ztPMgF=GJ!u@@d=cJmU4cqURaOT%TKE9`WqhULv59jaqHh(tSbPe%`LN1WGsl_J0zW z8YgEMuweqmk@(M&x5_e||LlwNzu{6Tz~Z~xKM5}nuQ}LR+s%)L)-jmTJBfy?s124W zXBA3RREd`=PYf%p>xUPFJ>?t-@LD^DN$Z4%`{z)L=LA$LxO_EAM+H1s4w4LmcAfRM zTxl4&Jt^y?uwROzsjl+&>V8@cd+7P?8g4Hghn8FKo35-rIwfzeA)lzQ>CAPkhzr0r zi-q2WF9Q;X_6r-*KSVb>!B}g>h69wQwqcExByJ?hP!E>LR)0?`R!|f-G*F&%?yRYq zf<@$vwKE&Ba-d=C%VIVCBJVf@Klx+A^*s40y=<-wz}#h|15R#fU$Aa7O+Z}Dt@qXso2Kg;(RgN?+yo@(a}q8 zxXh<0SI`*${wc53f>&=5{#m>gpB~2o=EQExO-oJO#@4QLf|pakr?DKGz+Y{^823k3#Y2*XZClCXOQPN5>?SWc@C{Z@>p~Iz|I? z0#aGoSoK(}G|F3ZTmAUiC4>T=gtIiiGb&dum3!Q};PmYB_3f&zm+v!M^p!9*wo{R% zm6^l&=x8b+!uH0N0nle(QbSSFEmq?hxm`8Ezs~k-&C7aFU+x`(JqLglsycZMkpicM zjU%h^1HnkzCt_5u)+K5$$Pizm8J{|=OR=SxWDWys!8usUHBB0@+qkn_A?bsmFXD1? 
zr8W?8RtMPSbQGt|=z|$fuVY}IIiKdM zwe#g3?&6j62Kp40pK8H<59ltSn(~eE;#SD~~QC zXVY|HJ6ad$1VukIh#f$ri;OT>6mZ*!h@9o;Iy(w#lXF@5Ub9hD4?yxJ9WH!PXwNV3 znzHBccW>v5wZt45VnsSKifYxD^xO}s_Cjr z=osR^RyDm)wL)3R6cyf6B7m6MorZ?Vl|i!G_)#D=ro`q?Y?0n(Lod~4vSIfKBa!SI7K!Xc=D8dCe3ZL?H|NDLd1KK51iQt;tkGz?+|E zbk6{#B>W-dsuA0;#!v?Pz)+9lkBOWqII3;0qi&9IKlwZD3db_YY!MRfCEzJo=(al# zD^zU-b{{{mT+Z-6ngnG#-te4PmM-sRE~$?yYHmz`7bH{ec# z(s3M-WJJCR3X_)>W|S+9(1w6`czp&7IU}In)a)MjHLxz&en305YV2;&AD2om`9jt^ z?Sao1a{48gNss#RU87U6T6}+QegAT2HwqXkeMP(LBRSF6z&s1p3ixkDbUqTwt@^?3 zO}#ED=Xxv-5Ykhc*1SFzmi`&bZ)m6dhrveue)aRifF&VMadKucZyhypR=QkpU&qRA zaGsu zojG!-mGFRtd^{KTLbK=+sfh4OuWo@-s_l&N)$PVAZUVD9RAp*Lt>(}{xlRrExsVWSErQNo^J&hbh=Qwp4_(`e>krnq^)Sf$;w@%O4hnAb6AGe* zNU>R5H$$=ZI2ZjG{MrT%=eHz!9NSC-3Jz>!>pAzoe|;S+Yps)XxXn!DB1hmH>(lx3 zgZ?tx=%uFff~O?Ql^IN9?aU%w@-0MAYHH?xzTtiTK^OQ!JQlrMTd)Cu8~iD>O=#Ij0ZUUG(>4@)V&2M(Iv&^cLU601DKR9hlNdM(vW+k6{K z8}7l~TrY#jx>+6JHMuiWHaBEZpe}z^cvD}-F+!WwvW=E|3k32z6KZnhmkJM(Vl^O^ zjpuSPgvR(W=sI$#8FdxaFpv^EthdmzF~7}UXO4s*o+x@^nQ6GtzK+Pux{w*ca4qk? 
z8={HTRzy?*+JC9v#9%JY-gB&Mp+VbmMwLThxW;3qvMfEkdN-zG?DBz`Ny&G8+};yn znj|=DlKd9e`x;`XoA2rY2eKQD&WCir1SJ3wnBw0+;lZ(Sadn`v=m}$Aat{FHasx{n zWtG^&J#-d83+_@C#nAOH1M?l>T0$lg>!oQbNEghOwNtp?Dxt*+{?eRO+!E4HTP}CC z;-QH{^QmB+&(9%r`qv(~i&t8hY+>NxW)oNawyi)aO%$IS{z74Q9i)Gk0d3-9mAw@{ z5kh+`r^F-nvPG+l`P7ztFd`n}X_`Fbxj^MON@4amUtEg(0uFON?Q$`AT`YN+>SO!%5EgGDJ$$|tQQ=(PgMqSvkje-_u`98sC;I2;b!&p>%#0T zPu+BH@8xFG>#61}cON*2SGw(P)j%1mCf`pw`$^*)HI>q~xU#SK;hT?__+n z(9^^!HE$&z4;i*v-zr%xmAM@bW^H1^Ig1KZ#Ml%1Kt0Xk;$o41i!i9inHT42K=^Xy z{obb45sHvlgl+F!OqmOFH}9^vnZBn~H#KFC!!4ei3$< z5q((_QWpaxR|I1W|y@FY=8CgiW>c+KeSSt4-~ds zy?S-=^+o$0kCnkI(Q;c%Nc zfE*C^X-9tdc%w~bH@|D#to{uOynT=J$0PLL@gtn>6syY(06yEDK$&_`3xRd#n&R8`4j{2jb4AoD|DseSW( zTe*5U;v<8g;2{%m@3&1da$`!m0mr{xF9U%n5&#tcwn|$-&G#4p|5&Z^lCaroKL}nr z@Pn@?It{)~X!I`mqr&3g<*s(d0%qbe`K7p>O-onA|064&$CT%pP)ruPlF|v&7Vzoz z0ihCSBB9V?S6l{DrNfapa`0d(t~ujiuVy{F%w{K58Sq*Sos7?W50=vUhI2jjOb0=j zo+Qmo1+hEo%1Ja~?X2}VM@Ol%s`BpX6&V0w0IuE1J)E0OCB(39jM1Ox=r46qSeagr5q+-&^WJfYe7}nwO%aH>dtD^%C{ZxCQ2RUS^B=}WB1cS|^IXDO0i2q&aP@Xu{UKm}3|xJ7LpMDi6kEJ27p7@qJA)Ob z334#`ziIS;QF4)YgJE&JVzXsD@7aWZXsjHME^sN#Qjz`)+JypeJ2;Iz(8-t>VN7?4<&27 z@i)mkq8$L$xsn6BC~%tbqNsRG`Yk1;7cZ!P{0|t@E>P?8-~SIfFH}zddw=0avl;&? z>-qQe;c#HAZ2JDqzwm!1=6`8D{_mQYE8do{`PZkGpFhEhx-M~b`neChpndfJW-qyL z5EU5g%~(stjwF)dHee}3;jf@$Eb^iW31QN-S#Jv)zNq;%pX!bc*ShOBePc(O3M7?m zQ+ld7);r8^Og%Dv*JX48>|^rQ{g1oMO`U~{UNJqwV>$!W0fE~ME5EU0dAImdR)19c zUpPem9)~216{Au6ud3UCJP((m@cv@JKxkvM;hr1-#p^Uq%*4%=*|?3)$QCf}0!pL# zXR(b&SOEV@J-A)%ChiX~I6zp5aVSx%Y(H8NNsqrvyt`F@Nq;w75`=8XrV zx3CS*dyt69kpwr#<Mo-4T z5h9mjdWTgrzU*Bq*AU9GQ*eRM1~GK*xI6yXM^{z-_>y-UD~$=^p6OB-xVDR4xf8Rz_)vU1D_d5s%XmD;68lVCm02!vYwhK92mh48gE#o7PM!u3 zlyM~XDZk0@yQO;_mTVnE_~5rB;D-ivs&I9_m753)iwte&&u%Av4PF!%CuhB+nWfe7 zZ^DuJRqjXJ>H(gw=@cxUgMBvRvHv6uKm`Cit|6rd+a$**ro4~OR165HO_f6N2mFuZ3m92~ctJvL_bePi(SU8my*PKyD!80k%y zzqHj? 
z;+dqqBMzOLd+6QjytcH=9C{gUh6&jc%pl&)4owuB3Z8i{SV-d!2d?JxX4(W?OctP2 zf^rOo)#NdMsr+K4n%9nC=v$i<`CUK9XVD|ciKiG9^oKtV(I|?nb+^Q)aK_JL{7WK( zLIy6?Qpuk^l<-OurzNw}+L>YIpFfFXJ6x-v-T|>ZgEN;*stcfN=8A)N-<(RS zuZv!2mU5Q95MXKTusJYv%;YT%_D}g@VKiLsk=^s9a}P7YuDFx)9Vm3y4s7=h59jJV zba!{R8P{KpBc(lYkQew03fd1N_uu(?JmO=Iy;D(FQE~VF;&2Nrbt3PU;vnM;Jyjqm zZ}<2Xrf7T8*1_}wP$@XGdQx7L?bo7tLsp~HpwQ1~c(u|$d!$$yScB&t+@|FEdDAPa zr&vIgQ6j{)q)X8~I{LvQ(1_+K!R+}VkV^5Js1M4@;OLu+Tt4@rg=i847x_VD=AG+w z9sHm?aY>-MeivV}YX@Xph?iIU`F)OD|1~%fcF?S0Ubk(dj)Xk$*d_2&czvHUCq3bC z`%q>OoD>)LA_h4pF)B(xa|tNlfjpk`8BS+(9i}Gi*YY6&pEEL+;+@Kmdu}M(&jY^^ z$bvYCad6pe%y~#Q`()xdN~4JO38M&?i971Q`Jj~!+w>(T(nPrM z97W=WjA15Tw8FaPN1udpF99hD|1A`c)JEz$k^2LLKeu;R?{E&*GOp6t%_UMLAV8%1 z{mz02gisaKjF3nDvZAU7dCqEogsE8E4L>EN z`fsuPZQG^+fNFrY7MtP2+fW|->Mi;~Gr{=eF6^ z;>8L1%~bCOAPL|Llvl3`n!w@Z?IJ|m(?vI|c|o)?^3LIK_9D27E{Dp+{$ zQYk!R2Y1bxo)ZaefBbhLABFziQtab~WAq~Yhul{Y24Ky=JzDp+WesPt$Sa`o!adJJ zd5U{N8c>7Dq*EU)uXTfOj(J)x=So)7w4D3bk97vK4vCk{2=FuNyGqjD9zcYQ7yTqekMr1)3ipIak1Bn&Qo;ZY{+u!C+yl}2= zNx2Yl0(^;zQ_bbkc3zH(u2w{cJxwYBzo2>k$giJ# z6~a1nBbDO!=Z7;xu@?Bh5KT>?F2pdA~Xw4UEWX(6R6I1#HN5j?+ip zfqPVb;`k54fGH$PURF)8qazouNgPhRPHmli zLD|nYXHqx8o3X#%43CP81hHTVc%MCk+`2$sAfse$ZEbJAR2O&()dCDZDCoBRj|8Sf zenv*d8jL7^;JsK70fWMwo|6=PH-I6<&-f^VyXti@W;c20qMCq3kg+-f91fszBctVV zxD7=zwp)3qC%kTPxa&hGboejR_W*2>30UrnYD26p=jab{55%m61+NlZs&MZ^vFf#bli6Zikz zFdMrhNWKGI`3#iicK)x_nxN0UUa;H2&BV9CfOCV#3&7(synu#KizmZKl}E>{*J=tJ zTqf$?*`O8s{$|>IX_o*77pRkzoe)7hgz4U&{TAzU2FOleNItqSLtdtAW(?X7K302LBDZ z{YB^i&VQT{#t`Y%H>+d*XeE>eIRC%t`r0iJM_dvK{pafmPFBh83bfd zvUOF%Spm?u6-)~uW2rJSWXTk=3<{7&@!rTJOQQfW9`FqCj>xd#1kg3fdZ2m6?O}f*WCkdnZM15g&(0&zK7*|R`Az9|a_}F*eCT28 zh_$w~4AmGKFZ`ARZcyR(bSNl0CgR%xzTr3)artBPrXs&z@c;yS`}beZFo9>tURA~O zF-6d=Qx8>izgEWTkkkAz8rCsCPI}TE6ii6G!od0`c=*^zI8hCGL1RTK*~eXV@vC4i zWw|boSnkKs$Ps@YkK`m7l#8+$poBVC^8h!EmLehRArM#qLv6bmwgD@MkIwqE+W153 zPUvS#?s=YqWtXwul&|FPuiz~8v|o%j;fY~1$ed89=G<)Q>>D_itEMk|^V z*&`l0I{=JHUj)i}eKN4sle@dSrJY$n9qx96R!h6KC-~!31BpgdTx#n1<545kZV!n0 
zh1Q-ozfIWplh~c{3)_(92SFapse!YhT6u6D<%O zkb}vS1vRh3j*8^;`RcOk2U7A$jTs(m38+DBRXJPh5)xYXQIWeqd-NFY2zv+}d(ya3qpJ4?&r74(mIam=Zmwt?^X| z-V&$kneEAsF-Mft3MC}6Gh zfBL2m(>!FoMa8o4MGrOY#t98x*elVRQVvm6LD3tk*spy^h8+TR4rsE}l}pi@#HC@e z*Ym#YrKp+9J7#JyG~6bW=!F@0-duv7om}m6+tn-lOmOSV0g+Op7}b(f&j7_-ln;#W zPB{&EszazAr8O`BNE!%T7TGjUHHMaQ1pVf5Ug2l7^w_9LhF&xMH{OL%>NBhIDT}f; zqDS;JWeQy8zu6X}Zv^*zw!S}H?o`t&^Yg1;?w?1*V(qULND2dNqxv6#3`1rj5z`dmEh@$h||=Xl+!(wK+uCvYd$2 zhkb3hmd&g|U_lr1?_Kk$b61&+H_=lio_X!?98*#g#8IO<=nRW-DLkqkxN}40$Ya=M zs{Xse)zwu3fiUJPBq1)|62LVC0D+g^4E|n6GE4dbfm6kDpQJbUR?p(3p1)N;&P$nr z*heiV_2V-|`x%$Jgc~amZqHy2|qI6NJ&KQR#ixJQK@r8xByZ&WjEMjNiEn~o-fq1m+L1z+e9Yd~>L;U@#?ogR5rXW4k)b9--1RC|xP z_W+xI1CNP>0*o)D##`aUUz@lJ>6o>$?1Kb`0#L@wK z>BqRd(KZ4*Gv=+;=3F01p0ib+a@zOI<<$7M!-4Kk*%9}zGt!W+fmvuZ?+R1&nWNP6 zr6DX|G@3R3(MSaVACeat&Cl5W?~#)G#O2}AeC+{|bSJxk$n4%p%E3`}_9khz@h6<@ zh2LJ3PuA9NkxPUAFANf9fL`RL(uIM}XPRtPY zqh$%Ia2fBwcp)Q~4XP*3ftK9eVTwB!1C_E_(5VRb|C5Ih#Px%r+XBLt!Er`WM%jdUWpNw*H*q~Ynt3xZT%TvCsBEXy` zgmK|c3!_zn_-hZ3?|}tPA(HVEI{h)=&lozW|+O0cn_VLn~ zRKJ4SD0kPBwPUZ1YXy?-(TymdDke) zq(mSazFN}tz`-R#Xy|Z_`5g0bACq^sHD<1NVK3-I3twL3ZP7v4%=uwn7!maNabn0% z$lc?cj51S++-Jf5;S^)s-DPxsWE#|eu{oTz3KQv{v$xfq!NPRH5GuJnpRX_D7UATa z;sX0NlV|8ry`G2N0lB@kXKA1Jtix&amYeTerCj8*T(qC^6k@^t%mc)Jj<31gXGn=HrlH;>`h#RnTJ9&0C7aJ5tlce8 zLrtk`z4FtjtODbNv4M|*okg&+d)+Xg>o)|C2Vz^ z1h?b&%hIf4uYD{{9@GX>r0?baRg)66`@0)h=YvkRz1B2R|3vLV zHPL@Yvkf^>@z5=^*!SB^__7*cTTdF277J1$*UN}^H@eEaoRUP_IA!(Va$fW13gyer zq{%ePakKr!3BTC_q(1?o1Zf<)M;`yTuo8bPWK^@wvPHkvYXg5ksy*etf|AOZ zq&cW%%NrI>YP2(=vFeEHBcx2WA~|yrK{q?UDCDKp;#oByteN{rrC?-zdMYhAxa2=e zRNOal*h)lXz!ac6`d~b3Ci7F*ishxf1CIsQEIfbiR-*{S@nkYCt@Og>HLU3f%i*hJ z)fYcK^Q{GU^ny$_N8wM-#JiY&RK+it0R_31M55ukaq63AWyC zP8|{3>BD8fR)TH1)^t6M73;TlZc)h++iqGKOj@_c{sCK& zCykxO1j^M|6xBlLR^`txA=pb{+|Zas>34}VDfd^|f^>tzvtA|NWRXdgxsu>>CFUXv z%$fn*M-u+`&gAn&S3K{j;PmKlWHR5hug!<_g>LT;k{00D;SHBLpFrSZD|ofi8(V|5 zwZDmCh2AD)oXNb*HXLcZ7i(`$*f5;`cxso(1&>S6ABx;ThnLsx(c|1znr00T@R2*3 
zd}I^;N#gvHFS}b^&CG+jE?<$#f&h2xgP|sjmM(jW;RT67{xIkVl;NK%V`(BNLp4^J zxQkLL+7zD)3`HuQXI8{~y*qG^HS4=v@-P^L_{0@GU$~?6zCIkkg?{q+i!lfy{%(zCyj+b@i2fipM_x@H!A@^vBc#&9`ICxA&oYYyqVBg5A*Cohs;F zhFtvb)lS!YUu#X4KHLbZ(t}pRiC~WjC%=ye-HG?l-Je{glAFI{u)HIihS+r8ixb1# z6%J^WGg9A2Fl=A@6t;>F%csxs6}TgV7|d*G4+)@K$iZ%oG(*nHqpDKTlRIUMuo9a-17Zbbw-pK-iirsy- z-ESD7gGrPmH9g{uA}Rj6ke>OivaOV{+pHt6FV0KP?lqYoq!^3ez7&9MyW15}FdS2a zb#WkG@8Ea1nRC7eW$0B2Dav(rT|&-x?iY?zP9ao#)C2eT7b-R_xy4VOz;vo{XG9=X zzWPfQxAKI^kJFBNe7QRx;jdP&87fr4EEVN5c|(-!DlcCApX(co9#2zmTr(z z&Y4#UrigesI4{088Q5R^V3yb4R4ZJu(mlS=I_RLSk<{#~3tNFX%0>7*F#k68g2(oo zre@^EcY=Gr(x|R|`Hcam#E5Op$j>*C{xG|7oN;o{6)6;qG<~pM>*3ur3SG+u`;2us zHuaMpX(-XST-GCCaDD}-ey{%?L|L8s3NBskgLJi{Kff!)FA!B#a3YO6z< zecV~bKolBNZD({JcaDR`QS+MvTW9=}h^tFQ!gjEoTKlUxgEvN<2LvyDxUV%Yv|zNN zUGC>%@3)z)hRXli_m!l7(AqaSO~@r6f_iCKr8XWsIEY+FKZ`^wcqJ#ViyO$D^E+q!>J4~>RmqL-4d#>(sykb~&>*L8&k+=SI(12>NR~JI$KUn)@ zUM>E34QMFdO&$z<43frcl}R(^`%nsf#$O|SQATH>JzoFwHhP7|sxub)S}u_^^PBQErV&CVR?5;58(dT(n`7MjSv3! z&&zO`77oa@52w_-6dnH`yuEc;lxzDg%#sBtsUls{N=Qk^5E4>DDM*c!Al(fkA|O%% z(l7%8(jYB4(jZEAcXtg86Z`Rf-?i5JeaHT5AN#j?_=97PVdjo2&+ELeTNnqC5xV@f zdx#t2763Wu_AkQxE_OHIm}F&s8qDq_ViK+EJpydNnta*!8vyn#@p)C zGoz$`O8oSy!m*GAZMn&f2z-X(M0c3v^T{ATgM+tsOb%xGpEXQAm8s$P>c<3tCjFIA z%4Wf#pIFKy?>k^m$?#J!oBUcd!B6VG@6tR*?1sWk{7022oMUKuE%InkfEUW5U`8SH z>k-HmftKIlQ3mv~fWSrOn*Gp!5HP;uJ;5hY4=Fe#Tshp9dPR^p_dY}HYLH9Q5&!Ap zw<9ta>{P%av;)d7+kD*MNocu9B4F0~v7*!a01PR5c@X-k)hNfo<$_D?c>X$(eYrC} zv9E<_H1bdGiY4oN7hm8%(RwNtyRFAsE!z8Z`nfM|Xfw9!~MBB$&(1+18^NmK|J-%ly%I z_NI|U^j1m)6p6&C<#wA4UM~rV_b^Wv8PZDZLbCHJ;6~N2PpZonan-nUal{F)!03y$ z%a%o4BD@sD<9i;^C)-%*7$Nno8+A2V_ zywZQ1!LT)#hKjQRP1vtGI3a-;VGzBJlhnt-k3aZTG%p3EEa1)x)5MTeoHhi(7nk3a z;g_gG+%m49tibpzj%4-vTJ)LgWhV((78#GML2tNd`mx4>t`k8GURUGW5M%%smEJWu zkWCc$g%!ZSAk$?!ps`HZ^=Ijee8O-_0dmd@Momb{%M)TQATU>9ZQYc2G?dD{JzeDz z=rJHl3_grGEK*ca_yO!neBlLf^bdf7ZGcR?<_2Ez@2l*7|9J6iT6-=fh!?r`%)rQG zx0~+DHnaa0OYFtZ>e>Oj)}vesrzok!irFkweMPV;Z!`jxAztdwm7oQjttr5slNciQ 
zYy>8Eip+s&|MsWQ7aMuFX2aIsc2XLIhG0zB1h6}YnZ6SYi>Lty7wAr#Q{v66ZJDc4$FX~Hou3ajwWMg1%-Tmg+4>sbmpPuq?uIS`<>d_bV*-C#C>J$)2D?oX?WsHoaWXfO6p6ML17S!jVWo2SY$NOdXW@;;pud!YCCLA4cFy`<;E%11@hz7l15Y z3h~9fvXgumz{I&tB5I~%=ETF|=UZH0yFx_7DKC+px-$L6L%N>qgTtqzEHN~!haVSd2 zx1CKe=@)R1OYn9IHts+>2z!!ItJ_`Cx-)K@HF`2V!GM_X-nB&puYRtF?tO3wzEG>o zYX=gpGTvZVhK%q0+b<>uiza7SmxYwXa?EY3jQ( zmY|5w(nwo@=d|WluOv%4A#P6{RJY`zYQ&oFRxmi3%X$1wX5 z9H+$Cng;|-0^Rlh94R?lsv8r4kz(RNE@Sqqh?WgxdGkfvQfaqb8{b9N#3+>sn&ZNifl#7UbkOD7GEA!h{Pi|q0io;} zX6Cuo*pr?ar^xvx75!_7M5SL34pp7P{QMZzjboSr&fRXb9J<=$;_U`=>RXp%jD5Q4 zDx5Au@xd~QGmD`0$3a}VFMj~IUfYwyWx&tog43JZyEj#vF2dpaSyNsQB&j3!CGXAF z3e-730|(oDAyBE@uTD@73}culs?Co%6rD8{Fl7A1=0_{dh)*Crd?=U#4BX}%k(_B` zbEmYuix{u?A8aP`tw7}^m`B3=R2q78UdIB;Y%q*d)l=9&A4Ca^{?=qH9P|RZ!g~5{ z6XN{zM~$I*z+%7oh2DgPy&mhl3iiO)a!Uzrhbfy}^HZx@{T*2CgJ$diTr7e^prP{A zu%`Pm3DR)ZKUoenw@po}rjE;d%Lmv?=U+kLr;>;0l=A>R3g;-6S+=>wTy`{~qg4Y?+)bl{PO&U9Fk1R7 zO#qb$gYVPrjzquxG&O)3=t#4HHE0yc`z?2kec$E)xKp=fzeYyic=KBIFXTHZpi$Be z$SZ=+pH*P%E3mk|c@ke!Lq7pDo-9A|ebk%eyQbL$#dWGf$>x=w1370kA)nn_b2n;a zyJR8yW65=F{QN`&_p;d_Ge-7ym9LFoaHev+aafj^ihav}m)m_Ms=LZ&-X)o@&oD+b zJ;S*g;j?f)FaWC+sJ4AfAESa>tY!wK`#@|RW)mi%6Xv+f>8xu|(4jc)h!WX~q=D{t zZRe7ipYKeH)ys~yty0Y?hIRb0dOXUTba?&7U>bTobO;qV|H$IC1@*sdNNHXRYanvW zxl!90FYoTyhNz`LSAbA^CW~18D*MKI#Hs8x{MLv9!l&&L(>VAvsb#|pgziO_=OvJ~ z_`M?r!^E){8juv8fYmt1>mnppU3p!gLRxCVDm?TnWO@=*ycrF1E-XZH9vIn8Dx^7aQ4& z@_NLo;Q}%Wf%Z7m-v|C8acSBwtBS^qF3vmrG_AfEDC+qF0OJz|t&0gqv8^r+)?yZTGzEW&4zPp)Hs|vIK1Xzxu@$n>Kp?gNfomn2WgP@j{UN1RPNqj2V)o((6xuLqf(0=6jWP-EG_%oc|U>? z2h8Um*-nSBN(AExR(Jeklx(R%7X^^718_~bGY~FSJNyE-D0ha)@8nrTIuOAjD`1rL znkj=L)L8>!$(Q(+rrk|3T&jQez zD@(6e`{X~n2x38UC(>0g5ESZ@>jkux(-n+(s9AauS7u6Uwm~f`O6$M#wx&RU&#bxe zv6t+~I15QM7xm}kg~+YbJ{?wfOV=D;I-X8#?uduIR-e{LB;WaUGS03gb*?kGH-$F! 
z4$m9E%NFGz#?w*zuXU;Ik(^ByGPpcQ`?KuS7QVn4MOU_nuB8OSPKl|=NihY99Q(mw5@oEo)cpkvdDAL)f41A!L zc6vA7gd8We9TW7lNnQmRToMvrWg^uzXZgvb_bop~XcTr*E zmegIMmK{bMp^x+qr|hBb+4vOxw1&fqI}f&56`nioCAirP7`27QHFH?z9AQ=X6^CX_ zM=!cst*ZXO`;M9o%xNDR8*2@^2D%v(7ZF7?c2N4zMBdRw%OiTl35U6MkVef z1EF!a9?;Uw5+uxj%l9}6qN%$4G=V7{nMsU8wnpUtC=SNND$VC$%qEebfL}#svcAH} zKVQ0&QIW&7SBx@Fo}S!<=FWGIsV;B zMEUg-JI52$Rmkop5tH%Z5}Q-fPN{gr9gYCpb&nxRQda_0)fuVie(+ADL&WxrEu4;w zMOGTRXUZYiC`Cgq8^kK-?zevOE36!QEIiDP?R! zIXQakV$6ig31hl!8s^&wBX8i{EBJMf=j`lkY+~X^+^A;NemVtVD8NHmUth0kA(8q+ zifo!IAWZT<=N>*0R!7N1Y|!%T`R$<-Te{<6Ya;`}x#yKw7QcRJYoUNsC(v=(0B;0- zxd#v9j<$CePPqf2CYpai0h%99%vSC}#RTQFR8Mj`{X_zNmSm*t=ySRykidT`@ z^WK@@R}_q995RYw?6xsbCxpDA67`&WKQH7w{TeBAifLO5w(~GnFk|zV_VkY!q>mWm z{3U6y6!~QryTOv5e$tOShst=fJr;b`$-NOBHQL=pXLa&HNXkZYfZ2ZbIuvx&{&>cv z$;kvayxPrI<~>cDp> z1G9#qqkuNa=Qzgdu+oxFjF0zG=s~M?KC#f#7Z(!#-Z3t7OE@6$0G zZ>S|VDLkr5LF}{=Ni$;?_9o*rasALUOqVSJ#a@s3#;0+0bcmi#+`PLeo5k38Wf)x;EVZZABIvEHJ zAQTO!@K!ezWR}P~sCrW$iaZySF849^R=-PlDqB?C`QN6>{9q5nwweN^Mhq1u@PLoNv_k3zsX*e`V0PYwr zz=$&0^1SP0*Ut>5Tan5?PG6_IY#= zVs~o6MQ+M-G4L>3NG5GB7rH#Hn#s$-ZM#u)FPzJ8llpG_<;Yah)@kT+x%I2tTja9L zYNVOXjYoTU{A6%O<1~}^e|A&DcFeYR4naYy$Ih#cU|&hWD%vGh@|N8GP1R(9=yLOB z$9K6Z63KwnapaPi7=L`mkfymoOexYm-q3khh5sffvK?J6{qX_b_wcDdbJ~Z7bh_g% zX0IryfaWaL8IHhliQWtBy^RO|-gq7L-^TJ6Gf(rDvH(xOJ_GbpbrIXmawX(2fA)nX zC}4KAqd01g{kI?MOx%oVMEJ>5U%m_{uDMz*pk91&d|Gus5mIrL%Wj*3n9a)ceM64Q zp9f=?D6k*-s(%~GlMjXqD16XS=-D|DbeCdVo|&C$tm~>L0#=}9{o+SI#BI0E2YnWY zTN`u==otiTqa8GE5HiP0GSYGu5xn)R!k05BVr#b|u=Szj z<s~~f1<62V_5Wdp~7V}DaTe!B&Kd?>hC(#J%qWQ4Q6(TTBDcJ6t>$`r`-Q`ZW_p5{_! 
zf6?wf0=-vICeb&%OQLJ#^$dH%@mWdRHlU)lHx(RCn-0`E6@P201yw-d(gY3U*|YJv zxw)B{nT{`SHKE}z#lZA?y0eZiEbu0j&j*O!(u5XT2&i1dl;hOvCeWHe@M$-JqW zQcxiv1492S+G-Q&wz7@ZNKuR&woT?jacK`QOIIr@+YkDv0yX4tFzFLGi)pi9gTL&j z_nn5XoJ-HNN=<3k`=}E3!_aS7=03?>F`P#7dYIOS5iP`AxNh6Oz4-dg3m#r0Bl1y5 z_vU^BCE))YvpF-sJ!XZS4GQ0Qg-fC=Q3@ZtP#e}zdmTa>we0(PGXO|FnNyqFRH zaf7D9_t~^66j#xT856BWnAa-VCJc(A#%K^1fU;(k+Mmu$;(F^_K^W_bnDOBv0ch*?FPFeX zK96GxIHuXnOHL?p@0WN2`>hof&taqY^ld{s>kPa!6?|lxk6=tuFnVJ676m8V`SDi? zuldwC({y8*yGGc9wDJBBOr!{s&l|To8nR0?SUOU~EMujanHkB5xV_D6(i$joCzc=5 z*y`)-SQ+5EJ$K{|M3}hFon8aW9d%*Q&G3YX+|ZF6mq)adn?>Pr_T%I?k@A+PD1 z%4-#E&#Poa1Sf0EqHfnvFD$-)@&}&S^W$)0eSQ5IZsfKNaHyDXfGB{X)#siCi=Ryxn>lRWwGH0p2VES9qmN7=hS-h@zxfRFYZOcHXvX5j zg-^@oCUpIR0yh8;s&WI*wpkdz*lj(OaG5)eBZ697yI}OjRSPke<1QEw^k|X@{K$Ax zaxBYqe>Xy4!3)g555QJsxJ4+cE6Zt6#!s$tH^VmgD8ifzSJtwgJeh>J%27&G&2tGl zU!;gdUjZ%+NSTOSppyX*uif3?t^A71oRn5{zCghG?&4)K5<5Z!J&a@U3EFk+nYMPe zkj=D@7@!5iL|TKb|E2DO*b``hEaJ?@{?u@25_h$U-93b&VHdVa;4@kX)Dne2cOMR? zm7L<%aVN7lbZ=Tsy`NM=iE69(bpiBx5J>9d#hbUqq=;%u3|mmOE@U7VA@l@z+`QrH91*i9}B`V zCH;Jbh5R47xrqq{$hxg=?EG?NV+220IqN!EUs?gYc0**C?RAH3Pl|vyf2PwuNhYZ` z=#vEIYW)ClC>?;ZUm7l|j$;9dfRN~^k+s_KtF4i>b33Hn-r14fVpmd>;d6n(;c|(_ z_Y5S%opXUJCo!FCr9$43+7dz%7NoHZN~>ri4wF|))iK(1l+`BuhDP+#NkY2s85>!C zo2G9oWK^Yt!fZe+euept9UJ?!C)G6=8V;OvvfjUU_%8czSCC+R@nm!2n}GCjR{2(I zflF}pDJr+8T-N_=Cs(zCbCWIj)$*}E@8Yg#A?Nipf6KzF0JP`I@o33nP!1<(-o+^b zzt4NE^w_$>t#nXAeP8ip6`h6Yh6kZ|C%|X8DKPqH2&F=`3nmh_mhNdP6{Fhf)39F| z4|QPi-5OGs>I>ygNr=(|Tev%T4jq~alEX}c&!*0y(7;SF+>8(R)@~2XB+uteky|dS z`kdeB;-meo87ijuWMH6#S@`?K`%7EwVDkka*WHhZb?+2@!!Fs`*4VKoH!7fkQq~C0 zYHbf0?e<-47IN!LZN9t9)QC1k9t{@|A&_JFwqU}hfJC?UwRP)?Mq(<1j8vgVW@bAq zE#Aep=%VtF*n#6iAG092+j5|fjH9mG#X|m(lLnNcCAX*&obp{f^L?y~d_0fQmzIZJ zT{+wNQHF-)f2LGKJm1XaHGGxadi~CL!cU(i-0TGS-El@8Hk(9q{oS3)#r1YQVdjvby(;ds+sE@!DSMFn|=U z<88G6&Uha@ofz1fo5^wBMu7swrtehH2Wcf43^p-S88MVuIZY_nL{zY(p`UNoT7PtX zG<^z_c>)$Tvy&A(+L#Si#47d6UB-g~*}huNXVUL~^j6$V%yh_6y~t+jYT*QDJQouO 
zAGFm;w$zC2vXu?%d0{uo=N`p3h>w>>8JwCg11QTjEU9Z*qi+)(tqn*JNGQ?qi~u?U zIW!T^F!l?SY7oXeh3InzVLElH%E*gB+zFJTkR?f~8y&~`O2eCk?Pi!|O8jO0j>?6; z^{CNEEEFP(=^DpE#XlP=CDOzal_PyXDO=bP`=!H}p$GFJK(N-I;>;KO^T{A2I+HmP z#`u{Uq@|@buPM!9)+(5;DS%2iRphpPL7S;Y{(MP-RnqY-_+gKy#aV)z+jETB1E1|YllC#_UN+FEr|xM^OJ9}eKlv_C8K}{BX0;%O zdyoNv34mTr)Z@l*sNC=`6^zrTQwMs!yfBtB&%oGm($zM94 zO4%0H_3~_F{Rz%)Yi|0qKwys4%$Lt?gJk%iLMCYA$4lJSNz8FZ+xWQ7VR2$^J%gQv6j@1ft8%xFh(~ay^zpNMyWO zH%%?nvBICqjV8_cLI1*Ie35^=V8F}1dC#gan@ry*j*g&4$>7npe557GFF*fIq`oVZgX;BFI(oWNYKzd4;b{TCovkPXL2z+ho5O+;dq@mhkPS z-6Gj$+H!E_G0GzV6v_OaHqd1dI@fmQI z-SRcyGMf<7m@30&V{HG8!qiIw@J#oSNU#@u<}Z3RVgW zR?bNS6i@5pz223y<;$YBNRbrjM#I@b+QnTMWY1Y>AduCq-0y0qf!7v^%-V?+*LWrM@g@tVrvd`yMwif00Iq*(ytYY-zZLiq}$yC7P zJpc|d5Z$Ri_#^?CzNZ2#@{kRn`40K7f?|7lu$Y(wGxf;FT#Zhx@(?IvYvphrY-{ zf2TkfK(1VHR)cL8!AJ3vKgmq0WrMkned~vPSW97{p8{1r)@sO3esL!l_ib*!v|pS$ z2^-EQ$vyU1wUQNw9he6e2ORq%S3E+(j?!dH3J_m~zlqC;Pmazb$I^E3=gmsyA;t&T zh^^Xq?9)~0f*4BM&y!6mG_cm&pD)J3t_Du$B&;MB5Uzpu(Ld!cr$c%@@cZa!wEMn? zbyeqTY#P*jWc>C$mzpafQ8rcNk*_KVunJxM-GN3}F2_G|WPN5R!}2Tjt7!4EQ}3A- zX8&?*W*$TjyGH@w(Ldhz?iVj58r@wn<^VbD_euCQ#0LPlg4Yp?7gnIF z^hwN7iF43=S;a%>Xk7Dt!NMsveG$_cM8PcW`RY8LbpK^_luaVLdYUR-%pU=A~03p{D$jma@qcfKhugc6!_6uzAaok2vxgTy>4yvJl zs-2DLoL#l`Mz-}<<1d$otbGsLB(f?9U$qUV!Do*$u91)NAdpg;w6pO1k=pF|Ck+r$ zCtLiY+VGjLr02t^g8ul<^~^Iz(a*2pEW7%AOJ!08Z%vT4eUS9$KQ>gbjH0n{hBf%+ z$S_Iwvh%9RBBBx5t2VnD1d)Dxd4&8zF2{mfAjn-cb>AUy;(ctQu|eDukX%k-rX@%8 z+RpUQVz9Nyf;f{`lY_9^@KLl1m@F(7$=O4F^d=OD&uC!#QH0%hhx3haKYy6oHQ*L0 zZCk(21TA`m^u<#e-kqdb2}%(Q@8A%?4IZ!ivjCV)A!((!Oa@D-YdMDQ66$V_(^#Ap~smmQC>Yq3$MFM1K( zCBSF3R0?=XDndz8r8~}~~FmeVP zjd)vv#@iYHbMjU_b>ckwdHZjn4lRf|gqy+k;3mxJn&dB*C{Cok=cL6w*5lXizga)c z1+ePG(7Uj03Z{bYoc3e6;&)A`l!`wMD$D2jHrTk)^N}BUe0j*!Q+_XdylkTGC-tX~NRO9DCUMYI5n?nMioIbFt!t+Y#XmOG&J<^sGba+@d*P=m@Oj@UJvPeB2hQzjq-{daU9ID814S^Oi7wFqrCV(kwmX<`ct1qSk zNtvXS1*+Ih#Idv#sMg})DOY?`B-oY)$Mf{uGG2ITXl{P6xh)T5@l^^R_<9q3e4u}; zTK=zz`RFT?u#biYTp z;+T<2CLZ3`zI(cWQyDSU)y>(NiSAhdie7dgdnHv_pr42R|7JqN(REa_ZKEefd8_kL 
z;@a9zz@!`z?3uJiTUF6&5(5w4qE6G=tTT`HUUDRHK7p~Qli!h$tqDBd**PB?X;{O5 zU&!hFh~rgx*$@pT*8@KVPV%IKn-KW@JkK7f7FdZ~8EJ@LKV^?B5r6$_9Jp#>?cT2j z4&pvB;RY!|W?5MoAJodvu%)HWk(G(8I6AzS9=xA*6n5VmGHt!!BvuE^ziRm#o&VfD zB}P)_LFZIbw}u}t)s#{p>mzic3BQtHXa4jNl^DUr(ZeZ|m`N)cXenRyr>N3NT71{{ z-jxQx{af`1t?~w0^}TzkMy8Nz&O{3)jqV{5h=H}_c+>g?3j#_+rU@8upjK^`Bf6?l zLMhTG>OueIW!>daK2Sbk+7W6D|6SS|`~w)_!_PW7!0iyIffr6r`#^1RO<5i2yXWA* zTlRD-Dm*;=w<~|8&;^A5*IE((jNN2Yc;>ld99$^Yll#?xX3tMm2HAcsf?I>dZnyC7 zz=T;*=$FE35~7T=QoiJi!BL8R4l0*fw^J69dpsFRKs>k}gBUZMVvh?zLRva!QCdM_sx2vHc5Dm{v zP5o%^A7pQ@Ufj`f>}y)QeXhpA_N*fn21~a0$DFM9>oGB#8yNv_!fgD?&=ZX=F|1G* zZ3*6=5iKI882t6?sc4HfL}Oggj;ZL&C@e*Yo zEU)RH&u-e@$^|@Bmj_T0k<{1{seb1c{BHQqp!l1QO3j+dNAU)_e&ZmEKXPX7RT$Jy(*a zq8REKsSn}?f_|U@wH&HFC2D_vf9pyD#jwx9b{3cnO(<)Ax_JR&S&xaa2CG#!Q3g%i zc4-CXdL|Aj4h@}PA)IJDkN5+F&oh1|g$d;yZ&TQ%z;~EOb-ckUqyUQ-jHl-1I)v)@ zZd{KQ%y#tuf!LJi#AkwWFV1Kk1*^9`@M}VWG z+g>PhjU=eA8t`J?kG#&jPFE6OS?|p$>UHsd0#^DzQ=zKYA$DdhTA@~d2s7!OKt$KA z&j3ieZ9p<$tdkoPDTRBq-K5hALD8Kr z6-5c90g$*A z@oE1>-v5&slr7vP0yD^Nvfm1J99D{YIf|3=Cl>0qD#-(Gh4RvQ z^8@DM?Yh%R>Nnk;6RTAMb?5iza{=sA^EqoY}XG>=#J$mDl3Qa zo!SB=r#j8)dKWmKhC-o^!In$yQ$U3^)gS8F=vn>`KI-v2iFf}8AH5F-X`fR62OqtE z>*8;H{2zHfu|4DB{2zSs?-dC@6#BpM_`lw@QJGZ);{I;R`N*LqN6U}bXOhN7```iB zkC`$rFlo`Z$M9o)!>l#P)zxfn%>`8uvgNghuF;IO3t8#0czl(TxZ@Z69{%Q2CQcI7 zENYC_e;_9(V*!zwWc#=* zv4IPf7>lIa9JX?5+69@!6ac+$E6RG|yTXlvoA#mywcxt7$>P^cC`u6(KIdrd{P6~5 zFj*6rq!TUHzCcXS7}jAvgG%yg`+({MQ~{7kkb+(@rmyxa>g zFrk;`B*DW|HvobXpbzkf4{T)h@o1I!{w=jwmi-i)hmwq8c0~5IvR|H3=ar7?{+nr# z{f=p8LOOl8PFs2;E0v)+VXrCW-J6K}t{PuR&|$?IK8VNKcM4x;UB1dKD<7acU0$=p zcN#wGn%V|viRQFL0OvqP&!U~S!Opblb+nnWL!rFWN!1nA_o z@DbuYP*%RjGXwEjjOX@Ew)7RY1 zkGzh<-T*nybZ}+oD9>vx#9rnB>-5;Nsf zf7r;GZa$ySi1Ai)@>FjfN{%%()eIw!wSQmwx$-gmY9-YlJ8R0?d>M!=A`Y6 z)Z(0|HPpCMBQ{wRRC$RrmLLo94U~698`Nj^I1`1vYGj50frj?9XzW2>0mT{0E4t z+xmRGmwmh@xe6dJ<+e3?PJlDU&`?KPL`YIhG<6M-0-@m)m;C^=P*C`_=160u6AhzKioGyY9YE>3Rs{7FFL_oUqpQziixo zNHP6|g_t~el}V??xGxHmagiCeVw8+}zOwHcCnPIjOX?(Fcw1Jq#?|y!ksz%fs{!r0 
zjzuPs)K*4mt(LhEHJf_{P5u9e<0Yglg7>L;g_!EgmoIrlSxG4={WlhC(qiVjrR*q~ zt&_|zL4E(gFy?F)b2sTWAJGFuvOZn>vUjr&Q68N}v<;S|n*oYpnoeG>Y$wgp1~&^- zc=)nX;)(kn=1#I3q&8aqbb5O$2E#!NkX4MxwEGG1@bXK51pIT545)n1LV;2TrkQIFSVx6Y-HvoXgX=5{<+K`;-BEJiTY}U<) z-wF2qPD*9T4G?6Pp1hrriZS@+uC(B*YYd~idC>eXZorHOH-P3%Nqu-^aDRq9Ixtub zk};7A5F>kqxo33ngdacV=${+z>z(=O#%5RY_Zs7=_ndribE_N5X`jmP+>4po9zGJm zCmAGvqD|VZO(x(S_XrtVUaX_=Ya|n?V{zgx6hYiCf)CUfesh6M5l6%PlY6T?h~I@6;8dVRx~GGU!nX-aQMmN_{OOA=X?(KvmH zKI_$^7#x`c!2*wlkCe}C!^Om$G|j|5M*+}CY75LUrJO_LE>Ns0=b9Z$vKB#n^9F9& z&b{nxqPTo|&)Ny^OMg`)A%*EG7nIR zn4jdGNY1#Xsz%@Ck(E<+0K53fL0fnLf|6$Q9dl_Nl9NO@l4$XV3z^#OeD%{fBXc$?3buhoDa*#+CT8>U;v#! zO^~*&b!=1MiK(HWi!Ib#>yYc{5)u-6l*bg6w zw;g;GcqIn(mLOm-xI6dTuiYicKZ0YT%2t%ZGQ0iQhe^ZE1W5`}FHax|PP`uMdK%HF-**5x$b`a&>W%T&mVQ_}5> z{+^rxV3!9)ohJ4EbolL-UWWaJVBx(KG11~EpA%FmIKu|jCo|@{%1X9Wv*f6`b8S{? zptKohhu#4nQeitpm3V$FHF38u%lX`JYYdyyWRMG8UoMz@6!-?VhvYc-L{Jtx?EiJ z4Wp-b8$3}074#`CKt;tjE`l~g3>f2GauLvVy>8BpVfV1!oa=Z1XrKD8wV1m*>>D2c zr`2fRljZK_$|{~&Yuczmv%ukR!6I~4n&wXlNqNjt8(`n!BO(Z#d<*fG zUOj4b7y4F{xogMrCL!eSExAupJeg6L0-(EIUS7qDTf0C2QQJ#aCSUT*z@ShIFt1GY z0I5fhcW<^HtWJ#@>Eg87sS*l)IE?^_nIW%Pa+RqL{e7xn+DuvG#bOo3#r954WWceA zDaEpE6mr{jW7eP`-N^W<-0kg|U!7;;u`Y)d*HbL0=>o+lI|5^^GB5*h$L|oU-7vjU zSW(f#nWS^gEtH+DCG+Ge>3Bvyrq^LA;gwQ9&foojox&`z2SnO}t`4dXY&5JD53EF- zgv2AT2Ybk#_9~w5&)@#n#3poda&i=c%8E)#^m!csSoJ5fleBT3)Pz>C$DEtXuV26H zOK6f+R=dx}*S-NHj(!Nv-}uRHFB>%};jhHWp^zFS%G&p00Pj){!V9QJ72&?eBcp2& zu7FQ%^BY)Exq1{bIy_vNtP>(;2yxhIH$KJp5KW4m0;nHFR5c_wO?pOGmly4t?7rM& z-q+H?hx+Wz?96O%6HS@k{##Y8+xAin=TXIjEfK(tJ*le{dzj&gfiu8%c<`dy=eWxR zfpQ6hks^tolSMdXP2?xNEqVJp`=;ud4^lIR8j6$>f!n0Ko}U$5^aa%lXEC%qMCo(> zSra);1MiTljk?c}C&s#SV`SMnRP(2fLXfnbhm>qBcsDHn^b^@-&9h)*(VaT}`T z4gY7HJsOGdd#XV$TvtCQ{q4Fza+*NC=pVWLNKg$=p91YT@7o%-9C1}Ite%}gXOfKV z_rc;3H-~8SEO&CxUcJfL*Ex?&DTp8|9?beM^Wu)kJG)v^h5N;7rLrf_CHM5Rs%N|gcMy5x}4ygz9awizegcr zaOB$^Z=5qNY}EFXwA5^tzPex*{igXU&40K2{Rc9;ATMEnpTuJvF!gKmYD)jMGE*ZE z>zj+z-g47aaG$d6j*=(R<6XPWGZex|d4s2zqk}#sm8c3HdA08m0MRKnj)hDrzGa-L 
z2=;Oe)Q0!|`QMhO7a&SZfgD`)SiI^lEGMs0FDLf~>MlG>$Z9KZ>@YSqHX1Z$^J^Wj zwSY~_Hrza`9TsK;DtMet*1v*5G$F|&QUHv8;rl&L#o+dCQ#Ci)2T8P|dB?=j_@=Yn zvHZZkE$%f(YxG0v>`#ANoHw7p3NyTCDUXxWQ-YQg_T(D}0^4aWE!4x-QDV5 zo3whH$m#O!&bgS!&FZGPK+A+${COF8bZO{agRcEdkEsH9RQ4m6ZW0?|C4!#idmsGB z%61EN-lft!*X1rEBMTY1SaUTZA)J`9XX$oi)cNtZlz#6v*>RzmHxXO(jhcS`3csqf zx6s^$ts*N`cK$?aTmkImx35_CYG1D&WTWO4pFsi9%UasdJ68xO(_fI;j7CQg1KZwm z)_T*v-kGQtATbZIT}$y~uuia&zv9T{B-3ek?tUKp?X|oHLFVk@2%P5ZvqsZ(B(FQ( zn}pE6bz02a*C$_8H@j}AN-LR)u3E$lSO9oEd&T5!p4;bw`Qc-Xw{}<+Uwz0{Ax+g7 z|1nejW=wMQGfcB|mBEg3WdZ@H#5g$#3s0(UNL?&uSZ~J?1(N#o1|jMBq8}e!OHv<* zYA(A}4Cu3cq&eTYMy!+m&k_)tz~$g;guldA>86 zFNFhVG6Pi4Gcv$ehD{?dpS)O`V3hfFcFT?|_sI-PZKMfB0Qt4SZUE#@st;?_JbXY% zVlU;vv|p8K5t`22(d_@~B|fLiS@&mZLhIoKe41wWFwkpwatGcg>feQzfBop#MH;!< zh<&BCi()CKI4mcf57ky4ei?9>Zie<^F(wC6qChW@&Bw?^BkS{qUr?CJn-@{=$w zhDBSGiQKC;G&_e~=ZU%PE)H72ci5)DgVwDy^`SFq1;-d7hRyW4h>N+0Bzy)s%f83s z9k~V{a?4>zI*=WZFoPSd|=tj_;pgb<` zQa6}K*hfy?_chP@yOmYWqZ6N}^jxNj;F(naF>aw%Lqz!^O`m0Um3SPfrE0J%Avgd2 zgMCp@f`v%3AUt6$Vy3eU3W^9y2Zy-_7{sUD1~hdHz2@K?cp*DhW7>8tGrqyIsgp7s ze4hQT<-ApapRm{E`bQ*#JI9;5|M-B3F!ei@J@mErSVx{~b*&$3d+=;@$(s~zCsirh ziyABN^NYPI)G;@A#(k7~HntjBGq_3=sz60Lq}TJvOT^3dCy#-#e8h9Nx&;8NGLx%l-}@UQ&UE zKTo`X1%^*hK+juhB31&A;w>-oN)~oDHa1}QP7^OXgO0%@R z{7Qa)Z;hR){$K39XH-;M*ELv(h!PEefh;OX1Oq4;3ltN%M1lwsONtmw9@=*Oz&v3TF;?cJ6uxY55rPSV&d%2DjntSuo(`KvNe16+( zEwrdIczg3xjMG+-UlvQ#zdO zJbgFqnV75c-%*8?kD7pUaUa!zUz{5H*GoGGitijbZQ4nob7K%rKB#IKFD09}{A5wr z{}^58lw7*jq$+B9ax%D}z3p7;B3?;LD{8Ao_S~YHzx2t*UI(#Pbu%yM9bHOO9amg54^dZf zmebV6O!~*K|Io@dSz{DZr!Z;TCN(Y7rHJnF#n!@0jIq`o@|To-_A%czMV2Ln8!KJB zEKRnqAwF0;-3swdd^k%DA}(ozFq04!;WVFb4@b}jG+ZG)x9sL{X?W>5OI}J}ndVQi zQjNE*!tI6r+%GlQ^>&<9H&EkE-+L}*AxtsRg?NRp9;pyH8ujaWQ7G)pmv!8|tYNo8 z+kYoz(5~dl?m1oYc3zIQu-0y)fW_=$EWb3Kk~OWY%8a(_S>g*ZyRnzen0A&P-ZZ5 zud(gWo%{Fi3q*J~QVB&J3sHi*IBvWaKlV%jR=9uMfVk8esl1d>=16+@*2leEKE=?Y zPPi15$$S?;xCN*2iHeFYc3~ch#ko5SEiW&N+Dv29)6*X>3DT3gxu)l@eJLfRb`4Aw zKrZxXt;x18brLY3pNIl8b)z#uj4uUHDXlrYqs+ft2iduR! 
z&Rp+k`5hPLJ%5(*!ZWL)J2c=JQIb3Z9j$NbdVXud)-eABTSiICn}Iwnstff&h&u=W zY&uUFV4wM}T5*X{twF&3mCwF7Yh1AADi!^`&d$z!ACvM!Wcx^FuU+q+jzjKp#I778 zt5qcN*_Yln{`v;rf$W0CLf`I(V@irM3iy-}0Ynm69P*uxKo=aL11d!TaU^5SN|MUU z%L@w&OG@ma2#M2-{DZFPyl%9oc@{kf#CR!`13Q0ixG7fZ+qZ8*6MceE65B0#WDZ8D`2uTL(3c{LmN~x7OyZ^wF;@ zJlJkB(1;b8_MAH#nVXy2bnVgOT7Tw+1Ipv3a5ke=E_ji{*+|QoSIw~l5nPJxNY8+u z+x-TluG3@*yn?iH#=$rpRSWf@9iqbCtu?r@Mk0=KNkKeL1td-26a| zjA}tw#o{g<_DLL4u1RBz)|-!4fVsz}KNf~oPN#&}S=oJ1GC9T50XpFp@AvxjJa7_5 zonbc|3l})E_0J{+W zFCb}P4iFkcD?HCiJNcpSG@Sm!aj7{e7f&LNneQC{cMqUZ)j1a+sl;aI!e#`q9fA+*YI)Po0( zRximtYSz~sHrm6OWRIc)1e9C2jZI8Cyi{7djVlOC%3(>xuTib7?E0k1Ug)m=a}|xN zEHjry>|#82{7S?w#~$^p-5#iQS(oE4TTbE+DljkctV}+QBvUK<%9)jpiJwc;W@e1} zgrZRVB_xZL;?MhKnW?v~j&@-Q8j|)#Hvvee{^?_j+aFNVbyI^XU!D9gmXnPyK=gUE z7N0WTXBRKl%^T{YpO6iO;~bbtW&$fut4l>epJ|5PJJqMtSFZ}&#HKl0s(v+@`(%9} zGQ+~Xlhe45U;E+1v62_CYt&yUQb;!oUK`5 z`K9CX;z(4=dCLd?Dia`v!wIN(ne*d>{fRKka596VW5F)#&)*-J2~$nhtDkzmN=tvs z+PWb9^S5u`y|mH2TmJbHwj!mn2jlEmza->Ew<>qPDv5gRXLc)Qr!I zgySSCesJRLk8Ttn7G0c|18kC5!Q5Ue>f@n&E`>#GlO^kg(!N4NlkZDjbAHCNzR}W5}tAXA9uD`wsmL#s}Kw=)J}-`cErJ z-TCZ(*Mt1NZ!FR?a+6H3_9C?n3n|-X0}qtJM70ekew&+{hx4e!RMIZIA;q~J#L2xB zOp3h$qb&bWTk=!1$@4*bMgJPaj5jW>Jx6v2W?;$4GjDfDcjw^GvwV4^`1bp_Hut&0?)KXd|Fw83w0Z7Ts z{I|4d$=t@a|t7>%dkmhwA$bwzppVtuW2hZcZylfGw%p2`L-;-ovNCjJjgfs)%v+VcnS!5l}| zdlHykBGaD_CeeZflU`U$dRCSkL@aFZclYLqfmpLI)klqM7Uh)kFz=6Yu z@Bdg&pJB$~oS*0E_2QG{MMT@Z9-?D#S4D9JESrEH69 z-AC%qO}!SXNdC8yBe@i38gqv*|iQ zqsS2;O(ziyR>#Ij)6c-b0Qjr8LuNN=G_*FRC39@&MV)c5FWAXYj#`m@-u|uc<|!oR zE~I2VyMonHpueGC9)VM=nb!@@bJ?l{Lm1b>8mvbN`oz4A(uUdMmYiy)hW7E z^gzfP_9jhBOA8@eliqk!aanSa3F<)i>%$sBoARfAA;nU0m|d;%<@8Pj#Zd-QQe$Ph47a%7zOF7`9h1_ozYwW}gD;-%1Ixvv05GLK4l zOpJxMYSaM@7X~v1Bpk4GzK*rq`de#j;Q2XOMPVSnx<`2%1Vh`ANPq*4!y7q_bO zSIurIGBF$BM74fp!sd0d=)Vba;Aa`)oJL_|weDd~>msoTbr>A^bTuYsAzTPoR?Um3 zR)Y4UM;9ocw-cnMJq}a;%Sy*})3;zYLZO8o93Cd{D`;Pr#}6W$#me_x$19YqTkU@zdyga zn6FrqBl?K(Q`|3-jI0=3(A6Pig)F4RIuiWJ=~23~2j&+qAkqtauVTL(>iG71!hAL= 
zToA$L_~ir-TII;FII5K>D>3pQkcKXG8R}g4d+72Wq0~sZADcDAsR0;7lkRFvpY@_; z(Rpg82SPfKYdm{N&Y)4)=Cz*sd@X~u>&JnMSMa04PZMX_`)GgPqNJZ5 zIxr4g>JKdo$M98ek9BoB9&NmMSi`n_ZS`D;lZ^YxBDGHkYDxCn;J@Ujob%5|ecF~U znDSC2xs)qO@?pJZe!k>}Iu$|~D!JFE{988me<891FkzZ#CFtay%Nx%R{TZOu4uInX z#Gut#X7l9(rLw#mk9Gcrn zjy}WXAtn8ruV23s2sZcGHF%#G2gZN8Qt$iHyKumlz1I!kW&)n zgOW^Y7G4w@3rd*COVE%%Bq;=if_Mp+GqTjIWtnw*;QxDfD*ouDw%^JHt_j9yHcEp{NsUQX`yvWXL+1LL!= z4e2>KX{;gpC^0QP{R;Wbi1A{49jPcEij5FN4&yj+YSuzGKet?mSh5q6fx17wc9EBd zXE^N*R8uulASFpi&;vfX(4sdkyaIaeyuIzp@qd{kAQ->A*RwYe5{QuyL?Y!XT2(W? zlUYuZ6}-Wvm>MVz@pU(U9^isUhjdg+tGBBQJ$s>u@_>g~#ip?&ZjJVysi`Sm&+lI9 z)rUAe{Pq(~$B65-xhco3%uq|K*Cb7En72E7L0HErRhBy|+aPcy|JW>xi+amxZ@=4- zidw37;hlcjWad9)#NGe!8MA& zBgDkSscFyZ7xpg2NIa}ZH9wi@Rofw~ z2*hqOksJ25zH2N~Q&XK@{_*_8&^aajq3kIM{;6HyJ{>XW8RPMv6ubpWRfHYra zGxH8q2xFt8iNK)GN-{o??++~edF?{lAG945s>!?b^7!{drnzUdyUJVw*ww zCv@r$H8PY!)MTJ(XFIZtqkZJ8U%ON(S!I>YF0?uP%|u7=TICfxMa`IZ?;;=H?L}%} z=c-9*u_Edzny~RmthWUyi_aD*?gAN^j4@6vtMjvJQ@+a#&(emp*$t+)c|ULd`XlK-Jdk>>bUI5)T`3A*}T(_nd$89N^e zopaBkT)rq!92M)}Y|ajR3!5#!Dv@Wu4&4tgn9owY^T>={*?os1sdaQuAyIb*h}5c7 zz_8BB96#mk&Bpohl zefIg_W;x55h?$bBry8zi6x}hUy<~wv>@3}uxsDokj43!knkvV%<=dh;{=G=@;>wD7 zW%tr#YgL7df$Hc$uX2&%geL`WP$bBWQ)O!2@q#Gj>}=6t?gAIjIgeLxH=zllY+_x} zjR?&I$WTr(n|s20uG5zAVr8Llw&RLi@+SY!9ea-Z;y*i_Zh+y=!_9b>Gip6uDQKq| zkaA`Mu>7DhY&!8PU!s1w=biJY6T8<$x4Hbv!*|SG^4x2iArl#_!2bx8dZcI&1>XZBp~+Ue~0bHKVw_m@WKz`P4r?esp{pPa(x{m!~SgCS)H-EC~W8lV7pZ# z?G=WY=L^2)NnKAXiV)2U1Fb4jikF8Pe!clwx&bklfny=u@86}^h!V|Gv-jn4El06Y zYfpwoE!4@YZ>M9-M`Z@b&EV=WI#x;B1L=bD0-8FfPI5}+a78J)-TL9^t10OWp-Fnp zv#Ae1q6_sURsROJ%I2#&hhC^inmycfKf)SLk9hyNlwiE(M&vctmeJPX5!?Is0DN`# z?n&C_Zw`p;wnE+A;2tAT#2r;p|CJl8dKeP__{1TEDWW=#c`mXFd>qr~cJ=$?8yx6S zZDrEqraVxQ!5oAj?u8V4P7GGV~8Y{vB=$i#*suzJIzPq{q;{ zdi{Wp8drIh61bZEMY}ga3@A9xmJ9i~S3E8?bp4eB7t6?tt^<1b?Koyx_P?7{60w!~ z`A|#MmlGSB9It9;kOywj>4@9ii`Ov)ShV$BE;Yg=|N9~O+{w@3g37?N(RY)?Z`PYd z!VCGMo{+K$Ge4E$eEKcd(ze3_`z^#aW;6ZhK~&fed;(u9ldllQ7`Xk-;SP+NS73CDxoReMfuBTd 
z?KT}aPPoaC+l7Lu={I85CIaEL~Ki^vFw^mSP@ zh!kw|&P)Pn6?mha=U+#Cci*4KuHZH{% zrYqFON8AfSd)xb;ZI-4}AR_Msc@b$#k;nJSwp|^+ddbsw9bt*|11gZH+{iA>bzJXp z95WC{beZhZVTMos_1DvTm_#oeo)*RSY7=YpS~Xvxyw6p75j@0c$Tfa>SF#_ zv&{K3StPj~eWF=_(?PdM9okJH1C93b)z~X+a3N!1=iXPxh`+7>x8EWV_h!I6JmE2w zB7*DW?l%^w{!lI6@a%H0>;Y2S%SjP;=!q@-(+2o7iDEa8jRv5#uDhO0BwFf=IlQ6^l5edC$#td;ZRhZJzTZH^K(I7xSGz0aPLV*g*yD;4yoYnXbSR(j!U z{!GHBcfV#cost~#=8GazeA_sDccf>C=0AVyXL^#fq9v#)(-rk_x(^ZjJKi?s-lPrVnOH27%>bF;;0gO=N1B)O* z)2jLI6-7-b;D3P6hH3w_Y4AfzbZ|l5AmKa*uVYItA$uF%up?iKw)^tHHN+Rah<#4s z24^XE-$gPQH9;|i$Un)YVq>fszvDP7CTLHM(AlL*v9%oLv8yvJHJs&sM_&{@vN+b# zKB^$TOc!0Q{bGFMbiX8Jz%1SFWS}K3Fc1lL8c-uxVSQNl$TL`3>R*_6)CpA8a03Kl zcf3l$0Gtam9do{N>V81;idXbV(|ENT>B3gt-b*Ddf-K7JYI5j4?Y%{&=~|7+iChFi z^ELX$+A3o%}@`sVy}%faqdsXnxmjOkJKO}kUK|^*B89* zUY?!3avo$HF-1WJZ(EBYkTkws1?jjAs zj^tnCM+aYG$=PY!9jyx0{6wMpahUi}*n^ol|E3D`lz}hKTgq;vP#2il%37J_QYTP@ zRCKb}|7HTTpWu{AEx2@ZytMvQbh+Lk-k1%sXTvW|2Hg8zw)jbVTTXY!fTe~YE@X$1 zIHMM!M=5FhlOUj={9VqhAimEWN-XG%`FdW6B@nw!DwoFUD95>1$Zv|-aT-U)mt+yy#ZkWarL;KSf-+F0xWqB~pD!N^8R6Xw9+7nmA-*b6y zg&gEmirDp@nq!FYzb_#WKSEAWb$O~P1daJKz;1g|Byy)rybI_Wl~-gQpE|-LV=v&A zJ{D=x2)^kXI+P(M_2Zj)Gr3poUy%6!`1ZQT8TIotuYphHGA@OEmk_O1);VDH7u#%2 z%l3>edpDx`_87WL?t7DPpMG{%{q4(RcF)c8h_(RzR!A>W1qC=&!c3gdoI^1 z)^I4X<({=I7<--YgE;?BlNMJhaO*auoy>UR?y&3&Kwy8Itk>rmE`FVqC zKC3!M)Uq+}DdJ=KI>wO3%JgG_)G@&t>iyjZ5Y@XU+(w)3fw%H#(}JTar<&yo<5oizuLW#n^DnaPesp^Kocx2S)hrP zn#CuC|IGQ(=XmUk#V1+5c5P8YRhiaPSt}a>fHC$2>N+#SrWn{_96R+&U}W`mPE)0vxycQIm)r8ck-g849e!LGL1Rbkxu z0K5;A;4ex)X<9G1p1IPp+Nvtp} zGtaiXa=r(Fu!CwVJeg5{WpS5K?uD7vPy^ww-`p)em{#D|#kOBwv$5=9t<#%rkO{S#2={AcYA5C0ZMvF|7@79wI94CQ`aGNGw zg)H#7CxyS$I$?3X9-7N?Qm0Dyvr9_V(5-zK`0vjH6wF_?1x~*n;_`i^`F6aFzbCOv zntAt4n;p$jv~x*dU`JE??6af1R|^p$BHg=*ndhFI-0kq^cT7z)&NnCvoG>vp<%5)t zxg>UL1?0Cv-G4>k>?nb5P*li2#z+=aB=m336QC`a>GmJjVA4BUKIg!psw0QKPN3_A$fDzFJ2`Z!{#LBax9gav=$h_S zB0U8O6Bs6GvY8sWQ^0TE*svaoIEj|EqLRO2*5-b`@ad*k>HM)1`i!7UYu+7IXTScX z1=PVS$|K}}qeGNGL*P*axg?xU)!p?O>irS>>e*3{F$BKO1Lh=K&xQ&y>yx@Dh1GdB 
z?NMku66v(1)Z?)_kyPO)tR3e*^qypUV@c3I&fQjKaMkW~Kpg3*T1B_Mg7fkgn^sj` znXha}nEU;A)>vK+jdb=g?*>4BpKCPWy@H~E%v}dW(RJPDXy~Uu(cLQCMQ2Q&KuKF( zw7n(WPC|VC*e6lmINl{#t{nS>2W7j>m=TA7BT%613`o=COa% zHb}Jj(BL&8_so*oO^S0<-6GDByX zD(Tlk80^5cdu~8g+%xBAV*TUI?|kn5E-4_^4(?te&R5&ldyhWV=zYAp`i~%rd`tEB zNibNp6 zfNvl-HjHPx?`i@Wd73>sSObRQTpb6ZE?n*;5BLDz)-HR!1N4UCm6FE-CpruzVz)pX zzq@|W(L)dbyTiN+LDg1Nt}T6-7s|~3=D&7GWQfnbbikm(%30($v*V2Tg<|HJ8kS)- z{vbW{;N``qKXDSir}^XDS8jmKKvxIktiX)sfRouUko}#`UPF2UWN};8(Ku7jzGia; z-*A9?+k0O=X#a({^RKFhWfBRyuDTwIHN$PSGq@PDT$j||HxPPQ_>*Wfjg8G`JUd8# zQ0ugM=SPLwty8Q~iFPX!WljN1oTb_E)H816_ots+e}we>iDZKtHth=#+Xt7+t49)r zl2fv)ns3W>BmkM~b0;FH10GvGH~+(Jw`%NXZ+nPGKM8@mUOdERHQHD5acSwwOh0}r zN)xM7h4!@-d%C^3_Ns!AZzLjZKlo81{8ErXhCc0 z6`h0)1b&8poTH>qU%dPPh1%~?0~?iTv=Nc?^rS`m84!tf*P$;5U>zze`XO;Suio(! z>?WUc^t3Fd-vr!DKs|6;$Nip#@9DZ62>Nxx1e?%sY9jxUCw_{fm% z*KzVEp^!|rjV8|kyBe|y{lAPad?9{P`5F*dww&T9*?O)j4%soGF0N`73>%NEKT{m@ zchTI;_u+~)2}99#OufDgPn_oQMS4FnwIF%m0#*o1oRi4!ZxYef99HaB5Al3aj5GZ+ zWw*y)=9XE(a59rsI_@j)CGR7D6X&J-y}P@+t4mEwtKxNNXlPrEpr9a%iIp`x)b~Ey z;aJA>%F5{Gs@&Y~VlM0N`rI9k=liEqf8hk)x9R9DTwQdngeAuYQDw#D5P1$2+uxN= zD{<+Q2h*+vvU+5jnSq-*p5>*}M~_`B$`_!K_U3!ABp-WLv_aSOO~sy?{$*{%A#?jf z^e=}*7Xtdix9ny7f8aD#R6@4CseIw!oAanVlFJ_nm*kKSxs5)|Ll@cH-2Cm^owe7v z8^E7)boAd@O=n$SE`tjBP`zenM@Ot}@xitgrAlcbVVA1$uVGrl((3AOr;kHBW5-S3 zfgQ@5Rsc>@#}4xh3l-P+ZridmrgRzYaid}n!e(heMISF8Q0KDLCTo!6SME*m*|FF) z5|+WwIgY&n$;sz7967taD6A}+V+6$m3??~~oF1f*1M z5E0%;^_9^W>#qGdE#Z5s)e$6C=i3P=BPLe*YYnSHauNtRs!3V%A}F~bKW{Ls1X!mb zgHj?*(Ty+^#uYK$&uwm1&>QWb|BVk-7lB|$aUFG&(&;HQUGOuxqpq$lC)Y$X+SPJ5 z(qz^01yF!3{g0&syV)QhJ8s-R*xV=Nz2o8$9_T!1;UT1_&bmhc(Upg!fgKfVt#68J zYh$O(jhp!#eR3BlSA21pCt~vjg@$%{j2w;|r8usozsV*xV+2DKf6N=l!owxO>M6rH z5LMZ_w(3KImbCi(YHO8(xy{j+%rmnE!u!qX%SDQAtzS|uhf@ROM&}{L7Q5U4hODB= zWCU-HHm}#OfvN+-0T`~(gJgG4;Vwzl`1ch|9~bhk%?o(*(<^j$KXm28fYJu<5s*LO zZ+Vbjzw^z{XzoPom8(#?cBfMh%#|muf2Z>eq&?ABM07bC#OhNUC9b+g4Y^H=w_P>+ z8EC+ed;XZn>y&5*Vo=ok#pd_V59dfEo(@#khj8{fRE(Tpu%gL5e-}_7Nl0}?902s{ 
zwe6Q1i4Vl=M-|byt4Q;}=2ivTN-v=}%UJPf5PSuU0!0&r^U3kI>XrcvAlMR2zG9_CIwb}x*5xO zZYZNbua-t#lyNse(`n!VU#0#^r-i!=7^oJIaWm5M=!<0%jj)Iz&k=ks;9;)0@RhZ# z(6Wxp&QxM)hp5ot1|!scDN92`WB9~{@mAxm{ZUemj-n8tw7)f)&P&c1KB4$@DJs}p z+ILI7Awyd7@WIxZnVU9I{xoO&Oa-vSB|s<$d@Df2J{-=kdjGz`&O_zG-rq!**TPw#?q7%Aa4~R;Su5){#t~5t}1DUaWQ5E>9}^Zx%tYq z)23#xm8Wss2i;y7o2Yn-j!La<9carq4ZfqpI+TDPEC2&b>JWDyFp+7~z+hgxGJ-I` zakj^71zgzG1#d9u*KZW>ZM2*aOlOCxZ{R9o9Q0%4+<^4du1fgrxFN6)uNpQ&%KU(U z8QOV!m!;)X{Y>6m_RXc&c}6#&E=m}CIAVL{5Ryz0jy?tbDSr!99R}gD2XjHB_ola9 zd$CntN8}v1Ao$*#_aer>3z5e$)^8I|5CKR!BJ`OpNVd-n)Gc|mKN7;jaQQCI<+-_+ zhEl#1pxL#VYY3df5#^RgL>(}>Nz__Q>W3&ie{+?rb6dF_@VH((YiHIjC<@up8C<~% zsPd$#OZMrX-Ib*b@WuQtC~R))*nvCwN&}(|E@KOrsH9ZpnZbtGGB{=hFH}LpT_s0#3#T|I-wC75GUHvL1zk=_+8dV5u4%85u%k z$bESRi{FhNK$eH*U;YeqTv6NKC9hbn&>U0r?#lo z4YN4+7Zk;w_x7wC?aMtAOGm)%v}&g{OIpI5eO>OhDbWCuti+SWaXQDgYNL1Z8>aca zu=G@9(H(&kn+tGa*yMvM4F=E_<-)peWow%jbxgA8Ff*#qXq3FMK=T&8&kA4SOq20! zUxn~bHxVUmgai+#~r@JzK@NKuN<@!tE#j!?lo$@oYY?Z zczj&V>ZSIor}k>Aw}g!LRUsi88=KJ)ZT8ZO!=;+obU$32#~tn@7>-4nw{mb36%F$3 z6%Z}&Qh~O&>4S~iHrW_Up4yD|OFoeUe$o1lQy@dzy0aPkRFx1|eRlFarG(1O2=1!{ zpS(cN+dlRJi9H|H930r;e%YxL8Yh~>UVtFsJdr4AdjU>aJlpV|4b|0gfG-^kVfhnx z4PhS=4PmS5!4TH`640*T-ENQG{U^~%ib%9_d7AVOT9xEDaWv2(!0c7S<%HT47|P~n zz}2j1{z1nYuK+rn{12Ib7|v?#GPE4efZyyo3(>B(AM;^>mKJDRKD)-3Cw2)!xj99x z>z)*>tG^9OJSpgT>dHRy>9Yf$BCHk$g66h1`|?Hr1%2YAJtZ)sz&loBOcTAcy`^zW z$6+zDKOOg=h+-?)e5di+{e5Du^R^h`mkUa5D4l*CiwUfNqc;7SMN#5KE}jyLZ(Hj8 zzdH?(vc@Kr^dbo?y&es-bH0 z?(7yXa5s29&R2Sln;4K%E{4T6WG1&i^?Ov8QPk%!Td|V5Y`Ogao0Fqt6ONQc6)~7j z*#!eg9a51#L;hA4%Z3@!juYGBMOjJKWj=i8G#5|+!21JD=SbX&C8hdKLH&&OE(^_X zn}Ooy1Dod?!C|1n>v~J__j}He%pM0N#1`!(aMf+v-h15wJ6Ex4E|+ayC_YvRsd`Q& zDMklWF`)@s+FhR23yS^VYE0yt(G&%)Z;{6#+xn2Or9-3BN=NU2h-vL`B5{%#J6Wf- zeoM`4{qC|o=Ayd6j?*;I*Z=%n;(M*+w#5gLUZlD6Vq|AuWAv||+w|+zOLsf7{^$yRK2YR2B$%D$Lnu6$e|k zr(2%01pN+CNs%Af%nIBJddSnkp3gUyyoK^5nYD(a%m1-bG{f#eVO{TW*8 zH$7rB>(;a6ns+FU!1MF{)*M%xy*EP2L97?Vn1zKN`qzB;FZzjhx3=G37*UI&IQN1g 
z^4)*7_hm=15pAGdJe8jndv?r9f`#rP1I7M*IOjoP|MahZy^1xnalynPot!FCtJXWS zwi}Hk5|-&Ai@yD++FmXyAK^2W^Ta*aDko@;>#=S`q+2!crE(HSUgOh`zW!;rmNkal zfJViywY4)vftuLER0n3nhfcP2&cQ*CXL|=PZAssRQ%siRP#Zb@NEWF179YO4^jd(` z{Kn=E@9;IfLg7_u7`B{jy$(|u#+>?ml68l9Zdvngv1x+g(rl?TP%dl=MvA?ti-Ve{+*1&C-He2yhm@zGg(Fbb?>B zudxBr`ZDSSGjpi%`gQ=b=xCdxydP|x9BG&E?`_Wd@u^Iwiu0z%-vTOR+uMc<--!Yx z{5FB(Y%&kQXqGEgTT|6w>H9?#8iQGsym>v(WyiVOaozst{lu%=K>^QYaLqmhKB2a| zBBkm2*Jpz&8@l~{*;qHaYW08IV^#_RtK@`mmKF(Rbb+`dq3#l8ktXUd58O=|?rtrS zbw6l|dgT9ti1~k!NNHkk$OiIUqO1ws5(Ru!W2bI175e|FOZVRAf77x5wV(7-?%D*# zzx|RXBPjSpx|h=lu~>1g|N3wL)2IKh`$!iC>jF)v{_Uar`fpQCAEJk|%m1R+#a_$M z#wsqN)clpOiX=LuANpNT*a2@rl&f`TUti`0!R|uS$btmiDV|$C9~9$Wv2G{CH8s__ zwbE!tloxh^qGwrpN`^s=1rx%DkDERnC9ELboc^HxTccz;K+6Gd&T?ds_BJ~-WuT=? zBl6!g$6OH-`kwSuO#GKkH*>$}%xBZ3n>USrzfSLJ>2@yk*V@7C;EsU?5a)XI-;TqK zcCAH}QZ9Hb$bhNjz`I z_Q79NKO)`$a(NS%9k31;F#PcppbA`1-Cagb8`JNrPP&~qq>)id0dGlZsRpBGZ0q23Z+HG8B%B&NL|dfRCv_LTclT=)=Hl~gvqUPi zYiTTgT}%8lY=RR`;;P<>L!ECJYDS^U-*g?fA;xt)MBeGX#LIa z^lD+;6d(Kjp<1O|UMYvngikWN2&&xRco-XDy6IAyk7Aph-Z|2F>D5n%NeDvq9S#4b z#p;rpB+p-a>jgBp#pA9E3k&dwvoXCjI)Tp8u}w|vkxL2v7QA2vH?aEr)@+jT7=hL_ zLiRz=(RI8a9KVVfH@l=D;m)nu-hXzgM?tr;8%fTm@ccdhmE=MZCRBa0ISOy0sT ziYe<+^U2WqnMilVxHrJaG?b_F{~cC9q6Ta|Ab`Q!G6a=Z_V!9}tII<;c}ahWZi9b) zH(N%Suq}f+p)kz(CMJecV8(Jy=Q^DD`u1#B8~;&U%*N=niofG!T2pOfvs>P z@u$3&e$;PX#ksE|yT7(`s`2K#*Lr^zmk6CDT;G1>D~5`t{6!Eu&08DLb1d{1Iz1<_ z-soy6;4?qIaWK|hJ6K)rd1YI4ojKrl(CurFHU+m^{Di`plcceVmmpa$*p$KmB))4F zf><|VFI9$WKhVoErlYqBefCh^jeTkQMe(}m>8%|VhE>Du9~w~N(cB}pGTz#Hl^nB> zs#)bmev)e`()I1hu8oAIOk<6dk`nbou*xj$=Fi$1e_*{~PKV3n-AI{ONjh+`4g-FH ziOsLK6X}P$#KjG1%WUX05+{wxL>_mNDHz@L>k#Sw9iqm+PwPTATY*n$;^*zM_lzPh z@G@svmx43P@hQt6H6$mm^J3%mG^9IfWzd@T)M1=s*K8IXyeX1%m&gauE!X0q+P)7K z1N~(arnJ+v^qVnWJuK4?zHZWJtUVtH(9VN%>m%JKIeoPYe^wn9h--m0n zzj3^nBaK5o@nh=E_-7GxK&EEw48@S8AFx=ZEP8z7_HEUIHv`aM8+T%OaXatc?eQAB zq!y$_mg^lpB&%(7Cw+#Ij;e@{|2R*&U?^L@{NqHYl6<4H+70WbNbyl+IVGoG8eOsL zQxgiN4?b@$dpL&#DX*IgnpFkrmnk?5rH&W-yuBI-pefQZ(3pbxM%0-c=e&-jgc}QZ 
zkUCFoNP1_)URrLAW0qe;7*P@RduZuemcHgCBc_s!oBP~c?N9m29vsZ5gT+1b>1&3b zh-q5EV6ur#ZK-t{D06H>-E+l^6O*!d?AGDhdmlvu1$L4(*Tmj4);b-9b?W>P%8Uak zBc7BRoO`5TFBy_6ZWAYf`RV#SZyq`!JYbtV;__luE5+7vEr!Idox8X7XWp39=Czv{ z+#M|}`PZ3#)i;A!zyFT0yt>hzAz9nhSRE}L(P4gVDW$yddGe!;P^m8m4Ef6ohN7|Y z%RksH<%-C+vC{3gzhUlTrK-@~q=?{85|-#r4Y z9gjkRc)uv6bou_0?{7- zd9S$qN{?CRE|8S9{-UK;pDVZ_O}4mbb#7)whZcWCJl=heX)ENresN}N%6mPqvc$jJ zbEfx!8DXf$`&ySbKbR4QN~@JMM6UVV#!)DZ4SAdB2%jvnY_^G!;CGL)PT%!PRQjQ) zDeHL}M|rm~q{GY0RXDiX;;9r1n0tMEce0g+SyouBY{-eAu32L3a$Fi4c>mrnoL@%8 zk)k{ejv0BF?F6HX!rz2prr+B%8Mj-`J;falQAIw`2Y}({{A)iZ663Jb#J^Lh+5sG} zR$}R%DM4|q`XF(%%NhH5Q7qk!1}BmFv7HAqj2e>bDSK``S;4z{2MJ*+)u$W7m3lv1 zRC9fKDHwQ)qdHH~--ZhIK(&#?@G4*3wa+!Ldx}WoK7RenhMbag;rp%&FY?(51TOeV z1hPpw1UTRbp)9^rAmxz$by2xb-?>z--mdtsm-QkhrX$s;|6#3>OR1h@W%u3Apm!YC zohWMV9qD*UQqb{GeWO>Jej=P&CX51@h99x4o7yuBq5T zGP6cA+nH|B)c%L!v$tlxZI^RcpI@GE+#sLnG(WfPVsLu2zIZtsoa-#TS&r9C2RE2w zV{tj!4Av9lud(5yU1Xc5M(a*7UW0PM;e%>rwm`yD`7IKoPfE%2tuJqwFe8P{RsxnaE}H9F;jVA7YeI8d zcGy|07cdoKubA{Ix<%}0U1{4n(-g3+-p22VA1C>>Sha`YSL1U2=`xx(CDq8k{7EA+ zzxvyzU^U(`lc%qJC-u0f1jw)73uR!ccYZ5=}~JXPq+1ueFh+3xWT@=(Ftl#vU`m%`IOS{O*uZeCR9 z4+^!po^SsCHpAHML20tm0V`93&hl?y`32Y%s6*hCPj7|;!wL6qLd>a$DDDAi29G>f zSxObCAczZ2a89Q**{`ml+1J~Q(*#zXtGgpus-ALJWZ41c6ZN_7Au-(-Bnw291Fj@$ zlO}#{B0FF#xFKTPV*H^eP&Q9GrgfI{nTdqy@{})ulAceQ*Kvtx zOsV|EgZ^L?pQ%}9RxZh+Z8?Np1x8bz_m9h2yb`Bdw`rv?`@wL)gH8a=DHPg`cFV)k*8besxWr36e)5&QNpJ_b!mm z46n{?YN~1mdaNMSAZPu>+31rqthEnzl36WRpIuxwqJsDE)xX>T&LAL61YU=idlMX5 zE?|f)m)d2hfeTt;tJ6o5i8&f9B*n2Jt!PEms?AO2-A$2fJZ<;gXCO{e2q0((5?!>~Y_jL&sS#&SBU6n&iZ$k0XT& z=cPxcg8v`Xv}Vd={)f*bd44XM98yelcWdwJ%e{%yw=xg{Ty-Q51!N)LG8CH=z~|n& zWHWc4pn5MUzbd7VLI>eT3>%pkB;UgCn0b=4P+C2KzD81J|sxC_~`hc-^+HX zi?zowa|agnvi)DxU`SRlc^EIO3ugAMFgDc681la49SK1%Kp^VJbPiM%j8*{mK6Kv?{FtaW@$L@pooV#V z=u>^%Vg3YTG|9u@*O0$nLfI}*K=?Rv=y<)H-&G(-pqO6sc_8i zt7_=DNvO(m{A;h5l#WkWYsJlv1oByMQ917ANJU$i z#>A&f-g}RcqC>|@BJeoXX!qNQVWulp4F0pusWG4=S+N6H(r%fyi%ve@fWSPq^VGJI 
z^RO{9HTA>9wS>O-q(0{~_NAVjJfq0w&gYN@!fT_>AyP?X6TV=wv{SrAP&PHNC}r{U z5mo%!gI^?4y&HvhSmfiE$W4^>qAw{|#8yYR-Ew+vL(X3K#h;-xp^g5?ixYj#q8~{& z3)$sXoRD&sb5RvE!vvhA)F=jOf2G`>%G;h)rCjrUTN*x$$Vsvb2^PJb7}%dKVN|v) z%Q&K(`SBV!zYjk?(2ME2e7nfEqCMjZJNC75TSvK?@maZ<0p5E6X8ZeCjv6Z7bxt2l zudIxkh`d|xrR&b6p?pB+T%4n8pu6cXhHI8ph+Fx6`@ZXn9gVrr1HJU$4vQ?QPbco~ ziP)zyNN$Ak9SGf#+r=~q`s4-u!0?X(U`ZxS4;UE{%7LX%>_h{gEk^!7nJRa4fH{QW z?TCbNTov{90hzMVa4MaF4>qt!Dfnoo;CLw?-L&39k}az3>@>=9y#P+zJYWNbvImGe z;h=`RaWZ?T5{ILEhFzBET3y57dWa(7>|AB5A1M7+)7bTo$ufUwZ=nxsB5vDfTI?oG zQ>Va@(QWVCqPR~Dj5`}$E3~MXcw;wb=2x68?;7i_Z40I67i1s20M7wf7IOEB+XHS` zvtB{e54=CGGqXx2eW_O(ox%Jk@T^5u7KMHUYo$AiHmJKsf8&Sa60bVuFI+(VwpfZ# zQjqjP9#{jX>PH{TbHI`U4p3rZ;_Gxsx3YA~wag4@odjwNDK9(m z+x_d;c7jrFq&ay)_JSrT%)imVkOo0LYK8TV5xmu+I_1Y;7m;klhYfQZO%4puw3%)` z6?_r86@IF-^!FRV`mp z!Gyx9Y+UPWd@~EH>!*S|2QMOWdKFu0dK_CF;i&#=koNxz0^#dLPp){BVB6dg%O>KVGAmp|7T+rd%M{$=mw z6W6yRi?5Tjn**cm4=?-YQiJFSFT)dTR|s71p$$o_EcV@}Mb3MCBRoO~k&(MnViQ0A zp|hf(;0>f~+1T*SOBo7S#PtsjmTib*6&mlde89jU@ti1kyjgtNRU*9P?ubvV>$YIF zgZ4wRac%UYI098koZu+u-EQ8TF6;tqIX+o%T)j$bb1+nPLctEsWq6otBhi)z1Y zh2=%&4(s%Mg7qD83ky>3%PPRKWwPhw^qkkjaD$bVZO(A}dU?U_KJME5E!&Y)DRLMu zX`D`(8@VeCk$Sjf<*_`1Rs>n?CFQC)vQxDqIxz~(wPv_@(HHODu&A^L&=kQ0oOZIroIyhf%BS&M~~ivayOmuEX8; ztmjXK_)Iyj;c3(R-mTPH%4X(mqp`A#eMbS@#MNlDc#KVXk>h<9G#CcCV*qyr}Eb#4NuX)dKrxj@;z%gq~K!OdB%lTbIsPyuGhK1 zeqXhuWLC#T1oES$9k%yUM)rPQ5p4v%VjaignmvL~5;qbqn_|q}ZZ*b0IaI~ZzOf>` zl&vKOH5H^I)`kx5y0Evsgm_XIJ1l;{HA3}IZFY<{q$=fSqy1A5h{#rejU}wM17tsp zKd!ORV7wnt5ie6aaiOvYh2 z{2(cEghquePla%y+=^i6_;OzXkTsN$e;Yy8D=I)*;6?FE3CsMmX21b0Yz4*NZL0$>h{y8RqcF^YXZol`hC6zoG3BPO50eW3dZTWd= z#Bu!%jY})-gF_1}b0<^@>lbNw$nvPsRpPsGYGG!Gf7|*k#+G9vgJhLu?Ps__@`>@6 zz5Rgk-$&otVn%>smdky@nAg`M`)n<& zb23)_pZJuUR%IF<`AgrC8?gN2FbBy&=J!)qZg3SfWXGoM#XtA2 zmDu)|>vNl7b1{@lqL(&%sU!%l2Gv^NNdY@BxS2jyo<>Dqslb)uqPyfq1ND(cti#}f zKxE zm^oFG!2|P>LiS52?2i9yGG*0y3jjU$4*8!;{WxS5wfi~&w?w@`Fc^alny(yd z*oxV6)Ld4eBJK#T7JSlLdMg|I>ZAHpXKZYZQ7{WuB=k+Ub9FVGgK16exbMp#Lnm<2 
zL1`M|&OiGG**s*PWEi^senO4Bij{f$g%<|<0j6<)F3~dQRo;E5wTX8fscG*W&*Rb% zay{g3mbDzofXhn}$F7`?teky(Vp5SGO;cU0)w5>s}uz5<25HTiw^O zZ0dA!k}Iw~DG+<#DP1~4$x$oBfo(>wV1Q&$?j`MSNUj`hNKP5~(k;95%)ZG5S=oO( zfuErmqFqDw3|2OK&Io`Dj3u2ks>S~%q3Vhv8@=_dJN zPfUIixh?9^C2||^^l!!O2!z|k8r@|Kv=TMg&hsTNi<1prb?>Iy zTC4rIVzO2N?5Lz46K|w-U%i9^*UYyYbBkY=*P=S#P82fOaZp=lMBFt>GhSI`pVT$K zBljHJOrNBQcTE*P8OG(|uRn(dRqt@v)j(^)8L2X@FtA}Tn>Y8rNd$FRtyuj=-XUk& z+%*zTMccV0VlO61Dv6-p8E=i-qX#{O;vvZoDk$VUxadD;RnR|~n|S##DfD8X7NXiN z6MBBirgHFIEL3cb(YIKkDS$P%{pzbDw@5l;KhML*)m6QQlGv`vA9g93t-@iQZ;+G2 zR8>ZwXXl*k8#Gmxh{f8ZcJXGps!lE|4?3-9W$n<%$NI4bNkdqK>uP>tN+DezA8Jly zT=z|V6^z(-s^CGIy<|j}Zt%MkD#;sBp(V~(Lq4Kbx`vO?KFPkCreI-v;SCu0VK4g+ zM7rh>#=a}HHZFGO$XY*bgqu!{iU$fd-jv^_&Zw~`1r86y%td)zvjA8iEYE~lAFB$x zxmpbKZJ8x7*V^c_X?I*-SOT|Z?q!e>7Wc;oK|pD~@#D)p;cf)XX-7T!9irHL4-!NU zZGh|;+F0R30fpYL+c~&nes_y2Yxc&VOI`qRKBU5t_IU2-p6=S;$7Cp;f^zCi=Q^Pk zr%c*=P}vBPzdNBui>N(($O|=JvLE}s!`@pMam6f}IiptBsJHD(N)cU)5W%JWw-A{{ z3_!X!=!fP8%v+wbzRjnB8RXD?MG&v4oqT)G;u^HPQJwhhub5iHxRWOFEBYNU3g^s^ zEPpNy)L!R(E~D0B;@)oV#E721dm5cRzWVbn`|swBnb%l_Lj!GET5H%dMusV0MfK`R zC*&>pjv@!S*Xbjj+ivufht;2XBj{}_a!29bUQt~$0 zWwO!1Zj2Fzrk`W2QsC-7lQzuaYNn;!(d2BD8p@D<$v42VxQ@#2&bGLKvFG+1h!xfwCb@PL; zLse*c;kj6Z7Ru7re|cmK0r8J`A7X2B9W&0i40bvJY?gO%wP&|TUIR=0t40DcYCQRQH(nVPcUb=x9!;wu8wALcZC$p1m`vgGyUTDYogLcHsWK4^L#FA zZ6aDI9sAr`S?V*qst7o!K?kCk)NzUYs=qD$N`eT87wXBfqp@?WV*>PVz9P3#hXyrr zXtp86{V`j^{JzDso}Zh3L1o)YVcM@^LoyXPG_%WwJm_yZq;P9qY>7V^sI((zJm~hs zfKFA~n6h%T(3%u_qWXi=A@P=10zXd$s*xkmX1YOlW35!kNhDN1vBM~$y)q;Aurd)9 z-5|?Fo9ZD+=nfcSN*6a(4agdkbuyKU_JPwvmwo{cOt^Sz1X@t;N>e03;<vIT0qoSMJ8FTqvH;_9O*4y#S|8Q{tF!VuNf)rInu74@U~TGokfj-|!- zujh*?PBLb(-_se>zrw=1W|k6=zB7kwGNbLDiDQ;7i@oGrOd2p_`Dmr!QcgS*=1N@d zJ9#N?)o6IIfmGh5fuNeaeU&Wd!?f2Trv6r0$F^>*ii%`}uQq{Hsx0@Ps-AJJA=@mJsE!rKr7y9=` zC=UGr4Xl}PaST0sBE*^9Q2snn)5-!0 zl%O^gO5(sY&Ya|#&Rwu9#xqMSP0S;~z2=-A|F@XiOu&eKO}9sOYdgEiYSd>S?A;@d zOA2Z10=`DpJZqAVCUqfce58AW%ecKL1#%AkpeGw2z1m*fae%5Y9>Qk=*;t%_`~tfX 
zhDjrWvY-_Z2L}gme$CbueFX|SH8nNY)A@lbD{ps`1n$_`C?fM6C3eqqhCKK_59`M` z#P--OVdm@&>O-$$d_T8oyKs#6@(+J=|MpOXMdpt?w%MM->SC{vZ9p#$7rh;#H5t8Y z6rWSiD6h<8bL|-cv^hg~d{+j}BV;>0h%_(>Ay>dd9f;n5Xh84exK;`>)J+aNc+A9{ zgIo`~p)Hh}LDD&xO8Y*TN5mJm9p6P(QQP+|+hgK~XMWrVi~HWtCa4hzt%cQWfrS|Q zG{EsSmN1cf^Sz_1MeIEWD6se$Bx6Ja!qF6x$=(4bKnXTDf;SiHS&)4%&Ke9ds;fE6 z)PeQ;rCqQ;BoF?Jek~q5wJzHb-JojohRR0)vJ8^4GpP634eG; z%x-js57$Gq==FMhDHKt$|A;`g3G|TzZsQx0(Q>p9(}Z3)PXZ^!ezNeCNu%NzV2-O9bWT;< zB8}fy$e4DcBwT6y0joDxDy-cGNkBHxk^@Wy6=$;r-jS~-Pr}KdNtzPaj5ajQ=gry% z6reZ`mIx};NJFW+Wu2K_klYo!Z|%(8NpC@ij?#EmBmvzGOdegze}<*(Pxv-&@EuAK z*Pfv6K`OYKO+-20JdEK6DSV1MqRvJ50NG|M+A(0V7ukvv15&!ca{Bxd`!3Qh2P!DL z0ydk=BVS8WXVJivLNdEO0(l{B^FyuNcA8L(HZ%|}oX0p#ds@2p+u6dz{e5uU^?&V} zmb$)oC?>>5Vc7<^CN!Y>>D9_Ya_40}RF4v|^>_D@IQqHl2< zY$wv~9<+I7;O0Sb4f(Ey_ys=Hou}lEQ&(~r&Xy!Lo?(4Mk~aqZnSb#T48G?LN2cc$ zk{*25>}Lt~y#@%x6=$3c%QC*RIaTX%#!Id;&yg3CV% z5RQ?byx>-VUW1Xl51e<<_aL((vQe@nQw?1M-)Y*w?YH|i&#s6j^dLxuTzLNJ7&)eR zBRDrf?|?LO(a8|HKo!=94!l{je!>e31&x9Zh6ERQJVnfz2a>8;y`;cKMezz^h#Z6n z-Na9}!}~bE`)M;!yHcaIa&M-rhdTV?U}VsHNq;-7j`s_cOoXc2+h;E?wX2Ycn_rMU zATXx>#cV3tSpp{Qn~MacVYltC1IUi+8ex^t&SYe?%lU_HDsrTypJcD$L9d|JA^W^p zY=Aur>(P#I{f#UN88pswHS<>^a*6L-^cA5P8@i2@JE{*{f^h;$Wb)-YdGWmDu#5;j zB%ZLw(L)D83IdJbJeWRhIA1z%@+24aMU#OnmjAgaN_{2qnHrbm)1B$Y24;edH!9GS z;2m^DZL{<6tm#;*Od8_W&r9Wn%wy%Rgh9O{-8W0@YZ?V3V{?)FK)?ZCU;HhQU-V2o z*E*me~z}#Zsj;K{2XWbXe6Ys=DQaK_aaG$FUla=emt;e6MNoD)_Os!A2k`o5)rJ^Lz zf&uA-(91~YAi7=@&Y{72_uYETUH%~ObrbSZh<(mhnfb4ME{%Zf(O*~rCf=ULW@++58ZGs@yY?jyd6CqC#!#{&GCQ;_E5 zCV{Pmj2J&G0)OZh9lRHMICG4vG@|3dy{ND7pG)HrOGd|ig1_hei38ij5FUR(hqBlX zD~-^(sg+t2%;={Yts!B-o4|s~_fJ&wy|x8V%puv(Zu?7+(HTh*8Uqt5>KTu-`(=_Q zTeAIAZo`0yYLN-C{W+e`RA*nrZBEbH54t$39i>DX6S>}?2p}E|Z{D*g#(7?fBZU5E z2wg~|`UTtKkUFn-{A(3rGoMlh`a%?6Rny_i60$gEMikUUIPAPsAF}%RfY|Fl4QrIBds+#4+)giimI6jDOQ_uNLU zYjnh4t(HjI^h81Fq!t+Ih@6y&7f`sqefxH0w5+V8q(#9_4SE(u`gtZA2fa3Bi5Fju zJj&|37%t8U7Qx^oy-cf<_r~_SGJ?=ZwZ!PWxXLwyCA`*_sgr5}Lf)5~`#4_uCV`uz 
zsEG78CDHQACHuCQ8=z01>16GAn_M6R6_j%$JPuj67Ko;OT&P9=wm*8^^=?S6j5xF^ zjNRhJkSqJa6-396X|db6LryMKEccNzP$0RF2dy@poSkdZaQ}HmWsropexfeSdM;pD zhPvmBcR6E}*~~HT&6ipn>Sxtj2H&r+S~Si8-1a^=%5f775rxkTSzA}-nnX}jfTRMF z0HMY>b;Bc>PlB*Bv_+4KgXgS~*h0AY*n#CsSh`z<7yq56rp87p4o=SJ&*^&_Hk27? zdJYbNbX3IV&GC0HOdKfc213AvKl>?Sk??i*jXwh3OaVgoZh7Ct$~qG(J}%FcOZ@R8 zlM#NK#2Fm>fqh|KcO&;McxTliD1ItLF_T;fagoi-j9v2oMP zk%JG@!n@bYbhdGk4<`k4_@2GXP`S8P3CnJh zh4R*P3TDIAqo%sU0N?S!uV;4i2OkGj10S~XH_fHe=$TexJ5%H+-+9GMB~uxc^R2Rb zbudk|mVaun)@5HT5KCpc&x-*bUwEg`(Yw!!s?tUy-Lcd~PTtzI%6|X644|E~e;=g8UViCqEk67S`2+jpKK3N92TU-l(rAQ*%81 z?EbJhaDHv<*G*OAJcXEhzI{86b%Srt=ca~(6r#|)Z{D0++uoW!cy7O=LWPu(H}YBb zhw!Y}D%DaPjk`)q!th385?BcJ924)q?>@p>;dvY5^in1FAV-g>A)IJKLHut&OUAuu zYBwmRRqN=2nWKY6!`Q!84n~m$Y8Wk)D8(S9I*d6ygeelnq-XMhX?yWwfP*9B7FX5B z>Ouj3U**LH}S|rP$|=>Ww!Cyl7TyTFM93RC6u)k0XQQxU@U4V2;aP zrpsPbqIU{=ysZ3~iqC%Ok`1~21g+$|gM<3-htJQ7{u-y(I02J2a72QxT+YNClJmgv zvZ@$Qy?5X`?hS___{`pKZgNB`ruMd~$XKb^cUUkJ(> z){s^xG3wvs4(gDh$EX;n=X1+P$JUg{Go3P%C3k|1SiaN=8GR_fCv*NA z?CV%&2c0tIl*>2YcMbRc~COvBcJ0e|O)v*Es(m z=3v82u%q(0mJ-M9OO=DHL3(ADGE$8HUPd`%3f;nQ1TL1~|5ldpq!)@WYW{UJ|8JJz z33^AD(DdTuutPiYdW}5loYic*tM{v4Og%XL|8yOc~QV})H>ZO`>QBH}gk1n>Ck_sO2XXZ2ukyaZCvBct*3s7CASJ+9LV zLEV~o-m^XemU_hmMWpSpUg(`6Zo^y_0{pVcl_ zHhAMq|4#L3?{!3df_2Zf3?POxJtf=)e>dA|~M57viHfXq=3uVjGFrTsSINm_7oOt9o{ zYIv@8)T4~wuI;a`;@5oy`d*2vGZ~VQ8;eLMX~g^6Ev6heyYWVVyN-WA0}sru9pS_; zLW9bIR8b%=R%ftJSb7TbSJN#MqSE`0qKkY`@+!1V`=3P(2!AL| z9r^GRm+cW)bIMZGKG{{Rj^kPadVlKWaRq0mBJwxMj1T%9zSmQhMEDELRHd88M|kuD zLl6-5H;T{^ME)*=&3^^sk*34%O+~{r*i2u{I5)l;T` z2EQy{JC2?Q`K#Simqk5)>W4@P@eY#V$0xrT2fU`oYT{(>zHgX<;-fq(TF7*HgX=6^zD2IDSZ z-Fr@K6@wcP5YYPY-~j=R#(+KZ$_$S_H+yqXT@vFED7vLktSb- z5$sE=iFhGNG{8bxm11Ifnm1BbTI%H38nk7O*hxFWb2r>goVkH=K8UNK9M++V*MB~hGxG|XoRPXFa6$xZpn5tAp?5J@h=S9+DX2h zd%s?%ff~r6RD|?U#Ba3@YCMK4#T=mjvhKQ(ehch)Jx@+ebJYCpy}h<4rJ(B+a;|qK z&HXHQ=G}165 z{Vw|_feH$|h@62nO1@d8#8^Q}s%y7FzI*?`UGkgn4pYpXjc~|aI~Mf6!=@}Row*** 
zkpnsu0q2J^doo#wU5hz9@snQu;Hc9B9gP;-)4!a;1#SiATp>ntJ*hI+lTJ_FGP3TX=IvKOe;VsL-|rd7$4L*$8U}cU%jAo`%@3ObTsx%c=;`zYjL3J@4)NLarx1B&QTdXI zbi8coqCaQ;q%{A#RKR9TksiWf&G4++9lSZ#tpPc2P`3?Ebiv^^GVZM?-_lDrBNad$ z+auDw;*BYFkncc<<$egk5Pd=fQFcc95}aZ4)tTf3vKvwqQrXogI;s6zyyq-(%!S zLMU^Tgv&QW1n4ik;_qLpT5h1q{pHld=kD&lAcaOmKp@SUb(r$#M4me9+1i>r(}lc* z;0lu+?8~#{ua3xf8(h->Pmo^c;nqSF_M~$ReJ0)O2l?3Znc6aKFH|8&&T6sd{pFX` z*P6xPzUNpHq7an)NF+fACpq=cEZe#MRvSoYWv7RIqub8AAfSq2%Cb(GcJJ^i=29?! zCg@{X>OJ9m-~=Q3^Nl!Nf1IREY#zL#t#Gn)_r@DNwyG9NA0G= zYE2VoIEWv`#*#s1of_ld+=#)FfY`^sW~`ht*=u@7D$g*;5yNydm)Ji_fdaW3BgQ&k zhBjy>2GJX%Ou+3;`{81GsCjkF3g}CPwL6+dIH0WJ?ZRBVJr&A<-0*k6#YeyqGF2Zg z^G$Qc{LTo1$`!gZPjmL~V& zxe=fhg%R&HY1S%bh#+~;h(>MK-`SDzF$IvAkaBA%Al%G6Mb)^^aZP2VPM68wW^Jh*!VZ zmu)nueouJ6%RC*5=9=z2w>!|M+*UU5l{2eZqkhEifF`!n!fZ$Q&w+6V%LwQ7ka8&FSXgFwg zV7EWHv={k!3;7k=bE!XsA%YDJYT82mPnl>-G~R78W+h8})+@-J><^>t(Afz!^yl|f zh{)lp)SoQ(gX8${{B=*q`p^|OtcpBw)}IN(B-`ZzVbZun-tB~oDCZyF>Q459psV)z z$E6iHzD=7%a82+Aaafyz@^37r_Ko@o1J;9#8s5GUMU1H_GfroaB(JJ*_=Xx+vT=!2;+1 zGyV`MBt%{W=f#H+5%|d6Ev-t!G50*So)P{If1X@Nu3EhW=veo~_b{JiSW5U~bRi}k zc>h1Y;XPm9dVUVr%kbwm|M{j4kq{4-JpTm(^?cW+!qf;}zk7wW-FiW#!k;IBs#0{& zX9o(=fbI|ch#G1sF)?2si2S)i3oOAKFLNt^HRW}Sc~)B5{p`~7>#Y_RS8%gl1vXN! 
z4H655)l9Vu#Yjp@f_g!#hH(YBWVN*7#OIWr5RKQmI>Qs?RexqGWFalYs7(c=jqGV% zp`rtZipc%*2Xx06Kgq~t?4F#= zJ53U#-^Zq4HBi~3zkcIJOPHg4M`i^Cj(^1*DxK76s~HFN%Zeh6A#!zyI>YXbxjVs3 z0aJW$c5zY6v0x|;zahW(?h+#X5xnT3xtz57E*Mq$4cU9R%?^&0oM)9?_ke;?9jk1B;j8@5lIJn>+ zX)J(Odlvq~vB>iXHwgBCpW&7py~mco%?9(#a{(xWE&{VB34++*siDJ$k8ix~?9Jz< zQo2jq#eEqZs3KGugv56`%v?p?;b@Cio(@QZ|0M)bbonRU>XS4 zq#3c>*~sgNbN?Dbx((^&HmBG}JIf|OaGaMmgT65{X-xKfgH{h`N6*7hQ*%l>7NQOk4Z0lP=*F_) zHTlJ0bw~upux+AsBBxs&L5PqRg6aNt$tmz@e)usrmx)khwAQr>f)}uO5WJX-ym-hx zT#~J?lFfML@I-_%9c6GuR+&G+zpli}J_t%c>Q2RKZUv8t5g4c}UsF@VFU>xrdoy<( zDfalOl+L?b!IIL`_YcXMX;PE%;W_d>a9>B-V-dk~069Sh13ZwALY@BO46&2zwETjF z2hOR|E%^E#IeaR*|a`3J{;N7DfKGs-eK4?WD)XtKo zzt+%hx*K`_@o3zaqvdzr3X`95h8|jX94a?8>NTad`;5-5gsdz`mRWo@(;{1vISgZw3NY*=%s@kY=R>OCcv6AsMKF6=k&<@Y`wjLJ3%DaYskcHf8oLg_bI|9d z=qzJy_F$>dTFsNBzeXDx%fk_IYA`u=f7QFp;*-6pXjX5n&TiCBfyqDyi(LB8{_fqK zja`3hl4ur^@eF%&pOYU~yvfMK1q=&ing43BMRT#dIS$ACh^6^7L`##~eZj?rPw+Aw zCHNhO-_(SWO17Vpna~Rs&X2$RfCM4k^p`uMdvuguY0PZ3?7>qDlb}obR&`@qv5LRd z=pLUPFJ*L{h#gn1mfN1arF8sV^OJ)um7`M5eY0cVevi5-O@#4@r#Rcz+ql#5@Icp@ zoEoxD<~H~3`;)}TH-9%s!SyqJ(E^wj*COm`4^_~#Ix|=?UXiq5qVT%j$2&YanzgFR zfm8MY?}h9ov`OK!={;N$V#DW^eTS(ilp?>2iY=7W*D39PlDfV&XQMVtkkl{yvUj%M zXR&g!4vxyZgW1+5?#-H{z&2rC2UzPgxQv_e!x%#C$A!;V!VYx0c{289TVaXNuWKxD z>+WO+gknH@zw_|9h6&^_2_~_WUkkzLFm>T@J_u#vK7yZczpY(YFndg2{^T_#l&aejF&1;!p1a+y1wP zVEOb1KO9-Uu;+cR+h`=lVdVKLPD7okWlqsEHRal5z+N?hp@?PT=Np$bv zB-_?X+*d9)m;K%pF&6f%tvk$`VHJd$w*&d_A>pR8NTY){3TgZwKPspJuu^9^4BP0Difwjy(fJC(z#?Fex`XJ4GaJqakeLpqzt=M0#xR_Z&EuNFu?ki>Ni@ zPbPVmjCk%;KQ7K}?pg)!5HclSvBa${<42-^*D%^4(Xn3?Jq-Z6q|^fRa%atJbGjne zYEG+GkLvnZn^)DT>E@Rst3?#}z?Z6^@0$MsG?}B8JO@t{&5vg)zKeLB4Lsf-tAAtS za)FHHfeha{ZkEjqYPoA5r}8p<;kWi_ zL=0KiWXLbl7XkryNk*!-+DoDhp0m-ApnHT++jSh4FJ(nzM(*AyS|OgVJ^p?0cz$xM zB8#M$HB|IeQfp-aiEP#Nb)6$y#Ylh(4gKH2ShnjyfNXIH3)@&ebWe+eH&tZmGl2j~ z0;o8irVj)IdiAR^BAwd-iIZJ~w^cuBr0dDPV8kbMwtRTza*yY$J9h$p9`cjMA;=LQ z@Vjx=Wp#ToluUY28LxRU9Bim75jw5lQJP6-#~jTG3Aych5XCX+B>=2d6XXvVR^`9_ 
z3_}7YL>kRF`IxSU36z3@NF!S6D>TzN7b05{@;lfX06WfJENTvZrSw*MQ8FLI62PJn zTV`7$MD@CWi?X^Scr1+;wkE5(dt!~X;vVK6-C6#}6(a5NqH5J_o}2Ju_&2l+iMV8WG4*r)eUJe;c>NHB9vp8*3&fiI_@?QN*<=WiLx*IF^{{w~ndr>PbB29;t!FN9J)iAQ7j<3l>h`jt%0vP%1Ew0EYR(}|K!01RU{#}&J-32| zL?J;!3jb@#MADNbD)%_{UmV(gQwVme;&z6 zYp!+CCP_i2uj>Ey{pSO6&uW+b?}2OoRiN-3;zdw5H&K`}YhmIB-G2)|Tbz_Lio# z4tk@p*w6N~xT21B3j8qQTkh68JSrfI>0H6*jF_Bw=&*-raT*W7!G0%M9UGfsfRl1F zM&+r_2BNJ*7)#&2y+kN|ld`J-+*d;F&`YHlC7!QVm!s7jVq_*1+>(H~3qy$7^G6ddrEF^N;5$ zb+tyVABEm@-$fgbx_CPOP!=1Vlu}0ES@xErkXYgv*ig zdXT;3jx@z<2EfLA*ogs78&oGb)o-1}9s z7S${xUmk;tW=T{VbkNOiFv`y0L?Gj~I23l;A21wx=ZE`*3Ye8EHP&ktt{?pPYA7Mj z%1bge*n}7x-}?T(VCsm;-Lh$8_%{2qONb~9e;T~2ch{({?Kda6#Ky+TX3VV(pnVy_ zx2TGx4Ktqp2BeS(hvvAa2{Ue{!d0B38c@cdzInNmxtVzFfd`RF?rIFV~ z5baW5`1hM7lLYvkb;bY;y8M+fG&KA)*ymqHmZw@Gdey;Th7$>L@O}kP>qlQ+>NXd~ zBg#na@PO6+w7Tc)-=YtsF+@-uf+fVMKkl`WAv{v*k@Beu6FwWL)NS0=EO(JSaUQyk z|DcT9RTIP%|KOLpECrE2QP=SzphY?qZzir%{70m4CfcX#?4ZVb*Ixu)g@<>xpiz_* zydLNDzVW5(rB$S>D_15oppoM99)VD3g7<}O52tk!s#TU*?K8%?cieI*I?iGyC9V0n zB7yLjQ?M5Cg7vTXlzXuMw(gE>RnC|aE5Vu+6I;c(+d0E|%vYFHwf-(}WUwi3C14M0 zBlC&_grFN{deL#U4!N?Z4#|Ra5FT`YU|B>t9z8NNoz@TY()g&RIb~rNAOi8rCvhX9 zbbbJ~;o0P*q)@AVDZ56%%t^asaBW_^6@dic+T`yYc{@+95M$QGRp`ncuEuq=r1L(Y z!uuWO9RMM`EKA5-WI%Uwy&j(|#bwtx)cAw(e$p}%c&nME;31h|jFwqMUeZA#+-#;3 zMRyw8a4vwvh`5>vx(FHR4XLrv&!6WgD%O3HFK^H-%SycFx%;`^Qt~N>>UK{S#3LfQc)}KjKC|?qE54rm z?D9vS)a5-(kh}*q=m_V3d}oEmbI~8Slo!1Is5wzGo&4k~f;TiHA=z`IJ`UnzgwlkK zIfzzh(0H9b7qnR8fN1Sz0!NOv&AqMF(N}}nfymJ12z%$a&Dyd};tLD8W`m+d8@BK$ zr89$wRg(VUZ5Nsf>3;W!Plwx!?JlB>A08^&&W2!MW{Kov&x}>8nuwLRN{d=K)J59x z<+j)Im7*guKWdR(q0LxY+-gi+sdGtKa8`LGBDr`8(UA`5(aFh))h&&z?eOjN>3^`o z`Ds7V30`pN*Z12p0#?75J+QG!TRU^SGM*aLk;?ovZnVtaps6NQ>x6Hn&z<{C-^V5% zyB|NBcy2Eq@29Ap>CAo2@lJD(;b0AsP+C89VEt_EvfGB1 zY~B7@%GbJkCssmp%~}SM=ADwuEva<2MZqC0oq+*{ZX7|C(~HAh+yrNB77gJ1h=+F` z{V{R>?C^WO?b8iVPU0eHy&HxDu3jns={{K2qy%`R4Z>B>|<$yORgf2FIS_WC5mjhril4a{IT=?2Y&G+SjT~ z6F8lRj~_bYxgU9L#mX2E(S|5` zNF3d|EC3Bs+dPmhD#D2Hz{u*%3v_o~2?n4|b@s!N)C%Wqw?p@)x6OcYWF7jaz6*nO 
zU@ZA)2zaBVp)8()&VsLe$NVf_xfk`efD>R|-Uss3W4D`y^>X>P905IiO?kb?K~=%% ziA=}ln6Nm+)I1Dit3y|JIy^ODQk%3%kwWW_snAA+T9k2 zy#rR0qaM~}k8!#WcHyURdEUqCy)Tr?=5p}o`I)8DtX0V$CcDpB59-nt@E6OiRCDt7 ziY45)cE$D7jbD^decY28w=W82q*Z~SQGLk%{*N?&U#C;C4K1?gT61?-nX?C%F_>8k z%aLfO*?S#0BuS^(VxR7O!wR70>vEZCy|EV;VsP2Fu}{gA%e zId?hscX$Mc-P1#!v%kGQUPoAC*666dwi!-;rq|+J=L}_&uok~;d%H_=ZCT9kX+#X` z(qM^3w6C_!#Z|%HR_gtD=H#}T-LF#nxTigmR0P~Oh~sG%<<=iP_6hf@B4lIY#9mxZ zY5!Q(I6{)~39qbUvcg1$2R%%_S?^CE7pL3HCccpR0>QDia#bz*NGX>;@2vBxX-g8m zL(J(uz-h>s6J)WXBl6`HP2GQ#rz@4<4B)9LNX_PYWEJXb8SzM`cU%0*!THm07)^@K z{7XKae|m~j#Rh`6GS=nP*$R1;>BztQRM6^S`kTVJ-zLTavEe7Ua0Rqh{+aCSqYMoP z4E{;Nd_angU_!mZT!6oISDj{47q<`B#*gNs+%M_{W98fG<|+i1+;020xBv3N>rkf zMac+Q6iEq67EuI5GKB$^oP-jPpu{2yNRA>oOB5wX36gWFH+Ny5bMCqC{dn*Gx$pb# z{$bnQRlD|Hd+oL69CM5@ZSG##ha~W1yWq0rdlM(tX zv(L>(TS}it8@&9v{Bnm%xmEV&QmNHKcb5*mtu_?>Q^Bcdx9ndkvbmLcE}<XRKbAfrj>r1jbUR|IkYQ=^9qoPSjn`av?6LHq$2fg?sONZ zq$zE`s`4B3Rckuv=1<^LGjm^XU3$&usq1X0GAAAw5gp3@-mJ|2b?xLSyiJW5z4viG z9WyQ7@|EhY-thbPYc$Q4?rt%ZixJ+3pLiU$GUsW2(sYe`r}o4cH|4ysLv0O|&7F0cfCpe6P$R2}Zt-tvB^qa7CNZIE?r#(cE_c3HXcvtf@1 z@#=+!-Kh~@omE1zx9W4`cJI;UUOaVDSwY~L=9c13w0C)@L|EFW*eD5V!7rY4df=f{ z#P9EKFZ0p>6mByY@^d7sFjmpide3UFD1Djh5oKtE zwbjQfi}x(EYXC9}C6YV+-Mi@^q+9JiI`s=Uh4AZ>FhM=W?csbqBau+&e%D(-&wuS1AfeZe>dBAT8(UPxK1QG`i{-dl3Pwg)DH$P z{am+H`cW6MGv4@ZUGMi&XPhU)`LqJt9koHRT+{0#Q!H$Ty)oiTKl&=Tyh`1&tg!-1 z+AL8zn|aDy-m30dyXlq|vqAI9E@Y-1;XPV~;;R-VFTMUcwA<4iOrpf%8*{= z-QHiz;XYFJ{sZCet6!(gv0rzEj#6G*m+ZeB^RBI);mIG9z{9MvoN^Wy^46e6jt66M&Lhs0@5vn2J-%y zG6Lash8y!=hLYwJ3AcfQjLC9Z!m3RM_Z^oNDD+)-UA1Z1Cxc>kO+y=t5Btz*-eH^2 zxYd=?&}Cakx0*C>3tT0j6upeR^(k4l$gF1*B9z}*Y3@sDF1;>BiPX#ZS?G6S*2QVL zc!+!Yg|>wxolvTQ_iZg8FR0-^^J8|Q%(k<{B8*w{qjZpzI{WazpJ%wc97m}g=GR!2 z^2AET-BMm$KTgpz9P(6FKK6uTcg)PT+LI)Qm(0wBE-abPtBG%ZoqZB?xR_&-p7$|! 
zA_V6>nEh!%FN*m`N3D)Eq8d^Zc8_c(yd^ zOZdR3aQ}gwB%F6zu4h8&$iooAj%Vk>4F9w1(^WG#NrFSltLLd!merw)EUs#DvM7Xc z9sc6EClCrc3nta7{brm$;QXz5cHT&8$KOM(PG|%~UCx`6tsXaEvmQ6;+>tp9DcBjj zM^XOX;>Q4~qRl&ty(Vq7ESjA7PZyW_-6l>oWW6pcUbw+pYig>=n^!JWWVAV^Es0|+ zKV!^*i;9dYoS*Z0FlaFA|5{PA>qo0;jwgM~tj@$HL7dCG&CjJr;pU>;=~1 z27_s}a`hEPB}!`}$BCXC9EH@u`$HyMMK`^S_!M-$MeP-Do6GZtsWL_tzulcJZ=zQb z8&|4`m)UtuMb~(M$AoskO>?B4rG-JS(F?%Ae&{`(nGt$l;Yfyb|b0Pr(Hh>-ugu#nDCWeh;t6Q5;{Q0Gjj6ue0`?$X`FB2 z_jD=-_moghSoT`?8fO#U^$3YY)wwSYU&WQk1jbiZHnU+DgPuQl2%){#OlAZGXZ)5I zd&8#`CTrt7mnby2!fNM=@6=pJky&fKNh|h7o;Cv@0TpqrlJ+g%D=6vA_oQX@mX5vN z>tdh^Ffal(nS}vzpJmHdkc7x6daf?6uU56(cp*thwpH|;7tjACD7d%rwR&JBbp+SF zqam!jcK^Yw%OLNXJ6!iFHeI zZ;RT{#eu`7CFpECW_m&T$*LhelEWSxZ~9w~#5Sdm7+sUust3(sOW z2xF01k#mb0dpptJxAU85Bl2~&YfSb^5>!hh7w7Vx2XGdBHgehtns0O*SKeEy*vPf! z+tAoj=E_jnG5N-}G&`(D>$>)>@&9>EIdV9^ZM+OyOmUClx3fLL0Ko}bbp6+-untaNq0v=@L_|% zMT!O!*lAP;yX+QOds68x7aBQz$&KOCfVBMfE^hh$039Pn_%foSgj_-0RErm>-!0pb zZ<^LVTT&I+SiOvzpL4rxWHnpX$kJ!yDonR>9BhX=#eyl4T}C^4nVpTutdINxOm({D zrSBw8^j?#R8~r%56wihrR_N=Zc|e)dvnz|8#$?!qvIUE%fD$f&TWhWB183)Imr5%!Go>2_Fuqr$^(13F8i#XGz-cWt!t{VkvXa?&^= zAe&?U_~d8}`+QuAIoBO>Zwf<&g9w%{5VC|m?XTL<2$CkQ_n)5CSEksP-lp>a#zhrGEc&W|Ja_P z)ZSz;TsL%#tQ;U1yC=?OZuvm?EkHljPq}K|vL`JU8H1gb&NkMl(KFh5ND))S7yZ}5 zmDD&b3)bz2ep&o zx7R`|MUDx?d&H^=L zz3+7KrPT?Bp1SEsj#DBudY=u}wmUy^&~65bN;(ef&z~^VD+EP?wt?%QPoRHHNk%-*~vIdwq6ob2jdM zs}$x;?Wh?3lHGbw(y3mi2kC6?Yc!1a-G`g$>$MFc^c-e3AQ!!z)i+)ZOv(B#=kVKw zJ0lJGvU`(;Z0=h(*3cME7=Q`C8xsx9Y&zAoQ55vZ22dQwU?#QpqNCn8M=V;a3{J{gcLZ65_;46GeV?Js$ikrsXq(v%ur+nlZzNYUhK4F?BV@ zL23iPO1euHl-EM7l~1`fGd1TojU{+Vxs)$|BLECjx{=`!uUf(z=Qp6g@sUv0*#7!a zb`ANt<(h|{hTkDIZptbT4Wfraxa;yYfW4MD%2nzW+#mkbIAc5vH@(@#Cu&W?sLl0g z{Acjt_0;41Gz_BwQq-G2BhU4exeXte%dozwp1R%cJx_0P7W$F%tW%y8$TQo~J-1Gk$Y) zp8QaMrVvQ;pc}2t30Oj~`*BgR(5-IeAm%?6@o8MZ*xJ$Z#a!d^b-z)+?E;SitPQ9(#avRCdmLHzzIoXW=Ow{5ndD(mNbHNQQ zZt*FLol>m$W@}803ts9$`5-Bvnh15elTZjo9+o%Y$%E(sIOT_%*pflW5X7@xbHI$& z7FqlU&n>RhTku!kX+ohEJmN{2s=StaXROuqG#1V(%#5C5HsXs)UYi<*g;i)KF;jQm 
zb?4S>(cJe1PmxIun(%iU9qaUE8pB(u{0yy)jwCns5-Xx87}5`~?ksr-shVx1%Fbi1 z44C@S+*s?Q@@Avs&)oGw-twNGlpJ)T-K{0!)GOX!Lo#1RU^e;&ug+sWd2gqaTP8=S zr!OuBU>{Eu|7=eh2xPi-Au5myBKAfFsDs&o{lRD`xm=<1&pHGx*F@h>Ms5c1j$7yH z+OCup*=77{reF@imb$H>)10N`8qFq)Pb<?!;7>7z@x~_wG%K(b1n) zd-D6I#tV59@pAqS4VsvZyU(`3skAf1dH0>~(`kR0scyK0fIcg@jft@^;=h{uKWR`{ zUd|U+AJT8=7Rz5h`PY`J=k_{VR8*A%`d&(#7+qiEjoMJlU>`sP5?TrUvjh5;UCR1x zQ!haVcA1)d(=(C|b+;avduJRt^s-hR$8E{0A^3*dI0JP4wkxpUl+5JyR*Gs^LvCf=AD%t|j-`<$r8*)orLbf9#E*YmCO4xKX z-4{!-?-|SQ`h{Ix{|eV_&Zqpl+)|QeCE-i>*BEYRjw&w^mdww?i#NV7+)69paUqn& zsvX_}j^W>io#M!fTgpb4c87*tTkq;#&`eUflZ)lA?=l1Ac|haGZ$A=7A0eZJN?1kv z4=CH#(KucN=q=ry7omkqdj?{pulKIR71Bj!*RUC1L|8rM*aew3u2BPDHFK%W^xicY zYYmIRz>T5qs$S&@vhjL9L*9+Z^{LRtq_QkUxS6aOys{@}z*9ZO%`y22^KoYPwRUB^ zM0rH-LxYvpxkBz$6=h*bBfkkXVTlmuJwiCPCQZC_JA1PfXWHtnRudURz(EqCZ6xn7 zokMk~=*01u#=Xr_gxDz83~P%Q2Udq4f4B6dlWX3wz%@TEZPDvdOe{$rR}W^AcbLF zFa2VD(#ICOY&adovvnr%X1LfEMU(W%;%iESX#XdAR~OA$>Y#MwSf-M(Y4v5DJe5AF zd%TqU?A4$tLhG{2sh-`{W{U8R^*W0(6IK^=F~R;vP*&Y+8F%SAwpEp?&T)B}FG_*0 z@6kv>eVp6%5sCa-RsU_Qt``iMZrha_Dd0#Ib31EiWLqpz zc-L+T$Jd&~`Ezkv)z*=Kh=Z@b2u5^#T`$pOsMzW5Sy5|~zuqHEZ=g%}I#~qkA;kN( zK!}vT&GyB#XKwN=^qtlG(ZiAPTXohIk6!1hnw%(K`~Kd&LiHIVJXOc}iSNt|lzQ}P zr^UqgTBM#BM3*JcbG+M~3gI`{M8A-S_$+0}bH1LgaWp`Jd-Q(zGb%b4QuIUxON;pk zd?fp|q*lR%Id5axhRgEQJ6{ngmhgIaie$ZppN9dZWY|(Gn>sTMHp1Y+k+uW7E zMtff<)c5IL_;M~q$_=p$-@R&1Y(R6p8NY_%<__kA%NhECcGq3asc#|tDPgC!Ti~)! 
z;`SIGX*o`0Hzl`5(q+>Wx4U4woFbodQd-|Xy>*zeHE(Xw@4m-1wsXXhW6rLPx$^?hp?>xi|G}My2S)->!Df|&ja&J1r_Jx22+yv6>dxtRr^H7Ix}vXupS9cD z?>TtM3P=cJ&4bZVz6@>>*0W#cX-l>;8^*lHJMIYAIgY=PTKwu6({L50vWf4u8opD*xp}fVZFJQg`%N?NTHECB zlD#N0Kb&mXTSZGHM(QC4f`Ba3!3|KhrC^72M`!$mHCvt9`?9A>rFZ=BHN%Q17{bnlfJ`vAFc+=j!-Mcn1wjQQlC_6W(dVvQ}4u zBx6@(uc0`z(>zNmx!;d8{sEiuS-$9?TgxV|AB7K^8|>yc0KA`IxHTXCot&}Dr!%1` zRfN`cA&8SR+I>7~S=TPBDIpuX-Ct;Q)x+Pf-dbD&cQGRB;!uWuy-_Zn`aC}HCOe1J zG6R{R?g#JNky-O@cjQ*tg>1#Q7O8G%>)kI6KD(4J!bTMuRfj)yt(_c^6C+G^I66VA zW1-V}>ZU%{#j8YSd&?2h^Xqp;*XP1rcJ+$VW;&;uc7}RaPN}sHw9(vg)KdEp)1|D- zVm)|eeStE8C7*VCEeg_!!L&;Qd)>csts~1E9z5;-I!}_*yP**vJLqup3#}(-2v^4Y zog_6=4YC2ApeV+sogISyf*F}6y+Ovp2wkr-AK3b5FValC=%H)85bt0Z<}@*AQj6s@ z=F6LWtdv>&Tv@2Q>~i5$eeZoL<#Rzz1B)wtGMd%e>Vfgk`HQ!cg6KT0wbZ}VLH%fR zAb(OInpztQU9_9OwwGd_3qdwJCh@Riy+6vYtCxqex2h8+_#X)pis>jUz1cCRl*(rE zZn!KmA4vlOd!DhzL{#KRzIfoqK-m}<>y+2jtd;G6!etZw-W&WbHk{k-i=7V9QQJ)p zBbLE*k^C?N^7u&&tY6EVPciKdHWnMB|e8DJhpv7iP;F-)~HA7TN*mC@d6y)y;Zq5?9=n zGoK9@KplK^c89@zXz7u!=V6`QuKZCc7L|l{PrGa3R%=}v;a!8u+lSH@E#%qTd`)e*dCzE+CMU&p{97+NY3JK8{HM+_(*N+M}7cbS=xI;BwVoh{WC6=z`6Efo{j35NlQ$#ik^z$l^9XU zcaGCp$`AJLk4R_(WNxtVdRgn%b@RNzi@)jKMk|)P`YB7EDK;Ojm&Lo`k^iUa0hj%?9 z*|gYbbav34W9=?>sY_$Z4#z**L8%+_At$`nP}P0zLzR6beK-DcXwE_KKfr&93tD%e zZ5;V!)wbl*V<=QOk>kZ86~-hQR`!^ehEdH>Jm&P(%f*vZ_+~?9LYuv1sV_t3J+tbi z4Vuz+gNKn|aP`lif4VU`1tB&tGhAW!+?+>i9cKiU7bwuq;yr3a9`mneiLKIjpVSVa z#I0tntx~Rw04=5y!zFCWfIgD)BnhK_%$CsWlJVJE;fdAzpLyoO`V(BtW@W3*_;Vs$ zI@LuA5``lTK1gc4t~tt5v^L^G*ckC1|3*hCb)RMaQf(_=2UnQk*3O_QwaxvsH?u1b zreX+5nvdphl}CSgeOU%<0&3Vt3BjPw?WZa}t5Dn9|7&N%uh14Ag%YiS9E$RaL^rPbf??JGS~4)-wkwLvoo$aUZ5Djb1KEJkm;H`pmQ0=?8L50dDc`{?753ruRD zhPCTpP13g|F9camtO_EQMUg;Qobyh(nV?MQa}lnE7x=>RM>{7dh5FD-x;yMtD17TV z{q}9tO_hgwIgfun8>P-Ew}nC%P;KO;R=qmIFW>+AqNzG5TZ+)UY2IArG0#tEG7UF( z7ZRA>T1cqo@7nzL%-%{%PaQZ7xa?ku;sU z3nu1?34Wr(wmCBJ1i-ZAEC@ueL0klCqSavKIyyuVibBbNT&8(^qi_XxS~czamEr}f z_cA@W1EH&w)>LJAVgt*aRwgyAJQP0h!2*M2b|XLX8C9bE87X^t$T@? 
z(_L#{o|+{5+-YT)lexry&@}Y3yMUeZAhs@gec8xEew`z;Tz~L^5w~4{>F`BGyO?cU z)wZ)y%j=7tp%|UO`YQK@(mJuh5A+3D^bI`U{JG+|28Ds>)7$$JV<467b#bzp0-gYp z{?qWVrjwyA%}QL(fl?S0%=}(H!yzTI{SEX8|%t8u!P^9vP-`SxdDKtRq_L&zqodPctwO_b3o^HZ@nMh zkc#{l#$F)XGt62o2;x{UK*Y@JRlXR+%7r+!LX=Fi56iU^O6=B3Bh5icK*@J+ZE0w) z+H_d6sUHtp-h7!vUZTTV#p9wr2Y#0zf0ZWJ?Z{CUoy(=RPWAF@cVzu&x)>5`2Uq2x zYmz!HU&SMQeAE7TvedOZ6BLIjf+Q1HJdRaNfh_R-DFb$HNI1l5`n%M>qP z2W#6zdAHXM7yt&VE2IMWA~5RNAC*PXQo76nQn>VP7cJ}RH+BhQM^40MXsG1>l&$r)$j0{i>x%cT0EGIbEqLK-C2Za$jZ@Z4J4L_ zUv)4fX7^TPs$Nij`c&9{qwD+i!AMwi5s)2-0xpXOxOmH8gAw^0FzDzu#%Jy8J|_U{ z(1W>zy;%$yHt?Nad62~Vi3TSH8=Ip1AY@TCLb#s~TlX8=?=_p;K!m_TFH49kY?01yk$;_|qkr3WqNyRUR z_}rE{RS%XmJ&}r@{&Oqq?sLn%>INp06KHuuABmlcu^+ASq6Dj*xcTCZFQJy9`mO|T z;31wW+*JD*L)iZAFc&4ZAY&Gxynyo91Srxp{%`{)lxl#uWJ^RDI6v$KBN9UJ#|ciI zzr7IuJS8VLS2Jb#E)4R%PF*G_Dkh-0IY7<4^@cza2fz8pUWj;t!mZBf_Md#7I;+Po za5-AiPE^8~-_A@>!2L3qvwoVcZ%O*tTZOlpUESQ6SjVSO);pfGn+KdS)Sw& zFg~7JmTj5N=JT1bRoIlv%wX`t>O39riX|3pmE=-BWB6(OOj`UCYE|*}xbT8^H#-Mi z-jSd}b4I1B6sR5fh`BWib4`M1VFSwcF>jY>529QVpM26%)x_Bv(d#nsomS&3@w#Ut zHymM)Q1PIH;vOj~nOyRbM!`PHN97-sfb5^@$5w+d7eLE=P>~Y^@rpc7Am6~)!Q?w| z;J~!uD&aX5H~K|<7#hyowm>RrHC^1w2IV|6^gTDL@mJv@yjnZyCvy4rb*kWi!V2(s zSS4_4ecxZpLUhW1uGui1s2D{YRkxgvBRI50=cf3_=TxWA#%n7pbtjx&iI-psLFMoF zd*sQ)FhdA)%saoO^(_s6p;!wj4dPL{9KrTKQc`T~ko04Q*nIxxht zk7)fH!5Z2Ju)?%J_b_JZeD8nCwJJ64^oanP?qvgcb=iy8-#aCdE^a|hb>l>bl_752&U z71W|v!s;Xr-iMfYngHfJ!OF zH@odabn0&B>A+TDduz)?=Y*oCg&-%*&yqokyPq?Xf0%qX0!qGxf}zusN7acy!e~Pf zgYkhXlfDP+L*v7phs&Zr&Dmc9uxZOj%HpFri<2UU4qNsdCsCRXH{hWG^8|*O%@Iid z*4IF|e{`W@^9JkW$=k@)2OerQ8_03WbGu%ta6l&k)Cn45)UVZWbzJ8=5FxkqcmAJc zHZ955aPLwG3@`i$lxBGP+MvWvZ;8lcWF}6RnFpVeeWazHU~D|CpI9(*=SC5tVSRezg(PPq)2u-D@?VIRopu zrX0`&ETWx$NFl6LKww2tB7Y4uvtADU^itwKK1uO4vnPU+AQFlW!=%yKFI(Oze7Tct z_50Qw)N%pnISO`rmo8m8qhSe~4u6Hfzg!Ch^MHF1Fc4~C=%}9)PFKg3wdR*Y+3R^T z9B^omI$Q3nt=fzpYb7TAn?roT?JCCg?1_L55;JDN|`LQT1T@|_n+xv85~b)w38h~526Yv&AR@O$`={QEgLAow2$;>tU1 zn75W3By~aOQ32tt3UOZgr80(4zysw^NVE(g8Hd)+K~qAL+tkRcIr>*XqLj0_jMU-! 
z@reW0rfzy&%el2?qki@{8R5%xKiwP(r(V_-r9u?sp#k+BCyS-&hpeaV$O{pBCrFtD zEWn(B$uO~V3ue9aRbQp|R}n`CL9dRWCMj3!@G)dzY8HPs*`5WQ0ppQ@{v5lD_&gFF z;%R+fK661vUlp=3Ah-jOO#cz@S#7|C`Rs|OAGL(jtC%gXtDGIR#Ukbje|#V#vlJ#;%6h3D`ho6htj2SdEXIrW!C)XPue0CZ>VA{h)*kei zaz8^C*{sBK?A}^vEkyb*UTMXgVB;A6>mA53ZX&K=NX!iOmaw(*q`ZRkydGQufYL;L z6NfngrX}@6AM0h%s(2&{Nrvv$^Mx4B-E>`P`EDOMLRo(x^+_W(LziW{BJ6w8)AMtE zLJ-Lm1w}i&Z3S~B`gZc-d3G4Et1fCyP|7I?V!n_M)V(J_wg8fPJev4Ze<440;zZ!T ze+s2ilam>vT>e;mx06q!l#d;|fnJh7tg-E%Ko!&SKC&s1pZ|6orxTIZ^BDYy$eh}P zBGy2U05sOe50LTln7Dy{7EkISsx@my#uWwng?dL6>L*a*`IinFG=K|1=74baa@+Yr z@I-(>4UDqD`NzJ=iJ0(#oe&a8Up6hh@uh*yYIuK7xXX%{M&G-5Y_i^Dz!lTXY+Z8p z+tO&g3HsyKI6a8gS(uqael0A_&ZdFN+&JqU7y-0KiE)7QH6;Tt2>uGb+^*J-o#mq9}S^2>??A-w>-;y^XkMiR9)Bmu-?wXPzOg@zN zZMyaase=BDxk_k^<4AJ`r8rn)FRcszqxRl+NJjpo^8sUP_-80{5~bpkDUi0G*&mw! zZC$N}9x~i6&>en&eKNhsb3L>4JhC$>~4D#%3N(>(wL|9|ah{r}y-`5#^O z|F3TzaeO|5|GQ;;ucbwmF&42LTjfHmmqCYz9LX%O{HvSr|EF0qUN@LTm}mur`lk%5 z>N*9mnTR#dLDUrpMZl;jZZb-U{Huc<^fVXlF+?x8bcqN8?|9T}c2kg^Bd(SAYvN^y#!~-fqpZFS&>1TbED+nK zO@t_hhO#gLH>74aS9 z7Z09Bl^f02uzC)0TMB~VA{?J?K)CkzV_Va^i!See{v&&i6$WEB@aHRtr?ExY5Q6D1 zjL8EEU+NV?#0g~he}4H(flg)}sudbLm@1_Y=^ufRT{?X~W^;saocPs93vptQQlNoE zD5x+!0d_jt@b>P(LB0bew|E-f-p;Bw{RF;FsN~~+g0zR~*`N&eF#Fpt$>HQ1WZ!P& zqUrOASQyfgNg*Qk+(BTe1#078prU`~5+F0c>=g zq)z+DKImkD<|JC|aYw=Vpcg=WmGJz$wD4^QMLNX4S-DP$0~{Dl!PLfw>5qMqg%!jS zeJgcSfN*JMz#b0vTY}MH&cwS@PEkbi(a#A9vWY#IDE_8UyGk%tW}HCmv$)9-uBVD% zbQ!py$~}thLv}j|f0j#Fk&#Ijx*35{_f>j}_=IWcLGb3A9%=;=cT{pB(PwO1fg>63 z=mrDx33dq9>qDiWyi*`@I#8Cn%j|UT9S$T*^=EE~v6y3)4j~z0tmsY>&C|h~q9oGG z1tl(R3UqI|feE4y^JpKzP~VOa!N-DzUzrZ9e8a%Ji}5`NNYx#vE?5eid`d;m@ezq; z<-a-pP&iS5q{9PHwSxIz5RP0mYV{*Ngu;9ITvXH@znvHp$fj51T>yzs}67Z5@IQW0btu_0#bK?0920&dK< z#d0PJ6&sFJ&mmXNvLy%O&Jbdn>ait@zIRPmdk1mH%jQSk?hlb-*|i7^iqtJ?9kp{p zl}~sz)!#)fR=WQj{tT$DAni=3IfuCpuR#S4?(xhM#B%u}xUhm*)cgJ90A6uNV(l6w z7!3A9n3jqPX#gm}R&9*5+#R32r9t-k)hmJ<0H}qk0^Mu7 ziir6Xax^1msg8&*q?YJCu#N&VG7A)h@!$mrN*PseS~m1e~Z$$Em?+ 
zv|9of$OS{!_!jvtik6s@#FB)DLOnsq3j%UDq|d?`I|HqZfFc?qMXHetCI?9^Cqv3f zV#)u2_HZR4*<`01!OaYP9Qh6k7)7X0Z$UN#rlMeKYzp_sI+GEBz$vgo%7RsEG?*X8 zg?&xY>WWQ46O?FCOg-RD8mRYN^O5t|=Y5o%cr)C7fW?44?Pgnx-{rj)ioi3@n1^US;)Te24?1Liq$h@Zow;401i=}PA2vuIVQ z{L3@cBAu8jqFBJbGdUXWx}o&i7sLkh@;bjcSJY5K3vkJ@*hxGN4Q)Hg z17rwKgpN29)Y+kOlVSo_^}?K%s7ntpzc3EtsNRD}ATihK5vU7RYHb=N_Nna(=aV7T{{~p7?Ebog5c^OVE#b?W(_iGHA6>bSsxx)4>vPjVl;C?9 zESTClg$Gw-{$cZgc?GbzrA?g*Z+6VgEZi--wwXZIBq(Km@NgazX1ictrneO3~6uVAm}pfyyi z*}o@lYpKE}ye0LQh(kM#xUJF&!WK#VG-9-Sz#~DJI$Xe(d~+No1^;Q{=T^NSlcR`$ zP6ou!!Cd}LQbFxi(*Z9NLnGT54wl3H{nSGgELWw;H1~L#DWR#6$LGVSF1mlVu?<-J z{Ghc2Y#9YA#Jyko6f)PIBSyRmD9p#EvZVs}hURgJLS7F1&vgP5Tm9lNgQ)9$u5Tg>944Q1{Q^YH`e}i0kN{df~Qxa*L)jq2< zG)OA-%w66uoR+qaG>9KVbOkWDkeSPo#!@2?=o(5eIY$F;T}taTxXXcE&-(!8I6-}; zTUbI@9Ec3}0;D9jn-+Y1nNnb(1O#G!e6?{7p{1pqo&!H+I0R9Dp4ht})DdFy48cRO z$0bDgms?J8_&r!5YqZV~Tg#PT5a>-)VT670ukr4Kn)P3@4#IPVL;CB#=0ZEF5NW}2 z(ZIXx#o4^=NB7C4jjuZ2TQior@=!04PuJb$>rg-6jd*Vqvdks-x*&KevY@&cz1rVq zVi%F6K{wQ$l$sBNVrr+6PYiRbCg_v0V1ZjdrVKDF=L#CCcFwRV|9iR~1GhhqhKB#y zzL}{GEFx-Foms|Dtsf1z5sWqthXC!erQJm>-fPVQ`&qrT0CEQ-M#b~O!6a;r8|f*u zZeu|ReG$3^u|Gsv(5{zUx~G@BuD>_rKbmtAqaP^(}vhghaN!>B3kF9-0%yJ-C6{O-{7**|@nqG)0iy9XZ8#^(>Y;s6%o z|8^PAARV_ct1-F6H{!zkdT?p8=j0cW#`3L^;cv>4h8bXHx&chiJX`V-{{5X3JnBNB zo&e3*6pAWxz+z>I!s5K7LB<6u&ITj+dpygl}FRh7vtXp97g(q`CL`?BI@qPIl z+@!j|3Yb!IDLE&{4vI2Ri-3|NjAC_7&~1-bfB(x^+T+yzCenRc%YzHhhXZ;80}DV+Cp zouZ=3EL_`+Ry(#L(bP{a9p_!s0g~Vp*Vzvpa(`7|H!JT67~ypIS&!{U8089_n6RMJ z3+}C75T{tCbSPXMLivpp28BD!UAeEfw5?|NeH-hxS6jZfiCcP6UO4x?XHBokB!{2P zV~c_4Is6pi(J;GQF&$25CYqbv-(N3b%leQF;oy0mSf}#gyLazOCuYGQG+v5rIYl(o zdb^uCwr{6(98cfVop6Y~YD_cSI0zaFjdPY{o*=t$5^ngR{`{#lCpN&=%F1!{M*O zv+Ye7WXJ9dcc71Z$UvY6?*tHn#8)CwNUd6!^rwqaO>&9o_~Fb~Yx}{C#qINSoXPq5 z+SH^GxcrM_GgDJR7loodULJzUT43YXD(_S)@s%|wzVk?@M_)h6;L(u+2=Kd)kxU!X z;W#VfEQN)`KOu-;SM6J~=j5i^Lvch~hyAOq;0vapGbv&C0Kag^anzNx{nxMLqX(wj zB=}TLFl@NuqzXnith}|kV^iRAVNf<($!>#eZlRb2qj^ssuMOJb6RWT9!0Hne_8bVo z?a76rX!=n~AamlCAC!+TH9MTL07WT8j6rm#}tD*I*c(Ghhz 
zr33fWa~W4(L$jL@Gg6Jlwq)_Dj}rn_RyRQmx*_w;O@TW+XoC3qdlD#b*K41%UC-~FI*?qt{46MHrB<|2CQs_{25%HnI zcyxDlu^ni*hZ*4$AmL7@p|Pt;DCKXc^rL*_I^{Ku?jvjJi5x5-V;fe#!NaE@ChHm_2jVh8|{iN6{gVFcc9-Q z^b?wepapSRXv^%$bYHTX>!#q+j^D#5U#DBWhN)E_2eru^MRv`!&mo);@TL``F6etQoIz$t zYo(vO>VDm0OXKDR&DZg~yw6j~EQlr;s-6_hmTX#XyGxI?LmJ(b*#@dB^}%utj+dT3 zouQB_ayp?N!n$zi?F(>t=!ZoQ#l~$7lO9{wqjeG_)E#BdC}V5=SiS0 z-uAvITGCptf7!51eFay?vi&95{~L{HIK=}@SmoSLIVeO&yU(q|!2McvId0c8lp zt^^v?&}xR>a=MUU3r$&0+qb#%2CiD1p+0c7=#Sak4{mf{_7ORl?16@t!K?&=7J<=o z_6Z8q6+l~Wo*Jckz0PEi&ym7VT2cUkGZH68R&csyxr1BI{fp3+_L^W3p-+G0H-7l-h?-n>!=83P{ZX=IukC;nLa%k)e>cAV8lg7s z((3H`O6A$di5sDWh1}~YJJA16vv6tVwb-2K5;R~G?`HXNl9acsK;UIh*v^-*uPz2* z6vUT0`R~HEUKQAX_MZ+#Fb|bqs*<87+O)NCSdW6rHesN2=Le3;dgI&i`|Rhp4JFpV z3)X#PkG1gQ-e+&inU=D+>CxmR9fHg)9|b=aBdr5Re$DC2V;}kvPStIfpISOZyea8x zw!kU{!(duP((ekKwCC@fX6j)Xxb-Dw&4n9*!NVgjx!!fLh`Hc!UAk6ZzHFUadzqv| z%|LW*z&fNm%s23cRw(s@r4N5;m+zSQ z4~vvNon0@p*@=l3U2pPY1gH-=Fw_iqF46 zM4{+lV?oO(awg+VoQd98@Hxbqp&jMp-55z-5*qR?{xdO<2MfeXc-l&8Y60=2g4j(D z*pI^vmT}>5PhMVES&t1}$h>;45IrJ6fz7c~iP#VPQ0h*|+7H_zngsBg$fFlt+Z12N zV#|-BrW?#-M9t41p!hG7fDz&S$&8A1MrPNdzp3n@x7QOt%f{V;RWwxDYsETU%q;$( z_aC+WRtebi6OUL<_(EVjsTBl>{1uS^!G)uj9WoE7vpy}AG>$(=8m39*uhjMD(8O+zR^GH}BBKCgpqYP)j#EkJNQ$XPy*V zIylhCaaxjRA5Q?>P!x7!Q*jc--HQj5F0;>qi5!XauGpXK$b?eqk8vqPTKkt@frcN@ ztvPmB3279_l>xhJO*fErz*^eCmX=^g)~xbY2}=Y8Wg` z92o6y@m}X(CpmWJf1H;n>6>Tq>c?tEeYs&GLs|ak!)ZcG-}MI%xTv7X!~W3;uI-xW zOPNC%2??gqtt4?jv;uq_53yi$_7VjoO(W3Nc>f@tQWHkFWnGvVrk0Zc*jW;f*h4`W z9a8u|u;Cj*FS0#l+RhmGQOLZIz8d?75V7yk{@)i~0#5hm_&@(r>><*KpUP^tFsZg;mOx5SyLv?CLS^;--p^z7AKL|cK#tVwPQ8m}W?@PjhLTG&BSXbE>$tLtC6tTa(L{g1L`PlkK(( z+||{jqsj4*t_iiC&Oh!W%874o7=2Xrnk_ux`Akv!>uo68-M>iHYYJUY0NX?P#ic#a z98X!neOBUnhZ$17`^F@kxBJHrrUtg7_!g~5MY2^zO)kj{dFG;`qIfw>&@{$W--lZv zeK#1#{$e&6c!SDM-5GQFJ%IIzZw75`67$?R+`!l2G2a>_?4c+L_E)<=J?L-P@h9r@MuHb@vj!4k|GaP1a`m?PC=5aa>HDN6mwJ;ZB z95P;LWgVuDP7Y`wfT2u>ZG-2|YLp@P19HfxU&NE$Ow(CkXffTk<&V(7Hh6prb%_nK zIBq$Hv|Rj=$rh7fWS^Ip*Kpzk`n8KtNPCUZdAcF}zz!6j2)CBs@{2l;3U8i|-tKG9 
z)vPt_&b@d>YzN0A50h>+w>iqD#l&@-E74YJJ0+s8+Hf_f3%haiK|$IaMtyQL^rWO(Q?0JgAX>ki>sBuqv87QX?TPP?;C^y=wN(F@{pOb? z;K?yv`>p6JVOifYwUd(OkVQtj;~p8eW=xjp(omJ4z$46RgZBM=OEHD+L0BhNFXK{5 zbJCFP;W7eik0&=T$5np0vz9ZyFZwLQR)TxskSoG+cFf zeV(S^My0f>>e}bqCzaVEuE*I;ri!4akZ)1rb3ee5j1#-fCzJ;$yKa%LuZd_cqFvSPy^3%JQ zDW$!6zFP(Lqk*ex2VB)lzil7Wq(jeb`a1CXB=-pj26+Eg>3m;zf>=%3zm2MOUcs|d zYQmGpE?+AY2j|nYlqFSdsf~iQUUxvG}^gJ|`kfNOYNGsb;w4AdMrJb_$ zoMFh-!76+}OH8{8L*RSBa|M@F`|==DsLjK=Mn(T0WfXf)2b8r}zUk?9(x26wT+Giw zM;0z;7X&DW zegI|euF~(nLM{ICLw5DpsS_NQ)9(QkBT>L+U*-F8XzD)Nv$-+ffw7BzE1z`LCf@RA zFt+%GJXyKKQ_b0E=NHSV*$By${#w_J_#W@~GaWyt*S+6uZPdutBjkfui6*n}OIe)X zyV&z78Vxo(-Lf(tw@%gWd|A`=SXS2L(_tYY_e8={EbC{pKm^MA4SmH|<9Umqxl zfPjDjNT+lu-3Urc4;4bln3!&;S3v_x*6c z-1EseaQ0q%#c!>>Vm)zod>p;4ujA~2T^C5@CfZJB1>7PQNSLg1)Ld4$f=cgq8hCBH z2Pq7vJC5nmpR}!PKP7;^blKnAG?qt%He9)FarCf_5m z7`cUZxwN)0G&a3YP^&m(7HqUE9Qn#5k8l_!ZrerlTdxN@%EAO(inwO~^b2qn>?_fQ z{BvYo$#39%xVoQmwo{RX6wS1b!za7*ssJ_TWB!Cgn0A|NOzL(7lwU2Q6}zR^bZn$N zL^!_|I1t*#zFeX_KHsf3a z>$Z)*5&!xQe|2FGJ9!8B<+L#d;*^h+5#-Lk73$iFCm@18dP&!Kh4UnKKd-`O#tIOLZX?P_U2auYv%dWcgqa$%QE<9V8yGN>N zCL%X`KpSdV1H-LyvPl$Yg^1;%sv&`uTXRMdr6fe`UpAC^ud>Gv;Q7k+FJj< zpa>u(di#sTkB<*@7a6m4GSAJ6Zq8xBK4@P)DOc=1VSto&xMt;PP-l|5zOO+x10um= zOUDY7aW5MGS%`RX8L5Wwl}|TVH!= zct@_OXNWkd_=jN!rlaS5Kwq`2F@`boxvC{R$sIiR;hfkA6>}6g(Ij z1xK61osgD)qW$zdX}he0Y+D^PtdtGe8j5qX4u$rf3Lw7pVhAu>@*3*~c1urjn0Va> z&iALp2+@W9;!I3!I(&SAi-KU@z*L?YoNB;ymW?#eVdVC{#S}RG_k<5UC>D~+^YJHo z{0S~)pijZNhV|vl9GQN7{LO$)G-z$Y*8pm_Gg?K<=xbs71=)&#v+YLTv+F*?Q6w7J zE9p}-xwW||*o>nO`f-$iiT+}OlR^6qaSEd`0mddcI&V#fJ)J&beZ4N8@1|w#TXJY+X2jLhrLdP@Qqi(;Cz~N3OZy+kQkSByjgKN)>8*_6ntJD;n=u~m9X)(m;!rTf6@%4Wl)n1$qIT6erb1N3Sqq;~2Q zV7I&Q(gnI4@@yN27O>;?mN+hNxanhkQnHyMj1I1WYTOL)C`B6eJx3Gd$*A5PG`2Et z7kA)5T*p!C^y|IMW2u*962sXh|Dgb^rp|@8(-Hl~rM21VDF>JC^QRr1yzy;p-4_a8 z*l{{7lq2BpiPP&Yal>B+^cy>)3S%qyc=#d)`bu@=5phPdCL-d7JOv??=td**^WSjk z|FiRnrk%(b7F(Esk2EFaqr}mlIykj^voHd*kV&TR!w9FYWhQ_N*gL-u(yXGkjAfIz zo0`O2m(rsOt_%z8(3rWU-kIG>7Dn}O=7htAJISOu4!nl4ura(t@!xYtrMPQ<3 
zmlTBLsm=Muuvji{wH_uue#rJR*p zd5M(mhXl+t3bfM=C#YBp`nz zKAH{$_tl-4DyAG&AZwEoAX0#Fzs&5p2mE^ca9UQ{xPWp|ucukv@yC;JZGxz-WMHbK zk=dTl=*~DyJR8}207h@Y^$RnuIbemOrWck^eHnmhHe;Klq9&pq^xOLpbe$l8a-j(a zwU6rVM3#^X8oZJeJ;r74gp1ag9@Hf8bWcD%R7s|bFt9_%)-Xr&=+SWinPXoe9ut-3MTr(=bwN@rrVu)R z|G1t~j^%5eH)&Wn*aN9v2WxrxN9s!7J=5qTaj3`h&j6)UT3RBES*l-80;LLW)S72X z6hSuDo_M@eAE%nAq69#ONq0Fr6g-%fjD_Zi2Ip2t zd04m8_fB@*OA~C@zpNpHJb@8o@`^frUXj4}*%69thqi=SbVUAv6a32;q~htE-HA>I zwwHAkrvWL_b*C-&HDQ9?L5$(MIv#b@@B3U{!y1Tktqh_238TuuSeA0nJSY{x6W~OY z?I(11n5gClQF~lT#fa|PLPw<*Uo@Rebl-}j;G@?2d{prJot;`2gpa!>%Gvn7Q6YM| zt%L&o4OyyDmbQ`d*(zx-vE$JvJ-B;EmX;Wf<9C0I^8r=){Y%3ZUE^}~fdn~xl z&!0wka3Iul2Kf4^7FA-z=wdKo#Po?l`Iu0k75=d}o-OYIa`E8pjow%}zW{GFBzDX< zch@GdON%lvr1V|lYRLZng8|n5>>))rf@6=&4#M58K#m&MQm<*AeK+t&67gb5@HZ3I z-{=aS-J(EAXFGX9X7QG%0ct>;^Gr|M_ONteEkH^q(=EQ(M>Jo!v|t$&SQPu-3Hwo5 z<=|4FiZ_GhbYrm_;P1FY5|%cI`7jQijU{pW1nzK~DY!fDU2ciQesZEc%0fIJ`0B<` zyOc(u`dtLU<|P0!R;00t_(F>M(mRzWA%srdfA(r*>gW*}1+8e&_5@yYIh_%8sJuYzt za0=uYfEzNu_4Cw_caDXk6c{AmYh(b65|zin*rvSu^oBhGcH(;+FC$}l0O4u=S9pw? 
zzlu^+rk<6q`g8Jw*(OgScDZTG3`GZrX6Y8T9Qzi_-o*yb;}*8)D7RHxgpD6V;(j$- zaq$%n^eHE3Ix*{cD|N!qSH*aO$Rv#;K)y6YguN&h-twET`KlzWIc27of`XHk5DglJ zS#Bj7ntgA>Z*g@N$h`t>bJQ?V3}8zi7iPDNS{K>PAM#xR5QEI}<>E7!jVFC#;G(98 zmw6PAcp@^DjW=X9nSx~Kq4O2=|Wj1l}w+LA@v*FBpS%NAd$`K{hOaG zW4Dd>(-c0BAw6rA8gYgYwkVq4IYr0nnVU@VKHG7tAxq_nxK{oDK${ z@T**uwAmk@P8Usd7cdfMxs(K*@b<=rluV#?=*DX4;ZnXmeGjiG@sAy0rkF*Xs*pG9 z{9N5g_2^>6QU*qgc+3qhorQG&sb)5!@92ibWgW7rBr?hp@*SdRArMFbRt-oq@&_?t zv-kZ=pQI}QDuzAHbIb5fGkSe;KP1;_rC-h$&;YZ~;Zvso5@9S9gyAg}ZjtVuy2z7J?EVV_7=V7LM@vl}A9>rE|A(NO#sI{5EW(!FvsJ>!f1Unf4pys@FiQ`mA!nc`u)oJ+7A z)6>ZAt;d2zrrqVB6D1}uE=(3Sl^YwUBM7+P?vE5nyJ2-&hETD4u?SoFHvU7D6nwGZ zh?{XCv$bcDhmp%FhObHdIBPhzd+e5qD%@gda}=y4kQ>-*k~s){)0P#ptCO{K zRNT8s5uTh19R2jsW!w*+#FTxs1>d)one(@Pagx-x^xc~WPr?@?y69f${UqUEXq`U6 zG7mKEP@!GZ?E9At{Kz}AHIHo3yfsIZf&d3SyWe??OfRh4zq}(z;kbV^ZMZ1zdSeS# zntf2TsAH3y<{qFL)#`Cdf^kten6s{4g|FVBPGw?MZR?s2hCA7nFIQCS8u;jL*^rPF zbn9;2D!7%z?=s4w9YTkx@_j9POdDyn7#Znarv3Eu_Y6Z!>^&P_Bnb68uWPXmIi;0Qu!V z0IVNA$#e9e#bPuAh$qS2HqxL?i1H8!9LH}nC}~z|s|NZOzQ(QTd(8L#pTmcxM~812 zY&{Ut{oJr#*4()=)7$8o=?6@i-HJ;UEm|ewv`bC;EI$(M($sl5ER_nG~ICjxcgn3%~m|VP2WCAvl;7#Mi&l+j5SKq3G06hy4mw(xvlb<`d^W zrOoFs{%;rfs>sy`>*|0#b&FGGf1Syp=mFR=hIZA?9ufRUGs0sx(81L4rC<%;`@Ho8 zOrlO!?rNb3{5UAjJpuB-e9V(kc9~TQ?2gHDK(23Mp8Q~*YG%qa{3BCi^7PV)W@DUb zqeJPS2L}1L8Y!?3^4p_d@@5{su=Q9T(}->fsYxbW%qY5@Bpe06hP;1aL%2)Gmj7^~ z6CLa8<*|E!t?cWn1#K>Ryb7;_n3f=4a~4=hf7x}UmHz0K{a$Ow?ICak)#fH5yjx5b_ACF^on%~hvGm!=x5S) zHPmgc;@+Ewa3=Y55OAO4(z~gKnc|@}pk!R_`Vfj^-=@2B&*+*-$eR1!&UMc`N(r>A zvM8FXdp0AE_j&L&g5izmC(~_y?(Q~+J0Z=PG)yvzlUx}q!3;!kP@2^l z7WoA0^psDsGkgFiUFctUk(V6LE_ih5(qA1F)9CJSXCkX>*^=uvU3#BIztdmaauFgz zr<&a?((?qU;)3SOX(7O!Ty3V8AOq2*`fLfP^udL+Ls%px_#B@ zcqY=rS@S-K`;C!Qr|iyd9ara!?u~-Sn`O}p076&N>YQ?v8rD#Scj@i_o za1fCDV%$4yTG=JV#z~A=g(_1wt|!dzf#J@)dWIT>f(~7SHe`kO;k4B=hSn4hcNQ7W z*vI=)`v9JXKCL)=J2!DeDQ&7jr+{O(?#hdW@0PY8_>G4|M;kGM0ZFrj)yAuEaAdjD zwZkW^rOh1cp!@X^Q1ZT3Kx8c^W{x<%4a3^8;FM@lQ@~Ra<05@*Ns^Vv;l*z(1*i#* 
z*UIY>KE9(U6zQ*%(*L#dPA~MFT*Ty|TeO}tA*SJWF&I3~jQO?vW1%Cutoc3qd}^(cp;NeENO!FCvsLD*bicd>~24yD;6 zXd$};@nXt;EK12q2AWKV$SjcBnCZ_gW#l-L2~S47>{RbuTc=p8dCC)PI!% z*dT9oMp)gl+8?$QXZZXZnIvFSK@&&oPgbf=4RZw?37Tv7=y)D8{I#Uj?&y4a1aI8B zE6A(wl#6paFcHqd zV)plH`t~pUJpx`P7+V0mueGJ?Vq`OrY9k_@dX^xCrcl8G+$llpP4ja%q0H+WZ(bVR z%M3GOZ!bp*jTg&OA2ln_0#HEA;Hh=c!0~MK*#Q|vkFI-JFiXu}zCRL8ryLMDXJc+* zJ$6Pm1n1u|Su#x{C@luqUiDwL=Sv#6j>}AUDVfv>-gX?iv6>g$lEq8!T!z1v%yi_6 zq>Fs{{a~-=FQht2Ob>>Ac(1|pl4>?670fIW82m_AP4WM|V{O9-KXUG3Ra!{bDxQ>A zerK*Nnz_)gH3f)r;C5UHpDjbh(r~rhkq)l$;i0DPMS5Kh6gt`iuM%`oxL-c&?)_Mh zXmtnz?ho>Y^D&X$K?S9yYHzJv-H?&oSWx&3?o%*N9p{q(!RRaV2#=O%ogN<{B=dr^ zz1lBKkSgFU~! zgQmaMBG+e41&Am8SM!ney6YQ&i@p$Nha!5ix5y=1uXW#|{mu%$dz$Q0Y0!u7Ngcee z2g(5>pnq&q{l@Y^5#Az|Y0tgk3HU7>3i3z7l?OE01C;`G>#6r++K{92FU zaI}W_pgTN|dj2c*o?G{j&I@QL?7Dk71kk?zqtI&MPPJt(*C@7x{uWzSpw~Tk`b$%j-!}c9o{O`;eDOuAr zqKd`#@iQAuTf4r{BAD7ywnoO>zu65t^<+B!7rWO1p+ch0*IyH~O+tX_9l1AhzF!{9 zg&gD)6EoN;3KqZC^8ql}$&-#V@9N-iWTzU!J-j&wj%EF=F&Jd^-<@AaQIb50VlnNc zIw$4o8nn?ppU$vqv4qgyPwqZsJ{}AKA)2=*3$O3Vn!5mdPx7Ss_iKB-2qw$vb^k|D zaN(+bP^CvN261m{d9VMyY#ZJ)9m@v}EmkvEOUp*U)nftJ8bjkwe#)~Df&iqRE+HUJ z1v4Qv`wEEl!G)IIKW;e+n|Is}C`{3fri9+kT+p*ai~kz)eg0kyTr~HU^~GM(!uR5T zk2uVuELSRQXryt&z#70eC+yuxCmO*^q~S>23)Hv-z6l9IB5*C?Xua7>LZM+iTs_by zq*FOs?_t6mYYuF6H>-HIz|H>^`)iAbIoIA10kh%}kIzYSvFW?f(d%oM#kb81yjWbc zgO;giV@MN$T%67yfp4pGv@Wt0BI?cn=8Zd}mme@WEb1J`JqRQfU^GGgv2sl50?v2r zKj*tX!K6K9M8tjV47dSqQJk3NXY+s)k>PBwceSVCdeiJCrSHXgF<$+32CCaFZm=Cv zRUJn}Bt0lc%?~y1NU@=;niALv)4mCkoaoDlB6-g{ZU0m_#A!OotaKIV(`~J-?e0Z% z^sG8=fH-^W7PUt45B$610ipR10gh~mzZmddI_G(s+$jx>3IT4R(uau!q8hXjzrDO3 z=USQWjl;aAc?zAiI9xq~`IIV6`ol*Pf4UJ&T zqhpY~OwhZW*Wx{&|K#vzXFBZ>F;LI42(mNyE7!83=ooewl~lC96ny4^ zfb=PL#7!I3K~n|eAjO)~u^uX#qxn1zQMLuC*YW`1J`d?)c%HQ@JAnEOCPK4JKme1# zXMD?ed`up<{nh!0wWc}^L`fO|dR|o>(7*~%dfN*n z$jb;`mGwqbY-`t(8ybB#_U{xHlY>-SvFC{yB;6t z0o>)B1BKSkoMhrGvuIvDZNB?AzI;tF$ojSsy{L&-316Y=fiU7_?{a{Dj=qVE<}NT% 
z9Kk>`JyKXpX|SOj4>st0-a&GH>8ROf>SW6EN>WfUB11j~kLm7UF(9pEn^mW{!!4F>pnhj*2}pK@iw8cl`^VK*EX<-103*@8kM#hm8)zCm}UX zEV?i|vCy>BgXY{;1!=L?RJc~`VQFq5%i5Fz+Hi<+BB}#8y!}z zeD!8bP0&i!4QOCF7ER}0EK!TKPJF(zqPz!q-L^Y?ZwD?2 z!o}wS-!j-$Fa7kCm6SWLS*K7cPb_T;f3%U0uL^3!eKdI-l znk|d8%@Jr#CZKM9G@XMjL=XXssQU+88XKvtGpQI8fknF&J9^-)vF9Y~gmvAK6UsNDj1^raNYW zt!noddhB>ep%Fd_fP15-^%+pjj&n!eE;p3>p_lH#&#t7J#UgN;^S>1HHP=D+%O8_E zm{WQf-e{({hFW|Muh#F^QES4^zI+W+)EnxkM1m~AZ*3QvJP3Hx5?-TrYU{;xLwNxQ zUq*e76VO~n6dlbTHf8)-Gw7GZEVoM7Z^>Z50k8Ctu=)TgX0J=xbGE<68P=&j8l!UUPc8kuJ?Ryt+0-qh^hP_<}0PMY%c<;Z~HN470vzVUvqDc0rF0 zvq^xBgJa=e-;(Tf4N7did2tt9@E-Do*0P>bA|1Q3=6VlK+ZpmJ8&q5(JQ>+ zs>3glLQSh&E{uzr$Ke457WX6MQ@xkP3uZMh5yY0xMK^af?vM1IcA_hMu47TH$NFoX z2?D&MUYTCN{Kg*H+EK6kvEY4N=dI$R&({8?5HI`x*k5cgrv@v>Tn)Wo9%Z@{?g?<$ z>s?4tu&cwnm->UdsLrBS(#o)lGHHTJFhEXtpE(K0Z8;OMGJJTde>wI}l+?J|?um{F zA)B`)$X6CZwzM&!Wqco7eaQdw`X0GpLPO|EAcM|T0$i9csbmH)!vlZKFmToTJaq0` z(wU~K%yoFuhb&9&Q8;n#<)2+AiG?q;gf90H7)%+Ft~XAJa4WEELeTZipK@`);hA7++G=f4 z9z%_kfWA<~N%zFF%JuQ>VlBgW{0#M9iwvOLxj?2aoE`k|v+8#iEZJqrvJ4@e-wvTu zPnfML3pqMZWX``_Z>H$t{oNgwz&-cCXY4VPARwKX4M<`8u>eHTqHN2-0=Dv*OS}Ev z9&^~z1a5N=;LTNiPX=$%u3^4IPH>3gUAKY(EAN!;(_WXj^4gKV*)y^I)fj8u5eDR! 
zUULa1ui#REe8DC6QH{86n5bWRFiHjpdWfF+YJSd^kJuC4=&Auyb8DS$1)7_`R{QvAKB=;4RyHs?5Pw$WYL+vTN`dNUHIIH^n@;0V+{ z%;eqd{n;{5$c}-b!h57xjzl7Xjx9&+&jcuS7J;NQgP0}GBh|lO$l$upApJJ%(ae*J zKp=oYqaFJ|h+o7sG`cM$>gHG7v{V4T=L961_U3{=*Z=)M$3GuX)XGF4AV85GFsAML zi35lV47hlAd3oJRNr}W~WNfTch2-Y{;4Ifmn8_pT$b^vcSSlhb!V@gnKQ`Z~u6 zpY6>{8hF=B`uha2{{QpmZ&xi%zIE;?wo`)V%s<(v-&Oipu2PHP+AiD`3nt1yryY)B zio6SyPT>N#%HeTQqhhxl7M96~>Dj39F_=TY0yD(^d1Uqf7RF!ym4OS$&;R=b-6Q+Y zqMzLf19tiMp?@d$Z#@adz3*B7sRFrUuK7>>5>V{;|M}~WA4o|{2VR^W%m-fiUtL}V z0>cdg8k?J&TUxFz&c?kqe=aUAq6gEJk&QZwJrwRkD zFI#y20RaJLhXau(^H&C?gi+A{>?*acZq2kMVDdf9=E-6g@ieczZE4!Z<=Ii0FHCxS zc{%sb;{D!Y8tOci!sC0L8Sgw@oVGi*=UbexzMB0@f@ijCVy%H!7q(?vjV&$t4#aXM zrly-*=FSC7im&mI#|fqSIiBXbyzG>z8mDWxo2KOpZ*e>SC1y}sz~#C9!{NTzKP4`l zr|0KYsh0=yd;A#N+uJRHR{_4hBa_blu`-66Mwo$DmrbW}3cS3$%0Rh*TrR|cPFyA4 zs}k#h*iWF#L2`Rn*N4mO+0s%cyTOG&7pw08jc-Gz9{iu}&YoXhOy^}iN_rhwgRXIz z54bpmIAF_)n;IK@ylVa)z-UrtlxjFGxQ+hBL_oeZf_1t-#;e3;Y&~|Ua$7p!DFegN z)>7aDEcH-1?1eQr@$t!iz8KZG_&RCRT+IiKmV?X7QWK8NgSf3^-E)3_2#HSd8i%NF zL6X?$#q3CWZ?-#F{DyIM+J}`eF6K>|SW>H-1zWc5Fr0B6gDZFFt}ek<=k*u-#1&Ur zk1t;PgkCv$Tuh1bbycX#G4?(7c%YnM&N$f>15jV2CcQlBVQ)Q8)3)?sRn(#525WFn z0RafZyvs9AN;?Czfx-cZ2=v%4#h()!|58c$w(MES2XMqMxubyze1z1VV1PI+r zHhA{3FCm&TO|C;fCQ$c3jYd{|x~IcA?V8dTwlvOQwYhrW;%6df{_V|PST|n}?475j z(|9N&#dHuFIDJ=(-2j8x_f)KOTBe2(?9Gc#@rZA$AH1uWqH(Wb?9Uaep;_q%1J;p}xN0a!P0>;{vrZL1)fGYMka&>trB{|ITq+ z4&nh-0Q7>Zt~1#))kdcYC0vYL4q5eabzB|KcfK|9B`j&@m67?RXH6N|6bk^W3>P=nMW({8(tS4 z=gzM7Q$q&T&LgniFjTQ=C1<-8+%s6~zcW-HBt6)gnj}(T6HPVM%c}ZQb{Q!6M;ZvDEsnW=FMRWLQw&KkyVEy2Vm(iZ zwd9hjmNw^++Af6Q6wDdPkzND7V9>!NH51p=&x^Kc$6D?YrHoDeQ}u?9bBq7p(f&L- z(j~6Ovqp>5(ipW|ltsAE6$GL3)RqquoP5j8vAOKY?s5lXJfG`Cxu- zUjN}Vi4Lxwx=y9zbLo0o@v{|I$vib<*)3LqS+ELP_BizQ`LTn zVbgGd<9O(v1yv9ZrHR28gZIe*p-@UZn`QNNJeY+t8W*#VHgwkb5|Q=_EFgtg*aIzh zJYv+E(H@m@N@q-t^d5P##Vp&w%0^#xhT3osXhgS&&A zu3Q5mDwWy6Ci1;amvs)~$6LX7xV2{gZvAE61BYCvr=FjS3e9`CK&p;yf~7-jB?N`h zUQ4p5ULj9w+j%(lSHdQMT@_`Wb>xNTg%)026Cui1SJ6Mksg`Bankk|=80k6jRZHJ2 
zrP7F(^CYcDjw+h?mKMIMz*GxmaG0-Tz*H>HT;%*Ja`kdz#bFp7r2FAWf@XcSVjqo5 zRQFAc-=fh_%6+-pY{d! z^8BrBj5ZBONzG8PhSvHiMvu7UvbX$aM>+Lke0_bV^K&0L?K|-Un-sSv$Y$#>!LDbl zswIzQnuhQ*ULSevSZx-0MzBLhY?V`Ory%y1LHnhJsP8_TzPO7w5W*b0gUZ@!oST$N)Gj>YA8n0KPf&N zZ*qvC`Zr@VO~#$f4zOLM<9I6|uVzH37o4_XLo_i+7k2JFtiWmehXu}&G<6i^QnvYX zeh4}2@yoBWCriMl)a0F-5c6SeY6jwL65J7##`9bsBum3aLx7agC&B?^uN@8JH!paX zJvRx$EW(OB&AoP3c67^Ts2+cyDQTEG|9GccvFPVdSGU;`I~vZ^P$RiK)Zz2_w!;^C zTPy^O+4GRWA<^U*8ev+_UJ_Th-M1Dmt@$qLRC)kA=r`VOPB1fNCO-RZfBnw*-b1+z zUReT))NDx2(Dg^#e=4%Z$fy}b&6Ia4Xu+AMvIDmBZWyNy2O)W4PDBWwSH z1lH7=Z2{P-q(3spifre*yKc3EX+-4$LMI}SmcT}JcekJyJQH5Cb~9-v<`UJl)^r~2 ziSen6l$oyNq*B$69vm5LyzhOKyDytPH?NAej8s8}C#baq5OL>5<~+LKn9`Ox*S z(DS(7-TJ__y3hYiVd*>1O#hU58y|xLf92u|K_c4=-rUM}9%*Wyv%WDQ(%Je0=C6sQJ~Co-n7{R)m4vbAm@-dp{;sWKpXkBU5mAy2R=>}{WFLQq-y0lS+Nf26$xL=tx^*P}U@R3-fUIjVRDIJZ)sIYbp5tw^g?bL1 zcXEsMMw-4L+lSVMQIBWVJQgVN{x&Y06+lHtp~bkQTS+ed1YiXE5YPmVtfb`e?4~H7T#G%ZL_Fm(OjZ zpx>hn-;Zrw^hkG}-*xwuT^k{8T_cjn`58z3jCZSt^5Ay0dALvh@6#M8-mZVyn27Tb zYU&di=>@D|`U730THR@PSd zW%MTW-kwvMixKmP%?QPk8#EaI7Ffl@vsYK-&ZlYos<-Rsv;B5Nak0gXra$zzJs*>; z@bgS8r{@!wnVI!cmsuClGYdw)ep9cBGw;5=sA{F6?e8b;%2J#{wGZ1uI#SKs7apf& zH&}gGmj5HT-&5g3&rm&*w>q>K2dE{@SHDCnB!xUGVw&r+^WArL?rm9wS@NTguFq z#n$>OS`71$gOoGT$b6N>hhC*IMJd8=-t<07Tl^glwk<})dpw?hOsV^&PonflmE9zl ze4J=*;(fp`(Rw`q<-Oo_ZjudrL0`FAn?uCJu!vWn1h z!?u$@wTfWtHw*ie!X0)RXI6vNrRvOgzEFi$@1-{UHS(9x@16;a{``zAWg_)j$?0~k zoF1fdQv+|6JFr?_?^4fligO8PJvvhD+Ergbx#_Sxq^$+(k2^G6dbit2+^iJaC1TKa zA?dz>07tJTA|61YPlDI8;|e=!*~6+Z${3B)HtP$h92z8`PIehAY{uw^I_Uc{23_xr zo{M>6;+ax};eQi-dD+7x%$`oH1qhAt(FPeQC0){51)P2h?_(Q76TueB_K_s3$o@|v ziSQoINS=>_dic}!+8`R9jRjR>f|&b*UpC8=;%UaedbVKs(AwwcT}>BR*3zo-jd+!g zmxf`!fn3zIdw6zju$fh2;mlx45X*+i%ZV0_MdN2;%e$&}eRMxK+BOw-p>GWsJDeLe zPK&Hq6t2#U=g3Z39ZygCTwrkYZi9<}g(r>ddc8RhF|!putrr>x#sog=@tPWi4Em#Q zy-8JVDsCW!j=ONLG?dZYJD92F4qwBFO|JV@>q5;J+Njs>gzLgls3R|-;N2gY()$>! 
zKojT1`8;e0y9d&KlK*h#w#+PoG*_P?;8NVCOOWgKs zs}hA)QtWapAxI@zlR+jb;Fiq6n|A2;kMA~KgvqGnAD?~{eCzRb?9J0t{%#)1&;8Gz z@jND-*63zFl_Mi#zP#_7`+Usak#R%aK_!v^ z>hVNFbDO3R4df~Tl7Y`$pq} zzw8qT4tV1GS)}j(C^CdFC|cHx9y!*tp50+7I}Hn;jkAsS2#Sk~>-a&c5SmlUxU}da zT%!Y2L}#R0WQti(C6g|%Hcy(@-QP62(h|D>wdXRQZ1r>Z&wIlHbu=Tl5^0X$3SBr4 z)jRNi1pmlV1Qe&IQs|FcvvtkGCYv&ZUs44$#W*cKIBk(T`UM1R(J7tpE-D;NINHhD zKR1}AT-;cd2zuy>>1llRD6ozZm+4A$jHqm@~0`EJ}3Y>6fi>dIe?nqGRmybn+6<1_4YE*3g@Y=xDS7w zck@Kx!!Pibt>&yJVwqJ8CC(9UUb9(F7>l)Ab-sa>!}dos)i}e|C+`oPAt;B>UxC%m zQlx5>hox?PtY2TRof$AdAnVo=GLDBsUiUTJlUe1e985H4f@v)d~C< zZRz`|B0>yUpbcZ)1^2y~Omrj6SdQdk<1G&!T<~Dt+|9x^_Kp@px6~ zHPoN>)OKepl^OyZ3m>O9N9{W5ZTB-AOrnUrCfUiFa72ATrSNgNi4Qa9!EDJjPBjOG z@9n>F7<0(UMQ)|g#8V9+l?P?rLYt=Ik~5(B58J(FJt@#(PIejzOw%s>qT*ZkZG1`Z zcwEqA%4rx!Dnnl0FassHZ0t?LVTOO7tZ@Q#d)=&&wEpx9L6g?SlV98;50tlUAr9e{ zOh0a`(L0r)=q8FT>=x<+Z$G=pbwaKSzTXy9)ISiLmU2x1f=XBCC;N+w&tIFe*G?YI z>=_&&u)KCG+U;&fS$&5tUcMEHJM1QT4)BFgkd`opB5HYcYqA`RY*QF>P@Z{4dC=m; z{rY+wx^QO4?Fcd5X$@J0$z7^xjUq9H>fbi*UPhMIWw*@Vk&za9fO32}yyCGlv8Ny# zL5IXsMGBD;d7p=Je>F(pDEzM9yYLGGS2=Cj8puY^4X%%o+bU$>!M^=CoH__ATHp%$ zL)q5QXV-6&Q0edIH?f;?jwbfxB$=EpO@Z=ANaD@%?+tg#Fn5`3zGxfue?~{wd{{VY zPb-&?AY3+Z_dME|z$A#L((Hwd%}U#q?dt)c+v}UWZ6zV|8|=qL@zj7%)idc9p|ePG zH|<`g+?P&jY(nJ@wJQ3mXX1L#H&L=2D2@m(XoQJBXO6N~f4nIM1ay$*7?swl}iZ z6wlyu$)6)3H1*Y47l?ZBX|@d9KQvnzW8yb|L^=2kG6or$si~bRhnBH@q^V4jAZQs+ zWD++6*aIrI5hQp068&cWsh99OGDv4`bXxq6Tbo9N2t9(jb@rx!ly?90=-w75wx2xP zt~o8+198qrE{xIkc*Z%E%mVc4VL(*v_Hb#Do^Odsmuj+#T$i@wD&!5}1KmwqmF>D! 
zlaB5F%ZaQEY01W2sfC|}9Na*B>+J9+V&`{b^JsUAq=NUL+8Z;-x{pC=U)&Cjz zg@Xr4mv6MAcHiL74VtUQKA-gO&0L*(Fwa!#Yp?ldE$c+5gGn|5wZk}&9a{}*x@LqN zT;2T0{N+;w)a^1&ts;HB(3cBI1X$C#=Iv@i^CovI1!or9E*kv=v9wo5Tkn5lA>V=O zTgH9dBWOPNR@2mJL>Zlie`gRs$oqp2kpv_0#!X*tWaNYQ# z(Th#B7*L04N_|b$KlsO&mlqAh>JZr`XJ1hHL#OrnD}g92l9_EmB2lAf+LTTo!{Cpl za8?hz7AZWrxC`q_m->Jvw~(_13)OPM(3x!EU9vov$gMOQ1Dq;XY!Q>4LwSkR&txtI zDZPP#fi;VdcEQ7vfTmXK0vMK^``d1?cN~;1Q!Z4rKoR&S{a3FINC>lx6^zCqwC=?B zVx`1alOy{vt_IVkt4Cx7>wPbbRaLs(y!qy!zgmHyJnUIQkh=d|fLx}E%ch!P4Xp;v zj47%Do{_WpRdu#wqBZ5Xgr!-*sBHL`P1y?u`=OeE&CQ}{CpIfwm*C$XvmT!F8+W*| z3~Fx~jhB6rFJwwnQw*ya#bz!2O&k#Q>m&8-Q}N;7O`Rjk1Ya**%Ho--;QXv{I%P+c zwTca+KABqS4qaTJ6{1Db;^R@y zBA04yu0OVAZh1KMUc+)79v2q4E|hI|gh_bo?awrtx8&=TpyNBjRC(JZyOMiTRJd*j za-cVhF|#$bKi`629(1SE?~Ct?)?0e%8s7-cgVNvQpp~QjZR7W4&UOYz4a%g%qC~wz zO%;{vdnStj*fkPlQ$WH%Ja@Q%r2cN)KruOEm*`sBK=j1 z4V{JLsze=-5Fz3LtH@@%xx>SpC%hnsal_NA|2e~A*ja*Zl&OWbDq-b!%P zq3y#N8ct~R5vKoa(2S(uy+)kz=f5o+v0POb0n_Fo>K|q#_LPB?)%#%gs~dT8_Q|Bu zr^XII6Mf=pTXcGIPrc{A=d0aw7?(X`8b7%uz?H$zu$ zN(+^b!+W7%9C3RF6izK9E*7_0|AX)qPvNK3Q`0xI!}{TX9Hw)v(LVh3;*J2o-@wGy z!M+3{hY%xAlRtPOM+K6Qo$AcD_0Pp;v+NqniGPQsv6!q$R)h$*b_p1eecpLzfFGXa zC+>LvE@N12wTg0xnz&<(w4Y}r3qaXuo^|XX9kBqObu!~9q1Cn9#?j1Q**0Grx1>w^ z$wu1PbScH~7(`&`z9TEZ6O9%v5CI(H9E;9LuTAvV2Tj{jRF9#!!v}XSq$5&w=UafT zDqkWIO=soWR}KcUY900WX-ThD9(=w+$Tt(&CPeMlwvwgvj`7Syly<(M`Kz7GFEVfB zaCpP2OjDlI9gp%@zs_dRPgY{`tQQACKTrTn2q{vPQ=h8~^Xs{sSc6fk_C*4rhAEvv z!NJ#Jt)zRyY2(IL&4a0*vy&#>hHbz`(~-FHc5Y8u6-KsFu4!&Wl@)D6ie_UUC} zZQ-NLPS#;2^_1V~@fwA{E;_I*JMl$6c=}L83C@ll*0O3`ZUt;U8UE6D`As_Ta-te1 zA`JhG`E3@`{p!_+V@KK3oCkfEBA}*Ut7)I8grhwX1#nQ&%9MH7d2#E1R)>nqvR7xV zjpA08uyb3S_DdHBmQ%3^^2Yy$o!QX=yw~7Dl!@J`gyvxs9QE2{op2>B;e;P(?As~W z)YX0D*%-Y_2FwQHTdQxdf*cwX_Wbn!sqM<&+05E-?7NmhG!t#6s}9B+!#g2JY{C1C69C7p#zWM_lA~ zI`)$@NTlWHYjzwmSNtl_lPyjg7eI2RNf(;O5_Gci_WPJK<_AZEpwQ*ImuV}wIc2Vd z{t_%BRLC&HkdWpWN1M!C0>PNjo}Y7|`#K)cf0XjBEUDGvDt3 
zJ&)V$$+_eDNCG9X9m?Q;w(cxNhA7mAlLmXC5K5RWyy;f`D$qgo!&v|L%0XRyj>9durz(sl<;7^zE2aM|~3q}Q>OKBPBryh=1MZ!1*qJvKfh%H*(OK3&`1NAJql7}by=n90k6m-|mJR-R2i=x?YcvJdB>X&|#*8MZ&5mBMN zNFTSbD!Bqy0xBmoJ_+yI60(7rx08)QFNI`%%dA6Ic$ZJ_Z#8l{j!pO{J=kz(@Od6m zP++p7{Gy^^Luq&1o>F-j1g>|+RdCwMKfcOq*~mxVkoHODA|Sa+rQ3%>y8p7x{|SsA zZL0Aj#U#RYrf+2aS+y;#jM?r3AeuImn6R!3rm15wid*OMDR}IjVdztg2{xnD+9hbSBNhHHU(oyh>K=??xqr9NzqkFa9w~3mN64^LNs17sjljW?wK<-9_&!<{NO8 zZCKABeYF^92yhlE%hU(vgXWFuKhyjtG^}D@WvZ`<#u%a~2n3@yQn{v8Z+#T`Rg`T5 z7%0Zw>0d9ejN&$TczMvX6m`Q+IZz5U6y!G8cr-$hPoHMzcVd7CR{(p$9Z&%?^`_Gz zHecjWkl_B}@41Hg^`}RSv5<%}Q5)4^nsVNww2ZH?T z-F6NH%6D_gxU!E>m$jkz1LS94bCPpP#i}NSaYe+sL?wHmko_n0eaqcAh2Jy}2U?cZ zD6Y4&D%pUdtKJGw0tQQS$9pYJDVJ{eK9-J45q4@_CTdU_s5GBpQM6R9_%VycIZ?jwR%)N>J%ckcINIw&(f{HevL;vaPi^{K`1avf`?-WcIf@SOcf$rn47K^1>bbp+jzMm?(mLlML2S z5+wmL#R0Xsw3r#a#}+d53vpg@>{$X39f*H>94Td?fQGG077+z)#~Fp)71Am@l)a7Y{A2Wj&IP}es#H)G?v ziLLj1@!1Z7n|wbKyHe(Cswa|qCIq^H16Rj~JKeA3ql=!d#dksD6iUs2Koo)g`oBh}|J~dK`u}>TKru1L`z^;Jcg*UL*FE0` P@&dN8a4@e#;mQ916huAB literal 0 HcmV?d00001 diff --git a/docs/assets/images/data_efficiency/data_efficiecy_fig2.png b/docs/assets/images/data_efficiency/data_efficiecy_fig2.png new file mode 100644 index 0000000000000000000000000000000000000000..07c088f0ce070106a2ffad00e9e20ce145f0076b GIT binary patch literal 78605 zcmd?RXH-*L`!5Ky$gagL4hD0MMOYE6AQP+k~eXgJua1ToS9^3=g zptB`p*=E7I1+hOZPoyb4J{)A!E{`lNASR`!t4BUnj3tw~^yX~Di8+TL*qaYeM^^aw z2d_rGZBng$A&st)Xq;&2kboA{@^%D?qrEnzo8nwrulKWV6!v*mi#^7QH+_BQVKN>* zXSn3v#zh7&3xdiOk_7WN`G_Q4n(J{3k^T-aVn`YQLGSK9n&MG zy&gV$klZ-crYp#ep!J?0>*F}$*{Je0f)W#wsYAM=@lOd0j9VAv7Fr@%1+*+lCHeE0 zkTv?>S;DQ3?l#_SfALG=YfR4@V+9Qi*hqtsu=`QG_H4ffO`JDywJj`Nf2d1(^#0@j z%SSa_kp1PG;{?175yIuSM?8==t6G7`;Q7FtyW+%`z>h_ytUKMP8hX`IZFDU-6BwGm z1h#Z%N51)c>RWczJM%7(J+nmB#f#+R3>Yrb!5HlDisjlV+zCq;nl2V+WH*Nv) z;#J2S=<}x)(Td^S*zlG6eQyHw|5Bd+Pn>kP_vKIGezWyz`?GTUPL`kze7Mrwp?LZ183sv!}7 z4Mkb=$dwqZmgj0#i25A=M^{WFT_wN&vBp)s>BdF{Qrc|`I&9*D;{~?U|8Qq{j`*}! 
z2#g_KBro{78^BZ{J;>kM@<*A0L5T;}S>|GEO8%g}J3A>a%(`Tn8y`JjGKoPt-i9%P zCiuVZY?#r`v}{!qBEpXQr#Do)Y>jnnP%Tpx{H<~M$v0{PnNP={JE9VlpikTRE55uV zF|Of-8lV3-)-B5Mo$Cay`Y6ts{3eh-)#%5%T!kHewm_CY8cKB@^(%x}x)Hng2c-N< zz7WF`Xv=sAoZ$P`8Ru+kOkttM6wJG~tcTG>lwhAX3p_FT;{5V$V3nu~b_y}jx|TV%JGkmK zC&SwAy;ghEce917iXafn)xA5Sm!Pm+-Kak-d)Xj&u+^RKh3?2vgdlWoO<3_9cKi36 zdSRy$$#47Y6^pZ@M%5%a6yvR@g_b`?+cTRryk|A_TzQ&)m9hCQM}gJQ zIjXhdgB{~L?OsT1(nn;U!)zt}ZJ{Sej@mm`AXACTnRx9J!Hqz>rma7ptP6CRYmW!} z3}rn1aYCYg+4ZWi7q%psEu>9?Y5vBM=($0Vt;UYS09uHZl5so5Q@?#aU+l5RbY4|u z#TPN0H1*fH^?x(VtoZgdDFF?!mD=2Vs5|fx2a8u`^R%Tl01UdW&}bxR9N3h63`6N5 z+AoX_k=dxFaCCrE`t(*37cFV+Uy=0d%LLgU5b+ta{v5xzJva7TZ#g(@)TlY=nbCgI zxy<6u!3 zLb4W)<-(cJ+j^2|R$KYt?Sk=7+e)!3cmC2l;^H5@%NEwXhpOqquH2i2h$`sv-#k(- z3Zn>iCj7*X8>&BZXmcUak*jt#8~Lz?U<9LucR#jxeE4m||0I^fEhOo8`1R@|%flw! zzxY~a0toIz+Q9hqHi#jkwcLp54*ftgL%kT>Wm*EQ(SPF%rBnz6MOU<_jf1S5--rTz zqO`l}NqgMQY_SJCTMmBFiVb^#RLnh&Fo72l+i)}rWfs+@PY?X|s}wQ7&&nID%Z)xT9ztl7pEj(n2ZBO~6% z&Tu{ZC<2~FA=3_wprVjcsn1rC`z(mFDKOd-oZDQbe;Iqbar~%mZNI?|hSgd{_Ow}~ z+1*RbPU&3CUen24plSnCk_V(`dfIE**pIh^l?a5}!LYc6rPbC=ACs-0!#!0R7I%tK z3KtZfL|;$YZMT6=@H4_Ar-H6NzMR-A{%p2?_%b#QEzBds)l=3~9M|4*agz4L_UNzJ zQq_gs*25>8ed%B=j9~Gd>gU`=yN~_!qy4E?wO+)U`cC8U^3O^>grUsfN3%-eL?V~j zy3I_9$&i$zrT60bn;xNjmXrfhXJcgW{#lJr#&dczTBN>PrH4`4=#AusWPW0ra^Foe zbB)8Voah)^jYT?sr9Ah&qSMMTt7&_=(frfH9JMfw!v(~X`YRt7AKLS$Hq6%Rm>DY+ z6RuFc*p{+iQ~S2(d_&OS@wU<3=!;jZ)aPiI+$e`1G2i44z4zN`$5GvMo%YHb>9S93dTzx z+>$Xv>fwNmuflXPVrYx*q2GsV)7yKSr?U5EOSnYk!gc5nbwlYJ=X8A&p9sf3RW4L~ z6xmm4Kr9=`%7A%yMKd12;JI2+VqK<4aRVN<-tuCru$SX&?LjYqLR5;+Uo=*I^U1+Q zs;`fs;$r;i4}5wQ=CeZ${Z=_+4$HdxY9deO5iLa74Kk;I0K2C43eji>=YqjFLz(Ob z$d1w@Il77mGEq;MXNE$c_s1m9_~O(t$=p$mvh;J77GMLO(?!<^Cw*$&nyk``_^)K< zdSe1A(|yj0`^+Wq8E^=C#X12MOYq)>rMvKwWX|CtHax>cg)NVn=EyZ##=;mqk$NjZ zqhX?`Y2@QLeGjAwkC4;u2|8D%Zdp1#2M49DIi)D!K)=^uzHnKwXO;V5!@W#56>oTn zEZDtE72F%{P!r=-kd0{_@+?lI5R53T4=5dViVtUx=D4L;SSA{j{~8*2^+~D=EkWE) zNKA?FqJ-Ao%d4bP>;^9A4zU#Vo+hPjVc<-x4dh1+Ln;OKg&gq6 
zoHbZ_Hf{qRC>w}w47zCB)rlXhvx=#$wRW-FN@FtPFDrs~+>PmKWO^8a@k2WH$=nYNXgOkx!bCs-0P@Msv%DX*BDsbFE1OrKsNn?VmS8!YhS)d*MQq!T@7E6A%Vb}|q+|Kc zW9Un9P2vg~oLqz_nI&8bw#u5DgtJ|<%z0OmO-r=2yrQE;uO=d`mR>IqCZv;A8sJvW zZW)6qi|~< zqL#7VLjo35L-WgNy4PQfkz<70{LXZ&_Tu8<3N5D3U)c)V*Z9ViLxX2x-i+Gcs4hm| zxULO1)T=zP1H16iPik^Wm0AM&ok31fN?*Evm_(r$#k zb8xwOTA-EG7Ok%T>e!qH5=I9u889-Hq;iMn?rDAq7m6M#8=ZqKf4n`g;Dw$9NEG$)NJNVJLM z%6kn=Hte?AQXG)cN>Y)I4@Ae~CkENPj~djYp6|xsg@=o?3*ZJTp@Dg?Z`1bO%GlUV z$&GXY5&T!(@r3m)ck5N2$F7rR*@6o0hvj`)pbrk_xoZVDs~OsRU`xVfBm21*nD;SL zM0IlsQj@4_WK^gavaBp1=~gJXih+8+!$36F!&NBB)CyfFxp3U!pBP1D;MhsFRM%V;CIS zWVl0a{7;>64gF3)(<1$QA*0XT;E}uOk16NVR*F$N_!+02&eV>qxDO{1VDgtR?E?F2 z32K*Fb6zX>d2Xh^hXJ~-IpaIin!m%>NW3_jrWD=6?IEo9-1Sv0tq?B-YVFx$xBsT+ z&spp}&xK7Y(n=C#ymWOOaEwW4Q#^@%ULWCoMrss`#NOTn;QeD8CRd4?Z4+bft1oc9 zlVR6-9aIYre@r!WeprD=&F#)hErQtwF9gpoVG>m#JvTjlPo~^?@|EZF*;bb7HS-;g zz>}nW_uO#?BiLQ_Tc}lZ>R0Y7Gmzm=+ISwd`=!H@n2;PkMEm}QK#U;X+uAylXWT+) z|G8lr4}oE)>o9SZejK4e+q8duG48tLTOFDvrlYKW^B?&N$m;5dr@Gv?+*jfjm52jr zYG(P~#+V+oLZYL?u~>`Ld|LFq_T{R&JYMphQZwYhz$5g=OjR?&?ac|ys*BaOa}s|# zdoPQL)S!b2bVM>lvsojan3>f5UYfE6NLTAKLn|*WfXeK-ZszDKVfa!u6vy?k>%)Sn zru)zUhr=iwrTv8v7{$$FHXjwS>N^*q%*B1$CRFo~tPGM@Uc8;STS8Gx>r*Da{22-R z8sX77*R2X>cEgqHd5Lhf?tk-P8?aqF!5L@H`Nv>-7mph-#~`V`7TPst$fw$VdzaLTerO++W@l6;Syt^k|v_n}L^U!ll7=19* z!pw%f<&_CIA1v;4eB!jWv53Nrp_=WYD@OOCuuxST1(_7E6g8w$-uwFVeO)2Ym3o(# zTAJ?~uX7o9MHohfTkyzbcp3G__j=v13yZ2Fb$(i`Z#*R=s%$M&0| z2c9$yN>gupFc2+%pjR_Xs%3bF*wb&!u)Ea|mlGgoA6Zs9pj=?==d-*hH4O7Tr{ET& z9D$F4xcc^7rT+cPi`lNC5QNNUEA8YQoGNF#RmI$2HI#)1^U#PEAD)Ge*OD=??F~@N zDu$PgaS_rx2oor;LOQ=)DK36InI29-NloV6M$Q~h_7&CYpXQ-1rP6PEkMnB`OcY=-(U{R*|eTrqU@YL1`Nk$mOG zh5R%0_Rjn78RM;)g^j;^KNOE1YlN~?y1>Rl`ex)zQl{S5O_7!pp}tShvrPMZz+rEKW%(3hDIx zFb03vMRxVWt*RtSgJ@Ob)t-BQ5+bI96=+oEX3Ovf#deNAp(GDoxuARVPgeiuB0^c> z6zq9tBaK4yAi7#@W-yJ-rwSL^)`K42GI02(gPy;FW>n90tZ!suW8+jjvt1syno!If z$-Uj3sb+ISXM*uyA0xw*0Tr9#b0u*grdnHr@Umr6>7efQ7_?-X?p>wQ^a zHtyitbM6|K3>@&2+b#lIg9EJpHr?2^%Y(i^UVAwiU|)q)W1D4x?jy81)=JB1b1Uas 
zolsI>`(Go7K{E?38`|7c%{+~=9Ohv(dohdR$03lJ*1 zOmjH`#XtWN#pHNE{{9|Qq_P&y<+$&j@(zq=c)+)NE6sRLt|&LCRgSS5#R%8AA&arW z=I{xvC*D^E`!8~(Z2L%*sALcGjcJ(K2@cj^9o)|%VviQG6QqSKcJAHZ!F5LWwSwdm z%OY4x(ez85Nhe3bQMvswS7SXezZ8&PE$WqDfC#RglEzoP*677!Fu`r58e@pBwcw~H ze%{3{k~X8^clfJ@2avc-@5ApjD{EJ9*r_5RLJ533nwX~@+m)0kL+=2Xg#sSA>NkE)?FshTC%`5uy1tNI zhKSprt}K{-aOltKIJ-y9&WIs*{I^*DSf4-OWjmoT>R?$^%3lJPD#_OieD*jLOd(PK zz>LfLAb!v*&_w0TN`=)cU~kGm?xhu9)Q+H2K}P@s0>+iD&R$r`TfIR=SmjC#3B%s7 zn>^eN32Vcl)Jq36^uwd=qxAD^2c)j;zAAK*RHX5Zfi>Nm;cCVm$z$1-z@VAD%KXJ7 z*BEBquo{PBq~;Y$R2406-{4qT!VFkNE@RHC`%DQgCvel!wIsTp#E8U4G?mK^b9`I; zQ-GZal9&Hpk^au>Z9^$N+xK&Sxz}G7TD7xYWrT`r6fW%-#n;VA$QHs|ydujQCb#p9 z%VFOH9ks{HGWU5SHX#eK`f|)fS9|9pd9ZrB>NJE$vqL{&~x&Kwfg?bT~X4b*9L z7acK{(aJmCquI5M+n$)G|XePDg2u$%p1w z3YY@_u2FeWeG}Z_;2VBQ{r1FDz8?8F#l4Zax*tIZ;-icHTTHCszBuIrE4>9-9NZCc)>41v;!n6reyr; z4X#S&No9ckC~0eXo1C-}rt|jBztX{?HCt??w{6ZwSyAdXUu{~x9{P6MiGZtn>;p%A zY=h@7v%xfBwm1#$&Jzm$AA!6Oxha_(kf$8!t*S;|#)I$DJ{(rTWO9+&FHO-#PuFYW z(gku`SSwsth%k=8-roy8jhC`B4s^6)R)>5(n%C$2?y15S#H}i#l-VJ4eYRZ*5f2l_ z9M&DU6=`JW-Z<$+@EPSs^*Kj6aqM&4zE4BE^$-x0=P)&R=rq`tne$3@lvr_D*#|$} z(w66R|MtrKyC({T#SGXMQ8JYuG32tEH-`r3Da&|Ydl=7B|ADxyh4bej_A)b0VFx||-QoMW`mQt?fsfv7Hc=h#cxacT z=-Z+9JqneEPuKFL*UsI=+Qb4)|M&wey`S%_CGX-kdJgmyp*d1W0hJeJXsJ13 zeN*%?Nq$Tt<3=0p%! 
zH}W=Z4|r4lp1iRo(oqOa##+3F4*&ij=0a<)yWrFRbxzg1Qpy8#mmFPr{$z})FRR9G zRpGtzw;!eE>#;g%vAY+-ws$g)V_TiD<7`gKoaArfk?Y%@;uyuz?+(_nP0jMwuP&|6r zy&f;Js$<$UcX!d_6kBP;byyI@r|!o(#oao(y=k?fZB8ZdodLevAKBcTQO}R+_YRX?q_hf4t;^cZe$JcO|(6HoS=(a%F>Wb30u-6&87~WLqmH7#7Jb zEqG?g#DZ&22I<$-bi-nP3DbvB%uGqt2+*~2Jb+b|HFSF1;5Ir(;U=)mPCcZdfFOr4 z?hPDm75sBW^xZ1pf&g#^#L}CMA(pG{cK8ow{kzi+g8yo09M*GXmI>V%aTC%De0}X@ zs7i&s5qX&(LnWG9HqMpg_|f4Ew+6E`<%~0zMQ=>zY}~TDAAODY88KYui+<5jjEAvm zqr;oEKQ=G5KZKuIm;B5X-62W|iBGprrs9=Kf|U9>E%5kJ75x+e1Z@${_??L zOFu@&ps;#!xV2YCZvzEX=lqS3g`=h zwli1hFcqb2H~0d8wU=gD~I7nq>0J?fB7fp@2@!xUCXkQ02^FA522%c(Oe;Q`aqJ& zGwY)rtFr&nGj8`xx5DFyUnkWFXxD!9hg}azkflZ?Xm~vlJ-pJL^Zn;a@dcrRU+3!u zc9@RSI9p|N`ySyMCWiD!NX>5{TtDyKCml4O)cwV#ZW}Retfb%6-tyQxdF*kATEEey zCg|Z+FV`u66aw{|lqp|Z^|g$M$JC(4ROqqBy(b%L_cu7ogPnm>;9mWsJw;0We9|c* zV@a)=FIgpUqTPstRO|vrkx^my9Z6G)|23r`KA`lb?jBoh&5x9$VU<@N-b;X@-GpCLc(C<@)0_gNeCxPd|0TW}$9(u9 zk4V;b%&(DhfTM&;eFfNlSn+d^!3$xsLP@l{{-XV;5T}|5&Ru^QW;YomOY1lcY7jh%azHPQ7+nsPvaAuECX|i!Rub{sf5M*epds z?9yR3JlVUPxG?9);2%ahZ5NRB8JAxDF(DdBUNy~kj4%BkRLcLNGVD}+&>AfY4LqZf zAq5=wMGEOTSc9Yq=x`av<|w6b-}(o~22NpeWcYQnF}!tRuJV+Bp%nu4e%a32?fLWP z3tj1e+DBjf48!bXZ)#rdO*>8LH-G$2IU9K0mEVj6PyxhZi^nW>RLuw{=(bHbxAzpOzm6tBCK~<$nLzFSAlh-A@^0Y6_fY%KESNGp zRs2u9Na(oNrr-YR4pn$LIqkaj&0zebtSn9;%>Ov)Xo=B>)cJQy$9Zg>9X$J_BC{W&>AHC&VWck&?IA`cZ?a; ziEHAcHdAW4uVr^G+1KCwGw%9cygd3Ggz5a7CBL2QveARqbouIM#O@9V;Wk(Mu>zq{ z1y{%qlNm?9U5EWj9gEI`piCWm^FRFe0Q_6;%TkfxN`vI19=lI_75~6*Qn$Ur!OX;V z0IEb*pBAtpv0t;7^Y>%W{(60f??zQ3agRFj#U25?-Qz}Fz(CfMi#Z>eo134Po>xsF z1g{8IlmQ@|j>Z3gOMyWDAA`6)_igo?IjvIvv^6yXfN~{+u_S=TO`Tt8xhl(xHY7w4 z@N@oug*sz|^^yQ>h9aa<=1u}b04Od0ZsA@H1=yV97;w+O%B|&U*{{2FfkFw!q?-d; zA)=Mv1he4)T5rC7ya)h!kpUB#87 z>HpLvXJ6(E0u}I+RHNtQM;N|(x=m@RgcG*zv;#6xSY$S-oC06G0T(nl9wVWf*U zzX+fKAXR!o`O*$o!$?-pHa&(Cu{MIyb5*J$$mgj%Ejjb+r%5!6PF*G`r)H7N zJpTi-puh=C+`qg^n2RDVmX)mJig>Oi9!#gM`Yq8{A1JB$;8HurcB1DJyjy315FjY5 z&U&i#OIMDKZwcq`cvk)76Zg+K-br$sN(68qc^tvx%!{MK8Dvzdu| 
z-%iE?Kv#CDs@0`YGjV^B)7lraR0{?E*MezTK}tcMe4`jN-w+{a`VWCx}Q5p!>8KT$Mk%6`d9YT#DP5Es!`&+ia<;E z(cUhvi}{GW1}8DLF6ZyJd-E`A4lf}H0`VusOs)`rO5Z)5Z1`%`*rEiWRqvt(hOP=> z{cbIA0d5XbSBW z*8P2fZLh=aM7=Nz%a?8!3@i-8_opC5(i7BvwGRN99fS_gP0IVOCl2$Qa{w%My6~Bm z@9R}x9XaGAhPe;x1B!FZPC1p{FvGRym)%u>s-gUVyp}9d{nMtdYV}Z$_?QjA@)of5 zGA7sT0)A=0b`lA~P|{ZbUPpZsoqyP#Kgn-IrT;Gvzm+qvd5;InVH01*Ut(EB`- zpLRC+Aa+L&Uo6ry-;GZ81iBp3n{;M`k<7Y-l@%_Jk70m{vgE}d@Jo;)(UF0am3Y?& zt~$})tfU|gssZf`$lp`j^7SJ*ouIJeO!o`xxY4+FKZd8!a5OO&K#78qID5}052*KA zr25|nY#mH7zUf}qwh}yk$U_ocJ>2e{xS5CQRP?C9Fzw?xQsogtDJ!FJ=|qBB~&+c^U|sJ+*v^qa+%0rSHjG(y<-z2r zoK`&7(sBOH$@)lQPM0Y+qUouJ*Kz@Q`W4?5Cu*QxZ8Pr(BPjv9>Tkq9t+ZlvJ=}a% z*L#VJYNXQYC}2_btW4e`6abRl-0#_HT@DaA!Q$f!Pz7Dt!V}b?Ct|CVbWvrjBm{s6%hq*hKet7JaW{=Nxx>@=o~gJ zB1Bcz(K7DE=QG-3qF0Bq73g;jrc_G>pl4}P#s{2*BSpLcP@4x_rMo;WN(v(?o%Vrg zsw{La4m31%wa%0*^wKX^>{Zl-?Hih-aa!rIJ$vzTvq9hClrNmsU)sA!*h*r^%Gzal zo{w@#sWF+}EVs3lD)gxiU!}l)s!gdDahN|#SjTjNkM$s?xfRIWUR+zoi}rD8&8+Qj z=ag_Gb{riNW^pfbTsli>IcM5)yTP_*SA6C~{VkcC&TX?E#U^0GyhnwK3H4b_#cz0f z*8yN2Z9FY|4gdjBo}plDl9+3U#V3(jP)m0wtcQ=1Q>essh++$pU#mYqd$7_as0bS~ zw~;E62N&Qi)-*bpFoNZ8Z`3mxWiCjE^I|CP7=67zU~jGggexhILNJQ3uE9nLH{MIV z?{gUHI}QBGmVO9g1fzM+p!nh^ll$O84GmV9nMD1Tr7N2ydOx8hLT)A2oSDV-Z_h?^pqyVmSQW3OiMLKg(NnmIbVozCA3lm4*Mxv+4Dae|XQD7UqCE{X#qK4IR-AOD zWqBgCZ*)Ox|5m$-u0mw?rCwHcv+^7%?YJk*F)_RX{KIZ(g`XZY)!5Kp9xPQ#9+iv^YLC`TZQl(%6^r-S^{l;}v$3pbm+rPurmd~gY)p-Ev`$(^lk zwG0z0$E%n=TL4)#bfVgdqq|l3xO4Fo^|wF1f>(UH!^x|(v~Q@CWvpXtonxeNE}G$6 zG_hL*{i3(parvwYdya+I=;hOT0o}}+XVyZg14^Z;ZO8*tlfZL3`i0QkepW+R!8!7V z^Y%||DT3E%v5QdLX;}LcMf)e~bD#IvFGU~54%$jZ!7l5gQ4*y}H_|0UG1{g$2*;l4 z#cH>qut~b_A}pYk7i6gZUx^<`8g6hMO?$h^zKJI>?dy)~egi=liPyhkVUd(8uo#_l zJi^8SvxRTsd4|tZINs$IoVJyZ+}94^p7AwXR-i$=+2`zAp?t~5f(sT~P#&MnYSTLb zz?nsE(8}CU(=JVl?~9fbTZZK;a;t^x71}@77eBKt7_7ibPOI1zjfQ8&%TTHnS>b&$ zwF&`Id4j(M{7_|Zr?9bg8cpK^`IFH*ar_Aq;p}OL$Tmbg$Gt`y@&z)7+bN@o@xm+RuA4X@5rU zlOfP~rFiP)k>N@Q4$!yCe@KF0I}aRjj#qg{0-->7#j+YKYAxh)&t8@Xiaxkq& 
zJv8{efSo-L>5dL7O8_-bl&#a?qTY&Mjk`Wrv7jmnq7WF9Sr$L)F;KJ8CYqSR0Y@Gk zTbr4fb$LH$1ts;t92OmYK8R#eZN`!&>}+%P2|YDVV0o|@-0V}mc1_t3XNNU(vO#x! zuQSV>Ph(F>u%D!@!io=NO`kIu_f?{?xA?qX#E@`0=1O~4KMN#`95`TJ?QOSl#WW5L z@Di^UYVZx4&$*9Cf#ly;Qv6*W%3vEr5wm*pef5a0EAcmF2-*D2u~0)zPJMHK@szmwz=Ey}8pc?hhVclK0#~aXr>6H}tRXXjE*3 zU!V+Pf$1czl(VRTZ#*N8ZbcD1N-gjj|ewOB;9h9PQj9~h_=}~IM zSWJi(sAOy;PgGx+e;tOqnCw*j5lQF!%7ok~uIOjqv0KmoMOTnlh;h?b!v|v6X|RcS z>pkz7ui>k1f|wadv#hwOf4$7jw@-CcYpm0=m!4?MFYpDxtIopq^WxvMcbu?WErGP1 ze;y&BSd=wKStj8&=&MvZ3Y;kwJ?WK@XyIBxw?`<$uYalHVO!!;7wdgq%I5H_{k;*z z#POY0x>GH}UG4FcIAj{P3gr`O!zfy}U@K3+VOe;* zQbjBG4w7VLuMX0>K%O zOm_M@xD0Z1CCQ%J#Jk?0~xR|6}lKx!}@l`Z4hWP_@C;C>dBTU z{oLHit-E!=owTd1t|`tdo<|OxTZhip28NF=ZZpR{VBBQHXAQQKqZ9&4e>83@ZOe`O z3Y}(i^!)^Df4iz?W&;B=gJa;_54PL%3sF*B`(iwNF3B(%5Xk3WLht~{;;TthH=_0A z4&?>31@;6S9Uu^&^T_1T)z%4lYT#1?-xx$yx-_p)8@eIjecHTJkp8&TZmb*j%Ya^U zzNCRIKmJ-Kcj6VS^s7KNqc6p%|1Ob)QXq0xG2}UQK=oD>Ln{8&Eqsm6d1S6q;YEA` zw^>YcP~3JChxoF-LJ;nN)W8QM3wK2f)uU;#PYJ#>jV;p~VXWI?2Gj{JU~#I^otj@z z7^|U;z+-B4%vYS}+s0ugK(d4usey z$4Bn_0|OU*KPjtyg&BX0Ls@(eLToQYNP5q(AvpI0-0r!S>KoCjoSj`edjchw%)aJ`FIZJxQ4KNK(k=Q%!zY7@ znB{@tOacmDUivg;Z(JyqJy(oU3y+>>HE9(-oQQ6fWyfqgt}@*S^8!t-TpV$DHyQB(4R5)StIpZiSe(Kg;`?y*x+K40%^29&5TG|E4tcg%9heZz zcF0Tfa{Y%mRg2E#;*(qFOHe*d*ApJH!dwK-#Qb3Tq5VI?xPrTafhNa530&WoeQ+k( z=*TxTxoS`d z?K$#;{82jj?MNJ^6u=|+esUJ0h@1}D77J|Bs5eK#P;9Y~B`FFwX^#)*BbSeX=INa- z{G)*h7WR>Qv71`A`EduGz2WjCby|-#^l&@E!sx&LUYCug&LX+Bv#+?0&jfP z_>cH;&a@|_LcXNMO|*FOu+JRpjrSo+SAVcp`-FV18|Y-l)U|nWa`$ceV95t8XLDFO zP1d}gLT)7Q`W?$26t)X#?E!f6dPjb-Gd8HsA7vQ5eC~bfcu^{7X;fSu9J;4=B&ncF#&%nn%O{~tLB1ky?! 
zt0AqbPp`NF`}lS}Z2FAA{!VQ?4G5$h=1F$6zwo8|7-7ji@!(s1^;)CIevet@Y7?<* z)bC*U=M1oYs_x#o<+h4YT`=rs%v@^EyYudxd&ci#OnP;GI$*V8j<|33bE~DM=Gt5m zLajf^B#fVaC*k)f>&kAEuP@f;d{^f3st@;~zE1Oqyr`whJGKpPH;u2Cg1dp2kfj@5 z56lh*C{_uTM5C|ISGTURmDdVxeFS@f|>kOu})~bboGXG?(vf}QSBQ$U*}|?V`{U>eg{J@YE2I(nZdZjGJP_C zkA=l<@$>oeCb50~zR%nTf3H|8q(=vfY;qSzUaW_j?GN>%x6AwpWvgE|+W8V*)bSqQ z4kJm*h5s(^`b$>1xvwf>FPnU~%$g8eRFH%E4w=0V;voH3d9o(K>p9+=dgIpA^XGOV zf9sI9qPuE}F~8u^MvQ%4gwdPWF+_4W7MsG{o);Vw)l;8X}uEPkORP9?|r$u zdiVokp9(^%?F&FniXDgzVqpM!vc*;k6q@m^Ld>j z2cfYJ4E|d`7+HmN{`0*C>CuKzW;qo+>BI)$l<$zCGVc^yNdPG{sPyLH%pJgn?NX|c zm~l>pwIf4J-QkbFv=riAkw_>{q&hicZ`Lxd{B6Ap&n0}qZ#SVH=`^(+L@;TbS zXFe3Zn^@o{_PdEonTlW8Tgssu+t3@+bXa?|7YE3rt`bs3{yx&=w0K@%h#tSc!n2ZD zlwX0MHsbTW2I=@-7`G6+OeNhz-J0u@c5RkKbDb>R>%k%JQV1Re-CLeeo)1WW%er4A z)pH-wF|NAmD#4J>o6Cy8fnd{Xgn0NtfKIYu1GnJV0`e2L!Q}j4(r;(DT1Iyg#nkV-$~LyvkVwRny=4g~zPUdu^vIxo?2->yO8&!63ExBAZj?_&B1X`6!g1MoEnO1$vV6lVmk{_*spG%~ zBv3(V#>E)i@?a4FTfZeKiHd&4h_RC@Rol60AVX*n@!e+3f1AENFgc;LG2QK7P1HLK z5nl$rLN#$Q%_Y1A=#e(9kMx@-U~r%GMSr==+;=_Te+H0<2pR}T@C$yElBVGz=B-xj z?;MWFt$K}HG7Iy21_meYR*iQng?-(jc6#f=A!pkzqJP1%6yc^uvNfbAIX}~5t)`a1QP*P3oZYAyNu|IJ z@R50*H((S`>7Dz<5BHH!yk=vx;mwhI;fy zy*JwWyubaj3N5C&zmN5;j(nh%PJfTy5}M?`!2b3x#}-o3m8HwetJ*fSzx9Y4u`mEO z@hMdFT~@6}6W`r>aM7Swe4*D9GyplOsYgCDEeMYU{)a^K?BK+(?q9EvRBd_%DW26{ zO3qFJNC`L1rSSi!lzHL?@zU3uY>UrSODw$)RGh=y>qhb%ZVUVPjl~2a z$jU3ak{bcQ)L!fCJ`LT5^`%@bT!5yQ%wNOT=k@}cRsHE0!X$2c9HMkvQ=;LD!|0N# zuEPH3tocpE=VX38k5N^5)<+o5UAo(wijkh|KZDXbuq+fj!!WL)a9wZvNmd?%>@df5 zI8WvG(C#OnlWBb@uCsW5>8e(;{vipm{SvccCvygH^ycYa!S`B6SuqHP6sr<^9$S$( zrHi4Ua$F4iY$0rhj^l>EmbxZbZu`hlj}SB~gGGH%E>+TvXWKHumXdf@nXvDrx8;jha8EjS%<2+xxqi**@aY3C$Fy)RAWr2ew;NW$Gnp$m6z7F zRhE_xTr~@Pd%rNmYj^DC*NYzLG|P|Eh_#uT)}5(hr;@l^KIbzC?Y;N(mTJv`&=veD zN9K6sOnq6r8-QLh@K4^xsNk65*GWYy9gyM#x0F&okds>h`lZ3I|k#3PIOy{;GA)WLP#GD7 z*(XmiF7oI&5td|1c1x5^B7!I@r|HeW7aH`_{q$WR5KCVdn2P!j$SxaW%*=PV4m-@B zVc|RPq#*cnGrRlA{##XV77{vDT}wF60A(Y+PFk!}msEPJ!t8Ez=(X;Zg|#U*@8IN> 
z1qT_UhXyAAL^Z|*_+I}hy(7PF14CK@x0>6K(x?6Y*Fz}zmi{XWe#O&3BRiNe;1F|Q ze>o93gvuFpIgNO0#Qfxbv<#d*WUc~lXcWHZ`uOxh>W{VrKFUX({Ul2dDGP!Fk zbVj1IG&rv>pccrv%De4BmPay7N9Y({lQ78ss%p0c<$m}i=B(0nR~AHm+# zn(~DIKkU7ASQK3MHj1JkD58>zh$sUB0wMxR3Md`Y-2+IYl(a}Gh{({A(lK2jzKuUZ3~(oqxV_opYVze-1M{)?Rz>wbxqry|U@|A5I)A3BBuF1A7+M zT6o^d#6lScS1Mx=N}R@LAdR{^Nv9xO=wtPi)%GN08M099y+gyX5h%2Dy@{@gS$=O%89A_^;xOXMsmWv7S^>Tp0nf=s z`tHz42teX-&w(+G>f)~le6YjMp;He-&E!vKDMIvbJNXannl@0&{L%xX4KuQqjHCP3 zo{l-JL-L7x>-Eh$P+oL?7?V5Iuz{kM@;9Q#r=y*d*dEYzw(rLc(-qlT_-5_bix!|Z zq+yRzYh+~UL+X_2*d_3A9bm+1=!Xf}#!cWzkHQauw|e;91Ym2LUS6M1z`9XLif1S&g;aTa_iL~79dN&eA+nUOrXAUIb22J3q%L?gB$l-9Y`60AS^8X+ z5r*;8o@yqPSes=wP`*5WI!fW2UmZ?}_&cS!h9Wxk8Nppjy4q@5FMVm$jtcPrfu_vQ z&4Xmx)PpG_#j0RXBg3o0$}F1UUc4B0 z87&60()Gd!ewG_SSCSIQTt(-qBVgtkH6Aau-5jvkH_0|NwJ}hj+sjH6MnLMv@Q%jbt=lV)v~mR#bM$KEUZ2VY*?7Gej@7w z2i{EgVtQ)_WL2nZrF~E1rgzrj0sg>aAIGN&*g9*!U_C2Ef-P)nn&xE|T3UOby`)P4<4M!;N^Umc=r!sO923-LJnrI|;9+hX}bw6hnq*k`)) zAQY21rsmFKeyi0hJ7Br)@>v(14z}#7ET7S}5F-b@7|{lWBU!!iIQw2+H?|dnc~oO^ zTB%Iv=u&g=%aNC!p4k+&D@UqEZ>`o~Jp235@*7;$9HuK0`yV*eiA0mC>INOP+$T2Q z4yZAx2Lw_ZC|bXVm%K+g#gBT_=LG3~x#GW}w{*kBra5`e$K&Pw25kRelokQ;w+nH< z-@6jN=;g?VScDG8A5d|w^W{rsuF~jHkd1xxECG)}Ycd$sj`Lk))g(ml+Y>c8h9LEm zGqHFBhilzeJ`FL-P+U+Ekosz6Sl+>{lmju=k_c2JK3G<3Ar)!BvTRh|{HXVm0RGi3iySQ}C&2GWX#n!WK z>AFwG4-X~YB6iIcB^!ir>~~P07lz;d4|BG*I5NXaki4F5@js+$BEpat^ps$7jUTv~ z=(Fl-$;0F~hN)_BH6j98Qu(bptnJVh*AYy)Q)EMZ?iR%d9O2&N^AXmDg>e<%&kIT! 
z=dc`UTqRU=oSBH%{kjbiuZ<;sFtj)gd$AVHX(AY`uDbdGPOS-out#$5+BCJ+pVxeKG>x3(zu23nFQJXMS)j>n)m>JO_ zj7P_dv&cgJXsbgw=s!+5)*cQ!B7HuADHl^eg7wB3{9!2KZHlH%cRJdkuQd!nd)=>a z*@85eNP7X&#U6AaEVf#KykIskpXSWg;i}8@Nz2RzMrUn4d^EVQ6RV16dGMq`)y4-Myz@n}v!y1%p`%$8SeA4znC*62VK!yN?%C*3mGqS~6z=Vt)&I$nj*7}Uv1 zyg;XUw^e3@YNokJ`;aQR2dc8>3)_QDX?P*S6?pkConwuWKVTWWkP?jQP|6}7xO}NE z4_DRxf`p2T|Gzx}K)O` z@`ha&M{tSh6^vj8RaTrj{QviKqaE=*C|t+cwWF2Bf?g@w<(xQuWPd#*7Sj2 zmc+NSW{OPXHK(<=)+EDgOrIXfvCw8X=zDANR;1fu7G+Q0Hv1ipACEM|ZMeL92rIzZ za*bVzKLl86Ur_OUMiS-&vqsa=xPM80-Ju5AWXPh65*N^F%9Vv2c5X2dSo=#L+@+oh zGRRfj^)L@O(xZ08uo1M{L^`b^`9UJrc}b%yxwPuJnL4_b=6NYc+EgQ$QPM3= zIpu3Ee$rA`d~GSPp^evu$nA{yzlxAY{S8}g3V)?;*{#QP!6NEXXW!&Q2X*t#z{aC4 z$c*R&Ovz{^?8oyZeA!}h!XbOwFkBm(&h9>U3K{R!?`&B?^y~OiPWCU{oa~)QD6auB zw3ae;Dp?xViWS+59JG7vlxH8z@6haLU2WfkUOr0zRBh?OYq~7{CSZ&!z3{^uI`XQ) zqc-oJO9W@FsKeSA7w945vtNrBjMLxQb+_cHxQ&LKV#%^~zyXrq-B%%P~) zho5*q?yITE5TR97erydL21f>F1>%BS+jzjsvKE!I!MC=)y3Z;zj()Ex43J#c!p{jr z%cvghJF*w3 zD+BhVtM7VisS^J>$(+v}^?}W&Jxs4)<4n@vq3k!%Lq(VMCS6bYM5gmSCxwrT>Nl13 zvwYpBcHKKuS(9)afU|GHd?fE43lj;``IjZK!|AcJj>ZEHAPr!gofvF@pq=aIHP*~V z&;G+rwGEQbyt~k4Ln%@bJTzY)<$qvnW9X}5kJL}h*UZ$Jsm%6i%oQiw;rpo6hqPe& zid3(Y{p;jEtw&U*vH1_L&=$h%8Uq7^(!02FJ%{(r+na3|&aPl32s`)jb?ATP$qs8P4g`T!(&p8DnvyuMQ zQ>OAy2b~0r>|LBT0Uh&1q|N^gZrUf`HOdZhJzRia+XV$&)_QJsKE6`Q{qR6x#R0)9 z*jVe&k^MN$_ChF~KK{iN!9QHnHse8<6yVsl?W7qv_NfC$dFbE22hl3vS=)t0RMC-! 
zI*vJ|1)h8t)Pb>~o%hd^=iRH9J1hpjcUrFsJeem@A>5FXY2GLLk#U*kudu_5v`X~; zl86v8ILNgT`18f)JjyQEJswD>=0lyeKBqb#fo_udpWN!$xRtOF@3j5FA|TjU*G02d z$Lb|*F2eitJev)W1oJx4m1PijBcxcEp>dzI4HAGFo`8GhNNPt<-stT$GyHnVAsDdc z9_60&RCiZzXIm2)&Di&A@3wJt4yaAspO5K}qG3IueU_`l_W=C^L!Q&khT0;=Mm7?% z?rOzV$Xb$Ccvt@wis@ao8%M^DUuOCeQHz=Z13Aw#*MP{)`Xs8n(HrD;M<;Xla1SsW zx0mi}2Xt>d#$Qc{v%wS#PoW(sGr*UKvYl}qoUyFCgr-k06t^X0ckfbSZ7 z`6X`ZrmO#|icyT~n)dpt7W{Z;^K;I3-$NA38Evg zQ|`7mQN8Y;(R{{5;V%HdHP`NO0`5u%_k#$Fo_nl1ReQFAySDwh9*0so8!(EhiPI|K zCl5qTay#^65j^qsD#J)3ocXjXH%X~7c`SZm48k1INdO11br^8y;fRA{^XZ=;18mkG znE8a@fyBQ-0XQ5F-OC0?4{x#+V8IdF>CI=Fe2-_IF>$oKl+E`_Os~fv7DBQt zYXA7woxNMvvC%gIe#eclFCN>&035PD{-QJxAS4FDnf#wqAY^)cn& zsPVXFU`FxBi+o-4oU*yC>ZUFjb8X$Ze%cvG`5Bw>EcpAfRpqp} zr1zhgj)6Xo+8Blg{&iwZGe)=!;7cG%93zJwu41=nYqlpNI^9~YybJ@43do@&bkCMP zCfcpps6?t=$HDom2yjEaY06iWq||Wp1n8LC7QgP+a*kbHtw76dc!5bK1TA{+jWQ7h zRbjIGaNw5!lxcVbvSR3YWslJT2d9hNLr=S*6m5N5E-TNOj`{Y4dHg|-TP_cobp%$F zSjdhh;LyKsl}R`ikJ|@OO0w&tOv|TXWZ~-oR-#?LuehvV238Jama^=e{*~AkaVqfH8XG3!_@A3(h9}2bE(p+d8?9sARk+%4?qw(B(|0 zqVWev=$PU40E>j&;B}wRP8Y-}eH-K{6one!( z!lK;pAE3f0bP7+mhu!uAbpE;c)vF#ecXW%?bQwET#wnD@zPy)y-qAbF`yjWyeuq%AImKO;8Dd33;sI? 
zP>42nuwcd}ESeDi|5+bi)i0<(&PL~gpr2(Eh(pQA%33VHEmFwVpnk&LbD4@auzm7w z>9~7k;Nw~<>#DCi{ha;MZgx0I*op?XX9r-tfQ8zEiYwFxt}jZf@^}%(DgCK0U#Ngd zsNm!dIDByOA~Qn5%oL;Nn3Qw+R1knZie*2ujmdTNs#?QaizZd&aoV_SLocW@fMG6k zVo{pxn+(k5uE+w;{yR)n!SMH>t6wcW1`g%U2_H5Yrzkkg^~%~vxozK_BoG^pVAU@0 zB^co05?6tM$h;1gZJ)eQya1*l2$K6!u472WwWla<_sMxGx=1W6OptFUV&@Fp26$VV=O?FRMkd(=_gY&M3Y|TPiL;l&>RqZ0E$r z9l+qOB_Lml(g(ZPZ7~Q*&MacpO3cs;g;&G1;wtY}f{9?t1hoz@vOLElEoh5@*Y5_r zq7Ik^5xjv{Iqv!ho%cs=(Pdq7U!h~xTg56$Koo~M2>fE{`F9KqBQ(bS?j1%$dl5UI z!E59cf^fhbi&Qn3l~ze)N8@^GVm354`aQ!gi>t(11X;X?OM*OvU&BX3!`_yImty4e zP=|LP3a{3>A~!&!9Cd|^qBG_};Q@C`W+B>=4k78HQz@@`(c`rort8b*T&MN3C!zMm zNdH!M&r7?~NaB$L>MW1pfpx&zM0#HFTRe(uznB3bS-3^L``EpP<{{P5awEZVKha&F zZ!)K?^?q;STAk(Lhu|4}L>1L?KbEY7Cs%T)+2H};-G8ncSr(*Xrf5Ad0NAv+rkc%| zQ6`MMrmyOY5}IarB@#qZ`3yJjL3p800RPsz2#~Sw0Nr?#6O@YZ%Q6a4o>UQ+kh~4R8YYn$k?VsipJOW zZb!n0XlX5c>;$I2eg?KrbPghXpwM6U_OWyg)>N&eQi)pxmqi4Xi!1 z!;eGg5}n2JJqv0o!fWgT3p6X%_>AlOEpLP-J9z32%qNVRpe1_X%|O_P3^5 z=CU-SpWZCgsr{t3WNr&DZ}Jdjf7-uQr+^*LN@-1v!~p^%hb3xou1_eo&}q%2u?dC- zb1^Iw_C*eg%3oTNej3}t9}+U$QoUNrj??z?Xfr4Pda#yGC44{)H*M%$Vwo6N z4<&=PE?ws%c(cFJ06Mre*ejvY6@Cp9Q z%Rl9IM;$Wbg1{{7LM#+wBO<N;ZX@`+1L-4Y^c0SE4!N|-lwe&i^6PE34Be0?N;>a`j}`!PqU5vS z>!qdUSBOj-W;*21w~4QpLj_5yz_93o-a4G~9H!pglaLx@A38B``bg!CLI9VRYs?gb zm7oyQ(IhvZ%6sVy85@A4_^6qa@s}Ls&LUJyuE~efNPZ>9@*Z{)e35Y+Innm=3o$q) zfu3$qGHtp(Je}k5sW+~m>T2m5F2ayrJvW*#z9@Aj^}xf>A?*5g5K&cTL$l0RkU-r) zJD`-q@P;{CX9%Q)`qQ~G6jr~u)Zbv@52IG^g8Y$3{Nvvke@cG8hA#yEKt02ns|3q< zW7!edrkKhF(YH)}_dzAIdd|`K3A4I#(7mL-Ys#y5FIQ}k*kklXfn-f*_ikIA$K7lW z+E9u{Aa#AW@$zO_2$_R^!3H%YbhAipvw2_~F0~&usG0~EUsGb>RtISTZeUZ$lSHhY zg}eMU(Wa2FbL1@uh8iB4xKPyruCLAVkSy1GW>Wiu+?D6WHdvA_`M3yp+%oEEgrH7J zsC*im+`t9 zx~289cYOn6eX|X37UsvIgo2?vQ07O&amcU$=)SZ`D{;zz9aSafD@)XmGTg`$5j5b& z1CdEYP-e@xM%N3f;*T1FZ>^b+SIo*M7J80~mq@ck)Yy=BrnG65hPtw@wv$)BfM*5$ zXmz3ylFIu7$N19>KDd7*LAi&Z@TPd(UES{p6n$kVc8_2MEX}%N2grk?3%8|qRX=#V zhC?yZp>R|qk|Vjx?r+7B1H{hTYnw~x163pq5!=xqk|b4TG%xPO#xH-H_IedXd_ojq 
zt=%K3f^e*sbcS({KEuC*%PI)&qP`G#qesdjAf7>&GQ-$(i^Gn4uM3@m2wsIX4zavy zd{j#miOPctxJN|(Er2XZ6^ZG!cu^|UvsW9>N9c#{XF?$}L3Xg~p>6lM#OcP9LfYbX zTin0_AT@S~>j`ej+-FDH3BFg8vyFGtGoD=+yMtT8;h_<5T4pD`-m!Ru@hlC4QR^+|2=UX1vaYjlW9mPOg(Ms*&vm4nw{vd<&a7q5BXOw!#$t7)c})>=L}o~~VS`vIc<$5FR`@AI$>{T`vX z`?o$xt7LvYRD1^$BcD1u)#3jDB!awSQa7zeY~E_3wKN8`8>oH09C~252mK!wdZ$2` z72uMooy$(9y9^G>7|#?8=30K#yWMNi(rQ^9zW=AGGu;p;h<6)0=N??pm=p9`=|1bDZ z8V0v2p9x<*99u2seh8kcI0M|0_gXo_Z3uhhRA}}-;lq!}ZSp!>8BP2GTR0td%W zXrcLe$NPOmYX$bdI0v22J1&5EgMV8EF&y6D9tZ!bZ_k~>iU0@aA={S+BF2&`eRw!8 zj|gwz{?*-}=nwWcdhFPHHFJO$kna#-zfO*=X7~Sp;s3lGs9%FDs0Yi7*cX~;Zz{x3 z`6=+7{;VeWm;8G@LH_Z8*l%m}00lMTA1K=yGPhh-VbvR${bWEsHcI|1 zj1!Ivz;yyxvL2PDF;hTI_W3J>SzffDgxw(LHC!LCi{R{B#BMaE9ocTErO3Nm>fj#d zL45}q!F?*rAo)vQCq5T$`EOrDK1oi7wi}T)*S`gOn*ejcTod?zULMi%91r=Xqzcm# zc-ao^S(~%kJb!*^76u+Q*hd+*+8!O&4pz>0`~W!0o@*f%!CP{2JjvaC)Q=p)OWs#? z9ti5-HP((Lb4ddQ2WeZv;8Xo6$*8&h?;$zB^a_+w(MamclDug)p-j`L41fAw=kZV;$X3`nA9q>(St}|W`qzf@tz-iSp{PYDIt=r#lHLeoMb+_>6 znp#-YP9u{8Ex)n%PHs}4!M}&O*u8V&+ZGSuN0)tTBmu$IOV|yjyd0Z~81lWUTJY`5 zUSLLjysm0BRjMj7CjuakIGwdrX~+hV&-tF+{Gf`bbT#uk8MZ;p7+*){v`?|PrmJgK^a)+RMsw0jD_EpjGl;p<`Wd+ zlX^dj(9zs#qqhqE@$IB)W<*VIE<^)72giqKl6$1$orcyK1=hcKTz{fTO&_vT$Xn|_ z{V?6Srg*blcf?_(*`{ZUxSdj?cdmw*=A_>ddDHRgN5YkwhSERA>johijPro)m%DLu zJh0|11x3oE;Knzf*~Io?%59I`0u;LZCo>BLztA&pKPc3(7ieZ!xku#^rfaSHI&7~Y zNFy|`qLuCAYGm_acbxXrC1JtM%f$Du{k|=mp^$vSPSPbbf#^9sFGTBHOkLUqG(ws8 zJMytn4Vtdz$Evgn03{R1pr9+K3M>k)klF?hq-UW%E`ND^$#xn0O6c*8=eLzUgsyy_ zDb*(6B3XNx1Y&D{ZRh>dIoY*{p>RF^r?Z}Fv&Lp4dqTXJ7y5V9n-;4$Ch1&R#@;cr zJBHn660ll~x$m1;<7X(ZCClpx=Tk?Dbjt?iZ@Li&pz^x_w}R-h!(78Hwrd4D?mNRZ z+viW6C;oj`-&1Fl861f7qx^8)`0bt%C4wQVuBl|ZDJ?(1|3O|X9F>SFv1@Hi3Y0n0ao9D^fF52ZQ#&nKnw7J zZ+HItSu3hyb_e;W4+7(JkKW3y%)kK%fZ7yT!9~(3-2G5jztdK_Ju>{}bN|*%UlhEk zlJw2@?~MYn7jGFa?BJgSl@+gk7>Yl?jsKK18k1SJvSMQJi5=^YIz9x$Ph0u^>S4YW zmz{|&#Q`;~R?|@;FAI;uuYFIl&0@2hIJE_`Jwv?>@rQ~crbHMPq*bs!uGv2Pli$ZN zmL7tc&IM+xY7>#aoxo5#Pu|acfQwox@0#7r30<=Txe=oqkMs+eJXqNy;e=<`+qUv0 
zgyge*VB;+$f#<%Mu7(TeGZ-Yb-a3PvQ`#@R@jg7YnxeLFdN&zOElR(C?Q!$^%H>0Y zhB!v(rf%4ShyR#}_+gvj5-30cQ6qNVcz8R*@cmkt-N9MenqJWdnsk0gf=aakRblA3 z%$PNvtI5vA-Og`96HMVFiwN9mU4DRGEJ3}TLPkvJU3b#F&GiJ`W!750;Q#37bJ@vK z?_+@>a94=YO*d)|>+S$OYMX1{-)f4>A20MDwW9b@qY8SGf-tX0L^B-BC4;vR=D9ZC zLv|~Bs|a0}aRaGFO9u55*M3t@2c}})!F@o9PtfOuu22uAmre-fR1=*G31b@x>5Sff zQ(F<|+W#dt{m{f@c&ha6kdKQi{mn|KR|+g_oQ(Z^F%l-(t+>n0NK_PvK& zC`Ih`$lgUT38b8p=VIlvtuokOL6zwNGOw;TzuiXL96`r2>BL0~DT0$nE(R~l6~kCl z*P_DS)6{vxIX@u+#qDY^8{ARPpCev^F6aW8lH(lA9v2`XaMusLwT9QeT)mCCcYE_$ zBOplpNBeYH*6TsMbem5N?x<&|1+7cp-F9|;aU*uSLbo4W^9RC@zac@h5bs4|CwT zi-{ajJ>Ujzx*nZ<*`nw|ElH7_a8`VC=P-G^P!r3yvBHvT8hVj;pVBC}fs_Hl- zDUF4mJ}Q#?myLQE)0MQk@P3bQSkzuVnEIOU3qa;5#+b{99d0UJ=Vr_PT#F?Ns5 z-z7!8zA=D~S>8bYhCi?3A+g)}Y;@QBxIUBy(^Qx*DEX1otXjZeVSLiY&Nve|`0Re0 z+UH3I<^&n!pltiHpWq4U-!D!8mrL3UE95v7q&caF_uDl`3y^OyF)@vD!aA*JZ=RPK zza3@X0M}-VUevW!sR7|m^54#9@X&ZCi&}q^?ctC6kmaAvmF?lHpd3!;-T%#2dJng` z9h!vdtO+{Ix5rV$a&rDUhyghne$Hml(;3XQdoKsE&%9rjEa)7!Usk1&Nss>@s#@<^ z32HFK`b5|2Gc+3X$rBdSaX6Kf`5 zwPPQ6kqEmZ?#uN0F;!rtAGI0JSdXRTd=gMyeEhmou|Hzzd=gw;dHj)8@y~BXRM#DU zbmAw)W&)2(5Y}kMF3??vpCq27%a&N{_2@x8_>;Yp(xi=%<18d`Jx8km}z z)6qxtMyuS%AL&T^{FbrxgX52UT#r67)?4K{{wSd4hyt{&VeG}P8Rgwr`T0@vn9K1; ztO7?Qowfy~@!%0~w4AbMj<`PCgkMOg{e090Vkt+F>drrBbM=)qdYyaKWh%sx52cCO zma26fra+JS{XnMGKJS;SJ*dSr)*fOGDFJUTmO3}?yfbq@)xO^kw~(=sZSq`hXHW!6 zW_%3oNuc(t$>n^LxP!&mAZ^s=Lu;3QYBsqaO~1G~@gK~~=vp5#;V9v$3E}h@E`UnX zKz?}C;ySaeF-t9(JqUZpZ4Tr9d6)U5OJ3LO!1Kc}_1! 
z{~BnZ%QcL3sg}F;a}IuwT`G4Bb1izlU%;B=1kOvZRBpHG?JOK|$&+LROk@@}^fdn( z$3$J)4^&sR^@d~TuIp^CBi}}FS`9i~1~?Sny=}!I!(}3H^+|XoWaK4t;;qT?Bpnu34K@WIL#ID5&Q*|FagKt;=R6UiI@5FAO#%~GkG)2dw zo4*4O1y0__jfan4;+&`4-#Jb`>Bbir|0EAJR!w)q?kUv>og~;z6Xz&LhTnugKi6Z5dJYExclb77+HjOYy^z=3uJ*hy?Qz8Rf@sPY?i4 z&3ZijF9&T&G(_B$j$_ZhcaOgqX$H9vN{J^$j6~D9m0k>}B{Us(=d@b6DP7j>HuX`r z5Sz}ToY8h>+n11->U7a@hqsq^{5&{DX2&+8R0T2ZbC$P(_eq(FL&?HBQ)I?3@brh{ zl_%e!`GI7Zyyn<)lLjP~VM#XgFJ_((qv0ZaT5!CS-#9wmI7cS#@ojI5VVnANO39+J zUgYuXz|&WcMFkl}4m8;~@&esH%0YoDJ@+2Me&yufUvZm8m-)CYW&ct#pne(cjq zFT4pKrGRxTNMY0;Ai zgZ8S{n^ob3ZE%lzbOUK@6z=KQY3MS}_{6*G&-Hpy} zbtJyfyomU6*X~1Zt79p*NcZ>|h zz%M?L_NjR5@&C1b3QM9jnl5o6*DGLh8H{dNU8@1hO-hUsBP>n_3LW*K_50vEM5cjU zYkLeFMExN#`)wHuCC_-3Wu+|N$qHM35acM%AHuElAY1DT(JYR6k@=L&Z=W|1=R9J_ z{{r0a_#iXrb5P6zo|F#E zoc}sMj4io~>Ef5wsOgI>bOUu@P*lS6@mV`q9P#;Bf@Dd#k`nELitT9?!G66w`Kfl3 zx6rOeuQbni_dHyAcNcT;jErxK*OKzB!(C)JZ@&B`omYn!=Z;q~8Lb-eUn&k?Ne!P4 zq0VYZo?q7d5Kn)e2@a?`$xGuYu+!lIrP*XCn|Gk*!;ma!4$3Q{gEC<`bX=6E)v)9> zt9z?5gt;lx;ceBAcT0DvT!5wxwrQh6H4Ha4Y263bhfch_{NLsT0DW2I-%l|)ktq%7 zJ}KXuZkjfQwa!cvM9vbe0aN0+MG{V9zB ziz2>kfF!-uYT#!%6=wB}(sez^MR7Jm#j8BT;o}dk*e$~lMfr=Och%`*_Yp-)Q#uOJ zK&kT{_jwB|ar7X6s?aDj>~{95>B|pFL$mSz;+IS}y0d!TDK3r!q#AwBSjcltI_BD_ zD|h)#1+@{5n!z^*+oUZvLjcX|m_wQovO}Jb>0J7=D@5xj^s-5CaEwbzZk$@uKeh%U zi${9^$=|xvlGOdo38`xbTCN+3x*q_0CM%FFqqBmQQUS=CV0aK^^)v})@&r=bcU|KC zf|LxTyGRCjQXEjXy(SqX;@r9qOzWQ<`bp#HahWp<=Uw~j>{K6?u>{^x*-2W~6>uVb z3|uMrS(*1aMB+QpYUQocTGo`%Fobs|`mk|kQtZoZr$`cpF5nBC34!WwZA%nehu;^X zOub+HuWf^qyox-YM?GveS*xIJgBFu?cFNEfrhu?5zqOz|*T5?1nOd4J(z-i2(0$iQ z4|~M+y$_p~qZ*!O=mP1K=WzNgEFYig`*~+)D3Upqpy|zVV}M{(;^43!MohCAA<9m2 zt(L)X9S5Y5SIc}~u;774i!^G9nM+n**PxJoWnquPw@C)#zL3ngt%6VQa$g9@*s&|)dM)<8*>D?K5w&TS-{ zk>zeA1POpv_O44)2qve`T#H?MmwCCNHYafgZvMGiN(8)5m*=`dN$NW}ZdL5HoOc7I zNO|eA{P`IpM$AmF?F-bU6kGXXo=D z2NacZ{dq}u1kYwUJU~vz0y8!`@qS^f?^&X=Tx%E|k$tBJmzluqB?L#Nu}i#9^+ulWce(oPI_UUSRhF*fuIwO&oHj=k-$=JGUY52E^D9w#s1 zk3&?2{m!06cj+y`cEAl)UlVjzbY_$)8V(9Tl>^K$>ZTBpo7=TY<{>=ZS*)xTuYP!1 
z0m1!*`di`N??<&@ou4nU*I9PvJ*U>M>Rq}PXdVZN$2w!x^3M!F9Xw{aD?;D=YTCCd zh4tH--%ScK3?egRYyFEMSrxS$lMz}o3AmqL{oN>d*OP{z; zIvqAG4eA)4DnV(La$%;oCdOoUB*KzNq>Df?p2okojslPN15J;R;u9T1R%T4(r8(5& zs1$AnClNLd!PCK^%WfxbOQ!a}UxIO3U}U7GYlJan5RJUVOEkcWlwO9re`G_l~JXd11d( z*M}CA6=ZDLoGBm=o5m-K`VmMd6|$2&;sC%+==l9MM)NYPRe|5F%7B@d6B$1t1Cy@A zeKURUtXJ{&nqs)CeXGaAe4Frx2TUCKy2k&dM+wBRIxkbo(<(fOXY$57uU9GpDiR9s zvbI*g2*&D*ili1P^K2-d_k{>P8wKme18CT3+jdGn!nRUCp+Axp&*^*xff%~46`LY& zgvJGmX0N#;y@+Wai_yu__`+;bL=tL@;+`Q-tNi;u>B|u-r6eug=zN_xUH6J3UZ;wCK>unIxJbVCsq) z{BiO?PVWzVEL7;6*V2CRG=bLXe3AiAlt3*jOPw5CD8*K-0G*9FDpi-<1%?7kNR-uR zQ1-up@b#v(GIo(_NC;ku=}R39xy;$<7I#~nir37Am-va=bMZ&9ehGzAS9+rE60|<(>q;Io+dK@7^-8^5 zsZFSgNGgPO{RkXCF>K(%!OqyNOx?F9QQMYKV~RvDP?EUs|I5)!X=Ucgw88DM(9YEA zmM3qN%fzRfG+LchFaD~1QeMH*n1PNnz|mCOU)D|Syf=@YdecM z8U%WN6u&7+!A)zRIMAwr&SWL4Ua8+Pb49Q<;ePV`ct(ikFI(0|5f|hRCj5Wxqc#sk zmlC9;Y zKP>FNnBFK0CpX7q$|o`Rpo84SZE*04mFteIJyTbc$7>C|D;XYrLmAV=vGsWJm}}G%2%Q3)WZKBWP}Ubava0 z^_=Vw?Ezfe8`%%b`S~NnRR+F#Lb5>Snzwm*Cn)s}$d)V=4NV=Cfb&oMu(0pwF65Y# zVVSUa#DcVC948DN?zVjc^`WL#D^Rc@?0@c?I*I6&9H7N|Hn%%9ejeX3+{G^oFBVUC zrnoXcv6AbAz{niz#1WECj9Q-!l5#2zcZRL2)ffDbRBwvU zz3=!VCeWPpZfz4Upc+!UM_bCp`uuT0iMTG+Ypk!=y;iM0EG-m%-e^UWcVH$i~ zXIjaqdhhK$Im>a@@20+`$YOl)Pg8r)%Iu=aO*2G1Ad5*DiHnn>V_u%ugN1!lD)X0% zi;KllfA4h5+s%6(qgeQD%6|8zF)&=#<$Aec7rt zOMjj26VLBbVojhQ9!bZUOaH@FA#F5~^0Es=V*{JRRzKGUmjfF2x1{D$XaAHGy*i_+ zcUIYIKVv9={V9al3B$$D-b9=}lu4Logh9c#T5Mq2_FbBfNqyKQ**UE^V$Z}BWMpXG zn;idfW^+a3!Z+kb)&EchK5ovSKUCokh)gOfXywaOwek^CuD^N0yV%$m2Bl<^{f;X^ zd6QP z)S|BO{EWAo?&E0ndHwp|RDN@$hwl$fs-w%=JH;qsO7}(?Fnx_1#zfkZdRp{|QfBax zD998*o8sPp|$S z%C4}TjOCDQd+x8ymB~n-o-D;4L?rOcznm3eY)--xmG|3z`d1-RBAD>roSTWC&XC3C zCDA{*P#`aR?tuiM|MwKz{6sBM`ZXbOZ6eyWo(#p6G-aU}d2=9e_DmxBEJ~EP%FThJ z^TfC;19G2AUJ9+%T6%J3XG;qdP7(79hKJs(hs3x_%YJ<@-Z_AS$ z_8+cG2(hDz=KOgCO$H)9R7EDhh&M7e^<57d{4Dj8=n4~?e6G>|zPlP^SnB=|&08qh zcgb(1knXZUx;D zmC={hbpPGE@b>i(UQQoh!#O}T}?tSIJ&98ug7jJW`Q%$tlC`#Gv1B zmQlPr3a*QFE%Tzn>}_c+KP 
zg5~VBT`AtbT}G)j8O48|BG=WM+TJ@|A&D1~FfqVlbUxIl9T+GrY3iLbb(>_!VRd>- z9-=(HnchXj*e2+cargD83SFVR)Y(H2F8g~w{nv5x^z2^$j9C=XJ^3c+BWbwsNdm?! z84_>CgL*lwS9Ieq!O%Aiz4Mj#`{G`AmPyt;Bs0|1y&j`GFZcV&pg7($^oQc(zlYa1 zG4Jd%&ETI7qKtZk%v^mI*vU6Z<0g3aV!W8W?>!blyvQAfttpC%p^V#nZvLC}UzY%* zVr)6PRjh7=Xh~rR4|(eHS?_=|+LHgf**qN&QEp@MA_W{|u*ND&1pp;~EL6exUx_S$~p)&a1xEiB{>HJ2JLS zZR8Pocps$*ep6g&XT_z)|J->)yHK*)r=e-mmkdSI_AU#`-$uCVu!<%L0P4ua1Fghc z$i}T19|pZ=lZZ=GEoX;Gqg<^wd6U^zS(++(K~VUwn@qymEcOo(f{MaZEt5Qq=`p;H zteferRIVeW-iUe;S{A{Mlr@ma9R-_Dyq;{;l9yaQWt&_BO;N)dEcLynrW)?9;t+ko z>B5=++}>NMeQ*Br7AaaqWQMQN^2ndbm)9nhm82K>n8!i#`yGc#ZT`=>-%vljP9TRq zp^Ac!wjX-jq@1bm_{S-Zj=u7ro8>hW0unEY9beW}+5K1?@6hO7eYl+aM_(l!h3s3I z-N1^fPkSGp=WqGezzTv)@Dn5R3f=}KDjB^P%?_Ekag-; zP-WHm-UJc;Zqh5?9pS^FOqLa41fzu%2Zn&%&n|->8@)k!b{>8LF@v>Pwrb=C$5Q)% z4>s)dJ$2L$Bg0jq8>55ClV7^vY{Ztz?r~8;)^TF(!NbvFNp!zlnS6{!cf9wIxxU9g z4t0!-Wvq$eeAq2{I+Wb}sS3okMa9$A!RZO5D)pkg%gTjqf$OtOo(_tM*8oDs+8l=9%9r zJLjI`_6IiVz^!Ha<<8&FA7Bh;Z(5wIzT`5R7!5bm1$dr)UH7i)1F$1A?@|zOXdD1I z>To9s;2HMOc~fl_H7TvQkYKd9qMOky+Af^cJ)5p(8n3h-Jzh|+$ZFU2*T`}zzw;O| z2#$bSZ)pOyOhvvzNV7Zd-)?{aj)=Jvh?@*9F+vcM(Z)m|y~CEA)~SNscZt92FmuhT zbw;;E_+y^nf2iV-dPpwY6Zg6bxR8$7oNaP;Mz`J#tqS&V7cR(T-U5FMlISdYQ8p*1 z!jro%Fh%K-;%%Lza`U%gxlh;%>g7hW3fihON<}eAt8i=0sA{W9OR}@ppgsFFH7Q`d zF^|P{s@50Xp6F+wZVpyF^SRuQN-fu|NB@VO5Ak$1cQNPpA1Zy{=gkew)+a(N0#jk~ zyUAIHRIT}s)lU(ntPt$5Y8N_+K(3l>wscftYF8GSyAsuErMLZ3CgS{wqWzvK+r;W1C- zu*`sR-U7XEn}zN&xpRxCc1?sd-(Lr5WB3exGwL^aVQ0aBj_0T-47tVbO*e>X^ub$4Kimi>hiYWiDd{W*l$jgP7Z zxqi~mQiI{mOv_<%_0K^O?PlwSQbv_QJ1Rp4R^>3st=g0fpxt9j$$ zTXzLsEuEG9+1J_1@qb*u=;&iXi%e1cP@Wn{r&ktc@;MBO%Xp+hJDlTZurMN#%@wOy zvzFbY6AWJ@1nSHWU6Zc-1+Z05CJ{*!tCSA%%nXv53uUYF&-1Tg(S9-?Sv?=&x~qM6 zuvfQMXx8!KA?=xtctLB+L*rWkEri-kZ@Jg=l@EqhDB`QqyAo#_*^Er5sD6|S0bbv% zdoqy;w!mz6-i#QFL52Go_lujehSM=`3sx;ly*ZxA;D4Qoqc9zUW1U1GR*927e%iEO zI!?o~A~65#xW+OW;5owTmB?L_5AW>J)9W;=p}+yTT<@61=?lqS;|>sMS$m7R3-*t@%5)zeGrSlw@uj1Fev&A^46M|P-As(gV&-0z&%sM$-T5(ZY|pYzRNem 
zms(!@@5XqV)Uf`x#Lny9up3?A5KzW>gT9#7|03E>gv>0GUNrT71l{)1fR8mx&TH%e z8aA+W(#_<jf_l01FChJS>J1%w5@t-?-RPC&;mz(TQ~EJN|3kgcs%+XbHH+=G zlFBZ2m#?{pZA_b#nTKtXE}w=prf37+O-6;?RfrUjxJi3E7I+K;RY?gNz|nhwUe=v? z3dDdKPJL9iZBoBC@JO{VaJKh3>9lYb`r>yLlN*a9;j?oa<5$fE)dISgXt>O%g>#pi z?#;3Z=dmpt_y2zP9^V9wIu!F?u3gDBDAK0ZaS+_@Q|H}b=^A#PGz_!*U+jHnRFmD( zXsjrpsDMbfP^3u{P^yARFMJXZFnO*;9=lC;RQ6J-~h(+YXjIA+~M3%r3`ifE~5?$IND?(`FLWUdP<8pQtJMBmQ{ zm0H|RR$terTA{eqPvKBw@vEf(BT#Ig7=-6LqE%!CK4g*2MPG6V5+3axmqQ_XexE6c zR%ru(w2KYoSi$>6VpjD1k7AbhTYHI6&v3ZIbHXTb)Vw4ZQ<=ML!o+fnA{YA^mHEP6 zlvt6bP5FEXdHHpI2)SY&8CkkgxCWQ`J#EZIogIj3QimZUX9WhqCA#%ttR;fH{tz*0 zWni(ut6Z$we#oi-xPV!^eZRBQgsYta#8P?f`GG6ZsquYxZwmRwVd|O&3wT#fh!jz9 zZ1tvj1Lw=N{NaKV_-e;MJyY%2hx2Qv-Qd~FZ|#8ujCFmew||Q^T;gkYzhTr?)$$$V zt+@2c&8u+X^;JeTqr#=5>{}l$H~Z`Pj($>t4hGL99>~n`!ta-mQ1~-ETOAuSNh!&6 z%#a43GiHVy`#E#m`Hg1fmp%AyChr-#KdnJarRz%VrGeuU6y$= z*PMfAs~(qn5dqVK&bsHYRjWcEZoko1jfe6-y3jNBtqNk7<}WQdV1jjFiOr^t^L2#D4Us9{pVA2&C} z8DNC}Tp(SFIG220O4r?oY2o+yOgHb|N$OZqJ!f2^y{W;N0QvR6TMKQAV*=UG8;f6b}tr{|4qFz~sy z)Gtf z1c~G2NWbDkBbl&nkmPjqTQP4e+yUa0SaP{7^PA=tGpMS|1R&x4Gff7Z0M*<8JTUbi zI&^RmU4s0Oh76U28mxdMLWOBn%;@%C37|vJbxkRriIT~~2F5v*Dc&xB28Qh5DjctC zYH>7X6esy6DOYXJf|o^w+a1?WzL3LNH6`lCn1CzTg~x$Jvd7J?WH7^kpod<2+UxNM zHOwcazTw9SP%d&(>1EjD?Y4&o5m9%uXL?M^asIc~l#U#KOkM1T*UA34z4kSL`LjQ@ z??J?UKeS)RHeoM@V0~;lPq4l%n20{WkT2}Jg2rQ_>D+dndJ?dw(o!Uz^&0bNU_Cip z@`Eqj5cp%92AXhLhKl>?6D-G0X#16)AVGVH$%UvEY5-w84-nB0UP`$+F~O2A><$RlD}=T`GV1Gr(Q8T3aCFq z8WXAnj_(Ep^EEg2aO534F9?t)n(4Rio)o+GzEKeU04ReJsYpjX=M5gvA9XLva^X*|A1sqqwGSc(i(S2@ELV6>uw$Rkx8>&12S{U&KZi zF$q;{UE=#FRwu6iIICfhSP?@OZTgHW4=Rr5^KS7vpvo5h60>e|nWjmalNV5~Dilgz zjEx$04V+UvS8Vq$#aaZ)ip=P`j<7UQx`narNM|{g92x7?STNZHXU!8 zOW|tA_KesE>c_Hp;tU3g3G2S6LN7H6MfyQuT&F%UyGc@SvN%52yjt*t34axgQnyXd z;`6}rCOh-p7R9`$S%3_65G|(WlNqxZ7Ut9wR@~R$lkw}XpP&(~oUTA*Tc=^R^hvxz z=->JFdfs9A%|M+CpW@&QZi8GCY`f}aEmZs?=t;--A;+@$ga^AffxwklPCcwsXZLTh zw%)%H`fTcD6=W*wN4GR;GamA=0AzsyhBpXTyI7@S*)L4*rV+yl?S;MF*8Yw#1#cSt7eZq||)e 
z|Kz)mJsF_~(Y~>u9~_$ZdF|6^_4*S1u*Dmu-;9Lu+hi)EXIalgUFIhwy?CINZQ(=U zH;RF*hsdD#*o5F)jDVY0gID=Qg2v93NFdun=rrKoy>$@6EbnQk_Vl}FXu5cn zk83Z!NBVa+akZ$iZohsP#Blo4(SvB-x}!g27Oc$cBxUleOdeOrxs7}ykQlI6OD~KsA{7MfUtGFdu`tALN-um=C-hOyL z-X849MjzWYZlEgK-hrfv#V4|S{<7dS0d#v~tBndsvspM48|)vX zY@dnAzZXk`55S%@{3Rz{T*NCmP)Yt|u+!^7#w+)Q)tigPrIFQi}pN zs!{Gs6jtof`{>P-p1i}(ao@OKqYW@Z@9pg+dg#|9=+jP3S8A*bxP|smdy9jWLH}Oz zVF3PX2k?`zU#PP_I*W$^qxq%nt!)8WhX|-b`P~;NLVVKOpU|TI$HN!uq3laQ$_^#A z1NmzINiEf^Ge7~%BlL$K05x>>j_@m6^hI4u1)L+8RROwEe)$)BXv{wrMZkK_t-V$W z%pQCIF&d*(VBs`PE8qbnqPSvTc53mazuTifAi?4(;gKkUYRxYMy?qA)!81n~z&m~U zh5|Nt3r~i@K<~nyKKf!7Woq?n(RF16uS0((G(8m~s62OowD~UpEerukG}DWTR2`^Y zUlgt-Pi(umbLdXy{#J)({3C}7{zL2u;YE-;D#0WisHBPK{%fL3i(-I0iT_f~EV>3r z!<|(DMN7KCroH7qUj^q1JgLf=Ykz2sc6rk`V zd^Z2DZ-vsYrstJNQFt4Ht2bMpc2R#@?dg%7kqzHZ z@Xtditiwx65Kz+gU|)@}3#i=+hp6*A1@t@IFDd)m1M)!8kU|a2@yCNlHPsu^^6hHPx^bMUBx|YzMTy*aj+W)~XvOXa>PP#rU#QX33 zkaPKe{{`&z{hb-Z@6&_`fuHp{-1BX3# zCHI9m2+Y3H?@s`Z)tok9qtiH4i#@P7f#!O*zW*Y%jQ<15@B2ab<@VoKI~69p2PoeU zKwX;gqv$E3@v{N7cYojxDffM`cK&-S+c}Vwpn0#&@3Wu+1n^7#@k4Muo6eT(kR_0H zr`vG#HB>U}V235JaE->$*EbAt8(43_AhV1DZEFxe7C0^O`%2l4}HQk08-eakPu z*`W^j9t0!H2D9l|T-np%O2*Id4=8Hj2aFO&nloAg*SGH+??6+>G;|WE;|zkUWNPO8 z%Up&+dO(V)ry8LeEGoZvAIPD{Ctg}I0~vszeA#64;&UbZ`)Cn5trzjl8(;4V%yK^m zT-SSdANfs}ED;I$!1uMRKG{IP+~7WjzuoXwC?N$>JwY02{{^D=|N2e<8|Rq)1-D&; z<)juO%IN^RwevxDtZ`P<6)#z$Pl5(;-m>an}2RJyy!Q&;>xo*#IykPDz+FI*g@euv@#_S zR6d6G_n)lIJnt_k^EWsOzw?&icYxa19(cYet(@fOtt^bGUQH`e_A}%oY@U_As#>*R zpI7*%`#twz@Z|^Lzmx0~`L){j0oto6_)5cMLYrNFqK0yommxvo2tlVL>}tjik6P`& zd%0FjN98Jl#o@JW7rySX*ru15DGqr54nk13yh|`?uKKGa=koZ?yS1|3a^1hNMr~!? 
zu?w3xi`?vv^#16v2o4KXioqJ7NHc)1(;BvBNAUx0&Hz3Wf$w`2llmK6- zmKtEuH#xRwiNcXr8DK}E8DMziqaSlO6KW*?ZJ&-rJ;Cz2R*>1@8)$ z|82XEk3!>iYLvFi`>Hz9sjonu+eY>W1Od4s_8BSh{&F?_`0b=x!EV$*=Qb&>%A51< z)(dh#8XjSk!Maw|K;Bh(#!N)VTw2N3Pf6G!g~;MWLvj*t+~Z?bF^%&c7g<|qKG8x~ zc`(m)gd;UAM{j?Awr;Kw&?C42av-}V^GCYZJ#*MAuT1UsqkAv5t<|ztdt$$ZM}c!X zHg9!H*-xK8mOE>ibY`%Js>1vYe_b$DYU*f>Wp7sM^ibOC%6qNE)GD?YBP#@zVkZI| zfWoszPf{&`BC>SU!s&zl;q@k}*6;0V7mG~iaZFb;#`WibavQL-e_aWyV${cV7X!<) z?A1kTX)taeK~Sx8>s%SoqAeS%vpcIkWGPX{1fVRu8&fj$i0#GY+ZvKd_gFHOw|F9y zuYVGFN;mjLO8mt2TY zXyqAtXP}&$$EC#C)(?02vZhO(d(4L8UTLIq6ib$#Ph5p^LG0Ic*iI2G$_FN(icUr= zfX$0%UcKeee?!Aa^Kw8YTxLs(AR-eLCoU9{r6@M1-^tY9_Ruor6s`G)G;Fu)2uq#m*izJnScek#~ zz>dVzNLd9J7x$A<4@Td36Re40PYI6UcMAwfKEFKekTZT-SL&~n%t~rCU!wLf{I&jl z>BiF51?j@~!lcBazUDD&?q?;#Gh{T?(|HzL%O@T0ix+NUr(=?yq+84F9`DW0Z;hes zII4i6G)tL~@JbjO^HRWug-w&(p4N{by=YfgL}H4{0=iUc>TXJzR3>#e^j-9n@%gJT zsSPFD^}!j4_P!n?`V183c2i?ox&Kbie6E`3$^H40g7s0TdnaoMWTW#=6=ews5&@MF zidpBEjk5iGq)-IRRuw_^;q{RxYs0`+s@6rFG#u{-`FRRhyH>F@Utf;ICoKC}76X;j z$qD}|iw=8nMiLN#Wy~Mw1Ots?Qv>PL6t;Qp3&PeX%4GF&UaiUkh2RkQ6-*ebT0iLk z>a(%N%#S$G$*n7)9<919_nb5{prtqByN!LP`6Xr`4%85zr^(R2sp=AT8e~fJ*NIt& zQTR3lTGtEW9Q&#k1pxZr=^bMKB)HTUxsVzVfDW*<{=&QR7m{AHc zhnPYc07=*or;M+>JSA_K4?SkJj52WY=&-YFJe*#%A-F2~q|*>0XuPZ}G%0?cVV6 zGFX%r96RpVVIC1x?lNRmYIe_GMDuxwe0Nz)s&MYku@b1{Lky&^v|Q0ExnYq1E=O6) zJH^AXI)CMLlVdaVL`7oy6r0M)=5Keia^5Xn)U4nGYT$E+7c&%Z2HR)1mwhaI6OOgE zYHyaUxPM6P(~k6~3_DJl(RvZ6%Gxn5ixGwCDXpQqC|`L6+Cn;jT9GIIMl@gH7v;pm za7NENl&qrI-;1^MkHv*w{WyT3Zl2myd~`2k@D+7}5YVNFf=wPgSP(Dj%)pmv$9W8^ zvFd)YOKQ7Al9A+oX>Zw9Z^_54x0c6eW7mMvPw=~cg-?g&6^WvZxk{DADMcAXwu;ms zeyaZdAo-7|rm4?gUkw9u0cDqGJ9X2)AHqHiD7)5qLtHCqLg$pKzMsW0d-tTO$dO>n-z;S zDbKYZnW)w1J_dGh;7%|i^mI}5>)r|FK{D#NgjYv1)+H`MTgVgdx8Ppx3TRGC@M?WM z-b{GwuV-i|i_h+cIneN(8L@5Nm3m^~Ra;+}I||k;H@i0BcinRSE{eH}2RIo!XeW2_ zSW%6=uaN_`Kr7+Fq~=eoKCdvG0YDA#xKA=}7p=uktcA!#AuKIbZB>xFZHXLDfDR!S z{|fPk$hzt?8#>RH=|QdUlY!K8^-KH?ff!A<8=q4Yi*E`DI`%B=E{?|tn;>LMGC4<` 
z_-F1;Yx`q9aY66C2$_>ntz#PekoSoFUm@JZ4OP5E$pr6otYApqA93y{ddK#$wr^b}+98hD zj>o-97+b#@<=reAY$br1sq#n`-;v|#i~PEas7^Y4=kDFRhG3&fPQ>!Ac*@79uqY++ zsIA>6_tiRY4K1n+@o!#&)gRV~t~vBDnP0)zOFWD=#Egt}5wLu&!}yGvrTW4@M!f|( zl%_{wUOimD!?bABK2y_oh{^=d2)hC<0Yb^Za`7< ztVM;h>P_E8#6K-TrIlDMNr3&1<+DSNl-sh<;MNnEBQ{&PCJTJHfyUa#Z6{tSEoV@7t&8f)?5n=}DIdbSlF2x6vp0rR?n-L{l z^E4qKUGixSqgq`@kRjg@s~S10p)Y>$(?q%u?C~d)DmPe)a{_~FSXDh1Vhy8+15WKe zx_$dW*hUf^jxHfC(k=Xcv`GI1X0=ABsMMTrP{KaJKpXKs7DE}(sUcnWSQtiUxm+G; z(9fVbei|UtFV-?q`F2+z?f*nzF-Epi3W2kjId9mOs|WJ{`$8?rl(VBfR%5?^J)tCDEI9RSTfg z6MvnHOCkH)Ye7+7guD{rqzuyVf$wSe>)dobJkiBH%4AT}_EQO&h8mW+;;7y?e2EVc z*{9KKJB_jRA%10*6ayC1BvN-j+pG7B(74^A`w55*O`W zE4K2YM+mZe8f{T`1+n-<*ogJ6Ru# zrsSP0BwH;kq7+*-+|JATmTgr9R9v?1Ki$L`O6Ag12^1APv#F)RIR9B1%-gkI(Nl8D zh*nCKBKo3GSV1;HZ|M5p0SX$pQnJHvg1c6U+XX3oY(DT(_wCA{nG zBK<5$nI4HA_{KShp>o;Y3DnKz#|J%P220+NKxog`A~2`E-i@w9hZu#xJu{ft zR$3C2#)1r^b~jPyXb9Ea>0p6$i}#fxjRLVB)Z&o45(PH$gz@GRg1`l;lF%W)dc~!Z z7ay&k*4xb4hHGUU6T}3u^p{D)_%;BX{$l@Hxl#YcmXxh&!s1Qxt>xeK)DW=Lu;qTMf_|v(sek7oCs>Ql3y(8E~&_5`DB13fO)(|E;?VQ6S z1{kI3&O`H|<6?&P8lYMhg#4uX&e58p)4GzFx<5V8!UUU6f@-&e@{fCb`yF#%$RN)N zarVd~B*Hc`-Dwtmxml+`;5ttI-yr~j;wRK#zhe&M$>fa8{%vhQ%{m$F;@_AIWOLzK zHpqs-0pj61{2EH|JKY;E2C0dX8ud&gkmijVvxDe&<5ePs(j0xL=?JFbNp zsLjeA_;bSRPv|M*ejj3Y=R>Oe(EKl3eQq<7Ml5tAOFtaS>QzpDuA&VW(o0ee$KIx1Z z6xBP=u6>>x6-e+NG{=`pO+Pg(#WcxQZ35KRtG5gGS^ag;p+(W~ ziGL3IEp$ivgF5h1?FEldGMPEPz)PlsqZ?;i)nULL;tgoJ-^$B;u@m&3pvJMKG*ezK4mTF|5#)`v(1WrPiok-b0{R zuLbUhdrQ*fHtY941VA(YBxFWqZ!cPYshd{x%OXIHXA@~&KP0I#PpwG&MCoc~k zi=NFVKLQ~4%EyFsxjj5;W$(Xv$1#5Y&2|=IEpEUSb%So$a(_A3Gr0B!b9z#EFj&g` z-e7OKcMk>&KF%ppd6`!Ul*@wdWTAFoI$J2*VkZxtLx4{L(h&I^$pDVi zaMxG!N3wzoG=@_Mj}N`spE&sIo21=wYO%N5#f~21j*l?5wwDaPbC15hxgDasA)5lG z2M@oe;MK0|vWG({^xzJ3xyc&qVw?U$%ArAz9Av2-7ppgDz!2ml_); zvJF4QRPW~HR_|1xHW$<_pO!*bb2gycy-K1}P1S%K4T3O#Z+;FY4;L+9Z7?T?X@pZX zGQq4Y4~Y;kE9@hoZK|CfyQTg}TQ>KOI(DiLyG)P$>IL1_;y4PlJ>`dK!CeZdn6k{_ z9R=o6M5A|Ds-9#4JmW)&*$qKCP3Ckq#U!GjyLCZ}&~HsR%+qMRn~oZ02v01?X1Znz 
zpDJjPlKV1ob^kSqDzq02JvmisEsSkn#Et?zU(4cwgQs^sLdoM%agf@L$L|c*LgF85 z?(b^d?P~NelwQXO+lq2WGt=Xgt$-Oa#xm$}OJX%U*5in#?2E;t-Uk!xR&6Ix)Hh7F zqOeE-=%xa`=uji5~cvpsihZEEcra5T-vU{<6v_Tn-ZP^ z)wrbV6o3O&4o*l&ewLm{nDXG~_)TUupv*Byw{;9(c=pxtw3zRV!F3yRWX|ox84&0q zR^y^rgU10)*J(g;;{tU)>zGfaEk0qCm#h{>OQ&N-D+u<^C;Nl zgl-p?sOMG~a9+RcR;<3{>F1EGl7Z3P?J%!+R5wgv4Ot6IO z6cz`xKa2em6yOoJIpv5`E(E6QGKvb1kp~Nzo$tN7zv}yy>)51CzOfCRnxz#bkK8ig zv9G{Lvy7at;Lo}r!2S2xGIMiW06NTh20$tweqpzQk#gEMAji@m8sz)PQ(peqiT#*& zntyjQNv!JuFw%rZ6`46R?9FwGxdTBUJF@vst>*fJt(Q8V;JnLai*5_K*28?pf*adq zI@@zPF7xe8^DHqnP)KLfp?NnZW@po7UfgHb&SA{gbiABy)Id&50*y=Kebx>bmQlxb zS^(3%6oXrminqVp$P6>wZd>aVwp|IMsM$yXd(Qx0aSfP>;X0g`fs@S(Zoa(SJbSgh z&gFnwZzo||nQQFEV|Kfj-ihtzOeKa@MX1FtR6Iu6EsA6y_54M`qm-iaw5xX!y@x@y z{QIi_E0|pai|-?o*kwlwI@;&M>As_5);n+$C|+xLX>``lMEE*%<;}}zG+o)&hfj;5 zIS{KMA;>_HkImZHhPE33v1PXbsCEkSZ9+0qO*P&p>HspYN0m6WnyU}oHyruq=7EN# zb`|Ue53pFHm+S=^ajvMKov{q>8A}UyqWgME^RKU$T0KS}YAhbbB&ZNwm`2^S&JbXq zkElA6v0ZS>^;kXnbOI%)_V^#>!OmB4Zb^<(qjvp`6B4+avpZYd12oX2?t=9;L$wtwNCVkJdiSm2Ll7(dNceXj(|HXUv9}}X|I$0u?>Kncdofwx%28yDS=3?O`1v5D1+F)T~gW-@#vId#2#xmOdp5Ru`+J%4G*`ujg-E*ID7G*fhznGDlcNeKf>$iEgX^3Qq?xj+x_4Q4l#IuNWMzvHfKRL( zzxEA{H_v`aM%7ENR|=kD44ocvy(U97(7f)j9&%$LhJIZdydLSKB@w|G8fFoY#D^^+ z3l@Nw*s~^Go8#R@-IHh1lHk*6aA&vtn222Dn%q4EYRk+LBpg3DcWx^@JR9zoQs!FV z&#N7!mSp}FK_y5hk*Qc>Q*u}lm~@TT3yCt|Q4@v`e~OsaopjPz+3dyuP@nEZWKmg$ zFB+*a3o%QW_3|acwJ{?B<3bu+$G$PM&G`>Rg+RA}&e7)o1d=i$O2#+&dMVq>=kqVX zFSmR09^@3`bjIis;Rs}6$=`%;Li#yTkGQ1a?TgHbjv}DDYyz7k&3Pulf(2Xm{ z+jB6p$;mc{^AE^erLi0}2;DX0BWv(mCjfC(q|aUr%-9#i$NB6fV+A0L^zT}FKcmm@ z&WY?=>aINw&)Y8h5RJAS$Z#6Hann0XOz^DwO7$%$yfIqF-%Lxzil9(U;h|Odt@)$b zmon&&SZ-mt2Wf<=nopM6-H03rcmGh`0`VCyzGz}sN46;c%Dz5w7oN42w#1$^|2BNQ z*iVMDcx|KeMZiwB)%ExMLO0hpY&sBU@+KI`dkj zKzPtNN9G*0zg3eo_(hgUBLj1WTjixITi2hYK8BVIFFn#VF}rGa7<9htKCKV>;LLuV zo_k^i@CHtGFulq^xtA|(!8CYElusW=hmS`fKQKm!e?EQtlwVvVy`^=8hFX6RL(Cv) zQ|dbp+t&2-3AK)E5qwxD<^)QLjc8lAt;}*;V$#I0o~qtepf_ybKOxl+g#^XC_p(iV z5hH9<0eL*(G%z!g2ty5*Hl0-fGwXMa5M!%#9^0r5e5MxYB({vhIPOw!{ 
z&-rEJ9UUTRWX5U)a?|S=h(<=$o~%)I|4jCZFTY&$3f{I^9He6_rfRXvJ!T)A#6B*f zj*+ZjFqR)4GR)hSxDJ#ea;nYVe)sWWN76tJ!(uSn;y8##>7OHA2TF<%RtU?Io-dpM zsve2nvs3Fwe+-IaR~?Ya^vGSDd9hcawjcM^*sGb9?j8-V?qoP=T3Oh{Ft1eH?QCd#UyF?>oCGrzi)l!dDy&oN}S~}#o zRu3y-2D&;F<+PLx;kIl!9Aj^Y1;*W_Q+FF#-;vd*^u?MfX-}e^uZWH;!)|)WL=Yp) z7EU0K6p+$!z6N6Wd67Segcm2G0eCp5!D;wei^qF9lOd5*{?~T19_Tg61OcDR+loVSCGex0))lGC1B<%Az?tNo?a~3-E^vxOk@g5Ol`awm0~@H>|sd zTyF#jMiDvXUf(2(n>`UF(l=`ci*2Qs3-D>Mq>6(-@_1q=fA7)Iw-6&Ua=#aSkQ;(T znUCKhYA8J3>o4|`vTgYHhO1RBT~fUXQFA=O|`!Rm0Rq3VOql+}czWd1d9)AWx&8#N3wTjQ5Y|sR7*E zrWVCi;~dKIb)1vW=#4sRlTe7=Sd~wafj*Zy;#BaxYNZwurUAd?`oI`UQk|W#P8nk{ zB&DX6IR_+YM*T{I1>MSFpn2zS;;rzgui+Vkt9%V&Qrw8K5UJ>vG%Eo8$sf+Xr|evp zwF_X^_0H%|EyD}Bzy#!`cYowAOMp}Hm5^)3S|^oCS$5{0p%0IX*<2tfSw<@g@^X4C zHst3kOm@a7;_Kia=ldsbjyAJyO$xsB^_UUTPN(vaU*0OUHKXQ)JlsmH8*V7*0@%Ne zt|Bb3#0WUCOBV~n6%3Q;IJu_MuFL4M(J!X*Myy8c`pzH#Q%%?Fe+tnXT#$zgyVi08 zBq3!^1>BQP2B7H0AVCd$Y{89-c@Q(vRoD##KuG=yB$)-~^bzF}_qfvqF% z%7&Xf6Y<#DI_|lU&KGOmWuj-K{|LfJbBll9L>Ye+l<>0lGNAJCIeO+WH(O42_iEOM zBiYY#TJFHh>c=MCRqFM|kkKL6_@@eEWVWerYMF+U67?a?CRdsm;tad;+$taUQLQ|^ zJJgw9sem*U0gBXtsCs^g7YRfi{tR+ufWhdsi%l2{n$m6IJZRPINn4EC)g)lAUa4g3 zv79_UV9j={SvNq*Tu8pMGlFo#5Vyn4w>A6mgC30*P>N-JBiV#eCOtkP;*qiHmQ%ze zOvsvufz}pNn!_%C`j|9eF1r4k=F`Z)Mnng8*UT~}V)nYEhbS-9#4#V)I%5(4IFt z3O<2ka-wF&p8yq?*|{zHq;q{;?dE(~&)E5MV>U*xpt*VM8NVFilLqTtiWV}qpTzwr zp-M!4YDce`NvDwFv&`Bn5t~k1e#W@>DiB0vuWc+^s;`ekarY9H z(9>NzebpOn*syIwP8gV*-L=k&FwYn$H{RT&Qmhw0l^U{+fI0`sjPll^EmFN$(c(}3}D;4E1yG|;V z7DEgLm*`J`Kqfwd=4l5i;l*6w8_-U2uqMla!ttC9Z^ZD(d;>l(_7RWy0}tiX&t^s) zSVg?vUIxCJ_xtZqf5KI4E2)xjay%$D%AUg>(Woqy6DbJ(ZYsU-p8ODqi-P=U?b*x- zK3@QRJ+SW<*gE`9c$O)Xe7T^xpx$$jZ8CxNyW#uzV@VLG&Bq1Q)MZ`ug$ih}<;A-i8ec8-{idv55xvP=R8M2u<#@Oo2Jq2o>JX!hB5my)! 
zn+V7lQS4D%Jl?iNpD_>L#4*uU+0di z;WXdV_*^9~LPmkSCyoBzZDemSMXPC<4xg4(a+{DE;E!0@kif49P?hCT*BD7~K6oVu2fHv5}N52Pin56^cjwV^1A_cV^G%RrMoVg@#LON(7| zi>3ghqR)BpId7Dw@K}!D+DX~dIh!8eWr z1^P?=i@g6L@Beqo(-a!v^gstx%RxfKu$v756nQ|R$y_go$Y@djp;_g{@;Ris4)SwK zpSEjzOsQ%H~QthsFs$Lqb`k#0o zcv%7=2vpZ@2=w+jWqsbb&^_y3bX0w4>8``Xs!XqCcQ zf;C7)R=X-kTY4S74f4tHogcbx!Ey=Wa|9mT8U|1QGavVc!>>Vch(&XU1GD$`XGhVE z=--l}N)o$k=!=p0&hb1sG8A%{+r8w!EZ8LL_H;nbK(Pg4p*rmN>Zc!4`K(xStr zW<64w#i%*y3L~KI8bpI5q6$wN4>!kLU%C4_R1JQ_qF1F(t11xnwn)8)9M^ z;4799Q6j`%wmu>!UF8r|S$|zq789(tCUzp?7>C>g^H?vd;Xb#(%!okMffDw*EtCwn zb;+IAMYvWrWmDB|#e;x_<>PsS$%KZoicYlf^gjFn5!`auOlAjVco(8>vc*NiNj5Q4eOo43l~!dJcyH@|N_ft?z~ejJaI9 zHhlUCxH-FJqvh*k1I>|A1JiF#8`5B4=g=7pIR3LWh45zZ*RecI!;#xq*9E3KOszQ4 zriLW`SLMrL2~>Qhjq zt{v?@G{{uiMHKg6Ca%1@KgX0R2h^;PpD&?!U>Yt)aU4kqC!;XGu(S zl38fY(X6|LsLo-Zb{>#=48EIXZl5hL*@4Q9Lr<3ywh?AwGrYu&qN8%in)2MvSHDQU zB4StV>XLsbhjy{D+1#|}!?wc*)?L%*~;jXQ%0(>zDBdFtQPQ30b6!!=2-zshmJjdmYRDOEm{3-xc$(4KufX9^FxK3!Q@7mA9IWlW zrl!MmCw+-M0>ie`6Gh(vUI3VD1Is&Ik?~z#YLcuzMAVpq6*hzXWNu%z%uLb~2geoF zY3NNF`52O~w~QpBl{I3;uz?)4jxr=vpZ>b<^I`Bnu{5YBf0MqFSgyuhLZ!0pQMLA< zz!!qO<*1e&X)w`ucC1sDG+N0Cot+NdlsZ<(lBS$;%r(zVEcxgwk^J@#`;v4RC=;Kd z@u&|@H|N?LVI`y0Snj^*Nbm{jrAZ ztrKY*bMpz!AkF(Lo3pvh$?D|Sh3*-obd0bqUV(75ZM1(ypO7GrLaIoh99bzMC(pHi zt6CO$Z)Rm)R>pvm5b5{q4HkG(iM}~H))0V=#NtehL^z60&urX%Ij->7b7shT(rB{O z3UewK2n*b5%P$3lmo#|qy0fo{mqfkAM$Bm&a2E^juj7lIqet#AHU$!P&T%=ZnD)h0^_6;Yeu)Sk&?_m!2V6C&V@$VJ?+wfTE8VYk-FiI1Xo&;+}ZwNdT^y$*7Gm z`1vW|0CMtPT3eM#ixp$k!l}s>u|i#v-_o$;-lh&wYur#JNhdPOEnF^LnzKokbx@_C zqq2VWzAv5NRBSxFZdpd^6sLE>)w!p;q|}N1z{XCWw#Q0i_iD&w$pOc#pFM;O)E#^;hs?hGqS-%Pw_G zo!5ul*plH+JYWwW!nS@?u`&ZYS9-}yK*qvXNz`5=U)!B3q={P0p_A{!ImITZ6tBIn zd9sMWkpls(WF~x7gQvg1y1kC~&n!P=eN}pqj!0qK+3Aw+HgvN?1$qde2#Ew-XjF`k zdfCz)*HYg%CA??W9oi_h0{2c!V5a~5z~11Ek#o%NfcB1goa z>kk6SEm-~#Q~FG%a3SAD|ClN!VYAwDS0Me)kn5yD|7{;WEk?msQ0rkgzct-*n?bL- zlRf9cWQaOdWOlMwUmtQ%u9ic-DWoG$$+G8auexGLBC@~|c+PKQysP!`hn7}x9E%ut z;E^HUKk2b-w-L5K$wDvi>Q>naEwyv5mI?fj!2*_vg57ZBCALM_qg+V!kV_YG*&feZ 
zj}ENBDUz4fK^#K!&2&+>Je)?#dm}ZC)G+mD!X^^6d^94OhjUH#f*)n zRjovtw*-7pL?<~-^C!7sZ@mW0=BFL3c?P8Ep+!79$Iib$W=OkIJH(2;0eWG`vV zmvp!CZ6cG_?JPGdmo86p1Y+e(d2VwfD{1tMpPH6eg2*_LA?o6YAtLX?Q#Q^M7P+F_ z#PDN(&~r+nE<(xxYk?2pTb4@^fimD|n>tUmaJ}p|ck!nCy2F#1c3*zT)CXi9ciEe{ zkRc3^+YpV{4^jY7o-Hw+*(nSqjh~dtA#g1i927flCJ9|5_DJ20Sz`XD}jxpNT{?#bDFI{tX0Q{@W}1nAa}C9k&Mw-^lz6B3$>w@ zh_xP<95FZjvjXx9G4rrM@bLuIOYQ;-0R(WoUl^U947bH50FG*YOk|SBPY{IJuYC;H zyy+IDpQ%gebpae?24;x#9XR)`Hj2Xg^$qoz8ZMOccPZQ_n7XJEmqsy{;?m*SMNYc@ zpw&Q6LISsXrZM4}Q!^kzxL6d8L?O*u_)vN7R7`Sk2hQmmkNk!8+l`{c)1Q)E@AnWv~_x3uBH;fLysP0>Q|~ zX8WbZ&->Eb`Lj9C7HjA%YyW1y`UPv^2!xG3aAm4wk&o6x6tq0pT{730ucqEe0Q43g znwJ=d`T^LLE${*IVg22z)S$5~EGMi$q7!;RLzD zt##pI77QXejO`2kVU~Zfnw6YmVC(yCP!vcDg}af>0Wzf!^{A!1e5ccm!*`3y&8^v3 z^Hc1S0ea?hg0WVBXrtQ=heIS|Y|{xXxdYfnwUiQ@>KwV;j4xsOee3Kx z6NF-jN%w@(?0=sAh^v+vqR!>t82I*=`aiHNem`9whL^5`Ks;!fJGbxukIb9<{t+Z(hNx%xk-$H#ZGWPs z|7!!+Y^c|ase326xoH&+{)^3faTdOM%-o?g=SM`K(=W~gD@JMXLMzIng_Cd*x609O z4w&+P@eaTWyD2gUhD&}#^o{;vhyO0O)kn4~b;w4rA4{*pPW_83Ah$zS=e4opkU2AF z3Ov4>9q#?B(H*bDRAUoRs{wWRm|NrGf8pXiB+ft5`VqFd^e-BHDExC$)dpkDRk!g9 zosV$-e{mXA2(r09jUTpxB~(5~n9COCpZo4t**8w%kMjyNR(&_cW^EXS{9nK8iVsAk z@ZsED!(~IPemY5_=s9hOXALItv9>mA}Hn?oMUc$R-P~v#s3b4)OJNWSCywl&qn*}YDD%b8jAmrt4=Y#YC-mS5n zOEs6%k87Sam`Y}T(+cj0`_~ZHjuUG_7K(~r( z{d2gqPX;KUvd%TtkZ$57$QY@07{xsu{Z@LuGZ^4AbY#H6eQ+S%YIOGPK}$a+$nPS0i3GIIAk{L-%4*%0KBTHsQp4GN@1*9zJV8#+c<$pDT93+bDG`gRl% zd^OZ!HFC@>=}(7*M?+Z*Dd@s*TkANOGr=G)AcJ7YDKN2psCDh7M4he)fok2u^v+`r}0lR=G1~x>Y4szM- z-D8Z*Lmtq9t<`B)_-26dpgu*TNhope^CE6e&1>2xfdZTVC{PPGbQkOL?%6`YWr&r( z1#zP_c(wb9%pq%+h%vLBKMmJf9g@MxGUsAj-r_`k`Ka*tvC0sjfg~D)$8bt^{*y0L zXG%=}l|xt^Q&5Q;=|yghQEtL>A-sWm#XonSmLDLO$-mQqfHwp}RQ_cmwZZV#s&w3v zn=uAeC!@&(VeUX9bY`(j3)Pg|dHc7X*LO zmp)LjxFL$%4wS)-j@ume8v(3Fc$JjWBW$uu3rIQ6yJNRVaIpX!@~4BsXrRbY;Ay?R z%Qkm9`GL$keK9R8)<+<5OnY)C)@3)x!)P2M%!Mg&Cl`(^|F#3PFW%vL1YA#O4t!NCLHKv#!3e9)1YSEv+op4)!4+PW)(=(Wu4@Ec;d$&UCMsI zeoDc9Vxo)UiBLA)T0@p^&CUA)KU_eJp9cDIlLi1gQJPEy>4^AgK?wm`XSJhrEzMSg 
zw79epx;2qs1w{1h&B-Rj#cuyGDT(j4^Eea>a&sD`6$E~_YbW6zL-73V1~^Uz>xS|M zXgjHM{9TdHl|=NU%(X(NnC$%ijI1#{Mq&}z@D6a0i;|+g;$<*AjOnD+DOAM zao|_0!p3thoq+@2CWE+a#)A0I+$Rt!*$M^vHQcla1IzDy3}f22w1x3(vKqOebYMir#B!Yak*+Z_K|#wjAPN=7n~LHd17Jp0oi z4>LdbBU;D{A3(Ho{eQb+DbP|^pt}INm7l(lpl-XXgt~ZS7*v zz@z|q!4-uK>hoSJN?sL#*iA&6iGAciPEm#}l$*>aIOak=@+IyI4Y z*XdR4F4_r9K##cFL8>CMz+A`{(JDF96@h2Hi1*W(D>XW#WcHz5<^5H%ls;r8uJ zFaC6749qCzk8+Ydr?{jDE-Me=OAt|L`FZ2Jm(C{;Q_R%#7rApS1D0_w(trhtli+~OZx3pZ@OA8?X`}xI9voPg8PQAQ<3Cm&2+3784Zl z@Yr3=Uz`Io^HJViU|* zy6%2EOTuLtW4DX%#kUwB;m&@zsFhUi;VbfMxdF20AG;Z!1?#&c!}M#x0usCa;aAqm zxRaa`x;Jt1fJD28vFV`xmX_c?l80U>5I z+>N@+YdKoyW?K{iZ_R+;!P*J~`a;mXtJ9q&?f;-`xb58oEnF@X%4&&pNW^&nCJAB0 z^t3UqJektu81|dSK3nwDFe6SxPMT@AXKtFro;PY&ILn5}EFJzQPh;h1`0lS zpQL_#U`gwOuM40`Eg=w-h7V-!v;Io1AR<{zcdDL(sG_QV-(1S*Dj@2)_p^kq=Wj*z zFTkQL8`#e3m?smP&|=dM_To6nqtPfY65nCYOyUs=u&dqv2vEInn77k08hcK%f7F-6 zJR{Dhhj$#T7fPsd$8qD)jZ|_rL;R>`&h&Y zMkJRyKSI^}zhzPM7HRxiEnZq>>$&lk#J*`Ltr&fN(zp<@q;JobL{HonKf7+I zI6t%a#NxJ#B-*8iECRFO7IPBjss)YBw@ZKI82R$+!mAZ6V-2CP@r~i=hjTW0nmBag zR?DHl$HxAd<5ZRX80C!p4mN|0iGEn1NS=TWSNme8&uVJ1InL~>&45i%BE5znNhzhL zekjY;wQDrGG4vi@!Hlq3YF}Hi#U%O%pBHM~u15)f7~E89e7UI=>RCd%O|y zE&KSs=gOpGXC%1Y^}C+a`^wtQzoj4|z$Q1EN~y|TP4?2Fb0o<|&06cO>zleP=U%92 zab;YjRI~5>996_z&Ue?YTT`DNTSWK48xZeZWa25fTj=0Z%4D;=iP_muGPz-An(>7f zDF%_SXk}yBVP*2P)*0I@qsCR*1dWNVHEO=MUaR&fEaP{xBOD&AJ)1M^4om+birmw) z^U@tvP(%5cVC+Mhcu#%N`_|uJW%RQ;1#7Lz?cqw=73P8!3!jqPRS31qYL9WA zpBovOnx2QlcU*a0uc6EW^z{^H5={#WiQX1g?A^amj9@ zlJi+(CnXC?Q=owb96dL02c7J-0=tTeJv=n!&zmUuqjCmH%`ig+>_#7Pu7L98Z;L*z z^VCt8nS>3YAs^#ds+&kV{7R$9wI0Qt**0tHF}-saN&Un~kyw8s%*pc7#pAEW5;va3 zS8eeLefGLDq)Z{DZm-(%GpGyFC;4cqUwIH>qhEUhQ)pDGh0Us^8gG`s8f!Az6BX~L?mIL+)x zM}1H-hgAesTe+$XU(;zCs*P!Duf8oZ$#E;3pShW{JH^+A*x?ek;x8wrV+S>Pe4<8` zApWuK_tgAv-;=JUor-M>@cU{t{k6ljP|ItntT3Xaa8}maF#T!vFuQ!N%=wkTn}&%5tu`j#r)q>UJR=eSsQZJCmn*gkeN~*6 z|9jpwbiNSv^Xz-yj(de3|E%?HQpRV>4{Abi89ycgPFwc1iVxJk=@E0uEPxXZ4_a6m zIG;s5pMjeR#H9Z4EmeW_#S!q(A=SHegytG=VdBT5Q}0MJjo`!~m4+CKHwPU??&)pz 
zv5hPAJWVC7FgSF#ukS)mr6~E2YTSgfN@u3eGey-8@1zv_N?CDiUn`l-X4hkAnth_7 z>}oCe_`R`6M$M;cSjAWv)x6bO5VWxUyCpkL-DKNCn}D)Q3{JU!Wmn0?#la)A)S>N2 z6GtIu(~CNXAc_iyetT+X+4$C$G>e0~dn+~j)%c8sN?4C;4^-Rr%i1-sG`p19#Lvd` z&ic8w-iF~qx@!$`j(H6goU+3fmKR0Ueagb?o1sn@W?2hJ#dxqHng^zJ^_x#wGDdRQU#jaB-gHj6fbM%q$pDC~UB4+~5F;#Cr|ON-r{tsS1V6?d#Ch2Y>2(t>GSEX+=7NlaUs4{bjga=LQoi)U^Pima z$64%L-_6{JylGRoLr@usX*#I1R7QFwaWVF`2Es>-x0){(x@()Aoz`YPigemi z3{nA3zypFiPUpPl)-z0>2AN-1lXV+on7=OeRuZO)B%n{lT=4lxeRD;$)b zBk^-SL8GAz!eJE)8XHy6S|eVr`l1nfEk}=d%+u@MsikpaVY{koF`%_glLSN*qc3^+&14KUp!$1*mQV&v&Y`rp@yS$w*Hs+Taulom>BKaPMjKS9bKTulRZLuit{ zoaxdI!b$B-fa@a#E#R-6^|d$Q**#}D4xu7$sc1${kL@_bYnK=47Cwnm!4Y}sIkt<$ zOP==qIJ{cd%SSS^A^YH#jsAJvb9yz~D%phbKklj@)Z<80bdb#J)%LJcA?~>`y$sx1 zQE+7E_qO3DA8_}04A7-QT$^l}b`vHe%R(2Pgrr8#@y2AITcw~&A$X04?8)Z-Q~h8q z)=5|4W~>u=0-N;OS#}=D@t(?NLw+8=?#+Y)-QuLa5?}fDf|enzjtnOK2k0DH zEbBQmpfGyyY)MUD$I?DUL~YdLil8a&DlaRMp(0dY^|G`z&O1 zX*vsQ{Sz6VL{c)uzdQj*NSkygY(pijBC~6~gSl?@XPW9_1QqAy7RQg&G2dR=tS*DW zym{pnwDq@}KMd64Cx^eP3*z}A2t9p-cR|b3`L6Pf-a)wb4XBTG2aUb zcyE!dR9l$l4KLrWg^#sfxNd5ibH}Q{BKl!W6{Sr4=mQU!z1$*0u<;8)$G)!Q=_8VB zX3^_Hw{~17ENZ=6LM)!b01*^az)Lrqoh_j0No*AlHJz2w)42->q zrLCMBq17s7rD5TnT~z@Nmdd79!^+e!O?ANp!J&HC;~ZqZevYjbC%IA@de)-zxwlwR zzE`)dtUSnqrxSzc1k8|RuoB%?n&VI8LA9z2vJ^IKk)IM82Qd0FbdpJ zQ~wgi3>FZQKh%X!y>p0>nBm&-wp3iJYprL=E^MW-Vv8~=;~TqO7h8}MBBO@NPbkor z*#<)f-mCD>-2n6!;T^ykI?g7pQyK`hDG6LueENY||INAXCp>kNx;SCsQVvuuqd|sC zxFCyIi1D10TVHsNhOJDaVG_aR`^Y}ceG!SlXCaj2_i3Cxt6)gqruKG8w7QEm`RFGS zmg1y)-A`qFtZzthp$g4eeUsts28y~+o^bx>a`2dm=RH>#Rr-Oeg%Aw2C- zHf45XU1qZEOH@3cN6hD!(KeGGVb5wF2oAN1JlmPjR|=j3%X%g8!(G_G)6aM(h3X!Y zJex_qJ$LOJ$@Q_zy@np{;0t5qjzH`Z*GUk(<*gjJv)@_zvFnxD$>nP`La7P^MLOcs zZ)L^Nq_@c6yg%IHndUcqx6rQ0f3mPAE-lRBL~W^=vmN%-10ky_|CFSt4NhLw?jVT; z42ff*gaB$Ix2J-=;G1i|ad$+DF)?JklWM>7f(X1G?eJq`fhaF3M(tk|L*mph$13FI z;cPUJm?0gGzcPBg%4?5CVh=$YmJxsD4VL3V&M*b*f?cbo1+r%ci*5**z|DTVHyJrb zcv%QTn6`h4g4|lHWxBf|Jh0eH17RGc<2+b*uzTebJYnZKyvNgH+Cy6DkxB;($>de&b8e9epWASeb_X#OKHJ;k z5bF@*%>Cm5QmKhLtpy@OlvD^vaU;L>f!a*6>d! 
z3!+M84nzJ;YD|!ejr@iO%K3jF<`-fKHX{HR)<2+=869oA0qEL-C^Q~qxeTJCkVIhw z4DV~3Z|s6B8R}r#AQi&6N6`f~Jn>=7uG4!ES9}m1#8eOO@#278M?$}toW*9qS4fPk z`5j@s-0;oaiU5-BKVbGd!1}_{_5$G5?^gFs*Y!~WA3odBd;5+v_xT;@TD@g{(F)*~dY>>Ore?EheBw|QDsc-Nu#p3^O z-fk40xfe~DSFzL9>nYIow``ev0S#xi(QQ0u=&4PczXcuE6cw$av5fwYtOkkM-i#@E ze=B>iYYSDMA^4xg*>mb`YBuZqZ4C}~KP-N9s8;CztweLCsBG`4al`?0NzR_D@O|62 z@f^SP7bYl$nGGD-&`cWkF4=DzBWo>pINa{-vV#hx;PV6?D9R&t0GWtc<~{*_9U&d5 zf;>1X0&_r9X}y4!pH>D)Dv8h*(M_6Rz#aix?oWFp zg*Y50MP)tBWX4-=#Gd5>Dok>VC@omLHJ_DPe$FtP{Ia*zB-&7iPNO4fJ4{r6)hTOp2 z`XLM8qP-`-OW2uZM9YVnp-4Lx%u;a~qjouAUWn|h(K*c2i=1mkb01t|9A@TOJJ}zZ zB^M|M`kZfVchmJ|+>&|kt>VA&bi0_!uj9fVb?g1#C7P0RFqC`DmUQwe1YB$nmoR+> z4R@}(P%4sgg3{F^yO91wt-l?t%goh}Gfb-D(Sq#48XZ~SOO%y_Eh(bJ(vDra<7oYu zE^N8RP#0OURd4ShL*poY$ieJ7e%lg^kHkCNyPK0GisN_U*q_W$>O8zbtFBR%{DbUs zCNE4I8b$V6cLx_I7N*sGtn_JiWd7)p`jr&RO};RPfE9X=###tr$xF6tOtSU-SGe2T1}J&OyNs!L)3(ki<|_NMx{!yQHHXT#YAn*U_-S?_&W?FJWhxI>CYagYO;|DdMVLs8!gP-t`Ii%tld!EG z=X*#h^z5=XeSKtj8kcdqhPE+17vmF7Q;MDN{s3^N0llfUu&jZ~L&4lBoPpNHm7@?2 z-n~91?YVwo4)3QeW0)T!{?Ts--=ekk7Z}JR4L2>$TSPSh@#~$&qP%i+sq^ z{y6ZZcX|#7vwuMD)5B!l$15|-y*Dr5NuJFYNQHJ+Y|dBXxQb@fu!dsI`1uBPaQ)Utfx>=O#3J45pBPHygv#=jEU~TIcH6Gx&yYTF| zH{INll!DliFCVH7G_Os^KRq$_>nXJ_>8tqIQ{~HWtHm_@y`pxBbhW!HTbSy`?32-q z(}mv91|3gw{G;g3y+=*>X~x-)HYTfDHLq_ISyXEw0A3g@_OMUH9}$?M-cZ5mg_S$~ zsNc}Ftiq96s(`bSpuLC>A~5_xKNr%fmWcOYCiNbTO|ONY1bny3w@wX1P%aTcft!ft6n3c^wpWi4)MCVG2|5jV6VNaAPE_8*gQKTvR0U zbh2o7lWqbSl`2sgqJ$Bok3>K86g6NhFV}U$vp5q&45+b^?<=4WE*WoB(XA9GLpNDN zjs;>7i31)3+N?64?b76^<;Dq||97+SAy0xqdh9WL_BwLf4AQ6Rbr^TcGmmThQMnMa^1-*fTKIL;oh#_hvdtZOHk5p zA_HTqAGA3M2Naw$&(Y7lu0o&R{(um|3yq!JJ=Fs-3BKoYO2El|9M9lz;;?mDeARk< zBbca53`3IzOwve*A-a+0J9eEAK_xB4&O3hwTr^t)w}r-X`ihJQ_!;)Ks)urc{`nfu~!>iwk3r4YH(;b+z|c6^ZclMW~j z*o1W2FWU0|*+9UPg(#vUgU{)5s_W$)V%KO`V2zgUm(|NP&o zs7MuX;)0rl)tfW%)ViPrr+*~wSvdK=#Q4o9G(UEj-v%&agF)29+s8TnwJ{~TVktV$ znty*tclHH&=i^l} dWq#ZejwpUYerC@3yBfX(s2uEC?o|{2{{<_T6Y~H7 literal 0 HcmV?d00001 diff --git 
a/docs/assets/images/data_efficiency/data_efficiecy_fig3.png b/docs/assets/images/data_efficiency/data_efficiecy_fig3.png new file mode 100644 index 0000000000000000000000000000000000000000..52a44e7a23491a0846c54fcde2613d52b80451f2 GIT binary patch literal 146105 zcmeEtWmME%*Ec1Iv>+YQAkrN}ODHJ_lG5GHASETO3`h?#NQX!b(hZVB3@I%=ATWUR zJ9u5!eP3%mU!M2-^Ir>ChxLzr_HXas-us-hqqQ`Y2_8^CKtn?#P*qWUiH3H+2n`MM z*?kPuf4C+xc2IxNJzpxzqg9U3ZleZRwsPunXlT`mcvt54P-9#-6$4K+G%EPr4>~36 zI|2>u_Oq&@oUWhg-t{PmPBoC{hHJIg*5B7+ttY-IrSAL&J%;iYH)mOCXpUrV=*Xou zBf?i-(qDMcfb(|2dk}TJEUp z7|s;c)YMD|qP@ACn_fI?JzJ6eO(7#VH8gapa&WE*mNMUAY(b8{P!MX0WJt66+W_kw zO;4&Pk2goV%4rvx{Xr#B?m!?lAr^1zZzdfC#>&H}QyFBPh+tQs^nzRN?~m`46Y1TL z>l}Zlm*6#D9SuI+pA!zQZ$CxM*4VmVMS@3l1#2+pmxgf^l7GKnSJp)VEBW&76J=Gc z)Vf`|xq^%R`X*LBP@_SSEO6G7P4DV%Qd{EaR>hW}B4yCg>anQKoQ9FzVf)6HOz8)ro$-9~Lr~{q>ZHOvx4ocm_@hAbft@B77}901?Ew(G zn6)nMEu80!f84v9m5{%CZmD}$3J}&#Vd{sj9-cV_me_l6Hf``V%D3u|S?$~jS*m+8@rX9|1LkduhXi2c)A z$NERM(A5W6L9eA7G8Z063d6sVKbNO3?ITEysT1#-ToXOmp{NM@TdUav#5Zcu@Y(7@y+Zv!o|hhv(i~7`=A(-e;CAxdU~j4-$T)CBG1O1(c{9oq z-5>H9{%&k$?g26IZ8xu1Z$8mvNuPCsK00O_ztUjeA04XQ!Ji&H!khhco1eE{`Mxu% zU0@Xj)k3f{1z%l^faTcU)d$6XTK<#YfZ!Wi0@LG8JlujL z|Ec88L6m#KC@(cHvNQ2aF<{Y2KbQE&H-~y_aOiW=wuhdyz1fHU8p2Wg-o2?Qb%{8k z@JgFqS#BlSXAEfq=)6GIH?QJPgV;4+Z&9}u3Z&zILzbvznPlZjYVE;y$HgI0CY|ym zH$M(@kSfOeiYqs4rkmTeSQx-UjR_n?K=Mgbh|$F77mG*0vOQy7RhFFaiIHXtHK$B> z29$$sdbn=fiCFgQaqkU?r^W2c;3QsNqUvA{{w2e2?>xN9<>^4b4NSK5^oj=!dW+V* z8x=z2<&s@J@;G=hgE?y0CT`GXGN8#RKg9QOs-$+Ya@+URl3-#=76m!zJBVB+yZ_*B z2EovKLRI3)k!BY)r?fvGwpC(Iqk9*0wofdI&b`#OF*)QEEcKrMIlXQgP#XzaK9;5} zVgvlztpd*Xg?(p{Iz|T#4e33~Vb25LeNyw|_a?ZI^dt{DVM)h#$bm_3c{jcb{krs1 zgKP&@b!43(bRVDZfqKy4!7INv^B(6$rk2BTGSsEl#ECX@=`A~+A8mx*%`$GG?a3}> zB|y9ug0Cbs5MR6fq6q!(1*0dCm%{A0$%@i>_$@ABaM-V_?C2qrc{4P z1Bb+3v`@x#Rb1E&-~|sD2~dUKdUOTZ?;Cp<+-&@E8E5d$#4SXU zPiNSPpU5K4>u~MmYK!#D)2pDv0-T`iA&fgo5O;ck3vM&t11gw`KHSD1>AZJiC9w5G zDt*g)wl8im6lD~7^w&?`6sm^FewOS+SrW9PJpcEn>4RN0U_OkV@>@=!@yx{Rzr^6N 
z0Vqv>BD^Mfy)e7~qbaH>{DvCMZzY1l6u8a0z~_6#nz$~AE?$dkh)n@p!O2Y|Gcvyu zRDGG?;ZiArrqj-LHyHzWD1S*@*97k~^A>i1*_cf}#BNQ9Z@ZQodL1lY%qUu5os(g?h`f zEB=z}IAre~lSSELtP=>&ub^NvZu%an;95H*3q9TxS|b8a(EzQ#HVYaqV}e)XCx}h=p9|8M`Rs@lD5r z!Wlt#a>jeY$_hP#okZ?Bup>vhdGgy;+rsaxc-Hd8YL|qh%Wu1Q2BO#I@+<@w6X(z! z;!FJX4p`0+mFQDxFw9t2f7;~RgMhF1-M%jYAl9$=5ppOW*NN+q zBUqQI3B|&zviy)kmMg>};$k7}zofFr*~4W7N`+4>+dx+1DJ(Xnu$mM7(z>qb#hXdo zaCVyqmMbO&d&MLM5M|C)@nh|R490by2L;;LGJ^|6cR?t0Y`5DO<%2gKX;QJ5bwOzz#(A)elyNl0 zy0_Fn8K79uj&$n5q!i;lgd`mZpS;<UnM$l#FSk)U>R#01CxS=IZEQtpY;cyRW0+Qp?M0>*H7A# z#fM2VNNz>IG1a&T@>6G7uu@LR%E%x~U|5Oe7ySSgG^m7KrO(3NHm+S3i%`@j&GLu- zb<%l6k7XqIKmx@xakKMTrq{ia`6^8eJ6)5<(DQOPA^YkHa+*`(X$ji<=LZ883WP>o zt-{qmEFg7`vFeC3h~8HpMxoJ|Y%8RXKAEA%!wQ*j#Vm|lnXRXKT(lV&tItByWyEu+I&1Wpy4=M(XL zw05O_&Q&Y$ZAf}hf(7ab*Yr2`L!wr|Y1vY)PaAoWOT^mz8#F>2==xGWNAju7=_4qL zC}ZY`XoiH$UMkJkkcSBql};ajiPu>k@!oor-KA*dI4Yes+w)?-8shqru3*)6!40^N zmaX$d0`xwBOsD8YWOGf9S`vvr;;9ggAu|z=VUA({ZX)%9xy9rlrk)39IuN|kVSp?b z%)2+&6?Rjc_XY3l4cj)2nzO^G4BH$e76$$xs!=^%@d$+n6#m;)vwDys$V>KRfbyhW zPWbAq`@EJPq{6LrVPz<^QaDIS7`V8MaMyQg7_(E=nG~u_{R;)d?1EdLR6DN?>IxaY zldB_hO{iSrk_` z$q`V}zA%pR_#&x6?#7_loan1s;F)uLWQ$LGjmjAf-_v7Icip?oZy~l(3VbJ0!yw&* z )=)Y~$_=c>AreUEe0W zufJ+OJce0~ioL{hjf$*aFZlJwCGEbbC*Y$6{&RVyLz~Kzl5Bl}ALVB+!--6Y)(90& zz?>{*Qzbfh0+6DS@%+s_4YKlluRo$Jp#2sW4}4L#c6MhBs(%LJ1?~n>mk>##*X{pIB1yi=wYq z%F$)m*(;TCwW5|pyT_J?gziT_2_9S{9sG@Rj{;V_HI_7>`u5}$Y*Df%s*x{yWxM)w@ePREMjI@zoj^ul9ghGZJ) zG9K%I1&S=RTJDoS}HOP{(*5s1JS=cl3 zXT06_=O(q9w?bz6XfbR3J%SZZk|MI+i(?6R&YLNj2|JGMO9+1$h8(O$i1t2i<6b5Y z7bO(cXYmz$9a!vR;3pl{TR{Y;BcsCuBXY^Xm11-cpEh#mDpw~*|73C9fenT~PK%=? 
zA$EjlNoJkb(ATpAakitm{LU;=PbJBX3|4c+s5R6ol;S^y@dIL>KTh5umA`qNWVD-H z=OVaja9cqh_9QT}+rYESe?iLZWns}s2757Jl`X8%uunE3-4ZGr&(8#&O>^agyc*t< zMaIW9Vd?A&1&{eDs?oC9Xgv2<3)pggXk?OG;*_94I7~ zTHs>9XP=Z&z@bfXz-e*D9iyBU`0z%u?+x$#TJY4``?kO~ci^7&0|lCvqtTco?y^ZQ z!KQuFy}HnaxaV-5?wxEb+dkS8!F$}Lo4B9ZN@R3!3`uDdN>~k5l5}{VBa||{z8Kca zuGwrpGk2KSndavtp4x~^vZDJ2p931NyEeb6tqqwEhr9I#_Li%=K1PV1#MEw1FFw9T z(&5yFB)za8e_Ay}{>us*;TUtta!lZGHjuqCOXwD6U5blBDOz$X=&7}ZxT@WLvl>to zQ+X&>G#K7>8t&TB5rc`>!hb^61dmr6q>4BkPTFkg-r~Ho2&vt-83bNKx&c_L8|WYb z;LAD*cIQPfI!xQF!qN`xW6xJ&FtpdXm5q+_UX7@{u^>ry9TS8BW16V$FzZj`GMIJ1 zpgX|(RuK#@n(3b_1?V%1La=^(*7G06L=0A(omLnCnKHY2=3!J01w2D>H=kZ)s6KF0?T#+anek4%2^_1bbI zK-5PR5Zwg|Oa$Qv-CVKkB7X-8i}7fr*TWWPk#E3Jf>Py|!G(lCJta(&XjQ|Pyw5AZ zb5HAFL0-V1*(R~nIkA$AsVYc4Zl!%wK$xjkf6iXgAZ&k8)n*wxPdUpyhu?9+$Y5>1 znx|z2$vaNQ>Us=g1Ty(6H}pmIy<-=ojF(hZwAraV)WxBIZzjf`#<05E`dmG?2;8)m z$LcE%xx}AioYC`T-G3|+bI{fi<0^lB|Jcq6bGU6?=~MUs{fu?WmH_a(VFOhc=f1d| z)UeW!1c$VM`+SHhRfT6oijbyzq3n-fU+Pz&l^Rl;*VNcHUIz?k7O9EAUyEo$>>y4s|=NW&J88%lUq;54G~y|7`h z*$$O+z9E*H#*@Zm0L8Ooq?Cc(&Fbdj9Q&)H0jql|mF2Awh4tZM;|KOpRyLcly>h0$C={}8LvbQ6&lK{D6qXqb^5Q~GyW6Fl42 zjy*lj10wvjACkpCKXKLBxp;ibR>0eub9dU?=(0x}NxoZ6p>7hlSTd1v} zLL@P$EMkn;(t^2@_y95oII}`dy=Fzob5Kpi#z!;Ar!Tyv_nBmSZ~z6nNu_}8Aw-Ge zi^3(eLU_J3UZ#a;>fJxEvp@dOg9#_q{8-C$<=g&I=4RQ``UIPZGTxc*5<7XTkue|m z9&o5f?=AfSM^M86hZf!8JzZm?>O9U03vD&QEL7nqX5fOzU9~`JEpXBdmV#|Xz{fj7 z%*Z^6fELZ^CTM}Yn(-KbbI_Ue%^<;}AyLXa zaC)A5%;&=9F7_!~&V}r*LrZLb6)s1jgC(JITiG(fpq*!&aKRF^8_DMkuVOq?`@4oQ z3q`hh2CE=G6{o|-b&>_rhbP8|V5QfTuZJC!mI|J2)t(PUcEYa^g-J&6J^S7DVX-RO zy`F2F9v{jUg;KDnu-ox$1^(2SoaLm;0MouWWrefAvysV%lZx|sHpuseDk}cE7tFRK z4m18uj;Ud5ksiNqJM4~`6q*_oMgax^&+M4uXsHoYjx@OeLIP)P{*v3(zXWxPy3)U* zatJ%mjxFn#nj^trh#1pXO#2^ldppa2P>q$ObFq!jG!deJ?Hyir;f_$2nAhS;Mof0n&KWE%1hO+GG@}I6$@-H?clx5l61$ zY|zK?%nL4=oyKEGetq9zD4NAWdG9^}WK`EMXO-@emdattqfsYFPC_^=)2DLtrN*}f z)f&MQsld{5lT<7EJWujg9ZgooMz|VXSZqHX_Brkj41}sn%j$vaQx{0x5Ca^4`)j14 zM3ZBIccewbdObR5hiBQ)7Yq>@9Q?c_Qb+}yld&(ms+iOnC}Hm`53^wPE-d%ampk5} 
zu^QiOd8o?F_iAg8oACRj;Dus{k&sS=Y4Yke%cHc?nGFbgiqVe`&QYj^ACV5uK6Vg7^nmv>0o3stuo@1=-Mi`*+kJ` z({d0;{g+3k0TtAkl%2imRyiRbK~(AYePwCipN~uw>ca9t+ z+Qut|xAXRH&WWtY2 z&>(dG&LK$j0v!Wa5EoEY;s8s?M6H!=x&ISK8=XI`l9s2NJZKSAyW`Z*MjS{J-;&vm zmT+gnQwzSPtZw`5)aH}751;SqG((=yvId(zAWn415vOdi-vshlYl1oKeG0*;ik??- zW*6%NqEEsIKFolK?7XM)JuHaPl`3Gj*diDH_!R#^e~ey(Re`bR2ytG4;ZQ`IUB z4$wVdx;Zz%U9R>yI}KWvnm^%(?i-?KcYSHlJNbnR}wo0XUXI&QyTERbcz1N zw9Lu-#~5V>9~Y_2YHwK}%2!!^z;zf=qClNufX8hUZ^tzb1kx0_?%box4~){yc6D%( z*2Gh`+>mR;8Kf3+i0w?b4+*6ewOXZG$%nMB7g9V<^hs2*pGzb8rvCCNP=9x#<^JQbHU6yv)lvqjAm63m)K=s@`Ph;!D}Qr zsvOTMXpD(p;=4$w$84sss^nM`dU|MCCNec3KB#&?nyJVxA^#ELG<@*KSXjtW-vAEIrRdM&z6lrwmu`w-b3T4L!o z1?_r#;ODQYLX9OOx{;`t*sK%4RzrAHvs=VIK5N39&DK7u21rC%TWQl5M}PzJ!XXlT zIxT^w^GuVq2z>~$z)85yJ~s#wkEY7-p494MQm1TS>(y>ayRP7OV-%tOfv(0w z4bU~&VW%5G70)O9CF7AgA^^x9s+6pJtLTb#)24W!e|;bw@xbj??yz@?v(aDRRoo)u zE88xQn%9od{#9&$XZO_hP-Gk$MVY%?7v;fXO8|d4S>9`M(PC>2LV`Mj4y*0YKFaDq z8*Um6K7+haDIPS3gk$Rj^AB(_cc=Ykq z%$5QY(mxasj%$S=WEbG40dF@6loSdVd)zjio99pB0ftY@y_qw$H7@DBpgIY}_#fUd zQ?Zzv;#H|Dsr9U)b|vfr;3F;Ung#4Hn`I5DsdGwfe4J3B@of!z;i9~IgZaMP<%S@q zXP4nmj_gYL$747tr#b|7fk0{l^EWL@TE|sy~b|76H!|&kq1?JOnCrO z!mWJ1ddl;0O6A-i!#@;Y?JEW3zB>&^Ww+JR1Va+9JwlO+zyumr?yTV7Y|E&+a);;0 zuz~+9_qWdkDzyy`yv6trWHBj|hCfMFN%n~#IUc)tCpd-RsMwL=FgN37f)n~znbQ-o zFPj}y?@36$HX2oBg|{SJq%_@Z(zPJ%j0(Xkk#g0nq=M9povpx?+)W)`n~trDS1TH* z_yKE;H&S;#0;G8%^VeG@j(y5hpMaLQlny?W9;*)z+kTpq>3G;=-mZs7pi|u~w-_hq zG3`+0ZSPkh{IM5$TKw>bSoCzybG9<|I@0JQE>qe`R)6ZS_4O9)_#xE=h6)Gv79~Jl zz0#5astCz&-0^2Uuq&KfX{~|oV2iPnS&!0UmB!mNg zV#zmQ_ZN^PX*%$EQ+*Qtvx@EGc`!g~&-IA+15b^&shr^al5pUAnZwVjW?4x?6Z^i%5{FCdIP`~Cj? 
zI$SRodcKnVcU9i%n!-qAz}%ZQ@cYgCg)!jgmGi1z;CoYD75E3fSH!Mgexu($J05Ah zWv{(ypNy-2^}2WL#e$f73rA1pI%DA{3C-sf0d%07CI9N$kH->rFX}fL!l>duP!g2e zJfr@qtPcR0yKy_6ypS$wpX5wB8`;{bWTa|YKZ}7$7VNy$@jTAmbb%*E__TY!8FvxS z-W|;3AM2&feosTk&sG8W_BJK%CCYwk+6i(>)PXO1d_izh=I~QV4$g`|ePwJLlT#io z!%&MF-*I1ffk)Dy;qD9J0m+OKA3qnz%a$9-{(>UBK+Cg*J+C>(fnoPM0#udnl+zTS zPGeDb7#H^)>-~D?Oly-GZ764|&~26l*?0^))gXrn)hZ^g0GJnVpH!H8G1ZZ}xT7K) zu#e`mEeX3O2p7_;eODmva4c9h-aeN3vC!FR3knt9?7~WL#?Gk8jvbpG1-zHX z1)BCy<%0>oxUj=wp#^v+6Bn2|f&+?APRGw5H=m^OLMT4_fF|>migti3iKx8`!!rrC ziQxFt=p>F$zGeg^5Xe0hm-}kjOeNzzUMlnGEhdSrnYEUE=<5%&&uSwJ!eKBIg@=u4 zPk{R$v^PuKk8@kF`5xgR#ylr76=_O+cy~~__vy>w5EzC|W?H}p{l|?E9}#U2_EFyA z(->I8{FVWi^C)a&R&|iJLbj`bRC^%Pt~Q4S32oPvlxnEEHnsfB$iH1tslx{lm9(LI zYA}A6Inui(*&c;~Jt_k!;L1&^XIs?&xX8s_#;5`^{^-wbZV~c&Vfrg+OUsvLn znExEX>n{Q(BU55kCzByaRVx*FQht(7M*&6YpBbRI*_B!EgP>mou|G!@>GEB~hKVH< zjg>C=GI%~TtH)2dhh1T zzrk>9zs`egg38`BrQ0>h1w3a>SLGy}3y^64maXW@`2x&*!AkKCwbFH+B@_Qc`y_vI zF=?(tYwn4!47n)fb(~0f0G(Wb(0Hl3-Oo;_;~kLu@N*0oBYpqh`b>mw0bd zgMH9zheq_c<{N#x8vVioQ?tt!#U`7 zHL@c~M`^&1-cjbg1LOG?w?83gZx|N+?T3m};{JDkg(8=I6mVb_kdTdLuTIN?cM_vD+GkIY@(QdP8;|!00cS z-+m=gN#j3%GQj^={f)qd^gWw*kinm8`IwHa<{*KrBc$%xQEti=Ntp7%a}aljrTX@p z{m{ryeJHGWO{3ltvo6n9$I|#fJ0M>VwW4UmCi-h+E3((Vxx)aS3n!TTHcgV`Yi-Gb zHt>qWW@O2rt~K=7nytebqo|P&m0Z|wG6ag+SGg!xYFwYyW)y7lmZT)iql)}ZLMm@C zwJ1?q?L%^je#clEk0WEjl5M`No(f*hykft>d(K0K;QQW|@aMc!<8$Ub7W;Chp{f%p zBreSh<{v1^S}c$V5g`1z(c{Ks;23su3bc!g+p`vf0HmSFDm*%0qmJ4$s_XJZ<$bKK z%U9$Op)9lFkG8!CVcc;(>V_makw1IjtKFcj*F$Y^&OXVf2``&pupO3**?-mv4_Sj( zQYaDKzcm+ootcaqf%PY`_#4eIkVS&vOxbCFcw`J>N$`cW4Fy{>e;90lxkX$NiphSk zd-Z7^{O|#CBYi37l9j|0nJ}Gu90JwfY!GXylRtE+I^kT@?qW-R(;SE=Fx9GsFTP8AjhFo*PVZ$N+mNX2AjRdw5w7mTV)##` z0Qqsn^Ey+}qXam6M|s$s_~}@rGc83_I}M+`5oJp{-%^OGB_Xhac`q}O2GkQd_ke&U z1>Y?iL>S_jjj!k^>QiBbkVmd-4@vaXuphKP7*0^Y65~Z8elqvofnT84a2kLAm38Ze z`oru`>DYCI*kZKbarOBcL0rkiF&I{G>5!abiK+lPbY;~=ykh4dW2>%hzCN@7R)EO! 
zMLe;-0@*#v1AE*JVn(_su043W2|KKqG&F=3gbSzTw?_}-JP4~!%QW%$FYV0{am@qSbvqg zY;#;O?DQubtty$|iTgrvb5VO`` zQyy4Q#-B-<^+AU|^t*n=rHkcl#@iww(I$=>S#eW+9We*#G%U(`x|55+!9K~(TL8a* z|Mq;PmZM$aMG{W*!mSi4XW)b&qIGO``~m(h&2{>5QB~~b$$E6shQ*G*Y7SMzcix=S zSAdkbF?J9bpL*C`;vDoAlBX1a!J>Y5C@TFCv#VwxSq)*#J)iNNyokdkMav)-*AKKc zglR$>xziGyR6oNlVy#7}$+^4K4AKt^^ZYRi+-}u0abCB8$Ryn`Gm5V#Ckj!Ss-Bud zi>xndrB^tN@YB+eyTcadN~hc+1GTa;g+-y zl#F~Qaj1X{rw2Gr*@as0J%5bpiT59#aLtDW<-5TboI@%H34HvxmIn9yw$-hW>hGb0 zCnns98sDFDwT)iz~14YR@ zi9$L#i?QhALp&c_m)tY09U^Rcg9BWe{ie^<7TDKx3rCX3rZf+BC@*((+}AXUoiPwo z5DW3^(QyBn7;x6@r9$*uh*)_Kd=IsT9I4$sE@tx7z>7Ubv_Dy;1Ru7x+2jT)^E^UDl z?Tix9i?1c>j0)vYjI$~m?It<*vw-^by z7kO^&eD$Ttko8N(Pb6XF{`biT8#LwP6R3S~9rov)%FJp)EL|h3Kfc});$}BEqMX5= z^{LlbvGdX11SNz`9mQRNAJa6M>=mY58d3g%_}mlF`Ux)HHo6eji~&?M+o41P_WAl_ zr%bh_NQCbw-K^HrXIwy;Ym9GOFZ=kAvvo0Xub> zgxnrLXZe@R#_3q5Wr&H0nYCMBBm+;1;`%GjcA2pDVBa5Soef1{d=-ww`*UiB%eYAh z6_ZszVt; z#{}f0`7=bfod79n%VU5r@=4kV&zu-A7;WG!PtZscLySD`kakoc4Qho>nxuwMNweO3 z=P_|MLts-Iky`^g=zpAan}uTr{^4SrHZ6N#JexDhor0OH2A_i_t-AZ;ODTNRR7*uc3`Z z&4qWIo9`3TDy-5;aZ0;39k=h*)<`HLEC1XaAav?!XiyKKkeNe-kIlk<+PC%s$TUk= zkN-r|fTHptf%G=vtbfx6s>appl*fsmTUAT9|FGoBykMSYpVsQQfLWL4(69iO`glsZ z*RPxR`H|rIook|y}RTKoly1&>DCN5X&!;JR^gN<>z+O|1hAl_{$RB%G3dPKmnF zZs2Y>Y8gtPaC!F@j}W*R1)aC>%X2mBrdMPmK=4!7t+g!5@rBsz7>lKj*wRn>jJv(9 z&`}})d1#qmQ8X-%P?A3u?0w~zFjb6F^SM|ZY?sovn+SDqQJdoVNSO5)h`sY?0>ynv zfjV64e0MifcV*dZ1&I0saT}iwr1O01&((UWuiJFEXZ#CW$jE40ngt1)L}6`eoy{2E z<{&$)eW~|EJ1VYy%3}d+K`I)?3Z`obF*e-Ggz;C^ShN%bMoy!CoOyad zn&A5B{e!fXyH9+{(0DoyBQlC zQ2Cv_hLZ;!OE6Cf8JdNQeuS{MlfvqQS0&U~(=t9J#X^(CQQnKbtP(E{7++ zhuVt`%8E;p9o5Yne!Xi0`yU0?oH5^=X1-M)*!^8af0^`Yj#vAfKf4JYJ)@u5(mhn; z#64f{#U=d0q}kZJZa6Cj>Vu zNg1H7d{aYvM`{`RQ<;=xsW@z~lStN%XP6J9(tgioSe+R%Zyop8Ig^m3dACpE|DDg; z@nW)Y1^2*rrLuF`ThG>`l~M_kcH*V|Sz{%79&D8cZ-(auIpm2=_Ju+lA$aPBPhqD{ zZ}x)>H|*|Fvt_>@-*^WLdbr$L&?UIF0E4f-Vz|^yg(CBP*YQl<1O#is#*0#^a()|W zaPeJ2V(YCh>|00BhxTmgYhH-jgY!WnY_w~bjU!av*cnzg?@))|Q`ygRUtYRo4W%S% 
z@IZc@#z3*(vF1!X->=MO=)?8m#-Zld!11U%=*ANJR!M~>YV|iZ9a|~m21gc(6P`Gz z<5r&gb@ama_uatRx+5Zz`)g|UM z(SB&!0y}bQoBZgP=Yq%GF;V9<0L|;n-_(Q!Yg)!&Z*Rkv43XmIJDUUujxE;xx6q^1 zIE0^iM$uM54K6_Q$Ow}2bl(Z|;#Topx`9UD5JZ7Gd#o=Zo?paeFV70KYDv`M)?Utk zPwQw)IUq+S7(BF>2qr-t6mgE&%H4eAW)%5%E*WR4mz^3V#lgO5YJ@Scx!`yhI8G^B zrjG|z#VIyXF8DL2^<;O`cyDQ^aSKOA(?MD~ab%WvL6HowQPTdOe6kS>a*aimv>z2a z4t4N(jB1MUL^}JMFEyqgl9-^{WSAtnf)3l(QB@m+fuP2+U;+z(8NF`*jU)z`q zFWqW^Vj0e(QGBj`?-;fAia}DjkAULz3c{e>0l2fOdWs9AM+ZYmYckY^LQO^*13^^G zme}EUe}ExnxZY`8>KS{WbQNI0m1=Ri{2I;oviFiY6FI8i&JX^Yjz}6Auz+?>ep`3J zrnFyb@+JBy+nUm3on#LzZG=(o*?JFd@DU{UN$oyJv#*F%HEn&9GkR z$k2Y%;<7(Z!*jsBcEHNd)Hl6NL4IeX`?%-EGd;nWQ$yw0zE2i2u_2@a{T~vY9jHmi zYBt}|XlQDJBt41nSH|u?wNTSMhaw;;LhQaHZpR_2T;_RUy|3OUJ%heY9obFLDt5H2 zi=67Ea=sd=95+x{m($>`(GIm81^?WYAV3tt&kWZT-;LA|)&l0ew~DNNp73OANRWIU z7FPAc{7Tm3hO*e?#e2kzX+XXU#t0Z1s)%pJu3#+Xpj|oqOg)5^lWh9ZM! z48H^QP?et5S0mMloZgfk01OxGG?Et4CEjFCxTYO;>T|Y*9rKRKCro0&dqWV^W?c>$%4^*t~`BzhJX6{m(_!h7KMB7*^{0&%Cu!j3zJ-M@N z%@;dm#xdgSOLP7=SLc_gZb>(?tB_ldFqE^l;hCKHpGC4sI20{ni3{NYE_lfyqBc9h}mrTM2VwuWnaTrcpZ zXK2WYfZF(<&!D8LWVzGItoy`rr{8~iihWrBXfh!ao8sTE7`RiA&w>z?Z}C4ZrY5R) zy~j0cg&h%xz5nzm?>#@dYhb1mCE)o-Sd_f_Np}G5LG9hloB8MOA?8ujxtmknMeJXX z{)NEbA3(f2b-q&{N;QVpJ${z;lB)V=e{0nHO`Isr#$C^}q|2Y#_{d$>-)RT+x{4bM z)Z4QQ$}a!B{O9yH-rh_Cl%M`bo~GQu`9BKhZPN?pUra-2j89$v`OuN$4~n(n$bU_~ z#$>E@c;5vTcjE4JQS$>9wEqW()Hh{ql&&H}D3!!Y1zr8VIN6=9H0f$%XJ=Qds{;gH zAV;z#MO_wTbufe4gMTcO+x;`^y?vA}2LBq;AvX6e{u$2lp`hSC`p#7q?tS0yXdPF_ z`4tg^H2w?6Xi49Fz9Y6 zWL49Bpz#d#uJw9I{vA+G6t%83O_}NG+>k#*-`bp--G7EQUClc+lLG?-Tvg3SH)m}! z=`zp0XG{9h%LEO*t=BO<5wC6XJ@^_J#d{_dctIBZ`)s8vD*J$U?~zYi@U1jx4_~5! 
zr<87+cleJ{)#J@I%>Ec28u|pgv$1-~xzX8^e}*;YrYGqKsE*8o;M+@aeM8^tixYiQ z|A@<({kusZUC;UK^M6a9NLF*y6S?TANr?RKbRgK{O62v;dl$aF&FL$h1_FV8RR>-L z80X9;BBD`M%yi7NlV7EydDZR|##&VuwLPaDME~oTdoFzQ=2A<-p00Y+=D9g?eR-CX z)iJ7qi^m6xrx7J=`4y&cY}QwgQN7xHd{5InU}`g0LqlT*iU)VHNc~l+seL6fby%PH z$DsbFeT`JQ{%vk|W;<$iSjN(Um~U`&6jzy+QN@TdjrvteRQ_6*sOo|?*~cFByK|yQ zQ(>+B(8uhLzoG0q^9PF1oT&a9Lawt+mi`%{+$P7~i;$XL`fWdfcYoJH7}&&SVux-T zt5QF(T&1a2Kagd2!!s}1^Pj+Ifu?I&apQAYx3ZXHMj7Fq+Fu74K1&N-!iE!DB^##oKGxxQS;bMeCVv9#hQX#+)IWqm>>8;LjXM~X!422e|_ z)0GiqfF`W(RRC6jz8MNfBv!gtD+9#B`^A0bRYIhQg%PM3GI zf5#Vsp+N*px2+m9E>sIOwWnjI;#xyqK{tk}$qtn{x0Q)Jpe!(Wr{obM=9yGtrftQA z(@K*xVv6upY)QOhs*sbZ{NmAXUc-mKZw2d$P#fZKjiZ)_bvh~+n#E|pZY?Pqu)Py< z_?){8*zsiMN`)N*r*RIf$-FO5odMSE!5`axI~Vww9&+hD;}Bn0TMQ(Tz^8sQeXmRI z*!l~HX}KF7s!{a8_jCzjIY#q0X+X_+vVWQ4UcY^cA!xz%6%P|gn3#yQT=xp5VmkRU{k>4Ys`tYDy20+LP#rwd|e(LA#g& z-;!0iFerr~o4r-L-o|bi&oChI?T-2%oO$e)YzlmH6l92Hmhuu>x1xm1%a+Za=M?Is<(HEcbOS2`x~8b2 zEQT2kYB^3{BZ}+vs2?>cn#wPbEw_d0;U^go1una)<#wx*#?hn+wF}2&qD3k6pb-eq zv40eIdPbM@9HU;~O^)A5(LPl8zd`1bzsp(uAV|1BjCdf>$=BjBCfh8K@2UA39LEd- zZL<}9G9z(9#O<8(PkJy?8GQBYaWss~<$ziZ-IBdv8MoTgdnti}_xw<%)0K45!fDAI zgHJliOfh+AXR_O#=W38eGHfH>Q_HPjjpNQP?>O32;!^71^m{grn#A#UYZTeg<9SPc z)Ku{7dY!q2E3n<{m;5uYtcJs+oma#rT$1ECrFCDY*$2p6R_AO?(Lo%q%4$dk!V<-g zc_AIr4VC)9;Mnsi(Mzt(q?{**?BMsD_IzpyBiPaugD^%!PWVW(CcmN2=fzH;La1Ry zElE;`Z#fZtQ+mzRGf!-w!u$n_NgK9bTYrBdpYaX;S6_OhS0hG#X~ z&-O=DO>51wa%eJwU}{7+c>b5|DmM4)*B zZa3Zi*FqN=I`7$o6a8p1n5Vj(ZC2Qpqi>D#r;^<*#0j~B%U(yx*OFfeVJtjT$_Tq) zr8ntuMfd-_b1y8TC_CMG?WdTDd(maNF#EZOr|Y=06-}pCo2g`Mc%gMF^VcWO!x1pg z^RbXhn3EH+I;pu0xgK>WgrU)3bG7YYQ}fFWn1n${M*jC-Db^EOS;vwsDi|BM^Yc!2 zr6OTHlvs!8YRf&wujMV{Z3#CHv(;bTa5fkg`_QD56jb|v`jo?6?5l58!be?J7FNXf zH0bk_b3q%)pEw$d!?~QBQRUtEq3)Fr<4Xw`$4=ik{gn2E>;7)y76|W!T552jnD=kU zNkKAi81Hz-wfo$oezb{{>GMdm#F@}&rEqfPs4dq+Gyxx5B(Lxk^X(Cq@8l*tPB#cN zKa(+!Qe1=nHOE&gd8+m>GTQZXRAlbfU$9OcIp*K^92>u4FIYjMC4=MAE8+o8@CHAb zs%Ln7G_yT0%g&N88O)Ga$@7t%M69HNJqro<`jZ5MCC%6PNOpGYiW2zY&~z~fZ{$RlRynKvYy 
z^^?GcXPjh*h>60^9I)G$>H9I$z8VK{4SvR%bu2n<>8*HWFAgPUo?%X`&~CHu8O-W@ zmbE*}>}rU<7c9p{FRVJx{XE0VL)6g4iI=fHN_M5z-9%_(Hx;+Vo1jAEFpe*iZE`is z8|l6xeD~38&t1waY%q`NPSUs0JOw3sx+@Q&=!$&897nzNcCIk#IZw-S$ENgX&rYy% z^*_R?^hqvx{zjg5IdI`m1bJ(J0O3ptHULE+%k!+^~D)_!qUpV)RUsH?^dUBT%#sL-s<+r?=yB`Xv~U=E}GL5|4=_(S~fCE)!{2d zqxQTm&d2YXbV&MrxT!&k>G?Rrvwx2H7`2)_VX%`d;Z?<|s|~1UM<|KBlAbh1URLp* zV`4bxe_++GLCuv(en=kd{RGR!4$I=hA!sQ8p-f#9iwu^%Z!-6}07awg@->>mbmgh% zhwh}>I@No|Mgqj;d?PRNFj*4uY8sL)x<;!~!U+UlinFzaFKU;zz?fcce5f(OhqFFewKmGCJ;?Qvi;vDqOvVhx7yJ3J?_2~l z`?#65`Z(D;5n?xQ=`@_C6(y1o;+cdw!mLR`Ak&B+xz!m+rk_TEMyAGY{dfmQ=@TbW z&nN4cGeN)<#X{q=`dXHTvWqYj_k?&*35(nOmEMN0Mnt$~t~-=>AN`>cG=$;bT}?FX)z* zcM5$Tg*}=7-0Ez9f#zN=5gXl|n)!p=gtYxTL#wNBcj{~yP1rY;-l04@tzz16?MelZ z;T~rRArd|j?_>RyUC=gXBO-yS8dj*~aolvAXgeW*gTx+$uA}G;%u`ks|3fNJ|MM)^ zQRmdmmUa)gLa=0v)1g~wcVrla1geZw{qU^%p34rHq4t4$0#-d+k3qHqy}}UYbF>Y8 zl1vlAW^CyUGC0?wFVuonvCi-Q36i}OQ@OA(8y)>B+OA^ex-%=1i*b&Qn8#UUT?itd zdLfyQ5*JKLs(|Tw^A1ces=sZHXuO*Qn%BEIU?^V~2VTy0RU(5oBThaMv1a?`&+2x* zJO0H_x(|6G=}+@E;X>)AN#smlQc+T+Km~7zhE?2>x`R)wqJq468Wtin&qM}iN%mCD zST0=6ZyH`HwaA$z!D=AIwg2DL=Vaw|)8k(p8kz4N`=$^6@&M%}Yd&p?-gYro#%VXL zZqMEm6FGMx$_mx;SBsq5J5p1ll12jtvRF0&yN35B~lB6PCu(43l-qxdlfK$_z z6yfZ94GgdTE{}1^kB}#0$VvhfN+j{|E6uz?azP<_c-h zh0I7V32b54AqVre({O5)XNaul(i{vgTAe+}eXC~~X-S58b-HIp`Z$jo+k`}zV5+W` z%GhW<4Kt}po#V&+LFDL6eu{V0`)+DM+{N~KY$XASzd;8s1jF?yMYO75fH$^AS33lM z^D6&&%sLYR{$?0uEaSWy3jf>8M+t+9%RM;4(wmzbs)G$gga_YrKD&W>BVkleY(ziQ zRzmf4mq_7c%Q`>o;;>n{x@=!u-e1-}?D^v-Fw7LA*&g_ZkS@25#I^M*94?y07u8?X zYg|$oc)Xi7NYU;^s2&h9$L=y2q~~XNPM?Wk6BJ~AHG(#iH6OY>uI}29*QD|@grQ_A zKL=PuH6lB69ztz;W5Hz~*+?ItZP6=8@_=B@%zkt`J#=P^NqQsnL2_r7vPz_A!n4nkfGQyqCwrrN(VjfXK#mD+iJp|SubJ87Tv+dNpy(mr z5y$I7Jcn%(sl>1)qOir#GLa|=-d_80X8j5AaV!*gqNhx2nLVk@?L5>}Fc zxMQbj;&GPc@+XlFAvJD_%i2Dzc;Cjo1$Mlay*M+=wCrCF(i4?%L~gRglAI|>Y8fVbjuz@AmN zE#r!~NKya@r&|HFsL5PnxHPXK1M_FY&|IV533Y)%mA=2>6gsTmh!{O;o~CQ?w9>k! 
zRzp5G3fJ-9iqXHHGvLo#wqqb!%)1<7ro`&joBU)q+J;6bSFO)6JJb7wW@hj_gC*zLxMqg{bcYxm`rVg=;tG>0vKLP`%HA4{p2t_2XYST zK#Xyz3nga=IcttYO~*5EEyUUj)pLbPwLf4otdIg60>SD1a-*8PqWUgCvWMV{y3;52 zH%Oir@Ln-Wsd7#534>tTSrO?Xv6tUpJuvu*^BQwJAq1gXY-RXNJkWzByULmhNn=r@ z60?hUz4C9_AceLeaeNn#VJ82|5O&pa%aiYYi(;fB4s+oZ%V@fqAU$M&-CuEAn7y?M zzOU?c@dk5iolJ(bb?tJeavuKyftaAn+*w)Qk%B1$WHK{Gv-7Ga5 zX$YngGZ6Zdmy*>aXUl$mBagz~?B?cmsMkc(#*LY1qmjOc!Cfq$JKLaykNWzvB{|i7 z^AfeNGzZ{hk!@uFJ6V&%i)2cHqgcX4VrRWrwJHuhx>{t1C!9_fjlhJl-)asX4aBHW z$Y8_%a2Qs=wO^L+tfqrS&!f`*ZD?B0dwr0z_8f)HnpLe37Vn3V{kI}Rb&>W%%P?*m z>*10jT~~@UVDDTL=jiyBXtcagm92une7!PLsvoEv*5F@H(xHTEI87?IO!J;cb`fft z5Mz2IQ6-zlq>=edR&&u%f&Qt%$3}#FGiUxBx{z?t+m2y2l&6QBFc_I=mjve}5YXi8 zN7a?_tOw4s1~7>iY@Av-499EhT_*?EIb4$~TnSDDQMC`8=FWJgX6P=}(v2c$s3i9w z;EpKjVM)J-2XNMbtIou4ljgP#LOCI9xo+oR&V(BB(v9A{@M?r)R~cTIS)@?7W=LFr z_L6ITHorIS5N1C6Cc4ZE6Wkx*-tck+FN4eUlL%cC&8PaTHYaDD4&DDGwPQc*u^kHn zvPjSnA!8nnY}?>Hf(g^eI+cM3T@S#&9+Rd5RDd1C{T{txUbj7U1xJd$W|U!rv$EH7 zgJX7Ri#Hk4W%xu+l=b|A>Ey8Sl8~7C4jxnTh_39&QsjC6pD(8v!UK8EKguM{f@IX` z-+NCsHHY}Dvc?Fh>8}?Zo{{g_C%;omGeTCQh<+2@f~TrI*Wf13t-2DTrXiRlfB2PW za69Z?wAdr*=+5cz%z%;44mRmUyws4({Bp$^OtUAz{VpyLOm3}7TV%Nx74J*rmu zBiKS6#Al-w;p&teOA&9IBsG`GE@{>aKYOhtkh_x=tEUz9^Q@1DXMo__2aM3i&Dd7F z1*khkT_THt7pkn3hQKV=6@F8zfxH7YA~fQ75!}$_t^T_&MlqLjj7wdPzR;g~*F-%^iIvghFJ$IQ z26M2vldYL1ZNaiC1*i@QJUuMzn3J>GlDcLDu#!v`4BXE*q{r?0lI^Qy2gdjv11(|$ zG3Y0a7>581D6(xT)z=eE|9DC5`v4%3<}l`a1EVrkKjlhpw@ja)f(yhV zc`Fl5BDm}0WTEUp&p)*%XpY*q?-xsgl&Lx#So!3OxBOzI`1O3L>noUF0`CZRVLj^T z(%P}QAPDSmKa`13QM6ErVe{A$zK{^PXT`zvD67CU1t}}!!Wt(zdAB<|qwc*gsX-N_ zb$#<-0y!A`d$@#BweULUFa)z+jFJ<4Z55GORN%~5QKBLY=5Aj@{#_M9`Ytx78lDt$ zW3;6;R9UU*0;xt}c_S_?^@r{ryrj#f7GtJ&Ao;^#cGD?XQXOyZA5Qabh94-Yed~=r z$H=4bS;-**;~kLr7N_@L*Xm1DybSPdU=G|a$B1NDd&^NR3St(u+A*LV(T)!N@+8-3 zCr4K5KkfoYHH{yZ87?izr6rT6HD&!u!j^|rMM51`I83xo_9x=HP56qe>eHPsMxEk9 zg&dMOq5;=qN;9_>kg0-`fqPDrx%x8}a5^X_(@ANBp&^4guB2KY;z!@gkYCgX8z}_t|1IGDB`?`S(X)=gtR?))_EM4jbGm6cc6zB3IIsg_uu3YR1eZo13{> 
zDGZ0$^4Jn7xy%$aCszS;-R=5i@dB)4!}aGF&c^H|d%MFidfNxNpkJV7fm32)vCwCG zu=uPF;FTz_M`g}s&b(*51ga4F)kQ_8VkMuXb*u0{s z6Di0XX`)F`04#^S!JXv@Lk6Er=dJ5yxutdD_74do>zl~T(dw#I(MN9zzkTyeQRC8U zr+WHWjej>7cF!e{C4ySVM&<$zs01Db|87X38TxV#ULY5R*Q>kKJ9HmM+!bwx7Zyku zv}yv5&3PWOox?xuT%m1}lnXt^|MgH`#7JL2;I1?dJ#@ZSA8ERZx@cI4J0N+)tCS*R zJnC5)zw=T|HB=s-{jnGhE7b3q6nq^p>Vc~=dold%-5?vDS=T{k*#BM;`4yFmp_$MZ1XR}1hfZcvj8t=9Ah?Jn>aN89Dr9X+TM@_ni$hees^ zMi_S3k~Lgc)Z(p<$Fuoed8t|W9J%B*|7&IX&l}x^mC6J4RELDYjob_r;1?hr?6Mx8 zM7CJTPHeQZ9evpc()H&xkS#)Bby1Tse!0x6I{bzS$ zkiYmm8~A^?g$xXG>nO8PVrGS&<{ey<0)Q>coqP3KK8J6x)5)RQ3@@9ODJR$VHKxJA zrG_Ho^0gU_G1~z&lVi83)BlOz_YS@jn9|@1?vIN>U&5A-Ug3BpZY#x9WcKuboPm2S zu{+y5NrTIiRp*X(*6_3$gp4yRNFd&=AeCi0ZXlhU3rb;FBENWfunZY~ritz{rD)`# zY?b2Kxf-$*h{^sjXZjdbp%<|TrCd5t<%iP-=k!;Mfyknxp*hi%>qg-4yj>BDTeTyD zoLuvgSzxGATr~^u10#C?Mmya)@Xbe4G@juqyq{SHv$Lvot8AUstvO0L?n!sU@9JjaD^joFt za+I`bz9w`2V2S`V0*YnEHR##}F_L`BCfi2?d6BXB3JLg$^>GR~u@I0~ApF31gIcG` zEQVlO=Cx0_q?-*t9weW0S|F(aVKy-u&;E{A84N@BZJeEt>Rr~nu#+qGB&gMwmVvU-myIZGVWJoyjBlhGk3Q4FwG!_DZ4Rm(MG~g=T)Ldaw&H>} zl%mju*zqD}!zy;x2lmvMTcLH?f3~6>*SxFcPYL}#a zmkzeN4q$dht}PAS(zT@kYYsV!qksBy4j5QI!d0aZ`;7A@ZLq9me6pUN3IGcH-tZop zbPv1=YrpRl-|DgwoaKDh8x3!~2>gDqE7;{Ih(j)cZ!q*Ebg1s@T}uNw|<7v z3rjz^)sS;Gs^MfnsLSfJYXOshSuJ8dctN)Sr1iz4u3llmBiU8->kzA=fCk0029xP` z=s7l}w0eDho^m^%!@TPCrlh?@(!~^*{U!aUP@%=n_hVz0qn<>*4syayXRhG^~`NV;=A#j9($6WSZouuAh zyItc9k;m`L54t`87^9xfrV^~KOd3gV-te`QG(VIH29BI;;VajxY(G7@FVYb~8<7;j z65*KTkV@}a$aCS4yb*7+eiWq=EBNi(c-E;RRF{ZMAks8u_fh+N{^h(=t-L6$7KxNK zO#(uoJQSSe)7KA|Tiz3|QovYmf6y9-Zg6_0NP^U*p$bV!8<`X--e%9{?_JFBIINL| zY<&(K+pKTY-k^W1l!5vcuPG-9zU+jej5^P4XWdCdS%xq@6W z0E^jUz}O8)mep`igL&A5=y?R4t~VE|zR4MV3<7uL=-5!}}t=|Fjj3eDSK?n6+$NT?(s;msigumt|1 z^7S$72rp_0E0eh%x*HCI#O;pVhWH8oBS47S=Otc9OIP9p2a>ecWX&i&PJkOYhbzT| zP0%l&EcDd@##Jlp&smzdLYN}>&sp-{r>L?zbv#R>3oKf84Prr$GAs@m1nmAVl+>jr z)_IfRDxJIQzr{a|Im|b?ZXyuIo0%9 zrGhtNA{;KtCz*i_n7lYjXOg%sRSBg0NK<+H=hc(Ri?YhgcrJUzSO7?`t>Fm9nG`5A zWOZnPl4Dow@(P!p469d*PE)_iRqmqa&?O{d-tsF%Z^JY~^{0Yrwg+W(c7$&W(#eoQ 
zm@wNzWl z9Wc`5g`TwY5-j?$yZ%96S(9ity9N!ZnURAb0U5Z8UifOnsi2az)o1>Zy@)^M&)qgdPlH$|nM;eU2SAbe0`z{kn z$k!rdnKR`5m>=>8PYPDmv^|`+#@IH$nq&KX^9zxTw*we1I81`$2EQ z$Yu?mLKCD79Y)~fdTwYJdp+V<7H?mKf0Dx2IN4D%g~z?o&hq`sRTWOMo~R%YgmSnK z-T1{%>qnG6IS)x4&$|UDSlH6Fc9;Gxv%YPf?2(xgwdn{+U-$$jVqa;ld;X!E95C5L zi82&H+}G&P17rSJ$i}v9x#2WcsTRTz8)(@lf~Q%`0QLi!+5NUq1L_lC{J+|sap1W!MjrN z`VxEqE528M9yd>pxv60qC;BlP)M%{Xvv22rNq}Xi2Roj5ZYr6bNFXBeiXb1Cpvauv zaul|U)?P?un{xkKiC~UBiB?yB+wR$_;tixuyHD+98J2BD8dqc7Ed$#k&^lJ@Tr^;4h*0<>N^Om^Il$7XQdEYq|@zb)qftRb{?#w{xiWu#yFDYC2j_CMYbbj=P!GkV#!+>8B z8w2hr8H$u;HS_{Fafn~iXbiOtoY(nO7u9M4dBz7{Ft`S)$(en2mxAiLU9ygEIlb=* zOO%U+P731ea(x?1z&%+9EsKe^W8j9zQiyh2!T*N4CZT(md;FQ7oFv(ip3R~gYwxW* z1*|hTPJF2^E-kb_8+*Qg4~{%RBJ+_b+7~kypW03qFVvb)dEawbJQ552->iO2}>Cof6NM;`1Dm`vNH5Qrgt%psvqz&S`3cB)Y z&}}8hk_Hm>^eN1^S`(6!r2Mr-&)G|TnFAZPu0RuDaVA~R!TsS95ME()0 z-gI~J<}f9OkuMq5qKMXEr(F)=oqRzZ;cPXrEQAw{!Zy|3*ncyXB>B13t#A=SoZxw@i0^ znGMu@_UX=oL_+a`B~G)>&4Wa~KlwRG3YX#5S19q)1g5yitrK~zr(+^B22{A{5zo5C z%8(LeWhJD$waF=O98eM}m)UpZ>7nV&5jMA8e(tIbc0RI=?+Xh#ZjuzVDI1Di2K1zF zqK#7s6RC2&G4bNvDsRy7g|MTW0r-)1_2s(;6LH%BZ6d9%E>v74PFgh{iF^8@Qa=@Z zp^E3wT3?1xFTY%XSLhBqALl$~@sRj==>t5X-H!Ug>}uU{$n)opvGp_|5^8*?;$3&R zdlsY+va+=V5Z9B3*@^beDx4^~vlEn;kVqc`|6{hqBtco^0>pEc>T)IpFsY2%^!#h?e(0QCU6@&ldr;YMMa*eli#+995R4p3ZwSjw%-la7FGJJe>W@+WBkCAy z*lMR820yZjNLpU@#g^23t0wGV`Eh~dP|w7Ye`*yRpN0j9`d3X(w=m`?4E7{0`SR>lu*>5x&N(mif+jb5XxktVTcnQ!~5&Q(n zMzM)Rg_Z_ibZ=6L);u_<{rjkb~Fy~K2`3d8S=2lVpG^7Bc}^A4g_ zQ-uCLOeSo%$5m!PWey-f!?F|S^Sv^Ep=>8_0HS@NrqT;%g<60*PQaX?^B|a)g=>pI z(6DlvUW7bXi5BHuAl&ANI+I?-%@%KG`%2A&OG1nVz8ZQJdgGH3TXkIhVh<_2v{oZi z?-1Wg?#nvbTDG9XL3JyAQiPs}i9o0C>REa#bwl|SARfG6s+@f^#U|H#!6|#Bdp6Ii zH4D7{kl!?)oA%XBH z)S$!T{+EWmQ9S6h_`!PFA&uI2DioW!?Cj(SG;B`-b0o+OL=4CZGTSJf9yOCV#+XXqD?~TH#pk`23Y@tI&mxSAMh87;}XheQ=oVGnYV7v|AD^3g0 zun3)vG^xy4+QWl3jE$kIz;d`-FJ7q@Ly9~Jd) zxk~X^{bil(99+o<%mxovK3dZ$)o7 zFEl1hrli$dU~)Cj=-@rC0wW0tSGic_y=8LDL}gFY;TSlS5xB zaZ@w|5KvNV2%)|D10l~L0MiK=Vs%_6dOi~SGPn?yI4)WcV0hu@nkis%?!*IZGdui~ 
zUyB4w`V`vb!Qe}&H(rK-EI_=nDo3l>vEOO|7yyAAed}# zHsI!AZ`AYpDuD4wEKsn@HpUVTtW*1;KXV^I6-qWIyca;$eOu9@sM0cM^%MRa%LX=$ zf6nw*DXJn5e%)OE0$5W9{SW<*bo`HEZ%$(=u>j6ggSc@IbRCocbL%<6-8=eLdSW{? z2(FI^@%n!j-BXymZzs(ZZ%>1CN0VIR&AxzUS+};a>CqxU(1Q|QxJmaW^&Nr9#*cZ? zeqIj>{$dMFN!iJWdBWe%#mT8J^RExD`-a=f>ATX60o;Ce$6JA_a8=;N%#hzPum1tB zLF2=U=3|fpasRuvf@M~pi*~}60N9j1fJdm<^RDC37(lprat{Xg_%{?)9bN|mybT6a zB75PgJ~tPOmNB&Ek`}wPYD_M`PN{4r$9E`ka#p?gx8K9}??0iTQ9tI|;bC>X@xere zUm|~_3E?i28XsmGDL@+%6e##hJDdPWau!Vh23=nLhhbLstNCQX`|4<_fsab$=4|NZ zuu$|xLHI{1H%sy5nkmDJE>^!bCE!_9|L79;7W^4u^_#a;I=o zTuk@0Tzvn<(F02lFI_pz$v3a>*xUvEHp2gXUV`f@0Pc!Lj-U6sLlI9YXI24f-K&;A zp+xD~5|;mIv8r!`BKtC<(b-o={|kK!J#^`8F#={ME6soufJ}Hw)1mN}C3td=CD^{L zzj!`LCweA7_!#5&VqNP|8Fd=K5ad2Ly*(gEF8gtFy?Aqx&*~e_uHkcBm36fj>))?b zDp5(F9{R8dk)Z<+frZ7~LB-8^#jno?gTUo!A_73OF)L2P46vCg<_?2^xoY1AB(~D8 zj+hqT54AQIU3Zw%-`c5Ly#5_3+OOuh4qUzi1#-%Yu@f?BSg26oS0V4?D1IngvUbT* z^kN3!Z$(n*qkKfU)76xL`2GQa>WS~~yHjd(b?`G5fm-BvB(~+Orh-QB9I!n#e`tz! zEfiM}Ss=r)2*QI(vBLk#)*jLfX5H~R1kmrxZ${Vgf@w4C0DE$V^yb*{=6H?O zKSf<#$mPU7-Qp7|vHq7UqNiIEd;{Kl=00-0V zQ|rw|>kUL!^s9%*x+KNKYZz=DUI z@SdLZsru~$g4+_s_$|2~8XT#{gxW*t0sUQ0#xr1_4K4?`0F%>C8P_`i+jO%^Cu)Mp zDJTKNQE5L4MXxqDL{9j-#r9VF5&%t!O<}<=dHwaH9_++1Q#i1Svfag-tHqloitD9l z-UW}--;|+b_!O5vkVFA(YP~w1Yd!5^t*;V$%tHzH$vG4i*o_WxXRBR1D4+s~OZgJC z!~1{I1BUp+w_koIO{PCCdu2uT&$laDpPd|4_-9Ceh{)IZd^~GgL{v&o`Vrtt+TlZ$ zw#ss-<##Y^6wB!4S=P;2Qp;gJ^tG*L1CI4BxSuFF-P&T9a$rMc5Y(cheNLft+@Cni zm(ljQtq%wLW5NK-(d`Rdfhw79)|nQQ{~f#&>OzxtP1^I#+AQ-VZf2Q1TfL3L^jFL)6bFtyh6_p zaAp2`kw)h4(js4!MMw_?C$tw=GC}{aaq)7Uck$->1PUgeo2HO((Tgh44x$yo#k0Y! 
z16X?OuAJYOR7EJ7I9k%df~|VcTV~zHwQphu+kc^_%n(3@wnegdiWoS@!Q`OD5m=R@!x z1-Q`tZku5LSj+B3B#x>bfZzRWX3nvdjFxcSM2Pfbr&c8d28$m}C4mNdCF@m>|L2R{ zCXe2mZ>^V#F`O1A&q_^4s(khnvO49L#&zEVzccf;DL^YFu$&=fMjQa}&qqWkz%)7h zNOCs7FZA2JH@Z2WTD;nIH}bza%48irG;RPM@$Cn^b1GnAFjZ34Zts3gM7zeYE4@MF z=WKcaKvEDnqzxE%)Rlvz1XZ!LWhg=cn|hQo>s-9s;@ zWjtjafvyA zB=Q^Pdc5;7AP&0OLHGFna@t$r<<8%G%X9J=MHYB0YpBGjYFd-vplW%hfr|$0eI`O7 zFuGCP!tM2)931~g4h`5sc~I!ET<*HJ3Ib$Tv5n7zN0|X#fG}ADG&)c|@d0M_76&rG zjR*hWph<`{$pD#r6cGN;Q(qv6xH;_+9aJCWU~;}w3n*@9mhP50`9I5iN$DX66+B~X zNABNeRt-SSyf_;xNjtbl><1LSpO2U8??%*m-mbm7 z1FcOGn3m8&?c(xxZouyC+<+M3jgWtq$1`*bAPBwr8$>*T%-b2+Qbx*M|p zuk{+G!-1$9uVyy^dsRDfs#7#48_odE%iGvL|S0(SO}G7l9x z8pnTxei!o`ANA#~N?hQeJ3J)RW zKf7{&RKtBL`~Q9|fQ-MRC;m({*y#f17JP4Vhx!;JTp(D>67l`2Wa0MUHgUTB{&k7p zaTd3z`k%igYaJUKo2zql$tYY-)G;)1;MD&!07KBiAnsOAq0;={b;#}>9v)7X=^M;h z@3+U!+%e|w76pauE#iS1=Dia?2f=-S?o;j<)!=vKwXZmIKBe*lxNCQQryodf)GekX zCq52tU7kghWQe&S#Iu z8TP^P>Uz@%_U+^B5PV?r1hV(*slE>hf0JBbY(KA9qt4 z>}BIct;dd6`*3%8fD9}f4zu(7;A&`jnPE}Aj-hcOARskZgQ54I9z7e_GP<#%uhnY7 znWQqZF6Bnxb+1d*{^Y-h$loxOQZw0SxA;%`@;5>U=6L)+s#S9_tBR}`Ks+{Y&t^Tb z0VO~0DEUE1cpaixh&r1EbU0oD5iTZ;nEiL@R@j=aNjees+$)h%T`z5v+Xj#fk49L} z;Y&Xx%(N_p%vMPz^U}T~-oxP$h^gd! 
zsS6EN;i7V}Zxb@UMGV}Ba|lP@I6^#ti%_y7649E1AM29k=Ma{MTipW|um0xa4fm#x zKpT*f%T|DC1wfkJQ{{VE^E1Hm7aJjvQT)LnRKJ^=cH|7CU}#_I*K^@uMZZ=ct;6$Q zOO5b;Rc^lOI#H-?A61aNGRqCU);_*AVnfjSH9Rx!G3*u3)3PV^XK|$>d#u#o>z%0# zK~_dmocH>A@6Xm?*MB`|iU*X1=31@4{}lW8jMRPZVMej#)4CdM-`{~TCIH$xNNE}g z6KF>lf4{$3;QwMQ-x^c(6;@_c8#udI^Z(TPejNLx(eFy&n#3#P5mt|Z_LU7RjzOqf**EMBhbybSw7)bK_&9$@tEeYM9USd|- z##uNBs$OgDZ+6*%-(T!~jpT7WLod?%u*gKhX9~R8tsC|C34J5|lXNY4Y0Lb% zKJu||x{t{^Gs`;9c?yWO=dt$-zi^N%6nZn8<;A0Mhs`^v^+rvD%9^&|&Nox^eh-|a zE&=E~r_6R@x-1Q6WH}RC&~A(vJ17iXHfCkf2)>bkLX53cu75RU!X$~)tl>Y} z_1}>-(>DA0NghfJQAc!T@E*Bj6#48m zJ)8G2OozbGMw{$Du`FXWe_hD@A@sgzIjCx7N-NGGsp31{&1Z%OkO%0J53k};f3!ls zwX$p(+v-rUztP(O3ph#uY`}MW?#(w_?p4b={gjz_e!6uS_Llz`AGiP1PVBhP(PMkm zg<5!=DB6TW*MRP%;f*;lEV5`3i)}-tROG~^>j6jLTw?iId>BAt#lN&ZhYeRD?!>Bu z<;@CBbvXb)Xj7_|S^M#}8~u-q+<^*s6WQGL?IS|0GIqUf4W_DkC?LYv(8InlFf!C8 z`h6!RmM~Snl#3|G3dSqCHJB55XrNI4KG~SVF-~ASk9o#Ke;vc-ZKBv*0cT&3w`k99i&-cPTlxud8dkP8UmicnH(v31&$xS;K@!orJBB1SaF6;GVJ? zEhd8fUvH5Fw8y2N&`y+g4pddM7O^C+O-wqzo>KX$^#7%Qxmevv^;>~ATV_5hef%@F z37jx-kGLSW&#ds8DPw8)2}yM9IDADBOyoC!T+^(ApN=h zwy1LRYUT4uOxW`4+6n6G$>S~emoIbHwujU%och~WCIQ~niruG-qQx7Gd=Nc-x~gj@|jVKT&* zMK^mt@_*)3)q*l;>*uv7tZpLWA1&IFUui4w5s)`1OSADf+-Zp7c5qnjZZ(^6kh_v6O)sVvgl=Z??I5hVB0>!D&LYHfT+8@ zx~s+i@sOORw&M+QYPeX>qel8pl2aP(CS<=UxjC|4LhAIrT87hYs}p0y;)ezxhG%s) z_1RF>{H`rbh==P6drJc$t5J?TVs2d%4^P1^$(Y(7W7;yg~*lt03VyYJGz$hrE`PoO=Mond|PiXv* z^axcFRhKf-E)$)tmh4JJn(9}E(QKT5F%HiAx}j6DrA-q zf9w>}U8IZrgBRU$`0?fCP zR|O_5e)s?7Q^%Ic{7Uw4dtt-7%g@NSnVrHlOb3k7m^w~{Z>!;(wdz-95@m50S=83E zW9HgQe}k#4hN&T_?OR}Dq^22orZ6#CyAyTdhVY-5EVr#L+wv`#lDzR@E_*zR=OfY_ z%Zk*GR9wdcgnq6>Y-V=E)pH7j?|)0mjk9YxqXx`v6?Q;ZR#27>-gKtPu zy^rMH`v6Fc?^VC_Qb3>RdKE)%;x1>(3~>JPV_!iIl}AGwsS*aq&^5Rb;jP#$N?7AJ zf2#UDwYdAr+o^wZF;R$%A;d!+4A6}hmsL1zaH+`oXf_3cg8twJI}*8tii5 zWFzptRJqv6rTIyHLsOXyBQaqP*b-lMjxDSF(|u=D=W7y2QecWLL$d!mG{y;=CZ*!d z1@f+^A$y@|%QLFFM=_mX9-?6llCq~*%xbb!8g!P+oiUi|5z4P^TmbzU;Ip;wx9WCJ z{#o+()DEzyzO`ki{KyX5aiV9yo>MQ^$%~oEcvq;O^GAMM#T`AW!jEIU?1YT+Ka4|b 
zl6?ARP-AsbWdfB>5+1`tN#Hi>Nh=J8A>cgn)$ST2(7rc@<~Dd3iP|v1$NC;N5-=j+ zBh`bIyBJ_S+@>ocB@f~C4V;yg^g66gR1xdUBB3}#Ck9X68rU5Ln?P5d;20EV86|F# zd#9(htmrSRLTCgC0jmwJ-*B?J9)D<`;s}L%$3#gPRXGhsYIt>-E3Bm}U!kjRjwm4% z0Pb4|z6=!Mlf;EIa4JoYq2?5N!e_^=@v91Dw}H=sQyzjrwk(S?>s@1=GBxBMjLc;h zz;gMBPJ{4rR&CeFhA|_Vqj(nf)FgV<*tb>u-{(+M`>r*#ws1btZ%+odn3zoR^F3$ZHqn@dh<(`1~Wdk{w zX`-2)PFV-fERwHX=hu!#Y$i*_rWuiu6$A4=pdn(p(eUaDJ}?=Ook+;ug#Yf)^>zlL zo4Jf#w1p5>sRu+aN*fd$nA!Z&t0myZ^bsCK*~3NW$9=C<&T6~uQ7U%cu9 zhZS2oS!IOSu!Cs6>CtOD-ff84j$dV6+RZ1)>2!ET2 znJ`E)5NEb~jxfsvBPFi&ze2fS!@3PA8LG?QRx+a;6h@Q|MYR2|; zl+u9BI(=qFyHN&qXDF9V<4(yziQ}7z^0|+gIkQMUmEMbw|L%4JSUxZ!z&kV8?kY~f zEU%5UO2$DweH!NKlD%88u~;}Nh1!}iL!VKFWo;RoF-k@T$s(8dr94fFUUsRPNvev* zdi2Ykt$$6OQsdBi{l;?gKwgwzFelXIfZlx(-o99`B552PA#RS)mC7%xb}*4v$hFu@ zK+7#0b7?^V_FRqZ-J$*`Jqz!9&C2)G`g!C?_^KdphW8p97wD=V308Oz87NGBOsd6K z0$IA%CW`!)yuj|)bo>+c9nE*tmuJP)Son9%AIts+Y~F?9GF-PKpf>h#HnK>+vzCi+NP;uY$CEYtRfXzGY(*hgdn=6-4F2@VtcNE($)4C z^c5OXSehwBxv*;!rC?Mgy-P8RpEz!ISd`1l+Rl)iWe6tWTltMH^bBTFMilvjZf(@o zAak6n{sh0r)8%}3Z@g`2ISyX3hbt8UQ`i&-`#BiaZovug2kyy`zJy0Z8(Omu7Du~T zWFH*)-a&*F{QD+xHs($cR4iqmF;fJvY;>R{$ve0He;9kquqxYi3sgWrML;A40Z~e6 zK|oSTkrwF&rBga5AR(=y(o9OaOG3It=}t*$P`W0Q^Sl%B^ZWMR=bZIxU2AzUpZAG7 z#~8O!uLpOlAb!PiN`ria*Ep>VM3qnDCX$z*5s7C*ZMdzvW;dUk!6C?_o8s2Pw zr%YMPOhncUK6G&=Ky~J7aOXGTSdd}lskJ2syss?@Re7ETW6S%?IvxHj79ZaNg}20P z(!=NrR_kfh=O5e$BW+UdI+_NwaDRAW&7^&+r%y~aYF5CKL`!1wpaav0%R+-WwD~^8phM7ui ze4OPrgQo?}9#A@+!R7!0l^p!93tqWu63Z9&6!4h<3MY82?b>{KEy&TK@D~~Ze!3ru z;!hb1|HnSxD{#g#ZJSdrG9joPLO-zPn)ZL5EtFwt>zT<>(RAG|T+A*^9_T^_= zJW1g@p&}wTiLz6#8JTD)93yOn0@JsihTaycn)%23HS2YUyHPn0U~em(BlskngpKLT zcRq0cIXTeSUJk%Km!3vIF5IPAJ%yaWx&LsMG}F}R&vu)c9!u+zzjWx<`>R@TR+MP5 zIK_{Fy-pMr0`qa^x#o(m)me@tHya+TG#_YO;9*wV>A6kth(6LxiK)GCISW6;vl?q> zdVXsXTf`_$XFn_4lbXqzRhv-6?e4&;0Q52-Y>=ib3z_{)LV)g0k35~&IJcrLS|)uH z=XF8#OFjk}=2KQ3WskAWhj!_Du{Qp8T%C6^Cq-jQoFlZ#%j=B(UstOGRy&!@`6#74 ze9Hex8=u`$n=(f4n$cxI6|icmuwTPHFp8@>7S4%0|3^VxPa0?FRIU)ba9ltEvIUSECX*ml(q 
zIYAjkzEd%#rfJ$d8bGGqe+_asId5GH)A0E1qjOJG8_&w_sJs<)K>wDuE@O)$U3?A8 z7G5*s@h}eJuMO*EpeGc!keeuy< z9u1;ok&rN)A^a;{S11%LkK-`Sd~VpDAdg2?pLC0D*qv7r7Q6D&od0}FGs~PcjFUc3 zWZ=Ua1Ain**n1gP6RnDe{?DGVU)tc;x!0-}{8x_qr+>GG(uEz1WM?^7Diz|NAnt>; zt{(lmrz>mu*@XzB+(VP-w)HhXIBx`oQo62>gyvFfW`z9qvQFEx{>9|Z)9>LHH*9%A zQ7jm0^p8#azgk}qkV|`V*N<>(OmpXcF2c+e2|LWfzciQdzLqBiCV#->L7wxhmC{XjMkT=05d z@=f7yDjwVTm_Oe7!A+J4223=U4hQ?tPB@(uje!HumO7TTS7JdhO_d2lQQ!Xrwmbdz z9*}l^wRRZ5o&i0wmd6sQM`b7UxHpQC$~1MX#kGqVDV7c=NDnJ2IoVoeC4qGlrFN5S zM>+?N9FR=GD77~5Z;vRSDJlP}SGD`QFa8J1*L4{@srQortCFV!RLFycWp#JZw7;~S zJdv9LEyQ-$d7#BU&OHG>iN_RJcYX~dls%s7124k!-Qn@y3V2p!ca%pySqNewO@Ol1mVf1OclylOh7^b8y54n&JF7~J` zRFpan1yeF643^1Q*pbyY{{%n(zkbSXKr-@ra_9TY4z8{-i0@^<&6#v;a6aL}_Z-ya zz}~lB;L(06uSpYszmscFmCFuw^+4b%3;J!wpj6VpazN~IT6X*&XZKet5+#Fk(5`pX zaIkL)Y#$ZV;s_I{$-ZSqa6t<1?$nP?Qe}xQt@e+o95VGJZt(vAvl=9sxJ@@ChT1FI4jj}{(Ftb?XY+mO=>UX>y-Tktzi zy(kXt%LX9$uIYY^qM^r+o2niq8JV>ivZU>YuzPW931@chk6C~aF^Qs ztq|C@e+|6DX*bUOPrE!X$U(837;oZ$_{#O0gHW4{Gc2Rv2uS(+pzmZ0@BakNaa$b# zT4mK=!#=s2DX)d73k%h~k{+zN_K}?EDn+fDzINKO#k5-6oR68SVO!Rtt_xM119_*8 z&e1em=&$A~fS8r7>7MN9PP!~m3V6fpy{Lcb(!Kzw;1TpgueyH#4^UG37Yw1joqOpw z@DactyO1#AVc*MNS}4KhrS)N-?;HS#7}RKj-=J}TY9D31_V9ex`c?fKH`#76NAR z)NF)yxzBT6AM6K|d^lnm{C#;0Xf%i@WsvwZ?|FRiPgRl$?f!vmpsd$wFJh$@xk6$6 zx!yG7GWUTs`53F9+wQ9Q%=Y#>3UH@*j{z3)NEp#5thWfk2f_^R2m$aU3T$_@_6b_P zjj^|wg$UNOSv+FiD|i6_nmG=js;eP#?Y93wK2SL3ZyRd>O`Ae}%$Ux4=QY|$`OA?v zyn=$&pw-(E%M^`TT3!^8`{lt`X&*@+uqjpY9Z*q{=C5%D7k zsyPHu<#E|p@^Sa6z1xTV_+u@Hw-&mfj(Wl9#!N7q5J1aPbOHhSn}W|?m0P;m@X03# zR5<;lTE4DoJ2l@C`v{tybC-R^rR`9q=@NUhRDWQ3Qx>{?F)Vus#>U`C z-(vgxOJsJ1A57>mQ^UFVI+~&kKt>tZs<*FZs|9R%Y%db*-Ipi*gm9qd9P|Dg`slbF z`xS!0?gFg!284ZNgB?h(f4)le%0LkyxnC+LzRkFEXZ&E`CMW0EwfLx6t@ty)iLz*< zbMa2!ZzGG2K7Dr$zJLB|g}R;-QNwj3&8x8V5#Tf0Q-j?0*8NIfpF8>C&!YbWiU# znBo;?17*f*0K9x5zzys)zHY$RSt6U8%ne|E%(?%$I_y0Fr(VU;-A7-bYBJBS_SeMX zd|r^bk29%2bm45-g7*kn=#+Zg5|aS%ScFy8&J1YRISp~3~ z06Y183p0j_+37c`ZVT*u{q15ca4-N}1|3ZNK|0!rq0z)y`U2qj*uwh{ZR_U&2KkPi 
zeIx6*dZNvBpZo1iz+}*~a02xAYBQ;B+^2JdCvx($)(2v|3SX`R!LA4jZvUNZcS(kW z%BwbwF8EXeQ${Cmc{s-kpd$g0&q09czJ|vZgL?T>7}Qg+mS5)phI6GpaCW=rf`Avm z?D@#GL-drx!z!Nd``zb+X=5Q=8S)GX`N;%1kUHc=J=ff8pg+e+>y zq3tAm4&Q`>;Z~vDiJzX4_emZ=aN~sfX&Z(40;Zqa+gymR_sFzW0;Pq1E%w|>a}3Vr zjX$c{?vSfSuE%>Y0_mtZ=`RTA^S2fEYY~+*`F+%g#C?vL zbFr^!Mw$WG&g^V{&p$*1eI@rF03i=-<2RY$Bu1hy$&dkKkI=RRTM>>Uyq~n2JvQ?e zAIXBeQ>?hbHg1n}ET3bW0cx%`0yb$m$I&g3&h{P`ggJqktmS3=2jf~SbC+i!Qdjk0 z=RF(DWjmkyCPvXIlt1+BU@#%*Jb-kwacBeql1Z^Jaw9z2RGE>)Z;Ks$;KB2#w!DHK zKe%K#RWlvjC7TU|6@~8f_nHxCVc&2i>5%BvQ{SuI#uYXJ^pX>PQW74mje9-p4-gP| z)c1hE18V8DNb|6`CUXp=B%lCHcN$TE;QR2NZ3y+P8a;rh`-i%4<6qQ;SBP5_86hyi z>Uvrdl^2^iKt_Pfs12o zVd8&4n!rVO9)V4Ed-0-yBVs?h_AncUXoX#N0VM5jxajbfJakgnTI&X8#K;YtY?#Xaq<{@W$Ib;SP@%I6CzC#AvA7k1^px{BXf}l|m z6SL<8EvsTq(HutM95y}`d*;Or{yF?G4w02Ror`broBRr=%y%X+jg89ifyBWXXcFvI zpwoAk1rSSLXAiV?pQZRs6-c=<&(2@4&;@Tj_(-bj&H+ys@qZ?Ak({#^E`1& z#pj=^_@<4(i@w>Ms(;z?^lhFs%Zc7qcz|dZSul|Ja-C!6k)8lS*C)F+Y=E=lq=nt=MFCm=pnq#V15QC4)>Vk# zvGI}`;dMWz=H&P%Y=#g%Wrf~+H(+EmLBQpzk%o@v!E<_LTG-twzZzjSk9xHVv`!oc zyru9?WM0`Fxhr%qe7ET)h+#m#zW_rnba|}XUG_l#IrndlhQyEPw!GB+f$M5WaYa3K zkKk)$_NdJKV)R&1}!qNT#s}1 zBA8fc?A^d08PcjFmKonbBm)1~-o|s5!@gEnL|I zJ|k~G{g=?@oAA%b!xig6kK+|xV}fjhC1Qv#cBkder8smsU-4n(tFSgruP>E|Dk0`< zYYsyf-}wZbb{+d3-8Vc-(oMVu!yHln>jGPUf`b;{h%EL1JPY{tVitI@1gO;;Zq?)%g!PF2bfM-0%8lBS zy%PZN@@xbKJ|iEPduENP?u>JsBRWmd$(??1W8+%*n%@%4VBuhdT5f%uNBx5OK5o>v z-U?9X_TGg;Uc?^H&etBj^U3a7snYVH2Ad4!54?52-D??$gQ+fuS0QbG}cHv zlWVs>LQ+i>iY@ZKs;@1jga4vT(;yqPp7#MGw*vkZorC1g*K;RZ7lhNgGjU=2tKfal zQlCUctN4M)5dW3}?o|&e9p?@?x)&N zjE&dE3fH=BuLXAAnhqER#ZZi*El-#X0*9wmlvYRRBr>aqxMJFQN=E&NGc>xSH0>=g z?(Y|bCPYP|e(6n2Q z?H(@jJ13WPhGzQc?Is*F>6#DbszLuf(aXT>Bi8`=yP&2p7aT^P*pbs<(`Qg^j~8%Xb&cc<_b#`Yg7Y?WIZ^Wi2@+UCkwVF*g*q zXz$FST3l(gS-2yowZC){Cw?VTLc%FT`KyD%S*SvDxR-HeoP4dne-2E%XZU8x zU@@JYj~JUt0}l)8<){3elZQq}TlPZayod-wxxH6VRs_gohp`S2g-g9DOU<VJQ}Yp-Un8sbRCtwjG#*5lWxT)ev!`L(|wXMKlaUZ}C( zsx7(aaxtlP!Z+QCgpxT7{roz>f*EUI{6nLPr14C7{{B}j1qd3G)O|ooZ}W15>|dE1 
zNTQ^H)rEV2Lc$z#mkWwg)<^cC6mWq^<(46K-M1n`Lg2ItzB+lRsZ3_!Z?f-A`31tl z$XRU%7*56>9XEv4i{~HyDj3Q;?x8*=hyZyra!wxU3#Qn`$Hn=I#Sus3X&-_AarX5E z)NH$E&Q=|nN=Mv}eqwcsQFqK;A^LC>AfLmLOYplp;2%Qvn(8Xhg9Lx;H4QDgILO(Mr&IDxG%ZUzv- z_9*v*6}a1~?*7M=8My-ojo-(QQ&o+QS8AXeshQ)g#nlz1aB$MfcegXz7`$3x(^MP~}*zD(c9+tS0iS)wib z^DD|uanA!v$;G`-fYrd>hBpPu1fQ%YYXJYlWK9zkN~_)WK%FPHBMpV)2fX87yJOZ{ z;dAX*=l#$>l!G&vqA{*FEfV9$I@o&(8L%f{=IkEJfdf78g6cbgFls*D54tHnuKt(0 zG&j8q+<+&PKOZ6g%$xyq5_7c);9qFuX#G#7)a2$KZ1aC1!*8yP-f>o?he%Il91sK7 zCA`Wp6wLmEhIeVGoFsH0?N;zo~MLYZZulF~G-2F>u+>8}G6m;8T